rush hour 3

This commit is contained in:
2018-02-01 01:40:07 +01:00
parent 95d35f2470
commit 970af03c09
18 changed files with 183 additions and 177 deletions

View File

@@ -147,8 +147,7 @@
month = {May},
organization = {theguardian},
publisher = {theguardian},
title = {{'Accidental hero' halts ransomware attack and warns: this is not over}},
url = {https://www.theguardian.com/technology/2017/may/13/accidental-hero-finds-kill-switch-to-stop-spread-of-ransomware-cyber-attack},
title = {The Guardian - 'Accidental hero' halts ransomware attack and warns: this is not over},
year = 2017
}
@@ -167,8 +166,7 @@
month = {September},
organization = {DELL SecureWorks Counter Threat Unit},
publisher = {DELL CTU},
title = {{2016 Underground Hacker Marketplace Report}},
url = {https://www.secureworks.com/resources/rp-2016-underground-hacker-marketplace-report},
title = {{Secureworks - 2016 Underground Hacker Marketplace Report}},
year = 2016
}
@@ -262,7 +260,7 @@
}
@misc{SBLOnline,
author={The Spamhaus Project, Ltd},
author={Spamhaus},
title={{The Spamhaus Block List}},
month=dec,
year={2017},
@@ -321,7 +319,7 @@
title={{scikit-learn - Decision Trees}},
month=jan,
year={2018},
howpublished={\url{http://scikit-learn.org/stable/modules/tree.html#tree-algorithms-id3-c4-5-c5-0-and-cart}}
howpublished={\url{http://scikit-learn.org/stable/modules/tree.html\#tree-algorithms-id3-c4-5-c5-0-and-cart}}
}
@misc{DENICOnline,
@@ -342,7 +340,7 @@
@misc{WannaCryTwitterOnline,
author={Darien Huss},
title={{WannaCry propagation payload contains previously unregistered domain}},
title={WannaCry propagation payload contains previously unregistered domain},
month=jan,
year={2018},
howpublished={\url{https://twitter.com/darienhuss/status/863083680528576512}}
@@ -350,12 +348,35 @@
@misc{WhyDGOWinsOnline,
author={LITAL ASHER-DOTAN},
title={{THE FBI VS. GAMEOVER ZEUS: WHY THE DGA-BASED BOTNET WINS}},
month=jan,
year={2018},
title={THE FBI VS. GAMEOVER ZEUS: WHY THE DGA-BASED BOTNET WINS},
month=dec,
year={2017},
howpublished={\url{https://www.cybereason.com/blog/the-fbi-vs-gameover-zeus-why-the-dga-based-botnet-wins}}
}
@misc{IDNOnline,
author={ICANN},
title={{Internationalized Domain Names}},
month=feb,
year={2012},
howpublished={\url{https://www.icann.org/resources/pages/idn-2012-02-25-en}}
}
@Article{Salzberg1994,
author="Salzberg, Steven L.",
title="C4.5: Programs for Machine Learning by J. Ross Quinlan. Morgan Kaufmann Publishers, Inc., 1993",
journal="Machine Learning",
year="1994",
month="Sep",
day="01",
volume="16",
number="3",
pages="235--240",
issn="1573-0565",
doi="10.1007/BF00993309",
url="https://doi.org/10.1007/BF00993309"
}
@inproceedings{Stone-Gross:2009:YBM:1653662.1653738,
author = {Stone-Gross, Brett and Cova, Marco and Cavallaro, Lorenzo and Gilbert, Bob and Szydlowski, Martin and Kemmerer, Richard and Kruegel, Christopher and Vigna, Giovanni},

View File

@@ -1,21 +1,21 @@
\chapter{Abusive use of Domain Names}
\label{cha:abuse_of_domain_names}
The \gls{dns} makes it easy to browse the internet with human readable domain names. It adds an extra layer on top of TCP/IP that allows administrators to reliably maintain services, especially for large applications which are served by many servers in different locations. Using techniques like round robin, where the DNS server has a list of available servers and rotationally returns clients one of those servers, \gls{dns} enables efficient use of multiple machines, decreases access time for different users and enhances availability if single nodes in the machine cluster fail (by removing failing servers from the round robin rotation). Although this leads to the described advantages it can also be used by malicious applications. In this work three major types of domain name misuses are taken into account, malware, Phishing and Botnets.
The Domain Name System (DNS) makes it easy to browse the internet with human readable domain names. It adds an extra layer on top of TCP/IP that allows administrators to reliably maintain services, especially for large applications which are served by many servers in different locations. Using techniques like round robin, where the DNS server has a list of available servers and rotationally returns clients one of those servers, DNS enables efficient use of multiple machines, decreases access time for different users and enhances availability if single nodes in the machine cluster fail (by removing failing servers from the round robin rotation). Although this leads to the described advantages, it can also be used by malicious applications. In this work three major types of domain name misuses are taken into account: general \textit{Malware}, \textit{Phishing} and \textit{Botnets}.
\section{Malware}
\label{sec:malware}
On May 12th 2017, British security researchers discovered malware which was spreading massively at the time, especially in central Europe. After successful attack the ``WannaCry'' called malware encrypted files and pretended that the only solution to get back the decrypted files was to pay an amount of about \$ 300 in a cryptocurrency. This a popular case of a so called ransomware. Ransomware in general is a type of malicious software that threatens to publish the victim's data or blocks access to it unless a ransom is paid. Researchers quickly discovered a request that was made by the malware to an unregistered domain. The purpose of the very long nonsensical domain name (\texttt{iuqerfsodp9ifjaposdfjhgosurijfaewrwergwea.com}) was not known at the time one of the researchers (\fsAuthor{WannaCryTwitterOnline}) registered it. Afterwards Huss registered many thousands of requests every second to this domain. After more investigations it was clear that the domain was acting as a kill switch for the ransomware and by registering the domain, further spreading could be slowed down \fsCite{theguardiancom_wannacry}.
On May 12th 2017, British security researchers discovered malware which was spreading massively at the time, especially in central Europe. After successfully attacking a target, the malware called ``WannaCry'' encrypted files and pretended that the only solution to get back the decrypted files was to pay an amount of about \$ 300 in a cryptocurrency. This is one popular case of so-called ransomware. Ransomware in general is a type of malicious software that threatens to publish the victim's data or blocks access to it unless a ransom is paid. Researchers quickly discovered a request to an unregistered domain that was made by ``WannaCry''. The purpose of the very long nonsensical domain name (\texttt{iuqerfsodp9ifjaposdfjhgosurijfaewrwergwea.com}) was not known at the time one of the researchers (\fsAuthor{WannaCryTwitterOnline}) registered it. Afterwards, Huss observed many thousands of requests every second to this domain. After more investigations it was clear that the domain was acting as a kill switch for the ransomware and, by registering the domain, further spreading could be slowed down \fsCite{theguardiancom_wannacry}.
This case shows an example of how domains can be used by attackers to control their malware. Usually domains are more often used to connect to command and control servers or to communicate with other infected machines (see Section~\ref{sec:botnets} for an). To infect a machine, attackers often use so called \textit{droppers} or \textit{injectors} that do not ship the malicious code at first but are little programs to download further source code or binaries containing the harmful functionality. It is much easier for malware authors to use domains for this purpose instead of hard coding the IP addresses for many reasons: If machines that serve the down-loadable content are e.g. confiscated by the police or taken down for other reasons, domains can simply be pointed to a different servers' IP address to maintain the malicious service. Reliable endpoints are also used to maintain the malicious software and load additional code. Domains do generally have three advantages for malware authors over IP addresses, they are much more cheaper (few cents a year compared to at least \$ 10), the efforts to point a domain to a new IP address are much lower than assigning a new IP to a machine and finally it is much faster. This follows that attackers can build a pool of many domains and to compensate for take downs of some domain names. This could change when IPv6 is widely adopted (with IPv6 addresses being much cheaper) but according to statistics of Google, only about 20\% of worldwide users accessing google where IPv6 enabled (natively or using IPv6 to IPv4 bridges) \fsCite{googlecom_ipv6adoption}. This prevents the usage of IPv6 as the primary protocol in malware for obvious reasons.
This case shows an example of how domains can be used by attackers to control their malware. Usually domains are more often used to connect to command and control servers or to communicate with other infected machines (see Section~\ref{sec:botnets} for more details on botnets). To infect a machine, attackers often use so called \textit{droppers} or \textit{injectors} that do not ship the malicious code at first but are little programs to download further source code or binaries containing the harmful functionality. It is much easier for malware authors to use domains for this purpose instead of hard coding the IP addresses for many reasons: If machines that serve the downloadable content are taken down, e.g. confiscated by the police, domains can simply be pointed to a different server's IP address to maintain the malicious service. Reliable endpoints are also used to control (C\&C) the malicious software and load additional code. Domains do generally have three advantages for malware authors over IP addresses: they are much cheaper (few cents a year compared to at least \$ 10), the efforts (e.g. in terms of configuration) to point a domain to a new IP address are much lower than assigning a new IP to a machine and another advantage is that it is much faster to change domain names compared to IP addresses. From this it follows that attackers can quickly build a pool of many domains to compensate for take downs of some of those domain names. This could change when IPv6 is widely adopted (with IPv6 addresses being much cheaper) but according to statistics of Google, only about 20\% of worldwide users accessing Google are IPv6 ready (natively or using IPv6 to IPv4 bridges) \fsCite{googlecom_ipv6adoption}. This prevents the usage of IPv6 as the primary protocol in malware for obvious reasons.
\section{Phishing}
\label{sec:phishing}
Phishing describes malicious activities where attackers try to steal private information from internet users which are mostly used to gain financial benefit from (\fsCite{6151979}). There are various different types of phishing attacks that have been identified. Starting long before emails and the world wide web had significant popularity, criminals used social engineering on phones to trick users into handing over private personal and financial information. This method is known as vishing (Voice phishing). In the mid 90s, AOL was the number one provider of Internet access and the first big target of phishing activities like it is known today. At the time, people from the warez community used phishing to get passwords for AOL accounts. By impersonating AOL employees in instant messengers as well as email conversations they could obtain free internet access or financially harm people using their credit card information. With the success of the world wide web including the movement of more financial services to the internet, criminals used another approach to trick users. By registering domains that look very much like a benign service and imitating the appearance of the corresponding benign website many internet users unknowingly put their banking credentials into fake sites and suffer financial harm (also known as cybersquatting or domaine squatting). Those credentials may be sold on black markets e.g. in the dark web and can worth up to 5\% of the balance for online banking credentials according to the SecureWorks Counter Threat Unit \fsCite{rp-2016-underground-hacker-marketplace-report}.
Phishing describes malicious activities where attackers try to steal private information from internet users which are mostly used to gain financial benefit from (\fsCite{6151979}). There are various different types of phishing attacks that have been identified. Starting long before emails and the world wide web had significant popularity, criminals used social engineering on phones to trick users into handing over private personal and financial information. This method is known as vishing (Voice phishing). In the mid 90s, AOL was the number one provider of Internet access and the first big target of phishing activities like it is known today. At the time, people from the warez community used phishing to get passwords for AOL accounts. By impersonating AOL employees in instant messengers as well as email conversations they could obtain free internet access or financially harm people using their credit card information. With the success of the world wide web including the movement of more financial services to the internet, criminals used another approach to trick users. By registering domains that look very much like a benign service and imitating the appearance of the corresponding benign website, many internet users unknowingly put their banking credentials into fake sites and suffered financial harm (also known as cybersquatting or domain squatting). Those credentials may be sold on black markets e.g. in the dark web and can be worth up to 5\% of the balance for online banking credentials according to the SecureWorks Counter Threat Unit \fsCite{rp-2016-underground-hacker-marketplace-report}.
@@ -29,4 +29,4 @@ To understand how botnets can be detected, mainly considering how botnets make u
\subsection{Fast-Flux service networks}
\label{subsec:fast-flux_service_networks}
Fast-Flux service network is a technique, for example often used to serve illegal web pages or in botnets (\fsCite{nazario2008net}), to hide the actual location of core components like command and control servers (C\&C servers is used as an example here). Using DNS round robin which helps e.g. legitimate services to reduce downtimes when a single node fails, command and control servers are hidden behind groups of bots which are acting as proxies and are accessible by a domain name. As botnets usually contain a large number of bots, these proxies can quickly be changed and leave no trace back to the actual C\&C server. To be able to quickly change the hiding proxy, the time to live of the domain names to those proxies has to be set to a low value. This is one characteristic that can be used to distinguish legitimate from malicious services. Domain-Flux networks are the successor of Fast-Flux service networks and in addition do use dynamic domains for the proxies. Domain-Flux networks use changing domain names for the proxies that hide the location of core components in the botnet. For this method to work properly, all bots in the botnet do have to know under which domain the C\&C server is reachable. To be able to communicate with the C\&C server, the bot first generates the currently valid domain name (e.g. based on time) and afterwards is able to send data through the proxy to the command and control server. Some examples for malware that uses DGAs are the Srizbi botnet, the Conficker worm and the GameOver Zeus botnet. One major difference of algorithmically generated domains in contrast to legitimate domains is that they usually contain more numbers and fewer/no human readable words.
Fast-Flux service network is a technique often used to serve illegal web pages or used in botnets to hide the actual location of core components like command and control servers (\fsCite{nazario2008net}). Using DNS round robin, which helps e.g. legitimate services to reduce downtimes when a single node fails, command and control servers are hidden behind groups of bots which are acting as proxies and are accessible by a domain name. As botnets usually contain a large number of bots, these proxies can quickly be changed and leave no trace back to the actual C\&C server. To be able to quickly change the hiding proxy, the time to live (TTL, see Section~\ref{sec:DNS}) of the domain names to those proxies has to be set to a low value. This is one characteristic that can be used to distinguish legitimate from malicious services. Domain-Flux networks are the successor of Fast-Flux service networks. Compared to Fast-Flux networks, Domain-Flux networks additionally use changing domain names for the proxies that hide the location of core components in the botnet. For this method to work properly, all bots in the botnet do have to know under which domain the C\&C server is reachable. To be able to communicate with the C\&C server, the bot first has to generate the currently valid domain name (e.g. based on time) and is then able to send data through the proxy to the command and control server. Some examples for malware that uses DGAs are the Srizbi botnet, the Conficker worm and the GameOver Zeus botnet. One major difference of algorithmically generated domains in contrast to legitimate domains is that they usually contain more numbers and fewer/no human readable words.

View File

@@ -1,6 +1,7 @@
\chapter{Conclusion}
\label{cha:conclusion}
All existing machine learning systems show a promising accuracy in detecting malicious domains with different feature sets. This shows that such system can effectively detect domains that are involved in a variety of malicious activities like, botnets, phishing and spam-campaigns. The three most popular systems that have been published, \textit{Notos}, \textit{Exposure} and \textit{Kopis} are however either hard to deploy and/or require a lot of manual work to get started and can generally be seen more like academic prototypes than mature products.
All evaluated machine learning systems show high detection rates and low false positive rates in detecting domains that are involved in malicious activities like botnets, phishing and spam-campaigns with a variety of different features. The three most popular systems that have been proposed, \textit{Notos}, \textit{Exposure} and \textit{Kopis} are however either hard to deploy and/or require a lot of manual work to get started and can generally be seen more as research prototypes than mature products.
In this work, a dynamic reputation system (\textit{DoresA}) has been implemented by combining different aspects of the previously evaluated systems. Most aspects have been adopted from one system \textit{Exposure}, mostly due to its simplicity while maintaining similar detection rates and because the passive DNS data that has been available for this work showed most similarities to the data that was used in that system. To test \textit{DoresA}, a model with a total of 1 million data samples has been trained using a decision tree learning algorithm. The characteristics of the DNS resource usage, especially how often the TTL value for a domain is changed, has shown to be useful to distinguish between malicious and benign domains. At the time of writing this thesis, no evaluation of the implemented algorithm could be finished. Future work can use this implementation and investigate the accuracy of this approach. Furthermore, built on top of this work, a monitoring system can be realized to proactively warn of requests to domains, involved in malicious activities. To the best of my knowledge, no system that can easily be deployed to networks exists, neither commercial or non-commercial. A dynamic domain reputation system could be run in addition to traditional malware detection software and shows advantages, especially in the discovery of unknown malware with a lightweight approach using a passive DNS database.
In the time of writing this thesis, no evaluation of the implemented algorithm could be finished. Future work can use this implementation and investigate the accuracy of this approach. Furthermore, built on top of this work, a monitoring system can be realized to proactively warn of requests to domains, involved in malicious activities. To the best of my knowledge, no system that can easily be deployed to networks exists, neither commercial or non-commercial.

View File

@@ -1,12 +1,12 @@
\chapter{Development of DoresA}
\label{cha:development_of_doresa}
The last part of this work the development of a dynamic domain reputation system, \textit{DoresA} (or Domain reputation scoring Algoriothm). A lot of concepts for this system will be adopted from the previously evaluated systems, most concepts will be taken from \textit{Exposure} with some general ideas of \textit{Notos} and \textit{Kopis}. In general, there are some limitations to be taken into account which arise mostly by the specific type of data that is available for this work and where it has been monitored. The passive DNS logs that have been provided for this work have been collected on three recursive DNS servers in a large company in locations in Europe, Asia and the United States. As those logs do contain sensitive data, raw logs used in this work can not be published mostly due to privacy reasons. It also has to be noted, that the DNS requests are not available for this work for the same reason.
The last part of this work describes the development of a dynamic domain reputation system, \textit{DoresA} (or Domain reputation scoring Algorithm). A lot of concepts for this system will be adopted from the previously evaluated systems, most concepts will be taken from \textit{Exposure} with some general ideas of \textit{Notos} and \textit{Kopis}. In general, there are some limitations to be taken into account which arise mostly by the specific type of data that is available for this work and where it has been monitored. The passive DNS logs that have been provided for this work have been collected on three recursive DNS servers in a large company network in locations in Europe, Asia and the United States. As those logs do contain sensitive data, raw logs used in this work can not be published mostly due to privacy reasons. It also has to be noted that the DNS requests are not available for this work for the same reason.
\section{Initial Situation and Goals}
\label{sec:initial_situation_and_goals}
Ultimately, this work should come up with an algorithm to find domains that are involved in malicious activities. Most of the latest approached work has been working with machine learning techniques to build domain reputation scoring algorithms. As those publications have generally shown promising results (see Section~\ref{cha:evaluation_of_existing_systems}), this work is also focusing on a dynamic approach with machine learning algorithms involved. The network, in which the logs for this work have been collected is different from most ISP or other public networks. There is a lot of effort made to keep the network malware-free. This includes both software solutions (like anti-virus software and firewalls) as well as a team that proactively and reactively monitors and removes malware. Another defensive task is to train the employees to be aware of current and upcoming threats (e.g., to pay attention on hyperlinks in emails, distrust public usb sticks and physical access guidelines). Although this should lead to a mostly malware free network with few requests to malicious domains, 2017 has shown to be the year of ransomware (see Section~\ref{sec:malware}). Private internet users and companies have been infected with malware that was encrypting their data and requiring the target to pay an amount of money to decrypt it. There are of course other ongoing threats that have existed for many years, like spam campaigns (\fsCite{TrendMicroOnline}). The particular task in this work is to discover whether a dynamic reputation system for domains is useful and applicable under this circumstances. The ultimate goal (not part of this work) is an automated warning system that triggers when a malicious domain is requested.
Ultimately, this work should come up with an algorithm to find domains that are involved in malicious activities. Most of the latest related work has used machine learning techniques to build domain reputation scoring algorithms. As those publications have generally shown promising results (see Chapter~\ref{cha:evaluation_of_existing_systems}), this work is also focusing on a dynamic approach with machine learning algorithms involved. The network in which the logs used for the analysis and the development of the new algorithm have been collected is different from most ISP or other public networks. There is a lot of effort made to keep the network malware-free. This includes both software solutions (like anti-virus software and firewalls) as well as a team that proactively and reactively monitors and removes malware. Another defensive task is to train the employees to be aware of current and upcoming threats (e.g., to pay attention to hyperlinks in emails, distrust public USB sticks and physical access guidelines). Although this should lead to a mostly malware free environment (in this particular and similarly hardened networks) with few requests to malicious domains, 2017 has shown to be the year of ransomware (see Section~\ref{sec:malware}). Companies as well as private internet users have been infected with malware that was encrypting their data and requiring the target to pay an amount of money to decrypt it. There are of course other ongoing threats that have existed for many years, like spam campaigns (\fsCite{TrendMicroOnline}). The task in this work is to discover whether a dynamic reputation system for domains is useful and applicable under these circumstances. The ultimate goal (not part of this work) is an automated warning system that triggers when a malicious domain is requested.
\section{System Architecture}
@@ -16,8 +16,8 @@ The overall system will take an similar approach which was first introduced by \
\begin{itemize}
\item \textit{Malware Prevention through Domain Blocking} list from malwaredomains.com which is a professionally maintained list of domains involved in malicious activities like the distribution of malware and spyware (\fsCite{malwaredomainsInformationOnline}).
\item \textit{Phishtank}: A list that targets domains that are engaged in spam activities (\fsCite{PhishtankInformationOnline}).
\item \textit{ZeuS Tracker}: Blocking list for domains and IP addresses involved in the ZeuS botnet as command and control (C\&C)servers.
\item \textit{Alexa} with a list of the most popular domains in various countries as well as a global overview (total of 2000 domains).
\item \textit{ZeuS Tracker}: Blocking list for domains and IP addresses involved in the ZeuS botnet as command and control (C\&C) servers.
\item \textit{Alexa} with a list of the most popular domains from a global perspective (total of 2000 domains).
\end{itemize}
\begin{figure}[!htbp]
@@ -28,63 +28,71 @@ The overall system will take an similar approach which was first introduced by \
\end{figure}
The malicious domains list from those three services consisted of 28367 individual entries when first collected. This information is later used to label benign and malicious domains in the training process. The \textit{Malicious/Benign Domains Collector} can be rerun at any time to keep up with known malicious and benign domains at a later stage and increase the accuracy of \textit{DoresA}. The second module, \textit{Data Aggregation Module} is collecting all passive DNS logs and persisting those. The \textit{Data Aggregation Module} is also responsible for extracting and persisting all feature values that are needed in the training step and such consumed by the \textit{Training Module}. This \textit{Training Module}'s primary concern is to learn a model that holds information about resource usage of certain DNS responses as well as labeling those data samples. Due to the limitation of available time, the training period has been reduced to three days (starting from the first of september in 2017) and for simplicity has been reduced to 1 million samples (which have been chosen randomly over the three days). The training model thus consisted of a total of 1 million DNS responses and included resolutions for \textit{how many individual domains} individual domains. The accuracy of this model can be also be increased by retraining the model e.v. once a day or week to keep up with new characteristics of malicious usage. This training model can then be used in the last module, the \textit{Classification Module}, to classify resolutions (feature vector) of unlabeled domains. The \textit{Classification Module} could e.g. be used to act as a real-time warning system when deployed in a network.
The malicious domains list from those three services consisted of 28367 individual entries when first collected. This information is later used to label benign and malicious domains in the training process. The \textit{Malicious/Benign Domains Collector} can be rerun at any time to keep up with known malicious and benign domains at a later stage and thus increase the accuracy of \textit{DoresA}. The second module, the \textit{Data Aggregation Module}, is collecting all passive DNS logs and persisting those. The \textit{Data Aggregation Module} is also responsible for extracting and persisting all feature values that are needed in the training step and as such consumed by the \textit{Training Module}. This \textit{Training Module}'s primary concern is to learn a model that holds information about resource usage of certain DNS responses as well as labeling those data samples. Due to the limitation of available time, the training period has been reduced to three days (starting from the first of September 2017) and for simplicity has been reduced to 1 million samples (which have been chosen randomly over the three days). The accuracy of this model can also be increased by retraining the model e.g. once a week to keep up with new characteristics of malicious usage. This training model can then be used in the last module, the \textit{Classification Module}, to classify resolutions (feature vector) of unlabeled domains. The \textit{Classification Module} could e.g. be used to act as a real-time warning system when deployed on a resolver.
The logs that are provided have been collected in different locations all over the world and are aggregated on a single machine as csv files. As operating on the raw csv logs in the training step has shown to be very inefficient (with roughly one week of training time for one day), especially when performing multiple analysis cycles, a different solution for accessing the logs had to be found. Experimenting with putting the raw passive DNS logs into a NoSQL database (MongoDB \fsCite{MongoDBOnline}) as well as a relational database (MariaDB \fsCite{MariaDBOnline}) did not show a significant decrease in accessing the data so a slightly different approach has been used. By using an in-memory database (redis \fsCite{RedisOnline}) and only keeping those information, that are needed for the analysis has shown to give much better results, e.g. one day for the training of 1 million samples. It has to be stated though that while retaining most of the needed information, information like the timestamp of individual requests could not be kept. The following attributes are stored inside the redis instance.
The logs that are provided have been collected in different locations all over the world and are aggregated on a single machine as CSV files. As operating on the raw CSV logs in the training step has shown to be very inefficient (with roughly one week of training time for one day), especially when performing multiple analysis cycles, a different solution for accessing the logs had to be found. Experimenting with putting the raw passive DNS logs into a NoSQL database (MongoDB \fsCite{MongoDBOnline}) as well as a relational database (MariaDB \fsCite{MariaDBOnline}) did not show a significant decrease in data access time, so a slightly different approach has been used. Using an in-memory database (Redis \fsCite{RedisOnline}) and only keeping the information that is needed for the analysis has shown to give much better results: for a training set with 1 million samples, the execution time could be reduced to two days. It has to be stated though that while retaining most of the needed information, aspects like the timestamp of individual requests could not be kept. As the time patterns of single requests could not be used for the classification anyway, due to caching in lower hierarchies, this has not shown to be a problem. The following attributes are stored inside the Redis instance.
\begin{itemize}
\item \textbf{Resource record}, i.e. the domain name in this scope
\item The \textbf{type of the resource record}, DoresA does only take A records into account as most features can not be extracted from other types. See Section~\ref{subsubsec:dns_resource_records} for an explanation of the DNS resource record types..
\item All \textbf{TTL} values that this domain has had in the analysis period.
\item \textbf{Resource record}, i.e. the domain name in this scope.
\item The \textbf{type} of the resource record; DoresA only takes A records into account as most features cannot be extracted from other types. See Section~\ref{subsubsec:dns_resource_records} for an explanation of the DNS resource record types.
\item All \textbf{TTL} values that have been assigned to this domain in the analysis period.
\item \textbf{Resolution}: The IP addresses that the record resolved to.
\item \textbf{First/last-seen}: Timestamps of when the domain has been seen for the first and last time.
\item Additionally, all \textbf{reverse DNS} results are persisted, e.g. to find all historic domains that resolved to a known IP address.
\end{itemize}
Using an in-memory database for this application led to a different challenge. Even though trimmed down to the minimum set of information, the data. For this reason, a machine with an appropriate amount of internal RAM had to be used. In this case, a total of 512 Gigabyte of RAM was available with an Intel Xeon CPU with 32 cores.
Using an in-memory database for this application led to a different challenge. Even though trimmed down to the minimum set of information, the Redis database used 3354 megabytes of memory for the traffic of one week. For this reason, a machine with an appropriate amount of internal RAM had to be used. In this case, a total of 512 gigabytes of RAM was available with an Intel Xeon CPU with 32 cores to be able to perform the analysis in a reasonable time. As training on a single core with those amounts of data was not feasible in the available time, a multi-core processing approach has been targeted.
\subsection{Decision Tree Classifier}
\label{subsec:decision_tree_classifier}
While evaluating previous work, mainly two classification algorithms have shown to provide good results in this area. A decision tree classifier has some advantages over different other classification systems: the training time is comparably low, especially in contrast to neural networks. It delivers quite easily interpretable results when plotting the resulting decision tree, it requires little data preparation (e.g. no normalization of the input is needed like in many other algorithms and can handle both numerical and categorical inputs) and it is possible to validate the results of the training using techniques like cross validation. In this work, the implementation of the python library scikit-learn is used. The current implementation of the scikit-learn algorithm is called \textit{CART} (Classification and Regression Trees) and is based on the C4.5 decision tree implementation that is also used in \textit{Exposure}. For a detailed comparison of classification algorithms see \fsCite{Lim2000}.
While evaluating previous work, two classification algorithms have shown to provide good results in this area. While using a random forest implementation was giving good results in the case of \textit{Kopis}, decision tree classifiers have one major advantage over a random forest implementation. Performance has shown to be a major challenge in this work and as a random forest is basically an implementation consisting of multiple (arbitrarily sized) decision trees, the training runtime increases by a factor equal to the number of trees the random forest generates. As \textit{Exposure} and \textit{Notos} have proved to achieve good results with a decision tree, this classification algorithm will also be used in this work. Decision tree classification offers further benefits: it delivers easily interpretable results when plotting the resulting tree, it requires little data preparation (e.g. no normalization of the input is needed like in many other algorithms and it can handle both numerical and categorical inputs) and it is possible to validate the results of the training using techniques like cross validation. In this work, the implementation of the Python library scikit-learn is used. The current implementation of the scikit-learn algorithm is called \textit{CART} (Classification and Regression Trees) and is based on the C4.5 decision tree implementation that is also used in \textit{Exposure}. For a detailed comparison of classification algorithms see \fsCite{Lim2000}.
\section{Feature Selection}
\label{sec:feature_selection}
The feature selection is primarily motivated by the results of the evaluation of previously proposed systems. As \textit{Exposure} has shown to be the system that shares most similarities compared to the network and traffic that is available, also most features are adopted from \textit{Exposure} in the first place. Due to the restricted analysis time, the \textit{Time-Based Features} can unfortunately not be used in this work. To recapture, at least one week of traffic has to be trained to benefit from those features. Besides from that, nearly all features of \textit{Exposure} could be used for the training. See Table~\ref{tab:doresa_features} for all features that are used to model the resource usage characteristics of domains, used in legitimate and malicious activities. For a detailed explanation of why these features have been included, see Section~\ref{subsec:exposure_features}.
The feature selection is primarily motivated by the results of the evaluation of previously proposed systems. As \textit{Exposure} has shown to be the system that shares most similarities, mostly regarding the traffic that is available, most features are also adopted from \textit{Exposure} in the first place. Due to the restricted analysis time, the \textit{Time-Based Features} can unfortunately not be used in this work. To recapture, at least one week of traffic has to be trained to benefit from those features. Besides from that, nearly all features of \textit{Exposure} could be used for the training. See Table~\ref{tab:doresa_features} for all features that are used to model the resource usage characteristics of domains, used in legitimate and malicious activities. For a detailed explanation of why these features have been included, see Section~\ref{subsec:exposure_features}. This sums up to a total of nine different features with some features having multiple feature values in the feature vector: The \textit{Reverse DNS query results} contain the ratio of IPs that can not be resolved (NX domains), the number of all resolved IPs for a domain, the ratio of IP addresses that are known to be used as digital subscriber lines (DSL), the ratio of IPs that are used for web hosting, the ratio of IPs that are used by internet service providers (ISPs) and the ratio of IPs that can be matched with a valid domain name. Please note that the software that would have been used to generate these features could not be shipped in time and the NX domains have not yet been available in the database, so these features are ignored in the sample training. The percentage usage of specific TTL ranges includes the following individual features (in seconds): [0, 1], [1, 10], [10, 100], [100, 300], [300, 900], [900, inf].
\begin{table}[!htbp]
\centering
\caption{Doresa: Features}
\label{tab:doresa_features}
\begin{tabularx}{\textwidth}{|l|X|}
\begin{tabularx}{\textwidth}{|l|X|l|}
\hline
\textbf{Feature Set} & \textbf{Feature Name} \\ \hline
\multirow{4}{*}{\textit{DNS Answer-Based Features}} & Number of distinct IP addresses \\ \cline{2-2}
& Number of distinct countries \\ \cline{2-2}
& Number of domains share the IP with \\ \cline{2-2}
& Reverse DNS query results \\ \hline
\multirow{5}{*}{\textit{TTL Value-Based Features}} & Average TTL \\ \cline{2-2}
& Standard Deviation of TTL \\ \cline{2-2}
& Number of distinct TTL values \\ \cline{2-2}
& Number of TTL change \\ \cline{2-2}
& Percentage usage of specific TTL ranges \\ \hline
\multirow{2}{*}{\textit{Domain Name-Based Features}} & \% of numerical characters \\ \cline{2-2}
& \% of the length of the LMS \\ \hline
\end{tabularx}
\end{table}
\textbf{Feature Set} & \textbf{Feature Name} & \textbf{\# in Vector} \\ \hline
\multirow{4}{*}{\textit{DNS Answer-Based Features}} & Number of distinct IP addresses & \#1 \\ \cline{2-3}
& Number of distinct countries & \#2 \\ \cline{2-3}
& Number of domains that share the IP & \#3 \\ \cline{2-3}
& Reverse DNS query results & \#4 - \#8 \\ \hline
\multirow{5}{*}{\textit{TTL Value-Based Features}} & Average TTL & \#9 \\ \cline{2-3}
& Standard Deviation of TTL & \#10 \\ \cline{2-3}
& Number of distinct TTL values & \#11 \\ \cline{2-3}
& Number of TTL changes & \#12 \\ \cline{2-3}
& Percentage usage of specific TTL ranges & \#13 - \#17 \\ \hline
\multirow{2}{*}{\textit{Domain Name-Based Features}} & \% of numerical characters & \#18 \\ \cline{2-3}
& \% of the length of the LMS & \#19 \\ \hline
\end{tabularx}
\end{table}
\section{Implementation}
\label{sec:implementation}
The implementation of \textit{DoresA} does include several different pieces of software. The main part is implemented in python and consists of the \textit{Training Module} and the \textit{Classification Module}. Apart from the main application, the \textit{Malicious/Benign Domains Collector} is a collection of bash scripts to fetch the filter lists and combine them into lists that can easily be consumed by the main application. The \textit{Data Aggregation Module} is written in C (\fsCite{kernighan2006c}), mostly for performance reasons as these logs are aggregated in real time and fed into the redis database. Most of the \textit{Data Aggregation Module} implementation has been available for this work but had to be extended to also persist all TTL changes for a domain.
The implementation of \textit{DoresA} does include several different pieces of software. The main part is implemented in python and consists of the \textit{Training Module} and the \textit{Classification Module}. Apart from the main application, the \textit{Malicious/Benign Domains Collector} is a collection of bash scripts to fetch the filter lists and combine them into lists that can easily be consumed by the main application. The \textit{Data Aggregation Module} is written in C (\fsCite{kernighan2006c}), mostly for performance reasons as these logs are aggregated in real time and fed into the Redis database. For the \textit{Data Aggregation Module}, a previously available implementation could be extended to also persist all TTL changes for a domain. To further decrease training time, the Redis database actually consists of nine different instances which can be accessed in parallel. To actually benefit from multiple instances, the domain (acting as a key) has been hashed and the modulo operation has been used to evenly fill the instances.
The main application is mainly working in two modes. In the training mode, all entries are first loaded from the raw csv logs for the given period. The next step extracts and calculates the values that are needed for each feature and uses the filter lists, gathered by the \textit{Malicious/Benign Domains Collector} to label the dataset. After this, the feature values along with the label is persisted as serialized python objects. This persistence step is on the one side needed to do the final step of training but can also be useful if for some reason, the training is crashing or stopped, it can be continued and picked up where the previous training left off. The last step is using the preprocessed features and the corresponding labels to build the decision model, i.e. generate the decision tree. The training can mostly (apart from the last step) be done in parallel to get a reasonable training time the implementation in this work has efficiently been executed on 32 cores and took roughly two days for training a dataset with 1 million samples. In the second mode, the \textit{Classification Module} classifies a dataset as either being benign or malicious. While the evaluated systems do have a variable reputation score from zero to one, this system does a binary classification for the dataset in the first place. This could be changed to a variable reputation score, e.g. using the probability for each class that can also be retrieved by the scikit-learn decision tree implementation.
The main application is working in two modes. In the training mode, all entries are first loaded from the raw CSV logs for the given period. The next step extracts and calculates the values that are needed for each feature and uses the filter lists, gathered by the \textit{Malicious/Benign Domains Collector}, to label the dataset. After this, the feature values along with the label are persisted. The last step is using the preprocessed features and the corresponding labels to build the decision model, i.e. generate the decision tree. The training can mostly (apart from the last step) be done in parallel. To achieve a reasonable training time, the implementation in this work has been executed on 32 cores and took roughly two days for training a dataset with 1 million samples. In the second mode, the \textit{Classification Module} classifies a dataset as either being benign or malicious. While the evaluated systems do have a variable reputation score from zero to one, this system does a binary classification for the dataset in the first place. This could be changed to a variable reputation score, e.g. using the probability for each class that can also be retrieved by the scikit-learn decision tree implementation. A variable score has one major advantage over a binary classification. Operators can set a threshold to make sure that no false positives occur, for example in an automated blocking system.
Figure~\ref{fig:doresa_selection_decision_tree} shows an excerpt of the resulting decision tree from the test training with 1 million data samples. \todo{describe what is seen on the decision tree excerpt}
Figure~\ref{fig:doresa_selection_decision_tree} shows an excerpt of the resulting decision tree from the test training with 1 million data samples. Looking at the root node (see Figure~\ref{fig:doresa_selection_decision_tree_root}), we can see that the overall model consists of 1 million samples and the first row shows that feature twelve (the number of TTL changes) \(X[11]\) has the most information gain to split the initial dataset. The second column shows the equality distribution, where zero represents an equal distribution and one a completely unequal distribution (all samples in one class). Considering any leaf in Figure~\ref{fig:doresa_selection_decision_tree}, we can see the number of samples that belong to a class and the resulting class/label (zero represents a benign, one a malicious domain).
\begin{figure}[!htbp]
\centering
\includegraphics[width=.8\textwidth, clip=true]{content/Development_of_DoresA/doresa_example_tree_root.png}
\caption{DoresA: root node of resulting decision tree}
\label{fig:doresa_selection_decision_tree_root}
\end{figure}
\begin{figure}[!htbp]
\centering

Binary file not shown.

After

Width:  |  Height:  |  Size: 20 KiB

View File

@@ -6,7 +6,7 @@ This chapter deals with work around domain reputation scoring systems that has b
\section{Evaluation Schema}
\label{sec:evaluation_scheme}
For a comprehensive evaluation, all input and output as well as the exact implementations (and/or the corresponding parameters that have been used for the analysis) of the algorithm was needed. Unfortunately, none of the publications we are dealing with here have released any (raw) input data, specifically the passive DNS logs and the filter lists for the training set. Neither has any of the algorithm's actual implementation been published. For this reason the evaluation of the existing systems is focusing on the results that have individually been published. Most importantly the detection rate as well as the false positive rate. Another important fact for this overview is what data has actually been used for the training and classification and where the data has been obtained. Passive DNS logs may be collected in different stages of the DNS resolution and might, e.g. due to caching, lead to the extraction of different information. A resolver running on the users machine might obtain much more traffic and thus benefit from e.g. time based patterns which are not possible at higher level DNS servers that are not able to collect that traffic because the response has been cached on resolvers in a lower (DNS-) hierarchy.
For a comprehensive evaluation, all input and output as well as the exact implementations (and/or the corresponding parameters that have been used for the analysis) of the algorithm were needed. Unfortunately, none of the publications we are dealing with here have released any (raw) input data, specifically the passive DNS logs and the filter lists for the training set. Neither has any of the algorithm's actual implementation been published. For this reason the evaluation of the existing systems is focusing on the results that have individually been published, most importantly the detection rate as well as the false positive rate. Another important fact for this overview is what data has actually been used for the training and classification and where the data has been obtained. Passive DNS logs may be collected in different stages of the DNS resolution and might, e.g. due to caching in a lower DNS hierarchy, lead to the extraction of different information. A resolver running on the user's machine might obtain much more traffic and thus benefit from e.g. time based patterns which are not possible at higher level DNS servers that are not able to collect that traffic because the response has been cached on resolvers in a lower (DNS-) hierarchy.
\input{content/Evaluation_of_existing_Systems/Notos/Notos.tex}
@@ -19,6 +19,6 @@ For a comprehensive evaluation, all input and output as well as the exact implem
\section{Results and Comparison}
\label{sec:results_and_comparison}
After investigating those three systems, we want to demonstrate the major differences and similarities. The results discussed here are the base for the implementation of the own algorithm. All three systems are based on machine learning techniques. Two of the systems use a decision tree classifier and \textit{Kopis} uses a random forest classifier which is not significantly different from a decision tree but has some advantages in some areas (see a detailed comparision on Section~\ref{sec:model_selection}) One major difference of these systems is the data they are working with. While \textit{Notos} and \textit{Exposure} are operated with data collected at recursive DNS servers in lower DNS layers, \textit{Exposure} is gathering traffic from a top level domain name server and two AuthNS from major domain name registrars. As the data available for this work has also been gathered at RDNS servers in a lower DNS hierarchy and no data from higher DNS layers is available, most concepts of \textit{Kopis} can not be used for the system that is proposed in this work. Nevertheless there are general aspects of \textit{Kopis} that can be useful, e.g. which sources have been used to build the knowledge base for the classification of test samples in the training or how the overall architecture has been designed. It also has to be noted though, that \textit{Kopis} is the only system that is able to operate without having reputation information for domains and IPs available. Having data available that is collected similarly to \textit{Notos} and \textit{Exposure} does not mean that all concepts and features can be applied in the new system. A company network has much different characteristics then a network operated by e.g. an ISP. The network, in which the logs for this work has been collected in, is hardened with much more effort so that malware should generally be rarely found. 
Especially \textit{Notos} uses public traffic from an ISP RDNS server that is handling clients of this ISP network which, by design, can not be taken care of like in a closed company network and is much more likely to contain a lot of different malware. One major difference between \textit{Notos} and \textit{Exposure} is the complexity of the overall system. \textit{Notos}, being the first dynamic domain reputation system, has a much higher amount of features that are used. Some of these features, like the Network-based features (see Table~\ref{tab:notos_network-based_features}) are much more fine grain (e.g. independently operating on the top level, second level and third level domains) compared to the similar group of features in \textit{Exposure} (see Table~\ref{tab:exposure_features}, \textit{DNS Answer-Based Features}). For this reason, \textit{Notos} does also need much more detailed reputation information, e.g. for the IP spaces. Although not having such fine grain features, \textit{Exposure} shows similar detection rates like \textit{Notos}. Another general advantages of \textit{Exposure} over \textit{Notos} is the reduced training time (again for example due to fewer features) and that it does not need information about malware that has been gathered in self hosted honeypots (which in fact, done right is a completely different topic on its own and therefore not part of this work).
After investigating those three systems, we want to demonstrate the major differences and similarities. The results discussed here are the basis for the implementation of our own algorithm. All three systems are based on machine learning techniques. Two of the systems use a decision tree classifier and \textit{Kopis} uses a random forest classifier which is not significantly different from a decision tree but requires more effort to be implemented. One major difference of these systems is the data they are working with. While \textit{Notos} and \textit{Exposure} are operated with data collected at recursive DNS servers in lower DNS layers, \textit{Kopis} is gathering traffic from a top level domain name server and two AuthNS from major domain name registrars. As the data available for this work has also been gathered at RDNS servers in a lower DNS hierarchy and no data from higher DNS layers is available, most concepts of \textit{Kopis} can not be used for the system that is proposed in this work. Nevertheless, there are general aspects of \textit{Kopis} that can be useful, e.g. which sources have been used to build the knowledge base for the classification of test samples in the training or how the overall architecture has been designed. It also has to be noted though, that \textit{Kopis} is the only system that is able to operate without having reputation information for domains and IPs available. Having data available that is collected similarly to \textit{Notos} and \textit{Exposure} does not mean that all concepts and features can be applied in the new system. A company network has quite different characteristics from a network operated by e.g. an ISP. The network in which the logs for this work have been collected is hardened with much more effort so that malware should generally be rarely found.
Especially \textit{Notos} uses public traffic from an ISP RDNS server that is handling clients of this ISP network which, by design, can not be taken care of like in a closed company network and is much more likely to contain a lot of different malware. One major difference between \textit{Notos} and \textit{Exposure} is the complexity of the overall system. \textit{Notos}, being the first dynamic domain reputation system, has a much higher amount of features that are used. Some of these features, like the Network-based features (see Table~\ref{tab:notos_network-based_features}) are much more fine-grained (e.g. independently operating on the top level, second level and third level domains) compared to the similar group of features in \textit{Exposure} (see Table~\ref{tab:exposure_features}, \textit{DNS Answer-Based Features}). For this reason, \textit{Notos} does also need much more detailed reputation information, e.g. for the IP spaces. Although not having such fine-grained features, \textit{Exposure} shows similar detection rates to \textit{Notos}. Another general advantage of \textit{Exposure} over \textit{Notos} is the reduced training time (again for example due to fewer features) and that it does not need information about malware that has been gathered in self hosted honeypots (which, done right, is in fact a completely different topic on its own and therefore not part of this work).
It also has to be noted, that while all three systems show a high detection rate in general with a high true positive and low false positive rate, they can not be operated with a 100\% success rate and should always be deployed along with other detection systems like firewalls, malware detection software and/or traditional filter systems like DNS black- and whitelists. Dynamic reputation systems can, however, be used to find domains used in malicious activities before other systems are aware of the threat.

View File

@@ -4,19 +4,19 @@
\subsection{General}
\label{subsec:exposure_general}
\textit{Exposure} is ``a system that employs large-scale, passive DNS analysis techniques to detect domains that are involved in malicious activity''\fsCite{Bilge11exposure:finding}, which was first introduced in 2011 by the \textit{Institute Eurecom} in Sophia Antipolis, the \textit{Northeastern University} from Boston and the \textit{University of California} in Santa Barbara. \textit{Exposure} is the second published system to detect malicious domains using passive DNS data and is built on the key premise, that most malicious services are dependent on the domain name system and compared to benign services should expose enough differences in behaviour for an automated discovery, see Section~\ref{subsec:exposure_features} for what differences the features are targeted at. The main analysis for \textit{Exposure} has been run on data of a period of 2.5 month with more than 100 billion DNS queries. \textit{Exposure} is not targeted at a specific threat but rather covers a wide variety of malicious activities like phishing, Fast-Flux services, spamming, botnets (using domain generation algorithms), and similar others. It uses fifteen features, with nine features, that have not been proposed in previous research. Ultimately, \textit{Exposure} offers a real-time detection system which has been made available to the public in 2014 \fsCite{Bilge:2014:EPD:2617317.2584679}. Unfortunately, the service was not accessible at the time of this writing.
\textit{Exposure} is ``a system that employs large-scale, passive DNS analysis techniques to detect domains that are involved in malicious activity''\fsCite{Bilge11exposure:finding}, which was first introduced in 2011 by the \textit{Institute Eurecom} in Sophia Antipolis, the \textit{Northeastern University} from Boston and the \textit{University of California} in Santa Barbara. \textit{Exposure} is the second published system to detect malicious domains using passive DNS data and is built on the key premise, that most malicious services are dependent on the Domain Name System and compared to benign services should expose enough differences in behaviour for an automated discovery, see Section~\ref{subsec:exposure_features} for what differences the features are targeted at. The main analysis for \textit{Exposure} has been run on data of a period of 2.5 months with more than 100 billion DNS queries. \textit{Exposure} is not targeted at a specific threat but rather covers a wide variety of malicious activities like phishing, Fast-Flux services, spamming, botnets (using domain generation algorithms), and similar others. It uses fifteen features, nine of which have not been proposed in previous research. Ultimately, \textit{Exposure} offers a real-time detection system which has been made available to the public in 2014 \fsCite{Bilge:2014:EPD:2617317.2584679}. Unfortunately, the service was not accessible at the time of this writing.
\subsection{Architecture}
\label{subsec:exposure_architecture}
For the distinction of benign and malicious domains to perform well, a large set of training data is used in \textit{Exposure} (seven days). The offline training has been powered by recursive DNS traffic (RDNS), gathered from the Security Information Exchange (SIE). Specifically, only the answer of the RDNS traffic has been used, that comprises of: the queried domain name, timestamp of the request, caching time TTL and the list of resolved IP addresses. The overall system consists of five main components. How those modules are interacting with each other and which input data is required for each module can be seen in Figure~\ref{fig:exposure_system_overview}.
For the distinction of benign and malicious domains to perform well, a large set of training data is used in \textit{Exposure} (seven days). The offline training has been powered by recursive DNS traffic (RDNS), gathered from the Security Information Exchange (SIE). Specifically, only the answers of the RDNS traffic have been used, which comprise: the queried domain name, timestamp of the request, caching time TTL and the list of resolved IP addresses. The overall system consists of five main components. How those modules are interacting with each other and which input data is required for each module can be seen in Figure~\ref{fig:exposure_system_overview}.
\begin{itemize}
\item The \textit{Data Collector} module passively captures the DNS traffic in the monitored network.
\item The \textit{Feature Attribution} component is attributing the captured domains with a vector containing the associated features.
\item The third component \textit{Malicious and Benign Domains Collector} is running in parallel to the first two modules and constantly gathers information about known good and known bad domains. These lists are used to label the output of the \textit{Feature Attribution} module afterwards, as it can be seen in picture~\ref{fig:exposure_system_overview}. The list of benign domains is extracted from the Alexa top list \fsCite{AlexaWebInformationOnline} and externally confirmed \gls{whois} data. The list of known malicious domains is collected from several external, both professionally provisioned and user maintained, sources and includes domains in different threat classes, e.g., malwaredomains.com \fsCite{malwaredomainsInformationOnline}, Phishtank \fsCite{PhishtankInformationOnline}, Anubis (no longer available), the Zeus Block List \fsCite{zeusblocklistInformationOnline} and domains from DGAs for Conficker \fsCite{porras2009foray} and Mebroot \fsCite{Stone-Gross:2009:YBM:1653662.1653738}.
\item The labeled dataset is then fed into the \textit{Learning Module} and trains the domain detection model that is used in the final step. This classifier may also be retrained on a regular basis to keep up with malicious behavior (daily in \textit{Exposure}).
\item The third component \textit{Malicious and Benign Domains Collector} is running in parallel to the first two modules and constantly gathers information about known good and known bad domains. These lists are used to label the output of the \textit{Feature Attribution} module afterwards, as it can be seen in Figure~\ref{fig:exposure_system_overview}. The list of benign domains is extracted from the Alexa top list \fsCite{AlexaWebInformationOnline} and externally confirmed \gls{whois} data. The list of known malicious domains is collected from several external, both professionally provisioned and user maintained, sources and includes domains in different threat classes, e.g., malwaredomains.com \fsCite{malwaredomainsInformationOnline}, Phishtank \fsCite{PhishtankInformationOnline}, Anubis (no longer available), the Zeus Block List \fsCite{zeusblocklistInformationOnline} and domains from DGAs for Conficker \fsCite{porras2009foray} and Mebroot \fsCite{Stone-Gross:2009:YBM:1653662.1653738}.
\item The labeled dataset is then fed into the \textit{Learning Module} and trains the domain detection model that is used in the final step. This classifier may also be retrained on a regular basis to keep up with malicious behavior (daily in \textit{Exposure}).
\item The \textit{Classifier} uses the decision model to classify unlabeled (new) domains into benign and malicious groups. For this, the same feature vector that is produced by the \textit{Feature Attribution} module is used.
\end{itemize}
@@ -35,11 +35,11 @@ For the distinction of benign and malicious domains to perform well, a large set
\textit{Exposure} uses a total of fifteen features that have been chosen after several months of study with thousands of well-known benign and malicious domains. These features are grouped into four different categories which can be seen in Table~\ref{tab:exposure_features}.
The first group, \textit{Time-Based Features} has not been approached in publications before. These features investigate the time, at which the request with domain \textit{d} has been issued. The main idea behind this group of features is to find malicious services that use techniques like \textit{domain flux}
(see Section~\ref{subsec:fast-flux_service_networks}) to circumvent take downs and make their infrastructure more agile. ``[\textit{Domain flux}] often show a sudden increase followed by a sudden decrease in the number of requests'' \fsCite[Section 3.1]{Bilge:2014:EPD:2617317.2584679}. Domains of malicious services using a DGA do only exist for a short period of time by design. \fsAuthor{Bilge:2014:EPD:2617317.2584679} defines the first feature as follows: ``A domain is defined to be a short-lived domain [...] if it is queried only between time \(t_0\) and \(t_1\), and if this duration is comparably short (e.g., less than several days).'' The next three features are subject to the change point detection (CPD) problem: Change point detection is about the identification of (abrupt) changes in the distribution of values, for example in time series. \textit{Exposure} implemented a CPD algorithm based on the popular CUSUM (cumulative sum) algorithm. At first, the time series of request timestamps is split into periods of 3600 seconds (one hour was tested to work well). After that, all time intervals are iterated and for each interval, the average request count of the previous eight hours \(P_t^-\) and following eight intervals \(P_t^+\) is calculated. In the next step, the distance of these two values is calculated \(d(t)=|P_t^--P_t^+|\) for each interval and the resulting ordered sequence \(d(t)\) of distances is fed to the CUSUM algorithm to finally retrieve all change points (For more information on the implemented CPD algorithm, see \fsCite[Section 3.1]{Bilge:2014:EPD:2617317.2584679}). To calculate the \textit{Daily similarity} features, the Euclidean Distance of the time series of each day for \textit{d} is calculated. Intuitively, a low distance denotes similar time series and thus high daily similarity whereas two days with higher distance do show a less similar request volume. 
All the features of this group naturally only perform well when having a larger number of requests to \textit{d} over a significant period of time.
(see Section~\ref{subsec:fast-flux_service_networks}) to circumvent take downs and make their infrastructure more agile. \fsAuthor{Bilge:2014:EPD:2617317.2584679} infer that ``[\textit{Domain flux}] often show a sudden increase followed by a sudden decrease in the number of requests''. Domains of malicious services using a DGA do only exist for a short period of time by design. \fsAuthor{Bilge:2014:EPD:2617317.2584679} defines the first feature as follows: ``A domain is defined to be a short-lived domain [...] if it is queried only between time \(t_0\) and \(t_1\), and if this duration is comparably short (e.g., less than several days).'' The next three features are subject to the change point detection (CPD) problem: Change point detection is about the identification of (abrupt) changes in the distribution of values, for example in time series. \textit{Exposure} implemented a CPD algorithm based on the popular CUSUM (cumulative sum) algorithm. At first, the time series of request timestamps is split into periods of 3600 seconds (one hour was tested to work well). After that, all time intervals are iterated and for each interval, the average request count of the previous eight hours \(P_t^-\) and following eight intervals \(P_t^+\) is calculated. In the next step, the distance of these two values is calculated for each interval \(d(t)=|P_t^--P_t^+|\) and the resulting ordered sequence \(d(t)\) of distances is fed to the CUSUM algorithm to finally retrieve all change points (for more information on the implemented CPD algorithm, see \fsCite[Section 3.1]{Bilge:2014:EPD:2617317.2584679}). To calculate the \textit{Daily similarity} features, the Euclidean Distance of the time series of each day for \textit{d} is calculated. Intuitively, a low distance denotes similar time series and thus high daily similarity whereas two days with higher distance do show a less similar request volume. 
All the features of this group naturally only perform well when having a larger number of requests to \textit{d} over a significant period of time.
The next group of Features (\textit{DNS Answer-Based Features}) investigates resolutions of the requested domain \textit{d}. While one domain can map to multiple IP addresses for benign services, most harmless services show a much smaller network profile in terms of e.g. location and \glspl{as}. To benefit from those findings, four features have been extracted: The number of distinct IP addresses, the amount of different countries these IP addresses are assigned to, the number of other domains that share an IP address \textit{d} resolves to and the fourth feature is the amount of results of the reverse dns query for all IPs of \textit{d}. It is worth noting, that some hosting providers also use one IP address for many domains and an extra layer helps preventing those false positives.
The next group of Features (\textit{DNS Answer-Based Features}) investigates resolutions of the requested domain \textit{d}. While one domain can map to multiple IP addresses for benign services, most harmless services show a much smaller network profile in terms of e.g. the location or the distribution of \glspl{as}/\glspl{bgp}. To benefit from those findings, four features have been extracted: The number of distinct IP addresses, the amount of different countries these IP addresses are assigned to, the number of other domains that share an IP address (\textit{d} resolves to) and the fourth feature is the amount of results of the reverse DNS query for all IPs of \textit{d}. It is worth noting, that some hosting providers also use one IP address for many domains but in conjunction with other features those false positives can be reduced.
The \textit{TTL Value-Based Features} cover five individual features. Each answer for a DNS request contains the TTL attribute, which is the recommendation, configured by the operator of \textit{d}, of how long the resolution will be valid and should be cached for this reason. Whereas RFC 1033 recommends a TTL of one day (86400 seconds) \fsCite{RFC1033}, it is getting more common, especially for content delivery networks to use much lower values (e.g. Cloudflare, one of the biggest managed DNS providers is using a default of 5 minutes). Botnets are also usually applying low TTL values to avoid long outages of C\&C servers and bots. As \fsAuthor{Bilge:2014:EPD:2617317.2584679} states, botnets also change their TTL values more frequently and use values in different ranges depending on their availability. While applying a higher value to high bandwidth servers with low downtimes, home computers behind a digital subscriber line are much likely to fail and get lower TTL values. For this reason, all TTL values for a domain are checked against the following range (in seconds): [0, 1], [1, 10], [10, 100], [100, 300], [300, 900], [900, inf].
The \textit{TTL Value-Based Features} cover five individual features. Each answer for a DNS request contains the TTL attribute, which is the recommendation (configured by the operator of \textit{d}) of how long the resolution will be valid and should be cached for this reason. Whereas RFC 1033 recommends a TTL of one day (86400 seconds) \fsCite{RFC1033}, it is getting more common, especially for content delivery networks to use much lower values (e.g. Cloudflare, one of the biggest managed DNS providers is using a default of 5 minutes). Botnets are also usually applying low TTL values to avoid long outages of C\&C servers and bots. As \fsAuthor{Bilge:2014:EPD:2617317.2584679} states, botnets also change their TTL values more frequently and use values in different ranges depending on their availability. While applying a higher value to high bandwidth servers with low downtimes, home computers behind a digital subscriber line are much more likely to go offline and therefore are assigned lower TTL values. For this reason, all TTL values for a domain are checked against the following ranges (in seconds): [0, 1], [1, 10], [10, 100], [100, 300], [300, 900], [900, inf].
The last group of features are the \textit{Domain Name-Based Features}. Domain names of benign services mostly use easy to remember names which consist of valid words. Attackers often are not interested in human readable domain names. This is especially true for domains generated by a DGA. \textit{Exposure} extracts two statistical features out of the domain name, the first being the percentage of numerical characters and secondly the length of the longest (english) meaningful string (LMS).

View File

@@ -24,21 +24,21 @@ The last evaluated System is called \textit{Kopis} and has been proposed in 2011
\label{fig:kopis_system_overview}
\end{figure}
The overall system architecture can be seen in Figure~\ref{fig:kopis_system_overview}. The first step in the reputation system is to gather all (streamed) DNS queries and responses and divide this traffic into fixed epochs (e.g. one day in \textit{Kopis}). After collecting the traffic of each epoch \(E_i\), different statistics about a domain \textit{d} are extracted by the \textit{Feature Computation} function into a feature vector \(v_d^i\). A detailed table of which features are used is listed in Section~\ref{subsec:kopis_features}. \textit{Kopis} tries to separate benign from malicious domains by characteristics like the volume of DNS requests to domain \textit{d}, the diversity of IP addresses of the querying machines and the historic information relating to the IP space \textit{d} is pointing to. Like the first two investigated systems, \textit{Kopis} is operating in two different modes. In training mode, the reputation model is built in an offline fashion (\textit{Learning Module}) which is later used in the operational mode (\textit{Statistical Classifier}) to assign \textit{d} a reputation score in a streamed fashion. The \textit{Learning Module} takes the feature vector of a period of \textit{m} days that is generated by the \textit{Feature Computation} function as input and uses the \textit{Knowledge Base (KB)} to label each sample in that training set as being a malicious or legitimate domain (training set: \(V_{train} = \{v_d^i\}_{i=1..m}, \forall d \in \textit{KB}\)). The \textit{KB} consists of various public and undisclosed sources: \\
The overall system architecture can be seen in Figure~\ref{fig:kopis_system_overview}. The first step in the reputation system is to gather all (streamed) DNS queries and responses and divide this traffic into fixed epochs (e.g. one day in \textit{Kopis}). After collecting the traffic of each epoch \(E_i\), different statistics about a domain \textit{d} are extracted by the \textit{Feature Computation} function into a feature vector \(v_d^i\). A detailed table of which features are used is listed in Section~\ref{subsec:kopis_features}. \textit{Kopis} tries to separate benign from malicious domains by characteristics like the volume of DNS requests to domain \textit{d}, the diversity of IP addresses of the querying machines and the historic information, relating to the IP space \textit{d} is pointing to. Like the first two investigated systems, \textit{Kopis} is operating in two different modes. In training mode, the reputation model is built in an offline fashion (\textit{Learning Module}) which is later used in the operational mode (\textit{Statistical Classifier}) to assign \textit{d} a reputation score in a streamed fashion. The \textit{Learning Module} takes the feature vector of a period of \textit{m} days that is generated by the \textit{Feature Computation} function as input and uses the \textit{Knowledge Base (KB)} to label each sample in that training set as being a malicious or legitimate domain (training set: \(V_{train} = \{v_d^i\}_{i=1..m}, \forall d \in \textit{KB}\)). The \textit{KB} consists of various public and undisclosed sources: \\
\textbf{Malicious domain sources: }
\begin{itemize}
\item Information about malware from a commercial feed with a volume between 400 MB and 2GB a day
\item Malware, captured from two corporate networks
\item Public blacklists, e.g., malwaredomains.com \fsCite{malwaredomainsInformationOnline} and the Zeus Block List \fsCite{zeusblocklistInformationOnline} \\
\item Information about malware from a commercial feed with a volume between 400 MB and 2GB a day.
\item Malware, captured from two corporate networks.
\item Public blacklists, e.g., malwaredomains.com \fsCite{malwaredomainsInformationOnline} and the Zeus Block List \fsCite{zeusblocklistInformationOnline}. \\
\end{itemize}
\textbf{Benign domain sources: }
\begin{itemize}
\item Domain and IP whitelists from DNSWL \fsCite{DNSWLOnline}
\item Address space of the top 30 Alexa domains \fsCite{AlexaWebInformationOnline}
\item Dihe's IP-Index Browser \fsCite{DIHEOnline}
\item Domain and IP whitelists from DNSWL \fsCite{DNSWLOnline}.
\item Address space of the top 30 Alexa domains \fsCite{AlexaWebInformationOnline}.
\item Dihe's IP-Index Browser \fsCite{DIHEOnline}.
\end{itemize}
The operational mode first captures all DNS traffic streams. At the end of each epoch \(E_j\), the feature vector \(v_{d'}^j\) for all unknown domains \(d' \notin \textit{KB}\) is extracted and the \textit{Statistical Classifier} assigns a label (either malicious or legitimate) \(l_{d', j}\) and a confidence score \(c(l_{d', j})\). While the label classifies if the domain \textit{d'} is expected to be malicious or legitimate, the confidence score expresses the probability of this label. For the final reputation score, \textit{Kopis} first computes a series of label/confidence tuples for \textit{m} epochs starting at epoch \(E_t\): \(S(v_{d'}^j) = \{l_{d', j}, c(l_{d', j})\}, j = t, .., (t + m)\) and by averaging the confidence scores of the malicious labels (\textit{M}), the reputation score can be expressed as \(\overline{C}_M = avg_j\{c(l_{d', j})\}\)
@@ -47,14 +47,14 @@ The operational mode first captures all DNS traffic streams. At the end of each
\subsection{Features}
\label{subsec:kopis_features}
Much like the previous investigated systems, \textit{Kopis} is extracting different features that are grouped in three sets. Two of those groups, the \textit{Requester Diversity} and the \textit{Requester Profile} features, have not been proposed in research before and due to the system architecture are differing from those that are used in \textit{Notos} and \textit{Exposure}. In contrast to \textit{Notos} and \textit{Exposure}, which use traffic monitored from recursive DNS servers in lower DNS layers, \textit{Kopis} is operating with data from two large AuthNS as well as a country level TLD server (.ca space) in the upper DNS layers (see \ref{fig:kopis_data_sources}). Operating in this level in the DNS hierarchy leads to different challenges as well. A top-level domain server is rarely answering a request itself but most of the time is only delegating the request to a more specific server, e.g. a server responsible for the zone of a second-level domain in a company. For this reason, to get the actual resolved record (IP), the delegated name server can be queried straightly or a passive DNS database (e.g. from the Security Information Exchange \fsCite{SIEOnline}) can be engaged.
Much like the previously investigated systems, \textit{Kopis} is extracting different features that are grouped in three sets. Two of those groups, the \textit{Requester Diversity} and the \textit{Requester Profile} features, have not been proposed in research before and due to the system architecture are differing from those that are used in \textit{Notos} and \textit{Exposure}. In contrast to \textit{Notos} and \textit{Exposure}, which use traffic monitored from recursive DNS servers in lower DNS layers, \textit{Kopis} is operating with data from two large AuthNS as well as a country level TLD server (.ca space) in the upper DNS layers (see Figure~\ref{fig:kopis_data_sources}). Operating in this level in the DNS hierarchy leads to different challenges as well. A top-level domain server is rarely answering a request itself but most of the time is only delegating the request to a more specific server, e.g. a server responsible for the zone of a second-level domain in a company. For this reason, to get the actual resolved record (IP), the delegated name server can be directly queried or a passive DNS database (e.g. from the Security Information Exchange \fsCite{SIEOnline}) can be engaged.
The first step of extracting features out of the captured traffic for each dns query \(q_j\) (to resolve a domain \textit{d}), is to find the epoch \(T_j\), in which the request has been made, the IP address of the machine \(R_j\) that run the query and the resolved records \(IPs_j\). Using these raw values, \textit{Kopis} extracts the following specific features:
The first step of extracting features out of the captured traffic for each DNS query \(q_j\) (to resolve a domain \textit{d}) is to find the epoch \(T_j\), in which the request has been made, the IP address of the machine \(R_j\) that ran the query and the resolved records \(IPs_j\). Using these raw values, \textit{Kopis} extracts the following specific features:
\subsubsection{Requester Diversity (RD)}
\label{subsubsec:kopis_requester_diversity}
This group of features tries to map the requester diversity, i.e. where the requests originate, into values that can be used in the \textit{Feature Computation} function. In general, this aims to find if related machines of a domain \textit{d} are globally distributed or acting in a bound location. It first important to notice that to map an IP address to its corresponding ASN, country and BGP prefix, the Team Cymru IP TO ASN MAPPING database has been leveraged \fsCite{CymruOnline}. This set of features is motivated on the premise that the machines involved with a domain used in malicious purposes, usually have a different distribution than those for legitimate usage. While benign services will show a consistent pattern of IP addresses that are looking up \textit{d}, malicious domains are queried from many machines from different locations around the world, e.g. bots in a botnet or spambots involved in a spam campaign. Recapturing that botnets are usually not targeted at specific geographical regions. Figure~\ref{fig:kopis_requester_distribution} shows the distribution of the ASNs as well as the country codes, calculated by the cumulative distribution function (CDF). In both cases, benign domains have a either low or very high distribution (bimodal distribution). In contrast, malicious domains show a larger spectrum of diversities, mainly depending on how successful the malware is spreading. There are mainly three values involved here. For all requester IP addresses \(\{R_j\}_{j=1..m}\), the BGP prefixes, the autonomous system numbers and the country codes (CC) are resolved. After this, the distribution of the occurrence frequency of these three sets is computed and for each distribution, the mean, standard deviation and variance is calculated (total of nine features). 
Another four features are extracted simply using the total number of distinct IP addresses (\textit{d} resolved to), the amount of BGP prefixes of these IPs, the total number of different ASNs and the total amount of distinct countries, these IPs reside in.
This group of features tries to map the requester diversity, i.e. where the requests originate, into values that can be used in the \textit{Feature Computation} function. In general, this aims to find if related machines of a domain \textit{d} are globally distributed or acting in a bound location. To map an IP address to its corresponding ASN, country and BGP prefix, the Team Cymru IP TO ASN MAPPING database has been leveraged \fsCite{CymruOnline}. This set of features is motivated on the premise that the machines, involved with a domain used for malicious purposes, usually have a different distribution than those for legitimate usage. While benign services will show a consistent pattern of IP addresses that are looking up \textit{d}, malicious domains are queried from many machines from different locations around the world, e.g. bots in a botnet or spambots involved in a spam campaign. Recall that botnets are usually not targeted at specific geographical regions. Figure~\ref{fig:kopis_requester_distribution} shows the distribution of the ASNs as well as the country codes calculated by the cumulative distribution function (CDF). In both cases, benign domains have an either low or very high distribution (bimodal distribution). In contrast, malicious domains show a larger spectrum of diversities, mainly depending on how successful the malware is spreading. There are mainly three values involved here. For all requester IP addresses \(\{R_j\}_{j=1..m}\), the BGP prefixes, the autonomous system numbers and the country codes (CC) are resolved. After this, the distribution of the occurrence frequency of these three sets is computed and for each distribution, the mean, standard deviation and variance is calculated (total of nine features). 
Another four features are extracted simply using the total number of distinct IP addresses (\textit{d} resolved to), the amount of BGP prefixes of these IPs, the total number of different ASNs and the total amount of distinct countries, these IPs reside in.
\begin{figure}[!htbp]
\centering
@@ -67,7 +67,7 @@ This group of features tries to map the requester diversity, i.e. where the requ
\subsubsection{Requester Profile (RP)}
\label{subsubsec:kopis_requester_profile}
The \textit{Requester Profile} features are aiming to separate request that are coming from hardened networks (like enterprise networks) from less secure networks, e.g. ISP networks. Most smaller networks like enterprise or university networks are much better protected against malware in general and such should show less requests to malicious domains. On the other hand, ISPs do usually not invest much effort into cleaning their network from malware and do not offer a high level of protection against malware propagation inside the network. As \textit{Kopis} is operating in the upper DNS layers, it is often not possible to simply measure the population behind the requesting RDNS server (due to e.g. caching \fsCite{10.1007/978-3-540-24668-8_15}) and a different metric has to be found to measure the size of the network, a request has been submitted from. Assuming traffic, monitored from a large AuthNS in epoch \(E_t\) that has authority for a set of Domains \(D\) and all unique requesting IP addresses \(R\). For each requester IP \(R_k \in R\) the amount of different domains, queried by \(R_k\) in \(E_t\), is counted \(c_{t,k}\). A weight can then be applied to each requester \(R_k\) as \(w_{t,k} = \frac{c_{t,k}}{max_{l=1}^{|R|}c_{t,l}}\). Subsequent, the more domains in \(D\) a requester \(R_k\) is querying, the higher the weight will be. This way, high weights are corresponding to larger networks and following the explanation above, the more likely it is that this requester is infected with malicious software. Given a domain \textit{d} and let \(R_d\) being the set of all requester IP addresses, the count \(c_{t,k}\) is computed for each epoch \(E_t\) like previously described. In the following, the count for each epoch is multiplied with each weight \(w_{t-n,k}\), where \(w_{t-n,k}\) are the weights of \textit{n} days before epoch \(E_t\), get the set of weighted counts of \textit{d} during \(E_t\): \(WC_t(d) = \{c_{t,k} * w_{t-n,k}\}_k\). 
Finally, five different feature values are calculated with the values of \(WC_t(d)\): the average, the biased and unbiased standard deviation and the biased and unbiased variance. The biased and unbiased estimator for the standard deviation of a random variable \textit{X} are defined as \(\sqrt{\sum_{i=1}^N \frac{1}{N}(\bar{X}_i - \mu)^2}\) and respectively \(\sqrt{\sum_{i=1}^N \frac{1}{N-1}(\bar{X}_i - \mu)^2}\) (with \(N\) being the amount of samples and \(\mu\) the empirical mean).
The \textit{Requester Profile} features are aiming to separate requests that are coming from hardened networks (like enterprise networks) from less secure networks, e.g. ISP networks. Most smaller networks like enterprise or university networks are much better protected against malware in general and thus should show fewer requests to malicious domains. On the other hand, ISPs do usually not invest much effort into cleaning their network from malware and do not offer a high level of protection against malware propagation inside the network. As \textit{Kopis} is operating in the upper DNS layers, it is often not possible to simply measure the population behind the requesting RDNS server (due to e.g. caching \fsCite{10.1007/978-3-540-24668-8_15}) and a different metric has to be found to measure the size of the network a request has been submitted from. Assume traffic monitored from a large AuthNS in epoch \(E_t\) that has authority for a set of domains \(D\) and all unique requesting IP addresses \(R\). For each requester IP \(R_k \in R\) the amount of different domains, queried by \(R_k\) in \(E_t\), is counted \(c_{t,k}\). A weight can then be applied to each requester \(R_k\) as \(w_{t,k} = \frac{c_{t,k}}{max_{l=1}^{|R|}c_{t,l}}\). Subsequently, the more domains in \(D\) a requester \(R_k\) is querying, the higher the weight will be. This way, high weights are corresponding to larger networks and following the explanation above, the more likely it is that this requester is infected with malicious software. Given a domain \textit{d}, let \(R_d\) be the set of all requester IP addresses; the count \(c_{t,k}\) is computed for each epoch \(E_t\) as previously described. In the following, the count for each epoch is multiplied with each weight \(w_{t-n,k}\) (where \(w_{t-n,k}\) are the weights of \textit{n} days before epoch \(E_t\)) to get the set of weighted counts of \textit{d} during \(E_t\): \(WC_t(d) = \{c_{t,k} * w_{t-n,k}\}_k\). 
Finally, five different feature values are calculated with the values of \(WC_t(d)\): the average, the biased and unbiased standard deviation and the biased and unbiased variance. The biased and unbiased estimator for the standard deviation of a random variable \textit{X} are defined as \(\sqrt{\sum_{i=1}^N \frac{1}{N}(\bar{X}_i - \mu)^2}\) and respectively \(\sqrt{\sum_{i=1}^N \frac{1}{N-1}(\bar{X}_i - \mu)^2}\) (with \(N\) being the amount of samples and \(\mu\) the empirical mean).
\subsubsection{Resolved-IPs Reputation (IPR)}
@@ -77,8 +77,9 @@ The set of \textit{Resolved-IPs Reputation} features consists of nine individual
\begin{itemize}
\item \textit{Malware Evidence: } contains three individual features: the number of IP addresses in the last month (with respect to \(E_t\)) that have been pointed to by any malicious domain and, likewise, the number of BGP prefixes and AS numbers that a malicious domain has been resolved to.
\item \textit{SBL Evidence: } using the domains in the Spamhaus Block list \fsCite{SBLOnline}, the average number of IP addresses, BGP prefixes and ASNs that have been pointed to by these domains is calculated.
\item \textit{Whitelist Evidence: } the list of domains \(WL\) that are suspected to be legitimate is constructed using the DNS whitelist of DNSWL \fsCite{DNSWLOnline} and the top 30 popular domains from Alexa \fsCite{AlexaWebInformationOnline}. Then the set of known good IPs \(WL_{IPs}\) is resolved from all domains in the white list \(WL\). Let \(IPs(d,t)\) be all addresses that \textit{d} points to (similarly to the first two groups) the amount of matching IP addresses and the amount of ASNs and BGP prefixes that include IP addresses of \(WL_{IPs}\) is calculated.
\item \textit{SBL Evidence: } using the domains in the Spamhaus Block list \fsCite{SBLOnline}, the average number of IP addresses, BGP prefixes and ASNs that have been pointed to by these domains is calculated.
\item \textit{Whitelist Evidence: } the list of domains \(WL\) that are suspected to be legitimate is constructed using the DNS whitelist of DNSWL \fsCite{DNSWLOnline} and the top 30 popular domains from Alexa \fsCite{AlexaWebInformationOnline}. Then, the set of known good IPs \(WL_{IPs}\) is resolved from all domains in the white list \(WL\). Let \(IPs(d,t)\) be all addresses that \textit{d} points to (similarly to the first two groups) the amount of matching IP addresses and the amount of ASNs and BGP prefixes that include IP addresses of \(WL_{IPs}\) is calculated.
\end{itemize}
@@ -86,7 +87,7 @@ The set of \textit{Resolved-IPs Reputation} features consists of nine individual
\subsection{Results}
\label{subsec:kopis_results}
\textit{Kopis} used DNS traffic captured at two major domain name registrars (AuthNS servers) between 01.01.2010 and 31.08.2010 as well as a country code top level domain server (.ca) from 26.08.2010 up to 18.10.2010. As the TLD server was operated in delegate-only mode, passive DNS traffic had to be additionally collected to get the resolutions for these queries. In total, this led to 321 million lookups a day in average. This amount of data showed to be a significant problem and the overall traffic size to be analysed had to be reduced. The most significant reduction was to remove all duplicate queries and only take unique requests into account. Finally, about 12.5 million daily unique requests remained in average. Using the \textit{KB}, that consists of various sources (see \fsCite{subsec:kopis_architecture}), a sample with 225,429 unique RRs (corresponding to 28,915 unique domain names) could be split into groups with 27,317 malicious and 1,598 benign domains. All raw data was indexed in a relational database and was enriched with information like first and last seen timestamps. Like any system that uses a machine learning approach, it was important for \textit{Kopis} to select significant features and a period that was sufficient for the training to deliver good results. Figure~\ref{fig:kopis_train_period_selection} shows the \glspl{roc} (ROC) of different models, generated with data from periods of one up to five days and validated using 10-fold cross validation. According to \fsAuthor[Section 5.3]{Antonakakis:2011:DMD:2028067.2028094}: ``When we increased the observation window beyond the mark of five days we did not see a significant improvement in the detection results.'' Using these models, the best classification algorithm had to be found. This has been accomplished using a technique called model selection (see e.g. \fsCite{Kohavi:1995:SCB:1643031.1643047}). 
The most accurate classifier for these models has shown to be the \textit{random forest} implementation with a true positive rate of 98.4\% and a false positive rate of 0.3\% (with training data from a period of five days). \textit{Random forest} is the combination of different decision trees, either trained on different training sets or using different sets of features. Unfortunately, the exact random forest classification implementation of \textit{Kopis} has not been published. Other classifiers that have been experimented with are: Naive Bayes, k-nearest neighbors, Support Vector Machines, MLP Neural Network and random committee.
\textit{Kopis} used DNS traffic captured at two major domain name registrars (AuthNS servers) between 01.01.2010 and 31.08.2010 as well as a country code top level domain server (.ca) from 26.08.2010 up to 18.10.2010. As the TLD server was operated in delegate-only mode, passive DNS traffic had to be additionally collected to get the resolutions for these queries. In total, this led to 321 million lookups a day on average. This amount of data proved to be a significant problem and the overall traffic size to be analysed had to be reduced. The most significant reduction was to remove all duplicate queries and only take unique requests into account. Finally, about 12.5 million daily unique requests remained on average. Using the \textit{KB}, which consists of various sources (see \ref{subsec:kopis_architecture}), a sample with 225,429 unique RRs (corresponding to 28,915 unique domain names) could be split into groups with 27,317 malicious and 1,598 benign domains. All raw data was indexed in a relational database and was enriched with information like first and last seen timestamps. Like any system that uses a machine learning approach, it was important for \textit{Kopis} to select significant features and a period that was sufficient for the training to deliver good results. Figure~\ref{fig:kopis_train_period_selection} shows the \glspl{roc} (ROC) of different models, generated with data from periods of one up to five days and validated using 10-fold cross validation. According to \fsAuthor{Antonakakis:2011:DMD:2028067.2028094}: ``When we increased the observation window beyond the mark of five days we did not see a significant improvement in the detection results.'' Using these models, the best classification algorithm had to be found. This has been accomplished using a technique called model selection (see e.g. \fsCite{Kohavi:1995:SCB:1643031.1643047}).
The most accurate classifier for these models has shown to be a \textit{random forest} implementation with a true positive rate of 98.4\% and a false positive rate of 0.3\% (with training data from a period of five days). \textit{Random forest} is the combination of different decision trees, either trained on different training sets or using different sets of features. Unfortunately, the exact random forest classification implementation of \textit{Kopis} has not been published. Other classifiers that have been experimented with are: Naive Bayes, k-nearest neighbors, Support Vector Machines, MLP Neural Network and random committee.
\begin{figure}[!htbp]
\centering

View File

@@ -12,7 +12,7 @@
\textbf{Top-level domain:} TLD, where \(TLD(d)\) is the top-level domain of \textit{d}. \\
\textbf{Second-level domain:} \(2LD(d)\) being the second-level domain of domain \textit{d}. \\
\textbf{Third-level domain: } \(3LD(d)\) containing the three rightmost substrings separated by period for \textit{d}.
\item Given domain \(d\) \(Zone(d)\) describes the set of domains that include \textit{d} and all subdomains of \textit{d}.
\item Given domain \(d\), \(Zone(d)\) describes the set of domains that include \textit{d} and all subdomains of \textit{d}.
\item \(D = \{d_1, d_2, ..., d_m\}\) representing a set of domains and \(A(D)\) all IP addresses that, at any time, any domain \(d \in D\) resolved to.
\item \(BGP(a)\) consists of all IP addresses that are residing in the same \gls{bgp} prefix as \textit{a}.
\item Analogously, \(AS(a)\) as the set of IP addresses located in the same \gls{as} as \textit{a}.
@@ -23,10 +23,10 @@
\label{subsec:notos_architecture}
The main goal of \textit{Notos} is to assign a dynamic reputation score to domain names. Domains that are likely to be involved in malicious activities are tagged with a low reputation score, whereas legitimate Internet services are assigned with a high reputation score.
\textit{Notos'} primary source of information for the training and classification is a database that contains historical data about domains and resolved IP addresses. This database is built using DNS traffic from two recursive ISP DNS servers (RDNS) and pDNS logs collected by the Security Information Exchange (SIE) which covers authoritative name servers in North America and Europe. For building a list of known malicious domain names, several honeypots and spam-traps have been deployed. A large list of known benign domains has been gathered from the top sites list on \textit{alexa.com} which ranks the most popular websites in several regions \fsCite{AlexaWebInformationOnline}. These two lists are referred to as the \textit{knowledge base} and are used to train the reputation training model.
\textit{Notos'} primary source of information for the training and classification is a database that contains historical data about domains and resolved IP addresses. This database is built using DNS traffic from two recursive ISP DNS servers (RDNS) and pDNS logs collected by the Security Information Exchange (SIE) which covers authoritative name servers in North America and Europe. For building a list of known malicious domain names, several honeypots and spam-traps have been deployed. A large list of known benign domains has been gathered from the top sites list on \textit{alexa.com} which ranks the most popular websites in several regions \fsCite{AlexaWebInformationOnline}. These two lists are referred to as the \textit{knowledge base} and are particularly used to train the reputation model.
To assign a reputation score to a domain \textit{d}, the most current set of IP addresses \(A_{c}(d) = \left\{a_{i}\right\}_{i=1..m}\) to which \textit{d} points is first fetched. Afterwards the pDNS database is queried for several information for this domain \textit{d}. The \textit{Related Historic IPs (RHIPs)} is the set of all IP addresses that ever pointed to this domain. In case domain \textit{d} is a third-level domain, all IP addresses that pointed to the corresponding second-level domain are also included. See Chapter~\ref{subsec:domain_names} for more information on the structure of domain names. If \textit{d} is a second-level domain, then all IPs that are pointed to from any of the third-level subdomains are also added to the RHIPs. In the next step, the set of \textit{Related Historic Domains (RHDNs)} is queried and covers all domains that are related to the currently processed domain \textit{d}. Specifically, all domains which ever resolved to an IP address that is residing in any of the ASNs of those IPs that \textit{d} currently resolves to.
To assign a reputation score to a domain \textit{d}, the most current set of IP addresses \(A_{c}(d) = \left\{a_{i}\right\}_{i=1..m}\), to which \textit{d} points, is first fetched. Afterwards, the pDNS database is queried for further information about this domain \textit{d}. The \textit{Related Historic IPs (RHIPs)} is the set of all IP addresses that ever pointed to this domain. In case domain \textit{d} is a third-level domain, all IP addresses that pointed to the corresponding second-level domain are also included. Also see Section~\ref{subsec:domain_names} for more information on the structure of domain names. If \textit{d} is a second-level domain, then all IPs that are pointed to from any of the third-level subdomains are also added to the RHIPs. The reason why second- and third-level domains are combined here is that according to \fsAuthor{Antonakakis:2010:BDR:1929820.1929844} most third level domains are related to their corresponding second-level domain and therefore are treated similarly. In the next step, the set of \textit{Related Historic Domains (RHDNs)} is queried and covers all domains that are related to the currently processed domain \textit{d}. Specifically, all domains which ever resolved to an IP address that is residing in any of the ASNs of those IPs that \textit{d} currently resolves to.
There are three types of features extracted from the database for \textit{Notos} that are used for training the reputation model (quotation from \fsCite[Section 3.1]{Antonakakis:2010:BDR:1929820.1929844}):
@@ -34,11 +34,11 @@ There are three types of features extracted from the database for \textit{Notos}
\begin{enumerate}
\item \textbf{Network-based features:} The first group of statistical features is extracted from the set of RHIPs. We measure quantities such as the total number of IPs historically associated with \textit{d}, the diversity of their geographical location, the number of distinct autonomous systems (ASs) in which they reside, etc.
\item \textbf{Zone-based features:} The second group of features we extract are those from the RHDNs set. We measure the average length of domain names in RHDNs, the number of distinct TLDs, the occurrence frequency of different characters, etc.
\item \textbf{Evidence-based features:} The last set of features includes the measurement of quantities such as the number of distinct malware samples that contacted the domain \textit{d}, the number of malware samples that connected to any of the IPs pointed by \textit{d}, etc.
\item \textbf{Evidence-based features:} The last set of features includes the measurement of quantities such as the number of distinct malware samples that contacted the domain \textit{d}, the number of malware samples that connected to any of the IPs pointed by \textit{d}, etc.
\end{enumerate}
\end{quote}
Figure~\ref{fig:notos_system_overview} shows the overall system architecture of \textit{Notos}. After all the features are extracted from the passive DNS database and prepared for further processing, the reputation engine is initialized. \textit{Notos'} reputation engine is operating in two modes. In offline mode, the reputation model is constructed for a set of domains using the feature set of each domain and the classification which can be calculated using the \textit{knowledge base} with black- and whitelist (also referred as training). This model can later be used in the online mode to dynamically assign a reputation score. In online mode, the same features that are used for the initial training are extracted for a new domain (resource record or RR, see Section~\nameref{subsubsec:dns_resource_records}) and \textit{Notos} uses the trained reputation engine to calculate a dynamic reputation rating (see Figure~\ref{fig:notos_online_offline_mode}). The data for labeling domains and IPs originates from various sources: the blacklist primarily consists of filter lists from malware services like malwaredomainlist.com and malwaredomains.com. Additional IP and domain labeling blacklists are the Sender Policy Block from Spamhaus (\fsCite{SBLOnline}) and the ZeuS blocklist from ZeuS Tracker (\fsCite{zeusblocklistInformationOnline}). The base has been downloaded before the main analyzation period (fifteen days from the first of August 2009) and as filter lists usually lag behind state-of-the art malware, the blacklists have continuously been updated. The whitelist was built using the top 500 popular Alexa websites. Additionally, the 18 most common second level domains from various content delivery networks for classifying the CDN clusters and a list of 464 dynamic DNS 2LD for identifying domains and IPs in dynamic DNS zones have been gathered.
Figure~\ref{fig:notos_system_overview} shows the overall system architecture of \textit{Notos}. After all the features are extracted from the passive DNS database and prepared for further processing, the reputation engine is initialized. \textit{Notos'} reputation engine is operating in two modes. In offline mode, the reputation model is constructed for a set of domains using the feature set of each domain and the classification which can be calculated using the \textit{knowledge base} with black- and whitelist (also referred to as training). This model can later be used in the online mode to dynamically assign a reputation score. In online mode, the same features that are used for the initial training are extracted for a new domain (resource record or RR, see Section~\nameref{subsubsec:dns_resource_records}) and \textit{Notos} uses the trained reputation engine to calculate a dynamic reputation rating (see Figure~\ref{fig:notos_online_offline_mode}). The data for labeling domains and IPs originates from various sources: the blacklist primarily consists of filter lists from malware services like malwaredomainlist.com and malwaredomains.com. Additional IP and domain labeling blacklists are the Spamhaus Block List (\fsCite{SBLOnline}) and the ZeuS blocklist from ZeuS Tracker (\fsCite{zeusblocklistInformationOnline}). The base has been downloaded before the main analysis period (fifteen days from the first of August 2009) and as filter lists usually lag behind state-of-the-art malware, the blacklists have continuously been updated. The whitelist was built using the top 500 popular Alexa websites. Additionally, the 18 most common second level domains from various content delivery networks for classifying the CDN clusters and a list of 464 dynamic DNS second level domains, for identifying domains and IPs in dynamic DNS zones, have been gathered.
\begin{figure}[!htbp]
\centering
@@ -63,7 +63,7 @@ In this Section, all statistical features are listed and a short explanation, fo
\subsubsection{Network-based features}
\label{subsubsec:notos_network-based_features}
The first group of features handles network-related keys. This group mostly describe how the owning operators of \textit{d} allocate network resources to achieve different goals. While most legitimate and professionally operated internet services feature have a rather stable network profile, malicious usage usually involves short living domain names and IP addresses with high agility to circumvent blacklisting and other simple types of resource blocking. Botnets usually contain machines in many different networks (\glspl{as} and \glspl{bgp}) operated by different organizations in different countries. Appropriate companies mostly acquire bigger IP blocks and such use consecutive IPs for their services in the same address space. This homogeneity also applies to other registration related information like registrars and registration dates. To measure this level of agility and homogeneity, eighteen statistical network-based features are extracted from the RHIPs (see Table~\ref{tab:notos_network-based_features}).
The first group of features handles network-related keys. This group mostly describes how the owning operators of \textit{d} allocate network resources to achieve different goals. While most legitimate and professionally operated internet services have a rather stable network profile, malicious usage usually involves short-lived domain names and IP addresses with high agility to circumvent blacklisting and other simple types of resource blocking. Botnets usually contain machines in many different networks (\glspl{as} and \glspl{bgp}) operated by different organizations in different countries. Appropriate companies mostly acquire bigger IP blocks and thus use consecutive IPs for their services in the same address space. This homogeneity also applies to other registration related information like registrars and registration dates. To measure this level of agility and homogeneity, eighteen statistical network-based features are extracted from the RHIPs (see Table~\ref{tab:notos_network-based_features}).
\begin{table}[!htbp]
\centering
@@ -96,7 +96,7 @@ The first group of features handles network-related keys. This group mostly desc
\subsubsection{Zone-based features}
\label{subsubsec:notos_zone-based_features}
The second group is about zone-based features and is extracted from the RHDNs. In contrast to the network-based features which compares characteristics of the historic IPs, the zone-based features handles characteristics of all historically involved domains. While legitimate services often involve many domains, they usually share similarities. For example, google.com, googlemail.com, googleplus.com, etc., are all services provided by Google and contain the string 'google' in their domains. In contrast, randomly generated domains used in spam campaigns are rarely sharing similarities. By calculating the mean, median and standard deviation for some key, the summarize of the shape of its distribution is investigated \fsCite{Antonakakis:2010:BDR:1929820.1929844}. To calculate this level of diversity, seventeen features are extracted which can be found in Table~\ref{tab:notos_zone-based_features}
The second group is about zone-based features and is extracted from the RHDNs. In contrast to the network-based features, which compare characteristics of the historic IPs, the zone-based features handle characteristics of all historically involved domains. While legitimate services often involve many domains, they usually share similarities. For example, google.com, googlemail.com, googleplus.com, etc., are all services provided by Google and contain the string 'google' in their domains. In contrast, randomly generated domains used in spam campaigns rarely share similarities. By calculating the mean, median and standard deviation for some key, the overall shape of its distribution is investigated \fsCite{Antonakakis:2010:BDR:1929820.1929844}. To calculate this level of diversity, seventeen features are extracted which can be found in Table~\ref{tab:notos_zone-based_features}.
\begin{table}[!htbp]
\centering
@@ -128,7 +128,7 @@ The second group is about zone-based features and is extracted from the RHDNs. I
\subsubsection{Evidence-based features}
\label{subsubsec:notos_evidence-based_features}
For the evidence-based features, public information and data from honeypots and spam-traps was collected. This \textit{knowledge base} primarily helps to discover if a domain \textit{d} is in some way interacting with known malicious IPs and domains. As domain names are much cheaper to obtain than IP addresses, malware authors tend to reuse IPs with updated domain names. The blacklist features indicate the reuse of known malicious resources like IP addresses, \gls{bgp} prefixes and \glspl{as}.
For the evidence-based features, public information and data from honeypots and spam-traps was collected. This \textit{knowledge base} primarily helps to discover if a domain \textit{d} is in some way interacting with known malicious IPs and domains. As domain names are much cheaper to obtain than IP addresses, malware authors tend to reuse IPs with updated domain names. Consequently, the blacklist features indicate the reuse of known malicious resources like IP addresses, \gls{bgp} prefixes and \glspl{as}.
\begin{table}[!htbp]
\centering
@@ -159,14 +159,14 @@ Figure~\ref{fig:notos_features} shows how the three different feature groups are
\subsection{Reputation Engine}
\label{subsec:notos_reputation_engine}
The reputation engine is used to dynamically assign a reputation score to a domain \textit{d}. In the first step, the engine has to be trained with the available training set (temporal defined as the \textit{training period}). The training is performed in an offline fashion which means all data is statically available at the beginning of this step. The training mode consists of three modules: The \textit{Network Profile Model} is a model of how known good domains are using resources. This model uses popular content delivery networks (e.g., Akamai, Amazon CloudFront) and large sites (e.g., google.com, yahoo.com) as a base. In total the \textit{Network Profile Model} consists of five classes of domains: \textit{Popular Domains}, \textit{Common Domains}, \textit{Akamai Domains}, \textit{CDN Domains} and \textit{Dynamic DNS Domains}. The second module \textit{Domain Name Clusters} performs a general clustering of all domains (respectively their statistical feature vectors) of the training set. There are two consecutive clustering processes: The \textit{network-based} clustering aims to group domains with similar characteristics in terms of the agility, e.g. how often DNS resources are changed. To refine those clusters, a \textit{zone-based} clustering is performed which groups domains that are similar in terms of its RHDNs (see explanation for the \textit{zone-based features}). Those clusters of domains with similar characteristics can then be used to identify mostly benign and malicious sets of domains. In the last step of the offline mode, the \textit{Reputation Function} is built. 
As seen in Figure~\ref{fig:notos_online_offline_mode} this module takes the results of the \textit{Network Profile Model} (\(NM(d_i)\)) and the \textit{Domain Name Clusters} (\(DC(d_i)\)) for each domain \textit{d} in \(d_i, i = 1..n\) as inputs, calculates an \textit{Evidence Features Vector} \(EV(d_i)\), which basically checks if \(d_i\) or any of its resolved IPs is known to be benign or malicious, and builds a model that can assign a reputation score between zero and one to \textit{d}. This \textit{Reputation Function} is implemented as a statistical classifier. These three modules form the reputation model that can be used in the last step to compute the reputation score. A rebuild of the training model can be done at any time, for example given an updated training set.
The reputation engine is used to dynamically assign a reputation score to a domain \textit{d}. In the first step, the engine has to be trained with the available training set (temporally defined as the \textit{training period}). The training is performed in an offline fashion which means all data is statically available at the beginning of this step. The training mode consists of three modules: The \textit{Network Profile Model} is a model of how known good domains are using resources. This model uses popular content delivery networks (e.g., Akamai, Amazon CloudFront) and large sites (e.g., google.com, yahoo.com) as a base. In total, the \textit{Network Profile Model} consists of five classes of domains: \textit{Popular Domains}, \textit{Common Domains}, \textit{Akamai Domains}, \textit{CDN Domains} and \textit{Dynamic DNS Domains}. The second module \textit{Domain Name Clusters} performs a general clustering of all domains (respectively their statistical feature vectors) of the training set. There are two consecutive clustering processes: The \textit{network-based} clustering aims to group domains with similar characteristics in terms of the agility, e.g. how often DNS resources are changed. To refine those clusters, a \textit{zone-based} clustering is performed which groups domains that are similar in terms of their RHDNs (see explanation for the \textit{zone-based features}). Those clusters of domains with similar characteristics can then be used to identify benign and malicious sets of domains. In the last step of the offline mode, the \textit{Reputation Function} is built.
As seen in Figure~\ref{fig:notos_online_offline_mode}, this module takes the results of the \textit{Network Profile Model} (\(NM(d_i)\)) and the \textit{Domain Name Clusters} (\(DC(d_i)\)) for each domain \textit{d} in \(d_i, i = 1..n\) as inputs, calculates an \textit{Evidence Features Vector} \(EV(d_i)\), which basically checks if \(d_i\) or any of its resolved IPs is known to be benign or malicious, and builds a model that can assign a reputation score between zero and one to \textit{d}. This \textit{Reputation Function} is implemented as a statistical classifier. These three modules form the reputation model that can be used in the last step to compute the reputation score. A rebuild of the training model can be done at any time, for example given an updated training set.
The final stage of the reputation engine is the online (streaming like) mode. Any considered domain \textit{d} is first supplied to the \textit{network profiles} module which returns a probability vector \(NM(d) = \{c_1, c_2, ..., c_5\}\) of how likely \textit{d} belongs to one of the five classes (e.g. probability \(c_1\) that \textit{d} belongs to \textit{Popular Domains}). \(DC(d)\) is the resulting vector of the \textit{domain clusters} module and can be broken down into the following parts: For the domain \textit{d} of interest, the network-based features are extracted and the closest network-based cluster \(C_d\), generated in the training mode by the \textit{Domain Name Clusters} module, is calculated. The following step takes all zone-based feature vectors \(v_j \in C_d\) and eliminates those vectors that do not fulfill \(dist(z_d , v_j ) < R\), where \(z_d\) is the zone-based feature vector for \textit{d} and \textit{R} being a predefined radius; or \(v_j \in KNN(z_d)\), with \(KNN(z_d)\) being the k nearest-neighbors of \(z_d\). Each vector \(v_i\) of the resulting subset \(V_d \subseteq C_d\) is then assigned one of these eight labels: \textit{Popular Domains}, \textit{Common Domains}, \textit{Akamai}, \textit{CDN}, \textit{Dynamic DNS}, \textit{Spam Domains}, \textit{Flux Domains}, and \textit{Malware Domains}. The next step is to calculate the five statistical features that form the resulting vector \(DC(d) = \{l_1, l_2, ..., l_5\}\).
\begin{enumerate}
\item \(l_1\) the \textit{majority class label} \textit{L}, i.e. the most common label in \(v_i \in V_d\) (e.g. \textit{Spam Domains})
\item \(l_2\) the standard deviation of the occurrence frequency of each label
\item \(l_3\) mean of the distribution of distances between \(z_d\) and the vectors \(v_j \in V_{d}^{(L)}\), where \(V_{d}^{(L)} \subseteq V_d\) is the subset of those vectors, associated with the \textit{majority class label} \textit{L}
\item \(l_1\) the \textit{majority class label} \textit{L}, i.e. the most common label in \(v_i \in V_d\) (e.g. \textit{Spam Domains}).
\item \(l_2\) the standard deviation of the occurrence frequency of each label.
\item \(l_3\) mean of the distribution of distances between \(z_d\) and the vectors \(v_j \in V_{d}^{(L)}\), where \(V_{d}^{(L)} \subseteq V_d\) is the subset of those vectors, associated with the \textit{majority class label} \textit{L}.
\end{enumerate}
Having the \textit{Network Profile Model} \(NM(d)\), the \textit{Domain Name Clusters} \(DC(d_i)\), and the \textit{Evidence Features Vector} \(EV(d)\), these vectors are combined into a sixteen dimensional feature vector \(v(d)\) which is then fed into the trained reputation function. This results in a reputation score \textit{S} in the range of [0, 1], where values close to zero represent a low reputation and thus more likely represent malicious usage of the domain.
@@ -177,7 +177,7 @@ Having the \textit{Network Profile Model} \(NM(d)\), the \textit{Domain Name Clu
In the last Section of the evaluation of \textit{Notos}, experimental results that have been published are listed. This covers metrics about the usage of raw data, lessons learned in the analyzation process (i.e. examined algorithms) and final acquisitions like precision and accuracy of the classification.
\textit{Notos} being the first dynamic reputation system in the context of domain names, it is able to identify malicious domain names before they appear in public filter lists. To be able to assign reputation scores to new domains, \fsAuthor{Antonakakis:2010:BDR:1929820.1929844} used historic passive dns logs of a time span of 68 days with a total volume of 27,377,461 unique, successful A-type resolutions mainly from two recursive ISP DNS servers in North America (plus pDNS logs from various networks, aggregated by the SIE \ref{subsec:notos_architecture}). Figure~\ref{fig:notos_volume_new_rr} shows that after a few days, the number of new domains (RR) stabilizes at about 100,000 to 150,000 new domains a day compared to a much higher total load of unique resource records (about 94.7\% duplicates) (see Figure~\ref{fig:notos_total_volume_unique_rr}). The amount of new IPs is analogously nearly constant. After few weeks, even big content delivery networks with a large (but nearly constant) number of IP addresses will get scanned, in contrast to botnets where continuously new machines are infected. The authors infer that a relatively small pDNS database is therefor sufficient for \textit{Notos} to produce good results.
\textit{Notos} was the first dynamic reputation system in the context of domain names and it was able to identify malicious domain names before they appeared in public filter lists which ultimately led to the discovery of a previously unknown ZeuS botnet \fsCite{Antonakakis:2010:BDR:1929820.1929844}. To be able to assign reputation scores to new domains, \fsAuthor{Antonakakis:2010:BDR:1929820.1929844} used historic passive DNS logs of a time span of 68 days with a total volume of 27,377,461 unique, successful A-type resolutions mainly from two recursive ISP DNS servers in North America (plus pDNS logs from various networks, aggregated by the SIE \ref{subsec:notos_architecture}). Figure~\ref{fig:notos_volume_new_rr} shows that after a few days, the number of new RR stabilizes at about 100,000 to 150,000 domains a day compared to a much higher total load of unique resource records (about 94.7\% duplicates) (see Figure~\ref{fig:notos_total_volume_unique_rr}). The amount of new IPs is analogously nearly constant. After a few weeks, even big content delivery networks with a large (but nearly constant) number of IP addresses will get scanned, in contrast to botnets where continuously new machines are infected. The authors infer that a relatively small pDNS database is therefore sufficient for \textit{Notos} to produce good results.
\begin{figure}[!htbp]
\centering
@@ -193,7 +193,7 @@ In the last Section of the evaluation of \textit{Notos}, experimental results th
\label{fig:notos_total_volume_unique_rr}
\end{figure}
To get optimal results with the \textit{Reputation Function}, several classifiers have been tested and selected for the given circumstances (time complexity, detection results and precision [true positives over all positives]). A decision tree with Logit-Boost strategy (see \fsCite{Friedman98additivelogistic} for implementation details) has shown to provide the best results with a low false positive rate (FP) of 0.38\% and a high true positive rate (TP) of 96.8\%. These results have been verified using a 10-fold cross validation with a reputation score threshold of 0.5. This 10-fold cross validation method splits the dataset in ten partitions/folds (each partition optimally containing roughly the same class label distribution). One fold is then used as the validation sample (testing set) and the remaining nine partitions are used as the training set. The training set is used to train the model which is then cross validated with the testing set. This step is repeated for ten times using the same partitions, each partition being the testing set once. For the validation in \textit{Notos}, a dataset of 20,249 domains with 9,530 known bad RR has been used. As the list of known good domains, the Alexa top 500 websites have been used. Taking a bigger amount of Alexa popular sites has shown to decrease accuracy of the overall system, which could be interpreted as smaller/less popular sites are more likely to get compromised. To compare \textit{Notos}' performance with static filter lists, a pre-trained instance has been fed with 250,000 unique domains collected on 1. August 2009. 10,294 distinct entries have been reported with a reputation score below 0.5. 7,984 of this 10,294 or 77.6\% could be found in at least one blacklist (see Section~\nameref{subsec:notos_architecture} for a list of included blacklists). The remaining 22.4\% could not be precisely revealed. 
It is worth stating that 7,980 of the 7,984 confirmed bad domain names were assigned a reputation score of less than or equal to 0.15.
To get optimal results with the \textit{Reputation Function}, several classifiers have been tested and selected for the given circumstances (time complexity, detection results and precision [true positives over all positives]). A decision tree with Logit-Boost strategy (see \fsCite{Friedman98additivelogistic} for implementation details) has shown to provide the best results with a low false positive rate (FP) of 0.38\% and a high true positive rate (TP) of 96.8\%. These results have been verified using a 10-fold cross validation with a reputation score threshold of 0.5. This 10-fold cross validation method splits the dataset in ten partitions/folds (each partition optimally containing roughly the same class label distribution). One fold is then used as the validation sample (testing set) and the remaining nine partitions are used as the training set. The training set is used to train the model which is then cross validated with the testing set. This step is repeated ten times using the same partitions, each partition being the testing set once. For the validation in \textit{Notos}, a dataset of 20,249 domains with 9,530 known bad RR has been used. As the list of known good domains, the Alexa top 500 websites have been used. Taking a bigger amount of Alexa popular sites has shown to decrease accuracy of the overall system, which could be interpreted as an indication that smaller/less popular sites are more likely to get compromised. To compare \textit{Notos}' performance with static filter lists, a pre-trained instance has been fed with 250,000 unique domains collected on 1 August 2009. 10,294 distinct entries have been reported with a reputation score below 0.5. 7,984 of these 10,294 or 77.6\% could be found in at least one blacklist (see Section~\nameref{subsec:notos_architecture} for a list of included blacklists). The remaining 22.4\% could not be precisely revealed.
It is worth stating that 7,980 of the 7,984 confirmed bad domain names were assigned a reputation score of less than or equal to 0.15.
\subsection{Limitations}

View File

@@ -1,29 +1,29 @@
\chapter{Introduction}
\label{cha:Introduction}
The domain name system (\gls{dns}) has been one of the corner stones of the internet for a long time. It acts as a hierarchical, bidirectional translation device between mnemonic domain names and network addresses. It also provides service lookup or enrichment capabilities for a range of application protocols like HTTP, SMTP, and SSH (e.g. verifying SSH host keys using DNS). In the context of defensive IT security, investigating aspects of the \gls{dns} can facilitate protection efforts tremendously. Estimating the reputation of domains can help in identifying hostile activities. Such a score can, for example, consider features like quickly changing network blocks for a given domain or clustering of already known malicious domains and newly observed ones.
The Domain Name System (DNS) has been one of the cornerstones of the internet for a long time. It acts as a hierarchical, bidirectional translation device between mnemonic domain names and network addresses. It also provides service lookup or enrichment capabilities for a range of application protocols like HTTP, SMTP, and SSH (e.g. verifying SSH host keys using DNS). In the context of defensive IT security, investigating aspects of the DNS can facilitate protection efforts tremendously. Estimating the reputation of domains can help in identifying hostile activities. Such a score can, for example, consider features like quickly changing network blocks for a given domain or clustering of already known malicious domains and newly observed ones.
\section{Motivation}
\label{sec:motivation}
Malware like botnets, phishing sites and spam heavily rely on the domain name system to either hide behind proxies or communicate with command and control servers. Malware authors are getting more and more creative in bypassing traditional countermeasures. Using techniques like domain generation algorithms and fast-flux service networks make it hard to eliminate the roots of, for example botnets. The ZeuS botnet family exists since 2007 and further propagation could not be stopped until today (\fsCite{WhyDGOWinsOnline}). This leads to a situation where static filter list can not keep pace with evolving malware authors. To eliminate malware in the long run, malware has to be stopped before it can be widely spread across the internet. There are three major systems that have been proposed as dynamic domain reputation systems using passive DNS data in the past. With passive DNS databases getting more common, setting up a domain reputation system using pDNS data promises a lightweight monitoring system.
Malware like botnets, phishing sites and spam heavily rely on the Domain Name System to either hide behind proxies or communicate with command and control servers. Malware authors are getting more and more creative in bypassing traditional countermeasures. Using techniques like domain generation algorithms and fast-flux service networks makes it hard to eliminate the roots of botnets. The ZeuS botnet family has existed since 2007 and further propagation could not be stopped until today (\fsCite{WhyDGOWinsOnline}). This leads to a situation where static filter lists cannot keep pace with evolving malware authors. To eliminate malware in the long run, malware has to be stopped before it can be widely spread across the internet. There are three major systems that have been proposed as dynamic domain reputation systems using passive DNS data in the past. With passive DNS databases getting more common, setting up a domain reputation system using pDNS data promises a lightweight monitoring system.
\section{Challenges}
\label{sec:challenges}
All of the investigated approaches are using \gls{pdns} logs to generate a reputation score for a specific domain. These logs are monitored on central \gls{dns} resolvers and capture lookup results of arbitrary scale users (see Section~\ref{subsec:passive_dns}), so one challenge of this work is handling huge volumes of data. With about seven Gigabytes of uncompressed \gls{pdns} logs for a single day, various general issues might occur: General purpose computers nowadays usually have up to 16 Gigabytes of RAM (rarely 32 GB) which concludes that multiple tasks (i.e. building a training set) may not be performed purely in-memory. The time of analysis might also become a bottleneck. Simply loading one single day (see benchmark example~\ref{lst:load_and_iterate_one_day_of_compressed_pdns_logs}) of (compressed) logs from disk and iterating it without actual calculations takes roughly 148 seconds. To evaluate existing algorithms certain requirements have to be met. Passive DNS logs usually contain sensitive data which is one reason why most papers do not publish test data. For a precise evaluation the raw input data is needed. Some previously developed classifications have not completely disclosed the involved algorithms so these have to be reconstructed as closely as possible taking all available information into account.
All of the investigated approaches are using passive DNS (pDNS) logs to generate a reputation score for a specific domain. These logs are monitored on central DNS resolvers and capture lookup results of arbitrary scale users (see Section~\ref{subsec:passive_dns}), so one challenge of this work is handling huge volumes of data. With about seven Gigabytes of uncompressed pDNS logs for a single day, various general issues might occur: General purpose computers nowadays usually have up to 32 Gigabytes of RAM which implies that multiple tasks (i.e. building a training set) may not be performed purely in-memory. The time of analysis might also become a bottleneck (see Section~\ref{sec:system_architecture}). To evaluate existing algorithms certain requirements have to be met. Passive DNS logs usually contain sensitive data which is one reason why most papers do not publish test data. For a precise evaluation the raw input data is needed. Some previously developed classifications have not completely disclosed the involved algorithms so these have to be reconstructed as closely as possible taking all available information into account.
\section{Goals}
\label{sec:goals}
The task of this work is to evaluate for existing scoring mechanisms of domains in the special context of IT security, and also research the potential for combining different measurement approaches. It ultimately shall come up with an improved algorithm by combining existing algorithms for determining the probability of a domain being related to hostile activities.
The task of this work is to evaluate existing scoring mechanisms of domains in the special context of IT security, and also research the potential for combining different measurement approaches. It ultimately shall come up with an improved algorithm by combining existing algorithms for determining the probability of a domain being related to hostile activities.
\section{Related Work}
\label{sec:related_work}
In the context of IT-Security, there do exists several approaches for assigning a reputation score to a domain. Before 2010 the general idea of protecting a network against malicious requests targeting other networks was to establish static filter lists. This included both explicitly allowing requests as well as explicitly blocking request to certain IP addresses or domain names. For example \fsAuthor{Jung:2004:ESS:1028788.1028838} introduced an approach to block request to certain domains using a DNS black list. As shown by \fsCite{ramachandran2006can} in 2006, this approach is not always suitable to keep up with the speed of malware authors. A different type of system has been established in 2010 when two algorithms have been introduced, \textit{Notos} followed by \textit{Exposure}, that used machine learning to dynamically assign a reputation score to a domain by using the characteristics of how benign and malicious domains are usually configured and used in terms of e.g. DNS resource usage or the global distribution of the machines that are used for malicious purposes.
In the context of IT-Security, several approaches for assigning a reputation score to a domain do exist. Before 2010, the general idea of protecting a network against malicious requests targeting other networks was to establish static filter lists. This included both explicitly allowing requests as well as explicitly blocking requests to certain IP addresses or domain names. For example \fsAuthor{Jung:2004:ESS:1028788.1028838} introduced an approach to block requests to certain domains using a DNS black list. As shown by \fsCite{ramachandran2006can}, this approach is not always suitable to keep up with the speed of malware authors. A different type of system has been established in 2010 when two algorithms have been introduced, \textit{Notos} followed by \textit{Exposure}, that used machine learning to dynamically assign a reputation score to a domain by using the characteristics of how benign and malicious domains are usually configured and used in terms of e.g. DNS resource usage or the global distribution of the machines that are used for malicious purposes.

View File

@@ -1,17 +1,17 @@
\section{Domain Name System}
\label{sec:DNS}
The \gls{dns} is one of the cornerstones of the internet as it is known today. Nearly every device, connected to the internet is using DNS. Initial designs have been proposed in 1983 and evolved over the following four years into the first globally adapted standard RFC 1034 \fsCite{rfc1034} (see also RFC 1035 for implementation and specification details \fsCite{rfc1035}). The main idea of the \gls{dns} is translating human readable domain names to network addresses. There are many extensions to the initial design including many security related features and enhancements or the support for \gls{ipv6} in 1995.
The Domain Name System is one of the cornerstones of the internet as it is known today. Nearly every device connected to the internet is using DNS. Initial designs have been proposed in 1983 and evolved over the following four years into the first globally adopted standard RFC 1034 \fsCite{rfc1034} (see also RFC 1035 for implementation and specification details \fsCite{rfc1035}). The main idea of the DNS is translating human readable domain names to network addresses. There are many extensions to the initial design including many security related features and enhancements or the support for IPv6 in 1995.
In order to understand how the \gls{dns} is misused for malicious activities and how to prevent these attacks, it is necessary to explain some basic mechanisms.
In order to understand how the DNS is misused for malicious activities and how to prevent these attacks, it is necessary to explain some basic mechanisms.
\subsection{Basics}
\label{subsec:basics}
In the early days of the internet the mapping between host names and IP addresses has been accomplished using a single file, \texttt{HOSTS.TXT}. This file was maintained on a central instance, the \gls{sri-nic}, and distributed to all hosts in the internet via \gls{ftp}. As this file grew and more machines got connected to the internet, the costs for distributing the mappings were increasing up to an unacceptable effort. Additionally, the initial trend of the internet, the \gls{arpanet} connecting multiple hosts together into one network, got outdated. The new challenge of the internet was to connect multiple local networks (which itself contain many machines) into a global, interactive and \gls{tcp/ip} based grid. With the amount of machines quickly increasing and the costs for distributing the \texttt{HOSTS.TXT} file rising, a new system for a reliable and fast resolution of addresses to host names had to be developed.
In the early days of the internet the mapping between host names and IP addresses has been accomplished using a single file, \texttt{HOSTS.TXT}. This file was maintained on a central instance, the SRI-NIC (Stanford Research Institute - Network Information Center), and distributed to all hosts in the internet via FTP (File Transfer Protocol). As this file grew and more machines got connected to the internet, the costs for distributing the mappings were increasing up to an unacceptable effort. Additionally, the initial trend of the internet, the Advanced Research Projects Agency Network (ARPANET) connecting multiple hosts together into one network, got outdated. The new challenge of the internet was to connect multiple local networks (which themselves contain many machines) into a global, interactive and TCP/IP based grid. With the amount of machines quickly increasing and the costs for distributing the \texttt{HOSTS.TXT} file rising, a new system for a reliable and fast resolution of addresses to host names had to be developed.
\citeauthor{mockapetris1988development} proposed five conditions that had to be met by the base design of \gls{dns} \fsCite[p. 124]{mockapetris1988development}:
\citeauthor{mockapetris1988development} proposed five conditions that had to be met by the base design of DNS \fsCite[p. 124]{mockapetris1988development}:
\begin{itemize}
\item Provide at least the same information as HOSTS.TXT.
@@ -21,21 +21,21 @@ In the early days of the internet the mapping between host names and IP addresse
\item Provide tolerable performance.
\end{itemize}
For the \gls{dns} to be globally acceptable, it should furthermore not give too many restrictions on how the distributed local networks and the hosts are designed and operated. This includes i.e. not limiting the system to work for a single \gls{os} or software architecture, backing different network topologies or the support of encapsulation of other name spaces.
For the DNS to be globally acceptable, it should furthermore not give too many restrictions on how the distributed local networks and the hosts are designed and operated. This includes i.e. not limiting the system to work for a single operating system (OS) or software architecture, backing different network topologies or the support of encapsulation of other name spaces.
In general, avoid as many constraints and support as many implementation structures as possible.
\subsubsection{Architecture}
\label{subsubsec:architecture}
The \gls{dns} primarily builds on two types of components: name servers and resolvers. A name server holds information that can be used to handle incoming requests e.g. to resolve a domain name into an IP address. Although resolving domain names into IP addresses might be the primary use case, name servers can possess arbitrary (within the limits of DNS records see \ref{tab:resource_record_types}) information and provide service to retrieve this information. A resolver interacts with client software and implements algorithms to find a name server that holds the information requested by the client (see also Section~\ref{subsec:resolution} for how the resolution is working). Depending on the functionality needed, these two components may be split to different machines and locations or running on one machine. Whereas in former days the bandwidth of a workstation may not have been sufficient to run a resolver on, today it is more interesting to benefit from cached information for performance reasons. In a company network it is common to have multiple resolvers e.g. one per organizational unit.
The DNS primarily builds on two types of components: name servers and resolvers. A name server holds information that can be used to handle incoming requests e.g. to resolve a domain name into an IP address. Although resolving domain names into IP addresses might be the primary use case, name servers can possess arbitrary (within the limits of DNS records see \ref{tab:resource_record_types}) information and provide service to retrieve this information. A resolver interacts with client software and implements algorithms to find a name server that holds the information requested by the client (see also Section~\ref{subsec:resolution} for how resolution works). Depending on the functionality needed, these two components may be split into different machines and locations or running on one machine. Whereas in former days the bandwidth of a workstation may not have been sufficient to run a resolver on, today it is more interesting to benefit from cached information for performance reasons. In a company network it is common to have multiple resolvers e.g. one per organizational unit.
\subsubsection{Name space}
\label{subsubsec:name_space}
The \gls{dns} is based on a naming system that consists of a hierarchical and logical tree structure and is called the domain namespace. It contains a single root node (\textit{top level domain} or \textit{TLD})and an arbitrary amount of nodes in subordinate levels in variable depths (descending called second level, third level domain, and so forth). Each node is uniquely identifiable through a \gls{fqdn} and usually represents a domain, machine or service in the network. The FQDN can be constructed by fully iterating the DNS tree, see Figure~\ref{fig:dns_tree_web_de} for an example of how the DNS tree for www.web.de is looking like (note that the Root node is often abbreviated with a simple dot). Furthermore, every domain can be subdivided into more fine-grained domains. These can again be specific machines or domains, called subdomains. This subdividing is an important concept for the internet to continue to grow and each responsible instance of a domain (e.g. a company or cooperative) is responsible for the maintenance and subdivision of the domain.
The DNS is based on a naming system that consists of a hierarchical and logical tree structure and is called the domain namespace. It contains a single root node (\textit{top level domain} or \textit{TLD}) and an arbitrary amount of nodes in subordinate levels in variable depths (descending called second level, third level domain, and so forth). Each node is uniquely identifiable through a fully qualified domain name and usually represents a domain, machine or service in the network. The FQDN can be constructed by fully iterating the DNS tree, starting from the Root node. See Figure~\ref{fig:dns_tree_web_de} for an example of what the DNS tree for www.web.de looks like (note that the Root node is often abbreviated with a simple dot). Furthermore, every domain can be subdivided into more fine-grained domains. These can again be specific machines or domains, called subdomains. This subdividing is an important concept for the internet to continue to grow and each responsible instance of a domain (e.g. a company or cooperative) is responsible for the maintenance and subdivision of the domain.
\begin{figure}[!htbp]
\centering
@@ -45,10 +45,10 @@ The \gls{dns} is based on a naming system that consists of a hierarchical and lo
\end{figure}
\subsubsection{\gls{dns} Resource Records}
\subsubsection{DNS Resource Records}
\label{subsubsec:dns_resource_records}
See Table~\ref{tab:resource_record_types} for an list of built-in resource types in the DNS. Those built-in resource records do serve different purposes and are more or less frequently used.
See Table~\ref{tab:resource_record_types} for a list of built-in resource types in the DNS.
\begin{table}[!htbp]
@@ -60,9 +60,9 @@ See Table~\ref{tab:resource_record_types} for an list of built-in resource types
Value & Text Code & Type & Description \\ \midrule
1 & A & Address & \begin{tabular}[c]{@{}l@{}}Returns the 32 bit IPv4 address of a host. \\ Most commonly used for name resolution \\ of a host.\end{tabular} \\
28 & AAAA & IPv6 address & \begin{tabular}[c]{@{}l@{}}Similar to the A record, this returns the \\ address of an host. For IPv6 this has 128 bit.\end{tabular} \\
2 & NS & \begin{tabular}[c]{@{}l@{}}Name\\ Server\end{tabular} & \begin{tabular}[c]{@{}l@{}}Specifies the name of a \gls{dns} name server \\ that is authoritative for the zone. Each \\ zone must have at least one NS record \\ that points to its primary name server.\end{tabular} \\
5 & CNAME & \begin{tabular}[c]{@{}l@{}}Canonical\\ Name\end{tabular} & \begin{tabular}[c]{@{}l@{}}The CNAME records allows to define \\ aliases that point to the real canonical \\ name of the node. This can e.g. be used\\ to hide internal \gls{dns} structures and \\ provide a stable interface for outside users.\end{tabular} \\
6 & SOA & \begin{tabular}[c]{@{}l@{}}Start of\\ Authority\end{tabular} & \begin{tabular}[c]{@{}l@{}}The SOA record marks the start of a \gls{dns} \\ zone and provides important information \\ about the zone. Every zone must have \\ exactly one SOA records containing \\ e.g. name of the zone, primary \\ authoritative server name and the \\ administration email address.\end{tabular} \\
2 & NS & \begin{tabular}[c]{@{}l@{}}Name\\ Server\end{tabular} & \begin{tabular}[c]{@{}l@{}}Specifies the name of a DNS name server \\ that is authoritative for the zone. Each \\ zone must have at least one NS record \\ that points to its primary name server.\end{tabular} \\
5 & CNAME & \begin{tabular}[c]{@{}l@{}}Canonical\\ Name\end{tabular} & \begin{tabular}[c]{@{}l@{}}The CNAME records allows to define \\ aliases that point to the real canonical \\ name of the node. This can e.g. be used\\ to hide internal DNS structures and \\ provide a stable interface for outside users.\end{tabular} \\
6 & SOA & \begin{tabular}[c]{@{}l@{}}Start of\\ Authority\end{tabular} & \begin{tabular}[c]{@{}l@{}}The SOA record marks the start of a DNS \\ zone and provides important information \\ about the zone. Every zone must have \\ exactly one SOA record containing \\ e.g. name of the zone, primary \\ authoritative server name and the \\ administration email address.\end{tabular} \\
12 & PTR & Pointer & \begin{tabular}[c]{@{}l@{}}Provides a pointer to a different record\\ in the name space.\end{tabular} \\
15 & MX & Mail Exchange & \begin{tabular}[c]{@{}l@{}}Returns the host that is responsible for\\ handling emails sent to this domain.\end{tabular} \\
16 & TXT & Text String & \begin{tabular}[c]{@{}l@{}}Record which allows arbitrary \\ additional texts to be stored that are\\ related to the domain.\end{tabular} \\ \bottomrule
@@ -70,10 +70,11 @@ Value & Text Code & Type
\end{table}
\newpage
\subsubsection{Payload}
\label{subsubsec:payload}
In this section we will introduce the actual payload a \gls{dns} request as well as the response are built on. The format of each message that is shared between a resolver and \gls{dns} server has been initially defined in RFC 1035 \fsCite{rfc1035} and consecutively extended with new opcodes, response codes etc. This general format applies to both requests as well as responses and consists of five sections:
In this section we will introduce the actual payload a DNS request as well as the response are built on. The format of each message that is shared between a resolver and DNS server has been initially defined in RFC 1035 \fsCite{rfc1035} and consecutively extended with new opcodes (opcodes are references to different actions in DNS, e.g. to query or update a record \ref{tab:message_header_opcodes}), response codes etc. This general format applies to both requests as well as responses and consists of five sections:
\begin{enumerate}
\item Message Header
@@ -103,14 +104,14 @@ QR & \multicolumn{4}{c}{OPCODE} & AA & TC & RD & RA & Z & AD & CD & \multicolumn
\end{tabular}
\end{table}
Table~\ref{tab:message_header} shows the template of a \gls{dns} message header. In the following listing, an explanation for the respective variables and flags is given:
Table~\ref{tab:message_header} shows the template of a DNS message header. In the following listing, an explanation for the respective variables and flags is given:
\begin{itemize}
\item \textbf{Message ID:} 16 bit identifier supplied by the requester (any kind of software that generates a request) and sent back unchanged by the responder to identify the transaction and enables the requester to match up replies to outstanding request.
\item \textbf{QR:} Query/Response Flag one bit field whether this message is a query(0) or a response(1)
\item \textbf{QR:} Query/Response Flag one bit field whether this message is a query (0) or a response (1).
\item \textbf{OPCODE:} Four bit field that specifies the kind of query for this message. This is set by the requester and copied into the response. Possible values for the opcode field can be found in Table~\ref{tab:message_header_opcodes}
\item \textbf{OPCODE:} Four bit field that specifies the kind of query for this message. This is set by the requester and copied into the response. Possible values for the opcode field can be found in Table~\ref{tab:message_header_opcodes}.
\begin{table}[!htbp]
\centering
\caption{DNS: Message Header Opcodes}
@@ -130,7 +131,7 @@ Table~\ref{tab:message_header} shows the template of a \gls{dns} message header.
\item \textbf{AA:} Authoritative Answer -- this flag is set to 1 by the responding server if it is an authority for the domain name in the question section. If set to 0 this usually means that a cached record is returned.
\item \textbf{TC:} The Truncated bit is set to 1 if the response is larger then the permitted transmission channel length and the message has been truncated therefore. This usually indicates that \gls{dns} over \gls{udp} is used and the response payload size increases the maximum 512 bytes. The client may either requery over \gls{tcp} (with no size limits) or not bother at all if the truncated data was part of the Additional section. Set on all truncated messages except for the last one.
\item \textbf{TC:} The Truncated bit is set to 1 if the response is larger than the permitted transmission channel length and the message has therefore been truncated. This usually indicates that DNS over UDP (User Datagram Protocol) is used and the response payload size exceeds the maximum of 512 bytes. The client may either requery over TCP (Transmission Control Protocol) (with no size limits) or not bother at all if the truncated data was part of the Additional section. Set on all truncated messages except for the last one.
\item \textbf{RD:} Recursion Desired -- this bit may be set in a query and is copied into the response if the name server supports recursion. If recursion is refused by this name server, e.g. it has been configured as authoritative only, the response does not have this bit set. Recursive query support is optional.
@@ -142,7 +143,7 @@ Table~\ref{tab:message_header} shows the template of a \gls{dns} message header.
\item \textbf{CD:} Checking Disabled -- also used by \gls{dnssec} and may be set in a request to show that non-verified data is acceptable to the requester. If \gls{dnssec} is not available in the resolver, this is always set to 0.
\item \textbf{RCODE:} Response Code only available in response messages, these four bits are used to reveal errors while processing the query. Available error codes are listed in Table~\ref{tab:message_header_response_codes}. Error codes 0 to 5 have been initially available whereas error codes 6 to 10 are used for dynamic \gls{dns} defined in RFC 2136 \fsCite{rfc2136}.
\item \textbf{RCODE:} Response Code -- only available in response messages, these four bits are used to reveal errors while processing the query. Available error codes are listed in Table~\ref{tab:message_header_response_codes}. Error codes 0 to 5 have been initially available whereas error codes 6 to 10 are used for dynamic DNS defined in RFC 2136 \fsCite{rfc2136}.
\begin{table}[!htbp]
\centering
@@ -173,7 +174,7 @@ Table~\ref{tab:message_header} shows the template of a \gls{dns} message header.
\begin{table}[!htbp]
\centering
\caption{DNS: Question Section}
\label{tab_question_section}
\label{tab:question_section}
\begin{tabular}{@{}ccccccccc@{}}
\toprule
0 & 4 & 8 & 12 & 16 & 20 & 24 & 28 & 32 \\ \midrule
@@ -183,29 +184,30 @@ Table~\ref{tab:message_header} shows the template of a \gls{dns} message header.
\end{table}
See Table~\ref{tab:question_section} for the query layout.
\begin{itemize}
\item \textbf{Question Name:} Contains a variably sized payload including the domain, zone name or general object that is subject of the query. Encoded using standard \gls{dns} name notation. Depending on the Question Type, for example requesting an A Record will require an host part, such as www.domain.tld. A MX query will usually only contain a base domain name (domain.tld).
\item \textbf{Question Name:} Contains a variably sized payload including the domain, zone name or general object that is subject of the query. Encoded using standard DNS name notation. Depending on the Question Type, for example requesting an A Record will require a host part, such as www.domain.tld. An MX query will usually only contain a base domain name (domain.tld).
\item \textbf{Question Type:} Specifies the type of question being asked. This field may contain a code number corresponding to a particular type of resource being requested, see Table~\ref{tab:resource_record_types} for common resource types.
\item \textbf{Question Class:} The class of the resource records that are being requested (unsigned 16 bit value). Usually Internet, question classes are assigned by the IANA where all can be found (\fsCite{IANADNSClassesOnline})
\item \textbf{Question Class:} The class of the resource records that are being requested (unsigned 16 bit value). Usually Internet, question classes are assigned by the IANA where all can be found (\fsCite{IANADNSClassesOnline}).
\end{itemize}
There are more parameters available that can be specified when requesting a resource but do not have a higher relevance here.
There are more parameters available that can be specified when requesting a resource but do not have a higher relevance in this work.
\subsection{Domain Names}
\label{subsec:domain_names}
The structure of domain names is generally managed by the corresponding registrar, e.g. the DENIC e.G. (\fsCite{DENICOnline}) for .de domains. This includes for example which characters are allowed in second-level domains and the overall registration process. In the .de space, the second-level domain must contain between one and 63 characters, all characters of the latin alphabet can be used in addition to numbers, hyphen and all 93 characters of the internationalized domain name. The first, third, fourth and last characters is additionally not allowed to be a hyphen. Many different registrars use similar rules like this example which makes it hard to easily distinguish valid from non-valid domain names.
The structure of domain names is generally managed by the corresponding registrar, e.g. the DENIC e.G. (\fsCite{DENICOnline}) for .de domains. For example, this includes which characters are allowed in second-level domains and the overall registration process for domains. In the .de space, the second-level domain must contain between one and 63 characters while all characters of the Latin alphabet can be used in addition to numbers, hyphens and all characters of the internationalized domain name specification (\fsCite{IDNOnline}). Additionally, the first, third, fourth and last characters are not allowed to be a hyphen. Many different registrars use rules similar to this example, which makes it hard to generally distinguish valid from invalid domain names.
\subsection{Resolution}
\label{subsec:resolution}
Figure~\ref{fig:address_resolution} quickly describes the process of how domain names are resolved from the perspective of a requesting machine. Each step here assumes that the request has not been performed before and such is not available in any cache. In the first step, the \textit{Operating System} is contacting the local resolver, e.g. a router in a private network or a dedicated resolve server in a larger company. As the \textit{DNS Resolver} does know nothing about the domain, it contacts the \textit{Root NS} to return the address of the responsible top-level domain server (\textit{TLD NS} for .com in this example). The resolver then asks the \textit{TLD NS} server to return back the address of the second-level domain server that is in charge of the requested zone (e.g. google.com). Finally the resolver queries the \textit{Google NS} server for the IP address of the \textit{Google Webserver} and sends it back to the \textit{Operating System} which can then establish a connection to the \textit{Google Webserver}.
Figure~\ref{fig:address_resolution} quickly describes the process of how domain names are resolved from the perspective of a requesting machine. Each step here assumes that the request has not been performed before and as such is not available in any cache. In the first step, the \textit{Operating System} is contacting the local resolver, e.g. a router in a private network or a dedicated resolve server in a larger company, to resolve the domain name (www.google.com in this example). As the \textit{DNS Resolver} knows nothing about the domain, it contacts the \textit{Root NS} to return the address of the responsible top-level domain server (\textit{TLD NS} for .com in this case). The resolver then asks the \textit{TLD NS} server to return back the address of the second-level domain server that is in charge of the requested zone, e.g. google.com. Finally the resolver queries the \textit{Google NS} server for the IP address of the \textit{Google Webserver} (or www.google.com) and sends it back to the \textit{Operating System} which can then establish a connection to the google web page.
There are mainly two different types of DNS requests that are performed here. The \textit{Operating System} is sending a recursive request to the \textit{DNS Resolver} which itself is successively sending iterative requests to the higher level DNS servers. Usually most public servers do not allow recursive queries due to security risks (denial of service attacks).
There are mainly two different types of DNS requests, both of which are performed here. The \textit{Operating System} sends a recursive request to the \textit{DNS Resolver} which itself successively sends iterative requests to the higher level DNS servers. Most public servers do not allow recursive queries due to security risks (denial-of-service attacks).
\begin{figure}[!htbp]
@@ -218,4 +220,4 @@ There are mainly two different types of DNS requests that are performed here. Th
\subsection{Passive DNS}
\label{subsec:passive_dns}
A Passive DNS database is a database that contains a history of all resolved DNS queries in a network. The traffic can be observed at any appropriate location in a network, e.g. on a resolver. The main advantage of passively collecting DNS traffic is that there are no operational changes needed to collect logs of resolutions, one simple way is to mirror the DNS port on the resolver and persist the traffic into files). A Passive DNS database can be used in a variety of actions to harden a network from different threats. Projects like the Security Information Exchange (SIE) collect passive DNS data from multiple sources and analyse the databases to find e.g. inconsistencies in the resolutions (\fsCite{SIEOnline}). Passive DNS databases can also be used by researchers or service providers to find performance issues, identify anomalies or generate usage statistics \fsCite{Deri:2012:TPD:2245276.2245396}.
A Passive DNS database is a database that contains a history of all resolved DNS queries in a network. The traffic can be observed at any appropriate location in a network, e.g. on a resolver. The main advantage of passively collecting DNS traffic is that there are no operational changes needed to collect logs of resolutions. One simple way is to mirror the DNS port on the resolver and persist the traffic into files. A Passive DNS database can be used in a variety of ways to harden a network from different threats. Projects like the Security Information Exchange (SIE) collect passive DNS data from multiple sources and analyse the databases to find e.g. inconsistencies in the resolutions (\fsCite{SIEOnline}). Passive DNS databases can also be used by researchers or service providers to find performance issues, identify resolution anomalies or generate usage statistics \fsCite{Deri:2012:TPD:2245276.2245396}.

View File

@@ -1,8 +1,6 @@
\section{Detecting Malicious Domain Names}
\label{sec:detecting_malicious_domain_names}
\todo{literature exposure section 6.1}
\subsection{Domain Name Characteristics}
\label{subsec:domain_name_characteristics}

View File

@@ -6,10 +6,6 @@
\section{Machine Learning}
\label{sec:machine_learning}
Machine learning is broad field in computer science that aims to give computers the ability to learn without being explicitly programmed for a special purpose. There are many different approaches available that have advantages and disadvantages in different areas. Machine learning in this work is mostly limited to decision tree learning. Decision tree learning is an approach that is generally adopted from how humans are making decisions. Given a set of attributes, humans are able to decide, e.g. whether to buy one or another product. Machine learning algorithms use a technique called training to build a model which can later be used to make decisions. A decision tree consists of three components: a node represents the test of a certain attribute to split up the tree, leafs are terminal nodes and represent the prediction (the class or label) of the path from the root node to the leaf, and edges correspond to the results of a test and establish a connection to the next node or leaf. This training is performed in multiple steps: Given an arbitrarily large dataset (training set) with an fixed size of features (attributes) and each sample in the training set is assigned a label. The amount of labels is arbitrary (but limited), in a binary classification there are two different labels (e.g. malicious or benign in cases for domains). In the first step of the training, the whole training set is iterated and each time, a set of samples can be separated using one single attribute (in perspective to the assigned label) it is branched out and a new leaf is created. Each branch is then split into more fine grained subtrees as long as there is an \textit{information gain}, which means that all samples of the subset belong to the same class, i.e. are assigned the same label. The model can later be queried with an unlabeled data sample and the model returns the probability with which the data sample can be assigned to a class/label.
Machine learning is a broad field in computer science that aims to give computers the ability to learn without being explicitly programmed for a special purpose. There are many different approaches available that have advantages and disadvantages in different areas like object recognition in images, self driving cars or forecasting. Machine learning in this work is mostly limited to decision tree learning. Decision tree learning is an approach that is generally adopted from how humans are making decisions. Given a set of attributes, humans are able to decide, e.g. whether to buy one or another product. Machine learning algorithms use a technique called training to build a model which can later be used to make decisions and e.g. classify a dataset. A decision tree consists of three components: a node represents the test of a certain attribute to split up the tree, leaves are terminal nodes and represent a prediction (class/label) using all attributes in the trace from the root node to the leaf, and edges correspond to the results of a test and establish a connection to the next node or leaf. The training is performed in multiple steps. Input for the training is an arbitrarily large dataset (training set) with a fixed size of features (attributes) and for each sample in the training set, the corresponding label has to be known. The amount of labels or classes is arbitrary (but limited), in a binary classification there are two different labels (e.g. malicious or benign in the case of this work). In the first step of the training, the whole training set is iterated and each time a set of samples can be separated using one single attribute (in perspective to the assigned label) it is branched out and a new leaf is created. Each branch is then split into more fine grained subtrees as long as there is an \textit{information gain}, which means that not all samples of the subset belong to the same class, i.e. are assigned the same label.
The model can later be queried with an unlabeled data sample and returns the probability with which the sample can be assigned to a class.
This way, having a labeled training set with limited size and by learning the characteristics of the labeled test sample, unlabeled data can be classified.
%\input{content/Technical_Background/Detecting_Malicious_Domain_Names/Detecting_Malicious_Domain_Names}
\input{content/Technical_Background/Benchmarks/Benchmarks}
This way, having a labeled training set of limited size and by learning the characteristics of the labeled training samples, unlabeled data can be classified. The most popular decision tree implementation is \textit{C4.5} \fsCite{Salzberg1994}. Many current implementations like \textit{CART} (Classification and Regression Trees \fsCite{SciKitOnline}) or \textit{J48} are based on \textit{C4.5}.

View File

@@ -1,5 +1,4 @@
\section*{Abstract}
\label{sec:Abstract}
\todo{write abstract}
In recent years, botnet authors have discovered another approach to financially benefit from their malicious networks. Ransomware like CryptoLocker and WannaCry infected hundreds of thousands of machines within days and encrypted data from both companies and individuals. To build those network structures malware authors are leveraging the Domain Name System (DNS). Previous work such as \textit{Notos} \fsCite{Antonakakis:2010:BDR:1929820.1929844}, \textit{Exposure} \fsCite{Bilge11exposure:finding} and \fsCite{Antonakakis:2011:DMD:2028067.2028094} has shown that characteristics of how DNS resources are allocated can distinguish legitimate from malicious usage. This work evaluates different approaches that use machine learning and passive DNS data to detect domains used for malicious activities at an early stage before their maliciousness becomes widely known and traditional approaches can stop further propagation of the malware. By combining the advantages of different approaches, a proof-of-concept implementation of a dynamic domain reputation scoring algorithm has been developed. This work proposes an implementation that is using a passive DNS database and that has been optimised to efficiently handle large amounts of traffic.

View File

@@ -2,7 +2,7 @@
{
name={API},
description={An Application Programming Interface (API) is a particular set
of rules and specifications that a software program can follow to access and make use of the services and resources provided by another particular software program that implements that API.}
of rules and specifications that a software program can follow to access and make use of the services and resources provided by another particular software program that implements that API}
}
\newglossaryentry{rir}
@@ -20,56 +20,35 @@
\newglossaryentry{ddos}
{
name={Distributed Denial-of-Service},
description={Distributed Denial-of-Service is an attack where multiple machines are used to generate as much workload as needed to cause downtimes of a service or machine and make benign usage impossible.}
description={Distributed Denial-of-Service is an attack where multiple machines are used to generate as much workload as needed to cause downtimes of a service or machine and make benign usage impossible}
}
\newglossaryentry{as}
{
name={AS},
description={An Autonomous System references a set of one or more networks in the Internet that allows to consistently route between those networks (i.e. an Internet Service Provider) and that exports a single interface for other AS. Each Autonomous System is assigned a officially registered unique Autonomous System Number (ASN).}
description={An Autonomous System references a set of one or more networks in the Internet that allows to consistently route between those networks (i.e. an Internet Service Provider) and that exports a single interface for other AS. Each Autonomous System is assigned a officially registered unique Autonomous System Number (ASN)}
}
\newglossaryentry{bgp}
{
name={BGP},
description={The Border Gateway Protocol, also known as the Exterior-Gateway-Protocol (EGP), is a protocol to route traffic between different Autonomous Systems in the Internet. It is used to share several information for IP blocks to allow routing between different Autonomous Systems.}
description={The Border Gateway Protocol, also known as the Exterior-Gateway-Protocol (EGP), is a protocol to route traffic between different Autonomous Systems in the Internet. It is used to share several information for IP blocks to allow routing between different Autonomous Systems}
}
\newglossaryentry{whois}
{
name={Whois},
description={Whois is a protocol used to gather information about owners of domains in the domain name system and IP addresses specified in RFC 1834.}
description={Whois is a protocol used to gather information about owners of domains in the Domain Name System and IP addresses specified in RFC 1834}
}
\newglossaryentry{roc}
{
name={Receiver Operating Characteristic Curve},
description={The ROC curve is a graphical plot of the true positive rate as well as the false positive rate and highlights the performance of a binary classifier.}
description={The ROC curve is a graphical plot of the true positive rate as well as the false positive rate and highlights the performance of a binary classifier}
}
\newacronym{sri-nic}{SRI-NIC}{Stanford Research Institute - Network Information Center}
\newacronym{dns}{DNS}{Domain Name System}
\newacronym{ipv6}{IPv6}{Internet Protocol Version 6}
\newacronym{arpanet}{ARPANET}{Advanced Research Projects Agency Network}
\newacronym{tcp/ip}{TCP/IP}{Transmission Control Protocol/Internet Protocol}
\newacronym{udp}{UDP}{User Datagram Protocol}
\newacronym{tcp}{TCP}{Transmission Control Protocol}
\newacronym{pdns}{pDNS}{passive DNS}
\newacronym{os}{OS}{Operating System}
\newacronym{ftp}{FTP}{File Transfer Protocol}
\newacronym{fqdn}{FQDN}{Fully Qualified Domain Name}
\newacronym{dnssec}{DNSSEC}{Domain Name System Security Extensions}
\newacronym{edns}{EDNS}{Extension mechanisms for DNS}
\newglossaryentry{dnssec}
{
name={DNSSEC},
description={Domain Name System Security Extensions: Series of extensions to the Domain Name System that enable integrity and authenticity of data that is distributed via DNS}
}

View File

@@ -36,6 +36,8 @@
\makeindex
\makeglossaries
\input{glossar}
% Headlines, margins. ----------------------------------------------------------
\input{pagestyle}
@@ -69,7 +71,6 @@
\tableofcontents
% Glossary ---------------------------------------------------------------------
\input{glossar}
% Correct headline in header
\clearpage\markboth{\glossar}{\glossar}
\printglossaries
@@ -79,7 +80,7 @@
\listoffigures
\listoftables
\lstlistoflistings
%\lstlistoflistings
% arabic paging in main content ------------------------------------------------
\clearpage
@@ -101,12 +102,12 @@
% Appendix ---------------------------------------------------------------------
% Appendix is included like main content in appendix.tex
% ------------------------------------------------------------------------------
\begin{appendix}
\pagenumbering{roman}
% Adjust margin in table listings
\setdefaultleftmargin{1em}{}{}{}{}{}
\input{appendix}
\end{appendix}
%\begin{appendix}
% \pagenumbering{roman}
% % Adjust margin in table listings
% \setdefaultleftmargin{1em}{}{}{}{}{}
% \input{appendix}
%\end{appendix}
% Bibliography -----------------------------------------------------------------
% Bibliography is created using bibliography.bib

View File

@@ -15,6 +15,6 @@
\newcommand{\location}{Passau}
\newcommand{\thesisyear}{2018}
%change to res/img/Logo_UniPassau_small_bw.png for a black and white version
\newcommand{\logo}{res/img/Logo_UniPassau_small.png}
\newcommand{\logo}{res/img/Logo_UniPassau_small_bw.png}
\newcommand{\institute}{Universität Passau}
\newcommand{\fsCopyright}{Copyright?}

View File

@@ -53,19 +53,19 @@
% Optimize your compilation for color based monitors (e.g. web) or print b/w ---
% WEB
\definecolor{colKeys}{rgb}{0,0,1}
\definecolor{colIdentifier}{rgb}{0,0,0}
\definecolor{colComments}{rgb}{1,0,0}
\definecolor{colString}{rgb}{0,0.5,0}
\definecolor{light-gray}{cmyk}{0,0,0,0.6}
% PRINT dont forget to change icon in meta, and pdf config
%\definecolor{colKeys}{cmyk}{0,0,0,1}
%\definecolor{colIdentifier}{cmyk}{0,0,0,1}
%\definecolor{colComments}{cmyk}{0,0,0,1}
%\definecolor{colString}{cmyk}{0,0,0,1}
%\definecolor{colKeys}{rgb}{0,0,1}
%\definecolor{colIdentifier}{rgb}{0,0,0}
%\definecolor{colComments}{rgb}{1,0,0}
%\definecolor{colString}{rgb}{0,0.5,0}
%\definecolor{light-gray}{cmyk}{0,0,0,0.6}
% PRINT dont forget to change icon in meta, and pdf options below
\definecolor{colKeys}{cmyk}{0,0,0,1}
\definecolor{colIdentifier}{cmyk}{0,0,0,1}
\definecolor{colComments}{cmyk}{0,0,0,1}
\definecolor{colString}{cmyk}{0,0,0,1}
\definecolor{light-gray}{cmyk}{0,0,0,0.6}
% Link url, break urls etc. ----------------------------------------------------
\usepackage{url}