partially finished exposure; started kopis

This commit is contained in:
2018-01-16 18:30:45 +01:00
parent 0f10314bc8
commit 3586762494
12 changed files with 224 additions and 18 deletions

View File

@@ -127,6 +127,20 @@
keywords = {Domain name system, machine learning, malicious domains},
}
@inproceedings{Antonakakis:2011:DMD:2028067.2028094,
 author = {Antonakakis, Manos and Perdisci, Roberto and Lee, Wenke and Vasiloglou, II, Nikolaos and Dagon, David},
title = {Detecting Malware Domains at the Upper DNS Hierarchy},
booktitle = {Proceedings of the 20th USENIX Conference on Security},
series = {SEC'11},
year = {2011},
location = {San Francisco, CA},
pages = {27--27},
numpages = {1},
url = {http://dl.acm.org/citation.cfm?id=2028067.2028094},
acmid = {2028094},
publisher = {USENIX Association},
address = {Berkeley, CA, USA},
}
@misc{theguardiancom_wannacry,
author = {Nadia Khomami and Olivia Solon},
@@ -191,6 +205,88 @@
howpublished={\url{http://www.malwaredomains.com}}
}
@misc{zeusblocklistInformationOnline,
author={Zeus Block List},
title={{ZeuS Tracker :: ZeuS blocklist}},
month=jan,
year={2018},
howpublished={\url{https://zeustracker.abuse.ch/blocklist.php}}
}
@misc{MCAfeeWebAdvisorOnline,
author={McAfee},
title={{McAfee WebAdvisor}},
month=jan,
year={2018},
howpublished={\url{https://home.mcafee.com/root/landingpage.aspx?lpname=get-it-now&affid=0&culture=de-ch}}
}
@misc{GoogleSafeBrowsingOnline,
author={Google},
title={{Google Safe Browsing}},
month=jan,
year={2018},
howpublished={\url{https://safebrowsing.google.com/}}
}
@misc{NortonSafeWebOnline,
author={Symantec},
title={{Norton Safe Web}},
month=jan,
year={2018},
howpublished={\url{https://safeweb.norton.com/}}
}
@misc{DNSWLOnline,
author={dnswl.org},
title={{E-Mail Reputation Protect against false positives}},
month=jan,
year={2018},
howpublished={\url{https://www.dnswl.org/}}
}
@misc{DIHEOnline,
author={ipindex.dihe.de/},
title={{dihe's IP-Index Browser}},
month=jan,
year={2018},
howpublished={\url{http://ipindex.dihe.de/}}
}
@misc{SIEOnline,
  author={{Farsight Security, Inc.}},
title={{Security Information Exchange (SIE)}},
month=dec,
year={2017},
howpublished={\url{https://www.farsightsecurity.com/solutions/security-information-exchange/}}
}
@inproceedings{porras2009foray,
title={A Foray into Conficker's Logic and Rendezvous Points.},
author={Porras, Phillip A and Sa{\"\i}di, Hassen and Yegneswaran, Vinod},
booktitle={LEET},
year={2009}
}
@inproceedings{Stone-Gross:2009:YBM:1653662.1653738,
author = {Stone-Gross, Brett and Cova, Marco and Cavallaro, Lorenzo and Gilbert, Bob and Szydlowski, Martin and Kemmerer, Richard and Kruegel, Christopher and Vigna, Giovanni},
title = {Your Botnet is My Botnet: Analysis of a Botnet Takeover},
booktitle = {Proceedings of the 16th ACM Conference on Computer and Communications Security},
series = {CCS '09},
year = {2009},
isbn = {978-1-60558-894-0},
location = {Chicago, Illinois, USA},
pages = {635--647},
numpages = {13},
url = {http://doi.acm.org/10.1145/1653662.1653738},
doi = {10.1145/1653662.1653738},
acmid = {1653738},
publisher = {ACM},
address = {New York, NY, USA},
keywords = {botnet, malware, measurement, security, torpig},
}
@techreport{RFC1033,
author = {M. Lottor},
title = {Domain administrators operations guide},

View File

@@ -24,6 +24,8 @@ fast flux networks, domain flux networks, domain generation algorithm
\subsection{Countermeasures}
\label{subsec:countermeasures}
\todo{see kopis section 2 end, DNS blacklisting etc}
\section{Phishing}

View File

@@ -2,6 +2,7 @@
\label{cha:development_of_doresa}
==> remember, operated in a mostly safe environment (few malware should be in the field)
==> not like exposure: do not initially filter out domains? (alexa top 1000 and older than one year)
\section{Initial Situation and Goals}
\label{sec:initial_situation_and_goals}

View File

@@ -13,14 +13,21 @@ For a comprehensive evaluation, all input and output as well as the exact implem
\input{content/Evaluation_of_existing_Systems/Exposure/Exposure.tex}
\section{Kopis}
\label{sec:kopis}
\input{content/Evaluation_of_existing_Systems/Kopis/Kopis.tex}
\section{Results and Comparison}
\label{sec:results_and_comparison}
\todo{here}
==> Exposure much simpler, much less data available (ips of malicious servers, honeypots, dyndns,...)
==> not possible to simply block everything, always false positives
==> difference notos exposure, see literature exposure section 5.5.2
==> not possible to simply block everything, always false positives
==> kopis is able to operate without ip reputation information
==> read all 'limitations' sections again and compare here
==> no concepts of kopis because of caching, no data available

View File

@@ -15,7 +15,7 @@ For the distinction of benign and malicious domains to perform well, a large set
\begin{itemize}
\item The \textit{Data Collector} module passively captures the DNS traffic in the monitored network.
\item The \textit{Feature Attribution} component is attributing the captured domains with the desired features.
\item The third component \textit{Malicious and Benign Domains Collector} is running in parallel to the first two modules and constantly gathers information about known good and known bad domains. These lists are used to label the output of the \textit{Feature Attribution} module afterwards, as it can be seen in picture~\ref{fig:exposure_system_overview}. The list of benign domains is extracted from the Alexa top list \fsCite{AlexaWebInformationOnline} and externally confirmed \gls{whois} data. The list of known malicious domains is collected from several external sources and includes domains in different threat classes, e.g., malwaredomains.com \fsCite{malwaredomainsInformationOnline}, Phishtank \fsCite{PhishtankInformationOnline} and Anubis (no longer available).
\item The third component \textit{Malicious and Benign Domains Collector} is running in parallel to the first two modules and constantly gathers information about known good and known bad domains. These lists are used to label the output of the \textit{Feature Attribution} module afterwards, as it can be seen in picture~\ref{fig:exposure_system_overview}. The list of benign domains is extracted from the Alexa top list \fsCite{AlexaWebInformationOnline} and externally confirmed \gls{whois} data. The list of known malicious domains is collected from several external, both professionally provisioned and user maintained, sources and includes domains in different threat classes, e.g., malwaredomains.com \fsCite{malwaredomainsInformationOnline}, Phishtank \fsCite{PhishtankInformationOnline}, Anubis (no longer available), the Zeus Block List \fsCite{zeusblocklistInformationOnline} and domains from DGAs for Conficker \fsCite{porras2009foray} and Mebroot \fsCite{Stone-Gross:2009:YBM:1653662.1653738}.
\item The labeled dataset is then fed into the \textit{Learning Module} and trains the domain detection model that is used in the final step. This classifier may also be retrained on a regular basis to keep up with malicious behavior (daily in \textit{Exposure}).
\item The \textit{Classifier} uses the decision model to classify unlabeled (new) domains into benign and malicious groups. For this, the same feature vector that is produced by the \textit{Feature Attribution} module is used.
\end{itemize}
@@ -34,7 +34,7 @@ For the distinction of benign and malicious domains to perform well, a large set
\textit{Exposure} uses a total of fifteen features that have been chosen after several months of study with thousands of well-known benign and malicious domains. These features are grouped into four different categories, which can be seen in Table~\ref{tab:exposure_features}.
The first group, with \textit{Time-Based Features} has not been approached in publications before. These features investigate the time, at which the request with domain \textit{d} has been issued. The main idea behind this group of features is to find malicious services that use techniques like \textit{domain flux}
The first group, \textit{Time-Based Features} has not been approached in publications before. These features investigate the time, at which the request with domain \textit{d} has been issued. The main idea behind this group of features is to find malicious services that use techniques like \textit{domain flux}
\todo{explain domain flux} to circumvent takedowns and make their infrastructure more agile. ``[\textit{Domain flux}] often show a sudden increase followed by a sudden decrease in the number of requests'' \fsCite[Section 3.1]{Bilge:2014:EPD:2617317.2584679}. Domains of malicious services using a DGA only exist for a short period of time by design. \fsAuthor{Bilge:2014:EPD:2617317.2584679} defines the first feature as follows: ``A domain is defined to be a short-lived domain [...] if it is queried only between time \(t_0\) and \(t_1\), and if this duration is comparably short (e.g., less than several days).'' The next three features are subject to the change point detection (CPD) problem: Change point detection is about the identification of (abrupt) changes in the distribution of values, for example in time series. \textit{Exposure} implemented a CPD algorithm based on the popular CUSUM (cumulative sum) algorithm. At first, the time series of request timestamps is split into periods of 3600 seconds (one hour was tested to work well). After that, all time intervals are iterated and for each interval, the average request count of the previous eight intervals \(P_t^-\) and following eight intervals \(P_t^+\) is calculated. In the next step, the distance of these two values is calculated \(d(t)=|P_t^--P_t^+|\) for each interval and the resulting ordered sequence \(d(t)\) of distances is fed to the CUSUM algorithm to finally retrieve all change points (for more information on the implemented CPD algorithm, see \fsCite[Section 3.1]{Bilge:2014:EPD:2617317.2584679}). To calculate feature two (\textit{Daily similarity}), the Euclidean Distance of the time series of each day for \textit{d} is calculated. Intuitively, a low distance means similar time series and thus high daily similarity, whereas two days with a higher distance show a less similar request volume.
All features of this group naturally perform well only when a larger number of requests to \textit{d} is available over a significant period of time.
The next group of features (\textit{DNS Answer-Based Features}) investigates resolutions of the requested domain \textit{d}. While one domain can map to multiple IP addresses for benign services, most harmless services show a much smaller network profile in terms of e.g. location and \glspl{as}. To capture these findings, four features have been extracted: the number of distinct IP addresses, the number of different countries these IP addresses are assigned to, the number of other domains that share an IP address \textit{d} resolves to, and the number of results of the reverse DNS query for all IPs of \textit{d}. It is worth noting that some hosting providers also use one IP address for many domains, so an extra layer to prevent such false positives makes sense.
@@ -64,7 +64,29 @@ The last group of features are the \textit{Domain Name-Based Features}. Domain n
& Number of distinct TTL values \\ \cline{2-2}
& Number of TTL change \\ \cline{2-2}
& Percentage usage of specific TTL ranges \\ \hline
\multirow{2}{*}{Domain Name-Based Features} & \% of numerical characters \\ \cline{2-2}
\multirow{2}{*}{\textit{Domain Name-Based Features}} & \% of numerical characters \\ \cline{2-2}
& \% of the length of the LMS \\ \hline
\end{tabularx}
\end{table}
\end{tabularx}
\end{table}
\subsection{Reputation Engine}
\label{subsec:exposure_reputation_engine}
The reputation classifier of \textit{Exposure} is implemented as a \textit{J48} decision tree algorithm. The performance of decision trees mainly depends on the quality of the training set. For this reason, a representative set of training data, with malicious domains from various threat classes, has to be chosen. Sources that have been used to identify malicious and benign domains can be found in Section~\ref{subsec:exposure_architecture}. In total, a list of 3500 known bad as well as 3000 known good domains has been used for the initial training. In order to take advantage of the \textit{Time-Based Features}, the optimal training period has been observed to be seven days. The tree is then constructed using the feature attribute values and their corresponding labels. More specifically, the whole training set is iterated and each time a set of samples can be separated using one single attribute (with respect to the assigned label), it is branched out and a new leaf is created. Each branch is then split into more fine-grained subtrees as long as there is an \textit{information gain}, until all samples of the subset belong to the same class, i.e. are assigned the same label. \todo{link comprehensive decision tree explanation}
\subsection{Results}
\label{subsec:exposure_results}
The performance of classifiers with different feature sets has been tested using e.g. 10-fold cross validation. To find the model with the minimum error rate, all combinations of feature sets ({\textit{Time-Based Features} as F1, \textit{DNS Answer-Based Features} as F2, \textit{TTL Value-Based Features} F3 and \textit{Domain Name-Based Features} as F4) have been trained using the same decision tree algorithm. Figure~\ref{fig:exposure_miss-classifier_instances} shows the error rate of those different classification models. The \textit{Time-Based Features} are showing the smallest error when inspecting single feature sets only. Looking at models with multiple feature sets, the overall minimum error rate is produced when using all four feature groups. The total amount of requests in the dataset that was collected for the initial analysis counted roughly 100 billion DNS queries. As processing all of these requests is not feasible in practice, two filtering steps have been introduced. The first one filters out all requests to a domain in the top 1000 Alexa list. The assumption for this filter is that no malicious domain will get this popular without being detected in some form. This step removed about 20\% of the initial requests. The second step filters out all requests to domains that have been registered at least one year before the analysis. This filter applied to 45,000 domains (or 40 billion corresponding queries) and reduced the remaining traffic by another 50\%. The filtering process has been cross-tested against the Alexa top list, McAfee WebAdvisor (formerly McAfee SiteAdvisor) \fsCite{MCAfeeWebAdvisorOnline}, Google Safe Browsing \fsCite{GoogleSafeBrowsingOnline} and Norton Safe Web \fsCite{NortonSafeWebOnline} and only 0.09\% have been reported to be risky.
\fsAuthor{Bilge11exposure:finding} for this reason states that: ``We therefore believe that our filtering policy did not miss a significant number of malicious domains because of the pre-filtering we performed during the offline experiments.''
The accuracy of the classifier has been validated using two different methods. The first method was to classify the training set with 10-fold cross validation. This validation method splits the dataset in ten partitions/folds (each partition optimally containing roughly the same class label distribution). One fold is then used as the validation sample (testing set) and the remaining nine partitions are used as the training set. The training set is used to train the model which is then cross validated with the testing set. This step is repeated for ten times using the same partitions, each partition being the testing set once. The second method is to simply use 66\% of the dataset for the training and the remaining 33\% as the testing set. \todo{detailed explanation of 10 fold cross validation}
\begin{figure}[!htbp]
\centering
\includegraphics[width=.9\textwidth, clip=true]{content/Evaluation_of_existing_Systems/Exposure/exposure_validation.png}
	\caption{Exposure: Percentage of misclassified instances \fsCite[Figure 2]{Bilge11exposure:finding}}
\label{fig:exposure_miss-classifier_instances}
\end{figure}

Binary file not shown.

After

Width:  |  Height:  |  Size: 69 KiB

View File

@@ -0,0 +1,74 @@
\section{Kopis}
\label{sec:kopis}
\subsection{General}
\label{subsec:kopis_general}
The last evaluated system is called \textit{Kopis} and has been proposed in 2011 by \fsAuthor{Antonakakis:2011:DMD:2028067.2028094}, the authors that also released \nameref{sec:notos}, at the Georgia Institute of Technology and the University of Georgia. \textit{Kopis} is following a slightly different approach compared to the previous two systems, \textit{Notos} and \textit{Exposure}. Instead of collecting passively monitored DNS traffic from a (limited) number of different recursive DNS servers in various locations, \textit{Kopis} uses requests, registered in the upper DNS layers, from e.g. top-level domain servers and authoritative name servers. See Figure~\ref{fig:kopis_data_sources} for an overview of where those three different systems aggregate logs to perform traffic analysis. Operating in the upper DNS layers, \textit{Kopis} is not only able to extract significantly different classes of features compared to \textit{Notos} and \textit{Exposure} but also has to deal with different challenges like DNS caching. The biggest \todo{unfinished sentence — complete}
\begin{figure}[!htbp]
\centering
\includegraphics[width=.9\textwidth, clip=true]{content/Evaluation_of_existing_Systems/Kopis/kopis_data_sources.png}
\caption{Overview of the levels at which Kopis, Notos, and Exposure perform DNS monitoring. \fsCite[Figure 1]{Antonakakis:2011:DMD:2028067.2028094}}
\label{fig:kopis_data_sources}
\end{figure}
\subsection{Architecture}
\label{subsec:kopis_architecture}
\begin{figure}[!htbp]
\centering
\includegraphics[width=.9\textwidth, clip=true]{content/Evaluation_of_existing_Systems/Kopis/kopis_system_overview.png}
\caption{Kopis: system overview \fsCite[Figure 3]{Antonakakis:2011:DMD:2028067.2028094}}
\label{fig:kopis_system_overview}
\end{figure}
The overall system architecture can be seen in Figure~\ref{fig:kopis_system_overview}. The first step in the reputation system is to gather all (streamed) DNS queries and responses and divide this traffic into fixed epochs (e.g. one day in \textit{Kopis}). After collecting the traffic of each epoch \(E_i\), different statistics about a domain \textit{d} are extracted by the \textit{Feature Computation} function into a feature vector \(v_d^i\). A detailed table of which features are used is listed in Section~\ref{subsec:kopis_features}. \textit{Kopis} tries to separate benign from malicious domains by characteristics like the volume of DNS requests to domain \textit{d}, the diversity of IP addresses of the querying machines and the historic information relating to the IP space \textit{d} is pointing to. Like the first two investigated systems, \textit{Kopis} is operating in two different modes. In training mode, the reputation model is built in an offline fashion (\textit{Learning Module}) which is later used in the operational mode (\textit{Statistical Classifier}) to assign \textit{d} a reputation score in a streamed fashion. The \textit{Learning Module} takes the feature vector of a period of \textit{m} days that is generated by the \textit{Feature Computation} function as input and uses the \textit{Knowledge Base (KB)} to label each sample in that training set as being a malicious or legitimate domain (training set: \(V_{train} = \{v_d^i\}_{i=1..m}, \forall d \in \textit{KB}\)). The \textit{KB} consists of various public and undisclosed sources: \\
\textbf{Malicious domain sources: }
\begin{itemize}
	\item Information about malware from a commercial feed with a volume between 400 MB and 2 GB a day
\item Malware, captured from two corporate networks
\item Public blacklists, e.g., malwaredomains.com \fsCite{malwaredomainsInformationOnline} and the Zeus Block List \fsCite{zeusblocklistInformationOnline} \\
\end{itemize}
\textbf{Benign domain sources: }
\begin{itemize}
	\item Domain and IP whitelists from DNSWL \fsCite{DNSWLOnline}
\item Address space of the top 30 Alexa domains \fsCite{AlexaWebInformationOnline}
\item Dihe's IP-Index Browser \fsCite{DIHEOnline}
\end{itemize}
The operational mode first captures all DNS traffic streams. At the end of each epoch \(E_j\), the feature vector \(v_{d'}^j\) for all unknown domains \(d' \notin \textit{KB}\) is extracted and the \textit{Statistical Classifier} assigns a label (either malicious or legitimate) \(l_{d', j}\) and a confidence score \(c(l_{d', j})\). While the label classifies if the domain \textit{d'} is expected to be malicious or legitimate, the confidence score expresses the probability of this label. For the final reputation score, \textit{Kopis} first computes a series of label/confidence tuples for \textit{m} epochs starting at epoch \(E_t\): \(S(v_{d'}^j) = \{l_{d', j}, c(l_{d', j})\}, j = t, .., (t + m)\) and by averaging the confidence scores of the malicious labels (\textit{M}), the reputation score can be expressed as \(\overline{C}_M = avg_j\{c(l_{d', j})\}\)
\subsection{Features}
\label{subsec:kopis_features}
Much like the previously investigated systems, \textit{Kopis} is extracting different features that are grouped in three sets. Two of those groups, the \textit{Requester Diversity} and the \textit{Requester Profile} features, have not been proposed in research before and, due to the system architecture, differ from those that are used in \textit{Notos} and \textit{Exposure}. In contrast to \textit{Notos} and \textit{Exposure}, which use traffic monitored from recursive DNS servers in lower DNS layers, \textit{Kopis} is operating with data from two large AuthNS as well as a country level TLD server (.ca space) in the upper DNS layers (see Figure~\ref{fig:kopis_data_sources}). Operating at this level of the DNS hierarchy leads to different challenges as well. A top-level domain server is rarely answering a request itself but most of the time is only delegating the request to a more specific server, e.g. a server responsible for the zone of a second-level domain in a company. For this reason, to get the actual resolved record (IP), the delegated name server can be queried directly or a passive DNS database (e.g. from the Security Information Exchange \fsCite{SIEOnline}) can be used.
The first step of extracting features out of the captured traffic for each DNS query \(q_j\) (to resolve a domain \textit{d}) is to find the epoch \(T_j\), in which the request has been made, the IP address of the machine \(R_j\) that issued the query and the resolved records \(IPs_j\). Using these raw values, \textit{Kopis} extracts the following specific features:
\subsubsection{Requester Diversity (RD)}
\label{subsubsec:kopis_requester_diversity}
\subsubsection{Requester Profile (RP)}
\label{subsubsec:kopis_requester_profile}
\subsubsection{Resolved-IPs Reputation (IPR)}
\label{subsubsec:kopis_resolved-ips_reputation}
\subsection{Reputation Engine}
\label{subsec:kopis_reputation_engine}
\subsection{Results}
\label{subsec:kopis_results}
Using the \textit{KB}, a sample with 225,429 unique RRs (corresponding to 28,915 unique domain names) could be split into groups with 27,317 malicious and 1,598 benign domains.
\todo{see section one for contributions}

Binary file not shown.

After

Width:  |  Height:  |  Size: 237 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 113 KiB

View File

@@ -60,6 +60,9 @@ Figure~\ref{fig:notos_system_overview} shows the overall system architecture of
In this Section, all statistical features are listed and a short explanation, for what reason those have been chosen, is introduced.
\subsubsection{Network-based features}
\label{subsubsec:notos_network-based_features}
The first group of features handles network-related keys. This group mostly describes how the owning operators of \textit{d} allocate network resources to achieve different goals. While most legitimate and professionally operated Internet services have a rather stable network profile, malicious usage usually involves short-lived domain names and IP addresses with high agility to circumvent blacklisting and other simple types of resource blocking. Botnets usually contain machines in many different networks (\glspl{as} and \glspl{bgp}) operated by different organizations in different countries. Legitimate companies mostly acquire bigger IP blocks and thus use consecutive IPs for their services in the same address space. This homogeneity also applies to other registration-related information like registrars and registration dates. To measure this level of agility and homogeneity, eighteen statistical network-based features are extracted from the RHIPs (see Table~\ref{tab:notos_network-based_features}).
\begin{table}[!htbp]
@@ -90,6 +93,9 @@ The first group of features handles network-related keys. This group mostly desc
\end{tabularx}
\end{table}
\subsubsection{Zone-based features}
\label{subsubsec:notos_zone-based_features}
The second group is about zone-based features and is extracted from the RHDNs. In contrast to the network-based features, which compare characteristics of the historic IPs, the zone-based features handle characteristics of all historically involved domains. While legitimate services often involve many domains, they usually share similarities. ``For example, google.com, googlesyndication.com, googlewave.com, etc., are all related to Internet services provided by Google, and contain the string 'google' in their name.'' In contrast, randomly generated domains used in spam campaigns rarely share similarities. By calculating the mean, median and standard deviation for some keys, these features ``summarize the shape of [the] distribution'' \fsCite[Section 3.2.2]{Antonakakis:2010:BDR:1929820.1929844}. To calculate this level of diversity, seventeen features are extracted which can be found in Table~\ref{tab:notos_zone-based_features}:
\begin{table}[!htbp]
@@ -119,6 +125,9 @@ The second group is about zone-based features and is extracted from the RHDNs. I
\end{tabularx}
\end{table}
\subsubsection{Evidence-based features}
\label{subsubsec:notos_evidence-based_features}
For the evidence-based features, public information and exclusively collected data from honeypots and spam-traps is used. This \textit{knowledge base} primarily helps to discover if a domain \textit{d} is in some way interacting with known malicious IPs and domains. As domain names are much cheaper than IP addresses, malware authors tend to reuse IPs with updated domain names. The blacklist features detect the reuse of known malicious resources like IP addresses, \gls{bgp} prefixes and \glspl{as}.
\begin{table}[!htbp]

View File

@@ -212,15 +212,6 @@ QType & Type & Description \\
\end{tabular}
\end{table}
%\todo{remove?}
%\subsubsection{Database distribution}
%\label{subsubsec:database_distribution}
\subsection{Domain Names}
\label{subsec:domain_names}
\todo{TODO structure of a domain, etc. top-level, second-level, third-level}
@@ -232,6 +223,8 @@ QType & Type & Description \\
\subsubsection{Recursive}
\label{TODO subsubsec:recursive}
\todo{explain delegation (e.g. of TLDs) somewhere here}
\begin{figure}[!htbp]
\centering

View File

@@ -1,6 +1,8 @@
\section{Detecting Malicious Domain Names}
\label{sec:detecting_malicious_domain_names}
\todo{literature exposure section 6.1}
\subsection{Domain Name Characteristics}
\label{subsec:domain_name_characteristics}