@article{971646,
  author = {R. Bajcsy and T. Benzel and M. Bishop and B. Braden and C. Brodley and S. Fahmy and S. Floyd and W. Hardaker and A. Joseph and G. Kesidis and K. Levitt and B. Lindell and P. Liu and D. Miller and R. Mundy and C. Neuman and R. Ostrenga and V. Paxson and P. Porras and C. Rosenberg and J. D. Tygar and S. Sastry and D. Sterne and S. F. Wu},
  title = {Cyber defense technology networking and evaluation},
  journal = {Communications of the ACM},
  volume = {47},
  number = {3},
  year = {2004},
  issn = {0001-0782},
  pages = {58--61},
  doi = {10.1145/971617.971646}
}
@inproceedings{948198,
  abstract = { In this paper, we investigate epidemiological models to reason about computer viral propagation. We extend the classical homogeneous models to incorporate two timing parameters: Infection delay and user vigilance. We show that these timing parameters greatly influence the propagation of viral epidemics, and that the explicit treatment of these parameters gives rise to a more realistic and accurate propagation model. We validate the new model with simulation analysis.},
  author = {Yang Wang and Chenxi Wang},
  booktitle = {Proceedings of the 2003 ACM Workshop on Rapid Malcode (WORM)},
  doi = {10.1145/948187.948198},
  isbn = {1-58113-785-0},
  pages = {61--66},
  title = {Modeling the effects of timing parameters on virus propagation},
  year = {2003}
}
@inproceedings{LiljenstamYPN02,
  abstract = {Large-scale worm infestations, such as last year's Code Red, Code Red II, and Nimda, have led to increased interest in modeling these events to assess threat levels, evaluate countermeasures and investigate possible influence on the Internet infrastructure. However, the inherently large scale of these phenomena pose significant challenges for models that include infrastructure detail. We explore the use of selective abstraction through epidemiological models in conjunction with detailed protocol models as a means to scale up simulations to a point where we can ask meaningful questions regarding a hypothesized link between worms and inter-domain routing instability. We find that this approach shows significant promise, in contrast to some of our early attempts using all-out packet level models. We also describe some approaches we are taking to collect the underlying data for our models.},
  author = {Michael Liljenstam and Yougu Yuan and Brian J. Premore and David M. Nicol},
  booktitle = {10th International Workshop on Modeling, Analysis, and Simulation of Computer and Telecommunication Systems (MASCOTS)},
  url = {http://csdl.computer.org/comp/proceedings/mascots/2002/1840/00/18400109abs.htm},
  isbn = {0-7695-1840-0},
  pages = {109--116},
  publisher = {IEEE Computer Society},
  title = {A Mixed Abstraction Level Simulation Model of Large-Scale {Internet} Worm Infestations},
  year = {2002}
}
@article{Staniford04,
  author = {Stuart Staniford},
  journal = {Journal of Computer Security},
  title = {Containment of Scanning Worms in Enterprise Networks},
  note = {To appear}
}
@inproceedings{948193,
  abstract = {Reproducing the effects of large-scale worm attacks in a laboratory setup in a realistic and reproducible manner is an important issue for the development of worm detection and defense systems. In this paper, we describe a worm simulation model we are developing to accurately model the large-scale spread dynamics of a worm and many aspects of its detailed effects on the network. We can model slow or fast worms with realistic scan rates on realistic IP address spaces and selectively model local detailed network behavior. We show how it can be used to generate realistic input traffic for a working prototype worm detection and tracking system, the Dartmouth ICMP BCC: System/Tracking and Fusion Engine (DIB:S/TRAFEN), allowing performance evaluation of the system under realistic conditions. Thus, we can answer important design questions relating to necessary detector coverage and noise filtering without deploying and operating a full system. Our experiments indicate that the tracking algorithms currently implemented in the DIB:S/TRAFEN system could detect attacks such as Code Red v2 and Sapphire/Slammer very early, even when monitoring a quite limited portion of the address space, but more sophisticated algorithms are being constructed to reduce the risk of false positives in the presence of significant ``background noise'' scanning.},
  author = {Michael Liljenstam and David M.~Nicol and Vincent H.~Berk and Robert S.~Gray},
  booktitle = {Proceedings of the 2003 ACM Workshop on Rapid Malcode (WORM)},
  doi = {10.1145/948187.948193},
  isbn = {1-58113-785-0},
  pages = {24--33},
  publisher = {ACM Press},
  title = {Simulating realistic network worm traffic for worm warning system design and testing},
  year = {2003}
}
@misc{SSFNetURL,
  key = {SSFNet},
  note = {\url{http://www.ssfnet.org/}},
  title = {{Scalable Simulation Framework}}
}
@inproceedings{Liljenstam04,
  author = {Michael Liljenstam and David M.~Nicol},
  booktitle = {Proceedings of the First International Conference on the Quantitative Evaluation of Systems (QEST)},
  month = sep,
  pages = {18--27},
  title = {Comparing passive and active worm defenses},
  pdf = {http://www.linklings.net/MOSES/papers/qest-242.pdf},
  year = {2004}
}
@inproceedings{948196,
  abstract = {We present a general framework for reasoning about network worms and analyzing the potency of worms within a specific network. First, we present a discussion of the life cycle of a worm based on a survey of contemporary worms. We build on that life cycle by developing a relational model that associates worm parameters, attributes of the environment, and the subsequent potency of the worm. We then provide a worm analytic framework that captures the generalized mechanical process a worm goes through while moving through a specific environment and its state as it does so. The key contribution of this work is a worm analytic framework. This framework can be used to evaluate worm potency and develop and validate defensive countermeasures and postures in both static and dynamic worm conflict. This framework will be implemented in a modeling and simulation language in order to evaluate the potency of specific worms within an environment.},
  author = {Dan Ellis},
  booktitle = {Proceedings of the 2003 ACM Workshop on Rapid Malcode (WORM)},
  doi = {10.1145/948187.948196},
  isbn = {1-58113-785-0},
  location = {Washington, DC, USA},
  pages = {42--50},
  publisher = {ACM Press},
  title = {Worm anatomy and model},
  year = {2003}
}
@techreport{Ganger+02,
  author = {Gregory R.~Ganger and Gregg Economou and Stanley M.~Bielski},
  title = {Self-Securing Network Interfaces: What, Why and How},
  institution = {Computer Science Department, Carnegie Mellon University},
  year = {2002},
  abstract = {Self-securing network interfaces (NIs) examine the packets that they move between network links and host software, looking for and potentially blocking malicious network activity. This paper describes self-securing network interfaces, their features, and examples of how these features allow administrators to more effectively spot and contain malicious network activity. We present a software architecture for self-securing NIs that separates scanning software into applications (called scanners) running on a NI kernel. The resulting scanner API simplifies the construction of scanning software and allows its powers to be contained even if it is subverted. We illustrate the potential via a prototype self-securing NI and two example scanners: one that identifies and blocks known e-mail viruses and one that identifies and inhibits rapidly-propagating worms like Code-Red.}
}
@inproceedings{anagnostakis-cooperative,
  author = {K.~G.~Anagnostakis and M.~B.~Greenwald and S.~Ioannidis and A.~D.~Keromytis and D.~Li},
  booktitle = {Proceedings of the 11th IEEE International Conference on Networks (ICON'03)},
  month = oct,
  title = {A Cooperative Immunization System for an Untrusting {Internet}},
  year = {2003}
}
@inproceedings{Noordende04,
  author = {G.J.~van't Noordende and F.M.T.~Brazier and A.S.~Tanenbaum},
  booktitle = {First IEEE Symposium on Multi-Agent Security and Survivability (MAS\&S)},
  month = aug,
  pages = {35--45},
  title = {Security in a mobile agent system},
  pdf = {http://www.cs.vu.nl/~guido/mansion/publications/ps/mass.pdf},
  year = {2004}
}
@inproceedings{nojiri03,
  author = {D. Nojiri and J. Rowe and K. Levitt},
  booktitle = {Proceedings of the DARPA Information Survivability Conference and Exposition},
  pages = {293--302},
  title = {Cooperative Response Strategies for Large Scale Attack Mitigation},
  year = {2003}
}
@inproceedings{Wong2004,
  author = {Cynthia Wong and Chenxi Wang and Dawn Song and Stan Bielski and Gregory R.~Ganger},
  booktitle = {Proceedings of the International Conference on Dependable Systems and Networks {DSN-2004}},
  month = jun,
  title = {Dynamic Quarantine of {Internet} Worms},
  year = {2004}
}
@inproceedings{784785,
  author = {Matthew M. Williamson},
  booktitle = {Proceedings of the 18th Annual Computer Security Applications Conference},
  isbn = {0-7695-1828-1},
  pages = {61},
  publisher = {IEEE Computer Society},
  title = {Throttling Viruses: Restricting propagation to defeat malicious mobile code},
  year = {2002},
  doi = {10.1109/CSAC.2002.1176279},
  abstract = {Modern computer viruses spread incredibly quickly, far faster than human-mediated responses. This greatly increases the damage that they cause. This paper presents an approach to restricting this high speed propagation automatically. The approach is based on the observation that during virus propagation, an infected machine will connect to as many different machines as fast as possible. An uninfected machine has a different behaviour: connections are made at a lower rate, and are locally correlated (repeat connections to recently accessed machines are likely). This paper describes a simple technique to limit the rate of connections to ``new'' machines that is remarkably effective at both slowing and halting virus propagation without affecting normal traffic. Results of applying the filter to web browsing data are included. The paper concludes by suggesting an implementation and discussing the potential and limitations of this approach.}
}
@inproceedings{chen04slowing,
  author = {Shigang Chen and Yong Tang},
  title = {Slowing Down Internet Worms},
  booktitle = {Proceedings of the 24th International Conference on Distributed Computing Systems (ICDCS)},
  pages = {312--319},
  year = {2004},
  http = {http://citeseer.ist.psu.edu/chen04slowing.html},
  month = mar,
  publisher = {IEEE Computer Society},
  abstract = {An Internet worm automatically replicates itself to vulnerable systems and may infect hundreds of thousands of servers across the Internet. It is conceivable that the cyber-terrorists may use a wide-spread worm to cause major disruption to our Internet economy. While much recent research concentrates on propagation models, the defense against worms is largely an open problem. We propose a distributed anti-worm architecture (DAW) that automatically slows down or even halts the worm propagation. New defense techniques are developed based on behavioral difference between normal hosts and worm-infected hosts. Particulary, a worm-infected host has a much higher connection-failure rate when it scans the Internet with randomly selected addresses. This property allows DAW to set the worms apart from the normal hosts. We propose a temporal rate-limit algorithm and a spatial rate-limit algorithm, which makes the speed of worm propagation configurable by the parameters of the defense system. DAW is designed for an Internet service provider to provide the anti-worm service to its customers. The effectiveness of the new techniques is evaluated analytically and by simulations.}
}
@inproceedings{chen04warning,
  author = {Shigang Chen and Sanjay Ranka},
  title = {An internet-worm early warning system},
  booktitle = {Proceedings of the IEEE Globecom 2004 - Security and Network Management},
  pages = {2261--2265},
  year = {2004},
  volume = {4},
  month = nov,
  pdf = {http://www.cise.ufl.edu/~sgchen/papers/globecom2004_worm.pdf},
  http = {http://citeseer.ist.psu.edu/713025.html},
  abstract = {We propose an Internet-worm early warning system, which integrates a set of novel techniques that automatically detect the concerted scan activity of an on-going worm attack. It is able to issue warning at the early stage of worm propagation and to provide necessary information for security analysts to control the damage. The system monitors a ``used'' address space. Unlike the traditional approach that keeps track of SYN packets, it relies on RESET packets to find the scan sources, which has greater accuracy and less overhead. The system is resilient to anti-monitor measures. Particularly, a sophisticated protocol is designed to distinguish faked scan sources from real scan sources. We provide an analytical study on the properties and effectiveness of this early warning system, and back up our claims by numerical results.}
}
@inproceedings{DBLP:conf/mascots/RileySL04,
  author = {George F.~Riley and Monirul I.~Sharif and Wenke Lee},
  title = {Simulating {Internet} Worms},
  booktitle = {Proceedings of the 12th International Workshop on Modeling, Analysis, and Simulation of Computer and Telecommunication Systems (MASCOTS)},
  year = {2004},
  pages = {268--274},
  pdf = {http://www.cc.gatech.edu/~wenke/papers/worms_simulation.pdf},
  ee = {http://csdl.computer.org/comp/proceedings/mascots/2004/2251/00/22510268abs.htm},
  isbn = {0-7695-2251-3},
  bibsource = {DBLP, http://dblp.uni-trier.de},
  abstract = {The accurate and efficient modeling of Internet worms is a particularly challenging task for network simulation tools. The atypical and aggressive behavior of these worms can easily consume excessive resources, both processing time and storage, within a typical simulator. In particular, the selection of random IP addresses, and the sending of packets to the selected hosts, even if they are non-existent or not modeled in the simulation scenario, is challenging for existing network simulation tools. Further, the computation of routing information for these randomly chosen target addresses defeats most caching or on-demand routing methods, resulting in substantial overhead in the simulator. We discuss the design of our Internet worm models in the Georgia Tech Network Simulator, and show how we addressed these issues.We present some results from our Internet worm simulations that show the rate of infection spread for a typical worm under a variety of conditions.}
}
@inproceedings{Porras2004,
  author = {Phillip Porras and Linda Briesemeister and Keith Skinner and Karl Levitt and Jeff Rowe and Yu-Cheng Allen Ting},
  title = {A hybrid quarantine defense},
  booktitle = {Proceedings of the 2004 ACM Workshop on Rapid Malcode (WORM)},
  year = {2004},
  isbn = {1-58113-970-5},
  pages = {73--82},
  location = {Washington DC, USA},
  pdf = {publications/worm2004.pdf},
  doi = {10.1145/1029618.1029630},
  publisher = {ACM Press},
  abstract = {We study the strengths, weaknesses, and potential synergies of two complementary worm quarantine defense strategies under various worm attack profiles. We observe their abilities to delay or suppress infection growth rates under two propagation techniques and three scan rates, and explore the potential synergies in combining these two complementary quarantine strategies. We compare the performance of the individual strategies against a hybrid combination strategy, and conclude that the hybrid strategy yields substantial performance improvements, beyond what either technique provides independently. This result offers potential new directions in hybrid quarantine defenses.}
}
@inproceedings{Provos2004,
  author = {Niels Provos},
  booktitle = {Proceedings of the 12th USENIX Security Symposium},
  location = {San Diego, CA},
  title = {A Virtual Honeypot Framework},
  month = aug,
  year = {2004},
  pages = {1--14},
  ee = {http://www.usenix.org/publications/library/proceedings/sec04/tech/provos.html}
}
@techreport{Wang2004,
  author = {H. Wang and C. Guo and D. Simon and A. Zugenmaier},
  institution = {Microsoft Research},
  type = {Technical Report},
  number = {MSR-TR-2003-81},
  title = {Shield: Vulnerability-Driven Network Filters for Preventing Known Vulnerability Exploits},
  year = {2004}
}
@inproceedings{1015489,
  author = {Helen J. Wang and Chuanxiong Guo and Daniel R. Simon and Alf Zugenmaier},
  title = {Shield: vulnerability-driven network filters for preventing known vulnerability exploits},
  booktitle = {Proceedings of the 2004 Conference on Applications, Technologies, Architectures, and Protocols for Computer Communications (SIGCOMM)},
  year = {2004},
  isbn = {1-58113-862-8},
  pages = {193--204},
  location = {Portland, Oregon, USA},
  doi = {10.1145/1015467.1015489},
  publisher = {ACM Press},
  abstract = {Software patching has not been effective as a first-line defense against large-scale worm attacks, even when patches have long been available for their corresponding vulnerabilities. Generally, people have been reluctant to patch their systems immediately, because patches are perceived to be unreliable and disruptive to apply. To address this problem, we propose a first-line worm defense in the network stack, using shields -- vulnerability-specific, exploit-generic network filters installed in end systems once a vulnerability is discovered, but before a patch is applied. These filters examine the incoming or outgoing traffic of vulnerable applications, and correct traffic that exploits vulnerabilities. Shields are less disruptive to install and uninstall, easier to test for bad side effects, and hence more reliable than traditional software patches. Further, shields are resilient to polymorphic or metamorphic variations of exploits \cite{SzorFerrie2003}. In this paper, we show that this concept is feasible by describing a prototype Shield framework implementation that filters traffic above the transport layer. We have designed a safe and restrictive language to describe vulnerabilities as partial state machines of the vulnerable application. The expressiveness of the language has been verified by encoding the signatures of several known vulnerabilites. Our evaluation provides evidence of Shield's low false positive rate and small impact on application throughput. An examination of a sample set of known vulnerabilities suggests that Shield could be used to prevent exploitation of a substantial fraction of the most dangerous ones.}
}
@misc{SzorFerrie2003,
  author = {Peter Szor and Peter Ferrie},
  title = {Hunting for Metamorphic},
  howpublished = {Symantec Security Response. White Paper.},
  month = jun,
  year = {2003},
  http = {http://enterprisesecurity.symantec.com/content/displaycolateral.cfm?colID=426}
}
@techreport{Gorman2003,
  author = {Sean P.~Gorman and Rajendra G.~Kulkarni and Laurie A.~Schintler and Roger R.~Stough},
  title = {Least Effort Strategies for Cybersecurity},
  institution = {George Mason University},
  year = {2003},
  abstract = {Cybersecurity is an issue of increasing concern since the events of September 11th. Many questions have been raised concerning the security of the Internet and the rest of the US's information infrastructure. This paper begins to examine the issue by analyzing the Internet's autonomous system (AS) map. Using the AS map, malicious infections are simulated and different defense strategies are considered in a cost benefit framework. The results show that protecting the most connected nodes provides significant gains in security and that after the small minority of most connected nodes are protected there are diminishing returns for further protection. Although if parts of the small minority are not protected, such as non-US networks, protection levels are significantly decreased.}
}
@inproceedings{Staniford1996,
  author = {S.~Staniford-Chen and others},
  booktitle = {Proceedings of the 19th National Information Systems Security Conference},
  location = {Baltimore, MD},
  title = {{GrIDS}---{A} Graph based intrusion detection system for large networks},
  month = oct,
  year = {1996},
  volume = {1},
  pages = {361--370}
}
@inproceedings{Whyte2005,
  author = {David Whyte and Evangelos Kranakis and P.C.~van Oorschot},
  booktitle = {Proceedings of the 12th Network and Distributed System Security Symposium (NDSS)},
  location = {San Diego, CA},
  title = {{DNS}-based Detection of Scanning Worms in an Enterprise Network},
  month = feb,
  year = {2005},
  pages = {181--195}
}
@techreport{GualtieriMosse03,
  author = {M.~Gualtieri and D.~Moss{\'e}},
  institution = {Computer Science Department, University of Pittsburgh},
  title = {Limiting Worms Via {QoS} Degradation},
  year = {2003}
}
@inproceedings{Kannan2005,
  author = {Jayanthkumar Kannan and Lakshminarayanan Subramanian and Ion Stoica and Randy H.~Katz},
  title = {Analyzing Cooperative Containment of Fast Scanning Worms},
  booktitle = {Proceedings of USENIX Steps to Reducing Unwanted Traffic on the Internet Workshop (SRUTI)},
  pages = {17--23},
  year = {2005},
  month = jul,
  pdf = {http://www.cs.berkeley.edu/~lakme/sruti.pdf},
  http = {http://www.usenix.org/events/sruti05/tech/kannan.html},
  abstract = {Fast scanning worms, that can infect nearly the entire vulnerable population in order of minutes, are among the most serious threats to the Internet today. In this work, we investigate the efficacy of cooperation among Internet firewalls in containing such worms. We first propose a model for firewall-level cooperation and then study the containment in our model of cooperation using analysis and simulation. Our results suggest that, with moderate overhead, cooperation among Internet firewalls can provide 95\% containment under 10\% deployment while being resilient to 100--1000 malicious firewalls. }
}
@article{Cai2005,
  author = {Min Cai and Kai Hwang and Yu-Kwong Kwok and Shanshan Song and Yu Chen},
  title = {Collaborative {Internet} Worm Containment},
  journal = {IEEE Security and Privacy Magazine},
  year = {2005},
  volume = {3},
  number = {3},
  pages = {25--33},
  month = may # {/} # jun,
  abstract = {Large-scale worm outbreaks that lead to distributed denial-of-service (DDoS) attacks pose a major threat to Internet infrastructure security. Fast worm containment is crucial for minimizing damage and preventing flooding attacks against network hosts.},
  doi = {10.1109/MSP.2005.63}
}
@inproceedings{844105,
  author = {Hiroshi Toyoizumi and Atsuhi Kara},
  title = {Predators: {G}ood will mobile codes combat against computer viruses},
  booktitle = {Proceedings of the 2002 Workshop on New Security Paradigms (NSPW)},
  year = {2002},
  isbn = {1-58113-598-X},
  pages = {11--17},
  location = {Virginia Beach, Virginia},
  doi = {10.1145/844102.844105},
  abstract = {We present a mathematical analysis of a new approach to fight against computer viruses through the use of their predators. Predators are good will mobile codes which, like viruses, travel over computer networks, and replicate and multipy themselves. The only difference is that predators are specifically designed to eliminate the viruses. We model the interaction between predators and viruses by the Lotka-Volterra equations, which are widely used in mathematical biology. Using this model, we derive a method to constrain the number of predators to be as few as possible, while maintaining their power to eliminate viruses.}
}
@inproceedings{Scandariato2004,
  author = {Riccardo Scandariato and John Knight},
  title = {The Design and Evaluation of a Defense System for {Internet} Worms},
  booktitle = {Proceedings of the 23rd IEEE International Symposium on Reliable Distributed Systems (SRDS)},
  pages = {164--173},
  year = {2004},
  doi = {10.1109/RELDIS.2004.1353017},
  month = oct,
  issn = {1060-9857},
  abstract = {Many areas of society have become heavily dependent on services such as transportation facilities, utilities and so on that are implemented in part by large numbers of computers and communications links. Both past incidents and research studies show that a well-engineered Internet worm can disable such systems in a fairly simple way and, most notably, in a matter of a few minutes. This indicates the need for defenses against worms but their speed rules out the possibility of manually countering worm outbreaks. We present a platform that emulates the epidemic behavior of Internet active worms in very large networks. A reactive control system operates on top of the platform and provides a monitor/analyze/respond approach to deal with infections automatically. Details of our highly configurable platform and various experimental performance results are presented.}
}
@inproceedings{1080176,
  author = {Vijay Karamcheti and Davi Geiger and Zvi Kedem and S. Muthukrishnan},
  title = {Detecting malicious network traffic using inverse distributions of packet contents},
  booktitle = {MineNet '05: Proceedings of the 2005 ACM SIGCOMM workshop on Mining network data},
  year = {2005},
  isbn = {1-59593-026-4},
  pages = {165--170},
  location = {Philadelphia, Pennsylvania, USA},
  doi = {10.1145/1080173.1080176},
  publisher = {ACM Press},
  abstract = {We study the problem of detecting malicious IP traffic in the network early, by analyzing the contents of packets. Existing systems look at packet contents as a bag of substrings and study characteristics of its base distribution B where B(i) is the frequency of substring i.We propose studying the inverse distribution I where I(f) is the number of substrings that appear with frequency f. As we show using a detailed case study, the inverse distribution shows the emergence of malicious traffic very clearly not only in its ``static'' collection of bumps, but also in its nascent ``dynamic'' state when the phenomenon manifests itself only as a distortion of the inverse distribution envelope. We describe our probabilistic analysis of the inverse distribution in terms of Gaussian mixtures, our preliminary solution for discovering these bumps automatically. Finally, we briefly discuss challenges in analyzing the inverse distribution of IP contents and its applications.},
  address = {New York, NY, USA}
}
@inproceedings{DBLP:conf/dsn/SellkeSB05,
  author = {Sarah Sellke and Ness B. Shroff and Saurabh Bagchi},
  title = {Modeling and Automated Containment of Worms},
  booktitle = {Proceedings of the International Conference on Dependable Systems and Networks (DSN)},
  year = {2005},
  pages = {528--537},
  pdf = {http://shay.ecn.purdue.edu/~dcsl/Publications/papers/worm.pdf},
  doi = {10.1109/DSN.2005.66},
  bibsource = {DBLP, http://dblp.uni-trier.de},
  abstract = {Self-propagating codes, called worms, such as Code Red, Nimda, and Slammer, have drawn significant attention due to their enormous adverse impact on the Internet. There is a great interest in the research community in modeling the spread of worms and in providing adequate defense mechanisms against them. In this paper, we present a (stochastic) branching process model for characterizing the propagation of Internet worms. This model leads to the development of an automatic worm containment strategy that prevents the spread of worms beyond its early stages. Specifically, using the branching process model, we are able to (1) provide a precise condition that determines whether the worm will eventually die out and (2) provdide the probability that the total number of hosts that the worm infects will be below a certain level. We use these insights to develop a simple automatic worm containment scheme, which is demonstrated, through simulations and real trace data, to be both effective and non-intrusive.}
}
@article{QingWen2005,
  author = {Sihan Qing and Weiping Wen},
  title = {A survey and trends on {Internet} worms},
  journal = {Computers \& Security},
  year = {2005},
  volume = {24},
  number = {4},
  pages = {334--346},
  month = jun,
  doi = {10.1016/j.cose.2004.10.001},
  abstract = {With the explosive growth and increasing complexity of network applications, the threats of Internet worms against network security are more and more serious. This paper presents the concepts and research situations of Internet worms, their function component, and their execution mechanism. It also addresses the scanning strategies, propagation models, and the critical techniques of Internet worm prevention. Finally, the remaining problems and emerging trends in this area are also outlined.}
}
@article{Stephenson2005,
  author = {Peter Stephenson},
  title = {Modeling a virus or worm attack},
  journal = {Computer Fraud \& Security},
  year = {2004},
  number = {9},
  pages = {15--19},
  month = sep,
  doi = {10.1016/S1361-3723(04)00112-5},
  abstract = {In our last two columns we introduced the concepts of modeling and simulation, security policy domains and the use of Colored Petri Nets (CPNets). In this column we will take the Net that we created in our last column (and discussed very briefly) and describe in more detail how we built it and how we use it to simulate security-relevant network activity. We begin by reviewing, briefly, the CPNet we created in the last column.}
}
@article{1053288,
  author = {Peng Liu and Wanyu Zang and Meng Yu},
  title = {Incentive-based modeling and inference of attacker intent, objectives, and strategies},
  journal = {ACM Transactions on Information and System Security},
  volume = {8},
  number = {1},
  year = {2005},
  issn = {1094-9224},
  pages = {78--118},
  doi = {10.1145/1053283.1053288},
  publisher = {ACM Press},
  address = {New York, NY, USA}
}
@article{Zou2005,
  author = {Cliff C. Zou and Don Towsley and Weibo Gong},
  title = {On the performance of {Internet} worm scanning strategies},
  journal = {Performance Evaluation},
  year = {2005},
  doi = {10.1016/j.peva.2005.07.032},
  note = {In Press, Corrected Proof},
  abstract = {In recent years, fast spreading worms, such as Code Red, Slammer, Blaster and Sasser, have become one of the major threats to the security of the Internet. In order to defend against future worms, it is important to first understand how worms propagate and how different scanning strategies affect worm propagation dynamics. In this paper, we systematically model and analyze worm propagation under various scanning strategies, such as uniform scan, routing scan, hit-list scan, cooperative scan, local preference scan, sequential scan, divide-and-conquer scan, target scan, etc. We also provide an analytical model to accurately model Witty worm's destructive behavior. By using the same modeling framework, we reveal the underlying similarity and relationship between different worm scanning strategies. In addition, based on our simulation and analysis of Blaster worm propagation and monitoring, we provide a guideline for building a better worm monitoring infrastructure.}
}
@article{XL:03,
  title = {Sustaining Availability of Web Services under Severe Denial of Service Attacks},
  journal = {IEEE Transaction on Computers, special issue on Reliable Distributed Systems},
  author = {J. Xu and W. Lee},
  editor = {A. Bondavalli and S. Upadhyaya},
  volume = 52,
  number = 2,
  pages = {195--208},
  month = feb,
  year = 2003,
  doi = {10.1109/TC.2003.1176986},
  abstract = {The recent tide of Distributed Denial of Service (DDoS) attacks against high-profile web sites demonstrate how devastating DDoS attacks are and how defenseless the Internet is under such attacks. We design a practical DDoS defense system that can protect the availability of web services during severe DDoS attacks. The basic idea behind our system is to isolate and protect legitimate traffic from a huge volume of DDoS traffic when an attack occurs. Traffic that needs to be protected can be recognized and protected using efficient cryptographic techniques. Therefore, by provisioning adequate resource (e.g., bandwidth) to legitimate traffic separated by this process, we are able to provide adequate service to a large percentage of clients during DDoS attacks. The worst-case performance (effectiveness) of the system is evaluated based on a novel game theoretical framework, which characterizes the natural adversarial relationship between a DDoS adversary and the proposed system. We also conduct a simulation study to verify a key assumption used in the game-theoretical analysis and to demonstrate the system dynamics during an attack.}
}
@inproceedings{830561,
  author = {XiaoFeng Wang and Michael K. Reiter},
  title = {Defending Against Denial-of-Service Attacks with Puzzle Auctions},
  booktitle = {SP '03: Proceedings of the 2003 IEEE Symposium on Security and Privacy},
  year = {2003},
  isbn = {0-7695-1940-7},
  pages = {78},
  publisher = {IEEE Computer Society},
  address = {Washington, DC, USA},
  doi = {10.1109/SECPRI.2003.1199329},
  abstract = {Although client puzzles represent a promising approach to defend against certain classes of denial-of-service attacks, several questions stand in the way of their deployment in practice: e.g., how to set the puzzle difficulty in the presence of an adversary with unknown computing power, and how to integrate the approach with existing mechanisms. In this paper, we attempt to address these questions with a new puzzle mechanism called the puzzle auction. Our mechanism enables each client to ``bid'' for resources by tuning the difficulty of the puzzles it solves, and to adapt its bidding strategy in response to apparent attacks. We analyze the effectiveness of our auction mechanism and further demonstrate it using an implementation within the TCP protocol stack of the Linux kernel. Our implementation has several appealing properties. It effectively defends against SYN flooding attacks, is fully compatible with TCP, and even provides a degree of interoperability with clients with unmodified kernels: Even without a puzzle-solving kernel, a client still can connect to a puzzle auction server under attack (albeit less effectively than those with puzzle-solving kernels, and at the cost of additional server expense).}
}
@inproceedings{Browne2000,
  author = {Randy Browne},
  title = {{C4I} defensive infrastructure for survivability against multi-mode attacks},
  booktitle = {Proceedings of the 21st Century Military Communications Conference (MILCOM)},
  pages = {417--424},
  year = {2000},
  volume = {1},
  month = oct,
  doi = {10.1109/MILCOM.2000.904987},
  abstract = {A previous paper points out that the United States and her allies cannot achieve information superiority simply by prevailing at information warfare. 21st century C4I systems must be able to defend against ``multi-mode'' attacks, which are enemy strategies using clever combinations of conventional and non-conventional warfare. Owing to the problem of multi-mode attacks, completely new approaches to C4I defensive architecture are needed. This current paper criticizes some popular 20th century C4I defense technologies, such as adaptive autonomic defenses and encapsulated self-healing networks and systems, all of which are technologies with severe inherent weakness against multi-mode attacks. This paper is a speculative discussion of new C4I defense technologies as well as policy issues regarding information superiority that have never been adequately addressed in the literature. The intent is to stimulate new research and development to the benefit of practical fielded C4I systems.}
}
@inproceedings{DBLP:conf/csreaSAM/ZhuD03,
  author = {Gang Zhu and Jie Dai},
  title = {Economic Perspective of Information Security},
  booktitle = {Security and Management},
  pages = {527--533},
  year = {2003},
  bibsource = {DBLP, http://dblp.uni-trier.de}
}
@inproceedings{Nicol2004,
  author = {D. Nicol and M. Liljenstam},
  title = {Models of Active Worm Defenses},
  booktitle = {Proceedings of the Measurement, Modeling and Analysis of the Internet Workshop (IMA)},
  year = {2004},
  month = jan,
  pdf = {http://www.linklings.net/MOSES/papers/ipsi-236.pdf}
}
@techreport{Zou2004,
  author = {Cliff C. Zou and Don Towsley and Weibo Gong},
  title = {A Firewall Network System for Worm Defense in Enterprise Networks},
  institution = {University of Massachusetts Amherst, College of Engineering},
  year = {2004},
  number = {TR-04-CSE-01},
  month = feb,
  pdf = {http://tennis.ecs.umass.edu/~czou/research/FirewallNetwork-techreport.pdf}
}
@inproceedings{Moore03,
  author = {D. Moore and C. Shannon and G.M. Voelker and S. Savage},
  title = {Internet Quarantine: {Requirements} for Containing Self-Propagating Code},
  booktitle = {Proceedings of the 2003 IEEE Infocom Conference (INFOCOM)},
  year = {2003},
  month = apr,
  optpages = {},
  pdf = {http://www.cs.ucsd.edu/users/voelker/pubs/worm-infocom03.pdf},
  http = {http://www.ieee-infocom.org/2003/papers/46_04.PDF}
}
@techreport{carrier,
  author = {Brian Carrier and Sundararaman Jeyaraman and Sarah Sellke},
  title = {Impact of Network Design on Worm Propagation},
  institution = {CERIAS, Purdue University},
  year = {2004},
  number = {2004-35},
  note = {CERIAS TR 2004-35},
  pdf = {https://www.cerias.purdue.edu/tools_and_resources/bibtex_archive/archive/2004-35.pdf},
  abstract = {In this paper, we simulate the Code Red II and Nimda worms on different enterprise-scale networks to determine the impact that topology has on worm propagation.  A corporate network can be designed to improve security and, as we show, to decrease the propagation rate of worms that use network scanning as a target discovery technique.  We also examine the impact that LaBrea-like devices have on propagation rates and compare it to the impact of network topology.}
}
@inproceedings{Pnueli77:FOCS,
  author = {A. Pnueli},
  title = {The temporal logic of programs},
  booktitle = {Proceedings of the 18th {IEEE} Symposium on Foundations of Computer Science},
  pages = {46--57},
  year = {1977}
}
@inproceedings{Zou2002,
  author = {Cliff Changchun Zou and Weibo Gong and Don Towsley},
  title = {Code red worm propagation modeling and analysis},
  booktitle = {Proceedings of the 9th ACM Conference on Computer and Communications Security (CCS)},
  year = {2002},
  isbn = {1-58113-612-9},
  pages = {138--147},
  doi = {10.1145/586110.586130},
  abstract = {The Code Red worm incident of July 2001 has stimulated activities to model and analyze Internet worm propagation. In this paper we provide a careful analysis of Code Red propagation by accounting for two factors: one is the dynamic countermeasures taken by ISPs and users; the other is the slowed down worm infection rate because Code Red rampant propagation caused congestion and troubles to some routers. Based on the classical epidemic Kermack-Mckendrick model, we derive a general Internet worm model called the two-factor worm model. Simulations and numerical solutions of the two-factor worm model match the observed data of Code Red worm better than previous models do. This model leads to a better understanding and prediction of the scale and speed of Internet worm spreading.}
}
@inproceedings{Weaver2004,
  author = {Nicholas Weaver and Ihab Hamadeh and George Kesidis and Vern Paxson},
  title = {Preliminary results using scale-down to explore worm dynamics},
  booktitle = {Proceedings of the 2004 ACM Workshop on Rapid Malcode (WORM)},
  year = {2004},
  isbn = {1-58113-970-5},
  pages = {65--72},
  doi = {10.1145/1029618.1029628},
  abstract = {A major challenge when attempting to analyze and model large-scale Internet phenomena such as the dynamics of global worm propagation is finding appropriate abstractions that allow us to tractably grapple with size of the artifact while still capturing its most salient properties. We present initial results from investigating ``scaledown'' techniques for approximating global Internet worm dynamics by shrinking the effective size of the network under study. We explore scaledown in the context of both simulation and analysis, using as a calibration touchstone an attempt to reproduce the empirically observed behavior of the Slammer worm, which exhibited a peculiar decline in average per-worm scanning rate not seen in other worms (except for the later Witty worm, which exhibited similar propagation dynamics). We develop a series of abstract models approximating Slammer's Internet propagation and demonstrate that such modeling appears to require incorporating both heterogeneous clustering of infectibles and heterogeneous access-link bandwidths connecting those clusters to the Internet core. We demonstrate the viability of scaledown but also explore two important artifacts it introduces: heightened variability of results, and biasing the worm towards earlier propagation.}
}
@inproceedings{Abdelhafez05,
  author = {M. Abdelhafez and G.F. Riley},
  booktitle = {Third IEEE International Workshop on Information Assurance (IWIA)},
  location = {College Park, Maryland},
  title = {Evaluation of Worm Containment Algorithms and Their Effect on Legitimate Traffic},
  month = mar,
  year = {2005}
}
@inproceedings{Zhang2004,
  author = {Yun-Kai Zhang and Fang-Wei Wang and Yu-Qing Zhang and Jian-Feng Ma},
  title = {Worm propagation modeling and analysis based on quarantine},
  booktitle = {Proceedings of the 3rd International Conference on Information Security (InfoSecu)},
  year = {2004},
  isbn = {1-58113-955-1},
  pages = {69--75},
  location = {Shanghai, China},
  doi = {10.1145/1046290.1046305},
  abstract = {In recent years, the worms that had a dramatic increase in the frequency and virulence of such outbreaks have become one of the major threats to the security of the Internet. In this paper, we provide a worm propagating model. It bases on the classical epidemic Kermack-Kermack model, adopts dynamic quarantine strategy, dynamic infecting rate and removing rate. The analysis shows that model can efficiently reduce a worm's propagation speed, which can give us more precious time to defend it, and reduce the negative influence of worms. The simulation results verify the effectiveness of the model.}
}
@misc{SAL-language,
  key = {SAL-language},
  title = {The {SAL} intermediate language},
  howpublished = {Computer Science Laboratory, SRI International, Menlo Park, CA},
  year = 2003,
  note = {\url{http://sal.csl.sri.com/}}
}
@inproceedings{SAL204:CAV,
  author = {de Moura, L. and Owre, S. and Rue{\ss}, H. and Rushby, J. and Shankar, N. and Sorea, M. and Tiwari, A.},
  title = {{SAL} 2},
  booktitle = {Computer-Aided Verification, CAV},
  editor = {Alur, R. and Peled, D.},
  series = {LNCS},
  volume = {3114},
  pages = {496--500},
  publisher = {Springer},
  month = jul,
  year = 2004
}
@inproceedings{Staniford:usenixsec2002,
  author = {Stuart Staniford and Vern Paxson and Nicholas Weaver},
  title = {How to 0wn the {I}nternet in Your Spare Time},
  booktitle = {Proceedings of the 11th USENIX Security Symposium},
  year = 2002,
  month = aug,
  abstract = {The ability of attackers to rapidly gain control of vast numbers of Internet hosts poses an immense risk to the overall security of the Internet. Once subverted, these hosts can not only be used to launch massive denial of service floods, but also to steal or corrupt great quantities of sensitive information, and confuse and disrupt use of the network in more subtle ways. \par We present an analysis of the magnitude of the threat. We begin with a mathematical model derived from empirical data of the spread of Code Red I in July, 2001. We discuss techniques subsequently employed for achieving greater virulence by Code Red II and Nimda. In this context, we develop and evaluate several new, highly virulent possible techniques: hit-list scanning (which creates a \textit{Warhol} worm), permutation scanning (which enables self-coordinating scanning), and use of Internet-sized hit-lists (which creates a \textit{flash worm}). \par We then turn to the to the threat of \textit{surreptitious} worms that spread more slowly but in a much harder to detect ``contagion'' fashion. We demonstrate that such a worm today could arguably subvert upwards of 10,000,000 Internet hosts. We also consider robust mechanisms by which attackers can control and update deployed worms. \par In conclusion, we argue for the pressing need to develop a ``Center for Disease Control'' analog for virus- and worm-based threats to national cybersecurity, and sketch some of the components that would go into such a Center. }
}
@inproceedings{948190,
  author = {Nicholas Weaver and Vern Paxson and Stuart Staniford and Robert Cunningham},
  title = {A taxonomy of computer worms},
  booktitle = {Proceedings of the 2003 ACM Workshop on Rapid Malcode (WORM)},
  year = {2003},
  pages = {11--18},
  doi = {10.1145/948187.948190},
  abstract = {To understand the threat posed by computer worms, it is necessary to understand the classes of worms, the attackers who may employ them, and the potential payloads. This paper describes a preliminary taxonomy based on worm target discovery and selection strategies, worm carrier mechanisms, worm activation, possible payloads, and plausible attackers who would employ a worm.}
}
@inproceedings{1095824,
  author = {Manuel Costa and Jon Crowcroft and Miguel Castro and Antony Rowstron and Lidong Zhou and Lintao Zhang and Paul Barham},
  title = {Vigilante: end-to-end containment of {I}nternet worms},
  booktitle = {Proceedings of the 20th ACM Symposium on Operating Systems Principles (SOSP)},
  month = oct,
  year = {2005},
  isbn = {1-59593-079-5},
  pages = {133--147},
  location = {Brighton, United Kingdom},
  doi = {10.1145/1095810.1095824},
  abstract = {Worm containment must be automatic because worms can spread too fast for humans to respond. Recent work has proposed network-level techniques to automate worm containment; these techniques have limitations because there is no information about the vulnerabilities exploited by worms at the network level. We propose Vigilante, a new end-to-end approach to contain worms automatically that addresses these limitations. Vigilante relies on collaborative worm detection at end hosts, but does not require hosts to trust each other. Hosts run instrumented software to detect worms and broadcast self-certifying alerts (SCAs) upon worm detection. SCAs are proofs of vulnerability that can be inexpensively verified by any vulnerable host. When hosts receive an SCA, they generate filters that block infection by analysing the SCA-guided execution of the vulnerable software. We show that Vigilante can automatically contain fast-spreading worms that exploit unknown vulnerabilities without blocking innocuous traffic.}
}
@inproceedings{McDaniel2006,
  author = {Patrick McDaniel and Shubho Sen and Oliver Spatscheck and Jacobus Van der Merwe and Bill Aiello and Charles Kalmanek},
  title = {Enterprise Security: A Community of Interest Based Approach},
  booktitle = {Proceedings of Network and Distributed Systems Security (NDSS)},
  optpages = {},
  year = {2006},
  opteditor = {},
  optvolume = {},
  optnumber = {},
  month = feb,
  pdf = {http://www.patrickmcdaniel.org/pubs/ndss06.pdf},
  note = {(draft)},
  abstract = {Enterprise networks today carry a range of mission critical communications. A successful worm attack within an enterprise network can be substantially more devastating to most companies than attacks on the larger Internet. In this paper we explore a brownfield approach to hardening an enterprise network against active malware such as worms. The premise of our approach is that if future communication patterns are constrained to historical ``normal'' communication patterns, then the ability of malware to exploit vulnerabilities in the enterprise can be severely curtailed. We present techniques for automatically deriving individual host profiles that capture historical communication patterns (i.e., community of interest (COI)) of end hosts within an enterprise network. Using traces from a large enterprise network, we investigate how a range of different security policies based on these profiles impact usability (as valid communications may get restricted) and security (how well the policies contain malware). Our evaluations indicate that a simple security policy comprising our \textit{Extended COI-based profile and relaxed Throttling Discipline} can effectively contains worm behavior within an enterprise without significantly impairing normal network operation.}
}
@inproceedings{KimKarp04,
  author = {Hyang-Ah Kim and Brad Karp},
  title = {Autograph: Toward Automated, Distributed Worm Signature Detection},
  booktitle = {USENIX Security Symposium},
  year = {2004},
  pages = {271--286},
  http = {http://www.usenix.org/publications/library/proceedings/sec04/tech/kim.html},
  abstract = {Today's Internet intrusion detection systems (IDSes) monitor edge networks' DMZs to identify and/or filter malicious flows. While an IDS helps protect the hosts on its local edge network from compromise and denial of service, it cannot alone effectively intervene to halt and reverse the spreading of novel Internet worms. Generation of the \textit{worm signatures} required by an IDS---the byte patterns sought in monitored traffic to identify worms---today entails non-trivial human labor, and thus significant delay: as network operators detect anomalous behavior, they communicate with one another and manually study packet traces to produce a worm signature. Yet intervention must occur early in an epidemic to halt a worm's spread. In this paper, we describe Autograph, a system that \textit{automatically} generates signatures for novel Internet worms that propagate using TCP transport. Autograph generates signatures by analyzing the prevalence of \textit{portions of flow payloads}, and thus uses no knowledge of protocol semantics above the TCP level. It is designed to produce signatures that exhibit high \textit{sensitivity} (high true positives) and high \textit{specificity} (low false positives); our evaluation of the system on real DMZ traces validates that it achieves these goals. We extend Autograph to share port scan reports among distributed monitor instances, and using trace-driven simulation, demonstrate the value of this technique in speeding the generation of signatures for novel worms. Our results elucidate the fundamental trade-off between early generation of signatures for novel worms and the specificity of these generated signatures.}
}
@techreport{Sing+03,
  author = {Sumeet Singh and Cristian Estan and George Varghese and Stefan Savage},
  title = {The {EarlyBird} System for Realtime Detection of Unknown Worms},
  institution = {UC San Diego},
  year = {2003},
  number = {CS2003-0761},
  month = aug,
  optnote = {},
  abstract = {Network worms are a major threat to the security of today's Internet-connected hosts and networks. The combination of unmitigated connectivity and widespread software homogeneity allows worms to exploit tremendous parallelism in propagation. Modern worms spread so quickly that no human-mediated reaction to the outbreak of a new worm can hope to prevent a widespread epidemic. In this paper we propose an automated method for detecting new worms based on traffic characteristics common to most of them: highly repetitive packet content, an increasing population of sources generating infections and an increasing number of destinations being targeted. Our method generates content signatures for the worm without any human intervention. Preliminary results on a small network show promising results: we have identified three confirmed worms with a low percentage of false positives. This gives us reason to believe that our method could form the core of an effective network-level worm detection and countermeasure system capable of substantially slowing down the spread of new worms.}
}
@techreport{PortokalidisBos2005,
  author = {Georgios Portokalidis and Herbert Bos},
  title = {{SweetBait}: Zero-Hour Worm Detection and Containment Using Honeypots},
  institution = {Vrije Universiteit Amsterdam},
  year = {2005},
  number = {IR-CS-015},
  month = may,
  pdf = {http://www.cs.vu.nl/~herbertb/papers/sweetbait-ir-cs-015.pdf}
}
@inproceedings{Locasto+2005,
  author = {M. Locasto and J. Parekh and A. Keromytis and S. Stolfo},
  title = {Towards collaborative security and {P2P} intrusion detection},
  booktitle = {Proceedings from the Sixth Annual IEEE Systems, Man and Cybernetics (SMC) Information Assurance Workshop},
  pages = {333--339},
  year = {2005},
  month = jun,
  doi = {10.1109/IAW.2005.1495971},
  abstract = { The increasing array of Internet-scale threats is a pressing problem for every organization that utilizes the network. Organizations have limited resources to detect and respond to these threats. The end-to-end (E2E) sharing of information related to probes and attacks is a facet of an emerging trend toward ``collaborative security''. The key benefit of a collaborative approach to intrusion detection is a better view of global network attack activity. Augmenting the information obtained at a single site with information gathered from across the network can provide a more precise model of an attacker's behavior and intent. While many organizations see value in adopting such a collaborative approach, some challenges must be addressed before intrusion detection can be performed on an inter-organizational scale. We report on our experience developing and deploying a decentralized system for efficiently distributing alerts to collaborating peers. Our system, worminator, extracts relevant information from alert streams and encodes it in bloom filters. This information forms the basis of a distributed watchlist. The watchlist can be distributed via a choice of mechanisms ranging from a centralized trusted third party to a decentralized P2P-style overlay network.}
}
@misc{worminator-site,
  key = {WORM},
  title = {Worminator web site},
  howpublished = {\url{http://worminator.cs.columbia.edu}}
}
@inproceedings{White+:osdi02,
  author = {Brian White and Jay Lepreau and Leigh Stoller and Robert Ricci and Shashi Guruprasad and Mac Newbold and Mike Hibler and Chad Barb and Abhijeet Joglekar},
  title = {An Integrated Experimental Environment for Distributed Systems and Networks},
  booktitle = {Proceedings of the Fifth Symposium on Operating Systems Design and Implementation (OSDI)},
  organization = {{USENIX} {Association}},
  month = dec,
  year = {2002},
  pages = {255--270},
  abstract = {Three experimental environments traditionally support network and distributed systems research: network emulators, network simulators, and live networks. The continued use of multiple approaches highlights both the value and inadequacy of each. Netbed, a descendant of Emulab, provides an experimentation facility that integrates these approaches, allowing researchers to configure and access networks composed of emulated, simulated, and wide-area nodes and links. Netbed's primary goals are ease of use, control, and realism, achieved through consistent use of virtualization and abstraction. \par By providing operating system-like services, such as resource allocation and scheduling, and by virtualizing heterogeneous resources, Netbed acts as a virtual machine for network experimentation. This paper presents Netbed's overall design and implementation and demonstrates its ability to improve experimental automation and efficiency. These, in turn, lead to new methods of experimentation, including automated parameter-space studies within emulation and straightforward comparisons of simulated, emulated, and wide-area scenarios.}
}
@article{972384,
  author = {Christian Kreibich and Jon Crowcroft},
  title = {Honeycomb: creating intrusion detection signatures using honeypots},
  journal = {SIGCOMM Comput. Commun. Rev.},
  volume = {34},
  number = {1},
  year = {2004},
  issn = {0146-4833},
  pages = {51--56},
  doi = {10.1145/972374.972384},
  publisher = {ACM Press},
  address = {New York, NY, USA},
  abstract = {This paper describes a system for automated generation of attack signatures for network intrusion detection systems. Our system applies pattern-matching techniques and protocol conformance checks on multiple levels in the protocol hierarchy to network traffic captured a honeypot system. We present results of running the system on an unprotected cable modem connection for 24 hours. The system successfully created precise traffic signatures that otherwise would have required the skills and time of a security officer to inspect the traffic manually.}
}
@inbook{Stolfo2005,
  author = {Salvatore Stolfo},
  alteditor = {},
  title = {The Black Book on Corporate Security},
  chapter = {Collaborative Security: Uniting Against a Common Foe},
  publisher = {Larstan Publishing},
  year = {2005},
  isbn = {0-9764266-1-7},
  optvolume = {},
  optnumber = {},
  optseries = {},
  opttype = {},
  optaddress = {},
  optedition = {},
  optmonth = {},
  pages = {219--237},
  optnote = {},
  optannote = {}
}
@inproceedings{SinghEVS04,
  author = {Sumeet Singh and Cristian Estan and George Varghese and Stefan Savage},
  title = {Automated Worm Fingerprinting},
  booktitle = {Proceedings of the 6th Symposium on Operating Systems Design and Implementation (OSDI)},
  year = {2004},
  month = dec,
  pages = {45--60},
  pdf = {http://www.usenix.org/events/osdi04/tech/full_papers/singh/singh.pdf},
  http = {http://www.usenix.org/events/osdi04/tech/singh.html},
  abstract = {Network worms are a clear and growing threat to the security of today's Internet-connected hosts and networks. The combination of the Internet's unrestricted connectivity and widespread software homogeneity allows network pathogens to exploit tremendous parallelism in their propagation. In fact, modern worms can spread so quickly, and so widely, that no human-mediated reaction can hope to contain an outbreak.\par In this paper, we propose an automated approach for quickly detecting previously unknown worms and viruses based on two key behavioral characteristics---a common exploit sequence together with a range of unique sources generating infections and destinations being targeted. More importantly, our approach---called ``content sifting''---automatically generates \textit{precise} signatures that can then be used to filter or moderate the spread of the worm \textit{elsewhere} in the network.\par Using a combination of existing and novel algorithms we have developed a scalable content sifting implementation with low memory and CPU requirements. Over months of active use at UCSD, our \textit{Earlybird} prototype system has automatically detected and generated signatures for all pathogens known to be active on our network as well as for several \textit{new} worms and viruses which were \textit{unknown} at the time our system identified them. Our initial experience suggests that, for a wide range of network pathogens, it may be practical to construct fully automated defenses---even against so-called ``zero-day'' epidemics.}
}
@inproceedings{1102152,
  author = {Jedidiah R. Crandall and Zhendong Su and S. Felix Wu and Frederic T. Chong},
  title = {On deriving unknown vulnerabilities from zero-day polymorphic and metamorphic worm exploits},
  booktitle = {Proceedings of the 12th ACM Conference on Computer and Communications Security (CCS)},
  year = {2005},
  month = nov,
  isbn = {1-59593-226-7},
  pages = {235--248},
  location = {Alexandria, VA, USA},
  doi = {10.1145/1102120.1102152},
  abstract = {Vulnerabilities that allow worms to hijack the control flow of each host that they spread to are typically discovered months before the worm outbreak, but are also typically discovered by third party researchers. A determined attacker could discover vulnerabilities as easily and create zero-day worms for vulnerabilities unknown to network defenses. It is important for an analysis tool to be able to generalize from a new exploit observed and derive protection for the vulnerability. Many researchers have observed that certain predicates of the exploit vector must be present for the exploit to work and that therefore these predicates place a limit on the amount of polymorphism and metamorphism available to the attacker. We formalize this idea and subject it to quantitative analysis with a symbolic execution tool called DACODA. Using DACODA we provide an empirical analysis of 14 exploits (seven of them actual worms or attacks from the Internet, caught by Minos with no prior knowledge of the vulnerabilities and no false positives observed over a period of six months) for four operating systems. Evaluation of our results in the light of these two models leads us to conclude that 1) single contiguous byte string signatures are not effective for content filtering, and token-based byte string signatures composed of smaller substrings are only semantically rich enough to be effective for content filtering if the vulnerability lies in a part of a protocol that is not commonly used, and that 2) practical exploit analysis must account for multiple processes, multithreading, and kernel processing of network data necessitating a focus on primitives instead of vulnerabilities.}
}
@inproceedings{CrandallWC05,
  author = {Jedidiah R. Crandall and Shyhtsun Felix Wu and Frederic T. Chong},
  title = {Experiences Using Minos as a Tool for Capturing and Analyzing Novel Worms for Unknown Vulnerabilities},
  booktitle = {Proceedings of the Second International Conference on Intrusion and Malware Detection and Vulnerability Assessment (DIMVA)},
  month = jul,
  year = {2005},
  pages = {32--50},
  doi = {10.1007/11506881_3},
  abstract = {We present a honeypot technique based on an emulated environment of the Minos architecture \cite{CrandallC04} and describe our experiences and observations capturing and analyzing attacks. The main advantage of a Minos-enabled honeypot is that exploits based on corrupting control data can be stopped at the critical point where control flow is hijacked from the legitimate program, facilitating a detailed analysis of the exploit.\par Although Minos hardware has not yet been implemented, we are able to deploy Minos systems with the Bochs full system Pentium emulator. We discuss complexities of the exploits Minos has caught that are not accounted for in the simple model of ``buffer overflow exploits'' prevalent in the literature. We then propose the Epsilon-Gamma-Pi model to describe control data attacks in a way that is useful towards understanding polymorphic techniques. This model can not only aim at the centers of the concepts of exploit vector ($\epsilon$), bogus control data ($\gamma$), and payload ($\pi$) but also give them shape. This paper will quantify the polymorphism available to an attacker for $\gamma$ and $\pi$, while so characterizing $\epsilon$ is left for future work.}
}
@inproceedings{CrandallC04,
  author = {Jedidiah R. Crandall and Frederic T. Chong},
  title = {{Minos}: Control Data Attack Prevention Orthogonal to Memory Model},
  booktitle = {Proceedings of the 37th International Symposium on Microarchitecture (MICRO)},
  year = {2004},
  pages = {221--232},
  doi = {http://doi.ieeecomputersociety.org/10.1109/MICRO.2004.26},
  abstract = {We introduce Minos, a microarchitecture that implements Biba's low-water-mark integrity policy on individual words of data. Minos stops attacks that corrupt control data to hijack program control flow but is orthogonal to the memory model. Control data is any data which is loaded into the program counter on control flow transfer, or any data used to calculate such data. The key is that Minos tracks the integrity of all data, but protects control flow by checking this integrity when a program uses the data for control transfer. Existing policies, in contrast, need to differentiate between control and non-control data a priori, a task made impossible by coercions between pointers and other data types such as integers in the C language. Our implementation of Minos for Red Hat Linux 6.2 on a Pentium-based emulator is a stable, usable Linux system on the network on which we are currently running a web server. Our emulated Minos systems running Linux and Windows have stopped several actual attacks. We present a microarchitectural implementation of Minos that achieves negligible impact on cycle time with a small investment in die area, and minor changes to the Linux kernel to handle the tag bits and perform virtual memory swapping.}
}
@inproceedings{NewsomeKS05,
  author = {James Newsome and Brad Karp and Dawn Xiaodong Song},
  title = {{Polygraph}: Automatically Generating Signatures for Polymorphic Worms},
  booktitle = {IEEE Symposium on Security and Privacy},
  year = {2005},
  month = {May},
  pages = {226--241},
  doi = {http://doi.ieeecomputersociety.org/10.1109/SP.2005.15},
  abstract = {It is widely believed that content-signature-based intrusion detection systems (IDS) are easily evaded by polymorphic worms, which vary their payload on every infection attempt. In this paper, we present Polygraph, a signature generation system that successfully produces signatures that match polymorphic worms. Polygraph generates signatures that consist of multiple disjoint content substrings. In doing so, Polygraph leverages our insight that for a real-world exploit to function properly, multiple invariant substrings must often be present in all variants of a payload; these substrings typically correspond to protocol framing, return addresses, and in some cases, poorly obfuscated code. We contribute a definition of the polymorphic signature generation problem; propose classes of signature suited for matching polymorphic worm payloads; and present algorithms for automatic generation of signatures in these classes. Our evaluation of these algorithms on a range of polymorphic worms demonstrates that Polygraph produces signatures for polymorphic worms that exhibit low false negatives and false positives.}
}
@inproceedings{1102150,
  author = {Zhenkai Liang and R. Sekar},
  title = {Fast and automated generation of attack signatures: a basis for building self-protecting servers},
  booktitle = {Proceedings of the 12th ACM Conference on Computer and Communications Security (CCS)},
  location = {Alexandria, VA, USA},
  month = {November},
  year = {2005},
  pages = {213--222},
  isbn = {1-59593-226-7},
  doi = {http://doi.acm.org/10.1145/1102120.1102150},
  abstract = {Large-scale attacks, such as those launched by worms and zombie farms, pose a serious threat to our network-centric society. Existing approaches such as software patches are simply unable to cope with the volume and speed with which new vulnerabilities are being discovered. In this paper, we develop a new approach that can provide effective protection against a vast majority of these attacks that exploit memory errors in C/C++ programs. Our approach, called COVERS, uses a forensic analysis of a victim server's memory to correlate attacks to inputs received over the network, and \textit{automatically} develop a signature that characterizes inputs that carry attacks. The signatures tend to capture characteristics of the underlying vulnerability (e.g., a message field being too long) rather than the characteristics of an attack, which makes them effective against variants of attacks. Our approach introduces low overheads (under 10\%), does not require access to source code of the protected server, and has successfully generated signatures for the attacks studied in our experiments, without producing false positives. Since the signatures are generated in tens of milliseconds, they can potentially be distributed quickly over the Internet to filter out (and thus stop) fast-spreading worms. Another interesting aspect of our approach is that it can defeat guessing attacks reported against address-space randomization and instruction set randomization techniques. Finally, it increases the capacity of servers to withstand repeated attacks by a factor of 10 or more.}
}
@inproceedings{1106825,
  author = {Zhenkai Liang and R. Sekar},
  title = {Automatic Generation of Buffer Overflow Attack Signatures: An Approach Based on Program Behavior Models},
  booktitle = {Proceedings of the 21st Annual Computer Security Applications Conference (ACSAC)},
  year = {2005},
  pages = {215--224},
  isbn = {0-7695-2461-3},
  doi = {http://dx.doi.org/10.1109/CSAC.2005.12},
  abstract = {Buffer overflows have become the most common target for network-based attacks. They are also the primary mechanism used by worms and other forms of automated attacks. Although many techniques have been developed to prevent server compromises due to buffer overflows, these defenses still lead to server crashes. When attacks occur repeatedly, as is common with automated attacks, these protection mechanisms lead to repeated restarts of the victim application, rendering its service unavailable. To overcome this problem, we develop a new approach that can learn the characteristics of a particular attack, and filter out future instances of the same attack or its variants. By doing so, our approach significantly increases the availability of servers subjected to repeated attacks. The approach is fully automatic, does not require source code, and has low runtime overheads. In our experiments, it was effective against most attacks, and did not produce any false positives.}
}
@inproceedings{Simkhada05,
  author = {Kumar Simkhada and Hiroshi Tsunoda and Yuji Waizumi and Yoshiaki Nemoto},
  title = {Differencing Worm Flows and Normal Flows for Automatic Generation of Worm Signatures},
  booktitle = {Proceedings of the Seventh IEEE International Symposium on Multimedia (ISM)},
  month = {December},
  year = {2005},
  pages = {680--685},
  doi = {http://doi.ieeecomputersociety.org/10.1109/ISM.2005.49},
  abstract = {Internet worms pose a serious threat to networks. Most current Intrusion Detection Systems (IDSs) take signature matching approach to detect worms. Given the fact that most signatures are developed manually, generating new signatures for each variant of a worm incurs significant overhead. In this paper, we propose a difference-based scheme which differences worm flows and normal flows to generate robust worm signatures. The proposed scheme is based on two observational facts - worm flows contain several invariant portions in their payloads, and core worm codes do not exist in normal flows. It uses samples of worm flows detected by available means to extract common tokens. It then differences the set of these tokens with those of normal flows and generates signature candidates. By using such signatures within enterprises, out of reach of worm writers, the possibility of being tricked by worm writers can be reduced. We evaluate the proposed scheme using real network traffic traces that contains worms. Experiment results show that the proposed scheme exhibits high detection rate with low false positives.}
}
@article{Atighetchi2004,
  author = {Michael Atighetchi and Partha Pal and Franklin Webber and Richard Schantz and Christopher Jones and Joseph Loyall},
  title = {Adaptive Cyberdefense for Survival and Intrusion Tolerance},
  journal = {IEEE Internet Computing},
  year = {2004},
  volume = {8},
  number = {6},
  pages = {25--33},
  month = {November/December},
  doi = {http://doi.ieeecomputersociety.org/10.1109/MIC.2004.54},
  keywords = {Homeland security, fault-tolerance, intrusion detection, middleware},
  abstract = {While providing some resistance against cyberattacks, current approaches to securing networked and distributed information systems are mainly concerned with static prevention measures. For example, signature-based systems can only detect known attacks and tend to provide brittle, all-or-nothing protection. New work in survivability and intrusion tolerance focuses on augmenting existing information systems with adaptive defenses. A middleware-based survivability toolkit lets applications use network- and host-based mechanisms in their own defense.}
}
@inproceedings{1036923,
  author = {Angelos D. Keromytis and Janak Parekh and Philip N. Gross and Gail Kaiser and Vishal Misra and Jason Nieh and Dan Rubenstein and Sal Stolfo},
  title = {A holistic approach to service survivability},
  booktitle = {Proceedings of the 2003 ACM Workshop on Survivable and Self-Regenerative Systems (SSRS)},
  year = {2003},
  pages = {11--22},
  isbn = {1-58113-784-2},
  doi = {http://doi.acm.org/10.1145/1036921.1036923},
  abstract = {We present SABER (Survivability Architecture: Block, Evade, React), a proposed survivability architecture that blocks, evades and reacts to a variety of attacks by using several security and survivability mechanisms in an automated and coordinated fashion. Contrary to the ad hoc manner in which contemporary survivable systems are built-using isolated, independent security mechanisms such as firewalls, intrusion detection systems and software sandboxes-SABER integrates several different technologies in an attempt to provide a unified framework for responding to the wide range of attacks malicious insiders and outsiders can launch. \par This coordinated multi-layer approach will be capable of defending against attacks targeted at various levels of the network stack, such as congestion-based DoS attacks, software-based DoS or code-injection attacks, and others. Our fundamental insight is that while multiple lines of defense are useful, most conventional, uncoordinated approaches fail to exploit the full range of available responses to incidents. By coordinating the response, the ability to survive successful security breaches increases substantially. \par We discuss the key components of SABER, how they will be integrated together, and how we can leverage on the promising results of the individual components to improve survivability in a variety of coordinated attack scenarios. SABER is currently in the prototyping stages, with several interesting open research topics.}
}
@article{358455,
  author = {John F. Shoch and Jon A. Hupp},
  title = {The ``worm'' programs---early experience with a distributed computation},
  journal = {Communications of the ACM},
  volume = {25},
  number = {3},
  pages = {172--180},
  year = {1982},
  issn = {0001-0782},
  doi = {http://doi.acm.org/10.1145/358453.358455},
  abstract = {The ``worm'' programs were an experiment in the development of distributed computations: programs that span machine boundaries and also replicate themselves in idle machines. A ``worm'' is composed of multiple ``segments,'' each running on a different machine. The underlying worm maintenance mechanisms are responsible for maintaining the worm---finding free machines when needed and replicating the program for each additional segment. These techniques were successfully used to support several real applications, ranging from a simple multimachine test program to a more sophisticated real-time animation system harnessing multiple machines.}
}
@inproceedings{brumleyASIACCS:2006,
  author = {David Brumley and Li-Hao Liu and Pongsin Poosankam and Dawn Song},
  title = {Design Space and Analysis of Worm Defense Strategies},
  booktitle = {Proceedings of the 2006 {ACM} Symposium on Information, Computer, and Communication Security ({ASIACCS} 2006)},
  month = {March},
  year = {2006},
  pdf = {http://www.cs.cmu.edu/~dbrumley/pubs/asiaccs06.pdf},
  abstract = {We give the first systematic investigation of the design space of worm defense system strategies. We accomplish this by providing a taxonomy of defense strategies by abstracting away implementation-dependent and approach-specific details and concentrating on the fundamental properties of each defense category. Our taxonomy and analysis reveals the key parameters for each strategy that determine its effectiveness. We provide a theoretical foundation for understanding how these parameters interact, as well as simulation-based analysis of how these strategies compare as worm defense systems. Finally, we offer recommendations based upon our taxonomy and analysis on which worm defense strategies are most likely to succeed. In particular, we show that a hybrid approach combining Proactive Protection and Reactive Antibody Defense is the most promising approach and can be effective even against the fastest worms such as hitlist worms. Thus, we are the first to demonstrate that it is possible to defend against the fastest worms such as hitlist worms.}
}
@inproceedings{Cooke+05,
  author = {Evan Cooke and Farnam Jahanian and Danny McPherson},
  pdf = {http://www.arbornetworks.com/downloads/research130/sruti05_final.pdf},
  http = {http://www.usenix.org/events/sruti05/tech/cooke.html},
  booktitle = {Proceedings of the Workshop on Steps to Reducing Unwanted Traffic on the Internet (SRUTI)},
  keywords = {botnets, honeypots, security},
  month = {June},
  pages = {39--44},
  title = {The Zombie Roundup: Understanding, Detecting, and Disrupting Botnets},
  year = {2005},
  abstract = {Global Internet threats are undergoing a profound transformation from attacks designed solely to disable infrastructure to those that also target people and organizations. Behind these new attacks is a large pool of compromised hosts sitting in homes, schools, businesses, and governments around the world. These systems are infected with a \textbf{bot} that communicates with a bot \textbf{controller} and other bots to form what is commonly referred to as a \textbf{zombie army} or \textbf{botnet}. Botnets are a very real and quickly evolving problem that is still not well understood or studied. In this paper we outline the origins and structure of bots and botnets and use data from the operator community, the Internet Motion Sensor project, and a honeypot experiment to illustrate the botnet problem today. We then study the effectiveness of detecting botnets by directly monitoring IRC communication or other command and control activity and show a more comprehensive approach is required. We conclude by describing a system to detect botnets that utilize advanced command and control systems by correlating secondary detection data from multiple sources.}
}
@misc{honeynet-project-site,
  key = {German Honeynet Project},
  title = {Know your Enemy: Tracking Botnets},
  month = {March},
  year = {2005},
  note = {\url{http://www.honeynet.org/papers/bots/}}
}
@inproceedings{Yegneswaran+04,
  author = {Vinod Yegneswaran and Paul Barford and David Plonka},
  title = {On the Design and Use of {I}nternet Sinks for Network Abuse Monitoring},
  booktitle = {Proceedings of the 7th International Symposium Recent Advances in Intrusion Detection (RAID)},
  series = {Lecture Notes in Computer Science},
  volume = {3224},
  month = {January},
  year = {2004},
  pages = {146--165},
  doi = {http://dx.doi.org/10.1007/b100714},
  keywords = {Intrusion Detection, Honeypots, Deception Systems},
  abstract = {Monitoring \textit{unused} or \textit{dark} IP addresses offers opportunities to significantly improve and expand knowledge of abuse activity without many of the problems associated with typical network intrusion detection and firewall systems. In this paper, we address the problem of designing and deploying a system for monitoring large unused address spaces such as class A telescopes with 16M IP addresses. We describe the architecture and implementation of the Internet Sink (iSink) system which measures packet traffic on unused IP addresses in an efficient, extensible and scalable fashion. In contrast to traditional intrusion detection systems or firewalls, iSink includes an \textit{active} component that generates response packets to incoming traffic. This gives the iSink an important advantage in discriminating between different types of attacks (through examination of the response payloads). The key feature of iSink's design that distinguishes it from other unused address space monitors is that its active response component is \textit{stateless} and thus highly scalable. We report performance results of our iSink implementation in both controlled laboratory experiments and from a case study of a live deployment. Our results demonstrate the efficiency and scalability of our implementation as well as the important perspective on abuse activity that is afforded by its use.}
}
@inproceedings{Cheetancheri+2006,
  author = {Senthil Cheetancheri and Denys Ma and Todd Heberlein and Karl Levitt},
  title = {Towards an infrastructure for worm defense evaluation},
  booktitle = {Proceedings of the 25th International Performance Computing and Communications Conference (Workshop on Malware)},
  month = {April},
  year = {2006},
  pages = {559--566}
}
@article{939954,
  author = {David Moore and Vern Paxson and Stefan Savage and Colleen Shannon and Stuart Staniford and Nicholas Weaver},
  title = {Inside the {Slammer} Worm},
  journal = {IEEE Security and Privacy},
  volume = {1},
  number = {4},
  month = {July--August},
  year = {2003},
  pages = {33--39},
  issn = {1540-7993},
  doi = {http://doi.ieeecomputersociety.org/10.1109/MSECP.2003.1219056},
  abstract = {The Slammer worm, also sometimes known as Sapphire, was the fastest worm in history, achieving a peak scanning rate of 55 million scans per second. This time, this new worm breed had internal programming flaws and a benign payload, but what about next time?}
}
@inproceedings{haeberlen-2006-fallacies,
  author = {Andreas Haeberlen and Alan Mislove and Ansley Post and Peter Druschel},
  title = {Fallacies in evaluating decentralized systems},
  booktitle = {Proceedings of the 5th International Workshop on Peer-to-Peer Systems (IPTPS)},
  location = {Santa Barbara, CA},
  year = {2006},
  month = {February},
  http = {http://www.mpi-sws.mpg.de/~ahae/abstracts/fallacies.html},
  abstract = {Research on decentralized systems such as peer-to-peer overlays and ad hoc networks has been hampered by the fact that few systems of this type are in production use, and the space of possible applications is still poorly understood. As a consequence, new ideas have mostly been evaluated using common synthetic workloads, traces from a few existing systems, testbeds like PlanetLab, and simulators like ns-2. Some of these methods have, in fact, become the ``gold standard'' for evaluating new systems, and are often a prerequisite for getting papers accepted at top conferences in the field. In this paper, we examine the current practice of evaluating decentralized systems under these specific sets of conditions and point out pitfalls associated with this practice. In particular, we argue that (i) despite authors' best intentions, results from such evaluations often end up being inappropriately generalized; (ii) there is an incentive not to deviate from the accepted standard of evaluation, even if that is technically appropriate; (iii) research may gravitate towards systems that are feasible and perform well when evaluated in the accepted environments; and, (iv) in the worst-case, research may become ossified as a result. We close with a call to action for the community to develop tools, data, and best practices that allow systems to be evaluated across a space of workloads and environments.}
}
@inproceedings{Ganesh+06,
  author = {Ayalvadi Ganesh and Dinan Gunawardena and Peter Key and Laurent Massouli{\'e} and Jacob Scott},
  title = {Efficient quarantining of scanning worms: optimal detection and coordination},
  booktitle = {Proceedings of the IEEE INFOCOM},
  month = {April},
  year = {2006},
  http = {http://www.research.microsoft.com/users/ajg/PID158565.pdf}
}

This file was generated by bibtex2html 1.96.