ztatlock.bib

@inproceedings{POPL_18_1,
  author = {Sergey, Ilya and
               Wilcox, James R. and
               Tatlock, Zachary},
  title = {Programming and Proving with Distributed Protocols},
  booktitle = {45th Annual ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages},
  series = {POPL '18},
  publisher = {ACM},
  year = {2018},
  paper = {disel-sergey-popl18.pdf}
}
@inproceedings{ICFP_17_1,
  author = {Weitz, Konstantin and
               Lyubomirsky, Steven and
               Heule, Stefan and
               Torlak, Emina and
               Ernst, Michael D. and
               Tatlock, Zachary},
  title = {SpaceSearch: A Library for Building and Verifying Solver-Aided Tools},
  booktitle = {2017 ACM SIGPLAN International Conference on Functional Programming},
  series = {ICFP '17},
  publisher = {ACM},
  year = {2017}
}
@inproceedings{SNAPL_17_1,
  author = {Nandi, Chandrakana and
              Caspi, Anat and
              Grossman, Dan and
              Tatlock, Zachary},
  title = {Programming Language Tools and Techniques for {3D} Printing},
  booktitle = {2nd Summit on Advances in Programming Languages},
  series = {SNAPL '17},
  year = {2017}
}
@inproceedings{COQPL_17_1,
  author = {Doenges, Ryan and
                   Wilcox, James R. and
                   Woos, Doug and
                   Tatlock, Zachary and
                   Palmskog, Karl},
  title = {Verification of Implementations of Distributed Systems Under Churn},
  booktitle = {3rd International Workshop on Coq for Programming Languages},
  series = {CoqPL '17},
  publisher = {ACM},
  year = {2017}
}
@inproceedings{OOPSLA_16_1,
  author = {Weitz, Konstantin and
               Woos, Doug and
               Torlak, Emina and
               Ernst, Michael D. and
               Krishnamurthy, Arvind and
               Tatlock, Zachary},
  title = {Scalable Verification of Border Gateway Protocol Configurations with an SMT Solver},
  booktitle = {2016 ACM SIGPLAN Conference on Object-Oriented Programming, Systems, Languages, and Applications},
  series = {OOPSLA '16},
  year = {2016},
  paper = {bagpipe-weitz-oopsla16.pdf},
  project = {http://bagpipe.uwplse.org/bagpipe/},
  abstract = {Internet Service Providers (ISPs) use the Border Gateway
               Protocol (BGP) to announce and exchange routes for
               delivering packets through the internet. ISPs must
               carefully configure their BGP routers to ensure traffic is
                routed reliably and securely. Correctly configuring BGP
               routers has proven challenging in practice, and
               misconfiguration has led to worldwide outages and traffic
                hijacks.

                This paper presents Bagpipe, a system that
               enables ISPs to declaratively express BGP policies and
               that automatically verifies that router configurations
               implement such policies. The novel initial network
               reduction soundly reduces policy verification to a search
               for counterexamples in a finite space. An SMT-based
               symbolic execution engine performs this search
               efficiently. Bagpipe reduces the size of its search
               space using predicate abstraction and parallelizes its
                search using symbolic variable hoisting.

               Bagpipe’s policy specification language is expressive:
               we expressed policies inferred from real AS
               configurations, policies from the literature, and
               policies for 10 Juniper TechLibrary configuration
               scenarios. Bagpipe is efficient: we ran it on three
               ASes with a total of over 240,000 lines of Cisco and
               Juniper BGP configuration. Bagpipe is effective: it
               revealed 19 policy violations without issuing any
               false positives.}
}
@inproceedings{NSV_16_1,
  author = {Nasrine Damouche and
               Matthieu Martel and
               Pavel Panchekha and
               Chen Qiu and
               Alexander Sanchez-Stern and
               Zachary Tatlock},
  title = {Toward a Standard Benchmark Format and Suite for Floating-Point Analysis},
  booktitle = {9th International Workshop on Numerical Software Verification},
  series = {NSV '16},
  year = {2016},
  paper = {fpbench-damouche-nsv16.pdf},
  project = {http://fpbench.org},
  talk = {https://www.youtube.com/watch?v=SRE2Gky381M},
  slides = {fpbench-damouche-nsv16-slides.pdf},
  abstract = {We introduce FPBench, a standard benchmark format for
               validation and optimization of numerical accuracy in floating-point
               computations. FPBench is a first step toward addressing an increasing
               need in our community for comparisons and combinations of tools from
               different application domains. To this end, FPBench provides a basic
                floating-point benchmark format and accuracy measures for comparing
               different tools. The FPBench format and measures allow comparing and
               composing different floating-point tools. We describe the FPBench
               format and measures and show that FPBench expresses benchmarks from
               recent papers in the literature, by building an initial benchmark
               suite drawn from these papers. We intend for FPBench to grow into a
               standard benchmark suite for the members of the floating-point tools
               research community.}
}
@inproceedings{NETPL_16_1,
  author = {Konstantin Weitz and
               Doug Woos and
               Emina Torlak and
               Michael D. Ernst and
               Arvind Krishnamurthy and
               Zachary Tatlock},
  title = {Formal Semantics and Automated Verification for the Border Gateway Protocol},
  booktitle = {2016 ACM SIGCOMM Workshop on Networking and Programming Languages},
  series = {NetPL '16},
  year = {2016},
  paper = {bagpipe-weitz-netpl16.pdf},
  project = {http://bagpipe.uwplse.org/},
  abstract = {We present the first mechanized formal semantics of BGP
               based on the BGP specification RFC 4271, and we show how to use
               this semantics to develop reliable tools and guidelines that help BGP
               administrators avoid router misconfiguration. In contrast to previous
               semantics, our semantics is fully formal (it is
               implemented in the Coq proof assistant), and it models all
               required features of the BGP specification modulo low-level details
                such as bit representation of update messages and TCP.

                To provide evidence for the correctness and usefulness of our semantics:
               1) we have built the Bagpipe tool which automatically checks that BGP
               configurations adhere to given policy specifications, revealing 19
               apparent errors in three ASes with over 240,000 lines of BGP
               configuration; 2) we have tested the BGP simulator C-BGP, revealing
               one bug; and 3) we are currently extending and formalizing the
               pen-and-paper proof by Gao \& Rexford on the convergence of BGP,
               revealing necessary extensions to Gao \& Rexford’s original
               configuration guidelines.}
}
@inproceedings{CAV_16_1,
  author = {Stuart Pernsteiner and
               Calvin Loncaric and
               Emina Torlak and
               Zachary Tatlock and
               Xi Wang and
               Michael D. Ernst and
               Jonathan Jacky},
  title = {Investigating Safety of a Radiotherapy Machine Using System Models with Pluggable Checkers},
  booktitle = {28th International Conference on Computer Aided Verification},
  series = {CAV '16},
  publisher = {Springer},
  year = {2016},
  paper = {neutrons-pernsteiner-cav16.pdf},
  project = {http://neutrons.uwplse.org/},
  abstract = {Formal techniques for guaranteeing software correctness have made tremendous progress in recent decades.
                  However, applying these techniques to existing, real-world safety-critical systems remains challenging
                  in practice.  Inspired by goals set out in prior work, we report on a large-scale case study that
                  applies modern verification techniques to check safety properties of a radiotherapy system in current
                  clinical use.  Because of the diversity and complexity of system components (software, hardware, and
                  physical), no single tool was suitable for checking critical cross-component safety properties.  This
                  paper describes how we used state-of-the-art approaches to develop specialized tools for checking
                  different components and an extensible Safety Case Checker (SCC) for composing the properties checked
                  for each component.  We describe the key design decisions that diverged from previous approaches and
                  that enabled us to practically apply our approach to provide machine-checked guarantees.  Our case
                  study uncovered subtle safety-critical flaws in a pre-release of the latest version of the
                  radiotherapy system's control software.}
}
@inproceedings{PLDI_16_1,
  author = {Mullen, Eric and
               Zuniga, Daryl and
               Tatlock, Zachary and
               Grossman, Dan},
  title = {Verified Peephole Optimizations for CompCert},
  booktitle = {2016 ACM SIGPLAN Conference on Programming Language Design and Implementation},
  series = {PLDI '16},
  publisher = {ACM},
  year = {2016},
  paper = {peek-mullen-pldi16.pdf},
  project = {http://peek.uwplse.org/},
  abstract = {Transformations over assembly code are common in many compilers. These transformations are also some of
                  the most bug-dense compiler components. Such bugs could be eliminated by formally verifying the
                  compiler, but state-of-the-art formally verified compilers like CompCert do not support assembly-level
                  program transformations. This paper presents Peek, a framework for expressing, verifying, and running
                  meaning-preserving assembly-level program transformations in CompCert. Peek contributes four new
                  components: a lower level semantics for CompCert x86 syntax, a liveness analysis, a library for
                  expressing and verifying peephole optimizations, and a verified peephole optimization pass built into
                  CompCert. Each of these is accompanied by a correctness proof in Coq against realistic assumptions
                   about the calling convention and the system memory allocator.

                   Verifying peephole optimizations in
                  Peek requires proving only a set of local properties, which we have proved are sufficient to ensure
                  global transformation correctness. We have proven these local properties for 28 peephole
                  transformations from the literature. We discuss the development of our new assembly semantics,
                  liveness analysis, representation of program transformations, and execution engine; describe the
                  verification challenges of each component; and detail techniques we applied to mitigate the proof
                  burden.}
}
@inproceedings{CPP_16_1,
  author = {Wilcox, James R. and
               Woos, Doug and
               Anton, Steve and
               Tatlock, Zachary and
               Ernst, Michael D. and
               Anderson, Tom},
  title = {Planning for Change in a Formal Verification of the Raft Consensus Protocol},
  booktitle = {5th ACM SIGPLAN Conference on Certified Programs and Proofs},
  series = {CPP '16},
  year = {2016},
  paper = {verdi-woos-cpp16.pdf},
  slides = {verdi-woos-cpp16-slides.pdf},
  puburl = {http://dl.acm.org/citation.cfm?id=2854081},
  project = {http://verdi.uwplse.org/},
  abstract = {We present the first formal verification of state machine safety for the Raft consensus protocol, a
                  critical component of many distributed systems. We connected our proof to previous work to establish
                  an end-to-end guarantee that our implementation provides linearizable state machine replication. This
                  proof required iteratively discovering and proving 90 system invariants. Our verified implementation
                   is extracted to OCaml and runs on real networks.

                   The primary challenge we faced during the
                  verification process was proof maintenance, since proving one invariant often required strengthening
                  and updating other parts of our proof. To address this challenge, we propose a methodology of planning
                  for change during verification. Our methodology adapts classical information hiding techniques to the
                  context of proof assistants, factors out common invariant-strengthening patterns into custom induction
                  principles, proves higher-order lemmas that show any property proved about a particular component
                  implies analogous properties about related components, and makes proofs robust to change using
                  structural tactics. We also discuss how our methodology may be applied to systems verification more
                  broadly.}
}
@inproceedings{SNAPL_15_1,
  author = {Michael D. Ernst and
               Dan Grossman and
               Jon Jacky and
               Calvin Loncaric and
               Stuart Pernsteiner and
               Zachary Tatlock and
               Emina Torlak and
               Xi Wang},
  title = {Toward a Dependability Case Language and Workflow for a Radiation Therapy System},
  booktitle = {1st Summit on Advances in Programming Languages},
  series = {SNAPL '15},
  year = {2015},
  paper = {neutrons-ernst-snapl15.pdf},
  slides = {neutrons-ernst-snapl15-slides.pdf},
  puburl = {http://snapl.org/2015/abstracts/full/Ernst.html},
  project = {http://neutrons.uwplse.org/},
  abstract = {We present a near-future research agenda for bringing a suite of modern programming-languages
                  verification tools—specifically interactive theorem proving, solver-aided languages, and formally
                  defined domain-specific languages—to the development of a specific safety-critical system, a
                  radiotherapy medical device. We sketch how we believe recent programming-languages research advances
                  can merge with existing best practices for safety-critical systems to increase system assurance and
                   developer productivity. We motivate hypotheses central to our agenda: that we should start with a
                  single specific system and that we need to integrate a variety of complementary verification and
                  synthesis tools into system development.}
}
@inproceedings{PLDI_15_1,
  author = {Panchekha, Pavel and
               Sanchez-Stern, Alex and
               Wilcox, James R. and
               Tatlock, Zachary},
  title = {Automatically Improving Accuracy for Floating Point Expressions},
  booktitle = {2015 ACM SIGPLAN Conference on Programming Language Design and Implementation},
  series = {PLDI '15},
  publisher = {ACM},
  year = {2015},
  paper = {herbie-panchekha-pldi15.pdf},
  talk = {https://www.youtube.com/watch?v=RNzsvp6NLOY},
  slides = {herbie-panchekha-pldi15-slides.pdf},
  poster = {herbie-panchekha-pldi15-poster.pdf},
  video = {https://www.youtube.com/watch?v=qnkElmpTtBw},
  puburl = {http://dl.acm.org/citation.cfm?id=2737959},
  project = {http://herbie.uwplse.org/},
  abstract = {Scientific and engineering applications depend on floating point arithmetic to approximate real
                  arithmetic. This approximation introduces rounding error, which can accumulate to produce unacceptable
                  results. While the numerical methods literature provides techniques to mitigate rounding error,
                  applying these techniques requires manually rearranging expressions and understanding the finer
                   details of floating point arithmetic.

                   We introduce Herbie, a tool which automatically improves
                  floating point accuracy by searching for error-reducing transformations. Herbie estimates and
                  localizes rounding error, applies a database of rules to generate improvements, takes series
                  expansions, and combines improvements for different input regions. We evaluated Herbie on every
                  example from a chapter in a classic numerical methods textbook, and found that Herbie was able to
                  improve accuracy on each example, some by up to 60 bits, while imposing a median performance overhead
                  of 40\%. Colleagues in machine learning have applied Herbie to significantly improve the results of a
                  clustering algorithm, and a mathematical library has accepted a patch generated using Herbie.}
}
@inproceedings{PLDI_15_2,
  author = {Wilcox, James R. and
               Woos, Doug and
               Panchekha, Pavel and
               Tatlock, Zachary and
               Wang, Xi and
               Ernst, Michael D. and
               Anderson, Tom},
  title = {Verdi: A Framework for Implementing and Formally Verifying Distributed Systems},
  booktitle = {2015 ACM SIGPLAN Conference on Programming Language Design and Implementation},
  series = {PLDI '15},
  publisher = {ACM},
  year = {2015},
  paper = {verdi-wilcox-pldi15.pdf},
  slides = {verdi-wilcox-pldi15-slides.pdf},
  puburl = {http://dl.acm.org/citation.cfm?id=2737958},
  project = {http://verdi.uwplse.org/},
  abstract = {Implementing reliable distributed systems is challenging because such systems must tolerate faults
                  gracefully: machines may crash and networks may reorder, drop, or duplicate packets. Bugs in these
                   systems can lead to loss of critical data and unacceptable service outages.

                   We present Verdi, a
                  framework for implementing and formally verifying practical distributed systems in Coq. Verdi
                  formalizes various network semantics with different kinds of faults, and allows developers to choose
                  the most appropriate fault model when verifying their implementation. Furthermore, Verdi eases the
                  verification burden by enabling developers to construct systems from modular components. This
                  separation of concerns eases reasoning because the developer can verify their system in an idealized
                  fault model, then transfer the resulting correctness guarantees to a more realistic fault model
                  without any additional proof burden. To demonstrate Verdi’s utility, we present a series of case
                  studies including a key-value store and a primary-backup replication mechanism.}
}
@inproceedings{HRI_15_1,
  author = {Sonya Alexandrova and
               Zachary Tatlock and
               Maya Cakmak},
  title = {Visual Robot Programming for Generalizable Mobile Manipulation Tasks},
  booktitle = {Tenth Annual {ACM/IEEE} International Conference on Human-Robot Interaction},
  series = {HRI '15},
  publisher = {ACM},
  year = {2015},
  paper = {alexandrova-roboflow-hri15.pdf},
  puburl = {http://doi.acm.org/10.1145/2701973.2702052},
  project = {https://homes.cs.washington.edu/~ztatlock/roboflow/},
  abstract = {General-purpose robots present the opportunity to be
               programmed for a specific purpose after deployment. This requires tools
               for end-users to quickly and intuitively program robots to perform
               useful tasks in new environments. In this paper, we present a
               flow-based visual programming language (VPL) for mobile
               manipulation tasks, demonstrate the generalizability of tasks
               programmed in this VPL, and present a preliminary user study of a
               development tool for this VPL.}
}
@inproceedings{ICRA_15_1,
  author = {Alexandrova, Sonya and Tatlock, Zachary and Cakmak, Maya},
  title = {Robo{F}low: A Flow-based Visual Programming Language for Mobile Manipulation Tasks},
  booktitle = {2015 IEEE International Conference on Robotics and Automation},
  series = {ICRA '15},
  publisher = {IEEE},
  year = {2015},
  paper = {alexandrova-roboflow-icra15.pdf},
  project = {https://homes.cs.washington.edu/~ztatlock/roboflow/},
  abstract = {General-purpose robots can perform a range of useful tasks in human environments; however, programming
                  them to robustly function in all possible environments that they might encounter is infeasible.
                  Instead, our research aims to develop robots that can be programmed by their end-users in their context
                  of use, so that the robot needs to robustly function in only one particular environment.  This
                  requires intuitive ways in which end-users can program their robot.  To that end, this paper
                  contributes a flow-based visual programming language, called RoboFlow, that allows programming of
                  generalizable mobile manipulation tasks.  RoboFlow is designed to (i) ensure a robust low-level
                  implementation of program procedures on a mobile manipulator, and (ii) restrict the high-level
                  programming as much as possible to avoid user errors while enabling expressive programs that involve
                  branching, looping, and nesting.  We present an implementation of RoboFlow on a PR2 mobile manipulator
                  and demonstrate the generalizability and error handling properties of RoboFlow programs on everyday
                  mobile manipulation tasks in human environments.}
}
@inproceedings{COQPL_15_1,
  author = {Mullen, Eric and Tatlock, Zachary and Grossman, Dan},
  title = {Peek: A Formally Verified Peephole Optimization Framework for x86},
  booktitle = {1st International Workshop on Coq for Programming Languages},
  series = {CoqPL '15},
  publisher = {ACM},
  year = {2015},
  paper = {peek-mullen-coqpl15.pdf},
  project = {http://peek.uwplse.org/},
  abstract = {Peek is a first step toward adding support for assembly-level program analyses, transformations, and
                  optimizations in CompCert. Currently, Peek focuses on x86 peephole transformations implemented and
                  verified in Coq. Peek is designed to provide a modular interface requiring that each peephole
                  optimization satisfy only local correctness properties.  Our primary result establishes that, assuming
                  the C calling convention, any peephole optimization satisfying these local properties preserves global
                  program meaning.}
}
@inproceedings{OSDI_14_1,
  author = {Wang, Xi and Lazar, David and Zeldovich, Nickolai and Chlipala, Adam and Tatlock, Zachary},
  title = {Jitk: A Trustworthy In-Kernel Interpreter Infrastructure},
  booktitle = {11th USENIX Symposium on Operating Systems Design and Implementation},
  series = {OSDI '14},
  publisher = {USENIX Association},
  year = {2014},
  paper = {jitk-wang-osdi14.pdf},
  slides = {jitk-wang-osdi14-slides.pdf},
  talk = {jitk-wang-osdi14-talk.mp4},
  puburl = {http://blogs.usenix.org/conference/osdi14/technical-sessions/presentation/wang_xi},
  project = {http://css.csail.mit.edu/jitk/},
  abstract = {Modern operating systems run multiple interpreters in the kernel, which enable user-space applications
                  to add new functionality or specialize system policies. The correctness of such interpreters is
                  critical to the overall system security: bugs in interpreters could allow adversaries to compromise
                   user-space applications and even the kernel.

                   Jitk is a new infrastructure for building in-kernel
                  interpreters that guarantee functional correctness as they compile user-space policies down to native
                  instructions for execution in the kernel. To demonstrate Jitk, we implement two interpreters in the
                  Linux kernel, BPF and INET-DIAG, which are used for network and system call filtering and socket
                  monitoring, respectively. To help application developers write correct filters, we introduce a
                  high-level rule language, along with a proof that Jitk correctly translates high-level rules all the
                  way to native machine code, and demonstrate that this language can be integrated into OpenSSH with
                  tens of lines of code. We built a prototype of Jitk on top of the CompCert verified compiler and
                  integrated it into the Linux kernel. Experimental results show that Jitk is practical, fast, and
                  trustworthy.}
}
@inproceedings{PLDI_14_1,
  author = {Ricketts, Daniel and Robert, Valentin and Jang, Dongseok and Tatlock, Zachary and Lerner, Sorin},
  title = {Automating Formal Proofs for Reactive Systems},
  booktitle = {2014 ACM SIGPLAN Conference on Programming Language Design and Implementation},
  series = {PLDI '14},
  publisher = {ACM},
  year = {2014},
  paper = {reflex-ricketts-pldi14.pdf},
  talk = {https://www.youtube.com/watch?v=phwVo66aChc},
  project = {http://goto.ucsd.edu/reflex/},
  abstract = {Implementing systems in proof assistants like Coq and proving their correctness in full formal detail
                  has consistently demonstrated promise for making extremely strong guarantees about critical software,
                  ranging from compilers and operating systems to databases and web browsers.  Unfortunately, these
                  verifications demand such heroic manual proof effort, even for a single system, that the approach has
                   not been widely adopted.

                   We demonstrate a technique to eliminate the manual proof burden for
                  verifying many properties within an entire class of applications, in our case reactive systems, while
                  only expending effort comparable to the manual verification of a single system.  A crucial insight of
                  our approach is simultaneously designing both (1) a domain-specific language (DSL) for expressing
                  reactive systems and their correctness properties and (2) proof automation which exploits the
                  constrained language of both programs and properties to enable fully automatic, pushbutton
                  verification.  We apply this insight in a deeply embedded Coq DSL, dubbed Reflex, and illustrate
                  Reflex's expressiveness by implementing and automatically verifying realistic systems including a
                  modern web browser, an SSH server, and a web server.  Using Reflex radically reduced the proof burden:
                  in previous, similar versions of our benchmarks written in Coq by experts, proofs accounted for over
                  80\% of the code base; our versions require no manual proofs.}
}
@inproceedings{NDSS_14_1,
  author = {Jang, Dongseok and Tatlock, Zachary and Lerner, Sorin},
  title = {Safe{D}ispatch: Securing {C++} Virtual Calls from Memory Corruption Attacks},
  booktitle = {21st Annual Network and Distributed System Security Symposium},
  series = {NDSS '14},
  publisher = {The Internet Society},
  year = {2014},
  paper = {sd-jang-ndss14.pdf},
  puburl = {http://www.internetsociety.org/doc/safedispatch-securing-c-virtual-calls-memory-corruption-attacks},
  abstract = {Several defenses have increased the cost of traditional, low-level attacks that corrupt control data,
                  e.g. return addresses saved on the stack, to compromise program execution.  In response, creative
                  adversaries have begun circumventing these defenses by exploiting programming errors to manipulate
                  pointers to virtual tables, or vtables, of C++ objects. These attacks can hijack program control flow
                  whenever a virtual method of a corrupted object is called, potentially allowing the attacker to gain
                  complete control of the underlying system.  In this paper we present SafeDispatch, a novel defense to
                  prevent such vtable hijacking by statically analyzing C++ programs and inserting sufficient runtime
                  checks to ensure that control flow at virtual method call sites cannot be arbitrarily influenced by an
                  attacker.  We implemented SafeDispatch as a Clang++/LLVM extension, used our enhanced compiler to
                  build a vtable-safe version of the Google Chromium browser, and measured the performance overhead of
                  our approach on popular browser benchmark suites.  By carefully crafting a handful of optimizations,
                  we were able to reduce average runtime overhead to just 2.1\%.}
}
@inproceedings{SECURITY_12_1,
  author = {Jang, Dongseok and Tatlock, Zachary and Lerner, Sorin},
  title = {Establishing Browser Security Guarantees Through Formal Shim Verification},
  booktitle = {21st USENIX Security Symposium},
  series = {SECURITY '12},
  publisher = {USENIX Association},
  year = {2012},
  paper = {quark-jang-usenixsec12.pdf},
  talk = {http://www.youtube.com/watch?v=QrqkVTEJ_vY},
  slides = {quark-jang-usenixsec12-slides.pdf},
  poster = {quark-jang-usenixsec12-poster.pdf},
  project = {http://goto.ucsd.edu/quark/},
  puburl = {https://www.usenix.org/conference/usenixsecurity12/establishing-browser-security-guarantees-through-formal-shim},
  abstract = {Web browsers mediate access to valuable private data in domains ranging from health care to banking.
                  Despite this critical role, attackers routinely exploit browser vulnerabilities to exfiltrate private
                  data and take over the underlying system.  We present QUARK, a browser whose kernel has been
                  implemented and verified in Coq.  We give a specification of our kernel, show that the implementation
                  satisfies the specification, and finally show that the specification implies several security
                  properties, including tab non-interference, cookie integrity and confidentiality, and address bar
                  correctness.}
}
@article{LMCS_11_1,
  author = {Tate, Ross and Stepp, Michael and Tatlock, Zachary and Lerner, Sorin},
  title = {Equality Saturation: A New Approach to Optimization},
  journal = {Logical Methods in Computer Science},
  volume = {7},
  number = {1},
  series = {LMCS '11},
  year = {2011},
  paper = {eqsat-tate-lmcs11.pdf},
  project = {http://www.cs.cornell.edu/~ross/publications/eqsat/},
  puburl = {http://dx.doi.org/10.2168/LMCS-7(1:10)2011},
  abstract = {Optimizations in a traditional compiler are applied sequentially, with each optimization destructively
                  modifying the program to produce a transformed program that is then passed to the next optimization.
                  We present a new approach for structuring the optimization phase of a compiler.  In our approach,
                  optimizations take the form of equality analyses that add equality information to a common
                  intermediate representation.  The optimizer works by repeatedly applying these analyses to infer
                  equivalences between program fragments, thus saturating the intermediate representation with
                  equalities.  Once saturated, the intermediate representation encodes multiple optimized versions of
                  the input program.  At this point, a profitability heuristic picks the final optimized program from
                  the various programs represented in the saturated representation.  Our proposed way of structuring
                  optimizers has a variety of benefits over previous approaches: our approach obviates the need to worry
                  about optimization ordering, enables the use of a global optimization heuristic that selects among
                  fully optimized programs, and can be used to perform translation validation, even on compilers other
                  than our own.  We present our approach, formalize it, and describe our choice of intermediate
                  representation.  We also present experimental results showing that our approach is practical in terms
                  of time and space overhead, is effective at discovering intricate optimization opportunities, and is
                  effective at performing translation validation for a realistic optimizer.}
}
@inproceedings{PLDI_10_1,
  author = {Tatlock, Zachary and Lerner, Sorin},
  title = {Bringing Extensibility to Verified Compilers},
  booktitle = {2010 ACM SIGPLAN Conference on Programming Language Design and Implementation},
  series = {PLDI '10},
  publisher = {ACM},
  year = {2010},
  paper = {xcert-tatlock-pldi10.pdf},
  slides = {xcert-tatlock-pldi10-slides.pptx},
  puburl = {http://dx.doi.org/10.1145/1806596.1806611},
  abstract = {Verified compilers, such as Leroy's CompCert, are accompanied by a fully checked correctness
                  proof. Both the compiler and proof are often constructed with an interactive proof assistant. This
                  technique provides a strong, end-to-end correctness guarantee on top of a small trusted computing
                  base. Unfortunately, these compilers are also challenging to extend since each additional
                   transformation must be proven correct in full formal detail.

                   At the other end of the spectrum,
                  techniques for compiler correctness based on a domain-specific language for writing optimizations,
                  such as Lerner's Rhodium and Cobalt, make the compiler easy to extend: the correctness of additional
                  transformations can be checked completely automatically. Unfortunately, these systems provide a weaker
                   guarantee since their end-to-end correctness has not been proven fully formally.

                   We present an
                  approach for compiler correctness that provides the best of both worlds by bridging the gap between
                  compiler verification and compiler extensibility. In particular, we have extended Leroy's CompCert
                  compiler with an execution engine for optimizations written in a domain specific language and proved
                  that this execution engine preserves program semantics, using the Coq proof assistant. We present our
                  CompCert extension, XCert, including the details of its execution engine and proof of correctness in
                  Coq. Furthermore, we report on the important lessons learned for making the proof development
                  manageable.}
}
@inproceedings{PLDI_09_1,
  author = {Kundu, Sudipta and Tatlock, Zachary and Lerner, Sorin},
  title = {Proving Optimizations Correct Using Parameterized Program Equivalence},
  booktitle = {2009 ACM SIGPLAN Conference on Programming Language Design and Implementation},
  series = {PLDI '09},
  publisher = {ACM},
  year = {2009},
  paper = {pec-kundu-pldi09.pdf},
  talk = {http://www.youtube.com/watch?v=-ReOCUrHQtM},
  slides = {pec-kundu-pldi09-slides.pptx},
  puburl = {http://dx.doi.org/10.1145/1542476.1542513},
  abstract = {Translation validation is a technique for checking that, after an optimization has run, the input and
                  output of the optimization are equivalent. Traditionally, translation validation has been used to
                  prove concrete, fully specified programs equivalent. In this paper we present Parameterized
                  Equivalence Checking (PEC), a generalization of translation validation that can prove the equivalence
                  of parameterized programs. A parameterized program is a partially specified program that can represent
                  multiple concrete programs. For example, a parameterized program may contain a section of code whose
                  only known property is that it does not modify certain variables. By proving parameterized programs
                  equivalent, PEC can prove the correctness of transformation rules that represent complex optimizations
                  once and for all, before they are ever run. We implemented our PEC technique in a tool that can
                  establish the equivalence of two parameterized programs. To highlight the power of PEC, we designed a
                  language for implementing complex optimizations using many-to-many rewrite rules, and used this
                  language to implement a variety of optimizations including software pipelining, loop unrolling, loop
                  unswitching, loop interchange, and loop fusion. Finally, to demonstrate the effectiveness of PEC, we
                  used our PEC implementation to verify that all the optimizations we implemented in our language
                  preserve program behavior.}
}
@inproceedings{POPL_09_1,
  author = {Tate, Ross and Stepp, Michael and Tatlock, Zachary and Lerner, Sorin},
  title = {Equality Saturation: A New Approach to Optimization},
  booktitle = {36th Annual ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages},
  series = {POPL '09},
  publisher = {ACM},
  year = {2009},
  paper = {eqsat-tate-popl09.pdf},
  project = {http://www.cs.cornell.edu/~ross/publications/eqsat},
  puburl = {http://dl.acm.org/citation.cfm?id=1480915},
  abstract = {Optimizations in a traditional compiler are applied sequentially, with each optimization destructively
                  modifying the program to produce a transformed program that is then passed to the next
                  optimization. We present a new approach for structuring the optimization phase of a compiler. In our
                  approach, optimizations take the form of equality analyses that add equality information to a common
                  intermediate representation. The optimizer works by repeatedly applying these analyses to infer
                  equivalences between program fragments, thus saturating the intermediate representation with
                  equalities. Once saturated, the intermediate representation encodes multiple optimized versions of the
                  input program. At this point, a profitability heuristic picks the final optimized program from the
                  various programs represented in the saturated representation. Our proposed way of structuring
                  optimizers has a variety of benefits over previous approaches: our approach obviates the need to worry
                  about optimization ordering, enables the use of a global optimization heuristic that selects among
                  fully optimized programs, and can be used to perform translation validation, even on compilers other
                  than our own. We present our approach, formalize it, and describe our choice of intermediate
                  representation. We also present experimental results showing that our approach is practical in terms
                  of time and space overhead, is effective at discovering intricate optimization opportunities, and is
                  effective at performing translation validation for a realistic optimizer.}
}
@inproceedings{OOPSLA_08_1,
  author = {Tatlock, Zachary and Tucker, Chris and Shuffelton, David and Jhala, Ranjit and Lerner, Sorin},
  title = {Deep Typechecking and Refactoring},
  booktitle = {23rd ACM SIGPLAN Conference on Object-oriented Programming Systems Languages and Applications},
  series = {OOPSLA '08},
  publisher = {ACM},
  year = {2008},
  paper = {dtar-tatlock-oopsla08.pdf},
  slides = {dtar-tatlock-oopsla08-slides.pptx},
  poster = {dtar-tatlock-oopsla08-poster.pdf},
  puburl = {http://dx.doi.org/10.1145/1449764.1449768},
  abstract = {Large software systems are typically composed of multiple layers, written in different languages and
                  loosely coupled using a string-based interface. For example, in modern web-applications, a server
                  written in Java communicates with a database back-end by passing in query strings. This widely
                  prevalent approach is unsafe as the analyses developed for the individual layers are oblivious to the
                  semantics of the dynamically constructed strings, making it impossible to statically reason about the
                  correctness of the interaction. Further, even simple refactoring in such systems is daunting and error
                  prone as the changes must also be applied to isolated string fragments scattered across the code
                   base.

                   We present techniques for deep typechecking and refactoring for systems that combine Java
                  code with a database back-end using the Java Persistence API. Deep typechecking ensures that the
                  queries that are constructed dynamically are type safe and that the values returned from the queries
                  are used safely by the program. Deep refactoring builds upon typechecking to allow programmers to
                   safely and automatically propagate code refactorings through the query string fragments.

                   Our algorithms are implemented in a tool called QUAIL. We present experiments evaluating the effectiveness
                   of QUAIL on several benchmarks ranging from 3,369 to 82,907 lines of code. We show that QUAIL is able
                  to verify that 84\% of query strings in our benchmarks are type safe. Finally, we show that QUAIL
                  reduces the number of places in the code that a programmer must look at in order to perform a
                  refactoring by several orders of magnitude.}
}

This file was generated by bibtex2html 1.98.