% rems-task4.bib

@inproceedings{TaDa2014,
  title     = {{TaDA}: A Logic for Time and Data Abstraction},
  author    = {da Rocha Pinto, Pedro and Dinsdale-Young, Thomas and Gardner, Philippa},
  booktitle = {\textbf{ECOOP 2014}},
  conf      = {ECOOP 2014},
  series    = {Lecture Notes in Computer Science},
  volume    = {8586},
  editor    = {Jones, Richard},
  publisher = {Springer Berlin Heidelberg},
  year      = {2014},
  month     = jul,
  pages     = {207--231},
  isbn      = {978-3-662-44201-2},
  doi       = {10.1007/978-3-662-44202-9_9},
  url       = {http://dx.doi.org/10.1007/978-3-662-44202-9_9},
  language  = {English},
  abstract  = {To avoid data races, concurrent operations should either be at distinct times or on distinct data. Atomicity is the abstraction that an operation takes effect at a single, discrete instant in time, with linearisability being a well-known correctness condition which asserts that concurrent operations appear to behave atomically. Disjointness is the abstraction that operations act on distinct data resource, with concurrent separation logics enabling reasoning about threads that appear to operate independently on disjoint resources.

We present TaDA, a program logic that combines the benefits of abstract atomicity and abstract disjointness. Our key contribution is the introduction of atomic triples, which offer an expressive approach to specifying program modules. By building up examples, we show that TaDA supports elegant modular reasoning in a way that was not previously possible.
}
}
@inproceedings{DBLP:conf/esop/GardnerNW14,
  author      = {Philippa Gardner and Gian Ntzik and Adam Wright},
  title       = {Local Reasoning for the {POSIX} File System},
  booktitle   = {\textbf{ESOP 2014}: Programming Languages and Systems - 23rd European Symposium on Programming, Held as Part of the European Joint Conferences on Theory and Practice of Software, {ETAPS} 2014, Grenoble, France},
  conf        = {ESOP 2014},
  pages       = {169--188},
  year        = {2014},
  month       = apr,
  doi         = {10.1007/978-3-642-54833-8_10},
  url         = {http://dx.doi.org/10.1007/978-3-642-54833-8_10},
  optcrossref = {DBLP:conf/esop/2014},
  timestamp   = {Sun, 23 Mar 2014 10:48:25 +0100},
  biburl      = {http://dblp.uni-trier.de/rec/bib/conf/esop/GardnerNW14},
  bibsource   = {dblp computer science bibliography, http://dblp.org},
  abstract    = {We provide a program logic for specifying a core subset of the sequential POSIX file system, and for reasoning abstractly about client programs working with the file system.}
}
@article{DBLP:journals/entcs/GardnerRWW14,
  author = {Philippa Gardner and
               Azalea Raad and
               Mark J. Wheelhouse and
               Adam Wright},
  title = {Abstract Local Reasoning for Concurrent Libraries: Mind the Gap},
  journal = {\textbf{MFPS 2014}: Electronic Notes in Theoretical Computer Science},
  conf = {MFPS 2014},
  volume = {308},
  pages = {147--166},
  year = {2014},
  month = jun,
  url = {http://dx.doi.org/10.1016/j.entcs.2014.10.009},
  doi = {10.1016/j.entcs.2014.10.009},
  timestamp = {Tue, 04 Nov 2014 10:58:51 +0100},
  biburl = {http://dblp.uni-trier.de/rec/bib/journals/entcs/GardnerRWW14},
  bibsource = {dblp computer science bibliography, http://dblp.org},
  abstract = {We study abstract local reasoning for concurrent libraries. There are two main approaches: provide a specification of a library by abstracting from concrete reasoning about an implementation; or provide a direct abstract library specification, justified by refining to an implementation. Both approaches have a significant gap in their reasoning, due to a mismatch between the abstract connectivity of the abstract data structures and the concrete connectivity of the concrete heap representations. We demonstrate this gap using structural separation logic (SSL) for specifying a concurrent tree library and concurrent abstract predicates (CAP) for reasoning about a concrete tree implementation. The gap between the abstract and concrete connectivity emerges as a mismatch between the SSL tree predicates and CAP heap predicates. This gap is closed by an interface function I which links the abstract and concrete connectivity. In the accompanying technical report, we generalise our SSL reasoning and results to arbitrary concurrent data libraries.}
}
@inproceedings{daRochaPinto2016Modular,
  author    = {{da Rocha Pinto}, Pedro and Dinsdale-Young, Thomas and Gardner, Philippa and Sutherland, Julian},
  title     = {Modular Termination Verification for Non-blocking Concurrency},
  booktitle = {\textbf{ESOP 2016}: Proceedings of the 25th European Symposium on Programming},
  conf      = {ESOP 2016},
  pages     = {176--201},
  year      = {2016},
  month     = apr,
  doi       = {10.1007/978-3-662-49498-1_8},
  url       = {http://dx.doi.org/10.1007/978-3-662-49498-1_8},
  pdf       = {http://www.doc.ic.ac.uk/~pmd09/research/publications/2016/esop/modular-termination-verification-for-non-blocking-concurrency.pdf},
  project   = { concurrency, tada },
  abstract  = {We present Total-TaDA, a program logic for verifying the total correctness of concurrent programs: that such programs both terminate and produce the correct result. With Total-TaDA, we can specify constraints on a thread's concurrent environment that are necessary to guarantee termination. This allows us to verify total correctness for non-blocking algorithms, e.g. a counter and a stack. Our specifications can express lock- and wait-freedom. More generally, they can express that one operation cannot impede the progress of another, a new non-blocking property we call non-impedance. Moreover, our approach is modular. We can verify the operations of a module independently, and build up modules on top of each other.}
}
@inproceedings{Ntzik2015Fault,
  title = {Fault-tolerant Resource Reasoning},
  author = {Ntzik, Gian and {da Rocha Pinto}, Pedro and Gardner, Philippa},
  booktitle = {\textbf{APLAS 2015}: Proceedings of the 13th Asian Symposium on Programming Languages and Systems},
  conf = {APLAS 2015},
  year = {2015},
  month = dec,
  pages = {169--188},
  url = {http://dx.doi.org/10.1007/978-3-319-26529-2_10},
  doi = {10.1007/978-3-319-26529-2_10},
  pdf = {http://www.doc.ic.ac.uk/~pmd09/research/publications/2015/aplas/fault-tolerant-resource-reasoning.pdf},
  abstract = {Separation logic has been successful at verifying that programs do not crash due to illegal use of resources. The underlying assumption, however, is that machines do not fail. In practice, machines can fail unpredictably for various reasons, e.g. power loss, corrupting resources. Critical software, e.g. file systems, employ recovery methods to mitigate these effects. We introduce an extension of the Views framework to reason about such methods. We use concurrent separation logic as an instance of the framework to illustrate our reasoning, and explore programs using write-ahead logging, e.g. an ARIES recovery algorithm.}
}
@inproceedings{daRochaPinto2015Steps,
  title = {Steps in Modular Specifications for Concurrent Modules (Invited Tutorial Paper)},
  author = {{da Rocha Pinto}, Pedro and Dinsdale-Young, Thomas and Gardner, Philippa},
  booktitle = {\textbf{MFPS 2015}: Proceedings of the 31st Conference on the Mathematical Foundations of Programming Semantics},
  conf = {MFPS 2015},
  year = {2015},
  month = jun,
  pages = {3--18},
  url = {http://www.sciencedirect.com/science/article/pii/S1571066115000699},
  doi = {10.1016/j.entcs.2015.12.002},
  pdf = {http://www.doc.ic.ac.uk/~pmd09/research/publications/2015/mfps/steps-in-modular-specifications-for-concurrent-modules.pdf},
  abstract = {The specification of a concurrent program module is a difficult problem. The specifications must be strong enough to enable reasoning about the intended clients without reference to the underlying module implementation. We survey a range of verification techniques for specifying concurrent modules, in particular highlighting four key concepts: auxiliary state, interference abstraction, resource ownership and atomicity. We show how these concepts combine to provide powerful approaches to specifying concurrent modules.}
}
@inproceedings{icaptso-esop,
  author = {Filip Sieczkowski and Kasper Svendsen and Lars Birkedal and Jean Pichon-Pharabod},
  title = {A Separation Logic for Fictional Sequential Consistency},
  booktitle = {\textbf{ESOP 2015}},
  conf = {ESOP 2015},
  year = {2015},
  month = apr,
  doi = {10.1007/978-3-662-46669-8_30},
  abstract = {To improve performance, modern multiprocessors and programming languages typically implement \emph{relaxed} memory models that
do not require all processors/threads to observe memory operations in
the same order. To relieve programmers from having to reason directly
about these relaxed behaviors, languages often provide efficient synchronization primitives and concurrent data structures with stronger high-level guarantees about memory reorderings. For instance, locks usually
ensure that when a thread acquires a lock, it can observe all memory
operations of the releasing thread, prior to the release. When used correctly, these synchronization primitives and data structures allow clients
to recover a fiction of a \emph{sequentially consistent} memory model.

In this paper we propose a new proof system, iCAP-TSO, that captures
this fiction formally, for a language with a TSO memory model. The
logic supports reasoning about libraries that directly exploit the relaxed
memory model to achieve maximum efficiency. When these libraries provide sufficient guarantees, the logic hides the underlying complexity and
admits standard separation logic rules for reasoning about their more
high-level clients.
}
}
@inproceedings{CoLoSL2015,
  author = {Azalea Raad and Jules Villard and Philippa Gardner},
  title = {{CoLoSL}: Concurrent Local Subjective Logic},
  booktitle = {\textbf{ESOP 2015}},
  conf = {ESOP 2015},
  year = {2015},
  month = apr,
  doi = {10.1007/978-3-662-46669-8_29},
  pdf = {http://www.doc.ic.ac.uk/~azalea/papers/CoLoSL.pdf},
  abstract = {A key difficulty in verifying shared-memory concurrent programs is reasoning compositionally about each thread in isolation. Existing
verification techniques for fine-grained concurrency typically require reasoning about either the entire shared state or disjoint parts of the shared
state, impeding compositionality. This paper introduces the program logic
CoLoSL, where each thread is verified with respect to its subjective view
of the global shared state. This subjective view describes only that part of
the state accessed by the thread. Subjective views may arbitrarily overlap
with each other, and expand and contract depending on the resource
required by the thread. This flexibility gives rise to small specifications
and, hence, more compositional reasoning for concurrent programs. We
demonstrate our reasoning on a range of examples, including a concurrent
computation of a spanning tree of a graph.
}
}
@article{DBLP:journals/corr/ChakrabortyHSV15,
  author    = {Soham Chakraborty and Thomas A. Henzinger and Ali Sezgin and Viktor Vafeiadis},
  title     = {Aspect-oriented linearizability proofs},
  journal   = {\textbf{Logical Methods in Computer Science}},
  volume    = {11},
  number    = {1},
  pages     = {1--33},
  year      = {2015},
  month     = apr,
  doi       = {10.2168/LMCS-11(1:20)2015},
  url       = {http://arxiv.org/abs/1502.07639},
  biburl    = {http://dblp.uni-trier.de/rec/bib/journals/corr/ChakrabortyHSV15},
  bibsource = {dblp computer science bibliography, http://dblp.org},
  abstract  = {Linearizability of concurrent data structures is usually proved by monolithic simulation arguments relying on the identification of the so-called linearization points. Regrettably, such proofs, whether manual or automatic, are often complicated and scale poorly to advanced non-blocking concurrency patterns, such as helping and optimistic updates. In response, we propose a more modular way of checking linearizability of concurrent queue algorithms that does not involve identifying linearization points. We reduce the task of proving linearizability with respect to the queue specification to establishing four basic properties, each of which can be proved independently by simpler arguments. As a demonstration of our approach, we verify the Herlihy and Wing queue, an algorithm that is challenging to verify by a simulation proof. }
}
@inproceedings{ocaml14tls,
  author = {Hannes Mehnert and David Kaloper Mersinjak},
  title = {{Transport Layer Security} purely in {OCaml}},
  booktitle = {Proceedings of the ACM OCaml 2014 Workshop},
  conf = {OCaml 2014},
  year = {2014},
  month = sep,
  url = {https://ocaml.org/meetings/ocaml/2014/ocaml2014_4.pdf},
  abstract = {Transport Layer Security (TLS) is probably the most
widely deployed security protocol on the Internet.  It is used to
setup virtual private networks, secure various services such as web
and email, etc.  In this paper we describe our clean slate TLS
implementation developed in OCaml.  Our motivating goals are
reliability, robustness, and API conciseness.  While our
implementation is still a work in progress and lacks some of the
protocol features (such as client authentication and session
resumption), it already interoperates with other existing TLS
implementations.  Preliminary performance evaluation shows that our
library is roughly five times slower compared to OpenSSL, but we
expect to improve our performance.}
}
@inproceedings{DBLP:conf/RelMiCS/HoareSMSVZO14,
  author      = {Tony Hoare and Stephan van Staden and Bernhard M{\"{o}}ller and Georg Struth and Jules Villard and Huibiao Zhu and Peter W. O'Hearn},
  title       = {Developments in Concurrent {Kleene} Algebra},
  booktitle   = {\textbf{RAMiCS 2014}: Relational and Algebraic Methods in Computer Science - 14th International Conference, Marienstatt, Germany},
  conf        = {RAMiCS 2014},
  pages       = {1--18},
  year        = {2014},
  month       = apr,
  doi         = {10.1007/978-3-319-06251-8_1},
  url         = {http://dx.doi.org/10.1007/978-3-319-06251-8_1},
  optcrossref = {DBLP:conf/RelMiCS/2014},
  timestamp   = {Tue, 08 Apr 2014 19:30:22 +0200},
  biburl      = {http://dblp.uni-trier.de/rec/bib/conf/RelMiCS/HoareSMSVZO14},
  bibsource   = {dblp computer science bibliography, http://dblp.org},
  abstract    = {This report summarises recent progress in the research of its co-authors towards the construction of links between algebraic presentations of the principles of programming and the exploitation of concurrency in modern programming practice. The research concentrates on the construction of a realistic family of partial order models for Concurrent Kleene Algebra (aka, the Laws of Programming). The main elements of the model are objects and the events in which they engage. Further primitive concepts are traces, errors and failures, and transferrable ownership. In terms of these we can define other concepts which have proved useful in reasoning about concurrent programs, for example causal dependency and independence, sequentiality and concurrency, allocation and disposal, synchrony and asynchrony, sharing and locality, input and output.}
}
@misc{31c3tls,
  author = {David Kaloper Mer{\v{s}}injak and Hannes Mehnert},
  title = {Trustworthy secure modular operating system engineering},
  howpublished = {Invited talk at 31st Chaos Communication
Congress (31C3)},
  month = dec,
  year = 2014,
  abstract = {We present Mirage OS, a modular library operating system developed from scratch in the functional programming language OCaml. Each service, called unikernel, is an OCaml application using libraries such as a TCP/IP stack, DNS. It is either compiled to a Xen virtual machine image or to a Unix binary (for development). State in 2014 is that it runs on x86 and arm, we implemented a clean-slate TLS (1.0, 1.1, 1.2), X.509, ASN.1 stack, crypto primitives, Off-the-record. We also have TCP/IP, HTTP, a persistent branchable store (similar to git) - all implemented in OCaml. A virtual machine serving data via https is roughly 2MB in size - no libc inside :)}
}
@misc{HCSS-Kaloper,
  author = {David Kaloper Mer{\v{s}}injak and Hannes Mehnert and Anil Madhavapeddy and Peter Sewell},
  title = {Not-quite-so-broken {TLS}: lessons in re-engineering a security protocol
specification and implementation},
  month = may,
  year = 2015,
  note = {Presentation at HCSS 2015: the Fifteenth Annual
High Confidence Software and Systems Conference},
  abstract = {Transport Layer Security (TLS) implementations have a history of
security flaws.  The immediate causes of these range from simple
programming errors, such as memory management, to subtle violations of
the protocol logic.  Deeper causes can be seen in the challenges of
interpreting the ambiguous prose specification, the complexities
inherent in large APIs and code bases, unsafe performance-oriented
programming choices, and the impossibility of directly testing
conformance between implementations and the specification.

We present nqsb-TLS, the result of our re-engineering approach
to improve the quality of security protocol implementations.  The same
code serves dual roles: it is both a specification of TLS, executable
as a test oracle to check conformance of traces from arbitrary
implementations, and a secure and usable executable implementation of
TLS.  nqsb-TLS employs a modular and declarative programming
style that also allows it to be compiled into a Xen unikernel (a
specialised virtual machine image) with a TCB that is 2.5\% of a
standalone system running a standard Linux/OpenSSL stack, with all
network traffic being handled in a memory-safe language.

nqsb-TLS focuses on protocol-level interoperability, and makes
no effort to be compatible with existing (and often poorly designed)
library APIs such as OpenSSL.  The higher-level API in nqsb-TLS
makes it harder to misuse the library, and is demonstrated via several
unikernel applications ranging over HTTPS, IMAP, Git, Websocket clients
and servers.
}
}
@inproceedings{nqsbTLS-sub,
  author = {David Kaloper{-}Mersinjak and
               Hannes Mehnert and
               Anil Madhavapeddy and
               Peter Sewell},
  title = {Not-Quite-So-Broken {TLS}: Lessons in Re-Engineering a Security Protocol
               Specification and Implementation},
  abstract = {Transport Layer Security (TLS) implementations have a history of security flaws.  The immediate causes of these are often programming errors, e.g.~in memory management, but the root causes are more fundamental: the challenges of interpreting the ambiguous prose specification, the complexities inherent in large APIs and code bases, inherently unsafe programming choices, and the impossibility of directly testing conformance between implementations and the specification.

We present \emph{nqsb-TLS}, the result of our re-engineered approach to security protocol specification and implementation that addresses these root causes.  The same code serves dual roles: it is both a specification of TLS, executable as a test oracle to check conformance of traces from arbitrary implementations, and a usable implementation of TLS; a modular and declarative programming style provides clean separation between its components.  Many security flaws are thus excluded by construction. 

nqsb-TLS can be used in standalone applications, which we demonstrate with a messaging client, and can also be compiled into a Xen unikernel (a specialised virtual machine image) with a TCB that is 4\% of a standalone system running a standard Linux/OpenSSL stack, with all network traffic being handled in a memory-safe language; this supports applications including HTTPS, IMAP, Git, and Websocket clients and servers.
},
  booktitle = {\textbf{USENIX Security 2015}: 24th {USENIX} Security Symposium,  Washington,
               D.C., USA},
  conf = {USENIX Security 2015},
  pages = {223--238},
  year = {2015},
  month = aug,
  optcrossref = {DBLP:conf/uss/2015},
  url = {https://www.usenix.org/conference/usenixsecurity15/technical-sessions/presentation/kaloper-mersinjak},
  pdf = {http://www.cl.cam.ac.uk/~pes20/nqsbtls-usenix-security15.pdf},
  timestamp = {Thu, 20 Aug 2015 14:07:40 +0200},
  biburl = {http://dblp.uni-trier.de/rec/bib/conf/uss/Kaloper-Mersinjak15},
  bibsource = {dblp computer science bibliography, http://dblp.org}
}
@inproceedings{SibylFS-sub,
  author      = {Tom Ridge and David Sheets and Thomas Tuerk and Andrea Giugliano and Anil Madhavapeddy and Peter Sewell},
  title       = {{SibylFS}: formal specification and oracle-based testing for {POSIX} and real-world file systems},
  booktitle   = {\textbf{SOSP 2015}: Proceedings of the 25th Symposium on Operating Systems Principles, Monterey, CA, USA},
  conf        = {SOSP 2015},
  pages       = {38--53},
  year        = {2015},
  month       = oct,
  doi         = {10.1145/2815400.2815411},
  url         = {http://doi.acm.org/10.1145/2815400.2815411},
  pdf         = {http://www.cl.cam.ac.uk/~pes20/SOSP15-paper102-submitted.pdf},
  optcrossref = {DBLP:conf/sosp/2015},
  timestamp   = {Fri, 02 Oct 2015 08:59:57 +0200},
  biburl      = {http://dblp.uni-trier.de/rec/bib/conf/sosp/RidgeSTGMS15},
  bibsource   = {dblp computer science bibliography, http://dblp.org},
  abstract    = {Systems depend critically on the behaviour of file systems, but that behaviour differs in many details, both between implementations and between each implementation and the POSIX (and other) prose specifications.  Building robust and portable software requires understanding these details and differences, but there is currently no good way to systematically describe, investigate, or test file system behaviour across this complex multi-platform interface.

In this paper we show how to characterise the envelope of allowed
behaviour of file systems in a form that enables practical and highly
discriminating testing.  We give a mathematically rigorous model of
file system behaviour, SibylFS, that specifies the range of allowed
behaviours of a file system for any sequence of the system calls
within our scope, and that can be used as a \emph{test oracle} to
decide whether an observed trace is allowed by the model, both for
validating the model and for testing file systems against it.  SibylFS
is modular enough to not only describe POSIX, but also specific
Linux, OSX and FreeBSD behaviours.  We complement the model
with an extensive test suite of over 21000 tests; this can be run on
a target file system and checked in less than 5 minutes, making it
usable in practice.  Finally, we report experimental results for
around 40 configurations of many file systems, identifying many
differences and some serious flaws.  
}
}
@inproceedings{nqsb-testing,
  author    = {David Kaloper-Mer{\v s}injak and Hannes Mehnert},
  title     = {Not-quite-so-broken {TLS} 1.3 mechanised conformance checking},
  booktitle = {{TLSv1.3} -- Ready or Not?  ({TRON}) workshop},
  conf      = {TRON workshop 2016},
  month     = feb,
  year      = {2016},
  pdf       = {https://www.cl.cam.ac.uk/~hm519/tron.pdf},
  abstract  = {We present a set of tools to aid TLS 1.3 implementors, all derived from a single TLS implementation/model.  These include an automated offline TLS protocol conformance checker, a test oracle validating recorded sessions, a tool replicating recorded sessions with other implementations, and an interactive online handshake visualisation.

The conformance checker repeatedly runs a user-provided TLS implementation, attempting to establish TLS sessions with it; the checker explores the TLS parameter space to determine feature coverage of the provided implementation. The test oracle takes a recorded session between two endpoints and decides whether the session was conformant with the specification.  The replication utility re-runs one side of a recorded session against another TLS implementation, and observes its behaviour.  The online visualisation accepts connections from clients and presents the TLS session as an interactive sequence diagram.

All of these tools are based on our clean-slate nqsb-TLS implementation/model.  It already supports TLS 1.0-1.2, and interoperates with a broad range of other TLS implementations.  We are currently extending nqsb-TLS with TLS 1.3 support, and tracking the progress of the TLS 1.3 draft, adapting our implementation/model accordingly.

We invite the community to use our tools while implementing the TLS 1.3 RFC, and provide feedback on deviations in the interpretation thereof.  This process enables the community to converge to a single, mechanically checkable TLS 1.3 model, as implemented by nqsb-TLS.}
}
@inproceedings{biglater,
  author    = {Kasper Svendsen and Filip Sieczkowski and Lars Birkedal},
  title     = {Transfinite Step-indexing: Decoupling Concrete and Logical Steps},
  booktitle = {\textbf{ESOP 2016}},
  conf      = {ESOP 2016},
  pages     = {727--751},
  year      = {2016},
  month     = apr,
  doi       = {10.1007/978-3-662-49498-1_28},
  url       = {http://dx.doi.org/10.1007/978-3-662-49498-1_28},
  pdf       = {http://www.cs.au.dk/~birke/papers/biglater-conf.pdf},
  abstract  = { Step-indexing has proven to be a powerful technique for defining logical relations for languages with advanced type systems and models of expressive program logics. In both cases, the model is stratified using natural numbers to solve a recursive equation that has no naive solutions. As a result of this stratification, current models require that each unfolding of the recursive equation -- each logical step -- must coincide with a concrete reduction step. This tight coupling is problematic for applications where the number of logical steps cannot be statically bounded.

In this paper we demonstrate that this tight coupling between logical and concrete steps is artificial and show how to loosen it using transfinite step-indexing. We present a logical relation that supports an arbitrary but finite number of logical steps for each concrete step. }
}
@inproceedings{Ntzik2015Reasoning,
  title = {Reasoning about the {POSIX} File System: Local Update and Global Pathnames},
  author = {Ntzik, Gian and Gardner, Philippa},
  booktitle = {\textbf{OOPSLA 2015}: Proceedings of the 30th Annual {ACM} {SIGPLAN} Conference on Object-Oriented Programming, Systems, Languages, and Applications},
  conf = {OOPSLA 2015},
  year = {2015},
  month = oct,
  abstract = {We introduce a program logic for specifying a core sequential subset of the POSIX file system and for reasoning abstractly about client programs working with the file system. The challenge is to reason about the combination of local directory update and global pathname traversal (including '..' and symbolic links) which may overlap the directories being updated. Existing reasoning techniques are either based on first-order logic and do not scale, or on separation logic and can only handle linear pathnames (no '..' or symbolic links). We introduce fusion logic for reasoning about local update and global pathname traversal, introducing a novel effect frame rule to propagate the effect of a local update on overlapping pathnames. We apply our reasoning to the standard recursive remove utility (rm -r), discovering bugs in well-known implementations.}
}
@inproceedings{DBLP:conf/vstte/SezginT15,
  author = {Ali Sezgin and
               Serdar Tasiran},
  title = {Moving Around: Lipton's Reduction for {TSO} - (Regular Submission)},
  booktitle = {\textbf{VSTTE 2015}: Verified Software: Theories, Tools, and Experiments - 7th International
               Conference, San Francisco, CA, USA.
               Revised Selected Papers},
  conf = {VSTTE 2015},
  pages = {165--182},
  year = {2015},
  month = jul,
  optcrossref = {DBLP:conf/vstte/2015},
  url = {http://dx.doi.org/10.1007/978-3-319-29613-5_10},
  doi = {10.1007/978-3-319-29613-5_10},
  timestamp = {Fri, 12 Feb 2016 10:43:40 +0100},
  biburl = {http://dblp.uni-trier.de/rec/bib/conf/vstte/SezginT15},
  bibsource = {dblp computer science bibliography, http://dblp.org},
  abstract = {We generalize Lipton's reduction theory, hitherto limited to SC, for TSO programs. We demonstrate the use of our theory by specifying the conditions under which a particular write is SC-like (i.e. placing a fence immediately after the write does not constrain the behavior of the overall program) and a library implementation can be safely used (i.e. compositionality). Our theory is complete: a program has only SC behaviors iff there is a proof that establishes that every write in the program is SC-like. We adapt the notion of program abstraction to TSO analysis via our theory. We define precisely what is meant by abstraction, and propose a methodology by which one can obtain via abstraction SC summaries of a program which may have non-SC behaviors. Finally, we show how checking whether a write instruction is SC-like can be mechanized. We describe a transformation in which the execution of each thread of the original program (under TSO) is simulated by the execution of two tightly coupled threads in the new program (under SC).}
}
@inproceedings{GomesStruth16,
  author = {Victor B. F. Gomes and
               Georg Struth},
  title = {Modal {Kleene} algebra applied to program correctness},
  booktitle = {\textbf{FM 2016}: Formal Methods -- 21st International Symposium,
    Limassol},
  conf = {FM 2016},
  year = {2016},
  month = nov,
  editor = {John S. Fitzgerald and
               Stefania Gnesi and
               Constance L. Heitmeyer},
  series = {Lecture Notes in Computer Science},
  publisher = {Springer},
  pages = {310--325},
  doi = {10.1007/978-3-319-48989-6_19},
  abstract = {
      Modal Kleene algebras are relatives of dynamic logics that support
  program construction and verification by equational reasoning.  We
  describe their application in implementing versatile program
  correctness components in interactive theorem provers such as
  Isabelle/HOL. Starting from a weakest precondition based component
  with a simple relational store model, we show how variants for Hoare
  logic, strongest postconditions and program refinement can be built
  in  a principled way. Modularity of the approach is demonstrated
  by variants that capture program termination and recursion,
  memory models for programs with pointers, and program trace
  semantics.
  },
  url = {https://www.repository.cam.ac.uk/handle/1810/260625},
  pdf = {http://www.cl.cam.ac.uk/~pes20/rems/papers/GomesStruthMKA.pdf}
}
@inproceedings{KSB17,
  author = {Morten Krogh-Jespersen and 
             Kasper Svendsen and
             Lars Birkedal},
  title = {A Relational Model of Types-and-Effects in Higher-Order
             Concurrent Separation Logic},
  booktitle = {\textbf{POPL 2017}: The 44th Annual {ACM} {SIGPLAN-SIGACT} Symposium on Principles of
               Programming Languages,  Paris, France},
  year = {2017},
  conf = {POPL 2017},
  month = jan,
  isbn = {978-1-4503-4660-3},
  location = {Paris, France},
  pages = {218--231},
  numpages = {14},
  url = {http://doi.acm.org/10.1145/3009837.3009877},
  doi = {10.1145/3009837.3009877},
  acmid = {3009877},
  publisher = {ACM},
  address = {New York, NY, USA},
  abstract = {
Recently we have seen a renewed interest in programming languages that tame the complexity of state and concurrency through refined type systems with more fine-grained control over effects. In addition to simplifying reasoning and eliminating whole classes of bugs, statically tracking effects opens the door to advanced compiler optimizations.

In this paper we present a relational model of a type-and-effect system for a higher-order, concurrent programming language. The model precisely captures the semantic invariants expressed by the effect annotations. We demonstrate that these invariants are strong enough to prove advanced program transformations, including automatic parallelization of expressions with suitably disjoint effects. The model also supports refinement proofs between abstract data types implementations with different internal data representations, including proofs that fine-grained concurrent algorithms refine their coarse-grained counterparts. This is the first model for such an expressive language that supports both effect-based optimizations and data abstraction.

The logical relation is defined in Iris, a state-of-the-art higher-order concurrent separation logic. This greatly simplifies proving well-definedness of the logical relation and also provides us with a powerful logic for reasoning in the model.}
}
@inproceedings{Caper,
  author = {Thomas Dinsdale{-}Young and
               Pedro da Rocha Pinto and
               Kristoffer Just Andersen and
               Lars Birkedal},
  title = {{Caper}: Automatic Verification for Fine-grained Concurrency},
  booktitle = {Proc. \textbf{ESOP 2017}: 26th European Symposium on Programming},
  conf = {ESOP 2017},
  series = {Lecture Notes in Computer Science},
  publisher = {Springer},
  year = {2017},
  month = apr,
  pdf = {https://www.doc.ic.ac.uk/~pmd09/research/publications/2017/esop/caper-automatic-verification-for-fine-grained-concurrency.pdf},
  abstract = {Recent program logics based on separation logic emphasise a modular approach to proving functional correctness for fine-grained concurrent programs. However, these logics have no automation support. In this paper, we present Caper, a prototype tool for automated reasoning in such a logic. Caper is based on symbolic execution, integrating reasoning about interference on shared data and about ghost resources that are used to mediate this interference. This enables Caper to verify the functional correctness of fine-grained concurrent algorithms.}
}
@inproceedings{Maps,
  author = {Shale Xiong and
               Pedro da Rocha Pinto and
               Gian Ntzik and
               Philippa Gardner},
  title = {Abstract Specifications for Concurrent Maps},
  booktitle = {Proc. \textbf{ESOP 2017}: 26th European Symposium on Programming},
  conf = {ESOP 2017},
  series = {Lecture Notes in Computer Science},
  publisher = {Springer},
  year = {2017},
  month = apr,
  pdf = {https://www.doc.ic.ac.uk/~pmd09/research/publications/2017/esop/abstract-specifications-for-concurrent-maps.pdf},
  abstract = {Despite recent advances in reasoning about concurrent data structure libraries, the largest implementations in java.util.concurrent have yet to be verified.  The key issue lies in the development of  modular specifications, which provide clear logical boundaries  between clients and implementations.  A solution is to use recent advances in fine-grained concurrency reasoning, in particular the introduction of abstract atomicity to concurrent separation logic reasoning. We present two specifications of concurrent maps, both providing the clear boundaries we seek. We show that these specifications are equivalent, in that they can be built from each other. We show how we can verify client programs, such as a concurrent set and a producer-consumer client. We also give a substantial first proof that the main operations of ConcurrentSkipListMap in java.util.concurrent satisfy the map specification. This work demonstrates that we now have the technology to verify the largest implementations in java.util.concurrent.}
}
@inproceedings{ocaml16conex,
  author = {Hannes Mehnert and Louis Gesbert},
  title = {{Conex} --- establishing trust into data repositories},
  booktitle = {Proceedings of the ACM \textbf{OCaml 2016} Workshop},
  conf = {OCaml 2016},
  year = {2016},
  month = sep,
  url = {https://github.com/hannesm/conex-paper/raw/master/paper.pdf},
  abstract = {Opam is a software update system, responsible for
discovering, downloading, building, and installing packages from
community repositories.  The data and metadata at each step should be
authenticated to originate from a (transitively) trusted author.  Opam
does not include any mechanism to authenticate any step at the moment.
We propose conex, which establishes digital signatures from the package
author to the user.  Using conex, neither the distribution server nor
the transport layer need to be trusted.  Clients will install
authenticated packages, and notice conex only if signatures cannot be
verified.  Authors need to adapt their workflow slightly, an updated
opam publish will be developed.  Janitors (repository maintainers) need
to adapt their workflow to emit cryptographic signatures to the
repository (in addition to pressing merge PR buttons).}
}
@inproceedings{ocaml16libtls,
  author = {Enguerrand Decorne and Jeremy Yallop and David Kaloper-Mer{\v{s}}injak},
  title = {{OCaml} inside: a drop-in replacement for libtls (extended abstract)},
  booktitle = {Proceedings of the ACM \textbf{OCaml 2016} Workshop},
  conf = {OCaml 2016},
  year = {2016},
  month = sep,
  url = {https://www.cl.cam.ac.uk/users/jdy22/papers/ocaml-inside-a-drop-in-replacement-for-libtls.pdf},
  abstract = {The C programming language pervades systems software. An operating
system in the Unix tradition consists of a kernel, written
in C, and a collection of libraries and executables, also written in
C, which communicate in large part via APIs defined as C types
and functions. Systems built in C typically suffer from a number of
problems, ranging from buffer overflows and other violations that
follow inevitably from unrestricted access to memory, to awkward
APIs that result from an inexpressive type system and a lack of
automatic memory management.
The openssl library, which implements the cryptographic protocols
TLS and SSL, suffers from both these problems. The lack
of bounds checking in C led to the notorious Heartbleed bug in
2014; a study two years earlier found that almost no clients of
openssl use the library correctly, apparently due to its unhelpful
interface (Georgiev et al. 2012).
In response to the Heartbleed bug, the OpenBSD team created
libressl, a fork of openssl with the aim of correcting the deficiencies.
One early fruit of the libressl project is libtls, a
much simpler, more disciplined interface to the TLS protocol implementation
in libressl.
However, libtls is still built in C, and so is still vulnerable
to potential buffer overflows, type errors, and similar defects. In
this talk we describe one approach to avoiding these problems,
namely replacing libtls with a fully compatible library written in
OCaml. Our library, libnqsb-tls1, matches the libtls function-for-function, but the implementation contains no C; instead, it
wraps the pure OCaml TLS implementation ocaml-tls (Kaloper-Mer{\v{s}}injak et al. 2015).}
}
@inproceedings{haas_et_al:LIPIcs:2016:6180,
  author = {Andreas Haas and Thomas A. Henzinger and Andreas Holzer and Christoph M. Kirsch and Michael Lippautz and Hannes Payer and Ali Sezgin and Ana Sokolova and Helmut Veith},
  title = {{Local Linearizability for Concurrent Container-Type Data Structures}},
  booktitle = {27th International Conference on Concurrency Theory (\textbf{CONCUR 2016})},
  conf = {CONCUR 2016},
  series = {Leibniz International Proceedings in Informatics (LIPIcs)},
  volume = {59},
  pages = {6:1--6:15},
  publisher = {Schloss Dagstuhl--Leibniz-Zentrum fuer Informatik},
  address = {Dagstuhl, Germany},
  year = {2016},
  month = aug,
  isbn = {978-3-95977-017-0},
  issn = {1868-8969},
  doi = {10.4230/LIPIcs.CONCUR.2016.6},
  pdf = {http://drops.dagstuhl.de/opus/volltexte/2016/6180/pdf/LIPIcs-CONCUR-2016-6.pdf},
  abstract = {The semantics of concurrent data structures is usually given by a sequential specification and a consistency condition. Linearizability is the most popular consistency condition due to its simplicity and general applicability. Nevertheless, for applications that do not require all guarantees offered by linearizability, recent research has focused on improving performance and scalability of concurrent data structures by relaxing their semantics.
In this paper, we present local linearizability, a relaxed consistency
condition that is applicable to container-type concurrent data
structures like pools, queues, and stacks. While linearizability requires that the effect of each operation is observed by all threads at the same time, local linearizability only requires that for each thread T, the effects of its local insertion operations and the effects of those removal operations that remove values inserted by T are observed by all threads at the same time. We investigate theoretical and practical properties of local linearizability and its relationship to many existing consistency conditions. We present a generic implementation method for locally linearizable data structures that uses existing linearizable data structures as building blocks. Our implementations show performance and scalability improvements over the original building blocks and outperform the fastest existing container-type implementations.}
}
@article{GomesGHSW16,
  author = {Victor B. F. Gomes and Walter Guttmann and Peter H{\"{o}}fner and Georg Struth and Tjark Weber},
  title = {Kleene Algebras with Domain},
  journal = {Archive of Formal Proofs},
  year = {2016},
  month = apr,
  volume = {2016},
  url = {https://www.isa-afp.org/entries/KAD.shtml},
  abstract = {Kleene algebras with domain are Kleene algebras endowed with an operation that maps each element of the algebra to its domain of definition (or its complement) in abstract fashion. They form a simple algebraic basis for Hoare logics, dynamic logics or predicate transformer semantics. We formalise a modular hierarchy of algebras with domain and antidomain (domain complement) operations in Isabelle/HOL that ranges from domain and antidomain semigroups to modal Kleene algebras and divergence Kleene algebras. We link these algebras with models of binary relations and program traces. We include some examples from modal logics, termination and program analysis.}
}
@article{GomesS16,
  author = {Victor B. F. Gomes and
               Georg Struth},
  title = {Program Construction and Verification Components Based on {Kleene} Algebra},
  journal = {Archive of Formal Proofs},
  volume = {2016},
  year = {2016},
  month = jun,
  url = {https://www.isa-afp.org/entries/Algebraic_VCs.shtml},
  abstract = {Variants of Kleene algebra support program construction and verification by algebraic reasoning. This entry provides a verification component for Hoare logic based on Kleene algebra with tests, verification components for weakest preconditions and strongest postconditions based on Kleene algebra with domain and a component for step-wise refinement based on refinement Kleene algebra with tests. In addition to these components for the partial correctness of while programs, a verification component for total correctness based on divergence Kleene algebras and one for (partial correctness) of recursive programs based on domain quantales are provided. Finally we have integrated memory models for programs with pointers and a program trace semantics into the weakest precondition component.}
}
@inproceedings{Raad2016Verifying,
  title = {Verifying Concurrent Graph Algorithms},
  author = {Raad, Azalea and Hobor, Aquinas and Villard, Jules and Gardner, Philippa},
  booktitle = {Proc. \textbf{APLAS 2016}: 14th Asian Symposium on Programming Languages and Systems},
  conf = {APLAS 2016},
  year = {2016},
  month = nov,
  pages = {314--334},
  url = {http://dx.doi.org/10.1007/978-3-319-47958-3_17},
  doi = {10.1007/978-3-319-47958-3_17},
  abstract = {We show how to verify four challenging concurrent fine-grained graph-manipulating algorithms, including graph copy, a speculatively parallel Dijkstra, graph marking and spanning tree. We develop a reasoning method for such algorithms that dynamically tracks the contributions and responsibilities of each thread operating on a graph, even in cases of arbitrary recursive thread creation. We demonstrate how to use a logic without abstraction (CoLoSL) to carry out abstract reasoning in the style of iCAP, by building the abstraction into the proof structure rather than incorporating it into the semantic model of the logic.}
}
@inproceedings{Raad2016DOM,
  title = {{DOM:} Specification and Client Reasoning},
  author = {Raad, Azalea and {Fragoso Santos}, Jos{\'{e}} and Gardner, Philippa},
  booktitle = {Proc. \textbf{APLAS 2016}: 14th Asian Symposium on Programming Languages and Systems},
  conf = {APLAS 2016},
  year = {2016},
  month = nov,
  pages = {401--422},
  url = {http://dx.doi.org/10.1007/978-3-319-47958-3_21},
  doi = {10.1007/978-3-319-47958-3_21},
  abstract = {We present an axiomatic specification of a key fragment of DOM using structural separation logic. This specification allows us to develop modular reasoning about client programs that call the DOM.}
}
@inproceedings{Kaloper-Mersinjak:2016:GPD:2976022.2976028,
  author = {Kaloper-Mer{\v{s}}injak, David and Yallop, Jeremy},
  title = {Generic Partially-static Data (Extended Abstract)},
  booktitle = {Proceedings of the 1st International Workshop on Type-Driven Development},
  conf = {TyDe 2016},
  series = {TyDe 2016},
  year = {2016},
  month = sep,
  isbn = {978-1-4503-4435-7},
  location = {Nara, Japan},
  pages = {39--40},
  numpages = {2},
  url = {http://doi.acm.org/10.1145/2976022.2976028},
  doi = {10.1145/2976022.2976028},
  acmid = {2976028},
  publisher = {ACM},
  address = {New York, NY, USA},
  keywords = {generic programming, partial evaluation, staging},
  pdf = {http://dl.acm.org/ft_gateway.cfm?id=2976028&ftid=1783813&dwn=1&CFID=738558588&CFTOKEN=28095393},
  abstract = {We describe a generic approach to defining partially-static data
and corresponding operations.}
}
@phdthesis{Ntzik2017Reasoning,
  author = {Ntzik, Gian},
  title = {Reasoning About {POSIX} File Systems},
  school = {Imperial College London},
  month = sep,
  year = {2016},
  opttype = {phdthesis},
  url = {https://psvg.doc.ic.ac.uk/publications/Ntzik2017Reasoning.pdf},
  abstract = {POSIX is a standard for operating systems, with a substantial
part devoted to specifying file-system operations. File-system operations
exhibit complex concurrent behaviour, comprising multiple actions affecting
different parts of the state: typically, multiple atomic reads followed by
an atomic update. However, the standard's description of concurrent
behaviour is unsatisfactory: it is fragmented; contains ambiguities; and is
generally under-specified. We provide a formal concurrent specification of
POSIX file systems and demonstrate scalable reasoning for clients. Our
specification is based on a concurrent specification language, which uses a
modern concurrent separation logic for reasoning about abstract atomic
operations, and an associated refinement calculus. Our reasoning about
clients highlights an important difference between reasoning about modules
built over a heap, where the interference on the shared state is restricted
to the operations of the module, and modules built over a file system, where
the interference cannot be restricted as the file system is a public
namespace. We introduce specifications conditional on context invariants
used to restrict the interference, and apply our reasoning to lock files and
named pipes. Program reasoning based on separation logic has been successful
at verifying that programs do not crash due to illegal use of resources,
such as invalid memory accesses. The underlying assumption of separation
logics, however, is that machines do not fail. In practice, machines can
fail unpredictably for various reasons, such as power loss, corrupting
resources or resulting in permanent data loss. Critical software, such as
file systems and databases, employ recovery methods to mitigate these
effects. We introduce an extension of the Views framework to reason about
programs in the presence of such events and their associated recovery
methods. We use concurrent separation logic as an instance of the framework
to illustrate our reasoning, and explore programs using write-ahead logging,
such as a stylised ARIES recovery algorithm.}
}
@phdthesis{daRochaPinto2017Reasoning,
  author = {{da Rocha Pinto}, Pedro},
  title = {Reasoning with Time and Data Abstractions},
  school = {Imperial College London},
  year = {2016},
  month = sep,
  url = {https://psvg.doc.ic.ac.uk/publications/daRochaPinto2017Reasoning.pdf},
  abstract = {In this thesis, we address the problem of verifying the
functional correctness of concurrent programs, with emphasis on fine-grained
concurrent data structures. Reasoning about such programs is challenging
since data can be concurrently accessed by multiple threads: the reasoning
must account for the interference between threads, which is often subtle. To
reason about interference, concurrent operations should either be at
distinct times or on distinct data. We present TaDA, a sound program logic
for verifying clients and implementations that use abstract specifications
that incorporate both abstract atomicity---the abstraction that operations
take effect at a single, discrete instant in time---and abstract
disjointness---the abstraction that operations act on distinct data resources.
Our key contribution is the introduction of atomic triples, which offer an
expressive approach for specifying program modules. We also present
Total-TaDA, a sound extension of TaDA with which we can verify total
correctness of concurrent programs, i.e. that such programs both produce the
correct result and terminate. With Total-TaDA, we can specify constraints on
a thread's concurrent environment that are necessary to guarantee
termination. This allows us to verify total correctness for nonblocking
algorithms and express lock- and wait-freedom. More generally, the abstract
specifications can express that one operation cannot impede the progress of
another, a new non-blocking property that we call non-impedance. Finally, we
describe how to extend TaDA for proving abstract atomicity for data
structures that make use of helping---where one thread is performing an
abstract operation on behalf of another---and speculation---where an abstract
operation is determined by future behaviour.}
}
@phdthesis{Raad2017Abstraction,
  author = {Raad, Azalea},
  title = {Abstraction, Refinement and Concurrent Reasoning},
  school = {Imperial College London},
  year = {2016},
  month = sep,
  url = {https://psvg.doc.ic.ac.uk/publications/Raad2017Abstraction.pdf},
  opttype = {phdthesis},
  abstract = {This thesis explores the challenges in abstract library
specification, library refinement and reasoning about fine-grained
concurrent programs. For abstract library specification, this thesis applies
structural separation logic (SSL) to formally specify the behaviour of
several libraries in an abstract, local and compositional manner. This
thesis further generalises the theory of SSL to allow for library
specifications that are language independent. Most notably, we specify a
fragment of the Document Object Model (DOM) library. This result is
compelling as it significantly improves upon existing DOM formalisms in that
the specifications produced are local, compositional and
language-independent. Concerning library refinement, this thesis explores
two existing approaches to library refinement for separation logic,
identifying their advantages and limitations in different settings. This
thesis then introduces a hybrid approach to refinement, combining the
strengths of both techniques for simple scalable library refinement. These
ideas are then adapted to refinement for SSL by presenting a JavaScript
implementation of the DOM fragment studied and establishing its correctness
with respect to its specification using the hybrid refinement approach. As to
concurrent reasoning, this thesis introduces concurrent local subjective
logic (CoLoSL) for compositional reasoning about fine-grained concurrent
programs. CoLoSL introduces subjective views, where each thread is verified
with respect to a customised local view of the state, as well as the general
composition and framing of interference relations, allowing for better proof
reuse.}
}
@inproceedings{Cerone2017Algebraic,
  author = {Cerone, Andrea and
            Gotsman, Alexey and
            Yang, Hongseok},
  title = {Algebraic Laws for Weak Consistency},
  booktitle = {\textbf{CONCUR 2017}: Proceedings of 28th International Conference on Concurrency Theory},
  conf = {CONCUR 2017},
  year = {2017},
  month = sep,
  doi = {10.4230/LIPIcs.CONCUR.2017.26},
  url = {https://psvg.doc.ic.ac.uk/publications/Cerone2017Algebraic.html},
  abstract = {Modern distributed systems often rely on so called
weakly-consistent databases, which achieve scalability by sacrificing the
consistency guarantee of distributed transaction processing. Such databases
have been formalised in two different styles, one based on abstract
executions and the other based on dependency graphs. The choice between
these styles has been made according to intended applications. The former
has been used for specifying and verifying the implementation of these
databases, while the latter for proving properties of client programs of the
databases. In this paper, we present a set of novel algebraic laws (i.e.
inequations) that connect these two styles of specifications. The laws
relate binary relations used in a specification based on abstract
executions, to those used in a specification based on dependency graphs. We
then show that this algebraic connection gives rise to so called robustness
criteria, conditions which ensure that a client program of a
weakly-consistent database does not exhibit anomalous behaviours due to weak
consistency. These criteria make it easy to reason about these client
programs, and may become a basis for dynamic or static program analyses. For
a certain class of consistency models specifications, we prove a full
abstraction result that connects the two styles of specifications.}
}
@inproceedings{DBLP:conf/cade/SantosGMN17,
  author = {Jos{\'{e}} Fragoso Santos and
               Philippa Gardner and
               Petar Maksimovi{\'c} and
               Daiva Naudziuniene},
  editor = {Leonardo de Moura},
  title = {Towards Logic-Based Verification of {JavaScript} Programs},
  booktitle = {\textbf{CADE 2017}: 26th International
Conference on Automated Deduction},
  conf = {CADE 2017},
  series = {Lecture Notes in Computer Science},
  volume = {10395},
  pages = {8--25},
  publisher = {Springer},
  month = aug,
  year = {2017},
  opturl = {https://doi.org/10.1007/978-3-319-63046-5_2},
  url = {https://psvg.doc.ic.ac.uk/publications/FragosoSantos2017Towards.html},
  doi = {10.1007/978-3-319-63046-5_2},
  timestamp = {Wed, 12 Jul 2017 10:22:36 +0200},
  biburl = {http://dblp.uni-trier.de/rec/bib/conf/cade/SantosGMN17},
  bibsource = {dblp computer science bibliography},
  abstract = {In this position paper, we argue for what we believe is a correct pathway to achieving scalable symbolic verification of JavaScript based on separation logic. We highlight the difficulties imposed by the language, the current state-of-the-art in the literature, and the sequence of steps that needs to be taken. We briefly describe our semi-automatic toolchain for JavaScript verification.}
}
@inproceedings{gomes-oopsla2017,
  author = {Victor B. F. Gomes and
            Martin Kleppmann and
            Dominic P. Mulligan and
            Alastair R. Beresford},
  title = {Verifying Strong Eventual Consistency in Distributed Systems},
  booktitle = {\textbf{OOPSLA 2017}: Proceedings of the {ACM} {SIGPLAN} International
Conference on Object-Oriented Programming, Systems, Languages, and
Applications ({OOPSLA})},
  conf = {OOPSLA 2017},
  pages = {109:1--109:28},
  articleno = {109},
  numpages = {28},
  year = {2017},
  month = oct,
  url = {http://doi.acm.org/10.1145/3133933},
  doi = {10.1145/3133933},
  note = {Distinguished Paper award},
  abstract = {Data replication is used in distributed systems to maintain up-to-date copies of shared data across multiple computers in a network. However, despite decades of research, algorithms for achieving consistency in replicated systems are still poorly understood. Indeed, many published algorithms have later been shown to be incorrect, even some that were accompanied by supposed mechanised proofs of correctness. In this work, we focus on the correctness of Conflict-free Replicated Data Types (CRDTs), a class of algorithm that provides strong eventual consistency guarantees for replicated data. We develop a modular and reusable framework in the Isabelle/HOL interactive proof assistant for verifying the correctness of CRDT algorithms. We avoid correctness issues that have dogged previous mechanised proofs in this area by including a network model in our formalisation, and proving that our theorems hold in all possible network behaviours. Our axiomatic network model is a standard abstraction that accurately reflects the behaviour of real-world computer networks. Moreover, we identify an abstract convergence theorem, a property of order relations, which provides a formal definition of strong eventual consistency. We then obtain the first machine-checked correctness theorems for three concrete CRDTs: the Replicated Growable Array, the Observed-Remove Set, and an Increment-Decrement Counter. We find that our framework is highly reusable, developing proofs of correctness for the latter two CRDTs in a few hours and with relatively little CRDT-specific code.}
}
@inproceedings{matache-aplas2017,
  author = {Cristina Matache and
            Victor B. F. Gomes and
            Dominic P. Mulligan},
  title = {Programming and proving with classical types},
  booktitle = {\textbf{APLAS 2017}: Proceedings of the 15th {Asian} Symposium on Programming Languages and Systems},
  conf = {APLAS 2017},
  year = {2017},
  month = nov,
  url = {https://doi.org/10.1007/978-3-319-71237-6_11},
  doi = {10.1007/978-3-319-71237-6_11},
  optnote = {To appear},
  abstract = {
The propositions-as-types correspondence is ordinarily presented as linking the metatheory of typed $\lambda $-calculi and the proof theory of intuitionistic logic. Griffin observed that this correspondence could be extended to classical logic through the use of control operators. This observation set off a flurry of further research, leading to the development of Parigot's $\lambda \mu $-calculus. In this work, we use the $\lambda \mu $-calculus as the foundation for a system of proof terms for classical first-order logic. In particular, we define an extended call-by-value $\lambda \mu $-calculus with a type system in correspondence with full classical logic. We extend the language with polymorphic types, add a host of data types in `direct style', and prove several metatheoretical properties. All of our proofs and definitions are mechanised in Isabelle/HOL, and we automatically obtain an interpreter for a system of proof terms cum programming language---called $\mu $ML---using Isabelle's code generation mechanism. Atop our proof terms, we build a prototype LCF-style interactive theorem prover---called $\mu $TP---for classical first-order logic, capable of synthesising $\mu $ML programs from completed tactic-driven proofs. We present example closed $\mu $ML programs with classical tautologies for types, including some inexpressible as closed programs in the original $\lambda \mu $-calculus, and some example tactic-driven $\mu $TP proofs of classical tautologies.}
}
@article{DBLP:journals/corr/BirkedalDJST17,
  author = {Lars Birkedal and
               Thomas Dinsdale{-}Young and
               Guilhem Jaber and
               Kasper Svendsen and
               Nikos Tzevelekos},
  title = {Trace Properties from Separation Logic Specifications},
  journal = {CoRR},
  volume = {abs/1702.02972},
  year = {2017},
  month = feb,
  archiveprefix = {arXiv},
  eprint = {1702.02972},
  url = {http://arxiv.org/abs/1702.02972},
  pdf = {https://arxiv.org/pdf/1702.02972},
  timestamp = {Wed, 07 Jun 2017 14:40:31 +0200},
  biburl = {http://dblp.uni-trier.de/rec/bib/journals/corr/BirkedalDJST17},
  bibsource = {dblp computer science bibliography, http://dblp.org},
  abstract = {We propose a formal approach for relating abstract separation logic library specifications with the trace properties they enforce on interactions between a client and a library. Separation logic with abstract predicates enforces a resource discipline that constrains when and how calls may be made between a client and a library. Intuitively, this can enforce a protocol on the interaction trace. This intuition is broadly used in the separation logic community but has not previously been formalised. We provide just such a formalisation. Our approach is based on using wrappers which instrument library code to induce execution traces for the properties under examination. By considering a separation logic extended with trace resources, we prove that when a library satisfies its separation logic specification then its wrapped version satisfies the same specification and, moreover, maintains the trace properties as an invariant. Consequently, any client and library implementation that are correct with respect to the separation logic specification will satisfy the trace properties.}
}
@inproceedings{SvendsenEtAl2018,
  author = {Svendsen, Kasper and Pichon-Pharabod, Jean and Doko, Marko and Lahav, Ori and Vafeiadis, Viktor},
  title = {A separation logic for a promising semantics},
  booktitle = {\textbf{ESOP 2018}},
  conf = {ESOP 2018},
  year = {2018},
  month = apr,
  pdf = {https://www.cl.cam.ac.uk/~jp622/a_separation_logic_for_a_promising_semantics.pdf},
  abstract = {We present SLR, the first expressive program logic for reasoning about concurrent programs under a weak memory model addressing the out-of-thin-air problem. Our logic includes the standard features from existing logics, such as RSL and GPS, that were previously known to be sound only under stronger memory models: (1) separation, (2) per-location invariants, and (3) ownership transfer via release-acquire synchronisation—as well as novel features for reasoning about (4) the absence of out-of-thin-air behaviours and (5) coherence. The logic is proved sound over the recent ``promising'' memory model of Kang et al., using a substantially different argument to soundness proofs of logics for simpler memory models.}
}
@article{Bauereiss:2017,
  author = {Bauerei{\ss}, Thomas and
            Pesenti Gritti, Armando and
            Popescu, Andrei and
            Raimondi, Franco},
  title = {CoSMed: A Confidentiality-Verified Social Media Platform},
  journal = {\textbf{J. Automated Reasoning}},
  optjournal = {\textbf{Journal of Automated Reasoning}},
  year = {2017},
  month = dec,
  day = {02},
  issn = {1573-0670},
  doi = {10.1007/s10817-017-9443-3},
  url = {https://doi.org/10.1007/s10817-017-9443-3},
  abstract = {This paper describes progress with our agenda of formal verification of information flow security for realistic systems. We present CoSMed, a social media platform with verified document confidentiality. The system's kernel is implemented and verified in the proof assistant Isabelle/HOL. For verification, we employ the framework of Bounded-Deducibility (BD) Security, previously introduced for the conference system CoCon. CoSMed is a second major case study in this framework. For CoSMed, the static topology of declassification bounds and triggers that characterized previous instances of BD Security has to give way to a dynamic integration of the triggers as part of the bounds. We also show that, from a theoretical viewpoint, the removal of triggers from the notion of BD Security does not restrict its expressiveness.}
}
@article{Gardner2017Verified,
  author = {Gardner, Philippa},
  title = {Verified trustworthy software systems},
  optjournal = {\textbf{Philosophical Transactions of the Royal Society of London A}: Mathematical, Physical and Engineering Sciences},
  journal = {\textbf{Philosophical Transactions of the Royal Society of London A}},
  year = {2017},
  volume = {375},
  number = {2104},
  month = sep,
  issn = {1364-503X},
  abstract = {Modern society is faced with a fundamental problem:
the reliability of complex, evolving software systems on
which society critically depends cannot be guaranteed by
the established, non-mathematical computer engineering
techniques such as informal prose specification and
ad hoc testing. The situation is worsening: modern
companies are moving fast, leaving little time for code
analysis and testing; the behaviour of concurrent and
distributed programs cannot be adequately assessed
using traditional testing methods; users of mobile
applications often neglect to apply software fixes;
and malicious users increasingly exploit even simple
programming errors, causing major security disruptions.
Building trustworthy, reliable software is becoming
harder and harder to achieve, while new business and
cybersecurity challenges make it of escalating critical
importance.},
  doi = {10.1098/rsta.2015.0408},
  pdf = {http://rsta.royalsocietypublishing.org/content/375/2104/20150408.full.pdf},
  publisher = {The Royal Society}
}
@inproceedings{DongolJRA2018,
  author = {Brijesh Dongol and
               Radha Jagadeesan and
               James Riely and
               Alasdair Armstrong},
  title = {On abstraction and compositionality for weak-memory linearisability},
  booktitle = {\textbf{{VMCAI} 2018}: Verification, Model Checking, and Abstract Interpretation - 19th International
               Conference},
  abstract = {Linearisability is the de facto standard correctness
	       condition for concurrent objects. Classical
	       linearisability assumes that the effect of a method
	       is captured entirely by the allowed sequences of
	       calls and returns. This assumption is inadequate in
	       the presence of relaxed memory models, where
	       happens-before relations are also of importance. In
	       this paper, we develop hb-linearisability for
	       relaxed memory models by extending the classical
	       notion with happens-before information. We consider
	       two variants: Real time hb-linearisability,
	       which adopts the classical view that time runs on a
	       single global clock, and causal hb-linearisability,
	       which eschews real-time and is appropriate for
	       systems without a global clock. For both variants,
	       we prove abstraction (so that programmers can reason
	       about a client program using the sequential
	       specification of an object rather than its more
	       complex concurrent implementation) and composition
	       (so that reasoning about independent objects can be
	       conducted in isolation).},
  conf = {VMCAI 2018},
  pages = {183--204},
  month = jan,
  year = {2018},
  editor = {Isil Dillig and
	       Jens Palsberg},
  series = {Lecture Notes in Computer Science},
  publisher = {Springer},
  url = {https://doi.org/10.1007/978-3-319-73721-8_9},
  doi = {10.1007/978-3-319-73721-8_9}
}
@article{tcp3draft,
  author = {Steve Bishop and Matthew Fairbairn and Hannes Mehnert and Michael Norrish and Tom Ridge and Peter Sewell and Michael Smith and Keith Wansbrough},
  title = {Engineering with Logic: Rigorous Test-Oracle Specification and Validation for {TCP/IP} and the {Sockets API}},
  journal = {\textbf{J. ACM}},
  month = dec,
  year = 2018,
  issue_date = {December 2018},
  volume = {66},
  number = {1},
  issn = {0004-5411},
  pages = {1:1--1:77},
  articleno = {1},
  numpages = {77},
  url = {http://doi.acm.org/10.1145/3243650},
  doi = {10.1145/3243650},
  acmid = {3243650},
  publisher = {ACM},
  pdf = {http://www.cl.cam.ac.uk/~pes20/Netsem/paper3.pdf},
  project = {http://www.cl.cam.ac.uk/~pes20/Netsem/index.html},
  recent = {true},
  topic = {netsem},
  abstract = {
Conventional computer engineering relies on test-and-debug development
processes, with the behaviour of common interfaces described (at best)
with prose specification documents.  But prose specifications cannot be used in
test-and-debug development in any automated way, and prose is a poor
medium for expressing complex (and loose) specifications.

The TCP/IP protocols and Sockets API are a good example of this:
they play a vital role in modern communication and computation, and
interoperability between implementations is essential.  But what
exactly they are is surprisingly obscure: their original development
focussed on ``rough consensus and running code'', augmented by prose
RFC specifications that do not precisely define what it means for an
implementation to be correct.  Ultimately, the actual standard is the
de facto one of the common implementations, including, for example,
the 15\,000--20\,000 lines of the BSD implementation --- optimised and
multithreaded C code, time-dependent, with asynchronous event
handlers, intertwined with the operating system, and
security-critical.

This paper reports on work done in the \emph{Netsem} project to
develop lightweight mathematically rigorous techniques that can be
applied to such systems: to specify their behaviour precisely (but
loosely enough to permit the required implementation variation) and to
test whether these specifications and the implementations correspond,
with specifications that are \emph{executable as test oracles}.
We developed post-hoc specifications of TCP, UDP, and the Sockets API, both of
the service that they provide to applications (in terms of TCP
bidirectional stream connections), and of the internal operation of
the protocol (in terms of TCP segments and UDP datagrams), together
with a testable abstraction function relating the two.  These specifications
are rigorous, detailed, readable, with broad coverage, and are rather
accurate.  Working within a general-purpose proof assistant (HOL4), we
developed \emph{language idioms} (within higher-order logic) in which
to write the specifications: operational semantics with
nondeterminism, time, system calls, monadic relational programming,
etc.  We followed an \emph{experimental semantics} approach,
validating the specifications against several thousand traces captured
from three implementations (FreeBSD, Linux, and WinXP).  Many
differences between these were identified, and a number of bugs.
Validation was done using a special-purpose \emph{symbolic model
  checker} programmed above HOL4.

Having demonstrated that our logic-based engineering techniques
suffice for handling real-world protocols, we argue that similar
techniques could be applied to future critical software infrastructure
at design time, leading to cleaner designs and (via
specification-based testing) more robust and predictable
implementations.  In cases where specification looseness can be
controlled, this should be possible with lightweight techniques,
without the need for a general-purpose proof assistant, at relatively
little cost.
}
}
@article{Dinsdale-Young2018perspective,
  author   = {Dinsdale-Young, Thomas and {da Rocha Pinto}, Pedro and Gardner, Philippa},
  title    = {A Perspective on Specifying and Verifying Concurrent Modules},
  journal  = {\textbf{Journal of Logical and Algebraic Methods in Programming}},
  year     = {2018},
  volume   = {98},
  pages    = {1--25},
  month    = aug,
  issn     = {2352-2208},
  abstract = {The specification of a concurrent program module, and the verification of implementations and clients with respect to such a specification, are difficult problems. A specification should be general enough that any reasonable implementation satisfies it, yet precise enough that it can be used by any reasonable client. We survey a range of techniques for specifying concurrent modules, using the example of a counter module to illustrate the benefits and limitations of each. In particular, we highlight four key concepts underpinning these techniques: auxiliary state, interference abstraction, resource ownership and atomicity. We demonstrate how these concepts can be combined to achieve two powerful approaches for specifying concurrent modules and verifying implementations and clients, which remove the limitations highlighted by the counter example.},
  doi      = {10.1016/j.jlamp.2018.03.003},
  keywords = {Concurrency, Specification, Program verification},
  url      = {http://www.sciencedirect.com/science/article/pii/S2352220817300871},
  pdf      = {http://psvg.doc.ic.ac.uk/publications/Dinsdale-Young2018perspective.pdf},
}
@inproceedings{Ntzik2018Concurrent,
  author = {Ntzik, Gian and {da Rocha Pinto}, Pedro and Sutherland, Julian and Gardner, Philippa},
  title = {A Concurrent Specification of {POSIX} File Systems},
  booktitle = {\textbf{ECOOP 2018}: 32nd European Conference on Object-Oriented Programming},
  optbooktitle = {32nd European Conference on Object-Oriented Programming (ECOOP 2018).},
  conf = {ECOOP 2018},
  year = {2018},
  month = jul,
  abstract = {POSIX is a standard for operating systems, with a substantial part devoted to specifying
  file-system operations. File-system operations exhibit complex concurrent behaviour, comprising
  multiple actions affecting different parts of the state: typically, multiple atomic reads followed by
  an atomic update. However, the standard's description of concurrent behaviour is unsatisfactory:
  it is fragmented; contains ambiguities; and is generally under-specified. We provide a formal
  concurrent specification of POSIX file systems and demonstrate scalable reasoning for clients.
  Our specification is based on a concurrent specification language, which uses a modern concurrent
  separation logic for reasoning about abstract atomic operations, and an associated refinement
  calculus. Our reasoning about clients highlights an important difference between reasoning about
  modules built over a heap, where the interference on the shared state is restricted to the operations
  of the module, and modules built over a file system, where the interference cannot be restricted
  as the file system is a public namespace. We introduce specifications conditional on context
  invariants used to restrict the interference, and apply our reasoning to the example of lock files.},
  doi = {10.4230/LIPIcs.ECOOP.2018.4},
  pdf = {http://psvg.doc.ic.ac.uk/publications/Ntzik2018Concurrent.pdf}
}
@inproceedings{DBLP:conf/ppdp/SantosMGDG18,
  author = {Jos{\'{e}} Fragoso Santos and
               Petar Maksimovi{\'{c}} and
               Th{\'{e}}otime Grohens and
               Julian Dolby and
               Philippa Gardner},
  title = {Symbolic Execution for {JavaScript}},
  booktitle = {\textbf{PPDP 2018}: Proceedings of the 20th International Symposium on Principles and
               Practice of Declarative Programming},
  conf = {PPDP 2018},
  pages = {11:1--11:14},
  year = {2018},
  month = sep,
  optcrossref = {DBLP:conf/ppdp/2018},
  url = {http://doi.acm.org/10.1145/3236950.3236956},
  doi = {10.1145/3236950.3236956},
  timestamp = {Tue, 14 Aug 2018 20:44:23 +0200},
  biburl = {https://dblp.org/rec/bib/conf/ppdp/SantosMGDG18},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  abstract = {We present a framework for trustworthy symbolic execution of JavaScript programs, whose aim is to assist developers in the testing of their code: the developer writes symbolic tests for which the framework provides concrete counter-models. We create the framework following a new, general methodology for designing compositional program analyses for dynamic languages. We prove that the underlying symbolic execution is sound and does not generate false positives. We establish additional trust by using the theory to precisely guide the implementation and by thorough testing. We apply our framework to whole-program symbolic testing of real-world JavaScript libraries and compositional debugging of separation logic specifications of JavaScript programs.}
}

This file was generated by bibtex2html 1.99.