From 6d805049a0a4f2fb41b0cee798ebf040181ab1d8 Mon Sep 17 00:00:00 2001
From: Tim Daly
Date: Sat, 7 Mar 2020 14:59:50 -0500
Subject: [PATCH] books/bookvolbib add references
Goal: Proving Axiom Sane
\index{Alturki, Musab A.}
\index{Moore, Brandon}
\begin{chunk}{axiom.bib}
@misc{Altu19,
author = "Alturki, Musab A. and Moore, Brandon",
title = {{K vs Coq as Language Verification Frameworks}},
year = "2019",
link = "\url{https://runtimeverification.com/blog/k-vs-coq-as-language-verification-frameworks-part-1-of-3/}"
}
\end{chunk}
\index{Rosu, Grigore}
\index{Lucanu, Dorel}
\index{Guth, Dwight}
\begin{chunk}{axiom.bib}
@misc{Rosu20,
author = "Rosu, Grigore and Lucanu, Dorel and Guth, Dwight",
title = {{The K Framework}},
year = "2020",
link = "\url{http://www.kframework.org/index.php/Main_Page}"
}
\end{chunk}
\index{Moore, Brandon}
\index{Pena, Lucas}
\index{Rosu, Grigore}
\begin{chunk}{axiom.bib}
@inproceedings{Moor18,
author = "Moore, Brandon and Pena, Lucas and Rosu, Grigore",
title = {{Program Verification by Coinduction}},
booktitle = "Programming Languages and Systems",
publisher = "Springer",
year = "2018",
pages = "589--618",
link = "\url{https://link.springer.com/content/pdf/10.1007%2F978-3-319-89884-1.pdf}",
abstract =
"We present a novel program verification approach based on
coinduction, which takes as input an operational semantics. No
intermediates like program logics or verification condition
generators are needed. Specifications can be written using any
state predicates. We implement our approach in Coq, giving a
certifying language-independent verification framework. Our proof
system is implemented as a single module imported unchanged into
language-specific proofs. Automation is reached by instantiating a
generic heuristic with language-specific tactics. Manual
assistance is also smoothly allowed at points the automation
cannot handle. We demonstrate the power and versatility of our
approach by verifying algorithms as complicated as Schorr-Waite
graph marking and instantiating our framework for object languages
in several styles of semantics. Finally, we show that our
coinductive approach subsumes reachability logic, a recent
language-independent sound and (relatively) complete logic for
program verification that has been instantiated with operational
semantics of languages as complex as C, Java and Javascript.",
paper = "Moor18.pdf"
}
\end{chunk}
\index{Carneiro, Mario}
\begin{chunk}{axiom.bib}
@misc{Carn19a,
author = "Carneiro, Mario",
title = {{The Type Theory of Lean}},
year = "2019",
link = "\url{https://github.com/digama0/lean-type-theory/releases/v1.0}",
abstract =
"This thesis is a presentation of dependent type theory with
inductive types, a hierarchy of universes, with an impredicative
universe of propositions, proof irrelevance, and subsingleton
elimination, along with axioms for propositional extensionality,
quotient types, and the axiom of choice. This theory is notable
for being the axiomatic framework of the Lean theorem prover. The
axiom system is given here in complete detail, including
``optional'' features of the type system such as {\bf let} binders
and definitions. We provide a reduction of the theory to a
finitely axiomatized fragment utilizing a fixed set of inductive
types (the {\bf W}type plus a few others), to ease the study of
this framework.
The metatheory of this theory (which we will call Lean) is studied. In
particular, we prove unique typing of the definitional equality,
and use this to construct the expected settheoretic model, from
which we derive consistency of Lean relative to {\bf ZFC+} \{there
are $n$ inaccessible cardinals $\vert~n<\omega$\} (a relatively
weak large cardinal assumption). As Lean supports models of
{\bf ZFC} with $n$ inaccessible cardinals, this is optimal.
We also show a number of negative results, where the theory is
less nice than we would like. In particular, type checking is
undecidable, and the type checking as implemented by the Lean
theorem prover is a decidable nontransitive underapproximation
of the typing judgment. Nontransitivity also leads to lack of
subject reduction, and the reduction relation does not satisfy the
Church-Rosser property, so reduction to a normal form does not
produce a decision procedure for definitional equality. However, a
modified reduction relation allows us to restore the Church-Rosser
property at the expense of guaranteed termination, so that unique
typing is shown to hold.",
paper = "Carn19a.pdf",
keywords = "printed"
}
\end{chunk}
\index{Carneiro, Mario}
\begin{chunk}{axiom.bib}
@misc{Carn19b,
author = "Carneiro, Mario",
title = {{The Type Theory of Lean (slides)}},
year = "2019",
link = "\url{https://github.com/digama0/lean-type-theory/releases/v1.0}",
paper = "Carn19b.pdf",
keywords = "printed"
}
\end{chunk}
\index{Milner, Robin}
\begin{chunk}{axiom.bib}
@techreport{Miln73,
author = "Milner, Robin",
title = {{Models of LCF}},
type = "technical report",
institution = "Stanford Artificial Intelligence Laboratory",
number = "STAN-CS-73-332",
year = "1973",
link = "\url{http://i.stanford.edu/pub/cstr/reports/cs/tr/73/332/CS-TR-73-332.pdf}",
abstract =
"LCF is a deductive system for computable functions proposed by
D. Scott in 1969 in an unpublished memorandum. The purpose of the
present paper is to demonstrate the soundness of the system with
respect to certain models, which are partially ordered domains of
continuous functions. This demonstration was supplied by Scott in
his memorandum; the present paper is merely intended to make this
work more accessible.",
paper = "Miln73.pdf",
keywords = "printed"
}
\end{chunk}
\index{Jenkins, Kris}
\begin{chunk}{axiom.bib}
@misc{Jenk18,
author = "Jenkins, Kris",
title = {{Communicating with Types}},
year = "2018",
link = "\url{https://vimeo.com/302682323}"
}
\end{chunk}
\index{Taylor, Sophie}
\begin{chunk}{axiom.bib}
@misc{Tayl16,
author = "Taylor, Sophie",
title = {{A working (class) Introduction to Homotopy Type Theory}},
year = "2016",
link = "\url{https://www.youtube.com/watch?v=xv6SwPn1dlc}",
keywords = "DONE"
}
\end{chunk}
\index{Voevodsky, Vladimir}
\begin{chunk}{axiom.bib}
@misc{Voev16,
author = "Voevodsky, Vladimir",
title = {{Univalent Foundations of Mathematics}},
year = "2016",
link = "\url{https://www.youtube.com/watch?v=9f4sS9sX2A}"
}
\end{chunk}
\index{Zach, Richard}
\begin{chunk}{axiom.bib}
@book{Zach20,
author = "Zach, Richard",
title = {{The Open Logic Text}},
year = "2020",
publisher = "The Open Logic Project",
link = "\url{http://builds.openlogicproject.org/open-logic-complete.pdf}",
paper = "Zach20.pdf"
}
\end{chunk}
\index{Bezhanishvili, Nick}
\index{de Jongh, Dick}
\begin{chunk}{axiom.bib}
@misc{Bezh05,
author = "Bezhanishvili, Nick and de Jongh, Dick",
title = {{Intuitionistic Logic}},
year = "2005",
link = "\url{https://www.cs.le.ac.uk/people/nb118/Publications/ESSLLI'05.pdf}",
paper = "Bezh05.pdf"
}
\end{chunk}
\index{Milne, Peter}
\begin{chunk}{axiom.bib}
@article{Miln94,
author = "Milne, Peter",
title = {{Classical Harmony: Rules of Inference and the Meaning of
the Logical Constants}},
journal = "Synthese",
volume = "100",
number = "1",
pages = "49--94",
year = "1994",
abstract =
"The thesis that, in a system of natural deduction, the meaning of
a logical constant is given by some or all of its introduction and
elimination rules has been developed recently by Dummett, Prawitz,
Tennant, and others, by the addition of harmony constraints.
Introduction and elimination rules for a logical constant must be
in harmony. By deploying harmony constraints, these authors have
arrived at logics no stronger than intuitionistic propositional
logic. Classical logic, they maintain, cannot be justified from
this prooftheoretic perspective. This paper argues that, while
classical logic can be formulated so as to satisfy a number of
harmony constraints, the meanings of the standard logical
constants cannot all be given by their introduction and/or
elimination rules; negation, in particular, comes under close
scrutiny.",
paper = "Miln94.pdf"
}
\end{chunk}
\index{Tennant, Neil}
\begin{chunk}{axiom.bib}
@article{Tenn79,
author = "Tennant, Neil",
title = {{Entailment and Proofs}},
journal = "Proc. of the Aristotelian Society",
volume = "79",
pages = "167--189",
year = "1979",
paper = "Tenn79.pdf"
}
\end{chunk}
\index{Montanaro, Ashley}
\begin{chunk}{axiom.bib}
@misc{Mont12,
author = "Montanaro, Ashley",
title = {{Computational Complexity Lecture Notes}},
year = "2012",
link = "\url{https://people.maths.bris.ac.uk/~csxam/teaching/cclecturenotes.pdf}",
paper = "Mont12.pdf"
}
\end{chunk}
\index{Grayson, Daniel R.}
\begin{chunk}{axiom.bib}
@misc{Gray18a,
author = "Grayson, Daniel R.",
title = {{The Mathematical Work of Vladimir Voevodsky}},
year = "2018",
link = "\url{https://www.youtube.com/watch?v=BanMgvdKP8E}",
keywords = "DONE"
}
\end{chunk}
\index{Gentzen, G.}
\begin{chunk}{axiom.bib}
@article{Gent35,
author = "Gentzen, G.",
title = {{Untersuchungen {\"u}ber das logische Schliessen I and II}},
journal = "Mathematische Zeitschrift",
year = "1935",
publisher = "Springer",
paper = "Gent35.pdf"
}
\end{chunk}
\index{Vapnik, Vladimir}
\begin{chunk}{axiom.bib}
@misc{Vapn20,
author = "Vapnik, Vladimir",
title = {{Predicates, Invariants, and the Essence of Intelligence}},
year = "2020",
link = "\url{https://lexfridman.com/vladimir-vapnik-2}",
keywords = "DONE"
}
\end{chunk}
\index{Baruch, Robert}
\begin{chunk}{axiom.bib}
@misc{Baru19,
author = "Baruch, Robert",
title = {{Very Basic Introduction to Formal Verification}},
year = "2019",
link = "\url{https://www.youtube.com/watch?v=9e7F1XhjhKw}",
keywords = "DONE"
}
\end{chunk}
\index{Carneiro, Mario}
\begin{chunk}{axiom.bib}
@misc{Carn19c,
author = "Carneiro, Mario",
title = {{Specifying Verified x86 Software from Scratch}},
year = "2019",
link = "\url{https://arxiv.org/pdf/1907.01283.pdf}",
paper = "Carn19c.pdf",
keywords = "printed"
}
\end{chunk}
\index{Bendersky, Eli}
\begin{chunk}{axiom.bib}
@misc{Bend10,
author = "Bendersky, Eli",
title = {{Top Down Operator Precedence}},
year = "2010",
link = "\url{https://eli.thegreenplace.net/2010/01/02/top-down-operator-precedence-parsing}",
keywords = "DONE"
}
\end{chunk}
\index{Baruch, Robert}
\begin{chunk}{axiom.bib}
@misc{Baru19a,
author = "Baruch, Robert",
title = {{Cmod A7 Reference Manual}},
year = "2019",
link = "\url{https://reference.digilentinc.com/_media/reference/programmable-logic/cmod-a7/cmod_a7_rm.pdf}",
paper = "Baru19a.pdf",
keywords = "printed"
}
\end{chunk}
\index{Baruch, Robert}
\begin{chunk}{axiom.bib}
@misc{Baru19b,
author = "Baruch, Robert",
title = {{Cmod A7 Schematic}},
year = "2019",
link = "\url{https://reference.digilentinc.com/_media/cmod-a7/cmod_a7_sch.pdf}",
paper = "Baru19b.pdf",
keywords = "printed"
}
\end{chunk}
\index{Reis, Leonardo Vieira dos Santos}
\index{Di Iorio, Vladimir Oliveira}
\index{Bigonha, Roberto S.}
\begin{chunk}{axiom.bib}
@inproceedings{Reis14,
author = "Reis, Leonardo Vieira dos Santos and Di Iorio, Vladimir Oliveira and
Bigonha, Roberto S.",
title = {{A Mixed Approach for Building Extensible Parsers}},
booktitle = "Programming Language",
publisher = "Springer",
volume = "LNCS 8771",
pages = "1--15",
year = "2014",
abstract =
"For languages whose syntax is fixed, parsers are usually built
with a static structure. The implementation of features like macro
mechanisms or extensible languages requires the use of parsers
that may be dynamically extended. In this work, we discuss a mixed
approach for building efficient topdown dynamically extensible
parsers. Our view is based on the fact that a large part of the
parser code can be statically compiled and only the parts that are
dynamic should be interpreted for a more efficient processing. We
propose the generation of code for the base parser, in which hooks
are included to allow efficient extension of the underlying
grammar and activation of a grammar interpreter whenever it is
necessary to use an extended syntax. As a proof of concept, we
present a prototype implementation of a parser generator using
Adaptable Parsing Expression Grammars (APEG) as the underlying
method for syntax definition. We show that APEG has features which
allow an efficient implementation using the proposed mixed
approach.",
paper = "Reis14.pdf"
}
\end{chunk}
\index{Ford, Bryan}
\begin{chunk}{axiom.bib}
@article{Ford04,
author = "Ford, Bryan",
title = {{Parsing Expression Grammars: A RecognitionBased Syntactic
Foundation}},
journal = "SIGPLAN Notices",
volume = "39",
number = "1",
pages = "111--122",
year = "2004",
abstract =
"For decades we have been using Chomsky's generative system of
grammars, particularly contextfree grammars (CFGs) and regular
expressions (REs), to express the syntax of programming languages
and protocols. The power of generative grammars to express
ambiguity is crucial to their original purpose of modelling
natural languages, but this very power makes it unnecessarily
difficult both to express and to parse machineoriented languages
using CFGs. Parsing Expression Grammars (PEGs) provide an
alternative, recognitionbased formal foundation for describing
machineoriented syntax, which solves the ambiguity problem by not
introducing ambiguity in the first place. Where CFGs express
nondeterministic choice between alternatives, PEGs instead use
{\sl prioritized choice}. PEGs address frequently felt
expressiveness limitations of CFGs and REs, simplifying syntax
definitions and making it unnecessary to separate their lexical
and hierarchical components. A lineartime parser can be built for
any PEG, avoiding both the complexity and fickleness of LR parsers
and the inefficiency of generalized CFG parsing. While PEGs
provide a rich set of operators for constructing grammars, they
are reducible to two minimal recognition schemas developed around
1970, TS/TDPL and gTS/GTDPL, which are here proven equivalent in
effective recognition power.",
paper = "Ford04.pdf",
keywords = "printed"
}
\end{chunk}
\index{Gueneau, Armael}
\begin{chunk}{axiom.bib}
@phdthesis{Guen19,
author = "Gueneau, Armael",
title = {{Mechanized Verification of the Correctness and Asymptotic
Complexity of Programs}},
school = "University of Paris",
year = "2019",
link = "\url{http://gallium.inria.fr/~agueneau/phd/manuscript.pdf}",
abstract =
"This dissertation is concerned with the question of formally
verifying that the implementation of an algorithm is not only
functionally correct (it always returns the right result), but
also has the right asymptotic complexity (it reliably computes the
result in the expected amount of time).
In the algorithms literature, it is standard practice to
characterize the performance of an algorithm by indicating its
asymptotic time complexity, typically using Landau's ``big-O''
notation. We first argue informally that asymptotic complexity
bounds are equally useful as formal specifications, because they
enable modular reasoning: a $O$ bound abstracts over the concrete
cost expression of a program, and therefore abstracts over the
specifics of its implementation. We illustrate -- with the help of
small illustrative examples -- a number of challenges with the use
of the $O$ notation, in particular in the multivariate case, that
might be overlooked when reasoning informally.
We put these considerations into practice by formalizing the $O$
notation in the Coq proof assistant, and by extending an existing
program verification framework, CFML, with support for a
methodology enabling robust and modular proofs of asymptotic
complexity bounds. We extend the existing framework of Separation
Logic with Time Credits, which allows to reason at the same time
about correctness and time complexity, and introduce negative time
credits. Negative time credits increase the expressiveness of the
logic, and enable convenient reasoning principles as well as
elegant specifications. At the level of specifications, we show
how asymptotic complexity specifications using $O$ can be
integrated and composed within Separation Logic with Time
Credits. Then, in order to establish such specifications, we
develop a methodology that allows proofs of complexity in
Separation Logic to be robust and carried out at a relatively high
level of abstraction, by relying on two key elements: a mechanism
for collecting and deferring constraints during the proof, and a
mechanism for semiautomatically synthesizing cost expressions
without loss of generality.
We demonstrate the usefulness and practicality of our approach on
a number of increasingly challenging case studies. These include
algorithms whose complexity analysis is relatively simple (such as
binary search, which is nonetheless out of the scope of many
automated complexity analysis tools) and data structures (such as
Okasaki's binary random access lists). In our most challenging
case study, we establish the correctness and amortized complexity
of a stateoftheart incremental cycle detection algorithm: our
methodology scales up to highly nontrivial algorithms whose
complexity analysis intimately depends on subtle functional
invariants, and furthermore makes it possible to formally verify
OCaml code which can then actually be used as part of real world
programs.",
paper = "Guen19.pdf"
}
\end{chunk}
\begin{chunk}{axiom.bib}
@misc{Fade17,
author = "Unknown",
title = {{Strongly Connected and Completely Specified Moore
Equivalent of a Mealy Machine}},
year = "2017",
link = "\url{https://cs.stackexchange.com/questions/81127/strongly-connected-and-completely-specified-moore-equivalent-of-a-mealy-machine}",
keywords = "DONE"
}
\end{chunk}
\index{Hoare, C.A.R}
\begin{chunk}{axiom.bib}
@misc{Hoar04,
author = "Hoare, C.A.R",
title = {{Unified Theories of Programming}},
year = "2004",
link = "\url{https://fi.ort.edu.uy/innovaportal/file/20124/1/04hoard_unified_theories.pdf}",
abstract =
"Professional practice in a mature engineering discipline is based
on relevant scientific theories, usually expressed in the language
of mathematics. A mathematical theory of programming aims to
provide a similar basis for specification, design and
implementation of computer programs. The theory can be presented
in a variety of styles, including
\begin{enumerate}
\item Denotational, relating a program to a specification of its
observable properties and behaviour
\item Algebraic, providing equations and inequalities for
comparison, transformation and optimisation of designs and
programs.
\item Operational, describing individual steps of a possible
mechanical implementation
\end{enumerate}
This paper presents simple theories of sequential
nondeterministic programming in each of these three styles; by
deriving each presentation from its predecessor in a cyclic
fashion, mutual consistency is assured.",
paper = "Hoar04.pdf",
keywords = "printed"
}
\end{chunk}
\index{Davis, Ernest}
\begin{chunk}{axiom.bib}
@misc{Davi19,
author = "Davis, Ernest",
title = {{The Use of Deep Learning for Symbolic Integration}},
year = "2019",
link = "\url{https://arxiv.org/pdf/1912.05752.pdf}",
abstract =
"Lample and Charton (2019) describe a system that uses deep
learning technology to compute symbolic, indefinite integrals, and
to find symbolic solutions to first and secondorder ordinary
differential equations, when the solutions are elementary
functions. They found that, over a particular test set, the system
could find solutions more successfully than sophisticated packages
for symbolic mathematics such as Mathematica run with a long
timeout. This is an impressive accomplishment, as far as it
goes. However, the system can handle only a quite limited subset
of the problems that Mathematica deals with, and the test set has
significant builtin biases. Therefore the claim that this
outperforms Mathematica on symbolic integration needs to be very
much qualified.",
paper = "Davi19.pdf",
keywords = "printed, DONE"
}
\end{chunk}
\index{Pierce, Benjamin}
\index{Appel, Andrew W.}
\index{Weirich, Stephanie}
\index{Zdancewic, Steve}
\index{Shao, Zhong}
\index{Chlipala, Adam}
\begin{chunk}{axiom.bib}
@misc{Pier16,
author = "Pierce, Benjamin and Appel, Andrew W. and Weirich, Stephanie
and Zdancewic, Steve and Shao, Zhong and Chlipala, Adam",
title = {{The Science of Deep Specification}},
year = "2016",
link = "\url{https://www.cis.upenn.edu/~bcpierce/papers/deepspechcss2016slides.pdf}",
paper = "Pier16.pdf"
}
\end{chunk}
\index{Pierce, Benjamin}
\begin{chunk}{axiom.bib}
@misc{Pier18,
author = "Pierce, Benjamin",
title = {{The Science of Deep Specification}},
year = "2018",
link = "\url{https://www.cis.upenn.edu/~bcpierce/papers/pierceetaps2018.pdf}",
paper = "Pier18.pdf"
}
\end{chunk}
\index{Chlipala, Adam}
\index{Arvind}
\index{Sherman, Benjamin}
\index{Choi, Joonwon}
\index{Vijayaraghavan, Murali}
\begin{chunk}{axiom.bib}
@misc{Chli17b,
author = "Chlipala, Adam and Arvind and Sherman, Benjamin and
Choi, Joonwon and Vijayaraghavan, Murali",
title = {{Kami: A Platform for HighLevel Parametric Hardware
Specification and its Modular Verification}},
year = "2017",
abstract =
"It has become fairly standard in the programming languages
research world to verify functional programs in proof assistants
using induction, algebraic simplification, and rewriting. In this
paper, we introduce Kami, a Coq library that uses labelled
transition systems to enable similar expressive and modular
reasoning for hardware designs expressed in the style of the
Bluespec language. We can specify, implement, and verify realistic
designs entirely within Coq, ending with automatic extraction into
a pipeline that bottoms out in FPGAs. Our methodology has been
evaluated in a case study verifying an infinite family of
multicore systems, with cachecoherent shared memory and pipelined
cores implementing (the base integer subset of) the RISCV
instruction set.",
paper = "Chli17b.pdf",
keywords = "printed"
}
\end{chunk}
\index{Appel, Andrew W.}
\index{Beringer, Lennart}
\index{Chlipala, Adam}
\index{Pierce, Benjamin C.}
\index{Shao, Zhong}
\index{Weirich, Stephanie}
\index{Zdancewic, Steve}
\begin{chunk}{axiom.bib}
@article{Appe17a,
author = "Appel, Andrew W. and Beringer, Lennart and Chlipala, Adam
and Pierce, Benjamin C. and Shao, Zhong and Weirich, Stephanie
and Zdancewic, Steve",
title = {{Position Paper: The Science of Deep Specification}},
journal = "Philosophical Transactions of the Royal Society",
volume = "375",
year = "2017",
link = "\url{https://royalsocietypublishing.org/doi/pdf/10.1098/rsta.2016.0331}",
abstract =
"We introduce our efforts within the project ``The Science of Deep
Specification'' to work out the key formal underpinnings of
industrial-scale formal specifications of software and hardware
components, anticipating a world where large verified systems are
routinely built out of smaller verified components that are also
used by many other projects. We identify an important class of
specification that has already been used in a few experiments that
connect strong componentcorrectness theorems across the work of
different teams. To help popularize the unique advantages of that
style, we dub it {\sl deep specification}, and we say that it
encompasses specifications that are {\sl rich}, {\sl twosided},
{\sl formal}, and {\sl live} (terms that we define in the
article). Our core team is developing a proof-of-concept system
(based on the Coq proof assistant) whose specification and
verification work is divided across largely decoupled subteams at
our four institutions, encompassing hardware microarchitecture,
compilers, operating systems and applications, along with
crosscutting principles and tools for effective specification. We
also aim to catalyse interest in the approach, not just by basic
researchers but also by users in industry.
This article is part of the themed issue ``Verified trustworthy
software systems''",
paper = "Appe17a.pdf",
keywords = "printed"
}
\end{chunk}
\index{Braibant, Thomas}
\index{Chlipala, Adam}
\begin{chunk}{axiom.bib}
@inproceedings{Brai13,
author = "Braibant, Thomas and Chlipala, Adam",
title = {{Formal Verification of Hardware Synthesis}},
booktitle = "Computer Aided Verification (CAV'13)",
publisher = "unknown",
year = "2013",
abstract =
"We report on the implementation of a certified compiler for a
highlevel hardware description language (HDL) called FeSi
(FEatherweight Synthesis). FeSi is a simplified version of
Bluespec, an HDL based on a notion of guarded atomic
actions. FeSi is defined as a dependently typed deep embedding in
Coq. The target language of the compiler corresponds to a
synthesisable subset of Verilog or VHDL. A key aspect of our
approach is that input programs to the compiler can be defined and
proved correct inside Coq. Then, we use extraction and a Verilog
backend (written in OCaml) to get a certified version of a
hardware design.",
paper = "Brai13.pdf",
keywords = "printed"
}
\end{chunk}
\index{Timany, Amin}
\index{Sozeau, Matthieu}
\begin{chunk}{axiom.bib}
@inproceedings{Tima18,
author = "Timany, Amin and Sozeau, Matthieu",
title = {{Cumulative Inductive Types in Coq}},
booktitle = "3rd Conf. on Formal Structures for Computation and
Deduction",
publisher = "HAL",
year = "2018",
link = "\url{https://hal.inria.fr/hal-01952037/document}",
abstract =
"In order to avoid wellknown paradoxes associated with
selfreferential definitions, highorder dependent type theories
stratify the theory using a countably infinite hierarchy of
universes (also known as sorts), $Type_0 : Type_1,\ldots$ Such
type systems are called cumulative if for any type $A$ we have
that $A:Type_i$ implies $A:Type_{i+1}$. The Predicative Calculus
of Inductive Constructions (pCIC) which forms the basis of the Coq
proof assistant, is one such system. In this paper we present the
Predicative Calculus of Cumulative Inductive Constructions
(pCUIC) which extends the cumulativity relation to inductive
types. We discuss cumulative inductive types as present in Coq 8.7
and their application to formalization and definitional
translations.",
paper = "Tima18.pdf"
}
\end{chunk}
\index{Gilbert, Gaetan}
\begin{chunk}{axiom.bib}
@phdthesis{Gilb19,
author = "Gilbert, Gaetan",
title = {{A Type Theory with Definitional ProofIrrelevance}},
school = "Ecole Nationale Superieure Mines-Telecom Atlantique",
year = "2019",
link = "\url{https://gitlab.com/SkySkimmer/thesis/-/jobs/artifacts/master/download?job=build}",
paper = "Gilb19.zip"
}
\end{chunk}
\index{Awodey, Steve}
\begin{chunk}{axiom.bib}
@misc{Awod16,
author = "Awodey, Steve",
title = {{Univalence as a Principle of Logic}},
year = "2016",
link = "\url{https://www.andrew.cmu.edu/user/awodey/preprints/ualp.pdf}",
abstract =
"It is sometimes convenient or useful in mathematics to treat
isomorphic structures as the same. The recently proposed
Univalence Axiom for the foundations of mathematics elevates this
idea to a foundational principle in the setting of Homotopy Type
Theory. It states, roughly, that isomorphic structures can be
identified. We explore the motivations and consequences, both
mathematical and philosophical, of making such a new logical
postulate.",
paper = "Awod16.pdf",
keywords = "printed"
}
\end{chunk}
\index{McCorduck, Pamela}
\begin{chunk}{axiom.bib}
@book{Mcco79,
author = "McCorduck, Pamela",
title = {{Machines Who Think}},
year = "1979",
publisher = "Freeman",
keywords = "shelf, DONE"
}
\end{chunk}
\index{Gabriel, Richard}
\begin{chunk}{axiom.bib}
@book{Gabr96,
author = "Gabriel, Richard",
title = {{Patterns of Software}},
link = "\url{https://www.dreamsongs.com/Files/PatternsOfSoftware.pdf}",
year = "1996",
publisher = "Oxford University Press",
paper = "Gabr96.pdf"
}
\end{chunk}
\index{Gabriel, Richard}
\begin{chunk}{axiom.bib}
@book{Gabr85,
author = "Gabriel, Richard",
title = {{Performance and Evaluation of Lisp Systems}},
link = "\url{https://www.dreamsongs.com/Files/Timrep.pdf}",
year = "1985",
publisher = "MIT Press",
paper = "Gabr85.pdf"
}
\end{chunk}
\index{Lucas, J.R.}
\begin{chunk}{axiom.bib}
@misc{Luca03,
author = "Lucas, J.R.",
title = {{Minds, Machines and Godel}},
year = "2003",
link =
"\url{https://pdfs.semanticscholar.org/bde3/b731bf73ef6052e34c4465e57718c03b13f8.pdf}",
paper = "Luca03.pdf"
}
\end{chunk}
\index{Lem, Stanislaw}
\begin{chunk}{axiom.bib}
@book{Lemx68,
author = "Lem, Stanislaw",
title = {{His Master's Voice}},
publisher = "MIT Press",
year = "1968"
}
\end{chunk}

books/bookvol15.pamphlet  1408 +++++++++++++++++++++++++++++++++++++++
books/bookvolbib.pamphlet  1102 ++++++++++++++++++++++++++++++
changelog  3 +
patch  1090 ++++++++++++++++++
src/axiomwebsite/patches.html  2 +
5 files changed, 3117 insertions(+), 488 deletions(-)
diff git a/books/bookvol15.pamphlet b/books/bookvol15.pamphlet
index 4f2a6a1..2e45520 100755
 a/books/bookvol15.pamphlet
+++ b/books/bookvol15.pamphlet
@@ 490,6 +490,14 @@ November 10, 2003 ((iHy))
% NOTE REWRITE LOOPS INTO FUNCTIONS
+\begin{quote}
+{\bf Consciousness is an emergent phenomenon, like a traffic jam.
+It requires a certain level of complexity to achieve a certain
+level of intelligence. Thus, it is not contained within a logic
+nor bounded by Turing restrictions.}\\
+ Tim Daly
+\end{quote}
+
\chapter{Motivation}
In the logical traditional fallacy of ``appeal to authority'' I
@@ 889,7 +897,7 @@ viewpoints. We will try not to claim that this effort is ``doing
mathematics'', only that it is ``increasing the trust in the
algorithms so they can be used in interactive proof systems''.
Make no mistake, this is an enourmous task.
+Make no mistake, this is an enormous task.
It requires a deep understanding of how Axiom implements firstorder
dependent type theory. Dependent types are undecidable so there have
@@ 905,9 +913,9 @@ theory is used? How can it be factored into categories? How can we
show that all inherited paths form sound and complete types?
It requires a deep understanding of the Calculus of Construction with
induction. Coq and Lean implement this form of logic. They are both at
the leading edge of the field and still under active development and
debate.
+induction. Coq and Lean \cite{Carn19a} implement this form of
+logic. They are both at the leading edge of the field and still under
+active development and debate.
It requires a deep understanding of Program Verification. Coq and Lean
do not (yet) have libraries that directly support program
@@ 979,6 +987,245 @@ types is proved formally at the same time as it is being synthesized.}\\
Robert Harper uses it as the basis for his Computational Type Theory
Course \cite{Harp18}.
+\section{Intellectual Complexity}
+
+There is a considerable gap between writing a program and proving the
+program correct. Languages like Coq, and even the terms within Coq
+such as ``inductive'', and ``constructor'' are not normally part of
+the programmer's world.
+
+There is a very slow convergence of syntax in languages like ML and
+Haskell but their emphasis is on type theory as a ``checking'' bit of
+machinery rather than on type theory as a ``proof'' bit of machinery.
+
+\subsection{Alturki and Moore \cite{Altu19}}
+
+Consider a simple example of a Coq program proof.
+
+We need to specify three things:
+\begin{itemize}
+\item {\bf L}, a formal model of the syntax and semantics of the
+programming language
+\item {\bf P}, a formal representation in model {\bf L}
+\item {\bf S}, a specification of the property to be verified
+\end{itemize}
+
+\subsection{An example}
+
+See the example code in the appendix \ref{Coq Example Code}.
+
+Given a program
+\begin{verbatim}
+sum = 0;
+while (!(n <= 0)) {
+ sum = sum + n;
+ n = n - 1;
+}
+\end{verbatim}
+we need to have
+\begin{itemize}
+\item a formal model of the semantics of the language used
+\item a precise specification of the program structure
+\item a specification that this computes the sum of integers
+from 1 to n.
+\end{itemize}
+
+\subsection{Specification of the example in Coq}
+
+In Coq an arithmetic expression can be defined as:
+\begin{verbatim}
+Inductive AExp :=
+ | var : string -> AExp
+ | con : Z -> AExp
+ | plus : AExp -> AExp -> AExp
+\end{verbatim}
+where variables use constructor {\bf var} applied to a string,
+constants use constructor {\bf con} applied to an integer (Z
+is the set of integers), and the {\bf plus} constructor takes
+2 {\bf AExp} arguments and returns a {\bf AExp} result.
+
+Programs are defined as
+\begin{verbatim}
+Inductive Pgm :=
+ pgm : list string -> Stmt -> Pgm.
+\end{verbatim}
+
+The summation program above become essentially an abstract
+syntax tree
+\begin{verbatim}
+Definition sum_pgm N : Pgm :=
+ pgm ["n"; "sum"]
+(seq (assign "n" (con N))
+(seq (assign "sum" (con 0))
+ (while (not (le (var "n") (con 0)))
+ (seq (assign "sum" (plus (var "sum") (var "n")))
+ (assign "n" (plus (var "n") (con (-1)))))))).
+\end{verbatim}
+
+\subsection{Semantics of the example in Coq}
+
+Note that we are using operational semantics here.
+
+The semantics of arithmetic expressions is captured by a binary
+transition relation on states, where a state is a pair
+{\bf (AExp, Env)} consisting of an arithmetic expression
+{\bf AExp} and the environment {\bf Env} in which {\bf AExp} is to be
+evaluated.
+
+This binary transition relation can be defined in Coq as an
+inductively defined proposition (of type Prop), {\bf step\_e}, that
+tells us whether a given pair of states belongs to the relation.
+\begin{verbatim}
+Inductive step_e : (AExp * Env) -> (AExp * Env) -> Prop :=
+ | step_var: forall v x env, get v env = Some x ->
+ step_e (var v, env) (con x, env)
+ | step_plus: forall x y env,
+ step_e (plus (con x) (con y), env) (con (Z.add x y), env)
+ | cong_plus_r: forall e1 e2 e2' env env',
+ step_e (e2, env) (e2', env') ->
+ step_e (plus e1 e2, env) (plus e1 e2', env')
+ | cong_plus_l: forall e2 e1 e1' env env',
+ step_e (e1, env) (e1', env') ->
+ step_e (plus e1 e2, env) (plus e1' e2, env')
+\end{verbatim}
+
+The {\bf step\_var} states that the program variable {\bf var~v}
+evaluates in one step to its value {\bf con~x} in the environment
+{\bf env} ({\bf x} is the integer value obtained from the function
+application {\bf get~v~env}, the value to which {\bf v} is mapped in
+the {\bf env}, and this holds for all program variables and
+environments (recall that {\bf con} is the constructor for integer
+constants). Another example is the {\bf step\_plus} case, which states
+that any arithmetic expression of the form
+\begin{verbatim}
+ plus (con x) (con y)
+\end{verbatim}, in which the operands of addition have already been
+evaluated to constant values, evaluates in one step to the value that
+is the sum of the operands, regardless of the current environment
+({\bf Z.add} is the integer addition operation). Note how
+{\bf step\_plus}, {\bf cong\_plus\_r}, and {\bf cong\_plus\_l}
+collectively specify call-by-value semantics of {\bf plus}.
+
+\subsection{Testing execution}
+
+Using the Coq definition of {\bf sum\_pgm} above, we can test that
+{\bf sum\_pgm~100} produces the expected result, 5050.
+\begin{verbatim}
+Lemma test_execution :
+ clos_refl_trans_1n _ step_p
+ (sum_pgm 100,[])
+ (pgm nil skip, [("sum",5050);("n",0)]).
+\end{verbatim}
+
+An execution trace is constructed by the reflexivetransitive closure
+of the {\bf step\_p} relation (see \ref{Coq Example Code}.), computed
+by Coq's inductively defined proposition
+{\bf clos\_refl\_trans\_1n}. Note that the program in the final state
+is the command {\bf skip}, requiring that the program be fully
+executed.
+
+Completing the proof requires picking the right clause of the step
+relation for each step of execution.
+
+\subsection{Specifying correctness goals}
+
+We need to formalize the statement of the property we wish to verify as
+a reachability property. Following the language independent approach
+of program verification by coinduction \cite{Moor18},
+we define a program property
+as a reachability claim on program states of the form
+{\bf (Pgm~*~Env)}. In particular, using the coinductive proof machinery
+(see \ref{Coq Example Code}), the main correctness property of
+{\bf sum} can be defined as the following inductive proposition
+(recall that the quoted strings ``n'' and ``sum'' are the program
+variables, while {\bf n} is a universally quantified logical name
+denoting a symbolic value)
+\begin{verbatim}
+Inductive sum_spec : Spec (Pgm * Env) :=
+ | sum_claim: forall n, 0 <= n ->
+ sum_spec
+ (pgm ["n"; "sum"]
+ (seq (assign "n" (con n))
+ (seq (assign "sum" (con 0))
+ (while (not (le (var "n") (con 0)))
+ (seq (assign "sum" (plus (var "sum") (var "n")))
+ (assign "n" (plus (var "n") (con (-1))))))))
+ ,[])
+ (fun cfg' => cfg' = (pgm [] skip, [("sum",((n + 1) * n)/2);("n",0)]))
+\end{verbatim}
+
+{\bf Spec} is the general type of (language independent) program
+specifications (reachability claims), instantiated here with states
+{\bf Spec~(Pgm~*~Env)}. The specification states that for all
+nonnegative values {\bf n}, the initial pair consisting of the
+program {\bf sum} and the empty environment {\bf []} reaches a state
+having the empty program {\bf skip} as its first component and an
+environment in its second component that maps the program variables
+``n'' to zero and ``sum'' to the value of the expression
+{\bf ((n~+~1)~*~n)/2}.
+
+Other properties, e.g. the loop invariant for {\bf sum}, may also need
+to be defined in addition to the main correctness property. In Coq,
+these properties are expressed as propositions on program states, or
+as instantiated {\bf Spec} propositions in the case when the
+coinductive program verification
+approach described here is used. Coq tends to be more verbose
+requiring having other, smaller, properties specified and proved
+including correctness properties of environment manipulation
+operations.
+
+\subsection{Verifying correctness}
+
+In a language verification framework, deductive program verification
+entails showing that a property {\bf S}, like the main correctness
+property of {\bf sum}, follows logically from a given model {\bf L} of
+the language and a model {\bf P} of the program, along with other
+smaller intermediate properties that have already been shown to hold in
+{\bf L} and {\bf P}.
+
+Following the languageindependent coinductive verification approach
+(see \cite{Moor18}), a program is shown to meet its specification by
+(1) using the step relation defining the semantics of the language in
+which the program is written as a symbolic execution engine, and (2)
+using simple coinductive reasoning on circular behaviors emanating
+from recursive and iterative constructs in the language (such as
+loops). The machinery specified in Coq allowing the reachability logic
+proof strategy can be found in the appendix (see
+\ref{Coq Example Code}).
+
+To prove correctness we need to specify and show the loop
+invariant. For that, we introduce another constructor of the type
+{\bf sum\_spec}
+\begin{verbatim}
+ | sum_loop_claim : forall env n, get "n" env = Some n -> 0 <= n ->
+ forall s, get "sum" env = Some s ->
+ sum_spec
+ (pgm []
+ (while (not (le (var "n") (con 0)))
+ (seq (assign "sum" (plus (var "sum") (var "n")))
+ (assign "n" (plus (var "n") (con (-1))))))
+ ,env)
+ (fun cfg' => fst cfg' = pgm [] skip /\
+ snd cfg' = set "n" 0 (set "sum" (s + ((n + 1) * n)/2) env)).
+\end{verbatim}
+
+Note that {\bf get}~$x$~{\bf env} evaluates to the value to which $x$
+is mapped in {\bf env} (if defined), while {\bf fst} and {\bf snd}
+return respectively the first and second components of a state pair.
+
+To verify that {\bf sum} meets its specification given by
+{\bf sum\_spec}, which is that the initial state of {\bf sum} either
+diverges or terminates with the correct value for {\bf sum}, we
+instantiate the general coinductive soundness theorem with the step
+relation {\bf step\_p} defining the semantics, and show the following
+statement:
+\begin{verbatim}
+Lemma sum_ok : sound step_p sum_spec.
+\end{verbatim}
+
+{\bf sound} is a generic proposition on reachability claims. The proof
+of this instantiation is based on a generalized coinduction theorem
+that mechanizes the verification approach.
+
\chapter{Related Work}
This is a ``wide'' project that touches on many areas. Here we try to
@@ 1120,7 +1367,7 @@ from impure code using the keyword {\tt method}.
Axiom\cite{Daly17} is a computer algebra system. It was originally
developed at IBM Research based on a proposal by James
Griesmer\cite{Blai70} in 1965, led to fruition by Richard
+Griesmer\cite{Blai70a} in 1965, led to fruition by Richard
Jenks\cite{Dave84}. The project was called Scratchpad. A more complete
history is available in the online documentation.
@@ 2464,6 +2711,21 @@ From Signatures to OperationAlist
The parser operates on the input stream step by step.
+There are three possible approaches to the parser and these are left
+``unchosen'' for the moment. There is the traditional Chomsky BNF
+style, the Parsing Expression Grammar (PEG) \cite{Ford04}, and the
+Pratt parser \cite{Prat73}.
+
+PEG parsers reduce the ambiguity of choice, making parsing more
+efficient.
+
+Pratt Parsers allow user modification of syntax. While this has not
+been of interest to Axiom it has been used in the proof community.
+Once proof technology is integrated and presented to users it will
+probably be more important.
+
+\section{ParserClass}
+
The {\bf *place*}
\index[doc]{*place*}
is the search path for the .spad files. These paths
@@ 2478,8 +2740,6 @@ are concatenated before the spad filenames.
\end{chunk}
\subsection{ParserClass}

\index[code]{defclass!ParserClass}
\index[code]{ParserClass!defclass}
\begin{chunk}{defclass ParserClass}
@@ 10026,6 +10286,27 @@ Value = NIL
\index[cat]{Group!Category}
\index[cat]{Category!Group}
\index[cat]{GROUP}
+Voevodsky's \cite{Gray18a} definition of a group:
+
+Let $U$ be a {\sl universe}
+
+A {\sl group} in $U$ is a sequence
+${(G,e,i,m,\lambda,\rho,\lambda^\prime,\rho^\prime,\alpha,\iota)}$,
+where
+\begin{itemize}
+\item $G$ is a type of $U$
+\item $e:G$
+\item $i:G\rightarrow G$
+\item $m:G\times G\rightarrow G$
+\item $\lambda$ is a proof that for every $a:G, m(e,a)=a$
+\item $\rho$ is a proof that for every $a:G, m(a,e)=a$
+\item $\lambda^\prime$ is a proof that for every $a:G, m(i(a),a)=e$
+\item $\rho^\prime$ is a proof that for every $a:G, m(a,i(a))=e$
+\item $\alpha$ is a proof that for every
+$a,b,c:G:m(m(a,b),c)=m(a,m(b,c))$
+\item $\iota$ is a proof that $G$ is a set
+\end{itemize}
+
\begin{chunk}{defclass GroupType}
(defclass GroupType (MonoidType)
((parents :initform '(Monoid))
@@ 43036,6 +43317,491 @@ walk the sorted keys to output the sorted lines.
\chapter{Musings}
+This chapter contains snippets from the literature containing
+ideas that need to be ``pondered''. They may or may not survive and
+will almost certainly be modified beyond these notes. So this isn't
+part of the document, but rather a collection of notes for later.
+
+\section{Hardware Proof Checking}
+
+\subsection{Mealy/Moore equivalence \cite{Fade17}}
+
+Prove that if a Mealy machine is strongly connected and completely
+specified, the corresponding Moore machine will also be strongly
+connected and completely specified.
+
+As the Mealy Machine is completely specified, we have 2 cases for each
+state:
+\begin{enumerate}
+\item All transitions coming into this state have the same output
+\item At least 2 transitions coming into this state have different
+outputs
+\end{enumerate}
+
+For case 1: The equivalent for this state would simply be changing the
+notation, i.e. mark all transitions into this state simply by the
+input symbols and just mention the output with the state as is.
+
+Now, as the machine is completely specified, each such input
+transition and its corresponding output will be defined, and the
+states resulting due to such states will also have completely
+specified transitions.
+
+For case 2: The equivalent for such a state requires the
+splitting/addition of new states such that each split state
+corresponds to the different outputs that can be generated. We split a
+state into 2 corresponding states.
+
+Now, as the machine is completely specified, each such input
+transition and its corresponding output will be defined, and the
+states resulting due to such states will also have completely
+specified transitions.
+
+{\sl Strongly Connected Machine:} If for any two states $S_i$, $S_j$
+of the machine, there exists a finite sequence of input characters
+that can make the machine transition from $S_i$ to $S_j$, the machine
+is strongly connected.
+
+{\sl Completely Specified Machine:} Consider $M=(I,O,S,F,Z)$
+\begin{itemize}
+\item {\bf I} : Input Symbols
+\item {\bf O} : Output Symbols
+\item {\bf S} : Set of States
+\item {\bf F} : Transition function $F : S \times I\rightarrow S$
+\item {\bf Z} : Output function
+\item {\bf Z} : $S\rightarrow O$ (Moore)
+\item {\bf Z} : $S\times I\rightarrow O$ (Mealy)
+\end{itemize}
+
+Now, if for every $(p,x)$ such that $(p\in S, x\in I)$ there exist
+some $q\in S$ and $y\in O$ such that $F(p,x)=q$ and $Z(p,x)=y$
+(Mealy), $Z(p)=y$ (Moore), then the machine is completely specified.
+
+Let $M=(Q,\Sigma,\Delta,\delta,\lambda,q_0)$ be a Mealy machine where
+$\Delta$ is the output alphabet and $\lambda$ is the output function.
+
+Denote states of the corresponding Moore machine by $q_{ib}$ if there
+is input $a$ and output $b$ such that $\delta(q_j,a)=(q_i,b)$. This
+means if the Mealy machine enters a state $q$ by emitting more than
+two different outputs then the state $q_i$ splits. Also, denote
+corresponding Moore machine's transition function by $\delta^\prime$
+
+Now consider two states $q_{ib_1}$ and $q_{jb_2}$ (of corresponding
+Moore machine). Existence of the state $q_{jb_2}$ implies that there
+is a states $q_k$ and input symbols $a$ such that
+$\delta(q_k,a)=(q_j,b_2)$. Since we made the assumption that the Mealy
+machine is strongly connected, there is a string $w$ such that
+$\delta(q_i,w)=(q_k,b_k)$ for some output symbol $b_k$. So, we have
+\[\delta^\prime(q_{ib_1},wa)=
+\delta^\prime(\delta^\prime(q_{ib_1},w),a)=
+\delta^\prime(q_{kb_k},a)=q_{jb_2}\]
+This shows that the corresponding Moore machine is strongly connected.
+
+As for complete specification, let $q_{ib_1}$ be a state of the
+Moore machine, and $a$ is an input symbol. Then
+$\delta^\prime(q_{ib_1},a)=(q_{jb_2},b_2)$ where
+$\delta(q_i,a)=(q_j,b_2)$, which shows it is completely specified.
+
+
+\section{Specification}
+
+\subsection{Binary Search \cite{Guen19}}
+
+A Binary Search in OCaml
+\begin{chunk}{BS}
+(* Requires a to be a sorted array of integers.
+ Returns k such that i <= k < j and a.(k) = v
+ or -1 if there is no such k *)
+
+let rec bsearch a v i j =
+ if j <= i then -1
+ else
+ let k = i + (j - i) / 2 in
+ if v = a.(k) then k
+ else if v < a.(k) then bsearch a v i k
+ else bsearch a v (k+1) j
+
+\end{chunk}
+
+There is an informal specification in the comments. The {\tt bsearch}
+program takes as input an array of integers $a$ (which is assumed to be
+sorted), a value $v$ which is the target of the search, and lower and
+upper bounds $i$ and $j$. If $v$ can be found between indices $i$
+(included) and $j$ (excluded), the {\tt bsearch} returns its index in
+the array; otherwise it returns $-1$.
+
+The Separation Logic specification for {\tt bsearch} is
+
+$\forall a~xs~v~i~j$\\
+$0\le i\le |xs| \land 0 \le j \le |xs| \land {\tt sorted\ }xs\Rightarrow\\
+\{a \leadsto {\tt Array\ } xs\} {\tt\ bsearch\ } a~v~i~j~
+\{\lambda k.~a\leadsto {\tt Array\ }xs *
+[{\tt bsearch\_correct\ }xs~v~i~j~k]\}$
+
+This statement consists of a Hoare triple $(\{H\}~t~\{Q\})$, after
+quantification (in the logic) over suitable arguments (the array
+location $a$ and integers $v$, $i$, and $j$) as well as the logical
+model of the array $xs$ (list of integers). Notice that we associate
+Coq integers (with $v$, $i$, $j$) with OCaml integers. This is not an
+abuse of notation but a feature of CFML, which reflects pure OCaml
+values directly as corresponding values in Coq. We require the indices
+$i$ and $j$ to be in bounds, and the contents of the array $xs$ to be
+sorted. The Hoare triple which forms the conclusion of the statement
+relates an OCaml expression (here, a call to {\tt bsearch},
+"{\tt bsearch\ }$a~v~i~j$") with a precondition and postcondition
+describing the state of the heap before and after the execution,
+respectively. Both pre and postconditions are expressed using
+Separation Logic assertions; the postcondition is additionally
+parameterized by the value returned by the function (here, named $k$).
+
+Generally speaking, Separation Logic assertions can be read in terms
+of ownership. For instance, the Separation Logic formula
+"$a\leadsto {\tt Array\ }xs$" asserts the unique ownership of an
+heapallocated array, starting at local $a$, whose contents are
+described by the list $xs$. The operator $*$ should be read as a
+conjunction; and $[\cdot]$ lifts an ordinary Coq proposition as a
+Separation Logic assertion. To express the fact that the value
+returned by {\tt bsearch} is indeed correct, we rely on an auxiliary
+definition {\tt bsearch\_correct}, which expresses the functional
+correctness property of {\tt bsearch}. Thereafter, we write "$xs[i]$",
+where $xs$ is a Coq list and $i$ an integer, to denote the $i$th
+element of the list $xs$ (or a dummy value if $i$ is not a valid index).
+
+{\bf Functional Correctness of {\tt bsearch}}
+
+${\tt bsearch\_correct\ }xs~v~i~j~k \overset{\Delta}{=}\\
+(k=-1 \land \forall~p.~i\le p < j \Rightarrow xs[p]\ne v)\\
+\lor (i\le k< j \land xs[k]=v)$
+
+Assuming the preconditions, we have the following facts.
+\begin{itemize}
+\item {\tt bsearch} will run and terminate without crashing (a
+consequence of the definition of Hoare triples)
+\item {\tt bsearch} will not modify the contents of the input array
+("$a\leadsto {\tt Array\ }xs$" appears in both the pre and
+postconditions, with the same $xs$)
+\item {\tt bsearch} will return a correct result, according to the
+{\tt bsearch\_correct} predicate
+\end{itemize}
+
+A technical remark: one might wonder why the ``pure'' assumptions on
+$i$, $j$ and $xs$ are not part of the triple precondition, and instead
+are lifted as an implication at the metalevel (in the logic of
+Coq). Formally speaking, these two options are equivalent. That is,
+one could equivalently prove the following specification for
+{\tt bsearch}
+
+$\forall a~xs~v~i~j\\
+\{a\leadsto {\tt Array\ }xs * [0\le i\le |xs| \land
+0\le j\le |xs| \land {\tt sorted\ }xs]\}\\
+{\tt bsearch\ }a~v~i~j\\
+\{\lambda k.~a\leadsto {\tt Array\ }xs *
+[{\tt bsearch\_correct\ }xs~v~i~j~k]\}$
+
+
+\section{Parsing}
+
+\subsection{Top Down Operator Precedence \cite{Prat73}\cite{Bend20}}
+
+\begin{chunk}{TDOP}
+
+def expression(rbp=0):
+ global token
+ t = token
+ token = next()
+ left = t.nud()
+ while rbp < token.lbp:
+ t = token
+ token = next()
+ left = t.led(left)
+ return left
+
+class literal_token(object):
+ def __init__(self, value):
+ self.value = int(value)
+ def nud(self):
+ return self.value
+
+class operator_add_token(object):
+ lbp = 10
+ def led(self,left):
+ right = expression(10)
+ return left+right
+
+class operator_mul_token(object):
+ lbp = 20
+ def led(self,left):
+ return left * expression(20)
+
+class end_token(object):
+ lbp = 0
+
+\end{chunk}
+
+The tokenizer and parser
+
+\begin{chunk}{TDOP}
+import re
+token_pat = re.compile("\s*(?:(\d+)|(.))")
+
+def tokenize(program):
+ for number, operator in token_pat.findall(program):
+ if number:
+ yield literal_token(number)
+ elif operator == "+":
+ yield operator_add_token()
+ elif operator == "*":
+ yield operator_mul_token()
+ else:
+ raise SyntaxError("unknown operator: %s" % operator)
+ yield end_token()
+
+def parse(program):
+ global token, next
+ next = tokenize(program).next
+ token = next()
+ return expression()
+
+\end{chunk}
+
+The {\bf lbp} is the left binding power. For an infix operator, it
+tells us how strongly the operator binds to the argument on its left.
+
+The {\bf rbp} is the right binding power.
+
+The {\bf nud} (null denotation) is the prefix handler.
+
+The {\bf led} (left denotation) is the left handler, used to handle
+infix operators.
+
+The {\bf expression} handler function does the work. Operator handlers
+call it and pass in an {\bf rbp}. It consumes tokens until it meets a
+token whose left binding power is equal or lower than the right
+binding power, that is until {\bf lbp <= rbp}.
+
+Handlers of operators call {\bf expression} to process their
+arguments, providing it with their binding power to make sure it gets
+just the right tokens from the input.
+
+For example:
+\begin{verbatim}
+ 3 + 1 * 2 * 4 + 5
+\end{verbatim}
+has the call trace
+\begin{verbatim}
+ expression with rbp 0
+ literal nud = 3
+ led of +
+ expression with rbp 10
+ literal nud = 1
+ led of *
+ expression with rbp 20
+ literal nud = 2
+ led of *
+ expression with rbp 20
+ literal nud = 4
+ led of +
+ expression with rbp 10
+ literal nud = 5
+\end{verbatim}
+
+\[\begin{array}{ccccccccc}
+\hline
+3 & + & 1 & * & 2 & * & 4 & + & 5\\
+\hline
+ &  &  &  & 0 &  &  &  &  \\
+ & &  &  & 10 &  &  & & 10\\
+ & & & & 20 & & 20 & & \\
+\hline
+\end{array}\]
+
+At each stage of the parser, there is an expression at each precedence
+level that is active at the moment. This instance awaits the results
+of the higher precedence instance and keeps going, until it has to
+stop itself and return to its caller.
+
+{\bf Handling Unary Operators}
+
+TDOP makes an explicit distinction between unary and binary operators,
+encoding the difference between the {\bf nud} and {\bf led} methods.
+For example, the subtraction operator becomes
+\begin{chunk}{TDOP}
+class operator_sub_token(object):
+ lbp = 10
+ def nud(self):
+ return -expression(100)
+ def led(self, left):
+ return left - expression(10)
+
+\end{chunk}
+
+The {\bf nud} handles the unary (prefix) form of minus. It has no left
+argument (since it is a prefix), and it negates its right argument.
+The binding power passed into {\bf expression} is high since prefix
+minus has a high precedence, higher than multiplication. The {\bf led}
+handles the infix case similarly to the handlers of $+$ and $*$.
+
+{\bf Right Associative Operators}
+
+
+
+\section{Program proof}
+
+See Altenkirch et al. ``Why Dependent Types Matter'' \cite{Alte05}
+
+\section{Notation}
+
+From Altenkirch \cite{Alte18}
+
+\[A\rightarrow B \equiv \Pi n:N.A^n \equiv \forall(x:A),B\]
+\[A\rightarrow B \equiv \Pi~~:~A.B\]
+
+\[List A \equiv \Sigma_n:N.A^n\]
+\[A\times B \equiv \Sigma~~:~A.B\]
+
+\section{Data Description Language}
+
+Jenkins \cite{Jenk18} proposes combination of sum and product
+types, e.g.
+\begin{verbatim}
+data OrderResponse
+ = PurchaseSuccessful
+ { newOrder :: Order }
+ | PaymentFailed
+ { paymentProvider :: ProviderId,
+ failureMessage :: String }
+ | NetworkError
+ { statusCode :: Int,
+ message :: String
+ }
+\end{verbatim}
+
+\section{Interactive Proof Semantics}
+
+\subsection{Lean Semantics \cite{Carn19b}}
+
+{\bf The Untyped Lambda Calculus}
+
+\[e:=x ~|~ e~e ~|~ \lambda~x.~e\]
+
+\begin{prooftree}
+\AxiomC{$e_1 \leadsto e_1^\prime$}
+\UnaryInfC{$e_1~e_2 \leadsto e_1^\prime~e_2$}
+\end{prooftree}
+
+\begin{prooftree}
+\AxiomC{$e_2 \leadsto e_2^\prime$}
+\UnaryInfC{$e_1~e_2 \leadsto e_1~e_2^\prime$}
+\end{prooftree}
+
+\begin{prooftree}
+\AxiomC{}
+\UnaryInfC{$(\lambda x.~e^\prime)~e \leadsto e^\prime[e/x]$}
+\end{prooftree}
+
+{\bf Simple Type Theory}
+
+\[\tau ::= \iota ~|~ \tau \rightarrow \tau\]
+
+\begin{prooftree}
+\AxiomC{$(x:\tau)\in\Gamma$}
+\UnaryInfC{$\Gamma \vdash x:\tau$}
+\end{prooftree}
+
+\begin{prooftree}
+\AxiomC{$\Gamma\vdash e_1 : \alpha\rightarrow\beta$}
+\AxiomC{$\Gamma\vdash e_2 : \alpha$}
+\BinaryInfC{$\Gamma\vdash e_1~e_2 : \beta$}
+\end{prooftree}
+
+\begin{prooftree}
+\AxiomC{$\Gamma,x : \alpha\vdash e:\beta$}
+\UnaryInfC{$\Gamma\vdash(\lambda x : \alpha.e) :
+\alpha\rightarrow\beta$}
+\end{prooftree}
+
+{\bf Dependent Type Theory}
+
+\[\tau ::= \iota ~|~ \tau \rightarrow \tau\]
+
+\[e ::= x ~|~ e~e ~|~ \lambda x : \tau.e\]
+
+\begin{prooftree}
+\AxiomC{$(x:\tau)\in \Gamma$}
+\UnaryInfC{$\Gamma\vdash x:\tau$}
+\end{prooftree}
+
+\begin{prooftree}
+\AxiomC{$\Gamma\vdash e_1:\alpha\rightarrow\beta$}
+\AxiomC{$\Gamma\vdash e_2:\alpha$}
+\BinaryInfC{$\Gamma\vdash e_1~e_2:\beta$}
+\end{prooftree}
+
+\begin{prooftree}
+\AxiomC{$\Gamma,x:\alpha\vdash e:\beta$}
+\UnaryInfC{$\Gamma\vdash(\lambda x:\alpha.e):\alpha\rightarrow\beta$}
+\end{prooftree}
+
+introducing $\forall x$:
+
+\[\tau ::= \iota ~|~ \forall x : \tau.~\tau\]
+
+\[e ::= x ~|~ e~e ~|~ \lambda x : \tau.e\]
+
+\begin{prooftree}
+\AxiomC{$(x:\tau)\in \Gamma$}
+\UnaryInfC{$\Gamma\vdash x:\tau$}
+\end{prooftree}
+
+\begin{prooftree}
+\AxiomC{$\Gamma\vdash e_1:\forall x: \alpha.\beta$}
+\AxiomC{$\Gamma\vdash e_2:\alpha$}
+\BinaryInfC{$\Gamma\vdash e_1~e_2:\beta[e_2/x]$}
+\end{prooftree}
+
+\begin{prooftree}
+\AxiomC{$\Gamma,x:\alpha\vdash e:\beta$}
+\UnaryInfC{$\Gamma\vdash(\lambda x:\alpha.e):\forall x : \alpha.\beta$}
+\end{prooftree}
+
+introducing universes:
+
+\[\tau ::= \iota ~|~ \forall x : \tau.~\tau ~|~ U\]
+
+\[e ::= x ~|~ e~e ~|~ \lambda x : \tau.e\]
+
+\begin{prooftree}
+\AxiomC{$(x:\tau)\in \Gamma$}
+\UnaryInfC{$\Gamma\vdash x:\tau$}
+\end{prooftree}
+
+\begin{prooftree}
+\AxiomC{$\Gamma\vdash e_1:\forall x: \alpha.\beta$}
+\AxiomC{$\Gamma\vdash e_2:\alpha$}
+\BinaryInfC{$\Gamma\vdash e_1~e_2:\beta[e_2/x]$}
+\end{prooftree}
+
+\begin{prooftree}
+\AxiomC{$\Gamma,x:\alpha\vdash e:\beta$}
+\UnaryInfC{$\Gamma\vdash(\lambda x:\alpha.e):\forall x : \alpha.\beta$}
+\end{prooftree}
+
+\begin{prooftree}
+\AxiomC{}
+\UnaryInfC{$\Gamma\vdash \iota:U$}
+\end{prooftree}
+
+\begin{prooftree}
+\AxiomC{$\Gamma\vdash\alpha:U$}
+\AxiomC{$\Gamma,x:\alpha\vdash\beta:U$}
+\BinaryInfC{$\Gamma\vdash\forall x:\alpha.\beta:U$}
+\end{prooftree}
+
+\begin{prooftree}
+\AxiomC{}
+\UnaryInfC{$\Gamma\vdash U:U$}
+\end{prooftree}
+
\section{Small Step Operational Semantics}
\subsection{Sylvan Clebsch \cite{Cleb19}}
@@ 43206,7 +43972,7 @@ $f : X \overset{\overset{\cong}{=}}{\longrightarrow} Y$
The notation for the type of all equivalences between $X$ and $Y$ is
$X \overset{\cong}{=} Y$
In Coq (by Voevodsky)
+In Coq (by Voevodsky \cite{Gray18a})
\begin{verbatim}
Definition eqweqmap {T1 T2 : UU} (e: paths T1 T2) : weq T1 T2.
@@ 43660,6 +44426,116 @@ From Bauer \cite{Baue19a}, Rules for dependent products
+(A type) (a:A) (b:A) (p:Id A a b) : a $\equiv$ b : A
\end{itemize}
+\section{Verbx Regular Expressions}
+
+\begin{itemize}
+\item \verb,., \verb,. A.C, match any character
+\item \verb,[], \verb,[ABab][ACac], match any character in list
+\item \verb,[^], \verb,[^Z][^a], match all except characters in list
+\item \verb,^, \verb,^C, next token must be the first part of string
+\item \verb,$, \verb,[Ca]t$, prev token must be the last part of string
+\item \verb,*, \verb,[ab]*, match 0 or more copies
+\item \verb,+, \verb,[ab]+, match 1 or more copies
+\item \verb,\|, \verb,c\|a, match either the 1st token or the second
+\item \verb,\(\), \verb,\(CA\)+, combine multiple tokens into one
+\end{itemize}
+
+\subsection{methods \cite{verb20}}
+
+\begin{chunk}{verbx}
+(defun sanitize (str)
+ (replace str "([.$*+?^()\[\]{}\\])" "\\\\$1"))
+
+\end{chunk}
+
+{\bf add}  Append a transformed value to internal expression that
+will compiled. This method should be private.
+
+{\bf startOfLine}  append \verb,^, at start of expression
+
+{\bf find}  \verb,"(?:" + value + ")",
+
+{\bf then}  shorthand for {\bf find}
+
+{\bf maybe}  0 or 1 times \verb,"(?:" + value + ")?",
+
+{\bf anything}  matches everything \verb,(?:.*),
+
+\begin{chunk}{verbx}
+(defun anything (verbx)
+ (add verbx "(?:.*)"))
+
+\end{chunk}
+
+{\bf anythingBut}  \verb,"(?:[^" + value + "]*)",
+
+{\bf something}  matches at least one char \verb,(?:.+),
+
+\begin{chunk}{verbx}
+(defun something (verbx)
+ (add verbx "(?:.+)"))
+
+\end{chunk}
+
+{\bf somethingBut}  matches at least one char not in value
+\verb,"(?:[^"+value+"]+)",
+
+{\bf replace}  return replaced string
+
+{\bf lineBreak}  matches linebreak \verb,"(?:(?:\n)|(?:\r\n))",
+
+\begin{chunk}{verbx}
+(defun lineBreak (verbx)
+ (add verbx "(?:(?:\n)|(?:\r\n))"))
+
+\end{chunk}
+
+{\bf br}  shorthand for {\bf linebreak}
+
+\begin{chunk}{verbx}
+(defmacro br (verbx)
+ `(lineBreak ,verbx))
+
+\end{chunk}
+
+{\bf tab}  match tab character \verb,"\t",
+
+{\bf word}  matches at least one word \verb,"\w+",
+
+{\bf anyOf}  matches any char in value \verb,"(?:["+value+"])",
+
+{\bf any}  shorthand for {\bf anyOf}
+
+\begin{chunk}{verbx}
+(defmacro any (verbx)
+ `(anyOf ,verbx))
+
+\end{chunk}
+
+{\bf range}
+
+{\bf withAnyCase}  append modifier \verb,"i",
+
+{\bf stopAtFirst}  remove modifier \verb,"g", if \verb,"true",,
+else append modifier \verb,"g",
+
+{\bf searchOneLine}  if true, remove modifier \verb,"m",
+
+{\bf multiple}
+
+{\bf or}  split expression with \verb,|,
+
+{\bf beginCapture}  initiate capture
+
+{\bf endCapture}  stop capturing elements
+
+{\bf whitespace}  \verb,\\s,
+
+{\bf oneOrMore}  repeats the previous at least once.
+
+
+
+
\chapter{Quote collections}
\begin{quote}
@@ 43869,7 +44745,7 @@ tequila and hand guns.}\\
\begin{quote}
{\bf Me: There are a lot of 'Axiom' things around.\\
Simon: Of Course. That's why we have the Axiom of Choice}\\
+Simon: Of course. That's why we have the Axiom of Choice}\\
 Simon Cruanes \cite{Crua20}
\end{quote}
@@ 43880,12 +44756,54 @@ mind about equality at least once}\\
\end{quote}
\begin{quote}
{\bf A tecnical argument by a trusted author, which is hard to check
and looks imilar to agruments known to be correct, is hardly ever
+{\bf A technical argument by a trusted author, which is hard to check
+and looks similar to arguments known to be correct, is hardly ever
checked in detail.}\\
 Vladimir Voevodsky \cite{Gray18}
\end{quote}
+\begin{quote}
+{\bf We're all on earth to help others  what I can't figure out is
+what the others are here for}\\
+ Marvin Minsky \cite{Mcco79}
+\end{quote}
+
+\begin{quote}
+{\bf If I ask a student whether her design is as good as Chartres, she
+often smiles tolerantly at me as if to say ``Of course not, that isn't
+what I am trying to do... I could never do that.'' Then, I express my
+disagreement, and tell her: ``That standard {\sl must} be our
+standard.''}\\
+ Christopher Alexander \cite{Gabr96}
+\end{quote}
+
+\begin{quote}
+{\bf The distance is commonly very great between actual performances
+and speculative possibility. It is natural to suppose that as much as
+has been done today may be done tomorrow: but on the morrow some
+difficulty emerges, or some external impediment obstructs. Indolence,
+interruption, business, and pleasure, all take their turns of
+retardation; and every long work is lengthened by a thousand causes
+that can, and ten thousand that cannot, be recounted. Perhaps no
+extensive and multifarious performance was ever effected within the
+term originally fixed in the undertaker's mind. He that runs against
+Time has an antagonist not subject to casualties.}\\
+ Samuel Johnson \cite{Gabr85}
+\end{quote}
+
+\begin{quote}
+{\bf I cannot claim to offer anything other than the notions of myself
+that I have formed over the space of roughly forty years, and their
+only singularity, it seems to me, is that they are not flattering.}\\
+ Peter Hogarth \cite{Lemx68}
+\end{quote}
+
+\begin{quote}
+{\bf The shame of a genius may be his intellectual futility, the
+knowledge of how uncertain is all that he has accomplished.}\\
+ Peter Hogarth \cite{Lemx68}
+\end{quote}
+
\includegraphics[scale=0.5]{ps/PreciseSpec.eps}\\
\cite{Comm16}
@@ 43893,6 +44811,474 @@ checked in detail.}\\
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\appendix
+\newpage
+\phantomsection
+{\huge Coq Example Code}
+\addcontentsline{toc}{chapter}{Coq Example Code}
+\label{Coq Example Code}
+
+\url{https://github.com/kvscoqlanguageframeworks/coq/imp.v}
+
+\begin{verbatim}
+
+(* Based on the imp example in coind,
+ extracted to a singlefile.
+ *)
+(* Developed with Coq 8.9.1 *)
+
+Require Import String.
+Require Import ZArith.
+Require Import List.
+
+Set Implicit Arguments.
+
+Local Open Scope Z.
+Import List.ListNotations.
+Local Open Scope list.
+Local Open Scope string.
+
+(** * The syntax of IMP programs *)
+
+Inductive AExp :=
+  var : string > AExp
+  con : Z > AExp
+  div : AExp > AExp > AExp
+  plus : AExp > AExp > AExp
+ .
+
+Inductive BExp :=
+  bcon : bool > BExp
+  le : AExp > AExp > BExp
+  not : BExp > BExp
+  and : BExp > BExp > BExp
+ .
+
+Inductive Stmt :=
+  assign : string > AExp > Stmt
+  cond : BExp > Stmt > Stmt > Stmt
+  while : BExp > Stmt > Stmt
+  seq : Stmt > Stmt > Stmt
+  skip : Stmt
+ .
+
+Inductive Pgm :=
+ pgm : list string > Stmt > Pgm.
+
+(** Here is the sum program *)
+Definition sum_pgm N : Pgm :=
+ pgm ["n"; "sum"]
+(seq (assign "n" (con N))
+(seq (assign "sum" (con 0))
+ (while (not (le (var "n") (con 0)))
+ (seq (assign "sum" (plus (var "sum") (var "n")))
+ (assign "n" (plus (var "n") (con (1)))))))).
+
+(** * The semantics of IMP programs *)
+
+Definition Env := list (string * Z).
+Definition empty_env : Env := [].
+Fixpoint get x (env:Env) :=
+ match env with
+  [] => None
+  (x',v)::env' =>
+ if string_dec x x' then Some v else get x env'
+ end.
+Fixpoint set x v (env:Env) :=
+ match env with
+  [] => []
+  (x',v')::env' =>
+ if string_dec x x' then (x,v)::env' else (x',v')::set x v env'
+ end.
+ (* "simpl" should reduce set if concrete values are given for both variables *)
+
+(* ** These "step" types together define single execution steps *)
+Inductive step_e : (AExp * Env) > (AExp * Env) > Prop :=
+  step_var: forall v x env, get v env = Some x >
+ step_e (var v, env) (con x, env)
+  step_plus: forall x y env,
+ step_e (plus (con x) (con y), env) (con (Z.add x y), env)
+  step_div: forall x y env,
+ y <> 0%Z >
+ step_e (div (con x) (con y), env) (con (Z.div x y), env)
+  cong_plus_r: forall e1 e2 e2' env env',
+ step_e (e2, env) (e2', env') >
+ step_e (plus e1 e2, env) (plus e1 e2', env')
+  cong_plus_l: forall e2 e1 e1' env env',
+ step_e (e1, env) (e1', env') >
+ step_e (plus e1 e2, env) (plus e1' e2, env')
+  cong_div_r: forall e1 e2 e2' env env',
+ step_e (e2, env) (e2', env') >
+ step_e (div e1 e2, env) (div e1 e2', env')
+  cong_div_l: forall e2 e1 e1' env env',
+ step_e (e1, env) (e1', env') >
+ step_e (div e1 e2, env) (div e1' e2, env')
+ .
+
+(* These abbreviations capture the pattern of the congruence rules *)
+Notation cong_l op R1 R2 :=
+ (forall a env a' env', R1 (a,env) (a',env') >
+ forall b, R2 (op a b, env) (op a' b, env')).
+Notation cong_r op nf R1 R2 :=
+ (forall b env b' env', R1 (b,env) (b',env') >
+ forall a, R2 (op (nf a) b, env) (op (nf a) b', env')).
+Notation cong_1 op R1 R2 :=
+ (forall a env a' env', R1 (a,env) (a',env') > R2 (op a, env) (op a', env')).
+
+Inductive step_b : (BExp * Env) > (BExp * Env) > Prop :=
+  eval_le : forall v1 v2 env,
+ step_b (le (con v1) (con v2), env) (bcon (Z.leb v1 v2), env)
+  eval_not : forall b env,
+ step_b (not (bcon b), env) (bcon (negb b), env)
+  eval_and : forall b e env,
+ step_b (and (bcon b) e, env) (if b then e else bcon false, env)
+  cong_le_r : cong_r le con step_e step_b
+  cong_le_l : cong_l le step_e step_b
+  cong_not : cong_1 not step_b step_b
+  cong_and : cong_l and step_b step_b
+ .
+
+Inductive step_s : (Stmt * Env) > (Stmt * Env) > Prop :=
+  exec_assign : forall x v v0 env, get x env = Some v0 >
+ step_s (assign x (con v),env) (skip, set x v env)
+  cong_assign : forall x,
+ cong_1 (assign x) step_e step_s
+  exec_seq : forall s env,
+ step_s (seq skip s,env) (s,env)
+  cong_seq : cong_l seq step_s step_s
+  exec_cond : forall b s1 s2 env,
+ step_s (cond (bcon b) s1 s2, env) (if b then s1 else s2, env)
+  cong_cond : forall b b' env env' s1 s2, step_b (b,env) (b',env') >
+ step_s (cond b s1 s2,env) (cond b' s1 s2,env')
+  exec_while : forall b s env,
+ step_s (while b s,env) (cond b (seq s (while b s)) skip, env)
+ .
+
+Inductive step_p : (Pgm * Env) > (Pgm * Env) > Prop :=
+  exec_init: forall x xs s env,
+ step_p (pgm (x::xs) s,env) (pgm xs s, (x,0)::env)
+  exec_body: forall s env s' env',
+ step_s (s, env) (s', env') >
+ step_p (pgm nil s, env) (pgm nil s', env')
+ .
+
+(** Now we verify the program *)
+Require Import proof_system.
+
+(** The claim about the loop says that running the loop in any environment
+ with a nonnegative n finishes with n set to zero, and sum increased
+ from its original value by the sum of numbers 0+1+...+n.
+ *)
+Inductive sum_spec : Spec (Pgm * Env) :=
+  sum_claim: forall n, 0 <= n >
+ sum_spec
+ (pgm ["n"; "sum"]
+ (seq (assign "n" (con n))
+ (seq (assign "sum" (con 0))
+ (while (not (le (var "n") (con 0)))
+ (seq (assign "sum" (plus (var "sum") (var "n")))
+ (assign "n" (plus (var "n") (con (1))))))))
+ ,[])
+ (fun cfg' => cfg' = (pgm [] skip, [("sum",((n + 1) * n)/2);("n",0)]))
+  sum_loop_claim : forall env n, get "n" env = Some n > 0 <= n >
+ forall s, get "sum" env = Some s >
+ sum_spec
+ (pgm []
+ (while (not (le (var "n") (con 0)))
+ (seq (assign "sum" (plus (var "sum") (var "n")))
+ (assign "n" (plus (var "n") (con (1))))))
+ ,env)
+ (fun cfg' => fst cfg' = pgm [] skip /\
+ snd cfg' = set "n" 0 (set "sum" (s + ((n + 1) * n)/2) env)).
+
+(* Some lemmas about environment stuff *)
+Ltac env_ind_tac env :=
+ induction env as [[]];try reflexivity;simpl;
+ repeat match goal with
+  [  context [string_dec ?a ?b]] => destruct (string_dec a b);simpl;try congruence
+ end.
+
+Lemma env_set_id: forall x v env,
+ get x env = Some v >
+ set x v env = env.
+Proof.
+ env_ind_tac env.
+ intro. f_equal. tauto.
+Qed.
+
+Lemma env_set_eq:
+ forall x v1 v2 env,
+ set x v1 (set x v2 env) = set x v1 env.
+Proof. env_ind_tac env. Qed.
+
+Lemma env_set_ne_comm:
+ forall x1 x2, x1 <> x2 >
+ forall v1 v2 env,
+ set x1 v1 (set x2 v2 env) = set x2 v2 (set x1 v1 env).
+Proof. env_ind_tac env. Qed.
+
+Lemma env_set_set: forall x1 x2 v1 v2 env,
+ set x1 v1 (set x2 v2 env) =
+ if string_dec x1 x2
+ then set x1 v1 env
+ else set x2 v2 (set x1 v1 env).
+Proof. env_ind_tac env. Qed.
+
+Definition env_has x env: bool :=
+ match get x env with
+  Some _ => true
+  None => false
+ end.
+
+Lemma env_has_get x v env:
+ get x env = Some v >
+ env_has x env = true.
+Proof.
+ unfold env_has;intros >;reflexivity.
+Qed.
+
+Lemma env_get_set x x' v env:
+ get x (set x' v env) =
+ if string_dec x x'
+ then if env_has x env then Some v else None
+ else get x env.
+Proof. unfold env_has; env_ind_tac env. Qed.
+
+Lemma env_has_set x x' v env:
+ env_has x (set x' v env) = env_has x env.
+Proof.
+ unfold env_has.
+ rewrite env_get_set.
+ unfold env_has.
+ destruct (string_dec x x');[reflexivity].
+ destruct (get x env);reflexivity.
+Qed.
+
+Ltac step_tac :=
+ match goal with
+  [  step_p _ _] => econstructor;step_tac
+  [  step_s _ _] => econstructor;step_tac
+  [  step_b _ _] => econstructor;step_tac
+  [  step_e _ _] => econstructor;step_tac
+  [  get _ _ = _] => rewrite ?env_get_set;(reflexivity  eassumption)
+ end.
+
+Ltac run := repeat first[
+ eapply dtrans;[constructor]
+ eapply ddone;simpl;split;[reflexivity]
+ eapply dstep;[step_tac]].
+
+Require Import Recdef.
+Function sum_to (n:Z) { wf (fun x y => 0 <= x < y) n } : Z :=
+ if Z_lt_ge_dec 0 n then n + sum_to (n  1) else 0.
+intros;omega.
+exact (Z.lt_wf 0).
+Defined.
+
+Lemma sum_algebra: forall s n, 0 < n >
+ s + n + (n + 1 + 1) * (n + 1) / 2
+ = s + (n + 1) * n / 2.
+Proof.
+ intros s n H.
+ rewrite < Z.add_assoc.
+ f_equal.
+ rewrite < Z.add_assoc, Z.add_0_r.
+ rewrite < Z.div_add_l by omega.
+ f_equal.
+ rewrite Z.mul_add_distr_r, Z.mul_add_distr_l.
+ omega.
+Qed.
+
+Lemma sum_ok : sound step_p sum_spec.
+apply proved_sound;destruct 1.
+
+{ (* Overall claim, easily proved with loop claim *)
+ eapply sstep;[solve[step_tac]].
+ run;[reflexivity  assumption ..].
+ destruct k';simpl.
+ destruct 1 as [> >].
+ apply ddone.
+ reflexivity.
+}
+
+
+eapply sstep;[solve[step_tac]].
+run.
+destruct (Z.leb_spec n 0);simpl.
+
+(* when n = 0, loop exits.
+ To conclude, need to prove that the initial
+ environment env is an acceptable result *)
+run.
+replace n with 0 in H  * by auto with zarith.
+rewrite (env_set_id "sum") by (rewrite H1;f_equal;auto with zarith).
+rewrite (env_set_id "n") by assumption.
+reflexivity.
+
+(* when n > 0, execution goes through the loop body,
+ then sum_loop_claim is applied by transitivity,
+ which takes us to a state satisfying the goal *)
+run.
+{ rewrite env_get_set, ?env_has_set.
+ simpl.
+ erewrite env_has_get by eassumption;reflexivity. }
+ omega.
+{ rewrite !env_get_set.
+ simpl.
+ erewrite env_has_get by eassumption;reflexivity. }
+destruct k';simpl;intros [> >].
+
+run.
+rewrite (env_set_set "sum" "n"). simpl.
+rewrite 2 env_set_eq.
+f_equal.
+f_equal.
+apply sum_algebra.
+assumption.
+Qed.
+
+\end{verbatim}
+
+The proof
+\begin{verbatim}
+Require Import String.
+Require Import ZArith.
+Require Import imp.
+
+Local Open Scope Z.
+Import List.ListNotations.
+Local Open Scope list.
+Local Open Scope string.
+
+(** An execution trace is a sequence of zero or more steps.
+ We will take the reflexivetransitive closure of the step
+ relation using the library type clos_refl_trans_1n *)
+Import Relation_Operators.
+
+(** Execution tests can be written as a theorem claiming that
+ an initial state reaches an expected final state *)
+Lemma test_execution :
+ clos_refl_trans_1n _ step_p (sum_pgm 100,[]) (pgm nil skip,[("sum",5050);("n",0)]).
+Proof.
+ Time repeat (eapply rt1n_trans;[once solve[repeat econstructor]];simpl).
+ apply rt1n_refl.
+Time Qed.
+
+(** The final state can also be filled in by the search.
+ This statement uses a sort of existential quantification over the
+ final environment e2 *)
+Lemma test_execution2 :
+ {e2:Env  clos_refl_trans_1n _ step_p (sum_pgm 100,[]) (pgm nil skip,e2)}.
+Proof.
+ eexists.
+ Time repeat (eapply rt1n_trans;[once solve[repeat econstructor]simpl]).
+ apply rt1n_refl.
+Time Defined.
+
+(** The final environment can be extracted from this trace *)
+Eval simpl in proj1_sig test_execution2.
+
+\end{verbatim}
+
+Proving reachability
+
+\begin{verbatim}
+(*
+ This file contains the main soundness proof allowing
+ the reachability logic proof strategy to be used in Coq.
+
+ 'reaches x P' holds
+ if any execution path from configuration x is either infinite or
+ reaches a configuration satisfying P.
+
+ 'reaches' is shown to be the greatest fixpoint of the 'step' function.
+
+ 'stable_sound' is plain coinduction theorem for 'reaches' and 'step',
+ 'proved_sound' is the generalized coinduction theorem also allowing
+ the "proof" rules defined in 'trans'.
+ *)
+Set Implicit Arguments.
+
+Section relations.
+Variables (cfg : Type) (cstep : cfg > cfg > Prop).
+
+Definition Spec : Type := cfg > (cfg > Prop) > Prop.
+
+(* Soundness *)
+CoInductive reaches (k : cfg) (P : cfg > Prop) : Prop :=
+ (* reaches : Spec, but defining k and P as parameters
+ gives a cleaner definition and induction principle. *)
+ rdone : P k > reaches k P
+ rstep : forall k', cstep k k' > reaches k' P > reaches k P.
+
+Definition sound (Rules : Spec) : Prop :=
+ forall x P, Rules x P > reaches x P.
+
+Inductive step (X : Spec) (k : cfg) (P : cfg > Prop) : Prop :=
+ (* step : Spec > Spec *)
+ sdone : P k > step X k P
+ sstep : forall k', cstep k k' > X k' P > step X k P.
+
+Lemma reaches_stable :
+ (forall x P, reaches x P > step reaches x P).
+Proof. destruct 1;econstructor;eassumption. Qed.
+
+CoFixpoint stable_sound (Rules : Spec)
+ (Hstable : forall x P, Rules x P > step Rules x P)
+ : sound Rules :=
+ fun x P H =>
+ match Hstable _ _ H with
+  sdone _ _ _ pf => rdone _ _ pf
+  sstep _ _ Hstep H' =>
+ rstep Hstep (stable_sound Hstable _ _ H')
+ end.
+
+(*
+Inductive derived (X : Spec) k P : Prop :=
+ dclaim : X k P > derived X k P
+ ddone : P k > derived X k P
+ dstep : forall k', cstep k k' > derived X k' P > derived X k P
+ dtrans' : forall Q, derived X k Q > (forall k', Q k' > derived X k' P)
+ > derived X k P
+ dproved : reaches k P > derived X k P.
+*)
+
+Inductive trans (X : Spec) (k : cfg) (P : cfg > Prop) : Prop :=
+ (* trans : Spec > Spec *)
+  ddone : P k > trans X k P
+  dtrans' : forall Q, trans X k Q > (forall k', Q k' > trans X k' P) > trans X k P
+  drule : X k P > trans X k P
+  dstep : forall k', cstep k k' > trans X k' P > trans X k P
+  dvalid : reaches k P > trans X k P
+ .
+Definition dtrans_valid (X : Spec) (k : cfg) (P Q : cfg > Prop)
+ (rule : reaches k Q) (rest : forall k', Q k' > trans X k' P) : trans X k P :=
+ @dtrans' X k P Q (dvalid _ rule) rest.
+Definition dtrans (X : Spec) (k : cfg) (P Q : cfg > Prop)
+ (rule : X k Q) (rest : forall k', Q k' > trans X k' P) : trans X k P :=
+ @dtrans' X k P Q (drule _ _ _ rule) rest.
+
+Lemma trans_stable (Rules : Spec) :
+ (forall x P, Rules x P > step (trans Rules) x P)
+ > (forall x P, trans Rules x P > step (trans Rules) x P).
+induction 2;eauto using step.
+destruct IHtrans; eauto using step, dtrans'.
+destruct H0; eauto using step,dvalid.
+Qed.
+
+Lemma proved_sound (Rules : Spec) :
+ (forall x P, Rules x P > step (trans Rules) x P)
+ > sound Rules.
+unfold sound.
+intros H x P R.
+eapply stable_sound.
+apply trans_stable. eassumption.
+apply drule. assumption.
+Qed.
+
+End relations.
+
+\end{verbatim}
+
\chapter{Bibliography}
\bibliographystyle{axiom}
\bibliography{axiom}
diff git a/books/bookvolbib.pamphlet b/books/bookvolbib.pamphlet
index 2e029fd..2893ae7 100644
 a/books/bookvolbib.pamphlet
+++ b/books/bookvolbib.pamphlet
@@ 10367,13 +10367,43 @@ when shown in factored form.
\index{Appel, Andrew W.}
\begin{chunk}{axiom.bib}
@book{Appe17,
+@book{Appe18,
author = "Appel, Andrew W.",
title = {{Verified Functional Algorithms}},
 year = "2017",
+ year = "2018",
publisher = "University of Pennsylvania",
link =
 "\url{https://softwarefoundations.cis.upenn.edu/vfacurrent/index.html}"
+ "\url{https://softwarefoundations.cis.upenn.edu/vfacurrent/index.html}",
+ abstract =
+ "Here's a good way to build formally verified correct software:
+ \begin{itemize}
+ \item Write your program in an expressive language with a good
+ proof theory (the Gallina language embedded in Coq's logic)
+ \item Prove it correct in Coq
+ \item Compile it with an optimizing ML compiler
+ \end{itemize}
+
+ Since you want your programs to be {\sl efficient}, you'll want to
+ implement sophisticated data structures and algorithms. Since
+ Gallina is a {\sl purely functional} language, it helps to have
+ purely functional algorithms. In this volume you'll learn how to
+ specify and verify (prove the correctness of) sorting algorithms,
+ binary search trees, balanced binary search trees, and priority
+ queues. Before using this book, you should have some understanding
+ of these algorithms and data structures, available in any standard
+ undergraduate algorithms textbook. This electronic book is Volume
+ 3 of the {\sl Software Foundations} series, which presents the
+ mathematical underpinning of reliable software. It builds on
+ {\sl Software Foundations Volume 1} (Logical Foundations), but
+ does not depend on Volume 2. The exposition here is intended for a
+ broad range of readers, from advanced undergraduates to PhD
+ students and researchers. The principal novelty of
+ {\sl Software Foundations} is that it is one hundred percent
+ formalized and machinechecked: the entire text is literally a
+ script for Coq. It is intended to be read alongside an interactive
+ session with Coq. All the details in the text are fully formalized
+ in Coq, and the exercises are designed to be worked using Coq.",
+ paper = "Appe18.tgz"
}
\end{chunk}
@@ 13178,6 +13208,65 @@ when shown in factored form.
\end{chunk}
+\index{Alturki, Musab A.}
+\index{Moore, Brandon}
+\begin{chunk}{axiom.bib}
+@misc{Altu19,
+ author = "Alturki, Musab A. and Moore, Brandon",
+ title = {{K vs Coq as Language Verification Frameworks}},
+ year = "2019",
+ link = "\url{https://runtimeverification.com/blog/kvscoqaslanguageverificationframeworkspart1of3/}"
+}
+
+\end{chunk}
+
+\index{Appel, Andrew W.}
+\index{Beringer, Lennart}
+\index{Chlipala, Adam}
+\index{Pierce, Benjamin C.}
+\index{Shao, Zhong}
+\index{Weirich, Stephanie}
+\index{Zdancewic, Steve}
+\begin{chunk}{axiom.bib}
+@article{Appe17a,
+ author = "Appel, Andrew W. and Beringer, Lennart and Chlipala, Adam
+ and Pierce, Benjamin C. and Shao, Zhong and Weirich, Stephanie
+ and Zdancewic, Steve",
+ title = {{Position Paper: The Science of Deep Specification}},
+ journal = "Philosophical Transactions of the Royal Society",
+ volume = "375",
+ year = "2017",
+ link = "\url{https://royalsocietypublishing.org/doi/pdf/10.1098/rsta.2016.0331}",
+ abstract =
+ "We introduce our efforts within the project ``The Science of Deep
+ Specification'' to work out the key formal underpinnings of
+ industrial-scale formal specifications of software and hardware
+ components, anticipating a world where large verified systems are
+ routinely built out of smaller verified components that are also
+ used by many other projects. We identify an important class of
+ specification that has already been used in a few experiments that
+ connect strong componentcorrectness theorems across the work of
+ different teams. To help popularize the unique advantages of that
+ style, we dub it {\sl deep specification}, and we say that it
+ encompasses specifications that are {\sl rich}, {\sl twosided},
+ {\sl formal}, and {\sl live} (terms that we define in the
+ article). Our core team is developing a proof-of-concept system
+ (based on the Coq proof assistant) whose specification and
+ verification work is divided across largely decoupled subteams at
+ our four institutions, encompassing hardware microarchitecture,
+ compilers, operating systems and applications, along with
+ crosscutting principles and tools for effective specification. We
+ also aim to catalyse interest in the approach, not just by basic
+ researchers but also by users in industry.
+
+ This article is part of the themed issue ``Verified trustworthy
+ software systems''",
+ paper = "Appe17a.pdf",
+ keywords = "printed, DONE"
+}
+
+\end{chunk}
+
\index{Ardizzoni, Alessandro}
\index{Stumbo, Fabio}
\begin{chunk}{axiom.bib}
@@ 13229,7 +13318,8 @@ when shown in factored form.
author = "Altenkirch, Thorsten",
title = {{Naive Type Theory}},
year = "2018",
 link = "\url{https://www.youtube.com/watch?v=bNG53SA4n48p}"
+ link = "\url{https://www.youtube.com/watch?v=bNG53SA4n48p}",
+ keywords = "DONE"
}
\end{chunk}
@@ 13773,11 +13863,33 @@ when shown in factored form.
\end{chunk}
+\index{Awodey, Steve}
+\begin{chunk}{axiom.bib}
+@misc{Awod16,
+ author = "Awodey, Steve",
+ title = {{Univalence as a Principle of Logic}},
+ year = "2016",
+ link = "\url{https://www.andrew.cmu.edu/user/awodey/preprints/ualp.pdf}",
+ abstract =
+ "It is sometimes convenient or useful in mathematics to treat
+ isomorphic structures as the same. The recently proposed
+ Univalence Axiom for the foundations of mathematics elevates this
+ idea to a foundational principle in the setting of Homotopy Type
+ Theory. It states, roughly, that isomorphic structures can be
+ identified. We explore the motivations and consequences, both
+ mathematical and philosophical, of making such a new logical
+ postulate.",
+ paper = "Awod16.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\subsection{B} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\index{Baccala, Brent W.}
\begin{chunk}{axiom.bib}
@misc{Bacc,
+@misc{Bacc20,
author = "Baccala, Brent W.",
title = {{The Facebook Integral}},
year = "2020",
@@ 13789,7 +13901,7 @@ when shown in factored form.
\index{Baccala, Brent W.}
\begin{chunk}{axiom.bib}
@misc{Bacca,
+@misc{Bacc20a,
author = "Baccala, Brent W.",
title = {{The Facebook Integral (solved with Axiom and Sage}},
year = "2020",
@@ 13801,7 +13913,7 @@ when shown in factored form.
\index{Baccala, Brent W.}
\begin{chunk}{axiom.bib}
@misc{Baccb,
+@misc{Bacc20b,
author = "Baccala, Brent W.",
title = {{The Risch Theorem}},
year = "2020",
@@ 14378,6 +14490,18 @@ when shown in factored form.
\end{chunk}
+\index{Bendersky, Eli}
+\begin{chunk}{axiom.bib}
+@misc{Bend10,
+ author = "Bendersky, Eli",
+ title = {{Top Down Operator Precedence}},
+ year = "2010",
+ link = "\url{https://eli.thegreenplace.net/2010/01/02/topdownoperatorprecedenceparsing}",
+ keywords = "DONE"
+}
+
+\end{chunk}
+
\index{Benker, Hans}
\begin{chunk}{axiom.bib}
@book{Benk99,
@@ 15000,6 +15124,38 @@ when shown in factored form.
\end{chunk}
+\index{Brady, Edwin}
+\begin{chunk}{axiom.bib}
+@misc{Brad18,
+ author = "Brady, Edwin",
+ title = {{Quantitative Type Theory in Action}},
+ year = "2018",
+ link = "\url{https://www.typedriven.org.uk/edwinb/papers/idris2.pdf}",
+ abstract =
+ "Dependent types allow us to express precisely {\sl what} a
+ function is intended to do. Recent work on Quantitative Type
+ Theory (QTT) extends dependent type systems with {\sl linearity},
+ also allowing precision in expressing {\sl when} a function can
+ run. This is promising, because it suggests the ability to design
+ and reason about resource usage protocols, such as we might find
+ in distributed and concurrent programming, where the state of a
+ communication channel changes throughout program execution. As
+ yet, however, there has not been a fullscale programming language
+ with which to experiment with these ideas. Idris 2 is a new
+ version of the dependently typed language Idris, with a new core
+ language based on QTT, supporting linear and dependent types. In
+ this paper, we introduce Idris 2, and describe how QTT has
+ influenced its design. We give several examples of the benefits of
+ QTT in practice including: clearly expressing which data is
+ erased at run time, at the type level; improving interactive
+ program development by reducing the search space for typedriven
+ program synthesis; and, resource tracking in the type system
+ leading to typesafe concurrent programming with session types.",
+ paper = "Brad18.pdf"
+}
+
+\end{chunk}
+
\index{Brady, Sean}
\begin{chunk}{axiom.bib}
@misc{Brad20,
@@ 15092,6 +15248,33 @@ when shown in factored form.
\end{chunk}
+\index{Braibant, Thomas}
+\index{Chlipala, Adam}
+\begin{chunk}{axiom.bib}
+@inproceedings{Brai13,
+ author = "Braibant, Thomas and Chlipala, Adam",
+ title = {{Formal Verification of Hardware Synthesis}},
+ booktitle = "Computer Aided Verification (CAV'13)",
+ publisher = "unknown",
+ year = "2013",
+ abstract =
+ "We report on the implementation of a certified compiler for a
+ highlevel hardware description language (HDL) called FeSi
+ (FEatherweight Synthesis). FeSi is a simplified version of
+ Bluespec, an HDL based on a notion of guarded atomic
+ actions. FeSi is defined as a dependently typed deep embedding in
+ Coq. The target language of the compiler corresponds to a
+ synthesisable subset of Verilog or VHDL. A key aspect of our
+ approach is that input programs to the compiler can be defined and
+ proved correct inside Coq. Then, we use extraction and a Verilog
+ backend (written in OCaml) to get a certified version of a
+ hardware design.",
+ paper = "Brai13.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\index{Bridges, Douglas}
\index{Reeves, Steve}
\begin{chunk}{axiom.bib}
@@ 15860,6 +16043,93 @@ when shown in factored form.
\end{chunk}
+\index{Carneiro, Mario}
+\begin{chunk}{axiom.bib}
+@misc{Carn19a,
+ author = "Carneiro, Mario",
+ title = {{The Type Theory of Lean}},
+ year = "2019",
+ link = "\url{https://github.com/digama0/leantypetheory/releases/v1.0}",
+ comment = "\url{https://www.youtube.com/watch?v=3sKrSNhSxik}",
+ abstract =
+ "This thesis is a presentation of dependent type theory with
+ inductive types, a hierarchy of universes, with an impredicative
+ universe of propositions, proof irrelevance, and subsingleton
+ elimination, along with axioms for propositional extensionality,
+ quotient types, and the axiom of choice. This theory is notable
+ for being the axiomatic framework of the Lean theorem prover. The
+ axiom system is given here in complete detail, including
+ ``optional'' features of the type system such as {\bf let} binders
+ and definitions. We provide a reduction of the theory to a
+ finitely axiomatized fragment utilizing a fixed set of inductive
+ types (the {\bf W}type plus a few others), to ease the study of
+ this framework.
+
+ The metatheory of this theory (which we will call Lean) is studied. In
+ particular, we prove unique typing of the definitional equality,
+ and use this to construct the expected settheoretic model, from
+ which we derive consistency of Lean relative to {\bf ZFC+} \{there
+ are $n$ inaccessible cardinals $\vert~n<\omega$\} (a relatively
+ weak large cardinal assumption). As Lean supports models of
+ {\bf ZFC} with $n$ inaccessible cardinals, this is optimal.
+
+ We also show a number of negative results, where the theory is
+ less nice than we would like. In particular, type checking is
+ undecidable, and the type checking as implemented by the Lean
+ theorem prover is a decidable nontransitive underapproximation
+ of the typing judgment. Nontransitivity also leads to lack of
+ subject reduction, and the reduction relation does not satisfy the
+ ChurchRosser property, so reduction to a normal form does not
+ produce a decision procedure for definitional equality. However, a
+ modified reduction relation allows us to restore the ChurchRosser
+ property at the expense of guaranteed termination, so that unique
+ typing is shown to hold.",
+ paper = "Carn19a.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Carneiro, Mario}
+\begin{chunk}{axiom.bib}
+@misc{Carn19b,
+ author = "Carneiro, Mario",
+ title = {{The Type Theory of Lean (slides)}},
+ year = "2019",
+ link = "\url{https://github.com/digama0/leantypetheory/releases/v1.0}",
+ paper = "Carn19b.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Carneiro, Mario}
+\begin{chunk}{axiom.bib}
+@misc{Carn19c,
+ author = "Carneiro, Mario",
+ title = {{Specifying Verified x86 Software from Scratch}},
+ year = "2019",
+ link = "\url{https://arxiv.org/pdf/1907.01283.pdf}",
+ abstract =
+ "We present a simple framework for specifying and proving facts
+ about the input/output behavior of ELF binary files on the x8664
+ architecture. A strong emphasis has been placed on simplicity at
+ all levels: the specification says only what it needs to about the
+ target executable, the specification is performed inside a simple
+ logic (equivalent to firstorder Peano arithmetic), and the
+ verification language and proof checker are customdesigned to
+ have only what is necessary to perform efficient general purpose
+ verification. This forms a part of the Metamath Zero project, to
+ build a minimal verifier that is capable of verifying its own
+ binary. In this paper, we will present the specification of the
+ dynamic semantics of x86 machine code, together with enough
+ information about Linux system calls to perform simple IO.",
+ paper = "Carn19c.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\index{Castagna, Giuseppe}
\index{Lanvin, Victor}
\index{Petrucciani, Tommaso}
@@ 16250,6 +16520,38 @@ when shown in factored form.
\end{chunk}
+\index{Chlipala, Adam}
+\index{Arvind}
+\index{Sherman, Benjamin}
+\index{Choi, Joonwon}
+\index{Vijayaraghavan, Murali}
+\begin{chunk}{axiom.bib}
+@misc{Chli17b,
+ author = "Chlipala, Adam and Arvind and Sherman, Benjamin and
+ Choi, Joonwon and Vijayaraghavan, Murali",
+ title = {{Kami: A Platform for HighLevel Parametric Hardware
+ Specification and its Modular Verification}},
+ year = "2017",
+ abstract =
+ "It has become fairly standard in the programming languages
+ research world to verify functional programs in proof assistants
+ using induction, algebraic simplification, and rewriting. In this
+ paper, we introduce Kami, a Coq library that uses labelled
+ transition systems to enable similar expressive and modular
+ reasoning for hardware designs expressed in the style of the
+ Bluespec language. We can specify, implement, and verify realistic
+ designs entirely within Coq, ending with automatic extraction into
+ a pipeline that bottoms out in FPGAs. Our methodology has been
+ evaluated in a case study verifying an infinite family of
+ multicore systems, with cachecoherent shared memory and pipelined
+ cores implementing (the base integer subset of) the RISCV
+ instruction set.",
+ paper = "Chli17b.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\index{Chojecki, Przemyslaw}
\begin{chunk}{axiom.bib}
@article{Choj17,
@@ 16438,12 +16740,11 @@ when shown in factored form.
\index{Clebsch, Sylvan}
\begin{chunk}{axiom.bib}
@msic{Cleb19,
+@misc{Cleb19,
author = "Clebsch, Sylvan",
title = {{Starting with Semantics}},
year = "2019",
 link = "\url{https://www.youtube.com/watch?v=JbS8aBa0Ck}",
 keywords = "DONE"
+ link = "\url{https://www.youtube.com/watch?v=JbS8aBa0Ck}"
}
\end{chunk}
@@ 17167,6 +17468,33 @@ when shown in factored form.
\end{chunk}
+\index{Davis, Ernest}
+\begin{chunk}{axiom.bib}
+@misc{Davi19,
+ author = "Davis, Ernest",
+ title = {{The Use of Deep Learning for Symbolic Integration}},
+ year = "2019",
+ link = "\url{https://arxiv.org/pdf/1912.05752.pdf}",
+ abstract =
+ "Lample and Charton (2019) describe a system that uses deep
+ learning technology to compute symbolic, indefinite integrals, and
+ to find symbolic solutions to first and secondorder ordinary
+ differential equations, when the solutions are elementary
+ functions. They found that, over a particular test set, the system
+ could find solutions more successfully than sophisticated packages
+ for symbolic mathematics such as Mathematica run with a long
+ timeout. This is an impressive accomplishment, as far as it
+ goes. However, the system can handle only a quite limited subset
+ of the problems that Mathematica deals with, and the test set has
+ significant builtin biases. Therefore the claim that this
+ outperforms Mathematica on symbolic integration needs to be very
+ much qualified.",
+ paper = "Davi19.pdf",
+ keywords = "printed, DONE"
+}
+
+\end{chunk}
+
\index{Davies, Rowan}
\index{Pfenning, Frank}
\begin{chunk}{axiom.bib}
@@ 18007,6 +18335,18 @@ when shown in factored form.
\subsection{F} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\begin{chunk}{axiom.bib}
+@misc{Fade17,
+ author = "Unknown",
+ title = {{Strongly Connected and Completely Specified Moore
+ Equivalent of a Mealy Machine}},
+ year = "2017",
+ link = "\url{https://cs.stackexchange.com/questions/81127/stronglyconnectedandcompletelyspecifiedmooreequivalentofamealymachine}",
+ keywords = "DONE"
+}
+
+\end{chunk}
+
\index{Farmer, William M.}
\index{Mohrenschildt, Martin v.}
\begin{chunk}{axiom.bib}
@@ 18242,6 +18582,46 @@ when shown in factored form.
\end{chunk}
+\index{Ford, Bryan}
+\begin{chunk}{axiom.bib}
+@article{Ford04,
+ author = "Ford, Bryan",
+ title = {{Parsing Expression Grammars: A RecognitionBased Syntactic
+ Foundation}},
+ journal = "SIGPLAN Notices",
+ volume = "39",
+ number = "1",
+ pages = "111122",
+ year = "2004",
+ abstract =
+ "For decades we have been using Chomsky's generative system of
+ grammars, particularly contextfree grammars (CFGs) and regular
+ expressions (REs), to express the syntax of programming languages
+ and protocols. The power of generative grammars to express
+ ambiguity is crucial to their original purpose of modelling
+ natural languages, but this very power makes it unnecessarily
+ difficult both to express and to parse machineoriented languages
+ using CFGs. Parsing Expression Grammars (PEGs) provide an
+ alternative, recognitionbased formal foundation for describing
+ machineoriented syntax, which solves the ambiguity problem by not
+ introducing ambiguity in the first place. Where CFGs express
+ nondeterministic choice between alternatives, PEGs instead use
+ {\sl prioritized choice}. PEGs address frequently felt
+ expressiveness limitations of CFGs and REs, simplifying syntax
+ definitions and making it unnecessary to separate their lexical
+ and hierarchical components. A lineartime parser can be built for
+ any PEG, avoiding both the complexity and fickleness of LR parsers
+ and the inefficiency of generalized CFG parsing. While PEGs
+ provide a rich set of operators for constructing grammars, they
+ are reducible to two minimal recognition schemas developed around
+ 1970, TS/TDPL and gTS/GTDPL, which are here proven equivalent in
+ effective recognition power.",
+ paper = "Ford04.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\index{Fourer, Robert}
\index{Gay, David M.}
\index{Kernighan, Brian W.}
@@ 18367,6 +18747,34 @@ when shown in factored form.
\subsection{G} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\index{Gabriel, Richard}
+\begin{chunk}{axiom.bib}
+@book{Gabr85,
+ author = "Gabriel, Richard",
+ title = {{Performance and Evaluation of Lisp Systems}},
+ link = "\url{https://www.dreamsongs.com/Files/Timrep.pdf}",
+ year = "1985",
+ publisher = "MIT Press",
+ paper = "Gabr85.pdf"
+
+}
+
+\end{chunk}
+
+\index{Gabriel, Richard}
+\begin{chunk}{axiom.bib}
+@book{Gabr96,
+ author = "Gabriel, Richard",
+ title = {{Patterns of Software}},
+ link = "\url{https://www.dreamsongs.com/Files/PatternsOfSoftware.pdf}",
+ year = "1996",
+ publisher = "Oxford University Press",
+ paper = "Gabr96.pdf"
+
+}
+
+\end{chunk}
+
\index{Gabriel, Richard P.}
\begin{chunk}{axiom.bib}
@misc{Gabr10,
@@ 18588,6 +18996,19 @@ when shown in factored form.
\end{chunk}
+\index{Gentzen, G.}
+\begin{chunk}{axiom.bib}
+@article{Gent35a,
+ author = "Gentzen, G.",
+ title = {{Untersuchungen {\"u}ber das logische Schliessen I and II}},
+ journal = "Mathematische Zeitschrift",
+ year = "1935",
+ publisher = "Springer",
+ paper = "Gent35a.pdf"
+}
+
+\end{chunk}
+
\index{Gerdt, Vladimir P.}
\index{Koepf, Wolfram}
\index{Mayr, Ernst W.}
@@ 18722,6 +19143,19 @@ when shown in factored form.
\end{chunk}
+\index{Gilbert, Gaetan}
+\begin{chunk}{axiom.bib}
+@phdthesis{Gilb19,
+ author = "Gilbert, Gaetan",
+ title = {{A Type Theory with Definitional ProofIrrelevance}},
+ school = "Ecole Nationale Superieure MinesTelecom Atlantique",
+ year = "2019",
+ link = "\url{https://gitlab.com/SkySkimmer/thesis/~/jobs/artifacts/master/download?job=build}",
+ paper = "Gilb19.zip"
+}
+
+\end{chunk}
+
\index{Girard, JeanYves}
\begin{chunk}{axiom.bib}
@article{Gira87,
@@ 19189,6 +19623,18 @@ when shown in factored form.
\end{chunk}
+\index{Grayson, Daniel R.}
+\begin{chunk}{axiom.bib}
+@misc{Gray18a,
+ author = "Grayson, Daniel R.",
+ title = {{The Mathematical Work of Vladimir Voevodsky}},
+ year = "2018",
+ link = "\url{https://www.youtube.com/watch?v=BanMgvdKP8E}",
+ keywords = "DONE"
+}
+
+\end{chunk}
+
\index{Griesmer, J.H.}
\begin{chunk}{axiom.bib}
@article{Grie76,
@@ 19280,6 +19726,72 @@ when shown in factored form.
\end{chunk}
+\index{Gueneau, Armael}
+\begin{chunk}{axiom.bib}
+@phdthesis{Guen19,
+ author = "Gueneau, Armael",
+ title = {{Mechanized Verification of the Correctness and Asymptotic
+ Complexity of Programs}},
+ school = "University of Paris",
+ year = "2019",
+ link = "\url{http://gallium.inria.fr/~agueneau/phd/manuscript.pdf}",
+ abstract =
+ "This dissertation is concerned with the question of formally
+ verifying that the implementation of an algorithm is not only
+ functionally correct (it always returns the right result), but
+ also has the right asymptotic complexity (it reliably computes the
+ result in the expected amount of time).
+
+ In the algorithms literature, it is standard practice to
+ characterize the performance of an algorithm by indicating its
+ asymptotic time complexity, typically using Landau's ``bigO''
+ notation. We first argue informally that asymptotic complexity
+ bounds are equally useful as formal specifications, because they
+ enable modular reasoning: a $O$ bound abstracts over the concrete
+ cost expression of a program, and therefore abstracts over the
+ specifics of its implementation. We illustrate  with the help of
+ small illustrative examples  a number of challenges with the use
+ of the $O$ notation, in particular in the multivariate case, that
+ might be overlooked when reasoning informally.
+
+ We put these considerations into practice by formalizing the $O$
+ notation in the Coq proof assistant, and by extending an existing
+ program verification framework, CFML, with support for a
+ methodology enabling robust and modular proofs of asymptotic
+ complexity bounds. We extend the existing framework of Separation
+ Logic with Time Credits, which allows to reason at the same time
+ about correctness and time complexity, and introduce negative time
+ credits. Negative time credits increase the expressiveness of the
+ logic, and enable convenient reasoning principles as well as
+ elegant specifications. At the level of specifications, we show
+ how asymptotic complexity specifications using $O$ can be
+ integrated and composed within Separation Logic with Time
+ Credits. Then, in order to establish such specifications, we
+ develop a methodology that allows proofs of complexity in
+ Separation Logic to be robust and carried out at a relatively high
+ level of abstraction, by relying on two key elements: a mechanism
+ for collecting and deferring constraints during the proof, and a
+ mechanism for semiautomatically synthesizing cost expressions
+ without loss of generality.
+
+ We demonstrate the usefulness and practicality of our approach on
+ a number of increasingly challenging case studies. These include
+ algorithms whose complexity analysis is relatively simple (such as
+ binary search, which is nonetheless out of the scope of many
+ automated complexity analysis tools) and data structures (such as
+ Okasaki's binary random access lists). In our most challenging
+ case study, we establish the correctness and amortized complexity
+ of a stateoftheart incremental cycle detection algorithm: our
+ methodology scales up to highly nontrivial algorithms whose
+ complexity analysis intimately depends on subtle functional
+ invariants, and furthermore makes it possible to formally verify
+ OCaml code which can then actually be used as part of real world
+ programs.",
+ paper = "Guen19.pdf"
+}
+
+\end{chunk}
+
\index{Gurevich, Yuri}
\begin{chunk}{axiom.bib}
@article{Gure12,
@@ 20339,6 +20851,40 @@ when shown in factored form.
\end{chunk}
+\index{Hoare, C.A.R}
+\begin{chunk}{axiom.bib}
+@misc{Hoar04,
+ author = "Hoare, C.A.R",
+ title = {{Unified Theories of Programming}},
+ year = "2004",
+ link = "\url{https://fi.ort.edu.uy/innovaportal/file/20124/1/04hoard_unified_theories.pdf}",
+ abstract =
+ "Professional practice in a mature engineering discipline is based
+ on relevant scientific theories, usually expressed in the language
+ of mathematics. A mathematical theory of programming aims to
+ provide a similar basis for specification, design and
+ implementation of computer programs. The theory can be presented
+ in a variety of styles, including
+ \begin{enumerate}
+ \item Denotational, relating a program to a specification of its
+ observable properties and behaviour
+ \item Algebraic, providing equations and inequalities for
+ comparison, transformation and optimisation of designs and
+ programs.
+ \item Operational, describing individual steps of a possible
+ mechanical implementation
+ \end{enumerate}
+
+ This paper presents simple theories of sequential
+ nondeterministic programming in each of these three styles; by
+ deriving each presentation from its predecessor in a cyclic
+ fashion, mutual consistency is assured.",
+ paper = "Hoar04.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\index{Holzl, Johannes}
\index{Heller, Armin}
\begin{chunk}{axiom.bib}
@@ 20789,6 +21335,36 @@ when shown in factored form.
\end{chunk}
+\index{Jenkins, Kris}
+\begin{chunk}{axiom.bib}
+@misc{Jenk18,
+ author = "Jenkins, Kris",
+ title = {{Communicating with Types}},
+ year = "2018",
+ link = "\url{https://vimeo.com/302682323}",
+ abstract =
+ "Modern type systems have come a long way from the days of C and
+ Java. Far from being nitpickers that berate us for making
+ mistakes, type systems like the ones found in Haskell, PureScript
+ and Elm form a language in themselves. A language for expressing
+ highlevel ideas about our software to our colleagues and to the
+ computer. A design language. In this talk, we'll take a look at
+ the way the right kind of type signatures let us talk about
+ software. We'll survey how to state our assumptions about the
+ domain we're coding in and how each part fits together. We'll show
+ how it can highlight problems, and surface opportunities for
+ reuse. And most importantly, we'll see how types can help you
+ communicate to your coworkers, and to future maintainers, with
+ little effort. You've probably heard the phrase ``programs are
+ meant to be read by humans and only incidentally for computers to
+ execute''. Come and see how a modern type system is about
+ communicating ideas to humans, and only incidentally about proving
+ correctness.",
+ keywords = "DONE"
+}
+
+\end{chunk}
+
\index{Jenks, Richard D.}
\begin{chunk}{axiom.bib}
@article{Jenk75,
@@ 21829,6 +22405,17 @@ when shown in factored form.
\end{chunk}
+\index{Lem, Stanislaw}
+\begin{chunk}{axiom.bib}
+@book{Lemx68,
+ author = "Lem, Stanislaw",
+ title = {{His Master's Voice}},
+ publisher = "MIT Press",
+ year = "1968"
+}
+
+\end{chunk}
+
\index{Leroy, Xavier}
\begin{chunk}{axiom.bib}
@techreport{Lero92,
@@ 21867,7 +22454,7 @@ when shown in factored form.
\begin{chunk}{axiom.bib}
@misc{Lewi17a,
author = "Lewis, Robert Y. and Wu, Minchao",
 title = {{A Bidirectional extensible ad hoc interface between Lean
+ title = {{A Bidirectional Extensible Ad Hoc Interface between Lean
and Mathematica}},
year = "2017",
link = "\url{https://robertylewis.com/leanmm/lean_mm.pdf}",
@@ 22153,6 +22740,19 @@ when shown in factored form.
\end{chunk}
+\index{Lucas, J.R.}
+\begin{chunk}{axiom.bib}
+@misc{Luca03,
+ author = "Lucas, J.R.",
+ title = {{Minds, Machines and G{\"o}del}},
+ year = "2003",
+ link =
+ "\url{https://pdfs.semanticscholar.org/bde3/b731bf73ef6052e34c4465e57718c03b13f8.pdf}",
+ paper = "Luca03.pdf"
+}
+
+\end{chunk}
+
\index{Luckham, D.C.}
\index{Park, D.M.R}
\index{Paterson, M.S.}
@@ 22475,7 +23075,20 @@ when shown in factored form.
Facilities will be provided for communication with humans in the
system via manual intervention and display devices connected to
the computer.",
 paper = "Mcca60a.pdf"
+ paper = "Mcca60a.pdf",
+ keywords = "DONE"
+}
+
+\end{chunk}
+
+\index{McCorduck, Pamela}
+\begin{chunk}{axiom.bib}
+@book{Mcco79,
+ author = "McCorduck, Pamela",
+ title = {{Machines Who Think}},
+ year = "1979",
+ publisher = "Freeman",
+ keywords = "shelf, DONE"
}
\end{chunk}
@@ 22509,6 +23122,37 @@ when shown in factored form.
\end{chunk}
+\index{Milne, Peter}
+\begin{chunk}{axiom.bib}
+@article{Miln94,
+ author = "Milne, Peter",
+ title = {{Classical Harmony: Rules of Inference and the Meaning of
+ the Logical Constants}},
+ journal = "Synthese",
+ volume = "100",
+ number = "1",
+ pages = "4994",
+ year = "1994",
+ abstract =
+ "The thesis that, in a system of natural deduction, the meaning of
+ a logical constant is given by some or all of its introduction and
+ elimination rules has been developed recently by Dummett, Prawitz,
+ Tennant, and others, by the addition of harmony constraints.
+ Introduction and elimination rules for a logical constant must be
+ in harmony. By deploying harmony constraints, these authors have
+ arrived at logics no stronger than intuitionistic propositional
+ logic. Classical logic, they maintain, cannot be justified from
+ this prooftheoretic perspective. This paper argues that, while
+ classical logic can be formulated so as to satisfy a number of
+ harmony constraints, the meanings of the standard logical
+ constants cannot all be given by their introduction and/or
+ elimination rules; negation, in particular, comes under close
+ scrutiny.",
+ paper = "Miln94.pdf"
+}
+
+\end{chunk}
+
\index{Monin, JeanFrancois}
\begin{chunk}{axiom.bib}
@book{Moni03,
@@ 22522,6 +23166,18 @@ when shown in factored form.
\end{chunk}
+\index{Montanaro, Ashley}
+\begin{chunk}{axiom.bib}
+@misc{Mont12,
+ author = "Montanaro, Ashley",
+ title = {{Computational Complexity Lecture Notes}},
+ year = "2012",
+ link = "\url{https://people.maths.bris.ac.uk/~csxam/teaching/cclecturenotes.pdf}",
+ paper = "Mont12.pdf"
+}
+
+\end{chunk}
+
\index{de Moura, Leonardo}
\index{Avigad, Jeremy}
\index{Kong, Soonho}
@@ 22697,6 +23353,30 @@ when shown in factored form.
\end{chunk}
+\index{Milner, Robin}
+\begin{chunk}{axiom.bib}
+@techreport{Miln73,
+ author = "Milner, Robin",
+ title = {{Models of LCF}},
+ type = "technical report",
+ institution = "Stanford Artificial Intelligence Laboratory",
+ number = "STANCS73332",
+ year = "1973",
+ link = "\url{http://i.stanford.edu/pub/cstr/reports/cs/tr/73/332/CSTR73332.pdf}",
+ abstract =
+ "LCF is a deductive system for computable functions proposed by
+ D. Scott in 1969 in an unpublished memorandum. The purpose of the
+ present paper is to demonstrate the soundness of the system with
+ respect to certain models, which are partially ordered domains of
+ continuous functions. This demonstration was supplied by Scott in
+ his memorandum; the present paper is merely intended to make this
+ work more accessible.",
+ paper = "Miln73.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\index{Mobarakeh, S. Saeidi}
\begin{chunk}{axiom.bib}
@misc{Moba09,
@@ 22723,6 +23403,42 @@ when shown in factored form.
\end{chunk}
+\index{Moore, Brandon}
+\index{Pena, Lucas}
+\index{Rosu, Grigore}
+\begin{chunk}{axiom.bib}
+@inproceedings{Moor18,
+ author = "Moore, Brandon and Pena, Lucas and Rosu, Grigore",
+ title = {{Program Verification by Coinduction}},
+ booktitle = "Programming Languages and Systems",
+ publisher = "Springer",
+ year = "2018",
+ pages = "589618",
+ link = "\url{https://link.springer.com/content/pdf/10.1007%2F9783319898841.pdf}",
+ abstract =
+ "We present a novel program verification approach based on
+ coinduction, which takes as input an operational semantics. No
+ intermediates like program logics or verification condition
+ generators are needed. Specifications can be written using any
+ state predicates. We implement our approach in Coq, giving a
+ certifying languageindependent verification framework. Our proof
+ system is implemented as a single module imported unchanged into
+ languagespecific proofs. Automation is reached by instantiating a
+ generic heuristic with languagespecific tactics. Manual
+ assistance is also smoothly allowed at points the automation
+ cannot handle. We demonstrate the power and versatility of our
+ approach by verifying algorithms as complicated as SchorrWaite
+ graph marking and instantiating our framework for object languages
+ in several styles of semantics. Finally, we show that our
+ coinductive approach subsumes reachability logic, a recent
+ languageindependent sound and (relatively) complete logic for
+ program verification that has been instantiated with operational
+ semantics of languages as complex as C, Java and Javascript.",
+ paper = "Moor18.pdf"
+}
+
+\end{chunk}
+
\index{Morris Jr., J.H.}
\begin{chunk}{axiom.bib}
@inproceedings{Morr73,
@@ 23944,6 +24660,57 @@ when shown in factored form.
\end{chunk}
+\index{Pierce, Benjamin}
+\index{Appel, Andrew W.}
+\index{Weirich, Stephanie}
+\index{Zdancewic, Steve}
+\index{Shao, Zhong}
+\index{Chlipala, Adam}
+\begin{chunk}{axiom.bib}
+@misc{Pier16,
+ author = "Pierce, Benjamin and Appel, Andrew W. and Weirich, Stephanie
+ and Zdancewic, Steve and Shao, Zhong and Chlipala, Adam",
+ title = {{The Science of Deep Specification}},
+ year = "2016",
+ link = "\url{https://www.cis.upenn.edu/~bcpierce/papers/deepspechcss2016slides.pdf}",
+ paper = "Pier16.pdf"
+}
+
+\end{chunk}
+
+\index{Pierce, Benjamin}
+\begin{chunk}{axiom.bib}
+@misc{Pier18,
+ author = "Pierce, Benjamin",
+ title = {{The Science of Deep Specification}},
+ year = "2018",
+ link = "\url{https://www.cis.upenn.edu/~bcpierce/papers/pierceetaps2018.pdf}",
+ paper = "Pier18.pdf"
+}
+
+\end{chunk}
+
+\index{Pierce, Benjamin}
+\index{de Amorim, Arthur Azevedo}
+\index{Casinghino, Chris}
+\index{Gaboardi, Marco}
+\index{Greenberg, Michael}
+\index{Hritcu, Catalin}
+\index{Sjoberg, Vilhelm}
+\index{Yorgey, Brent}
+\begin{chunk}{axiom.bib}
+@misc{Pier19,
+ author = "Pierce, Benjamin and de Amorim, Arthur Azevedo and
+ Casinghino, Chris and Gaboardi, Marco and Greenberg, Michael
+ and Hritcu, Catalin and Sjoberg, Vilhelm and Yorgey, Brent",
+ title = {{Logical Foundations}},
+ year = "2019",
+ link = "\url{https://softwarefoundations.cis.upenn.edu/lfcurrent/lf.tgz}",
+ paper = "Pier19.tgz"
+}
+
+\end{chunk}
+
\index{Piotrowski, Bartosz}
\index{Brown, Chad E.}
\index{Kaliszyk, Cezary}
@@ 23993,19 +24760,6 @@ when shown in factored form.
\end{chunk}
\index{Pittman, Dan}
\begin{chunk}{axiom.bib}
@misc{Pitt18,
 author = "Pittman, Dan",
 title = {{Proof Theory Impressionism: Blurring the CurryHoward Line}},
 link = "\url{https://www.youtube.com/watch?v=jrVPBAd5Gc}",
 year = "2018",
 comment = "Strange Loop 2018 Conference",
 keywords = "DONE"
}

\end{chunk}

\index{Piskac, Ruzica}
\begin{chunk}{axiom.bib}
@inproceedings{Pisk15,
@@ 24042,6 +24796,19 @@ when shown in factored form.
\end{chunk}
+\index{Pittman, Dan}
+\begin{chunk}{axiom.bib}
+@misc{Pitt18,
+ author = "Pittman, Dan",
+ title = {{Proof Theory Impressionism: Blurring the CurryHoward Line}},
+ link = "\url{https://www.youtube.com/watch?v=jrVPBAd5Gc}",
+ year = "2018",
+ comment = "Strange Loop 2018 Conference",
+ keywords = "DONE"
+}
+
+\end{chunk}
+
\index{Platzer, Andre}
\begin{chunk}{axiom.bib}
@book{Plat18,
@@ 24444,6 +25211,42 @@ when shown in factored form.
\end{chunk}
+\index{Reis, Leonardo Vieira dos Santos}
+\index{Di Iorio, Vladimir Oliveira}
+\index{Bigonha, Roberto S.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Reis14,
+ author = "Reis, Leonardo Vieira dos Santos and Di Iorio, Vladimir Oliveira
+ and Bigonha, Roberto S.",
+ title = {{A Mixed Approach for Building Extensible Parsers}},
+ booktitle = "Programming Language",
+ publisher = "Springer",
+ volume = "LNCS 8771",
+ pages = "115",
+ year = "2014",
+ abstract =
+ "For languages whose syntax is fixed, parsers are usually built
+ with a static structure. The implementation of features like macro
+ mechanisms or extensible languages requires the use of parsers
+ that may be dynamically extended. In this work, we discuss a mixed
+ approach for building efficient topdown dynamically extensible
+ parsers. Our view is based on the fact that a large part of the
+ parser code can be statically compiled and only the parts that are
+ dynamic should be interpreted for a more efficient processing. We
+ propose the generation of code for the base parser, in which hooks
+ are included to allow efficient extension of the underlying
+ grammar and activation of a grammar interpreter whenever it is
+ necessary to use an extended syntax. As a proof of concept, we
+ present a prototype implementation of a parser generator using
+ Adaptable Parsing Expression Grammars (APEG) as the underlying
+ method for syntax definition. We show that APEG has features which
+ allow an efficient implementation using the proposed mixed
+ approach.",
+ paper = "Reis14.pdf"
+}
+
+\end{chunk}
+
\index{Remy, Didier}
\begin{chunk}{axiom.bib}
@techreport{Remy92,
@@ 24732,6 +25535,19 @@ when shown in factored form.
\end{chunk}
+\index{Rosu, Grigore}
+\index{Lucanu, Dorel}
+\index{Guth, Dwight}
+\begin{chunk}{axiom.bib}
+@misc{Rosu20,
+ author = "Rosu, Grigore and Lucanu, Dorel and Guth, Dwight",
+ title = {{The K Framework}},
+ year = "2020",
+ link = "\url{http://www.kframework.org/index.php/Main_Page}"
+}
+
+\end{chunk}
+
\index{Russell, Bertrand}
\begin{chunk}{axiom.bib}
@book{Russ1920,
@@ 25437,7 +26253,8 @@ when shown in factored form.
link =
"\url{https://www.logicmatters.net/resources/pdfs/TeachYourselfLogic2017.pdf}",
year = "2017",
 paper = "Smit17.pdf"
+ paper = "Smit17.pdf",
+ keywords = "DONE"
}
\end{chunk}
@@ 25570,13 +26387,15 @@ when shown in factored form.
\index{Soare, Robert I.}
\begin{chunk}{axiom.bib}
@misc{Soar99,
+@inbook{Soar99,
author = "Soare, Robert I.",
title = {{The History and Concept of Computability}},
booktitle = "Handbook of Computability Theory",
 publisher = "NorthHolland",
 link = "\url{http://www.people.cs.uchicago.edu/~soare/History/handbook.pdf}",
year = "1999",
+ link = "\url{http://www.people.cs.uchicago.edu/~soare/History/handbook.pdf}",
+ chapter = "unknown",
+ pages = "unknown",
+ publisher = "Elsevier",
paper = "Soar99.pdf",
keywords = "printed"
}
@@ 25652,6 +26471,21 @@ when shown in factored form.
\end{chunk}
+\index{Stojanovski, Jordan}
+\begin{chunk}{axiom.bib}
+@article{Stoj86,
+ author = "Stojanovski, Jordan",
+ title = {{A Note on Implementing Prolog in Lisp}},
+ journal = "Information Processing Letters",
+ volume = "23",
+ pages = "261264",
+ year = "1986",
+ paper = "Stoj86.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\index{Stratford, Jonathan}
\index{Davenport, James H.}
\begin{chunk}{axiom.bib}
@@ 26102,6 +26936,34 @@ when shown in factored form.
\end{chunk}
+\index{Taylor, Sophie}
+\begin{chunk}{axiom.bib}
+@misc{Tayl16,
+ author = "Taylor, Sophie",
+ title = {{A working (class) Introduction to Homotopy Type Theory}},
+ year = "2016",
+ link = "\url{https://www.youtube.com/watch?v=xv6SwPn1dlc}",
+ keywords = "DONE"
+}
+
+\end{chunk}
+
+\index{Tennant, Neil}
+\begin{chunk}{axiom.bib}
+@article{Tenn79,
+ author = "Tennant, Neil",
+ title = {{Entailment and Proofs}},
+ journal = "Proc. of the Aristotelian Society",
+ volume = "79",
+ pages = "167189",
+ year = "1979",
+ link = "\url{https://cpbusw2.wpmucdn.com/u.osu.edu/dist/a/4597/files/2014/07/tennant_pas19791rrsm6l.pdf}",
+ paper = "Tenn79.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\index{Tennant, Neil}
\begin{chunk}{axiom.bib}
@book{Tenn90,
@@ 26172,6 +27034,35 @@ when shown in factored form.
\end{chunk}
+\index{Timany, Amin}
+\index{Sozeau, Matthieu}
+\begin{chunk}{axiom.bib}
+@inproceedings{Tima18,
+ author = "Timany, Amin and Sozeau, Matthieu",
+ title = {{Cumulative Inductive Types in Coq}},
+ booktitle = "3rd Conf. on Formal Structures for Computation and
+ Deduction",
+ publisher = "HAL",
+ link = "\url{https://hal.inria.fr/hal01952037/document}",
+ abstract =
+ "In order to avoid wellknown paradoxes associated with
+ selfreferential definitions, highorder dependent type theories
+ stratify the theory using a countably infinite hierarchy of
+ universes (also known as sorts), $Type_0 : Type_1,\ldots$ Such
+ type systems are called cumulative if for any type $A$ we have
+ that $A:Type_i$ implies $A:Type_{i+1}$. The Predicative Calculus
+ of Inductive Constructions (pCIC) which forms the basis of the Coq
+ proof assistant, is one such system. In this paper we present the
+ Predicative Calculus of Cumulative Inductive Constructions
+ (pCUIC) which extends the cumulativity relation to inductive
+ types. We discuss cumulative inductive types as present in Coq 8.7
+ and their application to formalization and definitional
+ translations.",
+ paper = "Tima18.pdf"
+}
+
+\end{chunk}
+
\index{Tofte, Mads}
\begin{chunk}{axiom.bib}
@phdthesis{Toft88,
@@ 26520,6 +27411,16 @@ when shown in factored form.
\end{chunk}
+\begin{chunk}{axiom.bib}
+@misc{verb20,
+ author = "unknown",
+ title = {{Verbal Regular Expressions}},
+ year = "2020",
+ link = "\url{https://github.com/VerbalExpressions/implementation/wiki/Listofmethodstoimplement}"
+}
+
+\end{chunk}
+
\subsection{W} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\index{Wadler, Philip}
@@ 38826,12 +39727,13 @@ when shown in factored form.
\index{Fitting, M.}
\begin{chunk}{axiom.bib}
@book{Fitt90,
+@book{Fitt96,
author = "Fitting, M.",
title = {{Firstorder Logic and Automated Theorem Proving}},
publisher = "SpringerVerlag",
 year = "1990",
 isbn = "9781461275152"
+ year = "1996",
+ isbn = "13.9781461223603",
+ paper = "Fitt96.pdf"
}
\end{chunk}
@@ 41225,7 +42127,8 @@ when shown in factored form.
abstract =
"LCF is based on a logic by Dana Scott, proposed by him at Oxford in the
fall of 1969, for reasoning about computable functions.",
 paper = "Miln72.pdf"
+ paper = "Miln72.pdf",
+ keywords = "printed"
}
\end{chunk}
@@ 42041,6 +42944,46 @@ when shown in factored form.
\end{chunk}
+\index{Pierce, Benjamin C.}
+\index{Casinghino, Chris}
+\index{Gaboardi, Marco}
+\index{Greenberg, Michael}
+\index{Hritcu, Catalin}
+\index{Sj\"oberg, Vilhelm}
+\index{Yorgey, Brent}
+\begin{chunk}{axiom.bib}
+@misc{Pier19a,
+ author = {Pierce, Benjamin C. and Casinghino, Chris and Gaboardi, Marco and
+ Greenberg, Michael and Hritcu, Catalin and Sj\"oberg, Vilhelm and
+ Yorgey, Brent},
+ title = {{Programming Language Foundations}},
+ year = "2019",
+ file = "Pier19a.tgz",
+ abstract =
+ "This electronic book is a course on Software Foundations, the
+ mathematical underpinnings of reliable software. Topics include basic
+ concepts of logic, computerassisted theorem proving, the Coq proof
+ assistant, functional programming, operational semantics, Hoare logic,
+ and static type systems. The exposition is intended for a broad range
+ of readers, from advanced undergraduates to PhD students and
+ researchers. No specific background in logic or programming languages
+ is assumed, though a degree of mathematical maturity will be helpful.
+
+ The principal novelty of the course is that it is one hundred per cent
+ formalized and machinechecked: the entire text is literally a script
+ for Coq. It is intended to be read alongside an interactive session
+ with Coq. All the details in the text are fully formalized in Coq, and
+ the exercises are designed to be worked using Coq.
+
+ The files are organized into a sequence of core chapters, covering
+ about one semester's worth of material and organized into a coherent
+ linear narrative, plus a number of appendices covering additional
+ topics. All the core chapters are suitable for both upperlevel
+ undergraduate and graduate students."
+}
+
+\end{chunk}
+
\index{Piroi, Florina}
\index{Kutsiz, Temur}
\begin{chunk}{axiom.bib}
@@ 42095,7 +43038,8 @@ when shown in factored form.
every r.e. element of the fully abstract semantics becomes definable,
thus characterising the programming language, up to interdefinability,
from the set of r.e. elements of the domains of the semantics.",
 paper = "Plot77.pdf"
+ paper = "Plot77.pdf",
+ keywords = "printed"
}
\end{chunk}
@@ 49538,6 +50482,19 @@ Proc ISSAC 97 pp172175 (1997)
\end{chunk}
+\index{Bezhanishvili, Nick}
+\index{de Jongh, Dick}
+\begin{chunk}{axiom.bib}
+@misc{Bezh05,
+ author = "Bezhanishvili, Nick and de Jongh, Dick",
+ title = {{Intuitionistic Logic}},
+ year = "2005",
+ link = "\url{https://www.cs.le.ac.uk/people/nb118/Publications/ESSLLI'05.pdf}",
+ paper = "Bezh05.pdf"
+}
+
+\end{chunk}
+
\index{Bradford, Russell}
\index{Chen, Changbo}
\index{Davenport, James H.}
@@ 51589,6 +52546,19 @@ Proc ISSAC 97 pp172175 (1997)
\subsection{Z} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\index{Zach, Richard}
+\begin{chunk}{axiom.bib}
+@book{Zach20,
+ author = "Zach, Richard",
+ title = {{The Open Logic Text}},
+ year = "2020",
+ publisher = "The Open Logic Project",
+ link = "\url{http://builds.openlogicproject.org/openlogiccomplete.pdf}",
+ paper = "Zach20.pdf"
+}
+
+\end{chunk}
+
\index{Zhao, Ting}
\index{Wang, Dongming}
\index{Hong, Hoon}
@@ 55944,6 +56914,44 @@ American Mathematical Society (1994)
\end{chunk}
+\index{Baruch, Robert}
+\begin{chunk}{axiom.bib}
+@misc{Baru19,
+ author = "Baruch, Robert",
+ title = {{Very Basic Introduction to Formal Verification}},
+ year = "2019",
+ link = "\url{https://www.youtube.com/watch?v=9e7F1XhjhKw}",
+ keywords = "DONE"
+}
+
+\end{chunk}
+
+\index{Baruch, Robert}
+\begin{chunk}{axiom.bib}
+@misc{Baru19a,
+ author = "Baruch, Robert",
+ title = {{Cmod A7 Reference Manual}},
+ year = "2019",
+ link = "\url{https://reference.digilentinc.com/_media/reference/programmablelogic/cmoda7/cmod_a7_rm.pdf}",
+ paper = "Baru19a.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Baruch, Robert}
+\begin{chunk}{axiom.bib}
+@misc{Baru19b,
+ author = "Baruch, Robert",
+ title = {{Cmod A7 Schematic}},
+ year = "2019",
+ link = "\url{https://reference.digilentinc.com/_media/cmoda7/cmod_a7_sch.pdf}",
+ paper = "Baru19b.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\index{Beebe, Nelson H. F.}
\begin{chunk}{axiom.bib}
@misc{Beeb14,
@@ 72108,6 +73116,18 @@ IBM Manual, March 1988
\end{chunk}
+\index{Vapnik, Vladimir}
+\begin{chunk}{axiom.bib}
+@misc{Vapn20,
+ author = "Vapnik, Vladimir",
+ title = {{Predicates, Invariants, and the Essence of Intelligence}},
+ year = "2020",
+ link = "\url{https://lexfridman.com/vladimirvapnik2}",
+ keywords = "DONE"
+}
+
+\end{chunk}
+
\index{Vasconcelos, Wolmer}
\begin{chunk}{ignore}
\bibitem[Vasconcelos 99]{Vas99} Vasconcelos, Wolmer
@@ 72120,6 +73140,18 @@ ISBN 3540213112
\end{chunk}
\index{Voevodsky, Vladimir}
+\begin{chunk}{axiom.bib}
+@misc{Voev16,
+ author = "Voevodsky, Vladimir",
+ title = {{Univalent Foundations of Mathematics}},
+ year = "2016",
+ link = "\url{https://www.youtube.com/watch?v=9f4sS9sX2A}",
+ keywords = "DONE"
+}
+
+\end{chunk}
+
+\index{Voevodsky, Vladimir}
\index{Benedikt, Ahrens}
\index{Grayson, Daniel}
\begin{chunk}{axiom.bib}
@@ 81292,7 +82324,7 @@ Mathematical Programming: The State of the Art.
\begin{chunk}{axiom.bib}
@inproceedings{Prat73,
author = "Pratt, Vaughan R.",
 title = {{Top down operator precedence}},
+ title = {{Top Down Operator Precedence}},
booktitle = "Proc. 1st annual ACM SIGACTSIGPLAN Symposium on Principles
of Programming Languages",
series = "POPL'73",
diff git a/changelog b/changelog
index cdac313..95ef7a6 100644
 a/changelog
+++ b/changelog
@@ 1,3 +1,6 @@
+20200307 tpd src/axiomwebsite/patches.html 20200307.01.tpd.patch
+20200307 tpd books/bookvol15 updated
+20200307 tpd books/bookvolbib new references
20200207 tpd src/axiomwebsite/patches.html 20200207.01.tpd.patch
20200207 tpd books/bookvol15 updated
20200207 tpd books/bookvolbib new references
diff git a/patch b/patch
index b933f9f..5d5d3e6 100644
 a/patch
+++ b/patch
@@ 2,636 +2,842 @@ books/bookvolbib add references
Goal: Proving Axiom Sane
+\index{Alturki, Musab A.}
+\index{Moore, Brandon}
\begin{chunk}{axiom.bib}
@misc{Wiki19,
 author = "Wikipedia",
 title = {{Algebraic data type}},
+@misc{Altu19,
+ author = "Alturki, Musab A. and Moore, Brandon",
+ title = {{K vs Coq as Language Verification Frameworks}},
year = "2019",
 link = "\url{https://en.wikipedia.org/wiki/Algebraic_data_type}"
+ link = "\url{https://runtimeverification.com/blog/kvscoqaslanguageverificationframeworkspart1of3/}"
}
\end{chunk}
\index{MacKenzei, Donald}
+\index{Rosu, Grigore}
+\index{Lucanu, Dorel}
+\index{Guth, Dwight}
\begin{chunk}{axiom.bib}
@book{Mack01,
 author = "MacKenzei, Donald",
 title = {{Mechanizing Proof: Computing, Risk, and Trust}},
 year = "2001",
 publisher = "MIT Press",
 isbn = "0262133938",
 keywords = "shelf"
+@misc{Rosu20,
+ author = "Rosu, Grigore and Lucanu, Dorel and Guth, Dwight",
+ title = {{The K Framework}},
+ year = "2020",
+ link = "\url{http://www.kframework.org/index.php/Main_Page}"
}
\end{chunk}
\index{Selsam, Daniel}
\index{Ullrich, Sebastian}
\index{de Moura, Leonardo}
+\index{Moore, Brandon}
+\index{Pena, Lucas}
+\index{Rosu, Grigore}
\begin{chunk}{axiom.bib}
@misc{Sels20,
 author = "Selsam, Daniel and Ullrich, Sebastian and
 de Moura, Leonardo",
 title = {{Tabled Typeclass Resolution}},
 year = "2020",
 link = "\url{https://www.dropbox.com/s/5nxklkxvdh7xna9/typeclass.pdf}",
 comment = "preprint",
+@inproceedings{Moor18,
+ author = "Moore, Brandon and Pena, Lucas and Rosu, Grigore",
+ title = {{Program Verification by Coinduction}},
+ booktitle = "Programming Languages and Systems",
+ publisher = "Springer",
+ year = "2018",
+ pages = "589618",
+ link = "\url{https://link.springer.com/content/pdf/10.1007%2F9783319898841.pdf}",
abstract =
 "Typeclasses have proven an elegant and effective way of managing
 adhoc polymorphism in both programming languages and interactive
 proof assistants. However, the increasingly sophisticated uses of
 typeclasses within proof assistants has exposed two critical
 problems with existing typeclass resolution procedures: the
 {\sl diamond problem} which causes exponential running times in
 both theory and practice, and the {\sl cycle problem}, which
 cuases loops in the presense of cycles and so thwarts many desired
 uses of typeclasses. We present a new typeclass resolution
 procedure, called {\sl tabled typeclass resolution}, that solve
 these problems. We have implemented our procedure for the upcoming
 version (v4) of the Lean Theorem Prover, and confirm empirically
 that it provides exponential speedups compared to all existing
 procedures in the presense of diamonds. Our procedure is
 sufficiently lightweight that it could easily be implemented in
 other systems. We hope our new procedure facilitates even more
 sophisticated uses of typeclasses in both software development and
 interactive theorem proving.",
 paper = "Sels20.pdf"
}

\end{chunk}

\index{Hoare, Tony}
\begin{chunk}{axiom.bib}
@article{Hoar03,
 author = "Hoare, Tony",
 title = {{The Verifying Compiler: A Grand Challenge for Computing
 Research}},
 journal = "Journal of the ACM",
 volume = "50",
 number = "1",
 pages = "6369",
 year = "2003",
 abstract =
 "This contribution proposes a set of criteria that distinguish a
 grand challenge in science or engineering from the many other
 kinds of shortterm or longterm research problems that engage the
 interest of scientists and engineers. As an example drawn from
 Computer Science, it revives an old challenge: the construction and
 application of a verifying compiler that guarantees correctness of
 a program before running it.",
 paper = "Hoar03.pdf",
+ "We present a novel program verification approach based on
+ coinduction, which takes as input an operational semantics. No
+ intermediates like program logics or verification condition
+ generators are needed. Specifications can be written using any
+ state predicates. We implement our approach in Coq, giving a
+ certifying languageindependent verification framework. Our proof
+ system is implemented as a single module imported unchanged into
+ languagespecific proofs. Automation is reached by instantiating a
+ generic heuristic with languagespecific tactics. Manual
+ assistance is also smoothly allowed at points the automation
+ cannot handle. We demonstrate the power and versatility of our
+ approach by verifying algorithms as complicated as SchorrWaite
+ graph marking and instantiating our framework for object languages
+ in several styles of semantics. Finally, we show that our
+ coinductive approach subsumes reachability logic, a recent
+ languageindependent sound and (relatively) complete logic for
+ program verification that has been instantiated with operational
+ semantics of languages as complex as C, Java and Javascript.",
+ paper = "Moor18.pdf"
+}
+
+\end{chunk}
+
+\index{Carneiro, Mario}
+\begin{chunk}{axiom.bib}
+@misc{Carn19a,
+ author = "Carneiro, Mario",
+ title = {{The Type Theory of Lean}},
+ year = "2019",
+ link = "\url{https://github.com/digama0/leantypetheory/releases/v1.0}",
+ abstract =
+ "This thesis is a presentation of dependent type theory with
+ inductive types, a hierarchy of universes, with an impredicative
+ universe of propositions, proof irrelevance, and subsingleton
+ elimination, along with axioms for propositional extensionality,
+ quotient types, and the axiom of choice. This theory is notable
+ for being the axiomatic framework of the Lean theorem prover. The
+ axiom system is given here in complete detail, including
+ ``optional'' features of the type system such as {\bf let} binders
+ and definitions. We provide a reduction of the theory to a
+ finitely axiomatized fragment utilizing a fixed set of inductive
+ types (the {\bf W}type plus a few others), to ease the study of
+ this framework.
+
+ The metatheory of this theory (which we will call Lean) is studied. In
+ particular, we prove unique typing of the definitional equality,
+ and use this to construct the expected settheoretic model, from
+ which we derive consistency of Lean relative to {\bf ZFC+} \{there
+ are $n$ inaccessible cardinals $\vert~n<\omega$\} (a relatively
+ weak large cardinal assumption). As Lean supports models of
+ {\bf ZFC} with $n$ inaccessible cardinals, this is optimal.
+
+ We also show a number of negative results, where the theory is
+ less nice than we would like. In particular, type checking is
+ undecidable, and the type checking as implemented by the Lean
+ theorem prover is a decidable nontransitive underapproximation
+ of the typing judgment. Nontransitivity also leads to lack of
+ subject reduction, and the reduction relation does not satisfy the
+ ChurchRosser property, so reduction to a normal form does not
+ produce a decision procedure for definitional equality. However, a
+ modified reduction relation allows us to restore the ChurchRosser
+ property at the expense of guaranteed termination, so that unique
+ typing is shown to hold.",
+ paper = "Carn19a.pdf",
keywords = "printed"
}
\end{chunk}
\index{Pfenning, Frank}
+\index{Carneiro, Mario}
\begin{chunk}{axiom.bib}
@misc{Pfen04,
 author = "Pfenning, Frank",
 title = {{Automated Theorem Proving}},
 year = "2004",
 link = "\url{https://www.cs.cmu.edu/~fp/courses/atp/handouts/atp.pdf}",
 paper = "Pfen04.pdf"
+@misc{Carn19b,
+ author = "Carneiro, Mario",
+ title = {{The Type Theory of Lean (slides)}},
+ year = "2019",
+ link = "\url{https://github.com/digama0/leantypetheory/releases/v1.0}",
+ paper = "Carn19b.pdf",
+ keywords = "printed"
}
\end{chunk}
\index{Horgan, John}
+\index{Milner, Robin}
\begin{chunk}{axiom.bib}
@misc{Horg93,
 author = "Horgan, John",
 title = {{The Death of Proof}},
 year = "1993",
 publisher = "Scientific American",
 paper = "Horg93.pdf"
+@techreport{Miln73,
+ author = "Milner, Robin",
+ title = {{Models of LCF}},
+ type = "technical report",
+ institution = "Stanford Artificial Intelligence Laboratory",
+ number = "STANCS73332",
+ year = "1973",
+ link = "\url{http://i.stanford.edu/pub/cstr/reports/cs/tr/73/332/CSTR73332.pdf}",
+ abstract =
+ "LCF is a deductive system for computable functions proposed by
+ D. Scott in 1969 in an unpublished memorandum. The purpose of the
+ present paper is to demonstrate the soundness of the system with
+ respect to certain models, which are partially ordered domains of
+ continuous functions. This demonstration was supplied by Scott in
+ his memorandum; the present paper is merely intended to make this
+ work more accessible.",
+ paper = "Miln73.pdf",
+ keywords = "printed"
}
\end{chunk}
\index{Licata, Dan}
+\index{Jenkins, Kris}
\begin{chunk}{axiom.bib}
@misc{Lica16,
 author = "Licata, Dan",
 title = {{A Functional Programmer's Guide to Homotopy Type Theory}},
 year = "2016",
 comment = "ICFP conference",
 link = "\url{https://www.youtube.com/watch?v=caSOTjr1z18}"
+@misc{Jenk18,
+ author = "Jenkins, Kris",
+ title = {{Communicating with Types}},
+ year = "2018",
+ link = "\url{https://vimeo.com/302682323}"
}
\end{chunk}
\index{Licata, Dan}
+\index{Taylor, Sophie}
\begin{chunk}{axiom.bib}
@misc{Lica16a,
 author = "Licata, Dan",
 title = {{Computing with Univalence}},
+@misc{Tayl16,
+ author = "Taylor, Sophie",
+ title = {{A working (class) Introduction to Homotopy Type Theory}},
year = "2016",
 comment = "Institute for Advanced Study",
 link = "\url{https://www.youtube.com/watch?v=YDdDuq2AGw4}"
+ link = "\url{https://www.youtube.com/watch?v=xv6SwPn1dlc}",
+ keywords = "DONE"
}
\end{chunk}
\index{Pittman, Dan}
+\index{Voevodsky, Vladimir}
\begin{chunk}{axiom.bib}
@misc{Pitt18,
 author = "Pittman, Dan",
 title = {{Proof Theory Impressionism: Blurring the CurryHoward Line}},
 year = "2018",
 comment = "StrangeLoop 2018",
 link = "\url{https://www.youtube.com/watch?v=jrVPBAd5Gc}"
+@misc{Voev16,
+ author = "Voevodsky, Vladimir",
+ title = {{Univalent Foundations of Mathematics}},
+ year = "2016",
+ link = "\url{https://www.youtube.com/watch?v=9f4sS9sX2A}"
}
\end{chunk}
\index{Baker, Henry G.}
+\index{Zach, Richard}
\begin{chunk}{axiom.bib}
@misc{Bake93a,
 author = "Baker, Henry G.",
 title = {{Linear Logic and Permutation Stacks  The Forth Shall Be First}},
 year = "1993",
 link = "\url{http://home.pipeline.com/~hbaker1/ForthStack.html}",
 abstract =
 "Girard's linear logic can be used to model programming languages
 in which each bound variable name has exactly one ``occurrence'' 
 i.e., no variable can have implicit ``fanout'', multiple uses
 require explicit duplication. Among other nice properties,
 ``linear'' languages need no garbage collector, yet have no
 dangling reference problems. We show a natural equivalence between
 a ``linear'' programming language and a stack machine in which the
 top items can undergo arbitrary permutations. Such permutation
 stack machines can be considered combinator abstractions of
 Moore's {\sl Forth} programming language.",
 keywords = "DONE"
+@book{Zach20,
+ author = "Zach, Richard",
+ title = {{The Open Logic Text}},
+ year = "2020",
+ publisher = "The Open Logic Project",
+ link = "\url{http://builds.openlogicproject.org/openlogiccomplete.pdf}",
+ paper = "Zach20.pdf"
}
\end{chunk}
\index{Dijkstra, Edsger W.}
+\index{Bezhanishvili, Nick}
+\index{de Jongh, Dick}
\begin{chunk}{axiom.bib}
@misc{Dijk78,
 author = "Dijkstra, Edsger W.",
 title = {{A Political Pamphlet From The Middle Ages}},
 year = "1978",
 comment = "EWD638",
 link = "\url{https://www.cs.utexas.edu/users/EWD/transcriptions/EWD06xx/EWD638.html}"
+@misc{Bezh05,
+ author = "Bezhanishvili, Nick and de Jongh, Dick",
+ title = {{Intuitionistic Logic}},
+ year = "2005",
+ link = "\url{https://www.cs.le.ac.uk/people/nb118/Publications/ESSLLI'05.pdf}",
+ paper = "Bezh05.pdf"
}
\end{chunk}
\index{Bridges, Douglas}
\index{Reeves, Steve}
+\index{Milne, Peter}
\begin{chunk}{axiom.bib}
@article{Brid99,
 author = "Bridges, Douglas and Reeves, Steve",
 title = {{Constructive Mathematics in Theory and Programming
 Practice}},
 journal = "Philosophia Mathematica",
 volume = "7",
 number = "3",
 year = "1999",
 pages = "65104",
 paper = "Brid99.pdf",
 keywords = "printed"
}
+@article{Miln94,
+ author = "Milne, Peter",
+ title = {{Classical Harmony: Rules of Inference and the Meaning of
+ the Logical Constants}},
+ journal = "Synthese",
+ volume = "100",
+ number = "1",
+ pages = "4994",
+ year = "1994",
+ abstract =
+ "The thesis that, in a system of natural deduction, the meaning of
+ a logical constant is given by some or all of its introduction and
+ elimination rules has been developed recently by Dummett, Prawitz,
+ Tennant, and others, by the addition of harmony constraints.
+ Introduction and elimination rules for a logical constant must be
+ in harmony. By deploying harmony constraints, these authors have
+ arrived at logics no stronger than intuitionistic propositional
+ logic. Classical logic, they maintain, cannot be justified from
+ this prooftheoretic perspective. This paper argues that, while
+ classical logic can be formulated so as to satisfy a number of
+ harmony constraints, the meanings of the standard logical
+ constants cannot all be given by their introduction and/or
+ elimination rules; negation, in particular, comes under close
+ scrutiny.",
+ paper = "Miln94.pdf"
+}
\end{chunk}
\index{Pearce, David J.}
\index{Groves, Lindsay}
+\index{Tennant, Neil}
\begin{chunk}{axiom.bib}
@misc{Pear15,
 author = "Pearce, David J. and Groves, Lindsay",
 title = {{Designing a Verifying Compiler: Lessons Learned from
 developing Whiley}},
 year = "2015",
 link = "\url{https://homepages.ecs.vuw.ac.nz/~djp/files/SCP15preprint.pdf}",
 abstract =
 "An ongoing challenge for computer science is the development of a
 tool which automatically verifies programs meet their
 specifications, and are free from runtime errors such as
 dividebyzero, array outofbounds and null dereferences. Several
 impressive systems have been developed to this end, such as
 ESC/Java and Spec\#, which build on existing programming languages
 (e.g., Java, C\#). We have been developing a programming language
 from scratch to simplify verification, called Whiley, and an
 accompanying verifying compiler. In this paper, we present a
 technical overview of the verifying compiler and document the
 numerous design decisions made. Indeed, many of our designs
 reflect those of similar tools. However, they have often been
 ignored in the literature and/or spread thinly throughout. In
 doing this, we hope to provide a useful resource for those
 building verifying compilers.",
 paper = "Pear15.pdf",
 keywords = "printed"
+@article{Tenn79,
+ author = "Tennant, Neil",
+ title = {{Entailment and Proofs}},
+ journal = "Proc. of the Aristotelian Society",
+ volume = "79",
+ pages = "167189",
+ year = "1979",
+ paper = "Tenn79.pdf"
}
\end{chunk}
\index{Rucker, Rudy}
+\index{Montanaro, Ashley}
\begin{chunk}{axiom.bib}
@book{Ruck87,
 author = "Rucker, Rudy",
 title = {{Mathenauts: Tales of Mathematical Wonder}},
 year = "1987",
 comment = "Norman Kagan: Four Brands of Impossible",
 publisher = "Arbor House Publishing"
+@misc{Mont12,
+ author = "Montanaro, Ashley",
+ title = {{Computational Complexity Lecture Notes}},
+ year = "2012",
+ link = "\url{https://people.maths.bris.ac.uk/~csxam/teaching/cclecturenotes.pdf}",
+ paper = "Mont12.pdf"
}
\end{chunk}
\index{Woodcock, Jim}
\index{Davies, Jim}
@book{Wood20,
 author = "Woodcock, Jim and Davies, Jim",
 title = {{Using Z: Specification, Refinement, and Proof}},
 year = "2020",
 link = "\url{http://www.cs.cmu.edu/~15819/zedbook.pdf}",
 publisher = "CMU PDF",
 paper = "Wood20.pdf"
+\index{Grayson, Daniel R.}
+\begin{chunk}{axiom.bib}
+@misc{Gray18a,
+ author = "Grayson, Daniel R.",
+ title = {{The Mathematical Work of Vladimir Voevodsky}},
+ year = "2018",
+ link = "\url{https://www.youtube.com/watch?v=BanMgvdKP8E}",
+ keywords = "DONE"
}
\end{chunk}
\index{Chang, Stephen}
\index{Ballantyne, Michael}
\index{Turner, Milo}
\index{Bowman, William J.}
+\index{Gentzen, G.}
\begin{chunk}{axiom.bib}
@article{Chan20,
 author = "Chang, Stephen and Ballantyne, Michael and Turner, Milo
 and Bowman, William J.",
 title = {{Dependent Type Systems as Macros}},
 journal = "Proc. ACM Program Lang.",
 volume = "4",
 year = "2020",
 abstract =
 "We present {\tt TURNSTYLE+}, a highlevel, macrosbased metaDSL
 for building dependently typed languages. With it, programmers may
 rapidly prototype and iterate on the design of new dependently
 typed features and extensions. Or they may create entirely new
 DSLs whose dependent type ``power'' is tailored to a specific
 domain. Our framework's support of languageoriented programming
 also makes it suitable for experimenting with systems of
 interacting components, e.g. a proof assistant and its companion
 DSLs. This paper explains the implementation details of
 {\tt TURNSTYLE+}, as well as how it may be used to create a wide
 variety of dependently typed languages, from a lightweight one
 with indexed types, to a full spectrum proof assistant, complete
 with a tactic system and extensions for features like sized types
 and SMT interaction.",
 paper = "Chan20.pdf",
 keywords = "printed"
+@misc{Gent35,
+ author = "Gentzen, G.",
+ title = {{Untersuchungen uber das logische Schliessen I and II}},
+ booktitle = "Mathematische Zeitschrift",
+ year = "1935",
+ publisher = "Springer",
+ paper = "Gent35.pdf"
}
\end{chunk}
\index{Warne, Henrik}
+\index{Vapnik, Vladimir}
\begin{chunk}{axiom.bib}
@misc{Warn19,
 author = "Warne, Henrik",
 title = {{More Good Programming Quotes, Part 3}},
 year = "2019",
 link = "\url{https://henrikwarne.com/2019/04/03/moregoodprogrammingquotespart3}"
+@misc{Vapn20,
+ author = "Vapnik, Vladimir",
+ title = {{Predicates, Invariants, and the Essence of Intelligence}},
+ year = "2020",
+ link = "\url{https://lexfridman.com/vladimirvapnik2}",
+ keywords = "DONE"
}
\end{chunk}
\index{Warne, Henrik}
+\index{Baruch, Robert}
\begin{chunk}{axiom.bib}
@misc{Warn16,
 author = "Warne, Henrik",
 title = {{More Good Programming Quotes}},
+@misc{Baru19,
+ author = "Baruch, Robert",
+ title = {{Very Basic Introduction to Formal Verification}},
year = "2019",
 link = "\url{https://henrikwarne.com/2016/04/17/moregoodprogrammingquotes}"
+ link = "\url{https://www.youtube.com/watch?v=9e7F1XhjhKw}",
+ keywords = "DONE"
}
\end{chunk}
\index{Baccala, Brent W.}
+\index{Carneiro, Mario}
\begin{chunk}{axiom.bib}
@misc{Bacc,
 author = "Baccala, Brent W.",
 title = {{The Facebook Integral}},
 year = "2020",
 link = "\url{https://ec2.freesoft.org/blogs/soapbox/thefacebookintegral}"
+@misc{Carn19c,
+ author = "Carneiro, Mario",
+ title = {{Specifying Verified x86 Software from Scratch}},
+ year = "2019",
+ link = "\url{https://arxiv.org/pdf/1907.01283.pdf}",
+ paper = "Carn19c.pdf",
+ keywords = "printed"
}
\end{chunk}
\index{Baccala, Brent W.}
+\index{Bendersky, Eli}
\begin{chunk}{axiom.bib}
@misc{Bacca,
 author = "Baccala, Brent W.",
 title = {{The Facebook Integral (solved with Axiom and Sage)}},
 year = "2020",
 link = "\url{https://youtu.be/tz1LwfJlMuo}"
+@misc{Bend10,
+ author = "Bendersky, Eli",
+ title = {{Top Down Operator Precedence}},
+ year = "2010",
+ link = "\url{https://eli.thegreenplace.net/2010/01/02/topdownoperatorprecedenceparsing}",
+ keywords = "DONE"
}
\end{chunk}
\index{Baccala, Brent W.}
+\index{Baruch, Robert}
\begin{chunk}{axiom.bib}
@misc{Baccb,
 author = "Baccala, Brent W.",
 title = {{The Risch Theorem}},
 year = "2020",
 link = "\url{https://youtu.be/BGnLwhXb204}"
+@misc{Baru19a,
+ author = "Baruch, Robert",
+ title = {{Cmod A7 Reference Manual}},
+ year = "2019",
+ link = "\url{https://reference.digilentinc.com/_media/reference/programmablelogic/cmoda7/cmod_a7_rm.pdf}",
+ paper = "Baru19a.pdf",
+ keywords = "printed"
}
\end{chunk}
\index{Goel, Shilpi}
\index{Slobodova, Anna}
\index{Sumners, Rob}
\index{Swords, Sol}
+\index{Baruch, Robert}
\begin{chunk}{axiom.bib}
@misc{Goel19,
 author = "Goel, Shilpi and Slobodova, Anna and Sumners, Rob and
 Swords, Sol",
 title = {{Verifying X86 Instruction Implementations}},
+@misc{Baru19b,
+ author = "Baruch, Robert",
+ title = {{Cmod A7 Schematic}},
year = "2019",
 link = "\url{https://arxiv.org/abs/1912.10285}",
 paper = "Goel19.pdf"
+ link = "\url{https://reference.digilentinc.com/_media/cmoda7/cmod_a7_sch.pdf}",
+ paper = "Baru19b.pdf",
+ keywords = "printed"
}
\end{chunk}
\index{Boizumault, Patrice}
\index{Djamboulian, Ara M.}
\index{Fattouh, Jamal}
+\index{Reis, Leonardo Vieira dos Santos}
+\index{Dilorio, Oliveira}
+\index{Bigonha, Roberto S.}
\begin{chunk}{axiom.bib}
@book{Boiz93,
 author = "Boizumault, Patrice and Djamboulian, Ara M. and Fattouh, Jamal",
 title = {{The Implementation of Prolog}},
 year = "1993",
 publisher = "Princeton Legacy Library",
 keywords = "shelf"
+@inproceedings{Reis14,
+ author = "Reis, Leonardo Vieira dos Santos and Dilorio, Oliveira and
+ Bigonha, Roberto S.",
+ title = {{A Mixed Approach for Building Extensible Parsers}},
+ booktitle = "Programming Language",
+ publisher = "Springer",
+ volume = "LNCS 8771",
+ pages = "115",
+ year = "2014",
+ abstract =
+ "For languages whose syntax is fixed, parsers are usually built
+ with a static structure. The implementation of features like macro
+ mechanisms or extensible languages requires the use of parsers
+ that may be dynamically extended. In this work, we discuss a mixed
+ approach for building efficient topdown dynamically extensible
+ parsers. Our view is based on the fact that a large part of the
+ parser code can be statically compiled and only the parts that are
+ dynamic should be interpreted for a more efficient processing. We
+ propose the generation of code for the base parser, in which hooks
+ are included to allow efficient extension of the underlying
+ grammar and activation of a grammar interpreter whenever it is
+ necessary to use an extended syntax. As a proof of concept, we
+ present a prototype implementation of a parser generator using
+ Adaptable Parsing Expression Grammars (APEG) as the underlying
+ method for syntax definition. We show that APEG has features which
+ allow an efficient implementation using the proposed mixed
+ approach.",
+ paper = "Reis14.pdf"
+}
+
+\end{chunk}
+
+\index{Ford, Bryan}
+\begin{chunk}{axiom.bib}
+@article{Ford04,
+ author = "Ford, Bryan",
+ title = {{Parsing Expression Grammars: A RecognitionBased Syntactic
+ Foundation}},
+ journal = "SIGPLAN Notices",
+ volume = "39",
+ number = "1",
+ pages = "111122",
+ year = "2004",
+ abstract =
+ "For decades we have been using Chomsky's generative system of
+ grammars, particularly contextfree grammars (CFGs) and regular
+ expressions (REs), to express the syntax of programming languages
+ and protocols. The power of generative grammars to express
+ ambiguity is crucial to their original purpose of modelling
+ natural languages, but this very power makes it unnecessarily
+ difficult both to express and to parse machineoriented languages
+ using CFGs. Parsing Expression Grammars (PEGs) provide an
+ alternative, recognitionbased formal foundation for describing
+ machineoriented syntax, which solves the ambiguity problem by not
+ introducing ambiguity in the first place. Where CFGs express
+ nondeterministic choice between alternatives, PEGs instead use
+ {\sl prioritized choice}. PEGs address frequently felt
+ expressiveness limitations of CFGs and REs, simplifying syntax
+ definitions and making it unnecessary to separate their lexical
+ and hierarchical components. A lineartime parser can be built for
+ any PEG, avoiding both the complexity and fickleness of LR parsers
+ and the inefficiency of generalized CFG parsing. While PEGs
+ provide a rich set of operators for constructing grammars, they
+ are reducible to two minimal recognition schemas developed around
+ 1970, TS/TDPL and gTS/GTDPL, which are here proven equivalent in
+ effective recognition power.",
+ paper = "Ford04.pdf",
+ keywords = "printed"
}
\end{chunk}
\index{Clark, K.L.}
\index{McCabe, F.G.}
+\index{Gueneau, Armael}
\begin{chunk}{axiom.bib}
@book{Clar84,
 author = "Clark, K.L. and McCabe, F.G.",
 title = {{microPROLOG: Programming in Logic}},
 year = "1984",
 publisher = "PrenticeHall",
 isbn = "013581264X",
 keywords = "shelf"
+@phdthesis{Guen19,
+ author = "Gueneau, Armael",
+ title = {{Mechanized Verification of the Correctness and Asymptotic
+ Complexity of Programs}},
+ school = "University of Paris",
+ year = "2019",
+ link = "\url{http://gallium.inria.fr/~agueneau/phd/manuscript.pdf}",
+ abstract =
+ "This dissertation is concerned with the question of formally
+ verifying that the implementation of an algorithm is not only
+ functionally correct (it always returns the right result), but
+ also has the right asymptotic complexity (it reliably computes the
+ result in the expected amount of time).
+
+ In the algorithms literature, it is standard practice to
+ characterize the performance of an algorithm by indicating its
+ asymptotic time complexity, typically using Landau's ``bigO''
+ notation. We first argue informally that asymptotic complexity
+ bounds are equally useful as formal specifications, because they
+ enable modular reasoning: a $O$ bound abstracts over the concrete
+ cost expression of a program, and therefore abstracts over the
+ specifics of its implementation. We illustrate  with the help of
+ small illustrative examples  a number of challenges with the use
+ of the $O$ notation, in particular in the multivariate case, that
+ might be overlooked when reasoning informally.
+
+ We put these considerations into practice by formalizing the $O$
+ notation in the Coq proof assistant, and by extending an existing
+ program verification framework, CFML, with support for a
+ methodology enabling robust and modular proofs of asymptotic
+ complexity bounds. We extend the existing framework of Separation
+ Logic with Time Credits, which allows to reason at the same time
+ about correctness and time complexity, and introduce negative time
+ credits. Negative time credits increase the expressiveness of the
+ logic, and enable convenient reasoning principles as well as
+ elegant specifications. At the level of specifications, we show
+ how asymptotic complexity specifications using $O$ can be
+ integrated and composed within Separation Logic with Time
+ Credits. Then, in order to establish such specifications, we
+ develop a methodology that allows proofs of complexity in
+ Separation Logic to be robust and carried out at a relatively high
+ level of abstraction, by relying on two key elements: a mechanism
+ for collecting and deferring constraints during the proof, and a
+ mechanism for semiautomatically synthesizing cost expressions
+ without loss of generality.
+
+ We demonstrate the usefulness and practicality of our approach on
+ a number of increasingly challenging case studies. These include
+ algorithms whose complexity analysis is relatively simple (such as
+ binary search, which is nonetheless out of the scope of many
+ automated complexity analysis tools) and data structures (such as
+ Okasaki's binary random access lists). In our most challenging
+ case study, we establish the correctness and amortized complexity
+ of a stateoftheart incremental cycle detection algorithm: our
+ methodology scales up to highly nontrivial algorithms whose
+ complexity analysis intimately depends on subtle functional
+ invariants, and furthermore makes it possible to formally verify
+ OCaml code which can then actually be used as part of real world
+ programs.",
+ paper = "Guen19.pdf"
+}
+
+\end{chunk}
+
+\begin{chunk}{axiom.bib}
+@misc{Fade17,
+ author = "Unknown",
+ title = {{Strongly Connected and Completely Specified Moore
+ Equivalent of a Mealy Machine}},
+ year = "2017",
+ link = "\url{https://cs.stackexchange.com/questions/81127/stronglyconnectedandcompletelyspecifiedmooreequivalentofamealymachine}",
+ keywords = "DONE"
}
\end{chunk}
\index{Abelson, Harold}
\index{Sussman, Gerald Jay}
\index{Sussman, Julie}
+\index{Hoare, C.A.R.}
\begin{chunk}{axiom.bib}
@book{Abel86,
 author = "Abelson, Harold and Sussman, Gerald Jay and Sussman, Julie",
 title = {{Structure and Interpretation of Computer Programs}},
 year = "1986",
 publisher = "The MIT Press",
 isbn = "0262010771",
 keywords = "shelf"
+@misc{Hoar04,
+ author = "Hoare, C.A.R.",
+ title = {{Unified Theories of Programming}},
+ year = "2004",
+ link = "\url{https://fi.ort.edu.uy/innovaportal/file/20124/1/04hoard_unified_theories.pdf}",
+ abstract =
+ "Professional practice in a mature engineering discipline is based
+ on relevant scientific theories, usually expressed in the language
+ of mathematics. A mathematical theory of programming aims to
+ provide a similar basis for specification, design and
+ implementation of computer programs. The theory can be presented
+ in a variety of styles, including
+ \begin{enumerate}
+ \item Denotational, relating a program to a specification of its
+ observable properties and behaviour
+ \item Algebraic, providing equations and inequalities for
+ comparison, transformation and optimisation of designs and
+ programs.
+ \item Operational, describing individual steps of a possible
+ mechanical implementation
+ \end{enumerate}
+
+ This paper presents simple theories of sequential
+ nondeterministic programming in each of these three styles; by
+ deriving each presentation from its predecessor in a cyclic
+ fashion, mutual consistency is assured.",
+ paper = "Hoar04.pdf",
+ keywords = "printed"
}
\end{chunk}
\index{Chandy, K. Mani}
\index{Misra, Jayadev}
+\index{Davis, Ernest}
\begin{chunk}{axiom.bib}
@book{Chan89,
 author = "Chandy, K. Mani and Misra, Jayadev",
 title = {{Parallel Program Design}},
 year = "1989",
 publisher = "AddisonWesley Publishing",
 isbn = "0201058669",
 keywords = "shelf"
+@misc{Davi19,
+ author = "Davis, Ernest",
+ title = {{The Use of Deep Learning for Symbolic Integration}},
+ year = "2019",
+ link = "\url{https://arxiv.org/pdf/1912.05752.pdf}",
+ abstract =
+ "Lample and Charton (2019) describe a system that uses deep
+ learning technology to compute symbolic, indefinite integrals, and
+ to find symbolic solutions to first and secondorder ordinary
+ differential equations, when the solutions are elementary
+ functions. They found that, over a particular test set, the system
+ could find solutions more successfully than sophisticated packages
+ for symbolic mathematics such as Mathematica run with a long
+ timeout. This is an impressive accomplishment, as far as it
+ goes. However, the system can handle only a quite limited subset
+ of the problems that Mathematica deals with, and the test set has
+ significant builtin biases. Therefore the claim that this
+ outperforms Mathematica on symbolic integration needs to be very
+ much qualified.",
+ paper = "Davi19.pdf",
+ keywords = "printed, DONE"
}
\end{chunk}
\index{Bundy, Alan}
+\index{Pierce, Benjamin}
+\index{Appel, Andrew W.}
+\index{Weirich, Stephanie}
+\index{Zdancewic, Steve}
+\index{Shao, Zhong}
+\index{Chlipala, Adam}
\begin{chunk}{axiom.bib}
@book{Bund83,
 author = "Bundy, Alan",
 title = {{The Computer Modelling of Mathematics Reasoning}},
 year = "1983",
 publisher = "Academic Press",
 isbn = "0121412504",
 keywords = "shelf"
+@misc{Pier16,
+ author = "Pierce, Benjamin and Appel, Andrew W. and Weirich, Stephanie
+ and Zdancewic, Steve and Shao, Zhong and Chlipala, Adam",
+ title = {{The Science of Deep Specification}},
+ year = "2016",
+ link = "\url{https://www.cis.upenn.edu/~bcpierce/papers/deepspechcss2016slides.pdf}",
+ paper = "Pier16.pdf"
}
\end{chunk}
\index{Hamilton, A.G.}
+\index{Pierce, Benjamin}
\begin{chunk}{axiom.bib}
@book{Hami78,
 author = "Hamilton, A.G.",
 title = {{Logic for Mathematicians}},
 year = "1978",
 publisher = "Cambridge University Press",
 isbn = "0521292913",
 keywords = "shelf"
+@misc{Pier18,
+ author = "Pierce, Benjamin",
+ title = {{The Science of Deep Specification}},
+ year = "2018",
+ link = "\url{https://www.cis.upenn.edu/~bcpierce/papers/pierceetaps2018.pdf}",
+ paper = "Pier18.pdf"
}
\end{chunk}
\index{Michaelson, S.}
\index{Milner, R.}
+\index{Chlipala, Adam}
+\index{Arvind}
+\index{Sherman, Benjamin}
+\index{Choi, Joonwon}
+\index{Vijayaraghavan, Murali}
\begin{chunk}{axiom.bib}
@book{Mich76,
 author = "Michaelson, S. and Milner, R.",
 title = {{Automata Languages and Programming}},
 year = "1976",
 publisher = "Edinburgh University Press",
 isbn = "0852243081",
 keywords = "shelf"
+@misc{Chli17b,
+ author = "Chlipala, Adam and Arvind and Sherman, Benjamin and
+ Choi, Joonwon and Vijayaraghavan, Murali",
+ title = {{Kami: A Platform for HighLevel Parametric Hardware
+ Specification and its Modular Verification}},
+ year = "2017",
+ abstract =
+ "It has become fairly standard in the programming languages
+ research world to verify functional programs in proof assistants
+ using induction, algebraic simplification, and rewriting. In this
+ paper, we introduce Kami, a Coq library that uses labelled
+ transition systems to enable similar expressive and modular
+ reasoning for hardware designs expressed in the style of the
+ Bluespec language. We can specify, implement, and verify realistic
+ designs entirely within Coq, ending with automatic extraction into
+ a pipeline that bottoms out in FPGAs. Our methodology has been
+ evaluated in a case study verifying an infinite family of
+ multicore systems, with cachecoherent shared memory and pipelined
+ cores implementing (the base integer subset of) the RISCV
+ instruction set.",
+ paper = "Chli17b.pdf",
+ keywords = "printed"
}
\end{chunk}
\index{Campbell, J.A.}
+\index{Appel, Andrew W.}
+\index{Beringer, Lennart}
+\index{Chlipala, Adam}
+\index{Pierce, Benjamin C.}
+\index{Shao, Zhong}
+\index{Weirich, Stephanie}
+\index{Zdancewic, Steve}
\begin{chunk}{axiom.bib}
@book{Camp84,
 author = "Campbell, J.A.",
 title = {{Implementations of PROLOG}},
 year = "1984",
 publisher = "Ellis Horwood Limited",
 isbn = "0853126755",
 keywords = "shelf"
+@article{Appe17a,
+ author = "Appel, Andrew W. and Beringer, Lennart and Chlipala, Adam
+ and Pierce, Benjamin C. and Shao, Zhong and Weirich, Stephanie
+ and Zdancewic, Steve",
+ title = {{Position Paper: The Science of Deep Specification}},
+ journal = "Philosophical Transactions of the Royal Society",
+ volume = "375",
+ year = "2017",
+ link = "\url{https://royalsocietypublishing.org/doi/pdf/10.1098/rsta.2016.0331}",
+ abstract =
+ "We introduce our efforts within the project ``The Science of Deep
+ Specification'' to work out the key formal underpinnings of
+ industrial-scale formal specifications of software and hardware
+ components, anticipating a world where large verified systems are
+ routinely built out of smaller verified components that are also
+ used by many other projects. We identify an important class of
+ specification that has already been used in a few experiments that
+ connect strong componentcorrectness theorems across the work of
+ different teams. To help popularize the unique advantages of that
+ style, we dub it {\sl deep specification}, and we say that it
+ encompasses specifications that are {\sl rich}, {\sl twosided},
+ {\sl formal}, and {\sl live} (terms that we define in the
+ article). Our core team is developing a proofofconcept system
+ (based on the Coq proof assistant) whose specification and
+ verification work is divided across largely decoupled subteams at
+ our four institutions, encompassing hardware microarchitecture,
+ compilers, operating systems and applications, along with
+ crosscutting principles and tools for effective specification. We
+ also aim to catalyse interest in the approach, not just by basic
+ researchers but also by users in industry.
+
+ This article is part of the themed issue ``Verified trustworthy
+ software systems''",
+ paper = "Appe17a.pdf",
+ keywords = "printed"
}
\end{chunk}
\index{Reichenbach, Hans}
+\index{Braibant, Thomas}
+\index{Chlipala, Adam}
\begin{chunk}{axiom.bib}
@book{Reic47,
 author = "Reichenbach, Hans",
 title = {{Elements of Symbolic Logic}},
 year = "1947",
 publisher = "The MacMillan Company",
 keywords = "shelf"
+@inproceedings{Brai13,
+ author = "Braibant, Thomas and Chlipala, Adam",
+ title = {{Formal Verification of Hardware Synthesis}},
+ booktitle = "Computer Aided Verification (CAV'13)",
+ publisher = "unknown",
+ year = "2013",
+ abstract =
+ "We report on the implementation of a certified compiler for a
+ high-level hardware description language (HDL) called FeSi
+ (FEatherweight Synthesis). FeSi is a simplified version of
+ Bluespec, an HDL based on a notion of guarded atomic
+ actions. FeSi is defined as a dependently typed deep embedding in
+ Coq. The target language of the compiler corresponds to a
+ synthesisable subset of Verilog or VHDL. A key aspect of our
+ approach is that input programs to the compiler can be defined and
+ proved correct inside Coq. Then, we use extraction and a Verilog
+ backend (written in OCaml) to get a certified version of a
+ hardware design.",
+ paper = "Brai13.pdf",
+ keywords = "printed"
}
\end{chunk}
\index{Oesterle, John A.}
+\index{Timany, Amin}
+\index{Sozeau, Matthieu}
\begin{chunk}{axiom.bib}
@book{Oest53
 author = "Oesterle, John A.",
 title = {{Logic: The Art of Defining and Reasoning}},
 year = "1953",
 publisher = "PrenticeHall",
 keywords = "shelf"
+@inproceedings{Tima18,
+ author = "Timany, Amin and Sozeau, Matthieu",
+ title = {{Cumulative Inductive Types in Coq}},
+ booktitle = "3rd Conf. on Formal Structures for Computation and
+ Deduction",
+ publisher = "HAL",
+ year = "2018",
+ link = "\url{https://hal.inria.fr/hal-01952037/document}",
+ abstract =
+ "In order to avoid well-known paradoxes associated with
+ self-referential definitions, higher-order dependent type theories
+ stratify the theory using a countably infinite hierarchy of
+ universes (also known as sorts), $Type_0 : Type_1,\ldots$ Such
+ type systems are called cumulative if for any type $A$ we have
+ that $A:Type_i$ implies $A:Type_{i+1}$. The Predicative Calculus
+ of Inductive Constructions (pCIC) which forms the basis of the Coq
+ proof assistant, is one such system. In this paper we present the
+ Predicative Calculus of Cumulative Inductive Constructions
+ (pCUIC) which extends the cumulativity relation to inductive
+ types. We discuss cumulative inductive types as present in Coq 8.7
+ and their application to formalization and definitional
+ translations.",
+ paper = "Tima18.pdf"
+}
+
+\end{chunk}
+
+\index{Gilbert, Gaetan}
+\begin{chunk}{axiom.bib}
+@phdthesis{Gilb19,
+ author = "Gilbert, Gaetan",
+ title = {{A Type Theory with Definitional ProofIrrelevance}},
+ school = "Ecole Nationale Superieure Mines-Telecom Atlantique",
+ year = "2019",
+ link = "\url{https://gitlab.com/SkySkimmer/thesis/-/jobs/artifacts/master/download?job=build}",
+ paper = "Gilb19.zip"
}

+
\end{chunk}
\index{Lewis, Robert Y.}
\index{Wu, Minchao}
+\index{Awodey, Steve}
\begin{chunk}{axiom.bib}
@misc{Lewi17,
 author = "Lewis, Robert Y. and Wu, Minchao",
 title = {{A Bidirectional extensible ad hoc interface between Lean
 and Mathematica}},
 year = "2017",
 link = "\url{https://robertylewis.com/leanmm/lean_mm.pdf}",
+@misc{Awod16,
+ author = "Awodey, Steve",
+ title = {{Univalence as a Principle of Logic}},
+ year = "2016",
+ link = "\url{https://www.andrew.cmu.edu/user/awodey/preprints/ualp.pdf}",
abstract =
 "We implement a userextensible ad hoc connection between the Lean
 proof assistant and the computer algebra system Mathematica. By
 reflecting the syntax of each system in the other and providing a
 flexible interface for extending translation, our connection
 allows for the exchange of arbitrary information between the two
 systemss. We show how to make use of the Lean metaprogramming
 framework to verif certain Mathematica computations, so that the
 rigor of the proof assistant is not compromised. We also establish
 a connection in the opposite direction, using Mathematica to
 import and process proof terms from Lean.",
 paper = "Lewi17.pdf",
+ "It is sometimes convenient or useful in mathematics to treat
+ isomorphic structures as the same. The recently proposed
+ Univalence Axiom for the foundations of mathematics elevates this
+ idea to a foundational principle in the setting of Homotopy Type
+ Theory. It states, roughly, that isomorphic structures can be
+ identified. We explore the motivations and consequences, both
+ mathematical and philosophical, of making such a new logical
+ postulate.",
+ paper = "Awod16.pdf",
keywords = "printed"
}
\end{chunk}
+\index{McCorduck, Pamela}
\begin{chunk}{axiom.bib}
@misc{Unkn20,
 author = "Unknown",
 title = {{Programming is Terrible  Lessons Learned from a life
 wasted}},
 year = "2020",
 link = "\url{https://programmingisterrible.com/post/65781074112/devilsdictionaryofprogramming}"
+@book{Mcco79,
+ author = "McCorduck, Pamela",
+ title = {{Machines Who Think}},
+ year = "1979",
+ publisher = "Freeman",
+ keywords = "shelf, DONE"
}
\end{chunk}
+\index{Gabriel, Richard}
\begin{chunk}{axiom.bib}
@misc{Comm16,
 author = "Unknown",
 title = {{A very comprehensive and precise spec}},
 year = "2016"
 link =
 "\url{http://www.commitstrip.com/en/2016/08/25/averycomprehensiveandprecisespec}"
}

\end{chunk}
+@book{Gabr96,
+ author = "Gabriel, Richard",
+ title = {{Patterns of Software}},
+ link = "\url{https://www.dreamsongs.com/Files/PatternsOfSoftware.pdf}",
+ year = "1996",
+ publisher = "Oxford University Press",
+ paper = "Gabr96.pdf"
+}
\index{Pfenning, Frank}
\index{Schurmann, Carston}
\begin{chunk}{axiom.bib}
@misc{Pfen99,
 author = "Pfenning, Frank and Schurmann, Carston",
 title = {{System Description: Twelf  A MetaLogical Framework for
 Deductive Systems}},
 year = "1999",
 link = "\url{https://www.cs.cmu.edu/~fp/papers/cade99.pdf}",
 abstract =
 "Twelf is a metalogical framework for the specification,
 implementation, and metatheory of deductive systems from the
 theory of programming languages and logics. It relies on the LF
 type theory and the jugementsastypes methodology for
 specifications [HHP93], a constraint logic programming interpreter
 for implementation [Pfe91], and the metalogic $M_2$ for reasoning
 about object languages encoded in LF [SP98]. It is a significant
 extension and complete reimplementation of the Elf system [Pfe94].

 Twelf is written in Standard ML and runs under SML of New Jersey
 and MLWorks on Unix and Window platforms. The current version
 (1.2) is distributed with a complete manual, example suites, a
 tutorial in the form of online lecture notes [Pfe], and an Emacs
 interface. Source and binary distributions are accessible via the
 Twelf home page \url{http://www.cs.cmu.edu/~twelf}.",
 paper = "Pfen99.pdf",
 keywords = "printed"
}
\end{chunk}
\index{Pfenning, Frank}
+\index{Gabriel, Richard}
\begin{chunk}{axiom.bib}
@misc{Pfen94,
 author = "Pfenning, Frank",
 title = {{Elf: A MetaLanguage for Deductive Systems}},
 year = "1994",
 paper = "Pfen94.pdf",
 keywords = "printed, DONE"
}

\end{chunk}
+@book{Gabr85,
+ author = "Gabriel, Richard",
+ title = {{Performance and Evaluation of Lisp Systems}},
+ link = "\url{https://www.dreamsongs.com/Files/Timrep.pdf}",
+ year = "1985",
+ publisher = "MIT Press",
+ paper = "Gabr85.pdf"
+}
\index{Thornton, Jacob}
\begin{chunk}{axiom.bib}
@misc{Thor12,
 author = "Thornton, Jacob",
 title = {{What is Open Source and Why Do I Feel So Guilty}},
 year = "2012",
 link = "\url{https://www.youtube.com/watch?v=UIDb6VBO9os}"
}
\end{chunk}
\index{Bazerman, Gershom}
+\index{Lucas, J.R.}
\begin{chunk}{axiom.bib}
@misc{Baze14,
 author = "Bazerman, Gershom",
 title = {{Homotopy Type Theory: What's the Big Idea}},
 year = "2014",
 comment = "Lambda Jam 2014",
 link = "\url{https://www.youtube.com/watch?v=OupcXmLER7I}"
+@misc{Luca03,
+ author = "Lucas, J.R.",
+ title = {{Minds, Machines and Godel}},
+ year = "2003",
+ link =
+ "\url{https://pdfs.semanticscholar.org/bde3/b731bf73ef6052e34c4465e57718c03b13f8.pdf}",
+ paper = "Luca03.pdf"
}
\end{chunk}
\index{Voevodsky, Vladimir}
+\index{Lem, Stanislaw}
\begin{chunk}{axiom.bib}
@misc{Voev10,
 author = "Voevodsky, Vladimir",
 title = {{The Equivalence Axiom and Univalent Models of Type
 Theory}},
 year = "2010",
 link = "\url{https://www.math.ias.edu/~vladimir/Site3/Univalent_Foundations_files/CMU_talk.pdf}",
 abstract =
 "I will show how to define, in any type system with dependent
 sums, products and MartinLof identity types, the notion of a
 homotopy equivalence between two types and how to formulate the
 Equivalence Axiom which provides a natural way to assert that
 ``two homotopy equivalent types are equal''. I will then sketch a
 construction of a model of one of the standard MartinLof type
 theories which satisfies the equivalence axiom and the excluded
 middle thus proving that M.L. type theory with excluded middle and
 equivalence axiom is at least as consistent as ZFC theory.

 Models which satisfy the equivalence axiom are called
 univalent. This is a totally new class of models and I will argue
 that the semantics which they provide leads to the first
 satisfactory approach to typetheoretic formalization of
 mathematics.",
 paper = "Voev10.pdf",
 keywords = "printed"
+@book{Lemx68,
+ author = "Lem, Stanislaw",
+ title = {{His Master's Voice}},
+ publisher = "MIT Press",
+ year = "1968"
}
\end{chunk}
diff git a/src/axiomwebsite/patches.html b/src/axiomwebsite/patches.html
index 42a1144..8e0f2d6 100644
 a/src/axiomwebsite/patches.html
+++ b/src/axiomwebsite/patches.html
@@ 6022,6 +6022,8 @@ books/bookvol* cleanup
books/bookvol15 updated
20200207.01.tpd.patch
books/bookvol15 updated
+20200307.01.tpd.patch
+books/bookvol15 updated

1.9.1