%% 12pt font size, PhD thesis, LFCS, print twosided, new chapters on right page \documentclass[12pt,phd,lfcs,twoside,openright,logo,leftchapter,normalheadings]{infthesis} \shieldtype{0} %% Packages \usepackage[utf8]{inputenc} % Enable UTF-8 typing \usepackage[british]{babel} % British English \usepackage[breaklinks]{hyperref} % Interactive PDF \usepackage{url} \usepackage[sort&compress,square,numbers]{natbib} % Bibliography \usepackage{breakurl} \usepackage{amsmath} % Mathematics library \usepackage{amssymb} % Provides math fonts \usepackage{amsthm} % Provides \newtheorem, \theoremstyle, etc. \usepackage{mathtools} \usepackage{pkgs/mathpartir} % Inference rules \usepackage{pkgs/mathwidth} \usepackage{stmaryrd} % semantic brackets \usepackage{array} \usepackage{float} % Float control \usepackage{caption,subcaption} % Sub figures support \DeclareCaptionFormat{underlinedfigure}{#1#2#3\hrulefill} \captionsetup[figure]{format=underlinedfigure} \usepackage[T1]{fontenc} % Fixes font issues %\usepackage{lmodern} \usepackage[activate=true, final, tracking=true, kerning=true, spacing=true, factor=1100, stretch=10, shrink=10]{microtype} \usepackage{enumerate} % Customise enumerate-environments \usepackage{xcolor} % Colours \usepackage{xspace} % Smart spacing in commands. \usepackage{tikz} \usetikzlibrary{fit,calc,trees,positioning,arrows,chains,shapes.geometric,% decorations.pathreplacing,decorations.pathmorphing,shapes,% matrix,shapes.symbols,intersections} \SetProtrusion{encoding={*},family={bch},series={*},size={6,7}} {1={ ,750},2={ ,500},3={ ,500},4={ ,500},5={ ,500}, 6={ ,500},7={ ,600},8={ ,500},9={ ,500},0={ ,500}} \SetExtraKerning[unit=space] {encoding={*}, family={bch}, series={*}, size={footnotesize,small,normalsize}} {\textendash={400,400}, % en-dash, add more space around it "28={ ,150}, % left bracket, add space from right "29={150, }, % right bracket, add space from left \textquotedblleft={ ,150}, % left quotation mark, space from right \textquotedblright={150, }} % right quotation mark, space from left \usepackage[customcolors,shade]{hf-tikz} % Shaded backgrounds. \hfsetfillcolor{gray!40} \hfsetbordercolor{gray!40} %% %% Theorem environments %% \theoremstyle{plain} \newtheorem{theorem}{Theorem}[chapter] \newtheorem{lemma}[theorem]{Lemma} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{definition}[theorem]{Definition} % Example environment. \makeatletter \def\@endtheorem{\hfill$\blacksquare$\endtrivlist\@endpefalse} % inserts a black square at the end. \makeatother \theoremstyle{definition} \newtheorem{example}{Example}[chapter] %% %% Load macros. %% \input{macros} %% Information about the title, etc. % \title{Higher-Order Theories of Handlers for Algebraic Effects} % \title{Handlers for Algebraic Effects: Applications, Compilation, and Expressiveness} % \title{Applications, Compilation, and Expressiveness for Effect Handlers} % \title{Handling Computational Effects} % \title{Programming Computable Effectful Functions} % \title{Handling Effectful Computations} \title{Foundations for Programming and Implementing Effect Handlers} \author{Daniel Hillerström} %% If the year of submission is not the current year, uncomment this line and %% specify it here: \submityear{2020} %% Specify the abstract here. \abstract{% An abstract\dots } %% Now we start with the actual document. 
\begin{document} \raggedbottom %% First, the preliminary pages \begin{preliminary} %% This creates the title page \maketitle %% Acknowledgements \begin{acknowledgements} List of people to thank \begin{itemize} \item Sam Lindley \item John Longley \item Christophe Dubach \item KC Sivaramakrishnan \item Stephen Dolan \item Anil Madhavapeddy \item Gemma Gordon \item Leo White \item Andreas Rossberg \item Robert Atkey \item Jeremy Yallop \item Simon Fowler \item Craig McLaughlin \item Garrett Morris \item James McKinna \item Brian Campbell \item Paul Piho \item Amna Shahab \item Gordon Plotkin \item Ohad Kammar \item School of Informatics (funding) \item Google (Kevin Millikin, Dmitry Stefantsov) \item Microsoft Research (Daan Leijen) \end{itemize} \end{acknowledgements} %% Next we need to have the declaration. % \standarddeclaration \begin{declaration} I declare that this thesis was composed by myself, that the work contained herein is my own except where explicitly stated otherwise in the text, and that this work has not been submitted for any other degree or professional qualification except as specified. \end{declaration} %% Finally, a dedication (this is optional -- uncomment the following line if %% you want one). % \dedication{To my mummy.} \dedication{\emph{To be or to do}} % \begin{preface} % A preface will possibly appear here\dots % \end{preface} %% Create the table of contents \setcounter{secnumdepth}{2} % Numbering on sections and subsections \setcounter{tocdepth}{1} % Show chapters, sections and subsections in TOC %\singlespace \tableofcontents %\doublespace %% If you want a list of figures or tables, uncomment the appropriate line(s) % \listoffigures % \listoftables \end{preliminary} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% Main content %% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% %% Introduction %% \chapter{Introduction} \label{ch:introduction} An enthralling introduction\dots % Motivation: 1) compiler perspective: unifying control abstraction, lean runtime, desugaring of async/await, generators/iterators, 2) giving control to programmers, safer microkernels, everything as a library. \section{Thesis outline} Thesis outline\dots \section{Typographical conventions} Explain conventions\dots \part{Background} \label{p:background} \chapter{The state of effectful programming} \label{ch:related-work} \section{Type and effect systems} \section{Monadic programming} \chapter{Continuations} \label{ch:continuations} \section{Zoo of control operators} Describe how effect handlers fit amongst shift/reset, prompt/control, callcc, J, catchcont, etc. \section{Implementation strategies} \part{Design} \chapter{An ML-flavoured programming language based on rows} \label{ch:base-language} In this chapter we introduce a core calculus, \BCalc{}, which we shall later use as the basis for exploration of design considerations for effect handlers. This calculus is based on \CoreLinks{} by \citet{LindleyC12}, which distils the essence of the functional multi-tier web-programming language \Links{}~\cite{CooperLWY06}. \Links{} belongs to the ML-family~\cite{MilnerTHM97} of programming languages as it features typical characteristics of ML languages such as a static type system supporting parametric polymorphism with type inference (in fact Links supports first-class polymorphism), and its evaluation semantics is strict. 
However, \Links{} differentiates itself from the rest of the ML-family by making crucial use of \emph{row polymorphism} to support extensible records, variants, and tracking of computational effects. Thus \Links{} has a rather strong emphasis on structural types as opposed to nominal types. \CoreLinks{} captures all of these properties of \Links{}.
Our calculus \BCalc{} differs in several respects from \CoreLinks{}. For example, the underlying formalism of \CoreLinks{} is call-by-value, whilst the formalism of \BCalc{} is \emph{fine-grain call-by-value}~\cite{LevyPT03}, which shares similarities with A-normal form (ANF)~\cite{FlanaganSDF93} as it syntactically distinguishes between value and computation terms by mandating that every intermediate computation be named. However, unlike ANF, fine-grain call-by-value remains closed under $\beta$-reduction. The choice of fine-grain call-by-value as our formalism is entirely a matter of convenience. As we shall see in Chapter~\ref{ch:unary-handlers}, fine-grain call-by-value is a convenient formalism for working with continuations. Another point of difference between \CoreLinks{} and \BCalc{} is that the former models the integrated database query sublanguage of \Links{}. We do not consider the query sublanguage at all; instead our focus is entirely on modelling interaction with, and programming with, computational effects.
\section{Syntax and static semantics}
\label{sec:syntax-base-language}
As \BCalc{} is intrinsically typed, we begin by presenting the syntax of kinds and types in Section~\ref{sec:base-language-types}. Subsequently, in Section~\ref{sec:base-language-terms} we present the term syntax, before presenting the formation rules for types in Section~\ref{sec:base-language-type-rules}.
% Typically the presentation of a programming language begins with its
% syntax. If the language is typed there are two possible starting
% points: Either one presents the term syntax first, or alternatively,
% the type syntax first. Although the choice may seem rather benign
% there is, however, a philosophical distinction to be drawn between
% them. Terms are, on their own, entirely meaningless, whilst types
% provide, on their own, an initial approximation of the semantics of
% terms. This is particularly true in an intrinsic typed system perhaps
% less so in an extrinsic typed system. In an intrinsic system types
% must necessarily be precursory to terms, as terms ultimately depend on
% the types. Following this argument leaves us with no choice but to
% first present the type syntax of \BCalc{} and subsequently its term
% syntax.
\subsection{Types and their kinds} \label{sec:base-language-types} % \begin{figure} \begin{syntax} % \slab{Value types} &A,B &::= & A \to C % \mid \alpha % \mid \forall \alpha^K.C % \mid \Record{R} % \mid [R]\\ % \slab{Computation types} % &C,D &::= & A \eff E \\ % \slab{Effect types} &E &::= & \{R\}\\ % \slab{Row types} &R &::= & \ell : P;R \mid \rho \mid \cdot \\ % \slab{Presence types} &P &::= & \Pre{A} \mid \Abs \mid \theta\\ % %\slab{Labels} &\ell & & \\ % % \slab{Types} &T &::= & A \mid C \mid E \mid R \mid P \\ % \slab{Kinds} &K &::= & \Type \mid \Row_\mathcal{L} \mid \Presence % \mid \Comp \mid \Effect \\ % \slab{Label sets} &\mathcal{L} &::=& \emptyset \mid \{\ell\} \uplus \mathcal{L}\\ % %\slab{Type variables} &\alpha, \rho, \theta& \\ % \slab{Type environments} &\Gamma &::=& \cdot \mid \Gamma, x:A \\ % \slab{Kind environments} &\Delta &::=& \cdot \mid \Delta, \alpha:K \slab{Value types} &A,B \in \ValTypeCat &::= & A \to C \mid \forall \alpha^K.C \mid \Record{R} \mid [R] \mid \alpha \\ \slab{Computation types\!\!} &C,D \in \CompTypeCat &::= & A \eff E \\ \slab{Effect types} &E \in \EffectCat &::= & \{R\}\\ \slab{Row types} &R \in \RowCat &::= & \ell : P;R \mid \rho \mid \cdot \\ \slab{Presence types\!\!\!\!\!} &P \in \PresenceCat &::= & \Pre{A} \mid \Abs \mid \theta\\ \\ \slab{Types} &T \in \TypeCat &::= & A \mid C \mid E \mid R \mid P \\ \slab{Kinds} &K \in \KindCat &::= & \Type \mid \Comp \mid \Effect \mid \Row_\mathcal{L} \mid \Presence \\ \slab{Label sets} &\mathcal{L} \in \LabelCat &::=& \emptyset \mid \{\ell\} \uplus \mathcal{L}\\\\ \slab{Type environments} &\Gamma \in \TyEnvCat &::=& \cdot \mid \Gamma, x:A \\ \slab{Kind environments} &\Delta \in \KindEnvCat &::=& \cdot \mid \Delta, \alpha:K \\ \end{syntax} \caption{Syntax of types, kinds, and their environments.} \label{fig:base-language-types} \end{figure} % The types are divided into several distinct syntactic categories which are given in Figure~\ref{fig:base-language-types} along with the syntax of kinds and environments. % \paragraph{Value types} We distinguish between values and computations at the level of types. Value types comprise the function type $A \to C$, which maps values of type $A$ to computations of type $C$; the polymorphic type $\forall \alpha^K . C$ is parameterised by a type variable $\alpha$ of kind $K$; and the record type $\Record{R}$ represents records with fields constrained by row $R$. Dually, the variant type $[R]$ represents tagged sums constrained by row $R$. \paragraph{Computation types and effect types} The computation type $A \eff E$ is given by a value type $A$ and an effect type $E$, which specifies the effectful operations a computation inhabiting this type may perform. An effect type $E = \{R\}$ is constrained by row $R$. \paragraph{Row types} Row types play a pivotal role in our type system as effect, record, and variant types are uniformly given by row types. A \emph{row type} describes a collection of distinct labels, each annotated by a presence type. A presence type indicates whether a label is \emph{present} with type $A$ ($\Pre{A}$), \emph{absent} ($\Abs$) or \emph{polymorphic} in its presence ($\theta$). % For example, the effect row $\{\Read:\Pre{\Int},\Write:\Abs,\cdot\}$ denotes a read-only context in which the operation label $\Read$ may occur to access some integer value, whilst the operation label $\Write$ cannot appear. % Row types are either \emph{closed} or \emph{open}. 
A closed row type ends in~$\cdot$, whilst an open row type ends with a \emph{row variable} $\rho$ (in an effect row we usually use $\varepsilon$ rather than $\rho$ and refer to it as an \emph{effect variable}).
%
The example effect row above is closed; an open variation of it ends in an effect variable $\varepsilon$, i.e. $\{\Read:\Pre{\Int},\Write:\Abs,\varepsilon\}$.
%
The row variable in an open row type can be instantiated with additional labels subject to the restriction that each label may occur at most once (we enforce this restriction at the level of kinds). We identify rows up to the reordering of labels as follows.
%
\begin{mathpar}
\inferrule*[Lab=\rowlab{Closed}]
{~}
{\cdot \equiv_{\mathrm{row}} \cdot}
\inferrule*[Lab=\rowlab{Open}]
{~}
{\rho \equiv_{\mathrm{row}} \rho}
\inferrule*[Lab=\rowlab{Head}]
{R \equiv_{\mathrm{row}} R'}
{\ell:P;R \equiv_{\mathrm{row}} \ell:P;R'}
\inferrule*[Lab=\rowlab{Swap}]
{R \equiv_{\mathrm{row}} R'}
{\ell:P;\ell':P';R \equiv_{\mathrm{row}} \ell':P';\ell:P;R'}
\end{mathpar}
%
%
% The last rule $\rowlab{Swap}$ let us identify rows up to the
% reordering of labels. For instance, the two rows
% $\ell_1 : P_1; \cdots; \ell_n : P_n; \cdot$ and
% $\ell_n : P_n; \cdots ; \ell_1 : P_1; \cdot$ are equivalent.
%
The \rowlab{Closed} rule states that the closed marker $\cdot$ is equivalent to itself; similarly, the \rowlab{Open} rule states that two row variables are equivalent if and only if they have the same syntactic name. The \rowlab{Head} rule requires the heads of the two rows to agree and inductively compares their tails. The \rowlab{Swap} rule permits reordering of labels. We assume structural equality on labels.
%
The standard zero and unit types are definable using rows. We define the zero type as the empty, closed variant $\ZeroType \defas [\cdot]$. Dually, the unit type is defined as the empty, closed record type, i.e. $\UnitType \defas \Record{\cdot}$.
% As absent labels in closed rows are redundant we will, for example,
% consider the following two rows equivalent
% $\Read:\Pre{\Int},\Write:\Abs,\cdot \equiv_{\mathrm{row}}
% \Read:\Pre{\Int},\cdot$.
For brevity, we shall often write $\ell : A$ to mean $\ell : \Pre{A}$ and omit $\cdot$ for closed rows.
%
\begin{figure}
\begin{mathpar}
% alpha : K
\inferrule*[Lab=\klab{TyVar}]
{ }
{\Delta, \alpha : K \vdash \alpha : K}
% Computation
\inferrule*[Lab=\klab{Comp}]
{ \Delta \vdash A : \Type \\ \Delta \vdash E : \Effect \\ }
{\Delta \vdash A \eff E : \Comp}
% A -E-> B, A : Type, E : Row, B : Type
\inferrule*[Lab=\klab{Fun}]
{ \Delta \vdash A : \Type \\ \Delta \vdash C : \Comp \\ }
{\Delta \vdash A \to C : \Type}
% forall alpha : K . A : Type
\inferrule*[Lab=\klab{Forall}]
{ \Delta, \alpha : K \vdash C : \Comp}
{\Delta \vdash \forall \alpha^K .
\, C : \Type} % Record \inferrule*[Lab=\klab{Record}] { \Delta \vdash R : \Row_\emptyset} {\Delta \vdash \Record{R} : \Type} % Variant \inferrule*[Lab=\klab{Variant}] { \Delta \vdash R : \Row_\emptyset} {\Delta \vdash [R] : \Type} % Effect \inferrule*[Lab=\klab{Effect}] { \Delta \vdash R : \Row_\emptyset} {\Delta \vdash \{R\} : \Effect} % Present \inferrule*[Lab=\klab{Present}] {\Delta \vdash A : \Type} {\Delta \vdash \Pre{A} : \Presence} % Absent \inferrule*[Lab=\klab{Absent}] { } {\Delta \vdash \Abs : \Presence} % Empty row \inferrule*[Lab=\klab{EmptyRow}] { } {\Delta \vdash \cdot : \Row_\mathcal{L}} % Extend row \inferrule*[Lab=\klab{ExtendRow}] { \Delta \vdash P : \Presence \\ \Delta \vdash R : \Row_{\mathcal{L} \uplus \{\ell\}} } {\Delta \vdash \ell : P;R : \Row_\mathcal{L}} \end{mathpar} \caption{Kinding rules} \label{fig:base-language-kinding} \end{figure} % \paragraph{Kinds} The kinds classify the different categories of types. The $\Type$ kind classifies value types, $\Presence$ classifies presence annotations, $\Comp$ classifies computation types, $\Effect$ classifies effect types, and lastly $\Row_{\mathcal{L}}$ classifies rows. % The formation rules for kinds are given in Figure~\ref{fig:base-language-kinding}. The kinding judgement $\Delta \vdash T : K$ states that type $T$ has kind $K$ in kind environment $\Delta$. % The row kind is annotated by a set of labels $\mathcal{L}$. We use this set to track the labels of a given row type to ensure uniqueness amongst labels in each row type. For example, the kinding rule $\klab{ExtendRow}$ uses this set to constrain which labels may be mentioned in the tail of $R$. We shall elaborate on this in Section~\ref{sec:row-polymorphism}. \paragraph{Environments} Kind and type environments are right-extended sequences of bindings. A kind environment binds type variables to their kinds, whilst a type environment binds term variables to their types. \paragraph{Type variables} The type structure has three syntactically distinct type variables (the kinding system gives us five semantically distinct notions of type variables). As we sometimes wish to refer collectively to type variables, we define the set of type variables, $\TyVarCat$, to be generated by: % \[ \TyVarCat \defas \ba[t]{@{~}l@{~}l} &\{ A \in \ValTypeCat \mid A \text{ has the form } \alpha \}\\ \cup &\{ R \in \RowCat \mid R \text{ has the form } \rho \}\\ \cup &\{ P \in \PresenceCat \mid P \text{ has the form } \theta \} \ea \] % Value types comprise the function type $A \to C$, whose domain % is a value type and its codomain is a computation type $B \eff E$, % where $E$ is an effect type detailing which effects the implementing % function may perform. Value types further comprise type variables % $\alpha$ and quantification $\forall \alpha^K.C$, where the quantified % type variable $\alpha$ is annotated with its kind $K$. Finally, the % value types also contains record types $\Record{R}$ and variant types % $[R]$, which are built up using row types $R$. An effect type $E$ is % also built up using a row type. A row type is a sequence of fields of % labels $\ell$ annotated with their presence information $P$. The % presence information denotes whether a label is present $\Pre{A}$ with % some type $A$, absent $\Abs$, or polymorphic in its presence % $\theta$. A row type may be either \emph{open} or \emph{closed}. 
An
% open row ends in a row variable $\rho$ which can be instantiated with
% additional fields, effectively growing the row, whilst a closed row
% ends in $\cdot$, meaning the row cannot grow further.
%
% The kinds comprise $\Type$ for regular type variables, $\Presence$ for
% presence variables, $\Comp$ for computation type variables, $\Effect$
% for effect variables, and lastly $\Row_{\mathcal{L}}$ for row
% variables. The row kind is annotated by a set of labels
% $\mathcal{L}$. We use this set to track the labels of a given row type
% to ensure uniqueness amongst labels in each row type. We shall
% elaborate on this in Section~\ref{sec:row-polymorphism}.
\paragraph{Free type variables}
Sometimes we need to compute the free type variables ($\FTV$) of a given type. To this end we define a metafunction $\FTV$ by induction on the type structure, $T$, and point-wise on type environments, $\Gamma$. Note that we always work up to $\alpha$-conversion~\cite{Church32} of types.
%
\[
\ba[t]{@{~}l@{~~~~~~}c@{~}l}
\multicolumn{3}{c}{\begin{eqs}
\FTV &:& \TypeCat \to \TyVarCat
\end{eqs}}\\
\ba[t]{@{}l}
\begin{eqs}
% \FTV &:& \ValTypeCat \to \TyVarCat\\
\FTV(\alpha) &\defas& \{\alpha\}\\
\FTV(\forall \alpha^K.C) &\defas& \FTV(C) \setminus \{\alpha\}\\
\FTV(A \to C) &\defas& \FTV(A) \cup \FTV(C)\\
\FTV(A \eff E) &\defas& \FTV(A) \cup \FTV(E)\\
\FTV(\{R\}) &\defas& \FTV(R)\\
\FTV(\Record{R}) &\defas& \FTV(R)\\
\FTV([R]) &\defas& \FTV(R)\\
% \FTV(l:P;R) &\defas& \FTV(P) \cup \FTV(R)\\
% \FTV(\Pre{A}) &\defas& \FTV(A)\\
% \FTV(\Abs) &\defas& \emptyset\\
% \FTV(\theta) &\defas& \{\theta\}
\end{eqs}\ea
& &
\begin{eqs}
% \FTV([R]) &\defas& \FTV(R)\\
% \FTV(\Record{R}) &\defas& \FTV(R)\\
% \FTV(\{R\}) &\defas& \FTV(R)\\
% \FTV &:& \RowCat \to \TyVarCat\\
\FTV(\cdot) &\defas& \emptyset\\
\FTV(\rho) &\defas& \{\rho\}\\
\FTV(l:P;R) &\defas& \FTV(P) \cup \FTV(R)\\
% \FTV &:& \PresenceCat \to \TyVarCat\\
\FTV(\theta) &\defas& \{\theta\}\\
\FTV(\Abs) &\defas& \emptyset\\
\FTV(\Pre{A}) &\defas& \FTV(A)\\
\end{eqs}\\\\
\multicolumn{3}{c}{\begin{eqs}
\FTV &:& \TyEnvCat \to \TyVarCat\\
\FTV(\cdot) &\defas& \emptyset\\
\FTV(\Gamma,x : A) &\defas& \FTV(\Gamma) \cup \FTV(A)
\end{eqs}}
% \begin{eqs}
% \FTV(\theta) &\defas& \{\theta\}\\
% \FTV(\Abs) &\defas& \emptyset\\
% \FTV(\Pre{A}) &\defas& \FTV(A)
% \end{eqs} & &
% \begin{eqs}
% \FTV(\cdot) &\defas& \emptyset\\
% \FTV(\Gamma,x : A) &\defas& \FTV(\Gamma) \cup \FTV(A)
% \end{eqs}
\ea
\]
%
\paragraph{Type substitution}
We define a type substitution map, $\sigma : (\TyVarCat \times \TypeCat)^\ast$, as a list of pairs mapping a type variable to its replacement. We denote a single mapping as $T/\alpha$, meaning substitute $T$ for the variable $\alpha$. We write multiple mappings using list notation, i.e. $[T_0/\alpha_0,\dots,T_n/\alpha_n]$. The domain of a substitution map is the set generated by projecting the first component, i.e.
%
\[
\bl
\dom : (\TyVarCat \times \TypeCat)^\ast \to \TyVarCat\\
\dom(\sigma) \defas \{ \alpha \mid (\_/\alpha) \in \sigma \}
\el
\]
%
The application of a type substitution map to a type term, written $T\sigma$ for some type $T$, is defined inductively on the type structure as follows.
%
\[
\ba[t]{@{~}l@{~}c@{~}r}
\multicolumn{3}{c}{
\begin{eqs}
(A \eff E)\sigma &\defas& A\sigma \eff E\sigma\\
(A \to C)\sigma &\defas& A\sigma \to C\sigma\\
(\forall \alpha^K.C)\sigma &\defas& \forall \alpha^K.C\sigma \quad \text{$\alpha$-convert if } \alpha \in \dom(\sigma)\\
\alpha\sigma &\defas& \begin{cases}
A & \text{if } (\alpha,A) \in \sigma\\
\alpha & \text{otherwise}
\end{cases}
\end{eqs}}\\
\begin{eqs}
\Record{R}\sigma &\defas& \Record{R\sigma}\\
{[R]}\sigma &\defas& [R\sigma]\\
\{R\}\sigma &\defas& \{R\sigma\}\\
\cdot\sigma &\defas& \cdot\\
\rho\sigma &\defas& \begin{cases}
R & \text{if } (\rho, R) \in \sigma\\
\rho & \text{otherwise}
\end{cases}\\
\end{eqs}
& ~~~~~~~~~~ &
\begin{eqs}
(\ell : P;R)\sigma &\defas& (\ell : P\sigma; R\sigma)\\
\theta\sigma &\defas& \begin{cases}
P & \text{if } (\theta,P) \in \sigma\\
\theta & \text{otherwise}
\end{cases}\\
\Abs\sigma &\defas& \Abs\\
\Pre{A}\sigma &\defas& \Pre{A\sigma}
\end{eqs}
\ea
\]
%
\paragraph{Types and their inhabitants}
We now have the basic vocabulary to construct types in $\BCalc$. For instance, the signature of the standard polymorphic identity function is
%
\[
\forall \alpha^\Type. \alpha \to \alpha \eff \emptyset.
\]
%
Modulo the empty effect signature, this type is akin to the type one would give for the identity function in System F~\cite{Girard72,Reynolds74}, and thus we can use standard techniques from parametricity~\cite{Wadler89} to reason about inhabitants of this signature. However, in our system we can give an even more general type to the identity function:
%
\[
\forall \alpha^\Type,\varepsilon^{\Row_\emptyset}. \alpha \to \alpha \eff \{\varepsilon\}.
\]
%
This type is polymorphic in its effect signature as signified by the singleton open effect row $\{\varepsilon\}$, meaning it may be used in an effectful context. By contrast, the former type may only be used in a strictly pure context, i.e. the effect-free context.
% \dhil{Maybe say something about reasoning effect types}
%
We can use the effect system to give precise types to effectful computations. For example, we can give the signature of some polymorphic computation that may only be run in a read-only context
%
\[
\forall \alpha^\Type, \varepsilon^{\Row_{\{\Read,\Write\}}}. \alpha \eff \{\Read:\Int,\Write:\Abs,\varepsilon\}.
\]
%
The effect row comprises a nullary $\Read$ operation returning some integer and an absent operation $\Write$. The absence of $\Write$ means that the computation cannot run in a context that admits a present $\Write$. It can, however, run in a context that admits a presence polymorphic $\Write : \theta$ as the presence variable $\theta$ may be instantiated to $\Abs$. An inhabitant of this type may be run in larger effect contexts, i.e. contexts that admit more operations, because the row ends in an effect variable.
%
The type and effect system is also precise about how a higher-order function may use its function arguments. For example, consider the signature of a map-operation over some datatype such as $\dec{Option}~\alpha^\Type \defas [\dec{None};\dec{Some}:\alpha]$
%
\[
\forall \alpha^\Type,\beta^\Type,\varepsilon^{\Row_\emptyset}. \Record{\alpha \to \beta \eff \{\varepsilon\}, \dec{Option}~\alpha} \to \dec{Option}~\beta \eff \{\varepsilon\}.
\]
%
% The $\dec{map}$ function for
% lists is a canonical example of a higher-order function which is
% parametric in its own effects and the effects of its function
% argument. Supposing $\BCalc$ have some polymorphic list datatype
% $\List$, then we would be able to ascribe the following signature to
% $\dec{map}$
%
%
% \[
% \forall \alpha^\Type,\beta^\Type,\varepsilon^{\Row_\emptyset}. \Record{\alpha \to \beta \eff \{\varepsilon\},\List~\alpha} \to \List~\beta \eff \{\varepsilon\}.
% \]
%
The first argument is the function that will be applied to the data carried by the second argument. Note that the two effect rows are identical and share the same effect variable $\varepsilon$; it is thus evident that an inhabitant of this type can only perform whatever effects its first argument is allowed to perform. Higher-order functions may also transform their function arguments, e.g. modify their effect rows. The following is the signature of a higher-order function which restricts its argument's effect context
%
\[
\forall \alpha^\Type, \varepsilon^{\Row_{\{\Read\}}},\varepsilon'^{\Row_\emptyset}. (\UnitType \to \alpha \eff \{\Read:\Int,\varepsilon\}) \to (\UnitType \to \alpha \eff \{\Read:\Abs,\varepsilon\}) \eff \{\varepsilon'\}.
\]
%
The function argument is allowed to perform a $\Read$ operation, whilst the returned function cannot. Moreover, the two functions share the same effect variable $\varepsilon$. Like the option-map signature above, an inhabitant of this type performs no effects of its own as the (right-most) effect row is a singleton row containing a distinct effect variable $\varepsilon'$.
\paragraph{Syntactic sugar}
Detail the syntactic sugar\dots
\subsection{Terms}
\label{sec:base-language-terms}
%
\begin{figure}
\begin{syntax}
\slab{Variables} &x \in \VarCat&&\\
\slab{Values} &V,W \in \ValCat &::= & x \mid \lambda x^A .\, M \mid \Lambda \alpha^K .\, M \mid \Record{} \mid \Record{\ell = V;W} \mid (\ell~V)^R \\
& & &\\
\slab{Computations} &M,N \in \CompCat &::= & V\,W \mid V\,T\\
& &\mid& \Let\; \Record{\ell=x;y} = V \; \In \; N\\
& &\mid& \Case\; V \{\ell~x \mapsto M; y \mapsto N\} \mid \Absurd^C~V\\
& &\mid& \Return~V \mid \Let \; x \revto M \; \In \; N\\
\slab{Terms} &t \in \TermCat &::= & x \mid V \mid M
\end{syntax}
\caption{Term syntax of \BCalc{}.}
\label{fig:base-language-term-syntax}
\end{figure}
%
The syntax for terms is given in Figure~\ref{fig:base-language-term-syntax}. We assume a countably infinite set of names $\VarCat$ from which we draw fresh variable names. We shall typically denote term variables by $x$, $y$, or $z$.
%
The syntax partitions terms into values and computations.
%
Value terms comprise variables ($x$), lambda abstraction ($\lambda x^A . \, M$), type abstraction ($\Lambda \alpha^K . \, M$), and the introduction forms for records and variants. Records are introduced using the empty record $(\Record{})$ and record extension $(\Record{\ell = V; W})$, whilst variants are introduced using injection $((\ell~V)^R)$, which injects a field with label $\ell$ and value $V$ into a row whose type is $R$. We include the row type annotation in order to support bottom-up type reconstruction. All elimination forms are computation terms. Abstraction and type abstraction are eliminated using application ($V\,W$) and type application ($V\,A$) respectively.
%
The record eliminator $(\Let \; \Record{\ell=x;y} = V \; \In \; N)$ splits a record $V$ into $x$, the value associated with $\ell$, and $y$, the rest of the record. Non-empty variants are eliminated using the case construct ($\Case\; V\; \{\ell~x \mapsto M; y \mapsto N\}$), which evaluates the computation $M$ if the tag of $V$ matches $\ell$. Otherwise it falls through to $y$ and evaluates $N$.
The elimination form for empty variants is ($\Absurd^C~V$).
%
There is one computation introduction form, namely, the trivial computation $(\Return~V)$ which returns value $V$. Its elimination form is the expression $(\Let \; x \revto M \; \In \; N)$ which evaluates $M$ and binds the result value to $x$ in $N$.
%
%
As our calculus is intrinsically typed, we annotate terms with type or kind information (term abstraction, type abstraction, injection, operations, and empty cases). However, we shall omit these annotations whenever they are clear from context.
\paragraph{Free variables}
A given term is said to be \emph{closed} if every applied occurrence of a variable is preceded by some corresponding binding occurrence. Any applied occurrence of a variable that is not preceded by a binding occurrence is said to be a \emph{free variable}. We define the function $\FV : \TermCat \to \VarCat$ inductively on the term structure to compute the free variables of any given term.
%
\[
\bl
\ba[t]{@{~}l@{~}c@{~}l}
\begin{eqs}
\FV(x) &\defas& \{x\}\\
\FV(\lambda x^A.M) &\defas& \FV(M) \setminus \{x\}\\
\FV(\Lambda \alpha^K.M) &\defas& \FV(M)\\[1.0ex]
\FV(V\,W) &\defas& \FV(V) \cup \FV(W)\\
\FV(\Return~V) &\defas& \FV(V)\\
\end{eqs}
& \qquad\qquad &
\begin{eqs}
\FV(\Record{}) &\defas& \emptyset\\
\FV(\Record{\ell = V; W}) &\defas& \FV(V) \cup \FV(W)\\
\FV((\ell~V)^R) &\defas& \FV(V)\\[1.0ex]
\FV(V\,T) &\defas& \FV(V)\\
\FV(\Absurd^C~V) &\defas& \FV(V)\\
\end{eqs}
\ea\\
\begin{eqs}
\FV(\Let\;x \revto M \;\In\;N) &\defas& \FV(M) \cup (\FV(N) \setminus \{x\})\\
\FV(\Let\;\Record{\ell=x;y} = V\;\In\;N) &\defas& \FV(V) \cup (\FV(N) \setminus \{x, y\})\\
\FV(\Case~V~\{\ell\,x \mapsto M; y \mapsto N\}) &\defas& \FV(V) \cup (\FV(M) \setminus \{x\}) \cup (\FV(N) \setminus \{y\})
\end{eqs}
\el
\]
%
The function computes the set of free variables bottom-up. Most cases are homomorphic on the syntax constructors. The interesting cases are those constructs which feature term binders: lambda abstraction, let bindings, record splitting, and case splitting. In each of those cases we subtract the relevant binder(s) from the set of free variables.
\subsection{Typing rules}
\label{sec:base-language-type-rules}
%
\begin{figure}
Values
\begin{mathpar}
% Variable
\inferrule*[Lab=\tylab{Var}]
{x : A \in \Gamma}
{\typv{\Delta;\Gamma}{x : A}}
% Abstraction
\inferrule*[Lab=\tylab{Lam}]
{\typ{\Delta;\Gamma, x : A}{M : C}}
{\typv{\Delta;\Gamma}{\lambda x^A .\, M : A \to C}}
% Polymorphic abstraction
\inferrule*[Lab=\tylab{PolyLam}]
{\typ{\Delta,\alpha : K;\Gamma}{M : C} \\ \alpha \notin \FTV(\Gamma) }
{\typv{\Delta;\Gamma}{\Lambda \alpha^K .\, M : \forall \alpha^K . \,C}}
\\
% unit : ()
\inferrule*[Lab=\tylab{Unit}]
{ }
{\typv{\Delta;\Gamma}{\Record{} : \UnitType}}
% Extension
\inferrule*[Lab=\tylab{Extend}]
{ \typv{\Delta;\Gamma}{V : A} \\ \typv{\Delta;\Gamma}{W : \Record{\ell:\Abs;R}} }
{\typv{\Delta;\Gamma}{\Record{\ell=V;W} : \Record{\ell:\Pre{A};R}}}
% Inject
\inferrule*[Lab=\tylab{Inject}]
{\typv{\Delta;\Gamma}{V : A}}
{\typv{\Delta;\Gamma}{(\ell~V)^R : [\ell : \Pre{A}; R]}}
\end{mathpar}
Computations
\begin{mathpar}
% Application
\inferrule*[Lab=\tylab{App}]
{\typv{\Delta;\Gamma}{V : A \to C} \\ \typv{\Delta;\Gamma}{W : A} }
{\typ{\Delta;\Gamma}{V\,W : C}}
% Polymorphic application
\inferrule*[Lab=\tylab{PolyApp}]
{\typv{\Delta;\Gamma}{V : \forall \alpha^K . \, C} \\ \Delta \vdash A : K }
{\typ{\Delta;\Gamma}{V\,A : C[A/\alpha]}}
% Split
\inferrule*[Lab=\tylab{Split}]
{\typv{\Delta;\Gamma}{V : \Record{\ell : \Pre{A};R}} \\\\ \typ{\Delta;\Gamma, x : A, y : \Record{\ell : \Abs; R}}{N : C} }
{\typ{\Delta;\Gamma}{\Let \; \Record{\ell =x;y} = V\; \In \; N : C}}
% Case
\inferrule*[Lab=\tylab{Case}]
{ \typv{\Delta;\Gamma}{V : [\ell : \Pre{A};R]} \\\\ \typ{\Delta;\Gamma,x:A}{M : C} \\\\ \typ{\Delta;\Gamma,y:[\ell : \Abs;R]}{N : C} }
{\typ{\Delta;\Gamma}{\Case \; V \{\ell\; x \mapsto M;y \mapsto N \} : C}}
% Absurd
\inferrule*[Lab=\tylab{Absurd}]
{\typv{\Delta;\Gamma}{V : []}}
{\typ{\Delta;\Gamma}{\Absurd^C \; V : C}}
% Return
\inferrule*[Lab=\tylab{Return}]
{\typv{\Delta;\Gamma}{V : A}}
{\typc{\Delta;\Gamma}{\Return \; V : A}{E}}
\\
% Let
\inferrule*[Lab=\tylab{Let}]
{\typc{\Delta;\Gamma}{M : A}{E} \\ \typ{\Delta;\Gamma, x : A}{N : B \eff E} }
{\typ{\Delta;\Gamma}{\Let \; x \revto M\; \In \; N : B \eff E}}
\end{mathpar}
\caption{Typing rules}
\label{fig:base-language-type-rules}
\end{figure}
%
The typing rules are given in Figure~\ref{fig:base-language-type-rules}. The \tylab{Var} and \tylab{Lam} rules are the standard rules for variables and lambda abstraction. The \tylab{PolyLam} rule states that a type abstraction $(\Lambda \alpha^K. M)$ has type $\forall \alpha^K.C$ if the computation $M$ has type $C$ assuming $\alpha : K$ and $\alpha$ does not appear in the free type variables of the current type environment $\Gamma$. The \tylab{Unit} rule provides the basis for all records as it simply states that the empty record has type unit. The \tylab{Extend} rule handles record extension. Suppose we wish to extend some record $W$ with $\ell = V$, that is, $\Record{\ell = V; W}$. This extension has type $\Record{\ell : \Pre{A};R}$ if and only if $V$ has type $A$ and we can ascribe $W : \Record{\ell : \Abs; R}$. Since $\Record{\ell : \Abs; R}$ must be well-kinded with respect to $\Delta$, the label $\ell$ cannot be mentioned in $W$, thus $\ell$ cannot occur more than once in the record. Similarly, the dual rule \tylab{Inject} states that the injection $(\ell~V)^R$ has type $[\ell : \Pre{A}; R]$ if the payload $V$ is well-typed. The implicit well-kindedness condition on $R$ ensures that $\ell$ cannot be injected twice. As an example, consider the following ill-typed injection into a singleton variant type
%
\[
(\dec{S}~\Unit)^{\dec{S}:\UnitType} : [\dec{S}:\UnitType;\dec{S}:\UnitType].
\]
%
Typing fails because the resulting row type is ill-kinded by the \klab{ExtendRow}-rule:
\begin{mathpar}
\inferrule*[]
{\inferrule*[]
  {\vdash \Pre{\UnitType} : \Presence \\
   \inferrule*[]
   {\vdash \Pre{\UnitType} : \Presence \\ \vdash \cdot : \Row_{\color{red}{\{\dec{S}\} \uplus \{\dec{S}\}}}}
   {\vdash \dec{S}:\Pre{\UnitType};\cdot : \Row_{\emptyset \uplus \{\dec{S}\}}}}
% {\inferrule[]
% {\vdash \UnitType : \Type}
% {\vdash \Pre{\UnitType} : \Presence}
% \\
% \inferrule*[]
% {\inferrule*[]
% {\vdash \UnitType : \Type}
% {\vdash \Pre{\UnitType} : \Presence}
% \\
% \inferrule*[]
% {}
% {\vdash \dec{None} : \Pre{\UnitType} : \Row_{\{\dec{Some}\} \uplus \{\dec{Some}\}}}
% }
% {\vdash \dec{Some}:\Pre{\UnitType};\dec{None}:\Pre{\UnitType} : \Row_{{\emptyset \uplus \{\dec{Some}\}}}}
% }
  {\vdash \dec{S}:\Pre{\UnitType};\dec{S}:\Pre{\UnitType};\cdot : \Row_{\emptyset}}
}
{\vdash [\dec{S}:\Pre{\UnitType};\dec{S}:\Pre{\UnitType};\cdot] : \Type}
\end{mathpar}
%
The two sets $\{\dec{S}\}$ and $\{\dec{S}\}$ are clearly not disjoint; thus the second premise of the last application of \klab{ExtendRow} cannot be satisfied.
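%
By contrast, dropping the duplicate label yields a well-kinded singleton variant type; a sketch of the corresponding derivation (with the premise $\vdash \Pre{\UnitType} : \Presence$ again left underived) is:
\begin{mathpar}
\inferrule*[]
{\inferrule*[]
  {\vdash \Pre{\UnitType} : \Presence \\
   \inferrule*[]
   { }
   {\vdash \cdot : \Row_{\emptyset \uplus \{\dec{S}\}}}}
  {\vdash \dec{S}:\Pre{\UnitType};\cdot : \Row_{\emptyset}}}
{\vdash [\dec{S}:\Pre{\UnitType};\cdot] : \Type}
\end{mathpar}
%
Here the \klab{EmptyRow} rule ascribes the kind $\Row_{\emptyset \uplus \{\dec{S}\}}$ to the empty row $\cdot$, so both premises of the single application of \klab{ExtendRow} are satisfied, and the variant type is formed by the \klab{Variant} rule.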
\paragraph{Typing computations}
The \tylab{App} rule states that an application $V\,W$ has computation type $C$ if the abstractor term $V$ has type $A \to C$ and the argument term $W$ has type $A$, that is, the argument type and the domain type of the abstractor agree.
%
The type application rule \tylab{PolyApp} tells us that a type application $V\,A$ is well-typed whenever the abstractor term $V$ has the polymorphic type $\forall \alpha^K.C$ and the type $A$ has kind $K$. This rule makes use of type substitution.
%
The \tylab{Split} rule handles the typing of record destructing: splitting a record term $V$ on some label $\ell$ binds its payload to $x$ and the remainder of the record to $y$. The label we wish to split on must be present with some type $A$, hence we require that $V : \Record{\ell : \Pre{A}; R}$. This restriction prohibits us from splitting on an absent or presence polymorphic label. The continuation of the splitting, $N$, must then have some computation type $C$ subject to the following restriction: $N : C$ must be well-typed under the additional assumptions $x : A$ and $y : \Record{\ell : \Abs; R}$, statically ensuring that it is not possible to split on $\ell$ again in the continuation $N$.
%
The \tylab{Case} rule is similar, but has two possible continuations: the success continuation, $M$, and the fall-through continuation, $N$. The label being matched must be present with some type $A$ in the type of the scrutinee, thus we require $V : [\ell : \Pre{A};R]$. The success continuation has some computation type $C$ under the assumption that the binder $x : A$, whilst the fall-through continuation also has type $C$, subject to the restriction that the binder $y : [\ell : \Abs;R]$, which statically enforces that no subsequent case split in $N$ can match on $\ell$.
%
The \tylab{Absurd} rule states that we can ascribe any computation type to the term $\Absurd~V$ if $V$ has the empty type $[]$.
%
The trivial computation term is typed by the \tylab{Return} rule, which says that $\Return\;V$ has computation type $A \eff E$ if the value $V$ has type $A$.
%
The \tylab{Let} rule types let bindings. The computation being bound, $M$, must have computation type $A \eff E$, whilst the continuation, $N$, must have computation type $B \eff E$ with the same effect row, subject to the additional assumption that the binder $x : A$.
\section{Dynamic semantics}
\label{sec:base-language-dynamic-semantics}
%
\begin{figure}
\begin{reductions}
\semlab{App} & (\lambda x^A . \, M) V &\reducesto& M[V/x] \\
\semlab{TyApp} & (\Lambda \alpha^K . \, M) A &\reducesto& M[A/\alpha] \\
\semlab{Split} & \Let \; \Record{\ell = x;y} = \Record{\ell = V;W} \; \In \; N &\reducesto& N[V/x,W/y] \\
\semlab{Case_1} & \Case \; (\ell~V)^R \{ \ell \; x \mapsto M; y \mapsto N\} &\reducesto& M[V/x] \\
\semlab{Case_2} & \Case \; (\ell~V)^R \{ \ell' \; x \mapsto M; y \mapsto N\} &\reducesto& N[(\ell~V)^R/y], \hfill\quad \text{if } \ell \neq \ell' \\
\semlab{Let} & \Let \; x \revto \Return \; V \; \In \; N &\reducesto& N[V/x] \\
\semlab{Lift} & \EC[M] &\reducesto& \EC[N], \hfill\quad \text{if } M \reducesto N \\
\end{reductions}
\begin{syntax}
\slab{Evaluation contexts} & \mathcal{E} \in \EvalCat &::=& [\,] \mid \Let \; x \revto \mathcal{E} \; \In \; N
\end{syntax}
%%\[
% Evaluation context lift
%% \inferrule*[Lab=\semlab{Lift}]
%% { M \reducesto N }
%% { \mathcal{E}[M] \reducesto \mathcal{E}[N]}
%%\]
\caption{Contextual small-step semantics}
\label{fig:base-language-small-step}
\end{figure}
%
In this section we present the dynamic semantics of \BCalc{}.
We choose to use a contextual small-step semantics, since, in conjunction with fine-grain call-by-value, it yields a considerably simpler semantics than the traditional structural operational semantics (SOS)~\cite{Plotkin04a}, because only the rule for let bindings admits a continuation whereas in ordinary call-by-value SOS each congruence rule admits a continuation.
%
The semantics are based on a substitution model of computation. Thus, before presenting the reduction rules, we define an adequate substitution function. As usual we work up to $\alpha$-conversion~\cite{Church32} of terms in \BCalc{}.
%
\paragraph{Term substitution}
We write $M[V/x]$ for the substitution of some value $V$ for some variable $x$ in some computation term $M$. We use the same notation for substitution on values, i.e. $W[V/x]$ denotes the substitution of $V$ for $x$ in some value $W$. We define substitution as a ternary function, whose signature is given by
%
\[
-[-/-] : \TermCat \times \ValCat \times \VarCat \to \TermCat,
\]
%
and we realise it by pattern matching on the first argument.
%
\[
\begin{eqs}
x[V/y] &\defas& \begin{cases}
V & \text{if } x = y\\
x & \text{otherwise }
\end{cases}\\
(\lambda x^A.M)[V/y] &\defas& \lambda x^A.M[V/y] \quad \text{if } x \neq y \text{ and } x \notin \FV(V)\\
(\Lambda \alpha^K. M)[V/y] &\defas& \Lambda \alpha^K. M[V/y]\\
\Unit[V/y] &\defas& \Unit\\
\Record{\ell = W; W'}[V/y] &\defas& \Record{\ell = W[V/y]; W'[V/y]}\\
(\ell~W)^R[V/y] &\defas& (\ell~W[V/y])^R\\
(W\,W')[V/y] &\defas& W[V/y]\,W'[V/y]\\
(W\,T)[V/y] &\defas& W[V/y]~T\\
(\ba[t]{@{}l} \Let\;\Record{\ell = x; y} = W\\ \In\;N)[V/z] \ea &\defas& \ba[t]{@{~}l} \Let\;\Record{\ell = x; y} = W[V/z]\\ \In\;N[V/z] \ea \quad \ba[t]{@{}l} \text{if } x \neq z, y \neq z,\\ \text{and } x,y\notin \FV(V) \ea\\
(\Case\;W\{ \ba[t]{@{}l} \ell~x \mapsto M\\ ; y \mapsto N \})[V/z] \ea &\defas& \Case\;W[V/z]\{ \ba[t]{@{}l} \ell~x \mapsto M[V/z]\\ ; y \mapsto N[V/z] \} \ea\quad \ba[t]{@{}l} \text{if } x \neq z, y \neq z,\\ \text{and } x, y \notin \FV(V) \ea\\
(\Let\;x \revto M \;\In\;N)[V/y] &\defas& \Let\;x \revto M[V/y] \;\In\;N[V/y] \quad\text{if } x \neq y \text{ and } x \notin \FV(V)
\end{eqs}
\]
%
% \begin{cases}
% \Let\;x \revto M[V/y]\;\In\;N & \text{if } x = y\\
% \Let\;x \revto M[V/y]\;\In\;N[V/y] & \text{otherwise}
% \end{cases}
% \end{eqs}
%
We write $t[t_0/x_0,\dots,t_n/x_n]$ to mean the simultaneous substitution of $t_0$ for $x_0$ up to $t_n$ for $x_n$ in $t$.
%
\paragraph{Reduction semantics}
The reduction relation $\reducesto : \CompCat \pto \CompCat$ is defined on computation terms. Figure~\ref{fig:base-language-small-step} depicts the reduction rules. The application rules \semlab{App} and \semlab{TyApp} eliminate a lambda abstraction and a type abstraction, respectively, by substituting the argument for the parameter in the body computation $M$.
%
Record splitting is handled by the \semlab{Split} rule: splitting on some label $\ell$ binds the payload $V$ to $x$ and the remainder $W$ to $y$ in the continuation $N$.
%
Disjunctive case splitting is handled by the two rules \semlab{Case_1} and \semlab{Case_2}. The former rule handles the success case, in which the scrutinee's tag $\ell$ matches the tag of the success clause; it binds the payload $V$ to $x$ and proceeds to evaluate the continuation $M$. The latter rule handles the fall-through case: here the scrutinee gets bound to $y$ and evaluation proceeds with the continuation $N$.
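%
To see the two case rules at work, consider the following small, contrived reduction sequence, in which $\dec{A}$ and $\dec{B}$ are arbitrary distinct labels and $R$ is a suitable row annotation mentioning both:
%
\[
\bl
\phantom{\reducesto~} \Case \; (\dec{B}~\Unit)^R \, \{ \dec{A} \; x \mapsto M; y \mapsto \Case \; y \, \{ \dec{B} \; z \mapsto \Return \; z; w \mapsto N \} \}\\
\reducesto~ \Case \; (\dec{B}~\Unit)^R \, \{ \dec{B} \; z \mapsto \Return \; z; w \mapsto N \}\\
\reducesto~ \Return \; \Unit
\el
\]
%
The first step is an instance of \semlab{Case_2}, which substitutes the scrutinee for the fall-through binder $y$; the second is an instance of \semlab{Case_1}, which binds the payload $\Unit$ to $z$.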
%
The \semlab{Let} rule eliminates a trivial computation term $\Return\;V$ by substituting $V$ for $x$ in the continuation $N$.
%
\paragraph{Evaluation contexts}
Recall from Section~\ref{sec:base-language-terms}, Figure~\ref{fig:base-language-term-syntax} that the syntax of let bindings allows a general computation term $M$ to occur on the right-hand side of the binding, i.e. $\Let\;x \revto M \;\In\;N$. Thus we are seemingly stuck in the general case, as the \semlab{Let} rule only applies if the right-hand side is a trivial computation.
%
It is at this stage that we make use of the notion of \emph{evaluation contexts} due to \citet{Felleisen87}. An evaluation context is a syntactic construction which decomposes the dynamic semantics into a set of base rules (i.e. the rules presented thus far) and an inductive rule, which enables us to focus on a particular computation term, $M$, in some larger context, $\EC$, and reduce it in the said context to another computation $N$ whenever $M$ reduces outside of the context to that particular $N$. In our formalism, we call this rule \semlab{Lift}. Evaluation contexts are generated from the empty context $[~]$ and let expressions $\Let\;x \revto \EC \;\In\;N$. The choices of using fine-grain call-by-value and evaluation contexts may seem odd, if not arbitrary, at this stage; the reader may wonder with good reason why we elect to use fine-grain call-by-value over ordinary call-by-value. In Chapter~\ref{ch:unary-handlers} we will reap the benefits from our design choices, as we shall see that the combination of fine-grain call-by-value and evaluation contexts provides the basis for a convenient, simple semantic framework for working with continuations.
\section{Metatheoretic properties of \BCalc{}}
\label{sec:base-language-metatheory}
Thus far we have defined the syntax, static semantics, and dynamic semantics of \BCalc{}. In this section, we finish the definition of \BCalc{} by stating and proving some standard metatheoretic properties about the language.
%
We begin by showing that substitutions preserve typability.
%
\begin{lemma}[Preservation of typing under substitution]\label{lem:base-language-subst}
Let $\sigma$ be any type substitution, $V \in \ValCat$ any value, and $M \in \CompCat$ any computation such that $\typ{\Delta;\Gamma}{V : A}$ and $\typ{\Delta;\Gamma}{M : C}$; then $\typ{\Delta;\sigma~\Gamma}{\sigma~V : \sigma~A}$ and $\typ{\Delta;\sigma~\Gamma}{\sigma~M : \sigma~C}$.
\end{lemma}
%
\begin{proof}
By induction on the typing derivations.
\end{proof}
%
\dhil{It is clear to me at this point, that I want to coalesce the substitution functions. Possibly define them as maps rather than ordinary functions.}
The reduction semantics satisfy a \emph{unique decomposition} property, which guarantees the existence and uniqueness of a complete decomposition of arbitrary computation terms into evaluation contexts.
%
\begin{lemma}[Unique decomposition]\label{lem:base-language-uniq-decomp}
For any computation $M \in \CompCat$ it holds that $M$ is either stuck or there exists a unique evaluation context $\EC \in \EvalCat$ and a redex $N \in \CompCat$ such that $M = \EC[N]$.
\end{lemma}
%
\begin{proof}
By structural induction on $M$.
\begin{description}
\item[Base step] $M = N$ where $N$ is either $\Return\;V$, $\Absurd^C\;V$, $V\,W$, or $V\,T$. In each case take $\EC = [\,]$ such that $M = \EC[N]$.
\item[Inductive step]
%
There are several cases to consider.
In each case we must find an evaluation context $\EC$ and a computation term $M'$ such that $M = \EC[M']$.
\begin{itemize}
\item[Case] $M = \Let\;\Record{\ell = x; y} = V\;\In\;N$: Take $\EC = [\,]$ such that $M = \EC[\Let\;\Record{\ell = x; y} = V\;\In\;N]$.
\item[Case] $M = \Case\;V\,\{\ell\,x \mapsto M'; y \mapsto N\}$: Take $\EC = [\,]$ such that $M = \EC[\Case\;V\,\{\ell\,x \mapsto M'; y \mapsto N\}]$.
\item[Case] $M = \Let\;x \revto M' \;\In\;N$: By the induction hypothesis it follows that $M'$ is either stuck or it decomposes (uniquely) into an evaluation context $\EC'$ and a redex $N'$. If $M'$ is stuck, then take $\EC = \Let\;x \revto [\,] \;\In\;N$ such that $M = \EC[M']$. Otherwise take $\EC = \Let\;x \revto \EC'\;\In\;N$ such that $M = \EC[N']$.
\end{itemize}
\end{description}
\end{proof}
%
The calculus enjoys a rather strong \emph{progress} property, which states that \emph{every} closed computation term reduces to a trivial computation term $\Return\;V$ for some value $V$. In other words, any realisable function in \BCalc{} is effect-free and total.
%
\begin{definition}[Computation normal form]\label{def:base-language-comp-normal}
A computation $M \in \CompCat$ is said to be \emph{normal} if it is of the form $\Return\; V$ for some value $V \in \ValCat$.
\end{definition}
%
\begin{theorem}[Progress]\label{thm:base-language-progress}
Suppose $\typ{}{M : C}$; then there exists $\typ{}{N : C}$ such that $M \reducesto^\ast N$ and $N$ is normal.
\end{theorem}
%
\begin{proof}
Proof by induction on typing derivations.
\end{proof}
%
\begin{corollary}
Every closed computation term in \BCalc{} is terminating.
\end{corollary}
%
The calculus also satisfies the \emph{preservation} property, which states that if some computation $M$ is well-typed and reduces to some other computation $M'$, then $M'$ is also well-typed.
%
\begin{theorem}[Preservation]\label{thm:base-language-preservation}
Suppose $\typ{\Delta;\Gamma}{M : C}$ and $M \reducesto M'$, then $\typ{\Delta;\Gamma}{M' : C}$.
\end{theorem}
%
\begin{proof}
Proof by induction on typing derivations.
\end{proof}
\section{A primitive effect: recursion}
\label{sec:base-language-recursion}
%
As is evident from Theorem~\ref{thm:base-language-progress}, \BCalc{} admits no computational effects. As a consequence every realisable program is terminating. Thus the calculus provides a solid and minimal basis for studying the expressiveness of any extension, and in particular, which primitive effects any such extension may sneak into the calculus. However, we cannot write many (if any) interesting programs in \BCalc{}. The calculus is simply not expressive enough. In order to bring it closer to the ML-family of languages we endow the calculus with a fixpoint operator which introduces recursion as a primitive effect. We dub the resulting calculus \BCalcRec{}.
%
First we augment the syntactic category of values with a new abstraction form for recursive functions.
%
\begin{syntax}
& V,W \in \ValCat &::=& \cdots \mid~ \tikzmarkin{rec1} \Rec \; f^{A \to C} \, x.M \tikzmarkend{rec1}
\end{syntax}
%
The $\Rec$ construct binds the function name $f$ and its argument $x$ in the body $M$. Typing of recursive functions is standard and entirely straightforward.
%
\begin{mathpar}
\inferrule*[Lab=\tylab{Rec}]
{\typ{\Delta;\Gamma,f : A \to C, x : A}{M : C}}
{\typ{\Delta;\Gamma}{(\Rec \; f^{A \to C} \, x . M) : A \to C}}
\end{mathpar}
%
The reduction semantics are similarly simple.
%
\begin{reductions}
\semlab{Rec} & (\Rec \; f^{A \to C} \, x.M)\,V &\reducesto& M[(\Rec \; f^{A \to C}\,x.M)/f, V/x]
\end{reductions}
%
Every occurrence of $f$ in $M$ is replaced by the recursive abstractor term, while every occurrence of $x$ in $M$ is replaced by the value argument $V$. The introduction of recursion means we obtain a slightly weaker progress theorem as some programs may now diverge.
%
\begin{theorem}[Progress]
\label{thm:base-rec-language-progress}
Suppose $\typ{}{M : C}$; then either $M$ diverges, or there exists $\typ{}{N : C}$ such that $M \reducesto^\ast N$, $N\not\reducesto$, and $N$ is normal.
\end{theorem}
%
\begin{proof}
Similar to the proof of Theorem~\ref{thm:base-language-progress}.
\end{proof}
\subsection{Tracking divergence via the effect system}
\label{sec:tracking-div}
%
With the $\Rec$-operator in \BCalcRec{} we can now implement the customary factorial function.
%
\[
\bl
\dec{fac} : \Int \to \Int \eff \emptyset\\
\dec{fac} \defas \Rec\;f~n.
\ba[t]{@{}l}
\Let\;is\_zero \revto n = 0\;\In\\
\If\;is\_zero\;\Then\; \Return\;1\\
\Else\;\ba[t]{@{~}l}
\Let\; n' \revto n - 1 \;\In\\
\Let\; m \revto f~n' \;\In\\
n * m
\ea
\ea
\el
\]
%
The $\dec{fac}$ function computes $n!$ for any non-negative integer $n$. If $n$ is negative then $\dec{fac}$ diverges as the function will repeatedly select the $\Else$-branch in the conditional expression. Thus this function is not total on its domain. Yet the effect signature does not alert us about the potential divergence. In fact, in this particular instance the effect row on the computation type is empty, which might deceive the doctrinaire into thinking that this function is `pure'. Whether this function is pure or impure depends on the precise notion of purity -- which we have yet to choose. In Section~\ref{sec:notions-of-purity} we shall make clear the notion of purity that we have in mind; first, however, let us briefly illustrate how we might utilise the effect system to track divergence. The key to tracking divergence is to modify the \tylab{Rec} rule to inject some primitive operation into the effect row.
%
\begin{mathpar}
\inferrule*[Lab=$\tylab{Rec}^\ast$]
{\typ{\Delta;\Gamma,f : A \to B\eff\{\dec{Div}:\Zero\}, x : A}{M : B\eff\{\dec{Div}:\Zero\}}}
{\typ{\Delta;\Gamma}{(\Rec \; f^{A \to B\eff\{\dec{Div}:\Zero\}} \, x .M) : A \to B\eff\{\dec{Div}:\Zero\}}}
\end{mathpar}
%
In this typing rule we have chosen to inject an operation named $\dec{Div}$ into the effect row of the computation type on the recursive binder $f$. The operation is primitive because it can never be directly invoked; rather, it occurs as a side-effect of applying a $\Rec$-definition.
%
Using this typing rule we get that $\dec{fac} : \Int \to \Int \eff \{\dec{Div}:\Zero\}$. Consequently, every application site of $\dec{fac}$ must now permit the $\dec{Div}$ operation in order to type check.
%
\begin{example}
We will use the following suspended computation to demonstrate effect tracking in action.
%
\[
\bl
\lambda\Unit. \dec{fac}~3
\el
\]
%
The computation calculates $3!$ when forced.
%
We will now give a typing derivation for this computation to illustrate how the application of $\dec{fac}$ causes its effect row to be propagated outwards. Let $\Gamma = \{\dec{fac} : \Int \to \Int \eff \{\dec{Div}:\Zero\}\}$.
%
\begin{mathpar}
\inferrule*[Right={\tylab{Lam}}]
{\inferrule*[Right={\tylab{App}}]
 {\typ{\emptyset;\Gamma,\Unit:\Record{}}{\dec{fac} : \Int \to \Int \eff \{\dec{Div}:\Zero\}}\\
  \typ{\emptyset;\Gamma,\Unit:\Record{}}{3 : \Int}
 }
 {\typc{\emptyset;\Gamma,\Unit:\Record{}}{\dec{fac}~3 : \Int}{\{\dec{Div}:\Zero\}}}
}
{\typ{\emptyset;\Gamma}{\lambda\Unit.\dec{fac}~3 : \UnitType \to \Int \eff \{\dec{Div}:\Zero\}}}
\end{mathpar}
%
The information that the computation applies a possibly divergent function internally gets reflected externally in its effect signature.
\end{example}
%
A possible inconvenience of the current formulation of $\tylab{Rec}^\ast$ is that recursion cannot be mixed with other computational effects. The reason is that the effect row on $A \to B\eff \{\dec{Div}:\Zero\}$ is closed. Thus in a practical general-purpose programming language implementation it is likely to be more convenient to leave the tail of the effect row open so as to allow recursion to be used in larger effect contexts. The rule formulation is also rather coarse as it renders every $\Rec$-definition as possibly divergent -- even definitions that are obviously non-divergent such as the $\Rec$-variation of the identity function: $\Rec\;f\,x.x$. A practical implementation could utilise a static termination checker~\cite{Walther94} to obtain more fine-grained tracking of divergence.
% By fairly lightweight means we can obtain a finer analysis of
% $\Rec$-definitions by simply having an additional typing rule for
% the application of $\Rec$.
%
%
% \begin{mathpar}
% \inferrule*[lab=$\tylab{AppRec}^\ast$]
% { E' = \{\dec{Div}:\Zero\} \uplus E\\
% \typ{\Delta}{E'}\\\\
% \typ{\Delta;\Gamma}{\Rec\;f^{A \to B \eff E}\,x.M : A \to B \eff E}\\
% \typ{\Delta;\Gamma}{W : A}
% }
% {\typ{\Delta;\Gamma}{(\Rec\;f^{A \to B \eff E}\,x.M)\,W : B \eff E'}}
% \end{mathpar}
%
%
\subsection{Notions of purity}
\label{sec:notions-of-purity}
The term `pure' is heavily overloaded in the programming literature.
% \dhil{In this thesis we use the Haskell notion of purity.}
\section{Row polymorphism}
\label{sec:row-polymorphism}
\dhil{A discussion of alternative row systems}
\section{Type and effect inference}
\dhil{While I would like to detail the type and effect inference, it may not be worth the effort. The reason I would like to do this goes back to 2016 when Richard Eisenberg asked me about how we do effect inference in Links.}
\chapter{Unary handlers}
\label{ch:unary-handlers}
%
In this chapter we study various flavours of unary effect handlers~\cite{PlotkinP13}, that is, handlers of a single computation. Concretely, we shall study four variations of effect handlers: in Section~\ref{sec:unary-deep-handlers} we augment the base calculus \BCalc{} with \emph{deep} effect handlers yielding the calculus \HCalc{}; subsequently in Sections~\ref{sec:unary-parameterised-handlers} and \ref{sec:unary-default-handlers} we refine \HCalc{} with two practically relevant kinds of handlers, namely, \emph{parameterised} and \emph{default} handlers. The former is a specialisation of a particular class of deep handlers, whilst the latter is important for programming at large. Finally, in Section~\ref{sec:unary-shallow-handlers} we study \emph{shallow} effect handlers which are an alternative to deep effect handlers.
% , First we endow \BCalc{}
% with a syntax for performing effectful operations, yielding the
% calculus \EffCalc{}. On its own the calculus is not very interesting,
% however, as the sole addition of the ability to perform effectful
% operations does not provide any practical note-worthy
% expressiveness. However, as we augment the calculus with different
% forms of effect handlers, we begin be able to implement interesting
% that are either difficult or impossible to realise in \BCalc{} in
% direct-style. Concretely, we shall study four variations of effect
% handlers, each as a separate extension to \EffCalc{}: deep, default,
% parameterised, and shallow handlers.
\section{Deep handlers}
\label{sec:unary-deep-handlers}
%
Programming with effect handlers is a dichotomy of \emph{performing} and \emph{handling} effectful operations -- or alternatively a dichotomy of \emph{constructing} and \emph{destructing}. An operation is a constructor of an effect without a predefined semantics. A handler destructs effects by pattern-matching on their operations. By matching on a particular operation, a handler instantiates the said operation with a particular semantics of its own choosing. The key ingredient to make this work in practice is \emph{delimited control}~\cite{Landin65,Landin65a,Landin98,FelleisenF86,DanvyF90}. Performing an operation reifies the remainder of the computation up to the nearest enclosing handler of the said operation. As our starting point we take the regular base calculus, \BCalc{}, without the recursion operator. We elect to do so to understand exactly which primitive effects deep handlers bring into our resulting calculus.
%
Deep handlers are defined as folds (catamorphisms) over computation trees, meaning they provide a uniform semantics to the handled operations of a given computation. In contrast, shallow handlers are defined as case-splits over computation trees, and thus allow a nonuniform semantics to be given to operations. We will discuss this last point in greater detail in Section~\ref{sec:unary-shallow-handlers}.
\subsection{Performing effectful operations}
\label{sec:eff-language-perform}
An effectful operation is a purely syntactic construction, which has no predefined dynamic semantics. The introduction form for effectful operations is a computation term.
%
\begin{syntax}
&M,N \in \CompCat &::=& \cdots \mid~ \tikzmarkin{do1} (\Do \; \ell~V)^E \tikzmarkend{do1}
\end{syntax}
%
Informally, the intended behaviour of the new computation term $(\Do\; \ell~V)^E$ is that it performs some operation $\ell$ with value argument $V$. Thus the $\Do$-construct is similar to the typical exception-signalling $\keyw{throw}$ or $\keyw{raise}$ constructs found in programming languages with support for exceptions. The term is annotated with an effect row $E$, providing a handle to obtain the current effect context. We make use of this effect row during typing:
%
\begin{mathpar}
\inferrule*[Lab=\tylab{Do}]
{ E = \{\ell : A \to B; R\} \\ \typ{\Delta}{E} \\ \typ{\Delta;\Gamma}{V : A} }
{\typc{\Delta;\Gamma}{(\Do \; \ell \; V)^E : B}{E}}
\end{mathpar}
%
An operation invocation is only well-typed if the effect row $E$ is well-kinded and contains the said operation with a present type; in other words, the current effect context permits the operation. The argument type $A$ must be the same as the domain of the operation.
%
We slightly abuse notation by using the function space arrow, $\to$, to also denote the operation space.
\dhil{Introduce notation for computation trees.}
\subsection{Handling of effectful operations}
%
We now present the elimination form for effectful operations, namely, handlers.
%
First we define notation for handler kinds and types.
%
\begin{syntax}
\slab{Kinds} &K \in \KindCat &::=& \cdots \mid~ \tikzmarkin{handlerkinds1} \Handler \tikzmarkend{handlerkinds1}\\
\slab{Handler types} &F \in \HandlerTypeCat &::=& C \Harrow D\\
\slab{Types} &T \in \TypeCat &::=& \cdots \mid~ \tikzmarkin{typeswithhandler} F \tikzmarkend{typeswithhandler}
\end{syntax}
%
The syntactic category of kinds is augmented with the kind $\Handler$, which we will ascribe to handler types $F$. The arrow, $\Harrow$, denotes the handler space. The type structure suggests that a handler is a transformer of computations: according to its type it takes a computation of type $C$ and returns a computation of type $D$. We use the following kinding rule to check that a handler type is well-kinded.
%
\begin{mathpar}
\inferrule*[Lab=\klab{Handler}]
{ \Delta \vdash C : \Comp \\
  \Delta \vdash D : \Comp
}
{\Delta \vdash C \Harrow D : \Handler}
\end{mathpar}
%
With the type structure in place, we present the term syntax for handlers. The addition of handlers augments the syntactic category of computations with a new computation form and introduces a new syntactic category of handler definitions.
%
\begin{syntax}
\slab{Computations} &M,N \in \CompCat &::=& \cdots \mid~ \tikzmarkin{deephandlers1} \Handle \; M \; \With \; H\tikzmarkend{deephandlers1}\\[1ex]
\slab{Handlers} &H \in \HandlerCat &::=& \{ \Return \; x \mapsto M \} \mid \{ \ell \; p \; r \mapsto N \} \uplus H\\
\slab{Terms} &t \in \TermCat &::=& \cdots \mid~ \tikzmarkin{handlerdefs} H \tikzmarkend{handlerdefs}
\end{syntax}
%
The handle construct $(\Handle \; M \; \With \; H)$ is the counterpart to $\Do$. It runs computation $M$ using handler $H$. A handler $H$ consists of a return clause $\{\Return \; x \mapsto M\}$ and a possibly empty set of operation clauses $\{\ell \; p_\ell \; r_\ell \mapsto N_\ell\}_{\ell \in \mathcal{L}}$.
%
The return clause $\{\Return \; x \mapsto M\}$ defines how to interpret the final return value of a handled computation. The variable $x$ is bound to the final return value in the body $M$.
%
Each operation clause $\{\ell \; p_\ell \; r_\ell \mapsto N_\ell\}_{\ell \in \mathcal{L}}$ defines how to interpret an invocation of a particular operation $\ell$. The variables $p_\ell$ and $r_\ell$ are bound in the body $N_\ell$: $p_\ell$ binds the argument carried by the operation and $r_\ell$ binds the continuation (the \emph{resumption}) of the invocation site of $\ell$.
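%
As a concrete illustration of the syntax (a sketch only; the operation and handler are hypothetical and not part of the formal development), consider an operation $\dec{Fail}$ of signature $\Int \to \Int$, whose argument we think of as an error code. The following handler interprets $\dec{Fail}$ as aborting the handled computation.
%
\[
H_{\mathsf{abort}} \defas \{\dec{Fail}\;p\;r \mapsto \Return\;p\} \uplus \{\Return\;x \mapsto \Return\;x\}
\]
%
The operation clause discards the resumption $r$ and returns the error code $p$, whilst the return clause passes the final return value of the handled computation through unchanged.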
Given a handler $H$, we often wish to refer to the clause for a particular operation or the return clause; for these purposes we define two convenient projections on handlers in the metalanguage.
\[
\ba{@{~}r@{~}c@{~}l@{~}l}
\hell &\defas& \{\ell\; p\; r \mapsto N \}, &\quad \text{where } \{\ell\; p\; r \mapsto N \} \in H\\
\hret &\defas& \{\Return\; x \mapsto N \}, &\quad \text{where } \{\Return\; x \mapsto N \} \in H\\
\ea
\]
%
The $\hell$ projection yields the singleton set consisting of the operation clause in $H$ that handles the operation $\ell$, whilst $\hret$ yields the singleton set containing the return clause of $H$.
%
We define the \emph{domain} of a handler as the set of operation labels it handles, i.e.
%
\begin{equations}
\dom &:& \HandlerCat \to \mathcal{P}(\LabelCat)\\
\dom(\{\Return\;x \mapsto M\}) &\defas& \emptyset\\
\dom(\{\ell\; p\;r \mapsto M\} \uplus H) &\defas& \{\ell\} \cup \dom(H)
\end{equations}
\subsection{Static semantics}
The typing of effect handlers is slightly more involved than the typing of the $\Do$-construct.
%
\begin{mathpar}
\inferrule*[Lab=\tylab{Handle}]
{ \typ{\Delta;\Gamma}{M : C} \\
  \typ{\Delta;\Gamma}{H : C \Harrow D}
}
{\typ{\Delta;\Gamma}{\Handle \; M \; \With\; H : D}}
%\mprset{flushleft}
\inferrule*[Lab=\tylab{Handler}]
{{\bl C = A \eff \{(\ell_i : A_i \to B_i)_i; R\} \\
  D = B \eff \{(\ell_i : P_i)_i; R\}\\
  H = \{\Return\;x \mapsto M\} \uplus \{ \ell_i\;p_i\;r_i \mapsto N_i \}_i \el}\\\\
  \typ{\Delta;\Gamma, x : A}{M : D}\\\\
  [\typ{\Delta;\Gamma,p_i : A_i, r_i : B_i \to D}{N_i : D}]_i
}
{\typ{\Delta;\Gamma}{H : C \Harrow D}}
\end{mathpar}
%
%
The \tylab{Handler} rule is where most of the work happens. The effect rows on the input computation type $C$ and the output computation type $D$ must mention every operation in the domain of the handler. In the output row those operations may be either present ($\Pre{A}$), absent ($\Abs$), or polymorphic in their presence ($\theta$), whilst in the input row they must be mentioned with a present type, as those types are used to type the operation clauses.
%
In each operation clause the resumption $r_i$ must have the same return type, $D$, as its handler. In the return clause the binder $x$ is bound to the return value of the handled computation, and is therefore given the value type $A$ of the input computation type $C$.
\subsection{Dynamic semantics}
We augment the operational semantics with two new reduction rules: one for handling return values and another for handling operations.
%{\small{
\begin{reductions}
\semlab{Ret} & \Handle \; (\Return \; V) \; \With \; H &\reducesto& N[V/x], \hfill\text{where } \hret = \{ \Return \; x \mapsto N \} \\
\semlab{Op} & \Handle \; \EC[\Do \; \ell \, V] \; \With \; H &\reducesto& N[V/p, \lambda y . \, \Handle \; \EC[\Return \; y] \; \With \; H/r], \\
\multicolumn{4}{@{}r@{}}{
\hfill\ba[t]{@{~}r@{~}l}
\text{where}& \hell = \{ \ell \; p \; r \mapsto N \}\\
\text{and} & \ell \notin \BL(\EC)
\ea
}
\end{reductions}%}}%
%
The rule \semlab{Ret} invokes the return clause of the current handler $H$ and substitutes $V$ for $x$ in the body $N$.
%
The rule \semlab{Op} handles an operation $\ell$, provided that the handler definition $H$ contains a corresponding operation clause for $\ell$ and that $\ell$ does not appear in the \emph{bound labels} ($\BL$) of the inner context $\EC$. The bound label condition enforces that an operation is always handled by the nearest enclosing suitable handler.
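%
To see the two rules in action, here is a small worked reduction, reusing the hypothetical operation $\dec{Fail}$ and handler $H_{\mathsf{abort}}$ sketched earlier (neither of which is part of the formal development).
%
\[
\bl
\ba{@{~}r@{~}c@{~}l}
\Handle \; (\Return\;5) \; \With \; H_{\mathsf{abort}} &\reducesto& \Return\;5\\
\Handle \; (\Let\;x \revto \Do\;\dec{Fail}~42\;\In\;\Return\;x) \; \With \; H_{\mathsf{abort}} &\reducesto& \Return\;42
\ea
\el
\]
%
The first reduction is an instance of \semlab{Ret}, substituting $5$ for $x$ in the return clause. The second is an instance of \semlab{Op} with $\EC = \Let\;x \revto [~]\;\In\;\Return\;x$: the body of the $\dec{Fail}$ clause is $\Return\;p$, so the substituted resumption $\lambda y.\,\Handle\;\EC[\Return\;y]\;\With\;H_{\mathsf{abort}}$ is simply discarded and the error code $42$ is returned.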
Formally, we define the notion of bound labels, $\BL : \EvalCat \to \mathcal{P}(\LabelCat)$, inductively over the structure of evaluation contexts.
%
\begin{equations}
\BL([~]) &=& \emptyset \\
\BL(\Let\;x \revto \EC\;\In\;N) &=& \BL(\EC) \\
\BL(\Handle\;\EC\;\With\;H) &=& \BL(\EC) \cup \dom(H) \\
\end{equations}
%
To illustrate the necessity of this condition, consider the following example with two nested handlers, which both handle the same operation $\ell$.
%
\[
\bl
\ba{@{~}r@{~}c@{~}l}
H_{\mathsf{inner}} &\defas& \{\ell\;p\;r \mapsto r~42; \Return\;x \mapsto \Return~x\}\\
H_{\mathsf{outer}} &\defas& \{\ell\;p\;r \mapsto r~0;\Return\;x \mapsto \Return~x \}
\ea\medskip\\
\Handle \; \left(\Handle\; \Do\;\ell~\Record{}\;\With\; H_{\mathsf{inner}}\right)\; \With\; H_{\mathsf{outer}} \reducesto^+
\begin{cases}
\Return\;42 & \text{Innermost}\\
\Return\;0 & \text{Outermost}
\end{cases}
\el
\]
%
Without the bound label condition there are two possible results, as the choice of which handler to pick for $\ell$ is ambiguous, meaning reduction would be nondeterministic. Conversely, with the bound label condition the above term reduces to $\Return\;42$, because $\ell$ is bound in the computation term of the outermost $\Handle$ and may therefore only be handled by the innermost handler.
%
We have made a conscious design decision in always selecting the nearest enclosing suitable handler for any operation. In fact, it is the \emph{only} natural and sensible choice, as picking any handler other than the nearest enclosing one renders programming with effect handlers anti-modular. Suppose instead we were always to select the outermost suitable handler; then the meaning of a closed program composition could not be derived from its immediate constituents. For example, consider using integer addition as the composition operator to compose the inner handle expression from above with a copy of itself.
%
\[
\bl
\dec{fortytwo} \defas \Handle\;\Do\;\ell~\Record{}\;\With\;H_{\mathsf{inner}} \medskip\\
\EC[\dec{fortytwo} + \dec{fortytwo}] \reducesto^+
\begin{cases}
\Return\; 84 & \text{when $\EC$ is empty}\\
?? & \text{otherwise}
\end{cases}
\el
\]
%
Clearly, if the ambient context $\EC$ is empty, then we can derive the result by reasoning locally about each constituent separately and subsequently adding their results together to obtain the computation term $\Return\;84$. However, if the ambient context is nonempty, then we need to account for the possibility that some handler for $\ell$ could occur in the context. For instance, if $\EC = \Handle\;[~]\;\With\;H_{\mathsf{outer}}$, then the result would be $\Return\;0$, which we cannot derive locally from looking at the constituents. Thus we argue that if we want programming to remain modular and compositional, then we must necessarily always select the nearest enclosing suitable handler to handle an operation invocation.
%
\dhil{Effect forwarding (the first condition)}
The metatheoretic properties of $\BCalc$ transfer to $\HCalc$ with little extra effort, although we must amend the definition of computation normal forms, as there are now two ways in which a computation term can terminate: successfully returning a value or getting stuck on an unhandled operation.
%
\begin{definition}[Computation normal forms]
We say that a computation term $N$ is normal with respect to an effect signature $E$ if $N$ is either of the form $\Return\;V$, or of the form $\EC[\Do\;\ell\,W]$ where $\ell \in E$ and $\ell \notin \BL(\EC)$.
\end{definition}
%
\begin{theorem}[Progress]
Suppose $\typ{}{M : C}$. Then either there exists $N$ with $\typ{}{N : C}$ such that $M \reducesto^+ N$ and $N$ is normal, or $M$ diverges.
\end{theorem}
%
\begin{theorem}[Preservation]
Suppose $\typ{\Delta;\Gamma}{M : C}$ and $M \reducesto M'$. Then $\typ{\Delta;\Gamma}{M' : C}$.
\end{theorem}
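%
As a small illustration of these notions (again using the hypothetical operation $\dec{Fail}$ from earlier), the computation term $\Let\;x \revto \Do\;\dec{Fail}~42\;\In\;\Return\;x$ is normal with respect to the effect signature $\{\dec{Fail} : \Int \to \Int\}$: it is of the form $\EC[\Do\;\dec{Fail}\,42]$ with $\EC = \Let\;x \revto [~]\;\In\;\Return\;x$ and $\dec{Fail} \notin \BL(\EC) = \emptyset$. It is stuck on an unhandled operation rather than being a return value, which is precisely the second kind of normal form admitted by the progress theorem; wrapping the term in a suitable $\Handle$ would allow reduction to continue.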
\subsection{Coding nontermination}
\subsection{Programming with deep handlers}
\dhil{Exceptions}
\dhil{Reader}
\dhil{State}
\dhil{Nondeterminism}
\section{Parameterised handlers}
\label{sec:unary-parameterised-handlers}
\dhil{Example: Lightweight threads}
\section{Default handlers}
\label{sec:unary-default-handlers}
\section{Shallow handlers}
\label{sec:unary-shallow-handlers}
\subsection{Syntax and static semantics}
\subsection{Dynamic semantics}
\chapter{N-ary handlers}
\label{ch:multi-handlers}
% \section{Syntax and Static Semantics}
% \section{Dynamic Semantics}
\section{Unifying deep and shallow handlers}
\part{Implementation}
\chapter{Continuation passing styles}
\chapter{Abstract machine semantics}
\part{Expressiveness}
\chapter{Computability, complexity, and expressiveness}
\label{ch:expressiveness}
\section{Notions of expressiveness}
Felleisen's macro-expressiveness, Longley's type-respecting expressiveness, Kammar's typability-preserving expressiveness.
\section{Interdefinability of deep and shallow handlers}
\section{Encoding parameterised handlers}
\chapter{The asymptotic power of control}
\label{ch:handlers-efficiency}
Describe the methodology\dots
\section{Generic search}
\section{Calculi}
\subsection{Base calculus}
\subsection{Handler calculus}
\section{A practical model of computation}
\subsection{Syntax}
\subsection{Semantics}
\subsection{Realisability}
\section{Points, predicates, and their models}
\section{Efficient generic search with effect handlers}
\subsection{Space complexity}
\section{Best-case complexity of generic search without control}
\subsection{No shortcuts}
\subsection{No sharing}
\chapter{Robustness of the asymptotic power of control}
\section{Mutable state}
\section{Exception handling}
\section{Effect system}
\part{Conclusions}
\chapter{Conclusions}
\label{ch:conclusions}
Some profound conclusions\dots
\chapter{Future work}
\label{ch:future-work}
%%
%% Appendices
%%
% \appendix
%% If you want the bibliography single-spaced (which is allowed), uncomment
%% the next line.
%\nocite{*}
\singlespace
%\nocite{*}
%\printbibliography[heading=bibintoc]
\bibliographystyle{plainnat}
\bibliography{\jobname}
%% ... that's all, folks!
\end{document}