%% 12pt font size, PhD thesis, LFCS, print twosided, new chapters on right page \documentclass[12pt,phd,lfcs,twoside,openright,logo,leftchapter,normalheadings]{infthesis} \shieldtype{0} %% Packages \usepackage[utf8]{inputenc} % Enable UTF-8 typing \usepackage[british]{babel} % British English \usepackage[breaklinks]{hyperref} % Interactive PDF \usepackage{url} \usepackage[sort&compress,square,numbers]{natbib} % Bibliography \usepackage{breakurl} \usepackage{amsmath} % Mathematics library \usepackage{amssymb} % Provides math fonts \usepackage{amsthm} % Provides \newtheorem, \theoremstyle, etc. \usepackage{mathtools} \usepackage{pkgs/mathpartir} % Inference rules \usepackage{stmaryrd} % semantic brackets \usepackage{array} \usepackage{float} % Float control \usepackage{caption,subcaption} % Sub figures support \DeclareCaptionFormat{underlinedfigure}{#1#2#3\hrulefill} \captionsetup[figure]{format=underlinedfigure} \usepackage[T1]{fontenc} % Fixes font issues %\usepackage{lmodern} \usepackage[activate=true, final, tracking=true, kerning=true, spacing=true, factor=1100, stretch=10, shrink=10]{microtype} \usepackage{enumerate} % Customise enumerate-environments \usepackage{xcolor} % Colours \usepackage{xspace} % Smart spacing in commands. \usepackage{tikz} \usetikzlibrary{fit,calc,trees,positioning,arrows,chains,shapes.geometric,% decorations.pathreplacing,decorations.pathmorphing,shapes,% matrix,shapes.symbols,intersections} \SetProtrusion{encoding={*},family={bch},series={*},size={6,7}} {1={ ,750},2={ ,500},3={ ,500},4={ ,500},5={ ,500}, 6={ ,500},7={ ,600},8={ ,500},9={ ,500},0={ ,500}} \SetExtraKerning[unit=space] {encoding={*}, family={bch}, series={*}, size={footnotesize,small,normalsize}} {\textendash={400,400}, % en-dash, add more space around it "28={ ,150}, % left bracket, add space from right "29={150, }, % right bracket, add space from left \textquotedblleft={ ,150}, % left quotation mark, space from right \textquotedblright={150, }} % right quotation mark, space from left \usepackage[customcolors,shade]{hf-tikz} % Shaded backgrounds. \hfsetfillcolor{gray!40} \hfsetbordercolor{gray!40} %% %% Theorem environments %% \theoremstyle{plain} \newtheorem{theorem}{Theorem}[chapter] \newtheorem{lemma}[theorem]{Lemma} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{definition}[theorem]{Definition} % Example environment. \makeatletter \def\@endtheorem{\hfill$\blacksquare$\endtrivlist\@endpefalse} % inserts a black square at the end. \makeatother \theoremstyle{definition} \newtheorem{example}{Example}[chapter] %% %% Load macros. %% \input{macros} %% Information about the title, etc. % \title{Higher-Order Theories of Handlers for Algebraic Effects} % \title{Handlers for Algebraic Effects: Applications, Compilation, and Expressiveness} % \title{Applications, Compilation, and Expressiveness for Effect Handlers} % \title{Handling Computational Effects} % \title{Programming Computable Effectful Functions} % \title{Handling Effectful Computations} \title{Foundations for Programming and Implementing Effect Handlers} \author{Daniel Hillerström} %% If the year of submission is not the current year, uncomment this line and %% specify it here: \submityear{2020} %% Specify the abstract here. \abstract{% An abstract\dots } %% Now we start with the actual document. 
\begin{document}
\raggedbottom

%% First, the preliminary pages
\begin{preliminary}

%% This creates the title page
\maketitle

%% Acknowledgements
\begin{acknowledgements}
List of people to thank
\begin{itemize}
\item Sam Lindley
\item John Longley
\item Christophe Dubach
\item KC Sivaramakrishnan
\item Stephen Dolan
\item Anil Madhavapeddy
\item Gemma Gordon
\item Leo White
\item Andreas Rossberg
\item Robert Atkey
\item Jeremy Yallop
\item Simon Fowler
\item Craig McLaughlin
\item Garrett Morris
\item James McKinna
\item Brian Campbell
\item Paul Piho
\item Amna Shahab
\item Gordon Plotkin
\item Ohad Kammar
\item School of Informatics (funding)
\item Google (Kevin Millikin, Dmitry Stefantsov)
\item Microsoft Research (Daan Leijen)
\end{itemize}
\end{acknowledgements}

%% Next we need to have the declaration.
% \standarddeclaration
\begin{declaration}
I declare that this thesis was composed by myself, that the work contained herein is my own except where explicitly stated otherwise in the text, and that this work has not been submitted for any other degree or professional qualification except as specified.
\end{declaration}

%% Finally, a dedication (this is optional -- uncomment the following line if
%% you want one).
% \dedication{To my mummy.}
\dedication{\emph{To be or to do}}

% \begin{preface}
% A preface will possibly appear here\dots
% \end{preface}

%% Create the table of contents
\setcounter{secnumdepth}{2} % Numbering on sections and subsections
\setcounter{tocdepth}{1} % Show chapters, sections and subsections in TOC
%\singlespace
\tableofcontents
%\doublespace

%% If you want a list of figures or tables, uncomment the appropriate line(s)
% \listoffigures
% \listoftables

\end{preliminary}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Main content %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%%
%% Introduction
%%
\chapter{Introduction}
\label{ch:introduction}

An enthralling introduction\dots

% Motivation: 1) compiler perspective: unifying control abstraction, lean runtime, desugaring of async/await, generators/iterators, 2) giving control to programmers, safer microkernels, everything as a library.

\section{Thesis outline}
Thesis outline\dots

\section{Typographical conventions}
Explain conventions\dots

\part{Background}
\label{p:background}

\chapter{The state of effectful programming}
\label{ch:related-work}

\section{Type and effect systems}
\section{Monadic programming}

\chapter{Continuations}
\label{ch:continuations}

\section{Zoo of control operators}
Describe how effect handlers fit amongst shift/reset, prompt/control, callcc, J, catchcont, etc.

\section{Implementation strategies}

\part{Design}

\chapter{An ML-flavoured programming language based on rows}
\label{ch:base-language}

In this chapter we introduce a core calculus, \BCalc{}, which we shall later use as the basis for exploring design considerations for effect handlers. This calculus is based on \CoreLinks{} by \citet{LindleyC12}, which distils the essence of the functional multi-tier web-programming language \Links{}~\cite{CooperLWY06}. \Links{} belongs to the ML-family~\cite{MilnerTHM97} of programming languages: it features typical characteristics of ML languages, such as a static type system supporting parametric polymorphism with type inference (in fact, Links supports first-class polymorphism), and a strict evaluation semantics.
However, \Links{} differentiates itself from the rest of the ML-family by making crucial use of \emph{row polymorphism} to support extensible records, variants, and the tracking of computational effects. Thus \Links{} has a rather strong emphasis on structural types rather than nominal types. \CoreLinks{} captures all of these properties of \Links{}.
Our calculus \BCalc{} differs in several aspects from \CoreLinks{}. For example, the underlying formalism of \CoreLinks{} is call-by-value, whilst the formalism of \BCalc{} is \emph{fine-grain call-by-value}~\cite{LevyPT03}, which shares similarities with A-normal form (ANF)~\cite{FlanaganSDF93} as it syntactically distinguishes between value and computation terms by mandating that every intermediate computation be named. However, unlike ANF, fine-grain call-by-value remains closed under $\beta$-reduction. The reason for choosing fine-grain call-by-value as our formalism is entirely a matter of convenience: as we shall see in Chapter~\ref{ch:unary-handlers}, fine-grain call-by-value is a convenient formalism for working with continuations.
Another point of difference between \CoreLinks{} and \BCalc{} is that the former models the integrated database query sublanguage of \Links{}. We do not consider the query sublanguage at all; instead our focus is entirely on modelling the interaction with, and programming of, computational effects.

\section{Syntax and static semantics}
\label{sec:syntax-base-language}

In \BCalc{}, types are precursory to terms, as the calculus is intrinsically typed. Thus we begin by presenting the syntax of its kind and type structure in Section~\ref{sec:base-language-types}. Subsequently, in Section~\ref{sec:base-language-terms}, we present the term syntax, before presenting the typing rules for terms in Section~\ref{sec:base-language-type-rules}.

% Typically the presentation of a programming language begins with its
% syntax. If the language is typed there are two possible starting
% points: Either one presents the term syntax first, or alternatively,
% the type syntax first. Although the choice may seem rather benign
% there is, however, a philosophical distinction to be drawn between
% them. Terms are, on their own, entirely meaningless, whilst types
% provide, on their own, an initial approximation of the semantics of
% terms. This is particularly true in an intrinsic typed system perhaps
% less so in an extrinsic typed system. In an intrinsic system types
% must necessarily be precursory to terms, as terms ultimately depend on
% the types. Following this argument leaves us with no choice but to
% first present the type syntax of \BCalc{} and subsequently its term
% syntax.
\subsection{Types and their kinds} \label{sec:base-language-types} % \begin{figure} \begin{syntax} % \slab{Value types} &A,B &::= & A \to C % \mid \alpha % \mid \forall \alpha^K.C % \mid \Record{R} % \mid [R]\\ % \slab{Computation types} % &C,D &::= & A \eff E \\ % \slab{Effect types} &E &::= & \{R\}\\ % \slab{Row types} &R &::= & \ell : P;R \mid \rho \mid \cdot \\ % \slab{Presence types} &P &::= & \Pre{A} \mid \Abs \mid \theta\\ % %\slab{Labels} &\ell & & \\ % % \slab{Types} &T &::= & A \mid C \mid E \mid R \mid P \\ % \slab{Kinds} &K &::= & \Type \mid \Row_\mathcal{L} \mid \Presence % \mid \Comp \mid \Effect \\ % \slab{Label sets} &\mathcal{L} &::=& \emptyset \mid \{\ell\} \uplus \mathcal{L}\\ % %\slab{Type variables} &\alpha, \rho, \theta& \\ % \slab{Type environments} &\Gamma &::=& \cdot \mid \Gamma, x:A \\ % \slab{Kind environments} &\Delta &::=& \cdot \mid \Delta, \alpha:K \slab{Value types} &A,B \in \ValTypeCat &::= & A \to C \mid \forall \alpha^K.C \mid \Record{R} \mid [R] \mid \alpha \\ \slab{Computation types\!\!} &C,D \in \CompTypeCat &::= & A \eff E \\ \slab{Effect types} &E \in \EffectCat &::= & \{R\}\\ \slab{Row types} &R \in \RowCat &::= & \ell : P;R \mid \rho \mid \cdot \\ \slab{Presence types\!\!\!\!\!} &P \in \PresenceCat &::= & \Pre{A} \mid \Abs \mid \theta\\ \\ \slab{Types} &T \in \TypeCat &::= & A \mid C \mid E \mid R \mid P \\ \slab{Kinds} &K \in \KindCat &::= & \Type \mid \Comp \mid \Effect \mid \Row_\mathcal{L} \mid \Presence \\ \slab{Label sets} &\mathcal{L} \in \LabelCat &::=& \emptyset \mid \{\ell\} \uplus \mathcal{L}\\\\ \slab{Type environments} &\Gamma \in \TyEnvCat &::=& \cdot \mid \Gamma, x:A \\ \slab{Kind environments} &\Delta \in \KindEnvCat &::=& \cdot \mid \Delta, \alpha:K \\ \end{syntax} \caption{Syntax of types, kinds, and their environments.} \label{fig:base-language-types} \end{figure} % Figure~\ref{fig:base-language-types} depicts the syntax for types, kinds, and their environments. % \paragraph{Value types} We distinguish between values and computations at the level of types. Value types comprise the function type $A \to C$, which maps values of type $A$ to computations of type $C$; the polymorphic type $\forall \alpha^K . C$ is parameterised by a type variable $\alpha$ of kind $K$; and the record type $\Record{R}$ represents records with fields constrained by row $R$. Dually, the variant type $[R]$ represents tagged sums constrained by row $R$. \paragraph{Computation types and effect types} The computation type $A \eff E$ is given by a value type $A$ and an effect type $E$, which specifies the effectful operations a computation inhabiting this type may perform. An effect type $E = \{R\}$ is constrained by row $R$. \paragraph{Row types} Row types play a pivotal role in our type system as effect, record, and variant types are uniformly given by row types. A \emph{row type} describes a collection of distinct labels, each annotated by a presence type. A presence type indicates whether a label is \emph{present} with type $A$ ($\Pre{A}$), \emph{absent} ($\Abs$) or \emph{polymorphic} in its presence ($\theta$). % Row types are either \emph{closed} or \emph{open}. A closed row type ends in~$\cdot$, whilst an open row type ends with a \emph{row variable} $\rho$ (in an effect row we usually use $\varepsilon$ rather than $\rho$ and refer to it as an \emph{effect variable}). % The row variable in an open row type can be instantiated with additional labels. Each label may occur at most once in each row (we enforce this restriction at the level of kinds). 
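For instance (a small illustration with two hypothetical, distinct labels $\ell_1$ and $\ell_2$), the closed row $\ell_1 : \Pre{A}; \ell_2 : \Abs; \cdot$ describes a collection in which $\ell_1$ is present with type $A$, $\ell_2$ is absent, and no further labels may occur, whereas the open row $\ell_1 : \Pre{A}; \rho$ says nothing about other labels, whose presence is determined only once $\rho$ is instantiated. Enclosing a row in the appropriate delimiters yields a record type $\Record{\ell_1 : \Pre{A}; \ell_2 : \Abs; \cdot}$, a variant type $[\ell_1 : \Pre{A}; \rho]$, or an effect type $\{\ell_1 : \Pre{A}; \rho\}$.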
We identify rows up to the reordering of labels. We assume structural equality on labels. % % \begin{mathpar} % \inferrule*[Lab=\rowlab{Closed}] % {~} % {\cdot \equiv_{\mathrm{row}} \cdot} % \inferrule*[Lab=\rowlab{Open}] % {~} % {\rho \equiv_{\mathrm{row}} \rho} % \inferrule*[Lab=\rowlab{Head}] % {R \equiv_{\mathrm{row}} R'} % {\ell:P;R \equiv_{\mathrm{row}} \ell:P;R'} % \inferrule*[Lab=\rowlab{Swap}] % {R \equiv_{\mathrm{row}} R'} % {\ell:P;\ell':P';R \equiv_{\mathrm{row}} \ell':P';\ell:P;R'} % \end{mathpar} % % % The last rule $\rowlab{Swap}$ let us identify rows up to the % reordering of labels. For instance, the two rows % $\ell_1 : P_1; \cdots; \ell_n : P_n; \cdot$ and % $\ell_n : P_n; \cdots ; \ell_1 : P_1; \cdot$ are equivalent. % Absent labels in closed rows are redundant. % The standard zero and unit types are definable using rows. We define the zero type as the empty, closed variant $\ZeroType \defas [\cdot]$. Dually, the unit type is defined as the empty, closed record type, i.e. $\UnitType \defas \Record{\cdot}$. We shall often omit $\cdot$ for closed rows. % \begin{figure} \begin{mathpar} % alpha : K \inferrule*[Lab=\klab{TyVar}] { } {\Delta, \alpha : K \vdash \alpha : K} % Computation \inferrule*[Lab=\klab{Comp}] { \Delta \vdash A : \Type \\ \Delta \vdash E : \Effect \\ } {\Delta \vdash A \eff E : \Comp} % A -E-> B, A : Type, E : Row, B : Type \inferrule*[Lab=\klab{Fun}] { \Delta \vdash A : \Type \\ \Delta \vdash C : \Comp \\ } {\Delta \vdash A \to C : \Type} % forall alpha : K . A : Type \inferrule*[Lab=\klab{Forall}] { \Delta, \alpha : K \vdash C : \Comp} {\Delta \vdash \forall \alpha^K . \, C : \Type} % Record \inferrule*[Lab=\klab{Record}] { \Delta \vdash R : \Row_\emptyset} {\Delta \vdash \Record{R} : \Type} % Variant \inferrule*[Lab=\klab{Variant}] { \Delta \vdash R : \Row_\emptyset} {\Delta \vdash [R] : \Type} % Effect \inferrule*[Lab=\klab{Effect}] { \Delta \vdash R : \Row_\emptyset} {\Delta \vdash \{R\} : \Effect} % Present \inferrule*[Lab=\klab{Present}] {\Delta \vdash A : \Type} {\Delta \vdash \Pre{A} : \Presence} % Absent \inferrule*[Lab=\klab{Absent}] { } {\Delta \vdash \Abs : \Presence} % Empty row \inferrule*[Lab=\klab{EmptyRow}] { } {\Delta \vdash \cdot : \Row_\mathcal{L}} % Extend row \inferrule*[Lab=\klab{ExtendRow}] { \Delta \vdash P : \Presence \\ \Delta \vdash R : \Row_{\mathcal{L} \uplus \{\ell\}} } {\Delta \vdash \ell : P;R : \Row_\mathcal{L}} \end{mathpar} \caption{Kinding rules} \label{fig:base-language-kinding} \end{figure} % \paragraph{Kinds} The kinds comprise $\Type$ for regular type variables, $\Presence$ for presence variables, $\Comp$ for computation type variables, $\Effect$ for effect variables, and lastly $\Row_{\mathcal{L}}$ for row variables. % The formation rules for kinds are given in Figure~\ref{fig:base-language-kinding}. The kinding judgement $\Delta \vdash T : K$ states that type $T$ has kind $K$ in kind environment $\Delta$. % The row kind is annotated by a set of labels $\mathcal{L}$. We use this set to track the labels of a given row type to ensure uniqueness amongst labels in each row type. For example, the kinding rule $\klab{ExtendRow}$ uses this set to constrain which labels may be mentioned in the tail of $R$. We shall elaborate on this in Section~\ref{sec:row-polymorphism}. \paragraph{Environments} Kind and type environments are right-extended sequences of bindings. A kind environment binds type variables to their kinds, whilst a type environment binds term variables to their types. 
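To illustrate how the label sets on row kinds enforce the uniqueness of labels, consider the following small worked example (with two hypothetical, distinct labels $\ell_1$ and $\ell_2$, and kind environment $\Delta \defas \theta : \Presence$), which assigns the row $\ell_1 : \theta; \ell_2 : \Abs; \cdot$ the kind $\Row_\emptyset$ demanded by the \klab{Record}, \klab{Variant}, and \klab{Effect} rules.
%
\begin{mathpar}
\inferrule*[Lab=\klab{ExtendRow}]
{ \inferrule*[Lab=\klab{TyVar}]
  { }
  {\Delta \vdash \theta : \Presence} \\
  \inferrule*[Lab=\klab{ExtendRow}]
  { \inferrule*[Lab=\klab{Absent}]
    { }
    {\Delta \vdash \Abs : \Presence} \\
    \inferrule*[Lab=\klab{EmptyRow}]
    { }
    {\Delta \vdash \cdot : \Row_{\{\ell_1\} \uplus \{\ell_2\}}} }
  {\Delta \vdash \ell_2 : \Abs; \cdot : \Row_{\{\ell_1\}}} }
{\Delta \vdash \ell_1 : \theta; \ell_2 : \Abs; \cdot : \Row_\emptyset}
\end{mathpar}
%
By contrast, a row that mentions the same label twice, such as $\ell : \Abs; \ell : \Abs; \cdot$, is not kindable: the outer application of \klab{ExtendRow} forces the tail $\ell : \Abs; \cdot$ to have kind $\Row_{\mathcal{L} \uplus \{\ell\}}$, and the inner application would in turn require the disjoint union $(\mathcal{L} \uplus \{\ell\}) \uplus \{\ell\}$, which does not exist.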
\paragraph{Type variables}
The type structure has three syntactically distinct sorts of type variables (the kinding system gives us five semantically distinct notions of type variables). As we sometimes wish to refer collectively to type variables, we define the set of type variables, $\TyVarCat$, to be generated by:
%
\[
\TyVarCat \defas
\ba[t]{@{~}l@{~}l}
       &\{ A \in \ValTypeCat \mid A \text{ has the form } \alpha \}\\
  \cup &\{ R \in \RowCat \mid R \text{ has the form } \rho \}\\
  \cup &\{ P \in \PresenceCat \mid P \text{ has the form } \theta \}
\ea
\]

% Value types comprise the function type $A \to C$, whose domain
% is a value type and its codomain is a computation type $B \eff E$,
% where $E$ is an effect type detailing which effects the implementing
% function may perform. Value types further comprise type variables
% $\alpha$ and quantification $\forall \alpha^K.C$, where the quantified
% type variable $\alpha$ is annotated with its kind $K$. Finally, the
% value types also contains record types $\Record{R}$ and variant types
% $[R]$, which are built up using row types $R$. An effect type $E$ is
% also built up using a row type. A row type is a sequence of fields of
% labels $\ell$ annotated with their presence information $P$. The
% presence information denotes whether a label is present $\Pre{A}$ with
% some type $A$, absent $\Abs$, or polymorphic in its presence
% $\theta$. A row type may be either \emph{open} or \emph{closed}. An
% open row ends in a row variable $\rho$ which can be instantiated with
% additional fields, effectively growing the row, whilst a closed row
% ends in $\cdot$, meaning the row cannot grow further.

% The kinds comprise $\Type$ for regular type variables, $\Presence$ for
% presence variables, $\Comp$ for computation type variables, $\Effect$ for
% effect variables, and lastly $\Row_{\mathcal{L}}$ for row
% variables. The row kind is annotated by a set of labels
% $\mathcal{L}$. We use this set to track the labels of a given row type
% to ensure uniqueness amongst labels in each row type. We shall
% elaborate on this in Section~\ref{sec:row-polymorphism}.

\subsection{Terms}
\label{sec:base-language-terms}
%
\begin{figure}
\begin{syntax}
\slab{Variables} &x \in \VarCat&&\\
\slab{Values} &V,W \in \ValCat &::= & x \mid \lambda x^A .\, M \mid \Lambda \alpha^K .\, M \mid \Record{} \mid \Record{\ell = V;W} \mid (\ell~V)^R \\
& & &\\
\slab{Computations} &M,N \in \CompCat &::= & V\,W \mid V\,T\\
& &\mid& \Let\; \Record{\ell=x;y} = V \; \In \; N\\
& &\mid& \Case\; V \{\ell~x \mapsto M; y \mapsto N\} \mid \Absurd^C~V\\
& &\mid& \Return~V \mid \Let \; x \revto M \; \In \; N\\
\slab{Terms} &t \in \TermCat &::= & x \mid V \mid M
\end{syntax}
\caption{Term syntax of \BCalc{}.}
\label{fig:base-language-term-syntax}
\end{figure}
%
The syntax for terms is given in Figure~\ref{fig:base-language-term-syntax}. We assume a countably infinite set of names $\VarCat$ from which we draw fresh variable names at will. We shall typically denote term variables by $x$, $y$, or $z$.
%
The syntax partitions terms into values and computations.
%
Value terms comprise variables ($x$), lambda abstraction ($\lambda x^A . \, M$), type abstraction ($\Lambda \alpha^K . \, M$), and the introduction forms for records and variants. Records are introduced using the empty record $\Record{}$ and record extension $\Record{\ell = V; W}$, whilst variants are introduced using injection $(\ell~V)^R$, which injects a field with label $\ell$ and value $V$ into a row whose type is $R$.
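For instance (a small illustration, assuming a label $\ell'$ distinct from $\ell$), the injection $(\ell~\Record{})^{\ell' : \Pre{\Record{}};\cdot}$ tags the unit value with $\ell$ and, as the typing rules below will confirm, inhabits the variant type $[\ell : \Pre{\Record{}}; \ell' : \Pre{\Record{}}; \cdot]$.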
We include the row type annotation in order to support bottom-up type reconstruction. All elimination forms are computation terms. Abstraction and type abstraction are eliminated using application ($V\,W$) and type application ($V\,A$), respectively.
%
The record eliminator $(\Let \; \Record{\ell=x;y} = V \; \In \; N)$ splits a record $V$ into $x$, the value associated with $\ell$, and $y$, the rest of the record. Non-empty variants are eliminated using the case construct ($\Case\; V\; \{\ell~x \mapsto M; y \mapsto N\}$), which evaluates the computation $M$ if the tag of $V$ matches $\ell$. Otherwise it falls through to $y$ and evaluates $N$. The elimination form for empty variants is ($\Absurd^C~V$).
%
There is one computation introduction form, namely the trivial computation $(\Return~V)$, which returns the value $V$. Its elimination form is the expression $(\Let \; x \revto M \; \In \; N)$, which evaluates $M$ and binds the result value to $x$ in $N$.
%
%
As our calculus is intrinsically typed, we annotate terms with type or kind information (term abstraction, type abstraction, injection, operations, and empty cases). However, we shall omit these annotations whenever they are clear from context.

\paragraph{Free variables}
We define the function $\FV : \TermCat \to \mathcal{P}(\VarCat)$ to compute the set of free variables of any given term.
%
\[
\bl
\ba[t]{@{~}l@{~}c@{~}l}
\begin{eqs}
\FV(x) &\defas& \{x\}\\
\FV(\lambda x^A.M) &\defas& \FV(M) \setminus \{x\}\\
\FV(\Lambda \alpha^K.M) &\defas& \FV(M)\\[1.0ex]
\FV(V\,W) &\defas& \FV(V) \cup \FV(W)\\
\FV(\Return~V) &\defas& \FV(V)\\
\end{eqs}
& \qquad\qquad &
\begin{eqs}
\FV(\Record{}) &\defas& \emptyset\\
\FV(\Record{\ell = V; W}) &\defas& \FV(V) \cup \FV(W)\\
\FV((\ell~V)^R) &\defas& \FV(V)\\[1.0ex]
\FV(V\,T) &\defas& \FV(V)\\
\FV(\Absurd^C~V) &\defas& \FV(V)\\
\end{eqs}
\ea\\
\begin{eqs}
\FV(\Let\;x \revto M \;\In\;N) &\defas& \FV(M) \cup (\FV(N) \setminus \{x\})\\
\FV(\Let\;\Record{\ell=x;y} = V\;\In\;N) &\defas& \FV(V) \cup (\FV(N) \setminus \{x, y\})\\
\FV(\Case~V~\{\ell\,x \mapsto M; y \mapsto N\}) &\defas& \FV(V) \cup (\FV(M) \setminus \{x\}) \cup (\FV(N) \setminus \{y\})
\end{eqs}
\el
\]

\subsection{Typing rules}
\label{sec:base-language-type-rules}
%
\begin{figure}
Values
\begin{mathpar}
% Variable
\inferrule*[Lab=\tylab{Var}]
{x : A \in \Gamma}
{\typv{\Delta;\Gamma}{x : A}}

% Abstraction
\inferrule*[Lab=\tylab{Lam}]
{\typ{\Delta;\Gamma, x : A}{M : C}}
{\typv{\Delta;\Gamma}{\lambda x^A .\, M : A \to C}}

% Polymorphic abstraction
\inferrule*[Lab=\tylab{PolyLam}]
{\typv{\Delta,\alpha : K;\Gamma}{M : C} \\ \alpha \notin \FTV(\Gamma) }
{\typv{\Delta;\Gamma}{\Lambda \alpha^K .\, M : \forall \alpha^K . \,C}}
\\
% unit : ()
\inferrule*[Lab=\tylab{Unit}]
{ }
{\typv{\Delta;\Gamma}{\Record{} : \Record{}}}

% Extension
\inferrule*[Lab=\tylab{Extend}]
{ \typv{\Delta;\Gamma}{V : A} \\ \typv{\Delta;\Gamma}{W : \Record{\ell:\Abs;R}} }
{\typv{\Delta;\Gamma}{\Record{\ell=V;W} : \Record{\ell:\Pre{A};R}}}

% Inject
\inferrule*[Lab=\tylab{Inject}]
{\typv{\Delta;\Gamma}{V : A}}
{\typv{\Delta;\Gamma}{(\ell~V)^R : [\ell : \Pre{A}; R]}}
\end{mathpar}
Computations
\begin{mathpar}
% Application
\inferrule*[Lab=\tylab{App}]
{\typv{\Delta;\Gamma}{V : A \to C} \\ \typv{\Delta;\Gamma}{W : A} }
{\typ{\Delta;\Gamma}{V\,W : C}}

% Polymorphic application
\inferrule*[Lab=\tylab{PolyApp}]
{\typv{\Delta;\Gamma}{V : \forall \alpha^K .
\, C} \\ \Delta \vdash A : K }
{\typ{\Delta;\Gamma}{V\,A : C[A/\alpha]}}

% Split
\inferrule*[Lab=\tylab{Split}]
{\typv{\Delta;\Gamma}{V : \Record{\ell : \Pre{A};R}} \\\\
 \typ{\Delta;\Gamma, x : A, y : \Record{\ell : \Abs; R}}{N : C} }
{\typ{\Delta;\Gamma}{\Let \; \Record{\ell =x;y} = V\; \In \; N : C}}

% Case
\inferrule*[Lab=\tylab{Case}]
{ \typv{\Delta;\Gamma}{V : [\ell : \Pre{A};R]} \\\\
  \typ{\Delta;\Gamma,x:A}{M : C} \\\\
  \typ{\Delta;\Gamma,y:[\ell : \Abs;R]}{N : C} }
{\typ{\Delta;\Gamma}{\Case \; V \{\ell\; x \mapsto M;y \mapsto N \} : C}}

% Absurd
\inferrule*[Lab=\tylab{Absurd}]
{\typv{\Delta;\Gamma}{V : []}}
{\typ{\Delta;\Gamma}{\Absurd^C \; V : C}}

% Return
\inferrule*[Lab=\tylab{Return}]
{\typv{\Delta;\Gamma}{V : A}}
{\typc{\Delta;\Gamma}{\Return \; V : A}{E}}
\\
% Let
\inferrule*[Lab=\tylab{Let}]
{\typc{\Delta;\Gamma}{M : A}{E} \\ \typ{\Delta;\Gamma, x : A}{N : C} }
{\typ{\Delta;\Gamma}{\Let \; x \revto M\; \In \; N : C}}
\end{mathpar}
\caption{Typing rules}
\label{fig:base-language-type-rules}
\end{figure}
%
\dhil{There is some rendering issue with T-labels in the typing rules.}
%
The typing rules are given by Figure~\ref{fig:base-language-type-rules}. In each typing rule, we implicitly assume that each type is well-kinded with respect to the kinding environment $\Delta$.

\paragraph{Typing values}
The \tylab{Var} rule states that a variable $x$ has type $A$ if $x$ is bound to $A$ in the type environment $\Gamma$. The \tylab{Lam} rule states that a lambda value $(\lambda x.M)$ has type $A \to C$ if the computation $M$ has type $C$ assuming $x : A$. In the \tylab{PolyLam} rule we make use of the set of \emph{free type variables} ($\FTV$) to ensure that we do not inadvertently capture a free type variable from the context.
%
We define $\FTV$ by mutual induction over type environments, $\Gamma$, and the type structure, $T$. Note that we always work up to $\alpha$-conversion~\cite{Church32} of types.
%
\[
\ba[t]{@{~}l@{~~~~~~}c@{~}l}
\multicolumn{3}{c}{\begin{eqs}
  \FTV &:& \TypeCat \to \mathcal{P}(\TyVarCat)
\end{eqs}}\\
\ba[t]{@{}l}
\begin{eqs}
% \FTV &:& \ValTypeCat \to \TyVarCat\\
\FTV(\alpha) &\defas& \{\alpha\}\\
\FTV(\forall \alpha^K.C) &\defas& \FTV(C) \setminus \{\alpha\}\\
\FTV(A \to C) &\defas& \FTV(A) \cup \FTV(C)\\
\FTV(A \eff E) &\defas& \FTV(A) \cup \FTV(E)\\
\FTV(\{R\}) &\defas& \FTV(R)\\
\FTV(\Record{R}) &\defas& \FTV(R)\\
\FTV([R]) &\defas& \FTV(R)\\
% \FTV(l:P;R) &\defas& \FTV(P) \cup \FTV(R)\\
% \FTV(\Pre{A}) &\defas& \FTV(A)\\
% \FTV(\Abs) &\defas& \emptyset\\
% \FTV(\theta) &\defas& \{\theta\}
\end{eqs}\ea
& &
\begin{eqs}
% \FTV([R]) &\defas& \FTV(R)\\
% \FTV(\Record{R}) &\defas& \FTV(R)\\
% \FTV(\{R\}) &\defas& \FTV(R)\\
% \FTV &:& \RowCat \to \TyVarCat\\
\FTV(\cdot) &\defas& \emptyset\\
\FTV(\rho) &\defas& \{\rho\}\\
\FTV(l:P;R) &\defas& \FTV(P) \cup \FTV(R)\\
% \FTV &:& \PresenceCat \to \TyVarCat\\
\FTV(\theta) &\defas& \{\theta\}\\
\FTV(\Abs) &\defas& \emptyset\\
\FTV(\Pre{A}) &\defas& \FTV(A)\\
\end{eqs}\\\\
\multicolumn{3}{c}{\begin{eqs}
  \FTV &:& \TyEnvCat \to \mathcal{P}(\TyVarCat)\\
  \FTV(\cdot) &\defas& \emptyset\\
  \FTV(\Gamma,x : A) &\defas& \FTV(\Gamma) \cup \FTV(A)
\end{eqs}}
% \begin{eqs}
%   \FTV(\theta) &\defas& \{\theta\}\\
%   \FTV(\Abs) &\defas& \emptyset\\
%   \FTV(\Pre{A}) &\defas& \FTV(A)
% \end{eqs} & &
% \begin{eqs}
%   \FTV(\cdot) &\defas& \emptyset\\
%   \FTV(\Gamma,x : A) &\defas& \FTV(\Gamma) \cup \FTV(A)
% \end{eqs}
\ea
\]
%
Thus the rule states that a type abstraction $(\Lambda \alpha.
M)$ has type $\forall \alpha.C$ if the computation $M$ has type $C$ assuming $\alpha : K$ and $\alpha$ does not appear in the free type variables of the current type environment $\Gamma$.
The \tylab{Unit} rule provides the basis for all records as it simply states that the empty record has the unit type. The \tylab{Extend} rule handles record extension. Suppose we wish to extend some record $W$ with $\ell = V$, that is, to form $\Record{\ell = V; W}$. This extension has type $\Record{\ell : \Pre{A};R}$ if and only if $V$ is well-typed and we can ascribe $W : \Record{\ell : \Abs; R}$. Since $\Record{\ell : \Abs; R}$ must be well-kinded with respect to $\Delta$, the label $\ell$ cannot be present in $W$, thus $\ell$ cannot occur more than once in the record. Similarly, the dual rule \tylab{Inject} states that the injection $(\ell~V)^R$ has type $[\ell : \Pre{A}; R]$ if the payload $V$ is well-typed. Again, since $[\ell : \Pre{A}; R]$ is well-kinded, it must be that $\ell$ is not mentioned by $R$. In other words, the tag cannot be injected twice.

\paragraph{Typing computations}
The \tylab{App} rule states that an application $V\,W$ has computation type $C$ if the abstractor term $V$ has type $A \to C$ and the argument term $W$ has type $A$, that is, the argument type and the domain type of the abstractor agree.
%
The type application rule \tylab{PolyApp} tells us that a type application $V\,A$ is well-typed whenever the abstractor term $V$ has the polymorphic type $\forall \alpha^K.C$ and the type $A$ has kind $K$. This rule makes use of type substitution. We write $C[A/\alpha]$ to mean the substitution of some type $A$ for some type variable $\alpha$ in some type $C$. We define type substitution as a ternary function as follows.
%
\[
\begin{eqs}
-[-/-] &:& \TypeCat \times \TypeCat \times \TyVarCat \to \TypeCat\\
(A \eff E)[B/\beta] &\defas& A[B/\beta] \eff E[B/\beta]\\
(A \to C)[B/\beta] &\defas& A[B/\beta] \to C[B/\beta]\\
(\forall \alpha^K.C)[B/\beta] &\defas& \forall \alpha^K.C[B/\beta] \quad \text{if } \alpha \neq \beta \text{ and } \alpha \notin \FTV(B)\\
\alpha[B/\beta] &\defas& \begin{cases} B & \text{if } \alpha = \beta\\ \alpha & \text{otherwise} \end{cases}\\
\Record{R}[B/\beta] &\defas& \Record{R[B/\beta]}\\
{[R]}[B/\beta] &\defas& [R[B/\beta]]\\
\{R\}[B/\beta] &\defas& \{R[B/\beta]\}\\
\cdot[B/\beta] &\defas& \cdot\\
\rho[B/\beta] &\defas& \begin{cases} B & \text{if } \rho = \beta\\ \rho & \text{otherwise} \end{cases}\\
(\ell : P;R)[B/\beta] &\defas& (\ell : P[B/\beta]; R[B/\beta])\\
\theta[B/\beta] &\defas& \begin{cases} B & \text{if } \theta = \beta\\ \theta & \text{otherwise} \end{cases}\\
\Abs[B/\beta] &\defas& \Abs\\
\Pre{A}[B/\beta] &\defas& \Pre{A[B/\beta]}
\end{eqs}
\]
%
The \tylab{Split} rule handles the typing of record destruction. When splitting a record term $V$ on some label $\ell$, we bind its payload to $x$ and the remainder of the record to $y$. The label we wish to split on must be present with some type $A$; hence we require that $V : \Record{\ell : \Pre{A}; R}$. This restriction prohibits us from splitting on an absent or presence-polymorphic label. The continuation of the splitting, $N$, must then have some computation type $C$ subject to the following restriction: $N : C$ must be well-typed under the additional assumptions $x : A$ and $y : \Record{\ell : \Abs; R}$, statically ensuring that it is not possible to split on $\ell$ again in the continuation $N$.
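\begin{example}[Typing record extension and splitting]
As a small illustration of the interplay between \tylab{Extend} and \tylab{Split}, suppose the type environment $\Gamma$ contains the hypothetical bindings $v : A$ and $w : \Record{\ell : \Abs; R}$ (chosen purely for exposition). Then \tylab{Extend} gives $\typv{\Delta;\Gamma}{\Record{\ell = v; w} : \Record{\ell : \Pre{A}; R}}$, and by \tylab{Split} and \tylab{Return} we obtain
\[
\typ{\Delta;\Gamma}{\Let\;\Record{\ell = x; y} = \Record{\ell = v; w}\;\In\;\Return\;x : A \eff E}
\]
for any well-kinded effect type $E$. Inside the continuation the binder $y$ is ascribed the type $\Record{\ell : \Abs; R}$, so any attempt to split on $\ell$ a second time is ill-typed.
\end{example}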
%
The \tylab{Case} rule is similar, but has two possible continuations: the success continuation, $M$, and the fall-through continuation, $N$. The label being matched must be present with some type $A$ in the type of the scrutinee, thus we require $V : [\ell : \Pre{A};R]$. The success continuation has some computation type $C$ under the assumption that the binder $x : A$, whilst the fall-through continuation also has type $C$, subject to the restriction that the binder $y : [\ell : \Abs;R]$, which statically enforces that no subsequent case split in $N$ can match on $\ell$.
%
The \tylab{Absurd} rule states that we can ascribe any computation type to the term $\Absurd~V$ if $V$ has the empty type $[]$.
%
The trivial computation term is typed by the \tylab{Return} rule, which says that $\Return\;V$ has computation type $A \eff E$ if the value $V$ has type $A$.
%
The \tylab{Let} rule types let bindings. The computation being bound, $M$, must have computation type $A \eff E$, whilst the continuation, $N$, must have computation type $C$ under the additional assumption that the binder $x : A$.

\section{Dynamic semantics}
\label{sec:base-language-dynamic-semantics}
%
\begin{figure}
\begin{reductions}
\semlab{App} & (\lambda x^A . \, M) V &\reducesto& M[V/x] \\
\semlab{TyApp} & (\Lambda \alpha^K . \, M) A &\reducesto& M[A/\alpha] \\
\semlab{Split} & \Let \; \Record{\ell = x;y} = \Record{\ell = V;W} \; \In \; N &\reducesto& N[V/x,W/y] \\
\semlab{Case$_1$} & \Case \; (\ell~V)^R \{ \ell \; x \mapsto M; y \mapsto N\} &\reducesto& M[V/x] \\
\semlab{Case$_2$} & \Case \; (\ell~V)^R \{ \ell' \; x \mapsto M; y \mapsto N\} &\reducesto& N[(\ell~V)^R/y], \hfill\quad \text{if } \ell \neq \ell' \\
\semlab{Let} & \Let \; x \revto \Return \; V \; \In \; N &\reducesto& N[V/x] \\
\semlab{Lift} & \EC[M] &\reducesto& \EC[N], \hfill\quad \text{if } M \reducesto N \\
\end{reductions}
\begin{syntax}
\slab{Evaluation contexts} & \mathcal{E} \in \EvalCat &::=& [\,] \mid \Let \; x \revto \mathcal{E} \; \In \; N
\end{syntax}
%%\[
% Evaluation context lift
%% \inferrule*[Lab=\semlab{Lift}]
%% { M \reducesto N }
%% { \mathcal{E}[M] \reducesto \mathcal{E}[N]}
%% \]
\caption{Contextual small-step semantics}
\label{fig:base-language-small-step}
\end{figure}
%
In this section we present the dynamic semantics of \BCalc{}. We choose to use a contextual small-step semantics, since, in conjunction with fine-grain call-by-value, it yields a considerably simpler semantics than the traditional structural operational semantics (SOS)~\cite{Plotkin04a}, because only the rule for let bindings admits a continuation, whereas in ordinary call-by-value SOS each congruence rule admits a continuation.
%
The semantics are based on a substitution model of computation. Thus, before presenting the reduction rules, we define an adequate substitution function. As usual we work up to $\alpha$-conversion~\cite{Church32} of terms in $\BCalc{}$.
%
\paragraph{Term substitution}
We write $M[V/x]$ for the substitution of some value $V$ for some variable $x$ in some computation term $M$. We use the same notation for substitution on values, i.e. $W[V/x]$ denotes the substitution of $V$ for $x$ in some value $W$. We define substitution as a ternary function, whose signature is given by
%
\[ -[-/-] : \TermCat \times \ValCat \times \VarCat \to \TermCat, \]
%
and we realise it by pattern matching on the first argument.
%
\[
\begin{eqs}
x[V/y] &\defas& \begin{cases} V & \text{if } x = y\\ x & \text{otherwise} \end{cases}\\
(\lambda x^A.M)[V/y] &\defas& \lambda x^A.M[V/y] \quad \text{if } x \neq y \text{ and } x \notin \FV(V)\\
(\Lambda \alpha^K. M)[V/y] &\defas& \Lambda \alpha^K. M[V/y]\\
\Unit[V/y] &\defas& \Unit\\
\Record{\ell = W; W'}[V/y] &\defas& \Record{\ell = W[V/y]; W'[V/y]}\\
(\ell~W)^R[V/y] &\defas& (\ell~W[V/y])^R\\
(W\,W')[V/y] &\defas& W[V/y]\,W'[V/y]\\
(W\,T)[V/y] &\defas& W[V/y]~T\\
(\Return~W)[V/y] &\defas& \Return~W[V/y]\\
(\Absurd^C~W)[V/y] &\defas& \Absurd^C~W[V/y]\\
(\ba[t]{@{}l} \Let\;\Record{\ell = x; y} = W\\ \In\;N)[V/z] \ea &\defas& \ba[t]{@{~}l} \Let\;\Record{\ell = x; y} = W[V/z]\\ \In\;N[V/z] \ea \quad \ba[t]{@{}l} \text{if } x \neq z, y \neq z,\\ \text{and } x,y\notin \FV(V) \ea\\
(\Case\;W\,\{ \ba[t]{@{}l} \ell~x \mapsto M\\ ; y \mapsto N \})[V/z] \ea &\defas& \Case\;W[V/z]\,\{ \ba[t]{@{}l} \ell~x \mapsto M[V/z]\\ ; y \mapsto N[V/z] \} \ea\quad \ba[t]{@{}l} \text{if } x \neq z, y \neq z,\\ \text{and } x, y \notin \FV(V) \ea\\
(\Let\;x \revto M \;\In\;N)[V/y] &\defas& \Let\;x \revto M[V/y] \;\In\;N[V/y] \quad\text{if } x \neq y \text{ and } x \notin \FV(V)
\end{eqs}
\]
%
% \begin{cases}
%   \Let\;x \revto M[V/y]\;\In\;N & \text{if } x = y\\
%   \Let\;x \revto M[V/y]\;\In\;N[V/y] & \text{otherwise}
% \end{cases}
% \end{eqs}
%
We write $t[t_0/x_0,\dots,t_n/x_n]$ to mean the simultaneous substitution of $t_0$ for $x_0$ up to $t_n$ for $x_n$ in $t$.
%
\paragraph{Reduction semantics}
The reduction relation $\reducesto : \CompCat \pto \CompCat$ is defined on computation terms. Figure~\ref{fig:base-language-small-step} depicts the reduction rules. The application rules \semlab{App} and \semlab{TyApp} eliminate a lambda abstraction and a type abstraction, respectively, by substituting the argument for the parameter in their body computation $M$.
%
Record splitting is handled by the \semlab{Split} rule: splitting on some label $\ell$ binds the payload $V$ to $x$ and the remainder $W$ to $y$ in the continuation $N$.
%
Disjunctive case splitting is handled by the two rules \semlab{Case$_1$} and \semlab{Case$_2$}. The former rule handles the success case, when the scrutinee's tag $\ell$ matches the tag of the success clause: it binds the payload $V$ to $x$ and proceeds to evaluate the continuation $M$. The latter rule handles the fall-through case: here the scrutinee gets bound to $y$ and evaluation proceeds with the continuation $N$.
%
The \semlab{Let} rule eliminates a trivial computation term $\Return\;V$ by substituting $V$ for $x$ in the continuation $N$.
%
\paragraph{Evaluation contexts}
Recall from Section~\ref{sec:base-language-terms}, Figure~\ref{fig:base-language-term-syntax} that the syntax of let bindings allows a general computation term $M$ to occur on the right hand side of the binding, i.e. $\Let\;x \revto M \;\In\;N$. Thus we are seemingly stuck in the general case, as the \semlab{Let} rule only applies if the right hand side is a trivial computation.
%
However, it is at this stage that we make use of the notion of \emph{evaluation contexts} due to \citet{Felleisen87}. An evaluation context is a syntactic construction which decomposes the dynamic semantics into a set of base rules (i.e. the rules presented thus far) and an inductive rule, which enables us to focus on a particular computation term, $M$, in some larger context, $\EC$, and reduce it within that context to another computation $N$ whenever $M$ reduces outside of the context to that particular $N$. In our formalism, we call this rule \semlab{Lift}. Evaluation contexts are generated from the empty context $[~]$ and let expressions $\Let\;x \revto \EC \;\In\;N$.
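As a small illustration of \semlab{Lift} at work, consider the computation $\Let\;x \revto (\lambda y^A.\Return\;y)\,V\;\In\;\Return\;x$ for some value $V$ of type $A$ (a hypothetical term chosen purely for exposition). Taking $\EC = \Let\;x \revto [\,]\;\In\;\Return\;x$, the \semlab{App} redex in the hole reduces under \semlab{Lift}, after which \semlab{Let} completes the evaluation:
\[
\bl
\Let\;x \revto (\lambda y^A.\Return\;y)\,V\;\In\;\Return\;x\\
\quad\reducesto \Let\;x \revto \Return\;V\;\In\;\Return\;x\\
\quad\reducesto \Return\;V
\el
\]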
At this stage the choices of fine-grain call-by-value and evaluation contexts may seem odd, if not arbitrary; the reader may wonder, with good reason, why we elect to use fine-grain call-by-value over ordinary call-by-value. In Chapter~\ref{ch:unary-handlers} we will reap the benefits of our design choices, as we shall see that the combination of fine-grain call-by-value and evaluation contexts provides the basis for a convenient, simple semantic framework for working with continuations.

\section{Metatheoretic properties of \BCalc{}}
\label{sec:base-language-metatheory}

Thus far we have defined the syntax, static semantics, and dynamic semantics of \BCalc{}. In this section, we finish the definition of \BCalc{} by stating and proving some standard metatheoretic properties about the language.
%
We begin by showing that substitutions preserve typability.
%
\begin{lemma}[Preservation of typing under substitution]\label{lem:base-language-subst}
  Let $\sigma$ be any type substitution, $V \in \ValCat$ any value, and $M \in \CompCat$ any computation such that $\typ{\Delta;\Gamma}{V : A}$ and $\typ{\Delta;\Gamma}{M : C}$, then $\typ{\Delta;\sigma~\Gamma}{\sigma~V : \sigma~A}$ and $\typ{\Delta;\sigma~\Gamma}{\sigma~M : \sigma~C}$.
\end{lemma}
%
\begin{proof}
  By induction on the typing derivations.
\end{proof}
%
\dhil{It is clear to me at this point, that I want to coalesce the substitution functions. Possibly define them as maps rather than ordinary functions.}

The reduction semantics satisfy a \emph{unique decomposition} property, which guarantees the existence and uniqueness of a decomposition of an arbitrary computation term into an evaluation context and a redex.
%
\begin{lemma}[Unique decomposition]\label{lem:base-language-uniq-decomp}
  For any computation $M \in \CompCat$ it holds that $M$ is either stuck or there exists a unique evaluation context $\EC \in \EvalCat$ and a redex $N \in \CompCat$ such that $M = \EC[N]$.
\end{lemma}
%
\begin{proof}
  By structural induction on $M$.
  \begin{description}
  \item[Base step] $M = N$ where $N$ is either $\Return\;V$, $\Absurd^C\;V$, $V\,W$, or $V\,T$. In each case take $\EC = [\,]$ such that $M = \EC[N]$.
  \item[Inductive step]
    %
    There are several cases to consider. In each case we must find an evaluation context $\EC$ and a computation term $M'$ such that $M = \EC[M']$.
    \begin{itemize}
    \item[Case] $M = \Let\;\Record{\ell = x; y} = V\;\In\;N$: Take $\EC = [\,]$ such that $M = \EC[\Let\;\Record{\ell = x; y} = V\;\In\;N]$.
    \item[Case] $M = \Case\;V\,\{\ell\,x \mapsto M'; y \mapsto N\}$: Take $\EC = [\,]$ such that $M = \EC[\Case\;V\,\{\ell\,x \mapsto M'; y \mapsto N\}]$.
    \item[Case] $M = \Let\;x \revto M' \;\In\;N$: By the induction hypothesis it follows that $M'$ is either stuck or it decomposes (uniquely) into an evaluation context $\EC'$ and a redex $N'$. If $M'$ is stuck, then take $\EC = \Let\;x \revto [\,] \;\In\;N$ such that $M = \EC[M']$. Otherwise take $\EC = \Let\;x \revto \EC'\;\In\;N$ such that $M = \EC[N']$.
    \end{itemize}
  \end{description}
\end{proof}
%
The calculus enjoys a rather strong \emph{progress} property, which states that \emph{every} closed computation term reduces to a trivial computation term $\Return\;V$ for some value $V$. In other words, any realisable function in \BCalc{} is effect-free and total.
%
\begin{definition}[Computation normal form]\label{def:base-language-comp-normal}
  A computation $M \in \CompCat$ is said to be \emph{normal} if it is of the form $\Return\; V$ for some value $V \in \ValCat$.
\end{definition}
%
\begin{theorem}[Progress]\label{thm:base-language-progress}
  Suppose $\typ{}{M : C}$. Then there exists $\typ{}{N : C}$ such that $M \reducesto^\ast N$ and $N$ is normal.
\end{theorem}
%
\begin{proof}
  By induction on the typing derivation.
\end{proof}
%
\begin{corollary}
  Every closed computation term in \BCalc{} is terminating.
\end{corollary}
%
The calculus also satisfies the \emph{preservation} property, which states that if some computation $M$ is well-typed and reduces to some other computation $M'$, then $M'$ is also well-typed.
%
\begin{theorem}[Preservation]\label{thm:base-language-preservation}
  Suppose $\typ{\Delta;\Gamma}{M : C}$ and $M \reducesto M'$. Then $\typ{\Delta;\Gamma}{M' : C}$.
\end{theorem}
%
\begin{proof}
  By induction on the typing derivation.
\end{proof}

\section{A primitive effect: recursion}
\label{sec:base-language-recursion}
%
As is evident from Theorem~\ref{thm:base-language-progress}, \BCalc{} admits no computational effects. As a consequence, every realisable program is terminating. Thus the calculus provides a solid and minimal basis for studying the expressiveness of any extension and, in particular, which primitive effects any such extension may sneak into the calculus. However, we cannot write many (if any) interesting programs in \BCalc{}. The calculus is simply not expressive enough. In order to bring it closer to the ML-family of languages, we endow the calculus with a fixpoint operator, which introduces recursion as a primitive effect. We dub the resulting calculus \BCalcRec{}.
%
First we augment the syntactic category of values with a new abstraction form for recursive functions.
%
\begin{syntax}
& V,W \in \ValCat &::=& \cdots \mid~ \tikzmarkin{rec1} \Rec \; f^{A \to C} \, x.M \tikzmarkend{rec1}
\end{syntax}
%
The $\Rec$ construct binds the function name $f$ and its argument $x$ in the body $M$. Typing of recursive functions is standard and entirely straightforward.
%
\begin{mathpar}
\inferrule*[Lab=\tylab{Rec}]
{\typ{\Delta;\Gamma,f : A \to C, x : A}{M : C}}
{\typ{\Delta;\Gamma}{(\Rec \; f^{A \to C} \, x . M) : A \to C}}
\end{mathpar}
%
The reduction semantics are similarly simple.
%
\begin{reductions}
\semlab{Rec} & (\Rec \; f^{A \to C} \, x.M)\,V &\reducesto& M[(\Rec \; f^{A \to C}\,x.M)/f, V/x]
\end{reductions}
%
Every occurrence of $f$ in $M$ is replaced by the recursive abstractor term, while every occurrence of $x$ in $M$ is replaced by the value argument $V$. The introduction of recursion means we obtain a slightly weaker progress theorem, as some programs may now diverge.
%
\begin{theorem}[Progress]
  \label{thm:base-rec-language-progress}
  Suppose $\typ{}{M : C}$. Then either $M$ diverges, or there exists $\typ{}{N : C}$ such that $M \reducesto^\ast N$ and $N$ is normal.
\end{theorem}
%
\begin{proof}
  Similar to the proof of Theorem~\ref{thm:base-language-progress}.
\end{proof}

\subsection{Tracking divergence via the effect system}
\label{sec:tracking-div}
%
With the $\Rec$-operator in \BCalcRec{}, we can now implement the customary factorial function.
%
\[
\bl
\dec{fac} : \Int \to \Int \eff \emptyset\\
\dec{fac} \defas \Rec\;f~n.
\ba[t]{@{}l}
\Let\;is\_zero \revto n = 0\;\In\\
\If\;is\_zero\;\Then\; \Return\;1\\
\Else\;\ba[t]{@{~}l}
  \Let\; n' \revto n - 1 \;\In\\
  \Let\; m \revto f~n' \;\In\\
  n * m
\ea
\ea
\el
\]
%
The $\dec{fac}$ function computes $n!$ for any non-negative integer $n$. If $n$ is negative then $\dec{fac}$ diverges, as the function will repeatedly select the $\Else$-branch in the conditional expression. Thus this function is not total on its domain. Yet the effect signature does not alert us to the potential divergence. In fact, in this particular instance the effect row on the computation type is empty, which might deceive the doctrinaire into thinking that this function is `pure'. Whether this function is pure or impure depends on the precise notion of purity -- which we have yet to choose. We shall soon make clear the notion of purity that we have in mind; however, before doing so, let us briefly illustrate how we might utilise the effect system to track divergence.

The key to tracking divergence is to modify the \tylab{Rec} rule to inject some primitive operation into the effect row.
%
\begin{mathpar}
\inferrule*[Lab=$\tylab{Rec}^\ast$]
{\typ{\Delta;\Gamma,f : A \to B\eff\{\dec{Div}:\Zero\}, x : A}{M : B\eff\{\dec{Div}:\Zero\}}}
{\typ{\Delta;\Gamma}{(\Rec \; f^{A \to B\eff\{\dec{Div}:\Zero\}} \, x .M) : A \to B\eff\{\dec{Div}:\Zero\}}}
\end{mathpar}
%
In this typing rule we have chosen to inject an operation named $\dec{Div}$ into the effect row of the computation type on the recursive binder $f$. The operation is primitive because it can never be directly invoked; rather, it occurs as a side effect of applying a $\Rec$-definition.
%
Using this typing rule we get that $\dec{fac} : \Int \to \Int \eff \{\dec{Div}:\Zero\}$. Consequently, every application site of $\dec{fac}$ must now permit the $\dec{Div}$ operation in order to type check.
%
\begin{example}
We will use the following suspended computation to demonstrate effect tracking in action.
%
\[
\bl
\lambda\Unit. \dec{fac}~3
\el
\]
%
The computation calculates $3!$ when forced.
%
We will now give a typing derivation for this computation to illustrate how the application of $\dec{fac}$ causes its effect row to be propagated outwards. Let $\Gamma = \{\dec{fac} : \Int \to \Int \eff \{\dec{Div}:\Zero\}\}$.
%
\begin{mathpar}
\inferrule*[Right={\tylab{Lam}}]
{\inferrule*[Right={\tylab{App}}]
  {\typ{\emptyset;\Gamma,\Unit:\Record{}}{\dec{fac} : \Int \to \Int \eff \{\dec{Div}:\Zero\}}\\
   \typ{\emptyset;\Gamma,\Unit:\Record{}}{3 : \Int} }
  {\typ{\emptyset;\Gamma,\Unit:\Record{}}{\dec{fac}~3 : \Int \eff \{\dec{Div}:\Zero\}}}
}
{\typ{\emptyset;\Gamma}{\lambda\Unit.\dec{fac}~3 : \Unit \to \Int \eff \{\dec{Div}:\Zero\}}}
\end{mathpar}
%
The information that the computation applies a possibly divergent function internally gets reflected externally in its effect signature.
\end{example}
%
A possible inconvenience of the current formulation of $\tylab{Rec}^\ast$ is that recursion cannot be mixed with other computational effects. The reason is that the effect row on $A \to B\eff \{\dec{Div}:\Zero\}$ is closed. Thus, in a practical general-purpose programming language implementation, it is likely to be more convenient to leave the tail of the effect row open so as to allow recursion to be used in larger effect contexts. The rule formulation is also rather coarse, as it renders every $\Rec$-definition as possibly divergent -- even definitions that are obviously non-divergent, such as the $\Rec$-variation of the identity function: $\Rec\;f\,x.x$.
A practical implementation could utilise a static termination checker~\cite{Walther94} to obtain more fine-grained tracking of divergence.

% By fairly lightweight means we can obtain a finer analysis of
% $\Rec$-definitions by simply having an additional typing rule for
% the application of $\Rec$.
%
%
% \begin{mathpar}
% \inferrule*[lab=$\tylab{AppRec}^\ast$]
% { E' = \{\dec{Div}:\Zero\} \uplus E\\
%   \typ{\Delta}{E'}\\\\
%   \typ{\Delta;\Gamma}{\Rec\;f^{A \to B \eff E}\,x.M : A \to B \eff E}\\
%   \typ{\Delta;\Gamma}{W : A}
% }
% {\typ{\Delta;\Gamma}{(\Rec\;f^{A \to B \eff E}\,x.M)\,W : B \eff E'}}
% \end{mathpar}
%
%
\section{Row polymorphism}
\label{sec:row-polymorphism}

\dhil{A discussion of alternative row systems}

\section{Type and effect inference}

\dhil{While I would like to detail the type and effect inference, it may not be worth the effort. The reason I would like to do this goes back to 2016 when Richard Eisenberg asked me about how we do effect inference in Links.}

\chapter{Unary handlers}
\label{ch:unary-handlers}

\section{Deep handlers}
\subsection{Syntax and static semantics}
\subsection{Effect inference}
\subsection{Dynamic semantics}

\section{Default handlers}
\section{Parameterised handlers}

\section{Shallow handlers}
\label{ch:shallow-handlers}
\subsection{Syntax and static semantics}
\subsection{Dynamic semantics}

\chapter{N-ary handlers}
\label{ch:multi-handlers}
% \section{Syntax and Static Semantics}
% \section{Dynamic Semantics}
\section{Unifying deep and shallow handlers}

\part{Implementation}

\chapter{Continuation passing styles}

\chapter{Abstract machine semantics}

\part{Expressiveness}

\chapter{Computability, complexity, and expressiveness}
\label{ch:expressiveness}

\section{Notions of expressiveness}
Felleisen's macro-expressiveness, Longley's type-respecting expressiveness, Kammar's typability-preserving expressiveness.

\section{Interdefinability of deep and shallow handlers}
\section{Encoding parameterised handlers}

\chapter{The asymptotic power of control}
\label{ch:handlers-efficiency}

Describe the methodology\dots

\section{Generic search}

\section{Calculi}
\subsection{Base calculus}
\subsection{Handler calculus}

\section{A practical model of computation}
\subsection{Syntax}
\subsection{Semantics}
\subsection{Realisability}

\section{Points, predicates, and their models}

\section{Efficient generic search with effect handlers}
\subsection{Space complexity}

\section{Best-case complexity of generic search without control}
\subsection{No shortcuts}
\subsection{No sharing}

\chapter{Robustness of the asymptotic power of control}

\section{Mutable state}
\section{Exception handling}
\section{Effect system}

\part{Conclusions}

\chapter{Conclusions}
\label{ch:conclusions}

Some profound conclusions\dots

\chapter{Future Work}
\label{ch:future-work}

%%
%% Appendices
%%
% \appendix

%% If you want the bibliography single-spaced (which is allowed), uncomment
%% the next line.
%\nocite{*}
\singlespace
%\nocite{*}
%\printbibliography[heading=bibintoc]
\bibliographystyle{plainnat}
\bibliography{\jobname}

%% ... that's all, folks!
\end{document}