% My PhD dissertation at the University of Edinburgh, Scotland https://www.dhil.net/research/
%% 12pt font size, PhD thesis, LFCS, print twosided, new chapters on right page
\documentclass[12pt,phd,lfcs,twoside,openright,logo,leftchapter,normalheadings]{infthesis}
\shieldtype{0}
%% Packages
\usepackage[utf8]{inputenc} % Enable UTF-8 typing
\usepackage[british]{babel} % British English
\usepackage[breaklinks]{hyperref} % Interactive PDF
\usepackage{url}
\usepackage[sort&compress,square,numbers]{natbib} % Bibliography
\usepackage{breakurl}
\usepackage{amsmath} % Mathematics library
\usepackage{amssymb} % Provides math fonts
\usepackage{amsthm} % Provides \newtheorem, \theoremstyle, etc.
\usepackage{mathtools}
\usepackage{pkgs/mathpartir} % Inference rules
\usepackage{pkgs/mathwidth}
\usepackage{stmaryrd} % semantic brackets
\usepackage{array}
\usepackage{float} % Float control
\usepackage{caption,subcaption} % Sub figures support
\DeclareCaptionFormat{underlinedfigure}{#1#2#3\hrulefill}
\captionsetup[figure]{format=underlinedfigure}
\usepackage[T1]{fontenc} % Fixes font issues
%\usepackage{lmodern}
\usepackage[activate=true,
final,
tracking=true,
kerning=true,
spacing=true,
factor=1100,
stretch=10,
shrink=10]{microtype}
\usepackage{enumerate} % Customise enumerate-environments
\usepackage{xcolor} % Colours
\usepackage{xspace} % Smart spacing in commands.
\usepackage{tikz}
\usetikzlibrary{fit,calc,trees,positioning,arrows,chains,shapes.geometric,%
decorations.pathreplacing,decorations.pathmorphing,shapes,%
matrix,shapes.symbols,intersections}
\SetProtrusion{encoding={*},family={bch},series={*},size={6,7}}
{1={ ,750},2={ ,500},3={ ,500},4={ ,500},5={ ,500},
6={ ,500},7={ ,600},8={ ,500},9={ ,500},0={ ,500}}
\SetExtraKerning[unit=space]
{encoding={*}, family={bch}, series={*}, size={footnotesize,small,normalsize}}
{\textendash={400,400}, % en-dash, add more space around it
"28={ ,150}, % left bracket, add space from right
"29={150, }, % right bracket, add space from left
\textquotedblleft={ ,150}, % left quotation mark, space from right
\textquotedblright={150, }} % right quotation mark, space from left
\usepackage[customcolors,shade]{hf-tikz} % Shaded backgrounds.
\hfsetfillcolor{gray!40}
\hfsetbordercolor{gray!40}
% Cancelling
%% Strike-through ("cancellation") of terms, drawn with TikZ.
%% \Cancel[<tikz options>]{<content>} draws a diagonal line (SW--NE) through
%% <content>; passing the key X among the options additionally draws the
%% opposite diagonal (NW--SE), producing a cross.
\newif\ifCancelX
% The TikZ key X raises the flag; since \tikz opens a group, the flag
% resets after each picture, so X never leaks into the next \Cancel.
\tikzset{X/.code={\CancelXtrue}}
\newcommand{\Cancel}[2][]{\relax
\ifmmode%
% Math mode: wrap the content in $...$ inside the node.
\tikz[baseline=(X.base),inner sep=0pt] {\node (X) {$#2$};
\tikzset{#1}
\draw[#1,overlay,shorten >=-2pt,shorten <=-2pt] (X.south west) -- (X.north east);
\ifCancelX
\draw[#1,overlay,shorten >=-2pt,shorten <=-2pt] (X.north west) -- (X.south east);
\fi}
\else
% Text mode: typeset the content as-is.
\tikz[baseline=(X.base),inner sep=0pt] {\node (X) {#2};
\tikzset{#1}
\draw[#1,overlay,shorten >=-2pt,shorten <=-2pt] (X.south west) -- (X.north east);
\ifCancelX
\draw[#1,overlay,shorten >=-2pt,shorten <=-2pt] (X.north west) -- (X.south east);
\fi}%
\fi}
% Red cross-out (both diagonals), e.g. for marking disallowed terms.
\newcommand{\XCancel}[1]{\Cancel[red,X,line width=1pt]{#1}}
%%
%% Theorem environments
%%
% Numbered statement environments (amsthm); all share one counter,
% numbered within chapters.
\theoremstyle{plain}
\newtheorem{theorem}{Theorem}[chapter]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{definition}[theorem]{Definition}
% Example environment.
\makeatletter
% NOTE(review): redefining \@endtheorem appends the black square to EVERY
% amsthm environment declared above (theorem, lemma, ...), not only the
% example environment below -- confirm this global effect is intended.
\def\@endtheorem{\hfill$\blacksquare$\endtrivlist\@endpefalse} % inserts a black square at the end.
\makeatother
\theoremstyle{definition}
\newtheorem{example}{Example}[chapter]
%%
%% Load macros.
%%
\input{macros}
%% Information about the title, etc.
% \title{Higher-Order Theories of Handlers for Algebraic Effects}
% \title{Handlers for Algebraic Effects: Applications, Compilation, and Expressiveness}
% \title{Applications, Compilation, and Expressiveness for Effect Handlers}
% \title{Handling Computational Effects}
% \title{Programming Computable Effectful Functions}
% \title{Handling Effectful Computations}
\title{Foundations for Programming and Implementing Effect Handlers}
\author{Daniel Hillerström}
%% If the year of submission is not the current year, uncomment this line and
%% specify it here:
\submityear{2020}
%% Specify the abstract here.
\abstract{%
An abstract\dots
}
%% Now we start with the actual document.
\begin{document}
\raggedbottom
%% First, the preliminary pages
\begin{preliminary}
%% This creates the title page
\maketitle
%% Acknowledgements
\begin{acknowledgements}
Firstly, I want to thank Sam Lindley for his guidance, advice, and
encouragement throughout my studies. He has been an enthusiastic
supervisor, and he has always been generous with his time. I am
fortunate to have been supervised by him.
Throughout my studies I have received funding from the
\href{https://www.ed.ac.uk/informatics}{School of Informatics} at
The University of Edinburgh, as well as an
\href{https://www.epsrc.ac.uk/}{EPSRC} grant
\href{http://pervasiveparallelism.inf.ed.ac.uk}{EP/L01503X/1} (EPSRC
Centre for Doctoral Training in Pervasive Parallelism), and by ERC
Consolidator Grant Skye (grant number 682315).
List of people to thank
\begin{itemize}
\item Sam Lindley
\item John Longley
\item Christophe Dubach
\item KC Sivaramakrishnan
\item Stephen Dolan
\item Anil Madhavapeddy
\item Gemma Gordon
\item Leo White
\item Andreas Rossberg
\item Robert Atkey
\item Jeremy Yallop
\item Simon Fowler
\item Craig McLaughlin
\item Garrett Morris
\item James McKinna
\item Brian Campbell
\item Paul Piho
\item Amna Shahab
\item Gordon Plotkin
\item Ohad Kammar
\item School of Informatics (funding)
\item Google (Kevin Millikin, Dmitry Stefantsov)
\item Microsoft Research (Daan Leijen)
\end{itemize}
\end{acknowledgements}
%% Next we need to have the declaration.
% \standarddeclaration
\begin{declaration}
I declare that this thesis was composed by myself, that the work
contained herein is my own except where explicitly stated otherwise
in the text, and that this work has not been submitted for any other
degree or professional qualification except as specified.
\end{declaration}
%% Finally, a dedication (this is optional -- uncomment the following line if
%% you want one).
% \dedication{To my mummy.}
\dedication{\emph{To be or to do}}
% \begin{preface}
% A preface will possibly appear here\dots
% \end{preface}
%% Create the table of contents
\setcounter{secnumdepth}{2} % Numbering on sections and subsections
\setcounter{tocdepth}{1} % Show chapters, sections and subsections in TOC
%\singlespace
\tableofcontents
%\doublespace
%% If you want a list of figures or tables, uncomment the appropriate line(s)
% \listoffigures
% \listoftables
\end{preliminary}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Main content %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
%% Introduction
%%
\chapter{Introduction}
\label{ch:introduction}
An enthralling introduction\dots
%
Motivation: 1) compiler perspective: unifying control abstraction,
lean runtime, desugaring of async/await, generators/iterators, 2)
giving control to programmers, safer microkernels, everything as a
library.
\section{Thesis outline}
Thesis outline\dots
\section{Typographical conventions}
Explain conventions\dots
\part{Background}
\label{p:background}
\chapter{The state of effectful programming}
\label{ch:related-work}
\section{Type and effect systems}
\section{Monadic programming}
\dhil{Moggi's seminal work applies the notion of monads to effectful
programming by modelling effects as monads. More importantly,
Moggi's work gives a precise characterisation of what's \emph{not}
an effect}
\chapter{Continuations}
\label{ch:continuations}
\section{Zoo of control operators}
Describe how effect handlers fit amongst shift/reset, prompt/control,
callcc, J, catchcont, etc.
\section{Implementation strategies}
\part{Design}
\chapter{An ML-flavoured programming language based on rows}
\label{ch:base-language}

In this chapter we introduce a core calculus, \BCalc{}, which we shall
later use as the basis for exploration of design considerations for
effect handlers. This calculus is based on \CoreLinks{} by
\citet{LindleyC12}, which distils the essence of the functional
multi-tier web-programming language
\Links{}~\cite{CooperLWY06}. \Links{} belongs to the
ML-family~\cite{MilnerTHM97} of programming languages as it features
typical characteristics of ML languages such as a static type system
supporting parametric polymorphism with type inference (in fact Links
supports first-class polymorphism), and its evaluation semantics is
strict. However, \Links{} differentiates itself from the rest of the
ML-family by making crucial use of \emph{row polymorphism} to support
extensible records, variants, and tracking of computational
effects. Thus \Links{} has a rather strong emphasis on structural
types rather than nominal types.

\CoreLinks{} captures all of these properties of \Links{}. Our
calculus \BCalc{} differs in several aspects from \CoreLinks{}. For
example, the underlying formalism of \CoreLinks{} is call-by-value,
whilst the formalism of \BCalc{} is \emph{fine-grain
call-by-value}~\cite{LevyPT03}, which shares similarities with
A-normal form (ANF)~\cite{FlanaganSDF93} as it syntactically
distinguishes between value and computation terms by mandating every
intermediate computation being named. However unlike ANF, fine-grain
call-by-value remains closed under $\beta$-reduction. The reason for
choosing fine-grain call-by-value as our formalism is entirely due to
convenience. As we shall see in Chapter~\ref{ch:unary-handlers}
fine-grain call-by-value is a convenient formalism for working with
continuations. Another point of difference between \CoreLinks{} and
\BCalc{} is that the former models the integrated database query
sublanguage of \Links{}. We do not consider the query sublanguage at
all, and instead our focus is entirely on modelling the interaction
and programming with computational effects.
\section{Syntax and static semantics}
\label{sec:syntax-base-language}

As \BCalc{} is intrinsically typed, we begin by presenting the syntax
of kinds and types in
Section~\ref{sec:base-language-types}. Subsequently in
Section~\ref{sec:base-language-terms} we present the term syntax,
before presenting the formation rules for types in
Section~\ref{sec:base-language-type-rules}.
% Typically the presentation of a programming language begins with its
% syntax. If the language is typed there are two possible starting
% points: Either one presents the term syntax first, or alternatively,
% the type syntax first. Although the choice may seem rather benign
% there is, however, a philosophical distinction to be drawn between
% them. Terms are, on their own, entirely meaningless, whilst types
% provide, on their own, an initial approximation of the semantics of
% terms. This is particularly true in an intrinsic typed system perhaps
% less so in an extrinsic typed system. In an intrinsic system types
% must necessarily be precursory to terms, as terms ultimately depend on
% the types. Following this argument leaves us with no choice but to
% first present the type syntax of \BCalc{} and subsequently its term
% syntax.
\subsection{Types and their kinds}
\label{sec:base-language-types}
%
\begin{figure}
\begin{syntax}
% \slab{Value types} &A,B &::= & A \to C
% \mid \alpha
% \mid \forall \alpha^K.C
% \mid \Record{R}
% \mid [R]\\
% \slab{Computation types}
% &C,D &::= & A \eff E \\
% \slab{Effect types} &E &::= & \{R\}\\
% \slab{Row types} &R &::= & \ell : P;R \mid \rho \mid \cdot \\
% \slab{Presence types} &P &::= & \Pre{A} \mid \Abs \mid \theta\\
% %\slab{Labels} &\ell & & \\
% % \slab{Types} &T &::= & A \mid C \mid E \mid R \mid P \\
% \slab{Kinds} &K &::= & \Type \mid \Row_\mathcal{L} \mid \Presence
% \mid \Comp \mid \Effect \\
% \slab{Label sets} &\mathcal{L} &::=& \emptyset \mid \{\ell\} \uplus \mathcal{L}\\
% %\slab{Type variables} &\alpha, \rho, \theta& \\
% \slab{Type environments} &\Gamma &::=& \cdot \mid \Gamma, x:A \\
% \slab{Kind environments} &\Delta &::=& \cdot \mid \Delta, \alpha:K
\slab{Value types} &A,B \in \ValTypeCat &::= & A \to C
\mid \forall \alpha^K.C
\mid \Record{R} \mid [R]
\mid \alpha \\
\slab{Computation types\!\!}
&C,D \in \CompTypeCat &::= & A \eff E \\
\slab{Effect types} &E \in \EffectCat &::= & \{R\}\\
\slab{Row types} &R \in \RowCat &::= & \ell : P;R \mid \rho \mid \cdot \\
\slab{Presence types\!\!\!\!\!} &P \in \PresenceCat &::= & \Pre{A} \mid \Abs \mid \theta\\
\\
\slab{Types} &T \in \TypeCat &::= & A \mid C \mid E \mid R \mid P \\
\slab{Kinds} &K \in \KindCat &::= & \Type \mid \Comp \mid \Effect \mid \Row_\mathcal{L} \mid \Presence \\
\slab{Label sets} &\mathcal{L} \in \LabelCat &::=& \emptyset \mid \{\ell\} \uplus \mathcal{L}\\\\
\slab{Type environments} &\Gamma \in \TyEnvCat &::=& \cdot \mid \Gamma, x:A \\
\slab{Kind environments} &\Delta \in \KindEnvCat &::=& \cdot \mid \Delta, \alpha:K \\
\end{syntax}
\caption{Syntax of types, kinds, and their environments.}
\label{fig:base-language-types}
\end{figure}
%
The types are divided into several distinct syntactic categories which
are given in Figure~\ref{fig:base-language-types} along with the
syntax of kinds and environments.
%
\paragraph{Value types}
We distinguish between values and computations at the level of
types. Value types comprise the function type $A \to C$, which maps
values of type $A$ to computations of type $C$; the polymorphic type
$\forall \alpha^K . C$ is parameterised by a type variable $\alpha$ of
kind $K$; and the record type $\Record{R}$ represents records with
fields constrained by row $R$. Dually, the variant type $[R]$
represents tagged sums constrained by row $R$.
\paragraph{Computation types and effect types}
The computation type $A \eff E$ is given by a value type $A$ and an
effect type $E$, which specifies the effectful operations a
computation inhabiting this type may perform. An effect type
$E = \{R\}$ is constrained by row $R$.
\paragraph{Row types}
Row types play a pivotal role in our type system as effect, record,
and variant types are uniformly given by row types. A \emph{row type}
describes a collection of distinct labels, each annotated by a
presence type. A presence type indicates whether a label is
\emph{present} with type $A$ ($\Pre{A}$), \emph{absent} ($\Abs$) or
\emph{polymorphic} in its presence ($\theta$).
%
For example, the effect row $\{\Read:\Pre{\Int},\Write:\Abs,\cdot\}$
denotes a read-only context in which the operation label $\Read$ may
occur to access some integer value, whilst the operation label
$\Write$ cannot appear.
%
Row types are either \emph{closed} or \emph{open}. A closed row type
ends in~$\cdot$, whilst an open row type ends with a \emph{row
variable} $\rho$ (in an effect row we usually use $\varepsilon$
rather than $\rho$ and refer to it as an \emph{effect variable}).
%
The example effect row above is closed, an open variation of it ends
in an effect variable $\varepsilon$,
i.e. $\{\Read:\Pre{\Int},\Write:\Abs,\varepsilon\}$.
%
The row variable in an open row type can be instantiated with
additional labels subject to the restriction that each label may only
occur at most once (we enforce this restriction at the level of
kinds). We identify rows up to the reordering of labels as follows.
%
\begin{mathpar}
\inferrule*[Lab=\rowlab{Closed}]
{~}
{\cdot \equiv_{\mathrm{row}} \cdot}
\inferrule*[Lab=\rowlab{Open}]
{~}
{\rho \equiv_{\mathrm{row}} \rho}
\inferrule*[Lab=\rowlab{Head}]
{R \equiv_{\mathrm{row}} R'}
{\ell:P;R \equiv_{\mathrm{row}} \ell:P;R'}
\inferrule*[Lab=\rowlab{Swap}]
{R \equiv_{\mathrm{row}} R'}
{\ell:P;\ell':P';R \equiv_{\mathrm{row}} \ell':P';\ell:P;R'}
\end{mathpar}
%
% The last rule $\rowlab{Swap}$ let us identify rows up to the
% reordering of labels. For instance, the two rows
% $\ell_1 : P_1; \cdots; \ell_n : P_n; \cdot$ and
% $\ell_n : P_n; \cdots ; \ell_1 : P_1; \cdot$ are equivalent.
%
The \rowlab{Closed} rule states that the closed marker $\cdot$ is
equivalent to itself; similarly, the \rowlab{Open} rule states that any
two row variables are equivalent if and only if they have the same
syntactic name. The \rowlab{Head} rule compares the head of two given
rows and inductively compares their tails. The \rowlab{Swap} rule
permits reordering of labels. We assume structural equality on
labels.
% TODO(review): unfinished sentence removed here: ``The \rowlab{Head} rule''
%
The standard zero and unit types are definable using rows. We define
the zero type as the empty, closed variant $\ZeroType \defas
[\cdot]$. Dually, the unit type is defined as the empty, closed record
type, i.e. $\UnitType \defas \Record{\cdot}$.
% As absent labels in closed rows are redundant we will, for example,
% consider the following two rows equivalent
% $\Read:\Pre{\Int},\Write:\Abs,\cdot \equiv_{\mathrm{row}}
% \Read:\Pre{\Int},\cdot$.
For brevity, we shall often write $\ell : A$
to mean $\ell : \Pre{A}$ and omit $\cdot$ for closed rows.
%
\begin{figure}
\begin{mathpar}
% alpha : K
\inferrule*[Lab=\klab{TyVar}]
{ }
{\Delta, \alpha : K \vdash \alpha : K}
% Computation
\inferrule*[Lab=\klab{Comp}]
{ \Delta \vdash A : \Type \\
\Delta \vdash E : \Effect \\
}
{\Delta \vdash A \eff E : \Comp}
% A -E-> B, A : Type, E : Row, B : Type
\inferrule*[Lab=\klab{Fun}]
{ \Delta \vdash A : \Type \\
\Delta \vdash C : \Comp \\
}
{\Delta \vdash A \to C : \Type}
% forall alpha : K . A : Type
\inferrule*[Lab=\klab{Forall}]
{ \Delta, \alpha : K \vdash C : \Comp}
{\Delta \vdash \forall \alpha^K . \, C : \Type}
% Record
\inferrule*[Lab=\klab{Record}]
{ \Delta \vdash R : \Row_\emptyset}
{\Delta \vdash \Record{R} : \Type}
% Variant
\inferrule*[Lab=\klab{Variant}]
{ \Delta \vdash R : \Row_\emptyset}
{\Delta \vdash [R] : \Type}
% Effect
\inferrule*[Lab=\klab{Effect}]
{ \Delta \vdash R : \Row_\emptyset}
{\Delta \vdash \{R\} : \Effect}
% Present
\inferrule*[Lab=\klab{Present}]
{\Delta \vdash A : \Type}
{\Delta \vdash \Pre{A} : \Presence}
% Absent
\inferrule*[Lab=\klab{Absent}]
{ }
{\Delta \vdash \Abs : \Presence}
% Empty row
\inferrule*[Lab=\klab{EmptyRow}]
{ }
{\Delta \vdash \cdot : \Row_\mathcal{L}}
% Extend row
\inferrule*[Lab=\klab{ExtendRow}]
{ \Delta \vdash P : \Presence \\
\Delta \vdash R : \Row_{\mathcal{L} \uplus \{\ell\}}
}
{\Delta \vdash \ell : P;R : \Row_\mathcal{L}}
\end{mathpar}
\caption{Kinding rules.}
\label{fig:base-language-kinding}
\end{figure}
%
\paragraph{Kinds}
The kinds classify the different categories of types. The $\Type$ kind
classifies value types, $\Presence$ classifies presence annotations,
$\Comp$ classifies computation types, $\Effect$ classifies effect
types, and lastly $\Row_{\mathcal{L}}$ classifies rows.
%
The formation rules for kinds are given in
Figure~\ref{fig:base-language-kinding}. The kinding judgement
$\Delta \vdash T : K$ states that type $T$ has kind $K$ in kind
environment $\Delta$.
%
The row kind is annotated by a set of labels $\mathcal{L}$. We use
this set to track the labels of a given row type to ensure uniqueness
amongst labels in each row type. For example, the kinding rule
$\klab{ExtendRow}$ uses this set to constrain which labels may be
mentioned in the tail of $R$. We shall elaborate on this in
Section~\ref{sec:row-polymorphism}.
\paragraph{Environments}
Kind and type environments are right-extended sequences of bindings. A
kind environment binds type variables to their kinds, whilst a type
environment binds term variables to their types.
\paragraph{Type variables} The type structure has three syntactically
distinct type variables (the kinding system gives us five semantically
distinct notions of type variables). As we sometimes wish to refer
collectively to type variables, we define the set of type variables,
$\TyVarCat$, to be generated by:
%
\[
\TyVarCat \defas
\ba[t]{@{~}l@{~}l}
&\{ A \in \ValTypeCat \mid A \text{ has the form } \alpha \}\\
\cup &\{ R \in \RowCat \mid R \text{ has the form } \rho \}\\
\cup &\{ P \in \PresenceCat \mid P \text{ has the form } \theta \}
\ea
\]
% Value types comprise the function type $A \to C$, whose domain
% is a value type and its codomain is a computation type $B \eff E$,
% where $E$ is an effect type detailing which effects the implementing
% function may perform. Value types further comprise type variables
% $\alpha$ and quantification $\forall \alpha^K.C$, where the quantified
% type variable $\alpha$ is annotated with its kind $K$. Finally, the
% value types also contains record types $\Record{R}$ and variant types
% $[R]$, which are built up using row types $R$. An effect type $E$ is
% also built up using a row type. A row type is a sequence of fields of
% labels $\ell$ annotated with their presence information $P$. The
% presence information denotes whether a label is present $\Pre{A}$ with
% some type $A$, absent $\Abs$, or polymorphic in its presence
% $\theta$. A row type may be either \emph{open} or \emph{closed}. An
% open row ends in a row variable $\rho$ which can be instantiated with
% additional fields, effectively growing the row, whilst a closed row
% ends in $\cdot$, meaning the row cannot grow further.
% The kinds comprise $\Type$ for regular type variables, $\Presence$ for
% presence variables, $\Comp$ for computation type variables, $\Effect$
% for effect variables, and lastly $\Row_{\mathcal{L}}$ for row
% variables. The row kind is annotated by a set of labels
% $\mathcal{L}$. We use this set to track the labels of a given row type
% to ensure uniqueness amongst labels in each row type. We shall
% elaborate on this in Section~\ref{sec:row-polymorphism}.
\paragraph{Free type variables} Sometimes we need to compute the free
type variables ($\FTV$) of a given type. To this end we define a
metafunction $\FTV$ by induction on the type structure, $T$, and
point-wise on type environments, $\Gamma$. Note that we always work up
to $\alpha$-conversion~\cite{Church32} of types.
%
\[
\ba[t]{@{~}l@{~~~~~~}c@{~}l}
\multicolumn{3}{c}{\begin{eqs}
\FTV &:& \TypeCat \to \TyVarCat
\end{eqs}}\\
\ba[t]{@{}l}
\begin{eqs}
% \FTV &:& \ValTypeCat \to \TyVarCat\\
\FTV(\alpha) &\defas& \{\alpha\}\\
\FTV(\forall \alpha^K.C) &\defas& \FTV(C) \setminus \{\alpha\}\\
\FTV(A \to C) &\defas& \FTV(A) \cup \FTV(C)\\
\FTV(A \eff E) &\defas& \FTV(A) \cup \FTV(E)\\
\FTV(\{R\}) &\defas& \FTV(R)\\
\FTV(\Record{R}) &\defas& \FTV(R)\\
\FTV([R]) &\defas& \FTV(R)\\
% \FTV(l:P;R) &\defas& \FTV(P) \cup \FTV(R)\\
% \FTV(\Pre{A}) &\defas& \FTV(A)\\
% \FTV(\Abs) &\defas& \emptyset\\
% \FTV(\theta) &\defas& \{\theta\}
\end{eqs}\ea & &
\begin{eqs}
% \FTV([R]) &\defas& \FTV(R)\\
% \FTV(\Record{R}) &\defas& \FTV(R)\\
% \FTV(\{R\}) &\defas& \FTV(R)\\
% \FTV &:& \RowCat \to \TyVarCat\\
\FTV(\cdot) &\defas& \emptyset\\
\FTV(\rho) &\defas& \{\rho\}\\
\FTV(l:P;R) &\defas& \FTV(P) \cup \FTV(R)\\
% \FTV &:& \PresenceCat \to \TyVarCat\\
\FTV(\theta) &\defas& \{\theta\}\\
\FTV(\Abs) &\defas& \emptyset\\
\FTV(\Pre{A}) &\defas& \FTV(A)\\
\end{eqs}\\\\
\multicolumn{3}{c}{\begin{eqs}
\FTV &:& \TyEnvCat \to \TyVarCat\\
\FTV(\cdot) &\defas& \emptyset\\
\FTV(\Gamma,x : A) &\defas& \FTV(\Gamma) \cup \FTV(A)
\end{eqs}}
% \begin{eqs}
% \FTV(\theta) &\defas& \{\theta\}\\
% \FTV(\Abs) &\defas& \emptyset\\
% \FTV(\Pre{A}) &\defas& \FTV(A)
% \end{eqs} & &
% \begin{eqs}
% \FTV(\cdot) &\defas& \emptyset\\
% \FTV(\Gamma,x : A) &\defas& \FTV(\Gamma) \cup \FTV(A)
% \end{eqs}
\ea
\]
%
\paragraph{Type substitution}
We define a type substitution map,
$\sigma : (\TyVarCat \times \TypeCat)^\ast$ as list of pairs mapping a
type variable to its replacement. We denote a single mapping as
$T/\alpha$ meaning substitute $T$ for the variable $\alpha$. We write
multiple mappings using list notation,
i.e. $[T_0/\alpha_0,\dots,T_n/\alpha_n]$. The domain of a substitution
map is the set generated by projecting the first component, i.e.
%
\[
\bl
\dom : (\TyVarCat \times \TypeCat)^\ast \to \TyVarCat\\
\dom(\sigma) \defas \{ \alpha \mid (\_/\alpha) \in \sigma \}
\el
\]
%
The application of a type substitution map on a type term, written
$T\sigma$ for some type $T$, is defined inductively on the type
structure as follows.
%
\[
\ba[t]{@{~}l@{~}c@{~}r}
\multicolumn{3}{c}{
\begin{eqs}
(A \eff E)\sigma &\defas& A\sigma \eff E\sigma\\
(A \to C)\sigma &\defas& A\sigma \to C\sigma\\
(\forall \alpha^K.C)\sigma &\simdefas& \forall \alpha^K.C\sigma\\
\alpha\sigma &\defas& \begin{cases}
A & \text{if } (\alpha,A) \in \sigma\\
\alpha & \text{otherwise}
\end{cases}
\end{eqs}}\\
\begin{eqs}
\Record{R}\sigma &\defas& \Record{R\sigma}\\
{[R]}\sigma &\defas& [R\sigma]\\
\{R\}\sigma &\defas& \{R\sigma\}\\
\cdot\sigma &\defas& \cdot\\
\rho\sigma &\defas& \begin{cases}
R & \text{if } (\rho, R) \in \sigma\\
\rho & \text{otherwise}
\end{cases}\\
\end{eqs}
& ~~~~~~~~~~ &
\begin{eqs}
(\ell : P;R)\sigma &\defas& (\ell : P\sigma; R\sigma)\\
\theta\sigma &\defas& \begin{cases}
P & \text{if } (\theta,P) \in \sigma\\
\theta & \text{otherwise}
\end{cases}\\
\Abs\sigma &\defas& \Abs\\
\Pre{A}\sigma &\defas& \Pre{A\sigma}
\end{eqs}
\ea
\]
%
\paragraph{Types and their inhabitants}
We now have the basic vocabulary to construct types in $\BCalc$. For
instance, the signature of the standard polymorphic identity function
is
%
\[
\forall \alpha^\Type. \alpha \to \alpha \eff \emptyset.
\]
%
Modulo the empty effect signature, this type is akin to the type one
would give for the identity function in System
F~\cite{Girard72,Reynolds74}, and thus we can use standard techniques
from parametricity~\cite{Wadler89} to reason about inhabitants of this
signature. However, in our system we can give an even more general
type to the identity function:
%
\[
\forall \alpha^\Type,\varepsilon^{\Row_\emptyset}. \alpha \to \alpha \eff \{\varepsilon\}.
\]
%
This type is polymorphic in its effect signature as signified by the
singleton open effect row $\{\varepsilon\}$, meaning it may be used in
an effectful context. By contrast, the former type may only be used in
a strictly pure context, i.e. the effect-free context.
%
\dhil{Maybe say something about reasoning effect types}
%
We can use the effect system to give precise types to effectful
computations. For example, we can give the signature of some
polymorphic computation that may only be run in a read-only context
%
\[
\forall \alpha^\Type, \varepsilon^{\Row_{\{\Read,\Write\}}}. \alpha \eff \{\Read:\Int,\Write:\Abs,\varepsilon\}.
\]
%
The effect row comprises a nullary $\Read$ operation returning some
integer and an absent operation $\Write$. The absence of $\Write$
means that the computation cannot run in a context that admits a
present $\Write$. It can, however, run in a context that admits a
presence polymorphic $\Write : \theta$ as the presence variable
$\theta$ may be instantiated to $\Abs$. An inhabitant of this type may be
run in larger effect contexts, i.e. contexts that admit more
operations, because the row ends in an effect variable.
%
The type and effect system is also precise about how a higher-order
function may use its function arguments. For example consider the
signature of a map-operation over some datatype such as
$\dec{Option}~\alpha^\Type \defas [\dec{None};\dec{Some}:\alpha]$
%
\[
\forall \alpha^\Type,\beta^\Type,\varepsilon^{\Row_\emptyset}. \Record{\alpha \to \beta \eff \{\varepsilon\}, \dec{Option}~\alpha} \to \dec{Option}~\beta \eff \{\varepsilon\}.
\]
%
% The $\dec{map}$ function for
% lists is a canonical example of a higher-order function which is
% parametric in its own effects and the effects of its function
% argument. Supposing $\BCalc$ have some polymorphic list datatype
% $\List$, then we would be able to ascribe the following signature to
% $\dec{map}$
% %
% \[
% \forall \alpha^\Type,\beta^\Type,\varepsilon^{\Row_\emptyset}. \Record{\alpha \to \beta \eff \{\varepsilon\},\List~\alpha} \to \List~\beta \eff \{\varepsilon\}.
% \]
%
The first argument is the function that will be applied to the data
carried by second argument. Note that the two effect rows are
identical and share the same effect variable $\varepsilon$, it is thus
evident that an inhabitant of this type can only perform whatever
effects its first argument is allowed to perform.
Higher-order functions may also transform their function arguments,
e.g. modify their effect rows. The following is the signature of a
higher-order function which restricts its argument's effect context
%
\[
\forall \alpha^\Type, \varepsilon^{\Row_{\{\Read\}}},\varepsilon'^{\Row_\emptyset}. (\UnitType \to \alpha \eff \{\Read:\Int,\varepsilon\}) \to (\UnitType \to \alpha \eff \{\Read:\Abs,\varepsilon\}) \eff \{\varepsilon'\}.
\]
%
The function argument is allowed to perform a $\Read$ operation,
whilst the returned function cannot. Moreover, the two functions share
the same effect variable $\varepsilon$. Like the option-map signature
above, an inhabitant of this type performs no effects of its own as
the (right-most) effect row is a singleton row containing a distinct
effect variable $\varepsilon'$.
\paragraph{Syntactic sugar}
Detail the syntactic sugar\dots
\subsection{Terms}
\label{sec:base-language-terms}
6 years ago
%
\begin{figure}
\begin{syntax}
\slab{Variables} &x \in \VarCat&&\\
\slab{Values} &V,W \in \ValCat &::= & x
\mid \lambda x^A .\, M \mid \Lambda \alpha^K .\, M
\mid \Record{} \mid \Record{\ell = V;W} \mid (\ell~V)^R \\
& & &\\
\slab{Computations} &M,N \in \CompCat &::= & V\,W \mid V\,T\\
& &\mid& \Let\; \Record{\ell=x;y} = V \; \In \; N\\
& &\mid& \Case\; V \{\ell~x \mapsto M; y \mapsto N\} \mid \Absurd^C~V\\
& &\mid& \Return~V \mid \Let \; x \revto M \; \In \; N\\
\slab{Terms} &t \in \TermCat &::= & x \mid V \mid M
\end{syntax}
\caption{Term syntax of \BCalc{}.}
\label{fig:base-language-term-syntax}
\end{figure}
%
The syntax for terms is given in
Figure~\ref{fig:base-language-term-syntax}. We assume a countably
infinite set of names $\VarCat$ from which we draw fresh variable
names. We shall typically denote term variables by $x$, $y$, or $z$.
%
The syntax partitions terms into values and computations.
%
Value terms comprise variables ($x$), lambda abstraction
($\lambda x^A . \, M$), type abstraction ($\Lambda \alpha^K . \, M$),
and the introduction forms for records and variants. Records are
introduced using the empty record $(\Record{})$ and record extension
$(\Record{\ell = V; W})$, whilst variants are introduced using
injection $((\ell~V)^R)$, which injects a field with label $\ell$ and
value $V$ into a row whose type is $R$. We include the row type
annotation to support bottom-up type reconstruction.
All elimination forms are computation terms. Abstraction and type
abstraction are eliminated using application ($V\,W$) and type
application ($V\,A$) respectively.
%
The record eliminator $(\Let \; \Record{\ell=x;y} = V \; \In \; N)$
splits a record $V$ into $x$, the value associated with $\ell$, and
$y$, the rest of the record. Non-empty variants are eliminated using
the case construct ($\Case\; V\; \{\ell~x \mapsto M; y \mapsto N\}$),
which evaluates the computation $M$ if the tag of $V$ matches
$\ell$. Otherwise it falls through to $y$ and evaluates $N$. The
elimination form for empty variants is ($\Absurd^C~V$).
%
There is one computation introduction form, namely, the trivial
computation $(\Return~V)$ which returns value $V$. Its elimination
form is the expression $(\Let \; x \revto M \; \In \; N)$ which evaluates
$M$ and binds the result value to $x$ in $N$.
%
%
As our calculus is intrinsically typed, we annotate terms with type or
kind information (term abstraction, type abstraction, injection,
operations, and empty cases). However, we shall omit these annotations
whenever they are clear from context.
\paragraph{Free variables} A given term is said to be \emph{closed} if
every applied occurrence of a variable is preceded by some
corresponding binding occurrence. Any applied occurrence of a variable
that is not preceded by a binding occurrence is said to be a \emph{free
variable}. We define the function $\FV : \TermCat \to \mathcal{P}(\VarCat)$
inductively on the term structure to compute the free variables of any
given term.
%
\[
\bl
\ba[t]{@{~}l@{~}c@{~}l}
\begin{eqs}
\FV(x) &\defas& \{x\}\\
\FV(\lambda x^A.M) &\defas& \FV(M) \setminus \{x\}\\
\FV(\Lambda \alpha^K.M) &\defas& \FV(M)\\[1.0ex]
\FV(V\,W) &\defas& \FV(V) \cup \FV(W)\\
\FV(\Return~V) &\defas& \FV(V)\\
\end{eqs}
& \qquad\qquad &
\begin{eqs}
\FV(\Record{}) &\defas& \emptyset\\
\FV(\Record{\ell = V; W}) &\defas& \FV(V) \cup \FV(W)\\
\FV((\ell~V)^R) &\defas& \FV(V)\\[1.0ex]
\FV(V\,T) &\defas& \FV(V)\\
\FV(\Absurd^C~V) &\defas& \FV(V)\\
\end{eqs}
\ea\\
\begin{eqs}
\FV(\Let\;x \revto M \;\In\;N) &\defas& \FV(M) \cup (\FV(N) \setminus \{x\})\\
\FV(\Let\;\Record{\ell=x;y} = V\;\In\;N) &\defas& \FV(V) \cup (\FV(N) \setminus \{x, y\})\\
\FV(\Case~V~\{\ell\,x \mapsto M; y \mapsto N\}) &\defas& \FV(V) \cup (\FV(M) \setminus \{x\}) \cup (\FV(N) \setminus \{y\})
\end{eqs}
\el
\]
%
The function computes the set of free variables bottom-up. Most cases
are homomorphic on the syntax constructors. The interesting cases are
those constructs which feature term binders: lambda abstraction, let
bindings, record splitting, and case splitting. In each of those
cases we subtract the relevant binder(s) from the set of free
variables.
\subsection{Typing rules}
\label{sec:base-language-type-rules}
%
\begin{figure}
Values
\begin{mathpar}
% Variable
\inferrule*[Lab=\tylab{Var}]
{x : A \in \Gamma}
{\typv{\Delta;\Gamma}{x : A}}
% Abstraction
\inferrule*[Lab=\tylab{Lam}]
{\typ{\Delta;\Gamma, x : A}{M : C}}
{\typv{\Delta;\Gamma}{\lambda x^A .\, M : A \to C}}
% Polymorphic abstraction
\inferrule*[Lab=\tylab{PolyLam}]
{\typv{\Delta,\alpha : K;\Gamma}{M : C} \\
\alpha \notin \FTV(\Gamma)
}
{\typv{\Delta;\Gamma}{\Lambda \alpha^K .\, M : \forall \alpha^K . \,C}}
\\
% unit : ()
\inferrule*[Lab=\tylab{Unit}]
{ }
{\typv{\Delta;\Gamma}{\Record{} : \UnitType}}
% Extension
\inferrule*[Lab=\tylab{Extend}]
{ \typv{\Delta;\Gamma}{V : A} \\
\typv{\Delta;\Gamma}{W : \Record{\ell:\Abs;R}}
}
{\typv{\Delta;\Gamma}{\Record{\ell=V;W} : \Record{\ell:\Pre{A};R}}}
% Inject
\inferrule*[Lab=\tylab{Inject}]
{\typv{\Delta;\Gamma}{V : A}}
{\typv{\Delta;\Gamma}{(\ell~V)^R : [\ell : \Pre{A}; R]}}
\end{mathpar}
Computations
\begin{mathpar}
% Application
\inferrule*[Lab=\tylab{App}]
{\typv{\Delta;\Gamma}{V : A \to C} \\
\typv{\Delta;\Gamma}{W : A}
}
{\typ{\Delta;\Gamma}{V\,W : C}}
% Polymorphic application
\inferrule*[Lab=\tylab{PolyApp}]
{\typv{\Delta;\Gamma}{V : \forall \alpha^K . \, C} \\
\Delta \vdash A : K
}
{\typ{\Delta;\Gamma}{V\,A : C[A/\alpha]}}
% Split
\inferrule*[Lab=\tylab{Split}]
{\typv{\Delta;\Gamma}{V : \Record{\ell : \Pre{A};R}} \\\\
\typ{\Delta;\Gamma, x : A, y : \Record{\ell : \Abs; R}}{N : C}
}
{\typ{\Delta;\Gamma}{\Let \; \Record{\ell =x;y} = V\; \In \; N : C}}
% Case
\inferrule*[Lab=\tylab{Case}]
{ \typv{\Delta;\Gamma}{V : [\ell : \Pre{A};R]} \\\\
\typ{\Delta;\Gamma,x:A}{M : C} \\\\
\typ{\Delta;\Gamma,y:[\ell : \Abs;R]}{N : C}
}
{\typ{\Delta;\Gamma}{\Case \; V \{\ell\; x \mapsto M;y \mapsto N \} : C}}
% Absurd
\inferrule*[Lab=\tylab{Absurd}]
{\typv{\Delta;\Gamma}{V : []}}
{\typ{\Delta;\Gamma}{\Absurd^C \; V : C}}
% Return
\inferrule*[Lab=\tylab{Return}]
{\typv{\Delta;\Gamma}{V : A}}
{\typc{\Delta;\Gamma}{\Return \; V : A}{E}}
\\
% Let
\inferrule*[Lab=\tylab{Let}]
{\typc{\Delta;\Gamma}{M : A}{E} \\
\typ{\Delta;\Gamma, x : A}{N : C}
}
{\typ{\Delta;\Gamma}{\Let \; x \revto M\; \In \; N : C}}
\end{mathpar}
\caption{Typing rules of \BCalc{}.}
\label{fig:base-language-type-rules}
\end{figure}
%
Thus the rule states that a type abstraction $(\Lambda \alpha. M)$ has
type $\forall \alpha.C$ if the computation $M$ has type $C$ assuming
$\alpha : K$ and $\alpha$ does not appear in the free type variables
of current type environment $\Gamma$. The \tylab{Unit} rule provides
the basis for all records as it simply states that the empty record
has type unit. The \tylab{Extend} rule handles record
extension. Supposing we wish to extend some record $\Record{W}$ with
$\ell = V$, that is $\Record{\ell = V; W}$. This extension has type
$\Record{\ell : \Pre{A};R}$ if and only if $V$ is well-typed and we
can ascribe $W : \Record{\ell : \Abs; R}$. Since
$\Record{\ell : \Abs; R}$ must be well-kinded with respect to
$\Delta$, the label $\ell$ cannot be mentioned in $W$, thus $\ell$
cannot occur more than once in the record. Similarly, the dual rule
\tylab{Inject} states that the injection $(\ell~V)^R$ has type
$[\ell : \Pre{A}; R]$ if the payload $V$ is well-typed. The implicit
well-kindedness condition on $R$ ensures that $\ell$ cannot be
injected twice. To illustrate how the kinding system prevents
duplicated labels consider the following ill-typed example
%
\[
(\dec{S}~\Unit)^{\dec{S}:\UnitType} : [\dec{S}:\UnitType;\dec{S}:\UnitType].
\]
%
Typing fails because the resulting row type is ill-kinded by the
\klab{ExtendRow}-rule:
\begin{mathpar}
\inferrule*[leftskip=6.5em,Right={\klab{Variant}}]
{\inferrule*[Right={\klab{ExtendRow}}]
{\vdash \Pre{\UnitType} : \Presence \\
\inferrule*[Right={\klab{ExtendRow}}]
{\vdash \Pre{\UnitType} : \Presence \\ \vdash \cdot : \Row_{\color{red}{\{\dec{S}\} \uplus \{\dec{S}\}}}}
{\vdash \dec{S}:\Pre{\UnitType};\cdot : \Row_{\emptyset \uplus \{\dec{S}\}}}}
{\vdash \dec{S}:\Pre{\UnitType};\dec{S}:\Pre{\UnitType};\cdot : \Row_{\emptyset}}
}
{\vdash [\dec{S}:\Pre{\UnitType};\dec{S}:\Pre{\UnitType};\cdot] : \Type}
\end{mathpar}
%
The two sets $\{\dec{S}\}$ and $\{\dec{S}\}$ are clearly not disjoint,
thus the second premise of the last application of \klab{ExtendRow}
cannot be satisfied.
\paragraph{Typing computations}
The \tylab{App} rule states that an application $V\,W$ has computation
type $C$ if the function-term $V$ has type $A \to C$ and the
argument term $W$ has type $A$, that is, both the argument type and the
domain type of the abstractor agree.
%
The type application rule \tylab{PolyApp} tells us that a type
application $V\,A$ is well-typed whenever the abstractor term $V$ has
the polymorphic type $\forall \alpha^K.C$ and the type $A$ has kind
$K$. This rule makes use of type substitution.
%
The \tylab{Split} rule handles typing of record deconstruction, where
we split a record term $V$ on some label $\ell$, binding it to $x$ and
the remainder to $y$. The label we wish to split on must be present
with some type $A$, hence we require that
$V : \Record{\ell : \Pre{A}; R}$. This restriction prohibits us from
splitting on an absent or presence polymorphic label. The
continuation of the splitting, $N$, must then have some computation
type $C$ subject to the following restriction: $N : C$ must be
well-typed under the additional assumptions $x : A$ and
$y : \Record{\ell : \Abs; R}$, statically ensuring that it is not
possible to split on $\ell$ again in the continuation $N$.
%
The \tylab{Case} rule is similar, but has two possible continuations:
the success continuation, $M$, and the fall-through continuation, $N$.
The label being matched must be present with some type $A$ in the type
of the scrutinee, thus we require $V : [\ell : \Pre{A};R]$. The
success continuation has some computation type $C$ under the assumption
that the binder $x : A$, whilst the fall-through continuation also has
type $C$ subject to the restriction that the binder
$y : [\ell : \Abs;R]$, which statically enforces that no subsequent
case split in $N$ can match on $\ell$.
%
The \tylab{Absurd} rule states that we can ascribe any computation type to
the term $\Absurd~V$ if $V$ has the empty type $[]$.
%
The trivial computation term is typed by the \tylab{Return} rule,
which says that $\Return\;V$ has computation type $A \eff E$ if the
value $V$ has type $A$.
%
The \tylab{Let} rule types let bindings. The computation being bound,
$M$, must have computation type $A \eff E$, whilst the continuation,
$N$, must have computation type $C$ subject to the additional assumption
that the binder $x : A$.
\section{Dynamic semantics}
\label{sec:base-language-dynamic-semantics}
%
\begin{figure}
\begin{reductions}
\semlab{App} & (\lambda x^A . \, M) V &\reducesto& M[V/x] \\
\semlab{TyApp} & (\Lambda \alpha^K . \, M) A &\reducesto& M[A/\alpha] \\
\semlab{Split} & \Let \; \Record{\ell = x;y} = \Record{\ell = V;W} \; \In \; N &\reducesto& N[V/x,W/y] \\
\semlab{Case_1} &
\Case \; (\ell~V)^R \{ \ell \; x \mapsto M; y \mapsto N\} &\reducesto& M[V/x] \\
\semlab{Case_2} &
\Case \; (\ell~V)^R \{ \ell' \; x \mapsto M; y \mapsto N\} &\reducesto& N[(\ell~V)^R/y], \hfill\quad \text{if } \ell \neq \ell' \\
\semlab{Let} &
\Let \; x \revto \Return \; V \; \In \; N &\reducesto& N[V/x] \\
\semlab{Lift} &
\EC[M] &\reducesto& \EC[N], \hfill\quad \text{if } M \reducesto N \\
\end{reductions}
\begin{syntax}
\slab{Evaluation contexts} & \mathcal{E} \in \EvalCat &::=& [~] \mid \Let \; x \revto \mathcal{E} \; \In \; N
\end{syntax}
%
\dhil{Describe evaluation contexts as functions: decompose and plug.}
%
%%\[
% Evaluation context lift
%% \inferrule*[Lab=\semlab{Lift}]
%% { M \reducesto N }
%% { \mathcal{E}[M] \reducesto \mathcal{E}[N]}
%% \]
\caption{Contextual small-step semantics}
\label{fig:base-language-small-step}
\end{figure}
%
In this section I will present the dynamic semantics of \BCalc{}. I
have opted to use a \citet{Felleisen87}-style contextual
small-step semantics, since in conjunction with fine-grain
call-by-value (FGCBV), it yields a considerably simpler semantics than
the traditional structural operational semantics
(SOS)~\cite{Plotkin04a}, because only the rule for let bindings admits
a continuation, whereas in ordinary call-by-value SOS each congruence
rule admits a continuation.
%
The simpler semantics comes at the expense of a more verbose syntax,
which is not a concern as one can readily convert between fine-grain
call-by-value and ordinary call-by-value.
The reduction semantics are based on a substitution model of
computation. Thus, before presenting the reduction rules, we define an
adequate substitution function. As usual we work up to
$\alpha$-conversion~\cite{Church32} of terms in $\BCalc{}$.
%
\paragraph{Term substitution}
We define a term substitution map,
$\sigma : (\VarCat \times \ValCat)^\ast$ as list of pairs mapping a
variable to its value replacement. We denote a single mapping as $V/x$
meaning substitute $V$ for the variable $x$. We write multiple
mappings using list notation, i.e. $[V_0/x_0,\dots,V_n/x_n]$. The
domain of a substitution map is the set generated by projecting the first
component, i.e.
%
\[
\bl
\dom : (\VarCat \times \ValCat)^\ast \to \mathcal{P}(\VarCat)\\
\dom(\sigma) \defas \{ x \mid (\_/x) \in \sigma \}
\el
\]
%
The application of a term substitution map on a term $t \in \TermCat$,
written $t\sigma$, is defined inductively on the term structure as
follows.
%
\[
\ba[t]{@{~}l@{~}c@{~}r}
\begin{eqs}
x\sigma &\defas& \begin{cases}
V & \text{if } (x, V) \in \sigma\\
x & \text{otherwise}
\end{cases}\\
(\lambda x^A.M)\sigma &\simdefas& \lambda x^A.M\sigma\\
(\Lambda \alpha^K.M)\sigma &\defas& \Lambda \alpha^K.M\sigma\\
(V~T)\sigma &\defas& V\sigma~T
\end{eqs}
&~~&
\begin{eqs}
(V~W)\sigma &\defas& V\sigma~W\sigma\\
\Unit\sigma &\defas& \Unit\\
\Record{\ell = V; W}\sigma &\defas& \Record{\ell = V\sigma;W\sigma}\\
(\ell~V)^R\sigma &\defas& (\ell~V\sigma)^R\\
\end{eqs}\bigskip\\
\multicolumn{3}{c}{
\begin{eqs}
(\Let\;\Record{\ell = x; y} = V\;\In\;N)\sigma &\simdefas& \Let\;\Record{\ell = x; y} = V\sigma\;\In\;N\sigma\\
(\Case\;(\ell~V)^R\{
\ell~x \mapsto M
; y \mapsto N \})\sigma
&\simdefas&
\Case\;(\ell~V\sigma)^R\{
\ell~x \mapsto M\sigma
; y \mapsto N\sigma \}\\
(\Let\;x \revto M \;\In\;N)\sigma &\simdefas& \Let\;x \revto M\sigma \;\In\;N\sigma
\end{eqs}}
\ea
\]
%
The attentive reader will notice that we are using the same notation
for type and term substitutions. In fact, we shall go further and
unify the two notions of substitution by combining them. As such we
may think of a combined substitution map as a pair of a term
substitution map and a type substitution map, i.e.
$\sigma : (\VarCat \times \ValCat)^\ast \times (\TyVarCat \times
\TypeCat)^\ast$. The application of a combined substitution is mostly the
same as the application of a term substitution map save for a couple of
equations in which we need to apply the type substitution map
component to a type annotation and type abstraction which now might
require a change of name of the bound type variable
%
\[
\bl
(\lambda x^A.M)\sigma \defas \lambda x^{A\sigma.2}.M\sigma, \qquad
(V~T)\sigma \defas V\sigma~T\sigma.2, \qquad
(\ell~V)^R\sigma \defas (\ell~V\sigma)^{R\sigma.2}\medskip\\
\begin{eqs}
(\Lambda \alpha^K.M)\sigma &\simdefas& \Lambda \alpha^K.M\sigma\\
(\Case\;(\ell~V)^R\{
\ell~x \mapsto M
; y \mapsto N \})\sigma
&\simdefas&
\Case\;(\ell~V\sigma)^{R\sigma.2}\{
\ell~x \mapsto M\sigma
; y \mapsto N\sigma \}.
\end{eqs}
\el
\]
%
% We shall go further and use the
% notation to mean simultaneous substitution of types and terms, that is
% we
% %
% We justify this choice by the fact that we can lift type substitution
% pointwise on the term syntax constructors, enabling us to use one
% uniform notation for substitution.
% %
% Thus we shall generally allow a mix
% of pairs of variables and values and pairs of type variables and types
% to occur in the same substitution map.
\paragraph{Reduction semantics}
The reduction relation $\reducesto \subseteq \CompCat \times \CompCat$
relates a computation term to another if the former can reduce to the
latter in a single step. Figure~\ref{fig:base-language-small-step}
depicts the reduction rules. The application rules \semlab{App} and
\semlab{TyApp} eliminate a lambda and type abstraction, respectively,
by substituting the argument for the parameter in their body
computation $M$.
%
Record splitting is handled by the \semlab{Split} rule: splitting on
some label $\ell$ binds the payload $V$ to $x$ and the remainder $W$
to $y$ in the continuation $N$.
%
Disjunctive case splitting is handled by the two rules
\semlab{Case_1} and \semlab{Case_2}. The former rule handles the
success case, when the scrutinee's tag $\ell$ matches the tag of the
success clause, thus binds the payload $V$ to $x$ and proceeds to
evaluate the continuation $M$. The latter rule handles the
fall-through case; here the scrutinee gets bound to $y$ and
evaluation proceeds with the continuation $N$.
%
The \semlab{Let} rule eliminates a trivial computation term
$\Return\;V$ by substituting $V$ for $x$ in the continuation $N$.
%
\paragraph{Evaluation contexts}
Recall from Section~\ref{sec:base-language-terms},
Figure~\ref{fig:base-language-term-syntax} that the syntax of let
bindings allows a general computation term $M$ to occur on the right
hand side of the binding, i.e. $\Let\;x \revto M \;\In\;N$. Thus we
are seemingly stuck in the general case, as the \semlab{Let} rule only
applies if the right hand side is a trivial computation.
%
However, it is at this stage we make use of the notion of
\emph{evaluation contexts} due to \citet{Felleisen87}. An evaluation
context is a syntactic construction which decomposes the dynamic
semantics into a set of base rules (i.e. the rules presented thus far)
and an inductive rule, which enables us to focus on a particular
computation term, $M$, in some larger context, $\EC$, and reduce it in
the said context to another computation $N$ if $M$ reduces outside of
the context to that particular $N$. In our formalism, we call this
rule \semlab{Lift}. Evaluation contexts are generated from the empty
context ($[~]$) and let expressions ($\Let\;x \revto \EC \;\In\;N$).
The choices of using fine-grain call-by-value and evaluation contexts
may seem odd, if not arbitrary at this point; the reader may wonder
with good reason why we elect to use fine-grain call-by-value over
ordinary call-by-value. In Chapter~\ref{ch:unary-handlers} we will
reap the benefits from our design choices, as we shall see that the
combination of fine-grain call-by-value and evaluation contexts
provide the basis for a convenient, simple semantic framework for
working with continuations.
\section{Metatheoretic properties of \BCalc{}}
\label{sec:base-language-metatheory}
Thus far we have defined the syntax, static semantics, and dynamic
semantics of \BCalc{}. In this section, we state and prove some
customary metatheoretic properties about \BCalc{}.
%
We begin by showing that substitutions preserve typability.
%
\begin{lemma}[Preservation of typing under substitution]\label{lem:base-language-subst}
Let $\sigma$ be any substitution, $V \in \ValCat$ be any
value, and $M \in \CompCat$ be any computation such that
$\typ{\Delta;\Gamma}{V : A}$ and $\typ{\Delta;\Gamma}{M : C}$; then
$\typ{\Delta;\Gamma\sigma}{V\sigma : A\sigma}$ and
$\typ{\Delta;\Gamma\sigma}{M\sigma : C\sigma}$.
\end{lemma}
%
\begin{proof}
By induction on the typing derivations.
\end{proof}
%
\dhil{It is clear to me at this point, that I want to coalesce the
substitution functions. Possibly define them as maps rather than ordinary functions.}
The reduction semantics satisfy a \emph{unique decomposition}
property, which guarantees the existence and uniqueness of complete
decomposition for arbitrary computation terms into evaluation
contexts.
%
\begin{lemma}[Unique decomposition]\label{lem:base-language-uniq-decomp}
For any computation $M \in \CompCat$ it holds that $M$ is either
stuck or there exists a unique evaluation context $\EC \in \EvalCat$
and a redex $N \in \CompCat$ such that $M = \EC[N]$.
\end{lemma}
%
\begin{proof}
By structural induction on $M$.
\begin{description}
\item[Base step] $M = N$ where $N$ is either $\Return\;V$,
$\Absurd^C\;V$, $V\,W$, or $V\,T$. In each case take $\EC = [\,]$
such that $M = \EC[N]$.
\item[Inductive step]
%
There are several cases to consider. In each case we must find an
evaluation context $\EC$ and a computation term $M'$ such that
$M = \EC[M']$.
\begin{itemize}
\item[Case] $M = \Let\;\Record{\ell = x; y} = V\;\In\;N$: Take $\EC = [\,]$ such that $M = \EC[\Let\;\Record{\ell = x; y} = V\;\In\;N]$.
\item[Case] $M = \Case\;V\,\{\ell\,x \mapsto M'; y \mapsto N\}$:
Take $\EC = [\,]$ such that
$M = \EC[\Case\;V\,\{\ell\,x \mapsto M'; y \mapsto N\}]$.
\item[Case] $M = \Let\;x \revto M' \;\In\;N$: By the induction
hypothesis it follows that $M'$ is either stuck or it
decomposes (uniquely) into an evaluation context $\EC'$ and a
redex $N'$. If $M'$ is stuck, then so is $M$. Otherwise take
$\EC = \Let\;x \revto \EC'\;\In\;N$ such that $M = \EC[N']$.
\end{itemize}
\end{description}
\end{proof}
%
The calculus enjoys a rather strong \emph{progress} property, which
states that \emph{every} closed computation term reduces to a trivial
computation term $\Return\;V$ for some value $V$. In other words, any
realisable function in \BCalc{} is effect-free and total.
%
\begin{definition}[Computation normal form]\label{def:base-language-comp-normal}
A computation $M \in \CompCat$ is said to be \emph{normal} if it is
of the form $\Return\; V$ for some value $V \in \ValCat$.
\end{definition}
%
\begin{theorem}[Progress]\label{thm:base-language-progress}
Suppose $\typ{}{M : C}$, then $M$ is normal or there exists
$\typ{}{N : C}$ such that $M \reducesto^\ast N$.
\end{theorem}
%
\begin{proof}
Proof by induction on typing derivations.
\end{proof}
%
% \begin{corollary}
% Every closed computation term in \BCalc{} is terminating.
% \end{corollary}
%
The calculus also satisfies the \emph{subject reduction} property,
which states that if some computation $M$ is well typed and reduces to
some other computation $M'$, then $M'$ is also well typed.
%
\begin{theorem}[Subject reduction]\label{thm:base-language-preservation}
Suppose $\typ{\Gamma}{M : C}$ and $M \reducesto M'$, then
$\typ{\Gamma}{M' : C}$.
\end{theorem}
%
\begin{proof}
Proof by induction on typing derivations.
\end{proof}
\section{A primitive effect: recursion}
\label{sec:base-language-recursion}
%
As evident from Theorem~\ref{thm:base-language-progress}, \BCalc{}
admits no computational effects. As a consequence every realisable
program is terminating. Thus the calculus provides a solid and minimal
basis for studying the expressiveness of any extension, and in
particular, which primitive effects any such extension may sneak into
the calculus.
However, we cannot write many (if any) interesting programs in
\BCalc{}. The calculus is simply not expressive enough. In order to
bring it closer to the ML-family of languages we endow the calculus
with a fixpoint operator which introduces recursion as a primitive
effect. We dub the resulting calculus \BCalcRec{}.
%
First we augment the syntactic category of values with a new
abstraction form for recursive functions.
%
\begin{syntax}
& V,W \in \ValCat &::=& \cdots \mid~ \tikzmarkin{rec1} \Rec \; f^{A \to C} \, x.M \tikzmarkend{rec1}
\end{syntax}
%
The $\Rec$ construct binds the function name $f$ and its argument $x$
in the body $M$. Typing of recursive functions is standard.
%
\begin{mathpar}
\inferrule*[Lab=\tylab{Rec}]
{\typ{\Delta;\Gamma,f : A \to C, x : A}{M : C}}
{\typ{\Delta;\Gamma}{(\Rec \; f^{A \to C} \, x . M) : A \to C}}
\end{mathpar}
%
The reduction semantics are also standard.
%
\begin{reductions}
\semlab{Rec} &
(\Rec \; f^{A \to C} \, x.M)\,V &\reducesto& M[(\Rec \; f^{A \to C}\,x.M)/f, V/x]
\end{reductions}
%
Every occurrence of $f$ in $M$ is replaced by the recursive abstractor
term, while every $x$ in $M$ is replaced by the value argument $V$.
The introduction of recursion means we obtain a slightly weaker
progress theorem as some programs may now diverge.
%
\begin{theorem}[Progress]
\label{thm:base-rec-language-progress}
Suppose $\typ{}{M : C}$, then $M$ is normal or there exists
$\typ{}{N : C}$ such that $M \reducesto^\ast N$.
\end{theorem}
%
\begin{proof}
Similar to the proof of Theorem~\ref{thm:base-language-progress}.
\end{proof}
\subsection{Tracking divergence via the effect system}
\label{sec:tracking-div}
%
With the $\Rec$-operator in \BCalcRec{} we can now implement the
factorial function.
%
\[
\bl
\dec{fac} : \Int \to \Int \eff \emptyset\\
\dec{fac} \defas \Rec\;f~n.
\ba[t]{@{}l}
\Let\;is\_zero \revto n = 0\;\In\\
\If\;is\_zero\;\Then\; \Return\;1\\
\Else\;\ba[t]{@{~}l}
\Let\; n' \revto n - 1 \;\In\\
\Let\; m \revto f~n' \;\In\\
n * m
\ea
\ea
\el
\]
%
The $\dec{fac}$ function computes $n!$ for any non-negative integer
$n$. If $n$ is negative then $\dec{fac}$ diverges as the function will
repeatedly select the $\Else$-branch in the conditional
expression. Thus this function is not total on its domain. Yet the
effect signature does not alert us about the potential divergence. In
fact, in this particular instance the effect row on the computation
type is empty, which might deceive the doctrinaire to think that this
function is `pure'. Whether this function is pure or impure depends on
the precise notion of purity -- which we have yet to choose. In
Section~\ref{sec:notions-of-purity} we shall make clear the notion of
purity that we have in mind; however, first let us briefly illustrate how
we might utilise the effect system to track divergence.
The key to tracking divergence is to modify the \tylab{Rec} rule to inject
some primitive operation into the effect row.
%
\begin{mathpar}
\inferrule*[Lab=$\tylab{Rec}^\ast$]
{\typ{\Delta;\Gamma,f : A \to B\eff\{\dec{Div}:\Zero\}, x : A}{M : B\eff\{\dec{Div}:\Zero\}}}
{\typ{\Delta;\Gamma}{(\Rec \; f^{A \to B\eff\{\dec{Div}:\Zero\}} \, x .M) : A \to B\eff\{\dec{Div}:\Zero\}}}
\end{mathpar}
%
In this typing rule we have chosen to inject an operation named
$\dec{Div}$ into the effect row of the computation type on the
recursive binder $f$. The operation is primitive, because it can never
be directly invoked, rather, it occurs as a side-effect of applying a
$\Rec$-definition.
%
Using this typing rule we get that
$\dec{fac} : \Int \to \Int \eff \{\dec{Div}:\Zero\}$. Consequently,
every application-site of $\dec{fac}$ must now permit the $\dec{Div}$
operation in order to type check.
%
\begin{example}
We will use the following suspended computation to demonstrate
effect tracking in action.
%
\[
\bl
\lambda\Unit. \dec{fac}~3
\el
\]
%
The computation calculates $3!$ when forced.
%
We will now give a typing derivation for this computation to
illustrate how the application of $\dec{fac}$ causes its effect row
to be propagated outwards. Let
$\Gamma = \{\dec{fac} : \Int \to \Int \eff \{\dec{Div}:\Zero\}\}$.
%
\begin{mathpar}
\inferrule*[Right={\tylab{Lam}}]
{\inferrule*[Right={\tylab{App}}]
{\typ{\emptyset;\Gamma,\Unit:\Record{}}{\dec{fac} : \Int \to \Int \eff \{\dec{Div}:\Zero\}}\\
\typ{\emptyset;\Gamma,\Unit:\Record{}}{3 : \Int}
}
{\typc{\emptyset;\Gamma,\Unit:\Record{}}{\dec{fac}~3 : \Int}{\{\dec{Div}:\Zero\}}}
}
{\typ{\emptyset;\Gamma}{\lambda\Unit.\dec{fac}~3 : \Record{} \to \Int \eff \{\dec{Div}:\Zero\}}}
\end{mathpar}
%
The information that the computation applies a possibly divergent
function internally gets reflected externally in its effect
signature.
\end{example}
%
A possible inconvenience of the current formulation of
$\tylab{Rec}^\ast$ is that recursion cannot be mixed with other
computational effects. The reason is that the effect row on
$A \to B\eff \{\dec{Div}:\Zero\}$ is closed. Thus in a practical
general-purpose programming language implementation it is likely to be
more convenient to leave the tail of the effect row open so as to allow
recursion to be used in larger effect contexts. The rule formulation
is also rather coarse as it renders every $\Rec$-definition as
possibly divergent -- even definitions that are obviously
non-divergent such as the $\Rec$-variation of the identity function:
$\Rec\;f\,x.x$. A practical implementation could utilise a static
termination checker~\cite{Walther94} to obtain more fine-grained
tracking of divergence.
% By fairly lightweight means we can obtain a finer analysis of
% $\Rec$-definitions by simply having an additional typing rule for
% the application of $\Rec$.
% %
% \begin{mathpar}
% \inferrule*[lab=$\tylab{AppRec}^\ast$]
% { E' = \{\dec{Div}:\Zero\} \uplus E\\
% \typ{\Delta}{E'}\\\\
% \typ{\Delta;\Gamma}{\Rec\;f^{A \to B \eff E}\,x.M : A \to B \eff E}\\
% \typ{\Delta;\Gamma}{W : A}
% }
% {\typ{\Delta;\Gamma}{(\Rec\;f^{A \to B \eff E}\,x.M)\,W : B \eff E'}}
% \end{mathpar}
% %
\subsection{Notions of purity}
\label{sec:notions-of-purity}
The term `pure' is heavily overloaded in the programming literature.
%
\dhil{In this thesis we use the Haskell notion of purity.}

\section{Row polymorphism}
\label{sec:row-polymorphism}
\dhil{A discussion of alternative row systems}

% \section{Type and effect inference}
% \dhil{While I would like to detail the type and effect inference, it
% may not be worth the effort. The reason I would like to do this goes
% back to 2016 when Richard Eisenberg asked me about how we do effect
% inference in Links.}

\chapter{Unary handlers}
\label{ch:unary-handlers}
%
In this chapter we study various flavours of unary effect
handlers~\cite{PlotkinP13}, that is handlers of a single
computation. Concretely, we shall study four variations of effect
handlers: in Section~\ref{sec:unary-deep-handlers} we augment the base
calculus \BCalc{} with \emph{deep} effect handlers yielding the
calculus \HCalc{}; subsequently in
Sections~\ref{sec:unary-parameterised-handlers} and
\ref{sec:unary-default-handlers} we refine \HCalc{} with two practically
relevant kinds of handlers, namely, \emph{parameterised} and
\emph{default} handlers. The former is a specialisation of a
particular class of deep handlers, whilst the latter is important for
programming at large. Finally in
Section~\ref{sec:unary-shallow-handlers} we study \emph{shallow}
effect handlers which are an alternative to deep effect handlers.
% , First we endow \BCalc{}
% with a syntax for performing effectful operations, yielding the
% calculus \EffCalc{}. On its own the calculus is not very interesting,
% however, as the sole addition of the ability to perform effectful
% operations does not provide any practical note-worthy
% expressiveness. However, as we augment the calculus with different
% forms of effect handlers, we begin be able to implement interesting
% that are either difficult or impossible to realise in \BCalc{} in
% direct-style. Concretely, we shall study four variations of effect
% handlers, each as a separate extension to \EffCalc{}: deep, default,
% parameterised, and shallow handlers.
\section{Deep handlers}
\label{sec:unary-deep-handlers}
%
Programming with effect handlers is a dichotomy of \emph{performing}
and \emph{handling} of effectful operations -- or alternatively a
dichotomy of \emph{constructing} and \emph{deconstructing}. An operation
is a constructor of an effect without a predefined semantics. A
handler deconstructs effects by pattern-matching on their operations. By
matching on a particular operation, a handler instantiates the said
operation with a particular semantics of its own choosing. The key
ingredient to make this work in practice is \emph{delimited
control}~\cite{Landin65,Landin65a,Landin98,FelleisenF86,DanvyF90}. Performing
an operation reifies the remainder of the computation up to the
nearest enclosing handler of the said operation.
As our starting point we take the regular base calculus, \BCalc{},
without the recursion operator. We elect to do so to understand
exactly which primitive effects deep handlers bring into our resulting
calculus.
%
Deep handlers are defined as folds (catamorphisms) over computation
trees, meaning they provide a uniform semantics to the handled
operations of a given computation. In contrast, shallow handlers are
defined as case-splits over computation trees, and thus, allow a
nonuniform semantics to be given to operations. We will discuss this
last point in greater detail in
Section~\ref{sec:unary-shallow-handlers}.
\subsection{Performing effectful operations}
\label{sec:eff-language-perform}
An effectful operation is a purely syntactic construction, which has
no predefined dynamic semantics. The introduction form for effectful
operations is a computation term.
%
\begin{syntax}
&M,N \in \CompCat &::=& \cdots \mid~ \tikzmarkin{do1} (\Do \; \ell~V)^E \tikzmarkend{do1}
\end{syntax}
%
Informally, the intended behaviour of the new computation term
$(\Do\; \ell~V)^E$ is that it performs some operation $\ell$ with
value argument $V$. Thus the $\Do$-construct is similar to the typical
exception-signalling $\keyw{throw}$ or $\keyw{raise}$ constructs found
in programming languages with support for exceptions. The term is
annotated with an effect row $E$, providing a handle to obtain the
current effect context. We make use of this effect row during typing:
%
\begin{mathpar}
\inferrule*[Lab=\tylab{Do}]
{ E = \{\ell : A \to B; R\} \\
\typ{\Delta}{E} \\
\typ{\Delta;\Gamma}{V : A}
}
{\typc{\Delta;\Gamma}{(\Do \; \ell \; V)^E : B}{E}}
\end{mathpar}
%
An operation invocation is only well-typed if the effect row $E$ is
well-kinded and contains the said operation with a present type; in
other words, the current effect context permits the operation. The
argument type $A$ must be the same as the domain of the operation.
%
We slightly abuse notation by using the function space arrow, $\to$,
to also denote the operation space. Although, the function and
operation spaces are separate entities, we may think of the operation
space as a subspace of the function space in which every effect row is
empty, that is, every operation has a type of the form
$A \to B \eff \emptyset$. The reason that the effect row is always
empty is that any effects an operation might have are ultimately
conferred by a handler.
\dhil{Introduce notation for computation trees.}
\subsection{Handling of effectful operations}
%
We now present the elimination form for effectful operations, namely,
handlers.
%
First we define notation for handler kinds and types.
%
\begin{syntax}
\slab{Kinds} &K \in \KindCat &::=& \cdots \mid~ \tikzmarkin{handlerkinds1} \Handler \tikzmarkend{handlerkinds1}\\
\slab{Handler types} &F \in \HandlerTypeCat &::=& C \Harrow D\\
\slab{Types} &T \in \TypeCat &::=& \cdots \mid~ \tikzmarkin{typeswithhandler} F \tikzmarkend{typeswithhandler}
\end{syntax}
%
The syntactic category of kinds is augmented with the kind $\Handler$
which we will ascribe to handler types $F$. The arrow, $\Harrow$,
denotes the handler space. Type structure suggests that a handler is a
transformer of computations, as by the types it takes a computation of
type $C$ and returns another computation of type $D$. We use the
following kinding rule to check that a handler type is well-kinded.
%
\begin{mathpar}
\inferrule*[Lab=\klab{Handler}]
{ \Delta \vdash C : \Comp \\
\Delta \vdash D : \Comp
}
{\Delta \vdash C \Harrow D : \Handler}
\end{mathpar}
%
With the type structure in place, we present the term syntax for
handlers. The addition of handlers augments the syntactic category of
computations with a new computation form as well as introducing a new
syntactic category of handler definitions.
%
\begin{syntax}
\slab{Computations} &M,N \in \CompCat &::=& \cdots \mid~ \tikzmarkin{deephandlers1} \Handle \; M \; \With \; H\tikzmarkend{deephandlers1}\\[1ex]
\slab{Handlers} &H \in \HandlerCat &::=& \{ \Return \; x \mapsto M \}
\mid \{ \ell \; p \; r \mapsto N \} \uplus H\\
\slab{Terms} &t \in \TermCat &::=& \cdots \mid~ \tikzmarkin{handlerdefs} H \tikzmarkend{handlerdefs}
\end{syntax}
%
The handle construct $(\Handle \; M \; \With \; H)$ is the counterpart
to $\Do$. It runs computation $M$ using handler $H$. A handler $H$
consists of a return clause $\{\Return \; x \mapsto M\}$ and a
possibly empty set of operation clauses
$\{\ell \; p_\ell \; r_\ell \mapsto N_\ell\}_{\ell \in \mathcal{L}}$.
%
The return clause $\{\Return \; x \mapsto M\}$ defines how to
interpret the final return value of a handled computation. The
variable $x$ is bound to the final return value in the body $M$.
%
Each operation clause
$\{\ell \; p_\ell \; r_\ell \mapsto N_\ell\}_{\ell \in \mathcal{L}}$
defines how to interpret an invocation of a particular operation
$\ell$. The variables $p_\ell$ and $r_\ell$ are bound in the body
$N_\ell$: $p_\ell$ binds the argument carried by the operation and
$r_\ell$ binds the continuation of the invocation site of $\ell$.
Given a handler $H$, we often wish to refer to the clause for a
particular operation or the return clause; for these purposes we
define two convenient projections on handlers in the metalanguage.
\[
\ba{@{~}r@{~}c@{~}l@{~}l}
\hell &\defas& \{\ell\; p\; r \mapsto N \}, &\quad \text{where } \{\ell\; p\; r \mapsto N \} \in H\\
\hret &\defas& \{\Return\; x \mapsto N \}, &\quad \text{where } \{\Return\; x \mapsto N \} \in H\\
\ea
\]
%
The $\hell$ projection yields the singleton set consisting of the
operation clause in $H$ that handles the operation $\ell$, whilst
$\hret$ yields the singleton set containing the return clause of $H$.
%
We define the \emph{domain} of a handler as the set of operation
labels it handles, i.e.
%
\begin{equations}
\dom &:& \HandlerCat \to \LabelCat\\
\dom(\{\Return\;x \mapsto M\}) &\defas& \emptyset\\
\dom(\{\ell\; p\;r \mapsto M\} \uplus H) &\defas& \{\ell\} \cup \dom(H)
\end{equations}
\subsection{Static semantics}
The typing of effect handlers is slightly more involved than the
typing of the $\Do$-construct.
%
\begin{mathpar}
\inferrule*[Lab=\tylab{Handle}]
{
\typ{\Delta;\Gamma}{M : C} \\
\typ{\Delta;\Gamma}{H : C \Harrow D}
}
{\typ{\Delta;\Gamma}{\Handle \; M \; \With\; H : D}}
%\mprset{flushleft}
\inferrule*[Lab=\tylab{Handler}]
{{\bl
C = A \eff \{(\ell_i : A_i \to B_i)_i; R\} \\
D = B \eff \{(\ell_i : P_i)_i; R\}\\
H = \{\Return\;x \mapsto M\} \uplus \{ \ell_i\;p_i\;r_i \mapsto N_i \}_i
\el}\\\\
\typ{\Delta;\Gamma, x : A}{M : D}\\\\
[\typ{\Delta;\Gamma,p_i : A_i, r_i : B_i \to D}{N_i : D}]_i
}
{\typ{\Delta;\Gamma}{H : C \Harrow D}}
\end{mathpar}
%
%
The \tylab{Handler} rule is where most of the work happens. The effect
rows on the input computation type $C$ and the output computation type
$D$ must mention every operation in the domain of the handler. In the
output row those operations may be either present ($\Pre{A}$), absent
($\Abs$), or polymorphic in their presence ($\theta$), whilst in the
input row they must be mentioned with a present type as those types
are used to type operation clauses.
%
In each operation clause the resumption $r_i$ must have the same
return type, $D$, as its handler. In the return clause the binder $x$
has type $A$, the value type of the input computation type $C$.
\subsection{Dynamic semantics}
We augment the operational semantics with two new reduction rules: one
for handling return values and another for handling operations.
%{\small{
\begin{reductions}
\semlab{Ret} &
\Handle \; (\Return \; V) \; \With \; H &\reducesto& N[V/x], \hfill\text{where } \hret = \{ \Return \; x \mapsto N \} \\
\semlab{Op} &
\Handle \; \EC[\Do \; \ell \, V] \; \With \; H
&\reducesto& N[V/p, \lambda y . \, \Handle \; \EC[\Return \; y] \; \With \; H/r], \\
\multicolumn{4}{@{}r@{}}{
\hfill\ba[t]{@{~}r@{~}l}
\text{where}& \hell = \{ \ell \; p \; r \mapsto N \}\\
\text{and} & \ell \notin \BL(\EC)
\ea
}
\end{reductions}%}}%
%
The rule \semlab{Ret} invokes the return clause of the current handler
$H$ and substitutes $V$ for $x$ in the body $N$.
%
The rule \semlab{Op} handles an operation $\ell$, provided that the
handler definition $H$ contains a corresponding operation clause for
$\ell$ and that $\ell$ does not appear in the \emph{bound labels}
($\BL$) of the inner context $\EC$. The bound label condition enforces
that an operation is always handled by the nearest enclosing suitable
handler.
%
Formally, we define the notion of bound labels,
$\BL : \EvalCat \to \LabelCat$, inductively over the structure of
evaluation contexts.
%
\begin{equations}
\BL([~]) &=& \emptyset \\
\BL(\Let\;x \revto \EC\;\In\;N) &=& \BL(\EC) \\
\BL(\Handle\;\EC\;\With\;H) &=& \BL(\EC) \cup \dom(H) \\
\end{equations}
%
To illustrate the necessity of this condition consider the following
example with two nested handlers which both handle the same operation
$\ell$.
%
\[
\bl
\ba{@{~}r@{~}c@{~}l}
H_{\mathsf{inner}} &\defas& \{\ell\;p\;r \mapsto r~42; \Return\;x \mapsto \Return~x\}\\
H_{\mathsf{outer}} &\defas& \{\ell\;p\;r \mapsto r~0;\Return\;x \mapsto \Return~x \}
\ea\medskip\\
\Handle \;
\left(\Handle\; \Do\;\ell~\Record{}\;\With\; H_{\mathsf{inner}}\right)\;
\With\; H_{\mathsf{outer}}
\reducesto^+ \begin{cases}
\Return\;42 & \text{Innermost}\\
\Return\;0 & \text{Outermost}
\end{cases}
\el
\]
%
Without the bound label condition there are two possible results as
the choice of which handler to pick for $\ell$ is ambiguous, meaning
reduction would be nondeterministic. Conversely, with the bound label
condition we obtain that the above term reduces to $\Return\;42$,
because $\ell$ is bound in the computation term of the outermost
$\Handle$.
%
We have made a conscious design decision by always selecting the nearest
enclosing suitable handler for any operation. In fact, we have made
the \emph{only} natural and sensible choice as picking any other
handler than the nearest enclosing renders programming with effect
handlers anti-modular. Consider always selecting the outermost
suitable handler, then the meaning of closed program composition
cannot be derived from its immediate constituents. For example,
consider using integer addition as the composition operator to compose
the inner handle expression from above with a copy of itself.
%
\[
\bl
\dec{fortytwo} \defas \Handle\;\Do\;\ell~\Unit\;\With\;H_{\mathsf{inner}} \medskip\\
\EC[\dec{fortytwo} + \dec{fortytwo}] \reducesto^+ \begin{cases}
\Return\; 84 & \text{when $\EC$ is empty}\\
?? & \text{otherwise}
\end{cases}
\el
\]
%
Clearly, if the ambient context $\EC$ is empty, then we can derive the
result by reasoning locally about each constituent separately and
subsequently add their results together to obtain the computation term
$\Return\;84$. However, if the ambient context is nonempty, then we
need to account for the possibility that some handler for $\ell$ could
occur in the context. For instance if
$\EC = \Handle\;[~]\;\With\;H_{\mathsf{outer}}$ then the result would
be $\Return\;0$, which we cannot derive locally from looking at the
constituents. Thus we argue that if we want programming to remain
modular and compositional, then we must necessarily always select the
nearest enclosing suitable handler to handle an operation invocation.
%
\dhil{Effect forwarding (the first condition)}
The metatheoretic properties of $\BCalc$ transfer to $\HCalc$ with
little extra effort, although we must amend the definition of
computation normal forms as there are now two ways in which a
computation term can terminate: successfully returning a value or
getting stuck on an unhandled operation.
%
\begin{definition}[Computation normal forms]
We say that a computation term $N$ is normal with respect to an
effect signature $E$, if $N$ is either of the form $\Return\;V$, or
$\EC[\Do\;\ell\,W]$ where $\ell \in E$ and $\ell \notin \BL(\EC)$.
\end{definition}
%
\begin{theorem}[Progress]
Suppose $\typ{}{M : C}$, then either there exists $\typ{}{N : C}$
such that $M \reducesto^+ N$ and $N$ is normal, or $M$ diverges.
\end{theorem}
%
\begin{theorem}[Preservation]
Suppose $\typ{\Gamma}{M : C}$ and $M \reducesto M'$, then
$\typ{\Gamma}{M' : C}$.
\end{theorem}
\subsection{Coding nontermination}
\subsection{Programming with deep handlers}
\dhil{Exceptions}
\dhil{Reader}
\dhil{State}
\dhil{Nondeterminism}
\dhil{Inversion of control: generator from iterator}
\section{Parameterised handlers}
\label{sec:unary-parameterised-handlers}
\dhil{Example: Lightweight threads}
\section{Default handlers}
\label{sec:unary-default-handlers}
\section{Shallow handlers}
\label{sec:unary-shallow-handlers}
\subsection{Syntax and static semantics}
\subsection{Dynamic semantics}
\section{Flavours of control}
\subsection{Undelimited control}
\subsection{Delimited control}
\subsection{Composable control}
\chapter{N-ary handlers}
\label{ch:multi-handlers}
% \section{Syntax and Static Semantics}
% \section{Dynamic Semantics}
\section{Unifying deep and shallow handlers}
\part{Implementation}
\chapter{Continuation-passing style}
\label{ch:cps}
Continuation-passing style (CPS) is a \emph{canonical} program
notation that makes every facet of control flow and data flow
explicit. In CPS every function takes an additional function-argument
called the \emph{continuation}, which represents the next computation
in evaluation position. CPS is canonical in the sense that it is
definable in pure $\lambda$-calculus without any primitives. As an
informal illustration of CPS consider again the rudimentary factorial
function from Section~\ref{sec:tracking-div}.
%
\[
\bl
\dec{fac} : \Int \to \Int\\
\dec{fac} \defas \Rec\;f~n.
\ba[t]{@{}l}
\Let\;is\_zero \revto n = 0\;\In\\
\If\;is\_zero\;\Then\; \Return\;1\\
\Else\;\ba[t]{@{~}l}
\Let\; n' \revto n - 1 \;\In\\
\Let\; m \revto f~n' \;\In\\
n * m
\ea
\ea
\el
\]
%
The above implementation of the function $\dec{fac}$ is given in
direct-style fine-grain call-by-value. In CPS notation the
implementation of this function changes as follows.
%
\[
\bl
\dec{fac}_{\dec{cps}} : \Int \to (\Int \to \alpha) \to \alpha\\
\dec{fac}_{\dec{cps}} \defas \lambda n.\lambda k.
=_{\dec{cps}}~n~0~
(\ba[t]{@{~}l}
\lambda is\_zero.\\
\quad\ba[t]{@{~}l}
\If\;is\_zero\;\Then\; k~1\\
\Else\;
-_{\dec{cps}}~n~1~
(\lambda n'.
\dec{fac}_{\dec{cps}}~n'~
(\lambda m. *_{\dec{cps}}~n~m~
(\lambda res. k~res))))
\ea
\ea
\el
\]
%
\dhil{Adjust the example to avoid stepping into the margin.}
%
There are several worthwhile observations to make about the
differences between the two implementations $\dec{fac}$ and
$\dec{fac}_{\dec{cps}}$.
%
Firstly note that their type signatures differ. The CPS version has an
additional formal parameter of type $\Int \to \alpha$ which is the
continuation. By convention the continuation parameter is named $k$ in
the implementation. The continuation captures the remainder of
computation that ultimately produces a result of type $\alpha$, or put
differently: it determines what to do with the result returned by an
invocation of $\dec{fac}$. Semantically the continuation corresponds
to the surrounding evaluation
context.% , thus with a bit of hand-waving
% we can say that for $\EC[\dec{fac}~2]$ the entire evaluation $\EC$
% would be passed as the continuation argument to the CPS version:
% $\dec{fac}_{\dec{cps}}~2~\EC$.
%
Secondly note that every $\Let$-binding in $\dec{fac}$ has become a
function application in $\dec{fac}_{\dec{cps}}$. The functions
$=_{\dec{cps}}$, $-_{\dec{cps}}$, and $*_{\dec{cps}}$ denote the CPS
versions of equality testing, subtraction, and multiplication
respectively. Moreover, the explicit $\Return~1$ in the true branch
has been turned into an application of continuation $k$, and the
implicit return $n*m$ in the $\Else$-branch has been turned into an
explicit application of the continuation.
%
Thirdly note every function application is in tail position. This is a
characteristic property of CPS that makes CPS feasible as a practical
implementation strategy since programs in CPS notation do not
consume stack space.
\dhil{The focus of the introduction should arguably not be to explain CPS.}
\dhil{Justify CPS as an implementation technique}
\dhil{Give a side-by-side reduction example of $\dec{fac}$ and $\dec{fac}_{\dec{cps}}$.}
\dhil{Define desirable properties of a CPS translation: properly tail-recursive, no static administrative redexes}
\section{Initial target calculus}
\label{sec:target-cps}
The syntax, semantics, and syntactic sugar for the target calculus
$\UCalc$ are given in Figure~\ref{fig:cps-cbv-target}. The calculus
largely amounts to an untyped variation of $\BCalc$, specifically
we retain the syntactic distinction between values ($V$) and
computations ($M$).
%
The values ($V$) comprise lambda abstractions ($\lambda x.M$),
% recursive functions ($\Rec\,g\,x.M$),
empty tuples ($\Record{}$), pairs ($\Record{V,W}$), and first-class
labels ($\ell$).
%
Computations ($M$) comprise values ($V$), applications ($M~V$), pair
elimination ($\Let\; \Record{x, y} = V \;\In\; N$), label elimination
($\Case\; V \;\{\ell \mapsto M; x \mapsto N\}$), and explicit marking
of unreachable code ($\Absurd$). A key difference from $\BCalcRec$ is
that the function position of an application is allowed to be a
computation (i.e., the application form is $M~W$ rather than
$V~W$). Later, when we refine the initial CPS translation we will be
able to rule out this relaxation.
We give a standard small-step evaluation context-based reduction
semantics. Evaluation contexts comprise the empty context and function
application.
To make the notation more lightweight, we define syntactic sugar for
variant values, record values, list values, let binding, variant
eliminators, and record eliminators. We use pattern matching syntax
for deconstructing variants, records, and lists. For desugaring
records, we assume a failure constant $\ell_\bot$ (e.g. a divergent
term) to cope with the case of pattern matching failure.
\begin{figure}
\flushleft
\textbf{Syntax}
\begin{syntax}
\slab{Values} &U, V, W \in \UValCat &::= & x \mid \lambda x.M \mid % \Rec\,g\,x.M
\mid \Record{} \mid \Record{V, W} \mid \ell
\smallskip \\
\slab{Computations} &M,N \in \UCompCat &::= & V \mid M\,W \mid \Let\; \Record{x,y} = V \; \In \; N\\
& &\mid& \Case\; V\, \{\ell \mapsto M; y \mapsto N\} \mid \Absurd\,V
\smallskip \\
\slab{Evaluation contexts} &\EC \in \UEvalCat &::= & [~] \mid \EC\;W \\
\end{syntax}
\textbf{Reductions}
\begin{reductions}
\usemlab{App} & (\lambda x . \, M) V &\reducesto& M[V/x] \\
% \usemlab{Rec} & (\Rec\,g\,x.M) V &\reducesto& M[\Rec\,g\,x.M/g,V/x]\\
\usemlab{Split} & \Let \; \Record{x,y} = \Record{V,W} \; \In \; N &\reducesto& N[V/x,W/y] \\
\usemlab{Case_1} &
\Case \; \ell \; \{ \ell \mapsto M; y \mapsto N\} &\reducesto& M \\
\usemlab{Case_2} &
\Case \; \ell \; \{ \ell' \mapsto M; y \mapsto N\} &\reducesto& N[\ell/y], \hfill\quad \text{if } \ell \neq \ell' \\
\usemlab{Lift} &
\EC[M] &\reducesto& \EC[N], \hfill \text{if } M \reducesto N \\
\end{reductions}
\textbf{Syntactic sugar}
\[
\begin{eqs}
\Let\;x=V\;\In\;N &\equiv & N[V/x]\\
\ell \; V & \equiv & \Record{\ell; V}\\
\Record{} & \equiv & \ell_{\Record{}} \\
\Record{\ell = V; W} & \equiv & \Record{\ell, \Record{V, W}}\\
\nil &\equiv & \ell_{\nil} \\
V \cons W & \equiv & \Record{\ell_{\cons}, \Record{V, W}}\\
\Case\;V\;\{\ell\;x \mapsto M; y \mapsto N \} &\equiv&
\ba[t]{@{~}l}
\Let\;y = V\;\In\; \Let\;\Record{z,x} = y\;\In \\
\Case\; z\;\{ \ell \mapsto M; z' \mapsto N \}
\ea\\
\Let\; \Record{\ell=x;y} = V\;\In\;N &\equiv&
\ba[t]{@{~}l}
\Let\; \Record{z,z'} = V\;\In\;\Let\; \Record{x,y} = z'\;\In \\
\Case\;z\;\{\ell \mapsto N; z'' \mapsto \ell_\bot \}
\ea
\end{eqs}
\]
\caption{Untyped target calculus for the CPS translations.}
\label{fig:cps-cbv-target}
\end{figure}
\section{CPS transform for fine-grain call-by-value}
\label{sec:cps-cbv}
We start by giving a CPS translation of $\BCalc$ in
Figure~\ref{fig:cps-cbv}. Fine-grain call-by-value admits a
particularly simple CPS translation due to the separation of values
and computations. All constructs from the source language are
translated homomorphically into the target language $\UCalc$, except
for $\Return$ and $\Let$ (and type abstraction because the translation
performs type erasure). Lifting a value $V$ to a computation
$\Return~V$ is interpreted by passing the value to the current
continuation $k$. Sequencing computations with $\Let$ is translated by
applying the translation of $M$ to the translation of the continuation
$N$, which is ultimately applied to the current continuation $k$. In
addition, we explicitly $\eta$-expand the translation of a type
abstraction in order to ensure that value terms in the source calculus
translate to value terms in the target.
\begin{figure}
\flushleft
\textbf{Values} \\
\[
\bl
\begin{eqs}
\cps{-} &:& \ValCat \to \UValCat\\
\cps{x} &=& x \\
\cps{\lambda x.M} &=& \lambda x.\cps{M} \\
\cps{\Lambda \alpha.M} &=& \lambda k.\cps{M}~k \\
% \cps{\Rec\,g\,x.M} &=& \Rec\,g\,x.\cps{M}\\
\cps{\Record{}} &=& \Record{} \\
\cps{\Record{\ell = V; W}} &=& \Record{\ell = \cps{V}; \cps{W}} \\
\cps{\ell~V} &=& \ell~\cps{V} \\
\end{eqs}
\el
\]
\textbf{Computations}
\[
\bl
\begin{eqs}
\cps{-} &:& \CompCat \to \UCompCat\\
\cps{V\,W} &=& \cps{V}\,\cps{W} \\
\cps{V\,T} &=& \cps{V} \\
\cps{\Let\; \Record{\ell=x;y} = V \; \In \; N} &=& \Let\; \Record{\ell=x;y} = \cps{V} \; \In \; \cps{N} \\
\cps{\Case~V~\{\ell~x \mapsto M; y \mapsto N\}} &=&
\Case~\cps{V}~\{\ell~x \mapsto \cps{M}; y \mapsto \cps{N}\} \\
\cps{\Absurd~V} &=& \Absurd~\cps{V} \\
\cps{\Return~V} &=& \lambda k.k\,\cps{V} \\
\cps{\Let~x \revto M~\In~N} &=& \lambda k.\cps{M}(\lambda x.\cps{N}\,k) \\
\end{eqs}
\el
\]
\caption{First-order CPS translation of $\BCalc$.}
\label{fig:cps-cbv}
\end{figure}
\section{CPS transforming deep effect handlers}
\label{sec:fo-cps}
The translation of a computation term by the basic CPS translation in
Section~\ref{sec:cps-cbv} takes a single continuation parameter that
represents the context.
%
In the presence of effect handlers in the source language, it becomes
necessary to keep track of two kinds of contexts in which each
computation executes: a \emph{pure context} that tracks the state of
pure computation in the scope of the current handler, and an
\emph{effect context} that describes how to handle operations in the
scope of the current handler.
%
Correspondingly, we have both \emph{pure continuations} ($k$) and
\emph{effect continuations} ($h$).
%
As handlers can be nested, each computation executes in the context of
a \emph{stack} of pairs of pure and effect continuations.
On entry into a handler, the pure continuation is initialised to a
representation of the return clause and the effect continuation to a
representation of the operation clauses. As pure computation proceeds,
the pure continuation may grow, for example when executing a
$\Let$. If an operation is encountered then the effect continuation is
invoked.
%
The current continuation pair ($k$, $h$) is packaged up as a
\emph{resumption} and passed to the current handler along with the
operation and its argument. The effect continuation then either
handles the operation, invoking the resumption as appropriate, or
forwards the operation to an outer handler. In the latter case, the
resumption is modified to ensure that the context of the original
operation invocation can be reinstated when the resumption is invoked.
%
\subsection{Curried translation}
\label{sec:first-order-curried-cps}
We first consider a curried CPS translation that extends the
translation of Figure~\ref{fig:cps-cbv}. The extension to operations
and handlers is localised to the additional features because currying
conveniently lets us get away with a shift in interpretation: rather
than accepting a single continuation, translated computation terms now
accept an arbitrary even number of arguments representing the stack of
pure and effect continuations. Thus, we can conservatively extend the
translation in Figure~\ref{fig:cps-cbv} to cover $\HCalc$, where we
imagine there being some number of extra continuation arguments that
have been $\eta$-reduced. The translation of operations and handlers
is as follows.
%
\begin{equations}
\cps{-} &:& \CompCat \to \UCompCat\\
\cps{\Do\;\ell\;V} &\defas& \lambda k.\lambda h.h~\Record{\ell,\Record{\cps{V}, \lambda x.k~x~h}} \\
\cps{\Handle \; M \; \With \; H} &\defas& \cps{M}~\cps{\hret}~\cps{\hops} \medskip\\
\cps{-} &:& \HandlerCat \to \UCompCat\\
\cps{\{ \Return \; x \mapsto N \}} &\defas& \lambda x . \lambda h . \cps{N} \\
\cps{\{ \ell~p~r \mapsto N_\ell \}_{\ell \in \mathcal{L}}}
&\defas&
\lambda \Record{z,\Record{p,r}}. \Case~z~
\{ (\ell \mapsto \cps{N_\ell})_{\ell \in \mathcal{L}}; y \mapsto \hforward(y,p,r) \} \\
\hforward(y,p,r) &\defas& \lambda k. \lambda h. h\,\Record{y,\Record{p, \lambda x.\,r\,x\,k\,h}}
\end{equations}
%
The translation of $\Do\;\ell\;V$ abstracts over the current pure
($k$) and effect ($h$) continuations passing an encoding of the
operation into the latter. The operation is encoded as a triple
consisting of the name $\ell$, parameter $\cps{V}$, and resumption
$\lambda x.k~x~h$, which passes the same effect continuation $h$ to
ensure deep handler semantics.
The translation of $\Handle~M~\With~H$ invokes the translation of $M$
with new pure and effect continuations for the return and operation
clauses of $H$.
%
The translation of a return clause is a term which garbage collects
the current effect continuation $h$.
%
The translation of a set of operation clauses is a function which
dispatches on encoded operations, and in the default case forwards to
an outer handler.
%
In the forwarding case, the resumption is extended by the parent
continuation pair to ensure that an eventual invocation of the
resumption reinstates the handler stack.
The translation of complete programs feeds the translated term the
identity pure continuation (which discards its handler argument), and
an effect continuation that is never intended to be called.
%
\begin{equations}
\pcps{-} &:& \CompCat \to \UCompCat\\
\pcps{M} &\defas& \cps{M}~(\lambda x.\lambda h.x)~(\lambda \Record{z,\_}.\Absurd~z) \\
\end{equations}
%
Conceptually, this translation encloses the translated program in a
top-level handler with an empty collection of operation clauses and an
identity return clause.
We may regard this particular CPS translation as being simple, because
it requires virtually no extension to the CPS translation for
$\BCalc$. However, this translation suffers from two deficiencies
which make it unviable in practice.
\begin{enumerate}
\item The image of the translation is not \emph{properly
tail-recursive}~\citep{DanvyF92,Steele78}, and as a result the
image is not stackless, meaning it cannot readily be used as the
basis for an implementation. This deficiency is essentially due to
the curried representation of the continuation stack.
\item The image of the translation yields static administrative
redexes, i.e. redexes that could be reduced statically. This is a
classic problem with CPS translation, which is typically dealt
with by introducing a second pass to clean up the
image~\cite{DanvyF92}.
\end{enumerate}
The following minimal example readily illustrates both issues.
%
\begin{equations}
\pcps{\Return\;\Record{}}
&= & (\lambda k.k\,\Record{})\,(\lambda x.\lambda h.x)\,(\lambda \Record{z,\_}.\Absurd\,z) \\
&\reducesto& ((\lambda x.\lambda h.x)\,\Record{})\,(\lambda \Record{z,\_}.\Absurd\,z) \\
&\reducesto& (\lambda h.\Record{})\,(\lambda \Record{z,\_}.\Absurd\,z)\\
&\reducesto& \Record{} \\
\end{equations}
%
\dhil{Mark the second reduction, so that it can be referred back to}
%
The second and third reductions simulate handling $\Return\;\Record{}$
at the top level. The second reduction partially applies the curried
function term $\lambda x.\lambda h.x$ to $\Record{}$, which must
return a value such that the third reduction can be
applied. Consequently, evaluation is not tail-recursive.
%
The lack of tail-recursion is also apparent in our relaxation of
fine-grain call-by-value in Figure~\ref{fig:cps-cbv-target} as the
function position of an application can be a computation.
%
In Section~\ref{sec:first-order-uncurried-cps} we will refine this
translation to be properly tail-recursive.
%
As for administrative redexes, observe that the first reduction is
administrative. It is an artefact introduced by the translation, and
thus it has nothing to do with the dynamic semantics of the original
term. We can eliminate such redexes statically. We will address this
issue in Section~\ref{sec:higher-order-cps}.
Nevertheless, we can show that the image of this CPS translation
simulates the preimage. Due to the presence of administrative
reductions, the simulation is not on the nose, but instead up to
congruence.
%
For reduction in the untyped target calculus we write
$\reducesto_{\textrm{cong}}$ for the smallest relation containing
$\reducesto$ that is closed under the term formation constructs.
%
\begin{theorem}[Simulation]
\label{thm:fo-simulation}
If $M \reducesto N$ then $\pcps{M} \reducesto_{\textrm{cong}}^+
\pcps{N}$.
\end{theorem}
\begin{proof}
The result follows by composing a call-by-value variant of
\citeauthor{ForsterKLP19}'s translation from effect handlers to
delimited continuations~\citeyearpar{ForsterKLP19} with
\citeauthor{MaterzokB12}'s CPS translation for delimited
continuations~\citeyearpar{MaterzokB12}.
\end{proof}
%
% \paragraph*{Remark}
% We originally derived this curried CPS translation for effect handlers
% by composing \citeauthor{ForsterKLP17}'s translation from effect
% handlers to delimited continuations~\citeyearpar{ForsterKLP17} with
% \citeauthor{MaterzokB12}'s CPS translation for delimited
% continuations~\citeyearpar{MaterzokB12}.
\subsection{Uncurried translation}
\label{sec:first-order-uncurried-cps}
%
%
\begin{figure}
\flushleft
\textbf{Syntax}
\begin{syntax}
\slab{Computations} &M,N \in \UCompCat &::= & \cdots \mid \XCancel{M\,W} \mid V\,W \mid U\,V\,W \smallskip \\
\XCancel{\slab{Evaluation contexts}} &\XCancel{\EC \in \UEvalCat} &::= & \XCancel{[~] \mid \EC\;W} \\
\end{syntax}
\textbf{Reductions}
\begin{reductions}
\usemlab{App_1} & (\lambda x . M) V &\reducesto& M[V/x] \\
\usemlab{App_2} & (\lambda x . \lambda y. \, M) V\, W &\reducesto& M[V/x,W/y] \\
\XCancel{\usemlab{Lift}} & \XCancel{\EC[M]} &\reducesto& \XCancel{\EC[N], \hfill \text{if } M \reducesto N}
\end{reductions}
\caption{Adjustments to the syntax and semantics of $\UCalc$.}
\label{fig:refined-cps-cbv-target}
\end{figure}
%
In this section we will refine the CPS translation for deep handlers
to make it properly tail-recursive. As remarked in the previous
section, the lack of tail-recursion is apparent in the syntax of the
target calculus $\UCalc$ as it permits an arbitrary computation term
in the function position of an application term.
%
As a first step we may impose a syntax restriction in the target calculus
such that the term in function position must be a value. With this
restriction the syntax of $\UCalc$ implements the property that any
term constructor features at most one computation term, and this
computation term always appears in tail position. Thus this
restriction suffices to ensure that every function application will be
in tail-position.
%
Figure~\ref{fig:refined-cps-cbv-target} contains the adjustments to
syntax and semantics of $\UCalc$. The target calculus now supports
both unary and binary application forms. As we shall see shortly,
binary application turns out to be convenient when we enrich the notion
of continuation. Both application forms are comprised only of value
terms. As a result the dynamic semantics of $\UCalc$ no longer makes
use of evaluation contexts, hence we remove them altogether. The
reduction rule $\usemlab{App_1}$ applies to unary application and it
is the same as the $\usemlab{App}$-rule in
Figure~\ref{fig:cps-cbv-target}. The new $\usemlab{App_2}$-rule
applies to binary application: it performs a simultaneous substitution
of the arguments $V$ and $W$ for the parameters $x$ and $y$,
respectively, in the function body $M$.
%
These changes to $\UCalc$ immediately invalidate the curried
translation from the previous section as the image of the translation
is no longer well-formed.
%
% The crux of the problem is the curried representation of the
% continuation pair.
The crux of the problem is that the curried interpretation of
continuations causes the CPS translation to produce `large'
application terms, e.g. the translation rule for effect forwarding
produces a three-argument application term.
%
To rectify this problem we can adapt the technique of
\citet{MaterzokB12} to uncurry our CPS translation. Uncurrying
necessitates a change of representation for continuations: a
continuation is now an alternating list of pure continuation functions
and effect continuation functions. Thus, we move to an explicit
representation of the runtime handler stack.
%
The change of continuation representation means the CPS translation
for effect handlers is no longer a conservative extension. The
translation is adjusted as follows to account for the new
representation of continuations.
%
\begin{equations}
\cps{-} &:& \CompCat \to \UCompCat\\
\cps{\Return~V} &\defas& \lambda (k \cons ks).k\,\cps{V}\,ks \\
\cps{\Let~x \revto M~\In~N} &\defas& \lambda (k \cons ks).\cps{M}((\lambda x.\lambda ks'.\cps{N}(k \cons ks')) \cons ks)
\smallskip \\
\cps{\Do\;\ell\;V} &\defas& \lambda (k \cons h \cons ks).h\,\Record{\ell,\Record{\cps{V}, \lambda x.\lambda ks'.k\,x\,(h \cons ks')}}\,ks
\smallskip \\
\cps{\Handle \; M \; \With \; H} &\defas& \lambda ks . \cps{M} (\cps{\hret} \cons \cps{\hops} \cons ks) \medskip\\
\cps{-} &:& \HandlerCat \to \UCompCat\\
\cps{\{\Return \; x \mapsto N\}} &\defas& \lambda x.\lambda ks.\Let\; (h \cons ks') = ks \;\In\; \cps{N}\,ks'
\\
\cps{\{\ell \; p \; r \mapsto N_\ell\}_{\ell \in \mathcal{L}}}
&\defas&
\bl
\lambda \Record{z,\Record{p,r}}. \lambda ks. \Case \; z \;
\{( \bl\ell \mapsto \cps{N_\ell}\,ks)_{\ell \in \mathcal{L}};\,\\
y \mapsto \hforward((y,p,r),ks) \}\el \\
\el \\
\hforward((y,p,r),ks) &\defas& \bl
\Let\; (k' \cons h' \cons ks') = ks \;\In\; \\
h'\,\Record{y, \Record{p, \lambda x.\lambda ks''.\,r\,x\,(k' \cons h' \cons ks'')}}\,ks'\\
\el \medskip\\
\pcps{-} &:& \CompCat \to \UCompCat\\
\pcps{M} &\defas& \cps{M}~((\lambda x.\lambda ks.x) \cons (\lambda \Record{z,\Record{p,r}}. \lambda ks.\,\Absurd~z) \cons \nil)
\end{equations}
%
The other cases are as in the original CPS translation in
Figure~\ref{fig:cps-cbv}.
%
Since we now use a list representation for the stacks of
continuations, we have had to modify the translations of all the
constructs that manipulate continuations. For $\Return$ and $\Let$, we
extract the top continuation $k$ and manipulate it analogously to the
original translation in Figure~\ref{fig:cps-cbv}. For $\Do$, we
extract the top pure continuation $k$ and effect continuation $h$ and
invoke $h$ in the same way as the curried translation, except that we
explicitly maintain the stack $ks$ of additional continuations. The
translation of $\Handle$, however, pushes a continuation pair onto the
stack instead of supplying them as arguments. Handling of operations
is the same as before, except for explicit passing of the
$ks$. Forwarding now pattern matches on the stack to extract the next
continuation pair, rather than accepting them as arguments.
%
% Proper tail recursion coincides with a refinement of the target
% syntax. Now applications are either of the form $V\,W$ or of the form
% $U\,V\,W$. We could also add a rule for applying a two argument lambda
% abstraction to two arguments at once and eliminate the
% $\usemlab{Lift}$ rule, but we defer this until our higher order
% translation in Section~\ref{sec:higher-order-uncurried-cps}.
Let us revisit the example from
Section~\ref{sec:first-order-curried-cps} to see first hand that our
refined translation makes the example properly tail-recursive.
%
\begin{equations}
\pcps{\Return\;\Record{}}
&= & (\lambda (k \cons ks).k\,\Record{}\,ks)\,((\lambda x.\lambda ks.x) \cons (\lambda \Record{z, \_}.\lambda ks.\Absurd\,z) \cons \nil) \\
&\reducesto& (\lambda x.\lambda ks.x)\,\Record{}\,((\lambda \Record{z,\_}.\lambda ks.\Absurd\,z) \cons \nil)\\
&\reducesto& \Record{}
\end{equations}
%
The image of this uncurried translation admits two
$\reducesto$-reduction steps (disregarding the administrative steps
induced by pattern matching), which is one less step than admitted by
the image of the curried translation. The `missing' step is precisely
the partial application of the initial pure continuation function,
which was not in tail position. Note, however, that the first
reduction remains administrative; the reduction is entirely static,
and as such, it can be dealt with as part of the translation.
%
\paragraph{Administrative redexes}
We can determine whether a redex is administrative in the image by
determining whether it corresponds to a redex in the preimage. If
there is no corresponding redex, then the redex is said to be
administrative. We can further classify an administrative redex as to
whether it is \emph{static} or \emph{dynamic}. A static administrative
redex is a by-product of the translation that does not contribute to
the implementation of the dynamic behaviour of the preimage.
%
In contrast, a dynamic administrative redex is a genuine
implementation detail that supports some part of the dynamic behaviour
of the preimage. An example of such a detail is the implementation of
effect forwarding. In $\HCalc$ effect forwarding involves no auxiliary
reductions; any operation invocation is instantaneously dispatched to
a suitable handler (if such a handler exists).
%
The translation presented above realises effect forwarding by
explicitly applying the next effect continuation. This application is
an example of a dynamic administrative reduction. However, not all
dynamic administrative redexes are necessary; for instance, the
implementation of resumptions as a composition of
$\lambda$-abstractions gives rise to administrative reductions upon
invocation. As we shall see in
Section~\ref{sec:first-order-explicit-resump} administrative
reductions due to resumption invocation can be dealt with by choosing
a more clever implementation of resumptions.
We can characterise static administrative redexes\dots
\dhil{Characterise static redexes\dots}
% \dhil{Discuss dynamic and static administrative redex.}
\subsection{Resumptions as explicit reversed stacks}
\label{sec:first-order-explicit-resump}
%
\dhil{Show an example involving administrative redexes produced by resumptions}
%
Thus far resumptions have been represented as functions, and
forwarding has been implemented using function composition. The
composition of resumptions gives rise to unnecessary dynamic
administrative redexes as function composition necessitates the
introduction of an additional lambda abstraction.
%
We can avert generating these administrative redexes by applying a
variation of the technique that we used in the previous section to
uncurry the curried CPS translation.
%
Rather than representing resumptions as functions, we move to an
explicit representation of resumptions as \emph{reversed} stacks of
pure and effect continuations. By choosing to reverse the order of
pure and effect continuations, we can construct resumptions
efficiently using regular cons-lists. We augment the syntax and
semantics of $\UCalc$ with a computation term
$\Let\;r=\Res\,V\;\In\;N$ which allows us to convert these reversed
stacks to actual functions on demand.
%
\begin{reductions}
\usemlab{Res}
& \Let\;r=\Res\,(V_n \cons \dots \cons V_1 \cons \nil)\;\In\;N
& \reducesto
& N[\lambda x\,k.V_1\,x\,(V_2 \cons \dots \cons V_n \cons k)/r]
\end{reductions}
%
This reduction rule reverses the stack, extracts the top continuation
$V_1$, and prepends the remainder onto the current stack $k$. The
stack representing a resumption and the remaining stack $k$ are
reminiscent of the zipper data structure for representing cursors in
lists~\cite{Huet97}. Thus we may think of resumptions as representing
pointers into the stack of handlers.
%
The translations of $\Do$, handling, and forwarding need to be
modified to account for the change in representation of
resumptions.
%
\begin{equations}
\cps{-} &:& \CompCat \to \UCompCat\\
\cps{\Do\;\ell\;V}
&\defas& \lambda k \cons h \cons ks.\,h\, \Record{\ell,\Record{\cps{V}, h \cons k \cons \nil}}\, ks
\medskip\\
%
\cps{-} &:& \HandlerCat \to \UCompCat\\
\cps{\{(\ell \; p \; r \mapsto N_\ell)_{\ell \in \mathcal{L}}\}}
&\defas& \bl
\lambda \Record{z,\Record{p,rs}}.\lambda ks.\Case \;z\; \{
\bl
(\ell \mapsto \Let\;r=\Res\;rs \;\In\; \cps{N_{\ell}}\, ks)_{\ell \in \mathcal{L}};\,\\
y \mapsto \hforward((y,p,rs),ks) \} \\
\el \\
\el \\
\hforward((y,p,rs),ks)
&\defas&\Let\; (k' \cons h' \cons ks') = ks \;\In\; h'\,\Record{y,\Record{p,h' \cons k' \cons rs}} \,ks'
\end{equations}
%
The translation of $\Do$ constructs an initial resumption stack,
operation clauses extract and convert the current resumption stack
into a function using the $\Res$ construct, and $\hforward$ augments the current
resumption stack with the current continuation pair.
%
\subsection{Higher-order translation for deep effect handlers}
\label{sec:higher-order-uncurried-deep-handlers-cps}
%
\begin{figure}
%
\textbf{Values}
%
\begin{displaymath}
\begin{eqs}
\cps{-} &:& \ValCat \to \UValCat\\
\cps{x} &\defas& x \\
\cps{\lambda x.M} &\defas& \dlam x\,ks.\Let\;(k \dcons h \dcons ks') = ks \;\In\;\cps{M} \sapp (\reflect k \scons \reflect h \scons \reflect ks') \\
% \cps{\Rec\,g\,x.M} &\defas& \Rec\;f\,x\,ks.\cps{M} \sapp \reflect ks\\
\cps{\Lambda \alpha.M} &\defas& \dlam \Unit\,ks.\Let\;(k \dcons h \dcons ks') = ks \;\In\;\cps{M} \sapp (\reflect k \scons \reflect h \scons \reflect ks') \\
\cps{\Record{}} &\defas& \Record{} \\
\cps{\Record{\ell = V; W}} &\defas& \Record{\ell = \cps{V}; \cps{W}} \\
\cps{\ell~V} &\defas& \ell~\cps{V} \\
\end{eqs}
\end{displaymath}
%
\textbf{Computations}
%
\begin{equations}
\cps{-} &:& \CompCat \to \SValCat^\ast \to \UCompCat\\
\cps{V\,W} &\defas& \slam \sks.\cps{V} \dapp \cps{W} \dapp \reify \sks \\
\cps{V\,T} &\defas& \slam \sks.\cps{V} \dapp \Record{} \dapp \reify \sks \\
\cps{\Let\; \Record{\ell=x;y} = V \; \In \; N} &\defas& \slam \sks.\Let\; \Record{\ell=x;y} = \cps{V} \; \In \; \cps{N} \sapp \sks \\
\cps{\Case~V~\{\ell~x \mapsto M; y \mapsto N\}} &\defas&
\slam \sks.\Case~\cps{V}~\{\ell~x \mapsto \cps{M} \sapp \sks; y \mapsto \cps{N} \sapp \sks\} \\
\cps{\Absurd~V} &\defas& \slam \sks.\Absurd~\cps{V} \\
\cps{\Return~V} &\defas& \slam \sk \scons \sks.\reify \sk \dapp \cps{V} \dapp \reify \sks \\
\cps{\Let~x \revto M~\In~N} &\defas& \slam \sk \scons \sks.\cps{M} \sapp
(\reflect (\dlam x\,\dhk.
\ba[t]{@{}l}
\Let\;(h \dcons \dhk') = \dhk\;\In\\
\cps{N} \sapp (\sk \scons \reflect h \scons \reflect \dhk')) \scons \sks)
\ea\\
\cps{\Do\;\ell\;V}
&\defas& \slam \sk \scons \sh \scons \sks.\reify \sh \dapp \Record{\ell,\Record{\cps{V}, \reify \sh \dcons \reify \sk \dcons \dnil}} \dapp \reify \sks\\
\cps{\Handle \; M \; \With \; H} &\defas& \slam \sks . \cps{M} \sapp (\reflect \cps{\hret} \scons \reflect \cps{\hops} \scons \sks)
%
\end{equations}
%
\textbf{Handler definitions}
%
\begin{equations}
\cps{-} &:& \HandlerCat \to \UValCat\\
\cps{\{\Return \; x \mapsto N\}} &\defas& \dlam x\, \dhk.
\ba[t]{@{~}l}
\Let\; (h \dcons \dk \dcons h' \dcons \dhk') = \dhk \;\In\\
\cps{N} \sapp (\reflect \dk \scons \reflect h' \scons \reflect \dhk')
\ea
\\
\cps{\{(\ell \; p \; r \mapsto N_\ell)_{\ell \in \mathcal{L}}\}}
&\defas& \bl
\dlam \Record{z,\Record{p,\dhkr}}\,\dhk.\Case \;z\; \{
\ba[t]{@{}l@{}c@{~}l}
&(\ell \mapsto&
\ba[t]{@{}l}
\Let\;r=\Res\;\dhkr \;\In\\
\Let\;(\dk \dcons h \dcons \dhk') = \dhk \;\In\\
\cps{N_{\ell}} \sapp (\reflect \dk \scons \reflect h \scons \reflect \dhk'))_{\ell \in \mathcal{L}};
\ea\\
&y \mapsto& \hforward((y,p,\dhkr),\dhk) \} \\
\ea \\
\el \\
\hforward((y,p,\dhkr),\dhk)
&\defas&\Let\; (\dk' \dcons h' \dcons \dhk') = \dhk \;\In\; h' \dapp \Record{y,\Record{p,h' \dcons \dk' \dcons \dhkr}} \dapp \dhk'
\end{equations}
%
\textbf{Top level program}
%
\begin{equations}
\pcps{-} &:& \CompCat \to \UCompCat\\
\pcps{M} &=& \cps{M} \sapp (\reflect (\dlam x\,\dhk.x) \scons \reflect (\dlam z\,\dhk.\Absurd~z) \scons \snil) \\
\end{equations}
\caption{Higher-order uncurried CPS translation of $\HCalc$.}
\label{fig:cps-higher-order-uncurried}
\end{figure}
%
In the previous sections, we have step-wise refined the initial
curried CPS translation for deep effect handlers
(Section~\ref{sec:first-order-curried-cps}) to be properly
tail-recursive (Section~\ref{sec:first-order-uncurried-cps}) and to
avoid yielding unnecessary dynamic administrative redexes for
resumptions (Section~\ref{sec:first-order-explicit-resump}).
%
There is still one outstanding issue, namely, that the translation
yields static administrative redexes. In this section we will further
refine the CPS translation to eliminate all static administrative
redexes at translation time.
%
Specifically, we will adapt the translation to a higher-order one-pass
CPS translation~\citep{DanvyF90} that partially evaluates
administrative redexes at translation time.
%
Following \citet{DanvyN03}, we adopt a two-level lambda calculus
notation to distinguish between \emph{static} lambda abstraction and
application in the meta language and \emph{dynamic} lambda abstraction
and application in the target language. To disambiguate syntax
constructors in the respective calculi we mark static constructors
with a {\color{blue}$\overline{\text{blue overline}}$}, whilst dynamic
constructors are marked with a
{\color{red}$\underline{\text{red underline}}$}. The principal idea is
that redexes marked as static are reduced as part of the translation,
whereas those marked as dynamic are reduced at runtime. To facilitate
this notation we write application explicitly using an infix ``at''
symbol ($@$) in both calculi.
\paragraph{Static terms}
%
As in the dynamic target language, we will represent continuations as
alternating lists of pure continuation functions and effect
continuation functions. To ease notation we are going to make use of
pattern matching notation. The static meta language is generated by
the following productions.
%
\begin{syntax}
\slab{Static patterns} &\sP \in \SPatCat &::=& \sks \mid \sk \scons \sP\\
\slab{Static values} & \sV, \sW \in \SValCat &::=& \reflect V \mid \sV \scons \sW \mid \slam \sP. \sM\\
\slab{Static computations} & \sM \in \SCompCat &::=& \sV \mid \sV \sapp \sW \mid \sV \dapp V \dapp W
\end{syntax}
%
The patterns comprise only static list deconstructing. We let $\sP$
range over static patterns.
%
The static values comprise reflected dynamic values, static lists, and
static lambda abstractions. We let $\sV, \sW$ range over meta language
values; by convention we shall use variables $\sk$ to denote
statically known pure continuations, $\sh$ to denote statically known
effect continuations, and $\sks$ to denote statically known
continuations.
%
We shall use $\sM$ to range over static computations, which comprise
static values, static application and binary dynamic application of a
static value to two dynamic values.
%
Static computations are subject to the following equational axioms.
%
\begin{equations}
(\slam \sks. \sM) \sapp \sV &\defas& \sM[\sV/\sks]\\
(\slam \sk \scons \sks. \sM) \sapp (\sV \scons \sW) &\defas& (\slam \sks. \sM[\sV/\sk]) \sapp \sW\\
\end{equations}
%
The first equation is static $\beta$-equivalence: it states that
applying a static lambda abstraction with binder $\sks$ and body $\sM$
to a static value $\sV$ is equal to substituting $\sV$ for $\sks$ in
$\sM$. The second equation provides a means for applying a static
lambda abstraction to a static list component-wise.
%
Reflected static values are reified as dynamic language values
$\reify \sV$ by induction on their structure.
%
\[
\ba{@{}l@{\qquad}c}
\reify \reflect V \defas V
&\reify (\sV \scons \sW) \defas \reify \sV \dcons \reify \sW
\ea
\]
%
%\dhil{Need to spell out that static pattern matching may induce dynamic (administrative) reductions}
%
\paragraph{Higher-order translation}
%
The complete CPS translation is given in
Figure~\ref{fig:cps-higher-order-uncurried}. In essence, it is the
same as the refined first-order uncurried CPS translation, although
the notation is slightly more involved due to the separation of static
and dynamic parts.
%
The translation on values is almost homomorphic on the syntax
constructors. The notable exceptions are the translations of
$\lambda$-abstractions and $\Lambda$-abstractions. The former gives
rise to dynamic computation at runtime, and thus, the translation
emits a dynamic $\dlam$-abstraction with two formal parameters: the
function parameter, $x$, and the continuation parameter, $ks$. The
body of the $\dlam$-abstraction is somewhat anomalous as the
continuation $ks$ is subject to immediate dynamic deconstruction just
to be reconstructed again and used as a static argument to the
translation of the original body computation $M$. This deconstruction
and reconstruction may at first glance appear to be rather pointless,
however, a closer look reveals that it performs a critical role as it
ensures that the translation on computation terms has immediate access
to the next pure and effect continuation functions. In fact, the
translation is set up such that every generated dynamic
$\dlam$-abstraction deconstructs its continuation argument
appropriately to expose just enough (static) continuation
structure in order to guarantee that static pattern matching never
fails.
%
(Though, the translation is somewhat unsatisfactory regarding this
deconstruction of dynamic continuations as the various generated
dynamic lambda abstractions do not deconstruct their continuation
arguments uniformly. We will return to discuss this dissatisfaction
shortly.)
%
Since the target language is oblivious to types any
$\Lambda$-abstraction gets translated in a similar way to a
$\lambda$-abstraction, where the first parameter becomes a placeholder
for a ``dummy'' unit argument.
The translation of computation terms is more interesting. Note the
type of the translation function,
$\cps{-} : \CompCat \to \SValCat^\ast \to \UCompCat$. It is a binary
function, taking a $\HCalc$-computation term as its first argument and
a static continuation as its second argument, and ultimately produces
a $\UCalc$-computation term. The static continuation may be
manipulated during the translation as is done in the translations of
$\Return$, $\Let$, $\Do$, and $\Handle$. The translation of $\Return$
consumes the next pure continuation function $\sk$ and applies it
dynamically to the translation of $V$ and the reified remainder,
$\sks$, of the static continuation. Keep in mind that the translation
of $\Return$ only consumes one of the two guaranteed available
continuation functions from the provided continuation argument, as a
consequence the next continuation function in $\sks$ is an effect
continuation. This consequence is accounted for in the translations of
$\Let$ and $\Return$-clauses, which are precisely the two possible
(dynamic) origins of $\sk$. For instance, the translation of $\Let$
consumes the current pure continuation function and generates a
replacement: a pure continuation function which expects an odd dynamic
continuation $ks$, which it deconstructs to expose the effect
continuation $h$ along with the current pure continuation function in
the translation of $N$. The modified continuation is passed to the
translation of $M$.
%
To provide a flavour of how this continuation manipulation functions
in practice, consider the following example term.
%
\begin{derivation}
&\pcps{\Let\;x \revto \Return\;V\;\In\;N}\\
=& \reason{definition of $\pcps{-}$}\\
&\ba[t]{@{}l}(\slam \sk \scons \sks.\cps{\Return\;V} \sapp
(\reflect(\dlam x\,ks.
\ba[t]{@{}l}
\Let\;(h \dcons ks') = ks \;\In\\
\cps{N} \sapp (\sk \scons \reflect h \scons \reflect ks')) \scons \sks)
\ea\\
\sapp (\reflect (\dlam x\,ks.x) \scons \reflect (\dlam z\,ks.\Absurd~z) \scons \snil))
\ea\\
=& \reason{definition of $\cps{-}$}\\
&\ba[t]{@{}l}(\slam \sk \scons \sks.(\slam \sk \scons \sks. \reify \sk \dapp \cps{V} \dapp \reify \sks) \sapp
(\reflect(\dlam x\,ks.
\ba[t]{@{}l}
\Let\;(h \dcons ks') = ks \;\In\\
\cps{N} \sapp (\sk \scons \reflect h \scons \reflect ks')) \scons \sks)
\ea\\
\sapp (\reflect (\dlam x\,ks.x) \scons \reflect (\dlam z\,ks.\Absurd~z) \scons \snil))
\ea\\
=& \reason{static $\beta$-reduction}\\
&(\slam \sk \scons \sks. \reify \sk \dapp \cps{V} \dapp \reify \sks)
\sapp
(\reflect(\dlam x\,\dhk.
\ba[t]{@{}l}
\Let\;(h \dcons \dhk') = \dhk \;\In\\
\cps{N} \sapp
\ba[t]{@{}l}
(\reflect (\dlam x\,\dhk.x) \scons \reflect h \scons \reflect \dhk'))\\
~~\scons \reflect (\dlam z\,\dhk.\Absurd~z) \scons \snil))
\ea
\ea\\
=& \reason{static $\beta$-reduction}\\
&\ba[t]{@{}l@{~}l}
&(\dlam x\,\dhk.
\Let\;(h \dcons \dhk') = \dhk \;\In\;
\cps{N} \sapp
(\reflect (\dlam x\,\dhk.x) \scons \reflect h \scons \reflect \dhk'))\\
\dapp& \cps{V} \dapp ((\dlam z\,\dhk.\Absurd~z) \dcons \dnil)\\
\ea\\
\reducesto& \reason{\usemlab{App_2}}\\
&\Let\;(h \dcons \dhk') = (\dlam z\,\dhk.\Absurd~z) \dcons \dnil \;\In\;
\cps{N[V/x]} \sapp
(\reflect (\dlam x\,\dhk.x) \scons \reflect h \scons \reflect \dhk'))\\
\reducesto^+& \reason{dynamic pattern matching and substitution}\\
&\cps{N[V/x]} \sapp
(\reflect (\dlam x\,\dhk.x) \scons \reflect (\dlam z\,\dhk.\Absurd~z) \scons \reflect \dnil)
\end{derivation}
%
The translation of $\Return$ provides the generated dynamic pure
continuation function with the odd continuation
$((\dlam z\,ks.\Absurd~z) \dcons \dnil)$. After the \usemlab{App_2}
reduction, the pure continuation function deconstructs the odd
continuation in order to bind the current effect continuation function
to the name $h$, which would have been used during the translation of
$N$.
The translation of $\Do$ consumes both the current pure continuation
function and current effect continuation function. It applies the
effect continuation function dynamically to an operation package and
the reified remainder of the continuation. As usual, the operation
package contains the payload and the resumption, which is a reversed
slice of the provided continuation. The translation of $\Handle$
applies the translation of $M$ to the current continuation extended
with the translation of the $\Return$-clause, acting as a pure
continuation function, and the translation of operation-clauses,
acting as an effect continuation function.
The translation of a return clause $\hret$ of some handler $H$
generates a dynamic lambda abstraction, which expects an odd
continuation. In its body it extracts three elements from the
continuation parameter $ks$. The first element is discarded as it is
the effect continuation corresponding to the translation of
$\hops$. The other two elements are used to statically expose the next
pure and effect continuation functions in the translation of $N$.
%
The translation of $\hops$ also generates a dynamic lambda
abstraction, which unpacks the operation package to perform a
case-split on the operation label $z$. The continuation $ks$ is
deconstructed in the branch for $\ell$ in order to expose the
continuation structure. The forwarding branch also deconstructs the
continuation, however, for a different purpose, namely, to augment the
resumption $rs$ with the next pure and effect continuation functions.
%
\dhil{Remark that an alternative to cluttering the translations with
  unpacking and repacking of continuations would be to make static
  pattern matching generate dynamic let bindings whenever necessary. While it
may reduce the dynamic administrative reductions, it makes the
simulation proof more complicated.}
% : the $\dlam$-abstractions generated by translating $\lambda$-abstractions, $\Lambda$-abstractions, and operation clauses expose the next two continuation functions, whilst the dynamic lambda abstraction generated by translating $\Let$ only expose the next continuation, others expose only the next one, and the translation of
% $\lambda$-abstractions expose the next two continuation functions,
% The translation on values and handler definitions generates dynamic
% lambda abstractions. Whilst the translation on computations generates
% static lambda abstractions.
% Static continuation application corresponds to
% The translation function for computation terms is staged: for any
% given computation term the translation yields a static function, which
% can be applied to a static continuation to ultimately yield a
% $\UCalc$-computation term. Staging is key to eliminating static
% administrative redexes as any static function may perform static
% computation by manipulating its provided static continuation
% a binary higher-order
% function; it is higher-order because its second parameter is a list of
% static continuation functions.
% A major difference that has a large cosmetic effect on the
% presentation of the translation is that we maintain the invariant that
% the statically known stack ($\sk$) always contains at least one frame,
% consisting of a triple
% $\sRecord{\reflect V_{fs}, \sRecord{\reflect V_{ret}, \reflect
% V_{ops}}}$ of reflected dynamic pure frame stacks, return
% handlers, and operation handlers. Maintaining this invariant ensures
% that all translations are uniform in whether they appear statically
% within the scope of a handler or not, and this simplifies our
% correctness proof. To maintain the invariant, any place where a
% dynamically known stack is passed in (as a continuation parameter
% $k$), it is immediately decomposed using a dynamic language $\Let$ and
% repackaged as a static value with reflected variable
% names. Unfortunately, this does add some clutter to the translation
% definition, as compared to the translations above. However, there is a
% payoff in the removal of administrative reductions at run time. The
% translations presented by \citet{HillerstromLAS17} and
% \citet{HillerstromL18} did not do this decomposition and repackaging
% step, which resulted in additional administrative reductions in the
% translation due to the translations of $\Let$ and $\Do$ being passed
% dynamic continuations when they were expecting statically known ones.
%
% The translation on values is mostly homomorphic on the syntax
% constructors, except for $\lambda$-abstractions and
% $\Lambda$-abstractions which are translated in the exact same way,
% because the target calculus has no notion of types. As per usual
% continuation passing style practice, the translation of a lambda
% abstraction yields a dynamic binary lambda abstraction, where the
% second parameter is intended to be the continuation
% parameter.
% %
% However, the translation slightly diverge from the usual
% practice in the translation of the body as the result of $\cps{M}$ is
% applied statically, rather than dynamically, to the (reflected)
% continuation parameter.
% %
% The reason is that the translation on computations is staged: for any
% given computation term the translation yields a static function, which
% can be applied to a static continuation to ultimately produce a
% $\UCalc$-computation term. This staging is key to eliminating static
% administrative redexes as any static function may perform static
% computation by manipulating its provided static continuation, as is
% for instance done in the translation of $\Let$.
%
\dhil{Spell out why this translation qualifies as `higher-order'.}
Let us revisit the example from
Section~\ref{sec:first-order-curried-cps} to see that the higher-order
translation eliminates the static redex at translation time.
%
\begin{equations}
\pcps{\Return\;\Record{}}
&=& (\slam \sk \scons \sks. \sk \dapp \Record{} \dapp \reify \sks) \sapp (\reflect (\dlam x\,\dhk.x) \scons \reflect (\dlam z\,\dhk.\Absurd\;z) \scons \snil)\\
&=& (\dlam x\,\dhk.x) \dapp \Record{} \dapp (\reflect (\dlam z\,\dhk.\Absurd\;z) \dcons \dnil)\\
&\reducesto& \Record{}
\end{equations}
%
In contrast with the previous translations, the image of this
translation admits only a single dynamic reduction (disregarding the
dynamic administrative reductions arising from continuation
construction and deconstruction).
\subsubsection{Correctness}
\label{sec:higher-order-cps-deep-handlers-correctness}
We establish the correctness of the higher-order uncurried CPS
translation via a simulation result in the style of
Plotkin~\cite{Plotkin75} (Theorem~\ref{thm:ho-simulation}). However,
before we can state and prove this result, we first need several
auxiliary lemmas describing how translated terms behave. First, the higher-order
CPS translation commutes with substitution.
%
\begin{lemma}[Substitution]\label{lem:ho-cps-subst}
%
The higher-order uncurried CPS translation commutes with
substitution in value terms
%
\[
\cps{W}[\cps{V}/x] = \cps{W[V/x]},
\]
%
and with substitution in computation terms
\[
(\cps{M} \sapp (\sk \scons \sh \scons \sks))[\cps{V}/x]
= \cps{M[V/x]} \sapp (\sk \scons \sh \scons \sks)[\cps{V}/x],
\]
%
and with substitution in handler definitions
%
\begin{equations}
\cps{\hret}[\cps{V}/x]
&=& \cps{\hret[V/x]},\\
\cps{\hops}[\cps{V}/x]
&=& \cps{\hops[V/x]}.
\end{equations}
\end{lemma}
%
\begin{proof}
Proof is by mutual induction on the structure of $W$, $M$, $\hret$,
and $\hops$.
\end{proof}
%
It follows as a corollary that top-level substitution is well-behaved.
%
\begin{corollary}[Top-level substitution]
\[
\pcps{M}[\cps{V}/x] = \pcps{M[V/x]}.
\]
\end{corollary}
%
\begin{proof}
Follows immediately by the definitions of $\pcps{-}$ and
Lemma~\ref{lem:ho-cps-subst}.
\end{proof}
%
In order to reason about the behaviour of the \semlab{Op} rule, which is
defined in terms of an evaluation context, we need to extend the CPS
translation to evaluation contexts.
%
\begin{equations}
\cps{-} &:& \EvalCat \to \SValCat\\
\cps{[~]} &\defas& \slam \sks.\sks \\
\cps{\Let\; x \revto \EC \;\In\; N} &\defas& \slam \sk \scons \sks.\cps{\EC} \sapp
(\reflect(\dlam x\,ks.
\ba[t]{@{}l}
\Let\;(h \dcons ks') = ks\;\In\;\\
\cps{N} \sapp (\sk \scons \reflect h \scons \reflect ks')) \scons \sks)
\ea\\
\cps{\Handle\; \EC \;\With\; H} &\defas& \slam \sks. \cps{\EC} \sapp (\cps{\hret} \scons \cps{\hops} \scons \sks)
\end{equations}
%
The following lemma is the characteristic property of the CPS
translation on evaluation contexts.
%
It provides a means for decomposing an evaluation context, such that
we can focus on the computation contained within the evaluation
context.
%
\begin{lemma}[Decomposition]
\label{lem:decomposition}
%
\begin{equations}
\cps{\EC[M]} \sapp (\sV \scons \sW) &=& \cps{M} \sapp (\cps{\EC} \sapp (\sV \scons \sW)) \\
\end{equations}
%
\end{lemma}
%
\begin{proof}
Proof by structural induction on the evaluation context $\EC$.
\end{proof}
%
Even though we have eliminated the static administrative redexes, we
still need to account for the dynamic administrative redexes that
arise from pattern matching against a reified continuation. To
properly account for these administrative redexes it is convenient to
treat list pattern matching as a primitive in $\UCalc$; therefore, we
introduce a new reduction rule $\usemlab{SplitList}$ in $\UCalc$.
%
\begin{reductions}
\usemlab{SplitList} & \Let\; (k \dcons ks) = V \dcons W \;\In\; M &\reducesto& M[V/k, W/ks] \\
\end{reductions}
%
Note that this rule is isomorphic to the \usemlab{Split} rule with lists
encoded as right nested pairs using unit to denote nil.
%
We write $\areducesto$ for the compatible closure of
\usemlab{SplitList}.
We also need to be able to reason about the computational content of
reflection after reification. By definition we have that
$\reify \reflect V = V$; the following lemma lets us reason about the
inverse composition.
%
\begin{lemma}[Reflect after reify]
\label{lem:reflect-after-reify}
%
Reflection after reification may give rise to dynamic administrative
reductions, i.e.
%
\[
\cps{M} \sapp (\sV_1 \scons \dots \scons \sV_n \scons \reflect \reify \sW)
\areducesto^* \cps{M} \sapp (\sV_1 \scons \dots \scons \sV_n \scons \sW)
\]
\end{lemma}
%
\begin{proof}
Proof is by induction on the structure of $M$.
\end{proof}
%
We next observe that the CPS translation simulates forwarding.
%
\begin{lemma}[Forwarding]
\label{lem:forwarding}
If $\ell \notin \mathrm{dom}(H_1)$ then
%
\[
\cps{\hops_1} \dapp \Record{\ell,\Record{U, V}} \dapp (V_2 \dcons \cps{\hops_2} \dcons W)
\reducesto^+
\cps{\hops_2} \dapp \Record{\ell,\Record{U, \cps{\hops_2} \dcons V_2 \dcons V}} \dapp W
\]
%
\end{lemma}
%
\begin{proof}
Proof by direct calculation.
\end{proof}
%
Now we show that the translation simulates the \semlab{Op}
rule.
%
\begin{lemma}[Handling]
\label{lem:handle-op}
If $\ell \notin BL(\EC)$ and $\hell = \{\ell\,p\,r \mapsto N_\ell\}$ then
%
\begin{displaymath}
\bl
\cps{\Do\;\ell\;V} \sapp (\cps{\EC} \sapp (\reflect\cps{\hret} \scons \reflect\cps{\hops} \scons \sV)) \reducesto^+\areducesto^* \\
\quad
(\cps{N_\ell} \sapp \sV)[\cps{V}/p, (\lambda y\,ks.\cps{\Return\;y} \sapp (\cps{\EC} \sapp (\reflect\cps{\hret} \scons \reflect\cps{\hops} \scons \reflect ks)))/r] \\
\el
\end{displaymath}
\end{lemma}
%
\begin{proof}
Follows from Lemmas~\ref{lem:decomposition},
\ref{lem:reflect-after-reify}, and \ref{lem:forwarding}.
\end{proof}
%
Finally, we have the ingredients to state and prove the simulation
result. The following theorem shows that the only extra behaviour
exhibited by a translated term is the bureaucracy of deconstructing
the continuation stack.
%
\begin{theorem}[Simulation]
\label{thm:ho-simulation}
If $M \reducesto N$ then $\pcps{M} \reducesto^+ \areducesto^* \pcps{N}$.
\end{theorem}
%
\begin{proof}
Proof is by case analysis on the reduction relation using Lemmas
\ref{lem:decomposition}--\ref{lem:handle-op}. The \semlab{Op} case
follows from Lemma~\ref{lem:handle-op}.
\end{proof}
%
% In common with most CPS translations, full abstraction does not
% hold. However, as our semantics is deterministic it is straightforward
% to show a backward simulation result.
% %
% \begin{corollary}[Backwards simulation]
% If $\pcps{M} \reducesto^+ \areducesto^* V$ then there exists $W$ such that
% $M \reducesto^* W$ and $\pcps{W} = V$.
% \end{corollary}
% %
% \begin{proof}
% TODO\dots
% \end{proof}
%
\section{CPS transforming shallow effect handlers}
\label{sec:cps-shallow}
In this section we will continue to build upon the higher-order
uncurried CPS translation
(Section~\ref{sec:higher-order-uncurried-deep-handlers-cps}). Specifically,
we will adapt it to be able to translate shallow effect handlers. The
dynamic nature of shallow handlers pose an interesting challenge as
shallow resumption invocation discards its handler. Consequently
evaluation after resumption may occur under a potentially different
handler which is determined dynamically by the context. This means
that the CPS translation must be able to update the current
continuation pair.
\dhil{Fix the text}
\subsection{A specious attempt}
\label{sec:cps-shallow-flawed}
%
Initially it is tempting to try to extend the interpretation of the
continuation representation in the higher-order uncurried CPS
translation for deep handlers to squeeze in shallow handlers. The
first obstacle one encounters is how to decouple a pure continuation
from its handler such that it can later be `adopted' by another
handler.
To fully uninstall a handler, we must uninstall both the pure
continuation function corresponding to its return clause and the
effect continuation function corresponding to its operation clauses.
%
In the current setup it is impossible to reliably uninstall the former
as due to the translation of $\Let$-expressions it may be embedded
arbitrarily deep within the current pure continuation and the
extensional representation of pure continuations means that we cannot
decompose them.
A quick fix to this problem is to treat pure continuation functions
arising from return clauses separately from pure continuation
functions arising from $\Let$-expressions.
%
Thus we can interpret the continuation as a sequence of triples
consisting of two pure continuation functions followed by an effect
continuation function, where the first pure continuation function
corresponds to the continuation induced by some $\Let$-expression and the
second corresponds to the return clause of the current handler.
%
To distinguish between the two kinds of pure continuations, we shall
write $\svhret$ for statically known pure continuations arising from
return clauses, and $\vhret$ for dynamically known ones. Similarly, we
write $\svhops$ and $\vhops$ for statically and dynamically known
effect continuations, respectively. With this notation in mind,
we may translate operation invocation and handler installation using
the new interpretation of the continuation representation as follows.
%
\begin{equations}
\cps{-} &:& \CompCat \to \SValCat^\ast \to \UCompCat \smallskip\\
\cps{\Do\;\ell\;V} &\defas& \slam \sk \scons \svhret \scons \svhops \scons \sks.
\reify\svhops \ba[t]{@{}l}
\dapp \Record{\ell, \Record{\cps{V}, \reify\svhops \dcons \reify\svhret \dcons \reify\sk \dcons \dnil}}\\
\dapp \reify \sks
\ea\smallskip\\
\cps{\ShallowHandle \; M \; \With \; H} &\defas&
\slam \sks . \cps{M} \sapp (\reflect\kid \scons \reflect\cps{\hret} \scons \reflect\cps{\hops}^\dagger \scons \sks) \medskip\\
\kid &\defas& \dlam x\, \dhk.\Let\; (\vhret \dcons \dhk') = \dhk \;\In\; \vhret \dapp x \dapp \dhk'
\end{equations}
%
The only change to the translation of operation invocation is the
extra bureaucracy induced by the additional pure continuation.
%
The translation of handler installation is a little more interesting
as it must make up an initial pure continuation in order to maintain
the sequence of triples interpretation of the continuation
structure. As the initial pure continuation we use the administrative
function $\kid$, which amounts to a dynamic variation of the
translation rule for the trivial computation term $\Return$: it
invokes the next pure continuation with whatever value it was
provided.
%
Although I will not demonstrate it here, the translation rules for
$\lambda$-abstractions, $\Lambda$-abstractions, and $\Let$-expressions
must also be adjusted accordingly to account for the extra
bureaucracy. The same is true for the translation of the
$\Return$-clause; thus, it is rather straightforward to adapt it to
the new continuation interpretation.
%
\begin{equations}
\cps{-} &:& \HandlerCat \to \UValCat\\
\cps{\{\Return \; x \mapsto N\}} &\defas& \dlam x\, \dhk.
\ba[t]{@{}l}
\Let\; (\_ \dcons \dk \dcons \vhret \dcons \vhops \dcons \dhk') = \dhk \;\In\\
\cps{N} \sapp (\reflect \dk \scons \reflect \vhret \scons \reflect \vhops \scons \reflect \dhk')
\ea
\end{equations}
%
As before, it discards its associated effect continuation, which is
the first element of the dynamic continuation $\dhk$. In addition, it
extracts the next continuation triple and reifies it as a static
continuation triple.
%
The interesting rule is the translation of operation clauses.
%
\begin{equations}
\cps{\{(\ell \; p \; r \mapsto N_\ell)_{\ell \in \mathcal{L}}\}}^\dagger
&\defas&
\bl
\dlam \Record{z,\Record{p,\dhkr}}\,\dhk.\\
\qquad\Case \;z\; \{
\ba[t]{@{}l@{}c@{~}l}
(&\ell &\mapsto
\ba[t]{@{}l}
\Let\;(\dk \dcons \vhret \dcons \vhops \dcons \dhk') = \dhk\;\In\\
\Let\;(\_ \dcons \_ \dcons \dhkr') = \dhkr \;\In\\
\Let\;r = \Res\,(\hid \dcons \rid \dcons \dhkr') \;\In \\
\cps{N_{\ell}} \sapp (\reflect\dk \scons \reflect\vhret \scons \reflect\vhops \scons \reflect \dhk'))_{\ell \in \mathcal{L}} \\
\ea \\
&y &\mapsto \hforward((y,p,\dhkr),\dhk) \} \\
\ea
\el \medskip\\
\hforward((y, p, \dhkr), \dhk) &\defas& \bl
\Let\; (\dk \dcons \vhret \dcons \vhops \dcons \dhk') = \dhk \;\In \\
\vhops \dapp \Record{y, \Record{p, \vhops \dcons \vhret \dcons \dk \dcons \dhkr}} \dapp \dhk' \\
\el \smallskip\\
\hid &\defas& \dlam\,\Record{z,\Record{p,\dhkr}}\,\dhk.\hforward((z,p,\dhkr),\dhk) \smallskip\\
\rid &\defas& \dlam x\, \dhk.\Let\; (\vhops \dcons \dk \dcons \dhk') = \dhk \;\In\; \dk \dapp x \dapp \dhk'
% \pcps{-} &:& \CompCat \to \UCompCat\\
% \pcps{M} &\defas& \cps{M} \sapp (\reflect \kid \scons \reflect (\dlam x\,\dhk.x) \scons \reflect (\dlam z\,ks.\Absurd~z) \scons \snil) \\
\end{equations}
%
The main difference between this translation rule and the translation
rule for deep handler operation clauses is the realisation of
resumptions.
%
Recall that a resumption is represented as a reversed slice of a
continuation. Thus the deconstruction of the resumption $\dhkr$
effectively ensures that the current handler gets properly
uninstalled. However, it presents a new problem as the remainder
$\dhkr'$ is not a well-formed continuation slice, because the top
element is a pure continuation without a handler.
%
To rectify this problem, we can insert a dummy identity handler
composed from $\hid$ and $\rid$. The effect continuation $\hid$
forwards every operation, and the pure continuation $\rid$ is an
identity clause. Thus every operation and the return value will
effectively be handled by the next handler.
%
Unfortunately, inserting such dummy handlers leads to memory
leaks.
%
\dhil{Give the counting example}
%
Instead of inserting dummy handlers, one may consider composing the
current pure continuation with the next pure continuation, meaning
that the current pure continuation would be handled accordingly by the
next handler. To retrieve the next pure continuation, we must reach
under the next handler, i.e. something along the lines of the
following.
%
\[
\bl
\Let\;(\_ \dcons \_ \dcons \dk \dcons \vhops \dcons \vhret \dcons \dk' \dcons rs') = rs\;\In\\
\Let\;r = \Res\,(\vhops \dcons \vhret \dcons (\dk' \circ \dk) \dcons rs')\;\In\;\dots
\el
\]
%
where $\circ$ is defined to be function composition in continuation
passing style.
%
\[
g \circ f \defas \lambda x\,\dhk.
\ba[t]{@{}l}
\Let\;(\dk \dcons \dhk') = \dhk\; \In\\
f \dapp x \dapp ((\lambda x\,\dhk. g \dapp x \dapp (\dk \dcons \dhk)) \dcons \dhk')
\ea
\]
%
This idea is cute, but it reintroduces a variation of the dynamic
redexes we dealt with in
Section~\ref{sec:first-order-explicit-resump}.
%
Both solutions sidestep the underlying issue, namely, that the
continuation structure is still too extensional. The need for a more
intensional structure is evident in the insertion of $\kid$ in the
translation of $\Handle$ as a placeholder for the empty pure
continuation. Another telltale sign is that the disparity between
continuation deconstructions grows. The continuation representation
needs more structure. While it is seductive to program with lists, it
quickly gets unwieldy.
\subsection{Effect handlers via generalised continuations}
\label{sec:cps-deep-shallow}
\begin{figure}
%
\textbf{Values}
%
\begin{equations}
\cps{-} &:& \ValCat \to \UValCat\\
\cps{x} &\defas& x\\
\cps{\lambda x.M} &\defas& \dlam x\,\dhk.\Let\;(\dk \dcons \dhk') = \dhk\;\In\;\cps{M} \sapp (\reflect\dk \scons \reflect \dhk') \\
\cps{\Lambda \alpha.M} &\defas& \dlam \Unit\,\dhk.\Let\;(\dk \dcons \dhk') = \dhk\;\In\;\cps{M} \sapp (\reflect\dk \scons \reflect \dhk') \\
\cps{\Rec\,g\,x.M} &\defas& \Rec\,g\,x\,\dhk.\Let\;(\dk \dcons \dhk') = \dhk\;\In\;\cps{M} \sapp (\reflect\dk \scons \reflect \dhk') \\
\multicolumn{3}{c}{
\cps{\Record{}} \defas \Record{}
\qquad
\cps{\Record{\ell = \!\!V; W}} \defas \Record{\ell = \!\cps{V}; \cps{W}}
\qquad
\cps{\ell\,V} \defas \ell\,\cps{V}
}
\end{equations}
%
\textbf{Computations}
%
\begin{equations}
\cps{-} &:& \CompCat \to \SValCat^\ast \to \UCompCat\\
\cps{V\,W} &\defas& \slam \shk.\cps{V} \dapp \cps{W} \dapp \reify \shk \\
\cps{V\,T} &\defas& \slam \shk.\cps{V} \dapp \Record{} \dapp \reify \shk \\
\cps{\Let\; \Record{\ell=x;y} = V \; \In \; N} &\defas& \slam \shk.\Let\; \Record{\ell=x;y} = \cps{V} \; \In \; \cps{N} \sapp \shk \\
\cps{\Case~V~\{\ell~x \mapsto M; y \mapsto N\}} &\defas&
\slam \shk.\Case~\cps{V}~\{\ell~x \mapsto \cps{M} \sapp \shk; y \mapsto \cps{N} \sapp \shk\} \\
\cps{\Absurd~V} &\defas& \slam \shk.\Absurd~\cps{V} \\
\end{equations}
\begin{equations}
\cps{\Return\,V} &\defas& \slam \shk.\kapp\;(\reify \shk)\;\cps{V} \\
\cps{\Let~x \revto M~\In~N} &\defas&
\bl\slam \sRecord{\shf, \sRecord{\svhret, \svhops}} \scons \shk.
\ba[t]{@{}l}
\cps{M} \sapp (\sRecord{\bl\reflect((\dlam x\,\dhk.\bl\Let\;(\dk \dcons \dhk') = \dhk\;\In\\
\cps{N} \sapp (\reflect\dk \scons \reflect \dhk')) \el\\
\dcons \reify\shf), \sRecord{\svhret, \svhops}} \scons \shk)\el
\ea
\el\\
\cps{\Do\;\ell\;V} &\defas&
\slam \sRecord{\shf, \sRecord{\svhret, \svhops}} \scons \shk.\,
\reify\svhops \bl\dapp \dRecord{\ell,\dRecord{\cps{V}, \dRecord{\reify \shf, \dRecord{\reify\svhret, \reify\svhops}} \dcons \dnil}}\\
\dapp \reify \shk\el \\
\cps{\Handle^\depth \, M \; \With \; H} &\defas&
\slam \shk . \cps{M} \sapp (\sRecord{\snil, \sRecord{\reflect \cps{\hret}, \reflect \cps{\hops}^\depth}} \scons \shk) \\
\end{equations}
%
\textbf{Handler definitions}
%
\begin{equations}
\cps{-} &:& \HandlerCat \to \UValCat\\
% \cps{H}^\depth &=& \sRecord{\reflect \cps{\hret}, \reflect \cps{\hops}^\depth}\\
\cps{\{\Return \; x \mapsto N\}} &\defas& \dlam x\,\dhk.\Let\;(\dk \dcons \dhk') = \dhk\;\In\;\cps{N} \sapp (\reflect\dk \scons \reflect \dhk') \\
\cps{\{(\ell \; p \; r \mapsto N_\ell)_{\ell \in \mathcal{L}}\}}^\depth
&\defas&
\dlam \dRecord{z,\dRecord{p,\dhkr}}\,\dhk.
\Case \;z\; \{
\ba[t]{@{}l@{}c@{~}l}
(&\ell &\mapsto
\ba[t]{@{}l}
\Let\;r=\Res^\depth\,\dhkr\;\In\; \\
\Let\;(\dk \dcons \dhk') = \dhk\;\In\\
\cps{N_{\ell}} \sapp (\reflect\dk \scons \reflect \dhk'))_{\ell \in \mathcal{L}}
\ea\\
&y &\mapsto \hforward((y, p, \dhkr), \dhk) \} \\
\ea \\
\hforward((y, p, \dhkr), \dhk) &\defas& \bl
\Let\; \dRecord{fs, \dRecord{\vhret, \vhops}} \dcons \dhk' = \dhk \;\In \\
% \Let\; rk' = \dRecord{fs, \dRecord{\vhret, \vhops}} \dcons \dhkr\;\In\\
\vhops \dapp \dRecord{y,\dRecord{p, \dRecord{fs, \dRecord{\vhret, \vhops}} \dcons \dhkr}} \dapp \dhk' \\
\el
\end{equations}
\textbf{Top-level program}
%
\begin{equations}
\pcps{-} &:& \CompCat \to \UCompCat\\
\pcps{M} &\defas& \cps{M} \sapp (\sRecord{\snil, \sRecord{\reflect \dlam x\,\dhk. x, \reflect \dlam \dRecord{z,\dRecord{p,rk}}\,\dhk.\Absurd~z}} \scons \snil) \\
\end{equations}
%
\caption{Adjustments to the higher-order uncurried CPS translation.}
\label{fig:cps-higher-order-uncurried}
\end{figure}
%
\section{Related work}
\label{sec:cps-related-work}
5 years ago
\paragraph{Plotkin's colon translation}
5 years ago
The traditional method for proving the correctness of a CPS
translation is by way of a simulation result. Simulation states that
every reduction sequence in a given source program is mimicked by its
CPS transformation.
%
Static administrative redexes in the image of a CPS translation
provide hurdles for proving simulation, since these redexes do not
arise in the source program.
%
\citet{Plotkin75} uses the so-called \emph{colon translation} to
overcome static administrative reductions.
%
5 years ago
Informally, it is defined such that given some source term $M$ and
some continuation $k$, the term $M : k$ is the result of
performing all static administrative reductions on $\cps{M}\,k$, that
is to say $\cps{M}\,k \areducesto^* M : k$.
%
Thus this translation makes it possible to bypass administrative
reductions and instead focus on the reductions inherited from the
source program.
%
The colon translation captures precisely the intuition that drives CPS
transforms, namely, that if in the source $M \reducesto^\ast \Return\;V$
then in the image $\cps{M}\,k \reducesto^\ast k\,\cps{V}$.
% CPS The colon translation captures the
% intuition tThe colon translation is itself a CPS translation which
% yields
5 years ago
% In his seminal work, \citet{Plotkin75} devises CPS translations for
% call-by-value lambda calculus into call-by-name lambda calculus and
% vice versa. \citeauthor{Plotkin75} establishes the correctness of his
% translations by way of simulations, which is to say that every
% reduction sequence in a given source program is mimicked by the
% transformed program.
% %
% His translations generate static administrative redexes, and as argued
% previously in this chapter from a practical view point this is an
% undesirable property in practice. However, it is also an undesirable
% property from a theoretical view point as the presence of
% administrative redexes interferes with the simulation proofs.
% To handle the static administrative redexes, \citeauthor{Plotkin75}
% introduced the so-called \emph{colon translation} to bypass static
% administrative reductions, thus providing a means for focusing on
% reductions induced by abstractions inherited from the source program.
% %
% The colon translation is itself a CPS translation, that given a source
% expression, $e$, and some continuation, $K$, produces a CPS term such
% that $\cps{e}K \reducesto e : K$.
% \citet{DanvyN03} used this insight to devise a one-pass CPS
% translation that contracts all administrative redexes at translation
% time.
5 years ago
\paragraph{Iterated CPS transform}
5 years ago
\paragraph{Partial evaluation}
6 years ago
\chapter{Abstract machine semantics}
\dhil{The text in this chapter needs to be reworked}
In this chapter we develop an abstract machine that supports deep and
shallow handlers \emph{simultaneously}, using the generalised
continuation structure we identified in the previous section for the
CPS translation. We also build upon prior work~\citep{HillerstromL16}
that developed an abstract machine for deep handlers by generalising
the continuation structure of a CEK machine (Control, Environment,
Kontinuation)~\citep{FelleisenF86}.
%
% \citet{HillerstromL16} sketched an adaptation for shallow handlers. It
% turns out that this adaptation had a subtle flaw, similar to the flaw
% in the sketched implementation of a CPS translation for shallow
% handlers given by \citet{HillerstromLAS17}. We fix the flaw here with
% a full development of shallow handlers along with a statement of the
% correctness property.
\section{Syntax and semantics}
The abstract machine syntax is given in
Figure~\ref{fig:abstract-machine-syntax}.
% A machine continuation is a list of handler frames. A handler frame is
% a pair of a \emph{handler closure} (handler definition) and a
% \emph{pure continuation} (a sequence of let bindings), analogous to
% the structured frames used in the CPS translation in
% \Sec\ref{sec:higher-order-uncurried-cps}.
% %
% Handling an operation amounts to searching through the continuation
% for a matching handler.
% %
% The resumption is constructed during the search by reifying each
% handler frame. As in the CPS translation, the resumption is assembled
% in one of two ways depending on whether the matching handler is deep
% or shallow.
% %
% For a deep handler, the current handler closure is included, and a deep
% resumption is a reified continuation.
% %
% An invocation of a deep resumption amounts to concatenating it with
% the current machine continuation.
% %
% For a shallow handler, the current handler closure must be discarded
% leaving behind a dangling pure continuation, and a shallow resumption
% is a pair of this pure continuation and the remaining reified
% continuation.
% %
% (By contrast, the prior flawed adaptation prematurely precomposed the
% pure continuation with the outer handler in the current resumption.)
% %
% An invocation of a shallow resumption again amounts to concatenating
% it with the current machine continuation, but taking care to
% concatenate the dangling pure continuation with that of the next
% frame.
%
\begin{figure}[t]
\flushleft
\begin{syntax}
\slab{Configurations} & \conf &::= & \cek{M \mid \env \mid \shk \circ \shk'} \\
\slab{Value environments} &\env &::= & \emptyset \mid \env[x \mapsto v] \\
\slab{Values} &v, w &::= & (\env, \lambda x^A . M) \mid (\env, \Lambda \alpha^K . M) \\
& &\mid& \Record{} \mid \Record{\ell = v; w} \mid (\ell\, v)^R \mid \shk^A \mid (\shk, \slk)^A \medskip\\
% \end{syntax}
% \begin{displaymath}
% \ba{@{}l@{~~}r@{~}c@{~}l@{\quad}l@{~~}r@{~}c@{~}l@{}}
% \slab{Continuations} &\shk &::= & \nil \mid \shf \cons \shk & \slab{Continuation frames} &\shf &::= & (\slk, \chi) \\
% & & & & \slab{Handler closures} &\chi &::= & (\env, H)^\depth \smallskip \\
% \slab{Pure continuations} &\slk &::= & \nil \mid \slf \cons \slk & \slab{Pure continuation frames} &\slf &::= & (\env, x, N) \\
% \ea
% \end{displaymath}
% \begin{syntax}
\slab{Continuations} &\shk &::= & \nil \mid \shf \cons \shk \\
\slab{Continuation frames} &\shf &::= & (\slk, \chi) \\
\slab{Pure continuations} &\slk &::= & \nil \mid \slf \cons \slk \\
\slab{Pure continuation frames} &\slf &::= & (\env, x, N) \\
\slab{Handler closures} &\chi &::= & (\env, H) \mid (\env, H)^\dagger \medskip \\
\end{syntax}
\caption{Abstract machine syntax.}
\label{fig:abstract-machine-syntax}
\end{figure}
%% A CEK machine~\citep{FelleisenF86} operates on configurations, which
%% are (Control, Environment, Continuation) triples.
%% %
% Our machine, like Hillerström and Lindley's, generalises the usual
% notion of continuation to accommodate handlers.
%
%
\begin{figure}[p]
\dhil{Fix figure formatting}
\begin{minipage}{0.90\textheight}%
%% Identity continuation
%% \[
%% \shk_0 = [(\nil, (\emptyset, \{\Return\;x \mapsto x\}))]
%% \]
\textbf{Transition function}
\begin{displaymath}
\bl
\ba{@{}l@{~}r@{~}c@{~}l@{\quad}l@{}}
\mlab{Init} & \multicolumn{4}{@{}c@{}}{M \stepsto \cek{M \mid \emptyset \mid [(\nil, (\emptyset, \{\Return\;x \mapsto \Return\;x\}))]}} \\[1ex]
% App
\mlab{AppClosure} & \cek{ V\;W \mid \env \mid \shk}
&\stepsto& \cek{ M \mid \env'[x \mapsto \val{W}{\env}] \mid \shk},
&\text{if }\val{V}{\env} = (\env', \lambda x^A.M) \\
\mlab{AppRec} & \cek{ V\;W \mid \env \mid \shk}
&\stepsto& \cek{ M \mid \env'[g \mapsto (\env', \Rec\,g^{A \to C}\,x.M), x \mapsto \val{W}{\env}] \mid \shk},
&\text{if }\val{V}{\env} = (\env', \Rec\,g^{A \to C}\,x.M) \\
% App - continuation
\mlab{AppCont} & \cek{ V\;W \mid \env \mid \shk}
&\stepsto& \cek{ \Return \; W \mid \env \mid \shk' \concat \shk},
&\text{if }\val{V}{\env} = (\shk')^A \\
\mlab{AppCont^\dagger} & \cek{ V\,W \mid \env \mid (\slk, \chi) \cons \shk}
&\stepsto&
\cek{\Return\; W \mid \env \mid \shk' \concat ((\slk' \concat \slk, \chi) \cons \shk)},
&\text{if } \val{V}{\env} = (\shk', \slk')^A \\
% TyApp
\mlab{AppType} & \cek{ V\,T \mid \env \mid \shk}
&\stepsto& \cek{ M[T/\alpha] \mid \env' \mid \shk},
&\text{if }\val{V}{\env} = (\env', \Lambda \alpha^K . \, M) \\[1ex]
\ea \\
\ba{@{}l@{}r@{~}c@{~}l@{\quad}l@{}}
\mlab{Split} & \cek{ \Let \; \Record{\ell = x;y} = V \; \In \; N \mid \env \mid \shk}
&\stepsto& \cek{ N \mid \env[x \mapsto v, y \mapsto w] \mid \shk},
& \text {if }\val{V}{\env} = \Record{\ell=v; w} \\
% Case
\mlab{Case} & \cek{ \Case\; V\, \{ \ell~x \mapsto M; y \mapsto N\} \mid \env \mid \shk}
&\stepsto& \left\{\ba{@{}l@{}}
\cek{ M \mid \env[x \mapsto v] \mid \shk}, \\
\cek{ N \mid \env[y \mapsto \ell'\, v] \mid \shk}, \\
\ea \right.
&
\ba{@{}l@{}}
\text{if }\val{V}{\env} = \ell\, v \\
\text{if }\val{V}{\env} = \ell'\, v \text{ and } \ell \neq \ell' \\
\ea \\[2ex]
% Let - eval M
\mlab{Let} & \cek{ \Let \; x \revto M \; \In \; N \mid \env \mid (\slk, \chi) \cons \shk}
&\stepsto& \cek{ M \mid \env \mid ((\env,x,N) \cons \slk, \chi) \cons \shk} \\
% Handle
\mlab{Handle} & \cek{ \Handle^\depth \, M \; \With \; H \mid \env \mid \shk}
&\stepsto& \cek{ M \mid \env \mid (\nil, (\env, H)^\depth) \cons \shk} \\[1ex]
% Return - let binding
\mlab{RetCont} &\cek{ \Return \; V \mid \env \mid ((\env',x,N) \cons \slk, \chi) \cons \shk}
&\stepsto& \cek{ N \mid \env'[x \mapsto \val{V}{\env}] \mid (\slk, \chi) \cons \shk} \\
% Return - handler
\mlab{RetHandler} & \cek{ \Return \; V \mid \env \mid (\nil, (\env',H)) \cons \shk}
&\stepsto& \cek{ M \mid \env'[x \mapsto \val{V}{\env}] \mid \shk},
& \text{if } \hret = \{\Return\; x \mapsto M\} \\
\mlab{RetTop} & \cek{\Return\;V \mid \env \mid \nil} &\stepsto& \val{V}{\env} \\[1ex]
% Deep
\mlab{Do} & \cek{ (\Do \; \ell \; V)^E \mid \env \mid ((\slk, (\env', H)) \cons \shk) \circ \shk'}
&\stepsto& \multicolumn{2}{@{}l@{}}{\cek{M \mid \env'[x \mapsto \val{V}{\env},
r \mapsto (\shk' \concat [(\slk, (\env', H))])^B] \mid \shk},} \\
&&& \multicolumn{2}{@{}r@{}}{\text{if } \ell : A \to B \in E \text{ and } \hell = \{\ell\; x \; r \mapsto M\}} \\
% Shallow
\mlab{Do^\dagger} & \cek{ (\Do \; \ell \; V)^E \mid \env \mid ((\slk, (\env', H)^\dagger) \cons \shk) \circ \shk'} &\stepsto&
\multicolumn{2}{@{}l@{}}{\cek{M \mid \env'[x \mapsto \val{V}{\env}, r \mapsto (\shk', \slk)^B] \mid \shk},}\\
&&&\multicolumn{2}{@{}r@{}}{\text{if } \ell : A \to B \in E \text{ and } \hell = \{\ell\; x \; r \mapsto M\}} \\
% Forward
\mlab{Forward} & \cek{ (\Do \; \ell \; V)^E \mid \env \mid ((\slk, (\env', H)^\depth) \cons \shk) \circ \shk'}
&\stepsto& \cek{ (\Do \; \ell \; V)^E \mid \env \mid \shk \circ (\shk' \concat [(\slk, (\env', H)^\depth)])},
& \text{if } \hell = \emptyset \\
\ea \\
\el
\end{displaymath}
\textbf{Value interpretation}
\begin{displaymath}
\ba{@{}r@{~}c@{~}l@{}}
\val{x}{\env} &=& \env(x) \\
\val{\Record{}}{\env} &=& \Record{} \\
\ea
\qquad
\ba{@{}r@{~}c@{~}l@{}}
\val{\lambda x^A.M}{\env} &=& (\env, \lambda x^A.M) \\
\val{\Record{\ell = V; W}}{\env} &=& \Record{\ell = \val{V}{\env}; \val{W}{\env}} \\
\ea
\qquad
\ba{@{}r@{~}c@{~}l@{}}
\val{\Lambda \alpha^K.M}{\env} &=& (\env, \Lambda \alpha^K.M) \\
\val{(\ell\, V)^R}{\env} &=& (\ell\; \val{V}{\env})^R \\
\ea
\qquad
\val{\Rec\,g^{A \to C}\,x.M}{\env} = (\env, \Rec\,g^{A \to C}\,x.M) \\
\end{displaymath}
\caption{Abstract machine semantics.}
\label{fig:abstract-machine-semantics}
\end{minipage}
\end{figure}
%
%
A configuration $\conf = \cek{M \mid \env \mid \shk \circ \shk'}$
of our abstract machine is a quadruple of a computation term ($M$), an
environment ($\env$) mapping free variables to values, and two
continuations ($\shk$) and ($\shk'$).
%
The latter continuation is always the identity, except when forwarding
an operation, in which case it is used to keep track of the extent to
which the operation has been forwarded.
%
We write $\cek{M \mid \env \mid \shk}$ as syntactic sugar for $\cek{M
\mid \env \mid \shk \circ []}$ where $[]$ is the identity
continuation.
%
%% Our continuations differ from the standard machine. On the one hand,
%% they are somewhat simplified, due to our strict separation between
%% computations and values. On the other hand, they have considerably
%% more structure in order to accommodate effects and handlers.
Values consist of function closures, type function closures, records,
variants, and captured continuations.
%
A continuation $\shk$ is a stack of frames $[\shf_1, \dots,
\shf_n]$. We annotate captured continuations with input types in order
to make the results of Section~\ref{subsec:machine-correctness} easier
to state. Each frame $\shf = (\slk, \chi)$ represents pure
continuation $\slk$, corresponding to a sequence of let bindings,
inside handler closure $\chi$.
%
A pure continuation is a stack of pure frames. A pure frame $(\env, x,
N)$ closes a let-binding $\Let \;x=[~] \;\In\;N$ over environment
$\env$. A handler closure $(\env, H)$ closes a handler definition $H$
over environment $\env$.
%
We write $\nil$ for an empty stack, $x \cons s$ for the result of
pushing $x$ on top of stack $s$, and $s \concat s'$ for the
concatenation of stack $s$ on top of $s'$. We use pattern matching to
deconstruct stacks.
The abstract machine semantics defining the transition function $\stepsto$ is given in
Fig.~\ref{fig:abstract-machine-semantics}.
%
It depends on an interpretation function $\val{-}$ for values.
%
The machine is initialised (\mlab{Init}) by placing a term in a
configuration alongside the empty environment and identity
continuation.
%
The rules (\mlab{AppClosure}), (\mlab{AppRec}), (\mlab{AppCont}),
(\mlab{AppCont^\dagger}), (\mlab{AppType}), (\mlab{Split}), and
(\mlab{Case}) enact the elimination of values.
%
The rules (\mlab{Let}) and (\mlab{Handle}) extend the current
continuation with let bindings and handlers respectively.
%
The rule (\mlab{RetCont}) binds a returned value if there is a pure
continuation in the current continuation frame;
%
(\mlab{RetHandler}) invokes the return clause of a handler if the pure
continuation is empty; and (\mlab{RetTop}) returns a final value if
the continuation is empty.
%
%% The rule (\mlab{Op}) switches to a special four place configuration in
%% order to handle an operation. The fourth component of the
%% configuration is an auxiliary forwarding continuation, which keeps
%% track of the continuation frames through which the operation has been
%% forwarded. It is initialised to be empty.
%% %
The rule (\mlab{Do}) applies the current handler to an operation if
the label matches one of the operation clauses. The captured
continuation is assigned the forwarding continuation with the current
frame appended to the end of it.
%
The rule (\mlab{Do^\dagger}) is much like (\mlab{Do}), except it
constructs a shallow resumption, discarding the current handler but
keeping the current pure continuation.
%
The rule (\mlab{Forward}) appends the current continuation
frame onto the end of the forwarding continuation.
%
The (\mlab{Init}) rule provides a canonical way to map a computation
term onto a configuration.
\newcommand{\Chiid}{\ensuremath{\Chi_{\text{id}}}}
\newcommand{\kappaid}{\ensuremath{\kappa_{\text{id}}}}
\paragraph{Example} To make the transition rules in
Figure~\ref{fig:abstract-machine-semantics} concrete we give an
example of the abstract machine in action. We reuse the small producer
and consumer from Section~\ref{sec:shallow-handlers-tutorial}. We
reproduce their definitions here in ANF.
%
\[
\bl
\Ones \defas \Rec\;ones~\Unit. \Do\; \Yield~1; ones~\Unit \\
\AddTwo \defas
\lambda \Unit.
\Let\; x \revto \Do\;\Await~\Unit \;\In\;
\Let\; y \revto \Do\;\Await~\Unit \;\In\;
x + y \\
\el
\]%
%
Let $N_x$ denote the term $\Let\;x \revto \Do\;\Await~\Unit\;\In\;N_y$
and $N_y$ the term $\Let\;y \revto \Do\;\Await~\Unit\;\In\;x+y$.
%
%% For clarity, we annotate each bound variable $resume$ with a subscript
%% $\Await$ or $\Yield$ according to whether it was captured by a
%% consumer or a producer.
%
Suppose $\Ones$, $\AddTwo$, $\Pipe$, and $\Copipe$ are bound in
$\env_\top$. Furthermore, let $H_\Pipe$ and $H_\Copipe$ denote the
pipe and copipe handler definitions. The machine begins by applying
$\Pipe$.
%
\begin{derivation}
&\cek{\Pipe~\Record{\Ones, \AddTwo} \mid \env_\top \mid \kappaid}\\
\stepsto& \reason{apply $\Pipe$}\\
&\cek{\ShallowHandle\;c~\Unit\;\With\;H_\Pipe \mid \env_\top[c \mapsto (\emptyset, \AddTwo), p \mapsto (\emptyset, \Ones)] \mid \kappaid}\\
\stepsto& \reason{install $H_\Pipe$ with $\env_\Pipe = \env_\top[c \mapsto (\emptyset, \AddTwo), p \mapsto (\emptyset, \Ones)]$}\\
&\cek{c~\Unit \mid \env_\Pipe \mid (\nil, (\env_\Pipe, H_\Pipe)) \cons \kappaid}\\
\stepsto& \reason{apply $c$ and $\val{c}\env_\Pipe = (\emptyset, \AddTwo)$}\\
&\cek{N_x \mid \emptyset \mid (\nil, (\env_\Pipe, H_\Pipe)) \cons \kappaid}\\
\stepsto& \reason{focus on left operand}\\
&\cek{\Do\;\Await~\Unit \mid \emptyset \mid ([(\emptyset, x, N_y)], (\env_\Pipe, H_\Pipe)) \cons \kappaid}\\
\stepsto& \reason{shallow continuation capture $v_\Await = (\nil, [(\emptyset, x, N_y)])$}\\
&\cek{\Copipe~\Record{resume, p} \mid \env_\Pipe[resume \mapsto v_\Await] \mid \kappaid}\\
\end{derivation}
%
The invocation of $\Await$ begins a search through the machine
continuation to locate a matching handler. In this instance the
top-most handler $H_\Pipe$ handles $\Await$. The complete shallow
resumption consists of an empty continuation and a singleton pure
continuation. The former is empty as $H_\Pipe$ is a shallow handler,
meaning that it is discarded.
Evaluation continues by applying $\Copipe$.
%
\begin{derivation}
\stepsto& \reason{apply $\Copipe$}\\
&\cek{\ShallowHandle\; p~\Unit \;\With\;H_\Copipe \mid \env_\top[c \mapsto v_\Await, p \mapsto (\emptyset, \Ones)] \mid \kappaid}\\
\stepsto& \reason{install $H_\Copipe$ with $\env_\Copipe = \env_\top[c \mapsto v_\Await, p \mapsto (\emptyset, \Ones)]$}\\
&\cek{p~\Unit \mid \env_\Copipe \mid (\nil, (\env_\Copipe, H_\Copipe)) \cons \kappaid}\\
\stepsto^2& \reason{apply $p$, $\val{p}\env_\Copipe = (\emptyset, \Ones)$, and $\env_{\Ones} = \emptyset[ones \mapsto (\emptyset, \Ones)]$}\\
% &\cek{\Do\;\Yield~1;~ones~\Unit \mid \env_{ones} \mid (\nil, (\env_\Copipe, H_\Copipe)) \cons \kappaid}\\
% \stepsto^2& \reason{focus on $\Yield$}\\
&\cek{\Do\;\Yield~1 \mid \env_{\Ones} \mid ([(\env_{\Ones},\_,ones~\Unit)], (\env_\Copipe, H_\Copipe)) \cons \kappaid}\\
\stepsto& \reason{shallow continuation capture $v_\Yield = (\nil, [(\env_{\Ones}, \_, ones~\Unit)])$}\\
&\cek{\Pipe~\Record{resume, \lambda\Unit.c~s} \mid \env_\Copipe[s \mapsto 1,resume \mapsto v_\Yield] \mid \kappaid}
\end{derivation}
%
At this point the situation is similar to before: the invocation of
$\Yield$ causes the continuation to be unwound in order to find an
appropriate handler, which happens to be $H_\Copipe$. Next $\Pipe$ is
applied.
%
\begin{derivation}
\stepsto& \reason{apply $\Pipe$ and $\env_\Pipe' = \env_\top[c \mapsto (\env_\Copipe[c \mapsto v_\Await, s\mapsto 1], \lambda\Unit.c~s), p \mapsto v_\Yield]$}\\
&\cek{\ShallowHandle\;c~\Unit\;\With\; H_\Pipe \mid \env_\Pipe' \mid \kappaid}\\
\stepsto& \reason{install $H_\Pipe$}\\
&\cek{c~\Unit \mid \env_\Pipe' \mid (\nil, (\env_\Pipe', H_\Pipe)) \cons \kappaid}\\
\stepsto& \reason{apply $c$ and $\val{c}\env_\Pipe' = (\env_\Copipe[c \mapsto v_\Await, s\mapsto 1], \lambda\Unit.c~s)$}\\
&\cek{c~s \mid \env_\Copipe[c \mapsto v_\Await, s\mapsto 1] \mid (\nil, (\env_\Pipe', H_\Pipe)) \cons \kappaid}\\
\stepsto& \reason{shallow resume with $v_\Await = (\nil, [(\emptyset,x,N_y)])$}\\
&\cek{\Return\;1 \mid \env_\Pipe' \mid ([(\emptyset,x,N_y)], (\env_\Pipe', H_\Pipe)) \cons \kappaid}
\end{derivation}
%
Applying the resumption concatenates the first component (the
continuation) with the machine continuation. The second component
(pure continuation) gets concatenated with the pure continuation of
the top-most frame of the machine continuation. Thus in this
particular instance, the machine continuation is manipulated as
follows.
%
\[
\ba{@{~}l@{~}l}
&\nil \concat ([(\emptyset,x,N_y)] \concat \nil, (\env_\Pipe', H_\Pipe)) \cons \kappaid\\
=& ([(\emptyset,x,N_y)], (\env_\Pipe', H_\Pipe)) \cons \kappaid
\ea
\]
%
Because the control component contains the expression $\Return\;1$ and
the pure continuation is nonempty, the machine applies the pure
continuation.
\begin{derivation}
\stepsto& \reason{apply pure continuation, $\env_{\AddTwo} = \emptyset[x \mapsto 1]$}\\
&\cek{N_y \mid \env_{\AddTwo} \mid (\nil, (\env_\Pipe', H_\Pipe)) \cons \kappaid}\\
\stepsto& \reason{focus on right operand}\\
&\cek{\Do\;\Await~\Unit \mid \env_{\AddTwo} \mid ([(\env_{\AddTwo}, y, x + y)], (\env_\Pipe', H_\Pipe)) \cons \kappaid}\\
\stepsto^2& \reason{shallow continuation capture $w_\Await = (\nil, [(\env_{\AddTwo}, y, x + y)])$, apply $\Copipe$}\\
&\cek{\ShallowHandle\;p~\Unit \;\With\; H_\Copipe \mid \env_\top[c \mapsto w_\Await, p \mapsto v_\Yield] \mid \kappaid}\\
\stepsto& \reason{install $H_\Copipe$ with $\env_\Copipe = \env_\top[c \mapsto w_\Await, p \mapsto v_\Yield]$}\\
&\cek{p~\Unit \mid \env_\Copipe \mid (\nil, (\env_\Copipe, H_\Copipe)) \cons \kappaid}
\end{derivation}
%
The variable $p$ is bound to the shallow resumption $v_\Yield$, thus
invoking it will transfer control back to the $\Ones$ computation.
%
\begin{derivation}
\stepsto & \reason{shallow resume with $v_\Yield = (\nil, [(\env_{\Ones}, \_, ones~\Unit)])$}\\
&\cek{\Return\;\Unit \mid \env_\Copipe \mid ([(\env_{\Ones}, \_, ones~\Unit)], (\env_\Copipe, H_\Copipe)) \cons \kappaid}\\
\stepsto^3& \reason{apply pure continuation, apply $ones$, focus on $\Yield$}\\
&\cek{\Do\;\Yield~1 \mid \env_{\Ones} \mid ([(\env_{\Ones}, \_, ones~\Unit)], (\env_\Copipe, H_\Copipe)) \cons \kappaid}\\
\end{derivation}
%
At this stage the machine repeats the transitions from before: the
shallow continuation of $\Do\;\Yield~1$ is captured, control passes to
the $\Yield$ clause in $H_\Copipe$, which again invokes $\Pipe$ and
subsequently installs the $H_\Pipe$ handler with an environment
$\env_\Pipe''$. The handler runs the computation $c~\Unit$, where $c$
is an abstraction over the resumption $w_\Await$ applied to the
yielded value $1$.
%
\begin{derivation}
\stepsto^6 & \reason{by the above reasoning, shallow resume with $w_\Await = (\nil, [(\env_{\AddTwo}, y, x + y)])$}\\
&\cek{x + y \mid \env_{\AddTwo}[y \mapsto 1] \mid (\nil, (\env_\Pipe'', H_\Pipe)) \cons \kappaid}\\
\stepsto& \reason{$\val{x}\env_{\AddTwo}[y\mapsto 1] = 1$ and $\val{y}\env_{\AddTwo}[y\mapsto 1] = 1$}\\
&\cek{\Return\;2 \mid \env_{\AddTwo}[y \mapsto 1] \mid (\nil, (\env_\Pipe'', H_\Pipe)) \cons \kappaid}
\end{derivation}
%
Since the pure continuation is empty the $\Return$ clause of $H_\Pipe$
gets invoked with the value $2$. Afterwards the $\Return$ clause of the
identity continuation in $\kappaid$ is invoked, ultimately
transitioning to the following final configuration.
%
\begin{derivation}
\stepsto^2& \reason{by the above reasoning}\\
&\cek{\Return\;2 \mid \emptyset \mid \nil}
\end{derivation}
%
%\env_\top[c \mapsto (\env_\Copipe, \lambda\Unit.w_\Await~1),p \mapsto (\nil, [(\env_{\Ones}, \_, ones~\Unit)])]
\begin{figure}[t]
\flushleft
\newcommand{\contapp}[2]{#1 #2}
\newcommand{\contappp}[2]{#1(#2)}
%% \newcommand{\contapp}[2]{#1[#2]}
%% \newcommand{\contapp}[2]{#1\mathbin{@}#2}
%% \newcommand{\contappp}[2]{#1\mathbin{@}(#2)}
%
Configurations
\begin{displaymath}
\inv{\cek{M \mid \env \mid \shk \circ \shk'}} = \contappp{\inv{\shk' \concat \shk}}{\inv{M}\env}
= \contappp{\inv{\shk'}}{\contapp{\inv{\shk}}{\inv{M}\env}}
\end{displaymath}
Pure continuations
\begin{displaymath}
\contapp{\inv{[]}}{M} = M \qquad \contapp{\inv{((\env, x, N) \cons \slk)}}{M}
= \contappp{\inv{\slk}}{\Let\; x \revto M \;\In\; \inv{N}(\env \res \{x\})}
\end{displaymath}
%% \begin{equations}
%% \contapp{\inv{[]}}{M}
%% &=& M \\
%% \contapp{\inv{((\env, x, N) \cons \slk)}}{M}
%% &=& \contappp{\inv{\slk}}{\Let\; x \revto M \;\In\; \inv{N}(\env \res \{x\})} \\
%% \end{equations}
Continuations
\begin{displaymath}
\contapp{\inv{[]}}{M}
= M \qquad
\contapp{\inv{(\slk, \chi) \cons \shk}}{M}
= \contapp{\inv{\shk}}{(\contappp{\inv{\chi}}{\contappp{\inv{\slk}}{M}})}
\end{displaymath}
%% \begin{equations}
%% \contapp{\inv{[]}}{M}
%% &=& M \\
%% \contapp{\inv{(\slk, \chi) \cons \shk}}{M}
%% &=& \contapp{\inv{\shk}}{(\contappp{\inv{\chi}}{\contappp{\inv{\slk}}{M}})} \\
%% \end{equations}
Handler closures
\begin{displaymath}
\contapp{\inv{(\env, H)}^\depth}{M}
= \Handle^\depth\;M\;\With\;\inv{H}\env
\end{displaymath}
%% \begin{equations}
%% \contapp{\inv{(\env, H)}}{M}
%% &=& \Handle\;M\;\With\;\inv{H}\env \\
%% \contapp{\inv{(\env, H)^\dagger}}{M}
%% &=& \ShallowHandle\;M\;\With\;\inv{H}\env \\
%% \end{equations}
Computation terms
\begin{equations}
\inv{V\,W}\env &=& \inv{V}\env\,\inv{W}{\env} \\
\inv{V\,T}\env &=& \inv{V}\env\,T \\
\inv{\Let\;\Record{\ell = x; y} = V \;\In\;N}\env
&=& \Let\;\Record{\ell = x; y} =\inv{V}\env \;\In\; \inv{N}(\env \res \{x, y\}) \\
\inv{\Case\;V\,\{\ell\;x \mapsto M; y \mapsto N\}}\env
&=& \Case\;\inv{V}\env \,\{\ell\;x \mapsto \inv{M}(\env \res \{x\}); y \mapsto \inv{N}(\env \res \{y\})\} \\
\inv{\Return\;V}\env &=& \Return\;\inv{V}\env \\
\inv{\Let\;x \revto M \;\In\;N}\env
&=& \Let\;x \revto\inv{M}\env \;\In\; \inv{N}(\env \res \{x\}) \\
\inv{\Do\;\ell\;V}\env
&=& \Do\;\ell\;\inv{V}\env \\
\inv{\Handle^\depth\;M\;\With\;H}\env
&=& \Handle^\depth\;\inv{M}\env\;\With\;\inv{H}\env \\
\end{equations}
Handler definitions
\begin{equations}
\inv{\{\Return\;x \mapsto M\}}\env
&=& \{\Return\;x \mapsto \inv{M}(\env \res \{x\})\} \\
\inv{\{\ell\;x\;k \mapsto M\} \uplus H}\env
&=& \{\ell\;x\;k \mapsto \inv{M}(\env \res \{x, k\})\} \uplus \inv{H}\env \\
\end{equations}
Value terms and values
\begin{displaymath}
\ba{@{}c@{}}
\begin{eqs}
\inv{x}\env &=& \inv{v}, \quad \text{ if }\env(x) = v \\
\inv{x}\env &=& x, \quad \text{ if }x \notin dom(\env) \\
\inv{\lambda x^A.M}\env &=& \lambda x^A.\inv{M}(\env \res \{x\}) \\
\inv{\Lambda \alpha^K.M}\env &=& \Lambda \alpha^K.\inv{M}\env \\
\inv{\Record{}}\env &=& \Record{} \\
\inv{\Record{\ell=V; W}}\env &=& \Record{\ell=\inv{V}\env; \inv{W}\env} \\
\inv{(\ell\;V)^R}\env &=& (\ell\;\inv{V}\env)^R \\
\end{eqs}
\quad
\begin{eqs}
\inv{\shk^A} &=& \lambda x^A.\inv{\shk}(\Return\;x) \\
\inv{(\shk, \slk)^A} &=& \lambda x^A.\inv{\slk}(\inv{\shk}(\Return\;x)) \\
\inv{(\env, \lambda x^A.M)} &=& \lambda x^A.\inv{M}(\env \res \{x\}) \\
\inv{(\env, \Lambda \alpha^K.M)} &=& \Lambda \alpha^K.\inv{M}\env \\
\inv{\Record{}} &=& \Record{} \\
\inv{\Record{\ell=v; w}} &=& \Record{\ell=\inv{v}; \inv{w}} \\
\inv{(\ell\;v)^R} &=& (\ell\;\inv{v})^R \\
\end{eqs} \smallskip\\
\inv{\Rec\,g^{A \to C}\,x.M}\env = \Rec\,g^{A \to C}\,x.\inv{M}(\env \res \{g, x\})
= \inv{(\env, \Rec\,g^{A \to C}\,x.M)} \\
\ea
\end{displaymath}
\caption{Mapping from abstract machine configurations to terms.}
\label{fig:config-to-term}
\end{figure}
\paragraph{Remark} If the main continuation is empty then the machine gets
stuck. This occurs when an operation is unhandled, and the forwarding
continuation describes the succession of handlers that have failed to
handle the operation along with any pure continuations that were
encountered along the way.
%
Assuming the input is a well-typed closed computation term $\typc{}{M
: A}{E}$, the machine will either not terminate, return a value of
type $A$, or get stuck failing to handle an operation appearing in
$E$. We now make the correspondence between the operational semantics
and the abstract machine more precise.
\section{Correctness}
\label{subsec:machine-correctness}
Fig.~\ref{fig:config-to-term} defines an inverse mapping $\inv{-}$
from configurations to computation terms via a collection of mutually
recursive functions defined on configurations, continuations,
computation terms, handler definitions, value terms, and values.
%
We write $dom(\gamma)$ for the domain of $\gamma$ and $\gamma \res
\{x_1, \dots, x_n\}$ for the restriction of environment $\gamma$ to
$dom(\gamma) \setminus \{x_1, \dots, x_n\}$.
%
The $\inv{-}$ function enables us to classify the abstract machine
reduction rules by how they relate to the operational
semantics.
%
The rules (\mlab{Init}) and (\mlab{RetTop}) concern only initial
input and final output, neither a feature of the operational
semantics.
%
The rules (\mlab{AppCont^\depth}), (\mlab{Let}), (\mlab{Handle}),
and (\mlab{Forward}) are administrative in that $\inv{-}$ is invariant
under them.
%
This leaves $\beta$-rules (\mlab{AppClosure}), (\mlab{AppRec}),
(\mlab{AppType}), (\mlab{Split}), (\mlab{Case}), (\mlab{RetCont}),
(\mlab{RetHandler}), (\mlab{Do}), and (\mlab{Do^\dagger}),
each of which corresponds directly to performing a reduction in the
operational semantics.
%
We write $\stepsto_a$ for administrative steps, $\stepsto_\beta$ for
$\beta$-steps, and $\Stepsto$ for a sequence of steps of the form
$\stepsto_a^* \stepsto_\beta$.
Each reduction in the operational semantics is simulated by a sequence
of administrative steps followed by a single $\beta$-step in the
abstract machine. The $Id$ handler (\S\ref{subsec:terms})
implements the top-level identity continuation.
%
\begin{theorem}[Simulation]
\label{lem:simulation}
If $M \reducesto N$, then for any $\conf$ such that $\inv{\conf} =
Id(M)$ there exists $\conf'$ such that $\conf \Stepsto \conf'$ and
$\inv{\conf'} = Id(N)$.
%% If $M \reducesto N$, then for any $\conf$ such that $\inv{\conf} = M$
%% there exists $\conf'$ such that $\conf \Stepsto \conf'$ and
%% $\inv{\conf'} = N$.
\end{theorem}
\begin{proof}
By induction on the derivation of $M \reducesto N$.
\end{proof}
\begin{corollary}
If $\typc{}{M : A}{E}$ and $M \reducesto^+ N \not\reducesto$, then $M
\stepsto^+ \conf$ with $\inv{\conf} = N$.
\end{corollary}
\part{Expressiveness}
\chapter{Computability, complexity, and expressiveness}
\label{ch:expressiveness}
\section{Notions of expressiveness}
Felleisen's macro-expressiveness, Longley's type-respecting
expressiveness, Kammar's typability-preserving expressiveness.
\section{Interdefinability of deep and shallow Handlers}
\section{Encoding parameterised handlers}
\chapter{The asymptotic power of control}
\label{ch:handlers-efficiency}
Describe the methodology\dots
\section{Generic search}
\section{Calculi}
\subsection{Base calculus}
\subsection{Handler calculus}
\section{A practical model of computation}
\subsection{Syntax}
\subsection{Semantics}
\subsection{Realisability}
\section{Points, predicates, and their models}
\section{Efficient generic search with effect handlers}
\subsection{Space complexity}
\section{Best-case complexity of generic search without control}
\subsection{No shortcuts}
\subsection{No sharing}
\chapter{Robustness of the asymptotic power of control}
\section{Mutable state}
\section{Exception handling}
\section{Effect system}
\part{Conclusions}
\chapter{Conclusions}
\label{ch:conclusions}
Some profound conclusions\dots
\chapter{Future Work}
\label{ch:future-work}
%%
%% Appendices
%%
% \appendix
%% If you want the bibliography single-spaced (which is allowed), uncomment
%% the next line.
%\nocite{*}
\singlespace
%\nocite{*}
%\printbibliography[heading=bibintoc]
\bibliographystyle{plainnat}
\bibliography{\jobname}
%% ... that's all, folks!
\end{document}