1
0
mirror of https://github.com/dhil/phd-dissertation synced 2026-04-28 15:06:29 +01:00

Compare commits

..

7 Commits

Author SHA1 Message Date
8cc91fa4b1 WIP 2021-05-20 18:03:38 +01:00
15125f206c Intro WIP 2021-05-20 17:49:38 +01:00
ef04653ad2 WIP 2021-05-20 14:41:25 +01:00
1ede601435 Mention equations 2021-05-20 11:22:22 +01:00
9e7c74ef0f Reword 2021-05-20 11:17:47 +01:00
d1a91fb197 Reword 2021-05-20 11:04:54 +01:00
76d9c00e19 Free monad section 2021-05-20 11:03:43 +01:00
3 changed files with 456 additions and 99 deletions

View File

@@ -356,6 +356,7 @@
\newcommand{\return}{\dec{Return}}
\newcommand{\faild}{\dec{withDefault}}
\newcommand{\Free}{\dec{Free}}
\newcommand{\FreeState}{\dec{FreeState}}
\newcommand{\OpF}{\dec{Op}}
\newcommand{\DoF}{\dec{do}}
\newcommand{\getF}{\dec{get}}

View File

@@ -195,6 +195,18 @@
year = {2001}
}
@inproceedings{PlotkinP02,
author = {Gordon D. Plotkin and
John Power},
title = {Notions of Computation Determine Monads},
booktitle = {FoSSaCS},
series = {Lecture Notes in Computer Science},
volume = {2303},
pages = {342--356},
publisher = {Springer},
year = {2002}
}
# Other algebraic effects and handlers
@inproceedings{Lindley14,
author = {Sam Lindley},
@@ -880,7 +892,14 @@
year = 1971
}
# Monad transformers
@phdthesis{Espinosa95,
author = {David A. Espinosa},
school = {Columbia University},
title = {Semantic Lego},
year = 1995,
address = {New York, USA}
}
# Hop.js
@inproceedings{SerranoP16,
@@ -1553,6 +1572,15 @@
year = {1996}
}
@inproceedings{Filinski99,
author = {Andrzej Filinski},
title = {Representing Layered Monads},
booktitle = {{POPL}},
pages = {175--188},
publisher = {{ACM}},
year = {1999}
}
# Landin's J operator
@article{Landin65,
author = {Peter J. Landin},
@@ -3253,6 +3281,20 @@
booktitle = {{TPDC}}
}
# Generators / iterators
@article{ShawWL77,
author = {Mary Shaw and
William A. Wulf and
Ralph L. London},
title = {Abstraction and Verification in {Alphard}: Defining and Specifying Iteration
and Generators},
journal = {Commun. {ACM}},
volume = {20},
number = {8},
pages = {553--564},
year = {1977}
}
# Fellowship of the Ring reference
@book{Tolkien54,
title = {The lord of the rings: Part 1: The fellowship of the ring},
@@ -3329,3 +3371,57 @@
pages = {299--314},
year = {1960}
}
# Effect systems
@inproceedings{GiffordL86,
author = {David K. Gifford and
John M. Lucassen},
title = {Integrating Functional and Imperative Programming},
booktitle = {{LISP} and Functional Programming},
pages = {28--38},
publisher = {{ACM}},
year = {1986}
}
@inproceedings{LucassenG88,
author = {John M. Lucassen and
David K. Gifford},
title = {Polymorphic Effect Systems},
booktitle = {{POPL}},
pages = {47--57},
publisher = {{ACM} Press},
year = {1988}
}
@inproceedings{TalpinJ92,
author = {Jean{-}Pierre Talpin and
Pierre Jouvelot},
title = {The Type and Effect Discipline},
booktitle = {{LICS}},
pages = {162--173},
publisher = {{IEEE} Computer Society},
year = {1992}
}
@article{TofteT97,
author = {Mads Tofte and
Jean{-}Pierre Talpin},
title = {Region-based Memory Management},
journal = {Inf. Comput.},
volume = {132},
number = {2},
pages = {109--176},
year = {1997}
}
@inproceedings{NielsonN99,
author = {Flemming Nielson and
Hanne Riis Nielson},
title = {Type and Effect Systems},
booktitle = {Correct System Design},
series = {Lecture Notes in Computer Science},
volume = {1710},
pages = {114--136},
publisher = {Springer},
year = {1999}
}

View File

@@ -335,69 +335,139 @@
%%
\chapter{Introduction}
\label{ch:introduction}
Control is an ample ingredient of virtually every programming
language. A programming language typically feature a variety of
Control is a pervasive phenomenon in virtually every programming
language. A programming language typically features a variety of
control constructs, which let the programmer manipulate the control
flow of programs in interesting ways. The most well-known control
construct may well be $\If\;V\;\Then\;M\;\Else\;N$, which
conditionally selects between two possible \emph{continuations} $M$
and $N$ depending on whether the condition $V$ is $\True$ or
$\False$. Another familiar control construct is function application
$\EC[(\lambda x.M)\,W]$, which evaluates some parameterised
continuation $M$ at value argument $W$ to normal form and subsequently
continues the current continuation induced by the invocation context
$\EC$.
and $N$ depending on whether the condition $V$ is $\True$ or $\False$.
%
At the time of writing the trendiest control construct happen to be
async/await, which is designed for direct-style asynchronous
programming~\cite{SymePL11}. It takes the form
$\async.\,\EC[\await\;M]$, where $\async$ delimits an asynchronous
context $\EC$ in which computations may be interleaved. The $\await$
primitive may be used to defer execution of the current continuation
until the result of the asynchronous computation $M$ is ready. Prior
to async/await the most fashionable control construct was coroutines,
which provide the programmer with a construct for performing non-local
transfers of control by suspending the current continuation on
demand~\cite{MouraI09}, e.g. in
$\keyw{co_0}.\,\EC_0[\keyw{suspend}];\keyw{co_1}.\,\EC_1[\Unit]$ the
two coroutines $\keyw{co_0}$ and $\keyw{co_1}$ work in tandem by
invoking suspend in order to hand over control to the other coroutine;
$\keyw{co_0}$ suspends the current continuation $\EC_0$ and transfers
control to $\keyw{co_1}$, which resume its continuation $\EC_1$ with
the unit value $\Unit$. The continuation $\EC_1$ may later suspend in
order to transfer control back to $\keyw{co_0}$ such that it can
resume execution of the continuation
$\EC_0$~\cite{AbadiP10}. Coroutines are amongst the oldest ideas of
the literature as they have been around since the dawn of
programming~\cite{DahlMN68,DahlDH72,Knuth97,MouraI09}. Nevertheless
coroutines frequently reappear in the literature in various guises.
The $\If$ construct offers no means for programmatic manipulation of
either continuation.
%
More intriguing forms of control exist, which enable the programmer to
manipulate and reify continuations as first-class data objects. This
kind of control is known as \emph{first-class control}.
The common denominator for the aforementioned control constructs is
that they are all second-class.
The idea of first-class control is old. It was conceived already
during the design of the programming language
Algol~\cite{BackusBGKMPRSVWWW60} (one of the early high-level
programming languages along with Fortran~\cite{BackusBBGHHNSSS57} and
Lisp~\cite{McCarthy60}) when \citet{Landin98} sought to model
unrestricted goto-style jumps using an extended $\lambda$-calculus.
%
Since then a wide variety of first-class control operators have
appeared. We can coarsely categorise them into two groups:
\emph{undelimited} and \emph{delimited} (in
Chapter~\ref{ch:continuations} we will perform a finer analysis of
first-class control). Undelimited control operators are global
phenomena that let programmers capture the entire control state of
their programs, whereas delimited control operators are local
phenomena that provide programmers with fine-grain control over which
parts of the control state to capture.
%
Thus there are good reasons for preferring delimited control over
undelimited control for practical programming.
%
% The most (in)famous control operator
% \emph{call-with-current-continuation} appeared later during a revision
% of the programming language Scheme~\cite{AbelsonHAKBOBPCRFRHSHW85}.
%
% Virtually every programming language is equipped with one or more
% control flow operators, which enable the programmer to manipulate the
% flow of control of programs in interesting ways. The most well-known
% control operator may well be $\If\;V\;\Then\;M\;\Else\;N$, which
Nevertheless, the ability to manipulate continuations programmatically
is incredibly powerful as it enables programmers to perform non-local
transfers of control on demand. This sort of power makes it
possible to implement a wealth of control idioms such as
coroutines~\cite{MouraI09}, generators/iterators~\cite{ShawWL77},
async/await~\cite{SymePL11} as user-definable
libraries~\cite{FriedmanHK84,FriedmanH85,Leijen17a,Leijen17,Pretnar15}. The
phenomenon of non-local transfer of control is known as a
\emph{control effect}. It turns out to be `the universal effect' in
the sense that it can simulate every other computational effect
(consult \citet{Filinski96} for a precise characterisation of what it
means to simulate an effect). More concretely, this means a
programming language equipped with first-class control is capable of
implementing effects such as exceptions, mutable state, transactional
memory, nondeterminism, concurrency, interactive input/output, stream
redirection, internally.
%
A whole programming paradigm known as \emph{effectful programming} is
built around the idea of simulating computational effects using
control effects.
In this dissertation I also advocate a new programming paradigm, which
I dub \emph{effect handler oriented programming}.
%
\dhil{This dissertation is about the operational foundations for
programming and implementing effect handlers, a particularly modular
and extensible programming abstraction for effectful programming}
% Control is an ample ingredient of virtually every programming
% language. A programming language typically feature a variety of
% control constructs, which let the programmer manipulate the control
% flow of programs in interesting ways. The most well-known control
% construct may well be $\If\;V\;\Then\;M\;\Else\;N$, which
% conditionally selects between two possible \emph{continuations} $M$
% and $N$ depending on whether the condition $V$ is $\True$ or $\False$.
% and $N$ depending on whether the condition $V$ is $\True$ or
% $\False$. Another familiar control construct is function application
% $\EC[(\lambda x.M)\,W]$, which evaluates some parameterised
% continuation $M$ at value argument $W$ to normal form and subsequently
% continues the current continuation induced by the invocation context
% $\EC$.
% %
% Another familiar operator is function application\dots
% At the time of writing the trendiest control construct happen to be
% async/await, which is designed for direct-style asynchronous
% programming~\cite{SymePL11}. It takes the form
% $\async.\,\EC[\await\;M]$, where $\async$ delimits an asynchronous
% context $\EC$ in which computations may be interleaved. The $\await$
% primitive may be used to defer execution of the current continuation
% until the result of the asynchronous computation $M$ is ready. Prior
% to async/await the most fashionable control construct was coroutines,
% which provide the programmer with a construct for performing non-local
% transfers of control by suspending the current continuation on
% demand~\cite{MouraI09}, e.g. in
% $\keyw{co_0}.\,\EC_0[\keyw{suspend}];\keyw{co_1}.\,\EC_1[\Unit]$ the
% two coroutines $\keyw{co_0}$ and $\keyw{co_1}$ work in tandem by
% invoking suspend in order to hand over control to the other coroutine;
% $\keyw{co_0}$ suspends the current continuation $\EC_0$ and transfers
% control to $\keyw{co_1}$, which resume its continuation $\EC_1$ with
% the unit value $\Unit$. The continuation $\EC_1$ may later suspend in
% order to transfer control back to $\keyw{co_0}$ such that it can
% resume execution of the continuation
% $\EC_0$~\cite{AbadiP10}. Coroutines are amongst the oldest ideas of
% the literature as they have been around since the dawn of
% programming~\cite{DahlMN68,DahlDH72,Knuth97,MouraI09}. Nevertheless
% coroutines frequently reappear in the literature in various guises.
Evidently, control is a pervasive phenomenon in programming. However,
not every control phenomenon is equal in terms of programmability and
expressiveness.
% The common denominator for the aforementioned control constructs is
% that they are all second-class.
\section{This thesis in a nutshell}
In this dissertation I am concerned only with
\citeauthor{PlotkinP09}'s deep handlers, their shallow variation, and
parameterised handlers which are a slight variation of deep handlers.
% % Virtually every programming language is equipped with one or more
% % control flow operators, which enable the programmer to manipulate the
% % flow of control of programs in interesting ways. The most well-known
% % control operator may well be $\If\;V\;\Then\;M\;\Else\;N$, which
% % conditionally selects between two possible \emph{continuations} $M$
% % and $N$ depending on whether the condition $V$ is $\True$ or $\False$.
% % %
% % Another familiar operator is function application\dots
% Evidently, control is a pervasive phenomenon in programming. However,
% not every control phenomenon is equal in terms of programmability and
% expressiveness.
% % \section{This thesis in a nutshell}
% % In this dissertation I am concerned only with
% % \citeauthor{PlotkinP09}'s deep handlers, their shallow variation, and
% % parameterised handlers which are a slight variation of deep handlers.
\section{Why first-class control matters}
From the perspective of programmers first-class control is a valuable
programming feature because it enables them to implement their own
control idioms as if they were native to the programming
language. More important with first-class control programmer-defined
language. More important, with first-class control programmer-defined
control idioms are local phenomena which can be encapsulated in a
library such that the rest of the program does not need to be made
aware of their existence. Conversely, without first-class control some
@@ -417,23 +487,25 @@ control mechanism rather than having to implement and maintain
individual runtime support for each control idiom of the source
language.
% From either perspective first-class control adds value to a
% programming language regardless of whether it is featured in the
% source language.
% \subsection{Flavours of control}
% \paragraph{Undelimited control}
% \paragraph{Delimited control}
% \paragraph{Composable control}
\subsection{Why effect handlers matter}
\dhil{Something about structured programming with delimited control}
\section{State of effectful programming}
Programming with effects can be done in basically three different
ways: 1) usage of builtin effects, 2) simulation via global program
transformations, or 3) simulation via control effects.
%
In this section I will provide a brief programming perspective on the
various approaches to programming with effects. We will look at each
approach through the lens of a singular effect, namely, global mutable
state.
In this section I will provide a brief programming perspective on
different approaches to programming with effects along with an
informal introduction to the related concepts. We will look at each
approach through the lens of global mutable state --- which is quite
possibly the most well-known effect.
% how
% effectful programming has evolved as well as providing an informal
@@ -454,8 +526,7 @@ state.
We can realise stateful behaviour by either using language-supported
state primitives, globally structure our program to follow a certain
style, or use first-class control in the form of delimited control to
simulate state. As for simulating state with control effects we
consider only delimited control, because undelimited control is
simulate state. We do not consider undelimited control, because it is
insufficient to express mutable state~\cite{FriedmanS00}.
% Programming in its infancy was effectful as the idea of first-class
% control was conceived already during the design of the programming
@@ -594,9 +665,9 @@ State initialisation is simply function application.
\]
%
Programming in state-passing style is laborious and no fun as it is
anti-modular, because effect-free higher-order functions to work with
stateful functions they too must be transformed or at the very least
be duplicated to be compatible with stateful function arguments.
anti-modular, because for effect-free higher-order functions to work
with stateful functions they too must be transformed or at the very
least be duplicated to be compatible with stateful function arguments.
%
Nevertheless, state-passing is an important technique as it is the
secret sauce that enables us to simulate mutable state with other
@@ -667,10 +738,10 @@ The body of $\getF$ applies $\shift$ to capture the current
continuation, which gets supplied to the anonymous function
$(\lambda k.\lambda st. k~st~st)$. The continuation parameter $k$ has
type $S \to S \to A \times S$. The continuation is applied to two
instances of the current state value $st$. The instance the value
returned to the caller of $\getF$, whilst the second instance is the
state value available during the next invocation of either $\getF$ or
$\putF$. This aligns with the intuition that accessing a state cell
instances of the current state value $st$. The first instance is the
value returned to the caller of $\getF$, whilst the second instance is
the state value available during the next invocation of either $\getF$
or $\putF$. This aligns with the intuition that accessing a state cell
does not modify its contents. The implementation of $\putF$ is
similar, except that the first argument to $k$ is the unit value,
because the caller of $\putF$ expects a unit in return. Also, it
@@ -734,10 +805,9 @@ The concept of monad has its origins in category theory and its
mathematical nature is well-understood~\cite{MacLane71,Borceux94}. The
emergence of monads as a programming abstraction began when
\citet{Moggi89,Moggi91} proposed to use monads as the basis for
denoting computational effects in denotational semantics such as
exceptions, state, nondeterminism, interactive input and
output. \citet{Wadler92,Wadler95} popularised monadic programming by
putting \citeauthor{Moggi89}'s proposal to use in functional
denoting computational effects in denotational
semantics. \citet{Wadler92,Wadler95} popularised monadic programming
by putting \citeauthor{Moggi89}'s proposal to use in functional
programming by demonstrating how monads increase the ease at which
programs may be retrofitted with computational effects.
%
@@ -745,10 +815,10 @@ In practical programming terms, monads may be thought of as
constituting a family of design patterns, where each pattern gives
rise to a distinct effect with its own operations.
%
The part of the appeal of monads is that they provide a structured
Part of the appeal of monads is that they provide a structured
interface for programming with effects such as state, exceptions,
nondeterminism, and so forth, whilst preserving the equational style
of reasoning about pure functional
nondeterminism, interactive input and output, and so forth, whilst
preserving the equational style of reasoning about pure functional
programs~\cite{GibbonsH11,Gibbons12}.
%
% Notably, they form the foundations for effectful programming in
@@ -758,7 +828,7 @@ programs~\cite{GibbonsH11,Gibbons12}.
The presentation of monads here is inspired by \citeauthor{Wadler92}'s
presentation of monads for functional programming~\cite{Wadler92}, and
it should be familiar to users of
it ought to be familiar to users of
Haskell~\cite{JonesABBBFHHHHJJLMPRRW99}.
\begin{definition}
@@ -785,7 +855,13 @@ Haskell~\cite{JonesABBBFHHHHJJLMPRRW99}.
\end{reductions}
\end{definition}
%
\dhil{Remark that the monad type $T~A$ may be read as a tainted value}
We may understand the type $T~A$ as inhabiting computations that
compute a \emph{tainted} value of type $A$. In this regard, we may
understand $T$ as denoting the taint involved in computing $A$,
i.e. we can think of $T$ as a sort of effect annotation which informs us
about which effectful operations the computation may perform to
produce $A$.
%
The monad interface may be instantiated in different ways to realise
different computational effects. In the following subsections we will
see three different instantiations with which we will implement global
@@ -799,8 +875,7 @@ efficient code for monadic programs.
The success of monads as a programming idiom is difficult to
understate as monads have given rise to several popular
control-oriented programming abstractions including the asynchronous
programming idiom async/await has its origins in monadic
programming~\cite{Claessen99,LiZ07,SymePL11}.
programming idiom async/await~\cite{Claessen99,LiZ07,SymePL11}.
% \subsection{Option monad}
% The $\Option$ type is a unary type constructor with two data
@@ -836,7 +911,7 @@ programming~\cite{Claessen99,LiZ07,SymePL11}.
%
The state monad is an instantiation of the monad interface that
encapsulates mutable state by using the state-passing technique
internally. In addition it equip the monad with two operations for
internally. In addition it equips the monad with two operations for
manipulating the state cell.
%
\begin{definition}\label{def:state-monad}
@@ -964,7 +1039,16 @@ As in J.R.R. Tolkien's fictitious Middle-earth~\cite{Tolkien54} there
exists one monad to rule them all, one monad to realise them, one
monad to subsume them all, and in the term language bind them. This
powerful monad is the \emph{continuation monad}.
%
The continuation monad may be regarded as `the universal monad' as it
can embed any other monad, and thereby simulate any computational
effect~\cite{Filinski99}. It derives its name from its connection to
continuation passing style~\cite{Wadler92}, which is a particular
style of programming where each function is parameterised by the
current continuation (we will discuss continuation passing style in
detail in Chapter~\ref{ch:cps}). The continuation monad is powerful
exactly because each of its operations has access to the current
continuation.
\begin{definition}\label{def:cont-monad}
@@ -982,7 +1066,7 @@ powerful monad is the \emph{continuation monad}.
\multirow{2}{*}{
\bl
\bind ~: T~A \to (A \to T~B) \to T~B\\
\bind ~\defas \lambda m.\lambda k.\lambda f.m~(\lambda x.k~x~f)
\bind ~\defas \lambda m.\lambda k.\lambda c.m\,(\lambda x.k~x~c)
\el} \\ & % space hack to avoid the next paragraph from
% floating into the math environment.
\ea
@@ -991,10 +1075,13 @@ powerful monad is the \emph{continuation monad}.
\end{definition}
%
The $\Return$ operation lifts a value into the monad by using it as an
argument to the continuation $k$. The bind operator applies the monad
$m$ to an anonymous function that accepts a value $x$ of type
$A$. This value $x$ is supplied to the immediate continuation $k$
alongside the current continuation $f$.
argument to the continuation $k$. The bind operator binds the current
continuation to $c$. In the body it applies the monad $m$ to an
anonymous continuation function of type $A \to T~B$. Internally, the
monad $m$ will apply this continuation when it is on the form
$\Return$. Thus the parameter $x$ gets bound to the $\Return$ value of
the monad. This parameter gets supplied as an argument to the next
monadic action $k$ alongside the current continuation $c$.
If we instantiate $R = S \to A \times S$ for some type $S$ then we
can implement the state monad inside the continuation monad.
@@ -1041,14 +1128,18 @@ continuation.
The initial continuation $(\lambda x.\lambda st. \Record{x;st})$
corresponds to the $\Return$ of the state monad.
%
As usual by fixing $S = \Int$ and $A = \Bool$, we can use this
particular continuation monad instance to interpret $\incrEven$.
By fixing $S = \Int$ and $A = \Bool$, we can use the continuation
monad to interpret $\incrEven$.
%
\[
\runState~\incrEven~4 \reducesto^+ \Record{\True;5} : \Bool \times \Int
\]
%
The continuation monad gives us a succinct framework for implementing
and programming with computational effects, however, it comes at the
expense of extensibility and modularity. Adding a new operation to the
monad may require modifying its internal structure, which entails a
complete reimplementation of any existing operations.
% Scheme's undelimited control operator $\Callcc$ is definable as a
% monadic operation on the continuation monad~\cite{Wadler92}.
% %
@@ -1090,7 +1181,7 @@ the trivial continuation $\Unit$.
The meaning of a free monadic computation is ascribed by a separate
function, or interpreter, that traverses the computation tree.
%
The shape of computation trees is captured by the following generic
type definition.
%
@@ -1113,7 +1204,8 @@ operation is another computation tree node.
$(F^{\TypeCat \to \TypeCat}, \Return, \bind)$ which forms a monad
with respect to $F$. In addition an adequate instance of $F$ must
supply a map, $\dec{fmap} : (A \to B) \to F~A \to F~B$, over its
structure.
structure (in more precise technical terms: $F$ must be a
\emph{functor}).
%
\[
\bl
@@ -1137,6 +1229,17 @@ operation is another computation tree node.
\el
\]
%
The $\Return$ operation simply reflects itself by injecting the
value $x$ into the computation tree as a leaf node. The bind
operator threads the continuation $k$ through the computation
tree. Upon encountering a leaf node the continuation gets applied to
the value of the node. Note how this is reminiscent of the
$\Return$ of the continuation monad. The bind operator works in
tandem with the $\fmap$ of $F$ to advance past $\OpF$ nodes. The
$\fmap$ function is responsible for applying its functional
argument to the next computation tree node which is embedded inside
$y$.
%
We define an auxiliary function to alleviate some of the
boilerplate involved with performing operations on the monad.
%
@@ -1147,56 +1250,213 @@ operation is another computation tree node.
\el
\]
%
This function injects some operation $op$ into the computation tree
as an operation node.
\end{definition}
%
In order to implement state with the free monad we must first declare
a signature of its operations and implement the required $\fmap$ for
the signature.
%
\[
\bl
\dec{FreeState}~S~R \defas [\Get:S \to R|\Put:\Record{S;R}] \smallskip\\
\FreeState~S~R \defas [\Get:S \to R|\Put:S \times (\UnitType \to R)] \smallskip\\
\fmap : (A \to B) \to \FreeState~S~A \to \FreeState~S~B\\
\fmap~f~op \defas \Case\;op\;\{
\ba[t]{@{}l@{~}c@{~}l}
\Get~k &\mapsto& \Get\,(\lambda st.f\,(k~st));\\
\Put~st~k &\mapsto& \Put\,\Record{st;f~k}\}
\Put\,\Record{st';k} &\mapsto& \Put\,\Record{st';\lambda\Unit.f\,(k\,\Unit)}\}
\ea
\el
\]
%
The signature $\FreeState$ declares the two stateful operations $\Get$
and $\Put$ over state type $S$ and continuation type $R$. The $\Get$
tag is parameterised by a continuation function of type $S \to R$. The
idea is that an application of this function provides access to the
current state, whilst computing the next node of the computation
tree. The $\Put$ operation is parameterised by the new state value and
a thunk, which computes the next computation tree node. The $\fmap$
instance applies the function $f$ to the continuation $k$ of each
operation.
%
By instantiating $F = \FreeState\,S$ and using the $\DoF$ function we
can give the get and put operations a familiar look and feel.
%
\[
\ba{@{~}l@{\qquad\quad}@{~}r}
\multirow{2}{*}{
\bl
\getF : \UnitType \to T~S\\
\getF~\Unit \defas \DoF~(\Get\,(\lambda x.x))
\getF~\Unit \defas \DoF\,(\Get\,(\lambda st.st))
\el} &
\multirow{2}{*}{
\bl
\putF : S \to T~\UnitType\\
\putF~st \defas \DoF~(\Put\,\Record{st;\Unit})
\putF~st \defas \DoF\,(\Put\,\Record{st;\lambda\Unit.\Unit})
\el} \\ & % space hack to avoid the next paragraph from
% floating into the math environment.
\ea
\]
%
Both operations are performed with the identity function as their
respective continuation function. We do not have much choice in this
regard as for instance in the case of $\getF$ we must ultimately
return a computation of type $T~S$, and the only value of type $S$ we
have access to in this context is the one supplied externally to the
continuation function.
The state initialiser and runner for the monad is an interpreter. As
the programmers, we are free to choose whatever interpretation of
state we desire. For example, the following interprets the stateful
operations using the state-passing technique.
%
\[
\bl
\runState : \Free\,(\dec{FreeState}~S)\,A \to S \to A \times S\\
\runState : (\UnitType \to \Free\,(\FreeState~S)\,A) \to S \to A \times S\\
\runState~m~st \defas
\Case\;m\;\{
\ba[t]{@{~}l@{~}c@{~}l}
\Case\;m\,\Unit\;\{
\ba[t]{@{}l@{~}c@{~}l}
\return~x &\mapsto& (x, st);\\
\OpF\,(\Get~k) &\mapsto& \runState~st~(k~st);\\
\OpF\,(\Get~k) &\mapsto& \runState~st~(\lambda\Unit.k~st);\\
\OpF\,(\Put\,\Record{st';k}) &\mapsto& \runState~st'~k
\}\ea
\el
\]
%
The interpreter implements a \emph{fold} over the computation tree by
pattern matching on the shape of the tree (or equivalently monad). In
the case of a $\Return$ node the interpreter returns the payload $x$
along with the final state value $st$. If the current node is a $\Get$
operation, then the interpreter recursively calls itself with the same
state value $st$ and a thunked application of the continuation $k$ to
the current state $st$. The recursive activation of $\runState$ will
force the thunk in order to compute the next computation tree node.
In the case of a $\Put$ operation the interpreter calls itself
recursively with the new state value $st'$ and the continuation $k$ (which
is a thunk). One may prove that this interpretation of get and put
satisfies the equations of Definition~\ref{def:state-monad}.
%
By instantiating $S = \Int$ we can use this interpreter to run
$\incrEven$.
%
\[
\runState~4~(\incrEven~\Unit) \reducesto^+ \Record{\True;5}
\runState~\incrEven~4 \reducesto^+ \Record{\True;5} : \Bool \times \Int
\]
%
The free monad brings us close to the essence of programming with
effect handlers.
\subsection{Back to direct-style}
Combining monadic effects~\cite{VazouL16}\dots
\dhil{The problem with monads is that they do not
compose. Cite~\citet{Espinosa95}, \citet{VazouL16}}
%
Another problem is that monads break the basic doctrine of modular
abstraction, which states that we should program against an abstract
interface, not an implementation. A monad is a concrete implementation.
\subsubsection{Handling state}
\dhil{Cite \citet{PlotkinP01,PlotkinP02,PlotkinP03,PlotkinP09,PlotkinP13}.}
\[
\ba{@{~}l@{~}r}
\Do\;\ell^{A \opto B}~M^A : B & \Handle\;M^A\;\With\;H^{A \Harrow B} : B \smallskip\\
\multicolumn{2}{c}{H^{A \Harrow B} ::= \{\Return\;x \mapsto M\} \mid \{\OpCase{\ell}{p}{k} \mapsto M\} \uplus H}
\ea
\]
The operation symbols are declared in a signature
$\ell : A \opto B \in \Sigma$.
%
\[
\ba{@{~}l@{\qquad\quad}@{~}r}
\multicolumn{2}{l}{\Sigma \defas \{\Get : \UnitType \opto S;\Put : S \opto \UnitType\}} \smallskip\\
\multirow{2}{*}{
\bl
\getF : \UnitType \to S\\
\getF~\Unit \defas \Do\;\Get\,\Unit
\el} &
\multirow{2}{*}{
\bl
\putF : S \to \UnitType\\
\putF~st \defas \Do\;\Put~st
\el} \\ & % space hack to avoid the next paragraph from
% floating into the math environment.
\ea
\]
%
As with the free monad, we are completely free to pick whatever
interpretation of state we desire. However, if we want an
interpretation that is compatible with the usual equations for state,
then we can simply use the state-passing technique again.
%
\[
\bl
\runState : (\UnitType \to A) \to S \to A \times S\\
\runState~m~st_0 \defas
\bl
\Let\;f \revto \Handle\;m\,\Unit\;\With\\~\{
\ba[t]{@{}l@{~}c@{~}l}
\Return\;x &\mapsto& \lambda st.\Record{x;st};\\
\OpCase{\Get}{\Unit}{k} &\mapsto& \lambda st.k~st~st;\\
\OpCase{\Put}{st'}{k} &\mapsto& \lambda st.k~\Unit~st'\}
\ea\\
\In\;f~st_0
\el
\el
\]
%
Note the similarity with the implementation of the interpreter for the
free state monad. Save for the syntactic differences, the main
difference between this implementation and the free state monad
interpreter is that here the continuation $k$ implicitly reinstalls
the handler, whereas in the free state monad interpreter we explicitly
reinstalled the handler via a recursive application.
%
By fixing $S = \Int$ we can use the above effect handler to run the
delimited control variant of $\incrEven$.
%
\[
\runState~\incrEven~4 \reducesto^+ \Record{\True;5} : \Bool \times \Int
\]
\subsubsection{Effect tracking}
\dhil{Cite \citet{GiffordL86}, \citet{LucassenG88}, \citet{TalpinJ92},
\citet{TofteT97}, \citet{NielsonN99}, \citet{WadlerT03}.}
A benefit of using monads for effectful programming is that we get
effect tracking `for free' (some might object to this statement and
claim we paid for it by having to program in monadic style). Effect
tracking is a useful tool for making programming with effects less
prone to error in much the same way a static type system is useful for
detecting a wide range of potential runtime errors at compile
time.
The principal idea of an effect system is to annotate computation
types with the collection of effects that their inhabitants are
allowed to perform, e.g. the type $A \to B \eff E$ denotes a function
that accepts a value of type $A$ as input and ultimately returns a
value of type $B$. As it computes the $B$ value it is allowed to use
the operations given by the effect signature $E$.
This typing discipline fits nicely with the effect-handler style of
programming. The $\Do$ construct provides a mechanism for injecting an
operation into the effect signature, whilst the $\Handle$ construct
provides a way to eliminate an effect operation from the signature.
%
If we instantiate $A = \UnitType$, $B = \Bool$, and $E = \Sigma$, then
we obtain a type-and-effect signature for the control effects version
of $\incrEven$.
%
\[
\incrEven : \UnitType \to \Bool \eff \{\Get:\UnitType \opto \Int;\Put:\Int \opto \UnitType\}
\]
%
Now, the signature of $\incrEven$ communicates precisely what it
expects from the ambient context. It is clear that we must run this
function under a handler that interprets at least $\Get$ and $\Put$.
\dhil{Mention the importance of polymorphism in effect tracking}
\section{Contributions}