1
0
mirror of https://github.com/dhil/phd-dissertation synced 2026-03-13 02:58:26 +00:00

Compare commits

...

2 Commits

Author SHA1 Message Date
b9aea3fab9 Tidy up HO translation. 2020-08-03 01:50:23 +01:00
9c4eed2e94 Describe the static meta language. 2020-08-03 00:02:36 +01:00
2 changed files with 200 additions and 261 deletions

View File

@@ -123,6 +123,9 @@
\newcommand{\CompCat}{\CatName{Comp}}
\newcommand{\UCompCat}{\CatName{UComp}}
\newcommand{\UValCat}{\CatName{UVal}}
\newcommand{\SCompCat}{\CatName{SComp}}
\newcommand{\SValCat}{\CatName{SVal}}
\newcommand{\SPatCat}{\CatName{SPat}}
\newcommand{\ValCat}{\CatName{Val}}
\newcommand{\VarCat}{\CatName{Var}}
\newcommand{\ValTypeCat}{\CatName{VType}}

View File

@@ -860,8 +860,9 @@ given term.
The function computes the set of free variables bottom-up. Most cases
are homomorphic on the syntax constructors. The interesting cases are
those constructs which feature term binders: lambda abstraction, let
bindings, pair destructing, and case splitting. In each of those cases
we subtract the relevant binder(s) from the set of free variables.
bindings, pair deconstructing, and case splitting. In each of those
cases we subtract the relevant binder(s) from the set of free
variables.
\subsection{Typing rules}
\label{sec:base-language-type-rules}
@@ -1007,7 +1008,7 @@ application $V\,A$ is well-typed whenever the abstractor term $V$ has
the polymorphic type $\forall \alpha^K.C$ and the type $A$ has kind
$K$. This rule makes use of type substitution.
%
The \tylab{Split} rule handles typing of record destructing. When
The \tylab{Split} rule handles typing of record deconstructing. When
splitting a record term $V$ on some label $\ell$ binding it to $x$ and
the remainder to $y$. The label we wish to split on must be present
with some type $A$, hence we require that
@@ -1566,9 +1567,9 @@ effect handlers which are an alternative to deep effect handlers.
%
Programming with effect handlers is a dichotomy of \emph{performing}
and \emph{handling} of effectful operations -- or alternatively a
dichotomy of \emph{constructing} and \emph{destructing}. An operation
dichotomy of \emph{constructing} and \emph{deconstructing}. An operation
is a constructor of an effect without a predefined semantics. A
handler destructs effects by pattern-matching on their operations. By
handler deconstructs effects by pattern-matching on their operations. By
matching on a particular operation, a handler instantiates the said
operation with a particular semantics of its own choosing. The key
ingredient to make this work in practice is \emph{delimited
@@ -2534,28 +2535,12 @@ operation clauses extract and convert the current resumption stack
into a function using the $\Res$, and $\hforward$ augments the current
resumption stack with the current continuation pair.
%
% Since we have only changed the representation of resumptions, the
% translation of top-level programs remains the same.
\subsection{Higher-order translation for deep effect handlers}
\label{sec:higher-order-uncurried-deep-handlers-cps}
%
\begin{figure}
%
% \textbf{Static patterns and static lists}
% %
% \begin{syntax}
% \slab{Static patterns} &\sP& ::= & \sk \scons \sP \mid \sks \\
% \slab{Static lists} &\VS& ::= & V \scons \VS \mid \reflect V \\
% \end{syntax}
% %
% \textbf{Reification}
% %
% \begin{equations}
% \reify (V \scons \VS) &\defas& V \dcons \reify \VS \\
% \reify \reflect V &\defas& V \\
% \end{equations}
%
\textbf{Values}
%
\begin{displaymath}
@@ -2573,7 +2558,7 @@ resumption stack with the current continuation pair.
\textbf{Computations}
%
\begin{equations}
\cps{-} &:& \CompCat \to \UValCat^\ast \to \UCompCat\\
\cps{-} &:& \CompCat \to \SValCat^\ast \to \UCompCat\\
\cps{V\,W} &\defas& \slam \sks.\cps{V} \dapp \cps{W} \dapp \reify \sks \\
\cps{V\,T} &\defas& \slam \sks.\cps{V} \dapp \Record{} \dapp \reify \sks \\
\cps{\Let\; \Record{\ell=x;y} = V \; \In \; N} &\defas& \slam \sks.\Let\; \Record{\ell=x;y} = \cps{V} \; \In \; \cps{N} \sapp \sks \\
@@ -2594,19 +2579,6 @@ resumption stack with the current continuation pair.
\cps{-} &:& \HandlerCat \to \UCompCat\\
\cps{\{\Return \; x \mapsto N\}} &\defas& \dlam x\, ks.\Let\; (h \dcons ks') = ks \;\In\; \cps{N} \sapp \reflect ks'
\\
% \cps{\{(\ell \; p \; r \mapsto N_\ell)_{\ell \in \mathcal{L}}\}}
% &\defas&
% \bl
% \dlam z \, ks.\Case \;z\; \{
% \bl
% (\ell~\Record{p, s} \mapsto \dLet\;r=\Fun\,s \;\dIn\; \cps{N_{\ell}} \sapp \reflect ks)_{\ell \in \mathcal{L}};\,
% y \mapsto \hforward \} \\
% \el \\
% \el \\
% \hforward &\defas& \bl
% \dLet\; (k' \dcons h' \dcons ks') = ks \;\dIn\; \\
% \Vmap\,(\dlam\Record{p, s}\,(k \dcons ks).k\,\Record{p, h' \dcons k' \dcons s}\,ks)\,y\,(h' \dcons ks') \\
% \el\\
\cps{\{(\ell \; p \; r \mapsto N_\ell)_{\ell \in \mathcal{L}}\}}
&\defas& \bl
\dlam \Record{z,\Record{p,rs}}\,ks.\Case \;z\; \{
@@ -2630,175 +2602,119 @@ resumption stack with the current continuation pair.
\label{fig:cps-higher-order-uncurried}
\end{figure}
%
% \begin{figure}[t]
% \flushleft
% Values
% \begin{equations}
% \cps{x} &\defas& x \\
% \cps{\lambda x.M} &\defas& \dlam x\,\dhk.\Let\;\dRecord{fs,\dRecord{\vhret,\vhops}}\dcons \dhk' = \dhk\;\In\;\cps{M} \sapp (\sRecord{\reflect fs, \sRecord{\reflect \vhret, \reflect \vhops}} \scons \reflect \dhk') \\
% \cps{\Lambda \alpha.M} &\defas& \dlam z\,\dhk.\Let\;\dRecord{fs,\dRecord{\vhret,\vhops}}\dcons \dhk' = \dhk\;\In\;\cps{M} \sapp (\sRecord{\reflect fs, \sRecord{\reflect \vhret, \reflect \vhops}} \scons \reflect \dhk') \\
% \cps{\Rec\,g\,x.M} &\defas& \Rec\,g\,x\,\dhk.\Let\;\dRecord{fs,\dRecord{\vhret,\vhops}}\dcons \dhk' = \dhk\;\In\;\cps{M} \sapp (\sRecord{\reflect fs, \sRecord{\reflect \vhret, \reflect \vhops}} \scons \reflect \dhk') \\
% \multicolumn{3}{c}{
% \cps{\Record{}} \defas \Record{}
% \qquad
% \cps{\Record{\ell = \!\!V; W}} \defas \Record{\ell = \!\cps{V}; \cps{W}}
% \qquad
% \cps{\ell\,V} \defas \ell\,\cps{V}
% }
% \end{equations}
% % \begin{displaymath}
% % \end{displaymath}
% Computations
% \begin{equations}
% \cps{V\,W} &\defas& \slam \shk.\cps{V} \dapp \cps{W} \dapp \reify \shk \\
% \cps{V\,T} &\defas& \slam \shk.\cps{V} \dapp \Record{} \dapp \reify \shk \\
% \cps{\Let\; \Record{\ell=x;y} = V \; \In \; N} &\defas& \slam \shk.\Let\; \Record{\ell=x;y} = \cps{V} \; \In \; \cps{N} \sapp \shk \\
% \cps{\Case~V~\{\ell~x \mapsto M; y \mapsto N\}} &\defas&
% \slam \shk.\Case~\cps{V}~\{\ell~x \mapsto \cps{M} \sapp \shk; y \mapsto \cps{N} \sapp \shk\} \\
% \cps{\Absurd~V} &\defas& \slam \shk.\Absurd~\cps{V} \\
% \end{equations}
% \begin{equations}
% \cps{\Return\,V} &=& \slam \shk.\kapp\;(\reify \shk)\;\cps{V} \\
% \cps{\Let~x \revto M~\In~N} &=&
% \bl\slam \sRecord{\shf, \sRecord{\svhret, \svhops}} \scons \shk.\\
% \qquad \cps{M} \sapp (\bl\sRecord{\reflect((\dlam x\,\dhk.\bl\Let\;\dRecord{fs,\dRecord{\vhret,\vhops}}\dcons k' = k\;\In\\
% \cps{N} \sapp (\sRecord{\reflect fs, \sRecord{\reflect \vhret, \reflect \vhops}} \scons \reflect k')) \dcons \reify\shf), \el\\
% \sRecord{\svhret, \svhops}} \scons \shk)\el
% \el\\
% \cps{\Do\;\ell\;V} &\defas&
% \slam \sRecord{\shf, \sRecord{\svhret, \svhops}} \scons \shk.\,
% \reify\svhops \dapp \dRecord{\ell,\dRecord{\cps{V}, \dRecord{\reify \shf, \dRecord{\reify\svhret, \reify\svhops}} \dcons \dnil}} \dapp \reify \shk \\
% \cps{\Handle^\depth \, M \; \With \; H} &\defas&
% \slam \shk . \cps{M} \sapp (\sRecord{\snil, \cps{H}^\depth} \scons \shk) \\
% \cps{H}^\depth &=& \sRecord{\reflect \cps{\hret}, \reflect \cps{\hops}^\depth}\\
% \cps{\{\Return \; x \mapsto N\}} &\defas& \dlam x\,\dhk.\bl\Let\;\dRecord{fs,\dRecord{\vhret,\vhops}} \dcons k' = k\;\In\\\cps{N} \sapp (\sRecord{\reflect fs, \sRecord{\reflect \vhret, \reflect \vhops}} \scons \reflect k')\el
% \\
% \cps{\{(\ell \; p \; r \mapsto N_\ell)_{\ell \in \mathcal{L}}\}}^\depth
% &\defas&
% \bl
% \dlam \dRecord{z,\dRecord{p,\dhkr}}\,\dhk.\bl
% \Case \;z\; \{
% \begin{eqs}
% (\ell &\mapsto& \Let\;r=\Res^\depth\,\dhkr\;\In\; \\
% & & \Let\;\dRecord{fs,\dRecord{\vhret, \vhops}}\dcons k'=k\;\In\\
% & & \cps{N_{\ell}} \sapp (\sRecord{\reflect fs, \sRecord{\reflect \vhret, \reflect \vhops}} \scons \reflect \dhk'))_{\ell \in \mathcal{L}}\\
% y &\mapsto& \hforward((y, p, \dhkr), \dhk) \} \\
% \end{eqs} \\
% \el \\
% \el \\
% \hforward((y, p, \dhkr), \dhk) &\defas& \bl
% \Let\; \dRecord{fs, \dRecord{\vhret, \vhops}} \dcons \dhk' = \dhk \;\In \\
% \Let\; rk' = \dRecord{fs, \dRecord{\vhret, \vhops}} \dcons \dhkr\;\In\\
% \vhops \dapp \dRecord{y,\dRecord{p, rk'}} \dapp \dhk' \\
% \el
% \end{equations}
% Top-level program
% \begin{equations}
% \pcps{M} &=& \cps{M} \sapp (\sRecord{\snil, \sRecord{\reflect \dlam x\,\dhk. x, \reflect \dlam \dRecord{z,\dRecord{p,rk}}\,\dhk.\Absurd~z}} \scons \snil) \\
% \end{equations}
% \caption{Higher-Order Uncurried CPS Translation of $\HCalc$}
% \label{fig:cps-higher-order-uncurried}
% \end{figure}
In the previous sections, we have step-wise refined the initial
curried CPS translation for deep effect handlers
(Section~\ref{sec:first-order-curried-cps}) to be properly
tail-recursive (Section~\ref{sec:first-order-uncurried-cps}) and to
avoid yielding unnecessary dynamic administrative redexes for
resumptions (Section~\ref{sec:first-order-explicit-resump}).
%
% We now adapt our uncurried CPS translation to a higher-order one-pass
% CPS translation~\cite{DanvyF90} that partially evaluates
% administrative redexes at translation time.
% %
% Following Danvy and Nielsen~\cite{DanvyN03}, we adopt a two-level
% lambda calculus notation to distinguish between \emph{static} lambda
% abstraction and application in the meta language and \emph{dynamic}
% lambda abstraction and application in the target language.
% %
% The idea is that redexes marked as static are reduced as part of the
% translation (at compile time), whereas those marked as dynamic are
% reduced at runtime.
% %
% The CPS translation is given in
% Figure~\ref{fig:cps-higher-order-uncurried}.
We now adapt the translation of Section~\ref{sec:pure-as-stack} to a
higher-order one-pass CPS translation~\citep{DanvyF90} that partially
evaluates administrative redexes at translation time.
There is still one outstanding issue, namely, that the translation
yields static administrative redexes. In this section we will further
refine the CPS translation to eliminate all static administrative
redexes at translation time.
%
Specifically, we will adapt the translation to a higher-order one-pass
CPS translation~\citep{DanvyF90} that partially evaluates
administrative redexes at translation time.
%
Following \citet{DanvyN03}, we adopt a two-level lambda calculus
notation to distinguish between \emph{static} lambda abstraction and
application in the meta language and \emph{dynamic} lambda abstraction
and application in the target language:
{\color{blue}$\overline{\text{overline}}$} denotes a static syntax
constructor; {\color{red}$\underline{\text{underline}}$} denotes a
dynamic syntax constructor. The principal idea is that redexes marked
as static are reduced as part of the translation (at compile time),
whereas those marked as dynamic are reduced at runtime. To facilitate
this notation we write application in both calculi with an infix
``at'' symbol ($@$).
and application in the target language. To disambiguate syntax
constructors in the respective calculi we mark static constructors
with a {\color{blue}$\overline{\text{blue overline}}$}, whilst dynamic
constructors are marked with a
{\color{red}$\underline{\text{red underline}}$}. The principal idea is
that redexes marked as static are reduced as part of the translation
(at compile time), whereas those marked as dynamic are reduced at
runtime. To facilitate this notation we write application in both
calculi with an infix ``at'' symbol ($@$).
\paragraph{Static terms}
Static constructs are marked in
{\color{blue}$\overline{\text{static blue}}$}, and their redexes are
reduced as part of the translation (at compile time). We make use of
static lambda abstractions, pairs, and lists. Reflection of dynamic
language values into the static language is written as $\reflect V$.
%
We use $\shk$ for variables representing statically known
continuations (frame stacks), $\shf$ for variables representing pure
frame stacks, and $\chi$ for variables representing handlers.
As in the dynamic target language, we will represent continuations as
alternating lists of pure continuation functions and effect
continuation functions. To ease notation we are going to make use of
pattern matching notation. The static meta language is generated by
the following productions.
%
We let $\sV, \sW$ range over meta language values,
$\sM$ range over static language expressions,
and $\sP, \sQ$ over static language patterns.
\begin{syntax}
\slab{Static patterns} &\sP \in \SPatCat &::=& \sks \mid \sk \scons \sP\\
\slab{Static values} & \sV, \sW \in \SValCat &::=& V \mid \reflect V \mid \sV \scons \sW \mid \slam \sP. \sM\\
\slab{Static computations} & \sM \in \SCompCat &::=& \sV \sapp \sW \mid \sV \dapp V \dapp W
\end{syntax}
%
We use list and record pattern matching in the meta language, which
behaves as follows:
The patterns comprise only static list deconstructing. We let $\sP$
range over static patterns.
%
\begin{displaymath}
\ba{@{~}l@{~}l}
&(\slam \sRecord{\sP, \sQ}.\,\sM) \sapp \sRecord{\sV, \sW}\\
= &(\slam \sP.\,\slam \sQ.\,\sM) \sapp \sV \sapp \sW\\
= &(\slam (\sP \scons \sQ).\,\sM) \sapp (\sV \scons \sW)
\ea
\end{displaymath}
The static values comprise dynamic values, reflected dynamic values,
static lists, and static lambda abstractions. We let $\sV, \sW$ range
over meta language values; by convention we shall use variables $\sk$
to denote statically known pure continuations, $\sh$ to denote
statically known effect continuations, and $\sks$ to denote statically
known continuations.
%
Static language values, comprised of reflected values, pairs, and list
conses, are reified as dynamic language values $\reify \sV$ by
induction on their structure:
We shall use $\sM$ to range over static computations, which comprise
static application and binary dynamic application of a static value to
two dynamic values.
%
Static computations are subject to the following equational axioms.
%
\begin{equations}
(\slam \sks. \sM) \sapp \sV &\defas& \sM[\sV/\sks]\\
(\slam \sk \scons \sks. \sM) \sapp (\sV \scons \sW) &\defas& (\slam \sks. \sM[\sV/\sk]) \sapp \sW\\
\end{equations}
%
The first equation is static $\beta$-equivalence: it states that
applying a static lambda abstraction with binder $\sks$ and body $\sM$
to a static value $\sV$ is equal to substituting $\sV$ for $\sks$ in
$\sM$. The second equation provides a means for applying a static
lambda abstraction to a static list component-wise.
%
\dhil{What about $\eta$-equivalence?}
Reflected static values are reified as dynamic language values
$\reify \sV$ by induction on their structure.
%
\[
\ba{@{}l@{\qquad}c@{\qquad}r}
\ba{@{}l@{\qquad}c}
\reify \reflect V \defas V
&\reify (\sV \scons \sW) \defas \reify \sV \dcons \reify \sW
&\reify \sRecord{\sV, \sW} \defas \dRecord{\reify \sV, \reify \sW}
\ea
\]
%
We assume the static language is pure and hence respects the usual
$\beta$ and $\eta$ equivalences.
\dhil{Need to spell out that static pattern matching may induce dynamic (administrative) reductions}
%
\paragraph{Higher-order translation}
The CPS translation is given in
%
The complete CPS translation is given in
Figure~\ref{fig:cps-higher-order-uncurried}. In essence, it is the
same as the CPS translation for deep and shallow handlers we described
in Section~\ref{sec:pure-as-stack}, albeit separated into static and
dynamic parts. A major difference that has a large cosmetic effect on
the presentation of the translation is that we maintain the invariant
that the statically known stack ($\sk$) always contains at least one
frame, consisting of a triple
$\sRecord{\reflect V_{fs}, \sRecord{\reflect V_{ret}, \reflect
V_{ops}}}$ of reflected dynamic pure frame stacks, return
handlers, and operation handlers. Maintaining this invariant ensures
that all translations are uniform in whether they appear statically
within the scope of a handler or not, and this simplifies our
correctness proof. To maintain the invariant, any place where a
dynamically known stack is passed in (as a continuation parameter
$k$), it is immediately decomposed using a dynamic language $\Let$ and
repackaged as a static value with reflected variable
names. Unfortunately, this does add some clutter to the translation
definition, as compared to the translations above. However, there is a
payoff in the removal of administrative reductions at run time.
same as the refined first-order uncurried CPS translation, although
the notation is slightly more involved due to the separation of static
and dynamic parts.
%
\dhil{Rewrite the above.}
The translation on values is mostly homomorphic on the syntax
constructors, except for $\lambda$- and $\Lambda$-abstractions which
are translated in the exact same way, because the target calculus has
no notion of types. As per usual continuation passing style practice,
the translation of a lambda abstraction yields a dynamic binary lambda
abstraction, where the second parameter is intended to be the
continuation parameter. However, the translation slightly diverges from
the usual practice in the translation of the body as the result of
$\cps{M}$ is applied statically, rather than dynamically, to the
(reflected) continuation parameter.
%
The reason is that the translation on computations is staged: for any
given computation term the translation yields a static function, which
can be applied to a static continuation to ultimately produce a
$\UCalc$-computation term. This staging is key to eliminating static
administrative redexes as any static function may perform static
computation by manipulating its provided static continuation, as is
for instance done in the translation of $\Let$.
%
\dhil{Spell out why this translation qualifies as `higher-order'.}
Let us again revisit the example from
Section~\ref{sec:first-order-curried-cps} to see that the higher-order
translation eliminates the static redex at translation time.
@@ -2811,27 +2727,24 @@ translation eliminates the static redex at translation time.
\end{equations}
%
In contrast with the previous translations, the image of this
translation admits only a single dynamic reduction.
translation admits only a single dynamic reduction (disregarding the
dynamic administrative reductions arising from continuation
construction and deconstruction).
\subsubsection{Correctness}
\label{sec:higher-order-cps-deep-handlers-correctness}
To prove the correctness of our CPS translation
(Theorem~\ref{thm:ho-simulation}), we first state several lemmas
describing how translated terms behave. In view of the invariant of
the translation that we described above, we state each of these lemmas
in terms of static continuation stacks where the shape of the top
element is always known statically, i.e., it is of the form
$\sRecord{\sV_{fs}, \sRecord{\sV_{ret},\sV_{ops}}} \scons
\sW$. Moreover, the static values $\sV_{fs}$, $\sV_{ret}$, and
$\sV_{ops}$ are all reflected dynamic terms (i.e., of the form
$\reflect V$). This fact is used implicitly in our proofs, which are
given in Appendix~\ref{sec:proofs}.
First, the higher-order CPS translation commutes with substitution:
\begin{lemma}[Substitution]\label{lem:subst}
We establish the correctness of the higher-order uncurried CPS
translation via a simulation result in the style of
Plotkin~\cite{Plotkin75} (Theorem~\ref{thm:ho-simulation}). However,
before we can state and prove this result, we first state several auxiliary
lemmas describing how translated terms behave. First, the higher-order
CPS translation commutes with substitution.
%
\begin{lemma}[Substitution]\label{lem:ho-cps-subst}
%
The CPS translation $\cps{-}$ commutes with substitution in value terms
The higher-order uncurried CPS translation commutes with
substitution in value terms
%
\[
\cps{W}[\cps{V}/x] = \cps{W[V/x]},
@@ -2840,33 +2753,55 @@ First, the higher-order CPS translation commutes with substitution:
and with substitution in computation terms
\[
(\cps{M} \sapp (\sk \scons \sh \scons \sks))[\cps{V}/x]
= \cps{M[V/x]} \sapp (\sk \scons \sh \scons \sks)[\cps{V}/x].
= \cps{M[V/x]} \sapp (\sk \scons \sh \scons \sks)[\cps{V}/x],
\]
%
and with substitution in handler definitions
%
\begin{equations}
(\cps{\hret} \sapp (\sk \scons \sh \scons \sks))[\cps{V}/x]
&=& \cps{\hret[V/x]} \sapp (\sk \scons \sh \scons \sks)[\cps{V}/x],\\
(\cps{\hops} \sapp (\sk \scons \sh \scons \sks))[\cps{V}/x]
&=& \cps{\hops[V/x]} \sapp (\sk \scons \sh \scons \sks)[\cps{V}/x].
\end{equations}
\end{lemma}
%
\begin{proof}
TODO\dots
Proof is by mutual induction on the structure of $W$, $M$, $\hret$,
and $\hops$.
\end{proof}
%
In order to reason about the behaviour of the \semlab{Op} rule,
which is defined in terms of an evaluation context, we extend the CPS
It follows as a corollary that top-level substitution is well-behaved.
%
\begin{corollary}[Top-level substitution]
\[
\pcps{M}[\cps{V}/x] = \pcps{M[V/x]}.
\]
\end{corollary}
%
\begin{proof}
Follows immediately by the definitions of $\pcps{-}$ and
Lemma~\ref{lem:ho-cps-subst}.
\end{proof}
%
In order to reason about the behaviour of the \semlab{Op} rule, which is
defined in terms of an evaluation context, we need to extend the CPS
translation to evaluation contexts.
%
\begin{displaymath}
\ba{@{}r@{~}c@{~}r@{~}l@{}}
\cps{[~]} &=& \slam \sks. &\sks \\
\cps{\Let\; x \revto \EC \;\In\; N} &=& \slam \sk \scons \sks.&\cps{\EC} \sapp ((\dlam x\,ks.\cps{N} \sapp (k \scons \reflect ks)) \scons \sks) \\
\cps{\Handle\; \EC \;\With\; H} &=& \slam \sks. &\cps{\EC} \sapp (\cps{\hret} \scons \cps{\hops} \scons \sks) \\
\ea
\end{displaymath}
\begin{equations}
\cps{-} &:& \EvalCat \to \SValCat\\
\cps{[~]} &\defas& \slam \sks.\sks \\
\cps{\Let\; x \revto \EC \;\In\; N} &\defas& \slam \sk \scons \sks.\cps{\EC} \sapp ((\dlam x\,ks.\cps{N} \sapp (k \scons \reflect ks)) \scons \sks) \\
\cps{\Handle\; \EC \;\With\; H} &\defas& \slam \sks. \cps{\EC} \sapp (\cps{\hret} \scons \cps{\hops} \scons \sks)
\end{equations}
%
The following lemma is the characteristic property of the CPS
translation on evaluation contexts.
%
This allows us to focus on the computation contained within
an evaluation context.
It provides a means for decomposing an evaluation context, such that
we can focus on the computation contained within the evaluation
context.
%
\begin{lemma}[Decomposition]
\label{lem:decomposition}
%
@@ -2877,35 +2812,41 @@ an evaluation context.
\end{lemma}
%
\begin{proof}
TODO\dots
Proof by structural induction on the evaluation context $\EC$.
\end{proof}
%
Though we have eliminated the static administrative redexes, we are
still left with one form of administrative redex that cannot be
eliminated statically because it only appears at run-time. These arise
from pattern matching against a reified stack of continuations and are
given by the $\usemlab{SplitList}$ rule.
Even though we have eliminated the static administrative redexes, we
still need to account for the dynamic administrative redexes that
arise from pattern matching against a reified continuation. To
properly account for these administrative redexes it is convenient to
treat list pattern matching as a primitive in $\UCalc$, therefore we
introduce a new reduction rule $\usemlab{SplitList}$ in $\UCalc$.
%
\begin{reductions}
\usemlab{SplitList} & \Let\; (k \dcons ks) = V \dcons W \;\In\; M &\reducesto& M[V/k, W/ks] \\
\end{reductions}
%
This is isomorphic to the \usemlab{Split} rule, but we now treat lists
and \usemlab{SplitList} as distinct from pairs, unit, and
\usemlab{Split} in the higher-order translation so that we can
properly account for administrative reduction.
Note this rule is isomorphic to the \usemlab{Split} rule with lists
encoded as right nested pairs using unit to denote nil.
%
We write $\areducesto$ for the compatible closure of
\usemlab{SplitList}.
By definition, $\reify \reflect V = V$, but we also need to reason
about the inverse composition.
We also need to be able to reason about the computational content of
reflection after reification. By definition we have that
$\reify \reflect V = V$; the following lemma lets us reason about the
inverse composition.
%
\begin{lemma}[Reflect after reify]
\label{lem:reflect-after-reify}
$\cps{M} \sapp (V_1 \scons \dots V_n \scons \reflect \reify \VS)
\areducesto^*
\cps{M} \sapp (V_1 \scons \dots V_n \scons \VS)$
%
Reflection after reification may give rise to dynamic administrative
reductions, i.e.
%
\[
\cps{M} \sapp (V_1 \scons \dots V_n \scons \reflect \reify \sV)
\areducesto^* \cps{M} \sapp (V_1 \scons \dots V_n \scons \sV)
\]
\end{lemma}
%
\begin{proof}
@@ -2916,48 +2857,45 @@ We next observe that the CPS translation simulates forwarding.
%
\begin{lemma}[Forwarding]
\label{lem:forwarding}
If $\ell \notin dom(H_1)$ then:
If $\ell \notin dom(H_1)$ then
%
\begin{displaymath}
\cps{\hops_1} \dapp \ell\,\Record{U, V} \dapp (V_2 \dcons \cps{\hops_2} \dcons W)
\[
\cps{\hops_1} \dapp \Record{\ell,\Record{U, V}} \dapp (V_2 \dcons \cps{\hops_2} \dcons W)
\reducesto^+
\cps{\hops_2} \dapp \ell\,\Record{U, \cps{\hops_2} \dcons V_2 \dcons V} \dapp W
\end{displaymath}
\cps{\hops_2} \dapp \Record{\ell,\Record{U, \cps{\hops_2} \dcons V_2 \dcons V}} \dapp W
\]
%
\end{lemma}
%
\begin{proof}
TODO\dots
Proof by direct calculation.
\end{proof}
%
Now we show that the translation simulates the \semlab{Op}
rule.
%
\begin{lemma}[Handling]
\label{lem:handle-op}
If $\ell \notin BL(\EC)$ and $\hell = \{\ell\,p\,r \mapsto N_\ell\}$ then:
If $\ell \notin BL(\EC)$ and $\hell = \{\ell\,p\,r \mapsto N_\ell\}$ then
%
\begin{displaymath}
\bl
\cps{\Do\;\ell\;V} \sapp (\cps{\EC} \sapp (\cps{\hret} \scons \cps{\hops} \scons \VS)) \reducesto^+\areducesto^* \\
\cps{\Do\;\ell\;V} \sapp (\cps{\EC} \sapp (\cps{\hret} \scons \cps{\hops} \scons \sV)) \reducesto^+\areducesto^* \\
\quad
(\cps{N_\ell} \sapp \VS)[\cps{V}/p, (\lambda y\,ks.\cps{\Return\;y} \sapp (\cps{\EC} \sapp (\cps{\hret} \scons \cps{\hops} \scons \reflect ks)))/r] \\
(\cps{N_\ell} \sapp \sV)[\cps{V}/p, (\lambda y\,ks.\cps{\Return\;y} \sapp (\cps{\EC} \sapp (\cps{\hret} \scons \cps{\hops} \scons \reflect ks)))/r] \\
\el
\end{displaymath}
\end{lemma}
%
%
\begin{proof}
Follows from Lemmas~\ref{lem:decomposition},
\ref{lem:reflect-after-reify}, and \ref{lem:forwarding}.
\end{proof}
%
We now turn to our main result which is a simulation result in style
of Plotkin~\cite{Plotkin75}. The theorem shows that the only extra
behaviour exhibited by a translated term is the bureaucracy of
deconstructing the continuation stack.
%
Finally, we have the ingredients to state and prove the simulation
result. The following theorem shows that the only extra behaviour
exhibited by a translated term is the bureaucracy of deconstructing
the continuation stack.
%
\begin{theorem}[Simulation]
\label{thm:ho-simulation}
@@ -2965,25 +2903,23 @@ If $M \reducesto N$ then $\pcps{M} \reducesto^+ \areducesto^* \pcps{N}$.
\end{theorem}
%
\begin{proof}
TODO\dots
Proof is by case analysis on the reduction relation using Lemmas
\ref{lem:decomposition}--\ref{lem:handle-op}. The \semlab{Op} case
follows from Lemma~\ref{lem:handle-op}.
\end{proof}
%
The proof is by case analysis on the reduction relation using Lemmas
\ref{lem:decomposition}--\ref{lem:handle-op}. The \semlab{Op}
case follows from Lemma~\ref{lem:handle-op}.
In common with most CPS translations, full abstraction does not
hold. However, as our semantics is deterministic it is straightforward
to show a backward simulation result.
%
\begin{corollary}[Backwards simulation]
If $\pcps{M} \reducesto^+ \areducesto^* V$ then there exists $W$ such that
$M \reducesto^* W$ and $\pcps{W} = V$.
\end{corollary}
%
\begin{proof}
TODO\dots
\end{proof}
% In common with most CPS translations, full abstraction does not
% hold. However, as our semantics is deterministic it is straightforward
% to show a backward simulation result.
% %
% \begin{corollary}[Backwards simulation]
% If $\pcps{M} \reducesto^+ \areducesto^* V$ then there exists $W$ such that
% $M \reducesto^* W$ and $\pcps{W} = V$.
% \end{corollary}
% %
% \begin{proof}
% TODO\dots
% \end{proof}
%
\chapter{Abstract machine semantics}