From a8f9da95c22c7bf21df81404cd9c3f5b2e73e6a7 Mon Sep 17 00:00:00 2001
From: Chris Barker
Date: Sun, 26 Sep 2010 17:59:24 -0400
Subject: [PATCH] edits
---
lambda_evaluator.mdwn | 2 +-
week3.mdwn | 58 +++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 59 insertions(+), 1 deletion(-)
diff --git a/lambda_evaluator.mdwn b/lambda_evaluator.mdwn
index 26de7109..4a5b9dc0 100644
--- a/lambda_evaluator.mdwn
+++ b/lambda_evaluator.mdwn
@@ -85,7 +85,7 @@ Object.prototype.error = function (message, t) {
// string = expr.to_string() + "\n\n~~>\n\n";
string = '';
eta = document.getElementById('ETA').checked;
- string = string + reduce(expr, eta, 1).to_string();
+ string = string + reduce(expr, eta, false).to_string();
} catch (e) {
string = JSON.stringify(e, ['name', 'message', 'from', 'to', 'key',
'value', 'arity', 'first', 'second', 'third', 'fourth'], 4);
diff --git a/week3.mdwn b/week3.mdwn
index 892a055a..45aa5b3d 100644
--- a/week3.mdwn
+++ b/week3.mdwn
@@ -268,6 +268,8 @@ In the lambda calculus, we say a fixed point of an expression `f` is any formula
What is a fixed point of the identity combinator I?
+What is a fixed point of the false combinator, KI?
+
It's a theorem of the lambda calculus that every formula has a fixed point. In fact, it will have infinitely many, non-equivalent fixed points. And we don't just know that they exist: for any given formula, we can name many of them.
Yes, even the formula that you're using the define the successor function will have a fixed point. Isn't that weird? Think about how it might be true.
@@ -427,3 +429,59 @@ then this is a fixed-point combinator:
L L L L L L L L L L L L L L L L L L L L L L L L L L
+##Watching Y in action##
+
+For those of you who like to watch ultra slow-mo movies of bullets
+piercing apples, here's a stepwise computation of the application of a
+recursive function. We'll use a function `sink`, which takes one
+argument. If the argument is boolean true (i.e., `\x y.x`), it
+returns itself (a copy of `sink`); if the argument is boolean false,
+it returns I. That is, we want the following behavior:
+
+ sink false ~~> I
+ sink true false ~~> I
+ sink true true false ~~> I
+ sink true true true false ~~> I
+
+So we make sink = Y (\fb.bfI):
+
+ 1. sink false
+ 2. Y (\fb.bfI) false
+ 3. (\f. (\h. f (h h)) (\h. f (h h))) (\fb.bfI) false
+ 4. (\h. [\fb.bfI] (h h)) (\h. [\fb.bfI] (h h)) false
+    5. [\fb.bfI] ((\h. [\fb.bfI] (h h))(\h. [\fb.bfI] (h h))) false
+    6. (\b.b[(\h. [\fb.bfI] (h h))(\h. [\fb.bfI] (h h))]I) false
+    7. false [(\h. [\fb.bfI] (h h))(\h. [\fb.bfI] (h h))] I
+ --------------------------------------------
+ 8. I
+
+So far so good. The crucial thing to note is that as long as we
+always reduce the outermost redex first, we never have to get around
+to computing the underlined redex: because `false` ignores its first
+argument, we can throw it away unreduced.
+
+Now we try the next most complex example:
+
+ 1. sink true false
+ 2. Y (\fb.bfI) true false
+ 3. (\f. (\h. f (h h)) (\h. f (h h))) (\fb.bfI) true false
+ 4. (\h. [\fb.bfI] (h h)) (\h. [\fb.bfI] (h h)) true false
+    5. [\fb.bfI] ((\h. [\fb.bfI] (h h))(\h. [\fb.bfI] (h h))) true false
+    6. (\b.b[(\h. [\fb.bfI] (h h))(\h. [\fb.bfI] (h h))]I) true false
+    7. true [(\h. [\fb.bfI] (h h))(\h. [\fb.bfI] (h h))] I false
+    8. [(\h. [\fb.bfI] (h h))(\h. [\fb.bfI] (h h))] false
+
+We've now arrived at line (4) of the first computation, so the result
+is again I.
+
+You should be able to see that `sink` will consume as many `true`s as
+we throw at it, then turn into the identity function after it
+encounters the first `false`.
+
+The key to the recursion is that, thanks to Y, the definition of
+`sink` contains within it the ability to fully regenerate itself as
+many times as is necessary. The key to *ending* the recursion is that
+the behavior of `sink` is sensitive to the nature of the input: if the
+input is the magic function `false`, the self-regeneration machinery
+will be discarded, and the recursion will stop.
+
--
2.11.0