Merge "veyron/services/store/service: Remove Transaction and Watcher interfaces."
diff --git a/examples/mdb/Makefile b/examples/mdb/Makefile
new file mode 100644
index 0000000..aaa45dc
--- /dev/null
+++ b/examples/mdb/Makefile
@@ -0,0 +1,7 @@
+build:
+	${VEYRON_ROOT}/veyron/scripts/build/go install veyron/... veyron2/...
+
+run: build
+	./run.sh
+
+.PHONY: build run
diff --git a/examples/mdb/README.md b/examples/mdb/README.md
new file mode 100644
index 0000000..49cfc8d
--- /dev/null
+++ b/examples/mdb/README.md
@@ -0,0 +1,12 @@
+# mdb example
+
+A simple "movie database" example of store usage.
+
+## How to run
+
+Simply run `make run`. Under the hood, this generates a self-signed identity,
+starts a mounttable daemon, starts a store daemon, and initializes the store
+with mdb data and templates.
+
+Once everything's up and running, visit the store daemon's viewer in your
+browser (http://localhost:5000 by default) to explore the mdb data.
diff --git a/examples/storage/mdb/mdb_init/main.go b/examples/mdb/mdb_init/main.go
similarity index 95%
rename from examples/storage/mdb/mdb_init/main.go
rename to examples/mdb/mdb_init/main.go
index 28f8166..022a362 100644
--- a/examples/storage/mdb/mdb_init/main.go
+++ b/examples/mdb/mdb_init/main.go
@@ -22,7 +22,7 @@
 	"strings"
 	"time"
 
-	"veyron/examples/storage/mdb/schema"
+	"veyron/examples/mdb/schema"
 	"veyron2/naming"
 	"veyron2/rt"
 	"veyron2/storage"
@@ -353,7 +353,7 @@
 	return nil
 }
 
-// processFile stores the contens of the file to the store.
+// processFile stores the contents of the file in the store.
 func (st *state) processFile(path, name string) error {
 	switch filepath.Ext(path) {
 	case ".json":
@@ -375,11 +375,6 @@
 // main reads all the files in the templates directory and adds them to the
 // store.
 func main() {
-	// The client's identity needs to match the Admin ACLs at the empty
-	// store (since only the admin can put data).  The identity here
-	// matches with that used for server.ServerConfig.Admin in
-	// mdb_stored/main.go.  An alternative would be to relax the ACLs on
-	// the store.
 	rt.Init()
 
 	vlog.Infof("Binding to store on %s", storeName)
@@ -389,11 +384,13 @@
 	}
 	state := newState(st)
 
-	// Store all templates.
+	// Store all data and templates.
 	filepath.Walk(*templatesDir, func(path string, _ os.FileInfo, _ error) error {
 		err := state.processFile(path, strings.TrimPrefix(path, *templatesDir))
 		if err != nil {
-			vlog.Infof("%s: %s", path, err)
+			vlog.Fatalf("Error processing %s: %s", path, err)
+		} else {
+			vlog.Infof("Processed %s", path)
 		}
 		return err
 	})
diff --git a/examples/mdb/run.sh b/examples/mdb/run.sh
new file mode 100755
index 0000000..aa0d811
--- /dev/null
+++ b/examples/mdb/run.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+set -e
+set -u
+
+VEYRON_BIN=${VEYRON_ROOT}/veyron/go/bin
+ID_FILE=/var/tmp/id
+
+# Generate a self-signed identity.
+${VEYRON_BIN}/identity generate > ${ID_FILE}
+
+# Start the mounttable daemon.
+${VEYRON_BIN}/mounttabled --address=':8100' &
+
+export VEYRON_IDENTITY=${ID_FILE}
+export NAMESPACE_ROOT='/127.0.0.1:8100'
+
+sleep 1  # Wait for mounttabled to start up.
+
+# Start the store daemon.
+rm -rf /var/tmp/veyron_store.db
+${VEYRON_BIN}/stored &
+
+sleep 1  # Wait for stored to start up.
+
+# Initialize the store with mdb data and templates.
+${VEYRON_BIN}/mdb_init --load-all
+
+echo
+echo 'Visit http://localhost:5000 to browse the mdb data.'
+echo 'Hit Ctrl-C to kill all running services.'
+
+trap 'kill $(jobs -pr)' INT TERM EXIT
+wait
diff --git a/examples/storage/mdb/schema/init.go b/examples/mdb/schema/init.go
similarity index 100%
rename from examples/storage/mdb/schema/init.go
rename to examples/mdb/schema/init.go
diff --git a/examples/storage/mdb/schema/schema.vdl b/examples/mdb/schema/schema.vdl
similarity index 100%
rename from examples/storage/mdb/schema/schema.vdl
rename to examples/mdb/schema/schema.vdl
diff --git a/examples/storage/mdb/schema/schema.vdl.go b/examples/mdb/schema/schema.vdl.go
similarity index 100%
rename from examples/storage/mdb/schema/schema.vdl.go
rename to examples/mdb/schema/schema.vdl.go
diff --git a/examples/storage/mdb/templates/contents.json b/examples/mdb/templates/contents.json
similarity index 100%
rename from examples/storage/mdb/templates/contents.json
rename to examples/mdb/templates/contents.json
diff --git a/examples/storage/mdb/templates/css/movie.css b/examples/mdb/templates/css/movie.css
similarity index 100%
rename from examples/storage/mdb/templates/css/movie.css
rename to examples/mdb/templates/css/movie.css
diff --git a/examples/storage/mdb/templates/veyron/examples/storage/mdb/schema/Dir.tmpl b/examples/mdb/templates/veyron/examples/mdb/schema/Dir.tmpl
similarity index 100%
rename from examples/storage/mdb/templates/veyron/examples/storage/mdb/schema/Dir.tmpl
rename to examples/mdb/templates/veyron/examples/mdb/schema/Dir.tmpl
diff --git a/examples/storage/mdb/templates/veyron/examples/storage/mdb/schema/Movie.tmpl b/examples/mdb/templates/veyron/examples/mdb/schema/Movie.tmpl
similarity index 100%
rename from examples/storage/mdb/templates/veyron/examples/storage/mdb/schema/Movie.tmpl
rename to examples/mdb/templates/veyron/examples/mdb/schema/Movie.tmpl
diff --git a/examples/storage/mdb/templates/veyron/examples/storage/mdb/schema/Person.tmpl b/examples/mdb/templates/veyron/examples/mdb/schema/Person.tmpl
similarity index 100%
rename from examples/storage/mdb/templates/veyron/examples/storage/mdb/schema/Person.tmpl
rename to examples/mdb/templates/veyron/examples/mdb/schema/Person.tmpl
diff --git a/examples/storage/mdb/mdbd/main.go b/examples/storage/mdb/mdbd/main.go
deleted file mode 100644
index 1cd2735..0000000
--- a/examples/storage/mdb/mdbd/main.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// mdbd provides a UI for the mdb/schema movie database.
-//
-// The main purpose of this server is to register the types for the mdb/schema
-// so that the html/templates work correctly.
-package main
-
-import (
-	"flag"
-	"fmt"
-	"log"
-	"os"
-	"os/user"
-
-	// Register the mdb/schema types.
-	_ "veyron/examples/storage/mdb/schema"
-	"veyron/examples/storage/viewer"
-	"veyron2/rt"
-	"veyron2/storage/vstore"
-)
-
-var (
-	storeName string
-	port      = flag.Int("port", 10000, "IPV4 port number to serve")
-)
-
-func init() {
-	username := "unknown"
-	if u, err := user.Current(); err == nil {
-		username = u.Username
-	}
-	hostname := "unknown"
-	if h, err := os.Hostname(); err == nil {
-		hostname = h
-	}
-	dir := "global/vstore/" + hostname + "/" + username
-	flag.StringVar(&storeName, "store", dir, "Name of the Veyron store")
-}
-
-func main() {
-	rt.Init()
-
-	log.Printf("Binding to store on %s", storeName)
-	st, err := vstore.New(storeName)
-	if err != nil {
-		log.Fatalf("Can't connect to store: %s: %s", storeName, err)
-	}
-
-	viewer.ListenAndServe(fmt.Sprintf(":%d", *port), st)
-}
diff --git a/examples/todos/todos_appd/main.go b/examples/todos/todos_appd/main.go
index ebc1192..3c6c2b7 100644
--- a/examples/todos/todos_appd/main.go
+++ b/examples/todos/todos_appd/main.go
@@ -1,6 +1,6 @@
 // todos_appd is a web application backed by a Veyron store.
 //
-// For now, it simply displays the raw contents of the store.
+// It doesn't work yet, but it will soon. :)
 
 // TODO(sadovsky): Implement actual app, using Veyron {store,query,watch,sync}
 // over veyron.js.
@@ -11,22 +11,18 @@
 	"fmt"
 	"html/template"
 	"io"
-	"log"
 	"net/http"
 	"os"
 	"os/user"
 	"path"
 
-	"veyron/examples/storage/viewer"
 	_ "veyron/examples/todos/schema" // Register the todos/schema types.
 	"veyron2/rt"
-	"veyron2/storage/vstore"
 )
 
 var (
 	storeName string
-	port      = flag.Int("port", 10000, "IPV4 port number to serve")
-	useViewer = flag.Bool("useViewer", false, "If true, serve viewer instead")
+	port      = flag.Int("port", 10000, "IPV4 port to serve")
 )
 
 var rootDir = path.Join(
@@ -47,16 +43,6 @@
 	flag.StringVar(&storeName, "store", dir, "Name of the Veyron store")
 }
 
-func serveViewer() {
-	log.Printf("Binding to store on %s", storeName)
-	st, err := vstore.New(storeName)
-	if err != nil {
-		log.Fatalf("Can't connect to store: %s: %s", storeName, err)
-	}
-
-	viewer.ListenAndServe(fmt.Sprintf(":%d", *port), st)
-}
-
 func renderTemplate(w io.Writer, basename string, data interface{}) {
 	filename := path.Join(rootDir, "templates", basename)
 	t, err := template.ParseFiles(filename)
@@ -84,13 +70,10 @@
 func main() {
 	rt.Init()
 
-	if *useViewer {
-		serveViewer()
-	} else {
-		http.HandleFunc("/", wrap(handleHome))
-		http.Handle("/css/", http.FileServer(http.Dir(rootDir)))
-		http.Handle("/js/", http.FileServer(http.Dir(rootDir)))
-		fmt.Printf("Server running at http://localhost:%d\n", *port)
-		http.ListenAndServe(fmt.Sprintf(":%d", *port), nil)
-	}
+	http.HandleFunc("/", wrap(handleHome))
+	http.Handle("/css/", http.FileServer(http.Dir(rootDir)))
+	http.Handle("/js/", http.FileServer(http.Dir(rootDir)))
+
+	fmt.Printf("Server running at http://localhost:%d\n", *port)
+	http.ListenAndServe(fmt.Sprintf(":%d", *port), nil)
 }
diff --git a/examples/todos/todos_init/main.go b/examples/todos/todos_init/main.go
index 7013a0c..e163461 100644
--- a/examples/todos/todos_init/main.go
+++ b/examples/todos/todos_init/main.go
@@ -1,12 +1,4 @@
-// todos_init is a tool to initialize the store with an initial database. This
-// is really for demo purposes; in a real database, the contents would be
-// persistent.
-//
-// The data is loaded from a JSON file, todos_init/data.json.
-//
-// Since JSON doesn't support all of the store types, there is a translation
-// phase, where the contents are loaded into a string form, then converted to
-// the todos/schema schema.
+// todos_init reads data.json and populates the store with initial data.
 package main
 
 import (
@@ -42,7 +34,6 @@
 	if h, err := os.Hostname(); err == nil {
 		hostname = h
 	}
-	// TODO(sadovsky): Change this to be the correct veyron2 path.
 	dir := "global/vstore/" + hostname + "/" + username
 	flag.StringVar(&storeName, "store", dir, "Name of the Veyron store")
 }
@@ -61,7 +52,7 @@
 // state is the initial store state.
 type state struct {
 	store storage.Store
-	tname string // Current transaction name; nil if there's no transaction.
+	tname string // Current transaction name; empty if there's no transaction.
 }
 
 // newState returns a fresh state.
@@ -87,9 +78,11 @@
 	for i, _ := range l {
 		prefix := filepath.Join(l[:i]...)
 		o := st.store.BindObject(naming.Join(st.tname, prefix))
-		if _, err := o.Get(rt.R().TODOContext()); err != nil {
+		if exist, err := o.Exists(rt.R().TODOContext()); err != nil {
+			vlog.Infof("Error checking existence at %q: %s", prefix, err)
+		} else if !exist {
 			if _, err := o.Put(rt.R().TODOContext(), &schema.Dir{}); err != nil {
-				vlog.Errorf("Error creating parent %q: %s", prefix, err)
+				vlog.Infof("Error creating parent %q: %s", prefix, err)
 			}
 		}
 	}
@@ -171,9 +164,8 @@
 // main reads the data JSON file and populates the store.
 func main() {
 	// The client's identity needs to match the Admin ACLs at the empty store
-	// (since only the admin can put data). The identity here matches with that
-	// used for server.ServerConfig.Admin in todos_stored/main.go. An alternative
-	// would be to relax the ACLs on the store.
+	// (since only the admin can put data).
+	// TODO(sadovsky): What identity should we pass here?
 	rt.Init(veyron2.RuntimeID(security.FakePrivateID("anonymous")))
 
 	vlog.Infof("Binding to store on %s", storeName)
diff --git a/runtimes/google/rt/signal.go b/runtimes/google/rt/signal.go
index bbd5e55..d0ef7d4 100644
--- a/runtimes/google/rt/signal.go
+++ b/runtimes/google/rt/signal.go
@@ -8,6 +8,10 @@
 )
 
 func (r *vrt) initSignalHandling() {
+	// TODO(caprita): Given that our node manager implementation is to
+	// kill all child apps when the node manager dies, we should
+	// enable SIGHUP on apps by default.
+
 	// Automatically handle SIGHUP to prevent applications started as
 	// daemons from being killed.  The developer can choose to still listen
 	// on SIGHUP and take a different action if desired.
diff --git a/runtimes/google/vsync/dag.go b/runtimes/google/vsync/dag.go
index 7f97c99..9e860d9 100644
--- a/runtimes/google/vsync/dag.go
+++ b/runtimes/google/vsync/dag.go
@@ -72,8 +72,9 @@
 // and three in-memory (ephemeral) maps (graft, txSet, txGC):
 //   * nodes: one entry per (object, version) with references to the
 //            parent node(s) it is derived from, a reference to the
-//            log record identifying that change, and a reference to
-//            its transaction set (or NoTxID if none)
+//            log record identifying that change, a reference to its
+//            transaction set (or NoTxID if none), and a boolean to
+//            indicate whether this change was a deletion of the object.
 //   * heads: one entry per object pointing to its most recent version
 //            in the nodes table
 //   * trans: one entry per transaction ID containing the set of objects
@@ -137,6 +138,7 @@
 	Parents []storage.Version // references to parent versions
 	Logrec  string            // reference to log record change
 	TxID    TxID              // ID of a transaction set
+	Deleted bool              // true if the change was a delete
 }
 
 type graftInfo struct {
@@ -314,7 +316,7 @@
 //
 // If the transaction ID is set to NoTxID, this node is not part of a transaction.
 // Otherwise, track its membership in the given transaction ID.
-func (d *dag) addNode(oid storage.ID, version storage.Version, remote bool,
+func (d *dag) addNode(oid storage.ID, version storage.Version, remote, deleted bool,
 	parents []storage.Version, logrec string, tid TxID) error {
 	if d.store == nil {
 		return errors.New("invalid DAG")
@@ -413,7 +415,7 @@
 	}
 
 	// Insert the new node in the kvdb.
-	node := &dagNode{Level: level, Parents: parents, Logrec: logrec, TxID: tid}
+	node := &dagNode{Level: level, Parents: parents, Logrec: logrec, TxID: tid, Deleted: deleted}
 	return d.setNode(oid, version, node)
 }
 
@@ -426,6 +428,83 @@
 	return d.nodes.hasKey(key)
 }
 
+// childOf returns true if the node is a child of the parent version.
+// It means that the parent version is found in the node's Parents array.
+func childOf(node *dagNode, parent storage.Version) bool {
+	if node == nil || parent == storage.NoVersion {
+		return false
+	}
+	for _, pver := range node.Parents {
+		if pver == parent {
+			return true
+		}
+	}
+	return false
+}
+
+// hasParent returns true if the node (oid, version) exists in the DAG DB
+// and has (oid, parent) as a parent node. Either "version" or "parent"
+// could be NoVersion (zero).  Thus the 4 cases:
+// 1- "version" and "parent" are _not_ NoVersion: return true if both nodes
+//    exist and have a parent/child relationship.
+// 2- Only "parent" is NoVersion: return true if (oid, version) exists and
+//    either it has no parents (root of the DAG) or at least one of its
+//    parent nodes is a deleted node (i.e. has its "Deleted" flag set true).
+// 3- Only "version" is NoVersion: return true if (oid, parent) exists and
+//    at least one of its children is a deleted node.
+// 4- Both "version" and "parent" are NoVersion: return false
+func (d *dag) hasParent(oid storage.ID, version, parent storage.Version) bool {
+	if d.store == nil {
+		return false
+	}
+
+	switch {
+	case version != storage.NoVersion && parent != storage.NoVersion:
+		if !d.hasNode(oid, parent) {
+			return false
+		}
+		node, err := d.getNode(oid, version)
+		if err != nil {
+			return false
+		}
+		return childOf(node, parent)
+
+	case version != storage.NoVersion && parent == storage.NoVersion:
+		node, err := d.getNode(oid, version)
+		if err != nil {
+			return false
+		}
+		if node.Parents == nil {
+			return true
+		}
+		for _, pver := range node.Parents {
+			if pnode, err := d.getNode(oid, pver); err == nil && pnode.Deleted {
+				return true
+			}
+		}
+		return false
+
+	case version == storage.NoVersion && parent != storage.NoVersion:
+		if !d.hasNode(oid, parent) {
+			return false
+		}
+		head, err := d.getHead(oid)
+		if err != nil {
+			return false
+		}
+		found := false
+		d.ancestorIter(oid, []storage.Version{head}, func(oid storage.ID, v storage.Version, node *dagNode) error {
+			if node.Deleted && childOf(node, parent) {
+				found = true
+				return errors.New("found it -- stop the iteration")
+			}
+			return nil
+		})
+		return found
+	}
+	return false
+}
+
 // addParent adds to the DAG node (oid, version) linkage to this parent node.
 // If the parent linkage is due to a local change (from conflict resolution
 // by blessing an existing version), no need to update the grafting structure.
diff --git a/runtimes/google/vsync/dag_test.go b/runtimes/google/vsync/dag_test.go
index a36bc4c..0a50cdf 100644
--- a/runtimes/google/vsync/dag_test.go
+++ b/runtimes/google/vsync/dag_test.go
@@ -99,7 +99,7 @@
 		t.Error(err)
 	}
 
-	err = dag.addNode(oid, 4, false, []storage.Version{2, 3}, "foobar", NoTxID)
+	err = dag.addNode(oid, 4, false, false, []storage.Version{2, 3}, "foobar", NoTxID)
 	if err == nil || err.Error() != "invalid DAG" {
 		t.Errorf("addNode() did not fail on a closed DAG: %v", err)
 	}
@@ -199,6 +199,9 @@
 	if dag.hasNode(oid, 4) {
 		t.Errorf("hasNode() found an object on a closed DAG")
 	}
+	if dag.hasParent(oid, 3, 2) {
+		t.Errorf("hasParent() found a parent/child relationship on a closed DAG")
+	}
 	if pmap := dag.getParentMap(oid); len(pmap) != 0 {
 		t.Errorf("getParentMap() found data on a closed DAG: %v", pmap)
 	}
@@ -506,26 +509,26 @@
 	}
 
 	// Make sure an existing node cannot be added again.
-	if err = dag.addNode(oid, 1, false, []storage.Version{0, 2}, "foobar", NoTxID); err == nil {
+	if err = dag.addNode(oid, 1, false, false, []storage.Version{0, 2}, "foobar", NoTxID); err == nil {
 		t.Errorf("addNode() did not fail when given an existing node")
 	}
 
 	// Make sure a new node cannot have more than 2 parents.
-	if err = dag.addNode(oid, 3, false, []storage.Version{0, 1, 2}, "foobar", NoTxID); err == nil {
+	if err = dag.addNode(oid, 3, false, false, []storage.Version{0, 1, 2}, "foobar", NoTxID); err == nil {
 		t.Errorf("addNode() did not fail when given 3 parents")
 	}
 
 	// Make sure a new node cannot have an invalid parent.
-	if err = dag.addNode(oid, 3, false, []storage.Version{0, 555}, "foobar", NoTxID); err == nil {
+	if err = dag.addNode(oid, 3, false, false, []storage.Version{0, 555}, "foobar", NoTxID); err == nil {
 		t.Errorf("addNode() did not fail when using an invalid parent")
 	}
 
 	// Make sure a new root node (no parents) cannot be added once a root exists.
 	// For the parents array, check both the "nil" and the empty array as input.
-	if err = dag.addNode(oid, 6789, false, nil, "foobar", NoTxID); err == nil {
+	if err = dag.addNode(oid, 6789, false, false, nil, "foobar", NoTxID); err == nil {
 		t.Errorf("Adding a 2nd root node (nil parents) for object %d in DAG file %s did not fail", oid, dagfile)
 	}
-	if err = dag.addNode(oid, 6789, false, []storage.Version{}, "foobar", NoTxID); err == nil {
+	if err = dag.addNode(oid, 6789, false, false, []storage.Version{}, "foobar", NoTxID); err == nil {
 		t.Errorf("Adding a 2nd root node (empty parents) for object %d in DAG file %s did not fail", oid, dagfile)
 	}
 
@@ -1599,10 +1602,10 @@
 		t.Errorf("Transactions map for Tx ID %v has length %d instead of 0 in DAG file %s", tid_1, n, dagfile)
 	}
 
-	if err := dag.addNode(oid_a, 3, false, []storage.Version{2}, "logrec-a-03", tid_1); err != nil {
+	if err := dag.addNode(oid_a, 3, false, false, []storage.Version{2}, "logrec-a-03", tid_1); err != nil {
 		t.Errorf("Cannot addNode() on object %d and Tx ID %v in DAG file %s: %v", oid_a, tid_1, dagfile, err)
 	}
-	if err := dag.addNode(oid_b, 3, false, []storage.Version{2}, "logrec-b-03", tid_1); err != nil {
+	if err := dag.addNode(oid_b, 3, false, false, []storage.Version{2}, "logrec-b-03", tid_1); err != nil {
 		t.Errorf("Cannot addNode() on object %d and Tx ID %v in DAG file %s: %v", oid_b, tid_1, dagfile, err)
 	}
 
@@ -1620,7 +1623,7 @@
 		t.Errorf("Transactions map for Tx ID %v has length %d instead of 0 in DAG file %s", tid_2, n, dagfile)
 	}
 
-	if err := dag.addNode(oid_c, 2, false, []storage.Version{1}, "logrec-c-02", tid_2); err != nil {
+	if err := dag.addNode(oid_c, 2, false, false, []storage.Version{1}, "logrec-c-02", tid_2); err != nil {
 		t.Errorf("Cannot addNode() on object %d and Tx ID %v in DAG file %s: %v", oid_c, tid_2, dagfile, err)
 	}
 
@@ -1651,7 +1654,7 @@
 		bad_tid++
 	}
 
-	if err := dag.addNode(oid_c, 3, false, []storage.Version{2}, "logrec-c-03", bad_tid); err == nil {
+	if err := dag.addNode(oid_c, 3, false, false, []storage.Version{2}, "logrec-c-03", bad_tid); err == nil {
 		t.Errorf("addNode() did not fail on object %d for a bad Tx ID %v in DAG file %s", oid_c, bad_tid, dagfile)
 	}
 	if err := dag.addNodeTxEnd(bad_tid); err == nil {
@@ -1783,10 +1786,10 @@
 	if tid_1 == NoTxID {
 		t.Fatal("Cannot start 1st DAG addNode() transaction")
 	}
-	if err := dag.addNode(oid_a, 3, false, []storage.Version{2}, "logrec-a-03", tid_1); err != nil {
+	if err := dag.addNode(oid_a, 3, false, false, []storage.Version{2}, "logrec-a-03", tid_1); err != nil {
 		t.Errorf("Cannot addNode() on object %d and Tx ID %v in DAG file %s: %v", oid_a, tid_1, dagfile, err)
 	}
-	if err := dag.addNode(oid_b, 3, false, []storage.Version{2}, "logrec-b-03", tid_1); err != nil {
+	if err := dag.addNode(oid_b, 3, false, false, []storage.Version{2}, "logrec-b-03", tid_1); err != nil {
 		t.Errorf("Cannot addNode() on object %d and Tx ID %v in DAG file %s: %v", oid_b, tid_1, dagfile, err)
 	}
 	if err := dag.addNodeTxEnd(tid_1); err != nil {
@@ -1797,20 +1800,20 @@
 	if tid_2 == NoTxID {
 		t.Fatal("Cannot start 2nd DAG addNode() transaction")
 	}
-	if err := dag.addNode(oid_b, 4, false, []storage.Version{3}, "logrec-b-04", tid_2); err != nil {
+	if err := dag.addNode(oid_b, 4, false, false, []storage.Version{3}, "logrec-b-04", tid_2); err != nil {
 		t.Errorf("Cannot addNode() on object %d and Tx ID %v in DAG file %s: %v", oid_b, tid_2, dagfile, err)
 	}
-	if err := dag.addNode(oid_c, 2, false, []storage.Version{1}, "logrec-c-02", tid_2); err != nil {
+	if err := dag.addNode(oid_c, 2, false, false, []storage.Version{1}, "logrec-c-02", tid_2); err != nil {
 		t.Errorf("Cannot addNode() on object %d and Tx ID %v in DAG file %s: %v", oid_c, tid_2, dagfile, err)
 	}
 	if err := dag.addNodeTxEnd(tid_2); err != nil {
 		t.Errorf("Cannot addNodeTxEnd() for Tx ID %v in DAG file %s: %v", tid_2, dagfile, err)
 	}
 
-	if err := dag.addNode(oid_a, 4, false, []storage.Version{3}, "logrec-a-04", NoTxID); err != nil {
+	if err := dag.addNode(oid_a, 4, false, false, []storage.Version{3}, "logrec-a-04", NoTxID); err != nil {
 		t.Errorf("Cannot addNode() on object %d and Tx ID %v in DAG file %s: %v", oid_a, tid_1, dagfile, err)
 	}
-	if err := dag.addNode(oid_b, 5, false, []storage.Version{4}, "logrec-b-05", NoTxID); err != nil {
+	if err := dag.addNode(oid_b, 5, false, false, []storage.Version{4}, "logrec-b-05", NoTxID); err != nil {
 		t.Errorf("Cannot addNode() on object %d and Tx ID %v in DAG file %s: %v", oid_b, tid_2, dagfile, err)
 	}
 
@@ -1885,7 +1888,7 @@
 	}
 
 	// Add c3 as a new head and prune at that point.  This should GC Tx-2.
-	if err := dag.addNode(oid_c, 3, false, []storage.Version{2}, "logrec-c-03", NoTxID); err != nil {
+	if err := dag.addNode(oid_c, 3, false, false, []storage.Version{2}, "logrec-c-03", NoTxID); err != nil {
 		t.Errorf("Cannot addNode() on object %d in DAG file %s: %v", oid_c, dagfile, err)
 	}
 	if err = dag.moveHead(oid_c, 3); err != nil {
@@ -1916,3 +1919,65 @@
 		}
 	}
 }
+
+// TestHasParent tests lookup of DAG parent/child relationship (i.e. hasParent()).
+func TestHasParent(t *testing.T) {
+	dagfile := dagFilename()
+	defer os.Remove(dagfile)
+
+	dag, err := openDAG(dagfile)
+	if err != nil {
+		t.Fatalf("Cannot open new DAG file %s", dagfile)
+	}
+
+	if err = dagReplayCommands(dag, "local-init-03.sync"); err != nil {
+		t.Fatal(err)
+	}
+
+	oid, err := strToObjID("12345")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// The object mutations are: v1 -> v2 (deleted) -> v3 with v3 as head node.
+
+	type hasParentTest struct {
+		parent storage.Version
+		child  storage.Version
+		result bool
+	}
+	tests := []hasParentTest{
+		{storage.NoVersion, storage.NoVersion, false},
+		{1, 2, true},
+		{2, 3, true},
+		{1, 999, false},
+		{999, 1, false},
+		{1, 3, false},
+		{1, storage.NoVersion, true},
+		{2, storage.NoVersion, false},
+		{3, storage.NoVersion, false},
+		{999, storage.NoVersion, false},
+		{storage.NoVersion, 1, true},
+		{storage.NoVersion, 2, false},
+		{storage.NoVersion, 3, true},
+		{storage.NoVersion, 999, false},
+	}
+
+	for _, test := range tests {
+		result := dag.hasParent(oid, test.child, test.parent)
+		if result != test.result {
+			t.Errorf("hasParent() for parent/child (%d/%d) in DAG file %s: %v instead of %v",
+				test.parent, test.child, dagfile, result, test.result)
+		}
+	}
+
+	// Increase coverage of internal helper function.
+	if childOf(nil, 3) {
+		t.Errorf("childOf() returned true on a nil node")
+	}
+	if childOf(&dagNode{}, storage.NoVersion) {
+		t.Errorf("childOf() returned true on a NoVersion parent")
+	}
+
+	dag.close()
+}
diff --git a/runtimes/google/vsync/ilog.go b/runtimes/google/vsync/ilog.go
index a40606b..f7c46f2 100644
--- a/runtimes/google/vsync/ilog.go
+++ b/runtimes/google/vsync/ilog.go
@@ -410,7 +410,7 @@
 	}
 
 	// Insert the new log record into dag.
-	if err = l.s.dag.addNode(rec.ObjID, rec.CurVers, false, rec.Parents, logKey, txID); err != nil {
+	if err = l.s.dag.addNode(rec.ObjID, rec.CurVers, false, val.Delete, rec.Parents, logKey, txID); err != nil {
 		return err
 	}
 
diff --git a/runtimes/google/vsync/initiator.go b/runtimes/google/vsync/initiator.go
index e885359..d6c786e 100644
--- a/runtimes/google/vsync/initiator.go
+++ b/runtimes/google/vsync/initiator.go
@@ -330,7 +330,7 @@
 	vlog.VI(2).Infof("insertRecInLogAndDag:: Adding log record %v, Tx %v", rec, txID)
 	switch rec.RecType {
 	case NodeRec:
-		return i.syncd.dag.addNode(rec.ObjID, rec.CurVers, true, rec.Parents, logKey, txID)
+		return i.syncd.dag.addNode(rec.ObjID, rec.CurVers, true, rec.Value.Delete, rec.Parents, logKey, txID)
 	case LinkRec:
 		return i.syncd.dag.addParent(rec.ObjID, rec.CurVers, rec.Parents[0], true)
 	default:
@@ -642,7 +642,7 @@
 				// TODO(hpucha): addNode operations arising out of conflict resolution
 				// may need to be part of a transaction when app-driven resolution
 				// is introduced.
-				err = i.syncd.dag.addNode(obj, rec.CurVers, false, rec.Parents, logKey, NoTxID)
+				err = i.syncd.dag.addNode(obj, rec.CurVers, false, rec.Value.Delete, rec.Parents, logKey, NoTxID)
 			case LinkRec:
 				err = i.syncd.dag.addParent(obj, rec.CurVers, rec.Parents[0], false)
 			default:
diff --git a/runtimes/google/vsync/initiator_test.go b/runtimes/google/vsync/initiator_test.go
index 4755fc8..987d4d7 100644
--- a/runtimes/google/vsync/initiator_test.go
+++ b/runtimes/google/vsync/initiator_test.go
@@ -49,7 +49,7 @@
 	if _, err := s.hdlInitiator.getLogRec(objID, expRec.CurVers); err == nil {
 		t.Errorf("GetLogRec didn't fail")
 	}
-	if err = s.dag.addNode(objID, expRec.CurVers, false, expRec.Parents, logKey, NoTxID); err != nil {
+	if err = s.dag.addNode(objID, expRec.CurVers, false, false, expRec.Parents, logKey, NoTxID); err != nil {
 		t.Errorf("AddNode failed with err %v", err)
 	}
 	curRec, err := s.hdlInitiator.getLogRec(objID, expRec.CurVers)
@@ -98,7 +98,7 @@
 		if err != nil {
 			t.Errorf("PutLogRec failed with err %v", err)
 		}
-		if err = s.dag.addNode(objID, expRec.CurVers, false, expRec.Parents, logKey, NoTxID); err != nil {
+		if err = s.dag.addNode(objID, expRec.CurVers, false, false, expRec.Parents, logKey, NoTxID); err != nil {
 			t.Errorf("AddNode failed with err %v", err)
 		}
 	}
diff --git a/runtimes/google/vsync/replay_test.go b/runtimes/google/vsync/replay_test.go
index 1f43a28..26df5fa 100644
--- a/runtimes/google/vsync/replay_test.go
+++ b/runtimes/google/vsync/replay_test.go
@@ -35,6 +35,7 @@
 	devID     DeviceID
 	genVec    GenVector
 	continued bool
+	deleted   bool
 }
 
 func strToObjID(objStr string) (storage.ID, error) {
@@ -91,7 +92,7 @@
 
 		switch args[0] {
 		case "addl", "addr":
-			expNargs := 7
+			expNargs := 8
 			if nargs != expNargs {
 				return nil, fmt.Errorf("%s:%d: need %d args instead of %d", file, lineno, expNargs, nargs)
 			}
@@ -114,7 +115,11 @@
 			if err != nil {
 				return nil, fmt.Errorf("%s:%d: invalid continued bit: %s", file, lineno, args[6])
 			}
-			cmd := syncCommand{version: version, parents: parents, logrec: args[5], continued: continued}
+			del, err := strconv.ParseBool(args[7])
+			if err != nil {
+				return nil, fmt.Errorf("%s:%d: invalid deleted bit: %s", file, lineno, args[7])
+			}
+			cmd := syncCommand{version: version, parents: parents, logrec: args[5], continued: continued, deleted: del}
 			if args[0] == "addl" {
 				cmd.cmd = addLocal
 			} else {
@@ -197,7 +202,8 @@
 	for _, cmd := range cmds {
 		switch cmd.cmd {
 		case addLocal:
-			if err = dag.addNode(cmd.objID, cmd.version, false, cmd.parents, cmd.logrec, NoTxID); err != nil {
+			err = dag.addNode(cmd.objID, cmd.version, false, cmd.deleted, cmd.parents, cmd.logrec, NoTxID)
+			if err != nil {
 				return fmt.Errorf("cannot add local node %d:%d to DAG: %v", cmd.objID, cmd.version, err)
 			}
 			if err := dag.moveHead(cmd.objID, cmd.version); err != nil {
@@ -206,7 +212,8 @@
 			dag.flush()
 
 		case addRemote:
-			if err = dag.addNode(cmd.objID, cmd.version, true, cmd.parents, cmd.logrec, NoTxID); err != nil {
+			err = dag.addNode(cmd.objID, cmd.version, true, cmd.deleted, cmd.parents, cmd.logrec, NoTxID)
+			if err != nil {
 				return fmt.Errorf("cannot add remote node %d:%d to DAG: %v", cmd.objID, cmd.version, err)
 			}
 			dag.flush()
diff --git a/runtimes/google/vsync/testdata/local-init-00.sync b/runtimes/google/vsync/testdata/local-init-00.sync
index 047b2de..d1dace2 100644
--- a/runtimes/google/vsync/testdata/local-init-00.sync
+++ b/runtimes/google/vsync/testdata/local-init-00.sync
@@ -1,6 +1,6 @@
 # Create an object locally and update it twice (linked-list).
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>
+# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
 
-addl|12345|0|||logrec-00|false
-addl|12345|1|0||logrec-01|false
-addl|12345|2|1||logrec-02|false
+addl|12345|0|||logrec-00|false|false
+addl|12345|1|0||logrec-01|false|false
+addl|12345|2|1||logrec-02|false|false
diff --git a/runtimes/google/vsync/testdata/local-init-01.sync b/runtimes/google/vsync/testdata/local-init-01.sync
index 93f0b55..525dd09 100644
--- a/runtimes/google/vsync/testdata/local-init-01.sync
+++ b/runtimes/google/vsync/testdata/local-init-01.sync
@@ -1,12 +1,12 @@
 # Create an object DAG locally with branches and resolved conflicts.
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>
+# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
 
-addl|12345|0|||logrec-00|false
-addl|12345|1|0||logrec-01|false
-addl|12345|2|1||logrec-02|false
-addl|12345|3|1||logrec-03|false
-addl|12345|4|2|3|logrec-04|false
-addl|12345|5|4||logrec-05|false
-addl|12345|6|1||logrec-06|false
-addl|12345|7|5|6|logrec-07|false
-addl|12345|8|7||logrec-08|false
+addl|12345|0|||logrec-00|false|false
+addl|12345|1|0||logrec-01|false|false
+addl|12345|2|1||logrec-02|false|false
+addl|12345|3|1||logrec-03|false|false
+addl|12345|4|2|3|logrec-04|false|false
+addl|12345|5|4||logrec-05|false|false
+addl|12345|6|1||logrec-06|false|false
+addl|12345|7|5|6|logrec-07|false|false
+addl|12345|8|7||logrec-08|false|false
diff --git a/runtimes/google/vsync/testdata/local-init-02.sync b/runtimes/google/vsync/testdata/local-init-02.sync
index 118e3f8..70b1319 100644
--- a/runtimes/google/vsync/testdata/local-init-02.sync
+++ b/runtimes/google/vsync/testdata/local-init-02.sync
@@ -1,10 +1,10 @@
 # Create DAGs for 3 objects locally.
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>
+# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
 
-addl|12345|1|||logrec-a-01|false
-addl|12345|2|1||logrec-a-02|false
+addl|12345|1|||logrec-a-01|false|false
+addl|12345|2|1||logrec-a-02|false|false
 
-addl|67890|1|||logrec-b-01|false
-addl|67890|2|1||logrec-b-02|false
+addl|67890|1|||logrec-b-01|false|false
+addl|67890|2|1||logrec-b-02|false|false
 
-addl|222|1|||logrec-c-01|false
+addl|222|1|||logrec-c-01|false|false
diff --git a/runtimes/google/vsync/testdata/local-init-03.sync b/runtimes/google/vsync/testdata/local-init-03.sync
new file mode 100644
index 0000000..63f4fc6
--- /dev/null
+++ b/runtimes/google/vsync/testdata/local-init-03.sync
@@ -0,0 +1,5 @@
+# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
+
+addl|12345|1|||logrec-01|false|false
+addl|12345|2|1||logrec-02|false|true
+addl|12345|3|2||logrec-03|false|false
diff --git a/runtimes/google/vsync/testdata/local-resolve-00.sync b/runtimes/google/vsync/testdata/local-resolve-00.sync
index b4f1518..02e1c88 100644
--- a/runtimes/google/vsync/testdata/local-resolve-00.sync
+++ b/runtimes/google/vsync/testdata/local-resolve-00.sync
@@ -1,4 +1,4 @@
 # Create an object locally and update it twice (linked-list).
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>
+# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
 
-addl|12345|6|2|5|logrec-06|false
+addl|12345|6|2|5|logrec-06|false|false
diff --git a/runtimes/google/vsync/testdata/remote-conf-00.log.sync b/runtimes/google/vsync/testdata/remote-conf-00.log.sync
index 23beca6..994a66f 100644
--- a/runtimes/google/vsync/testdata/remote-conf-00.log.sync
+++ b/runtimes/google/vsync/testdata/remote-conf-00.log.sync
@@ -1,8 +1,8 @@
 # Update an object remotely three times triggering one conflict after
 # it was created locally up to v2 (i.e. assume the remote sync received
 # it from the local sync at v1, then updated separately).
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>
+# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
 
-addr|12345|3|1||VeyronPhone:1:0|false
-addr|12345|4|3||VeyronPhone:1:1|false
-addr|12345|5|4||VeyronPhone:1:2|false
+addr|12345|3|1||VeyronPhone:1:0|false|false
+addr|12345|4|3||VeyronPhone:1:1|false|false
+addr|12345|5|4||VeyronPhone:1:2|false|false
diff --git a/runtimes/google/vsync/testdata/remote-conf-00.sync b/runtimes/google/vsync/testdata/remote-conf-00.sync
index ea6ab3f..7e555a6 100644
--- a/runtimes/google/vsync/testdata/remote-conf-00.sync
+++ b/runtimes/google/vsync/testdata/remote-conf-00.sync
@@ -1,8 +1,8 @@
 # Update an object remotely three times triggering one conflict after
 # it was created locally up to v2 (i.e. assume the remote sync received
 # it from the local sync at v1, then updated separately).
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>
+# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
 
-addr|12345|3|1||logrec-03|false
-addr|12345|4|3||logrec-04|false
-addr|12345|5|4||logrec-05|false
+addr|12345|3|1||logrec-03|false|false
+addr|12345|4|3||logrec-04|false|false
+addr|12345|5|4||logrec-05|false|false
diff --git a/runtimes/google/vsync/testdata/remote-conf-01.log.sync b/runtimes/google/vsync/testdata/remote-conf-01.log.sync
index 1bcc3c9..4fabf55 100644
--- a/runtimes/google/vsync/testdata/remote-conf-01.log.sync
+++ b/runtimes/google/vsync/testdata/remote-conf-01.log.sync
@@ -3,8 +3,8 @@
 # v0, made its own conflicting v3 that it resolved into v4 (against v1)
 # then made a v5 change.  When the local sync gets back this info it
 # sees 2 graft points: v0-v3 and v1-v4. 
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>
+# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
 
-addr|12345|3|0||VeyronLaptop:1:0|false
-addr|12345|4|1|3|VeyronPhone:1:0|false
-addr|12345|5|4||VeyronPhone:1:1|false
+addr|12345|3|0||VeyronLaptop:1:0|false|false
+addr|12345|4|1|3|VeyronPhone:1:0|false|false
+addr|12345|5|4||VeyronPhone:1:1|false|false
diff --git a/runtimes/google/vsync/testdata/remote-conf-01.sync b/runtimes/google/vsync/testdata/remote-conf-01.sync
index b9aece7..4655a1e 100644
--- a/runtimes/google/vsync/testdata/remote-conf-01.sync
+++ b/runtimes/google/vsync/testdata/remote-conf-01.sync
@@ -3,8 +3,8 @@
 # v0, made its own conflicting v3 that it resolved into v4 (against v1)
 # then made a v5 change.  When the local sync gets back this info it
 # sees 2 graft points: v0-v3 and v1-v4. 
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>
+# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
 
-addr|12345|3|0||logrec-03|false
-addr|12345|4|1|3|logrec-04|false
-addr|12345|5|4||logrec-05|false
+addr|12345|3|0||logrec-03|false|false
+addr|12345|4|1|3|logrec-04|false|false
+addr|12345|5|4||logrec-05|false|false
diff --git a/runtimes/google/vsync/testdata/remote-conf-link.log.sync b/runtimes/google/vsync/testdata/remote-conf-link.log.sync
index 627769a..65d0176 100644
--- a/runtimes/google/vsync/testdata/remote-conf-link.log.sync
+++ b/runtimes/google/vsync/testdata/remote-conf-link.log.sync
@@ -1,5 +1,5 @@
 # Update an object remotely, detect conflict, and bless the local version.
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>
+# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
 
-addr|12345|3|0||VeyronPhone:1:0|false
+addr|12345|3|0||VeyronPhone:1:0|false|false
 linkr|12345|3|1||VeyronPhone:1:1
diff --git a/runtimes/google/vsync/testdata/remote-init-00.log.sync b/runtimes/google/vsync/testdata/remote-init-00.log.sync
index 3a945e9..0030417 100644
--- a/runtimes/google/vsync/testdata/remote-init-00.log.sync
+++ b/runtimes/google/vsync/testdata/remote-init-00.log.sync
@@ -1,6 +1,6 @@
 # Create an object remotely and update it twice (linked-list).
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>
+# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
 
-addr|12345|0|||VeyronPhone:1:0|false
-addr|12345|1|0||VeyronPhone:1:1|false
-addr|12345|2|1||VeyronPhone:1:2|false
+addr|12345|0|||VeyronPhone:1:0|false|false
+addr|12345|1|0||VeyronPhone:1:1|false|false
+addr|12345|2|1||VeyronPhone:1:2|false|false
diff --git a/runtimes/google/vsync/testdata/remote-init-00.sync b/runtimes/google/vsync/testdata/remote-init-00.sync
index 9ae9adc..7f2189a 100644
--- a/runtimes/google/vsync/testdata/remote-init-00.sync
+++ b/runtimes/google/vsync/testdata/remote-init-00.sync
@@ -1,6 +1,6 @@
 # Create an object remotely and update it twice (linked-list).
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>
+# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
 
-addr|12345|0|||logrec-00|false
-addr|12345|1|0||logrec-01|false
-addr|12345|2|1||logrec-02|false
+addr|12345|0|||logrec-00|false|false
+addr|12345|1|0||logrec-01|false|false
+addr|12345|2|1||logrec-02|false|false
diff --git a/runtimes/google/vsync/testdata/remote-init-01.log.sync b/runtimes/google/vsync/testdata/remote-init-01.log.sync
index 9b02859..a357986 100644
--- a/runtimes/google/vsync/testdata/remote-init-01.log.sync
+++ b/runtimes/google/vsync/testdata/remote-init-01.log.sync
@@ -1,6 +1,6 @@
 # Create an object remotely and update it twice (linked-list).
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>
+# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
 
-addr|12345|0|||VeyronPhone:5:0|false
-addr|12345|1|0||VeyronPhone:5:1|false
-addr|12345|2|1||VeyronPhone:5:2|false
+addr|12345|0|||VeyronPhone:5:0|false|false
+addr|12345|1|0||VeyronPhone:5:1|false|false
+addr|12345|2|1||VeyronPhone:5:2|false|false
diff --git a/runtimes/google/vsync/testdata/remote-init-02.log.sync b/runtimes/google/vsync/testdata/remote-init-02.log.sync
index cf71306..c0053ba 100644
--- a/runtimes/google/vsync/testdata/remote-init-02.log.sync
+++ b/runtimes/google/vsync/testdata/remote-init-02.log.sync
@@ -1,17 +1,17 @@
 # Create an object remotely and update it twice (linked-list).
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>
+# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
 
-addr|123|0|||VeyronPhone:1:0|false
+addr|123|0|||VeyronPhone:1:0|false|false
 
-addr|123|1|0||VeyronPhone:1:1|true
-addr|456|0|||VeyronPhone:1:2|true
-addr|789|0|||VeyronPhone:1:3|false
+addr|123|1|0||VeyronPhone:1:1|true|false
+addr|456|0|||VeyronPhone:1:2|true|false
+addr|789|0|||VeyronPhone:1:3|false|false
 
-addr|789|1|0||VeyronPhone:1:4|false
+addr|789|1|0||VeyronPhone:1:4|false|false
 
-addr|789|2|0||VeyronTab:1:0|false
+addr|789|2|0||VeyronTab:1:0|false|false
 
-addr|789|3|1|2|VeyronPhone:2:0|false
+addr|789|3|1|2|VeyronPhone:2:0|false|false
 
-addr|123|2|1||VeyronPhone:2:1|true
-addr|456|1|0||VeyronPhone:2:2|false
\ No newline at end of file
+addr|123|2|1||VeyronPhone:2:1|true|false
+addr|456|1|0||VeyronPhone:2:2|false|false
diff --git a/runtimes/google/vsync/testdata/remote-noconf-00.log.sync b/runtimes/google/vsync/testdata/remote-noconf-00.log.sync
index 55afb29..c291155 100644
--- a/runtimes/google/vsync/testdata/remote-noconf-00.log.sync
+++ b/runtimes/google/vsync/testdata/remote-noconf-00.log.sync
@@ -1,8 +1,8 @@
 # Update an object remotely three times without triggering a conflict
 # after it was created locally up to v2 (i.e. assume the remote sync
 # received it from the local sync first, then updated it).
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>
+# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
 
-addr|12345|3|2||VeyronPhone:1:0|false
-addr|12345|4|3||VeyronPhone:1:1|false
-addr|12345|5|4||VeyronPhone:1:2|false
+addr|12345|3|2||VeyronPhone:1:0|false|false
+addr|12345|4|3||VeyronPhone:1:1|false|false
+addr|12345|5|4||VeyronPhone:1:2|false|false
diff --git a/runtimes/google/vsync/testdata/remote-noconf-00.sync b/runtimes/google/vsync/testdata/remote-noconf-00.sync
index fc7b633..9b95c86 100644
--- a/runtimes/google/vsync/testdata/remote-noconf-00.sync
+++ b/runtimes/google/vsync/testdata/remote-noconf-00.sync
@@ -1,8 +1,8 @@
 # Update an object remotely three times without triggering a conflict
 # after it was created locally up to v2 (i.e. assume the remote sync
 # received it from the local sync first, then updated it).
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>
+# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
 
-addr|12345|3|2||logrec-03|false
-addr|12345|4|3||logrec-04|false
-addr|12345|5|4||logrec-05|false
+addr|12345|3|2||logrec-03|false|false
+addr|12345|4|3||logrec-04|false|false
+addr|12345|5|4||logrec-05|false|false
diff --git a/runtimes/google/vsync/testdata/remote-noconf-link-00.log.sync b/runtimes/google/vsync/testdata/remote-noconf-link-00.log.sync
index b3849d5..ab1e2c4 100644
--- a/runtimes/google/vsync/testdata/remote-noconf-link-00.log.sync
+++ b/runtimes/google/vsync/testdata/remote-noconf-link-00.log.sync
@@ -1,5 +1,5 @@
 # Update an object remotely, detect conflict, and bless the remote version.
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>
+# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
 
-addr|12345|3|0||VeyronPhone:1:0|false
+addr|12345|3|0||VeyronPhone:1:0|false|false
 linkr|12345|1|3||VeyronPhone:1:1
diff --git a/runtimes/google/vsync/testdata/remote-noconf-link-01.log.sync b/runtimes/google/vsync/testdata/remote-noconf-link-01.log.sync
index 9495dc1..4ee0b53 100644
--- a/runtimes/google/vsync/testdata/remote-noconf-link-01.log.sync
+++ b/runtimes/google/vsync/testdata/remote-noconf-link-01.log.sync
@@ -1,5 +1,5 @@
 # Update an object remotely, detect conflict, and bless the local version.
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>
+# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
 
-addr|12345|3|0||VeyronPhone:1:0|false
+addr|12345|3|0||VeyronPhone:1:0|false|false
 linkr|12345|3|2||VeyronPhone:1:1
diff --git a/runtimes/google/vsync/testdata/remote-noconf-link-02.log.sync b/runtimes/google/vsync/testdata/remote-noconf-link-02.log.sync
index 455fb5c..53d9735 100644
--- a/runtimes/google/vsync/testdata/remote-noconf-link-02.log.sync
+++ b/runtimes/google/vsync/testdata/remote-noconf-link-02.log.sync
@@ -1,6 +1,6 @@
 # Update an object remotely, detect conflict, and bless the remote version, and continue updating.
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>
+# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
 
-addr|12345|3|0||VeyronPhone:1:0|false
+addr|12345|3|0||VeyronPhone:1:0|false|false
 linkr|12345|2|3||VeyronPhone:1:1
-addr|12345|4|2||VeyronPhone:2:0|false
+addr|12345|4|2||VeyronPhone:2:0|false|false
diff --git a/runtimes/google/vsync/testdata/remote-noconf-link-repeat.log.sync b/runtimes/google/vsync/testdata/remote-noconf-link-repeat.log.sync
index 9a04969..2ca80bf 100644
--- a/runtimes/google/vsync/testdata/remote-noconf-link-repeat.log.sync
+++ b/runtimes/google/vsync/testdata/remote-noconf-link-repeat.log.sync
@@ -1,5 +1,5 @@
 # Resolve the same conflict on two different devices.
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>
+# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
 
 linkr|12345|2|3||VeyronLaptop:1:0
 
diff --git a/runtimes/google/vsync/testdata/test-1obj.gc.sync b/runtimes/google/vsync/testdata/test-1obj.gc.sync
index 9cef648..93eca9e 100644
--- a/runtimes/google/vsync/testdata/test-1obj.gc.sync
+++ b/runtimes/google/vsync/testdata/test-1obj.gc.sync
@@ -1,12 +1,12 @@
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>
+# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
 # Local node is A. Remote nodes are B and C.
-addr|12345|0|||C:1:0|false
-addr|12345|1|0||B:1:0|false
-addl|12345|2|0||A:1:0|false
-addl|12345|3|1|2|A:2:0|false
-addr|12345|4|3||C:2:0|false
-addr|12345|5|3||B:2:0|false
-addr|12345|6|4|5|B:3:0|false
+addr|12345|0|||C:1:0|false|false
+addr|12345|1|0||B:1:0|false|false
+addl|12345|2|0||A:1:0|false|false
+addl|12345|3|1|2|A:2:0|false|false
+addr|12345|4|3||C:2:0|false|false
+addr|12345|5|3||B:2:0|false|false
+addr|12345|6|4|5|B:3:0|false|false
 # Devtable state
 setdev|A|A:2,B:3,C:2
 setdev|B|A:2,B:3,C:2
diff --git a/runtimes/google/vsync/testdata/test-3obj.gc.sync b/runtimes/google/vsync/testdata/test-3obj.gc.sync
index fcf6598..d9a3899 100644
--- a/runtimes/google/vsync/testdata/test-3obj.gc.sync
+++ b/runtimes/google/vsync/testdata/test-3obj.gc.sync
@@ -1,42 +1,42 @@
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>
+# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
 # Local node is A. Remote nodes are B and C.
-addl|123|1|||A:1:0|false
+addl|123|1|||A:1:0|false|false
 
-addr|456|1|||B:1:0|false
+addr|456|1|||B:1:0|false|false
 
-addr|456|2|1||B:2:0|false
-addr|123|2|1||B:2:1|false
+addr|456|2|1||B:2:0|false|false
+addr|123|2|1||B:2:1|false|false
 
-addl|456|3|2||A:2:0|false
-addl|123|4|2||A:2:1|false
+addl|456|3|2||A:2:0|false|false
+addl|123|4|2||A:2:1|false|false
 
-addr|789|1|||C:1:0|false
+addr|789|1|||C:1:0|false|false
 
-addr|789|2|1||C:2:0|false
+addr|789|2|1||C:2:0|false|false
 
-addr|123|3|1||C:3:0|false
-addr|789|3|2||C:3:1|false
+addr|123|3|1||C:3:0|false|false
+addr|789|3|2||C:3:1|false|false
 
-addr|123|5|3|2|C:4:0|false
+addr|123|5|3|2|C:4:0|false|false
 
-addl|123|6|4|5|A:3:0|false
-addl|456|4|3||A:3:1|false
-addl|789|4|3||A:3:2|false
+addl|123|6|4|5|A:3:0|false|false
+addl|456|4|3||A:3:1|false|false
+addl|789|4|3||A:3:2|false|false
 
-addr|456|5|2||B:3:0|false
+addr|456|5|2||B:3:0|false|false
 
-addl|456|7|4|5|A:4:0|false
+addl|456|7|4|5|A:4:0|false|false
 
-addr|456|6|2||C:5:0|false
-addr|123|7|5||C:5:1|false
-addr|123|8|7||C:5:2|false
-addr|789|5|3||C:5:3|false
+addr|456|6|2||C:5:0|false|false
+addr|123|7|5||C:5:1|false|false
+addr|123|8|7||C:5:2|false|false
+addr|789|5|3||C:5:3|false|false
 
-addl|123|9|6|8|A:5:0|false
-addl|456|8|6|7|A:5:1|false
-addl|789|6|4|5|A:5:2|false
+addl|123|9|6|8|A:5:0|false|false
+addl|456|8|6|7|A:5:1|false|false
+addl|789|6|4|5|A:5:2|false|false
 
-addl|123|10|9||A:6:0|false
+addl|123|10|9||A:6:0|false|false
 
 # Devtable state
 setdev|A|A:6,B:3,C:5
diff --git a/runtimes/google/vsync/util_test.go b/runtimes/google/vsync/util_test.go
index 002ab95..263ee30 100644
--- a/runtimes/google/vsync/util_test.go
+++ b/runtimes/google/vsync/util_test.go
@@ -172,7 +172,7 @@
 	if err != nil {
 		return err
 	}
-	if err := s.dag.addNode(rec.ObjID, rec.CurVers, false, rec.Parents, logKey, NoTxID); err != nil {
+	if err := s.dag.addNode(rec.ObjID, rec.CurVers, false, rec.Value.Delete, rec.Parents, logKey, NoTxID); err != nil {
 		return err
 	}
 	if err := s.dag.moveHead(rec.ObjID, rec.CurVers); err != nil {
@@ -205,7 +205,7 @@
 				ObjID:   cmd.objID,
 				CurVers: cmd.version,
 				Parents: cmd.parents,
-				Value:   LogValue{Continued: cmd.continued},
+				Value:   LogValue{Continued: cmd.continued, Delete: cmd.deleted},
 			}
 			if err := populateLogAndDAG(s, rec); err != nil {
 				return err
diff --git a/runtimes/google/vsync/vsync.vdl.go b/runtimes/google/vsync/vsync.vdl.go
index 3b4602e..459e9da 100644
--- a/runtimes/google/vsync/vsync.vdl.go
+++ b/runtimes/google/vsync/vsync.vdl.go
@@ -342,36 +342,29 @@
 			{Name: "Err", Type: 68},
 		},
 
-		OutStream: 82,
+		OutStream: 79,
 	}
 
 	result.TypeDefs = []_gen_vdlutil.Any{
-		_gen_wiretype.NamedPrimitiveType{Type: 0x3, Name: "veyron/runtimes/google/vsync.DeviceID", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x35, Name: "veyron/runtimes/google/vsync.GenID", Tags: []string(nil)}, _gen_wiretype.MapType{Key: 0x41, Elem: 0x42, Name: "veyron/runtimes/google/vsync.GenVector", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x1, Name: "error", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x35, Name: "veyron/runtimes/google/vsync.LSN", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x32, Name: "byte", Tags: []string(nil)}, _gen_wiretype.ArrayType{Elem: 0x46, Len: 0x10, Name: "veyron2/storage.ID", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x35, Name: "veyron2/storage.Version", Tags: []string(nil)}, _gen_wiretype.SliceType{Elem: 0x48, Name: "", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x1, Name: "anydata", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x32, Name: "veyron2/storage.TagOp", Tags: []string(nil)}, _gen_wiretype.StructType{
-			[]_gen_wiretype.FieldType{
-				_gen_wiretype.FieldType{Type: 0x4b, Name: "Op"},
-				_gen_wiretype.FieldType{Type: 0x47, Name: "ACL"},
-			},
-			"veyron2/storage.Tag", []string(nil)},
-		_gen_wiretype.SliceType{Elem: 0x4c, Name: "veyron2/storage.TagList", Tags: []string(nil)}, _gen_wiretype.StructType{
+		_gen_wiretype.NamedPrimitiveType{Type: 0x3, Name: "veyron/runtimes/google/vsync.DeviceID", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x35, Name: "veyron/runtimes/google/vsync.GenID", Tags: []string(nil)}, _gen_wiretype.MapType{Key: 0x41, Elem: 0x42, Name: "veyron/runtimes/google/vsync.GenVector", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x1, Name: "error", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x35, Name: "veyron/runtimes/google/vsync.LSN", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x32, Name: "byte", Tags: []string(nil)}, _gen_wiretype.ArrayType{Elem: 0x46, Len: 0x10, Name: "veyron2/storage.ID", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x35, Name: "veyron2/storage.Version", Tags: []string(nil)}, _gen_wiretype.SliceType{Elem: 0x48, Name: "", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x1, Name: "anydata", Tags: []string(nil)}, _gen_wiretype.StructType{
 			[]_gen_wiretype.FieldType{
 				_gen_wiretype.FieldType{Type: 0x3, Name: "Name"},
 				_gen_wiretype.FieldType{Type: 0x47, Name: "ID"},
 			},
 			"veyron2/storage.DEntry", []string(nil)},
-		_gen_wiretype.SliceType{Elem: 0x4e, Name: "", Tags: []string(nil)}, _gen_wiretype.StructType{
+		_gen_wiretype.SliceType{Elem: 0x4b, Name: "", Tags: []string(nil)}, _gen_wiretype.StructType{
 			[]_gen_wiretype.FieldType{
 				_gen_wiretype.FieldType{Type: 0x47, Name: "ID"},
 				_gen_wiretype.FieldType{Type: 0x48, Name: "PriorVersion"},
 				_gen_wiretype.FieldType{Type: 0x48, Name: "Version"},
 				_gen_wiretype.FieldType{Type: 0x2, Name: "IsRoot"},
 				_gen_wiretype.FieldType{Type: 0x4a, Name: "Value"},
-				_gen_wiretype.FieldType{Type: 0x4d, Name: "Tags"},
-				_gen_wiretype.FieldType{Type: 0x4f, Name: "Dir"},
+				_gen_wiretype.FieldType{Type: 0x4c, Name: "Dir"},
 			},
 			"veyron/services/store/raw.Mutation", []string(nil)},
 		_gen_wiretype.StructType{
 			[]_gen_wiretype.FieldType{
-				_gen_wiretype.FieldType{Type: 0x50, Name: "Mutation"},
+				_gen_wiretype.FieldType{Type: 0x4d, Name: "Mutation"},
 				_gen_wiretype.FieldType{Type: 0x25, Name: "SyncTime"},
 				_gen_wiretype.FieldType{Type: 0x2, Name: "Delete"},
 				_gen_wiretype.FieldType{Type: 0x2, Name: "Continued"},
@@ -386,7 +379,7 @@
 				_gen_wiretype.FieldType{Type: 0x47, Name: "ObjID"},
 				_gen_wiretype.FieldType{Type: 0x48, Name: "CurVers"},
 				_gen_wiretype.FieldType{Type: 0x49, Name: "Parents"},
-				_gen_wiretype.FieldType{Type: 0x51, Name: "Value"},
+				_gen_wiretype.FieldType{Type: 0x4e, Name: "Value"},
 			},
 			"veyron/runtimes/google/vsync.LogRec", []string(nil)},
 	}
diff --git a/runtimes/google/vsync/watcher.go b/runtimes/google/vsync/watcher.go
index d470ce0..4d887c3 100644
--- a/runtimes/google/vsync/watcher.go
+++ b/runtimes/google/vsync/watcher.go
@@ -163,8 +163,6 @@
 	w.syncd.lock.Lock()
 	defer w.syncd.lock.Unlock()
 
-	// TODO(rdaoud): handle object deletion (State == DoesNotExist)
-
 	vlog.VI(1).Infof("processChanges:: ready to process changes")
 
 	var lastResmark []byte
diff --git a/runtimes/google/vsync/watcher_test.go b/runtimes/google/vsync/watcher_test.go
index 64b5feb..dee40cf 100644
--- a/runtimes/google/vsync/watcher_test.go
+++ b/runtimes/google/vsync/watcher_test.go
@@ -166,7 +166,6 @@
 				PriorVersion: 0x0,
 				Version:      0x4d65822107fcfd52,
 				Value:        "value-root",
-				Tags:         nil,
 				Dir: []storage.DEntry{
 					storage.DEntry{
 						Name: "a",
@@ -187,7 +186,6 @@
 				PriorVersion: 0x0,
 				Version:      0x57e9d1860d1d68d8,
 				Value:        "value-a",
-				Tags:         nil,
 				Dir: []storage.DEntry{
 					storage.DEntry{
 						Name: "b",
@@ -208,7 +206,6 @@
 				PriorVersion: 0x0,
 				Version:      0x55104dc76695721d,
 				Value:        "value-b",
-				Tags:         nil,
 				Dir:          nil,
 			},
 			ResumeMarker: nil,
@@ -225,7 +222,6 @@
 				PriorVersion: 0x57e9d1860d1d68d8,
 				Version:      0x365a858149c6e2d1,
 				Value:        "value-a",
-				Tags:         nil,
 				Dir: []storage.DEntry{
 					storage.DEntry{
 						Name: "b",
@@ -251,7 +247,6 @@
 				PriorVersion: 0x0,
 				Version:      0x380704bb7b4d7c03,
 				Value:        "value-c",
-				Tags:         nil,
 				Dir:          nil,
 			},
 			ResumeMarker: nil,
diff --git a/security/auditor/auditor.go b/security/auditor/auditor.go
new file mode 100644
index 0000000..1dcdf0c
--- /dev/null
+++ b/security/auditor/auditor.go
@@ -0,0 +1,28 @@
+// Package auditor provides mechanisms to write method invocations to an audit log.
+//
+// Typical use would be for tracking sensitive operations like private key usage (NewPrivateID),
+// or sensitive RPC method invocations.
+package auditor
+
+import "time"
+
+// Auditor is the interface for writing auditable events.
+type Auditor interface {
+	Audit(entry Entry) error
+}
+
+// Entry is the information logged on each auditable event.
+type Entry struct {
+	// Method being invoked.
+	Method string
+	// Arguments to the method.
+	// Any sensitive data in the arguments should not be included,
+	// even if the argument was provided to the real method invocation.
+	Arguments []interface{}
+	// Result of the method invocation.
+	// A common use case is to audit only successful method invocations.
+	Results []interface{}
+
+	// Timestamp of method invocation.
+	Timestamp time.Time
+}
diff --git a/security/auditor/id.go b/security/auditor/id.go
new file mode 100644
index 0000000..5a7795a
--- /dev/null
+++ b/security/auditor/id.go
@@ -0,0 +1,79 @@
+package auditor
+
+import (
+	"crypto/ecdsa"
+	"fmt"
+	"time"
+
+	"veyron2/security"
+)
+
+type auditingID struct {
+	id      security.PrivateID
+	auditor Auditor
+}
+
+type args []interface{}
+
+// NewPrivateID returns a security.PrivateID implementation that wraps over 'wrapped' but
+// logs all operations that use the private key of wrapped to the auditor.
+func NewPrivateID(wrapped security.PrivateID, auditor Auditor) security.PrivateID {
+	return &auditingID{wrapped, auditor}
+}
+
+func (id *auditingID) Sign(message []byte) (security.Signature, error) {
+	sig, err := id.id.Sign(message)
+	// Do not save the signature itself.
+	if err = id.audit(err, "Sign", args{message}, nil); err != nil {
+		return security.Signature{}, err
+	}
+	return sig, nil
+}
+
+func (id *auditingID) PublicKey() *ecdsa.PublicKey {
+	return id.id.PublicKey()
+}
+
+func (id *auditingID) PublicID() security.PublicID {
+	return id.id.PublicID()
+}
+
+func (id *auditingID) Bless(blessee security.PublicID, blessingName string, duration time.Duration, caveats []security.ServiceCaveat) (security.PublicID, error) {
+	blessed, err := id.id.Bless(blessee, blessingName, duration, caveats)
+	if err = id.audit(err, "Bless", args{blessee, blessingName, duration, caveats}, blessed); err != nil {
+		return nil, err
+	}
+	return blessed, nil
+}
+
+func (id *auditingID) Derive(publicID security.PublicID) (security.PrivateID, error) {
+	derived, err := id.id.Derive(publicID)
+	// Do not save the derived private ID itself, do not want to log private keys etc. in the derived ID.
+	if err = id.audit(err, "Derive", args{publicID}, nil); err != nil {
+		return nil, err
+	}
+	return derived, nil
+}
+
+func (id *auditingID) MintDischarge(caveat security.ThirdPartyCaveat, context security.Context, duration time.Duration, caveats []security.ServiceCaveat) (security.ThirdPartyDischarge, error) {
+	d, err := id.id.MintDischarge(caveat, context, duration, caveats)
+	if err = id.audit(err, "MintDischarge", args{caveat, context, duration, caveats}, nil); err != nil {
+		return nil, err
+	}
+	return d, nil
+}
+
+func (id *auditingID) audit(err error, method string, args args, result interface{}) error {
+	if err != nil {
+		return err
+	}
+	if err = id.auditor.Audit(Entry{
+		Method:    method,
+		Arguments: []interface{}(args),
+		Results:   []interface{}{result},
+		Timestamp: time.Now(),
+	}); err != nil {
+		return fmt.Errorf("failed to audit call to %q: %v", method, err)
+	}
+	return nil
+}
diff --git a/security/auditor/id_test.go b/security/auditor/id_test.go
new file mode 100644
index 0000000..6ece0ec
--- /dev/null
+++ b/security/auditor/id_test.go
@@ -0,0 +1,215 @@
+package auditor_test
+
+import (
+	"crypto/ecdsa"
+	"errors"
+	"reflect"
+	"strings"
+	"testing"
+	"time"
+
+	"veyron/security/auditor"
+	"veyron2/naming"
+	"veyron2/security"
+)
+
+func TestAuditingID(t *testing.T) {
+	var (
+		// A bunch of values that will be used as arguments to method calls.
+		byteSlice        []byte
+		publicID         = security.FakePublicID("publicid")
+		str              string
+		duration         time.Duration
+		caveats          []security.ServiceCaveat
+		thirdPartyCaveat thirdPartyCaveat
+		context          context
+
+		// A bunch of values that will be returned as results
+		rSignature = security.Signature{R: []byte{1}, S: []byte{1}}
+		rBlessing  = security.FakePublicID("blessed")
+		rDerived   = new(mockID)
+		rDischarge = new(discharge)
+
+		// The error returned by all calls to mockID operations
+		wantErr = errors.New("call failed")
+
+		// The PrivateID to wrap over
+		mockID      = new(mockID)
+		mockAuditor = new(mockAuditor)
+	)
+	id := auditor.NewPrivateID(mockID, mockAuditor)
+	tests := []struct {
+		Method        string
+		Args          V
+		Result        interface{} // Result returned by the Method call.
+		AuditedResult interface{} // Result written to the audit entry.
+	}{
+		{"Sign", V{byteSlice}, rSignature, nil},
+		{"Bless", V{publicID, str, duration, caveats}, rBlessing, rBlessing},
+		{"Derive", V{publicID}, rDerived, nil},
+		{"MintDischarge", V{thirdPartyCaveat, context, duration, caveats}, rDischarge, nil},
+	}
+	for _, test := range tests {
+		// Test1: Nothing is written to the audit log if the underlying operation fails.
+		mockID.NextError = wantErr
+		results, err := call(id, test.Method, test.Args)
+		if err != nil {
+			t.Errorf("failed to invoke id.%v(%#v): %v", test.Method, test.Args, err)
+			continue
+		}
+		got, ok := results[len(results)-1].(error)
+		if !ok || got != wantErr {
+			t.Errorf("id.%v(%#v) returned (..., %v), want (..., %v)", test.Method, test.Args, got, wantErr)
+		}
+		// Nothing should be audited
+		if audited := mockAuditor.Release(); !reflect.DeepEqual(audited, auditor.Entry{}) {
+			t.Errorf("id.%v(%#v) resulted in [%+v] being written to the audit log, nothing should have been", test.Method, test.Args, audited)
+		}
+
+		// Test2: If the auditing fails then the operation should fail too.
+		mockAuditor.NextError = errors.New("auditor failed")
+		results, err = call(id, test.Method, test.Args)
+		if err != nil {
+			t.Errorf("failed to invoke id.%v(%#v): %v", test.Method, test.Args, err)
+			continue
+		}
+		got, ok = results[len(results)-1].(error)
+		if !ok || !strings.HasSuffix(got.Error(), "auditor failed") {
+			t.Errorf("id.%v(%#v) returned (..., %v) when auditor failed, wanted (..., %v)", test.Method, test.Args, got, "... auditor failed")
+		}
+
+		// Test3: Should audit the call and return the same value as the underlying operation.
+		now := time.Now()
+		mockID.NextResult = test.Result
+		results, err = call(id, test.Method, test.Args)
+		audited := mockAuditor.Release()
+		if err != nil {
+			t.Errorf("failed to invoke id.%v(%#v): %v", test.Method, test.Args, err)
+			continue
+		}
+		if got := results[len(results)-1]; got != nil {
+			t.Errorf("id.%v(%#v) returned an error: %v", test.Method, test.Args, got)
+		}
+		if got := results[0]; !reflect.DeepEqual(got, test.Result) {
+			t.Errorf("id.%v(%#v) returned %v(%T) want %v(%T)", test.Method, test.Args, got, got, test.Result, test.Result)
+		}
+		if audited.Timestamp.Before(now) || audited.Timestamp.IsZero() {
+			t.Errorf("id.%v(%#v) audited the time as %v, should have been a time after %v", test.Method, test.Args, audited.Timestamp, now)
+		}
+		if want := (auditor.Entry{
+			Method:    test.Method,
+			Arguments: []interface{}(test.Args),
+			Results:   []interface{}{test.AuditedResult},
+			Timestamp: audited.Timestamp, // Hard to come up with the expected timestamp, relying on sanity check above.
+		}); !reflect.DeepEqual(audited, want) {
+			t.Errorf("id.%v(%#v) resulted in [%#v] being audited, wanted [%#v]", test.Method, test.Args, audited, want)
+		}
+	}
+}
+
+type mockID struct {
+	NextError  error
+	NextResult interface{}
+	publicKey  *ecdsa.PublicKey
+}
+
+func (id *mockID) Sign(message []byte) (security.Signature, error) {
+	defer id.reset()
+	ret, ok := id.NextResult.(security.Signature)
+	if ok {
+		return ret, id.NextError
+	}
+	return security.Signature{}, id.NextError
+}
+
+func (id *mockID) PublicID() security.PublicID {
+	defer id.reset()
+	return id.NextResult.(security.PublicID)
+}
+func (id *mockID) Bless(blessee security.PublicID, blessingName string, duration time.Duration, caveats []security.ServiceCaveat) (security.PublicID, error) {
+	defer id.reset()
+	result, _ := id.NextResult.(security.PublicID)
+	return result, id.NextError
+}
+func (id *mockID) Derive(publicID security.PublicID) (security.PrivateID, error) {
+	defer id.reset()
+	result, _ := id.NextResult.(security.PrivateID)
+	return result, id.NextError
+}
+func (id *mockID) MintDischarge(caveat security.ThirdPartyCaveat, context security.Context, duration time.Duration, caveats []security.ServiceCaveat) (security.ThirdPartyDischarge, error) {
+	defer id.reset()
+	result, _ := id.NextResult.(security.ThirdPartyDischarge)
+	return result, id.NextError
+}
+
+func (id *mockID) reset() {
+	id.NextError = nil
+	id.NextResult = nil
+}
+
+func (id *mockID) PublicKey() *ecdsa.PublicKey { return id.publicKey }
+
+type mockAuditor struct {
+	LastEntry auditor.Entry
+	NextError error
+}
+
+func (a *mockAuditor) Audit(entry auditor.Entry) error {
+	if a.NextError != nil {
+		err := a.NextError
+		a.NextError = nil
+		return err
+	}
+	a.LastEntry = entry
+	return nil
+}
+
+func (a *mockAuditor) Release() auditor.Entry {
+	entry := a.LastEntry
+	a.LastEntry = auditor.Entry{}
+	return entry
+}
+
+type V []interface{}
+
+// thirdPartyCaveat implements security.ThirdPartyCaveat
+type thirdPartyCaveat struct{}
+
+func (thirdPartyCaveat) Validate(security.Context) error { return nil }
+func (thirdPartyCaveat) ID() security.ThirdPartyCaveatID { return "thirdPartyCaveatID" }
+func (thirdPartyCaveat) Location() string                { return "thirdPartyCaveatLocation" }
+
+// context implements security.Context
+type context struct{}
+
+func (context) Method() string                                { return "method" }
+func (context) Name() string                                  { return "name" }
+func (context) Suffix() string                                { return "suffix" }
+func (context) Label() security.Label                         { return security.ReadLabel }
+func (context) CaveatDischarges() security.CaveatDischargeMap { return nil }
+func (context) LocalID() security.PublicID                    { return nil }
+func (context) RemoteID() security.PublicID                   { return nil }
+func (context) LocalEndpoint() naming.Endpoint                { return nil }
+func (context) RemoteEndpoint() naming.Endpoint               { return nil }
+
+// discharge implements the security.ThirdPartyDischarge interface
+type discharge struct{}
+
+func (*discharge) CaveatID() security.ThirdPartyCaveatID       { return "thirdPartyCaveatID" }
+func (*discharge) ThirdPartyCaveats() []security.ServiceCaveat { return nil }
+
+func call(receiver interface{}, method string, args V) (results []interface{}, err interface{}) {
+	defer func() {
+		err = recover()
+	}()
+	callargs := make([]reflect.Value, len(args))
+	for idx, arg := range args {
+		callargs[idx] = reflect.ValueOf(arg)
+	}
+	callresults := reflect.ValueOf(receiver).MethodByName(method).Call(callargs)
+	results = make([]interface{}, len(callresults))
+	for idx, res := range callresults {
+		results[idx] = res.Interface()
+	}
+	return
+}
diff --git a/security/serialization/serialization.go b/security/serialization/serialization.go
new file mode 100644
index 0000000..50a52e3
--- /dev/null
+++ b/security/serialization/serialization.go
@@ -0,0 +1,5 @@
+// Package serialization defines a general-purpose io.WriteCloser
+// for writing data along with an appropriate signature that
+// establishes integrity and authenticity of data, and an io.Reader
+// for reading the data after verifying the signature.
+package serialization
diff --git a/security/serialization/serialization_test.go b/security/serialization/serialization_test.go
new file mode 100644
index 0000000..524e6f3
--- /dev/null
+++ b/security/serialization/serialization_test.go
@@ -0,0 +1,142 @@
+package serialization
+
+import (
+	"bytes"
+	"crypto/ecdsa"
+	"fmt"
+	"io"
+	"io/ioutil"
+	mrand "math/rand"
+	"reflect"
+	"strings"
+	"testing"
+
+	"veyron/lib/testutil"
+
+	"veyron2/rt"
+	"veyron2/security"
+)
+// bufferCloser is an in-memory buffer whose Close is a no-op.
+type bufferCloser struct {
+	bytes.Buffer
+}
+
+func (*bufferCloser) Close() error {
+	return nil
+}
+// signingWrite writes each element of writeList through a SigningWriteCloser and closes it.
+func signingWrite(d, s io.WriteCloser, signer security.Signer, writeList [][]byte, opts *Options) error {
+	swc, err := NewSigningWriteCloser(d, s, signer, opts)
+	if err != nil {
+		return fmt.Errorf("NewSigningWriteCloser failed: %s", err)
+	}
+	for _, b := range writeList {
+		if _, err := swc.Write(b); err != nil {
+			return fmt.Errorf("signingWriteCloser.Write failed: %s", err)
+		}
+	}
+	if err := swc.Close(); err != nil {
+		return fmt.Errorf("signingWriteCloser.Close failed: %s", err)
+	}
+	return nil
+}
+// verifyingRead reads all data through a VerifyingReader constructed over d and s.
+func verifyingRead(d, s io.Reader, key *ecdsa.PublicKey) ([]byte, error) {
+	vr, err := NewVerifyingReader(d, s, key)
+	if err != nil {
+		return nil, fmt.Errorf("NewVerifyingReader failed: %s", err)
+	}
+	return ioutil.ReadAll(vr)
+}
+// newSigner returns a fresh security.Signer backed by a newly minted runtime identity.
+func newSigner() security.Signer {
+	// TODO(ashankar,ataly): Remove use of "rt" here and replace with a factory
+	// function for PrivateID/Signer when possible.
+	r, err := rt.New()
+	if err != nil {
+		panic(err)
+	}
+	id, err := r.NewIdentity("irrelevant")
+	if err != nil {
+		panic(err)
+	}
+	return id
+}
+// matchesErrorPattern reports whether err is nil for an empty pattern, or non-nil and contains pattern.
+func matchesErrorPattern(err error, pattern string) bool {
+	if (len(pattern) == 0) != (err == nil) {
+		return false
+	}
+	return err == nil || strings.Index(err.Error(), pattern) >= 0
+}
+// TestRoundTrip checks that data written via a SigningWriteCloser is read back intact.
+func TestRoundTrip(t *testing.T) {
+	signer := newSigner()
+	d, s := &bufferCloser{}, &bufferCloser{}
+
+	testdata := []struct {
+		writeList [][]byte
+		opts      *Options
+	}{
+		{[][]byte{testutil.RandomBytes(1)}, nil},
+		{[][]byte{testutil.RandomBytes(100)}, nil},
+		{[][]byte{testutil.RandomBytes(100)}, &Options{ChunkSizeBytes: 10}},
+		{[][]byte{testutil.RandomBytes(25), testutil.RandomBytes(15), testutil.RandomBytes(60), testutil.RandomBytes(5)}, &Options{ChunkSizeBytes: 7}},
+	}
+	for _, test := range testdata {
+		d.Reset()
+		s.Reset()
+
+		if err := signingWrite(d, s, signer, test.writeList, test.opts); err != nil {
+			t.Errorf("signingWrite(_, _, %v, %v) failed: %s", test.writeList, test.opts, err)
+			continue
+		}
+		dataRead, err := verifyingRead(d, s, signer.PublicKey())
+		if err != nil {
+			t.Errorf("verifyingRead failed: %s", err)
+			continue
+		}
+
+		dataWritten := bytes.Join(test.writeList, nil)
+		if !reflect.DeepEqual(dataRead, dataWritten) {
+			t.Errorf("Read-Write mismatch: data read: %v, data written: %v", dataRead, dataWritten)
+			continue
+		}
+	}
+}
+// TestIntegrityAndAuthenticity checks that tampered data or a mismatched key fails verification.
+func TestIntegrityAndAuthenticity(t *testing.T) {
+	tamper := func(b []byte) []byte {
+		c := make([]byte, len(b))
+		copy(c, b)
+		c[mrand.Int()%len(b)] += 1
+		return c
+	}
+
+	signer := newSigner()
+	d, s := &bufferCloser{}, &bufferCloser{}
+	if err := signingWrite(d, s, signer, [][]byte{testutil.RandomBytes(100)}, &Options{ChunkSizeBytes: 7}); err != nil {
+		t.Fatalf("signingWrite failed: %s", err)
+	}
+
+	// Copy the data and signature bytes written by signingWrite.
+	dataBytes := d.Bytes()
+	sigBytes := s.Bytes()
+
+	// Test that any tampering of the data bytes, or any change
+	// to the signer causes a verifyingRead to fail.
+	testdata := []struct {
+		dataBytes, sigBytes []byte
+		key                 *ecdsa.PublicKey
+		wantErr             string
+	}{
+		{dataBytes, sigBytes, signer.PublicKey(), ""},
+		{dataBytes, sigBytes, newSigner().PublicKey(), "signature verification failed"},
+		{tamper(dataBytes), sigBytes, signer.PublicKey(), "data has been modified"},
+	}
+	for _, test := range testdata {
+		if _, err := verifyingRead(&bufferCloser{*bytes.NewBuffer(test.dataBytes)}, &bufferCloser{*bytes.NewBuffer(test.sigBytes)}, test.key); !matchesErrorPattern(err, test.wantErr) {
+			t.Errorf("verifyingRead: got error: %s, want to match: %v", err, test.wantErr)
+		}
+	}
+}
diff --git a/security/serialization/signing_writer.go b/security/serialization/signing_writer.go
new file mode 100644
index 0000000..643fe69
--- /dev/null
+++ b/security/serialization/signing_writer.go
@@ -0,0 +1,145 @@
+package serialization
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+
+	"veyron2/security"
+	"veyron2/vom"
+)
+
+const defaultChunkSizeBytes = 1 << 20
+// header is the first value encoded to the signature stream; it records the chunk size.
+type header struct {
+	ChunkSizeBytes int
+}
+
+// signingWriter implements io.WriteCloser; it buffers data into chunks and hashes each chunk.
+type signingWriter struct {
+	data      io.WriteCloser
+	signature io.WriteCloser
+	signer    security.Signer
+
+	chunkSizeBytes int
+	curChunk       bytes.Buffer
+	signatureHash  hash.Hash
+	sigEnc         *vom.Encoder
+}
+// Write buffers p into fixed-size chunks, committing each chunk as it fills.
+func (w *signingWriter) Write(p []byte) (int, error) {
+	bytesWritten := 0
+	for len(p) > 0 {
+		pLimited := p
+		curChunkFreeBytes := w.chunkSizeBytes - w.curChunk.Len()
+		if len(pLimited) > curChunkFreeBytes {
+			pLimited = pLimited[:curChunkFreeBytes]
+		}
+
+		n, err := w.curChunk.Write(pLimited)
+		bytesWritten = bytesWritten + n
+		if err != nil {
+			return bytesWritten, err
+		}
+		p = p[n:]
+
+		if err := w.commitChunk(false); err != nil {
+			return bytesWritten, err
+		}
+	}
+	return bytesWritten, nil
+}
+// Close commits any buffered partial chunk, writes the signature, and closes both streams.
+func (w *signingWriter) Close() error {
+	if w.curChunk.Len() > 0 {
+		if err := w.commitChunk(true); err != nil {
+			defer w.close()
+			return err
+		}
+	}
+	if err := w.commitSignature(); err != nil {
+		defer w.close()
+		return err
+	}
+	return w.close()
+}
+
+// Options specifies parameters to tune a SigningWriteCloser.
+type Options struct {
+	// ChunkSizeBytes bounds the memory used to buffer data passed to Write before
+	// it is committed as a chunk. See NewSigningWriteCloser for the default.
+	ChunkSizeBytes int
+}
+
+// NewSigningWriteCloser returns an io.WriteCloser that writes data along
+// with an appropriate signature that establishes the integrity and
+// authenticity of the data. It behaves as follows:
+// * A Write call writes chunks (of the size set in Options if positive, else 1MB)
+//   of data to the provided data WriteCloser and a hash of the chunks to the provided
+//   signature WriteCloser.
+// * A Close call writes a signature (computed using the provided signer) of
+//   all the hashes written, and then closes the data and signature WriteClosers.
+func NewSigningWriteCloser(data, signature io.WriteCloser, s security.Signer, opts *Options) (io.WriteCloser, error) {
+	if (data == nil) || (signature == nil) {
+		return nil, errors.New("data or signature WriteCloser is nil")
+	}
+	w := &signingWriter{data: data, signature: signature, signer: s, signatureHash: sha256.New(), chunkSizeBytes: defaultChunkSizeBytes, sigEnc: vom.NewEncoder(signature)}
+
+	// A non-positive ChunkSizeBytes would make Write spin forever (or panic); keep the default.
+	if opts != nil && opts.ChunkSizeBytes > 0 {
+		w.chunkSizeBytes = opts.ChunkSizeBytes
+	}
+
+	if err := w.commitHeader(); err != nil {
+		return nil, err
+	}
+	return w, nil
+}
+// commitHeader hashes and encodes the chunk size so that readers can authenticate it.
+func (w *signingWriter) commitHeader() error {
+	if err := binary.Write(w.signatureHash, binary.LittleEndian, int64(w.chunkSizeBytes)); err != nil {
+		return err
+	}
+	if err := w.sigEnc.Encode(header{w.chunkSizeBytes}); err != nil {
+		return err
+	}
+	return nil
+}
+// commitChunk flushes a full (or, when force is set, partial) chunk and records its hash.
+func (w *signingWriter) commitChunk(force bool) error {
+	if !force && w.curChunk.Len() < w.chunkSizeBytes {
+		return nil
+	}
+
+	hashBytes := sha256.Sum256(w.curChunk.Bytes())
+	if _, err := io.CopyN(w.data, &w.curChunk, int64(w.curChunk.Len())); err != nil {
+		return err
+	}
+	if _, err := w.signatureHash.Write(hashBytes[:]); err != nil {
+		return err
+	}
+	return w.sigEnc.Encode(hashBytes)
+}
+// commitSignature signs the accumulated hash state and encodes the signature.
+func (w *signingWriter) commitSignature() error {
+	sig, err := w.signer.Sign(w.signatureHash.Sum(nil))
+	if err != nil {
+		return fmt.Errorf("signing failed: %s", err)
+	}
+
+	return w.sigEnc.Encode(sig)
+}
+// close closes both underlying WriteClosers, returning the first error seen.
+func (w *signingWriter) close() error {
+	var closeErr error
+	if err := w.data.Close(); err != nil && closeErr == nil {
+		closeErr = err
+	}
+	if err := w.signature.Close(); err != nil && closeErr == nil {
+		closeErr = err
+	}
+	return closeErr
+}
diff --git a/security/serialization/verifying_reader.go b/security/serialization/verifying_reader.go
new file mode 100644
index 0000000..89ce5cc
--- /dev/null
+++ b/security/serialization/verifying_reader.go
@@ -0,0 +1,123 @@
+package serialization
+
+import (
+	"bytes"
+	"crypto/ecdsa"
+	"crypto/sha256"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+
+	"veyron2/security"
+	"veyron2/vom"
+)
+
+// verifyingReader implements io.Reader, checking each chunk of data against its recorded hash.
+type verifyingReader struct {
+	data io.Reader
+
+	chunkSizeBytes int
+	curChunk       bytes.Buffer
+	hashes         bytes.Buffer
+}
+// Read fills p from verified chunks, loading and checking the next chunk as needed.
+func (r *verifyingReader) Read(p []byte) (int, error) {
+	bytesRead := 0
+	for len(p) > 0 {
+		if err := r.readChunk(); err != nil {
+			return bytesRead, err
+		}
+
+		n, err := r.curChunk.Read(p)
+		bytesRead = bytesRead + n
+		if err != nil {
+			return bytesRead, err
+		}
+
+		p = p[n:]
+	}
+	return bytesRead, nil
+}
+
+// NewVerifyingReader returns an io.Reader that ensures that all data returned
+// by Read calls was written using a NewSigningWriteCloser (by a principal
+// possessing a signer corresponding to the provided public key), and has not
+// been modified since (ensuring integrity and authenticity of data).
+func NewVerifyingReader(data, signature io.Reader, key *ecdsa.PublicKey) (io.Reader, error) {
+	if (data == nil) && (signature == nil) {
+		return nil, nil
+	}
+	if (data == nil) || (signature == nil) {
+		return nil, errors.New("data or signature Reader is nil")
+	}
+	r := &verifyingReader{data: data}
+	if err := r.verifySignature(signature, key); err != nil {
+		return nil, err
+	}
+	return r, nil
+}
+// readChunk refills curChunk from data and verifies it against the next recorded hash.
+func (r *verifyingReader) readChunk() error {
+	if r.curChunk.Len() > 0 {
+		return nil
+	}
+	hash := make([]byte, sha256.Size)
+	if _, err := r.hashes.Read(hash); err == io.EOF {
+		return nil
+	} else if err != nil {
+		return err
+	}
+
+	if _, err := io.CopyN(&r.curChunk, r.data, int64(r.chunkSizeBytes)); err != nil && err != io.EOF {
+		return err
+	}
+
+	if wantHash := sha256.Sum256(r.curChunk.Bytes()); !bytes.Equal(hash, wantHash[:]) {
+		return errors.New("data has been modified since being written")
+	}
+	return nil
+}
+// verifySignature decodes the header and chunk hashes, then checks the signature over them.
+func (r *verifyingReader) verifySignature(signature io.Reader, key *ecdsa.PublicKey) error {
+	dec := vom.NewDecoder(signature)
+	signatureHash := sha256.New()
+
+	var h header
+	if err := dec.Decode(&h); err != nil {
+		return fmt.Errorf("failed to decode header: %v", err)
+	}
+	r.chunkSizeBytes = h.ChunkSizeBytes
+	if err := binary.Write(signatureHash, binary.LittleEndian, int64(r.chunkSizeBytes)); err != nil {
+		return err
+	}
+
+	var signatureFound bool
+	for !signatureFound {
+		var i interface{}
+		if err := dec.Decode(&i); err == io.EOF {
+			return errors.New("no signature found in signature Reader")
+		} else if err != nil {
+			return err
+		}
+
+		switch v := i.(type) {
+		case [sha256.Size]byte:
+			if _, err := io.MultiWriter(&r.hashes, signatureHash).Write(v[:]); err != nil {
+				return err
+			}
+		case security.Signature:
+			signatureFound = true
+			if !v.Verify(key, signatureHash.Sum(nil)) {
+				return errors.New("signature verification failed")
+			}
+		default:
+			return fmt.Errorf("invalid data of type: %T read from signature Reader", i)
+		}
+	}
+	// Verify that no more data can be read from the signature Reader.
+	if _, err := signature.Read(make([]byte, 1)); err != io.EOF {
+		return fmt.Errorf("unexpected data found after signature")
+	}
+	return nil
+}
diff --git a/services/mgmt/build/buildd/main.go b/services/mgmt/build/buildd/main.go
index 1904b2b..d12d603 100644
--- a/services/mgmt/build/buildd/main.go
+++ b/services/mgmt/build/buildd/main.go
@@ -34,7 +34,7 @@
 		vlog.Errorf("Listen(%v, %v) failed: %v", protocol, address, err)
 		return
 	}
-	if err := server.Serve(name, ipc.SoloDispatcher(build.NewServerBuild(impl.NewInvoker(gobin)), vflag.NewAuthorizerOrDie())); err != nil {
+	if err := server.Serve(name, ipc.SoloDispatcher(build.NewServerBuilder(impl.NewInvoker(gobin)), vflag.NewAuthorizerOrDie())); err != nil {
 		vlog.Errorf("Serve(%v) failed: %v", name, err)
 		return
 	}
diff --git a/services/mgmt/build/impl/impl_test.go b/services/mgmt/build/impl/impl_test.go
index 4d0bcd6..2f767da 100644
--- a/services/mgmt/build/impl/impl_test.go
+++ b/services/mgmt/build/impl/impl_test.go
@@ -18,7 +18,7 @@
 }
 
 // startServer starts the build server.
-func startServer(t *testing.T) (build.Build, func()) {
+func startServer(t *testing.T) (build.Builder, func()) {
 	root := os.Getenv("VEYRON_ROOT")
 	if root == "" {
 		t.Fatalf("VEYRON_ROOT is not set")
@@ -34,13 +34,13 @@
 		t.Fatalf("Listen(%v, %v) failed: %v", protocol, hostname, err)
 	}
 	unpublished := ""
-	if err := server.Serve(unpublished, ipc.SoloDispatcher(build.NewServerBuild(NewInvoker(gobin)), nil)); err != nil {
+	if err := server.Serve(unpublished, ipc.SoloDispatcher(build.NewServerBuilder(NewInvoker(gobin)), nil)); err != nil {
 		t.Fatalf("Serve(%q) failed: %v", unpublished, err)
 	}
 	name := "/" + endpoint.String()
-	client, err := build.BindBuild(name)
+	client, err := build.BindBuilder(name)
 	if err != nil {
-		t.Fatalf("BindBuild(%v) failed: %v", name, err)
+		t.Fatalf("BindBuilder(%v) failed: %v", name, err)
 	}
 	return client, func() {
 		if err := server.Stop(); err != nil {
@@ -49,7 +49,7 @@
 	}
 }
 
-func invokeBuild(t *testing.T, client build.Build, files []build.File) ([]byte, []build.File, error) {
+func invokeBuild(t *testing.T, client build.Builder, files []build.File) ([]byte, []build.File, error) {
 	arch, opsys := getArch(), getOS()
 	stream, err := client.Build(rt.R().NewContext(), arch, opsys)
 	if err != nil {
diff --git a/services/mgmt/build/impl/invoker.go b/services/mgmt/build/impl/invoker.go
index 407f8a6..ee62f28 100644
--- a/services/mgmt/build/impl/invoker.go
+++ b/services/mgmt/build/impl/invoker.go
@@ -41,7 +41,7 @@
 //
 // TODO(jsimsa): Analyze the binary files for shared library
 // dependencies and ship these back.
-func (i *invoker) Build(_ ipc.ServerContext, arch build.Architecture, opsys build.OperatingSystem, stream build.BuildServiceBuildStream) ([]byte, error) {
+func (i *invoker) Build(_ ipc.ServerContext, arch build.Architecture, opsys build.OperatingSystem, stream build.BuilderServiceBuildStream) ([]byte, error) {
 	vlog.VI(1).Infof("Build(%v, %v) called.", arch, opsys)
 	dir, prefix := "", ""
 	dirPerm, filePerm := os.FileMode(0700), os.FileMode(0600)
diff --git a/services/mgmt/lib/exec/doc.go b/services/mgmt/lib/exec/doc.go
index f0b8168..953965a 100644
--- a/services/mgmt/lib/exec/doc.go
+++ b/services/mgmt/lib/exec/doc.go
@@ -7,9 +7,7 @@
 // for the child to terminate, and to terminate the child cleaning up any state
 // associated with it.
 //
-// A child process uses the NewChildHandle function to complete the initial
-// authentication handshake and must then call the Run() function to run a
-// goroutine to handle the process rendezvous. The child must call SetReady to
-// indicate that it is fully initialized and ready for whatever purpose it is
-// intended to fulfill.
+// A child process uses the GetChildHandle function to complete the initial
+// authentication handshake. The child must call SetReady to indicate that it is
+// fully initialized and ready for whatever purpose it is intended to fulfill.
 package exec
diff --git a/services/mgmt/node/impl/dispatcher.go b/services/mgmt/node/impl/dispatcher.go
index 33abf91..c735952 100644
--- a/services/mgmt/node/impl/dispatcher.go
+++ b/services/mgmt/node/impl/dispatcher.go
@@ -2,7 +2,6 @@
 
 import (
 	"fmt"
-	"sync"
 
 	"veyron/services/mgmt/node"
 	"veyron/services/mgmt/node/config"
@@ -27,10 +26,8 @@
 	return &dispatcher{
 		auth: auth,
 		internal: &internalState{
-			channels:      make(map[string]chan string),
-			channelsMutex: new(sync.Mutex),
-			updating:      false,
-			updatingMutex: new(sync.Mutex),
+			channels: make(map[string]map[string]chan string),
+			updating: false,
 		},
 		config: config,
 	}, nil
@@ -39,6 +36,10 @@
 // DISPATCHER INTERFACE IMPLEMENTATION
 
 func (d *dispatcher) Lookup(suffix string) (ipc.Invoker, security.Authorizer, error) {
+	// TODO(caprita): Split out the logic that operates on the node manager
+	// from the logic that operates on the applications that the node
+	// manager runs.  We can have different invoker implementations,
+	// dispatching based on the suffix ("nm" vs. "apps").
 	return ipc.ReflectInvoker(node.NewServerNode(&invoker{
 		internal: d.internal,
 		config:   d.config,
diff --git a/services/mgmt/node/impl/impl_test.go b/services/mgmt/node/impl/impl_test.go
index 79d6c2b..16988ec 100644
--- a/services/mgmt/node/impl/impl_test.go
+++ b/services/mgmt/node/impl/impl_test.go
@@ -1,6 +1,8 @@
 package impl_test
 
 import (
+	"crypto/md5"
+	"encoding/base64"
 	"fmt"
 	"io/ioutil"
 	"os"
@@ -16,9 +18,11 @@
 	"veyron/services/mgmt/node/config"
 	"veyron/services/mgmt/node/impl"
 
+	"veyron2/ipc"
 	"veyron2/naming"
 	"veyron2/rt"
 	"veyron2/services/mgmt/application"
+	"veyron2/services/mgmt/node"
 	"veyron2/verror"
 	"veyron2/vlog"
 )
@@ -33,8 +37,9 @@
 	// create it here.
 	rt.Init()
 
-	blackbox.CommandTable["nodeManager"] = nodeManager
 	blackbox.CommandTable["execScript"] = execScript
+	blackbox.CommandTable["nodeManager"] = nodeManager
+	blackbox.CommandTable["app"] = app
 }
 
 // execScript launches the script passed as argument.
@@ -123,6 +128,38 @@
 	}
 }
 
+// appService defines a test service that the test app should be running.
+// TODO(caprita): Use this to make calls to the app and verify how Suspend/Stop
+// interact with an active service.
+type appService struct{}
+
+func (appService) Echo(_ ipc.ServerCall, message string) (string, error) {
+	return message, nil
+}
+// app serves appService at args[0], calls Ping on "pingserver", and on shutdown writes testfile.
+func app(args []string) {
+	if expected, got := 1, len(args); expected != got {
+		vlog.Fatalf("Unexpected number of arguments: expected %d, got %d", expected, got)
+	}
+	publishName := args[0]
+
+	defer rt.R().Cleanup()
+	server, _ := newServer()
+	defer server.Stop()
+	if err := server.Serve(publishName, ipc.SoloDispatcher(new(appService), nil)); err != nil {
+		vlog.Fatalf("Serve(%v) failed: %v", publishName, err)
+	}
+	if call, err := rt.R().Client().StartCall(rt.R().NewContext(), "pingserver", "Ping", nil); err != nil {
+		vlog.Fatalf("StartCall failed: %v", err)
+	} else if err = call.Finish(); err != nil {
+		vlog.Fatalf("Finish failed: %v", err)
+	}
+	<-signals.ShutdownOnSignals()
+	if err := ioutil.WriteFile("testfile", []byte("goodbye world"), 0600); err != nil {
+		vlog.Fatalf("Failed to write testfile: %v", err)
+	}
+}
+
 // generateScript is very similar in behavior to its namesake in invoker.go.
 // However, we chose to re-implement it here for two reasons: (1) avoid making
 // generateScript public; and (2) how the test choses to invoke the node manager
@@ -145,22 +182,28 @@
 	return path
 }
 
+// nodeEnvelopeFromCmd returns a node manager application envelope that
+// describes the given command object.
+func nodeEnvelopeFromCmd(cmd *goexec.Cmd) *application.Envelope {
+	return envelopeFromCmd(application.NodeManagerTitle, cmd)
+}
+
 // envelopeFromCmd returns an envelope that describes the given command object.
-func envelopeFromCmd(cmd *goexec.Cmd) *application.Envelope {
+func envelopeFromCmd(title string, cmd *goexec.Cmd) *application.Envelope {
 	return &application.Envelope{
-		Title:  application.NodeManagerTitle,
+		Title:  title,
 		Args:   cmd.Args[1:],
 		Env:    cmd.Env,
 		Binary: "br",
 	}
 }
 
-// TestUpdateAndRevert makes the node manager go through the motions of updating
+// TestNodeManagerUpdateAndRevert makes the node manager go through the motions of updating
 // itself to newer versions (twice), and reverting itself back (twice).  It also
 // checks that update and revert fail when they're supposed to.  The initial
 // node manager is started 'by hand' via a blackbox command.  Further versions
 // are started through the soft link that the node manager itself updates.
-func TestUpdateAndRevert(t *testing.T) {
+func TestNodeManagerUpdateAndRevert(t *testing.T) {
 	// Set up mount table, application, and binary repositories.
 	defer setupLocalNamespace(t)()
 	envelope, cleanup := startApplicationRepository()
@@ -226,7 +269,7 @@
 	resolve(t, "factoryNM") // Verify the node manager has published itself.
 
 	// Simulate an invalid envelope in the application repository.
-	*envelope = *envelopeFromCmd(nm.Cmd)
+	*envelope = *nodeEnvelopeFromCmd(nm.Cmd)
 	envelope.Title = "bogus"
 	updateExpectError(t, "factoryNM", verror.BadArg)   // Incorrect title.
 	revertExpectError(t, "factoryNM", verror.NotFound) // No previous version available.
@@ -239,7 +282,7 @@
 	// node manager to stage the next version.
 	nmV2 := blackbox.HelperCommand(t, "nodeManager", "v2NM")
 	defer setupChildCommand(nmV2)()
-	*envelope = *envelopeFromCmd(nmV2.Cmd)
+	*envelope = *nodeEnvelopeFromCmd(nmV2.Cmd)
 	update(t, "factoryNM")
 
 	// Current link should have been updated to point to v2.
@@ -288,7 +331,7 @@
 	// Create a third version of the node manager and issue an update.
 	nmV3 := blackbox.HelperCommand(t, "nodeManager", "v3NM")
 	defer setupChildCommand(nmV3)()
-	*envelope = *envelopeFromCmd(nmV3.Cmd)
+	*envelope = *nodeEnvelopeFromCmd(nmV3.Cmd)
 	update(t, "v2NM")
 
 	scriptPathV3 := evalLink()
@@ -360,3 +403,104 @@
 	runNM.Expect("ready")
 	resolve(t, "factoryNM") // Current link should have been launching factory version.
 }
+// pingServerDisp is a channel closed when the app calls Ping, signaling app readiness.
+type pingServerDisp chan struct{}
+
+func (p pingServerDisp) Ping(ipc.ServerCall) { close(p) }
+
+// TestAppStartStop installs an app, starts it, and then stops it.
+func TestAppStartStop(t *testing.T) {
+	// Set up mount table, application, and binary repositories.
+	defer setupLocalNamespace(t)()
+	envelope, cleanup := startApplicationRepository()
+	defer cleanup()
+	defer startBinaryRepository()()
+
+	// This is the local filesystem location that the node manager is told
+	// to use.
+	root := filepath.Join(os.TempDir(), "nodemanager")
+	defer os.RemoveAll(root)
+
+	// Set up the node manager.  Since we won't do node manager updates,
+	// don't worry about its application envelope and current link.
+	nm := blackbox.HelperCommand(t, "nodeManager", "nm", root, "unused app repo name", "unused curr link")
+	defer setupChildCommand(nm)()
+	if err := nm.Cmd.Start(); err != nil {
+		t.Fatalf("Start() failed: %v", err)
+	}
+	defer nm.Cleanup()
+	nm.Expect("ready")
+
+	// Create the local server that the app uses to let us know it's ready.
+	server, _ := newServer()
+	defer server.Stop()
+	pingCh := make(chan struct{})
+	if err := server.Serve("pingserver", ipc.SoloDispatcher(pingServerDisp(pingCh), nil)); err != nil {
+		t.Errorf("Failed to set up ping server")
+	}
+
+	// Create an envelope for an app.
+	app := blackbox.HelperCommand(t, "app", "app1")
+	defer setupChildCommand(app)()
+	appTitle := "google naps"
+	*envelope = *envelopeFromCmd(appTitle, app.Cmd)
+
+	appsName := "nm//apps"
+	stub, err := node.BindApplication(appsName)
+	if err != nil {
+		t.Fatalf("BindApplication(%v) failed: %v", appsName, err)
+	}
+	appID, err := stub.Install(rt.R().NewContext(), "ar")
+	if err != nil {
+		t.Fatalf("Install failed: %v", err)
+	}
+	appName := naming.Join(appsName, appID)
+	stub, err = node.BindApplication(appName)
+	if err != nil {
+		t.Fatalf("BindApplication(%v) failed: %v", appName, err)
+	}
+	var instanceID string
+	if instanceIDs, err := stub.Start(rt.R().NewContext()); err != nil {
+		t.Fatalf("Start failed: %v", err)
+	} else {
+		if want, got := 1, len(instanceIDs); want != got {
+			t.Fatalf("Expected %v instance ids, got %v instead", want, got)
+		}
+		instanceID = instanceIDs[0]
+	}
+	// Wait until the app pings us that it's ready.
+	<-pingCh
+
+	instanceName := naming.Join(appName, instanceID)
+	stub, err = node.BindApplication(instanceName)
+	if err != nil {
+		t.Fatalf("BindApplication(%v) failed: %v", instanceName, err)
+	}
+	if err := stub.Stop(rt.R().NewContext(), 5); err != nil {
+		t.Errorf("Stop failed: %v", err)
+	}
+
+	// HACK ALERT: for now, we peek inside the node manager's directory
+	// structure (which ought to be opaque) to check for what the app has
+	// written to its local root.
+	//
+	// TODO(caprita): add support to node manager to browse logs/app local
+	// root.
+	applicationDirName := func(title string) string {
+		h := md5.New()
+		h.Write([]byte(title))
+		hash := strings.TrimRight(base64.URLEncoding.EncodeToString(h.Sum(nil)), "=")
+		return "app-" + hash
+	}
+	components := strings.Split(appID, "/")
+	appTitle, installationID := components[0], components[1]
+	instanceDir := filepath.Join(root, applicationDirName(appTitle), "installation-"+installationID, "instances", "stopped-instance-"+instanceID)
+	rootDir := filepath.Join(instanceDir, "root")
+	testFile := filepath.Join(rootDir, "testfile")
+	if read, err := ioutil.ReadFile(testFile); err != nil {
+		t.Errorf("Failed to read %v: %v", testFile, err)
+	} else if want, got := "goodbye world", string(read); want != got {
+		t.Errorf("Expected to read %v, got %v instead", want, got)
+	}
+	// END HACK
+}
diff --git a/services/mgmt/node/impl/invoker.go b/services/mgmt/node/impl/invoker.go
index b7b4611..4e95dff 100644
--- a/services/mgmt/node/impl/invoker.go
+++ b/services/mgmt/node/impl/invoker.go
@@ -3,14 +3,13 @@
 // The implementation of the node manager expects that the node manager
 // installations are all organized in the following directory structure:
 //
-// VEYRON_NM_ROOT/
-//   workspace-1/
-//     noded - the node manager binary
-//     noded.sh - a shell script to start the binary
-//  ...
-//   workspace-n/
-//     noded - the node manager binary
-//     noded.sh - a shell script to start the binary
+// <config.Root>/
+//   node-manager/
+//     <version 1 timestamp>/  - timestamp of when the version was downloaded
+//       noded                 - the node manager binary
+//       noded.sh              - a shell script to start the binary
+//     <version 2 timestamp>
+//     ...
 //
 // The node manager is always expected to be started through the symbolic link
 // passed in as config.CurrentLink, which is monitored by an init daemon. This
@@ -20,25 +19,88 @@
 // the symlink is updated to the new noded.sh script. Similarly, to revert the
 // node manager to a previous version, all that is required is to update the
 // symlink to point to the previous noded.sh script.
+//
+//
+// The node manager manages the applications it installs and runs using the
+// following directory structure:
+//
+// TODO(caprita): Not all is yet implemented.
+//
+// <config.Root>/
+//   app-<hash 1>/                  - the application dir is named using a hash of the application title
+//     installation-<id 1>/         - installations are labelled with ids
+//       <version 1 timestamp>/     - timestamp of when the version was downloaded
+//         bin                      - application binary
+//         previous                 - symbolic link to previous version directory (TODO)
+//         origin                   - object name for application envelope
+//         envelope                 - application envelope (JSON-encoded)
+//       <version 2 timestamp>
+//       ...
+//       current                    - symbolic link to the current version
+//       instances/
+//         instance-<id a>/         - instances are labelled with ids
+//           root/                  - workspace that the instance is run from
+//           logs/                  - stderr/stdout and log files generated by instance
+//           info                   - app manager name and process id for the instance (if running)
+//           version                - symbolic link to installation version for the instance
+//         instance-<id b>
+//         ...
+//         stopped-instance-<id c>  - stopped instances have their directory name prepended by 'stopped-'
+//         ...
+//     installation-<id 2>
+//     ...
+//   app-<hash 2>
+//   ...
+//
+// When node manager starts up, it goes through all instances and resumes the
+// ones that are not suspended.  If the application was still running, it
+// suspends it first.  If an application fails to resume, it stays suspended.
+//
+// When node manager shuts down, it suspends all running instances.
+//
+// Start starts an instance.  Suspend kills the process but leaves the workspace
+// untouched. Resume restarts the process. Stop kills the process and prevents
+// future resumes (it also eventually gc's the workspace).
+//
+// If the process dies on its own, it stays dead and is assumed suspended.
+// TODO(caprita): Later, we'll add auto-restart option.
+//
+// Concurrency model: installations can be created independently of one another;
+// installations can be removed at any time (any running instances will be
+// stopped). The first call to Uninstall will rename the installation dir as a
+// first step; subsequent Uninstalls will fail. Instances can be created
+// independently of one another, as long as the installation exists (if it gets
+// Uninstalled during an instance Start, the Start may fail). When an instance
+// is stopped, the first call to Stop renames the instance dir; subsequent Stop
+// calls will fail. Resume will attempt to create an info file; if one exists
+// already, Resume fails. Suspend will attempt to rename the info file; if none
+// present, Suspend will fail.
+//
+// TODO(caprita): There is room for synergy between how node manager organizes
+// its own workspace and that for the applications it runs.  In particular,
+// previous, origin, and envelope could be part of a single config.  We'll
+// refine that later.
 
 import (
-	"bytes"
-	"errors"
+	"crypto/md5"
+	"encoding/base64"
+	"encoding/binary"
+	"encoding/json"
 	"fmt"
+	"hash/crc64"
 	"io/ioutil"
-	"math/rand"
 	"os"
 	"os/exec"
 	"path/filepath"
 	"reflect"
 	"regexp"
-	"runtime"
+	"strconv"
 	"strings"
 	"sync"
 	"time"
 
 	"veyron/lib/config"
-	blib "veyron/services/mgmt/lib/binary"
+	binlib "veyron/services/mgmt/lib/binary"
 	vexec "veyron/services/mgmt/lib/exec"
 	iconfig "veyron/services/mgmt/node/config"
 	"veyron/services/mgmt/profile"
@@ -47,30 +109,40 @@
 	"veyron2/mgmt"
 	"veyron2/naming"
 	"veyron2/rt"
+	"veyron2/services/mgmt/appcycle"
 	"veyron2/services/mgmt/application"
-	"veyron2/services/mgmt/binary"
-	"veyron2/services/mgmt/build"
+	binapi "veyron2/services/mgmt/binary"
 	"veyron2/services/mgmt/node"
 	"veyron2/services/mgmt/repository"
 	"veyron2/verror"
 	"veyron2/vlog"
 )
 
+// instanceInfo holds state about a running instance.
+type instanceInfo struct {
+	AppCycleMgrName string
+	Pid             int
+}
+
 // internalState wraps state shared between different node manager
 // invocations.
 type internalState struct {
-	// channels maps callback identifiers to channels that are used to
-	// communicate information from child processes.
-	channels map[string]chan string
+	// channels maps callback identifiers and config keys to channels that
+	// are used to communicate corresponding config values from child
+	// processes.
+	channels map[string]map[string]chan string
+	// nextCallbackID provides the next callback identifier to use as key
+	// for the channels map.
+	nextCallbackID int64
 	// channelsMutex is a lock for coordinating concurrent access to
 	// <channels>.
-	channelsMutex *sync.Mutex
+	channelsMutex sync.Mutex
 	// updating is a flag that records whether this instance of node
 	// manager is being updated.
 	updating bool
 	// updatingMutex is a lock for coordinating concurrent access to
 	// <updating>.
-	updatingMutex *sync.Mutex
+	updatingMutex sync.Mutex
 }
 
 // invoker holds the state of a node manager invocation.
@@ -94,217 +166,42 @@
 	errUpdateInProgress   = verror.Existsf("update in progress")
 	errIncompatibleUpdate = verror.BadArgf("update failed: mismatching app title")
 	errUpdateNoOp         = verror.NotFoundf("no different version available")
+	errNotExist           = verror.NotFoundf("object does not exist")
+	errInvalidOperation   = verror.BadArgf("invalid operation")
 )
 
 // NODE INTERFACE IMPLEMENTATION
 
-// computeNodeProfile generates a description of the runtime
-// environment (supported file format, OS, architecture, libraries) of
-// the host node.
-//
-// TODO(jsimsa): Avoid computing the host node description from
-// scratch if a recent cached copy exists.
-func (i *invoker) computeNodeProfile() (*profile.Specification, error) {
-	result := profile.Specification{}
-
-	// Find out what the supported file format, operating system, and
-	// architecture is.
-	switch runtime.GOOS {
-	case "darwin":
-		result.Format = build.MACH
-		result.OS = build.Darwin
-	case "linux":
-		result.Format = build.ELF
-		result.OS = build.Linux
-	case "windows":
-		result.Format = build.PE
-		result.OS = build.Windows
-	default:
-		return nil, errors.New("Unsupported operating system: " + runtime.GOOS)
-	}
-	switch runtime.GOARCH {
-	case "amd64":
-		result.Arch = build.AMD64
-	case "arm":
-		result.Arch = build.ARM
-	case "x86":
-		result.Arch = build.X86
-	default:
-		return nil, errors.New("Unsupported hardware architecture: " + runtime.GOARCH)
-	}
-
-	// Find out what the installed dynamically linked libraries are.
-	switch runtime.GOOS {
-	case "linux":
-		// For Linux, we identify what dynamically linked libraries are
-		// install by parsing the output of "ldconfig -p".
-		command := exec.Command("ldconfig", "-p")
-		output, err := command.CombinedOutput()
-		if err != nil {
-			return nil, err
-		}
-		buf := bytes.NewBuffer(output)
-		// Throw away the first line of output from ldconfig.
-		if _, err := buf.ReadString('\n'); err != nil {
-			return nil, errors.New("Could not identify libraries.")
-		}
-		// Extract the library name and version from every subsequent line.
-		result.Libraries = make(map[profile.Library]struct{})
-		line, err := buf.ReadString('\n')
-		for err == nil {
-			words := strings.Split(strings.Trim(line, " \t\n"), " ")
-			if len(words) > 0 {
-				tokens := strings.Split(words[0], ".so")
-				if len(tokens) != 2 {
-					return nil, errors.New("Could not identify library: " + words[0])
-				}
-				name := strings.TrimPrefix(tokens[0], "lib")
-				major, minor := "", ""
-				tokens = strings.SplitN(tokens[1], ".", 3)
-				if len(tokens) >= 2 {
-					major = tokens[1]
-				}
-				if len(tokens) >= 3 {
-					minor = tokens[2]
-				}
-				result.Libraries[profile.Library{Name: name, MajorVersion: major, MinorVersion: minor}] = struct{}{}
-			}
-			line, err = buf.ReadString('\n')
-		}
-	case "darwin":
-		// TODO(jsimsa): Implement.
-	case "windows":
-		// TODO(jsimsa): Implement.
-	default:
-		return nil, errors.New("Unsupported operating system: " + runtime.GOOS)
-	}
-	return &result, nil
-}
-
-// getProfile gets a profile description for the given profile.
-//
-// TODO(jsimsa): Avoid retrieving the list of known profiles from a
-// remote server if a recent cached copy exists.
-func (i *invoker) getProfile(name string) (*profile.Specification, error) {
-	// TODO(jsimsa): This function assumes the existence of a profile
-	// server from which the profiles can be retrieved. The profile
-	// server is a work in progress. When it exists, the commented out
-	// code below should work.
-	var profile profile.Specification
-	/*
-			client, err := r.NewClient()
-			if err != nil {
-				vlog.Errorf("NewClient() failed: %v", err)
-				return nil, err
-			}
-			defer client.Close()
-		  server := // TODO
-			method := "Specification"
-			inputs := make([]interface{}, 0)
-			call, err := client.StartCall(server + "/" + name, method, inputs)
-			if err != nil {
-				vlog.Errorf("StartCall(%s, %q, %v) failed: %v\n", server + "/" + name, method, inputs, err)
-				return nil, err
-			}
-			if err := call.Finish(&profiles); err != nil {
-				vlog.Errorf("Finish(%v) failed: %v\n", &profiles, err)
-				return nil, err
-			}
-	*/
-	return &profile, nil
-}
-
-// getKnownProfiles gets a list of description for all publicly known
-// profiles.
-//
-// TODO(jsimsa): Avoid retrieving the list of known profiles from a
-// remote server if a recent cached copy exists.
-func (i *invoker) getKnownProfiles() ([]profile.Specification, error) {
-	// TODO(jsimsa): This function assumes the existence of a profile
-	// server from which a list of known profiles can be retrieved. The
-	// profile server is a work in progress. When it exists, the
-	// commented out code below should work.
-	knownProfiles := make([]profile.Specification, 0)
-	/*
-			client, err := r.NewClient()
-			if err != nil {
-				vlog.Errorf("NewClient() failed: %v\n", err)
-				return nil, err
-			}
-			defer client.Close()
-		  server := // TODO
-			method := "List"
-			inputs := make([]interface{}, 0)
-			call, err := client.StartCall(server, method, inputs)
-			if err != nil {
-				vlog.Errorf("StartCall(%s, %q, %v) failed: %v\n", server, method, inputs, err)
-				return nil, err
-			}
-			if err := call.Finish(&knownProfiles); err != nil {
-				vlog.Errorf("Finish(&knownProfile) failed: %v\n", err)
-				return nil, err
-			}
-	*/
-	return knownProfiles, nil
-}
-
-// matchProfiles inputs a profile that describes the host node and a
-// set of publicly known profiles and outputs a node description that
-// identifies the publicly known profiles supported by the host node.
-func (i *invoker) matchProfiles(p *profile.Specification, known []profile.Specification) node.Description {
-	result := node.Description{Profiles: make(map[string]struct{})}
-loop:
-	for _, profile := range known {
-		if profile.Format != p.Format {
-			continue
-		}
-		if profile.OS != p.OS {
-			continue
-		}
-		if profile.Arch != p.Arch {
-			continue
-		}
-		for library := range profile.Libraries {
-			// Current implementation requires exact library name and version match.
-			if _, found := p.Libraries[library]; !found {
-				continue loop
-			}
-		}
-		result.Profiles[profile.Label] = struct{}{}
-	}
-	return result
-}
-
 func (i *invoker) Describe(call ipc.ServerContext) (node.Description, error) {
 	vlog.VI(1).Infof("%v.Describe()", i.suffix)
 	empty := node.Description{}
-	nodeProfile, err := i.computeNodeProfile()
+	nodeProfile, err := computeNodeProfile()
 	if err != nil {
 		return empty, err
 	}
-	knownProfiles, err := i.getKnownProfiles()
+	knownProfiles, err := getKnownProfiles()
 	if err != nil {
 		return empty, err
 	}
-	result := i.matchProfiles(nodeProfile, knownProfiles)
+	result := matchProfiles(nodeProfile, knownProfiles)
 	return result, nil
 }
 
-func (i *invoker) IsRunnable(call ipc.ServerContext, description binary.Description) (bool, error) {
+func (i *invoker) IsRunnable(call ipc.ServerContext, description binapi.Description) (bool, error) {
 	vlog.VI(1).Infof("%v.IsRunnable(%v)", i.suffix, description)
-	nodeProfile, err := i.computeNodeProfile()
+	nodeProfile, err := computeNodeProfile()
 	if err != nil {
 		return false, err
 	}
 	binaryProfiles := make([]profile.Specification, 0)
 	for name, _ := range description.Profiles {
-		profile, err := i.getProfile(name)
+		profile, err := getProfile(name)
 		if err != nil {
 			return false, err
 		}
 		binaryProfiles = append(binaryProfiles, *profile)
 	}
-	result := i.matchProfiles(nodeProfile, binaryProfiles)
+	result := matchProfiles(nodeProfile, binaryProfiles)
 	return len(result.Profiles) > 0, nil
 }
 
@@ -316,13 +213,13 @@
 
 // APPLICATION INTERFACE IMPLEMENTATION
 
-func downloadBinary(workspace, name string) error {
-	data, err := blib.Download(name)
+func downloadBinary(workspace, fileName, name string) error {
+	data, err := binlib.Download(name)
 	if err != nil {
 		vlog.Errorf("Download(%v) failed: %v", name, err)
 		return errOperationFailed
 	}
-	path, perm := filepath.Join(workspace, "noded"), os.FileMode(755)
+	path, perm := filepath.Join(workspace, fileName), os.FileMode(0700)
 	if err := ioutil.WriteFile(path, data, perm); err != nil {
 		vlog.Errorf("WriteFile(%v, %v) failed: %v", path, perm, err)
 		return errOperationFailed
@@ -347,13 +244,13 @@
 	return &envelope, nil
 }
 
-func generateBinary(workspace string, envelope *application.Envelope, newBinary bool) error {
+func generateBinary(workspace, fileName string, envelope *application.Envelope, newBinary bool) error {
 	if newBinary {
 		// Download the new binary.
-		return downloadBinary(workspace, envelope.Binary)
+		return downloadBinary(workspace, fileName, envelope.Binary)
 	}
 	// Link the current binary.
-	path := filepath.Join(workspace, "noded")
+	path := filepath.Join(workspace, fileName)
 	if err := os.Link(os.Args[0], path); err != nil {
 		vlog.Errorf("Link(%v, %v) failed: %v", os.Args[0], path, err)
 		return errOperationFailed
@@ -374,7 +271,7 @@
 	output += filepath.Join(workspace, "noded") + " "
 	output += strings.Join(envelope.Args, " ")
 	path = filepath.Join(workspace, "noded.sh")
-	if err := ioutil.WriteFile(path, []byte(output), 0755); err != nil {
+	if err := ioutil.WriteFile(path, []byte(output), 0700); err != nil {
 		vlog.Errorf("WriteFile(%v) failed: %v", path, err)
 		return errOperationFailed
 	}
@@ -419,10 +316,20 @@
 	return nil
 }
 
-func (i *invoker) registerCallback(id string, channel chan string) {
+func (i *invoker) generateCallbackID() string {
 	i.internal.channelsMutex.Lock()
 	defer i.internal.channelsMutex.Unlock()
-	i.internal.channels[id] = channel
+	i.internal.nextCallbackID++
+	return strconv.FormatInt(i.internal.nextCallbackID-1, 10)
+}
+
+func (i *invoker) registerCallback(id, key string, channel chan string) {
+	i.internal.channelsMutex.Lock()
+	defer i.internal.channelsMutex.Unlock()
+	if _, ok := i.internal.channels[id]; !ok {
+		i.internal.channels[id] = make(map[string]chan string)
+	}
+	i.internal.channels[id][key] = channel
 }
 
 func (i *invoker) revertNodeManager() error {
@@ -439,25 +346,30 @@
 	cmd.Stdout = os.Stdout
 	cmd.Stderr = os.Stderr
 	// Setup up the child process callback.
-	id := fmt.Sprintf("%d", rand.Int())
+	id := i.generateCallbackID()
 	cfg := config.New()
 	cfg.Set(mgmt.ParentNodeManagerConfigKey, naming.MakeTerminal(naming.Join(i.config.Name, id)))
 	handle := vexec.NewParentHandle(cmd, vexec.ConfigOpt{cfg})
-	callbackChan := make(chan string)
-	i.registerCallback(id, callbackChan)
-	defer i.unregisterCallback(id)
+	// Make the channel buffered to avoid blocking the Set method when
+	// nothing is receiving on the channel.  This happens e.g. when
+	// unregisterCallbacks executes before Set is called.
+	callbackChan := make(chan string, 1)
+	i.registerCallback(id, mgmt.ChildNodeManagerConfigKey, callbackChan)
+	defer i.unregisterCallbacks(id)
 	// Start the child process.
 	if err := handle.Start(); err != nil {
 		vlog.Errorf("Start() failed: %v", err)
 		return errOperationFailed
 	}
+	defer func() {
+		if err := handle.Clean(); err != nil {
+			vlog.Errorf("Clean() failed: %v", err)
+		}
+	}()
 	// Wait for the child process to start.
 	testTimeout := 10 * time.Second
 	if err := handle.WaitForReady(testTimeout); err != nil {
 		vlog.Errorf("WaitForReady(%v) failed: %v", testTimeout, err)
-		if err := cmd.Process.Kill(); err != nil {
-			vlog.Errorf("Kill() failed: %v", err)
-		}
 		return errOperationFailed
 	}
 	// Wait for the child process to invoke the Callback().
@@ -468,16 +380,10 @@
 		nmClient, err := node.BindNode(childName)
 		if err != nil {
 			vlog.Errorf("BindNode(%v) failed: %v", childName, err)
-			if err := handle.Clean(); err != nil {
-				vlog.Errorf("Clean() failed: %v", err)
-			}
 			return errOperationFailed
 		}
 		linkOld, pathOld, err := i.getCurrentFileInfo()
 		if err != nil {
-			if err := handle.Clean(); err != nil {
-				vlog.Errorf("Clean() failed: %v", err)
-			}
 			return errOperationFailed
 		}
 		// Since the resolution of mtime for files is seconds,
@@ -485,16 +391,10 @@
 		// check whether the current symlink is updated.
 		time.Sleep(time.Second)
 		if err := nmClient.Revert(rt.R().NewContext()); err != nil {
-			if err := handle.Clean(); err != nil {
-				vlog.Errorf("Clean() failed: %v", err)
-			}
 			return errOperationFailed
 		}
 		linkNew, pathNew, err := i.getCurrentFileInfo()
 		if err != nil {
-			if err := handle.Clean(); err != nil {
-				vlog.Errorf("Clean() failed: %v", err)
-			}
 			return errOperationFailed
 		}
 		// Check that the new node manager updated the current symbolic
@@ -512,15 +412,12 @@
 		}
 	case <-time.After(testTimeout):
 		vlog.Errorf("Waiting for callback timed out")
-		if err := handle.Clean(); err != nil {
-			vlog.Errorf("Clean() failed: %v", err)
-		}
 		return errOperationFailed
 	}
 	return nil
 }
 
-func (i *invoker) unregisterCallback(id string) {
+func (i *invoker) unregisterCallbacks(id string) {
 	i.internal.channelsMutex.Lock()
 	defer i.internal.channelsMutex.Unlock()
 	delete(i.internal.channels, id)
@@ -541,57 +438,175 @@
 		return errUpdateNoOp
 	}
 	// Create new workspace.
-	workspace := filepath.Join(i.config.Root, fmt.Sprintf("%v", time.Now().Format(time.RFC3339Nano)))
-	perm := os.FileMode(0755)
+	workspace := filepath.Join(i.config.Root, "node-manager", generateVersionDirName())
+	perm := os.FileMode(0700)
 	if err := os.MkdirAll(workspace, perm); err != nil {
 		vlog.Errorf("MkdirAll(%v, %v) failed: %v", workspace, perm, err)
 		return errOperationFailed
 	}
+	deferrer := func() {
+		if err := os.RemoveAll(workspace); err != nil {
+			vlog.Errorf("RemoveAll(%v) failed: %v", workspace, err)
+		}
+	}
+	defer func() {
+		if deferrer != nil {
+			deferrer()
+		}
+	}()
 	// Populate the new workspace with a node manager binary.
 	// TODO(caprita): match identical binaries on binary metadata
 	// rather than binary object name.
 	sameBinary := i.config.Envelope != nil && envelope.Binary == i.config.Envelope.Binary
-	if err := generateBinary(workspace, envelope, !sameBinary); err != nil {
-		if err := os.RemoveAll(workspace); err != nil {
-			vlog.Errorf("RemoveAll(%v) failed: %v", workspace, err)
-		}
+	if err := generateBinary(workspace, "noded", envelope, !sameBinary); err != nil {
 		return err
 	}
 	// Populate the new workspace with a node manager script.
 	configSettings, err := i.config.Save(envelope)
 	if err != nil {
-		if err := os.RemoveAll(workspace); err != nil {
-			vlog.Errorf("RemoveAll(%v) failed: %v", workspace, err)
-		}
 		return errOperationFailed
 	}
 	if err := generateScript(workspace, configSettings, envelope); err != nil {
-		if err := os.RemoveAll(workspace); err != nil {
-			vlog.Errorf("RemoveAll(%v) failed: %v", workspace, err)
-		}
 		return err
 	}
 	if err := i.testNodeManager(workspace, envelope); err != nil {
-		if err := os.RemoveAll(workspace); err != nil {
-			vlog.Errorf("RemoveAll(%v) failed: %v", workspace, err)
-		}
 		return err
 	}
 	// If the binary has changed, update the node manager symlink.
 	if err := i.updateLink(filepath.Join(workspace, "noded.sh")); err != nil {
-		if err := os.RemoveAll(workspace); err != nil {
-			vlog.Errorf("RemoveAll(%v) failed: %v", workspace, err)
-		}
 		return err
 	}
 	rt.R().Stop()
+	deferrer = nil
 	return nil
 }
 
-func (i *invoker) Install(call ipc.ServerContext, von string) (string, error) {
-	vlog.VI(1).Infof("%v.Install(%q)", i.suffix, von)
-	// TODO(jsimsa): Implement.
-	return "", nil
+func saveEnvelope(dir string, envelope *application.Envelope) error {
+	jsonEnvelope, err := json.Marshal(envelope)
+	if err != nil {
+		vlog.Errorf("Marshal(%v) failed: %v", envelope, err)
+		return errOperationFailed
+	}
+	envelopePath := filepath.Join(dir, "envelope")
+	if err := ioutil.WriteFile(envelopePath, jsonEnvelope, 0600); err != nil {
+		vlog.Errorf("WriteFile(%v) failed: %v", envelopePath, err)
+		return errOperationFailed
+	}
+	return nil
+}
+
+func loadEnvelope(dir string) (*application.Envelope, error) {
+	envelopePath := filepath.Join(dir, "envelope")
+	envelope := new(application.Envelope)
+	if envelopeBytes, err := ioutil.ReadFile(envelopePath); err != nil {
+		vlog.Errorf("ReadFile(%v) failed: %v", envelopePath, err)
+		return nil, errOperationFailed
+	} else if err := json.Unmarshal(envelopeBytes, envelope); err != nil {
+		vlog.Errorf("Unmarshal(%v) failed: %v", envelopeBytes, err)
+		return nil, errOperationFailed
+	}
+	return envelope, nil
+}
+
+func saveOrigin(dir, originVON string) error {
+	path := filepath.Join(dir, "origin")
+	if err := ioutil.WriteFile(path, []byte(originVON), 0600); err != nil {
+		vlog.Errorf("WriteFile(%v) failed: %v", path, err)
+		return errOperationFailed
+	}
+	return nil
+}
+
+// generateID returns a new unique id string.  The uniqueness is based on the
+// current timestamp.  Not cryptographically secure.
+func generateID() string {
+	timestamp := time.Now().Format(time.RFC3339Nano)
+	h := crc64.New(crc64.MakeTable(crc64.ISO))
+	h.Write([]byte(timestamp))
+	b := make([]byte, 8)
+	binary.LittleEndian.PutUint64(b, h.Sum64())
+	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// TODO(caprita): Nothing prevents different applications from sharing the same
+// title, and thereby being installed in the same app dir.  Do we want to
+// prevent that for the same user or across users?
+
+// applicationDirName generates a cryptographic hash of the application title,
+// to be used as a directory name for installations of the application with the
+// given title.
+func applicationDirName(title string) string {
+	h := md5.New()
+	h.Write([]byte(title))
+	hash := strings.TrimRight(base64.URLEncoding.EncodeToString(h.Sum(nil)), "=")
+	return "app-" + hash
+}
+
+func installationDirName(installationID string) string {
+	return "installation-" + installationID
+}
+
+func instanceDirName(instanceID string) string {
+	return "instance-" + instanceID
+}
+
+func stoppedInstanceDirName(instanceID string) string {
+	return "stopped-instance-" + instanceID
+}
+
+func generateVersionDirName() string {
+	return time.Now().Format(time.RFC3339Nano)
+}
+
+func (i *invoker) Install(call ipc.ServerContext, applicationVON string) (string, error) {
+	vlog.VI(1).Infof("%v.Install(%q)", i.suffix, applicationVON)
+	if i.suffix != "apps" {
+		return "", errInvalidSuffix
+	}
+	envelope, err := fetchEnvelope(applicationVON)
+	if err != nil {
+		return "", err
+	}
+	if envelope.Title == application.NodeManagerTitle {
+		// Disallow node manager apps from being installed like a
+		// regular app.
+		return "", errInvalidOperation
+	}
+	installationID := generateID()
+	installationDir := filepath.Join(i.config.Root, applicationDirName(envelope.Title), installationDirName(installationID))
+	versionDir := filepath.Join(installationDir, generateVersionDirName())
+	perm := os.FileMode(0700)
+	if err := os.MkdirAll(versionDir, perm); err != nil {
+		vlog.Errorf("MkdirAll(%v, %v) failed: %v", versionDir, perm, err)
+		return "", errOperationFailed
+	}
+	deferrer := func() {
+		if err := os.RemoveAll(versionDir); err != nil {
+			vlog.Errorf("RemoveAll(%v) failed: %v", versionDir, err)
+		}
+	}
+	defer func() {
+		if deferrer != nil {
+			deferrer()
+		}
+	}()
+	// TODO(caprita): Share binaries if already existing locally.
+	if err := generateBinary(versionDir, "bin", envelope, true); err != nil {
+		return "", err
+	}
+	if err := saveEnvelope(versionDir, envelope); err != nil {
+		return "", err
+	}
+	if err := saveOrigin(versionDir, applicationVON); err != nil {
+		return "", err
+	}
+	link := filepath.Join(installationDir, "current")
+	if err := os.Symlink(versionDir, link); err != nil {
+		vlog.Errorf("Symlink(%v, %v) failed: %v", versionDir, link, err)
+		return "", errOperationFailed
+	}
+	deferrer = nil
+	return naming.Join(envelope.Title, installationID), nil
 }
 
 func (i *invoker) Refresh(call ipc.ServerContext) error {
@@ -634,15 +649,221 @@
 	return err
 }
 
-func (i *invoker) Start(call ipc.ServerContext) ([]string, error) {
-	vlog.VI(1).Infof("%v.Start()", i.suffix)
-	// TODO(jsimsa): Implement.
-	return make([]string, 0), nil
+func splitName(name string) (ret []string) {
+	components := strings.Split(name, "/")
+	for _, c := range components {
+		if len(c) > 0 {
+			ret = append(ret, c)
+		}
+	}
+	return
 }
 
-func (i *invoker) Stop(call ipc.ServerContext, deadline uint64) error {
+func generateCommand(envelope *application.Envelope, binPath, instanceDir string) (*exec.Cmd, error) {
+	// TODO(caprita): For the purpose of isolating apps, we should run them
+	// as different users.  We'll need to either use the root process or a
+	// suid script to be able to do it.
+	cmd := exec.Command(binPath)
+	// TODO(caprita): Also pass in configuration info like NAMESPACE_ROOT to
+	// the app (to point to the device mounttable).
+	cmd.Env = envelope.Env
+	rootDir := filepath.Join(instanceDir, "root")
+	perm := os.FileMode(0700)
+	if err := os.MkdirAll(rootDir, perm); err != nil {
+		vlog.Errorf("MkdirAll(%v, %v) failed: %v", rootDir, perm, err)
+		return nil, err
+	}
+	cmd.Dir = rootDir
+	logDir := filepath.Join(instanceDir, "logs")
+	if err := os.MkdirAll(logDir, perm); err != nil {
+		vlog.Errorf("MkdirAll(%v, %v) failed: %v", logDir, perm, err)
+		return nil, err
+	}
+	timestamp := time.Now().UnixNano()
+	var err error
+	perm = os.FileMode(0600)
+	cmd.Stdout, err = os.OpenFile(filepath.Join(logDir, fmt.Sprintf("STDOUT-%d", timestamp)), os.O_WRONLY|os.O_CREATE, perm)
+	if err != nil {
+		return nil, err
+	}
+
+	cmd.Stderr, err = os.OpenFile(filepath.Join(logDir, fmt.Sprintf("STDERR-%d", timestamp)), os.O_WRONLY|os.O_CREATE, perm)
+	if err != nil {
+		return nil, err
+	}
+	// Set up args and env.
+	cmd.Args = append(cmd.Args, "--log_dir=../logs")
+	cmd.Args = append(cmd.Args, envelope.Args...)
+	return cmd, nil
+}
+
+func (i *invoker) Start(call ipc.ServerContext) ([]string, error) {
+	vlog.VI(1).Infof("%v.Start()", i.suffix)
+	if !strings.HasPrefix(i.suffix, "apps") {
+		return nil, errInvalidSuffix
+	}
+	components := splitName(strings.TrimPrefix(i.suffix, "apps"))
+	if nComponents := len(components); nComponents < 2 {
+		return nil, fmt.Errorf("Start all installations / all applications not yet implemented (%v)", i.suffix)
+	} else if nComponents > 2 {
+		return nil, errInvalidSuffix
+	}
+	app, installation := components[0], components[1]
+	installationDir := filepath.Join(i.config.Root, applicationDirName(app), installationDirName(installation))
+	if _, err := os.Stat(installationDir); err != nil {
+		if os.IsNotExist(err) {
+			return nil, errNotExist
+		}
+		vlog.Errorf("Stat(%v) failed: %v", installationDir, err)
+		return nil, errOperationFailed
+	}
+	currLink := filepath.Join(installationDir, "current")
+	envelope, err := loadEnvelope(currLink)
+	if err != nil {
+		return nil, err
+	}
+	binPath := filepath.Join(currLink, "bin")
+	if _, err := os.Stat(binPath); err != nil {
+		vlog.Errorf("Stat(%v) failed: %v", binPath, err)
+		return nil, errOperationFailed
+	}
+	instanceID := generateID()
+	// TODO(caprita): Clean up instanceDir upon failure.
+	instanceDir := filepath.Join(installationDir, "instances", instanceDirName(instanceID))
+	cmd, err := generateCommand(envelope, binPath, instanceDir)
+	if err != nil {
+		vlog.Errorf("generateCommand(%v, %v, %v) failed: %v", envelope, binPath, instanceDir, err)
+		return nil, errOperationFailed
+	}
+	// Setup up the child process callback.
+	id := i.generateCallbackID()
+	cfg := config.New()
+	cfg.Set(mgmt.ParentNodeManagerConfigKey, naming.MakeTerminal(naming.Join(i.config.Name, id)))
+	handle := vexec.NewParentHandle(cmd, vexec.ConfigOpt{cfg})
+	// Make the channel buffered to avoid blocking the Set method when
+	// nothing is receiving on the channel.  This happens e.g. when
+	// unregisterCallbacks executes before Set is called.
+	callbackChan := make(chan string, 1)
+	i.registerCallback(id, mgmt.AppCycleManagerConfigKey, callbackChan)
+	defer i.unregisterCallbacks(id)
+	// Start the child process.
+	if err := handle.Start(); err != nil {
+		vlog.Errorf("Start() failed: %v", err)
+		return nil, errOperationFailed
+	}
+	// Wait for the child process to start.
+	testTimeout := 10 * time.Second
+	if err := handle.WaitForReady(testTimeout); err != nil {
+		vlog.Errorf("WaitForReady(%v) failed: %v", testTimeout, err)
+		if err := handle.Clean(); err != nil {
+			vlog.Errorf("Clean() failed: %v", err)
+		}
+		return nil, errOperationFailed
+	}
+	select {
+	case childName := <-callbackChan:
+		instanceInfo := &instanceInfo{
+			AppCycleMgrName: childName,
+			Pid:             handle.Pid(),
+		}
+		if err := saveInstanceInfo(instanceDir, instanceInfo); err != nil {
+			if err := handle.Clean(); err != nil {
+				vlog.Errorf("Clean() failed: %v", err)
+			}
+			return nil, err
+		}
+		// TODO(caprita): Spin up a goroutine to reap child status upon
+		// exit and transition it to suspended state if it exits on its
+		// own.
+	case <-time.After(testTimeout):
+		vlog.Errorf("Waiting for callback timed out")
+		if err := handle.Clean(); err != nil {
+			vlog.Errorf("Clean() failed: %v", err)
+		}
+		return nil, errOperationFailed
+	}
+	return []string{instanceID}, nil
+}
+
+func saveInstanceInfo(dir string, info *instanceInfo) error {
+	jsonInfo, err := json.Marshal(info)
+	if err != nil {
+		vlog.Errorf("Marshal(%v) failed: %v", info, err)
+		return errOperationFailed
+	}
+	infoPath := filepath.Join(dir, "info")
+	if err := ioutil.WriteFile(infoPath, jsonInfo, 0600); err != nil {
+		vlog.Errorf("WriteFile(%v) failed: %v", infoPath, err)
+		return errOperationFailed
+	}
+	return nil
+}
+
+func loadInstanceInfo(dir string) (*instanceInfo, error) {
+	infoPath := filepath.Join(dir, "info")
+	info := new(instanceInfo)
+	if infoBytes, err := ioutil.ReadFile(infoPath); err != nil {
+		vlog.Errorf("ReadFile(%v) failed: %v", infoPath, err)
+		return nil, errOperationFailed
+	} else if err := json.Unmarshal(infoBytes, info); err != nil {
+		vlog.Errorf("Unmarshal(%v) failed: %v", infoBytes, err)
+		return nil, errOperationFailed
+	}
+	return info, nil
+}
+
+func (i *invoker) Stop(call ipc.ServerContext, deadline uint32) error {
+	// TODO(caprita): implement deadline.
 	vlog.VI(1).Infof("%v.Stop(%v)", i.suffix, deadline)
-	// TODO(jsimsa): Implement.
+	if !strings.HasPrefix(i.suffix, "apps") {
+		return errInvalidSuffix
+	}
+	components := splitName(strings.TrimPrefix(i.suffix, "apps"))
+	if nComponents := len(components); nComponents < 3 {
+		return fmt.Errorf("Stop all instances / all installations / all applications not yet implemented (%v)", i.suffix)
+	} else if nComponents > 3 {
+		return errInvalidSuffix
+	}
+	app, installation, instance := components[0], components[1], components[2]
+	instancesDir := filepath.Join(i.config.Root, applicationDirName(app), installationDirName(installation), "instances")
+	instanceDir := filepath.Join(instancesDir, instanceDirName(instance))
+	stoppedInstanceDir := filepath.Join(instancesDir, stoppedInstanceDirName(instance))
+	if err := os.Rename(instanceDir, stoppedInstanceDir); err != nil {
+		// A missing instance directory is not an internal failure.
+		if os.IsNotExist(err) {
+			return errNotExist
+		}
+		vlog.Errorf("Rename(%v, %v) failed: %v", instanceDir, stoppedInstanceDir, err)
+		return errOperationFailed
+	}
+	// TODO(caprita): restore the instance to unstopped upon failure?
+
+	info, err := loadInstanceInfo(stoppedInstanceDir)
+	if err != nil {
+		return errOperationFailed
+	}
+	appStub, err := appcycle.BindAppCycle(info.AppCycleMgrName)
+	if err != nil {
+		vlog.Errorf("BindAppCycle(%v) failed: %v", info.AppCycleMgrName, err)
+		return errOperationFailed
+	}
+	stream, err := appStub.Stop(rt.R().NewContext())
+	if err != nil {
+		vlog.Errorf("Got error: %v", err)
+		return errOperationFailed
+	}
+	rstream := stream.RecvStream()
+	for rstream.Advance() {
+		vlog.VI(2).Infof("%v.Stop(%v) task update: %v", i.suffix, deadline, rstream.Value())
+	}
+	if err := rstream.Err(); err != nil {
+		vlog.Errorf("Stream returned an error: %v", err)
+		return errOperationFailed
+	}
+	if err := stream.Finish(); err != nil {
+		vlog.Errorf("Got error: %v", err)
+		return errOperationFailed
+	}
 	return nil
 }
 
@@ -697,16 +918,16 @@
 
 func (i *invoker) Set(_ ipc.ServerContext, key, value string) error {
 	vlog.VI(1).Infof("%v.Set(%v, %v)", i.suffix, key, value)
-	// For now, only handle the child node manager name.  We'll add handling
-	// for the child's app cycle manager name later on.
-	if key != mgmt.ChildNodeManagerConfigKey {
-		return nil
-	}
+	id := i.suffix
 	i.internal.channelsMutex.Lock()
-	channel, ok := i.internal.channels[i.suffix]
+	if _, ok := i.internal.channels[id]; !ok {
+		i.internal.channelsMutex.Unlock()
+		return errInvalidSuffix
+	}
+	channel, ok := i.internal.channels[id][key]
 	i.internal.channelsMutex.Unlock()
 	if !ok {
-		return errInvalidSuffix
+		return nil
 	}
 	channel <- value
 	return nil
diff --git a/services/mgmt/node/impl/profile.go b/services/mgmt/node/impl/profile.go
new file mode 100644
index 0000000..d522784
--- /dev/null
+++ b/services/mgmt/node/impl/profile.go
@@ -0,0 +1,191 @@
+package impl
+
+import (
+	"bytes"
+	"errors"
+	"os/exec"
+	"runtime"
+	"strings"
+
+	"veyron/services/mgmt/profile"
+
+	"veyron2/services/mgmt/build"
+	"veyron2/services/mgmt/node"
+)
+
+// computeNodeProfile generates a description of the runtime
+// environment (supported file format, OS, architecture, libraries) of
+// the host node.
+//
+// TODO(jsimsa): Avoid computing the host node description from
+// scratch if a recent cached copy exists.
+func computeNodeProfile() (*profile.Specification, error) {
+	result := profile.Specification{}
+
+	// Find out what the supported file format, operating system, and
+	// architecture is.
+	switch runtime.GOOS {
+	case "darwin":
+		result.Format = build.MACH
+		result.OS = build.Darwin
+	case "linux":
+		result.Format = build.ELF
+		result.OS = build.Linux
+	case "windows":
+		result.Format = build.PE
+		result.OS = build.Windows
+	default:
+		return nil, errors.New("Unsupported operating system: " + runtime.GOOS)
+	}
+	switch runtime.GOARCH {
+	case "amd64":
+		result.Arch = build.AMD64
+	case "arm":
+		result.Arch = build.ARM
+	case "x86":
+		result.Arch = build.X86
+	default:
+		return nil, errors.New("Unsupported hardware architecture: " + runtime.GOARCH)
+	}
+
+	// Find out what the installed dynamically linked libraries are.
+	switch runtime.GOOS {
+	case "linux":
+		// For Linux, we identify what dynamically linked libraries are
+		// installed by parsing the output of "ldconfig -p".
+		command := exec.Command("ldconfig", "-p")
+		output, err := command.CombinedOutput()
+		if err != nil {
+			return nil, err
+		}
+		buf := bytes.NewBuffer(output)
+		// Throw away the first line of output from ldconfig.
+		if _, err := buf.ReadString('\n'); err != nil {
+			return nil, errors.New("Could not identify libraries.")
+		}
+		// Extract the library name and version from every subsequent line.
+		result.Libraries = make(map[profile.Library]struct{})
+		line, err := buf.ReadString('\n')
+		for err == nil {
+			words := strings.Split(strings.Trim(line, " \t\n"), " ")
+			if len(words) > 0 && words[0] != "" {
+				tokens := strings.Split(words[0], ".so")
+				if len(tokens) != 2 {
+					return nil, errors.New("Could not identify library: " + words[0])
+				}
+				name := strings.TrimPrefix(tokens[0], "lib")
+				major, minor := "", ""
+				tokens = strings.SplitN(tokens[1], ".", 3)
+				if len(tokens) >= 2 {
+					major = tokens[1]
+				}
+				if len(tokens) >= 3 {
+					minor = tokens[2]
+				}
+				result.Libraries[profile.Library{Name: name, MajorVersion: major, MinorVersion: minor}] = struct{}{}
+			}
+			line, err = buf.ReadString('\n')
+		}
+	case "darwin":
+		// TODO(jsimsa): Implement.
+	case "windows":
+		// TODO(jsimsa): Implement.
+	default:
+		return nil, errors.New("Unsupported operating system: " + runtime.GOOS)
+	}
+	return &result, nil
+}
+
+// getProfile gets a profile description for the given profile.
+//
+// TODO(jsimsa): Avoid retrieving the profile description from a
+// remote server if a recent cached copy exists.
+func getProfile(name string) (*profile.Specification, error) {
+	// TODO(jsimsa): This function assumes the existence of a profile
+	// server from which the profiles can be retrieved. The profile
+	// server is a work in progress. When it exists, the commented out
+	// code below should work.
+	var spec profile.Specification
+	/*
+			client, err := r.NewClient()
+			if err != nil {
+				vlog.Errorf("NewClient() failed: %v", err)
+				return nil, err
+			}
+			defer client.Close()
+		  server := // TODO
+			method := "Specification"
+			inputs := make([]interface{}, 0)
+			call, err := client.StartCall(server + "/" + name, method, inputs)
+			if err != nil {
+				vlog.Errorf("StartCall(%s, %q, %v) failed: %v\n", server + "/" + name, method, inputs, err)
+				return nil, err
+			}
+			if err := call.Finish(&spec); err != nil {
+				vlog.Errorf("Finish(%v) failed: %v\n", &spec, err)
+				return nil, err
+			}
+	*/
+	return &spec, nil
+}
+
+// getKnownProfiles gets a list of descriptions for all publicly known
+// profiles.
+//
+// TODO(jsimsa): Avoid retrieving the list of known profiles from a
+// remote server if a recent cached copy exists.
+func getKnownProfiles() ([]profile.Specification, error) {
+	// TODO(jsimsa): This function assumes the existence of a profile
+	// server from which a list of known profiles can be retrieved. The
+	// profile server is a work in progress. When it exists, the
+	// commented out code below should work.
+	knownProfiles := make([]profile.Specification, 0)
+	/*
+			client, err := r.NewClient()
+			if err != nil {
+				vlog.Errorf("NewClient() failed: %v\n", err)
+				return nil, err
+			}
+			defer client.Close()
+		  server := // TODO
+			method := "List"
+			inputs := make([]interface{}, 0)
+			call, err := client.StartCall(server, method, inputs)
+			if err != nil {
+				vlog.Errorf("StartCall(%s, %q, %v) failed: %v\n", server, method, inputs, err)
+				return nil, err
+			}
+			if err := call.Finish(&knownProfiles); err != nil {
+				vlog.Errorf("Finish(&knownProfiles) failed: %v\n", err)
+				return nil, err
+			}
+	*/
+	return knownProfiles, nil
+}
+
+// matchProfiles inputs a profile that describes the host node and a
+// set of publicly known profiles and outputs a node description that
+// identifies the publicly known profiles supported by the host node.
+func matchProfiles(p *profile.Specification, known []profile.Specification) node.Description {
+	result := node.Description{Profiles: make(map[string]struct{})}
+loop:
+	for _, profile := range known {
+		if profile.Format != p.Format {
+			continue
+		}
+		if profile.OS != p.OS {
+			continue
+		}
+		if profile.Arch != p.Arch {
+			continue
+		}
+		for library := range profile.Libraries {
+			// Current implementation requires exact library name and version match.
+			if _, found := p.Libraries[library]; !found {
+				continue loop
+			}
+		}
+		result.Profiles[profile.Label] = struct{}{}
+	}
+	return result
+}
diff --git a/services/store/memstore/acl/cache.go b/services/store/memstore/acl/cache.go
deleted file mode 100644
index cd3f5b9..0000000
--- a/services/store/memstore/acl/cache.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package acl
-
-import (
-	"bytes"
-
-	"veyron/runtimes/google/lib/functional"
-	"veyron/runtimes/google/lib/functional/rb"
-	"veyron2/storage"
-)
-
-// FindFunc is used to fetch the ACL given its ID.
-type FindFunc func(id storage.ID) *storage.ACL
-
-// Cache keeps a map from ID -> ACL, based on the contents of the store.
-//
-// Cache is thread-compatible -- mutating methods require the caller to
-// have exclusive access to the object.
-//
-// TODO(jyh): Currently, the cache can grow without bound.  Implement a
-// replacement policy, for a cache with a bounded number of entries.
-type Cache struct {
-	contents functional.Set // *cacheEntry
-	find     FindFunc
-}
-
-// cacheEntry is an entry in the cache, mapping storage.ID -> storage.ACL.
-type cacheEntry struct {
-	id  storage.ID
-	acl *storage.ACL
-}
-
-var (
-	emptyCacheContents = rb.NewSet(compareCacheEntriesByID)
-)
-
-// compareCacheEntriesByID compares the two cells' IDs.
-func compareCacheEntriesByID(a, b interface{}) bool {
-	return bytes.Compare(a.(*cacheEntry).id[:], b.(*cacheEntry).id[:]) < 0
-}
-
-// NewCache returns an empty cache.
-func NewCache(find FindFunc) Cache {
-	return Cache{contents: emptyCacheContents, find: find}
-}
-
-// Clear resets the cache.
-func (c *Cache) Clear() {
-	c.contents = emptyCacheContents
-	c.find = nil
-}
-
-// Invalidate removes cache entry.  It should be called whenever the ACL
-// associated with the ID is changed.
-func (c *Cache) Invalidate(id storage.ID) {
-	c.contents = c.contents.Remove(&cacheEntry{id: id})
-}
-
-// UpdateFind changes the find function.
-func (c *Cache) UpdateFinder(find FindFunc) {
-	c.find = find
-}
-
-// get fetches a cache entry.  If the entry is not in the cache, the function
-// <find> is used to fetch the value, the result is saved in the cache, and
-// returned.
-func (c *Cache) get(id storage.ID) *storage.ACL {
-	e := &cacheEntry{id: id}
-	x, ok := c.contents.Get(e)
-	if ok {
-		return x.(*cacheEntry).acl
-	}
-	acl := c.find(id)
-	if acl == nil {
-		return nil
-	}
-	e.acl = acl
-	c.contents = c.contents.Put(e)
-	return acl
-}
diff --git a/services/store/memstore/acl/checker.go b/services/store/memstore/acl/checker.go
deleted file mode 100644
index c3921e7..0000000
--- a/services/store/memstore/acl/checker.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package acl
-
-import (
-	"bytes"
-	"fmt"
-
-	"veyron2/security"
-	"veyron2/storage"
-)
-
-// Entry includes an ACL and flags to indicate whether the ACL should be inherited.
-type Entry struct {
-	ACL       *storage.ACL
-	Inherited bool
-}
-
-// Set is a set of Entries, indexed by their IDs.
-type Set map[storage.ID]Entry
-
-// Checker is used to check if a principal matches ACLs extracted from the tags
-// applied to objects in a store.
-//
-// While walking through a path as part of resolving a cell, call Update for each
-// component of the path.  Checker will then keep track of the inherited ACLs
-// for that path.
-type Checker struct {
-	cache     *Cache
-	principal security.PublicID
-	acls      Set
-}
-
-// NewChecker constructs a new Checker and returns it.
-func NewChecker(cache *Cache, clientID security.PublicID, acls Set) *Checker {
-	// Copy the Set.
-	cp := make(Set)
-	for id, acl := range acls {
-		cp[id] = acl
-	}
-	return &Checker{cache: cache, principal: clientID, acls: cp}
-}
-
-// Copy, so that updates do not affect the original.
-func (c Checker) Copy() *Checker {
-	acls := make(Set)
-	for id, acl := range c.acls {
-		acls[id] = acl
-	}
-	c.acls = acls
-	return &c
-}
-
-// Update is called for each step in a path traversal to update the
-// Checker using the TagList associated with a value in the store.
-func (c *Checker) Update(tags storage.TagList) {
-	// The caller has just made one step deeper into the path.  The non-inherited
-	// ACLs are no longer relevant, so prune them.
-	for id, entry := range c.acls {
-		if !entry.Inherited {
-			delete(c.acls, id)
-		}
-	}
-
-	// Add the new ACLc.
-	for _, tag := range tags {
-		switch tag.Op {
-		case storage.RemoveACL:
-			delete(c.acls, tag.ACL)
-		case storage.AddACL:
-			if acl := c.cache.get(tag.ACL); acl != nil {
-				c.acls[tag.ACL] = Entry{ACL: acl}
-			}
-		case storage.AddInheritedACL:
-			if acl := c.cache.get(tag.ACL); acl != nil {
-				c.acls[tag.ACL] = Entry{ACL: acl, Inherited: true}
-			}
-		}
-	}
-}
-
-// IsAllowed returns true iff the current acls allow the principal to use a
-// label.
-func (c *Checker) IsAllowed(label security.Label) bool {
-	for _, entry := range c.acls {
-		for key, labels := range entry.ACL.Contents {
-			if labels.HasLabel(label) {
-				if c.principal.Match(key) {
-					return true
-				}
-			}
-		}
-	}
-	return false
-}
-
-// IsEqual returns true iff the checkers are exactly equivalent, containing the same ACLs.
-func (c1 *Checker) IsEqual(c2 *Checker) bool {
-	if c1.cache != c2.cache || c1.principal != c2.principal || len(c1.acls) != len(c2.acls) {
-		return false
-	}
-
-	for id, _ := range c1.acls {
-		if _, ok := c2.acls[id]; !ok {
-			return false
-		}
-	}
-	return true
-}
-
-func (c Checker) String() string {
-	var buf bytes.Buffer
-	fmt.Fprintf(&buf, "Checker{principal:%q", c.principal)
-	for p, l := range c.acls {
-		fmt.Fprintf(&buf, ", %s:%s", p, l)
-	}
-	buf.WriteRune('}')
-	return buf.String()
-}
-
-func (e Entry) String() string {
-	if e.Inherited {
-		return "[Inherited]" + e.ACL.String()
-	}
-	return e.ACL.String()
-}
diff --git a/services/store/memstore/query/eval_test.go b/services/store/memstore/query/eval_test.go
index d261a4e..8ab82d4 100644
--- a/services/store/memstore/query/eval_test.go
+++ b/services/store/memstore/query/eval_test.go
@@ -605,10 +605,6 @@
 	return m.it
 }
 
-func (m *mockSnapshot) PathMatch(pid security.PublicID, id storage.ID, regex *state.PathRegex) bool {
-	return false
-}
-
 func (m *mockSnapshot) Find(id storage.ID) *state.Cell {
 	return nil
 }
diff --git a/services/store/memstore/refs/builder.go b/services/store/memstore/refs/builder.go
index 05f12a9..18582f2 100644
--- a/services/store/memstore/refs/builder.go
+++ b/services/store/memstore/refs/builder.go
@@ -6,7 +6,6 @@
 
 	"veyron/runtimes/google/lib/functional"
 
-	"veyron2/security"
 	"veyron2/storage"
 )
 
@@ -45,7 +44,7 @@
 // AddDEntries adds the references contained in the DEntry list.
 func (b *Builder) AddDEntries(d []*storage.DEntry) {
 	for _, de := range d {
-		b.refs = b.refs.Put(&Ref{ID: de.ID, Path: NewSingletonPath(de.Name), Label: security.ReadLabel})
+		b.refs = b.refs.Put(&Ref{ID: de.ID, Path: NewSingletonPath(de.Name)})
 	}
 }
 
@@ -58,20 +57,13 @@
 	b.addRefs(nil, reflect.ValueOf(v))
 }
 
-// AddTags adds the references contained in the TagList.
-func (b *Builder) AddTags(v storage.TagList) {
-	for i, tag := range v {
-		b.refs.Put(&Ref{ID: tag.ACL, Path: tagsDir.Append(strconv.Itoa(i)), Label: security.AdminLabel})
-	}
-}
-
 func (b *Builder) addRefs(path *Path, v reflect.Value) {
 	if !v.IsValid() {
 		return
 	}
 	ty := v.Type()
 	if ty == tyID {
-		b.refs = b.refs.Put(&Ref{ID: v.Interface().(storage.ID), Path: path, Label: security.ReadLabel})
+		b.refs = b.refs.Put(&Ref{ID: v.Interface().(storage.ID), Path: path})
 		return
 	}
 	switch ty.Kind() {
diff --git a/services/store/memstore/refs/path.go b/services/store/memstore/refs/path.go
index 5cb86db..4a151ee 100644
--- a/services/store/memstore/refs/path.go
+++ b/services/store/memstore/refs/path.go
@@ -21,15 +21,9 @@
 	table map[Path]*Path
 }
 
-const (
-	TagsDirName = ".tags"
-)
-
 var (
 	pathTable = &pathHashConsTable{table: make(map[Path]*Path)}
 	nilPath   *Path
-
-	tagsDir = NewSingletonPath(TagsDirName)
 )
 
 // ComparePaths defines a total order over *Path values, based on pointer
diff --git a/services/store/memstore/refs/refs.go b/services/store/memstore/refs/refs.go
index 2576c73..c7f791b 100644
--- a/services/store/memstore/refs/refs.go
+++ b/services/store/memstore/refs/refs.go
@@ -5,7 +5,6 @@
 	"veyron/runtimes/google/lib/functional"
 	"veyron/runtimes/google/lib/functional/rb"
 
-	"veyron2/security"
 	"veyron2/storage"
 )
 
@@ -15,9 +14,8 @@
 // Ref represents a single reference in a store value.  It includes the
 // storage.ID, and the path to the reference.
 type Ref struct {
-	ID    storage.ID
-	Path  *Path
-	Label security.Label
+	ID   storage.ID
+	Path *Path
 }
 
 // Dir represents a directory, which is a set of *Ref, sorted by path.
@@ -58,7 +56,7 @@
 func BuildDir(l []*storage.DEntry) Dir {
 	d := EmptyDir
 	for _, de := range l {
-		d = d.Put(&Ref{ID: de.ID, Path: NewSingletonPath(de.Name), Label: security.ReadLabel})
+		d = d.Put(&Ref{ID: de.ID, Path: NewSingletonPath(de.Name)})
 	}
 	return d
 }
diff --git a/services/store/memstore/state/cell.go b/services/store/memstore/state/cell.go
index 2913146..f0f9c32 100644
--- a/services/store/memstore/state/cell.go
+++ b/services/store/memstore/state/cell.go
@@ -23,10 +23,7 @@
 	// Implicit directory.
 	Dir refs.Set
 
-	// tags are the access control tags.
-	Tags storage.TagList
-
-	// refs includes the references in the value, the dir, and the tags.
+	// refs includes the references in the value and the dir.
 	//
 	// TODO(jyh): This is simple, but it would be more space efficient to
 	// include only the refs in the value, or drop this field entirely.
@@ -69,7 +66,6 @@
 	r := refs.NewBuilder()
 	r.AddValue(c.Value)
 	r.AddDir(c.Dir)
-	r.AddTags(c.Tags)
 	c.refs = r.Get()
 }
 
diff --git a/services/store/memstore/state/iterator.go b/services/store/memstore/state/iterator.go
index 70eac5c..e7c5275 100644
--- a/services/store/memstore/state/iterator.go
+++ b/services/store/memstore/state/iterator.go
@@ -3,7 +3,6 @@
 import (
 	"fmt"
 
-	"veyron/services/store/memstore/acl"
 	"veyron/services/store/memstore/refs"
 
 	"veyron2/security"
@@ -59,12 +58,10 @@
 }
 
 type next struct {
-	// checker is the acl.Checker for the object _containing_ the reference.
-	checker *acl.Checker
-	parent  *refs.FullPath
-	path    *refs.Path
-	id      storage.ID
-	action  action
+	parent *refs.FullPath
+	path   *refs.Path
+	id     storage.ID
+	action action
 }
 
 type action int
@@ -116,8 +113,7 @@
 func (sn *snapshot) NewIterator(pid security.PublicID, path storage.PathName,
 	pathFilter PathFilter, filter IterFilter) Iterator {
 
-	checker := sn.newPermChecker(pid)
-	cell, suffix, v := sn.resolveCell(checker, path, nil)
+	cell, suffix, v := sn.resolveCell(path, nil)
 	if cell == nil {
 		return &errorIterator{snapshot: sn}
 	}
@@ -153,7 +149,7 @@
 	}
 
 	if expand {
-		it.pushVisitAll(checker, it.path, set)
+		it.pushVisitAll(it.path, set)
 	}
 	if !ret {
 		it.Next()
@@ -165,7 +161,7 @@
 func (it *iterator) pushUnvisit(path *refs.Path, id storage.ID) {
 	switch it.pathFilter {
 	case ListPaths:
-		it.next = append(it.next, next{nil, nil, path, id, unvisit})
+		it.next = append(it.next, next{nil, path, id, unvisit})
 	case ListObjects:
 		// Do not unvisit the object, as it is on a path already seen by
 		// it.filter.
@@ -174,14 +170,11 @@
 	}
 }
 
-func (it *iterator) pushVisitAll(checker *acl.Checker,
-	parentPath *refs.FullPath, set refs.Set) {
+func (it *iterator) pushVisitAll(parentPath *refs.FullPath, set refs.Set) {
 
 	set.Iter(func(x interface{}) bool {
 		ref := x.(*refs.Ref)
-		if checker.IsAllowed(ref.Label) {
-			it.next = append(it.next, next{checker, parentPath, ref.Path, ref.ID, visit})
-		}
+		it.next = append(it.next, next{parentPath, ref.Path, ref.ID, visit})
 		return true
 	})
 }
@@ -205,7 +198,6 @@
 func (it *iterator) Next() {
 	var n next
 	var fullPath *refs.FullPath
-	var checker *acl.Checker
 	var c *Cell
 	for {
 		topIndex := len(it.next) - 1
@@ -235,18 +227,11 @@
 			panic(fmt.Sprintf("Dangling reference: %s", n.id))
 		}
 
-		// Check permissions.
-		checker = n.checker.Copy()
-		checker.Update(c.Tags)
-		if !checker.IsAllowed(security.ReadLabel) {
-			continue
-		}
-
 		// Check the filter
 		ret, expand := it.filter(n.parent, n.path)
 		fullPath = n.parent.AppendPath(n.path)
 		if expand {
-			it.pushVisitAll(checker, fullPath, c.refs)
+			it.pushVisitAll(fullPath, c.refs)
 		}
 		if ret {
 			// Found a value.
diff --git a/services/store/memstore/state/iterator_test.go b/services/store/memstore/state/iterator_test.go
index a9f3b2b..ffc4bfc 100644
--- a/services/store/memstore/state/iterator_test.go
+++ b/services/store/memstore/state/iterator_test.go
@@ -163,107 +163,3 @@
 		{"teams/cardinals", "players/matt/team"},
 	})
 }
-
-func TestIteratorSecurity(t *testing.T) {
-	st := state.New(rootPublicID)
-	sn := st.MutableSnapshot()
-
-	// Create /Users/jane and give her RWA permissions.
-	janeACLID := putPath(t, sn, rootPublicID, "/Users/jane/acls/janeRWA", &storage.ACL{
-		Name: "Jane",
-		Contents: security.ACL{
-			janeUser: security.LabelSet(security.ReadLabel | security.WriteLabel | security.AdminLabel),
-		},
-	})
-	janeTags := storage.TagList{
-		storage.Tag{Op: storage.AddInheritedACL, ACL: janeACLID},
-		storage.Tag{Op: storage.RemoveACL, ACL: state.EveryoneACLID},
-	}
-	put(t, sn, rootPublicID, "/Users/jane/.tags", janeTags)
-	put(t, sn, rootPublicID, "/Users/jane/aaa", "stuff")
-	sharedID := put(t, sn, rootPublicID, "/Users/jane/shared", "stuff")
-
-	// Create /Users/john and give him RWA permissions.
-	johnACLID := putPath(t, sn, rootPublicID, "/Users/john/acls/johnRWA", &storage.ACL{
-		Name: "John",
-		Contents: security.ACL{
-			johnUser: security.LabelSet(security.ReadLabel | security.WriteLabel | security.AdminLabel),
-		},
-	})
-	johnTags := storage.TagList{
-		storage.Tag{Op: storage.AddInheritedACL, ACL: johnACLID},
-		storage.Tag{Op: storage.RemoveACL, ACL: state.EveryoneACLID},
-	}
-	put(t, sn, rootPublicID, "/Users/john/.tags", johnTags)
-	put(t, sn, rootPublicID, "/Users/john/aaa", "stuff")
-	put(t, sn, rootPublicID, "/Users/john/shared", sharedID)
-
-	// Root gets everything.
-	checkAcyclicIterator(t, sn, rootPublicID, nil, []string{
-		"",
-		"Users",
-		"Users/jane",
-		"Users/jane/acls",
-		"Users/jane/acls/janeRWA",
-		"Users/jane/aaa",
-		"Users/john",
-		"Users/john/acls",
-		"Users/john/acls/johnRWA",
-		"Users/john/aaa",
-		"Users/jane/shared",
-		"Users/john/shared",
-	})
-	checkUniqueObjectsIterator(t, sn, rootPublicID, nil, [][]string{
-		{""},
-		{"Users"},
-		{"Users/jane"},
-		{"Users/jane/acls"},
-		{"Users/jane/acls/janeRWA"},
-		{"Users/jane/aaa"},
-		{"Users/john"},
-		{"Users/john/acls"},
-		{"Users/john/acls/johnRWA"},
-		{"Users/john/aaa"},
-		{"Users/jane/shared", "Users/john/shared"},
-	})
-
-	// Jane sees only her names.
-	checkAcyclicIterator(t, sn, janePublicID, nil, []string{
-		"",
-		"Users",
-		"Users/jane",
-		"Users/jane/acls",
-		"Users/jane/acls/janeRWA",
-		"Users/jane/aaa",
-		"Users/jane/shared",
-	})
-	checkUniqueObjectsIterator(t, sn, janePublicID, nil, [][]string{
-		{""},
-		{"Users"},
-		{"Users/jane"},
-		{"Users/jane/acls"},
-		{"Users/jane/acls/janeRWA"},
-		{"Users/jane/aaa"},
-		{"Users/jane/shared"},
-	})
-
-	// John sees only his names.
-	checkAcyclicIterator(t, sn, johnPublicID, nil, []string{
-		"",
-		"Users",
-		"Users/john",
-		"Users/john/acls",
-		"Users/john/acls/johnRWA",
-		"Users/john/aaa",
-		"Users/john/shared",
-	})
-	checkUniqueObjectsIterator(t, sn, johnPublicID, nil, [][]string{
-		{""},
-		{"Users"},
-		{"Users/john"},
-		{"Users/john/acls"},
-		{"Users/john/acls/johnRWA"},
-		{"Users/john/aaa"},
-		{"Users/john/shared"},
-	})
-}
diff --git a/services/store/memstore/state/mutable_snapshot.go b/services/store/memstore/state/mutable_snapshot.go
index f640eb1..731d6ca 100644
--- a/services/store/memstore/state/mutable_snapshot.go
+++ b/services/store/memstore/state/mutable_snapshot.go
@@ -3,7 +3,6 @@
 import (
 	"fmt"
 
-	"veyron/services/store/memstore/acl"
 	"veyron/services/store/memstore/field"
 	"veyron/services/store/memstore/refs"
 	"veyron/services/store/raw"
@@ -88,11 +87,6 @@
 	// Value is the new value.
 	Value interface{}
 
-	// Tags are the new tags.
-	//
-	// TODO(jyh): Replace with a delta encoding.
-	Tags storage.TagList
-
 	// Dir is the set of new directory entries.
 	//
 	// TODO(jyh): Replace this with a delta, to support large directories.
@@ -108,8 +102,6 @@
 	errBadValue             = verror.BadArgf("value has the wrong type")
 	errDuplicatePutMutation = verror.BadArgf("duplicate calls to PutMutation for the same ID")
 	errNotFound             = verror.NotFoundf("not found")
-	errNotTagList           = verror.BadArgf("not a TagList")
-	errPermissionDenied     = verror.NotAuthorizedf("")
 	errPreconditionFailed   = verror.Abortedf("precondition failed")
 
 	nullID storage.ID
@@ -168,7 +160,6 @@
 	// Perform a GC to clear out gcRoots.
 	sn.gc()
 	cp := sn.snapshot
-	cp.resetACLCache()
 	return &cp
 }
 
@@ -180,7 +171,6 @@
 	cp := *sn
 	cp.mutations = newMutations()
 	cp.gcRoots = make(map[storage.ID]struct{})
-	cp.resetACLCache()
 	return &cp
 }
 
@@ -206,7 +196,6 @@
 func (sn *MutableSnapshot) delete(c *Cell) {
 	sn.idTable = sn.idTable.Remove(c)
 	sn.deletions[c.ID] = c.Version
-	sn.aclCache.Invalidate(c.ID)
 }
 
 // put adds a cell to the state, also adding the new value to the Mutations set.
@@ -218,44 +207,32 @@
 		m.Value = c.Value
 		m.refs = c.refs
 		m.Dir = d
-		m.Tags = c.Tags
 	} else {
 		mu.Preconditions[c.ID] = c.Version
 		m = &Mutation{
 			Postcondition: storage.NewVersion(),
 			Value:         c.Value,
 			Dir:           d,
-			Tags:          c.Tags,
 			refs:          c.refs,
 		}
 		mu.Delta[c.ID] = m
 	}
 	c.Version = m.Postcondition
 	sn.idTable = sn.idTable.Put(c)
-	sn.aclCache.Invalidate(c.ID)
 }
 
 // add adds a new Value to the state, updating reference counts.  Fails if the
 // new value contains dangling references.
-func (sn *MutableSnapshot) add(parentChecker *acl.Checker, id storage.ID, v interface{}) (*Cell, error) {
+func (sn *MutableSnapshot) add(id storage.ID, v interface{}) (*Cell, error) {
 	c := sn.Find(id)
 	if c == nil {
 		// There is no current value, so create a new cell for the value and add
 		// it.
-		//
-		// There is no permissions check here because the caller is not modifying a preexisting value.
-		//
-		// TODO(jyh): However, the new value is created with default
-		// permissions, which does not include the ability to set the tags on
-		// the cell.  So the caller can wind up in a odd situation where they
-		// can create a value, but not be able to read it back, and no way to
-		// fix it.  Figure out whether this is a problem.
 		c = &Cell{
 			ID:       id,
 			refcount: 0,
 			Value:    v,
 			Dir:      refs.EmptyDir,
-			Tags:     storage.TagList{},
 			inRefs:   refs.Empty,
 			Version:  storage.NoVersion,
 		}
@@ -269,16 +246,11 @@
 	}
 
 	// There is already a value in the state, so replace it with the new value.
-	checker := parentChecker.Copy()
-	checker.Update(c.Tags)
-	return sn.replaceValue(checker, c, v)
+	return sn.replaceValue(c, v)
 }
 
 // replaceValue updates the cell.value.
-func (sn *MutableSnapshot) replaceValue(checker *acl.Checker, c *Cell, v interface{}) (*Cell, error) {
-	if !checker.IsAllowed(security.WriteLabel) {
-		return nil, errPermissionDenied
-	}
+func (sn *MutableSnapshot) replaceValue(c *Cell, v interface{}) (*Cell, error) {
 	cp := *c
 	cp.Value = v
 	cp.setRefs()
@@ -291,10 +263,7 @@
 }
 
 // replaceDir updates the cell.dir.
-func (sn *MutableSnapshot) replaceDir(checker *acl.Checker, c *Cell, d functional.Set) (*Cell, error) {
-	if !checker.IsAllowed(security.WriteLabel) {
-		return nil, errPermissionDenied
-	}
+func (sn *MutableSnapshot) replaceDir(c *Cell, d functional.Set) (*Cell, error) {
 	cp := *c
 	cp.Dir = d
 	cp.setRefs()
@@ -306,26 +275,9 @@
 	return &cp, nil
 }
 
-// replaceTags replaces the cell.tags.
-func (sn *MutableSnapshot) replaceTags(checker *acl.Checker, c *Cell, tags storage.TagList) (*Cell, error) {
-	if !checker.IsAllowed(security.AdminLabel) {
-		return nil, errPermissionDenied
-	}
-	cp := *c
-	cp.Tags = tags
-	cp.setRefs()
-	if !sn.refsExist(cp.refs) {
-		return nil, errBadRef
-	}
-	sn.put(&cp)
-	sn.updateRefs(c.ID, c.refs, cp.refs)
-	return &cp, nil
-}
-
 // Get returns the value for a path.
 func (sn *MutableSnapshot) Get(pid security.PublicID, path storage.PathName) (*storage.Entry, error) {
-	checker := sn.newPermChecker(pid)
-	cell, suffix, v := sn.resolveCell(checker, path, sn.mutations)
+	cell, suffix, v := sn.resolveCell(path, sn.mutations)
 	if cell == nil {
 		return nil, errNotFound
 	}
@@ -341,21 +293,20 @@
 // Put adds a new value to the state or replaces an existing one.  Returns
 // the *Stat for the enclosing *cell.
 func (sn *MutableSnapshot) Put(pid security.PublicID, path storage.PathName, v interface{}) (*storage.Stat, error) {
-	checker := sn.newPermChecker(pid)
-	c, err := sn.putValueByPath(checker, path, v)
+	c, err := sn.putValueByPath(path, v)
 	if err != nil {
 		return nil, err
 	}
 	return c.getStat(), nil
 }
 
-func (sn *MutableSnapshot) putValueByPath(checker *acl.Checker, path storage.PathName, v interface{}) (*Cell, error) {
+func (sn *MutableSnapshot) putValueByPath(path storage.PathName, v interface{}) (*Cell, error) {
 	v = deepcopy(v)
 
 	if path.IsRoot() {
-		return sn.putRoot(checker, v)
+		return sn.putRoot(v)
 	}
-	return sn.putValue(checker, path, v)
+	return sn.putValue(path, v)
 }
 
 // putValue is called for a normal Put() operation, where a new value is being
@@ -363,15 +314,12 @@
 // There are two cases: 1) the value <v> is written directly into the parent, or
 // 2) the field has type storage.ID.  In the latter case, the <id> is assigned
 // into the parent, and the value id->v is added to the idTable.
-func (sn *MutableSnapshot) putValue(checker *acl.Checker, path storage.PathName, v interface{}) (*Cell, error) {
+func (sn *MutableSnapshot) putValue(path storage.PathName, v interface{}) (*Cell, error) {
 	// Find the parent object.
-	c, suffix, _ := sn.resolveCell(checker, path[:len(path)-1], sn.mutations)
+	c, suffix, _ := sn.resolveCell(path[:len(path)-1], sn.mutations)
 	if c == nil {
 		return nil, errNotFound
 	}
-	if len(suffix) > 0 && suffix[0] == refs.TagsDirName {
-		return sn.putTagsValue(checker, path, suffix[1:], c, v)
-	}
 	value := deepcopy(c.Value)
 	p, s := field.Get(makeInnerReference(value), suffix)
 	if len(s) != 0 {
@@ -386,72 +334,28 @@
 		if len(suffix) != 0 {
 			return nil, errNotFound
 		}
-		if name == refs.TagsDirName {
-			return sn.putTags(checker, c, v)
-		}
-		return sn.putDirEntry(checker, c, name, v)
+		return sn.putDirEntry(c, name, v)
 	case field.SetFailedWrongType:
 		return nil, errBadValue
 	case field.SetAsID:
-		nc, err := sn.add(checker, id, v)
+		nc, err := sn.add(id, v)
 		if err != nil {
 			return nil, err
 		}
 		// The sn.add may have modified the cell, so fetch it again.
-		if _, err = sn.replaceValue(checker, sn.Find(c.ID), value); err != nil {
+		if _, err = sn.replaceValue(sn.Find(c.ID), value); err != nil {
 			return nil, err
 		}
 		return nc, nil
 	case field.SetAsValue:
-		return sn.replaceValue(checker, c, value)
+		return sn.replaceValue(c, value)
 	}
 	panic("not reached")
 }
 
-// putTagsValue modifies the cell.tags value.
-func (sn *MutableSnapshot) putTagsValue(checker *acl.Checker, path, suffix storage.PathName, c *Cell, v interface{}) (*Cell, error) {
-	tags := deepcopy(c.Tags).(storage.TagList)
-	p, s := field.Get(&tags, suffix)
-	if len(s) != 0 {
-		return nil, errNotFound
-	}
-
-	// Add value to the parent.
-	name := path[len(path)-1]
-	result, id := field.Set(p, name, v)
-	switch result {
-	case field.SetFailedNotFound:
-		return nil, errNotFound
-	case field.SetFailedWrongType:
-		return nil, errBadValue
-	case field.SetAsID:
-		nc, err := sn.add(checker, id, v)
-		if err != nil {
-			return nil, err
-		}
-		// The sn.add may have modified the cell, so fetch it again.
-		if _, err = sn.replaceTags(checker, sn.Find(c.ID), tags); err != nil {
-			return nil, err
-		}
-		return nc, nil
-	case field.SetAsValue:
-		return sn.replaceTags(checker, c, tags)
-	}
-	panic("not reached")
-}
-
-// putTags updates the tags.
-func (sn *MutableSnapshot) putTags(checker *acl.Checker, c *Cell, v interface{}) (*Cell, error) {
-	tags, ok := v.(storage.TagList)
-	if !ok {
-		return nil, errNotTagList
-	}
-	return sn.replaceTags(checker, c, tags)
-}
-
 // putDirEntry replaces or adds a directory entry.
-func (sn *MutableSnapshot) putDirEntry(checker *acl.Checker, c *Cell, name string, v interface{}) (*Cell, error) {
-	r := &refs.Ref{Path: refs.NewSingletonPath(name), Label: security.ReadLabel}
+func (sn *MutableSnapshot) putDirEntry(c *Cell, name string, v interface{}) (*Cell, error) {
+	r := &refs.Ref{Path: refs.NewSingletonPath(name)}
 	if id, ok := v.(storage.ID); ok {
 		ncell := sn.Find(id)
 		if ncell == nil {
@@ -459,7 +363,7 @@
 		}
 		r.ID = id
 		dir := c.Dir.Put(r)
-		if _, err := sn.replaceDir(checker, c, dir); err != nil {
+		if _, err := sn.replaceDir(c, dir); err != nil {
 			return nil, err
 		}
 		return ncell, nil
@@ -469,7 +373,7 @@
 	if !ok {
 		// The entry does not exist yet; create it.
 		id := storage.NewID()
-		ncell, err := sn.add(checker, id, v)
+		ncell, err := sn.add(id, v)
 		if err != nil {
 			return nil, err
 		}
@@ -477,22 +381,18 @@
 		// The sn.add may have modified the cell, so fetch it again.
 		c = sn.Find(c.ID)
 		dir := c.Dir.Put(r)
-		if _, err := sn.replaceDir(checker, c, dir); err != nil {
+		if _, err := sn.replaceDir(c, dir); err != nil {
 			return nil, err
 		}
 		return ncell, nil
 	}
 
 	// Replace the existing value.
-	return sn.add(checker, x.(*refs.Ref).ID, v)
+	return sn.add(x.(*refs.Ref).ID, v)
 }
 
 // putRoot replaces the root.
-func (sn *MutableSnapshot) putRoot(checker *acl.Checker, v interface{}) (*Cell, error) {
-	if !checker.IsAllowed(security.WriteLabel) {
-		return nil, errPermissionDenied
-	}
-
+func (sn *MutableSnapshot) putRoot(v interface{}) (*Cell, error) {
 	id := sn.rootID
 	c := sn.Find(id)
 	if c == nil {
@@ -500,7 +400,7 @@
 	}
 
 	// Add the new element.
-	ncell, err := sn.add(checker, id, v)
+	ncell, err := sn.add(id, v)
 	if err != nil {
 		return nil, err
 	}
@@ -517,11 +417,7 @@
 
 // Remove removes a value.
 func (sn *MutableSnapshot) Remove(pid security.PublicID, path storage.PathName) error {
-	checker := sn.newPermChecker(pid)
 	if path.IsRoot() {
-		if !checker.IsAllowed(security.WriteLabel) {
-			return errPermissionDenied
-		}
 		sn.unref(sn.rootID)
 		sn.rootID = nullID
 		sn.mutations.RootID = nullID
@@ -530,20 +426,16 @@
 	}
 
 	// Split the names into directory and field parts.
-	cell, suffix, _ := sn.resolveCell(checker, path[:len(path)-1], sn.mutations)
+	cell, suffix, _ := sn.resolveCell(path[:len(path)-1], sn.mutations)
 	if cell == nil {
 		return errNotFound
 	}
 
 	// Remove the field.
 	name := path[len(path)-1]
-	if name == refs.TagsDirName {
-		_, err := sn.replaceTags(checker, cell, storage.TagList{})
-		return err
-	}
-	r := &refs.Ref{Path: refs.NewSingletonPath(name), Label: security.ReadLabel}
+	r := &refs.Ref{Path: refs.NewSingletonPath(name)}
 	if cell.Dir.Contains(r) {
-		_, err := sn.replaceDir(checker, cell, cell.Dir.Remove(r))
+		_, err := sn.replaceDir(cell, cell.Dir.Remove(r))
 		return err
 	}
 	value := deepcopy(cell.Value)
@@ -552,7 +444,7 @@
 		return errNotFound
 	}
 
-	_, err := sn.replaceValue(checker, cell, value)
+	_, err := sn.replaceValue(cell, value)
 	return err
 }
 
diff --git a/services/store/memstore/state/mutable_snapshot_test.go b/services/store/memstore/state/mutable_snapshot_test.go
index 34366c6..df567ba 100644
--- a/services/store/memstore/state/mutable_snapshot_test.go
+++ b/services/store/memstore/state/mutable_snapshot_test.go
@@ -75,7 +75,7 @@
 
 func expectValue(t *testing.T, sn *MutableSnapshot, path string, v interface{}) {
 	_, file, line, _ := runtime.Caller(1)
-	cell, _, _ := sn.resolveCell(sn.newPermChecker(rootPublicID), storage.ParsePath(path), nil)
+	cell, _, _ := sn.resolveCell(storage.ParsePath(path), nil)
 	if cell == nil {
 		t.Errorf("%s(%d): path does not exist: %s", file, line, path)
 	}
diff --git a/services/store/memstore/state/pathmatch.go b/services/store/memstore/state/pathmatch.go
deleted file mode 100644
index 4e23a21..0000000
--- a/services/store/memstore/state/pathmatch.go
+++ /dev/null
@@ -1,378 +0,0 @@
-package state
-
-import (
-	"fmt"
-
-	"veyron/services/store/memstore/acl"
-	"veyron/services/store/memstore/pathregex"
-	"veyron/services/store/memstore/refs"
-	"veyron2/security"
-	"veyron2/storage"
-)
-
-// Path matching is used to test whether a value has a pathname that matches a
-// regular expression.  The snapshot is a labeled directed graph that can be
-// viewed as a finite automaton, and the pathregex is a finite automaton, so the
-// matching problem asks whether the intersection of the regular languages
-// defined by these automata is nonempty.
-//
-// We implement it in two phases.
-//
-// The first phase is an optimization, where we compute the intersection
-// automaton from the reversed automata.  This is typically more efficient
-// because the subgraph formed from the incoming edges is usually much smaller
-// than the entire graph.  Note that the snapshot is deterministic when viewed
-// as a forward automaton, since every field of a value has a unique field name,
-// but the reversed automaton is nondeterministic.  However, that doesn't have
-// any bearing on the algorithm here.
-//
-// We keep a queue of work to do, and a visited set.  The queue contains a set
-// of cells that are scheduled to be visited, along with the StateSet of the
-// pathregex when visiting.  The visited table contains the set of cells that
-// have already been visited, along with the StateSet when they were last
-// visited.  We build a reducedGraph that contains only the edges in the
-// intersection.
-//
-// On each step, the main search loop pulls an element from the queue.  If the
-// cell has already been visited with a StateSet that is just as large as the
-// one in the queue, quit.  Otherwise, compute the transitions for all the
-// inRefs and add the new entries to the queue.  The search teriminates when the
-// root is reached in a final state, or else the queue becomes empty and there
-// is no match.
-//
-// Security properties are inherited, meaning that the properties are propagated
-// down from the root along paths.  That means access control can't be performed
-// during the first phase.
-//
-// During the second phase, we recompute the intersection from the forward
-// automata, this time taking security into account.
-
-// PathRegex is the result of compiling the path regular expression to forward
-// and reverse NFAs.
-type PathRegex struct {
-	forwardSS forwardStateSet
-	reverseSS reverseStateSet
-}
-
-// forwardStateSet and reverseStateSet wrap the pathregex.StateSet to avoid
-// confusion about which automata they belong to.
-type forwardStateSet struct {
-	pathregex.StateSet
-}
-
-type reverseStateSet struct {
-	pathregex.StateSet
-}
-
-// reducer is used to compute the reducedGraph.
-type reducer struct {
-	snapshot *snapshot
-}
-
-// reducedGraph is the forward reference graph, pruned to include only the
-// references in the intersection automaton.  Does not prune edges inaccessible
-// due to security restrictions.
-type reducedGraph map[storage.ID]*reducedCell
-
-type reducedCell struct {
-	tags storage.TagList
-	refs refs.Set
-}
-
-type reduceVisitedSet map[storage.ID]reverseStateSet
-type reduceQueue map[storage.ID]reverseStateSet
-
-// matcher is used to perform a forward match on the reducedGraph.
-type matcher struct {
-	graph    reducedGraph
-	targetID storage.ID
-}
-
-// matchVisitedSet contains the checker/state-set encountered when visiting a
-// node.  Since an entry in the store might have multiple name, it might be
-// visited multiple times with different checkers.  The matchVisitedSet contains
-// a list of checkers and state sets visited with each checker.
-type matchVisitedSet map[storage.ID]*matchVisitedEntry
-
-type matchVisitedEntry struct {
-	checkers []*matchVisitedCheckerSet
-}
-
-type matchVisitedCheckerSet struct {
-	checker *acl.Checker
-	ss      forwardStateSet
-}
-
-// matchQueue contains the worklist for the matcher.
-type matchQueue struct {
-	queue []*matchQueueEntry
-}
-
-type matchQueueEntry struct {
-	id      storage.ID
-	checker *acl.Checker
-	ss      forwardStateSet
-}
-
-// CompilePathRegex compiles the regular expression to a PathRegex.
-func CompilePathRegex(regex string) (*PathRegex, error) {
-	forwardSS, err := pathregex.Compile(regex)
-	if err != nil {
-		return nil, err
-	}
-	reverseSS, err := pathregex.CompileReverse(regex)
-	if err != nil {
-		return nil, err
-	}
-	p := &PathRegex{
-		forwardSS: forwardStateSet{StateSet: forwardSS},
-		reverseSS: reverseStateSet{StateSet: reverseSS},
-	}
-	return p, nil
-}
-
-// PathMatch returns true iff there is a name for the store value that matches
-// the pathRegex.
-func (sn *snapshot) PathMatch(pid security.PublicID, id storage.ID, regex *PathRegex) bool {
-	r := &reducer{snapshot: sn}
-	g := r.reduce(id, regex.reverseSS)
-	m := &matcher{graph: g, targetID: id}
-	checker := sn.newPermChecker(pid)
-	return m.match(checker, sn.rootID, regex.forwardSS)
-}
-
-// reduce computes the reducedGraph (the intersection) without regard to
-// security.
-func (r *reducer) reduce(id storage.ID, rss reverseStateSet) reducedGraph {
-	visited := reduceVisitedSet{}
-	graph := reducedGraph{}
-	graph.add(id)
-	queue := reduceQueue{}
-	queue.add(id, rss)
-	for len(queue) != 0 {
-		for id, ssUpdate := range queue {
-			// Take an element from the queue.
-			delete(queue, id)
-
-			// Check whether it has already been visited.
-			ss, changed := visited.add(id, ssUpdate)
-			if !changed {
-				// We already visited this node.
-				continue
-			}
-
-			// Enqueue new entries for each of the inRefs.
-			c := r.find(id)
-			c.inRefs.Iter(func(it interface{}) bool {
-				ref := it.(*refs.Ref)
-				if ssNew := ss.step(ref.Path); !ssNew.IsReject() {
-					graph.addRef(ref, id)
-					queue.add(ref.ID, ssNew)
-				}
-				return true
-			})
-		}
-	}
-	graph.setTags(r.snapshot)
-	return graph
-}
-
-// find returns the cell for the id.  Panics if the value doesn't exist.
-func (r *reducer) find(id storage.ID) *Cell {
-	c := r.snapshot.Find(id)
-	if c == nil {
-		panic(fmt.Sprintf("dangling reference: %s", id))
-	}
-	return c
-}
-
-// add updates the visited state to include ssUpdate.  Returns true iff the
-// state set changed.
-func (visited reduceVisitedSet) add(id storage.ID, ssUpdate reverseStateSet) (reverseStateSet, bool) {
-	ss, ok := visited[id]
-	if !ok {
-		visited[id] = ssUpdate
-		return ssUpdate, true
-	}
-
-	ssNew := reverseStateSet{StateSet: ss.Union(ssUpdate.StateSet)}
-	if ssNew.Equals(ss.StateSet) {
-		// Nothing changed.
-		return ss, false
-	}
-
-	visited[id] = ssNew
-	return ssNew, true
-}
-
-// add adds a new entry to the queue.
-func (queue reduceQueue) add(id storage.ID, ss reverseStateSet) {
-	if ssCurrent, ok := queue[id]; ok {
-		ss = reverseStateSet{StateSet: ss.Union(ssCurrent.StateSet)}
-	}
-	queue[id] = ss
-}
-
-// add adds a reference if it doesn't already exist.
-func (g reducedGraph) add(id storage.ID) *reducedCell {
-	v, ok := g[id]
-	if !ok {
-		// Ignore the tags, they will be filled in later in setTags.
-		v = &reducedCell{refs: refs.Empty}
-		g[id] = v
-	}
-	return v
-}
-
-// addRef adds a forward edge (r.ID -> id) to the reduced graph.
-func (g reducedGraph) addRef(ref *refs.Ref, id storage.ID) {
-	v := g.add(ref.ID)
-	v.refs = v.refs.Put(&refs.Ref{ID: id, Path: ref.Path, Label: ref.Label})
-}
-
-// setTags sets the tags values in the graph.
-func (g reducedGraph) setTags(sn Snapshot) {
-	for id, v := range g {
-		c := sn.Find(id)
-		if c == nil {
-			panic(fmt.Sprintf("dangling reference: %s", id))
-		}
-		v.tags = c.Tags
-	}
-}
-
-// match performs a secure forward match.
-func (m *matcher) match(checker *acl.Checker, rootID storage.ID, rss forwardStateSet) bool {
-	if !m.contains(rootID) {
-		return false
-	}
-	visited := matchVisitedSet{}
-	queue := matchQueue{}
-	queue.add(rootID, checker, rss)
-	for !queue.isEmpty() {
-		// Take an entry from the queue.
-		qentry := queue.pop()
-
-		// Check whether it has already been visited.
-		ss, changed := visited.add(qentry)
-		if !changed {
-			// We already visited this node.
-			continue
-		}
-
-		// If we reached the target, we're done.
-		if qentry.id == m.targetID && ss.IsFinal() {
-			return true
-		}
-
-		// Enqueue new entries for each of the refs.
-		src := m.find(qentry.id)
-		src.refs.Iter(func(it interface{}) bool {
-			ref := it.(*refs.Ref)
-			if qentry.checker.IsAllowed(ref.Label) {
-				if ssNew := ss.step(ref.Path); !ssNew.IsReject() {
-					dst := m.find(ref.ID)
-					checker := qentry.checker.Copy()
-					checker.Update(dst.tags)
-					if checker.IsAllowed(security.ReadLabel) {
-						queue.add(ref.ID, checker, ssNew)
-					}
-				}
-			}
-			return true
-		})
-	}
-	return false
-}
-
-// contains returns true iff the reduced graph contains the ID.
-func (m *matcher) contains(id storage.ID) bool {
-	_, ok := m.graph[id]
-	return ok
-}
-
-// find returns the entry in the matchQueue.  Panics if the entry doesn't exist.
-func (m *matcher) find(id storage.ID) *reducedCell {
-	v, ok := m.graph[id]
-	if !ok {
-		panic(fmt.Sprintf("dangling reference: %s", id))
-	}
-	return v
-}
-
-// add updates the visited state to include ssUpdate.  Returns true iff the
-// state set changed.
-func (visited matchVisitedSet) add(qentry *matchQueueEntry) (forwardStateSet, bool) {
-	e, ok := visited[qentry.id]
-	if !ok {
-		c := &matchVisitedCheckerSet{checker: qentry.checker, ss: qentry.ss}
-		e = &matchVisitedEntry{checkers: []*matchVisitedCheckerSet{c}}
-		visited[qentry.id] = e
-		return c.ss, true
-	}
-
-	// Add the checker if it is different from the others.
-	c := e.addCheckerSet(qentry.checker)
-
-	// Update the state set.
-	ssNew := c.ss.Union(qentry.ss.StateSet)
-	if !ssNew.Equals(c.ss.StateSet) {
-		c.ss = forwardStateSet{StateSet: ssNew}
-		return c.ss, true
-	}
-	return c.ss, false
-}
-
-// addChecker add the checker if it is different from the others.  Returns true iff
-// the set of checkers changed.
-func (e *matchVisitedEntry) addCheckerSet(checker *acl.Checker) *matchVisitedCheckerSet {
-	for _, c := range e.checkers {
-		if checker.IsEqual(c.checker) {
-			return c
-		}
-	}
-	c := &matchVisitedCheckerSet{checker: checker}
-	e.checkers = append(e.checkers, c)
-	return c
-}
-
-// add adds a new entry to the queue.
-func (queue *matchQueue) add(id storage.ID, checker *acl.Checker, ss forwardStateSet) {
-	queue.queue = append(queue.queue, &matchQueueEntry{
-		id:      id,
-		checker: checker,
-		ss:      ss,
-	})
-}
-
-// isEmpty returns true iff the queue is empty.
-func (queue *matchQueue) isEmpty() bool {
-	return len(queue.queue) == 0
-}
-
-// pop removes an entry from the queue.
-func (queue *matchQueue) pop() *matchQueueEntry {
-	i := len(queue.queue)
-	qentry := queue.queue[i-1]
-	queue.queue = queue.queue[:i-1]
-	return qentry
-}
-
-// step advances the automaton for all components of the path.
-func (ss reverseStateSet) step(p *refs.Path) reverseStateSet {
-	for p != nil {
-		var s string
-		p, s = p.Split()
-		ss = reverseStateSet{StateSet: ss.Step(s)}
-	}
-	return ss
-}
-
-// step advances the automaton for all components of the path.
-func (ss forwardStateSet) step(p *refs.Path) forwardStateSet {
-	if p == nil {
-		return ss
-	}
-	p, s := p.Split()
-	return forwardStateSet{StateSet: ss.step(p).Step(s)}
-}
diff --git a/services/store/memstore/state/pathmatch_test.go b/services/store/memstore/state/pathmatch_test.go
deleted file mode 100644
index d7b256b..0000000
--- a/services/store/memstore/state/pathmatch_test.go
+++ /dev/null
@@ -1,375 +0,0 @@
-package state_test
-
-import (
-	"fmt"
-	"testing"
-
-	"veyron/services/store/memstore/state"
-	"veyron2/security"
-	"veyron2/storage"
-)
-
-// Simple DAG based on teams and players.
-func TestPathMatchDAG(t *testing.T) {
-	st := state.New(rootPublicID)
-	sn := st.MutableSnapshot()
-	johnID := mkdir(t, sn, rootPublicID, "/teamsapp/players/john")
-	janeID := mkdir(t, sn, rootPublicID, "/teamsapp/players/jane")
-	joanID := mkdir(t, sn, rootPublicID, "/teamsapp/players/joan")
-	link(t, sn, rootPublicID, "/teamsapp/teams/rockets/players/john", johnID)
-	link(t, sn, rootPublicID, "/teamsapp/teams/rockets/players/jane", janeID)
-	link(t, sn, rootPublicID, "/teamsapp/teams/hornets/players/jane", janeID)
-	link(t, sn, rootPublicID, "/teamsapp/teams/hornets/players/joan", joanID)
-
-	rJohn, _ := state.CompilePathRegex(".../john")
-	if !sn.PathMatch(rootPublicID, johnID, rJohn) {
-		t.Errorf("Expected match")
-	}
-
-	rTRockets, _ := state.CompilePathRegex(".../teams/rockets/...")
-	if !sn.PathMatch(rootPublicID, johnID, rTRockets) {
-		t.Errorf("Expected match")
-	}
-	if !sn.PathMatch(rootPublicID, janeID, rTRockets) {
-		t.Errorf("Expected match")
-	}
-	if sn.PathMatch(rootPublicID, joanID, rTRockets) {
-		t.Errorf("Unexpected match")
-	}
-
-	rTHornets, _ := state.CompilePathRegex(".../teams/hornets/...")
-	if sn.PathMatch(rootPublicID, johnID, rTHornets) {
-		t.Errorf("Unexpected match")
-	}
-	if !sn.PathMatch(rootPublicID, janeID, rTHornets) {
-		t.Errorf("Expected match")
-	}
-	if !sn.PathMatch(rootPublicID, joanID, rTHornets) {
-		t.Errorf("Expected match")
-	}
-
-	rTRocketsOrHornets, _ := state.CompilePathRegex(".../teams/{rockets,hornets}/...")
-	if !sn.PathMatch(rootPublicID, johnID, rTRocketsOrHornets) {
-		t.Errorf("Expected match")
-	}
-	if !sn.PathMatch(rootPublicID, janeID, rTRocketsOrHornets) {
-		t.Errorf("Expected match")
-	}
-	if !sn.PathMatch(rootPublicID, joanID, rTRocketsOrHornets) {
-		t.Errorf("Expected match")
-	}
-
-	rTJoanOrRockets, _ := state.CompilePathRegex(".../{players/joan,teams/rockets}/...")
-	if !sn.PathMatch(rootPublicID, johnID, rTJoanOrRockets) {
-		t.Errorf("Expected match")
-	}
-	if !sn.PathMatch(rootPublicID, janeID, rTJoanOrRockets) {
-		t.Errorf("Expected match")
-	}
-	if !sn.PathMatch(rootPublicID, joanID, rTJoanOrRockets) {
-		t.Errorf("Expected match")
-	}
-}
-
-// Similar to above, but introduce loops by adding a teams directory to each of
-// the players, looping back to their teams.
-func TestPathMatchLoop(t *testing.T) {
-	st := state.New(rootPublicID)
-	sn := st.MutableSnapshot()
-	johnID := mkdir(t, sn, rootPublicID, "/teamsapp/players/john")
-	janeID := mkdir(t, sn, rootPublicID, "/teamsapp/players/jane")
-	joanID := mkdir(t, sn, rootPublicID, "/teamsapp/players/joan")
-	rocketsID := mkdir(t, sn, rootPublicID, "/teamsapp/teams/rockets")
-	hornetsID := mkdir(t, sn, rootPublicID, "/teamsapp/teams/hornets")
-	link(t, sn, rootPublicID, "/teamsapp/teams/rockets/players/john", johnID)
-	link(t, sn, rootPublicID, "/teamsapp/teams/rockets/players/jane", janeID)
-	link(t, sn, rootPublicID, "/teamsapp/teams/hornets/players/jane", janeID)
-	link(t, sn, rootPublicID, "/teamsapp/teams/hornets/players/joan", joanID)
-	link(t, sn, rootPublicID, "/teamsapp/players/john/teams/rockets", rocketsID)
-	link(t, sn, rootPublicID, "/teamsapp/players/jane/teams/rockets", rocketsID)
-	link(t, sn, rootPublicID, "/teamsapp/players/jane/teams/hornets", hornetsID)
-	link(t, sn, rootPublicID, "/teamsapp/players/joan/teams/hornets", hornetsID)
-	if err := st.ApplyMutations(sn.Mutations()); err != nil {
-		t.Errorf("ApplyMutations failed: %s", err)
-	}
-
-	rJohn, _ := state.CompilePathRegex(".../john")
-	if !sn.PathMatch(rootPublicID, johnID, rJohn) {
-		t.Errorf("Expected match")
-	}
-
-	rTRockets, _ := state.CompilePathRegex(".../teams/rockets/players/*")
-	if !sn.PathMatch(rootPublicID, johnID, rTRockets) {
-		t.Errorf("Expected match")
-	}
-	if !sn.PathMatch(rootPublicID, janeID, rTRockets) {
-		t.Errorf("Expected match")
-	}
-	if sn.PathMatch(rootPublicID, joanID, rTRockets) {
-		t.Errorf("Unexpected match")
-	}
-
-	rTHornets, _ := state.CompilePathRegex(".../teams/hornets/players/*")
-	if sn.PathMatch(rootPublicID, johnID, rTHornets) {
-		t.Errorf("Unexpected match")
-	}
-	if !sn.PathMatch(rootPublicID, janeID, rTHornets) {
-		t.Errorf("Expected match")
-	}
-	if !sn.PathMatch(rootPublicID, joanID, rTHornets) {
-		t.Errorf("Expected match")
-	}
-
-	rTFancyPath, _ := state.CompilePathRegex(".../teams/rockets/players/*/teams/hornets/players/*")
-	if sn.PathMatch(rootPublicID, johnID, rTFancyPath) {
-		t.Errorf("Unexpected match")
-	}
-	if !sn.PathMatch(rootPublicID, janeID, rTFancyPath) {
-		t.Errorf("Expected match")
-	}
-	if !sn.PathMatch(rootPublicID, joanID, rTFancyPath) {
-		t.Errorf("Expected match")
-	}
-}
-
-// Similar to above, but use the explicit E field rather than the implicit
-// directory.
-func TestPathMatchFieldDAG(t *testing.T) {
-	st := state.New(rootPublicID)
-	sn := st.MutableSnapshot()
-	johnID := mkdir(t, sn, rootPublicID, "/E/teamsapp/E/players/E/john")
-	janeID := mkdir(t, sn, rootPublicID, "/E/teamsapp/E/players/E/jane")
-	joanID := mkdir(t, sn, rootPublicID, "/E/teamsapp/E/players/E/joan")
-	link(t, sn, rootPublicID, "/E/teamsapp/E/teams/E/rockets/E/players/E/john", johnID)
-	link(t, sn, rootPublicID, "/E/teamsapp/E/teams/E/rockets/E/players/E/jane", janeID)
-	link(t, sn, rootPublicID, "/E/teamsapp/E/teams/E/hornets/E/players/E/jane", janeID)
-	link(t, sn, rootPublicID, "/E/teamsapp/E/teams/E/hornets/E/players/E/joan", joanID)
-	if err := st.ApplyMutations(sn.Mutations()); err != nil {
-		t.Errorf("ApplyMutations failed: %s", err)
-	}
-
-	rJohn, _ := state.CompilePathRegex(".../E/john")
-	if !sn.PathMatch(rootPublicID, johnID, rJohn) {
-		t.Errorf("Expected match")
-	}
-
-	rTRockets, _ := state.CompilePathRegex(".../E/teams/E/rockets/E/...")
-	if !sn.PathMatch(rootPublicID, johnID, rTRockets) {
-		t.Errorf("Expected match")
-	}
-	if !sn.PathMatch(rootPublicID, janeID, rTRockets) {
-		t.Errorf("Expected match")
-	}
-	if sn.PathMatch(rootPublicID, joanID, rTRockets) {
-		t.Errorf("Unexpected match")
-	}
-
-	rTHornets, _ := state.CompilePathRegex(".../E/teams/E/hornets/E/...")
-	if sn.PathMatch(rootPublicID, johnID, rTHornets) {
-		t.Errorf("Unexpected match")
-	}
-	if !sn.PathMatch(rootPublicID, janeID, rTHornets) {
-		t.Errorf("Expected match")
-	}
-	if !sn.PathMatch(rootPublicID, joanID, rTHornets) {
-		t.Errorf("Expected match")
-	}
-
-	rTRocketsOrHornets, _ := state.CompilePathRegex(".../E/teams/E/{rockets,hornets}/E/...")
-	if !sn.PathMatch(rootPublicID, johnID, rTRocketsOrHornets) {
-		t.Errorf("Expected match")
-	}
-	if !sn.PathMatch(rootPublicID, janeID, rTRocketsOrHornets) {
-		t.Errorf("Expected match")
-	}
-	if !sn.PathMatch(rootPublicID, joanID, rTRocketsOrHornets) {
-		t.Errorf("Expected match")
-	}
-
-	rTJoanOrRockets, _ := state.CompilePathRegex(".../E/{players/E/joan,teams/E/rockets}/...")
-	if !sn.PathMatch(rootPublicID, johnID, rTJoanOrRockets) {
-		t.Errorf("Expected match")
-	}
-	if !sn.PathMatch(rootPublicID, janeID, rTJoanOrRockets) {
-		t.Errorf("Expected match")
-	}
-	if !sn.PathMatch(rootPublicID, joanID, rTJoanOrRockets) {
-		t.Errorf("Expected match")
-	}
-}
-
-func TestPathMatchFieldLoop(t *testing.T) {
-	st := state.New(rootPublicID)
-	sn := st.MutableSnapshot()
-	johnID := mkdir(t, sn, rootPublicID, "/E/teamsapp/E/players/E/john")
-	janeID := mkdir(t, sn, rootPublicID, "/E/teamsapp/E/players/E/jane")
-	joanID := mkdir(t, sn, rootPublicID, "/E/teamsapp/E/players/E/joan")
-	rocketsID := mkdir(t, sn, rootPublicID, "/E/teamsapp/E/teams/E/rockets")
-	hornetsID := mkdir(t, sn, rootPublicID, "/E/teamsapp/E/teams/E/hornets")
-	link(t, sn, rootPublicID, "/E/teamsapp/E/teams/E/rockets/E/players/E/john", johnID)
-	link(t, sn, rootPublicID, "/E/teamsapp/E/teams/E/rockets/E/players/E/jane", janeID)
-	link(t, sn, rootPublicID, "/E/teamsapp/E/teams/E/hornets/E/players/E/jane", janeID)
-	link(t, sn, rootPublicID, "/E/teamsapp/E/teams/E/hornets/E/players/E/joan", joanID)
-	link(t, sn, rootPublicID, "/E/teamsapp/E/players/E/john/E/teams/E/rockets", rocketsID)
-	link(t, sn, rootPublicID, "/E/teamsapp/E/players/E/jane/E/teams/E/rockets", rocketsID)
-	link(t, sn, rootPublicID, "/E/teamsapp/E/players/E/jane/E/teams/E/hornets", hornetsID)
-	link(t, sn, rootPublicID, "/E/teamsapp/E/players/E/joan/E/teams/E/hornets", hornetsID)
-	if err := st.ApplyMutations(sn.Mutations()); err != nil {
-		t.Errorf("ApplyMutations failed: %s", err)
-	}
-
-	rJohn, _ := state.CompilePathRegex(".../E/john")
-	if !sn.PathMatch(rootPublicID, johnID, rJohn) {
-		t.Errorf("Expected match")
-	}
-
-	rTRockets, _ := state.CompilePathRegex(".../E/teams/E/rockets/E/players/E/*")
-	if !sn.PathMatch(rootPublicID, johnID, rTRockets) {
-		t.Errorf("Expected match")
-	}
-	if !sn.PathMatch(rootPublicID, janeID, rTRockets) {
-		t.Errorf("Expected match")
-	}
-	if sn.PathMatch(rootPublicID, joanID, rTRockets) {
-		t.Errorf("Unexpected match")
-	}
-
-	rTHornets, _ := state.CompilePathRegex(".../E/teams/E/hornets/E/players/E/*")
-	if sn.PathMatch(rootPublicID, johnID, rTHornets) {
-		t.Errorf("Unexpected match")
-	}
-	if !sn.PathMatch(rootPublicID, janeID, rTHornets) {
-		t.Errorf("Expected match")
-	}
-	if !sn.PathMatch(rootPublicID, joanID, rTHornets) {
-		t.Errorf("Expected match")
-	}
-
-	rTFancyPath, _ := state.CompilePathRegex(".../E/teams/E/rockets/E/players/E/*/E/teams/E/hornets/E/players/E/*")
-	if sn.PathMatch(rootPublicID, johnID, rTFancyPath) {
-		t.Errorf("Unexpected match")
-	}
-	if !sn.PathMatch(rootPublicID, janeID, rTFancyPath) {
-		t.Errorf("Expected match")
-	}
-	if !sn.PathMatch(rootPublicID, joanID, rTFancyPath) {
-		t.Errorf("Expected match")
-	}
-}
-
-// Create a player, and add security restrictions.
-func mkSecureHome(t *testing.T, sn *state.MutableSnapshot, pid security.PublicID, name string, user security.PrincipalPattern) (storage.ID, storage.TagList) {
-	id := mkdir(t, sn, rootPublicID, fmt.Sprintf("/teamsapp/home/%s", name))
-	aclID := putPath(t, sn, rootPublicID, fmt.Sprintf("/teamsapp/home/%s/acls/rwa", name), &storage.ACL{
-		Name: name,
-		Contents: security.ACL{
-			user: security.LabelSet(security.ReadLabel | security.WriteLabel | security.AdminLabel),
-		},
-	})
-	tags := storage.TagList{
-		storage.Tag{Op: storage.RemoveACL, ACL: state.EveryoneACLID},
-		storage.Tag{Op: storage.AddInheritedACL, ACL: aclID},
-	}
-	put(t, sn, rootPublicID, fmt.Sprintf("/teamsapp/home/%s/.tags", name), tags)
-	return id, tags
-}
-
-// Check security restrictions.
-func TestPathMatchSecurity(t *testing.T) {
-	st := state.New(rootPublicID)
-	sn := st.MutableSnapshot()
-
-	// Create the players and set their ACLs.
-	johnID, johnTags := mkSecureHome(t, sn, rootPublicID, "john", johnUser)
-	janeID, janeTags := mkSecureHome(t, sn, rootPublicID, "jane", janeUser)
-	joanID, _ := mkSecureHome(t, sn, rootPublicID, "joan", joanUser)
-	rocketsID := mkdir(t, sn, rootPublicID, "/teamsapp/teams/rockets")
-	hornetsID := mkdir(t, sn, rootPublicID, "/teamsapp/teams/hornets")
-	link(t, sn, rootPublicID, "/teamsapp/teams/rockets/players/john", johnID)
-	link(t, sn, rootPublicID, "/teamsapp/teams/rockets/players/jane", janeID)
-	link(t, sn, rootPublicID, "/teamsapp/teams/hornets/players/jane", janeID)
-	link(t, sn, rootPublicID, "/teamsapp/teams/hornets/players/joan", joanID)
-	link(t, sn, rootPublicID, "/teamsapp/home/john/teams/rockets", rocketsID)
-	link(t, sn, rootPublicID, "/teamsapp/home/jane/teams/rockets", rocketsID)
-	link(t, sn, rootPublicID, "/teamsapp/home/jane/teams/hornets", hornetsID)
-	link(t, sn, rootPublicID, "/teamsapp/home/joan/teams/hornets", hornetsID)
-	if err := st.ApplyMutations(sn.Mutations()); err != nil {
-		t.Errorf("ApplyMutations failed: %s", err)
-	}
-
-	sn = st.MutableSnapshot()
-	rJohn, _ := state.CompilePathRegex(".../john")
-	if !sn.PathMatch(rootPublicID, johnID, rJohn) {
-		t.Errorf("Expected match")
-	}
-	if !sn.PathMatch(johnPublicID, johnID, rJohn) {
-		t.Errorf("Expected match")
-	}
-	if !sn.PathMatch(janePublicID, johnID, rJohn) {
-		t.Errorf("Expected match")
-	}
-
-	rTRockets, _ := state.CompilePathRegex(".../teams/rockets/players/*")
-	if !sn.PathMatch(janePublicID, johnID, rTRockets) {
-		t.Errorf("Expected match")
-	}
-	if !sn.PathMatch(janePublicID, janeID, rTRockets) {
-		t.Errorf("Expected match")
-	}
-	if sn.PathMatch(janePublicID, joanID, rTRockets) {
-		t.Errorf("Unexpected match")
-	}
-
-	rHomeJane, _ := state.CompilePathRegex(".../home/jane")
-	if _, err := sn.Get(johnPublicID, storage.ParsePath("/teamsapp/home/jane")); err == nil {
-		t.Errorf("Security error")
-	}
-	// John can't see Jane's home path.
-	if sn.PathMatch(johnPublicID, janeID, rHomeJane) {
-		t.Errorf("Unexpected match")
-	}
-	// Jane can see it.
-	if !sn.PathMatch(janePublicID, janeID, rHomeJane) {
-		t.Errorf("Expected match")
-	}
-
-	// Both can see Jane through the teams directory.
-	rPlayersJane, _ := state.CompilePathRegex(".../players/jane")
-	if !sn.PathMatch(johnPublicID, janeID, rPlayersJane) {
-		t.Errorf("Expected match")
-	}
-	if !sn.PathMatch(janePublicID, janeID, rPlayersJane) {
-		t.Errorf("Expected match")
-	}
-
-	// Restrict /teamsapp/teams to Jane.
-	put(t, sn, rootPublicID, "/teamsapp/teams/.tags", janeTags)
-	// John can still see through /teamsapp/home/john/teams/rockets/players/jane.
-	if !sn.PathMatch(johnPublicID, janeID, rPlayersJane) {
-		t.Errorf("Expected match")
-	}
-	if !sn.PathMatch(janePublicID, janeID, rPlayersJane) {
-		t.Errorf("Expected match")
-	}
-
-	// Restrict /teamsapp/teams/rockets.
-	put(t, sn, rootPublicID, "/teamsapp/teams/rockets/.tags", janeTags)
-	// We took away EveryoneACLID, but John is still inherited.
-	if !sn.PathMatch(johnPublicID, janeID, rPlayersJane) {
-		t.Errorf("Expected match")
-	}
-	if !sn.PathMatch(janePublicID, janeID, rPlayersJane) {
-		t.Errorf("Expected match")
-	}
-
-	// Take away John from the rockets too.
-	tag := storage.Tag{Op: storage.RemoveACL, ACL: johnTags[1].ACL}
-	put(t, sn, rootPublicID, "/teamsapp/teams/rockets/.tags/@", tag)
-	// John is now locked out.
-	if sn.PathMatch(johnPublicID, janeID, rPlayersJane) {
-		t.Errorf("Unexpected match")
-	}
-	if !sn.PathMatch(janePublicID, janeID, rPlayersJane) {
-		t.Errorf("Expected match")
-	}
-}
diff --git a/services/store/memstore/state/perm.go b/services/store/memstore/state/perm.go
index 712be36..7bf2df5 100644
--- a/services/store/memstore/state/perm.go
+++ b/services/store/memstore/state/perm.go
@@ -1,67 +1 @@
 package state
-
-import (
-	"veyron/services/store/memstore/acl"
-
-	"veyron2/security"
-	"veyron2/storage"
-)
-
-var (
-	// adminACLID is the storage.ID used for the administrator's default ACL.
-	AdminACLID = storage.ID{0}
-
-	// everyoneACLID is the storage.ID used for the default ACL for non-administrators.
-	EveryoneACLID = storage.ID{1}
-
-	// uidTagList is the storage.TagList for the /uid directory.  It ensures that
-	// /uid/* is accessible only to the administrators of the storage.
-	//
-	// TODO(jyh): Consider having an actual /uid object, so that the
-	// administrator could configure permissions on it.
-	uidTagList = storage.TagList{storage.Tag{Op: storage.RemoveACL, ACL: EveryoneACLID}}
-)
-
-// makeDefaultACLSet returns the default ACL for the store, allowing admin
-// universal access, and everyone else gets readonly access.
-func makeDefaultACLSet(admin security.PublicID) acl.Set {
-	adminContents := security.ACL{}
-	for _, name := range admin.Names() {
-		adminContents[security.PrincipalPattern(name)] = security.LabelSet(security.ReadLabel | security.WriteLabel | security.AdminLabel)
-	}
-	adminACL := &storage.ACL{
-		Name:     "admin",
-		Contents: adminContents,
-	}
-	everyoneACL := &storage.ACL{
-		Name:     "everyone",
-		Contents: security.ACL{security.AllPrincipals: security.LabelSet(security.ReadLabel)},
-	}
-	return acl.Set{
-		AdminACLID:    acl.Entry{ACL: adminACL, Inherited: true},
-		EveryoneACLID: acl.Entry{ACL: everyoneACL, Inherited: true},
-	}
-}
-
-// newPermChecker returns a new acl.Checker in the current state.
-func (sn *snapshot) newPermChecker(pid security.PublicID) *acl.Checker {
-	return acl.NewChecker(&sn.aclCache, pid, sn.defaultACLSet)
-}
-
-// makeFindACLFunc returns a function to fetch ACL values from the storage.
-func (sn *snapshot) makeFindACLFunc() acl.FindFunc {
-	return func(id storage.ID) *storage.ACL {
-		v, ok := sn.idTable.Get(&Cell{ID: id})
-		if !ok {
-			return nil
-		}
-		x := v.(*Cell).Value
-		if acl, ok := x.(*storage.ACL); ok {
-			return acl
-		}
-		if acl, ok := x.(storage.ACL); ok {
-			return &acl
-		}
-		return nil
-	}
-}
diff --git a/services/store/memstore/state/refs.go b/services/store/memstore/state/refs.go
index a131339..77c8560 100644
--- a/services/store/memstore/state/refs.go
+++ b/services/store/memstore/state/refs.go
@@ -67,13 +67,13 @@
 
 	// Add the inverse link.
 	c := sn.Find(r.ID)
-	c.inRefs = c.inRefs.Put(&refs.Ref{ID: id, Path: r.Path, Label: r.Label})
+	c.inRefs = c.inRefs.Put(&refs.Ref{ID: id, Path: r.Path})
 }
 
 func (sn *MutableSnapshot) removeRef(id storage.ID, r *refs.Ref) {
 	// Remove the inverse link.
 	c := sn.deref(r.ID)
-	c.inRefs = c.inRefs.Remove(&refs.Ref{ID: id, Path: r.Path, Label: r.Label})
+	c.inRefs = c.inRefs.Remove(&refs.Ref{ID: id, Path: r.Path})
 
 	// Update refcount.
 	sn.unref(r.ID)
diff --git a/services/store/memstore/state/security_test.go b/services/store/memstore/state/security_test.go
deleted file mode 100644
index 1c0ddfb..0000000
--- a/services/store/memstore/state/security_test.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package state_test
-
-import (
-	"reflect"
-	"testing"
-
-	"veyron/services/store/memstore/state"
-
-	"veyron2/security"
-	"veyron2/storage"
-)
-
-func TestSecurity(t *testing.T) {
-	st := state.New(rootPublicID)
-	sn := st.MutableSnapshot()
-
-	// Create /Users/jane and give her RWA permissions.
-	aclID := putPath(t, sn, rootPublicID, "/Users/jane/acls/janeRWA", &storage.ACL{
-		Name: "Jane",
-		Contents: security.ACL{
-			janeUser: security.LabelSet(security.ReadLabel | security.WriteLabel | security.AdminLabel),
-		},
-	})
-	janeTags := storage.TagList{storage.Tag{Op: storage.AddInheritedACL, ACL: aclID}}
-	put(t, sn, rootPublicID, "/Users/jane/.tags", janeTags)
-
-	// John should not be able to do anything.
-	{
-		if _, err := maybePut(sn, johnPublicID, "/", Node{}); err == nil {
-			t.Errorf("Security violation")
-		}
-		if _, err := maybePut(sn, johnPublicID, "/Users/jane", Node{}); err == nil {
-			t.Errorf("Security violation")
-		}
-		if _, err := maybeGet(sn, johnPublicID, "/Users/jane/.tags"); err == nil {
-			t.Errorf("Security violation")
-		}
-	}
-
-	// Jane can access her own directory.
-	{
-		if _, err := maybePut(sn, janePublicID, "/", Node{}); err == nil {
-			t.Errorf("Security violation")
-		}
-		if _, err := maybeGet(sn, janePublicID, "/Users/jane"); err != nil {
-			t.Errorf("Expected /Users/jane to exist: %s", err)
-		}
-		if _, err := maybePut(sn, janePublicID, "/Users/jane", Node{}); err != nil {
-			t.Errorf("Unexpected security error: %s %s", err, janePublicID)
-		}
-		if tags, err := maybeGet(sn, janePublicID, "/Users/jane/.tags"); err == nil {
-			if !reflect.DeepEqual(janeTags, tags) {
-				t.Errorf("Expected %+v, got %+v", janeTags, tags)
-			}
-		} else {
-			t.Errorf("Unexpected security error: %s", err)
-		}
-	}
-
-	// Jane gives John read/write permission.
-	var johnTag storage.Tag
-	{
-		aclID := putPath(t, sn, janePublicID, "/Users/jane/acls/johnRW", storage.ACL{
-			Name: "John",
-			Contents: security.ACL{
-				johnUser: security.LabelSet(security.ReadLabel | security.WriteLabel),
-			},
-		})
-		johnTag = storage.Tag{Op: storage.AddInheritedACL, ACL: aclID}
-		// The @ is a pseudo-index, meaning append the tag to the list of tags.
-		put(t, sn, janePublicID, "/Users/jane/.tags/@", johnTag)
-	}
-
-	// Jane can still access.
-	janeTags = append(janeTags, johnTag)
-	{
-		if _, err := maybeGet(sn, janePublicID, "/Users/jane"); err != nil {
-			t.Errorf("Expected /Users/jane to exist: %s", err)
-		}
-		if _, err := maybePut(sn, janePublicID, "/Users/jane", Node{}); err != nil {
-			t.Errorf("Unexpected security error: %s", err)
-		}
-		if tags, err := maybeGet(sn, janePublicID, "/Users/jane/.tags"); err == nil {
-			if !reflect.DeepEqual(janeTags, tags) {
-				t.Errorf("Expected %+v, got %+v", janeTags, tags)
-			}
-		} else {
-			t.Errorf("Unexpected security error: %s", err)
-		}
-	}
-
-	// John also has access.
-	{
-		if _, err := maybePut(sn, johnPublicID, "/Users/jane", Node{}); err != nil {
-			t.Errorf("Unexpected error: %s", err)
-		}
-		mkdir(t, sn, johnPublicID, "/Users/jane/john")
-
-		// John is still not allowed to access the tags.
-		if _, err := maybeGet(sn, johnPublicID, "/Users/jane/.tags"); err == nil {
-			t.Errorf("Security violation")
-		}
-	}
-}
diff --git a/services/store/memstore/state/snapshot.go b/services/store/memstore/state/snapshot.go
index 9c352f5..80f6372 100644
--- a/services/store/memstore/state/snapshot.go
+++ b/services/store/memstore/state/snapshot.go
@@ -3,7 +3,6 @@
 import (
 	"reflect"
 
-	"veyron/services/store/memstore/acl"
 	"veyron/services/store/memstore/field"
 	"veyron/services/store/memstore/refs"
 
@@ -19,10 +18,6 @@
 	// of the specified path are returned.
 	NewIterator(pid security.PublicID, path storage.PathName, pathFilter PathFilter, filter IterFilter) Iterator
 
-	// PathMatch returns true iff there is a name for the store value that
-	// matches the pathRegex.
-	PathMatch(pid security.PublicID, id storage.ID, regex *PathRegex) bool
-
 	// Find performs a lookup based on storage.ID, returning nil if the cell is not found.
 	Find(id storage.ID) *Cell
 
@@ -57,29 +52,16 @@
 
 	// rootID is the identifier of the root object.
 	rootID storage.ID
-
-	// aclCache caches a set of ACLs.
-	aclCache acl.Cache
-
-	// defaultACLSet is the ACLSet used to access the root directory.
-	defaultACLSet acl.Set
 }
 
 // newSnapshot returns an empty snapshot.
 func newSnapshot(admin security.PublicID) snapshot {
 	sn := snapshot{
-		idTable:       emptyIDTable,
-		defaultACLSet: makeDefaultACLSet(admin),
+		idTable: emptyIDTable,
 	}
-	sn.aclCache = acl.NewCache(sn.makeFindACLFunc())
 	return sn
 }
 
-// resetACLCache resets the aclCache.
-func (sn *snapshot) resetACLCache() {
-	sn.aclCache.UpdateFinder(sn.makeFindACLFunc())
-}
-
 // Find performs a lookup based on storage.ID, returning nil if the cell is not found.
 func (sn *snapshot) Find(id storage.ID) *Cell {
 	v, ok := sn.idTable.Get(&Cell{ID: id})
@@ -91,9 +73,8 @@
 
 // Get implements the Snapshot method.
 func (sn *snapshot) Get(pid security.PublicID, path storage.PathName) (*storage.Entry, error) {
-	checker := sn.newPermChecker(pid)
 	// Pass nil for 'mutations' since the snapshot is immutable.
-	cell, suffix, v := sn.resolveCell(checker, path, nil)
+	cell, suffix, v := sn.resolveCell(path, nil)
 	if cell == nil {
 		return nil, errNotFound
 	}
@@ -111,7 +92,7 @@
 // Returns (cell, suffix, v), where cell contains the value, suffix is the path
 // to the value, v is the value itself.  If the operation failed, the returned
 // cell is nil.
-func (sn *snapshot) resolveCell(checker *acl.Checker, path storage.PathName, mu *Mutations) (*Cell, storage.PathName, interface{}) {
+func (sn *snapshot) resolveCell(path storage.PathName, mu *Mutations) (*Cell, storage.PathName, interface{}) {
 	cell := sn.Find(sn.rootID)
 	if cell == nil {
 		return nil, nil, nil
@@ -120,23 +101,9 @@
 		if mu != nil {
 			mu.addPrecondition(cell)
 		}
-		checker.Update(cell.Tags)
 		var v reflect.Value
 		var suffix storage.PathName
-		if len(path) > 0 && path[0] == refs.TagsDirName {
-			if !checker.IsAllowed(security.AdminLabel) {
-				// Access to .tags requires admin priviledges.
-				return nil, nil, errPermissionDenied
-			}
-			v, suffix = field.Get(cell.Tags, path[1:])
-		} else {
-			if !checker.IsAllowed(security.ReadLabel) {
-				// Do not return errPermissionDenied because that would leak the
-				// existence of the inaccessible value.
-				return nil, nil, nil
-			}
-			v, suffix = field.Get(cell.Value, path)
-		}
+		v, suffix = field.Get(cell.Value, path)
 		x := v.Interface()
 		if id, ok := x.(storage.ID); ok {
 			// Always dereference IDs.
diff --git a/services/store/memstore/state/state.go b/services/store/memstore/state/state.go
index 04f914c..c398b86 100644
--- a/services/store/memstore/state/state.go
+++ b/services/store/memstore/state/state.go
@@ -75,11 +75,6 @@
 }
 
 // ApplyMutations applies a set of mutations atomically.
-//
-// We don't need to check permissions because:
-//    1. Permissions were checked as the mutations were created.
-//    2. Preconditions ensure that all paths to modified values haven't changed.
-//    3. The client cannot fabricate a mutations value.
 func (st *State) ApplyMutations(mu *Mutations) error {
 	// Assign a timestamp.
 	ts := uint64(time.Now().UnixNano())
@@ -125,14 +120,12 @@
 	for id, m := range mu.Delta {
 		d := refs.BuildDir(m.Dir)
 		cl, ok := table.Get(&Cell{ID: id})
-		sn.aclCache.Invalidate(id)
 		if !ok {
 			c := &Cell{
 				ID:      id,
 				Version: m.Postcondition,
 				Value:   m.Value,
 				Dir:     d,
-				Tags:    m.Tags,
 				refs:    m.refs,
 				inRefs:  refs.Empty,
 			}
@@ -144,7 +137,6 @@
 			cp.Version = m.Postcondition
 			cp.Value = m.Value
 			cp.Dir = d
-			cp.Tags = m.Tags
 			cp.refs = m.refs
 			table = table.Put(&cp)
 			updates = append(updates, &refUpdate{id: c.ID, before: c.refs, after: cp.refs})
diff --git a/services/store/memstore/state/state_test.go b/services/store/memstore/state/state_test.go
index 1933631..7a8045f 100644
--- a/services/store/memstore/state/state_test.go
+++ b/services/store/memstore/state/state_test.go
@@ -16,14 +16,6 @@
 
 var (
 	rootPublicID security.PublicID = security.FakePublicID("root")
-	johnPublicID security.PublicID = security.FakePublicID("john")
-	janePublicID security.PublicID = security.FakePublicID("jane")
-	joanPublicID security.PublicID = security.FakePublicID("joan")
-
-	rootUser = security.PrincipalPattern("fake/root")
-	johnUser = security.PrincipalPattern("fake/john")
-	janeUser = security.PrincipalPattern("fake/jane")
-	joanUser = security.PrincipalPattern("fake/joan")
 )
 
 // makeParentNodes creates the parent nodes if they do not already exist.
diff --git a/services/store/memstore/watch/raw_processor.go b/services/store/memstore/watch/raw_processor.go
index 68dc10a..962e0bd 100644
--- a/services/store/memstore/watch/raw_processor.go
+++ b/services/store/memstore/watch/raw_processor.go
@@ -83,7 +83,6 @@
 			Version:      cell.Version,
 			IsRoot:       isRoot,
 			Value:        cell.Value,
-			Tags:         cell.Tags,
 			Dir:          flattenDir(refs.FlattenDir(cell.Dir)),
 		}
 		change := watch.Change{
@@ -148,7 +147,6 @@
 			Version:      mu.Postcondition,
 			IsRoot:       isRoot,
 			Value:        mu.Value,
-			Tags:         mu.Tags,
 			Dir:          flattenDir(mu.Dir),
 		}
 		// TODO(tilaks): don't clone value.
diff --git a/services/store/raw/service.vdl b/services/store/raw/service.vdl
index 3384837..4ec7e2c 100644
--- a/services/store/raw/service.vdl
+++ b/services/store/raw/service.vdl
@@ -38,9 +38,6 @@
   // Value is value stored at this entry.
   Value any
 
-  // Tags specify permissions on this entry.
-  Tags storage.TagList
-
   // Dir is the implicit directory of this entry, and may contain references
   // to other entries in the store.
   Dir []storage.DEntry
diff --git a/services/store/raw/service.vdl.go b/services/store/raw/service.vdl.go
index 0cbbb42..1eb0cfd 100644
--- a/services/store/raw/service.vdl.go
+++ b/services/store/raw/service.vdl.go
@@ -35,8 +35,6 @@
 	IsRoot bool
 	// Value is value stored at this entry.
 	Value _gen_vdlutil.Any
-	// Tags specify permissions on this entry.
-	Tags storage.TagList
 	// Dir is the implicit directory of this entry, and may contain references
 	// to other entries in the store.
 	Dir []storage.DEntry
@@ -479,7 +477,7 @@
 		OutArgs: []_gen_ipc.MethodArgument{
 			{Name: "", Type: 68},
 		},
-		InStream: 80,
+		InStream: 77,
 	}
 	result.Methods["Watch"] = _gen_ipc.MethodSignature{
 		InArgs: []_gen_ipc.MethodArgument{
@@ -512,27 +510,20 @@
 				_gen_wiretype.FieldType{Type: 0x47, Name: "Changes"},
 			},
 			"veyron2/services/watch.ChangeBatch", []string(nil)},
-		_gen_wiretype.ArrayType{Elem: 0x41, Len: 0x10, Name: "veyron2/storage.ID", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x35, Name: "veyron2/storage.Version", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x32, Name: "veyron2/storage.TagOp", Tags: []string(nil)}, _gen_wiretype.StructType{
-			[]_gen_wiretype.FieldType{
-				_gen_wiretype.FieldType{Type: 0x4b, Name: "Op"},
-				_gen_wiretype.FieldType{Type: 0x49, Name: "ACL"},
-			},
-			"veyron2/storage.Tag", []string(nil)},
-		_gen_wiretype.SliceType{Elem: 0x4c, Name: "veyron2/storage.TagList", Tags: []string(nil)}, _gen_wiretype.StructType{
+		_gen_wiretype.ArrayType{Elem: 0x41, Len: 0x10, Name: "veyron2/storage.ID", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x35, Name: "veyron2/storage.Version", Tags: []string(nil)}, _gen_wiretype.StructType{
 			[]_gen_wiretype.FieldType{
 				_gen_wiretype.FieldType{Type: 0x3, Name: "Name"},
 				_gen_wiretype.FieldType{Type: 0x49, Name: "ID"},
 			},
 			"veyron2/storage.DEntry", []string(nil)},
-		_gen_wiretype.SliceType{Elem: 0x4e, Name: "", Tags: []string(nil)}, _gen_wiretype.StructType{
+		_gen_wiretype.SliceType{Elem: 0x4b, Name: "", Tags: []string(nil)}, _gen_wiretype.StructType{
 			[]_gen_wiretype.FieldType{
 				_gen_wiretype.FieldType{Type: 0x49, Name: "ID"},
 				_gen_wiretype.FieldType{Type: 0x4a, Name: "PriorVersion"},
 				_gen_wiretype.FieldType{Type: 0x4a, Name: "Version"},
 				_gen_wiretype.FieldType{Type: 0x2, Name: "IsRoot"},
 				_gen_wiretype.FieldType{Type: 0x45, Name: "Value"},
-				_gen_wiretype.FieldType{Type: 0x4d, Name: "Tags"},
-				_gen_wiretype.FieldType{Type: 0x4f, Name: "Dir"},
+				_gen_wiretype.FieldType{Type: 0x4c, Name: "Dir"},
 			},
 			"veyron/services/store/raw.Mutation", []string(nil)},
 	}
diff --git a/services/store/stored/main.go b/services/store/stored/main.go
index 0988933..5104a64 100644
--- a/services/store/stored/main.go
+++ b/services/store/stored/main.go
@@ -14,14 +14,17 @@
 
 import (
 	"flag"
+	"fmt"
 	"log"
 	"os"
 	"os/user"
 
 	vflag "veyron/security/flag"
 	"veyron/services/store/server"
+	"veyron/services/store/viewer"
 
 	"veyron2/rt"
+	"veyron2/storage/vstore"
 
 	_ "veyron/services/store/typeregistryhack"
 )
@@ -29,8 +32,11 @@
 var (
 	mountName string
 	dbName    = flag.String("db", "/var/tmp/veyron_store.db", "Metadata database")
-	// TODO(rthellend): Remove the address flag when the config manager is working.
-	address = flag.String("address", ":0", "Address to listen on.")
+	// TODO(rthellend): Remove the address flag when the config manager is
+	// working.
+	address    = flag.String("address", ":0", "Address to listen on")
+	viewerPort = flag.Int("viewerPort", 5000,
+		"IPV4 port to serve viewer from, or 0 to disable viewer")
 )
 
 func init() {
@@ -46,8 +52,7 @@
 	flag.StringVar(&mountName, "name", dir, "Mount point for media")
 }
 
-// Main starts the content service, taking arguments from the command line
-// flags.
+// main starts the store service, taking args from command line flags.
 func main() {
 	r := rt.Init()
 
@@ -58,7 +63,8 @@
 	}
 
 	// Create a new StoreService.
-	storeService, err := server.New(server.ServerConfig{Admin: r.Identity().PublicID(), DBName: *dbName})
+	storeService, err := server.New(
+		server.ServerConfig{Admin: r.Identity().PublicID(), DBName: *dbName})
 	if err != nil {
 		log.Fatal("server.New() failed: ", err)
 	}
@@ -80,6 +86,15 @@
 		log.Fatal("s.Serve() failed: ", err)
 	}
 
+	// Run viewer if requested.
+	if *viewerPort > 0 {
+		vst, err := vstore.New(mountName)
+		if err != nil {
+			log.Fatalf("Failed to start viewer: %s", err)
+		}
+		go viewer.ListenAndServe(fmt.Sprintf(":%d", *viewerPort), vst)
+	}
+
 	// Wait forever.
 	done := make(chan struct{})
 	<-done
diff --git a/services/store/typeregistryhack/init.go b/services/store/typeregistryhack/init.go
index 72b5216..bea5d02 100644
--- a/services/store/typeregistryhack/init.go
+++ b/services/store/typeregistryhack/init.go
@@ -8,7 +8,7 @@
 	// Register boxes types.
 	"veyron/examples/boxes"
 	// Register mdb types.
-	_ "veyron/examples/storage/mdb/schema"
+	_ "veyron/examples/mdb/schema"
 	// Register todos types.
 	_ "veyron/examples/todos/schema"
 	// Register bank types.
diff --git a/examples/storage/viewer/reflect.go b/services/store/viewer/reflect.go
similarity index 100%
rename from examples/storage/viewer/reflect.go
rename to services/store/viewer/reflect.go
diff --git a/examples/storage/viewer/value.go b/services/store/viewer/value.go
similarity index 100%
rename from examples/storage/viewer/value.go
rename to services/store/viewer/value.go
diff --git a/examples/storage/viewer/viewer.go b/services/store/viewer/viewer.go
similarity index 96%
rename from examples/storage/viewer/viewer.go
rename to services/store/viewer/viewer.go
index f7cee5b..0c6dc8c 100644
--- a/examples/storage/viewer/viewer.go
+++ b/services/store/viewer/viewer.go
@@ -1,4 +1,5 @@
-// package viewer exports a store through an HTTP server, with the following features.
+// package viewer exports a store through an HTTP server, with the following
+// features.
 //
 // URL paths correspond to store paths.  For example, if the URL is
 // http://myhost/a/b/c, the store value that is fetched is /a/b/c.
@@ -8,12 +9,13 @@
 // value that they format, using a path /templates/<pkgPath>/<typeName>.  For
 // example, suppose we are viewing the page for /movies/Inception, and it
 // contains a value of type *examples/store/mdb/schema.Movie.  We fetch the
-// template /templates/examples/store/mdb/schema/Movie, which must be a string in
-// html/template format.  If it exists, the template is compiled and used to
+// template /templates/examples/store/mdb/schema/Movie, which must be a string
+// in html/template format.  If it exists, the template is compiled and used to
 // print the value.  If the template does not exist, the value is formatted in
 // raw form.
 //
-// String values that have a path ending with suffix .css are printed in raw form.
+// String values that have a path ending with suffix .css are printed in raw
+// form.
 package viewer
 
 import (
diff --git a/services/wsprd/app/app.go b/services/wsprd/app/app.go
new file mode 100644
index 0000000..c7ba3c1
--- /dev/null
+++ b/services/wsprd/app/app.go
@@ -0,0 +1,546 @@
+// Package app contains the struct that keeps per-JavaScript-app state and
+// handles translating JavaScript requests to veyron requests and vice versa.
+package app
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"sync"
+
+	"veyron/services/wsprd/ipc/client"
+	"veyron/services/wsprd/ipc/server"
+	"veyron/services/wsprd/ipc/stream"
+	"veyron/services/wsprd/lib"
+	"veyron/services/wsprd/signature"
+	"veyron2"
+	"veyron2/ipc"
+	"veyron2/rt"
+	"veyron2/security"
+	"veyron2/verror"
+	"veyron2/vlog"
+	"veyron2/vom"
+	vom_wiretype "veyron2/vom/wiretype"
+	wiretype_build "veyron2/wiretype/build"
+)
+
+// Temporary holder of RPC so that we can store the unprocessed args.
+type veyronTempRPC struct {
+	Name        string
+	Method      string
+	InArgs      []json.RawMessage
+	NumOutArgs  int32
+	IsStreaming bool
+}
+
+type veyronRPC struct {
+	Name        string
+	Method      string
+	InArgs      []interface{}
+	NumOutArgs  int32
+	IsStreaming bool
+}
+
+// A request from JavaScript to serve a service under a particular name.
+type serveRequest struct {
+	Name     string
+	ServerId uint64
+	Service  signature.JSONServiceSignature
+}
+
+type outstandingStream struct {
+	stream stream.Sender
+	inType vom.Type
+}
+
+// Controller represents all the state of a Veyron Web App.  This is the struct
+// that is in charge of performing all the veyron operations.
+type Controller struct {
+	// Protects outstandingStreams and outstandingServerRequests.
+	sync.Mutex
+
+	logger vlog.Logger
+
+	// A set of options that will be used to construct a runtime.  This will
+	// eventually be removed, since we should construct the runtime in
+	// the constructor instead of at a later point.
+	opts []veyron2.ROpt
+
+	// The runtime to use to create new clients.
+	rt veyron2.Runtime
+
+	// Used to generate unique ids for requests initiated by the proxy.
+	// These ids will be even so they don't collide with the ids generated
+	// by the client.
+	lastGeneratedId int64
+
+	// Streams for the outstanding requests.
+	outstandingStreams map[int64]outstandingStream
+
+	// Maps flowids to the server that owns them.
+	flowMap map[int64]*server.Server
+
+	// A manager that handles fetching and caching signatures of remote services.
+	signatureManager lib.SignatureManager
+
+	// We maintain multiple Veyron servers per websocket pipe for serving
+	// JavaScript services.
+	servers map[uint64]*server.Server
+
+	// Creates a client writer for a given flow.  This is a member so that tests can override
+	// the default implementation.
+	writerCreator func(id int64) lib.ClientWriter
+
+	// There is only one client per Controller since there is only one identity per app.
+	client ipc.Client
+
+	veyronProxyEP string
+
+	// privateId associated with the app.
+	// TODO(bjornick): We probably don't need the identity anymore. Verify and then remove.
+	privateId security.PrivateID
+}
+
+// NewController creates a new Controller.  writerCreator will be used to create a new flow for rpcs to
+// javascript server. veyronProxyEP is an endpoint for the veyron proxy to serve through.  It can't be empty.
+// opts are any options that should be passed to the rt.New(), such as the mounttable root.
+func NewController(writerCreator func(id int64) lib.ClientWriter, veyronProxyEP string, opts ...veyron2.ROpt) *Controller {
+	controller := &Controller{writerCreator: writerCreator, veyronProxyEP: veyronProxyEP, opts: opts}
+	controller.setup()
+	return controller
+}
+
+// finishCall waits for the call to finish and write out the response to w.
+func (c *Controller) finishCall(w lib.ClientWriter, clientCall ipc.Call, msg *veyronRPC) {
+	if msg.IsStreaming {
+		for {
+			var item interface{}
+			if err := clientCall.Recv(&item); err != nil {
+				if err == io.EOF {
+					break
+				}
+				w.Error(err) // Send streaming error as is
+				return
+			}
+			if err := w.Send(lib.ResponseStream, item); err != nil {
+				w.Error(verror.Internalf("unable to marshal: %v", item))
+			}
+		}
+
+		if err := w.Send(lib.ResponseStreamClose, nil); err != nil {
+			w.Error(verror.Internalf("unable to marshal close stream message"))
+		}
+	}
+
+	results := make([]interface{}, msg.NumOutArgs)
+	// This array will have pointers to the values in result.
+	resultptrs := make([]interface{}, msg.NumOutArgs)
+	for ax := range results {
+		resultptrs[ax] = &results[ax]
+	}
+	if err := clientCall.Finish(resultptrs...); err != nil {
+		// return the call system error as is
+		w.Error(err)
+		return
+	}
+	// for now we assume last out argument is always error
+	if len(results) < 1 {
+		w.Error(verror.Internalf("client call did not return any results"))
+		return
+	}
+
+	if err, ok := results[len(results)-1].(error); ok {
+		// return the call Application error as is
+		w.Error(err)
+		return
+	}
+
+	if err := w.Send(lib.ResponseFinal, results[0:len(results)-1]); err != nil {
+		w.Error(verror.Internalf("error marshalling results: %v", err))
+	}
+}
+
+// UpdateIdentity updates the identity used by the Controller. This must be called before any veyron requests are
+// made. This is only temporary as in the future, we'd expect to set the identity at construction time.
+func (c *Controller) UpdateIdentity(identity security.PrivateID) error {
+	c.Lock()
+	defer c.Unlock()
+	args := c.opts
+	if identity != nil {
+		args = append(c.opts, veyron2.RuntimeID(identity))
+	}
+	r, err := rt.New(args...)
+	if err != nil {
+		return err
+	}
+	client, err := r.NewClient(veyron2.CallTimeout(ipc.NoTimeout))
+	if err != nil {
+		return err
+	}
+	c.rt = r
+	c.logger = c.rt.Logger()
+	c.client = client
+	return nil
+}
+
+func (c *Controller) startCall(w lib.ClientWriter, msg *veyronRPC) (ipc.Call, error) {
+	c.Lock()
+	defer c.Unlock()
+	if c.client == nil {
+		return nil, verror.BadArgf("no client created")
+	}
+	methodName := lib.UppercaseFirstCharacter(msg.Method)
+	clientCall, err := c.client.StartCall(c.rt.TODOContext(), msg.Name, methodName, msg.InArgs)
+	if err != nil {
+		return nil, fmt.Errorf("error starting call (name: %v, method: %v, args: %v): %v", msg.Name, methodName, msg.InArgs, err)
+	}
+
+	return clientCall, nil
+}
+
+// Implements the serverHelper interface
+
+// CreateNewFlow creates a new server flow that will be used to write out
+// streaming messages to JavaScript.
+func (c *Controller) CreateNewFlow(s *server.Server, stream stream.Sender) *server.Flow {
+	c.Lock()
+	defer c.Unlock()
+	id := c.lastGeneratedId
+	c.lastGeneratedId += 2
+	c.flowMap[id] = s
+	c.outstandingStreams[id] = outstandingStream{stream, vom_wiretype.Type{ID: 1}}
+	return &server.Flow{ID: id, Writer: c.writerCreator(id)}
+}
+
+// CleanupFlow removes the bookkeeping for a previously created flow.
+func (c *Controller) CleanupFlow(id int64) {
+	c.Lock()
+	defer c.Unlock()
+	delete(c.outstandingStreams, id)
+	delete(c.flowMap, id)
+}
+
+// GetLogger returns a Veyron logger to use.
+func (c *Controller) GetLogger() vlog.Logger {
+	return c.logger
+}
+
+// RT returns the runtime of the app.
+func (c *Controller) RT() veyron2.Runtime {
+	return c.rt
+}
+
+// Cleanup Cleans up any outstanding rpcs.
+func (c *Controller) Cleanup() {
+	c.logger.VI(0).Info("Cleaning up websocket")
+	c.Lock()
+	defer c.Unlock()
+	for _, stream := range c.outstandingStreams {
+		if call, ok := stream.stream.(ipc.Call); ok {
+			call.Cancel()
+		}
+	}
+
+	for _, server := range c.servers {
+		server.Stop()
+	}
+}
+
+func (c *Controller) setup() {
+	c.signatureManager = lib.NewSignatureManager()
+	c.outstandingStreams = make(map[int64]outstandingStream)
+	c.flowMap = make(map[int64]*server.Server)
+	c.servers = make(map[uint64]*server.Server)
+}
+
+func (c *Controller) sendParsedMessageOnStream(id int64, msg interface{}, w lib.ClientWriter) {
+	c.Lock()
+	defer c.Unlock()
+	stream := c.outstandingStreams[id].stream
+	if stream == nil {
+		w.Error(fmt.Errorf("unknown stream"))
+		return
+	}
+
+	stream.Send(msg, w)
+
+}
+
+// SendOnStream writes data on id's stream, reporting any errors via w.
+func (c *Controller) SendOnStream(id int64, data string, w lib.ClientWriter) {
+	c.Lock()
+	typ := c.outstandingStreams[id].inType
+	c.Unlock()
+	if typ == nil {
+		vlog.Errorf("no inType for stream %d (%q)", id, data)
+		return
+	}
+	payload, err := vom.JSONToObject(data, typ)
+	if err != nil {
+		vlog.Errorf("error while converting json to InStreamType (%s): %v", data, err)
+		return
+	}
+	c.sendParsedMessageOnStream(id, payload, w)
+}
+
+// sendVeyronRequest makes a veyron request for the given flowId.  If signal is
+// non-nil, it will receive the call object after it has been constructed.
+func (c *Controller) sendVeyronRequest(id int64, veyronMsg *veyronRPC, w lib.ClientWriter, signal chan ipc.Stream) {
+	// We have to make the start call synchronous so we can make sure that we
+	// populate the call map before we handle a receive call.
+	call, err := c.startCall(w, veyronMsg)
+	if err != nil {
+		w.Error(verror.Internalf("can't start Veyron Request: %v", err))
+		return
+	}
+
+	if signal != nil {
+		signal <- call
+	}
+
+	c.finishCall(w, call, veyronMsg)
+	if signal != nil {
+		c.Lock()
+		delete(c.outstandingStreams, id)
+		c.Unlock()
+	}
+}
+
+// HandleVeyronRequest starts a veyron rpc and returns before the rpc has been completed.
+func (c *Controller) HandleVeyronRequest(id int64, data string, w lib.ClientWriter) {
+	veyronMsg, inStreamType, err := c.parseVeyronRequest(bytes.NewBufferString(data))
+	if err != nil {
+		w.Error(verror.Internalf("can't parse Veyron Request: %v", err))
+		return
+	}
+
+	c.Lock()
+	defer c.Unlock()
+	// If this rpc is streaming, we would expect that the client would try to send
+	// on this stream.  Since the initial handshake is done asynchronously, we have
+	// to basically put a queueing stream in the map before we make the async call
+	// so that the future sends on the stream can see the queuing stream, even if
+	// the client call isn't actually ready yet.
+	var signal chan ipc.Stream
+	if veyronMsg.IsStreaming {
+		signal = make(chan ipc.Stream)
+		c.outstandingStreams[id] = outstandingStream{
+			stream: client.StartQueueingStream(signal),
+			inType: inStreamType,
+		}
+	}
+	go c.sendVeyronRequest(id, veyronMsg, w, signal)
+}
+
+// CloseStream closes the stream for a given id.
+func (c *Controller) CloseStream(id int64) {
+	c.Lock()
+	defer c.Unlock()
+	stream := c.outstandingStreams[id].stream
+	if stream == nil {
+		c.logger.Errorf("close called on non-existent call: %v", id)
+		return
+	}
+
+	var call client.QueueingStream
+	var ok bool
+	if call, ok = stream.(client.QueueingStream); !ok {
+		c.logger.Errorf("can't close server stream: %v", id)
+		return
+	}
+
+	if err := call.Close(); err != nil {
+		c.logger.Errorf("client call close failed with: %v", err)
+	}
+}
+
+func (c *Controller) maybeCreateServer(serverId uint64) (*server.Server, error) {
+	c.Lock()
+	defer c.Unlock()
+	if server, ok := c.servers[serverId]; ok {
+		return server, nil
+	}
+	server, err := server.NewServer(serverId, c.veyronProxyEP, c)
+	if err != nil {
+		return nil, err
+	}
+	c.servers[serverId] = server
+	return server, nil
+}
+
+func (c *Controller) removeServer(serverId uint64) {
+	c.Lock()
+	server := c.servers[serverId]
+	if server == nil {
+		c.Unlock()
+		return
+	}
+	delete(c.servers, serverId)
+	c.Unlock()
+
+	server.Stop()
+}
+
+func (c *Controller) serve(serveRequest serveRequest, w lib.ClientWriter) {
+	// Create a server for the websocket pipe, if it does not exist already
+	server, err := c.maybeCreateServer(serveRequest.ServerId)
+	if err != nil {
+		w.Error(verror.Internalf("error creating server: %v", err))
+	}
+
+	c.logger.VI(2).Infof("serving under name: %q", serveRequest.Name)
+
+	endpoint, err := server.Serve(serveRequest.Name, serveRequest.Service)
+	if err != nil {
+		w.Error(verror.Internalf("error serving service: %v", err))
+		return
+	}
+	// Send the endpoint back
+	if err := w.Send(lib.ResponseFinal, endpoint); err != nil {
+		w.Error(verror.Internalf("error marshalling results: %v", err))
+		return
+	}
+}
+
+// HandleServeRequest takes a request to serve a server, creates
+// a server, registers the provided services and sends the endpoint back.
+func (c *Controller) HandleServeRequest(data string, w lib.ClientWriter) {
+	// Decode the serve request which includes IDL, registered services and name
+	var serveRequest serveRequest
+	decoder := json.NewDecoder(bytes.NewBufferString(data))
+	if err := decoder.Decode(&serveRequest); err != nil {
+		w.Error(verror.Internalf("can't unmarshal JSONMessage: %v", err))
+		return
+	}
+	c.serve(serveRequest, w)
+}
+
+// HandleStopRequest takes a request to stop a server.
+func (c *Controller) HandleStopRequest(data string, w lib.ClientWriter) {
+
+	var serverId uint64
+	decoder := json.NewDecoder(bytes.NewBufferString(data))
+	if err := decoder.Decode(&serverId); err != nil {
+		w.Error(verror.Internalf("can't unmarshal JSONMessage: %v", err))
+		return
+	}
+
+	c.removeServer(serverId)
+
+	// Send true to indicate stop has finished
+	if err := w.Send(lib.ResponseFinal, true); err != nil {
+		w.Error(verror.Internalf("error marshalling results: %v", err))
+		return
+	}
+}
+
+// HandleServerResponse handles the completion of outstanding calls to JavaScript services
+// by filling the corresponding channel with the result from JavaScript.
+func (c *Controller) HandleServerResponse(id int64, data string) {
+	c.Lock()
+	server := c.flowMap[id]
+	c.Unlock()
+	if server == nil {
+		c.logger.Errorf("unexpected result from JavaScript. No channel "+
+			"for MessageId: %d exists. Ignoring the results.", id)
+		// Ignore unknown responses that don't belong to any channel.
+		return
+	}
+	server.HandleServerResponse(id, data)
+}
+
+// parseVeyronRequest parses a json rpc request into a veyronRPC object.
+func (c *Controller) parseVeyronRequest(r io.Reader) (*veyronRPC, vom.Type, error) {
+	var tempMsg veyronTempRPC
+	decoder := json.NewDecoder(r)
+	if err := decoder.Decode(&tempMsg); err != nil {
+		return nil, nil, fmt.Errorf("can't unmarshall JSONMessage: %v", err)
+	}
+
+	// Fetch and adapt signature from the SignatureManager
+	ctx := c.rt.TODOContext()
+	sig, err := c.signatureManager.Signature(ctx, tempMsg.Name, c.client)
+	if err != nil {
+		return nil, nil, verror.Internalf("error getting service signature for %s: %v", tempMsg.Name, err)
+	}
+
+	methName := lib.UppercaseFirstCharacter(tempMsg.Method)
+	methSig, ok := sig.Methods[methName]
+	if !ok {
+		return nil, nil, fmt.Errorf("Method not found in signature: %v (full sig: %v)", methName, sig)
+	}
+
+	var msg veyronRPC
+	if len(methSig.InArgs) != len(tempMsg.InArgs) {
+		return nil, nil, fmt.Errorf("invalid number of arguments: %v vs. %v", methSig, tempMsg)
+	}
+	msg.InArgs = make([]interface{}, len(tempMsg.InArgs))
+	td := wiretype_build.TypeDefs(sig.TypeDefs)
+
+	for i := 0; i < len(tempMsg.InArgs); i++ {
+		argTypeId := methSig.InArgs[i].Type
+		argType := vom_wiretype.Type{
+			ID:   argTypeId,
+			Defs: &td,
+		}
+
+		val, err := vom.JSONToObject(string(tempMsg.InArgs[i]), argType)
+		if err != nil {
+			return nil, nil, fmt.Errorf("error while converting json to object for arg %d (%s): %v", i, methSig.InArgs[i].Name, err)
+		}
+		msg.InArgs[i] = val
+	}
+
+	msg.Name = tempMsg.Name
+	msg.Method = tempMsg.Method
+	msg.NumOutArgs = tempMsg.NumOutArgs
+	msg.IsStreaming = tempMsg.IsStreaming
+
+	inStreamType := vom_wiretype.Type{
+		ID:   methSig.InStream,
+		Defs: &td,
+	}
+
+	c.logger.VI(2).Infof("VeyronRPC: %s.%s(id=%v, ..., streaming=%v)", msg.Name, msg.Method, msg.IsStreaming)
+	return &msg, inStreamType, nil
+}
+
+type signatureRequest struct {
+	Name string
+}
+
+func (c *Controller) getSignature(name string) (signature.JSONServiceSignature, error) {
+	// Fetch and adapt signature from the SignatureManager
+	ctx := c.rt.TODOContext()
+	sig, err := c.signatureManager.Signature(ctx, name, c.client)
+	if err != nil {
+		return nil, verror.Internalf("error getting service signature for %s: %v", name, err)
+	}
+
+	return signature.NewJSONServiceSignature(*sig), nil
+}
+
+// HandleSignatureRequest uses signature manager to get and cache signature of a remote server
+func (c *Controller) HandleSignatureRequest(data string, w lib.ClientWriter) {
+	// Decode the request
+	var request signatureRequest
+	decoder := json.NewDecoder(bytes.NewBufferString(data))
+	if err := decoder.Decode(&request); err != nil {
+		w.Error(verror.Internalf("can't unmarshal JSONMessage: %v", err))
+		return
+	}
+
+	c.logger.VI(2).Infof("requesting Signature for %q", request.Name)
+	jsSig, err := c.getSignature(request.Name)
+	if err != nil {
+		w.Error(err)
+		return
+	}
+
+	// Send the signature back
+	if err := w.Send(lib.ResponseFinal, jsSig); err != nil {
+		w.Error(verror.Internalf("error marshalling results: %v", err))
+		return
+	}
+}
diff --git a/services/wsprd/wspr/wspr_test.go b/services/wsprd/app/app_test.go
similarity index 83%
rename from services/wsprd/wspr/wspr_test.go
rename to services/wsprd/app/app_test.go
index d508beb..de01da9 100644
--- a/services/wsprd/wspr/wspr_test.go
+++ b/services/wsprd/app/app_test.go
@@ -1,4 +1,4 @@
-package wspr
+package app
 
 import (
 	"bytes"
@@ -15,7 +15,6 @@
 	"veyron2/ipc"
 	"veyron2/naming"
 	"veyron2/rt"
-	"veyron2/security"
 	"veyron2/vdl/vdlutil"
 	"veyron2/verror"
 	"veyron2/vlog"
@@ -133,6 +132,11 @@
 	return startAnyServer(true, mt)
 }
 
+type response struct {
+	Type    lib.ResponseType
+	Message interface{}
+}
+
 type testWriter struct {
 	sync.Mutex
 	stream []response
@@ -236,10 +240,9 @@
 		return
 	}
 	defer s.Stop()
-	wspr := NewWSPR(0, "mockVeyronProxyEP")
-	wsp := websocketPipe{ctx: wspr}
-	wsp.setup()
-	jsSig, err := wsp.getSignature("/" + endpoint.String())
+	controller := NewController(nil, "mockVeyronProxyEP")
+	controller.UpdateIdentity(nil)
+	jsSig, err := controller.getSignature("/" + endpoint.String())
 	if err != nil {
 		t.Errorf("Failed to get signature: %v", err)
 	}
@@ -249,45 +252,6 @@
 	}
 }
 
-func TestEncodeDecodeIdentity(t *testing.T) {
-	identity := security.FakePrivateID("/fake/private/id")
-	resultIdentity := decodeIdentity(r.Logger(), encodeIdentity(r.Logger(), identity))
-	if identity != resultIdentity {
-		t.Errorf("expected decodeIdentity(encodeIdentity(identity)) to be %v, got %v", identity, resultIdentity)
-	}
-}
-
-func TestHandleAssocIdentity(t *testing.T) {
-	wspr := NewWSPR(0, "mockVeyronProxyEP")
-	wsp := websocketPipe{ctx: wspr}
-	wsp.setup()
-
-	privateId := security.FakePrivateID("/fake/private/id")
-	identityData := assocIdentityData{
-		Account:  "test@example.org",
-		Identity: encodeIdentity(wspr.logger, privateId),
-		Origin:   "my.webapp.com",
-	}
-	jsonIdentityDataBytes, err := json.Marshal(identityData)
-	if err != nil {
-		t.Errorf("json.Marshal(%v) failed: %v", identityData, err)
-	}
-	jsonIdentityData := string(jsonIdentityDataBytes)
-	writer := testWriter{
-		logger: wspr.logger,
-	}
-	wsp.handleAssocIdentity(jsonIdentityData, lib.ClientWriter(&writer))
-	// Check that the pipe has the privateId
-	if wsp.privateId != privateId {
-		t.Errorf("wsp.privateId was not set. got: %v, expected: %v", wsp.privateId, identityData.Identity)
-	}
-	// Check that wspr idManager has the origin
-	_, err = wspr.idManager.Identity(identityData.Origin)
-	if err != nil {
-		t.Errorf("wspr.idManager.Identity(%v) failed: %v", identityData.Origin, err)
-	}
-}
-
 type goServerTestCase struct {
 	method             string
 	inArgs             []interface{}
@@ -307,25 +271,24 @@
 	}
 	defer s.Stop()
 
-	wspr := NewWSPR(0, "mockVeyronProxyEP")
-	wsp := websocketPipe{ctx: wspr}
-	wsp.setup()
-	writer := testWriter{
-		logger: wspr.logger,
-	}
+	controller := NewController(nil, "mockVeyronProxyEP")
+	controller.UpdateIdentity(nil)
 
+	writer := testWriter{
+		logger: controller.logger,
+	}
 	var signal chan ipc.Stream
 	if len(test.streamingInputs) > 0 {
 		signal = make(chan ipc.Stream, 1)
-		wsp.outstandingStreams[0] = outstandingStream{
+		controller.outstandingStreams[0] = outstandingStream{
 			stream: client.StartQueueingStream(signal),
 			inType: test.streamingInputType,
 		}
 		go func() {
 			for _, value := range test.streamingInputs {
-				wsp.sendOnStream(0, value, &writer)
+				controller.SendOnStream(0, value, &writer)
 			}
-			wsp.closeStream(0)
+			controller.CloseStream(0)
 		}()
 	}
 
@@ -336,7 +299,7 @@
 		NumOutArgs:  test.numOutArgs,
 		IsStreaming: signal != nil,
 	}
-	wsp.sendVeyronRequest(0, &request, &writer, signal)
+	controller.sendVeyronRequest(0, &request, &writer, signal)
 
 	checkResponses(&writer, test.expectedStream, test.expectedError, t)
 }
@@ -401,8 +364,7 @@
 }
 
 type runningTest struct {
-	wspr             *WSPR
-	wsp              *websocketPipe
+	controller       *Controller
 	writer           *testWriter
 	mounttableServer ipc.Server
 	proxyServer      *proxy.Proxy
@@ -423,22 +385,22 @@
 
 	proxyEndpoint := proxyServer.Endpoint().String()
 
-	wspr := NewWSPR(0, "/"+proxyEndpoint, veyron2.NamespaceRoots{"/" + endpoint.String()})
-	wsp := websocketPipe{ctx: wspr}
-	writer := testWriter{
-		logger: wspr.logger,
-	}
-	wsp.writerCreator = func(int64) lib.ClientWriter {
+	writer := testWriter{}
+
+	writerCreator := func(int64) lib.ClientWriter {
 		return &writer
 	}
-	wsp.setup()
-	wsp.serve(serveRequest{
+	controller := NewController(writerCreator, "/"+proxyEndpoint, veyron2.NamespaceRoots{"/" + endpoint.String()})
+	controller.UpdateIdentity(nil)
+	writer.logger = controller.logger
+
+	controller.serve(serveRequest{
 		Name:    "adder",
 		Service: adderServiceSignature,
 	}, &writer)
 
 	return &runningTest{
-		wspr, &wsp, &writer, mounttableServer, proxyServer,
+		controller, &writer, mounttableServer, proxyServer,
 	}, nil
 }
 
@@ -446,7 +408,7 @@
 	rt, err := serveServer()
 	defer rt.mounttableServer.Stop()
 	defer rt.proxyServer.Shutdown()
-	defer rt.wsp.cleanup()
+	defer rt.controller.Cleanup()
 	if err != nil {
 		t.Fatalf("could not serve server %v", err)
 	}
@@ -475,7 +437,7 @@
 	rt, err := serveServer()
 	defer rt.mounttableServer.Stop()
 	defer rt.proxyServer.Shutdown()
-	defer rt.wsp.cleanup()
+	defer rt.controller.Cleanup()
 
 	if err != nil {
 		t.Errorf("could not serve server %v", err)
@@ -483,17 +445,17 @@
 	}
 
 	// ensure there is only one server and then stop the server
-	if len(rt.wsp.servers) != 1 {
-		t.Errorf("expected only one server but got: %d", len(rt.wsp.servers))
+	if len(rt.controller.servers) != 1 {
+		t.Errorf("expected only one server but got: %d", len(rt.controller.servers))
 		return
 	}
-	for serverId := range rt.wsp.servers {
-		rt.wsp.removeServer(serverId)
+	for serverId := range rt.controller.servers {
+		rt.controller.removeServer(serverId)
 	}
 
 	// ensure there is no more servers now
-	if len(rt.wsp.servers) != 0 {
-		t.Errorf("expected no server after stopping the only one but got: %d", len(rt.wsp.servers))
+	if len(rt.controller.servers) != 0 {
+		t.Errorf("expected no server after stopping the only one but got: %d", len(rt.controller.servers))
 		return
 	}
 
@@ -511,9 +473,9 @@
 	err           *verror.Standard
 }
 
-func sendServerStream(t *testing.T, wsp *websocketPipe, test *jsServerTestCase, w lib.ClientWriter) {
+func sendServerStream(t *testing.T, controller *Controller, test *jsServerTestCase, w lib.ClientWriter) {
 	for _, msg := range test.serverStream {
-		wsp.sendParsedMessageOnStream(0, msg, w)
+		controller.sendParsedMessageOnStream(0, msg, w)
 	}
 
 	serverReply := map[string]interface{}{
@@ -525,14 +487,14 @@
 	if err != nil {
 		t.Fatalf("Failed to serialize the reply: %v", err)
 	}
-	wsp.handleServerResponse(0, string(bytes))
+	controller.HandleServerResponse(0, string(bytes))
 }
 
 func runJsServerTestCase(t *testing.T, test jsServerTestCase) {
 	rt, err := serveServer()
 	defer rt.mounttableServer.Stop()
 	defer rt.proxyServer.Shutdown()
-	defer rt.wsp.cleanup()
+	defer rt.controller.Cleanup()
 
 	if err != nil {
 		t.Errorf("could not serve server %v", err)
@@ -561,14 +523,14 @@
 
 	rt.writer.stream = nil
 
-	// Create a client using wspr's runtime so it points to the right mounttable.
-	client, err := rt.wspr.rt.NewClient()
+	// Create a client using app's runtime so it points to the right mounttable.
+	client, err := rt.controller.rt.NewClient()
 
 	if err != nil {
 		t.Errorf("unable to create client: %v", err)
 	}
 
-	call, err := client.StartCall(rt.wspr.rt.NewContext(), "/"+msg+"/adder", test.method, test.inArgs)
+	call, err := client.StartCall(rt.controller.rt.NewContext(), "/"+msg+"/adder", test.method, test.inArgs)
 	if err != nil {
 		t.Errorf("failed to start call: %v", err)
 	}
@@ -607,7 +569,7 @@
 	expectedWebsocketMessage = append(expectedWebsocketMessage, response{Type: lib.ResponseStreamClose})
 
 	expectedStream := test.serverStream
-	go sendServerStream(t, rt.wsp, &test, rt.writer)
+	go sendServerStream(t, rt.controller, &test, rt.writer)
 	for {
 		var data interface{}
 		if err := call.Recv(&data); err != nil {
diff --git a/services/wsprd/identity/identity.go b/services/wsprd/identity/identity.go
index 96bc943..51b8ec6 100644
--- a/services/wsprd/identity/identity.go
+++ b/services/wsprd/identity/identity.go
@@ -13,11 +13,13 @@
 package identity
 
 import (
-	"crypto/sha256"
 	"io"
+	"net/url"
 	"sync"
 	"time"
 
+	"veyron/security/serialization"
+
 	"veyron2"
 	"veyron2/security"
 	"veyron2/verror"
@@ -42,24 +44,15 @@
 	Accounts map[string]security.PrivateID
 }
 
-// Serializer is a factory for managing the readers and writers used by the IDManager
-// for serialization and deserialization
+// Serializer is a factory for managing the readers and writers used by the
+// IDManager for serialization and deserialization
 type Serializer interface {
-	// DataWriter returns a writer that is used to write the data portion
-	// of the IDManager
-	DataWriter() io.WriteCloser
-
-	// SignatureWriter returns a writer that is used to write the signature
-	// of the serialized data.
-	SignatureWriter() io.WriteCloser
-
-	// DataReader returns a reader that is used to read the serialized data.
-	// If nil is returned, then there is no seralized data to load.
-	DataReader() io.Reader
-
-	// SignatureReader returns a reader that is used to read the signature of the
-	// serialized data.  If nil is returned, then there is no signature to load.
-	SignatureReader() io.Reader
+	// Readers returns io.Readers for reading the IDManager's serialized
+	// data and its signature.
+	Readers() (data io.Reader, signature io.Reader, err error)
+	// Writers returns io.WriteClosers for writing the IDManager's
+	// serialized data and its integrity signature.
+	Writers() (data io.WriteCloser, signature io.WriteCloser, err error)
 }
 
 var OriginDoesNotExist = verror.NotFoundf("origin not found")
@@ -76,7 +69,8 @@
 	serializer Serializer
 }
 
-// NewIDManager creates a new IDManager from the reader passed in. serializer can't be nil
+// NewIDManager creates a new IDManager by reading it from the serializer passed in.
+// serializer can't be nil
 func NewIDManager(rt veyron2.Runtime, serializer Serializer) (*IDManager, error) {
 	result := &IDManager{
 		rt: rt,
@@ -87,68 +81,38 @@
 		serializer: serializer,
 	}
 
-	reader := serializer.DataReader()
-	var hadData bool
-	hash := sha256.New()
-	if reader != nil {
-		hadData = true
-		if err := vom.NewDecoder(io.TeeReader(reader, hash)).Decode(&result.state); err != nil {
-			return nil, err
-		}
-
+	data, signature, err := serializer.Readers()
+	if err != nil {
+		return nil, err
 	}
-	signed := hash.Sum(nil)
-
-	var sig security.Signature
-
-	reader = serializer.SignatureReader()
-
-	var hadSig bool
-	if reader != nil {
-		hadSig = true
-		if err := vom.NewDecoder(serializer.SignatureReader()).Decode(&sig); err != nil {
-			return nil, err
-		}
+	vr, err := serialization.NewVerifyingReader(data, signature, rt.Identity().PublicKey())
+	if err != nil {
+		return nil, err
 	}
-
-	if !hadSig && !hadData {
+	if vr == nil {
+		// No serialized data exists, returning an empty IDManager.
 		return result, nil
 	}
-
-	if !sig.Verify(rt.Identity().PublicID().PublicKey(), signed) {
-		return nil, verror.NotAuthorizedf("signature verification failed")
+	if err := vom.NewDecoder(vr).Decode(&result.state); err != nil {
+		return nil, err
 	}
-
 	return result, nil
 }
 
-// Save serializes the IDManager to the writer.
 func (i *IDManager) save() error {
-	hash := sha256.New()
-	writer := i.serializer.DataWriter()
-
-	if err := vom.NewEncoder(io.MultiWriter(writer, hash)).Encode(i.state); err != nil {
-		return err
-	}
-
-	if err := writer.Close(); err != nil {
-		return err
-	}
-
-	signed := hash.Sum(nil)
-	signature, err := i.rt.Identity().Sign(signed)
-
+	data, signature, err := i.serializer.Writers()
 	if err != nil {
 		return err
 	}
 
-	writer = i.serializer.SignatureWriter()
-
-	if err := vom.NewEncoder(writer).Encode(signature); err != nil {
+	swc, err := serialization.NewSigningWriteCloser(data, signature, i.rt.Identity(), nil)
+	if err != nil {
 		return err
 	}
-
-	return writer.Close()
+	if err := vom.NewEncoder(swc).Encode(i.state); err != nil {
+		return err
+	}
+	return swc.Close()
 }
 
 // Identity returns the identity for an origin.  Returns OriginDoesNotExist if
@@ -160,12 +124,11 @@
 	if !found {
 		return nil, OriginDoesNotExist
 	}
-	// TODO(bjornick): Return a blessed identity, not the raw identity for the account.
-	identity, found := i.state.Accounts[perm.Account]
-	if !found {
-		return nil, OriginDoesNotExist
+	blessedID, err := i.generateBlessedID(origin, perm.Account, perm.Caveats)
+	if err != nil {
+		return nil, err
 	}
-	return identity, nil
+	return blessedID, nil
 }
 
 // AccountsMatching returns a list of accounts that match the given pattern.
@@ -221,12 +184,14 @@
 	return nil
 }
 
-func (i *IDManager) generateBlessedID(name string, account string, caveats []security.ServiceCaveat) (security.PrivateID, error) {
+func (i *IDManager) generateBlessedID(origin string, account string, caveats []security.ServiceCaveat) (security.PrivateID, error) {
 	blessor := i.state.Accounts[account]
 	if blessor == nil {
 		return nil, verror.NotFoundf("unknown account %s", account)
 	}
-	// The name here is irrelevant, since we'll only be storing the blessed name.
+	// Origins have the form protocol://hostname:port, which is not a valid
+	// blessing name. Hence we must url-encode.
+	name := url.QueryEscape(origin)
 	blessee, err := i.rt.NewIdentity(name)
 	if err != nil {
 		return nil, err
@@ -243,3 +208,7 @@
 	}
 	return blessee, nil
 }
+
+func init() {
+	vom.Register(&persistentState{})
+}
diff --git a/services/wsprd/identity/identity_test.go b/services/wsprd/identity/identity_test.go
index c35415d..8ca77c4 100644
--- a/services/wsprd/identity/identity_test.go
+++ b/services/wsprd/identity/identity_test.go
@@ -1,6 +1,7 @@
 package identity
 
 import (
+	"net/url"
 	"reflect"
 	"sort"
 	"strings"
@@ -42,17 +43,25 @@
 	if err != nil {
 		t.Fatalf("creating identity manager failed with: %v", err)
 	}
-	manager.AddAccount("google/user1", createChain(r, "google/user1"))
-	if err := manager.AddOrigin("sampleapp.com", "google/user1", nil); err != nil {
+	origin := "http://sampleapp.com:80"
+	account := "google/user1"
+	manager.AddAccount(account, createChain(r, account))
+	if err := manager.AddOrigin(origin, account, nil); err != nil {
 		t.Fatalf("failed to generate id: %v", err)
 	}
 
-	if _, err := manager.Identity("sampleapp.com"); err != nil {
-		t.Errorf("failed to get  an identity for sampleapp.com: %v", err)
+	id, err := manager.Identity(origin)
+	if err != nil {
+		t.Errorf("failed to get an identity for %v: %v", origin, err)
+	}
+	want := []string{createChain(r, account).PublicID().Names()[0] + "/" + url.QueryEscape(origin)}
+	if got := id.PublicID().Names(); !reflect.DeepEqual(got, want) {
+		t.Errorf("unexpected identity name. got: %v, wanted: %v", got, want)
 	}
 
-	if _, err := manager.Identity("unknown.com"); err != OriginDoesNotExist {
-		t.Error("should not have found an identity for unknown.com")
+	unknownOrigin := "http://unknown.com:80"
+	if _, err := manager.Identity(unknownOrigin); err != OriginDoesNotExist {
+		t.Error("should not have found an identity for %v", unknownOrigin)
 	}
 }
 
@@ -63,13 +72,16 @@
 	if err != nil {
 		t.Fatalf("creating identity manager failed with: %v", err)
 	}
-	manager.AddAccount("google/user1", createChain(r, "google/user1"))
-	manager.AddAccount("google/user2", createChain(r, "google/user2"))
-	manager.AddAccount("facebook/user1", createChain(r, "facebook/user1"))
+	googleAccount1 := "google/user1"
+	googleAccount2 := "google/user2"
+	facebookAccount := "facebook/user1"
+	manager.AddAccount(googleAccount1, createChain(r, googleAccount1))
+	manager.AddAccount(googleAccount2, createChain(r, googleAccount2))
+	manager.AddAccount(facebookAccount, createChain(r, facebookAccount))
 
 	result := manager.AccountsMatching(security.PrincipalPattern(topLevelName + "/google/*"))
 	sort.StringSlice(result).Sort()
-	expected := []string{"google/user1", "google/user2"}
+	expected := []string{googleAccount1, googleAccount2}
 	if !reflect.DeepEqual(result, expected) {
 		t.Errorf("unexpected result from AccountsMatching, expected :%v, got: %v", expected, result)
 	}
@@ -82,7 +94,7 @@
 		t.Fatalf("creating identity manager failed with: %v", err)
 	}
 
-	err = manager.AddOrigin("sampleapp.com", "google/user1", nil)
+	err = manager.AddOrigin("http://sampleapp.com:80", "google/user1", nil)
 
 	if err == nil {
 		t.Errorf("should have failed to generated an id blessed by google/user1")
@@ -98,7 +110,9 @@
 		t.Fatalf("creating identity manager failed with: %v", err)
 	}
 	manager.AddAccount("google/user1", createChain(r, "google/user1"))
-	if err = manager.AddOrigin("sampleapp.com", "google/user1", nil); err != nil {
+	origin1 := "https://sampleapp-1.com:443"
+	account := "google/user1"
+	if err = manager.AddOrigin(origin1, account, nil); err != nil {
 		t.Fatalf("failed to generate id: %v", err)
 	}
 
@@ -107,11 +121,17 @@
 	if err != nil {
 		t.Fatalf("failed to deserialize data: %v", err)
 	}
-	if _, err := newManager.Identity("sampleapp.com"); err != nil {
-		t.Errorf("can't find the sampleapp.com identity: %v", err)
+	id, err := newManager.Identity(origin1)
+	if err != nil {
+		t.Errorf("can't find the %v identity: %v", origin1, err)
+	}
+	want := []string{createChain(r, account).PublicID().Names()[0] + "/" + url.QueryEscape(origin1)}
+	if got := id.PublicID().Names(); !reflect.DeepEqual(got, want) {
+		t.Errorf("unexpected identity name. got: %v, wanted: %v", got, want)
 	}
 
-	if err := newManager.AddOrigin("sampleapp2.com", "google/user1", nil); err != nil {
-		t.Errorf("failed to create sampleapp2.com identity: %v", err)
+	origin2 := "https://sampleapp-2.com:443"
+	if err := newManager.AddOrigin(origin2, account, nil); err != nil {
+		t.Errorf("can't find the %v identity: %v", origin2, err)
 	}
 }
diff --git a/services/wsprd/identity/in_memory_serializer.go b/services/wsprd/identity/in_memory_serializer.go
index c412bb6..99a565d 100644
--- a/services/wsprd/identity/in_memory_serializer.go
+++ b/services/wsprd/identity/in_memory_serializer.go
@@ -5,6 +5,7 @@
 	"io"
 )
 
+// bufferCloser implements io.ReadWriteCloser.
 type bufferCloser struct {
 	bytes.Buffer
 }
@@ -13,33 +14,27 @@
 	return nil
 }
 
+// InMemorySerializer implements Serializer. This Serializer should only be
+// used in tests.
+// TODO(ataly, bjornick): Get rid of all uses of this Serializer from non-test
+// code and use a file backed (or some persistent storage backed) Serializer there
+// instead.
 type InMemorySerializer struct {
 	data      bufferCloser
 	signature bufferCloser
 	hasData   bool
 }
 
-func (s *InMemorySerializer) DataWriter() io.WriteCloser {
+func (s *InMemorySerializer) Readers() (io.Reader, io.Reader, error) {
+	if !s.hasData {
+		return nil, nil, nil
+	}
+	return &s.data, &s.signature, nil
+}
+
+func (s *InMemorySerializer) Writers() (io.WriteCloser, io.WriteCloser, error) {
 	s.hasData = true
 	s.data.Reset()
-	return &s.data
-}
-
-func (s *InMemorySerializer) SignatureWriter() io.WriteCloser {
 	s.signature.Reset()
-	return &s.signature
-}
-
-func (s *InMemorySerializer) DataReader() io.Reader {
-	if s.hasData {
-		return &s.data
-	}
-	return nil
-}
-
-func (s *InMemorySerializer) SignatureReader() io.Reader {
-	if s.hasData {
-		return &s.signature
-	}
-	return nil
+	return &s.data, &s.signature, nil
 }
diff --git a/services/wsprd/wspr/pipe.go b/services/wsprd/wspr/pipe.go
index b8689be..fbae344 100644
--- a/services/wsprd/wspr/pipe.go
+++ b/services/wsprd/wspr/pipe.go
@@ -10,22 +10,14 @@
 	_ "net/http/pprof"
 	"os"
 	"strings"
-	"sync"
 	"time"
 
-	"veyron/services/wsprd/ipc/client"
-	"veyron/services/wsprd/ipc/server"
-	"veyron/services/wsprd/ipc/stream"
+	"veyron/services/wsprd/app"
 	"veyron/services/wsprd/lib"
-	"veyron/services/wsprd/signature"
-	"veyron2"
-	"veyron2/ipc"
 	"veyron2/security"
 	"veyron2/verror"
 	"veyron2/vlog"
 	"veyron2/vom"
-	vom_wiretype "veyron2/vom/wiretype"
-	wiretype_build "veyron2/wiretype/build"
 
 	"github.com/gorilla/websocket"
 )
@@ -69,207 +61,100 @@
 	Type websocketMessageType
 }
 
-// Temporary holder of RPC so that we can store the unprocessed args.
-type veyronTempRPC struct {
-	Name        string
-	Method      string
-	InArgs      []json.RawMessage
-	NumOutArgs  int32
-	IsStreaming bool
+// wsMessage is the struct that is put on the write queue.
+type wsMessage struct {
+	buf         []byte
+	messageType int
 }
 
-type veyronRPC struct {
-	Name        string
-	Method      string
-	InArgs      []interface{}
-	NumOutArgs  int32
-	IsStreaming bool
-}
+type pipe struct {
+	// The struct that handles the translation of javascript request to veyron requests.
+	controller *app.Controller
 
-// A request javascript to serve undern a particular name
-type serveRequest struct {
-	Name     string
-	ServerId uint64
-	Service  signature.JSONServiceSignature
-}
-type websocketPipe struct {
-	// Protects outstandingStreams and outstandingServerRequests.
-	sync.Mutex
-	ws  *websocket.Conn
-	ctx *WSPR
-	// Used to generate unique ids for requests initiated by the proxy.
-	// These ids will be even so they don't collide with the ids generated
-	// by the client.
-	lastGeneratedId int64
+	ws *websocket.Conn
 
-	// Streams for the outstanding requests.
-	outstandingStreams map[int64]outstandingStream
+	logger vlog.Logger
 
-	// Maps flowids to the server that owns them.
-	flowMap map[int64]*server.Server
+	wspr *WSPR
 
-	// A manager that handles fetching and caching signature of remote services
-	signatureManager lib.SignatureManager
-
-	// We maintain multiple Veyron server per websocket pipe for serving JavaScript
-	// services.
-	servers map[uint64]*server.Server
-
-	// Creates a client writer for a given flow.  This is a member so that tests can override
-	// the default implementation.
 	writerCreator func(id int64) lib.ClientWriter
 
+	// There is a single write goroutine because ws.NewWriter() creates a new writer that
+	// writes to a shared buffer in the websocket, so it is not safe to have multiple go
+	// routines writing to different websocket writers.
 	writeQueue chan wsMessage
 
-	// privateId associated with the pipe
-	privateId security.PrivateID
+	// This request is used to tell WSPR which pipe to remove when we shutdown.
+	req *http.Request
 }
 
-// finishCall waits for the call to finish and write out the response to w.
-func (wsp *websocketPipe) finishCall(w lib.ClientWriter, clientCall ipc.Call, msg *veyronRPC) {
-	if msg.IsStreaming {
-		for {
-			var item interface{}
-			if err := clientCall.Recv(&item); err != nil {
-				if err == io.EOF {
-					break
-				}
-				w.Error(err) // Send streaming error as is
-				return
-			}
-			if err := w.Send(lib.ResponseStream, item); err != nil {
-				w.Error(verror.Internalf("unable to marshal: %v", item))
-			}
-		}
-
-		if err := w.Send(lib.ResponseStreamClose, nil); err != nil {
-			w.Error(verror.Internalf("unable to marshal close stream message"))
-		}
-	}
-
-	results := make([]interface{}, msg.NumOutArgs)
-	// This array will have pointers to the values in result.
-	resultptrs := make([]interface{}, msg.NumOutArgs)
-	for ax := range results {
-		resultptrs[ax] = &results[ax]
-	}
-	if err := clientCall.Finish(resultptrs...); err != nil {
-		// return the call system error as is
-		w.Error(err)
-		return
-	}
-	// for now we assume last out argument is always error
-	if len(results) < 1 {
-		w.Error(verror.Internalf("client call did not return any results"))
-		return
-	}
-
-	if err, ok := results[len(results)-1].(error); ok {
-		// return the call application error as is
-		w.Error(err)
-		return
-	}
-
-	if err := w.Send(lib.ResponseFinal, results[0:len(results)-1]); err != nil {
-		w.Error(verror.Internalf("error marshalling results: %v", err))
-	}
-}
-
-// Implements the serverHelper interface
-func (wsp *websocketPipe) CreateNewFlow(s *server.Server, stream stream.Sender) *server.Flow {
-	wsp.Lock()
-	defer wsp.Unlock()
-	id := wsp.lastGeneratedId
-	wsp.lastGeneratedId += 2
-	wsp.flowMap[id] = s
-	wsp.outstandingStreams[id] = outstandingStream{stream, vom_wiretype.Type{ID: 1}}
-	return &server.Flow{ID: id, Writer: wsp.writerCreator(id)}
-}
-
-func (wsp *websocketPipe) CleanupFlow(id int64) {
-	wsp.Lock()
-	defer wsp.Unlock()
-	delete(wsp.outstandingStreams, id)
-	delete(wsp.flowMap, id)
-}
-
-func (wsp *websocketPipe) GetLogger() vlog.Logger {
-	return wsp.ctx.logger
-}
-
-func (wsp *websocketPipe) RT() veyron2.Runtime {
-	return wsp.ctx.rt
+func newPipe(w http.ResponseWriter, req *http.Request, wspr *WSPR, creator func(id int64) lib.ClientWriter) *pipe {
+	pipe := &pipe{logger: wspr.rt.Logger(), writerCreator: creator, req: req, wspr: wspr}
+	pipe.start(w, req)
+	return pipe
 }
 
 // cleans up any outstanding rpcs.
-func (wsp *websocketPipe) cleanup() {
-	wsp.ctx.logger.VI(0).Info("Cleaning up websocket")
-	wsp.Lock()
-	defer wsp.Unlock()
-	for _, stream := range wsp.outstandingStreams {
-		if call, ok := stream.stream.(ipc.Call); ok {
-			call.Cancel()
+func (p *pipe) cleanup() {
+	p.logger.VI(0).Info("Cleaning up websocket")
+	p.controller.Cleanup()
+	p.wspr.CleanUpPipe(p.req)
+}
+
+func (p *pipe) setup() {
+	if p.writerCreator == nil {
+		p.writerCreator = func(id int64) lib.ClientWriter {
+			return &websocketWriter{p: p, id: id, logger: p.logger}
 		}
 	}
 
-	for _, server := range wsp.servers {
-		server.Stop()
-	}
+	p.writeQueue = make(chan wsMessage, 50)
+	go p.writeLoop()
+
+	p.controller = app.NewController(p.writerCreator, p.wspr.veyronProxyEP)
+	// TODO(bjornick):  Pass in the identity linked to this origin.
+	p.controller.UpdateIdentity(nil)
 }
 
-func (wsp *websocketPipe) setup() {
-	wsp.signatureManager = lib.NewSignatureManager()
-	wsp.outstandingStreams = make(map[int64]outstandingStream)
-	wsp.flowMap = make(map[int64]*server.Server)
-	wsp.servers = make(map[uint64]*server.Server)
-	wsp.writeQueue = make(chan wsMessage, 50)
-	go wsp.writeLoop()
-
-	if wsp.writerCreator == nil {
-		wsp.writerCreator = func(id int64) lib.ClientWriter {
-			return &websocketWriter{wsp: wsp, id: id, logger: wsp.ctx.logger}
-		}
-	}
-}
-
-func (wsp *websocketPipe) writeLoop() {
+func (p *pipe) writeLoop() {
 	for {
-		msg, ok := <-wsp.writeQueue
+		msg, ok := <-p.writeQueue
 		if !ok {
-			wsp.ctx.logger.Errorf("write queue was closed")
+			p.logger.Errorf("write queue was closed")
 			return
 		}
 
 		if msg.messageType == websocket.PingMessage {
-			wsp.ctx.logger.Infof("sending ping")
+			p.logger.Infof("sending ping")
 		}
-		if err := wsp.ws.WriteMessage(msg.messageType, msg.buf); err != nil {
-			wsp.ctx.logger.Errorf("failed to write bytes: %s", err)
+		if err := p.ws.WriteMessage(msg.messageType, msg.buf); err != nil {
+			p.logger.Errorf("failed to write bytes: %s", err)
 		}
 	}
 }
 
-func (wsp *websocketPipe) start(w http.ResponseWriter, req *http.Request) {
+func (p *pipe) start(w http.ResponseWriter, req *http.Request) {
 	ws, err := websocket.Upgrade(w, req, nil, 1024, 1024)
 	if _, ok := err.(websocket.HandshakeError); ok {
 		http.Error(w, "Not a websocket handshake", 400)
 		return
 	} else if err != nil {
 		http.Error(w, "Internal Error", 500)
-		wsp.ctx.logger.Errorf("websocket upgrade failed: %s", err)
+		p.logger.Errorf("websocket upgrade failed: %s", err)
 		return
 	}
 
-	wsp.setup()
-	wsp.ws = ws
-	wsp.ws.SetPongHandler(wsp.pongHandler)
-	wsp.sendInitialMessage()
-	go wsp.readLoop()
-	go wsp.pingLoop()
+	p.ws = ws
+	p.ws.SetPongHandler(p.pongHandler)
+	p.setup()
+	p.sendInitialMessage()
+
+	go p.readLoop()
+	go p.pingLoop()
 }
 
 // Upon first connect, we send a message with the wsprConfig.
-func (wsp *websocketPipe) sendInitialMessage() {
+func (p *pipe) sendInitialMessage() {
 	mounttableRoots := strings.Split(os.Getenv("NAMESPACE_ROOT"), ",")
 	if len(mounttableRoots) == 1 && mounttableRoots[0] == "" {
 		mounttableRoots = []string{}
@@ -280,154 +165,56 @@
 
 	var buf bytes.Buffer
 	if err := vom.ObjToJSON(&buf, vom.ValueOf(msg)); err != nil {
-		wsp.ctx.logger.Errorf("failed to convert wspr config to json: %s", err)
+		p.logger.Errorf("failed to convert wspr config to json: %s", err)
 		return
 	}
-	wsp.writeQueue <- wsMessage{messageType: websocket.TextMessage, buf: buf.Bytes()}
+	p.writeQueue <- wsMessage{messageType: websocket.TextMessage, buf: buf.Bytes()}
 }
 
-func (wsp *websocketPipe) pingLoop() {
+func (p *pipe) pingLoop() {
 	for {
 		time.Sleep(pingInterval)
-		wsp.ctx.logger.VI(2).Info("ws: ping")
-		wsp.writeQueue <- wsMessage{messageType: websocket.PingMessage, buf: []byte{}}
+		p.logger.VI(2).Info("ws: ping")
+		p.writeQueue <- wsMessage{messageType: websocket.PingMessage, buf: []byte{}}
 	}
 }
 
-func (wsp *websocketPipe) pongHandler(msg string) error {
-	wsp.ctx.logger.VI(2).Infof("ws: pong")
-	wsp.ws.SetReadDeadline(time.Now().Add(pongTimeout))
+func (p *pipe) pongHandler(msg string) error {
+	p.logger.VI(2).Infof("ws: pong")
+	p.ws.SetReadDeadline(time.Now().Add(pongTimeout))
 	return nil
 }
 
-func (wsp *websocketPipe) sendParsedMessageOnStream(id int64, msg interface{}, w lib.ClientWriter) {
-	wsp.Lock()
-	defer wsp.Unlock()
-	stream := wsp.outstandingStreams[id].stream
-	if stream == nil {
-		w.Error(fmt.Errorf("unknown stream"))
-		return
-	}
-
-	stream.Send(msg, w)
-
-}
-
-// sendOnStream writes data on id's stream.  Returns an error if the send failed.
-func (wsp *websocketPipe) sendOnStream(id int64, data string, w lib.ClientWriter) {
-	wsp.Lock()
-	typ := wsp.outstandingStreams[id].inType
-	wsp.Unlock()
-	if typ == nil {
-		vlog.Errorf("no inType for stream %d (%q)", id, data)
-		return
-	}
-	payload, err := vom.JSONToObject(data, typ)
-	if err != nil {
-		vlog.Errorf("error while converting json to InStreamType (%s): %v", data, err)
-		return
-	}
-	wsp.sendParsedMessageOnStream(id, payload, w)
-}
-
-func (wsp *websocketPipe) sendVeyronRequest(id int64, veyronMsg *veyronRPC, w lib.ClientWriter, signal chan ipc.Stream) {
-	// We have to make the start call synchronous so we can make sure that we populate
-	// the call map before we can handle a recieve call.
-	call, err := wsp.ctx.startVeyronRequest(w, veyronMsg)
-	if err != nil {
-		w.Error(verror.Internalf("can't start Veyron Request: %v", err))
-		return
-	}
-
-	if signal != nil {
-		signal <- call
-	}
-
-	wsp.finishCall(w, call, veyronMsg)
-	if signal != nil {
-		wsp.Lock()
-		delete(wsp.outstandingStreams, id)
-		wsp.Unlock()
-	}
-}
-
-// handleVeyronRequest starts a veyron rpc and returns before the rpc has been completed.
-func (wsp *websocketPipe) handleVeyronRequest(id int64, data string, w lib.ClientWriter) {
-	veyronMsg, inStreamType, err := wsp.parseVeyronRequest(bytes.NewBufferString(data))
-	if err != nil {
-		w.Error(verror.Internalf("can't parse Veyron Request: %v", err))
-		return
-	}
-
-	wsp.Lock()
-	defer wsp.Unlock()
-	// If this rpc is streaming, we would expect that the client would try to send
-	// on this stream.  Since the initial handshake is done asynchronously, we have
-	// to basically put a queueing stream in the map before we make the async call
-	// so that the future sends on the stream can see the queuing stream, even if
-	// the client call isn't actually ready yet.
-	var signal chan ipc.Stream
-	if veyronMsg.IsStreaming {
-		signal = make(chan ipc.Stream)
-		wsp.outstandingStreams[id] = outstandingStream{
-			stream: client.StartQueueingStream(signal),
-			inType: inStreamType,
-		}
-	}
-	go wsp.sendVeyronRequest(id, veyronMsg, w, signal)
-}
-
-func (wsp *websocketPipe) closeStream(id int64) {
-	wsp.Lock()
-	defer wsp.Unlock()
-	stream := wsp.outstandingStreams[id].stream
-	if stream == nil {
-		wsp.ctx.logger.Errorf("close called on non-existent call: %v", id)
-		return
-	}
-
-	var call client.QueueingStream
-	var ok bool
-	if call, ok = stream.(client.QueueingStream); !ok {
-		wsp.ctx.logger.Errorf("can't close server stream: %v", id)
-		return
-	}
-
-	if err := call.Close(); err != nil {
-		wsp.ctx.logger.Errorf("client call close failed with: %v", err)
-	}
-}
-
-func (wsp *websocketPipe) readLoop() {
-	wsp.ws.SetReadDeadline(time.Now().Add(pongTimeout))
+func (p *pipe) readLoop() {
+	p.ws.SetReadDeadline(time.Now().Add(pongTimeout))
 	for {
-		op, r, err := wsp.ws.NextReader()
+		op, r, err := p.ws.NextReader()
 		if err == io.ErrUnexpectedEOF { // websocket disconnected
 			break
 		}
 		if err != nil {
-			wsp.ctx.logger.VI(1).Infof("websocket receive: %s", err)
+			p.logger.VI(1).Infof("websocket receive: %s", err)
 			break
 		}
 
 		if op != websocket.TextMessage {
-			wsp.ctx.logger.Errorf("unexpected websocket op: %v", op)
+			p.logger.Errorf("unexpected websocket op: %v", op)
 		}
 
 		var msg websocketMessage
 		decoder := json.NewDecoder(r)
 		if err := decoder.Decode(&msg); err != nil {
 			errMsg := fmt.Sprintf("can't unmarshall JSONMessage: %v", err)
-			wsp.ctx.logger.Error(errMsg)
-			wsp.writeQueue <- wsMessage{messageType: websocket.TextMessage, buf: []byte(errMsg)}
+			p.logger.Error(errMsg)
+			p.writeQueue <- wsMessage{messageType: websocket.TextMessage, buf: []byte(errMsg)}
 			continue
 		}
 
-		ww := wsp.writerCreator(msg.Id)
+		ww := p.writerCreator(msg.Id)
 
 		switch msg.Type {
 		case websocketVeyronRequest:
-			wsp.handleVeyronRequest(msg.Id, msg.Data, ww)
+			p.controller.HandleVeyronRequest(msg.Id, msg.Data, ww)
 		case websocketStreamingValue:
 			// This will asynchronous for a client rpc, but synchronous for a
 			// server rpc.  This could be potentially bad if the server is sending
@@ -435,263 +222,22 @@
 			// it difficult to guarantee that all stream messages make it to the client
 			// before the finish call.
 			// TODO(bjornick): Make the server send also asynchronous.
-			wsp.sendOnStream(msg.Id, msg.Data, ww)
+			p.controller.SendOnStream(msg.Id, msg.Data, ww)
 		case websocketStreamClose:
-			wsp.closeStream(msg.Id)
+			p.controller.CloseStream(msg.Id)
 		case websocketServe:
-			go wsp.handleServeRequest(msg.Data, ww)
+			go p.controller.HandleServeRequest(msg.Data, ww)
 		case websocketStopServer:
-			go wsp.handleStopRequest(msg.Data, ww)
+			go p.controller.HandleStopRequest(msg.Data, ww)
 		case websocketServerResponse:
-			go wsp.handleServerResponse(msg.Id, msg.Data)
+			go p.controller.HandleServerResponse(msg.Id, msg.Data)
 		case websocketSignatureRequest:
-			go wsp.handleSignatureRequest(msg.Data, ww)
-		case websocketAssocIdentity:
-			wsp.handleAssocIdentity(msg.Data, ww)
+			go p.controller.HandleSignatureRequest(msg.Data, ww)
 		default:
 			ww.Error(verror.Unknownf("unknown message type: %v", msg.Type))
 		}
 	}
-	wsp.cleanup()
-}
-
-func (wsp *websocketPipe) maybeCreateServer(serverId uint64) (*server.Server, error) {
-	wsp.Lock()
-	defer wsp.Unlock()
-	if server, ok := wsp.servers[serverId]; ok {
-		return server, nil
-	}
-	server, err := server.NewServer(serverId, wsp.ctx.veyronProxyEP, wsp)
-	if err != nil {
-		return nil, err
-	}
-	wsp.servers[serverId] = server
-	return server, nil
-}
-
-func (wsp *websocketPipe) removeServer(serverId uint64) {
-	wsp.Lock()
-	server := wsp.servers[serverId]
-	if server == nil {
-		wsp.Unlock()
-		return
-	}
-	delete(wsp.servers, serverId)
-	wsp.Unlock()
-
-	server.Stop()
-}
-
-func (wsp *websocketPipe) serve(serveRequest serveRequest, w lib.ClientWriter) {
-	// Create a server for the websocket pipe, if it does not exist already
-	server, err := wsp.maybeCreateServer(serveRequest.ServerId)
-	if err != nil {
-		w.Error(verror.Internalf("error creating server: %v", err))
-	}
-
-	wsp.ctx.logger.VI(2).Infof("serving under name: %q", serveRequest.Name)
-
-	endpoint, err := server.Serve(serveRequest.Name, serveRequest.Service)
-	if err != nil {
-		w.Error(verror.Internalf("error serving service: %v", err))
-		return
-	}
-	// Send the endpoint back
-	if err := w.Send(lib.ResponseFinal, endpoint); err != nil {
-		w.Error(verror.Internalf("error marshalling results: %v", err))
-		return
-	}
-}
-
-// handleServeRequest takes a request to serve a server, creates
-// a server, registers the provided services and sends the endpoint back.
-func (wsp *websocketPipe) handleServeRequest(data string, w lib.ClientWriter) {
-	// Decode the serve request which includes IDL, registered services and name
-	var serveRequest serveRequest
-	decoder := json.NewDecoder(bytes.NewBufferString(data))
-	if err := decoder.Decode(&serveRequest); err != nil {
-		w.Error(verror.Internalf("can't unmarshal JSONMessage: %v", err))
-		return
-	}
-	wsp.serve(serveRequest, w)
-}
-
-// handleStopRequest takes a request to stop a server.
-func (wsp *websocketPipe) handleStopRequest(data string, w lib.ClientWriter) {
-
-	var serverId uint64
-	decoder := json.NewDecoder(bytes.NewBufferString(data))
-	if err := decoder.Decode(&serverId); err != nil {
-		w.Error(verror.Internalf("can't unmarshal JSONMessage: %v", err))
-		return
-	}
-
-	wsp.removeServer(serverId)
-
-	// Send true to indicate stop has finished
-	if err := w.Send(lib.ResponseFinal, true); err != nil {
-		w.Error(verror.Internalf("error marshalling results: %v", err))
-		return
-	}
-}
-
-// handleServerResponse handles the completion of outstanding calls to JavaScript services
-// by filling the corresponding channel with the result from JavaScript.
-func (wsp *websocketPipe) handleServerResponse(id int64, data string) {
-	wsp.Lock()
-	server := wsp.flowMap[id]
-	wsp.Unlock()
-	if server == nil {
-		wsp.ctx.logger.Errorf("unexpected result from JavaScript. No channel "+
-			"for MessageId: %d exists. Ignoring the results.", id)
-		//Ignore unknown responses that don't belong to any channel
-		return
-	}
-	server.HandleServerResponse(id, data)
-}
-
-// parseVeyronRequest parses a json rpc request into a veyronRPC object.
-func (wsp *websocketPipe) parseVeyronRequest(r io.Reader) (*veyronRPC, vom.Type, error) {
-	var tempMsg veyronTempRPC
-	decoder := json.NewDecoder(r)
-	if err := decoder.Decode(&tempMsg); err != nil {
-		return nil, nil, fmt.Errorf("can't unmarshall JSONMessage: %v", err)
-	}
-
-	client, err := wsp.ctx.newClient()
-	if err != nil {
-		return nil, nil, verror.Internalf("error creating client: %v", err)
-	}
-
-	// Fetch and adapt signature from the SignatureManager
-	ctx := wsp.ctx.rt.TODOContext()
-	sig, err := wsp.signatureManager.Signature(ctx, tempMsg.Name, client)
-	if err != nil {
-		return nil, nil, verror.Internalf("error getting service signature for %s: %v", tempMsg.Name, err)
-	}
-
-	methName := lib.UppercaseFirstCharacter(tempMsg.Method)
-	methSig, ok := sig.Methods[methName]
-	if !ok {
-		return nil, nil, fmt.Errorf("Method not found in signature: %v (full sig: %v)", methName, sig)
-	}
-
-	var msg veyronRPC
-	if len(methSig.InArgs) != len(tempMsg.InArgs) {
-		return nil, nil, fmt.Errorf("invalid number of arguments: %v vs. %v", methSig, tempMsg)
-	}
-	msg.InArgs = make([]interface{}, len(tempMsg.InArgs))
-	td := wiretype_build.TypeDefs(sig.TypeDefs)
-
-	for i := 0; i < len(tempMsg.InArgs); i++ {
-		argTypeId := methSig.InArgs[i].Type
-		argType := vom_wiretype.Type{
-			ID:   argTypeId,
-			Defs: &td,
-		}
-
-		val, err := vom.JSONToObject(string(tempMsg.InArgs[i]), argType)
-		if err != nil {
-			return nil, nil, fmt.Errorf("error while converting json to object for arg %d (%s): %v", i, methSig.InArgs[i].Name, err)
-		}
-		msg.InArgs[i] = val
-	}
-
-	msg.Name = tempMsg.Name
-	msg.Method = tempMsg.Method
-	msg.NumOutArgs = tempMsg.NumOutArgs
-	msg.IsStreaming = tempMsg.IsStreaming
-
-	inStreamType := vom_wiretype.Type{
-		ID:   methSig.InStream,
-		Defs: &td,
-	}
-
-	wsp.ctx.logger.VI(2).Infof("VeyronRPC: %s.%s(id=%v, ..., streaming=%v)", msg.Name, msg.Method, msg.IsStreaming)
-	return &msg, inStreamType, nil
-}
-
-type signatureRequest struct {
-	Name string
-}
-
-func (wsp *websocketPipe) getSignature(name string) (signature.JSONServiceSignature, error) {
-	client, err := wsp.ctx.newClient()
-	if err != nil {
-		return nil, verror.Internalf("error creating client: %v", err)
-	}
-
-	// Fetch and adapt signature from the SignatureManager
-	ctx := wsp.ctx.rt.TODOContext()
-	sig, err := wsp.signatureManager.Signature(ctx, name, client)
-	if err != nil {
-		return nil, verror.Internalf("error getting service signature for %s: %v", name, err)
-	}
-
-	return signature.NewJSONServiceSignature(*sig), nil
-}
-
-// handleSignatureRequest uses signature manager to get and cache signature of a remote server
-func (wsp *websocketPipe) handleSignatureRequest(data string, w lib.ClientWriter) {
-	// Decode the request
-	var request signatureRequest
-	decoder := json.NewDecoder(bytes.NewBufferString(data))
-	if err := decoder.Decode(&request); err != nil {
-		w.Error(verror.Internalf("can't unmarshal JSONMessage: %v", err))
-		return
-	}
-
-	wsp.ctx.logger.VI(2).Infof("requesting Signature for %q", request.Name)
-	jsSig, err := wsp.getSignature(request.Name)
-	if err != nil {
-		w.Error(err)
-		return
-	}
-
-	// Send the signature back
-	if err := w.Send(lib.ResponseFinal, jsSig); err != nil {
-		w.Error(verror.Internalf("error marshalling results: %v", err))
-		return
-	}
-}
-
-type assocIdentityData struct {
-	Account  string
-	Identity string // base64(vom(security.PrivateID))
-	Origin   string
-}
-
-// handleAssocIdentityRequest associates the identity with the origin
-func (wsp *websocketPipe) handleAssocIdentity(data string, w lib.ClientWriter) {
-	// Decode the request
-	var parsedData assocIdentityData
-	decoder := json.NewDecoder(bytes.NewBufferString(data))
-	if err := decoder.Decode(&parsedData); err != nil {
-		w.Error(verror.Internalf("can't unmarshal JSONMessage: %v", err))
-		return
-	}
-
-	wsp.ctx.logger.VI(2).Info("associating name %v and private id %v to origin %v",
-		parsedData.Account,
-		parsedData.Identity,
-		parsedData.Origin)
-
-	idManager := wsp.ctx.idManager
-
-	wsp.privateId = decodeIdentity(wsp.ctx.logger, parsedData.Identity)
-
-	if err := idManager.AddAccount(parsedData.Account, wsp.privateId); err != nil {
-		w.Error(verror.Internalf("identity.AddAccount(%v, %v) failed: %v", parsedData.Account, wsp.privateId, err))
-	}
-
-	if err := idManager.AddOrigin(parsedData.Origin, parsedData.Account, []security.ServiceCaveat{}); err != nil {
-		w.Error(verror.Internalf("identity.AddOrigin(%v, %v, %v) failed: %v", parsedData.Origin, parsedData.Account, []security.ServiceCaveat{}, err))
-	}
-
-	if err := w.Send(lib.ResponseFinal, nil); err != nil {
-		w.Error(verror.Internalf("error marshalling results: %v", err))
-		return
-	}
+	p.cleanup()
 }
 
 func decodeIdentity(logger vlog.Logger, msg string) security.PrivateID {
diff --git a/services/wsprd/wspr/pipe_test.go b/services/wsprd/wspr/pipe_test.go
new file mode 100644
index 0000000..8a271c6
--- /dev/null
+++ b/services/wsprd/wspr/pipe_test.go
@@ -0,0 +1,28 @@
+package wspr
+
+import (
+	"testing"
+	"veyron/services/wsprd/lib"
+	"veyron2"
+	"veyron2/rt"
+	"veyron2/security"
+)
+
+var r veyron2.Runtime
+
+func init() {
+	r = rt.Init()
+}
+
+type testWriter struct{}
+
+func (*testWriter) Send(lib.ResponseType, interface{}) error { return nil }
+func (*testWriter) Error(error)                              {}
+
+func TestEncodeDecodeIdentity(t *testing.T) {
+	identity := security.FakePrivateID("/fake/private/id")
+	resultIdentity := decodeIdentity(r.Logger(), encodeIdentity(r.Logger(), identity))
+	if identity != resultIdentity {
+		t.Errorf("expected decodeIdentity(encodeIdentity(identity)) to be %v, got %v", identity, resultIdentity)
+	}
+}
diff --git a/services/wsprd/wspr/writer.go b/services/wsprd/wspr/writer.go
index ebf799e..c7ca1b8 100644
--- a/services/wsprd/wspr/writer.go
+++ b/services/wsprd/wspr/writer.go
@@ -23,7 +23,7 @@
 
 // Implements clientWriter interface for sending messages over websockets.
 type websocketWriter struct {
-	wsp    *websocketPipe
+	p      *pipe
 	logger vlog.Logger
 	id     int64
 }
@@ -42,7 +42,7 @@
 		return err
 	}
 
-	w.wsp.writeQueue <- wsMessage{messageType: websocket.TextMessage, buf: buf2.Bytes()}
+	w.p.writeQueue <- wsMessage{messageType: websocket.TextMessage, buf: buf2.Bytes()}
 
 	return nil
 }
diff --git a/services/wsprd/wspr/wspr.go b/services/wsprd/wspr/wspr.go
index af24916..0780d1c 100644
--- a/services/wsprd/wspr/wspr.go
+++ b/services/wsprd/wspr/wspr.go
@@ -17,22 +17,18 @@
 import (
 	"bytes"
 	"crypto/tls"
-	"encoding/binary"
 	"fmt"
 	"io"
 	"log"
 	"net/http"
 	_ "net/http/pprof"
+	"sync"
 	"time"
 
 	"veyron/services/wsprd/identity"
-	"veyron/services/wsprd/ipc/stream"
-	"veyron/services/wsprd/lib"
 	"veyron2"
-	"veyron2/ipc"
 	"veyron2/rt"
 	"veyron2/vlog"
-	"veyron2/vom"
 )
 
 const (
@@ -45,47 +41,18 @@
 }
 
 type WSPR struct {
+	mu            sync.Mutex
 	tlsCert       *tls.Certificate
 	rt            veyron2.Runtime
 	logger        vlog.Logger
 	port          int
 	veyronProxyEP string
 	idManager     *identity.IDManager
+	pipes         map[*http.Request]*pipe
 }
 
 var logger vlog.Logger
 
-func (ctx WSPR) newClient() (ipc.Client, error) {
-	return ctx.rt.NewClient(veyron2.CallTimeout(ipc.NoTimeout))
-}
-
-func (ctx WSPR) startVeyronRequest(w lib.ClientWriter, msg *veyronRPC) (ipc.Call, error) {
-	// Issue request to the endpoint.
-	client, err := ctx.newClient()
-	if err != nil {
-		return nil, err
-	}
-	methodName := lib.UppercaseFirstCharacter(msg.Method)
-	clientCall, err := client.StartCall(ctx.rt.TODOContext(), msg.Name, methodName, msg.InArgs)
-
-	if err != nil {
-		return nil, fmt.Errorf("error starting call (name: %v, method: %v, args: %v): %v", msg.Name, methodName, msg.InArgs, err)
-	}
-
-	return clientCall, nil
-}
-
-func intToByteSlice(i int32) []byte {
-	rw := new(bytes.Buffer)
-	binary.Write(rw, binary.BigEndian, i)
-	buf := make([]byte, 4)
-	n, err := io.ReadFull(rw, buf)
-	if n != 4 || err != nil {
-		panic(fmt.Sprintf("Read less than 4 bytes: %d", n))
-	}
-	return buf[:n]
-}
-
 func (ctx WSPR) handleDebug(w http.ResponseWriter, r *http.Request) {
 	w.Header().Set("Content-Type", "text/html")
 	w.Write([]byte(`<html>
@@ -113,24 +80,15 @@
 	w.Header().Set("Access-Control-Allow-Origin", "*")
 }
 
-type outstandingStream struct {
-	stream stream.Sender
-	inType vom.Type
-}
-
-type wsMessage struct {
-	buf         []byte
-	messageType int
-}
-
 // Starts the proxy and listens for requests. This method is blocking.
 func (ctx WSPR) Run() {
 	http.HandleFunc("/debug", ctx.handleDebug)
 	http.Handle("/favicon.ico", http.NotFoundHandler())
 	http.HandleFunc("/ws", func(w http.ResponseWriter, r *http.Request) {
 		ctx.logger.VI(0).Info("Creating a new websocket")
-		pipe := &websocketPipe{ctx: &ctx}
-		pipe.start(w, r)
+		ctx.mu.Lock()
+		defer ctx.mu.Unlock()
+		ctx.pipes[r] = newPipe(w, r, &ctx, nil)
 	})
 	ctx.logger.VI(1).Infof("Listening on port %d.", ctx.port)
 	httpErr := http.ListenAndServe(fmt.Sprintf("127.0.0.1:%d", ctx.port), nil)
@@ -143,6 +101,12 @@
 	ctx.rt.Cleanup()
 }
 
+func (ctx WSPR) CleanUpPipe(req *http.Request) {
+	ctx.mu.Lock()
+	defer ctx.mu.Unlock()
+	delete(ctx.pipes, req)
+}
+
 // Creates a new WebSocket Proxy object.
 func NewWSPR(port int, veyronProxyEP string, opts ...veyron2.ROpt) *WSPR {
 	if veyronProxyEP == "" {
diff --git a/tools/build/impl/impl.go b/tools/build/impl/impl.go
new file mode 100644
index 0000000..9804101
--- /dev/null
+++ b/tools/build/impl/impl.go
@@ -0,0 +1,229 @@
+package impl
+
+import (
+	"fmt"
+	"go/build"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"veyron/lib/cmdline"
+
+	"veyron2/rt"
+	vbuild "veyron2/services/mgmt/build"
+)
+
+var (
+	flagArch string
+	flagOS   string
+)
+
+func init() {
+	cmdBuild.Flags.StringVar(&flagArch, "arch", runtime.GOARCH, "Target architecture.")
+	cmdBuild.Flags.StringVar(&flagOS, "os", runtime.GOOS, "Target operating system.")
+}
+
+var cmdRoot = &cmdline.Command{
+	Name:     "build",
+	Short:    "Command-line tool for interacting with the veyron build server",
+	Long:     "Command-line tool for interacting with the veyron build server.",
+	Children: []*cmdline.Command{cmdBuild},
+}
+
+// Root returns a command that represents the root of the veyron tool.
+func Root() *cmdline.Command {
+	return cmdRoot
+}
+
+var cmdBuild = &cmdline.Command{
+	Run:   runBuild,
+	Name:  "build",
+	Short: "Build veyron Go packages",
+	Long: `
+Build veyron Go packages using a remote build server. The command
+collects all source code files that are not part of the Go standard
+library that the target packages depend on, sends them to a build
+server, and receives the built binaries.
+`,
+	ArgsName: "<name> <packages>",
+	ArgsLong: `
+<name> is a veyron object name of a build server
+<packages> is a list of packages to build, specified as arguments for
+each command. The format is similar to the go tool.  In its simplest
+form each package is an import path; e.g. "veyron/tools/build". A
+package that ends with "..." does a wildcard match against all
+packages with that prefix.
+`,
+}
+
+// TODO(jsimsa): Add support for importing (and remotely building)
+// packages from multiple package source root GOPATH directories with
+// identical names.
+func importPackages(paths []string, pkgMap map[string]*build.Package) error {
+	for _, path := range paths {
+		recurse := false
+		if strings.HasSuffix(path, "...") {
+			recurse = true
+			path = strings.TrimSuffix(path, "...")
+		}
+		if _, exists := pkgMap[path]; !exists {
+			srcDir, mode := "", build.ImportMode(0)
+			pkg, err := build.Import(path, srcDir, mode)
+			if err != nil {
+				return fmt.Errorf("Import(%q,%q,%v) failed: %v", path, srcDir, mode, err)
+			}
+			if pkg.Goroot {
+				continue
+			}
+			pkgMap[path] = pkg
+			if err := importPackages(pkg.Imports, pkgMap); err != nil {
+				return err
+			}
+		}
+		if recurse {
+			pkg := pkgMap[path]
+			fis, err := ioutil.ReadDir(pkg.Dir)
+			if err != nil {
+				return fmt.Errorf("ReadDir(%v) failed: %v", pkg.Dir, err)
+			}
+			for _, fi := range fis {
+				if fi.IsDir() {
+					subPath := filepath.Join(path, fi.Name(), "...")
+					if err := importPackages([]string{subPath}, pkgMap); err != nil {
+						return err
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func getSources(pkgMap map[string]*build.Package, cancel <-chan struct{}, errchan chan<- error) <-chan vbuild.File {
+	sources := make(chan vbuild.File)
+	go func() {
+		defer close(sources)
+		for _, pkg := range pkgMap {
+			for _, files := range [][]string{pkg.CFiles, pkg.CgoFiles, pkg.GoFiles, pkg.SFiles} {
+				for _, file := range files {
+					path := filepath.Join(pkg.Dir, file)
+					bytes, err := ioutil.ReadFile(path)
+					if err != nil {
+						errchan <- fmt.Errorf("ReadFile(%v) failed: %v", path, err)
+						return
+					}
+					select {
+					case sources <- vbuild.File{Contents: bytes, Name: filepath.Join(pkg.ImportPath, file)}:
+					case <-cancel:
+						errchan <- nil
+						return
+					}
+				}
+			}
+		}
+		errchan <- nil
+	}()
+	return sources
+}
+
+func invokeBuild(name string, sources <-chan vbuild.File, cancel <-chan struct{}, errchan chan<- error) <-chan vbuild.File {
+	binaries := make(chan vbuild.File)
+	go func() {
+		defer close(binaries)
+		rt.Init()
+		client, err := vbuild.BindBuilder(name)
+		if err != nil {
+			errchan <- fmt.Errorf("BindBuilder(%v) failed: %v", name, err)
+			return
+		}
+		stream, err := client.Build(rt.R().NewContext(), vbuild.Architecture(flagArch), vbuild.OperatingSystem(flagOS))
+		if err != nil {
+			errchan <- fmt.Errorf("Build() failed: %v", err)
+			return
+		}
+		sender := stream.SendStream()
+		for source := range sources {
+			if err := sender.Send(source); err != nil {
+				stream.Cancel()
+				errchan <- fmt.Errorf("Send() failed: %v", err)
+				return
+			}
+		}
+		if err := sender.Close(); err != nil {
+			errchan <- fmt.Errorf("Close() failed: %v", err)
+			return
+		}
+		iterator := stream.RecvStream()
+		for iterator.Advance() {
+			select {
+			case binaries <- iterator.Value():
+			case <-cancel:
+				errchan <- nil
+				return
+			}
+		}
+		if err := iterator.Err(); err != nil {
+			errchan <- fmt.Errorf("Advance() failed: %v", err)
+			return
+		}
+		if _, err := stream.Finish(); err != nil {
+			errchan <- fmt.Errorf("Finish() failed: %v", err)
+			return
+		}
+		errchan <- nil
+	}()
+	return binaries
+}
+
+func saveBinaries(prefix string, binaries <-chan vbuild.File, cancel chan<- struct{}, errchan chan<- error) {
+	go func() {
+		for binary := range binaries {
+			path, perm := filepath.Join(prefix, filepath.Base(binary.Name)), os.FileMode(0755)
+			if err := ioutil.WriteFile(path, binary.Contents, perm); err != nil {
+				errchan <- fmt.Errorf("WriteFile(%v, %v) failed: %v", path, perm, err)
+				return
+			}
+			fmt.Printf("Generated binary %v\n", path)
+		}
+		errchan <- nil
+	}()
+}
+
+// runBuild identifies the source files needed to build the packages
+// specified on command-line and then creates a pipeline that
+// concurrently 1) reads the source files, 2) sends them to the build
+// server and receives binaries from the build server, and 3) writes
+// the binaries out to the disk.
+func runBuild(command *cmdline.Command, args []string) error {
+	name, paths := args[0], args[1:]
+	pkgMap := map[string]*build.Package{}
+	if err := importPackages(paths, pkgMap); err != nil {
+		return err
+	}
+	cancel, errchan := make(chan struct{}), make(chan error)
+	defer close(errchan)
+	// Start all stages of the pipeline.
+	sources := getSources(pkgMap, cancel, errchan)
+	binaries := invokeBuild(name, sources, cancel, errchan)
+	saveBinaries(os.TempDir(), binaries, cancel, errchan)
+	// Wait for all stages of the pipeline to terminate.
+	cancelled, errors, numStages := false, []error{}, 3
+	for i := 0; i < numStages; i++ {
+		if err := <-errchan; err != nil {
+			errors = append(errors, err)
+			if !cancelled {
+				close(cancel)
+				cancelled = true
+			}
+		}
+	}
+	for _, err := range errors {
+		fmt.Fprintf(command.Stderr(), "%v\n", err)
+	}
+	if len(errors) != 0 {
+		return fmt.Errorf("build failed")
+	}
+	return nil
+}
diff --git a/tools/build/impl/impl_test.go b/tools/build/impl/impl_test.go
new file mode 100644
index 0000000..32a987f
--- /dev/null
+++ b/tools/build/impl/impl_test.go
@@ -0,0 +1,71 @@
+package impl_test
+
+import (
+	"bytes"
+	"strings"
+	"testing"
+
+	"veyron/tools/build/impl"
+
+	"veyron2/ipc"
+	"veyron2/naming"
+	"veyron2/rt"
+	"veyron2/services/mgmt/binary"
+	"veyron2/services/mgmt/build"
+	"veyron2/vlog"
+)
+
+type mock struct{}
+
+func (mock) Build(_ ipc.ServerContext, arch build.Architecture, opsys build.OperatingSystem, _ build.BuilderServiceBuildStream) ([]byte, error) {
+	vlog.VI(2).Infof("Build(%v, %v) was called", arch, opsys)
+	return nil, nil
+}
+
+func (mock) Describe(_ ipc.ServerContext, name string) (binary.Description, error) {
+	vlog.VI(2).Infof("Describe(%v) was called", name)
+	return binary.Description{}, nil
+}
+
+type dispatcher struct{}
+
+func startServer(t *testing.T) (ipc.Server, naming.Endpoint) {
+	server, err := rt.R().NewServer()
+	if err != nil {
+		t.Fatalf("NewServer failed: %v", err)
+	}
+	protocol, address := "tcp", "localhost:0"
+	endpoint, err := server.Listen(protocol, address)
+	if err != nil {
+		t.Fatalf("Listen(%v, %v) failed: %v", protocol, address, err)
+	}
+	unpublished := ""
+	if err := server.Serve(unpublished, ipc.SoloDispatcher(build.NewServerBuilder(&mock{}), nil)); err != nil {
+		t.Fatalf("Serve(%v) failed: %v", unpublished, err)
+	}
+	return server, endpoint
+}
+
+func stopServer(t *testing.T, server ipc.Server) {
+	if err := server.Stop(); err != nil {
+		t.Errorf("Stop() failed: %v", err)
+	}
+}
+
+func TestBuildClient(t *testing.T) {
+	rt.Init()
+	server, endpoint := startServer(t)
+	defer stopServer(t, server)
+
+	cmd := impl.Root()
+	var stdout, stderr bytes.Buffer
+	cmd.Init(nil, &stdout, &stderr)
+
+	// Test the 'Build' command.
+	if err := cmd.Execute([]string{"build", naming.JoinAddressName(endpoint.String(), ""), "veyron/tools/build"}); err != nil {
+		t.Fatalf("%v", err)
+	}
+	if expected, got := "", strings.TrimSpace(stdout.String()); got != expected {
+		t.Errorf("Unexpected output from build: got %q, expected %q", got, expected)
+	}
+}
diff --git a/tools/build/main.go b/tools/build/main.go
new file mode 100644
index 0000000..4e872f5
--- /dev/null
+++ b/tools/build/main.go
@@ -0,0 +1,14 @@
+package main
+
+import (
+	"veyron/tools/build/impl"
+
+	"veyron2/rt"
+)
+
+func main() {
+	r := rt.Init()
+	defer r.Cleanup()
+
+	impl.Root().Main()
+}
diff --git a/tools/findunusedport/main.go b/tools/findunusedport/main.go
new file mode 100644
index 0000000..9d11884
--- /dev/null
+++ b/tools/findunusedport/main.go
@@ -0,0 +1,31 @@
+package main
+
+// findunusedport finds a random unused TCP port in the range 1k to 64k and prints it to standard out.
+
+import (
+	"fmt"
+	"math/rand"
+	"os"
+	"syscall"
+
+	"veyron2/vlog"
+)
+
+func main() {
+	rand.Seed(int64(os.Getpid()))
+	for i := 0; i < 1000; i++ {
+		port := 1024 + rand.Int31n(64512)
+		fd, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_TCP)
+		if err != nil {
+			continue
+		}
+		if err := syscall.Bind(fd, &syscall.SockaddrInet4{Port: int(port)}); err != nil {
+			syscall.Close(fd) // close the socket before retrying to avoid leaking fds
+			continue
+		}
+		syscall.Close(fd)
+		fmt.Println(port)
+		return
+	}
+	vlog.Fatal("can't find unused port")
+}
diff --git a/tools/qsh/impl/impl.go b/tools/qsh/impl/impl.go
new file mode 100644
index 0000000..f85b6f9
--- /dev/null
+++ b/tools/qsh/impl/impl.go
@@ -0,0 +1,79 @@
+package impl
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"sort"
+
+	"veyron2/query"
+	"veyron2/rt"
+	"veyron2/storage"
+	"veyron2/storage/vstore"
+
+	// TODO(rjkroege@google.com): Replace with the appropriate vom2 functionality when available.
+	_ "veyron/services/store/typeregistryhack"
+)
+
+func indenter(w io.Writer, indent int) {
+	for i := 0; i < indent; i++ {
+		fmt.Fprintf(w, "\t")
+	}
+}
+
+// Prints a single QueryResult to the provided io.Writer.
+func printResult(qr storage.QueryResult, w io.Writer, indent int) {
+	// TODO(rjkroege@google.com): Consider permitting the user to provide a Go template to format output.
+	if v := qr.Value(); v != nil {
+		indenter(w, indent)
+		fmt.Fprintf(w, "%s: %#v\n", qr.Name(), v)
+	} else {
+		// Force fields to be consistently ordered.
+		fields := qr.Fields()
+		names := make([]string, 0, len(fields))
+		for k, _ := range fields {
+			names = append(names, k)
+		}
+		sort.Strings(names)
+
+		indenter(w, indent)
+		fmt.Fprintf(w, "%s: map[string]interface {}{\n", qr.Name())
+		for _, k := range names {
+			f := fields[k]
+			switch v := f.(type) {
+			case storage.QueryStream:
+				indenter(w, indent+1)
+				fmt.Fprintf(w, "%s: {\n", k)
+				printStream(v, w, indent+2)
+				indenter(w, indent+1)
+				fmt.Fprintf(w, "},\n")
+			default:
+				indenter(w, indent+1)
+				fmt.Fprintf(w, "\"%s\":%#v,\n", k, v)
+			}
+		}
+		indenter(w, indent)
+		fmt.Fprintf(w, "},\n")
+	}
+}
+
+func printStream(qs storage.QueryStream, w io.Writer, indent int) error {
+	for qs.Advance() {
+		printResult(qs.Value(), w, indent)
+	}
+	if err := qs.Err(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func Runquery(storeName, queryString string) error {
+	ctx := rt.R().TODOContext()
+
+	store, err := vstore.New(storeName)
+	if err != nil {
+		return err
+	}
+	defer store.Close()
+	return printStream(store.BindObject("").Query(ctx, query.Query{queryString}), os.Stdout, 0)
+}
diff --git a/tools/qsh/impl/impl_test.go b/tools/qsh/impl/impl_test.go
new file mode 100644
index 0000000..8419b95
--- /dev/null
+++ b/tools/qsh/impl/impl_test.go
@@ -0,0 +1,132 @@
+package impl
+
+import (
+	"bytes"
+	"testing"
+
+	"veyron2/storage"
+)
+
+type mockQueryResult struct {
+	name   string
+	value  interface{}
+	fields map[string]interface{}
+}
+
+func (mqr mockQueryResult) Name() string {
+	return mqr.name
+}
+
+func (mqr mockQueryResult) Value() interface{} {
+	return mqr.value
+}
+
+func (mqr mockQueryResult) Fields() map[string]interface{} {
+	return mqr.fields
+}
+
+type mockQueryStream struct {
+	index   int
+	results []mockQueryResult
+	error   error
+}
+
+func (mqs *mockQueryStream) Advance() bool {
+	if mqs.error != nil {
+		return false
+	}
+	// index starts at -1, so the first Advance moves to element 0.
+	mqs.index++
+	if mqs.index >= len(mqs.results) {
+		return false
+	}
+	return true
+}
+
+func (mqs *mockQueryStream) Value() storage.QueryResult {
+	return mqs.results[mqs.index]
+}
+
+func (mqs *mockQueryStream) Err() error {
+	return mqs.error
+}
+
+func (mqs *mockQueryStream) Cancel() {
+	mqs.index = len(mqs.results) + 1
+}
+
+type testCase struct {
+	result         mockQueryResult
+	expectedOutput string
+}
+
+const (
+	result3Out = `result3: map[string]interface {}{
+	qs: {
+		resultNested1: 10
+		resultNested2: 11
+	},
+},
+`
+)
+
+func TestPrintResult(t *testing.T) {
+	tests := []testCase{
+		{
+			result: mockQueryResult{
+				name:   "result1",
+				value:  10,
+				fields: nil,
+			},
+			expectedOutput: "result1: 10\n",
+		},
+
+		{
+			result: mockQueryResult{
+				name:   "result2",
+				value:  nil,
+				fields: map[string]interface{}{"a": 1, "b": 2},
+			},
+			expectedOutput: `result2: map[string]interface {}{
+	"a":1,
+	"b":2,
+},
+`,
+		},
+
+		{
+			result: mockQueryResult{
+				name:  "result3",
+				value: nil,
+				fields: map[string]interface{}{
+					"qs": storage.QueryStream(&mockQueryStream{
+						index: -1,
+						error: nil,
+						results: []mockQueryResult{
+							mockQueryResult{
+								name:   "resultNested1",
+								value:  10,
+								fields: nil,
+							},
+							mockQueryResult{
+								name:   "resultNested2",
+								value:  11,
+								fields: nil,
+							},
+						},
+					}),
+				},
+			},
+			expectedOutput: result3Out,
+		},
+	}
+
+	for _, d := range tests {
+		var b bytes.Buffer
+		printResult(d.result, &b, 0)
+
+		if got, want := b.String(), d.expectedOutput; got != want {
+			t.Errorf("got <%s>, want <%s>", got, want)
+		}
+	}
+}
diff --git a/tools/qsh/main.go b/tools/qsh/main.go
new file mode 100644
index 0000000..feb52c3
--- /dev/null
+++ b/tools/qsh/main.go
@@ -0,0 +1,44 @@
+package main
+
+import (
+	"veyron/tools/qsh/impl"
+
+	"flag"
+	"log"
+	"os"
+
+	"veyron2/rt"
+)
+
+var flagStoreName = flag.String("targetstore", "", "Store object name")
+
+const usage = `
+Synopsis: qsh [--targetstore=<store in veyron namespace>] query...
+
+Runs each given query against the specified Veyron store instance. If
+no target store is specified on the command line, qsh expects the
+environment variable VEYRON_STORE to specify the store to query.
+`
+
+func main() {
+	rt.Init()
+
+	// TODO(rjkroege@google.com): Handle ^c nicely.
+	flag.Parse()
+	queryStringArgs := flag.Args()
+
+	// Command line overrides.
+	storeName := *flagStoreName
+	if storeName == "" {
+		storeName = os.ExpandEnv("${VEYRON_STORE}")
+	}
+
+	if storeName == "" {
+		log.Fatalf("qsh: No store specified\n" + usage)
+	}
+
+	err := impl.Runquery(storeName, queryStringArgs[0])
+	if err != nil {
+		log.Printf("qsh: When attempting query: %q experienced an error: %s", queryStringArgs[0], err.Error())
+	}
+}