Merge ""x/ref": Maintain a cache of IBE encryptions performed during connection setup"
diff --git a/cmd/principal/principal_v23_test.go b/cmd/principal/principal_v23_test.go
index 65d79a8..3835a0c 100644
--- a/cmd/principal/principal_v23_test.go
+++ b/cmd/principal/principal_v23_test.go
@@ -12,17 +12,23 @@
 	"path/filepath"
 	"regexp"
 	"strings"
+	"testing"
+	"time"
 
 	"v.io/x/ref"
-	"v.io/x/ref/test/v23tests"
+	"v.io/x/ref/lib/v23test"
+	"v.io/x/ref/test/expect"
 )
 
-//go:generate jiri test generate
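+// withCreds configures cmd to use the credentials in dir by setting the
+// ref.EnvCredentials environment variable, and returns cmd for chaining.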
+func withCreds(dir string, cmd *v23test.Cmd) *v23test.Cmd {
+	cmd.Vars[ref.EnvCredentials] = dir
+	return cmd
+}
 
-// redirect redirects the stdout of the given invocation to the file at the
-// given path.
-func redirect(t *v23tests.T, inv *v23tests.Invocation, path string) {
-	if err := ioutil.WriteFile(path, []byte(inv.Output()), 0600); err != nil {
+// redirect writes the combined output (stdout and stderr) of the given
+// command to the file at the given path.
+func redirect(t *testing.T, cmd *v23test.Cmd, path string) {
+	if err := ioutil.WriteFile(path, []byte(cmd.CombinedOutput()), 0600); err != nil {
 		t.Fatalf("WriteFile(%q) failed: %v\n", path, err)
 	}
 }
@@ -41,19 +47,21 @@
 	return input
 }
 
-func V23TestBlessSelf(t *v23tests.T) {
+func TestV23BlessSelf(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+
 	var (
-		outputDir         = t.NewTempDir("")
+		outputDir         = sh.MakeTempDir()
 		aliceDir          = filepath.Join(outputDir, "alice")
 		aliceBlessingFile = filepath.Join(outputDir, "aliceself")
 	)
 
-	bin := t.BuildGoPkg("v.io/x/ref/cmd/principal")
-	bin.Run("create", aliceDir, "alice")
+	bin := sh.JiriBuildGoPkg("v.io/x/ref/cmd/principal")
+	sh.Cmd(bin, "create", aliceDir, "alice").Run()
 
-	bin = bin.WithEnv(credEnv(aliceDir))
-	redirect(t, bin.Start("blessself", "alicereborn"), aliceBlessingFile)
-	got := removePublicKeys(bin.Start("dumpblessings", aliceBlessingFile).Output())
+	redirect(t, withCreds(aliceDir, sh.Cmd(bin, "blessself", "alicereborn")), aliceBlessingFile)
+	got := removePublicKeys(withCreds(aliceDir, sh.Cmd(bin, "dumpblessings", aliceBlessingFile)).CombinedOutput())
 	want := `Blessings          : alicereborn
 PublicKey          : XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX
 Certificate chains : 1
@@ -65,10 +73,13 @@
 	}
 }
 
-func V23TestStore(t *v23tests.T) {
+func TestV23Store(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+
 	var (
-		outputDir   = t.NewTempDir("")
-		bin         = t.BuildGoPkg("v.io/x/ref/cmd/principal")
+		outputDir   = sh.MakeTempDir()
+		bin         = sh.JiriBuildGoPkg("v.io/x/ref/cmd/principal")
 		aliceDir    = filepath.Join(outputDir, "alice")
 		aliceFriend = filepath.Join(outputDir, "alice.bless")
 		bobDir      = filepath.Join(outputDir, "bob")
@@ -76,18 +87,17 @@
 	)
 
 	// Create two principals: alice and bob.
-	bin.Start("create", aliceDir, "alice").WaitOrDie(os.Stdout, os.Stderr)
-	bin.Start("create", bobDir, "bob").WaitOrDie(os.Stdout, os.Stderr)
+	sh.Cmd(bin, "create", aliceDir, "alice").Run()
+	sh.Cmd(bin, "create", bobDir, "bob").Run()
 
 	// Bless Bob with Alice's principal.
-	blessEnv := credEnv(aliceDir)
-	redirect(t, bin.WithEnv(blessEnv).Start("bless", "--for=1m", bobDir, "friend"), aliceFriend)
+	redirect(t, withCreds(aliceDir, sh.Cmd(bin, "bless", "--for=1m", bobDir, "friend")), aliceFriend)
 
 	// Run store forpeer on bob.
-	bin.Start("--v23.credentials="+bobDir, "set", "forpeer", aliceFriend, "alice").WaitOrDie(os.Stdout, os.Stderr)
-	redirect(t, bin.WithEnv(blessEnv).Start("--v23.credentials="+bobDir, "get", "forpeer", "alice:server"), bobForPeer)
+	sh.Cmd(bin, "--v23.credentials="+bobDir, "set", "forpeer", aliceFriend, "alice").Run()
+	redirect(t, withCreds(aliceDir, sh.Cmd(bin, "--v23.credentials="+bobDir, "get", "forpeer", "alice:server")), bobForPeer)
 
-	got := removeCaveats(removePublicKeys(bin.Start("dumpblessings", bobForPeer).Output()))
+	got := removeCaveats(removePublicKeys(sh.Cmd(bin, "dumpblessings", bobForPeer).CombinedOutput()))
 	want := `Blessings          : bob,alice:friend
 PublicKey          : XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX
 Certificate chains : 2
@@ -103,7 +113,7 @@
 	}
 
 	// Test the names flag.
-	got = bin.WithEnv(blessEnv).Start("--v23.credentials="+bobDir, "get", "forpeer", "--names", "alice:server").Output()
+	got = withCreds(aliceDir, sh.Cmd(bin, "--v23.credentials="+bobDir, "get", "forpeer", "--names", "alice:server")).CombinedOutput()
 	want = `bob
 alice:friend
 `
@@ -112,32 +122,34 @@
 	}
 
 	// Test the rootkey flag. In particular alice:friend's rootkey should be equal to alice's publickey.
-	got = bin.WithEnv(blessEnv).Start("--v23.credentials="+bobDir, "get", "forpeer", "--rootkey", "alice:friend", "alice:server").Output()
-	want = bin.WithEnv(blessEnv).Start("get", "publickey", "--pretty").Output()
+	got = withCreds(aliceDir, sh.Cmd(bin, "--v23.credentials="+bobDir, "get", "forpeer", "--rootkey", "alice:friend", "alice:server")).CombinedOutput()
+	want = withCreds(aliceDir, sh.Cmd(bin, "get", "publickey", "--pretty")).CombinedOutput()
 	if got != want {
 		t.Errorf("unexpected output, got %s, want %s", got, want)
 	}
 
 	// Test the caveats flag.
-	got = bin.WithEnv(blessEnv).Start("--v23.credentials="+bobDir, "get", "forpeer", "--caveats", "alice:friend", "alice:server").Output()
+	got = withCreds(aliceDir, sh.Cmd(bin, "--v23.credentials="+bobDir, "get", "forpeer", "--caveats", "alice:friend", "alice:server")).CombinedOutput()
 	want = "Expires at"
 	if !strings.HasPrefix(got, want) {
 		t.Errorf("unexpected output, got %s, want %s", got, want)
 	}
 }
 
-func V23TestDump(t *v23tests.T) {
+func TestV23Dump(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+
 	var (
-		outputDir       = t.NewTempDir("")
-		bin             = t.BuildGoPkg("v.io/x/ref/cmd/principal")
+		outputDir       = sh.MakeTempDir()
+		bin             = sh.JiriBuildGoPkg("v.io/x/ref/cmd/principal")
 		aliceDir        = filepath.Join(outputDir, "alice")
 		aliceExpiredDir = filepath.Join(outputDir, "alice-expired")
 	)
 
-	bin.Start("create", aliceDir, "alice").WaitOrDie(os.Stdout, os.Stderr)
+	sh.Cmd(bin, "create", aliceDir, "alice").Run()
 
-	blessEnv := credEnv(aliceDir)
-	got := removePublicKeys(bin.WithEnv(blessEnv).Start("dump").Output())
+	got := removePublicKeys(withCreds(aliceDir, sh.Cmd(bin, "dump")).CombinedOutput())
 	want := `Public key : XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX
 Default Blessings : alice
 ---------------- BlessingStore ----------------
@@ -152,15 +164,14 @@
 		t.Fatalf("unexpected output, got\n%s, wanted\n%s", got, want)
 	}
 
-	got = bin.WithEnv(blessEnv).Start("dump", "-s").Output()
+	got = withCreds(aliceDir, sh.Cmd(bin, "dump", "-s")).CombinedOutput()
 	want = "alice\n"
 	if want != got {
 		t.Fatalf("unexpected output, got\n%s, wanted\n%s", got, want)
 	}
 
-	bin.Start("--v23.credentials="+aliceDir, "fork", "--for", "-1h", aliceExpiredDir, "expired").WaitOrDie(os.Stdout, os.Stderr)
-	blessEnv = credEnv(aliceExpiredDir)
-	got = removePublicKeys(bin.WithEnv(blessEnv).Start("dump").Output())
+	sh.Cmd(bin, "--v23.credentials="+aliceDir, "fork", "--for", "-1h", aliceExpiredDir, "expired").Run()
+	got = removePublicKeys(withCreds(aliceExpiredDir, sh.Cmd(bin, "dump")).CombinedOutput())
 	want = `Public key : XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX
 Default Blessings : alice:expired [EXPIRED]
 ---------------- BlessingStore ----------------
@@ -175,24 +186,26 @@
 		t.Fatalf("unexpected output, got\n%s, wanted\n%s", got, want)
 	}
 
-	got = bin.WithEnv(blessEnv).Start("dump", "-s").Output()
+	got = withCreds(aliceExpiredDir, sh.Cmd(bin, "dump", "-s")).CombinedOutput()
 	want = "alice:expired [EXPIRED]\n"
 	if want != got {
 		t.Fatalf("unexpected output, got\n%s, wanted\n%s", got, want)
 	}
 }
 
-func V23TestGetRecognizedRoots(t *v23tests.T) {
+func TestV23GetRecognizedRoots(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+
 	var (
-		outputDir = t.NewTempDir("")
-		bin       = t.BuildGoPkg("v.io/x/ref/cmd/principal")
+		outputDir = sh.MakeTempDir()
+		bin       = sh.JiriBuildGoPkg("v.io/x/ref/cmd/principal")
 		aliceDir  = filepath.Join(outputDir, "alice")
 	)
 
-	bin.Start("create", aliceDir, "alice").WaitOrDie(os.Stdout, os.Stderr)
+	sh.Cmd(bin, "create", aliceDir, "alice").Run()
 
-	blessEnv := credEnv(aliceDir)
-	got := removePublicKeys(bin.WithEnv(blessEnv).Start("get", "recognizedroots").Output())
+	got := removePublicKeys(withCreds(aliceDir, sh.Cmd(bin, "get", "recognizedroots")).CombinedOutput())
 	want := `Public key                                        Pattern
 XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX   [alice]
 `
@@ -201,17 +214,19 @@
 	}
 }
 
-func V23TestGetPeermap(t *v23tests.T) {
+func TestV23GetPeermap(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+
 	var (
-		outputDir = t.NewTempDir("")
-		bin       = t.BuildGoPkg("v.io/x/ref/cmd/principal")
+		outputDir = sh.MakeTempDir()
+		bin       = sh.JiriBuildGoPkg("v.io/x/ref/cmd/principal")
 		aliceDir  = filepath.Join(outputDir, "alice")
 	)
 
-	bin.Start("create", aliceDir, "alice").WaitOrDie(os.Stdout, os.Stderr)
+	sh.Cmd(bin, "create", aliceDir, "alice").Run()
 
-	blessEnv := credEnv(aliceDir)
-	got := bin.WithEnv(blessEnv).Start("get", "peermap").Output()
+	got := withCreds(aliceDir, sh.Cmd(bin, "get", "peermap")).CombinedOutput()
 	want := `Default Blessings                alice
 Peer pattern                     Blessings
 ...                              alice
@@ -233,15 +248,18 @@
 //
 // In that case, this method would return:
 // { "--remote-key=<some_public_key>", "--remote-token=<some_token>", "extensionfoo"}
-func blessArgsFromRecvBlessings(inv *v23tests.Invocation) []string {
-	cmd := inv.ExpectSetEventuallyRE("(^principal bless .*$)")[0][0]
+func blessArgsFromRecvBlessings(s *expect.Session) []string {
+	cmd := s.ExpectSetEventuallyRE("(^principal bless .*$)")[0][0]
 	return strings.Split(cmd, " ")[2:]
 }
 
-func V23TestRecvBlessings(t *v23tests.T) {
+func TestV23RecvBlessings(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+
 	var (
-		outputDir    = t.NewTempDir("")
-		bin          = t.BuildGoPkg("v.io/x/ref/cmd/principal")
+		outputDir    = sh.MakeTempDir()
+		bin          = sh.JiriBuildGoPkg("v.io/x/ref/cmd/principal")
 		aliceDir     = filepath.Join(outputDir, "alice")
 		bobDir       = filepath.Join(outputDir, "bob")
 		carolDir     = filepath.Join(outputDir, "carol")
@@ -249,74 +267,85 @@
 	)
 
 	// Generate principals
-	bin.Start("create", aliceDir, "alice").WaitOrDie(os.Stdout, os.Stderr)
-	bin.Start("create", bobDir, "bob").WaitOrDie(os.Stdout, os.Stderr)
-	bin.Start("create", carolDir, "carol").WaitOrDie(os.Stdout, os.Stderr)
+	sh.Cmd(bin, "create", aliceDir, "alice").Run()
+	sh.Cmd(bin, "create", bobDir, "bob").Run()
+	sh.Cmd(bin, "create", carolDir, "carol").Run()
 
 	// Run recvblessings on carol, and have alice send blessings over
 	// (blessings received must be set as default and shareable with all peers).
 	var args []string
 	{
-		inv := bin.Start("--v23.credentials="+carolDir, "--v23.tcp.address=127.0.0.1:0", "recvblessings")
-		args = append([]string{"bless", "--require-caveats=false"}, blessArgsFromRecvBlessings(inv)...)
+		cmd := sh.Cmd(bin, "--v23.credentials="+carolDir, "--v23.tcp.address=127.0.0.1:0", "recvblessings")
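+		// Attach an expect session to the command's stdout so we can scrape
+		// the suggested "principal bless" invocation from recvblessings.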
+		session := expect.NewSession(t, cmd.StdoutPipe(), time.Minute)
+		cmd.Start()
+		args = append([]string{"bless", "--require-caveats=false"}, blessArgsFromRecvBlessings(session)...)
 		// Use the "friend:carol" extension
 		args = append(args, "friend:carol")
 	}
-	bin.WithEnv(credEnv(aliceDir)).Start(args...).WaitOrDie(os.Stdout, os.Stderr)
+	withCreds(aliceDir, sh.Cmd(bin, args...)).Run()
 
 	// Run recvblessings on carol, and have alice send blessings over
 	// (blessings received must be set as shareable with peers matching 'alice:...'.)
 	{
-		inv := bin.Start("--v23.credentials="+carolDir, "--v23.tcp.address=127.0.0.1:0", "recvblessings", "--for-peer=alice", "--set-default=false")
+		cmd := sh.Cmd(bin, "--v23.credentials="+carolDir, "--v23.tcp.address=127.0.0.1:0", "recvblessings", "--for-peer=alice", "--set-default=false")
+		session := expect.NewSession(t, cmd.StdoutPipe(), time.Minute)
+		cmd.Start()
 		// recvblessings suggests a random extension, find the extension and replace it with friend:carol:foralice.
-		args = append([]string{"bless", "--require-caveats=false"}, blessArgsFromRecvBlessings(inv)...)
+		args = append([]string{"bless", "--require-caveats=false"}, blessArgsFromRecvBlessings(session)...)
 		args = append(args, "friend:carol:foralice")
 	}
-	bin.WithEnv(credEnv(aliceDir)).Start(args...).WaitOrDie(os.Stdout, os.Stderr)
+	withCreds(aliceDir, sh.Cmd(bin, args...)).Run()
 
 	// Run recvblessings on carol with the --remote-arg-file flag, and have bob send blessings over with the --remote-arg-file flag.
 	{
-		inv := bin.Start("--v23.credentials="+carolDir, "--v23.tcp.address=127.0.0.1:0", "recvblessings", "--for-peer=bob", "--set-default=false", "--remote-arg-file="+bobBlessFile)
+		cmd := sh.Cmd(bin, "--v23.credentials="+carolDir, "--v23.tcp.address=127.0.0.1:0", "recvblessings", "--for-peer=bob", "--set-default=false", "--remote-arg-file="+bobBlessFile)
+		session := expect.NewSession(t, cmd.StdoutPipe(), time.Minute)
+		cmd.Start()
 		// recvblessings suggests a random extension, use friend:carol:forbob instead.
-		args = append([]string{"bless", "--require-caveats=false"}, blessArgsFromRecvBlessings(inv)...)
+		args = append([]string{"bless", "--require-caveats=false"}, blessArgsFromRecvBlessings(session)...)
 		args = append(args, "friend:carol:forbob")
 	}
-	bin.WithEnv(credEnv(bobDir)).Start(args...).WaitOrDie(os.Stdout, os.Stderr)
+	withCreds(bobDir, sh.Cmd(bin, args...)).Run()
 
-	listenerInv := bin.Start("--v23.credentials="+carolDir, "--v23.tcp.address=127.0.0.1:0", "recvblessings", "--for-peer=alice:...", "--set-default=false", "--vmodule=*=2", "--logtostderr")
-
-	args = append([]string{"bless", "--require-caveats=false"}, blessArgsFromRecvBlessings(listenerInv)...)
+	cmd := sh.Cmd(bin, "--v23.credentials="+carolDir, "--v23.tcp.address=127.0.0.1:0", "recvblessings", "--for-peer=alice:...", "--set-default=false", "--vmodule=*=2", "--logtostderr")
+	session := expect.NewSession(t, cmd.StdoutPipe(), time.Minute)
+	cmd.Start()
+	args = append([]string{"bless", "--require-caveats=false"}, blessArgsFromRecvBlessings(session)...)
 	args = append(args, "willfail")
 
 	{
 		// Mucking around with remote-key should fail.
 		cpy := strings.Split(regexp.MustCompile("remote-key=").ReplaceAllString(strings.Join(args, " "), "remote-key=BAD"), " ")
-		var buf bytes.Buffer
-		if bin.WithEnv(credEnv(aliceDir)).Start(cpy...).Wait(os.Stdout, &buf) == nil {
+		cmd := withCreds(aliceDir, sh.Cmd(bin, cpy...))
+		cmd.ExitErrorIsOk = true
+		_, stderr := cmd.Output()
+		if cmd.Err == nil {
 			t.Fatalf("%v should have failed, but did not", cpy)
 		}
 
-		if want, got := "key mismatch", buf.String(); !strings.Contains(got, want) {
+		if want, got := "key mismatch", stderr; !strings.Contains(got, want) {
 			t.Fatalf("expected %q to be contained within\n%s\n, but was not", want, got)
 		}
 	}
 
 	{
-		var buf bytes.Buffer
 		// Mucking around with the token should fail.
 		cpy := strings.Split(regexp.MustCompile("remote-token=").ReplaceAllString(strings.Join(args, " "), "remote-token=BAD"), " ")
-		if bin.WithEnv(credEnv(aliceDir)).Start(cpy...).Wait(os.Stdout, &buf) == nil {
+		cmd := withCreds(aliceDir, sh.Cmd(bin, cpy...))
+		cmd.ExitErrorIsOk = true
+		_, stderr := cmd.Output()
+		if cmd.Err == nil {
 			t.Fatalf("%v should have failed, but did not", cpy)
 		}
 
-		if want, got := "blessings received from unexpected sender", buf.String(); !strings.Contains(got, want) {
+		if want, got := "blessings received from unexpected sender", stderr; !strings.Contains(got, want) {
 			t.Fatalf("expected %q to be contained within\n%s\n, but was not", want, got)
 		}
 	}
 
 	// Dump carol out, the only blessing that survives should be from the
 	// first "bless" command. (alice:friend:carol).
-	got := removePublicKeys(bin.Start("--v23.credentials="+carolDir, "dump").Output())
+	got := removePublicKeys(sh.Cmd(bin, "--v23.credentials="+carolDir, "dump").CombinedOutput())
 	want := `Public key : XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX
 Default Blessings : alice:friend:carol
 ---------------- BlessingStore ----------------
@@ -336,54 +365,60 @@
 	}
 }
 
-func V23TestRecvBlessingsInteractive(t *v23tests.T) {
+func TestV23RecvBlessingsInteractive(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+
 	var (
-		outputDir = t.NewTempDir("")
-		bin       = t.BuildGoPkg("v.io/x/ref/cmd/principal")
+		outputDir = sh.MakeTempDir()
+		bin       = sh.JiriBuildGoPkg("v.io/x/ref/cmd/principal")
 		aliceDir  = filepath.Join(outputDir, "alice")
 		bobDir    = filepath.Join(outputDir, "bob")
-		aliceBin  = bin.WithEnv(credEnv(aliceDir))
 	)
 
 	// Generate principals
-	bin.Start("create", aliceDir, "alice").WaitOrDie(os.Stdout, os.Stderr)
-	bin.Start("create", bobDir, "bob").WaitOrDie(os.Stdout, os.Stderr)
+	sh.Cmd(bin, "create", aliceDir, "alice").Run()
+	sh.Cmd(bin, "create", bobDir, "bob").Run()
 
 	// Run recvblessings on bob
-	recv := bin.Start("--v23.credentials="+bobDir, "--v23.tcp.address=127.0.0.1:0", "recvblessings")
-	args := blessArgsFromRecvBlessings(recv)
+	cmd := sh.Cmd(bin, "--v23.credentials="+bobDir, "--v23.tcp.address=127.0.0.1:0", "recvblessings")
+	session := expect.NewSession(t, cmd.StdoutPipe(), time.Minute)
+	cmd.Start()
+	args := blessArgsFromRecvBlessings(session)
 
 	// When running the exact command, must be prompted about caveats.
 	{
-		inv := aliceBin.Start(append([]string{"bless"}, args...)...)
-		inv.Expect("WARNING: No caveats provided")
+		cmd := withCreds(aliceDir, sh.Cmd(bin, append([]string{"bless"}, args...)...))
+		session := expect.NewSession(t, cmd.StdoutPipe(), time.Minute)
+		cmd.Stdin = bytes.NewBufferString("yeah\n")
+		cmd.ExitErrorIsOk = true
+		cmd.Start()
+		session.Expect("WARNING: No caveats provided")
 		// Saying something other than "yes" or "YES"
 		// should fail.
-		fmt.Fprintln(inv.Stdin(), "yeah")
-		if err := inv.Wait(os.Stdout, os.Stderr); err == nil {
+		if cmd.Wait(); cmd.Err == nil {
 			t.Fatalf("Expected principal bless to fail because the wrong input was provided")
 		}
 	}
 	// When agreeing to have no caveats, must specify an extension
 	{
-		inv := aliceBin.Start(append([]string{"bless"}, args...)...)
-		inv.Expect("WARNING: No caveats provided")
-		fmt.Fprintln(inv.Stdin(), "yes")
-		inv.CloseStdin()
-		if err := inv.Wait(os.Stdout, os.Stderr); err == nil {
-			t.Fatalf("Expected principal bless to fail because no extension was provided")
+		cmd := withCreds(aliceDir, sh.Cmd(bin, append([]string{"bless"}, args...)...))
+		session := expect.NewSession(t, cmd.StdoutPipe(), time.Minute)
+		cmd.Stdin = bytes.NewBufferString("yes\n")
+		cmd.ExitErrorIsOk = true
+		cmd.Start()
+		session.Expect("WARNING: No caveats provided")
+		if cmd.Wait(); cmd.Err == nil {
+			t.Fatalf("Expected principal bless to fail because no extension was provided")
 		}
 	}
 	// When providing both, the bless command should succeed.
 	{
-		inv := aliceBin.Start(append([]string{"bless"}, args...)...)
-		fmt.Fprintln(inv.Stdin(), "YES")
-		fmt.Fprintln(inv.Stdin(), "friend:bobby")
-		if err := inv.Wait(os.Stdout, os.Stderr); err != nil {
-			t.Fatal(err)
-		}
+		cmd := withCreds(aliceDir, sh.Cmd(bin, append([]string{"bless"}, args...)...))
+		cmd.Stdin = bytes.NewBufferString("YES\nfriend:bobby\n")
+		cmd.Run()
 	}
-	got := removePublicKeys(bin.Start("--v23.credentials="+bobDir, "dump").Output())
+	got := removePublicKeys(sh.Cmd(bin, "--v23.credentials="+bobDir, "dump").CombinedOutput())
 	want := `Public key : XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX
 Default Blessings : alice:friend:bobby
 ---------------- BlessingStore ----------------
@@ -400,10 +435,13 @@
 	}
 }
 
-func V23TestFork(t *v23tests.T) {
+func TestV23Fork(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+
 	var (
-		outputDir             = t.NewTempDir("")
-		bin                   = t.BuildGoPkg("v.io/x/ref/cmd/principal")
+		outputDir             = sh.MakeTempDir()
+		bin                   = sh.JiriBuildGoPkg("v.io/x/ref/cmd/principal")
 		aliceDir              = filepath.Join(outputDir, "alice")
 		alicePhoneDir         = filepath.Join(outputDir, "alice-phone")
 		alicePhoneCalendarDir = filepath.Join(outputDir, "alice-phone-calendar")
@@ -411,15 +449,15 @@
 	)
 
 	// Generate principals for alice.
-	bin.Start("create", aliceDir, "alice").WaitOrDie(os.Stdout, os.Stderr)
+	sh.Cmd(bin, "create", aliceDir, "alice").Run()
 
 	// Run fork to set up credentials for alice:phone that are
 	// blessed by alice under the extension "phone".
-	bin.Start("--v23.credentials="+aliceDir, "fork", "--for", "1h", alicePhoneDir, "phone").WaitOrDie(os.Stdout, os.Stderr)
+	sh.Cmd(bin, "--v23.credentials="+aliceDir, "fork", "--for", "1h", alicePhoneDir, "phone").Run()
 
 	// Dump alice-phone out, the only blessings it has must be from alice (alice:phone).
 	{
-		got := removePublicKeys(bin.Start("--v23.credentials="+alicePhoneDir, "dump").Output())
+		got := removePublicKeys(sh.Cmd(bin, "--v23.credentials="+alicePhoneDir, "dump").CombinedOutput())
 		want := `Public key : XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX
 Default Blessings : alice:phone
 ---------------- BlessingStore ----------------
@@ -436,8 +474,8 @@
 	}
 	// And it should have an expiry caveat
 	{
-		redirect(t, bin.Start("--v23.credentials", alicePhoneDir, "get", "default"), tmpfile)
-		got := removeCaveats(removePublicKeys(bin.Start("dumpblessings", tmpfile).Output()))
+		redirect(t, sh.Cmd(bin, "--v23.credentials", alicePhoneDir, "get", "default"), tmpfile)
+		got := removeCaveats(removePublicKeys(sh.Cmd(bin, "dumpblessings", tmpfile).CombinedOutput()))
 		want := `Blessings          : alice:phone
 PublicKey          : XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX
 Certificate chains : 1
@@ -453,9 +491,9 @@
 
 	// Run fork to set up credentials for alice:phone:calendar that are
 	// blessed by alice:phone under the extension "calendar".
-	bin.Start("--v23.credentials="+alicePhoneDir, "fork", "--for", "1h", alicePhoneCalendarDir, "calendar").WaitOrDie(os.Stdout, os.Stderr)
+	sh.Cmd(bin, "--v23.credentials="+alicePhoneDir, "fork", "--for", "1h", alicePhoneCalendarDir, "calendar").Run()
 	{
-		got := removePublicKeys(bin.Start("--v23.credentials="+alicePhoneCalendarDir, "dump").Output())
+		got := removePublicKeys(sh.Cmd(bin, "--v23.credentials="+alicePhoneCalendarDir, "dump").CombinedOutput())
 		want := `Public key : XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX
 Default Blessings : alice:phone:calendar
 ---------------- BlessingStore ----------------
@@ -471,8 +509,8 @@
 		}
 	}
 	{
-		redirect(t, bin.Start("--v23.credentials", alicePhoneCalendarDir, "get", "default"), tmpfile)
-		got := removeCaveats(removePublicKeys(bin.Start("dumpblessings", tmpfile).Output()))
+		redirect(t, sh.Cmd(bin, "--v23.credentials", alicePhoneCalendarDir, "get", "default"), tmpfile)
+		got := removeCaveats(removePublicKeys(sh.Cmd(bin, "dumpblessings", tmpfile).CombinedOutput()))
 		want := `Blessings          : alice:phone:calendar
 PublicKey          : XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX
 Certificate chains : 1
@@ -489,44 +527,51 @@
 	}
 }
 
-func V23TestCreate(t *v23tests.T) {
+func TestV23Create(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+
 	var (
-		outputDir = t.NewTempDir("")
-		bin       = t.BuildGoPkg("v.io/x/ref/cmd/principal")
+		outputDir = sh.MakeTempDir()
+		bin       = sh.JiriBuildGoPkg("v.io/x/ref/cmd/principal")
 		aliceDir  = filepath.Join(outputDir, "alice")
 	)
 
 	// Creating a principal should succeed the first time.
-	bin.Start("create", aliceDir, "alice").WaitOrDie(os.Stdout, os.Stderr)
+	sh.Cmd(bin, "create", aliceDir, "alice").Run()
 
 	// The second time should fail (the create command won't override an existing principal).
-	if bin.Start("create", aliceDir, "alice").Wait(os.Stdout, os.Stderr) == nil {
+	cmd := sh.Cmd(bin, "create", aliceDir, "alice")
+	cmd.ExitErrorIsOk = true
+	if cmd.Run(); cmd.Err == nil {
 		t.Fatalf("principal creation should have failed, but did not")
 	}
 
 	// If we specify -overwrite, it will.
-	bin.Start("create", "--overwrite", aliceDir, "alice").WaitOrDie(os.Stdout, os.Stderr)
+	sh.Cmd(bin, "create", "--overwrite", aliceDir, "alice").Run()
 }
 
-func V23TestCaveats(t *v23tests.T) {
+func TestV23Caveats(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+
 	var (
-		outputDir         = t.NewTempDir("")
+		outputDir         = sh.MakeTempDir()
 		aliceDir          = filepath.Join(outputDir, "alice")
 		aliceBlessingFile = filepath.Join(outputDir, "aliceself")
 	)
 
-	bin := t.BuildGoPkg("v.io/x/ref/cmd/principal")
-	bin.Start("create", aliceDir, "alice").WaitOrDie(os.Stdout, os.Stderr)
+	bin := sh.JiriBuildGoPkg("v.io/x/ref/cmd/principal")
+	sh.Cmd(bin, "create", aliceDir, "alice").Run()
 
-	bin = bin.WithEnv(credEnv(aliceDir))
 	args := []string{
 		"blessself",
 		"--caveat=\"v.io/v23/security\".MethodCaveat={\"method\"}",
 		"--caveat={{0x54,0xa6,0x76,0x39,0x81,0x37,0x18,0x7e,0xcd,0xb2,0x6d,0x2d,0x69,0xba,0x0,0x3},typeobject([]string)}={\"method\"}",
 		"alicereborn",
 	}
-	redirect(t, bin.Start(args...), aliceBlessingFile)
-	got := removeCaveats(removePublicKeys(bin.Start("dumpblessings", aliceBlessingFile).Output()))
+	redirect(t, withCreds(aliceDir, sh.Cmd(bin, args...)), aliceBlessingFile)
+	got := removeCaveats(removePublicKeys(withCreds(aliceDir, sh.Cmd(bin, "dumpblessings", aliceBlessingFile)).CombinedOutput()))
 	want := `Blessings          : alicereborn
 PublicKey          : XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX
 Certificate chains : 1
@@ -540,72 +585,87 @@
 	}
 }
 
-func V23TestForkWithoutVDLPATH(t *v23tests.T) {
+func TestV23ForkWithoutVDLPATH(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+
+	delete(sh.Vars, "JIRI_ROOT")
+	delete(sh.Vars, "VDLPATH")
+
 	var (
-		parent = t.NewTempDir("")
-		bin    = t.BuildGoPkg("v.io/x/ref/cmd/principal").WithEnv("JIRI_ROOT=''", "VDLPATH=''")
+		parent = sh.MakeTempDir()
+		bin    = sh.JiriBuildGoPkg("v.io/x/ref/cmd/principal")
 	)
-	if err := bin.Start("create", parent, "parent").Wait(os.Stdout, os.Stderr); err != nil {
-		t.Fatalf("create %q failed: %v", parent, err)
-	}
-	if err := bin.Start("--v23.credentials="+parent, "fork", "--for=1s", t.NewTempDir(""), "child").Wait(os.Stdout, os.Stderr); err != nil {
-		t.Errorf("fork failed: %v", err)
-	}
+
+	sh.Cmd(bin, "create", parent, "parent").Run()
+	sh.Cmd(bin, "--v23.credentials="+parent, "fork", "--for=1s", sh.MakeTempDir(), "child").Run()
 }
 
-func V23TestForkWithoutCaveats(t *v23tests.T) {
+func TestV23ForkWithoutCaveats(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+
 	var (
-		parent = t.NewTempDir("")
-		child  = t.NewTempDir("")
-		bin    = t.BuildGoPkg("v.io/x/ref/cmd/principal")
-		buf    bytes.Buffer
+		parent = sh.MakeTempDir()
+		child  = sh.MakeTempDir()
+		bin    = sh.JiriBuildGoPkg("v.io/x/ref/cmd/principal")
 	)
-	if err := bin.Start("create", parent, "parent").Wait(os.Stdout, os.Stderr); err != nil {
-		t.Fatalf("create %q failed: %v", parent, err)
-	}
-	if err := bin.Start("--v23.credentials", parent, "fork", child, "child").Wait(os.Stdout, &buf); err == nil {
+
+	sh.Cmd(bin, "create", parent, "parent").Run()
+
+	cmd := sh.Cmd(bin, "--v23.credentials", parent, "fork", child, "child")
+	cmd.ExitErrorIsOk = true
+	if _, stderr := cmd.Output(); cmd.Err == nil {
 		t.Errorf("fork should have failed without any caveats, but did not")
-	} else if got, want := buf.String(), "ERROR: no caveats provided"; !strings.Contains(got, want) {
+	} else if got, want := stderr, "ERROR: no caveats provided"; !strings.Contains(got, want) {
 		t.Errorf("fork returned error: %q, expected error to contain %q", got, want)
 	}
-	if err := bin.Start("--v23.credentials", parent, "fork", "--for=0", child, "child").Wait(os.Stdout, &buf); err == nil {
+
+	cmd = sh.Cmd(bin, "--v23.credentials", parent, "fork", "--for=0", child, "child")
+	cmd.ExitErrorIsOk = true
+	if _, stderr := cmd.Output(); cmd.Err == nil {
 		t.Errorf("fork should have failed without any caveats, but did not")
-	} else if got, want := buf.String(), "ERROR: no caveats provided"; !strings.Contains(got, want) {
+	} else if got, want := stderr, "ERROR: no caveats provided"; !strings.Contains(got, want) {
 		t.Errorf("fork returned error: %q, expected error to contain %q", got, want)
 	}
-	if err := bin.Start("--v23.credentials", parent, "fork", "--require-caveats=false", child, "child").Wait(os.Stdout, os.Stderr); err != nil {
-		t.Errorf("fork --require-caveats=false failed with: %v", err)
-	}
+
+	sh.Cmd(bin, "--v23.credentials", parent, "fork", "--require-caveats=false", child, "child").Run()
 }
 
-func V23TestBless(t *v23tests.T) {
+func TestV23Bless(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+
 	var (
-		bin      = t.BuildGoPkg("v.io/x/ref/cmd/principal")
-		dir      = t.NewTempDir("")
+		bin      = sh.JiriBuildGoPkg("v.io/x/ref/cmd/principal")
+		dir      = sh.MakeTempDir()
 		aliceDir = filepath.Join(dir, "alice")
 		bobDir   = filepath.Join(dir, "bob")
 		tmpfile  = filepath.Join(dir, "tmpfile")
 	)
 	// Create two principals: alice and bob
-	bin.Start("create", aliceDir, "alice").WaitOrDie(os.Stdout, os.Stderr)
-	bin.Start("create", bobDir, "bob").WaitOrDie(os.Stdout, os.Stderr)
+	sh.Cmd(bin, "create", aliceDir, "alice").Run()
+	sh.Cmd(bin, "create", bobDir, "bob").Run()
 
 	// All blessings will be done by "alice"
-	bin = bin.WithEnv(credEnv(aliceDir))
+	aliceCmd := func(name string, args ...string) *v23test.Cmd {
+		return withCreds(aliceDir, sh.Cmd(name, args...))
+	}
 
 	{
 		// "alice" should fail to bless "bob" without any caveats
-		var buf bytes.Buffer
-		if err := bin.Start("bless", bobDir, "friend").Wait(os.Stdout, &buf); err == nil {
+		cmd := aliceCmd(bin, "bless", bobDir, "friend")
+		cmd.ExitErrorIsOk = true
+		if _, stderr := cmd.Output(); cmd.Err == nil {
 			t.Errorf("bless should have failed when no caveats are specified")
-		} else if got, want := buf.String(), "ERROR: no caveats provided"; !strings.Contains(got, want) {
+		} else if got, want := stderr, "ERROR: no caveats provided"; !strings.Contains(got, want) {
 			t.Errorf("got error %q, expected to match %q", got, want)
 		}
 	}
 	{
 		// But succeed if --require-caveats=false is specified
-		redirect(t, bin.Start("bless", "--require-caveats=false", bobDir, "friend"), tmpfile)
-		got := removeCaveats(removePublicKeys(bin.Start("dumpblessings", tmpfile).Output()))
+		redirect(t, aliceCmd(bin, "bless", "--require-caveats=false", bobDir, "friend"), tmpfile)
+		got := removeCaveats(removePublicKeys(aliceCmd(bin, "dumpblessings", tmpfile).CombinedOutput()))
 		want := `Blessings          : alice:friend
 PublicKey          : XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX
 Certificate chains : 1
@@ -620,8 +680,8 @@
 	}
 	{
 		// And succeed if --for is specified
-		redirect(t, bin.Start("bless", "--for=1m", bobDir, "friend"), tmpfile)
-		got := removeCaveats(removePublicKeys(bin.Start("dumpblessings", tmpfile).Output()))
+		redirect(t, aliceCmd(bin, "bless", "--for=1m", bobDir, "friend"), tmpfile)
+		got := removeCaveats(removePublicKeys(aliceCmd(bin, "dumpblessings", tmpfile).CombinedOutput()))
 		want := `Blessings          : alice:friend
 PublicKey          : XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX
 Certificate chains : 1
@@ -636,8 +696,8 @@
 	}
 	{
 		// If the Blessings are expired, dumpBlessings should print so.
-		redirect(t, bin.Start("bless", "--for=-1s", bobDir, "friend"), tmpfile)
-		got := removeCaveats(removePublicKeys(bin.Start("dumpblessings", tmpfile).Output()))
+		redirect(t, aliceCmd(bin, "bless", "--for=-1s", bobDir, "friend"), tmpfile)
+		got := removeCaveats(removePublicKeys(aliceCmd(bin, "dumpblessings", tmpfile).CombinedOutput()))
 		want := `Blessings          : alice:friend [EXPIRED]
 PublicKey          : XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX
 Certificate chains : 1
@@ -652,38 +712,42 @@
 	}
 	{
 		// But not if --for=0
-		var buf bytes.Buffer
-		if err := bin.Start("bless", "--for=0", bobDir, "friend").Wait(os.Stdout, &buf); err == nil {
+		cmd := aliceCmd(bin, "bless", "--for=0", bobDir, "friend")
+		cmd.ExitErrorIsOk = true
+		if _, stderr := cmd.Output(); cmd.Err == nil {
 			t.Errorf("bless should have failed when no caveats are specified")
-		} else if got, want := buf.String(), "ERROR: no caveats provided"; !strings.Contains(got, want) {
+		} else if got, want := stderr, "ERROR: no caveats provided"; !strings.Contains(got, want) {
 			t.Errorf("got error %q, expected to match %q", got, want)
 		}
 	}
 }
 
-func V23TestAddBlessingsToRoots(t *v23tests.T) {
+func TestV23AddBlessingsToRoots(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+
 	var (
-		bin          = t.BuildGoPkg("v.io/x/ref/cmd/principal")
-		aliceDir     = t.NewTempDir("")
-		bobDir       = t.NewTempDir("")
-		blessingFile = filepath.Join(t.NewTempDir(""), "bobfile")
+		bin          = sh.JiriBuildGoPkg("v.io/x/ref/cmd/principal")
+		aliceDir     = sh.MakeTempDir()
+		bobDir       = sh.MakeTempDir()
+		blessingFile = filepath.Join(sh.MakeTempDir(), "bobfile")
 
 		// Extract the public key from the first line of output from
 		// "principal dump", which is formatted as:
 		// Public key : <the public key>
 		publicKey = func(dir string) string {
-			output := bin.Start("--v23.credentials="+dir, "dump").Output()
+			output := sh.Cmd(bin, "--v23.credentials="+dir, "dump").CombinedOutput()
 			line := strings.SplitN(output, "\n", 2)[0]
 			fields := strings.Split(line, " ")
 			return fields[len(fields)-1]
 		}
 	)
 	// Create two principals, "alice" and "bob"
-	bin.Start("create", aliceDir, "alice").WaitOrDie(os.Stdout, os.Stderr)
-	bin.Start("create", bobDir, "bob").WaitOrDie(os.Stdout, os.Stderr)
+	sh.Cmd(bin, "create", aliceDir, "alice").Run()
+	sh.Cmd(bin, "create", bobDir, "bob").Run()
 	// Have bob create a "bob/friend" blessing and have alice recognize that.
-	redirect(t, bin.Start("--v23.credentials="+bobDir, "bless", "--require-caveats=false", aliceDir, "friend"), blessingFile)
-	bin.Start("--v23.credentials="+aliceDir, "recognize", blessingFile).WaitOrDie(os.Stdout, os.Stderr)
+	redirect(t, sh.Cmd(bin, "--v23.credentials="+bobDir, "bless", "--require-caveats=false", aliceDir, "friend"), blessingFile)
+	sh.Cmd(bin, "--v23.credentials="+aliceDir, "recognize", blessingFile).Run()
 
 	want := fmt.Sprintf(`Public key                                        Pattern
 %v   [alice]
@@ -691,28 +755,31 @@
 `, publicKey(aliceDir), publicKey(bobDir))
 
 	// Finally view alice's recognized roots, it should have lines corresponding to aliceLine and bobLine.
-	got := bin.Start("--v23.credentials="+aliceDir, "get", "recognizedroots").Output()
+	got := sh.Cmd(bin, "--v23.credentials="+aliceDir, "get", "recognizedroots").CombinedOutput()
 	if got != want {
 		t.Fatalf("Got:\n%v\n\nWant:\n%v", got, want)
 	}
 }
 
-func V23TestAddKeyToRoots(t *v23tests.T) {
+func TestV23AddKeyToRoots(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+
 	var (
-		bin       = t.BuildGoPkg("v.io/x/ref/cmd/principal")
-		outputDir = t.NewTempDir("")
+		bin       = sh.JiriBuildGoPkg("v.io/x/ref/cmd/principal")
+		outputDir = sh.MakeTempDir()
 		aliceDir  = filepath.Join(outputDir, "alice")
 		bobDir    = filepath.Join(outputDir, "bob")
 	)
-	bin.Start("create", aliceDir, "alice").WaitOrDie(os.Stdout, os.Stderr)
-	bin.Start("create", bobDir, "bob").WaitOrDie(os.Stdout, os.Stderr)
+	sh.Cmd(bin, "create", aliceDir, "alice").Run()
+	sh.Cmd(bin, "create", bobDir, "bob").Run()
 	// Get bob's public key and add it to roots for alice
-	bobKey := strings.TrimSpace(bin.Start("--v23.credentials="+bobDir, "get", "publickey").Output())
-	bobPrettyKey := strings.TrimSpace(bin.Start("--v23.credentials="+bobDir, "get", "publickey", "--pretty").Output())
-	bin.Start("--v23.credentials="+aliceDir, "recognize", "bob", bobKey).WaitOrDie(os.Stdout, os.Stderr)
+	bobKey := strings.TrimSpace(sh.Cmd(bin, "--v23.credentials="+bobDir, "get", "publickey").CombinedOutput())
+	bobPrettyKey := strings.TrimSpace(sh.Cmd(bin, "--v23.credentials="+bobDir, "get", "publickey", "--pretty").CombinedOutput())
+	sh.Cmd(bin, "--v23.credentials="+aliceDir, "recognize", "bob", bobKey).Run()
 
 	// Verify that it has been added
-	output := bin.Start("--v23.credentials="+aliceDir, "dump").Output()
+	output := sh.Cmd(bin, "--v23.credentials="+aliceDir, "dump").CombinedOutput()
 	want := fmt.Sprintf("%v   [bob]", bobPrettyKey)
 	for _, line := range strings.Split(output, "\n") {
 		if line == want {
@@ -722,29 +789,32 @@
 	t.Errorf("Could not find line:\n%v\nin output:\n%v\n", want, output)
 }
 
-func V23TestDumpRoots(t *v23tests.T) {
+func TestV23DumpRoots(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+
 	var (
-		bin             = t.BuildGoPkg("v.io/x/ref/cmd/principal")
-		outputDir       = t.NewTempDir("")
+		bin             = sh.JiriBuildGoPkg("v.io/x/ref/cmd/principal")
+		outputDir       = sh.MakeTempDir()
 		aliceDir        = filepath.Join(outputDir, "alice")
 		bobDir          = filepath.Join(outputDir, "bob")
 		aliceFriend     = filepath.Join(outputDir, "alicefriend")
 		aliceFriendRoot = filepath.Join(outputDir, "alicefriendroot")
 		aliceDefault    = filepath.Join(outputDir, "alicedefault")
 	)
-	bin.Start("create", aliceDir, "alice").WaitOrDie(os.Stdout, os.Stderr)
-	bin.Start("create", bobDir, "bob").WaitOrDie(os.Stdout, os.Stderr)
-	redirect(t, bin.Start("--v23.credentials="+aliceDir, "bless", "--require-caveats=false", bobDir, "friend"), aliceFriend)
-	redirect(t, bin.Start("dumproots", aliceFriend), aliceFriendRoot)
-	redirect(t, bin.Start("--v23.credentials="+aliceDir, "get", "default"), aliceDefault)
+	sh.Cmd(bin, "create", aliceDir, "alice").Run()
+	sh.Cmd(bin, "create", bobDir, "bob").Run()
+	redirect(t, sh.Cmd(bin, "--v23.credentials="+aliceDir, "bless", "--require-caveats=false", bobDir, "friend"), aliceFriend)
+	redirect(t, sh.Cmd(bin, "dumproots", aliceFriend), aliceFriendRoot)
+	redirect(t, sh.Cmd(bin, "--v23.credentials="+aliceDir, "get", "default"), aliceDefault)
 
-	want := bin.Start("dumpblessings", aliceDefault).Output()
-	got := bin.Start("dumpblessings", aliceFriendRoot).Output()
+	want := sh.Cmd(bin, "dumpblessings", aliceDefault).CombinedOutput()
+	got := sh.Cmd(bin, "dumpblessings", aliceFriendRoot).CombinedOutput()
 	if got != want {
 		t.Errorf("Got:\n%s\nWant:\n%s\n", got, want)
 	}
 }
 
-func credEnv(dir string) string {
-	return fmt.Sprintf("%s=%s", ref.EnvCredentials, dir)
+func TestMain(m *testing.M) {
+	os.Exit(v23test.Run(m.Run))
 }
diff --git a/cmd/principal/v23_test.go b/cmd/principal/v23_test.go
deleted file mode 100644
index 1bf254c..0000000
--- a/cmd/principal/v23_test.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2015 The Vanadium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file was auto-generated via go generate.
-// DO NOT UPDATE MANUALLY
-
-package main_test
-
-import (
-	"os"
-	"testing"
-
-	"v.io/x/ref/test/modules"
-	"v.io/x/ref/test/v23tests"
-)
-
-func TestMain(m *testing.M) {
-	modules.DispatchAndExitIfChild()
-	cleanup := v23tests.UseSharedBinDir()
-	r := m.Run()
-	cleanup()
-	os.Exit(r)
-}
-
-func TestV23BlessSelf(t *testing.T) {
-	v23tests.RunTest(t, V23TestBlessSelf)
-}
-
-func TestV23Store(t *testing.T) {
-	v23tests.RunTest(t, V23TestStore)
-}
-
-func TestV23Dump(t *testing.T) {
-	v23tests.RunTest(t, V23TestDump)
-}
-
-func TestV23GetRecognizedRoots(t *testing.T) {
-	v23tests.RunTest(t, V23TestGetRecognizedRoots)
-}
-
-func TestV23GetPeermap(t *testing.T) {
-	v23tests.RunTest(t, V23TestGetPeermap)
-}
-
-func TestV23RecvBlessings(t *testing.T) {
-	v23tests.RunTest(t, V23TestRecvBlessings)
-}
-
-func TestV23RecvBlessingsInteractive(t *testing.T) {
-	v23tests.RunTest(t, V23TestRecvBlessingsInteractive)
-}
-
-func TestV23Fork(t *testing.T) {
-	v23tests.RunTest(t, V23TestFork)
-}
-
-func TestV23Create(t *testing.T) {
-	v23tests.RunTest(t, V23TestCreate)
-}
-
-func TestV23Caveats(t *testing.T) {
-	v23tests.RunTest(t, V23TestCaveats)
-}
-
-func TestV23ForkWithoutVDLPATH(t *testing.T) {
-	v23tests.RunTest(t, V23TestForkWithoutVDLPATH)
-}
-
-func TestV23ForkWithoutCaveats(t *testing.T) {
-	v23tests.RunTest(t, V23TestForkWithoutCaveats)
-}
-
-func TestV23Bless(t *testing.T) {
-	v23tests.RunTest(t, V23TestBless)
-}
-
-func TestV23AddBlessingsToRoots(t *testing.T) {
-	v23tests.RunTest(t, V23TestAddBlessingsToRoots)
-}
-
-func TestV23AddKeyToRoots(t *testing.T) {
-	v23tests.RunTest(t, V23TestAddKeyToRoots)
-}
-
-func TestV23DumpRoots(t *testing.T) {
-	v23tests.RunTest(t, V23TestDumpRoots)
-}
diff --git a/examples/tunnel/tunneld/tunneld_v23_test.go b/examples/tunnel/tunneld/tunneld_v23_test.go
index 907368e..9221d90 100644
--- a/examples/tunnel/tunneld/tunneld_v23_test.go
+++ b/examples/tunnel/tunneld/tunneld_v23_test.go
@@ -4,67 +4,80 @@
 
 package main_test
 
-//go:generate jiri test generate .
-
 import (
 	"bytes"
 	"io/ioutil"
+	"os"
 	"path/filepath"
+	"testing"
+	"time"
 
 	"v.io/x/ref"
-	"v.io/x/ref/test/v23tests"
+	"v.io/x/ref/lib/v23test"
+	"v.io/x/ref/test/expect"
 )
 
-func V23TestTunneld(t *v23tests.T) {
-	v23tests.RunRootMT(t, "--v23.tcp.address=127.0.0.1:0")
+func TestV23Tunneld(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
 
-	tunneldBin := t.BuildV23Pkg("v.io/x/ref/examples/tunnel/tunneld")
-	vsh := t.BuildV23Pkg("v.io/x/ref/examples/tunnel/vsh")
-	mounttableBin := t.BuildV23Pkg("v.io/x/ref/cmd/mounttable")
+	tunneldBin := sh.JiriBuildGoPkg("v.io/x/ref/examples/tunnel/tunneld")
+	vsh := sh.JiriBuildGoPkg("v.io/x/ref/examples/tunnel/vsh")
+	mounttableBin := sh.JiriBuildGoPkg("v.io/x/ref/cmd/mounttable")
 
 	// Start tunneld with a known endpoint.
-	tunnelEndpoint := tunneldBin.Start("--v23.tcp.address=127.0.0.1:0", "--name=tunnel/test").ExpectVar("NAME")
+	cmd := sh.Cmd(tunneldBin, "--v23.tcp.address=127.0.0.1:0", "--name=tunnel/test")
+	session := expect.NewSession(t, cmd.StdoutPipe(), time.Minute)
+	cmd.Start()
+	tunnelEndpoint := session.ExpectVar("NAME")
 
 	// Run remote command with the endpoint.
-	if want, got := "HELLO ENDPOINT\n", vsh.Start(tunnelEndpoint, "echo", "HELLO", "ENDPOINT").Output(); want != got {
+	if want, got := "HELLO ENDPOINT\n", sh.Cmd(vsh, tunnelEndpoint, "echo", "HELLO", "ENDPOINT").CombinedOutput(); want != got {
 		t.Fatalf("unexpected output, got %s, want %s", got, want)
 	}
 
-	if want, got := "HELLO NAME\n", vsh.Start("tunnel/test", "echo", "HELLO", "NAME").Output(); want != got {
+	if want, got := "HELLO NAME\n", sh.Cmd(vsh, "tunnel/test", "echo", "HELLO", "NAME").CombinedOutput(); want != got {
 		t.Fatalf("unexpected output, got %s, want %s", got, want)
 	}
 
 	// Send input to remote command.
 	want := "HELLO SERVER"
-	if got := vsh.WithStdin(bytes.NewBufferString(want)).Start(tunnelEndpoint, "cat").Output(); want != got {
+	cmd = sh.Cmd(vsh, tunnelEndpoint, "cat")
+	cmd.Stdin = bytes.NewBufferString(want)
+	if got := cmd.CombinedOutput(); want != got {
 		t.Fatalf("unexpected output, got %s, want %s", got, want)
 	}
 
 	// And again with a file redirection this time.
-	outDir := t.NewTempDir("")
+	outDir := sh.MakeTempDir()
 	outPath := filepath.Join(outDir, "hello.txt")
 
-	// TODO(sjr): instead of using Output() here, we'd really rather do
-	// WaitOrDie(os.Stdout, os.Stderr). There is currently a race caused by
-	// WithStdin that makes this flaky.
-	vsh.WithStdin(bytes.NewBufferString(want)).Start(tunnelEndpoint, "cat > "+outPath).Output()
-	if got, err := ioutil.ReadFile(outPath); err != nil || string(got) != want {
-		if err != nil {
-			t.Fatalf("ReadFile(%v) failed: %v", outPath, err)
-		} else {
-			t.Fatalf("unexpected output, got %s, want %s", got, want)
-		}
+	cmd = sh.Cmd(vsh, tunnelEndpoint, "cat > "+outPath)
+	cmd.Stdin = bytes.NewBufferString(want)
+	cmd.Run()
+	if got, err := ioutil.ReadFile(outPath); err != nil {
+		t.Fatalf("ReadFile(%v) failed: %v", outPath, err)
+	} else if string(got) != want {
+		t.Fatalf("unexpected output, got %s, want %s", string(got), want)
 	}
 
 	// Verify that all published names are there.
-	root, _ := t.GetVar(ref.EnvNamespacePrefix)
-	inv := mounttableBin.Start("glob", root, "tunnel/test")
+	root := sh.Vars[ref.EnvNamespacePrefix]
+
+	cmd = sh.Cmd(mounttableBin, "glob", root, "tunnel/test")
+	session = expect.NewSession(t, cmd.StdoutPipe(), time.Minute)
+	cmd.Start()
 
 	// Expect one entry: the tunnel name.
-	matches := inv.ExpectSetEventuallyRE("tunnel/test" + " (.*) \\(Deadline .*\\)")
+	matches := session.ExpectSetEventuallyRE("tunnel/test" + " (.*) \\(Deadline .*\\)")
 
 	// The full endpoint should be the one we saw originally.
 	if got, want := matches[0][1], tunnelEndpoint; "/"+got != want {
 		t.Fatalf("expected tunnel endpoint %s to be %s, but it was not", got, want)
 	}
 }
+
+func TestMain(m *testing.M) {
+	os.Exit(v23test.Run(m.Run))
+}
diff --git a/examples/tunnel/tunneld/v23_test.go b/examples/tunnel/tunneld/v23_test.go
deleted file mode 100644
index 5ae483b..0000000
--- a/examples/tunnel/tunneld/v23_test.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2015 The Vanadium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file was auto-generated via go generate.
-// DO NOT UPDATE MANUALLY
-
-package main_test
-
-import (
-	"os"
-	"testing"
-
-	"v.io/x/ref/test/modules"
-	"v.io/x/ref/test/v23tests"
-)
-
-func TestMain(m *testing.M) {
-	modules.DispatchAndExitIfChild()
-	cleanup := v23tests.UseSharedBinDir()
-	r := m.Run()
-	cleanup()
-	os.Exit(r)
-}
-
-func TestV23Tunneld(t *testing.T) {
-	v23tests.RunTest(t, V23TestTunneld)
-}
diff --git a/lib/discovery/query.go b/lib/discovery/query.go
index 3c6ce4a..f639832 100644
--- a/lib/discovery/query.go
+++ b/lib/discovery/query.go
@@ -13,6 +13,7 @@
 	"v.io/v23/query/engine"
 	"v.io/v23/query/engine/datasource"
 	"v.io/v23/query/engine/public"
+	"v.io/v23/query/syncql"
 	"v.io/v23/vdl"
 )
 
@@ -44,8 +45,13 @@
 }
 
 // Implements datasource.Database.
-func (ds *dDS) GetContext() *context.T                    { return ds.ctx }
-func (ds *dDS) GetTable(string) (datasource.Table, error) { return ds, nil }
+func (ds *dDS) GetContext() *context.T { return ds.ctx }
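+// GetTable refuses write access, since discovery scans are read-only.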
+func (ds *dDS) GetTable(table string, writeAccessReq bool) (datasource.Table, error) {
+	if writeAccessReq {
+		return nil, syncql.NewErrNotWritable(ds.ctx, table)
+	}
+	return ds, nil
+}
 
 // Implements datasource.Table.
 func (ds *dDS) GetIndexFields() []datasource.Index {
@@ -60,6 +66,9 @@
 	}
 	return ds, nil
 }
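+// Delete implements datasource.Table; deletes are not supported by the
+// discovery datasource.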
+func (ds *dDS) Delete(k string) (bool, error) {
+	return false, syncql.NewErrOperationNotSupported(ds.ctx, "delete")
+}
 
 func limitToSingleInterface(idxRange datasource.IndexRanges) (bool, string) {
 	if idxRange.NilAllowed == false && len(*idxRange.StringRanges) == 1 && len((*idxRange.StringRanges)[0].Start) > 0 {
diff --git a/lib/security/blessingstore.go b/lib/security/blessingstore.go
index 587a38b..b0ee2ed 100644
--- a/lib/security/blessingstore.go
+++ b/lib/security/blessingstore.go
@@ -123,12 +123,10 @@
 
 func (bs *blessingStore) CacheDischarge(discharge security.Discharge, caveat security.Caveat, impetus security.DischargeImpetus) {
 	id := discharge.ID()
-	tp := caveat.ThirdPartyDetails()
-	// Only add to the cache if the caveat did not require arguments.
-	if id == "" || tp == nil || tp.Requirements().ReportArguments {
+	key, cacheable := dcacheKey(caveat.ThirdPartyDetails(), impetus)
+	if id == "" || !cacheable {
 		return
 	}
-	key := dcacheKey(tp, impetus)
 	bs.mu.Lock()
 	defer bs.mu.Unlock()
 	old, hadold := bs.state.DischargeCache[key]
@@ -145,49 +143,39 @@
 
 func (bs *blessingStore) ClearDischarges(discharges ...security.Discharge) {
 	bs.mu.Lock()
-	bs.clearDischargesLocked(discharges...)
+	clearDischargesFromCache(bs.state.DischargeCache, discharges...)
 	bs.mu.Unlock()
 	return
 }
 
-func (bs *blessingStore) clearDischargesLocked(discharges ...security.Discharge) {
-	for _, d := range discharges {
-		for k, cached := range bs.state.DischargeCache {
-			if cached.Equivalent(d) {
-				delete(bs.state.DischargeCache, k)
-			}
-		}
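+// Discharge returns the discharge cached for the given caveat and impetus,
+// if one exists and has not expired.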
+func (bs *blessingStore) Discharge(caveat security.Caveat, impetus security.DischargeImpetus) security.Discharge {
+	key, cacheable := dcacheKey(caveat.ThirdPartyDetails(), impetus)
+	if !cacheable {
+		return security.Discharge{}
 	}
-}
-
-func (bs *blessingStore) Discharge(caveat security.Caveat, impetus security.DischargeImpetus) (out security.Discharge) {
 	defer bs.mu.Unlock()
 	bs.mu.Lock()
-	tp := caveat.ThirdPartyDetails()
+	return dischargeFromCache(bs.state.DischargeCache, key)
+}
+
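+// dischargeFromCache returns the discharge cached under key, evicting the
+// entry and returning the zero value if it has expired.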
+func dischargeFromCache(dcache map[dischargeCacheKey]security.Discharge, key dischargeCacheKey) security.Discharge {
+	cached, exists := dcache[key]
+	if !exists {
+		return security.Discharge{}
+	}
+	if expiry := cached.Expiry(); expiry.IsZero() || expiry.After(time.Now()) {
+		return cached
+	}
+	delete(dcache, key)
+	return security.Discharge{}
+}
+
+func dcacheKey(tp security.ThirdPartyCaveat, impetus security.DischargeImpetus) (key dischargeCacheKey, cacheable bool) {
+	// The cache key is based on the method and servers of the impetus; it
+	// ignores arguments. So fail if the caveat requires arguments to be reported.
 	if tp == nil || tp.Requirements().ReportArguments {
-		return
+		return key, false
 	}
-	key := dcacheKey(tp, impetus)
-	if cached, exists := bs.state.DischargeCache[key]; exists {
-		out = cached
-		// If the discharge has expired, purge it from the cache.
-		if hasDischargeExpired(out) {
-			out = security.Discharge{}
-			bs.clearDischargesLocked(cached)
-		}
-	}
-	return
-}
-
-func hasDischargeExpired(dis security.Discharge) bool {
-	expiry := dis.Expiry()
-	if expiry.IsZero() {
-		return false
-	}
-	return expiry.Before(time.Now())
-}
-
-func dcacheKey(tp security.ThirdPartyCaveat, impetus security.DischargeImpetus) dischargeCacheKey {
 	// If the algorithm for computing dcacheKey changes, cacheKeyFormat must be changed as well.
 	id := tp.ID()
 	r := tp.Requirements()
@@ -210,9 +198,18 @@
 	h.Write(hashString(id))
 	h.Write(hashString(method))
 	h.Write(hashString(servers))
-	var key [sha256.Size]byte
 	copy(key[:], h.Sum(nil))
-	return key
+	return key, true
+}
+
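+// clearDischargesFromCache removes every cache entry equivalent to any of
+// the given discharges.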
+func clearDischargesFromCache(dcache map[dischargeCacheKey]security.Discharge, discharges ...security.Discharge) {
+	for _, d := range discharges {
+		for k, cached := range dcache {
+			if cached.Equivalent(d) {
+				delete(dcache, k)
+			}
+		}
+	}
 }
 
 func hashString(d string) []byte {
diff --git a/lib/security/forked_principal.go b/lib/security/forked_principal.go
new file mode 100644
index 0000000..0efd657
--- /dev/null
+++ b/lib/security/forked_principal.go
@@ -0,0 +1,212 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package security
+
+import (
+	"bytes"
+	"fmt"
+	"sync"
+
+	"v.io/v23/security"
+	"v.io/v23/verror"
+)
+
+var (
+	errImmutable         = verror.Register(pkgPath+".errImmutable", verror.NoRetry, "{1:}{2:} mutation not supported on this immutable type (type={3:} method={4:})")
+	errPublicKeyMismatch = verror.Register(pkgPath+".errPublicKeyMismatch", verror.NoRetry, "{1:}{2:} principal's public key {3:} does not match store's public key {4:}")
+)
+
+// ForkPrincipal returns a principal that has the same private key as p but
+// uses store and roots instead of the BlessingStore and BlessingRoots in p.
+func ForkPrincipal(p security.Principal, store security.BlessingStore, roots security.BlessingRoots) (security.Principal, error) {
+	k1, err := p.PublicKey().MarshalBinary()
+	if err != nil {
+		return nil, err
+	}
+	k2, err := store.PublicKey().MarshalBinary()
+	if err != nil {
+		return nil, err
+	}
+	if !bytes.Equal(k1, k2) {
+		return nil, verror.New(errPublicKeyMismatch, nil, p.PublicKey(), store.PublicKey())
+	}
+	return &forkedPrincipal{p, store, roots}, nil
+}
+
+// MustForkPrincipal is identical to ForkPrincipal, except that it panics on
+// error (such as if store is bound to a different PublicKey than p).
+func MustForkPrincipal(p security.Principal, store security.BlessingStore, roots security.BlessingRoots) security.Principal {
+	p, err := ForkPrincipal(p, store, roots)
+	if err != nil {
+		panic(err)
+	}
+	return p
+}
+
+// ImmutableBlessingRoots returns a BlessingRoots implementation that is
+// identical to r, except that all mutation operations fail.
+func ImmutableBlessingRoots(r security.BlessingRoots) security.BlessingRoots {
+	return &immutableBlessingRoots{impl: r}
+}
+
+// ImmutableBlessingStore returns a BlessingStore implementation that is
+// identical to s, except that Set* methods will fail.
+// (Mutation in the form of adding discharges via CacheDischarge is still allowed.)
+func ImmutableBlessingStore(s security.BlessingStore) security.BlessingStore {
+	return &immutableBlessingStore{impl: s}
+}
+
+// FixedBlessingsStore returns a BlessingStore implementation that always
+// returns a fixed set of blessings (b) for both Default and ForPeer.
+//
+// If dcache is non-nil, it will be used to cache discharges; otherwise the
+// returned store creates a cache of its own.
+func FixedBlessingsStore(b security.Blessings, dcache DischargeCache) security.BlessingStore {
+	if dcache == nil {
+		dcache = &dischargeCacheImpl{m: make(map[dischargeCacheKey]security.Discharge)}
+	}
+	return &fixedBlessingsStore{b, dcache}
+}
+
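+// forkedPrincipal delegates to an embedded Principal, overriding only its
+// BlessingStore and BlessingRoots.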
+type forkedPrincipal struct {
+	security.Principal
+	store security.BlessingStore
+	roots security.BlessingRoots
+}
+
+func (p *forkedPrincipal) BlessingStore() security.BlessingStore {
+	return p.store
+}
+
+func (p *forkedPrincipal) Roots() security.BlessingRoots {
+	return p.roots
+}
+
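+// immutableBlessingStore delegates reads (and discharge caching) to impl but
+// rejects Set and SetDefault.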
+type immutableBlessingStore struct {
+	// Do not embed BlessingStore since that will make it easy to miss
+	// interface changes if a mutating method is added to the interface.
+	impl security.BlessingStore
+}
+
+func (s *immutableBlessingStore) Set(security.Blessings, security.BlessingPattern) (security.Blessings, error) {
+	return security.Blessings{}, verror.New(errImmutable, nil, fmt.Sprintf("%T", s), "Set")
+}
+func (s *immutableBlessingStore) ForPeer(peerBlessings ...string) security.Blessings {
+	return s.impl.ForPeer(peerBlessings...)
+}
+func (s *immutableBlessingStore) SetDefault(security.Blessings) error {
+	return verror.New(errImmutable, nil, fmt.Sprintf("%T", s), "SetDefault")
+}
+func (s *immutableBlessingStore) Default() security.Blessings {
+	return s.impl.Default()
+}
+func (s *immutableBlessingStore) PublicKey() security.PublicKey {
+	return s.impl.PublicKey()
+}
+func (s *immutableBlessingStore) PeerBlessings() map[security.BlessingPattern]security.Blessings {
+	return s.impl.PeerBlessings()
+}
+func (s *immutableBlessingStore) CacheDischarge(discharge security.Discharge, caveat security.Caveat, impetus security.DischargeImpetus) {
+	s.impl.CacheDischarge(discharge, caveat, impetus)
+}
+func (s *immutableBlessingStore) ClearDischarges(discharges ...security.Discharge) {
+	s.impl.ClearDischarges(discharges...)
+}
+func (s *immutableBlessingStore) Discharge(caveat security.Caveat, impetus security.DischargeImpetus) security.Discharge {
+	return s.impl.Discharge(caveat, impetus)
+}
+func (s *immutableBlessingStore) DebugString() string {
+	return s.impl.DebugString()
+}
+
+// DischargeCache is a subset of the security.BlessingStore interface that deals with caching discharges.
+type DischargeCache interface {
+	CacheDischarge(discharge security.Discharge, caveat security.Caveat, impetus security.DischargeImpetus)
+	ClearDischarges(discharges ...security.Discharge)
+	Discharge(caveat security.Caveat, impetus security.DischargeImpetus) security.Discharge
+}
+
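+// dischargeCacheImpl is a standalone, mutex-guarded implementation of
+// DischargeCache.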
+type dischargeCacheImpl struct {
+	l sync.Mutex
+	m map[dischargeCacheKey]security.Discharge
+}
+
+func (c *dischargeCacheImpl) CacheDischarge(discharge security.Discharge, caveat security.Caveat, impetus security.DischargeImpetus) {
+	id := discharge.ID()
+	key, cacheable := dcacheKey(caveat.ThirdPartyDetails(), impetus)
+	if id == "" || !cacheable {
+		return
+	}
+	c.l.Lock()
+	c.m[key] = discharge
+	c.l.Unlock()
+}
+func (c *dischargeCacheImpl) ClearDischarges(discharges ...security.Discharge) {
+	c.l.Lock()
+	clearDischargesFromCache(c.m, discharges...)
+	c.l.Unlock()
+}
+func (c *dischargeCacheImpl) Discharge(caveat security.Caveat, impetus security.DischargeImpetus) security.Discharge {
+	key, cacheable := dcacheKey(caveat.ThirdPartyDetails(), impetus)
+	if !cacheable {
+		return security.Discharge{}
+	}
+	c.l.Lock()
+	defer c.l.Unlock()
+	return dischargeFromCache(c.m, key)
+}
+
+type fixedBlessingsStore struct {
+	b      security.Blessings
+	dcache DischargeCache
+}
+
+func (s *fixedBlessingsStore) Set(security.Blessings, security.BlessingPattern) (security.Blessings, error) {
+	return security.Blessings{}, verror.New(errImmutable, nil, fmt.Sprintf("%T", s), "Set")
+}
+func (s *fixedBlessingsStore) ForPeer(peerBlessings ...string) security.Blessings {
+	return s.b
+}
+func (s *fixedBlessingsStore) SetDefault(security.Blessings) error {
+	return verror.New(errImmutable, nil, fmt.Sprintf("%T", s), "SetDefault")
+}
+func (s *fixedBlessingsStore) Default() security.Blessings {
+	return s.b
+}
+func (s *fixedBlessingsStore) PublicKey() security.PublicKey {
+	return s.b.PublicKey()
+}
+func (s *fixedBlessingsStore) PeerBlessings() map[security.BlessingPattern]security.Blessings {
+	return map[security.BlessingPattern]security.Blessings{security.AllPrincipals: s.b}
+}
+func (s *fixedBlessingsStore) CacheDischarge(discharge security.Discharge, caveat security.Caveat, impetus security.DischargeImpetus) {
+	s.dcache.CacheDischarge(discharge, caveat, impetus)
+}
+func (s *fixedBlessingsStore) ClearDischarges(discharges ...security.Discharge) {
+	s.dcache.ClearDischarges(discharges...)
+}
+func (s *fixedBlessingsStore) Discharge(caveat security.Caveat, impetus security.DischargeImpetus) security.Discharge {
+	return s.dcache.Discharge(caveat, impetus)
+}
+func (s *fixedBlessingsStore) DebugString() string {
+	return fmt.Sprintf("FixedBlessingsStore:[%v]", s.b)
+}
+
+type immutableBlessingRoots struct {
+	// Do not embed BlessingRoots since that will make it easy to miss
+	// interface changes if a mutation method is added to the interface.
+	impl security.BlessingRoots
+}
+
+func (r *immutableBlessingRoots) Recognized(root []byte, blessing string) error {
+	return r.impl.Recognized(root, blessing)
+}
+func (r *immutableBlessingRoots) Dump() map[security.BlessingPattern][]security.PublicKey {
+	return r.impl.Dump()
+}
+func (r *immutableBlessingRoots) DebugString() string { return r.impl.DebugString() }
+func (r *immutableBlessingRoots) Add([]byte, security.BlessingPattern) error {
+	return verror.New(errImmutable, nil, fmt.Sprintf("%T", r), "Add")
+}
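
Taken together, these helpers compose: an existing principal can be forked
into a restricted view that signs with the same private key but exposes only
a fixed set of blessings and read-only roots. A minimal sketch of the
intended usage (error handling elided; p and b are assumed to be in scope):

    import (
        "v.io/v23/security"
        vsecurity "v.io/x/ref/lib/security"
    )

    func readOnlyView(p security.Principal, b security.Blessings) security.Principal {
        // A nil DischargeCache makes the fixed store create its own cache.
        return vsecurity.MustForkPrincipal(
            p,
            vsecurity.FixedBlessingsStore(b, nil),
            vsecurity.ImmutableBlessingRoots(p.Roots()))
    }
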
diff --git a/lib/security/forked_principal_test.go b/lib/security/forked_principal_test.go
new file mode 100644
index 0000000..950e137
--- /dev/null
+++ b/lib/security/forked_principal_test.go
@@ -0,0 +1,146 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package security
+
+import (
+	"fmt"
+	"reflect"
+	"testing"
+
+	"v.io/v23/security"
+)
+
+func TestFixedBlessingStore(t *testing.T) {
+	p, b1 := newPrincipal("self")
+	b2, _ := p.BlessSelf("other")
+	tpcav := mkCaveat(security.NewPublicKeyCaveat(p.PublicKey(), "location", security.ThirdPartyRequirements{}, security.UnconstrainedUse()))
+	d := mkDischarge(p.MintDischarge(tpcav, security.UnconstrainedUse()))
+	s1 := newInMemoryBlessingStore(p.PublicKey())
+	s2 := FixedBlessingsStore(b1, nil)
+
+	// Set and SetDefault should fail; other than that, s2 should behave
+	// identically to s1 once s1 has been set up.
+	s1.Set(b1, security.AllPrincipals)
+	s1.SetDefault(b1)
+
+	if _, err := s2.Set(b2, security.AllPrincipals); err == nil {
+		t.Fatal("%T.Set should fail", s2)
+	}
+	if err := s2.SetDefault(b2); err == nil {
+		t.Fatal("%T.SetDefault should fail", s2)
+	}
+	for idx, c := range []call{
+		{"ForPeer", []interface{}{"foobar"}},
+		{"Default", nil},
+		{"PublicKey", nil},
+		{"PeerBlessings", nil},
+		{"CacheDischarge", []interface{}{d, tpcav, security.DischargeImpetus{}}},
+		{"Discharge", []interface{}{tpcav, security.DischargeImpetus{}}},
+		{"ClearDischarges", []interface{}{d}},
+		{"Discharge", []interface{}{tpcav, security.DischargeImpetus{}}},
+	} {
+		if err := c.Compare(s1, s2); err != nil {
+			t.Errorf("#%d: %v", idx, err)
+		}
+	}
+}
+
+func TestImmutableBlessingStore(t *testing.T) {
+	p, b1 := newPrincipal("self")
+	b2, _ := p.BlessSelf("othername")
+	bdef, _ := p.BlessSelf("default")
+	tpcav := mkCaveat(security.NewPublicKeyCaveat(p.PublicKey(), "location", security.ThirdPartyRequirements{}, security.UnconstrainedUse()))
+	d := mkDischarge(p.MintDischarge(tpcav, security.UnconstrainedUse()))
+	s1 := newInMemoryBlessingStore(p.PublicKey())
+	s2 := ImmutableBlessingStore(s1)
+
+	// Set and SetDefault called on s1 should affect s2
+	if _, err := s1.Set(b1, security.AllPrincipals); err != nil {
+		t.Fatal(err)
+	}
+	if err := s1.SetDefault(bdef); err != nil {
+		t.Fatal(err)
+	}
+	// But Set and SetDefault on s2 should fail
+	if _, err := s2.Set(b2, security.AllPrincipals); err == nil {
+		t.Fatalf("%T.Set should fail", s2)
+	}
+	if err := s2.SetDefault(b2); err == nil {
+		t.Fatalf("%T.SetDefault should fail", s2)
+	}
+	// All other method calls should defer to s1
+	for idx, c := range []call{
+		{"ForPeer", []interface{}{"foobar"}},
+		{"Default", nil},
+		{"PublicKey", nil},
+		{"PeerBlessings", nil},
+		{"CacheDischarge", []interface{}{d, tpcav, security.DischargeImpetus{}}},
+		{"Discharge", []interface{}{tpcav, security.DischargeImpetus{}}},
+		{"DebugString", nil},
+		{"ClearDischarges", []interface{}{d}},
+		{"Discharge", []interface{}{tpcav, security.DischargeImpetus{}}},
+	} {
+		if err := c.Compare(s1, s2); err != nil {
+			t.Errorf("#%d: %v", idx, err)
+		}
+	}
+}
+
+func TestImmutableBlessingRoots(t *testing.T) {
+	pubkey, _, _ := NewPrincipalKey()
+	pk, _ := pubkey.MarshalBinary()
+	r1 := newInMemoryBlessingRoots()
+	r2 := ImmutableBlessingRoots(r1)
+	pat := "pattern1"
+
+	// Adding to r1 should affect r2
+	if err := r1.Add(pk, security.BlessingPattern(pat)); err != nil {
+		t.Fatal(err)
+	}
+	// But Add on r2 should fail
+	if err := r2.Add(pk, "otherpattern"); err == nil {
+		t.Errorf("%T.Add should fail", r2)
+	}
+	// All other methods should be the same
+	for _, c := range []call{
+		{"Recognized", []interface{}{pk, pat}},
+		{"Dump", nil},
+		{"DebugString", nil},
+	} {
+		if err := c.Compare(r1, r2); err != nil {
+			t.Error(err)
+		}
+	}
+}
+
+type call struct {
+	Method string
+	Args   []interface{}
+}
+
+// Call executes c.Method on object with c.Args and returns the results.
+func (c *call) Call(object interface{}) []interface{} {
+	args := make([]reflect.Value, len(c.Args))
+	for i, a := range c.Args {
+		args[i] = reflect.ValueOf(a)
+	}
+	results := reflect.ValueOf(object).MethodByName(c.Method).Call(args)
+	ret := make([]interface{}, len(results))
+	for i, r := range results {
+		ret[i] = r.Interface()
+	}
+	return ret
+}
+
+// Compare calls Call on o1 and o2 and returns an error if the results do not match.
+func (c *call) Compare(o1, o2 interface{}) error {
+	r1 := c.Call(o1)
+	r2 := c.Call(o2)
+	if !reflect.DeepEqual(r1, r2) {
+		return fmt.Errorf("%v(%v): Got %v, want %v", c.Method, c.Args, r2, r1)
+	}
+	return nil
+}
diff --git a/lib/security/immutable_principal.go b/lib/security/immutable_principal.go
deleted file mode 100644
index 361b56e..0000000
--- a/lib/security/immutable_principal.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2015 The Vanadium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package security
-
-import (
-	"v.io/v23/security"
-	"v.io/v23/verror"
-)
-
-var errImmutablePrincipal = verror.Register(pkgPath+".errImmutablePrincipal", verror.NoRetry, "Principal is immutable")
-
-// NewImmutablePrincipal returns an immutable version of the given Principal.
-// All the methods that would normally change the state of the Principal return
-// an error instead.
-func NewImmutablePrincipal(p security.Principal) security.Principal {
-	return &immutablePrincipal{
-		p, &immutableBlessingStore{p.BlessingStore()}, &immutableBlessingRoots{p.Roots()},
-	}
-}
-
-type immutablePrincipal struct {
-	security.Principal
-	store *immutableBlessingStore
-	roots *immutableBlessingRoots
-}
-
-func (p *immutablePrincipal) BlessingStore() security.BlessingStore {
-	return p.store
-}
-
-func (p *immutablePrincipal) Roots() security.BlessingRoots {
-	return p.roots
-}
-
-func (p *immutablePrincipal) AddToRoots(security.Blessings) error {
-	return verror.New(errImmutablePrincipal, nil)
-}
-
-type immutableBlessingStore struct {
-	security.BlessingStore
-}
-
-func (s *immutableBlessingStore) Set(security.Blessings, security.BlessingPattern) (security.Blessings, error) {
-	return security.Blessings{}, verror.New(errImmutablePrincipal, nil)
-}
-
-func (s *immutableBlessingStore) SetDefault(security.Blessings) error {
-	return verror.New(errImmutablePrincipal, nil)
-}
-
-type immutableBlessingRoots struct {
-	security.BlessingRoots
-}
-
-func (r *immutableBlessingRoots) Add([]byte, security.BlessingPattern) error {
-	return verror.New(errImmutablePrincipal, nil)
-}
diff --git a/lib/signals/signals_test.go b/lib/signals/signals_test.go
index e92abae..c45f184 100644
--- a/lib/signals/signals_test.go
+++ b/lib/signals/signals_test.go
@@ -13,23 +13,23 @@
 	"runtime"
 	"syscall"
 	"testing"
+	"time"
 
 	"v.io/v23"
 	"v.io/v23/context"
 	"v.io/v23/rpc"
 	"v.io/v23/services/appcycle"
-	"v.io/v23/vtrace"
+	"v.io/x/lib/gosh"
 	"v.io/x/ref/lib/mgmt"
 	"v.io/x/ref/lib/security/securityflag"
+	"v.io/x/ref/lib/v23test"
+	_ "v.io/x/ref/runtime/factories/generic"
 	"v.io/x/ref/services/device"
 	"v.io/x/ref/test"
+	"v.io/x/ref/test/expect"
 	"v.io/x/ref/test/modules"
-
-	_ "v.io/x/ref/runtime/factories/generic"
 )
 
-//go:generate jiri test generate
-
 func stopLoop(stop func(), stdin io.Reader, ch chan<- struct{}) {
 	scanner := bufio.NewScanner(stdin)
 	for scanner.Scan() {
@@ -43,46 +43,42 @@
 	}
 }
 
-func program(stdin io.Reader, stdout io.Writer, signals ...os.Signal) {
+func program(signals ...os.Signal) {
 	ctx, shutdown := test.V23Init()
 	closeStopLoop := make(chan struct{})
 	// obtain ac here since stopLoop may execute after shutdown is called below
 	ac := v23.GetAppCycle(ctx)
-	go stopLoop(func() { ac.Stop(ctx) }, stdin, closeStopLoop)
+	go stopLoop(func() { ac.Stop(ctx) }, os.Stdin, closeStopLoop)
 	wait := ShutdownOnSignals(ctx, signals...)
-	fmt.Fprintf(stdout, "ready\n")
-	fmt.Fprintf(stdout, "received signal %s\n", <-wait)
+	fmt.Printf("ready\n")
+	fmt.Printf("received signal %s\n", <-wait)
 	shutdown()
 	<-closeStopLoop
 }
 
-var handleDefaults = modules.Register(func(env *modules.Env, args ...string) error {
-	program(env.Stdin, env.Stdout)
-	return nil
-}, "handleDefaults")
+var handleDefaults = gosh.Register("handleDefaults", func() {
+	program()
+})
 
-var handleCustom = modules.Register(func(env *modules.Env, args ...string) error {
-	program(env.Stdin, env.Stdout, syscall.SIGABRT)
-	return nil
-}, "handleCustom")
+var handleCustom = gosh.Register("handleCustom", func() {
+	program(syscall.SIGABRT)
+})
 
-var handleCustomWithStop = modules.Register(func(env *modules.Env, args ...string) error {
-	program(env.Stdin, env.Stdout, STOP, syscall.SIGABRT, syscall.SIGHUP)
-	return nil
-}, "handleCustomWithStop")
+var handleCustomWithStop = gosh.Register("handleCustomWithStop", func() {
+	program(STOP, syscall.SIGABRT, syscall.SIGHUP)
+})
 
-var handleDefaultsIgnoreChan = modules.Register(func(env *modules.Env, args ...string) error {
+var handleDefaultsIgnoreChan = gosh.Register("handleDefaultsIgnoreChan", func() {
 	ctx, shutdown := test.V23Init()
 	defer shutdown()
 	closeStopLoop := make(chan struct{})
 	// obtain ac here since stopLoop may execute after shutdown is called below
 	ac := v23.GetAppCycle(ctx)
-	go stopLoop(func() { ac.Stop(ctx) }, env.Stdin, closeStopLoop)
+	go stopLoop(func() { ac.Stop(ctx) }, os.Stdin, closeStopLoop)
 	ShutdownOnSignals(ctx)
-	fmt.Fprintf(env.Stdout, "ready\n")
+	fmt.Printf("ready\n")
 	<-closeStopLoop
-	return nil
-}, "handleDefaultsIgnoreChan")
+})
 
 func isSignalInSet(sig os.Signal, set []os.Signal) bool {
 	for _, s := range set {
@@ -105,73 +101,70 @@
 	}
 }
 
-func newShell(t *testing.T, ctx *context.T, prog modules.Program) (*modules.Shell, modules.Handle) {
-	sh, err := modules.NewShell(ctx, nil, testing.Verbose(), t)
-	if err != nil {
-		t.Fatalf("unexpected error: %s", err)
-	}
-	handle, err := sh.Start(nil, prog)
-	if err != nil {
-		sh.Cleanup(os.Stderr, os.Stderr)
-		t.Fatalf("unexpected error: %s", err)
-		return nil, nil
-	}
-	return sh, handle
+func startFn(t *testing.T, sh *v23test.Shell, fn *gosh.Fn, exitErrorIsOk bool) (*v23test.Cmd, *expect.Session, io.WriteCloser) {
+	cmd := sh.Fn(fn)
+	r, w := io.Pipe()
+	cmd.Stdin = r
+	session := expect.NewSession(t, cmd.StdoutPipe(), 5*time.Second)
+	cmd.ExitErrorIsOk = exitErrorIsOk
+	cmd.Start()
+	return cmd, session, w
+}
+
+func checkEOF(cmd *v23test.Cmd, session *expect.Session, stdinPipe io.WriteCloser) {
+	stdinPipe.Close()
+	cmd.Wait()
+	session.ExpectEOF()
 }
 
 // TestCleanShutdownSignal verifies that sending a signal to a child that
 // handles it by default causes the child to shut down cleanly.
 func TestCleanShutdownSignal(t *testing.T) {
-	ctx, shutdown := test.V23Init()
-	defer shutdown()
+	sh := v23test.NewShell(t, v23test.Opts{})
+	defer sh.Cleanup()
 
-	sh, h := newShell(t, ctx, handleDefaults)
-	defer sh.Cleanup(os.Stderr, os.Stderr)
-	h.Expect("ready")
+	cmd, session, stdinPipe := startFn(t, sh, handleDefaults, false)
+	session.Expect("ready")
 	checkSignalIsDefault(t, syscall.SIGINT)
-	syscall.Kill(h.Pid(), syscall.SIGINT)
-	h.Expectf("received signal %s", syscall.SIGINT)
-	fmt.Fprintf(h.Stdin(), "close\n")
-	h.ExpectEOF()
+	syscall.Kill(cmd.Process().Pid, syscall.SIGINT)
+	session.Expectf("received signal %s", syscall.SIGINT)
+	fmt.Fprintf(stdinPipe, "close\n")
+	checkEOF(cmd, session, stdinPipe)
 }
 
-// TestCleanShutdownStop verifies that sending a stop comamnd to a child that
+// TestCleanShutdownStop verifies that sending a stop command to a child that
 // handles stop commands by default causes the child to shut down cleanly.
 func TestCleanShutdownStop(t *testing.T) {
-	ctx, shutdown := test.V23Init()
-	defer shutdown()
+	sh := v23test.NewShell(t, v23test.Opts{})
+	defer sh.Cleanup()
 
-	sh, h := newShell(t, ctx, handleDefaults)
-	defer sh.Cleanup(os.Stderr, os.Stderr)
-	h.Expect("ready")
-	fmt.Fprintf(h.Stdin(), "stop\n")
-	h.Expectf("received signal %s", v23.LocalStop)
-	fmt.Fprintf(h.Stdin(), "close\n")
-	h.ExpectEOF()
-
+	cmd, session, stdinPipe := startFn(t, sh, handleDefaults, false)
+	session.Expect("ready")
+	fmt.Fprintf(stdinPipe, "stop\n")
+	session.Expectf("received signal %s", v23.LocalStop)
+	fmt.Fprintf(stdinPipe, "close\n")
+	checkEOF(cmd, session, stdinPipe)
 }
 
-// TestCleanShutdownStopCustom verifies that sending a stop comamnd to a child
+// TestCleanShutdownStopCustom verifies that sending a stop command to a child
 // that handles stop command as part of a custom set of signals handled, causes
 // the child to shut down cleanly.
 func TestCleanShutdownStopCustom(t *testing.T) {
-	ctx, shutdown := test.V23Init()
-	defer shutdown()
+	sh := v23test.NewShell(t, v23test.Opts{})
+	defer sh.Cleanup()
 
-	sh, h := newShell(t, ctx, handleCustomWithStop)
-	defer sh.Cleanup(os.Stderr, os.Stderr)
-	h.Expect("ready")
-	fmt.Fprintf(h.Stdin(), "stop\n")
-	h.Expectf("received signal %s", v23.LocalStop)
-	fmt.Fprintf(h.Stdin(), "close\n")
-	h.ExpectEOF()
+	cmd, session, stdinPipe := startFn(t, sh, handleCustomWithStop, false)
+	session.Expect("ready")
+	fmt.Fprintf(stdinPipe, "stop\n")
+	session.Expectf("received signal %s", v23.LocalStop)
+	fmt.Fprintf(stdinPipe, "close\n")
+	checkEOF(cmd, session, stdinPipe)
 }
 
-func testExitStatus(t *testing.T, h modules.Handle, code int) {
-	h.ExpectEOF()
-	_, file, line, _ := runtime.Caller(1)
-	file = filepath.Base(file)
-	if got, want := h.Shutdown(os.Stdout, os.Stderr), fmt.Errorf("exit status %d", code); got.Error() != want.Error() {
+func checkExitStatus(t *testing.T, cmd *v23test.Cmd, code int) {
+	if got, want := cmd.Err, fmt.Errorf("exit status %d", code); got.Error() != want.Error() {
+		_, file, line, _ := runtime.Caller(1)
+		file = filepath.Base(file)
 		t.Errorf("%s:%d: got %q, want %q", file, line, got, want)
 	}
 }
@@ -179,79 +172,79 @@
 // TestStopNoHandler verifies that sending a stop command to a child that does
 // not handle stop commands causes the child to exit immediately.
 func TestStopNoHandler(t *testing.T) {
-	ctx, shutdown := test.V23Init()
-	defer shutdown()
+	sh := v23test.NewShell(t, v23test.Opts{})
+	defer sh.Cleanup()
 
-	sh, h := newShell(t, ctx, handleCustom)
-	defer sh.Cleanup(os.Stderr, os.Stderr)
-	h.Expect("ready")
-	fmt.Fprintf(h.Stdin(), "stop\n")
-	testExitStatus(t, h, v23.UnhandledStopExitCode)
+	cmd, session, stdinPipe := startFn(t, sh, handleCustom, true)
+	session.Expect("ready")
+	fmt.Fprintf(stdinPipe, "stop\n")
+	checkEOF(cmd, session, stdinPipe)
+	checkExitStatus(t, cmd, v23.UnhandledStopExitCode)
 }
 
 // TestDoubleSignal verifies that sending a succession of two signals to a child
 // that handles these signals by default causes the child to exit immediately
 // upon receiving the second signal.
 func TestDoubleSignal(t *testing.T) {
-	ctx, shutdown := test.V23Init()
-	defer shutdown()
+	sh := v23test.NewShell(t, v23test.Opts{})
+	defer sh.Cleanup()
 
-	sh, h := newShell(t, ctx, handleDefaults)
-	defer sh.Cleanup(os.Stderr, os.Stderr)
-	h.Expect("ready")
+	cmd, session, stdinPipe := startFn(t, sh, handleDefaults, true)
+	session.Expect("ready")
 	checkSignalIsDefault(t, syscall.SIGTERM)
-	syscall.Kill(h.Pid(), syscall.SIGTERM)
-	h.Expectf("received signal %s", syscall.SIGTERM)
+	syscall.Kill(cmd.Process().Pid, syscall.SIGTERM)
+	session.Expectf("received signal %s", syscall.SIGTERM)
 	checkSignalIsDefault(t, syscall.SIGINT)
-	syscall.Kill(h.Pid(), syscall.SIGINT)
-	testExitStatus(t, h, DoubleStopExitCode)
+	syscall.Kill(cmd.Process().Pid, syscall.SIGINT)
+	checkEOF(cmd, session, stdinPipe)
+	checkExitStatus(t, cmd, DoubleStopExitCode)
 }
 
 // TestSignalAndStop verifies that sending a signal followed by a stop command
 // to a child that handles these by default causes the child to exit immediately
 // upon receiving the stop command.
 func TestSignalAndStop(t *testing.T) {
-	ctx, shutdown := test.V23Init()
-	defer shutdown()
+	sh := v23test.NewShell(t, v23test.Opts{})
+	defer sh.Cleanup()
 
-	sh, h := newShell(t, ctx, handleDefaults)
-	defer sh.Cleanup(os.Stderr, os.Stderr)
-	h.Expect("ready")
+	cmd, session, stdinPipe := startFn(t, sh, handleDefaults, true)
+	session.Expect("ready")
 	checkSignalIsDefault(t, syscall.SIGTERM)
-	syscall.Kill(h.Pid(), syscall.SIGTERM)
-	h.Expectf("received signal %s", syscall.SIGTERM)
-	fmt.Fprintf(h.Stdin(), "stop\n")
-	testExitStatus(t, h, DoubleStopExitCode)
+	syscall.Kill(cmd.Process().Pid, syscall.SIGTERM)
+	session.Expectf("received signal %s", syscall.SIGTERM)
+	fmt.Fprintf(stdinPipe, "stop\n")
+	checkEOF(cmd, session, stdinPipe)
+	checkExitStatus(t, cmd, DoubleStopExitCode)
 }
 
 // TestDoubleStop verifies that sending a succession of stop commands to a child
 // that handles stop commands by default causes the child to exit immediately
 // upon receiving the second stop command.
 func TestDoubleStop(t *testing.T) {
-	ctx, shutdown := test.V23Init()
-	defer shutdown()
+	sh := v23test.NewShell(t, v23test.Opts{})
+	defer sh.Cleanup()
 
-	sh, h := newShell(t, ctx, handleDefaults)
-	defer sh.Cleanup(os.Stderr, os.Stderr)
-	h.Expect("ready")
-	fmt.Fprintf(h.Stdin(), "stop\n")
-	h.Expectf("received signal %s", v23.LocalStop)
-	fmt.Fprintf(h.Stdin(), "stop\n")
-	testExitStatus(t, h, DoubleStopExitCode)
+	cmd, session, stdinPipe := startFn(t, sh, handleDefaults, true)
+	session.Expect("ready")
+	fmt.Fprintf(stdinPipe, "stop\n")
+	session.Expectf("received signal %s", v23.LocalStop)
+	fmt.Fprintf(stdinPipe, "stop\n")
+	checkEOF(cmd, session, stdinPipe)
+	checkExitStatus(t, cmd, DoubleStopExitCode)
 }
 
 // TestSendUnhandledSignal verifies that sending a signal that the child does
 // not handle causes the child to exit as per the signal being sent.
 func TestSendUnhandledSignal(t *testing.T) {
-	ctx, shutdown := test.V23Init()
-	defer shutdown()
+	sh := v23test.NewShell(t, v23test.Opts{})
+	defer sh.Cleanup()
 
-	sh, h := newShell(t, ctx, handleDefaults)
-	defer sh.Cleanup(os.Stderr, os.Stderr)
-	h.Expect("ready")
+	cmd, session, stdinPipe := startFn(t, sh, handleDefaults, true)
+	session.Expect("ready")
 	checkSignalIsNotDefault(t, syscall.SIGABRT)
-	syscall.Kill(h.Pid(), syscall.SIGABRT)
-	testExitStatus(t, h, 2)
+	syscall.Kill(cmd.Process().Pid, syscall.SIGABRT)
+	checkEOF(cmd, session, stdinPipe)
+	checkExitStatus(t, cmd, 2)
 }
 
 // TestDoubleSignalIgnoreChan verifies that, even if we ignore the channel that
@@ -259,54 +252,53 @@
 // process to exit (ensures that there is no dependency in ShutdownOnSignals
 // on having a goroutine read from the returned channel).
 func TestDoubleSignalIgnoreChan(t *testing.T) {
-	ctx, shutdown := test.V23Init()
-	defer shutdown()
+	sh := v23test.NewShell(t, v23test.Opts{})
+	defer sh.Cleanup()
 
-	sh, h := newShell(t, ctx, handleDefaultsIgnoreChan)
-	defer sh.Cleanup(os.Stderr, os.Stderr)
-	h.Expect("ready")
+	cmd, session, stdinPipe := startFn(t, sh, handleDefaultsIgnoreChan, true)
+	session.Expect("ready")
 	// Even if we ignore the channel that ShutdownOnSignals returns,
 	// sending two signals should still cause the process to exit.
 	checkSignalIsDefault(t, syscall.SIGTERM)
-	syscall.Kill(h.Pid(), syscall.SIGTERM)
+	syscall.Kill(cmd.Process().Pid, syscall.SIGTERM)
 	checkSignalIsDefault(t, syscall.SIGINT)
-	syscall.Kill(h.Pid(), syscall.SIGINT)
-	testExitStatus(t, h, DoubleStopExitCode)
+	syscall.Kill(cmd.Process().Pid, syscall.SIGINT)
+	checkEOF(cmd, session, stdinPipe)
+	checkExitStatus(t, cmd, DoubleStopExitCode)
 }
 
 // TestHandlerCustomSignal verifies that sending a non-default signal to a
 // server that listens for that signal causes the server to shut down cleanly.
 func TestHandlerCustomSignal(t *testing.T) {
-	ctx, shutdown := test.V23Init()
-	defer shutdown()
+	sh := v23test.NewShell(t, v23test.Opts{})
+	defer sh.Cleanup()
 
-	sh, h := newShell(t, ctx, handleCustom)
-	defer sh.Cleanup(os.Stderr, os.Stderr)
-	h.Expect("ready")
+	cmd, session, stdinPipe := startFn(t, sh, handleCustom, true)
+	session.Expect("ready")
 	checkSignalIsNotDefault(t, syscall.SIGABRT)
-	syscall.Kill(h.Pid(), syscall.SIGABRT)
-	h.Expectf("received signal %s", syscall.SIGABRT)
-	fmt.Fprintf(h.Stdin(), "stop\n")
-	h.ExpectEOF()
+	syscall.Kill(cmd.Process().Pid, syscall.SIGABRT)
+	session.Expectf("received signal %s", syscall.SIGABRT)
+	fmt.Fprintf(stdinPipe, "stop\n")
+	checkEOF(cmd, session, stdinPipe)
 }
 
 // TestHandlerCustomSignalWithStop verifies that sending a custom stop signal
 // to a server that listens for that signal causes the server to shut down
 // cleanly, even when a STOP signal is also among the handled signals.
 func TestHandlerCustomSignalWithStop(t *testing.T) {
-	rootCtx, shutdown := test.V23Init()
-	defer shutdown()
-
 	for _, signal := range []syscall.Signal{syscall.SIGABRT, syscall.SIGHUP} {
-		ctx, _ := vtrace.WithNewTrace(rootCtx)
-		sh, h := newShell(t, ctx, handleCustomWithStop)
-		h.Expect("ready")
-		checkSignalIsNotDefault(t, signal)
-		syscall.Kill(h.Pid(), signal)
-		h.Expectf("received signal %s", signal)
-		fmt.Fprintf(h.Stdin(), "close\n")
-		h.ExpectEOF()
-		sh.Cleanup(os.Stderr, os.Stderr)
+		func() {
+			sh := v23test.NewShell(t, v23test.Opts{})
+			defer sh.Cleanup()
+
+			cmd, session, stdinPipe := startFn(t, sh, handleCustomWithStop, true)
+			session.Expect("ready")
+			checkSignalIsNotDefault(t, signal)
+			syscall.Kill(cmd.Process().Pid, signal)
+			session.Expectf("received signal %s", signal)
+			fmt.Fprintf(stdinPipe, "close\n")
+			checkEOF(cmd, session, stdinPipe)
+		}()
 	}
 }
 
@@ -336,7 +328,26 @@
 
 }
 
+func modulesProgram(stdin io.Reader, stdout io.Writer, signals ...os.Signal) {
+	ctx, shutdown := test.V23Init()
+	closeStopLoop := make(chan struct{})
+	// obtain ac here since stopLoop may execute after shutdown is called below
+	ac := v23.GetAppCycle(ctx)
+	go stopLoop(func() { ac.Stop(ctx) }, stdin, closeStopLoop)
+	wait := ShutdownOnSignals(ctx, signals...)
+	fmt.Fprintf(stdout, "ready\n")
+	fmt.Fprintf(stdout, "received signal %s\n", <-wait)
+	shutdown()
+	<-closeStopLoop
+}
+
+var modulesHandleDefaults = modules.Register(func(env *modules.Env, args ...string) error {
+	modulesProgram(env.Stdin, env.Stdout)
+	return nil
+}, "modulesHandleDefaults")
+
 // TestCleanRemoteShutdown verifies that remote shutdown works correctly.
+// TODO(caprita): Rewrite this test to not use the modules package.
 func TestCleanRemoteShutdown(t *testing.T) {
 	ctx, shutdown := test.V23Init()
 	defer shutdown()
@@ -357,7 +368,7 @@
 	sh.SetConfigKey(mgmt.ParentNameConfigKey, configServiceName)
 	sh.SetConfigKey(mgmt.ProtocolConfigKey, "tcp")
 	sh.SetConfigKey(mgmt.AddressConfigKey, "127.0.0.1:0")
-	h, err := sh.Start(nil, handleDefaults)
+	h, err := sh.Start(nil, modulesHandleDefaults)
 	if err != nil {
 		t.Fatalf("unexpected error: %s", err)
 	}
@@ -379,3 +390,8 @@
 	fmt.Fprintf(h.Stdin(), "close\n")
 	h.ExpectEOF()
 }
+
+func TestMain(m *testing.M) {
+	modules.DispatchAndExitIfChild()
+	os.Exit(v23test.Run(m.Run))
+}
diff --git a/lib/signals/v23_internal_test.go b/lib/signals/v23_internal_test.go
deleted file mode 100644
index 9e784d5..0000000
--- a/lib/signals/v23_internal_test.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2015 The Vanadium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file was auto-generated via go generate.
-// DO NOT UPDATE MANUALLY
-
-package signals
-
-import (
-	"os"
-	"testing"
-
-	"v.io/x/ref/test/modules"
-)
-
-func TestMain(m *testing.M) {
-	modules.DispatchAndExitIfChild()
-	os.Exit(m.Run())
-}
diff --git a/lib/timekeeper/timekeeper.go b/lib/timekeeper/timekeeper.go
index 2fee233..0398624 100644
--- a/lib/timekeeper/timekeeper.go
+++ b/lib/timekeeper/timekeeper.go
@@ -19,6 +19,8 @@
 	// Sleep pauses the current goroutine for at least the duration d. A
 	// negative or zero duration causes Sleep to return immediately.
 	Sleep(d time.Duration)
+	// Now returns the current time.
+	Now() time.Time
 }
 
 // realTime is the default implementation of TimeKeeper, using the time package.
@@ -40,3 +42,8 @@
 func RealTime() TimeKeeper {
 	return &rt
 }
+
+// Now implements TimeKeeper.Now.
+func (t *realTime) Now() time.Time {
+	return time.Now()
+}
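
Adding Now to TimeKeeper lets clock-dependent code take a TimeKeeper rather
than calling time.Now directly, so tests can substitute a manual clock. A
rough sketch of the injection pattern (expiringEntry is a hypothetical type,
shown only for illustration):

    import (
        "time"

        "v.io/x/ref/lib/timekeeper"
    )

    type expiringEntry struct {
        clock    timekeeper.TimeKeeper
        deadline time.Time
    }

    func (e *expiringEntry) expired() bool {
        // Production code passes timekeeper.RealTime(); tests pass a manual
        // clock and advance it explicitly.
        return e.clock.Now().After(e.deadline)
    }
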
diff --git a/lib/v23test/v23test.go b/lib/v23test/v23test.go
index 2f00bfd..18c4a8c 100644
--- a/lib/v23test/v23test.go
+++ b/lib/v23test/v23test.go
@@ -37,6 +37,8 @@
 )
 
 // Cmd wraps gosh.Cmd and provides Vanadium-specific functionality.
+// TODO(sadovsky): Maybe add a Cmd.Session field (and update existing clients to
+// use it).
 type Cmd struct {
 	*gosh.Cmd
 }
@@ -81,11 +83,11 @@
 		}
 		// The "jiri test run vanadium-integration-test" command looks for test
 		// function names that start with "TestV23", and runs "go test" for only
-		// those Go packages containing at least one such test. That's how it avoids
-		// passing the -v23.tests flag to test packages for which the flag is not
+		// those packages containing at least one such test. That's how it avoids
+		// passing the -v23.tests flag to packages for which the flag is not
 		// registered.
-		// TODO(sadovsky): Share a common helper function for determining whether a
-		// given test function is an integration test.
+		// TODO(sadovsky): Share a common helper for determining whether a given
+		// test function is an integration test.
 		name, err := callerName()
 		if err != nil {
 			t.Fatal(err)
diff --git a/runtime/internal/rpc/test/full_test.go b/runtime/internal/rpc/test/full_test.go
index 8ebab4f..e859a4b 100644
--- a/runtime/internal/rpc/test/full_test.go
+++ b/runtime/internal/rpc/test/full_test.go
@@ -755,11 +755,13 @@
 	cctx := withPrincipal(t, ctx, "client")
 	bclient := v23.GetPrincipal(cctx).BlessingStore().Default()
 	cctx, err := v23.WithPrincipal(cctx,
-		&singleBlessingPrincipal{Principal: v23.GetPrincipal(cctx)})
+		vsecurity.MustForkPrincipal(
+			v23.GetPrincipal(cctx),
+			&singleBlessingStore{pk: v23.GetPrincipal(cctx).PublicKey()},
+			vsecurity.ImmutableBlessingRoots(v23.GetPrincipal(ctx).Roots())))
 	if err != nil {
 		t.Fatal(err)
 	}
-	v23.GetPrincipal(cctx).BlessingStore().SetDefault(bclient)
 
 	bvictim := v23.GetPrincipal(withPrincipal(t, ctx, "victim")).BlessingStore().Default()
 
diff --git a/runtime/internal/rpc/test/testutil_test.go b/runtime/internal/rpc/test/testutil_test.go
index 28c0325..cdadf55 100644
--- a/runtime/internal/rpc/test/testutil_test.go
+++ b/runtime/internal/rpc/test/testutil_test.go
@@ -61,6 +61,7 @@
 // shareable with any peer. It does not care about the public key that
 // blessing being set is bound to.
 type singleBlessingStore struct {
+	pk  security.PublicKey
 	b   security.Blessings
 	def security.Blessings
 }
@@ -79,8 +80,8 @@
 func (s *singleBlessingStore) Default() security.Blessings {
 	return s.def
 }
-func (*singleBlessingStore) PublicKey() security.PublicKey {
-	return nil
+func (s *singleBlessingStore) PublicKey() security.PublicKey {
+	return s.pk // This may be inconsistent with s.b & s.def, by design, for tests.
 }
 func (*singleBlessingStore) DebugString() string {
 	return ""
@@ -98,18 +99,6 @@
 	return security.Discharge{}
 }
 
-// singleBlessingPrincipal implements security.Principal. It is a wrapper over
-// a security.Principal that intercepts  all invocations on the
-// principal's BlessingStore and serves them via a singleBlessingStore.
-type singleBlessingPrincipal struct {
-	security.Principal
-	b singleBlessingStore
-}
-
-func (p *singleBlessingPrincipal) BlessingStore() security.BlessingStore {
-	return &p.b
-}
-
 func extractKey(t *testing.T, rootCtx *context.T, root *bcrypter.Root, blessing string) *bcrypter.PrivateKey {
 	key, err := root.Extract(rootCtx, blessing)
 	if err != nil {
diff --git a/runtime/internal/rt/mgmt_test.go b/runtime/internal/rt/mgmt_test.go
index 1fd1d8d..0a30932 100644
--- a/runtime/internal/rt/mgmt_test.go
+++ b/runtime/internal/rt/mgmt_test.go
@@ -25,8 +25,6 @@
 	"v.io/x/ref/test/modules"
 )
 
-//go:generate jiri test generate
-
 // TestBasic verifies that the basic plumbing works: LocalStop calls result in
 // stop messages being sent on the channel passed to WaitForStop.
 func TestBasic(t *testing.T) {
diff --git a/runtime/internal/rt/rt_test.go b/runtime/internal/rt/rt_test.go
index 3a673db..01bf52d 100644
--- a/runtime/internal/rt/rt_test.go
+++ b/runtime/internal/rt/rt_test.go
@@ -15,7 +15,6 @@
 	"v.io/v23"
 	"v.io/v23/context"
 	"v.io/v23/security"
-
 	"v.io/x/ref"
 	"v.io/x/ref/internal/logger"
 	vsecurity "v.io/x/ref/lib/security"
@@ -24,8 +23,6 @@
 	"v.io/x/ref/test/modules"
 )
 
-//go:generate jiri test generate
-
 func TestInit(t *testing.T) {
 	ref.EnvClearCredentials()
 	ctx, shutdown := v23.Init()
diff --git a/runtime/internal/rt/runtime_test.go b/runtime/internal/rt/runtime_test.go
index 6984ae3..ba80820 100644
--- a/runtime/internal/rt/runtime_test.go
+++ b/runtime/internal/rt/runtime_test.go
@@ -13,7 +13,6 @@
 	"v.io/v23/context"
 	"v.io/v23/naming"
 	"v.io/v23/rpc"
-
 	"v.io/x/ref/lib/stats"
 	_ "v.io/x/ref/runtime/factories/generic"
 	"v.io/x/ref/services/debug/debuglib"
diff --git a/runtime/internal/rt/shutdown_test.go b/runtime/internal/rt/shutdown_test.go
index 2a5177a..ca9f9dc 100644
--- a/runtime/internal/rt/shutdown_test.go
+++ b/runtime/internal/rt/shutdown_test.go
@@ -16,8 +16,6 @@
 	"v.io/x/ref/test/modules"
 )
 
-//go:generate jiri test generate
-
 var cstderr io.Writer
 
 func init() {
diff --git a/runtime/internal/rt/v23_test.go b/runtime/internal/rt/v23_test.go
index 79b162e..9b5e755 100644
--- a/runtime/internal/rt/v23_test.go
+++ b/runtime/internal/rt/v23_test.go
@@ -2,19 +2,17 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// This file was auto-generated via go generate.
-// DO NOT UPDATE MANUALLY
-
 package rt_test
 
 import (
 	"os"
 	"testing"
 
+	"v.io/x/ref/lib/v23test"
 	"v.io/x/ref/test/modules"
 )
 
 func TestMain(m *testing.M) {
 	modules.DispatchAndExitIfChild()
-	os.Exit(m.Run())
+	os.Exit(v23test.Run(m.Run))
 }
diff --git a/runtime/internal/vtrace/logger.go b/runtime/internal/vtrace/logger.go
index 450fab0..a0bf4dd 100644
--- a/runtime/internal/vtrace/logger.go
+++ b/runtime/internal/vtrace/logger.go
@@ -61,7 +61,7 @@
 }
 
 func (*VTraceLogger) VDepth(ctx *context.T, depth int, level int) bool {
-	return GetVTraceLevel(ctx) > level
+	return GetVTraceLevel(ctx) >= level
 }
 
 func (v *VTraceLogger) VIDepth(ctx *context.T, depth int, level int) context.ContextLogger {
diff --git a/runtime/internal/vtrace/vtrace.go b/runtime/internal/vtrace/vtrace.go
index a8e428b..638b204 100644
--- a/runtime/internal/vtrace/vtrace.go
+++ b/runtime/internal/vtrace/vtrace.go
@@ -121,6 +121,7 @@
 	if err != nil {
 		ctx.Error(err)
 	}
+	ctx = WithVTraceLevel(ctx, int(req.LogLevel))
 	return context.WithValue(ctx, spanKey, newSpan), newSpan
 }
 
@@ -160,6 +161,7 @@
 			SpanId:  span.id,
 			TraceId: span.trace,
 			Flags:   span.flags(),
+			LogLevel: int32(GetVTraceLevel(ctx)),
 		}
 	}
 	return vtrace.Request{}
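
These two vtrace changes work as a pair: the client stamps its current vtrace
level into the outgoing vtrace.Request (LogLevel), and the server re-applies
that level via WithVTraceLevel before dispatching, so ctx.VI(n) log statements
on the server honor the caller's level. A client-side sketch (ivtrace aliases
v.io/x/ref/runtime/internal/vtrace, as in the test below):

    ctx = ivtrace.WithVTraceLevel(ctx, 1)
    // Server-side ctx.VI(1) logging during this call is now recorded in the
    // trace collected by the client.
    call, err := v23.GetClient(ctx).StartCall(ctx, "logger", "Log", nil)
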
diff --git a/runtime/internal/vtrace/vtrace_logging_test.go b/runtime/internal/vtrace/vtrace_logging_test.go
index 3961bac..ebaf600 100644
--- a/runtime/internal/vtrace/vtrace_logging_test.go
+++ b/runtime/internal/vtrace/vtrace_logging_test.go
@@ -8,9 +8,20 @@
 	"strings"
 	"testing"
 
+	"v.io/v23"
 	"v.io/v23/vtrace"
+	"v.io/v23/context"
+	"v.io/v23/rpc"
+
+	"v.io/x/ref/test/testutil"
+	ivtrace "v.io/x/ref/runtime/internal/vtrace"
 )
 
+func (c *testServer) Log(ctx *context.T, _ rpc.ServerCall) error {
+	ctx.VI(1).Info("logging")
+	return nil
+}
+
 // TestLogging tests to make sure that ctx.Infof comments are added to the trace
 func TestLogging(t *testing.T) {
 	ctx, shutdown, _ := initForTest(t)
@@ -30,14 +41,82 @@
 	span.Finish()
 	record := vtrace.GetStore(ctx).TraceRecord(span.Trace())
 	messages := []string{
-		"vtrace_logging_test.go:22] logging from info",
-		"vtrace_logging_test.go:23] logging from infof",
-		"vtrace_logging_test.go:24] logging from info depth",
-		"vtrace_logging_test.go:26] logging from error",
-		"vtrace_logging_test.go:27] logging from errorf",
-		"vtrace_logging_test.go:28] logging from error depth",
+		"vtrace_logging_test.go:33] logging from info",
+		"vtrace_logging_test.go:34] logging from infof",
+		"vtrace_logging_test.go:35] logging from info depth",
+		"vtrace_logging_test.go:37] logging from error",
+		"vtrace_logging_test.go:38] logging from errorf",
+		"vtrace_logging_test.go:39] logging from error depth",
 	}
 	expectSequence(t, *record, []string{
 		"foo: " + strings.Join(messages, ", "),
 	})
-}
\ No newline at end of file
+}
+
+func startLoggingServer(ctx *context.T, idp *testutil.IDProvider) error {
+	principal := testutil.NewPrincipal()
+	if err := idp.Bless(principal, "server"); err != nil {
+		return err
+	}
+	_, _, err := makeTestServer(ctx, principal, "logger")
+	if err != nil {
+		return err
+	}
+	// Make sure the server is mounted, to avoid retries when StartCall is
+	// invoked in runLoggingCall; retries would complicate the span comparisons.
+	verifyMount(ctx, "logger")
+	return nil
+}
+
+func runLoggingCall(ctx *context.T) (*vtrace.TraceRecord, error) {
+	ctx, span := vtrace.WithNewSpan(ctx, "logging")
+	call, err := v23.GetClient(ctx).StartCall(ctx, "logger", "Log", nil)
+	if err != nil {
+		return nil, err
+	}
+	if err := call.Finish(); err != nil {
+		return nil, err
+	}
+	span.Finish()
+
+	return vtrace.GetStore(ctx).TraceRecord(span.Trace()), nil
+}
+
+func TestVIRPCWithNoLogging(t *testing.T) {
+	ctx, shutdown, idp := initForTest(t)
+	defer shutdown()
+
+	if err := startLoggingServer(ctx, idp); err != nil {
+		t.Fatalf("failed to start server: %v", err)
+	}
+	vtrace.ForceCollect(ctx)
+	record, err := runLoggingCall(ctx)
+
+	if err != nil {
+		t.Fatalf("logging call failed: %v", err)
+	}
+	expectSequence(t, *record, []string{
+		"logging",
+		"<rpc.Client>\"logger\".Log",
+		"\"\".Log",
+	})
+}
+
+func TestVIRPCWithLogging(t *testing.T) {
+	ctx, shutdown, idp := initForTest(t)
+	defer shutdown()
+
+	if err := startLoggingServer(ctx, idp); err != nil {
+		t.Fatalf("failed to start server: %v", err)
+	}
+	vtrace.ForceCollect(ctx)
+	ctx = ivtrace.WithVTraceLevel(ctx, 1)
+	record, err := runLoggingCall(ctx)
+
+	if err != nil {
+		t.Fatalf("logging call failed: %v", err)
+	}
+	expectSequence(t, *record, []string{
+		"\"\".Log: vtrace_logging_test.go:21] logging",
+	})
+}
diff --git a/services/agent/agentd/doc.go b/services/agent/agentd/doc.go
index 75e8a99..2570b1b 100644
--- a/services/agent/agentd/doc.go
+++ b/services/agent/agentd/doc.go
@@ -7,11 +7,16 @@
 
 /*
 Command agentd runs the security agent daemon, which holds a private key in
-memory and makes it available to a subprocess.
+memory and makes it available to other processes.
 
-Loads the private key specified in privatekey.pem in the specified credentials
-directory into memory, then starts the specified command with access to the
-private key via the agent protocol instead of directly reading from disk.
+Loads the credentials from the specified directory into memory, then optionally
+starts a command with access to these credentials via the agent protocol.
+
+Other processes can access the agent credentials when V23_AGENT_PATH is set to
+<credential dir>/agent.sock.
+
+Example:
+$ agentd --v23.credentials=$HOME/.credentials
+$ V23_AGENT_PATH=$HOME/.credentials/agent.sock principal dump
 
 Usage:
    agentd [flags] command [command_args...]
diff --git a/services/agent/agentd/main.go b/services/agent/agentd/main.go
index 584baae..51d980e 100644
--- a/services/agent/agentd/main.go
+++ b/services/agent/agentd/main.go
@@ -65,15 +65,20 @@
 var cmdAgentD = &cmdline.Command{
 	Runner: cmdline.RunnerFunc(runAgentD),
 	Name:   "agentd",
-	Short:  "Holds a private key in memory and makes it available to a subprocess",
+	Short:  "Holds a private key in memory and makes it available to other processes",
 	Long: `
 Command agentd runs the security agent daemon, which holds a private key in
-memory and makes it available to a subprocess.
+memory and makes it available to other processes.
 
-Loads the private key specified in privatekey.pem in the specified
-credentials directory into memory, then starts the specified command
-with access to the private key via the agent protocol instead of
-directly reading from disk.
+Loads the credentials from the specified directory into memory, then optionally
+starts a command with access to these credentials via the agent protocol.
+
+Other processes can access the agent credentials when V23_AGENT_PATH is set to
+<credential dir>/agent.sock.
+
+Example:
+$ agentd --v23.credentials=$HOME/.credentials
+$ V23_AGENT_PATH=$HOME/.credentials/agent.sock principal dump
 `,
 	ArgsName: "command [command_args...]",
 	ArgsLong: `
@@ -82,9 +87,6 @@
 }
 
 func runAgentD(env *cmdline.Env, args []string) error {
-	if len(args) < 1 {
-		return env.UsageErrorf("Need at least one argument.")
-	}
 	var restartOpts restartOptions
 	if err := restartOpts.parse(); err != nil {
 		return env.UsageErrorf("%v", err)
@@ -137,6 +139,11 @@
 		return err
 	}
 
+	if len(args) == 0 {
+		<-vsignals.ShutdownOnSignals(nil)
+		return nil
+	}
+
 	// Clear out the environment variable before starting the child.
 	if err = ref.EnvClearCredentials(); err != nil {
 		return fmt.Errorf("ref.EnvClearCredentials: %v", err)
diff --git a/services/agent/pod_agentd/main.go b/services/agent/pod_agentd/main.go
index 569bd4b..b681abf 100644
--- a/services/agent/pod_agentd/main.go
+++ b/services/agent/pod_agentd/main.go
@@ -91,7 +91,10 @@
 	// Run the server.
 	i := ipc.NewIPC()
 	defer i.Close()
-	if err = server.ServeAgent(i, lsecurity.NewImmutablePrincipal(p)); err != nil {
+	if err = server.ServeAgent(i, lsecurity.MustForkPrincipal(
+		p,
+		lsecurity.ImmutableBlessingStore(p.BlessingStore()),
+		lsecurity.ImmutableBlessingRoots(p.Roots()))); err != nil {
 		return err
 	}
 	if _, err := os.Stat(socketPath); err == nil {
diff --git a/services/debug/debug/doc.go b/services/debug/debug/doc.go
index d1c9467..d1aab46 100644
--- a/services/debug/debug/doc.go
+++ b/services/debug/debug/doc.go
@@ -234,7 +234,8 @@
 
 All the [passthru args] are passed to the pprof tool directly, e.g.
 
-$ debug pprof run a/b/c heap --text $ debug pprof run a/b/c profile -gv
+  $ debug pprof run a/b/c/__debug/pprof heap --text
+  $ debug pprof run a/b/c/__debug/pprof profile -gv
 
 The debug pprof run flags are:
  -pprofcmd=jiri go tool pprof
diff --git a/services/debug/debug/impl.go b/services/debug/debug/impl.go
index bce7e3e..664b3db 100644
--- a/services/debug/debug/impl.go
+++ b/services/debug/debug/impl.go
@@ -10,6 +10,8 @@
 import (
 	"encoding/json"
 	"fmt"
+	"net"
+	"net/http"
 	"os"
 	"os/exec"
 	"regexp"
@@ -517,8 +519,8 @@
 
 All the [passthru args] are passed to the pprof tool directly, e.g.
 
-$ debug pprof run a/b/c heap --text
-$ debug pprof run a/b/c profile -gv
+  $ debug pprof run a/b/c/__debug/pprof heap --text
+  $ debug pprof run a/b/c/__debug/pprof profile -gv
 `,
 }
 
@@ -531,19 +533,19 @@
 		return showPProfProfiles(ctx, env, name)
 	}
 	profile := args[1]
-	listener, err := pproflib.StartProxy(ctx, name)
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+	addr, err := startPprofProxyHTTPServer(ctx, name)
 	if err != nil {
 		return err
 	}
-	defer listener.Close()
-
 	// Construct the pprof command line:
 	// <pprofCmd> http://<proxyaddr>/pprof/<profile> [pprof flags]
 	pargs := []string{pprofCmd} // pprofCmd is purposely not escaped.
 	for i := 2; i < len(args); i++ {
 		pargs = append(pargs, shellEscape(args[i]))
 	}
-	pargs = append(pargs, shellEscape(fmt.Sprintf("http://%s/pprof/%s", listener.Addr(), profile)))
+	pargs = append(pargs, shellEscape(fmt.Sprintf("http://%s/pprof/%s", addr, profile)))
 	pcmd := strings.Join(pargs, " ")
 	fmt.Fprintf(env.Stdout, "Running: %s\n", pcmd)
 	c := exec.Command("sh", "-c", pcmd)
@@ -590,15 +592,12 @@
 	if want, got := 1, len(args); got != want {
 		return env.UsageErrorf("proxy: incorrect number of arguments, got %d, want %d", got, want)
 	}
-	name := args[0]
-	listener, err := pproflib.StartProxy(ctx, name)
+	addr, err := startPprofProxyHTTPServer(ctx, args[0])
 	if err != nil {
 		return err
 	}
-	defer listener.Close()
-
 	fmt.Fprintln(env.Stdout)
-	fmt.Fprintf(env.Stdout, "The pprof proxy is listening at http://%s/pprof\n", listener.Addr())
+	fmt.Fprintf(env.Stdout, "The pprof proxy is listening at http://%s/pprof\n", addr)
 	fmt.Fprintln(env.Stdout)
 	fmt.Fprintln(env.Stdout, "Hit CTRL-C to exit")
 
@@ -634,3 +633,17 @@
 		cmdBrowse,
 	},
 }
+
+func startPprofProxyHTTPServer(ctx *context.T, name string) (string, error) {
+	ln, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		return "", err
+	}
+	http.Handle("/", pproflib.PprofProxy(ctx, "", name))
+	go http.Serve(ln, nil)
+	go func() {
+		<-ctx.Done()
+		ln.Close()
+	}()
+	return ln.Addr().String(), nil
+}
diff --git a/services/groups/groupsd/groupsd_v23_test.go b/services/groups/groupsd/groupsd_v23_test.go
index 0bacf23..89f11b4 100644
--- a/services/groups/groupsd/groupsd_v23_test.go
+++ b/services/groups/groupsd/groupsd_v23_test.go
@@ -4,6 +4,7 @@
 
 package main_test
 
+// FIXME: Why doesn't this need a runtime?
 import (
 	"encoding/json"
 	"fmt"
diff --git a/services/identity/internal/handlers/bless.go b/services/identity/internal/handlers/bless.go
index 45142ac..8d4a2c4 100644
--- a/services/identity/internal/handlers/bless.go
+++ b/services/identity/internal/handlers/bless.go
@@ -142,7 +142,7 @@
 func (a *accessTokenBlesser) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	remoteKey, err := a.remotePublicKey(r)
 	if err != nil {
-		a.ctx.Info("Failed to decode public key [%v] for request %#v", err, r)
+		a.ctx.Infof("Failed to decode public key [%v] for request %#v", err, r)
 		util.HTTPServerError(w, fmt.Errorf("failed to decode public key: %v", err))
 		return
 	}
@@ -152,21 +152,21 @@
 
 	caveats, err := a.blessingCaveats(r, p)
 	if err != nil {
-		a.ctx.Info("Failed to constuct caveats for blessing [%v] for request %#v", err, r)
+		a.ctx.Infof("Failed to constuct caveats for blessing [%v] for request %#v", err, r)
 		util.HTTPServerError(w, fmt.Errorf("failed to construct caveats for blessing: %v", err))
 		return
 	}
 
 	extension, err := a.blessingExtension(r)
 	if err != nil {
-		a.ctx.Info("Failed to process access token [%v] for request %#v", err, r)
+		a.ctx.Infof("Failed to process access token [%v] for request %#v", err, r)
 		util.HTTPServerError(w, fmt.Errorf("failed to process access token: %v", err))
 		return
 	}
 
 	blessings, err := p.Bless(remoteKey, with, extension, caveats[0], caveats[1:]...)
 	if err != nil {
-		a.ctx.Info("Failed to Bless [%v] for request %#v", err, r)
+		a.ctx.Infof("Failed to Bless [%v] for request %#v", err, r)
 		util.HTTPServerError(w, fmt.Errorf("failed to Bless: %v", err))
 		return
 	}
@@ -179,7 +179,7 @@
 	case jsonFormat:
 		encodedBlessings, err := a.encodeBlessingsJson(blessings)
 		if err != nil {
-			a.ctx.Info("Failed to encode blessings [%v] for request %#v", err, r)
+			a.ctx.Infof("Failed to encode blessings [%v] for request %#v", err, r)
 			util.HTTPServerError(w, fmt.Errorf("failed to encode blessings in format %v: %v", outputFormat, err))
 			return
 		}
@@ -188,14 +188,14 @@
 	case base64VomFormat:
 		encodedBlessings, err := a.encodeBlessingsVom(blessings)
 		if err != nil {
-			a.ctx.Info("Failed to encode blessings [%v] for request %#v", err, r)
+			a.ctx.Infof("Failed to encode blessings [%v] for request %#v", err, r)
 			util.HTTPServerError(w, fmt.Errorf("failed to encode blessings in format %v: %v", outputFormat, err))
 			return
 		}
 		w.Header().Set("Content-Type", "application/text")
 		w.Write([]byte(encodedBlessings))
 	default:
-		a.ctx.Info("Unrecognized output format [%v] in request %#v", outputFormat, r)
+		a.ctx.Infof("Unrecognized output format [%v] in request %#v", outputFormat, r)
 		util.HTTPServerError(w, fmt.Errorf("unrecognized output format [%v] in request. Allowed formats are [%v, %v]", outputFormat, base64VomFormat, jsonFormat))
 		return
 	}
diff --git a/services/internal/pproflib/proxy.go b/services/internal/pproflib/proxy.go
index 4226395..967799d 100644
--- a/services/internal/pproflib/proxy.go
+++ b/services/internal/pproflib/proxy.go
@@ -15,46 +15,50 @@
 	"fmt"
 	"html/template"
 	"io"
-	"net"
 	"net/http"
 	"strconv"
 	"strings"
-	"time"
 
 	"v.io/v23/context"
 	"v.io/v23/services/pprof"
 	"v.io/v23/vtrace"
 )
 
-// StartProxy starts the pprof proxy to a remote pprof object.
-func StartProxy(ctx *context.T, name string) (net.Listener, error) {
-	listener, err := net.Listen("tcp", "127.0.0.1:0")
-	if err != nil {
-		return nil, err
+// PprofProxy returns an http.Handler that serves the profile information
+// of a remote process with the vanadium object name 'name'.
+//
+// The handler assumes that it is serving paths under "pathPrefix".
+func PprofProxy(ctx *context.T, pathPrefix, name string) http.Handler {
+	return &proxy{
+		ctx:        ctx,
+		name:       name,
+		pathPrefix: pathPrefix,
 	}
-	p := &proxy{ctx, name}
-	mux := http.NewServeMux()
-	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
-		w.Header().Add("Location", "/pprof/")
-		w.WriteHeader(http.StatusFound)
-	})
-	mux.HandleFunc("/pprof/", p.index)
-	mux.HandleFunc("/pprof/cmdline", p.cmdLine)
-	mux.HandleFunc("/pprof/profile", p.profile)
-	mux.HandleFunc("/pprof/symbol", p.symbol)
-
-	server := &http.Server{
-		Handler:      mux,
-		ReadTimeout:  time.Hour,
-		WriteTimeout: time.Hour,
-	}
-	go server.Serve(listener)
-	return listener, nil
 }
 
 type proxy struct {
-	ctx  *context.T
-	name string
+	ctx        *context.T
+	name       string
+	pathPrefix string
+}
+
+func (p *proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	switch path := strings.TrimPrefix(strings.TrimPrefix(r.URL.Path, p.pathPrefix), "/"); path {
+	case "":
+		http.Redirect(w, r, r.URL.Path+"/pprof/", http.StatusTemporaryRedirect)
+	case "pprof/cmdline":
+		p.cmdLine(w, r)
+	case "pprof/profile":
+		p.profile(w, r)
+	case "pprof/symbol":
+		p.symbol(w, r)
+	default:
+		if strings.HasPrefix(path, "pprof/") || path == "pprof" {
+			p.index(w, r)
+		} else {
+			http.NotFound(w, r)
+		}
+	}
 }
 
 func replyUnavailable(w http.ResponseWriter, err error) {
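
With listener management removed from pproflib, callers decide where the
proxy lives in their URL space and own the HTTP server's lifecycle. A small
sketch mirroring the updated test (ctx and objName assumed in scope):

    mux := http.NewServeMux()
    mux.Handle("/myprefix/", pproflib.PprofProxy(ctx, "/myprefix", objName))
    ln, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        // handle the error
    }
    go http.Serve(ln, mux)
    // Profiles are then available under http://<addr>/myprefix/pprof/.
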
diff --git a/services/internal/pproflib/proxy_test.go b/services/internal/pproflib/proxy_test.go
index 5861a64..273c8e0 100644
--- a/services/internal/pproflib/proxy_test.go
+++ b/services/internal/pproflib/proxy_test.go
@@ -7,6 +7,7 @@
 import (
 	"fmt"
 	"io/ioutil"
+	"net"
 	"net/http"
 	"testing"
 
@@ -35,37 +36,37 @@
 		t.Fatalf("failed to start server: %v", err)
 	}
 	endpoints := s.Status().Endpoints
-	l, err := pproflib.StartProxy(ctx, endpoints[0].Name())
+	ln, err := net.Listen("tcp", "127.0.0.1:0")
 	if err != nil {
-		t.Fatalf("failed to start proxy: %v", err)
+		t.Fatal(err)
 	}
-	defer l.Close()
-
+	defer ln.Close()
+	go http.Serve(ln, pproflib.PprofProxy(ctx, "/myprefix", endpoints[0].Name()))
 	testcases := []string{
-		"/pprof/",
-		"/pprof/cmdline",
-		"/pprof/profile?seconds=1",
-		"/pprof/heap",
-		"/pprof/goroutine",
+		"/myprefix/pprof/",
+		"/myprefix/pprof/cmdline",
+		"/myprefix/pprof/profile?seconds=1",
+		"/myprefix/pprof/heap",
+		"/myprefix/pprof/goroutine",
 		fmt.Sprintf("/pprof/symbol?%p", TestPProfProxy),
 	}
 	for _, c := range testcases {
-		url := "http://" + l.Addr().String() + c
-		t.Log(url)
+		url := fmt.Sprintf("http://%s%s", ln.Addr(), c)
 		resp, err := http.Get(url)
 		if err != nil {
-			t.Fatalf("http.Get failed: %v", err)
+			t.Fatalf("%v: http.Get failed: %v", url, err)
 		}
 		if resp.StatusCode != 200 {
-			t.Errorf("unexpected status code. Got %d, want 200", resp.StatusCode)
+			t.Errorf("%v: unexpected status code. Got %d, want 200", url, resp.StatusCode)
 		}
 		body, err := ioutil.ReadAll(resp.Body)
 		if err != nil {
-			t.Fatalf("ReadAll failed: %v", err)
+			t.Errorf("%v: ReadAll failed: %v", url, err)
+			continue
 		}
 		resp.Body.Close()
 		if len(body) == 0 {
-			t.Errorf("unexpected empty body")
+			t.Errorf("%v: unexpected empty body", url)
 		}
 	}
 }
diff --git a/services/mounttable/mounttablelib/mounttable.go b/services/mounttable/mounttablelib/mounttable.go
index 71055a5..49fb5c4 100644
--- a/services/mounttable/mounttablelib/mounttable.go
+++ b/services/mounttable/mounttablelib/mounttable.go
@@ -23,6 +23,7 @@
 	"v.io/v23/services/mounttable"
 	"v.io/v23/verror"
 	"v.io/x/ref/lib/stats"
+	"v.io/x/ref/lib/timekeeper"
 )
 
 const pkgPath = "v.io/x/ref/services/mounttable/mounttablelib"
@@ -72,6 +73,7 @@
 	serverCounter      *stats.Integer
 	perUserNodeCounter *stats.Map
 	maxNodesPerUser    int64
+	slm                *serverListManager
 }
 
 var _ rpc.Dispatcher = (*mountTable)(nil)
@@ -132,12 +134,16 @@
 //
 // statsPrefix is the prefix for for exported statistics objects.
 func NewMountTableDispatcher(ctx *context.T, permsFile, persistDir, statsPrefix string) (rpc.Dispatcher, error) {
+	return NewMountTableDispatcherWithClock(ctx, permsFile, persistDir, statsPrefix, timekeeper.RealTime())
+}
+
+// NewMountTableDispatcherWithClock is like NewMountTableDispatcher, but uses
+// the supplied clock to measure the expiration of mounted server entries.
+func NewMountTableDispatcherWithClock(ctx *context.T, permsFile, persistDir, statsPrefix string, clock timekeeper.TimeKeeper) (rpc.Dispatcher, error) {
 	mt := &mountTable{
 		root:               new(node),
 		nodeCounter:        stats.NewInteger(naming.Join(statsPrefix, "num-nodes")),
 		serverCounter:      stats.NewInteger(naming.Join(statsPrefix, "num-mounted-servers")),
 		perUserNodeCounter: stats.NewMap(naming.Join(statsPrefix, "num-nodes-per-user")),
 		maxNodesPerUser:    defaultMaxNodesPerUser,
+		slm:                newServerListManager(clock),
 	}
 	mt.root.parent = mt.newNode() // just for its lock
 	if persistDir != "" {
@@ -551,7 +557,7 @@
 		n.mount = nil
 	}
 	if n.mount == nil {
-		n.mount = &mount{servers: newServerList(), mt: wantMT, leaf: wantLeaf}
+		n.mount = &mount{servers: mt.slm.newServerList(), mt: wantMT, leaf: wantLeaf}
 	}
 	n.mount.servers.add(server, time.Duration(ttlsecs)*time.Second)
 	mt.serverCounter.Incr(numServers(n) - nServersBefore)
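
Threading a TimeKeeper through the dispatcher replaces the old package-level
fake-clock hook, so TTL expiry can now be driven per instance. A sketch of the
test-side wiring (ctx and ttl assumed in scope; the manual clock comes from
v.io/x/ref/test/timekeeper, as in the updated tests below):

    clock := timekeeper.NewManualTime()
    disp, err := mounttablelib.NewMountTableDispatcherWithClock(
        ctx, "testdata/test.perms", "", "stats", clock)
    if err != nil {
        // handle the error
    }
    _ = disp
    // After mounting a name with a TTL, advance the clock past the TTL to
    // force expiry without sleeping in real time.
    clock.AdvanceTime(ttl + time.Second)
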
diff --git a/services/mounttable/mounttablelib/mounttable_test.go b/services/mounttable/mounttablelib/mounttable_test.go
index 9fb4300..8cd9e99 100644
--- a/services/mounttable/mounttablelib/mounttable_test.go
+++ b/services/mounttable/mounttablelib/mounttable_test.go
@@ -32,6 +32,7 @@
 	"v.io/x/ref/services/mounttable/mounttablelib"
 	"v.io/x/ref/test"
 	"v.io/x/ref/test/testutil"
+	"v.io/x/ref/test/timekeeper"
 )
 
 // Simulate different processes with different runtimes.
@@ -181,12 +182,13 @@
 	}
 }
 
-func newMT(t *testing.T, permsFile, persistDir, statsDir string, rootCtx *context.T) (func() error, string) {
+func newMT(t *testing.T, permsFile, persistDir, statsDir string, rootCtx *context.T) (func() error, string, timekeeper.ManualTime) {
 	reservedDisp := debuglib.NewDispatcher(nil)
 	ctx := v23.WithReservedNameDispatcher(rootCtx, reservedDisp)
 
 	// Add mount table service.
-	mt, err := mounttablelib.NewMountTableDispatcher(ctx, permsFile, persistDir, statsDir)
+	clock := timekeeper.NewManualTime()
+	mt, err := mounttablelib.NewMountTableDispatcherWithClock(ctx, permsFile, persistDir, statsDir, clock)
 	if err != nil {
 		boom(t, "mounttablelib.NewMountTableDispatcher: %v", err)
 	}
@@ -199,7 +201,7 @@
 
 	estr := server.Status().Endpoints[0].String()
 	t.Logf("endpoint %s", estr)
-	return server.Stop, estr
+	return server.Stop, estr, clock
 }
 
 func newCollection(t *testing.T, rootCtx *context.T) (func() error, string) {
@@ -218,7 +220,7 @@
 	rootCtx, aliceCtx, bobCtx, shutdown := initTest()
 	defer shutdown()
 
-	stop, mtAddr := newMT(t, "testdata/test.perms", "", "testMountTable", rootCtx)
+	stop, mtAddr, clock := newMT(t, "testdata/test.perms", "", "testMountTable", rootCtx)
 	defer stop()
 	stop, collectionAddr := newCollection(t, rootCtx)
 	defer stop()
@@ -307,11 +309,9 @@
 
 	// Try timing out a mount.
 	rootCtx.Info("Try timing out a mount.")
-	ft := mounttablelib.NewFakeTimeClock()
-	mounttablelib.SetServerListClock(ft)
 	doMount(t, rootCtx, mtAddr, "stuffWithTTL", collectionName, true)
 	checkContents(t, rootCtx, naming.JoinAddressName(mtAddr, "stuffWithTTL/the/rain"), "the rain", true)
-	ft.Advance(time.Duration(ttlSecs+4) * time.Second)
+	clock.AdvanceTime(time.Duration(ttlSecs+4) * time.Second)
 	checkContents(t, rootCtx, naming.JoinAddressName(mtAddr, "stuffWithTTL/the/rain"), "the rain", false)
 
 	// Test unauthorized mount.
@@ -392,7 +392,7 @@
 	rootCtx, shutdown := test.V23InitWithMounttable()
 	defer shutdown()
 
-	stop, estr := newMT(t, "", "", "testGlob", rootCtx)
+	stop, estr, _ := newMT(t, "", "", "testGlob", rootCtx)
 	defer stop()
 
 	// set up a mount space
@@ -507,7 +507,7 @@
 	rootCtx, aliceCtx, bobCtx, shutdown := initTest()
 	defer shutdown()
 
-	stop, estr := newMT(t, "testdata/test.perms", "", "testAccessListTemplate", rootCtx)
+	stop, estr, _ := newMT(t, "testdata/test.perms", "", "testAccessListTemplate", rootCtx)
 	defer stop()
 	fakeServer := naming.JoinAddressName(estr, "quux")
 
@@ -580,7 +580,7 @@
 	rootCtx, aliceCtx, bobCtx, shutdown := initTest()
 	defer shutdown()
 
-	stop, estr := newMT(t, "testdata/test.perms", "", "testGlobAccessLists", rootCtx)
+	stop, estr, _ := newMT(t, "testdata/test.perms", "", "testGlobAccessLists", rootCtx)
 	defer stop()
 
 	// set up a mount space
@@ -613,7 +613,7 @@
 	rootCtx, shutdown := test.V23InitWithMounttable()
 	defer shutdown()
 
-	stop, estr := newMT(t, "", "", "testCleanup", rootCtx)
+	stop, estr, _ := newMT(t, "", "", "testCleanup", rootCtx)
 	defer stop()
 
 	// Set up one mount.
@@ -641,7 +641,7 @@
 	rootCtx, aliceCtx, bobCtx, shutdown := initTest()
 	defer shutdown()
 
-	stop, estr := newMT(t, "testdata/test.perms", "", "testDelete", rootCtx)
+	stop, estr, _ := newMT(t, "testdata/test.perms", "", "testDelete", rootCtx)
 	defer stop()
 
 	// set up a mount space
@@ -674,7 +674,7 @@
 	rootCtx, shutdown := test.V23InitWithMounttable()
 	defer shutdown()
 
-	stop, estr := newMT(t, "", "", "testerverFormat", rootCtx)
+	stop, estr, _ := newMT(t, "", "", "testerverFormat", rootCtx)
 	defer stop()
 
 	doMount(t, rootCtx, estr, "endpoint", naming.JoinAddressName(estr, "life/on/the/mississippi"), true)
@@ -688,28 +688,26 @@
 	rootCtx, shutdown := test.V23InitWithMounttable()
 	defer shutdown()
 
-	stop, estr := newMT(t, "", "", "testExpiry", rootCtx)
+	stop, estr, clock := newMT(t, "", "", "testExpiry", rootCtx)
 	defer stop()
 	stop, collectionAddr := newCollection(t, rootCtx)
 	defer stop()
 
 	collectionName := naming.JoinAddressName(collectionAddr, "collection")
 
-	ft := mounttablelib.NewFakeTimeClock()
-	mounttablelib.SetServerListClock(ft)
 	doMount(t, rootCtx, estr, "a1/b1", collectionName, true)
 	doMount(t, rootCtx, estr, "a1/b2", collectionName, true)
 	doMount(t, rootCtx, estr, "a2/b1", collectionName, true)
 	doMount(t, rootCtx, estr, "a2/b2/c", collectionName, true)
 
 	checkMatch(t, []string{"a1/b1", "a2/b1"}, doGlob(t, rootCtx, estr, "", "*/b1/..."))
-	ft.Advance(time.Duration(ttlSecs/2) * time.Second)
+	clock.AdvanceTime(time.Duration(ttlSecs/2) * time.Second)
 	checkMatch(t, []string{"a1/b1", "a2/b1"}, doGlob(t, rootCtx, estr, "", "*/b1/..."))
 	checkMatch(t, []string{"c"}, doGlob(t, rootCtx, estr, "a2/b2", "*"))
 	// Refresh only a1/b1.  All the other mounts will expire upon the next
-	// ft advance.
+	// clock advance.
 	doMount(t, rootCtx, estr, "a1/b1", collectionName, true)
-	ft.Advance(time.Duration(ttlSecs/2+4) * time.Second)
+	clock.AdvanceTime(time.Duration(ttlSecs/2+4) * time.Second)
 	checkMatch(t, []string{"a1", "a1/b1"}, doGlob(t, rootCtx, estr, "", "*/..."))
 	checkMatch(t, []string{"a1/b1"}, doGlob(t, rootCtx, estr, "", "*/b1/..."))
 }
@@ -755,10 +753,7 @@
 	rootCtx, shutdown := test.V23InitWithMounttable()
 	defer shutdown()
 
-	ft := mounttablelib.NewFakeTimeClock()
-	mounttablelib.SetServerListClock(ft)
-
-	stop, estr := newMT(t, "", "", "mounttable", rootCtx)
+	stop, estr, clock := newMT(t, "", "", "mounttable", rootCtx)
 	defer stop()
 
 	// Test flat tree
@@ -855,7 +850,7 @@
 
 	// Test expired mounts
 	// "1/2/3/4/5" is still mounted from earlier.
-	ft.Advance(time.Duration(ttlSecs+4) * time.Second)
+	clock.AdvanceTime(time.Duration(ttlSecs+4) * time.Second)
 	if _, err := resolve(rootCtx, naming.JoinAddressName(estr, "1/2/3/4/5")); err == nil {
 		t.Errorf("Expected failure. Got success")
 	}
@@ -868,7 +863,7 @@
 	rootCtx, _, _, shutdown := initTest()
 	defer shutdown()
 
-	stop, estr := newMT(t, "testdata/intermediate.perms", "", "TestIntermediateNodesCreatedFromConfig", rootCtx)
+	stop, estr, _ := newMT(t, "testdata/intermediate.perms", "", "TestIntermediateNodesCreatedFromConfig", rootCtx)
 	defer stop()
 
 	// x and x/y should have the same permissions at the root.
diff --git a/services/mounttable/mounttablelib/persist_test.go b/services/mounttable/mounttablelib/persist_test.go
index 74e6e1e..b7de026 100644
--- a/services/mounttable/mounttablelib/persist_test.go
+++ b/services/mounttable/mounttablelib/persist_test.go
@@ -25,7 +25,7 @@
 	}
 	defer os.RemoveAll(td)
 	fmt.Printf("temp persist dir %s\n", td)
-	stop, mtAddr := newMT(t, "", td, "testPersistence", rootCtx)
+	stop, mtAddr, _ := newMT(t, "", td, "testPersistence", rootCtx)
 
 	perms1 := access.Permissions{
 		"Read":    access.AccessList{In: []security.BlessingPattern{security.AllPrincipals}},
@@ -53,7 +53,7 @@
 	stop()
 
 	// Restart with the persisted data.
-	stop, mtAddr = newMT(t, "", td, "testPersistence", rootCtx)
+	stop, mtAddr, _ = newMT(t, "", td, "testPersistence", rootCtx)
 
 	// Add root as Admin to each of the perms since the mounttable itself will.
 	perms1["Admin"] = access.AccessList{In: []security.BlessingPattern{"root"}}
diff --git a/services/mounttable/mounttablelib/serverlist.go b/services/mounttable/mounttablelib/serverlist.go
index d5b8084..88aefff 100644
--- a/services/mounttable/mounttablelib/serverlist.go
+++ b/services/mounttable/mounttablelib/serverlist.go
@@ -11,21 +11,14 @@
 
 	"v.io/v23/naming"
 	vdltime "v.io/v23/vdlroot/time"
+
+	"v.io/x/ref/lib/timekeeper"
 )
 
-type serverListClock interface {
-	Now() time.Time
+type serverListManager struct {
+	clock timekeeper.TimeKeeper
 }
 
-type realTime bool
-
-func (t realTime) Now() time.Time {
-	return time.Now()
-}
-
-// TODO(caprita): Replace this with the timekeeper library.
-var slc = serverListClock(realTime(true))
-
 // server maintains the state of a single server.  Unless expires is refreshed before the
 // time is reached, the entry will be removed.
 type server struct {
@@ -36,12 +29,23 @@
 // serverList represents an ordered list of servers.
 type serverList struct {
 	sync.Mutex
+	m *serverListManager
 	l *list.List // contains entries of type *server
 }
 
+// newServerListManager creates a server list manager that uses the given clock.
+func newServerListManager(clock timekeeper.TimeKeeper) *serverListManager {
+	return &serverListManager{clock: clock}
+}
+
+// setClock replaces the clock used to compute server entry expirations.
+func (slm *serverListManager) setClock(clock timekeeper.TimeKeeper) {
+	slm.clock = clock
+}
+
 // newServerList creates a synchronized list of servers.
-func newServerList() *serverList {
-	return &serverList{l: list.New()}
+func (slm *serverListManager) newServerList() *serverList {
+	return &serverList{l: list.New(), m: slm}
 }
 
 func (sl *serverList) len() int {
@@ -60,7 +64,7 @@
 // update the expiration time and move to the front of the list.  That
 // way the most recently refreshed is always first.
 func (sl *serverList) add(oa string, ttl time.Duration) {
-	expires := slc.Now().Add(ttl)
+	expires := sl.m.clock.Now().Add(ttl)
 	sl.Lock()
 	defer sl.Unlock()
 	for e := sl.l.Front(); e != nil; e = e.Next() {
@@ -97,7 +101,7 @@
 	sl.Lock()
 	defer sl.Unlock()
 
-	now := slc.Now()
+	now := sl.m.clock.Now()
 	var next *list.Element
 	removed := 0
 	for e := sl.l.Front(); e != nil; e = next {
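
For reference, the clock-injection pattern this refactor adopts can be sketched as follows. This is a minimal, self-contained illustration; TimeKeeper, manualTime, entry, and manager below are stand-ins for v.io/x/ref/lib/timekeeper and the unexported serverList machinery above, not the real APIs:

```go
package main

import (
	"fmt"
	"time"
)

// TimeKeeper is the minimal clock interface being injected; it mimics the
// Now method of timekeeper.TimeKeeper.
type TimeKeeper interface {
	Now() time.Time
}

// manualTime is a test clock that only moves when told to, mirroring the
// behavior of timekeeper.NewManualTime / AdvanceTime.
type manualTime struct{ t time.Time }

func (m *manualTime) Now() time.Time              { return m.t }
func (m *manualTime) AdvanceTime(d time.Duration) { m.t = m.t.Add(d) }

// entry expires unless refreshed, like the mount table's server entries.
type entry struct {
	name    string
	expires time.Time
}

// manager owns the clock, so production code passes a real-time keeper and
// tests pass a manual one -- no package-level clock variable to swap.
type manager struct{ clock TimeKeeper }

func (mg *manager) add(name string, ttl time.Duration) entry {
	return entry{name: name, expires: mg.clock.Now().Add(ttl)}
}

func (mg *manager) expired(e entry) bool {
	return mg.clock.Now().After(e.expires)
}

func main() {
	clock := &manualTime{t: time.Unix(0, 0)}
	mg := &manager{clock: clock}
	e := mg.add("endpoint", 10*time.Second)
	fmt.Println(mg.expired(e)) // false
	clock.AdvanceTime(11 * time.Second)
	fmt.Println(mg.expired(e)) // true, with no real sleeping and no flakiness
}
```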
diff --git a/services/mounttable/mounttablelib/serverlist_test.go b/services/mounttable/mounttablelib/serverlist_test.go
index 3f8c56a..deda538 100644
--- a/services/mounttable/mounttablelib/serverlist_test.go
+++ b/services/mounttable/mounttablelib/serverlist_test.go
@@ -11,6 +11,7 @@
 
 	"v.io/v23/naming"
 	vdltime "v.io/v23/vdlroot/time"
+	"v.io/x/ref/test/timekeeper"
 )
 
 func TestServerList(t *testing.T) {
@@ -22,9 +23,9 @@
 	}
 
 	// Test adding entries.
-	ft := NewFakeTimeClock()
-	SetServerListClock(ft)
-	sl := newServerList()
+	clock := timekeeper.NewManualTime()
+	start := clock.Now()
+	sl := newServerListManager(clock).newServerList()
 	for i, ep := range eps {
 		sl.add(ep, time.Duration(5*i)*time.Second)
 	}
@@ -33,8 +34,8 @@
 	}
 
 	// Test timing out entries.
-	ft.Advance(6 * time.Second)
-	if numLeft, _ := sl.removeExpired(); numLeft != len(eps)-2 {
+	clock.AdvanceTime(6 * time.Second)
+	if numLeft, _ := sl.removeExpired(); numLeft != len(eps)-2 {
 		t.Fatalf("got %d, want %d", sl.len(), len(eps)-2)
 	}
 
@@ -48,7 +49,7 @@
 	if got, want := sl.copyToSlice(), []naming.MountedServer{
 		{
 			Server:   "endpoint:dfgsfdg@@",
-			Deadline: vdltime.Deadline{Time: now.Add(15 * time.Second)},
+			Deadline: vdltime.Deadline{Time: start.Add(15 * time.Second)},
 		},
 	}; !reflect.DeepEqual(got, want) {
 		t.Errorf("Got %v, want %v", got, want)
diff --git a/services/mounttable/mounttablelib/util_test.go b/services/mounttable/mounttablelib/util_test.go
index b99129b..32d4731 100644
--- a/services/mounttable/mounttablelib/util_test.go
+++ b/services/mounttable/mounttablelib/util_test.go
@@ -4,32 +4,7 @@
 
 package mounttablelib
 
-import "time"
-
-// These routines are used by external tests.
-
-// SetServerListClock sets up an alternate clock.
-func SetServerListClock(x serverListClock) {
-	slc = x
-}
-
 // DefaultMaxNodesPerUser returns the maximum number of nodes per user.
 func DefaultMaxNodesPerUser() int {
 	return defaultMaxNodesPerUser
 }
-
-var now = time.Now()
-
-type fakeTime struct {
-	theTime time.Time
-}
-
-func NewFakeTimeClock() *fakeTime {
-	return &fakeTime{theTime: now}
-}
-func (ft *fakeTime) Now() time.Time {
-	return ft.theTime
-}
-func (ft *fakeTime) Advance(d time.Duration) {
-	ft.theTime = ft.theTime.Add(d)
-}
diff --git a/services/syncbase/server/app.go b/services/syncbase/server/app.go
index adf4a5f..3ea1d9a 100644
--- a/services/syncbase/server/app.go
+++ b/services/syncbase/server/app.go
@@ -5,7 +5,9 @@
 package server
 
 import (
+	"crypto/rand"
 	"encoding/hex"
+	"fmt"
 	"path"
 	"sync"
 
@@ -132,18 +134,16 @@
 	return dbNames, nil
 }
 
-func (a *app) CreateNoSQLDatabase(ctx *context.T, call rpc.ServerCall, dbName string, perms access.Permissions, metadata *nosqlwire.SchemaMetadata) error {
+func (a *app) CreateNoSQLDatabase(ctx *context.T, call rpc.ServerCall, dbName string, perms access.Permissions, metadata *nosqlwire.SchemaMetadata) (reterr error) {
 	if !a.exists {
 		vlog.Fatalf("app %q does not exist", a.name)
 	}
-	// TODO(sadovsky): Crash if any step fails, and use WAL to ensure that if we
-	// crash, upon restart we execute any remaining steps before we start handling
-	// client requests.
-	//
 	// Steps:
-	// 1. Check appData perms, create dbInfo record.
-	// 2. Initialize database.
-	// 3. Flip dbInfo.Initialized to true. <===== CHANGE BECOMES VISIBLE
+	// 1. Check appData perms.
+	// 2. Put dbInfo record into garbage collection log, to clean up database if
+	//    remaining steps fail or syncbased crashes.
+	// 3. Initialize database.
+	// 4. Move dbInfo from GC log into active databases. <===== CHANGE BECOMES VISIBLE
 	a.mu.Lock()
 	defer a.mu.Unlock()
 	if _, ok := a.dbs[dbName]; ok {
@@ -151,14 +151,69 @@
 		return verror.New(verror.ErrExist, ctx, dbName)
 	}
 
-	// 1. Check appData perms, create dbInfo record.
-	rootDir, engine := a.rootDirForDb(dbName), a.s.opts.Engine
+	// 1. Check appData perms.
 	aData := &AppData{}
+	if err := util.GetWithAuth(ctx, call, a.s.st, a.stKey(), aData); err != nil {
+		return err
+	}
+
+	// 2. Put dbInfo record into garbage collection log, to clean up database if
+	//    remaining steps fail or syncbased crashes.
+	rootDir, err := a.rootDirForDb(dbName)
+	if err != nil {
+		return verror.New(verror.ErrInternal, ctx, err)
+	}
+	dbInfo := &DbInfo{
+		Name:    dbName,
+		RootDir: rootDir,
+		Engine:  a.s.opts.Engine,
+	}
+	if err := putDbGCEntry(ctx, a.s.st, dbInfo); err != nil {
+		return err
+	}
+	var stRef store.Store
+	defer func() {
+		if reterr != nil {
+			// Best effort database destroy on error. If it fails, it will be retried
+			// on syncbased restart. (It is safe to pass nil stRef if step 3 fails.)
+			// TODO(ivanpi): Consider running asynchronously. However, see TODO in
+			// finalizeDatabaseDestroy.
+			if err := finalizeDatabaseDestroy(ctx, a.s.st, dbInfo, stRef); err != nil {
+				vlog.Error(err)
+			}
+		}
+	}()
+
+	// 3. Initialize database.
+	if perms == nil {
+		perms = aData.Perms
+	}
+	// TODO(ivanpi): NewDatabase doesn't close the store on failure.
+	d, err := nosql.NewDatabase(ctx, a, dbName, metadata, nosql.DatabaseOptions{
+		Perms:   perms,
+		RootDir: dbInfo.RootDir,
+		Engine:  dbInfo.Engine,
+	})
+	if err != nil {
+		return err
+	}
+	// Save reference to Store to allow finalizeDatabaseDestroy to close it in
+	// case of error.
+	stRef = d.St()
+
+	// 4. Move dbInfo from GC log into active databases.
 	if err := store.RunInTransaction(a.s.st, func(tx store.Transaction) error {
-		// Check appData perms.
-		if err := util.GetWithAuth(ctx, call, tx, a.stKey(), aData); err != nil {
+		// Even though perms and database existence are checked in step 1 to fail
+		// early before initializing the database, rare races (such as a concurrent
+		// permissions change) make it prudent to repeat the checks in a transaction.
+		// Recheck appData perms. Verify perms version hasn't changed.
+		aDataRepeat := &AppData{}
+		if err := util.GetWithAuth(ctx, call, a.s.st, a.stKey(), aDataRepeat); err != nil {
 			return err
 		}
+		if aData.Version != aDataRepeat.Version {
+			return verror.NewErrBadVersion(ctx)
+		}
 		// Check for "database already exists".
 		if _, err := a.getDbInfo(ctx, tx, dbName); verror.ErrorID(err) != verror.ErrNoExist.ID {
 			if err != nil {
@@ -167,36 +222,12 @@
 			// TODO(sadovsky): Should this be ErrExistOrNoAccess, for privacy?
 			return verror.New(verror.ErrExist, ctx, dbName)
 		}
-		// Write new dbInfo.
-		info := &DbInfo{
-			Name:    dbName,
-			RootDir: rootDir,
-			Engine:  engine,
+		// Write dbInfo into active databases.
+		if err := a.putDbInfo(ctx, tx, dbName, dbInfo); err != nil {
+			return err
 		}
-		return a.putDbInfo(ctx, tx, dbName, info)
-	}); err != nil {
-		return err
-	}
-
-	// 2. Initialize database.
-	if perms == nil {
-		perms = aData.Perms
-	}
-	d, err := nosql.NewDatabase(ctx, a, dbName, metadata, nosql.DatabaseOptions{
-		Perms:   perms,
-		RootDir: rootDir,
-		Engine:  engine,
-	})
-	if err != nil {
-		return err
-	}
-
-	// 3. Flip dbInfo.Initialized to true.
-	if err := store.RunInTransaction(a.s.st, func(tx store.Transaction) error {
-		return a.updateDbInfo(ctx, tx, dbName, func(info *DbInfo) error {
-			info.Initialized = true
-			return nil
-		})
+		// Delete dbInfo from GC log.
+		return delDbGCEntry(ctx, tx, dbInfo)
 	}); err != nil {
 		return err
 	}
@@ -209,15 +240,11 @@
 	if !a.exists {
 		vlog.Fatalf("app %q does not exist", a.name)
 	}
-	// TODO(sadovsky): Crash if any step fails, and use WAL to ensure that if we
-	// crash, upon restart we execute any remaining steps before we start handling
-	// client requests.
-	//
 	// Steps:
 	// 1. Check databaseData perms.
-	// 2. Flip dbInfo.Deleted to true. <===== CHANGE BECOMES VISIBLE
-	// 3. Delete database.
-	// 4. Delete dbInfo record.
+	// 2. Move dbInfo from active databases into GC log. <===== CHANGE BECOMES VISIBLE
+	// 3. Best-effort database destroy. If it fails, it will be retried on
+	//    syncbased restart.
 	a.mu.Lock()
 	defer a.mu.Unlock()
 	d, ok := a.dbs[dbName]
@@ -233,30 +260,27 @@
 		return err
 	}
 
-	// 2. Flip dbInfo.Deleted to true.
-	if err := store.RunInTransaction(a.s.st, func(tx store.Transaction) error {
-		return a.updateDbInfo(ctx, tx, dbName, func(info *DbInfo) error {
-			info.Deleted = true
-			return nil
-		})
+	// 2. Move dbInfo from active databases into GC log.
+	var dbInfo *DbInfo
+	if err := store.RunInTransaction(a.s.st, func(tx store.Transaction) (err error) {
+		dbInfo, err = deleteDatabaseEntry(ctx, tx, a, dbName)
+		return
 	}); err != nil {
 		return err
 	}
-
-	// 3. Delete database.
-	if err := d.St().Close(); err != nil {
-		return err
-	}
-	if err := util.DestroyStore(a.s.opts.Engine, a.rootDirForDb(dbName)); err != nil {
-		return err
-	}
-
-	// 4. Delete dbInfo record.
-	if err := a.delDbInfo(ctx, a.s.st, dbName); err != nil {
-		return err
-	}
-
 	delete(a.dbs, dbName)
+
+	// 3. Best-effort database destroy. If it fails, it will be retried on
+	//    syncbased restart.
+	// TODO(ivanpi): Consider returning an error on failure here, even though
+	// database was made inaccessible. Note, if Close() failed, ongoing RPCs
+	// might still be using the store.
+	// TODO(ivanpi): Consider running asynchronously. However, see TODO in
+	// finalizeDatabaseDestroy.
+	if err := finalizeDatabaseDestroy(ctx, a.s.st, dbInfo, d.St()); err != nil {
+		vlog.Error(err)
+	}
+
 	return nil
 }
 
@@ -288,25 +312,34 @@
 	return a.name
 }
 
-func (a *app) rootDirForDb(dbName string) string {
+func (a *app) rootDirForDb(dbName string) (string, error) {
 	// Note: Common Linux filesystems such as ext4 allow almost any character to
 	// appear in a filename, but other filesystems are more restrictive. For
-	// example, by default the OS X filesystem uses case-insensitive filenames. To
-	// play it safe, we hex-encode app and database names, yielding filenames that
-	// match "^[0-9a-f]+$".
+	// example, by default the OS X filesystem uses case-insensitive filenames.
+	// To play it safe, we hex-encode app and database names, yielding filenames
+	// that match "^[0-9a-f]+$". To allow recreating databases independently of
+	// garbage collecting old destroyed versions, a random suffix is appended to
+	// the database name.
 	appHex := hex.EncodeToString([]byte(a.name))
 	dbHex := hex.EncodeToString([]byte(dbName))
+	var suffix [32]byte
+	if _, err := rand.Read(suffix[:]); err != nil {
+		return "", fmt.Errorf("failed to generate suffix: %v", err)
+	}
+	suffixHex := hex.EncodeToString(suffix[:])
+	dbHexWithSuffix := dbHex + "-" + suffixHex
 	// ValidAppName and ValidDatabaseName require len([]byte(name)) <= 64, so even
-	// after the 2x blowup from hex-encoding, the lengths of these names should be
-	// well below the filesystem limit of 255 bytes.
+	// after appending the random suffix and with the 2x blowup from hex-encoding,
+	// the lengths of these names should be well below the filesystem limit of 255
+	// bytes.
 	// TODO(sadovsky): Currently, our client-side app/db creation tests verify
 	// that the server does not crash when too-long names are specified; we rely
 	// on the server-side dispatcher logic to return errors for too-long names.
 	// However, we ought to add server-side tests for this behavior, so that we
 	// don't accidentally remove the server-side name validation after adding
 	// client-side name validation.
-	if len(appHex) > 255 || len(dbHex) > 255 {
-		vlog.Fatalf("appHex %s or dbHex %s is too long", appHex, dbHex)
+	if len(appHex) > 255 || len(dbHexWithSuffix) > 255 {
+		vlog.Fatalf("appHex %s or dbHexWithSuffix %s is too long", appHex, dbHexWithSuffix)
 	}
-	return path.Join(a.s.opts.RootDir, util.AppDir, appHex, util.DbDir, dbHex)
+	return path.Join(a.s.opts.RootDir, util.AppDir, appHex, util.DbDir, dbHexWithSuffix), nil
 }
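
The (reterr error) named result in CreateNoSQLDatabase above is what lets a single deferred closure observe the eventual outcome and trigger best-effort cleanup on any error path. A minimal sketch of that shape (the step names are illustrative stand-ins, not the real syncbase calls):

```go
package main

import (
	"errors"
	"fmt"
)

// createWithCleanup mirrors the shape of CreateNoSQLDatabase: the named
// return value reterr is visible to the deferred closure, so one defer
// covers every error return that follows the GC-log entry.
func createWithCleanup(failAt int) (reterr error) {
	fmt.Println("step 2: put dbInfo into GC log")
	defer func() {
		if reterr != nil {
			// Best-effort cleanup; if this also fails, a restart can
			// retry it by scanning the GC log.
			fmt.Println("cleanup: destroying partially created database")
		}
	}()
	if failAt == 3 {
		return errors.New("step 3: initialize database failed")
	}
	fmt.Println("step 3: initialize database")
	if failAt == 4 {
		return errors.New("step 4: commit dbInfo failed")
	}
	fmt.Println("step 4: move dbInfo from GC log into active databases")
	return nil
}

func main() {
	fmt.Println("err:", createWithCleanup(3)) // cleanup runs
	fmt.Println("err:", createWithCleanup(0)) // cleanup skipped
}
```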
diff --git a/services/syncbase/server/db_gc.go b/services/syncbase/server/db_gc.go
new file mode 100644
index 0000000..d3cf87f
--- /dev/null
+++ b/services/syncbase/server/db_gc.go
@@ -0,0 +1,114 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package server
+
+// This file defines methods for consistently destroying unreferenced database
+// stores by using a garbage collection log keeping dbInfos for inactive (not
+// fully created or deleted) databases.
+
+import (
+	"fmt"
+
+	"v.io/v23/context"
+	"v.io/v23/verror"
+	"v.io/v23/vom"
+	"v.io/x/lib/vlog"
+	"v.io/x/ref/services/syncbase/server/util"
+	"v.io/x/ref/services/syncbase/store"
+)
+
+func dbGCKey(path string) string {
+	return util.JoinKeyParts(util.DbGCPrefix, path)
+}
+
+// putDbGCEntry puts a dbInfo into the garbage collection log. It should be
+// used before creating a new database and when marking a database for
+// destruction (deleting the dbInfo from active databases).
+func putDbGCEntry(ctx *context.T, stw store.StoreWriter, dbInfo *DbInfo) error {
+	return util.Put(ctx, stw, dbGCKey(dbInfo.RootDir), dbInfo)
+}
+
+// delDbGCEntry removes a dbInfo from the garbage collection log. It should
+// be used after successfully destroying the database and when finalizing
+// database creation (putting the dbInfo into active databases).
+func delDbGCEntry(ctx *context.T, stw store.StoreWriter, dbInfo *DbInfo) error {
+	return util.Delete(ctx, stw, dbGCKey(dbInfo.RootDir))
+}
+
+// deleteDatabaseEntry marks a database for destruction by moving its dbInfo
+// record from active databases into the garbage collection log. It returns
+// the dbInfo that can be passed to finalizeDatabaseDestroy.
+func deleteDatabaseEntry(ctx *context.T, tx store.Transaction, a *app, dbName string) (*DbInfo, error) {
+	dbInfo, err := a.getDbInfo(ctx, tx, dbName)
+	if err != nil {
+		return nil, err
+	}
+	if err := putDbGCEntry(ctx, tx, dbInfo); err != nil {
+		return nil, err
+	}
+	if err := a.delDbInfo(ctx, tx, dbName); err != nil {
+		return nil, err
+	}
+	return dbInfo, nil
+}
+
+// finalizeDatabaseDestroy attempts to close (if stRef is not nil) and destroy
+// the database store. If successful, it removes the dbInfo record from the
+// garbage collection log.
+func finalizeDatabaseDestroy(ctx *context.T, stw store.StoreWriter, dbInfo *DbInfo, stRef store.Store) error {
+	vlog.VI(2).Infof("server: app: destroying store at %q for database %s (closing: %v)", dbInfo.RootDir, dbInfo.Name, stRef != nil)
+	if stRef != nil {
+		// TODO(ivanpi): Safer to crash syncbased on Close() failure? Otherwise,
+		// already running calls might continue using the database. Alternatively,
+		// explicitly cancel running calls in Destroy.
+		if err := stRef.Close(); err != nil {
+			return wrapGCError(dbInfo, err)
+		}
+	}
+	if err := util.DestroyStore(dbInfo.Engine, dbInfo.RootDir); err != nil {
+		return wrapGCError(dbInfo, err)
+	}
+	if err := delDbGCEntry(ctx, stw, dbInfo); err != nil {
+		return wrapGCError(dbInfo, err)
+	}
+	return nil
+}
+
+// runGCInactiveDatabases iterates over the garbage collection log, attempting
+// to destroy each database store, removing dbInfo records from the log when
+// destruction is successful.
+// NOTE: runGCInactiveDatabases should not be run in parallel with database
+// creation or deletion since it can cause spurious failures (e.g. garbage
+// collecting a database that is being created).
+// TODO(ivanpi): Consider adding mutex to allow running GC concurrently.
+func runGCInactiveDatabases(ctx *context.T, st store.Store) error {
+	vlog.VI(2).Infof("server: app: starting garbage collection of inactive databases")
+	total := 0
+	deleted := 0
+	gcIt := st.Scan(util.ScanPrefixArgs(util.DbGCPrefix, ""))
+	var diBytes []byte
+	for gcIt.Advance() {
+		diBytes = gcIt.Value(diBytes)
+		var dbInfo DbInfo
+		if err := vom.Decode(diBytes, &dbInfo); err != nil {
+			return verror.New(verror.ErrInternal, ctx, err)
+		}
+		total++
+		if err := finalizeDatabaseDestroy(ctx, st, &dbInfo, nil); err != nil {
+			vlog.Error(err)
+		} else {
+			deleted++
+		}
+	}
+	if err := gcIt.Err(); err != nil {
+		return verror.New(verror.ErrInternal, ctx, err)
+	}
+	vlog.VI(2).Infof("server: app: garbage collected %d out of %d inactive databases", deleted, total)
+	return nil
+}
+
+func wrapGCError(dbInfo *DbInfo, err error) error {
+	return fmt.Errorf("failed to destroy store at %q for database %s: %v", dbInfo.RootDir, dbInfo.Name, err)
+}
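
The invariant db_gc.go maintains is that a database's dbInfo lives in exactly one of two key spaces at a time: the active set or the GC log. A crash at any intermediate point therefore leaves either a fully registered database or a GC-log entry that runGCInactiveDatabases can destroy on restart. A toy model of the state transitions (plain maps stand in for the store key spaces):

```go
package main

import "fmt"

// state models the two key spaces: dbInfo records under DbInfoPrefix
// (active) and under DbGCPrefix (pending destruction).
type state struct {
	active map[string]bool
	gcLog  map[string]bool
}

// create mirrors CreateNoSQLDatabase steps 2-4.
func (s *state) create(name string) {
	s.gcLog[name] = true // step 2: putDbGCEntry
	// step 3: initialize the store; a crash here leaves only the GC-log
	// entry, which restart-time GC destroys.
	s.active[name] = true // step 4 (one transaction): putDbInfo...
	delete(s.gcLog, name) // ...and delDbGCEntry
}

// destroy mirrors DestroyNoSQLDatabase steps 2-3.
func (s *state) destroy(name string) {
	s.gcLog[name] = true   // deleteDatabaseEntry (one transaction):
	delete(s.active, name) // putDbGCEntry and delDbInfo
	// finalizeDatabaseDestroy: close and destroy the store, then remove
	// the GC-log entry; a crash before this point is retried on restart.
	delete(s.gcLog, name) // delDbGCEntry
}

func main() {
	s := &state{active: map[string]bool{}, gcLog: map[string]bool{}}
	s.create("db1")
	fmt.Println(s.active["db1"], s.gcLog["db1"]) // true false
	s.destroy("db1")
	fmt.Println(s.active["db1"], s.gcLog["db1"]) // false false
}
```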
diff --git a/services/syncbase/server/db_info.go b/services/syncbase/server/db_info.go
index 156ce05..c4dad86 100644
--- a/services/syncbase/server/db_info.go
+++ b/services/syncbase/server/db_info.go
@@ -41,15 +41,3 @@
 func (a *app) delDbInfo(ctx *context.T, stw store.StoreWriter, dbName string) error {
 	return util.Delete(ctx, stw, dbInfoStKey(a, dbName))
 }
-
-// updateDbInfo performs a read-modify-write. fn should "modify" v.
-func (a *app) updateDbInfo(ctx *context.T, tx store.Transaction, dbName string, fn func(info *DbInfo) error) error {
-	info, err := a.getDbInfo(ctx, tx, dbName)
-	if err != nil {
-		return err
-	}
-	if err := fn(info); err != nil {
-		return err
-	}
-	return a.putDbInfo(ctx, tx, dbName, info)
-}
diff --git a/services/syncbase/server/interfaces/sync.vdl b/services/syncbase/server/interfaces/sync.vdl
index eab3924..25fb3c8 100644
--- a/services/syncbase/server/interfaces/sync.vdl
+++ b/services/syncbase/server/interfaces/sync.vdl
@@ -83,4 +83,5 @@
 	ConnFail() {"en": "connection to peer failed{:_}"}
 	BrokenCrConnection() {"en": "CrConnection stream to client does not exist or is broken"}
 	GetTimeFailed() {"en": "GetTime failed{:_}"}
+	NotAdmin() {"en": "not an admin of the syncgroup"}
 )
diff --git a/services/syncbase/server/interfaces/sync.vdl.go b/services/syncbase/server/interfaces/sync.vdl.go
index d35e512..095f456 100644
--- a/services/syncbase/server/interfaces/sync.vdl.go
+++ b/services/syncbase/server/interfaces/sync.vdl.go
@@ -27,6 +27,7 @@
 	ErrConnFail            = verror.Register("v.io/x/ref/services/syncbase/server/interfaces.ConnFail", verror.NoRetry, "{1:}{2:} connection to peer failed{:_}")
 	ErrBrokenCrConnection  = verror.Register("v.io/x/ref/services/syncbase/server/interfaces.BrokenCrConnection", verror.NoRetry, "{1:}{2:} CrConnection stream to client does not exist or is broken")
 	ErrGetTimeFailed       = verror.Register("v.io/x/ref/services/syncbase/server/interfaces.GetTimeFailed", verror.NoRetry, "{1:}{2:} GetTime failed{:_}")
+	ErrNotAdmin            = verror.Register("v.io/x/ref/services/syncbase/server/interfaces.NotAdmin", verror.NoRetry, "{1:}{2:} not an admin of the syncgroup")
 )
 
 func init() {
@@ -34,6 +35,7 @@
 	i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(ErrConnFail.ID), "{1:}{2:} connection to peer failed{:_}")
 	i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(ErrBrokenCrConnection.ID), "{1:}{2:} CrConnection stream to client does not exist or is broken")
 	i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(ErrGetTimeFailed.ID), "{1:}{2:} GetTime failed{:_}")
+	i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(ErrNotAdmin.ID), "{1:}{2:} not an admin of the syncgroup")
 }
 
 // NewErrDupSyncgroupPublish returns an error with the ErrDupSyncgroupPublish ID.
@@ -56,6 +58,11 @@
 	return verror.New(ErrGetTimeFailed, ctx)
 }
 
+// NewErrNotAdmin returns an error with the ErrNotAdmin ID.
+func NewErrNotAdmin(ctx *context.T) error {
+	return verror.New(ErrNotAdmin, ctx)
+}
+
 // SyncClientMethods is the client interface
 // containing Sync methods.
 //
diff --git a/services/syncbase/server/nosql/database.go b/services/syncbase/server/nosql/database.go
index d578b2b..9307094 100644
--- a/services/syncbase/server/nosql/database.go
+++ b/services/syncbase/server/nosql/database.go
@@ -14,6 +14,7 @@
 	"v.io/v23/glob"
 	"v.io/v23/query/engine"
 	ds "v.io/v23/query/engine/datasource"
+	"v.io/v23/query/syncql"
 	"v.io/v23/rpc"
 	"v.io/v23/security/access"
 	wire "v.io/v23/services/syncbase/nosql"
@@ -449,7 +450,10 @@
 	return db.ctx
 }
 
-func (db *queryDb) GetTable(name string) (ds.Table, error) {
+func (db *queryDb) GetTable(name string, writeAccessReq bool) (ds.Table, error) {
+	if writeAccessReq {
+		return nil, syncql.NewErrNotWritable(db.GetContext(), name)
+	}
 	tDb := &tableDb{
 		qdb: db,
 		req: &tableReq{
@@ -476,6 +480,10 @@
 	return []ds.Index{}
 }
 
+func (t *tableDb) Delete(k string) (bool, error) {
+	return false, syncql.NewErrOperationNotSupported(t.qdb.ctx, "delete")
+}
+
 func (t *tableDb) Scan(indexRanges ...ds.IndexRanges) (ds.KeyValueStream, error) {
 	streams := []store.Stream{}
 	// Syncbase does not currently support secondary indexes.  As such, indexRanges is
diff --git a/services/syncbase/server/service.go b/services/syncbase/server/service.go
index 03579b5..0dc1403 100644
--- a/services/syncbase/server/service.go
+++ b/services/syncbase/server/service.go
@@ -136,7 +136,14 @@
 			}
 		}
 		vlog.Infof("Using persisted permissions: %v", PermsString(readPerms))
-		// Service exists. Initialize in-memory data structures.
+		// Service exists.
+		// Run garbage collection of inactive databases.
+		// TODO(ivanpi): This is currently unsafe to call concurrently with
+		// database creation/deletion. Add mutex and run asynchronously.
+		if err := runGCInactiveDatabases(ctx, st); err != nil {
+			return nil, err
+		}
+		// Initialize in-memory data structures.
 		// Read all apps, populate apps map.
 		aIt := st.Scan(util.ScanPrefixArgs(util.AppPrefix, ""))
 		aBytes := []byte{}
@@ -152,10 +159,10 @@
 				exists: true,
 				dbs:    make(map[string]interfaces.Database),
 			}
-			s.apps[a.name] = a
 			if err := openDatabases(ctx, st, a); err != nil {
 				return nil, verror.New(verror.ErrInternal, ctx, err)
 			}
+			s.apps[a.name] = a
 		}
 		if err := aIt.Err(); err != nil {
 			return nil, verror.New(verror.ErrInternal, ctx, err)
@@ -432,26 +439,56 @@
 func (s *service) destroyApp(ctx *context.T, call rpc.ServerCall, appName string) error {
 	s.mu.Lock()
 	defer s.mu.Unlock()
+	// TODO(ivanpi): Destroying an app is in a possible race with creating a
+	// database in that app. Consider locking app mutex here, possibly other
+	// nested mutexes as well, and cancelling calls on nested objects. Same for
+	// database and table destroy.
 	a, ok := s.apps[appName]
 	if !ok {
 		return nil // destroy is idempotent
 	}
 
+	type dbTombstone struct {
+		store  store.Store
+		dbInfo *DbInfo
+	}
+	var tombstones []dbTombstone
 	if err := store.RunInTransaction(s.st, func(tx store.Transaction) error {
-		// Read-check-delete appData.
+		// Check appData perms.
 		if err := util.GetWithAuth(ctx, call, tx, a.stKey(), &AppData{}); err != nil {
 			if verror.ErrorID(err) == verror.ErrNoExist.ID {
 				return nil // destroy is idempotent
 			}
 			return err
 		}
-		// TODO(sadovsky): Delete all databases in this app.
+		// Mark all databases in this app for destruction.
+		for _, db := range a.dbs {
+			dbInfo, err := deleteDatabaseEntry(ctx, tx, a, db.Name())
+			if err != nil {
+				return err
+			}
+			tombstones = append(tombstones, dbTombstone{
+				store:  db.St(),
+				dbInfo: dbInfo,
+			})
+		}
+		// Delete appData.
 		return util.Delete(ctx, tx, a.stKey())
 	}); err != nil {
 		return err
 	}
-
 	delete(s.apps, appName)
+
+	// Best-effort destroy for all databases in this app. If any destroy fails,
+	// it will be attempted again on syncbased restart.
+	// TODO(ivanpi): Consider running asynchronously. However, see TODO in
+	// finalizeDatabaseDestroy.
+	for _, ts := range tombstones {
+		if err := finalizeDatabaseDestroy(ctx, s.st, ts.dbInfo, ts.store); err != nil {
+			vlog.Error(err)
+		}
+	}
+
 	return nil
 }
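
destroyApp above deliberately splits the work: the transaction performs only cheap metadata moves and collects tombstones, while the slow, fallible store destruction runs after commit, where a failure can be logged and retried on restart instead of aborting the transaction. A hedged sketch of that split (runInTransaction and the types below are illustrative stand-ins, not the real store API):

```go
package main

import "fmt"

// tombstone pairs what the post-commit phase needs, like dbTombstone above.
type tombstone struct{ name string }

// runInTransaction is a stand-in for store.RunInTransaction.
func runInTransaction(fn func() error) error { return fn() }

func destroyApp(dbs []string) error {
	var tombstones []tombstone
	if err := runInTransaction(func() error {
		for _, db := range dbs {
			// Cheap and atomic: move each dbInfo into the GC log.
			tombstones = append(tombstones, tombstone{name: db})
		}
		return nil
	}); err != nil {
		return err
	}
	// Slow, fallible work happens outside the transaction; failures are
	// logged and retried on restart rather than rolling back the commit.
	for _, ts := range tombstones {
		fmt.Println("best-effort destroy of store for", ts.name)
	}
	return nil
}

func main() {
	if err := destroyApp([]string{"a", "b"}); err != nil {
		fmt.Println("err:", err)
	}
}
```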
 
diff --git a/services/syncbase/server/types.vdl b/services/syncbase/server/types.vdl
index 9941e80..f11d1b7 100644
--- a/services/syncbase/server/types.vdl
+++ b/services/syncbase/server/types.vdl
@@ -24,11 +24,9 @@
 // DbInfo contains information about one database for an App.
 // TODO(sadovsky): Track NoSQL vs. SQL.
 type DbInfo struct {
-	Name        string
-	Initialized bool
-	Deleted     bool
+	Name  string
 	// Select fields from nosql.DatabaseOptions, needed in order to open storage
 	// engine on restart.
 	RootDir string // interpreted by storage engine
-	Engine string // name of storage engine, e.g. "leveldb"
+	Engine  string // name of storage engine, e.g. "leveldb"
 }
diff --git a/services/syncbase/server/types.vdl.go b/services/syncbase/server/types.vdl.go
index 409215f..0d8f47f 100644
--- a/services/syncbase/server/types.vdl.go
+++ b/services/syncbase/server/types.vdl.go
@@ -41,9 +41,7 @@
 // DbInfo contains information about one database for an App.
 // TODO(sadovsky): Track NoSQL vs. SQL.
 type DbInfo struct {
-	Name        string
-	Initialized bool
-	Deleted     bool
+	Name string
 	// Select fields from nosql.DatabaseOptions, needed in order to open storage
 	// engine on restart.
 	RootDir string // interpreted by storage engine
diff --git a/services/syncbase/server/util/constants.go b/services/syncbase/server/util/constants.go
index e743054..45b9a8c 100644
--- a/services/syncbase/server/util/constants.go
+++ b/services/syncbase/server/util/constants.go
@@ -11,6 +11,7 @@
 	VClockPrefix     = "c"
 	DatabasePrefix   = "d"
 	DbInfoPrefix     = "i"
+	DbGCPrefix       = "iw"
 	LogPrefix        = "l"
 	PermsPrefix      = "p"
 	RowPrefix        = "r"
diff --git a/services/syncbase/server/util/store_util.go b/services/syncbase/server/util/store_util.go
index 61352c5..1db9490 100644
--- a/services/syncbase/server/util/store_util.go
+++ b/services/syncbase/server/util/store_util.go
@@ -174,6 +174,7 @@
 	}
 }
 
+// DestroyStore destroys the specified store. Idempotent.
 func DestroyStore(engine, path string) error {
 	switch engine {
 	case "memstore":
diff --git a/services/syncbase/vclock/constants.go b/services/syncbase/vclock/constants.go
index 2e3a81b..79226ca 100644
--- a/services/syncbase/vclock/constants.go
+++ b/services/syncbase/vclock/constants.go
@@ -17,5 +17,5 @@
 	// Used by MaybeUpdateFromPeerData.
 	PeerSyncSkewThreshold = NtpSkewDeltaThreshold
 	RebootSkewThreshold   = time.Minute
-	MaxNumHops            = 1
+	MaxNumHops            = 2
 )
diff --git a/services/syncbase/vclock/update_from_peer.go b/services/syncbase/vclock/update_from_peer.go
index 385f2d5..0e6df39 100644
--- a/services/syncbase/vclock/update_from_peer.go
+++ b/services/syncbase/vclock/update_from_peer.go
@@ -40,6 +40,10 @@
 // MaybeUpdateFromPeerData updates data (the local VClockData) based on the
 // given PeerSyncData. Returns nil if data has been updated; otherwise, the
 // returned error specifies why the data was not updated.
+// TODO(sadovsky): This design assumes trust across syncgroups, which is
+// generally not desirable. Eventually we may need to perform
+// MaybeUpdateFromPeerData separately for each app or syncgroup, or something
+// along these lines.
 func MaybeUpdateFromPeerData(c *VClock, data *VClockData, psd *PeerSyncData) (*VClockData, error) {
 	// Same skew calculation as in NTP.
 	skew := (psd.RecvTs.Sub(psd.MySendTs) + psd.SendTs.Sub(psd.MyRecvTs)) / 2
@@ -54,10 +58,11 @@
 	if psd.LastNtpTs.Before(data.LastNtpTs) {
 		return nil, nue("peer's NTP is less recent than local")
 	}
+	// TODO(sadovsky): Do we really need the abs(skew) > RebootSkewThreshold part?
 	if psd.NumReboots > 0 && abs(skew) > RebootSkewThreshold {
 		return nil, nue("peer's vclock exceeds reboot tolerance")
 	}
-	if psd.NumHops > MaxNumHops {
+	if psd.NumHops+1 > MaxNumHops {
 		return nil, nue("peer's NTP is from too many hops away")
 	}
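
The corrected hop check reads more naturally as: adopting a peer's clock data adds one hop, so the data we would end up with is NumHops+1 hops from the NTP source, and that is what gets compared against MaxNumHops (now 2). A small sketch of the arithmetic (the uint16 type for hop counts is an assumption for illustration):

```go
package main

import "fmt"

const MaxNumHops = 2 // as in vclock/constants.go after this change

// acceptable mirrors the check in MaybeUpdateFromPeerData: peerHops is the
// peer's distance from an NTP source, and adopting its data puts us one
// hop further away.
func acceptable(peerHops uint16) bool {
	return peerHops+1 <= MaxNumHops
}

func main() {
	fmt.Println(acceptable(0)) // true: peer synced to NTP directly, we end up 1 hop away
	fmt.Println(acceptable(1)) // true: we end up 2 hops away, at the limit
	fmt.Println(acceptable(2)) // false: 3 hops would exceed MaxNumHops
}
```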
 
diff --git a/services/syncbase/vclock/vclockd.go b/services/syncbase/vclock/vclockd.go
index 8e98fd8..3bb9adb 100644
--- a/services/syncbase/vclock/vclockd.go
+++ b/services/syncbase/vclock/vclockd.go
@@ -117,20 +117,20 @@
 // also update either Skew or NumReboots. It does not touch LastNtpTs or
 // NumHops.
 func (d *VClockD) DoLocalUpdate() error {
-	vlog.VI(2).Info("vclock: DoLocalUpdate: start")
-	defer vlog.VI(2).Info("vclock: DoLocalUpdate: end")
+	vlog.VI(2).Info("vclockd: DoLocalUpdate: start")
+	defer vlog.VI(2).Info("vclockd: DoLocalUpdate: end")
 
 	err := d.vclock.UpdateVClockData(func(data *VClockData) (*VClockData, error) {
 		now, elapsedTime, err := d.vclock.SysClockVals()
 		if err != nil {
-			vlog.Errorf("vclock: DoLocalUpdate: SysClockVals failed: %v", err)
+			vlog.Errorf("vclockd: DoLocalUpdate: SysClockVals failed: %v", err)
 			return nil, err
 		}
 
 		// Check for a reboot: elapsed time is monotonic, so if the current elapsed
 		// time is less than data.ElapsedTimeSinceBoot, a reboot has taken place.
 		if elapsedTime < data.ElapsedTimeSinceBoot {
-			vlog.VI(2).Info("vclock: DoLocalUpdate: detected reboot")
+			vlog.VI(2).Info("vclockd: DoLocalUpdate: detected reboot")
 			data.NumReboots += 1
 		} else {
 			// No reboot detected. Check whether the system clock has drifted
@@ -139,7 +139,7 @@
 			expectedNow := data.SystemTimeAtBoot.Add(elapsedTime)
 			delta := expectedNow.Sub(now)
 			if abs(delta) > SystemClockDriftThreshold {
-				vlog.VI(2).Infof("vclock: DoLocalUpdate: detected clock drift of %v; updating SystemTimeAtBoot", delta)
+				vlog.VI(2).Infof("vclockd: DoLocalUpdate: detected clock drift of %v; updating SystemTimeAtBoot", delta)
 				data.Skew = data.Skew + delta
 			}
 		}
@@ -151,7 +151,7 @@
 	})
 
 	if err != nil {
-		vlog.Errorf("vclock: DoLocalUpdate: update failed: %v", err)
+		vlog.Errorf("vclockd: DoLocalUpdate: update failed: %v", err)
 	}
 	return err
 }
@@ -161,22 +161,22 @@
 
 // DoNtpUpdate talks to an NTP server and updates VClockData.
 func (d *VClockD) DoNtpUpdate() error {
-	vlog.VI(2).Info("vclock: DoNtpUpdate: start")
-	defer vlog.VI(2).Info("vclock: DoNtpUpdate: end")
+	vlog.VI(2).Info("vclockd: DoNtpUpdate: start")
+	defer vlog.VI(2).Info("vclockd: DoNtpUpdate: end")
 
 	d.ntpSourceMu.RLock()
 	ntpData, err := d.ntpSource.NtpSync(NtpSampleCount)
 	d.ntpSourceMu.RUnlock()
 	if err != nil {
-		vlog.Errorf("vclock: DoNtpUpdate: failed to fetch NTP time: %v", err)
+		vlog.Errorf("vclockd: DoNtpUpdate: failed to fetch NTP time: %v", err)
 		return err
 	}
-	vlog.VI(2).Infof("vclock: DoNtpUpdate: NTP skew is %v", ntpData.skew)
+	vlog.VI(2).Infof("vclockd: DoNtpUpdate: NTP skew is %v", ntpData.skew)
 
 	err = d.vclock.UpdateVClockData(func(data *VClockData) (*VClockData, error) {
 		now, elapsedTime, err := d.vclock.SysClockVals()
 		if err != nil {
-			vlog.Errorf("vclock: DoNtpUpdate: SysClockVals failed: %v", err)
+			vlog.Errorf("vclockd: DoNtpUpdate: SysClockVals failed: %v", err)
 			return nil, err
 		}
 
@@ -184,7 +184,7 @@
 		// avoid constant tweaking of the clock.
 		delta := ntpData.skew - data.Skew
 		if abs(delta) > NtpSkewDeltaThreshold {
-			vlog.VI(2).Infof("vclock: DoNtpUpdate: NTP time minus Syncbase vclock time is %v; updating Skew", delta)
+			vlog.VI(2).Infof("vclockd: DoNtpUpdate: NTP time minus Syncbase vclock time is %v; updating Skew", delta)
 			data.Skew = ntpData.skew
 		}
 
@@ -197,7 +197,7 @@
 	})
 
 	if err != nil {
-		vlog.Errorf("vclock: DoNtpUpdate: update failed: %v", err)
+		vlog.Errorf("vclockd: DoNtpUpdate: update failed: %v", err)
 	}
 	return err
 }
diff --git a/services/syncbase/vsync/initiator.go b/services/syncbase/vsync/initiator.go
index 9455069..6726d6f 100644
--- a/services/syncbase/vsync/initiator.go
+++ b/services/syncbase/vsync/initiator.go
@@ -207,7 +207,7 @@
 		// peer ok? The thinking here is that the peer uses the read tag
 		// on the GetDeltas RPC to authorize the initiator and this
 		// makes it symmetric.
-		if acl, exists := sg.Spec.Perms[string(access.Read)]; !exists || !acl.Includes(blessingNames...) {
+		if err := authorizeForTag(ctx, sg.Spec.Perms, access.Read, blessingNames); err != nil {
 			vlog.VI(4).Infof("sync: filterSyncGroups: skipping sg %v", gid)
 			continue
 		}
@@ -808,6 +808,10 @@
 			if err := iSt.config.sync.putDbGenInfoRemote(ctx, iSt.config.appName, iSt.config.dbName, iSt.sg, iSt.updLocal); err != nil {
 				vlog.Fatalf("sync: processUpdatedObjects: putting geninfo in memory failed for app %s db %s, err %v", iSt.config.appName, iSt.config.dbName, err)
 			}
+
+			// Best-effort syncgroup advertising; ignore errors.
+			iSt.advertiseSyncgroups(ctx)
+
 			vlog.VI(4).Info("sync: processUpdatedObjects: end: changes committed")
 			return nil
 		}
@@ -1135,6 +1139,33 @@
 	}
 }
 
+func (iSt *initiationState) advertiseSyncgroups(ctx *context.T) error {
+	if !iSt.sg {
+		return nil
+	}
+
+	// For all the syncgroup changes we learned, see if the latest acl makes
+	// this node an admin or removes it from its admin role, and if so,
+	// advertise the syncgroup or cancel the existing advertisement over the
+	// neighborhood as applicable.
+	for objid := range iSt.updObjects {
+		gid, err := sgID(objid)
+		if err != nil {
+			return err
+		}
+		var sg *interfaces.Syncgroup
+		sg, err = getSyncgroupById(ctx, iSt.config.st, gid)
+		if err != nil {
+			return err
+		}
+		if err := iSt.config.sync.advertiseSyncgroupInNeighborhood(sg); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 // Internal helpers to make different RPCs needed for syncing.
 
diff --git a/services/syncbase/vsync/peer_manager.go b/services/syncbase/vsync/peer_manager.go
index 79c4b2e..b46d3f5 100644
--- a/services/syncbase/vsync/peer_manager.go
+++ b/services/syncbase/vsync/peer_manager.go
@@ -159,10 +159,10 @@
 	ticker := time.NewTicker(peerManagementInterval)
 	defer ticker.Stop()
 
-	for !pm.s.Closed() {
+	for !pm.s.isClosed() {
 		select {
 		case <-ticker.C:
-			if pm.s.Closed() {
+			if pm.s.isClosed() {
 				break
 			}
 			pm.managePeersInternal(ctx)
diff --git a/services/syncbase/vsync/sync.go b/services/syncbase/vsync/sync.go
index 2bdcb19..294f99e 100644
--- a/services/syncbase/vsync/sync.go
+++ b/services/syncbase/vsync/sync.go
@@ -47,12 +47,10 @@
 	name string // name derived from the global id.
 	sv   interfaces.Service
 
-	// Root context to be used to create a context for advertising over
-	// neighborhood.
+	// Root context. Used, for example, to derive contexts for
+	// advertising over the neighborhood.
 	ctx *context.T
 
-	nameLock sync.Mutex // lock needed to serialize adding and removing of Syncbase names.
-
 	// High-level lock to serialize the watcher and the initiator. This lock is
 	// needed to handle the following cases: (a) When the initiator is
 	// cutting a local generation, it waits for the watcher to commit the
@@ -96,9 +94,15 @@
 	discoverySyncgroups map[string]map[string]*discovery.Service
 	discoveryLock       sync.RWMutex
 
-	// Cancel function for a context derived from the root context when
+	nameLock sync.Mutex // lock needed to serialize adding and removing of Syncbase names.
+
+	// Handle to the server for adding other names in the future.
+	svr rpc.Server
+
+	// Cancel functions for contexts derived from the root context when
 	// advertising over neighborhood. This is needed to stop advertising.
-	advCancel context.CancelFunc
+	cancelAdvSyncbase   context.CancelFunc            // cancels Syncbase advertising.
+	cancelAdvSyncgroups map[string]context.CancelFunc // cancels syncgroup advertising.
 
 	// Whether to enable neighborhood advertising.
 	publishInNH bool
@@ -147,20 +151,6 @@
 	_       interfaces.SyncServerMethods = (*syncService)(nil)
 )
 
-// rand64 generates an unsigned 64-bit pseudo-random number.
-func rand64() uint64 {
-	rngLock.Lock()
-	defer rngLock.Unlock()
-	return (uint64(rng.Int63()) << 1) | uint64(rng.Int63n(2))
-}
-
-// randIntn mimics rand.Intn (generates a non-negative pseudo-random number in [0,n)).
-func randIntn(n int) int {
-	rngLock.Lock()
-	defer rngLock.Unlock()
-	return rng.Intn(n)
-}
-
 // New creates a new sync module.
 //
 // Concurrency: sync initializes four goroutines at startup: a "watcher", a
@@ -174,12 +164,13 @@
 // incoming RPCs from remote sync modules and local clients.
 func New(ctx *context.T, sv interfaces.Service, blobStEngine, blobRootDir string, cl *vclock.VClock, publishInNH bool) (*syncService, error) {
 	s := &syncService{
-		sv:             sv,
-		batches:        make(batchSet),
-		sgPublishQueue: list.New(),
-		vclock:         cl,
-		ctx:            ctx,
-		publishInNH:    publishInNH,
+		sv:                  sv,
+		batches:             make(batchSet),
+		sgPublishQueue:      list.New(),
+		vclock:              cl,
+		ctx:                 ctx,
+		publishInNH:         publishInNH,
+		cancelAdvSyncgroups: make(map[string]context.CancelFunc),
 	}
 
 	data := &SyncData{}
@@ -202,7 +193,7 @@
 	s.id = data.Id
 	s.name = syncbaseIdToName(s.id)
 
-	vlog.VI(1).Infof("sync: New: Syncbase ID is %v", s.id)
+	vlog.VI(1).Infof("sync: New: Syncbase ID is %x", s.id)
 
 	// Initialize in-memory state for the sync module before starting any threads.
 	if err := s.initSync(ctx); err != nil {
@@ -224,18 +215,20 @@
 	// Start watcher thread to watch for updates to local store.
 	go s.watchStore(ctx)
 
-	// Start initiator thread to periodically get deltas from peers.
-	go s.syncer(ctx)
-
-	// Start the discovery service thread to listen to neighborhood updates.
-	go s.discoverNeighborhood(ctx)
-
 	// Initialize a peer manager with the peer selection policy.
 	s.pm = newPeerManager(ctx, s, selectRandom)
 
 	// Start the peer manager thread to maintain peers viable for syncing.
 	go s.pm.managePeers(ctx)
 
+	// Start initiator thread to periodically get deltas from peers. The
+	// initiator thread consults the peer manager to pick peers to sync
+	// with. So we initialize the peer manager before starting the syncer.
+	go s.syncer(ctx)
+
+	// Start the discovery service thread to listen to neighborhood updates.
+	go s.discoverNeighborhood(ctx)
+
 	return s, nil
 }
 
@@ -244,17 +237,25 @@
 	return nil
 }
 
-// Closed returns true if the sync service channel is closed indicating that the
-// service is shutting down.
-func (s *syncService) Closed() bool {
-	select {
-	case <-s.closed:
-		return true
-	default:
-		return false
-	}
+// Close waits for spawned sync threads to shut down, and closes the local blob
+// store handle.
+func Close(ss interfaces.SyncServerMethods) {
+	vlog.VI(2).Infof("sync: Close: begin")
+	defer vlog.VI(2).Infof("sync: Close: end")
+
+	s := ss.(*syncService)
+	close(s.closed)
+	s.pending.Wait()
+	s.bst.Close()
 }
 
+func NewSyncDatabase(db interfaces.Database) *syncDatabase {
+	return &syncDatabase{db: db, sync: db.App().Service().Sync()}
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+// Neighborhood-based discovery of syncgroups and Syncbases.
+
 // discoverNeighborhood listens to updates from the discovery service to learn
 // about sync peers and syncgroups (if they have admins in the neighborhood) as
 // they enter and leave the neighborhood.
@@ -273,10 +274,10 @@
 		return
 	}
 
-	for !s.Closed() {
+	for !s.isClosed() {
 		select {
 		case update, ok := <-ch:
-			if !ok || s.Closed() {
+			if !ok || s.isClosed() {
 				break
 			}
 
@@ -394,9 +395,9 @@
 	return sgNeighbors
 }
 
-// discoverySyncgroupAdmins returns syncgroup admins found in the neighborhood
-// via the discovery service.
-func (s *syncService) discoverySyncgroupAdmins(sgName string) []*discovery.Service {
+// filterSyncgroupAdmins returns syncgroup admins for the specified syncgroup
+// found in the neighborhood via the discovery service.
+func (s *syncService) filterSyncgroupAdmins(sgName string) []*discovery.Service {
 	s.discoveryLock.Lock()
 	defer s.discoveryLock.Unlock()
 
@@ -429,31 +430,66 @@
 	s.nameLock.Lock()
 	defer s.nameLock.Unlock()
 
-	mInfo := s.copyMemberInfo(ctx, s.name)
-	if mInfo == nil || len(mInfo.mtTables) == 0 {
+	s.svr = svr
+
+	info := s.copyMemberInfo(ctx, s.name)
+	if info == nil || len(info.mtTables) == 0 {
 		vlog.VI(2).Infof("sync: AddNames: end returning no names")
 		return nil
 	}
 
-	for mt := range mInfo.mtTables {
+	for mt := range info.mtTables {
 		name := naming.Join(mt, s.name)
+		// Note that AddName retries publishing if unsuccessful, so a
+		// node that is currently offline will publish the name once it
+		// regains connectivity.
 		if err := svr.AddName(name); err != nil {
-			vlog.VI(2).Infof("sync: AddNames: end returning err %v", err)
+			vlog.VI(2).Infof("sync: AddNames: end returning AddName err %v", err)
 			return err
 		}
 	}
-	return s.publishInNeighborhood(svr)
+	if err := s.advertiseSyncbaseInNeighborhood(); err != nil {
+		vlog.VI(2).Infof("sync: AddNames: end returning advertiseSyncbaseInNeighborhood err %v", err)
+		return err
+	}
+
+	// Advertise syncgroups.
+	for gdbName, dbInfo := range info.db2sg {
+		appName, dbName, err := splitAppDbName(ctx, gdbName)
+		if err != nil {
+			return err
+		}
+		st, err := s.getDbStore(ctx, nil, appName, dbName)
+		if err != nil {
+			return err
+		}
+		for gid := range dbInfo {
+			sg, err := getSyncgroupById(ctx, st, gid)
+			if err != nil {
+				return err
+			}
+			if err := s.advertiseSyncgroupInNeighborhood(sg); err != nil {
+				vlog.VI(2).Infof("sync: AddNames: end returning advertiseSyncgroupInNeighborhood err %v", err)
+				return err
+			}
+		}
+	}
+	return nil
 }
 
-// publishInNeighborhood checks if the Syncbase service is already being
-// advertised over the neighborhood. If not, it begins advertising. The caller
-// of the function is holding nameLock.
-func (s *syncService) publishInNeighborhood(svr rpc.Server) error {
+// advertiseSyncbaseInNeighborhood checks if the Syncbase service is already
+// being advertised over the neighborhood. If not, it begins advertising. The
+// caller of the function is holding nameLock.
+func (s *syncService) advertiseSyncbaseInNeighborhood() error {
 	if !s.publishInNH {
 		return nil
 	}
+
+	vlog.VI(4).Infof("sync: advertiseSyncbaseInNeighborhood: begin")
+	defer vlog.VI(4).Infof("sync: advertiseSyncbaseInNeighborhood: end")
+
 	// Syncbase is already being advertised.
-	if s.advCancel != nil {
+	if s.cancelAdvSyncbase != nil {
 		return nil
 	}
 
@@ -466,35 +502,97 @@
 
 	ctx, stop := context.WithCancel(s.ctx)
 
-	// Duplicate calls to advertise will return an error.
-	_, err := idiscovery.AdvertiseServer(ctx, svr, "", &sbService, nil)
+	// Note that duplicate calls to advertise will return an error.
+	_, err := idiscovery.AdvertiseServer(ctx, s.svr, "", &sbService, nil)
 
 	if err == nil {
-		s.advCancel = stop
+		vlog.VI(4).Infof("sync: advertiseSyncbaseInNeighborhood: successful")
+		s.cancelAdvSyncbase = stop
+		return nil
 	}
+	stop()
 	return err
 }
 
-// Close waits for spawned sync threads (watcher and initiator) to shut down,
-// and closes the local blob store handle.
-func Close(ss interfaces.SyncServerMethods) {
-	vlog.VI(2).Infof("sync: Close: begin")
-	defer vlog.VI(2).Infof("sync: Close: end")
+// advertiseSyncgroupInNeighborhood checks if this Syncbase is an admin of the
+// syncgroup, and if so advertises the syncgroup over neighborhood. If the
+// Syncbase loses admin access, any previous syncgroup advertisements are
+// cancelled. The caller of the function is holding nameLock.
+func (s *syncService) advertiseSyncgroupInNeighborhood(sg *interfaces.Syncgroup) error {
+	if !s.publishInNH {
+		return nil
+	}
 
-	s := ss.(*syncService)
-	close(s.closed)
-	s.pending.Wait()
-	s.bst.Close()
+	vlog.VI(4).Infof("sync: advertiseSyncgroupInNeighborhood: begin")
+	defer vlog.VI(4).Infof("sync: advertiseSyncgroupInNeighborhood: end")
+
+	if !syncgroupAdmin(s.ctx, sg.Spec.Perms) {
+		if cancel := s.cancelAdvSyncgroups[sg.Name]; cancel != nil {
+			cancel()
+			delete(s.cancelAdvSyncgroups, sg.Name)
+		}
+		return nil
+	}
+
+	// Syncgroup is already being advertised.
+	if s.cancelAdvSyncgroups[sg.Name] != nil {
+		return nil
+	}
+
+	sbService := discovery.Service{
+		InterfaceName: ifName,
+		Attrs: discovery.Attributes{
+			discoveryAttrSyncgroup: sg.Name,
+		},
+	}
+	ctx, stop := context.WithCancel(s.ctx)
+
+	vlog.VI(4).Infof("sync: advertiseSyncgroupInNeighborhood: advertising %v", sbService)
+
+	// Note that duplicate calls to advertise will return an error.
+	_, err := idiscovery.AdvertiseServer(ctx, s.svr, "", &sbService, nil)
+
+	if err == nil {
+		vlog.VI(4).Infof("sync: advertiseSyncgroupInNeighborhood: successful")
+		s.cancelAdvSyncgroups[sg.Name] = stop
+		return nil
+	}
+	stop()
+	return err
+}
+
+//////////////////////////////
+// Helpers.
+
+// isClosed returns true if the sync service channel is closed, indicating
+// that the service is shutting down.
+func (s *syncService) isClosed() bool {
+	select {
+	case <-s.closed:
+		return true
+	default:
+		return false
+	}
+}
+
+// rand64 generates an unsigned 64-bit pseudo-random number.
+func rand64() uint64 {
+	rngLock.Lock()
+	defer rngLock.Unlock()
+	return (uint64(rng.Int63()) << 1) | uint64(rng.Int63n(2))
+}
+
+// randIntn mimics rand.Intn (generates a non-negative pseudo-random number in [0,n)).
+func randIntn(n int) int {
+	rngLock.Lock()
+	defer rngLock.Unlock()
+	return rng.Intn(n)
 }
 
 func syncbaseIdToName(id uint64) string {
 	return fmt.Sprintf("%x", id)
 }
 
-func NewSyncDatabase(db interfaces.Database) *syncDatabase {
-	return &syncDatabase{db: db, sync: db.App().Service().Sync()}
-}
-
 func (s *syncService) stKey() string {
 	return util.SyncPrefix
 }
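
The isClosed/Close pairing above is the standard closed-channel shutdown idiom: close(s.closed) is a broadcast that every non-blocking select can observe. A self-contained sketch of how the long-running loops (syncer, peer manager, discovery) consume it:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type svc struct {
	closed  chan struct{}
	pending sync.WaitGroup
}

// isClosed is a non-blocking probe of the shutdown broadcast.
func (s *svc) isClosed() bool {
	select {
	case <-s.closed:
		return true
	default:
		return false
	}
}

// loop mirrors the ticker loops in syncer.go and peer_manager.go.
func (s *svc) loop() {
	defer s.pending.Done()
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()
	for !s.isClosed() {
		select {
		case <-ticker.C:
			if s.isClosed() {
				break // breaks the select; the for condition then exits
			}
			// periodic work goes here
		case <-s.closed:
			break
		}
	}
	fmt.Println("loop exited")
}

func main() {
	s := &svc{closed: make(chan struct{})}
	s.pending.Add(1)
	go s.loop()
	time.Sleep(30 * time.Millisecond)
	close(s.closed)  // broadcast shutdown, as in Close above
	s.pending.Wait() // wait for spawned threads, as in Close above
}
```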
diff --git a/services/syncbase/vsync/sync_test.go b/services/syncbase/vsync/sync_test.go
index 9b9404c..105cc39 100644
--- a/services/syncbase/vsync/sync_test.go
+++ b/services/syncbase/vsync/sync_test.go
@@ -72,7 +72,7 @@
 	s := svc.sync
 
 	checkSyncgroupAdmins := func(sgName string, want []*discovery.Service) {
-		got := s.discoverySyncgroupAdmins(sgName)
+		got := s.filterSyncgroupAdmins(sgName)
 
 		g := make(map[*discovery.Service]bool)
 		for _, e := range got {
diff --git a/services/syncbase/vsync/syncer.go b/services/syncbase/vsync/syncer.go
index 596ca7b..8ad84fe 100644
--- a/services/syncbase/vsync/syncer.go
+++ b/services/syncbase/vsync/syncer.go
@@ -30,10 +30,10 @@
 	ticker := time.NewTicker(peerSyncInterval)
 	defer ticker.Stop()
 
-	for !s.Closed() {
+	for !s.isClosed() {
 		select {
 		case <-ticker.C:
-			if s.Closed() {
+			if s.isClosed() {
 				break
 			}
 			s.syncerWork(ctx)
diff --git a/services/syncbase/vsync/syncgroup.go b/services/syncbase/vsync/syncgroup.go
index d4ec0e4..a9398e9 100644
--- a/services/syncbase/vsync/syncgroup.go
+++ b/services/syncbase/vsync/syncgroup.go
@@ -19,6 +19,7 @@
 	"strings"
 	"time"
 
+	"v.io/v23"
 	"v.io/v23/context"
 	"v.io/v23/naming"
 	"v.io/v23/rpc"
@@ -637,7 +638,7 @@
 
 // TODO(hpucha): Pass blessings along.
 func (sd *syncDatabase) CreateSyncgroup(ctx *context.T, call rpc.ServerCall, sgName string, spec wire.SyncgroupSpec, myInfo wire.SyncgroupMemberInfo) error {
-	vlog.VI(2).Infof("sync: CreateSyncgroup: begin: %s", sgName)
+	vlog.VI(2).Infof("sync: CreateSyncgroup: begin: %s, spec %v", sgName, spec)
 	defer vlog.VI(2).Infof("sync: CreateSyncgroup: end: %s", sgName)
 
 	ss := sd.sync.(*syncService)
@@ -683,6 +684,25 @@
 		return err
 	}
 
+	// Advertise the Syncbase at the chosen mount table and in the
+	// neighborhood.
+	if err := ss.advertiseSyncbase(ctx, call, sg); err != nil {
+		// Failure in this step is rare. However, if it does fail, the
+		// create must fail as well.
+		//
+		// TODO(hpucha): Implement failure handling here and in
+		// advertiseSyncbase. Currently, with the transaction above,
+		// failure here means rolling back the create. However, rollback
+		// is not straightforward since by the time we are ready to roll
+		// back, the persistent sg state could already be in use for
+		// another join or a leave request from the app. To handle this
+		// contention, we might have to serialize all syncgroup-related
+		// operations pertaining to a database with a single lock, and
+		// further serialize all syncbase publishing with another lock
+		// across all databases.
+		return err
+	}
+
 	ss.initSyncStateInMem(ctx, appName, dbName, sgOID(gid))
 
 	// Local SG create succeeded. Publish the SG at the chosen server, or if
@@ -691,9 +711,6 @@
 		ss.enqueuePublishSyncgroup(sgName, appName, dbName, true)
 	}
 
-	// Publish at the chosen mount table and in the neighborhood.
-	ss.publishInMountTables(ctx, call, spec)
-
 	return nil
 }
 
@@ -724,7 +741,8 @@
 			return sgErr
 		}
 
-		// Check SG ACL.
+		// Check SG ACL. Caller must have Read access on the syncgroup
+		// acl to join a syncgroup.
 		if err := authorize(ctx, call.Security(), sg); err != nil {
 			return err
 		}
@@ -806,10 +824,15 @@
 		return nullSpec, err
 	}
 
-	ss.initSyncStateInMem(ctx, sg2.AppName, sg2.DbName, sgOID(sg2.Id))
+	// Advertise the Syncbase at the chosen mount table and in the
+	// neighborhood.
+	if err := ss.advertiseSyncbase(ctx, call, &sg2); err != nil {
+		// TODO(hpucha): Implement failure handling. See note in
+		// CreateSyncgroup for more details.
+		return nullSpec, err
+	}
 
-	// Publish at the chosen mount table and in the neighborhood.
-	ss.publishInMountTables(ctx, call, sg2.Spec)
+	ss.initSyncStateInMem(ctx, sg2.AppName, sg2.DbName, sgOID(sg2.Id))
 
 	return sg2.Spec, nil
 }
@@ -934,11 +957,22 @@
 			return verror.NewErrBadState(ctx)
 		}
 
+		// TODO(hpucha): The code below is to be enabled once the client blesses syncbase.
+		//
+		// Check if this peer is allowed to change the spec.
+		// blessingNames, _ := security.RemoteBlessingNames(ctx, call.Security())
+		// vlog.VI(4).Infof("sync: SetSyncgroupSpec: authorizing blessings %v against permissions %v", blessingNames, sg.Spec.Perms)
+		// if err := authorizeForTag(ctx, sg.Spec.Perms, access.Admin, blessingNames); err != nil {
+		// return err
+		// }
+
+		// TODO(hpucha): Check syncgroup ACL for sanity checking.
+		// TODO(hpucha): Check if the acl change causes neighborhood
+		// advertising to change.
+
 		// Reserve a log generation and position counts for the new syncgroup.
 		gen, pos := ss.reserveGenAndPosInDbLog(ctx, appName, dbName, sgOID(sg.Id), 1)
 
-		// TODO(hpucha): Check syncgroup ACL.
-
 		newVersion := newSyncgroupVersion()
 		sg.Spec = spec
 		sg.SpecVersion = newVersion
@@ -1114,26 +1148,71 @@
 	return nil
 }
 
-func (s *syncService) publishInMountTables(ctx *context.T, call rpc.ServerCall, spec wire.SyncgroupSpec) error {
+// advertiseSyncbase advertises this Syncbase at the chosen mount tables and
+// over the neighborhood.
+func (s *syncService) advertiseSyncbase(ctx *context.T, call rpc.ServerCall, sg *interfaces.Syncgroup) error {
 	s.nameLock.Lock()
 	defer s.nameLock.Unlock()
 
-	for _, mt := range spec.MountTables {
+	for _, mt := range sg.Spec.MountTables {
 		name := naming.Join(mt, s.name)
-		// AddName is idempotent.
+		// AddName is idempotent. Note that AddName retries publishing
+		// if unsuccessful, so a node that is currently offline will
+		// publish the name once it regains connectivity.
 		if err := call.Server().AddName(name); err != nil {
 			return err
 		}
 	}
 
-	return s.publishInNeighborhood(call.Server())
+	if err := s.advertiseSyncbaseInNeighborhood(); err != nil {
+		return err
+	}
+
+	// TODO(hpucha): For a joiner, this can be optimized so that we do
+	// not advertise until the syncgroup is in the pending state.
+	return s.advertiseSyncgroupInNeighborhood(sg)
 }
 
-func (sd *syncDatabase) joinSyncgroupAtAdmin(ctx *context.T, call rpc.ServerCall, sgName, name string, myInfo wire.SyncgroupMemberInfo) (interfaces.Syncgroup, string, interfaces.GenVector, error) {
-	c := interfaces.SyncClient(sgName)
-	return c.JoinSyncgroupAtAdmin(ctx, sgName, name, myInfo)
+func (sd *syncDatabase) joinSyncgroupAtAdmin(ctxIn *context.T, call rpc.ServerCall, sgName, name string, myInfo wire.SyncgroupMemberInfo) (interfaces.Syncgroup, string, interfaces.GenVector, error) {
+	vlog.VI(2).Infof("sync: joinSyncgroupAtAdmin: begin %v", sgName)
 
-	// TODO(hpucha): Try to join using an Admin on neighborhood if the publisher is not reachable.
+	ctx, cancel := context.WithTimeout(ctxIn, connectionTimeOut)
+	c := interfaces.SyncClient(sgName)
+	sg, vers, gv, err := c.JoinSyncgroupAtAdmin(ctx, sgName, name, myInfo)
+	cancel()
+
+	if err == nil {
+		vlog.VI(2).Infof("sync: joinSyncgroupAtAdmin: end succeeded at %v, returned sg %v vers %v gv %v", sgName, sg, vers, gv)
+		return sg, vers, gv, err
+	}
+
+	vlog.VI(2).Infof("sync: joinSyncgroupAtAdmin: try neighborhood %v", sgName)
+
+	// TODO(hpucha): Restrict the retries to network-related errors or
+	// other retriable errors.
+
+	// Get this Syncbase's sync module handle.
+	ss := sd.sync.(*syncService)
+
+	// Try to join using an admin in the neighborhood in case this node
+	// does not have connectivity to the publisher.
+	neighbors := ss.filterSyncgroupAdmins(sgName)
+	for _, svc := range neighbors {
+		for _, addr := range svc.Addrs {
+			ctx, cancel := context.WithTimeout(ctxIn, connectionTimeOut)
+			c := interfaces.SyncClient(naming.Join(addr, util.SyncbaseSuffix))
+			sg, vers, gv, err := c.JoinSyncgroupAtAdmin(ctx, sgName, name, myInfo)
+			cancel()
+
+			if err == nil {
+				vlog.VI(2).Infof("sync: joinSyncgroupAtAdmin: end succeeded at addr %v, returned sg %v vers %v gv %v", addr, sg, vers, gv)
+				return sg, vers, gv, err
+			}
+		}
+	}
+
+	vlog.VI(2).Infof("sync: joinSyncgroupAtAdmin: failed %v", sgName)
+	return interfaces.Syncgroup{}, "", interfaces.GenVector{}, verror.New(wire.ErrSyncgroupJoinFailed, ctx)
 }
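
Each attempt above derives its own timeout context from the caller's context and calls cancel() immediately after the RPC rather than deferring it, so timers are released as the loop progresses instead of piling up. The same pattern, extracted into a generic helper with hypothetical names (trySequentially, fn), might look like:

import (
	"fmt"
	"time"

	"v.io/v23/context"
)

// trySequentially runs fn against each address with a per-attempt timeout,
// stopping at the first success. Sketch only; not part of this change.
func trySequentially(parent *context.T, addrs []string, timeout time.Duration,
	fn func(ctx *context.T, addr string) error) error {
	lastErr := fmt.Errorf("no addresses to try")
	for _, addr := range addrs {
		ctx, cancel := context.WithTimeout(parent, timeout)
		lastErr = fn(ctx, addr)
		cancel() // release the timer now; deferring would accumulate in a loop
		if lastErr == nil {
			return nil
		}
	}
	return lastErr
}
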
 
 func authorize(ctx *context.T, call security.Call, sg *interfaces.Syncgroup) error {
@@ -1144,6 +1223,31 @@
 	return nil
 }
 
+func authorizeForTag(ctx *context.T, perms access.Permissions, tag access.Tag, blessingNames []string) error {
+	acl, exists := perms[string(tag)]
+	if exists && acl.Includes(blessingNames...) {
+		return nil
+	}
+	return verror.New(verror.ErrNoAccess, ctx)
+}
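
For illustration, authorizeForTag can be exercised with a hand-built Permissions map; the blessing pattern and names below are invented:

import (
	"v.io/v23/context"
	"v.io/v23/security"
	"v.io/v23/security/access"
)

func exampleAuthorize(ctx *context.T) {
	// Hypothetical permissions: only blessings matching "root:alice"
	// hold the Admin tag.
	perms := access.Permissions{
		string(access.Admin): access.AccessList{
			In: []security.BlessingPattern{"root:alice"},
		},
	}

	// Granted: "root:alice" is included in the Admin access list.
	if err := authorizeForTag(ctx, perms, access.Admin, []string{"root:alice"}); err == nil {
		// proceed as admin
	}

	// Denied with ErrNoAccess: perms has no entry for the Read tag.
	if err := authorizeForTag(ctx, perms, access.Read, []string{"root:alice"}); err != nil {
		// handle denial
	}
}
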
+
+// syncgroupAdmin checks the ACL against all known blessings and reports
+// whether this node qualifies as a syncgroup admin.
+//
+// TODO(hpucha): Should this be restricted to the default blessings, or
+// should we use ForPeer?
+func syncgroupAdmin(ctx *context.T, perms access.Permissions) bool {
+	var blessingNames []string
+	p := v23.GetPrincipal(ctx)
+	for _, blessings := range p.BlessingStore().PeerBlessings() {
+		blessingNames = append(blessingNames, security.BlessingNames(p, blessings)...)
+	}
+
+	if err := authorizeForTag(ctx, perms, access.Admin, blessingNames); err != nil {
+		return false
+	}
+	return true
+}
+
 ////////////////////////////////////////////////////////////
 // Methods for syncgroup create/join between Syncbases.
 
@@ -1206,8 +1310,12 @@
 	if err == nil {
 		s.initSyncStateInMem(ctx, sg.AppName, sg.DbName, sgOID(sg.Id))
 
-		// Publish at the chosen mount table and in the neighborhood.
-		s.publishInMountTables(ctx, call, sg.Spec)
+		// Advertise the Syncbase at the chosen mount table and in the
+		// neighborhood.
+		//
+		// TODO(hpucha): Implement failure handling. See note in
+		// CreateSyncgroup for more details.
+		err = s.advertiseSyncbase(ctx, call, &sg)
 	}
 
 	return s.name, err
@@ -1260,7 +1368,13 @@
 			return err
 		}
 
-		// Check SG ACL.
+		// Check SG ACL to see if this node is still a valid admin.
+		if !syncgroupAdmin(s.ctx, sg.Spec.Perms) {
+			return interfaces.NewErrNotAdmin(ctx)
+		}
+
+		// Check SG ACL. Caller must have Read access on the syncgroup
+		// ACL to join a syncgroup.
 		if err := authorize(ctx, call.Security(), sg); err != nil {
 			return err
 		}
diff --git a/services/syncbase/vsync/watcher.go b/services/syncbase/vsync/watcher.go
index 875e4a4..cf15573 100644
--- a/services/syncbase/vsync/watcher.go
+++ b/services/syncbase/vsync/watcher.go
@@ -56,10 +56,10 @@
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 
-	for !s.Closed() {
+	for !s.isClosed() {
 		select {
 		case <-ticker.C:
-			if s.Closed() {
+			if s.isClosed() {
 				break
 			}
 			s.processStoreUpdates(ctx)
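
The rename from Closed to isClosed reads like a switch to an unexported accessor. Note that the break above only exits the select; the for condition then re-checks isClosed and terminates the loop. A typical shape for the accessor, with the mutex and flag assumed rather than taken from this diff, is:

// Sketch with assumed fields (closedMu, closed); not code from this change.
func (s *syncService) isClosed() bool {
	s.closedMu.Lock()
	defer s.closedMu.Unlock()
	return s.closed
}

// Close marks the service closed so loops like the watcher above can exit.
func (s *syncService) Close() {
	s.closedMu.Lock()
	s.closed = true
	s.closedMu.Unlock()
}
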
diff --git a/test/timekeeper/manual_time.go b/test/timekeeper/manual_time.go
index afc5b36..e11658a 100644
--- a/test/timekeeper/manual_time.go
+++ b/test/timekeeper/manual_time.go
@@ -120,3 +120,8 @@
 	heap.Init(&mt.schedule)
 	return mt
 }
+
+// Now implements TimeKeeper.Now.
+func (mt *manualTime) Now() time.Time {
+	return mt.current
+}
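
Now completes the TimeKeeper interface for the manual clock, returning the internally tracked time rather than the wall clock, which makes time-dependent tests deterministic. A usage sketch follows; the NewManualTime constructor and AdvanceTime method are assumptions about the package's API, not taken from this diff:

import (
	"testing"
	"time"

	"v.io/x/ref/test/timekeeper"
)

func TestTickerWithManualClock(t *testing.T) {
	mt := timekeeper.NewManualTime() // assumed constructor
	start := mt.Now()

	mt.AdvanceTime(5 * time.Second) // assumed method: moves the manual clock forward

	if got := mt.Now().Sub(start); got != 5*time.Second {
		t.Errorf("manual clock advanced by %v, want 5s", got)
	}
}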