Merge branch 'master' into discovery
diff --git a/cmd/mounttable/impl_test.go b/cmd/mounttable/impl_test.go
index ddb997e..4540f3c 100644
--- a/cmd/mounttable/impl_test.go
+++ b/cmd/mounttable/impl_test.go
@@ -28,10 +28,6 @@
 
 var now = time.Now()
 
-func init() {
-	test.Init()
-}
-
 func deadline(minutes int) vdltime.Deadline {
 	return vdltime.Deadline{
 		Time: now.Add(time.Minute * time.Duration(minutes)),
diff --git a/cmd/servicerunner/servicerunner_test.go b/cmd/servicerunner/servicerunner_test.go
index d5a2a1c..c3d3a31 100644
--- a/cmd/servicerunner/servicerunner_test.go
+++ b/cmd/servicerunner/servicerunner_test.go
@@ -21,13 +21,11 @@
 	"testing"
 
 	"v.io/x/ref"
-	"v.io/x/ref/test"
 )
 
 // We provide our own TestMain, rather than allowing jiri test generate to
 // create one for us, to ensure all files require the "wspr" build tag.
 func TestMain(m *testing.M) {
-	test.Init()
 	os.Exit(m.Run())
 }
 
diff --git a/lib/flags/listen.go b/lib/flags/listen.go
index da8bb90..225113c 100644
--- a/lib/flags/listen.go
+++ b/lib/flags/listen.go
@@ -88,7 +88,7 @@
 		if addr := net.ParseIP(host); addr == nil {
 			// Could be a hostname.
 			addrs, err := net.LookupIP(host)
-			if err != nil {
+			if err != nil || len(addrs) == 0 {
 				return verror.New(errNeedIPOrHostName, nil, host, err)
 			}
 			for _, a := range addrs {
diff --git a/lib/flags/listen_test.go b/lib/flags/listen_test.go
index e5c23c8..d38d2da 100644
--- a/lib/flags/listen_test.go
+++ b/lib/flags/listen_test.go
@@ -84,12 +84,10 @@
 		t.Errorf("got %q, want %q", got, want)
 	}
 
-	for _, s := range []string{
-		":", ":59999999", "nohost.invalid", "nohost.invalid:"} {
+	for _, s := range []string{":", ":59999999", "nohost.invalid", "nohost.invalid:"} {
 		f := &flags.IPHostPortFlag{}
 		if err := f.Set(s); err == nil {
-			t.Errorf("expected an error for %q", s)
+			t.Errorf("expected an error for %q, %#v", s, f)
 		}
 	}
-
 }
diff --git a/lib/security/prepare_discharges_test.go b/lib/security/prepare_discharges_test.go
index 5aee79f..1c06d74 100644
--- a/lib/security/prepare_discharges_test.go
+++ b/lib/security/prepare_discharges_test.go
@@ -19,10 +19,6 @@
 	"v.io/x/ref/test/testutil"
 )
 
-func init() {
-	test.Init()
-}
-
 type expiryDischarger struct {
 	called bool
 }
diff --git a/lib/security/serialization/serialization_test.go b/lib/security/serialization/serialization_test.go
index 3ed62a7..4b7b58e 100644
--- a/lib/security/serialization/serialization_test.go
+++ b/lib/security/serialization/serialization_test.go
@@ -20,14 +20,12 @@
 
 	"v.io/v23/security"
 	"v.io/x/ref/lib/security/serialization"
-	"v.io/x/ref/test"
 	"v.io/x/ref/test/testutil"
 )
 
 // We call our own TestMain here because jiri test generate causes an import cycle
 // in this package.
 func TestMain(m *testing.M) {
-	test.Init()
 	os.Exit(m.Run())
 }
 
diff --git a/runtime/internal/flow/conn/conn_test.go b/runtime/internal/flow/conn/conn_test.go
index 7501fb1..d52a733 100644
--- a/runtime/internal/flow/conn/conn_test.go
+++ b/runtime/internal/flow/conn/conn_test.go
@@ -17,7 +17,6 @@
 	"v.io/v23/rpc/version"
 	_ "v.io/x/ref/runtime/factories/fake"
 	"v.io/x/ref/runtime/internal/flow/flowtest"
-	"v.io/x/ref/test"
 	"v.io/x/ref/test/goroutines"
 )
 
@@ -26,7 +25,6 @@
 var randData []byte
 
 func init() {
-	test.Init()
 	randData = make([]byte, 2*DefaultBytesBufferedPerFlow)
 	if _, err := rand.Read(randData); err != nil {
 		panic("Could not read random data.")
diff --git a/runtime/internal/flow/conn/flow.go b/runtime/internal/flow/conn/flow.go
index 1cad267..4681bde 100644
--- a/runtime/internal/flow/conn/flow.go
+++ b/runtime/internal/flow/conn/flow.go
@@ -254,7 +254,6 @@
 				Flags:           d.Flags,
 				Payload:         d.Payload,
 			})
-			f.conn.unopenedFlows.Done()
 		}
 		sent += size
 
@@ -262,6 +261,12 @@
 		// opened.  Note that since we've definitely sent a message now opened is surely
 		// true.
 		f.conn.mu.Lock()
+		// We need to ensure that we only call Done() exactly once, so we need to
+		// recheck f.opened, to ensure that f.close didn't decrement the wait group
+		// while we were not holding the lock.
+		if !f.opened {
+			f.conn.unopenedFlows.Done()
+		}
 		f.opened = true
 	}
 	f.writing = false
@@ -435,6 +440,9 @@
 		if !f.opened {
 			// Closing a flow that was never opened.
 			f.conn.unopenedFlows.Done()
+			// We mark the flow as opened to prevent multiple calls to
+			// f.conn.unopenedFlows.Done().
+			f.opened = true
 		} else if !closedRemotely && !connClosing {
 			// Note: If the conn is closing there is no point in trying to
 			// send the flow close message as it will fail.  This is racy
diff --git a/runtime/internal/flow/manager/manager_test.go b/runtime/internal/flow/manager/manager_test.go
index 60f9a2a..d633a4c 100644
--- a/runtime/internal/flow/manager/manager_test.go
+++ b/runtime/internal/flow/manager/manager_test.go
@@ -17,14 +17,9 @@
 	_ "v.io/x/ref/runtime/factories/fake"
 	"v.io/x/ref/runtime/internal/flow/conn"
 	"v.io/x/ref/runtime/internal/flow/flowtest"
-	"v.io/x/ref/test"
 	"v.io/x/ref/test/goroutines"
 )
 
-func init() {
-	test.Init()
-}
-
 const leakWaitTime = 250 * time.Millisecond
 
 func TestDirectConnection(t *testing.T) {
diff --git a/runtime/internal/naming/namespace/perms_test.go b/runtime/internal/naming/namespace/perms_test.go
index 5e169d6..9e0d857 100644
--- a/runtime/internal/naming/namespace/perms_test.go
+++ b/runtime/internal/naming/namespace/perms_test.go
@@ -21,10 +21,6 @@
 	"v.io/x/ref/test/testutil"
 )
 
-func init() {
-	test.Init()
-}
-
 func initTest() (rootCtx *context.T, aliceCtx *context.T, bobCtx *context.T, shutdown v23.Shutdown) {
 	ctx, shutdown := test.V23Init()
 	var err error
diff --git a/runtime/internal/rpc/benchmark/benchmark_test.go b/runtime/internal/rpc/benchmark/benchmark_test.go
index aefc28c..9e938d0 100644
--- a/runtime/internal/rpc/benchmark/benchmark_test.go
+++ b/runtime/internal/rpc/benchmark/benchmark_test.go
@@ -106,7 +106,6 @@
 func TestNoOp(t *testing.T) {}
 
 func TestMain(m *testing.M) {
-	test.Init()
 	// We do not use defer here since this program will exit at the end of
 	// this function through os.Exit().
 	var shutdown v23.Shutdown
diff --git a/runtime/internal/rpc/benchmark/simple/main.go b/runtime/internal/rpc/benchmark/simple/main.go
index 1b450ac..ff29b6a 100644
--- a/runtime/internal/rpc/benchmark/simple/main.go
+++ b/runtime/internal/rpc/benchmark/simple/main.go
@@ -129,7 +129,6 @@
 func main() {
 	// Set the default benchmark time.
 	flag.Set("test.benchtime", defaultBenchTime.String())
-	test.Init()
 
 	var shutdown v23.Shutdown
 	ctx, shutdown = test.V23Init()
diff --git a/runtime/internal/rpc/full_test.go b/runtime/internal/rpc/full_test.go
index bd1c492..e6f0cad 100644
--- a/runtime/internal/rpc/full_test.go
+++ b/runtime/internal/rpc/full_test.go
@@ -510,6 +510,36 @@
 	}
 }
 
+func TestAddNameLater(t *testing.T) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x66666666))
+	defer sm.Shutdown()
+	ns := tnaming.NewSimpleNamespace()
+	server, err := testInternalNewServer(ctx, sm, ns, nil, options.SecurityNone)
+	if err != nil {
+		t.Fatalf("InternalNewServer failed: %v", err)
+	}
+	if _, err = server.Listen(listenSpec); err != nil {
+		t.Fatalf("server.Listen failed: %v", err)
+	}
+	disp := &testServerDisp{&testServer{}}
+	if err := server.ServeDispatcher("", disp); err != nil {
+		t.Fatalf("server.Serve failed: %v", err)
+	}
+	if err := server.AddName("mp/server"); err != nil {
+		t.Fatalf("server.AddName failed: %v", err)
+	}
+	client := DeprecatedNewClient(sm, ns)
+	var got string
+	if err := client.Call(ctx, "mp/server", "Unauthorized", nil, []interface{}{&got}, options.SecurityNone); err != nil {
+		t.Fatalf("client.Call failed: %v", err)
+	}
+	if want := "UnauthorizedResult"; got != want {
+		t.Errorf("got (%v), want (%v)", got, want)
+	}
+}
+
 func TestNoPrincipal(t *testing.T) {
 	ctx, shutdown := initForTest()
 	defer shutdown()
diff --git a/runtime/internal/rpc/server.go b/runtime/internal/rpc/server.go
index 7cb149d..e770aa5 100644
--- a/runtime/internal/rpc/server.go
+++ b/runtime/internal/rpc/server.go
@@ -909,12 +909,12 @@
 	}
 	vtrace.GetSpan(s.ctx).Annotate("Serving under name: " + name)
 	s.disp = disp
-	if len(name) > 0 {
-		for ls, _ := range s.listenState {
-			for _, iep := range ls.ieps {
-				s.publisher.AddServer(iep.String())
-			}
+	for ls, _ := range s.listenState {
+		for _, iep := range ls.ieps {
+			s.publisher.AddServer(iep.String())
 		}
+	}
+	if len(name) > 0 {
 		s.publisher.AddName(name, s.servesMountTable, s.isLeaf)
 	}
 	return nil
diff --git a/runtime/internal/rpc/stream/manager/manager_test.go b/runtime/internal/rpc/stream/manager/manager_test.go
index e495136..dc68a1b 100644
--- a/runtime/internal/rpc/stream/manager/manager_test.go
+++ b/runtime/internal/rpc/stream/manager/manager_test.go
@@ -41,10 +41,7 @@
 // We write our own TestMain here instead of relying on jiri test generate because
 // we need to set runtime.GOMAXPROCS.
 func TestMain(m *testing.M) {
-	test.Init()
-
-	// testutil.Init sets GOMAXPROCS to NumCPU.  We want to force
-	// GOMAXPFDROCS to remain at 1, in order to trigger a particular race
+	// Force GOMAXPROCS to remain at 1 in order to trigger a particular race
 	// condition that occurs when closing the server; also, using 1 cpu
 	// introduces less variance in the behavior of the test.
 	runtime.GOMAXPROCS(1)
diff --git a/runtime/internal/rpc/test/client_test.go b/runtime/internal/rpc/test/client_test.go
index a42aca4..af0cd1b 100644
--- a/runtime/internal/rpc/test/client_test.go
+++ b/runtime/internal/rpc/test/client_test.go
@@ -539,7 +539,11 @@
 
 	// Make sure we failed because we really did close the connection
 	// with our filter
-	<-ch
+	select {
+	case <-ch:
+	case <-time.After(10 * time.Second):
+		t.Errorf("timeout waiting for chan")
+	}
 }
 
 func TestStartCallSecurity(t *testing.T) {
diff --git a/runtime/internal/rpc/xclient.go b/runtime/internal/rpc/xclient.go
index 875e6fe..8dd7143 100644
--- a/runtime/internal/rpc/xclient.go
+++ b/runtime/internal/rpc/xclient.go
@@ -55,9 +55,10 @@
 	// typeCache maintains a cache of type encoders and decoders.
 	typeCache *typeCache
 
-	wg     sync.WaitGroup
-	mu     sync.Mutex
-	closed bool
+	wg      sync.WaitGroup
+	mu      sync.Mutex
+	closing bool
+	closed  chan struct{}
 }
 
 var _ rpc.Client = (*xclient)(nil)
@@ -69,6 +70,7 @@
 		ns:        ns,
 		typeCache: newTypeCache(),
 		stop:      cancel,
+		closed:    make(chan struct{}),
 	}
 
 	for _, opt := range opts {
@@ -86,12 +88,12 @@
 	go func() {
 		<-ctx.Done()
 		c.mu.Lock()
-		// TODO(mattr): Do we really need c.closed?
-		c.closed = true
+		c.closing = true
 		c.mu.Unlock()
 
 		<-c.flowMgr.Closed()
 		c.wg.Wait()
+		close(c.closed)
 	}()
 
 	return c
@@ -334,7 +336,7 @@
 	authorizer := newServerAuthorizer(blessingPattern, opts...)
 	for i, server := range resolved.Names() {
 		c.mu.Lock()
-		if c.closed {
+		if c.closing {
 			c.mu.Unlock()
 			return nil, verror.NoRetry, false, verror.New(errClientCloseAlreadyCalled, ctx)
 		}
@@ -504,7 +506,7 @@
 }
 
 func (c *xclient) Closed() <-chan struct{} {
-	return c.flowMgr.Closed()
+	return c.closed
 }
 
 // flowXClient implements the RPC client-side protocol for a single RPC, over a
diff --git a/runtime/internal/vtrace/vtrace_test.go b/runtime/internal/vtrace/vtrace_test.go
index b20bf24..a8efe25 100644
--- a/runtime/internal/vtrace/vtrace_test.go
+++ b/runtime/internal/vtrace/vtrace_test.go
@@ -26,10 +26,6 @@
 	"v.io/x/ref/test/testutil"
 )
 
-func init() {
-	test.Init()
-}
-
 // initForTest initializes the vtrace runtime and starts a mounttable.
 func initForTest(t *testing.T) (*context.T, v23.Shutdown, *testutil.IDProvider) {
 	ctx, shutdown := test.V23Init()
diff --git a/services/agent/pod_agentd/main.go b/services/agent/pod_agentd/main.go
index a83d872..19f668a 100644
--- a/services/agent/pod_agentd/main.go
+++ b/services/agent/pod_agentd/main.go
@@ -10,6 +10,7 @@
 import (
 	"encoding/base64"
 	"io/ioutil"
+	"os"
 	"strings"
 
 	"v.io/v23"
@@ -93,6 +94,9 @@
 	if err = server.ServeAgent(i, lsecurity.NewImmutablePrincipal(p)); err != nil {
 		return err
 	}
+	if _, err := os.Stat(socketPath); err == nil {
+		os.Remove(socketPath)
+	}
 	if err = i.Listen(socketPath); err != nil {
 		return err
 	}
diff --git a/services/application/applicationd/impl_test.go b/services/application/applicationd/impl_test.go
index 770c20f..44a9ab7 100644
--- a/services/application/applicationd/impl_test.go
+++ b/services/application/applicationd/impl_test.go
@@ -39,29 +39,29 @@
 
 func checkEnvelope(t *testing.T, ctx *context.T, expected application.Envelope, stub repository.ApplicationClientStub, profiles ...string) {
 	if output, err := stub.Match(ctx, profiles); err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "Match() failed: %v", err))
+		t.Fatal(testutil.FormatLogLine(2, "Match() failed: %v", err))
 	} else if !reflect.DeepEqual(expected, output) {
-		t.Fatalf(testutil.FormatLogLine(2, "Incorrect Match output: expected %#v, got %#v", expected, output))
+		t.Fatal(testutil.FormatLogLine(2, "Incorrect Match output: expected %#v, got %#v", expected, output))
 	}
 }
 
 func checkNoEnvelope(t *testing.T, ctx *context.T, stub repository.ApplicationClientStub, profiles ...string) {
 	if _, err := stub.Match(ctx, profiles); err == nil || verror.ErrorID(err) != verror.ErrNoExist.ID {
-		t.Fatalf(testutil.FormatLogLine(2, "Unexpected error: expected %v, got %v", verror.ErrNoExist, err))
+		t.Fatal(testutil.FormatLogLine(2, "Unexpected error: expected %v, got %v", verror.ErrNoExist, err))
 	}
 }
 
 func checkProfiles(t *testing.T, ctx *context.T, stub repository.ApplicationClientStub, expected ...string) {
 	if output, err := stub.Profiles(ctx); err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "Profiles() failed: %v", err))
+		t.Fatal(testutil.FormatLogLine(2, "Profiles() failed: %v", err))
 	} else if !reflect.DeepEqual(expected, output) {
-		t.Fatalf(testutil.FormatLogLine(2, "Incorrect Profiles output: expected %v, got %v", expected, output))
+		t.Fatal(testutil.FormatLogLine(2, "Incorrect Profiles output: expected %v, got %v", expected, output))
 	}
 }
 
 func checkNoProfile(t *testing.T, ctx *context.T, stub repository.ApplicationClientStub) {
 	if _, err := stub.Profiles(ctx); err == nil || verror.ErrorID(err) != verror.ErrNoExist.ID {
-		t.Fatalf(testutil.FormatLogLine(2, "Unexpected error: expected %v, got %v", verror.ErrNoExist, err))
+		t.Fatal(testutil.FormatLogLine(2, "Unexpected error: expected %v, got %v", verror.ErrNoExist, err))
 	}
 }
 
diff --git a/services/build/buildd/service.go b/services/build/buildd/service.go
index 44ca144..431b0bb 100644
--- a/services/build/buildd/service.go
+++ b/services/build/buildd/service.go
@@ -93,6 +93,9 @@
 	if i.goroot != "" {
 		cmd.Env = append(cmd.Env, "GOROOT="+i.goroot)
 	}
+	if tmpdir, ok := os.LookupEnv("TMPDIR"); ok {
+		cmd.Env = append(cmd.Env, "TMPDIR="+tmpdir)
+	}
 	var output bytes.Buffer
 	cmd.Stdout = &output
 	cmd.Stderr = &output
diff --git a/services/cluster/vkube/doc.go b/services/cluster/vkube/doc.go
index 24bbfaf..570125a 100644
--- a/services/cluster/vkube/doc.go
+++ b/services/cluster/vkube/doc.go
@@ -12,7 +12,6 @@
    vkube [flags] <command>
 
 The vkube commands are:
-   get-credentials     Gets the kubernetes credentials from Google Cloud.
    start               Starts an application.
    update              Updates an application.
    stop                Stops an application.
@@ -20,6 +19,7 @@
    stop-cluster-agent  Stops the cluster agent.
    claim-cluster-agent Claims the cluster agent.
    build-docker-images Builds the docker images for the cluster and pod agents.
+   ctl                 Runs kubectl on the cluster defined in vkube.cfg.
    help                Display help for commands or topics
 
 The vkube flags are:
@@ -76,13 +76,6 @@
  -vpath=
    comma-separated list of pattern=N settings for file pathname-filtered logging
 
-Vkube get-credentials
-
-Gets the kubernetes credentials from Google Cloud.
-
-Usage:
-   vkube get-credentials
-
 Vkube start
 
 Starts an application.
@@ -95,6 +88,8 @@
 The vkube start flags are:
  -f=
    Filename to use to create the kubernetes resource.
+ -wait=false
+   Wait for at least one replica to be ready.
 
 Vkube update
 
@@ -107,6 +102,8 @@
 The vkube update flags are:
  -f=
    Filename to use to update the kubernetes resource.
+ -wait=false
+   Wait for at least one replica to be ready after the update.
 
 Vkube stop
 
@@ -124,7 +121,11 @@
 Starts the cluster agent.
 
 Usage:
-   vkube start-cluster-agent
+   vkube start-cluster-agent [flags]
+
+The vkube start-cluster-agent flags are:
+ -wait=false
+   Wait for the cluster agent to be ready.
 
 Vkube stop-cluster-agent
 
@@ -148,9 +149,20 @@
    vkube build-docker-images [flags]
 
 The vkube build-docker-images flags are:
+ -tag=
+   The tag to add to the docker images. If empty, the current timestamp is used.
  -v=false
    When true, the output is more verbose.
 
+Vkube ctl
+
+Runs kubectl on the cluster defined in vkube.cfg.
+
+Usage:
+   vkube ctl -- <kubectl args>
+
+<kubectl args> are passed directly to the kubectl command.
+
 Vkube help - Display help for commands or topics
 
 Help with no args displays the usage of the parent command.
diff --git a/services/cluster/vkube/docker.go b/services/cluster/vkube/docker.go
index 4eb223b..ba840f4 100644
--- a/services/cluster/vkube/docker.go
+++ b/services/cluster/vkube/docker.go
@@ -17,7 +17,7 @@
 
 const (
 	clusterAgentDockerfile = `
-FROM debian:stable
+FROM google/debian:wheezy
 
 # gcloud
 RUN apt-get update && apt-get install -y -qq --no-install-recommends wget unzip python php5-mysql php5-cli php5-cgi openjdk-7-jre-headless openssh-client python-openssl && apt-get clean
@@ -33,8 +33,6 @@
 #RUN apt-get install --no-install-recommends -y -q libssl1.0.0
 ADD claimable cluster_agent cluster_agentd init.sh /usr/local/bin/
 RUN chmod 755 /usr/local/bin/*
-
-EXPOSE 8193
 CMD ["/usr/local/bin/init.sh"]
 `
 	clusterAgentInitSh = `#!/bin/sh
@@ -62,8 +60,8 @@
 `
 
 	podAgentDockerfile = `
-FROM debian:stable
-RUN apt-get update && apt-get install --no-install-recommends -y -q libssl1.0.0
+FROM google/debian:wheezy
+RUN apt-get update && apt-get install --no-install-recommends -y -q libssl1.0.0 && apt-get clean
 ADD pod_agentd /usr/local/bin/
 RUN chmod 755 /usr/local/bin/pod_agentd
 `
@@ -79,11 +77,13 @@
 	args []string
 }
 
-func buildDockerImages(config *vkubeConfig, verbose bool, stdout io.Writer) error {
-	ts := time.Now().Format("20060102150405")
+func buildDockerImages(config *vkubeConfig, tag string, verbose bool, stdout io.Writer) error {
+	if tag == "" {
+		tag = time.Now().Format("20060102150405")
+	}
 	// Cluster agent image.
 	imageName := removeTag(config.ClusterAgent.Image)
-	imageNameTag := fmt.Sprintf("%s:%s", imageName, ts)
+	imageNameTag := fmt.Sprintf("%s:%s", imageName, tag)
 
 	var out io.Writer
 	if verbose {
@@ -107,7 +107,7 @@
 
 	// Pod agent image.
 	imageName = removeTag(config.PodAgent.Image)
-	imageNameTag = fmt.Sprintf("%s:%s", imageName, ts)
+	imageNameTag = fmt.Sprintf("%s:%s", imageName, tag)
 
 	if err := buildDockerImage([]dockerFile{
 		{"Dockerfile", []byte(podAgentDockerfile)},
diff --git a/services/cluster/vkube/main.go b/services/cluster/vkube/main.go
index 4360be1..b00fd81 100644
--- a/services/cluster/vkube/main.go
+++ b/services/cluster/vkube/main.go
@@ -9,7 +9,11 @@
 
 import (
 	"fmt"
+	"io/ioutil"
+	"os"
+	"os/exec"
 	"strings"
+	"time"
 
 	"v.io/v23"
 	"v.io/v23/context"
@@ -26,6 +30,8 @@
 	flagGcloudBin    string
 	flagResourceFile string
 	flagVerbose      bool
+	flagTag          string
+	flagWait         bool
 )
 
 func main() {
@@ -36,7 +42,6 @@
 		Short: "Manages Vanadium applications on kubernetes",
 		Long:  "Manages Vanadium applications on kubernetes",
 		Children: []*cmdline.Command{
-			cmdGetCredentials,
 			cmdStart,
 			cmdUpdate,
 			cmdStop,
@@ -44,6 +49,7 @@
 			cmdStopClusterAgent,
 			cmdClaimClusterAgent,
 			cmdBuildDockerImages,
+			cmdCtl,
 		},
 	}
 	cmd.Flags.StringVar(&flagConfigFile, "config", "vkube.cfg", "The 'vkube.cfg' file to use.")
@@ -51,42 +57,44 @@
 	cmd.Flags.StringVar(&flagGcloudBin, "gcloud", "gcloud", "The 'gcloud' binary to use.")
 
 	cmdStart.Flags.StringVar(&flagResourceFile, "f", "", "Filename to use to create the kubernetes resource.")
+	cmdStart.Flags.BoolVar(&flagWait, "wait", false, "Wait for at least one replica to be ready.")
 
 	cmdUpdate.Flags.StringVar(&flagResourceFile, "f", "", "Filename to use to update the kubernetes resource.")
+	cmdUpdate.Flags.BoolVar(&flagWait, "wait", false, "Wait for at least one replica to be ready after the update.")
 
 	cmdStop.Flags.StringVar(&flagResourceFile, "f", "", "Filename to use to stop the kubernetes resource.")
 
+	cmdStartClusterAgent.Flags.BoolVar(&flagWait, "wait", false, "Wait for the cluster agent to be ready.")
+
 	cmdBuildDockerImages.Flags.BoolVar(&flagVerbose, "v", false, "When true, the output is more verbose.")
+	cmdBuildDockerImages.Flags.StringVar(&flagTag, "tag", "", "The tag to add to the docker images. If empty, the current timestamp is used.")
 
 	cmdline.Main(cmd)
 }
 
-var cmdGetCredentials = &cmdline.Command{
-	Runner: v23cmd.RunnerFunc(runCmdGetCredentials),
-	Name:   "get-credentials",
-	Short:  "Gets the kubernetes credentials from Google Cloud.",
-	Long:   "Gets the kubernetes credentials from Google Cloud.",
-}
+func kubeCmdRunner(kcmd func(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error) cmdline.Runner {
+	return v23cmd.RunnerFunc(func(ctx *context.T, env *cmdline.Env, args []string) error {
+		config, err := readConfig(flagConfigFile)
+		if err != nil {
+			return err
+		}
+		f, err := ioutil.TempFile("", "kubeconfig-")
+		if err != nil {
+			return err
+		}
+		os.Setenv("KUBECONFIG", f.Name())
+		defer os.Remove(f.Name())
+		f.Close()
 
-func runCmdGetCredentials(ctx *context.T, env *cmdline.Env, args []string) error {
-	config, err := readConfig(flagConfigFile)
-	if err != nil {
-		return err
-	}
-	if config.Cluster == "" {
-		return fmt.Errorf("Cluster must be set.")
-	}
-	if config.Project == "" {
-		return fmt.Errorf("Project must be set.")
-	}
-	if config.Zone == "" {
-		return fmt.Errorf("Zone must be set.")
-	}
-	return getCredentials(config.Cluster, config.Project, config.Zone)
+		if out, err := exec.Command(flagGcloudBin, "container", "clusters", "get-credentials", config.Cluster, "--project", config.Project, "--zone", config.Zone).CombinedOutput(); err != nil {
+			return fmt.Errorf("failed to get credentials for %q: %v: %s", config.Cluster, err, out)
+		}
+		return kcmd(ctx, env, args, config)
+	})
 }
 
 var cmdStart = &cmdline.Command{
-	Runner:   v23cmd.RunnerFunc(runCmdStart),
+	Runner:   kubeCmdRunner(runCmdStart),
 	Name:     "start",
 	Short:    "Starts an application.",
 	Long:     "Starts an application.",
@@ -94,16 +102,11 @@
 	ArgsLong: "<extension> The blessing name extension to give to the application.",
 }
 
-func runCmdStart(ctx *context.T, env *cmdline.Env, args []string) error {
+func runCmdStart(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error {
 	if expected, got := 1, len(args); expected != got {
 		return env.UsageErrorf("start: incorrect number of arguments, expected %d, got %d", expected, got)
 	}
 	extension := args[0]
-
-	config, err := readConfig(flagConfigFile)
-	if err != nil {
-		return err
-	}
 	if flagResourceFile == "" {
 		return fmt.Errorf("-f must be specified.")
 	}
@@ -126,13 +129,15 @@
 	}
 	namespace := rc.getString("metadata.namespace")
 	appName := rc.getString("spec.template.metadata.labels.application")
-	if n, err := findReplicationControllerNameForApp(appName, namespace); err == nil {
-		return fmt.Errorf("replication controller for application=%q already running: %s", appName, n)
+	if n, err := findReplicationControllerNamesForApp(appName, namespace); err != nil {
+		return err
+	} else if len(n) != 0 {
+		return fmt.Errorf("replication controller for application=%q already running: %q", appName, n)
 	}
 	if err := createSecret(ctx, secretName, namespace, agentAddr, extension); err != nil {
 		return err
 	}
-	fmt.Fprintln(env.Stdout, "Created Secret successfully.")
+	fmt.Fprintln(env.Stdout, "Created secret successfully.")
 
 	if err := createReplicationController(ctx, config, rc, secretName); err != nil {
 		if err := deleteSecret(ctx, config, secretName, namespace); err != nil {
@@ -141,21 +146,23 @@
 		return err
 	}
 	fmt.Fprintln(env.Stdout, "Created replication controller successfully.")
+	if flagWait {
+		if err := waitForReadyPods(appName, namespace); err != nil {
+			return err
+		}
+		fmt.Fprintln(env.Stdout, "Application is running.")
+	}
 	return nil
 }
 
 var cmdUpdate = &cmdline.Command{
-	Runner: v23cmd.RunnerFunc(runCmdUpdate),
+	Runner: kubeCmdRunner(runCmdUpdate),
 	Name:   "update",
 	Short:  "Updates an application.",
 	Long:   "Updates an application to a new version with a rolling update, preserving the existing blessings.",
 }
 
-func runCmdUpdate(ctx *context.T, env *cmdline.Env, args []string) error {
-	config, err := readConfig(flagConfigFile)
-	if err != nil {
-		return err
-	}
+func runCmdUpdate(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error {
 	if flagResourceFile == "" {
 		return fmt.Errorf("-f must be specified.")
 	}
@@ -167,21 +174,25 @@
 		return err
 	}
 	fmt.Fprintln(env.Stdout, "Updated replication controller successfully.")
+	if flagWait {
+		namespace := rc.getString("metadata.namespace")
+		appName := rc.getString("spec.template.metadata.labels.application")
+		if err := waitForReadyPods(appName, namespace); err != nil {
+			return err
+		}
+		fmt.Fprintln(env.Stdout, "Application is running.")
+	}
 	return nil
 }
 
 var cmdStop = &cmdline.Command{
-	Runner: v23cmd.RunnerFunc(runCmdStop),
+	Runner: kubeCmdRunner(runCmdStop),
 	Name:   "stop",
 	Short:  "Stops an application.",
 	Long:   "Stops an application.",
 }
 
-func runCmdStop(ctx *context.T, env *cmdline.Env, args []string) error {
-	config, err := readConfig(flagConfigFile)
-	if err != nil {
-		return err
-	}
+func runCmdStop(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error {
 	if flagResourceFile == "" {
 		return fmt.Errorf("-f must be specified.")
 	}
@@ -201,64 +212,64 @@
 	if out, err := kubectl("--namespace="+namespace, "stop", "rc", name); err != nil {
 		return fmt.Errorf("failed to stop replication controller: %v: %s", err, out)
 	}
-	fmt.Fprintf(env.Stdout, "Stopping replication controller.\n")
+	fmt.Fprintln(env.Stdout, "Stopping replication controller.")
 	if err := deleteSecret(ctx, config, secretName, namespace); err != nil {
-		return fmt.Errorf("failed to delete Secret: %v", err)
+		return fmt.Errorf("failed to delete secret: %v", err)
 	}
-	fmt.Fprintf(env.Stdout, "Deleting Secret.\n")
+	fmt.Fprintln(env.Stdout, "Deleting secret.")
 	return nil
 }
 
 var cmdStartClusterAgent = &cmdline.Command{
-	Runner: v23cmd.RunnerFunc(runCmdStartClusterAgent),
+	Runner: kubeCmdRunner(runCmdStartClusterAgent),
 	Name:   "start-cluster-agent",
 	Short:  "Starts the cluster agent.",
 	Long:   "Starts the cluster agent.",
 }
 
-func runCmdStartClusterAgent(ctx *context.T, env *cmdline.Env, args []string) error {
-	config, err := readConfig(flagConfigFile)
-	if err != nil {
-		return err
-	}
+func runCmdStartClusterAgent(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error {
 	if err := createClusterAgent(ctx, config); err != nil {
 		return err
 	}
-	fmt.Fprintf(env.Stdout, "Starting Cluster Agent.\n")
+	fmt.Fprintln(env.Stdout, "Starting cluster agent.")
+	if flagWait {
+		if err := waitForReadyPods(clusterAgentApplicationName, config.ClusterAgent.Namespace); err != nil {
+			return err
+		}
+		for {
+			if _, err := findClusterAgent(config, true); err == nil {
+				break
+			}
+			time.Sleep(time.Second)
+		}
+		fmt.Fprintf(env.Stdout, "Cluster agent is ready.\n")
+	}
 	return nil
 }
 
 var cmdStopClusterAgent = &cmdline.Command{
-	Runner: v23cmd.RunnerFunc(runCmdStopClusterAgent),
+	Runner: kubeCmdRunner(runCmdStopClusterAgent),
 	Name:   "stop-cluster-agent",
 	Short:  "Stops the cluster agent.",
 	Long:   "Stops the cluster agent.",
 }
 
-func runCmdStopClusterAgent(ctx *context.T, env *cmdline.Env, args []string) error {
-	config, err := readConfig(flagConfigFile)
-	if err != nil {
-		return err
-	}
+func runCmdStopClusterAgent(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error {
 	if err := stopClusterAgent(config); err != nil {
 		return err
 	}
-	fmt.Fprintf(env.Stdout, "Stopping Cluster Agent.\n")
+	fmt.Fprintln(env.Stdout, "Stopping cluster agent.")
 	return nil
 }
 
 var cmdClaimClusterAgent = &cmdline.Command{
-	Runner: v23cmd.RunnerFunc(runCmdClaimClusterAgent),
+	Runner: kubeCmdRunner(runCmdClaimClusterAgent),
 	Name:   "claim-cluster-agent",
 	Short:  "Claims the cluster agent.",
 	Long:   "Claims the cluster agent.",
 }
 
-func runCmdClaimClusterAgent(ctx *context.T, env *cmdline.Env, args []string) error {
-	config, err := readConfig(flagConfigFile)
-	if err != nil {
-		return err
-	}
+func runCmdClaimClusterAgent(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error {
 	myBlessings := v23.GetPrincipal(ctx).BlessingStore().Default()
 	claimer := clusterAgentClaimer(config)
 	if !myBlessings.CouldHaveNames([]string{claimer}) {
@@ -271,21 +282,34 @@
 		}
 		return err
 	}
-	fmt.Fprintf(env.Stdout, "Claimed Cluster Agent successfully.\n")
+	fmt.Fprintln(env.Stdout, "Claimed cluster agent successfully.")
 	return nil
 }
 
 var cmdBuildDockerImages = &cmdline.Command{
-	Runner: v23cmd.RunnerFunc(runCmdBuildDockerImages),
+	Runner: kubeCmdRunner(runCmdBuildDockerImages),
 	Name:   "build-docker-images",
 	Short:  "Builds the docker images for the cluster and pod agents.",
 	Long:   "Builds the docker images for the cluster and pod agents.",
 }
 
-func runCmdBuildDockerImages(ctx *context.T, env *cmdline.Env, args []string) error {
-	config, err := readConfig(flagConfigFile)
-	if err != nil {
-		return err
-	}
-	return buildDockerImages(config, flagVerbose, env.Stdout)
+func runCmdBuildDockerImages(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error {
+	return buildDockerImages(config, flagTag, flagVerbose, env.Stdout)
+}
+
+var cmdCtl = &cmdline.Command{
+	Runner:   kubeCmdRunner(runCmdCtl),
+	Name:     "ctl",
+	Short:    "Runs kubectl on the cluster defined in vkube.cfg.",
+	Long:     "Runs kubectl on the cluster defined in vkube.cfg.",
+	ArgsName: "-- <kubectl args>",
+	ArgsLong: "<kubectl args> are passed directly to the kubectl command.",
+}
+
+func runCmdCtl(ctx *context.T, env *cmdline.Env, args []string, config *vkubeConfig) error {
+	cmd := exec.Command(flagKubectlBin, args...)
+	cmd.Stdin = env.Stdin
+	cmd.Stdout = env.Stdout
+	cmd.Stderr = env.Stderr
+	return cmd.Run()
 }
diff --git a/services/cluster/vkube/util.go b/services/cluster/vkube/util.go
index cd69360..650bebd 100644
--- a/services/cluster/vkube/util.go
+++ b/services/cluster/vkube/util.go
@@ -13,6 +13,7 @@
 	"io/ioutil"
 	"os/exec"
 	"strings"
+	"time"
 
 	"v.io/v23"
 	"v.io/v23/context"
@@ -22,18 +23,6 @@
 	"v.io/x/ref/services/cluster"
 )
 
-// getCredentials uses the gcloud command to get the credentials required to
-// access the kubernetes cluster.
-func getCredentials(cluster, project, zone string) error {
-	if out, err := exec.Command(flagGcloudBin, "config", "set", "container/cluster", cluster).CombinedOutput(); err != nil {
-		return fmt.Errorf("failed to set container/cluster: %v: %s", err, out)
-	}
-	if out, err := exec.Command(flagGcloudBin, "container", "clusters", "get-credentials", cluster, "--project", project, "--zone", zone).CombinedOutput(); err != nil {
-		return fmt.Errorf("failed to set get credentials for %q: %v: %s", cluster, err, out)
-	}
-	return nil
-}
-
 // localAgentAddress returns the address of the cluster agent to use from within
 // the cluster.
 func localAgentAddress(config *vkubeConfig) string {
@@ -209,11 +198,15 @@
 // updateReplicationController takes a ReplicationController object, adds a
 // pod-agent, and then performs a rolling update.
 func updateReplicationController(ctx *context.T, config *vkubeConfig, rc object) error {
-	oldName, err := findReplicationControllerNameForApp(rc.getString("spec.template.metadata.labels.application"), rc.getString("metadata.namespace"))
+	namespace := rc.getString("metadata.namespace")
+	oldNames, err := findReplicationControllerNamesForApp(rc.getString("spec.template.metadata.labels.application"), namespace)
 	if err != nil {
 		return err
 	}
-	secretName, err := findSecretName(oldName, rc.getString("metadata.namespace"))
+	if len(oldNames) != 1 {
+		return fmt.Errorf("found %d replication controllers for this application: %q", len(oldNames), oldNames)
+	}
+	secretName, err := findSecretName(oldNames[0], namespace)
 	if err != nil {
 		return err
 	}
@@ -224,10 +217,10 @@
 	if err != nil {
 		return err
 	}
-	cmd := exec.Command(flagKubectlBin, "rolling-update", oldName, "-f", "-")
+	cmd := exec.Command(flagKubectlBin, "rolling-update", oldNames[0], "-f", "-", "--namespace="+namespace)
 	cmd.Stdin = bytes.NewBuffer(json)
 	if out, err := cmd.CombinedOutput(); err != nil {
-		return fmt.Errorf("failed to update replication controller %q: %v\n%s\n", oldName, err, string(out))
+		return fmt.Errorf("failed to update replication controller %q: %v\n%s\n", oldNames[0], err, string(out))
 	}
 	return nil
 }
@@ -258,26 +251,22 @@
 	return fmt.Sprintf("secret-%s", hex.EncodeToString(b)), nil
 }
 
-// findReplicationControllerNameForApp returns the name of the
-// ReplicationController that is currently used to run the given application.
-func findReplicationControllerNameForApp(app, namespace string) (string, error) {
+// findReplicationControllerNamesForApp returns the names of the
+// ReplicationControllers that are currently used to run the given application.
+func findReplicationControllerNamesForApp(app, namespace string) ([]string, error) {
 	data, err := kubectl("--namespace="+namespace, "get", "rc", "-l", "application="+app, "-o", "json")
 	if err != nil {
-		return "", fmt.Errorf("failed to get replication controller for application %q: %v\n%s\n", app, err, string(data))
+		return nil, fmt.Errorf("failed to get replication controller for application %q: %v\n%s\n", app, err, string(data))
 	}
 	var list object
 	if err := list.importJSON(data); err != nil {
-		return "", fmt.Errorf("failed to parse kubectl output: %v", err)
+		return nil, fmt.Errorf("failed to parse kubectl output: %v", err)
 	}
-	items := list.getObjectArray("items")
-	if c := len(items); c != 1 {
-		return "", fmt.Errorf("found %d replication controllers for application %q", c, app)
+	names := []string{}
+	for _, item := range list.getObjectArray("items") {
+		names = append(names, item.getString("metadata.name"))
 	}
-	name := items[0].getString("metadata.name")
-	if name == "" {
-		return "", fmt.Errorf("missing metadata.name")
-	}
-	return name, nil
+	return names, nil
 }
 
 // findSecretName finds the name of the Secret Object associated the given
@@ -299,6 +288,46 @@
 	return "", fmt.Errorf("failed to find secretName in replication controller %q", rcName)
 }
 
+func readyPods(appName, namespace string) ([]string, error) {
+	data, err := kubectl("--namespace="+namespace, "get", "pod", "-l", "application="+appName, "-o", "json")
+	if err != nil {
+		return nil, err
+	}
+	var list object
+	if err := list.importJSON(data); err != nil {
+		return nil, fmt.Errorf("failed to parse kubectl output: %v", err)
+	}
+	names := []string{}
+	for _, item := range list.getObjectArray("items") {
+		if item.get("status.phase") != "Running" {
+			continue
+		}
+		for _, cond := range item.getObjectArray("status.conditions") {
+			if cond.get("type") == "Ready" && cond.get("status") == "True" {
+				names = append(names, item.getString("metadata.name"))
+				break
+			}
+		}
+		for _, status := range item.getObjectArray("status.containerStatuses") {
+			if status.get("ready") == false && status.getInt("restartCount") >= 5 {
+				return nil, fmt.Errorf("application is failing: %#v", item)
+			}
+		}
+	}
+	return names, nil
+}
+
+func waitForReadyPods(appName, namespace string) error {
+	for {
+		if n, err := readyPods(appName, namespace); err != nil {
+			return err
+		} else if len(n) > 0 {
+			return nil
+		}
+		time.Sleep(time.Second)
+	}
+}
+
 // kubectlCreate runs 'kubectl create -f' on the given object and returns the
 // output.
 func kubectlCreate(o object) ([]byte, error) {
diff --git a/services/cluster/vkube/v23_test.go b/services/cluster/vkube/v23_test.go
new file mode 100644
index 0000000..4694e29
--- /dev/null
+++ b/services/cluster/vkube/v23_test.go
@@ -0,0 +1,30 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated via go generate.
+// DO NOT UPDATE MANUALLY
+
+package main_test
+
+import (
+	"os"
+	"testing"
+
+	"v.io/x/ref/test"
+	"v.io/x/ref/test/modules"
+	"v.io/x/ref/test/v23tests"
+)
+
+func TestMain(m *testing.M) {
+	test.Init()
+	modules.DispatchAndExitIfChild()
+	cleanup := v23tests.UseSharedBinDir()
+	r := m.Run()
+	cleanup()
+	os.Exit(r)
+}
+
+func TestV23Vkube(t *testing.T) {
+	v23tests.RunTest(t, V23TestVkube)
+}
diff --git a/services/cluster/vkube/vkube_v23_test.go b/services/cluster/vkube/vkube_v23_test.go
new file mode 100644
index 0000000..8b97f69
--- /dev/null
+++ b/services/cluster/vkube/vkube_v23_test.go
@@ -0,0 +1,298 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate jiri test generate
+
+package main_test
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"math/rand"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"sync"
+	"testing"
+	"text/template"
+	"time"
+
+	"v.io/x/ref/test/testutil"
+	"v.io/x/ref/test/v23tests"
+)
+
+var (
+	flagProject = flag.String("project", "", "The name of the GCE project to use.")
+	flagZone    = flag.String("zone", "", "The name of the GCE zone to use.")
+	flagCluster = flag.String("cluster", "", "The name of the kubernetes cluster to use.")
+)
+
+// V23TestVkube is an end-to-end test for the vkube command. It operates on a
+// pre-existing kubernetes cluster running on GCE.
+// This test can easily exceed the default test timeout of 10m. It is
+// recommended to use -test.timeout=20m.
+func V23TestVkube(t *v23tests.T) {
+	if *flagProject == "" || *flagZone == "" || *flagCluster == "" {
+		t.Skip("--project, --zone, or --cluster not specified")
+	}
+	if testing.Short() {
+		t.Skip("skipping test in short mode.")
+	}
+	workdir := t.NewTempDir("")
+
+	id := fmt.Sprintf("vkube-test-%08x", rand.Uint32())
+
+	vkubeCfgPath := filepath.Join(workdir, "vkube.cfg")
+	if err := createVkubeConfig(vkubeCfgPath, id); err != nil {
+		t.Fatal(err)
+	}
+
+	creds, err := t.Shell().NewChildCredentials("alice")
+	if err != nil {
+		t.Fatalf("Failed to create alice credentials: %v", err)
+	}
+
+	vkubeBin := t.BuildV23Pkg("v.io/x/ref/services/cluster/vkube")
+	opts := vkubeBin.StartOpts().WithCustomCredentials(creds)
+	opts.ShutdownTimeout = 10 * time.Minute
+	vkubeBin = vkubeBin.WithStartOpts(opts)
+
+	vshBin := t.BuildV23Pkg("v.io/x/ref/examples/tunnel/vsh")
+	vshBin = vshBin.WithStartOpts(vshBin.StartOpts().WithCustomCredentials(creds))
+
+	var (
+		cmd = func(bin *v23tests.Binary, expectSuccess bool, baseArgs ...string) func(args ...string) string {
+			return func(args ...string) string {
+				cmd := filepath.Base(bin.Path())
+				w := &writer{name: cmd}
+				args = append(baseArgs, args...)
+				if err := bin.Start(args...).Wait(w, w); expectSuccess && err != nil {
+					t.Error(testutil.FormatLogLine(2, "Unexpected failure: %s %s :%v", cmd, strings.Join(args, " "), err))
+				} else if !expectSuccess && err == nil {
+				t.Error(testutil.FormatLogLine(2, "Unexpected success: %s %s", cmd, strings.Join(args, " ")))
+				}
+				return w.output()
+			}
+		}
+		gsutil      = cmd(t.BinaryFromPath("gsutil"), true)
+		gcloud      = cmd(t.BinaryFromPath("gcloud"), true, "--project="+*flagProject)
+		docker      = cmd(t.BinaryFromPath("docker"), true)
+		vkubeOK     = cmd(vkubeBin, true, "--config="+vkubeCfgPath)
+		vkubeFail   = cmd(vkubeBin, false, "--config="+vkubeCfgPath)
+		kubectlOK   = cmd(vkubeBin, true, "--config="+vkubeCfgPath, "ctl", "--", "--namespace="+id)
+		kubectlFail = cmd(vkubeBin, false, "--config="+vkubeCfgPath, "ctl", "--", "--namespace="+id)
+		vshOK       = cmd(vshBin, true)
+	)
+
+	if out := kubectlOK("cluster-info"); strings.Contains(out, "ERROR:") {
+		// Exit early if we don't have valid credentials.
+		t.Fatalf("Failed to get cluster information: %v", out)
+	}
+
+	// Create a bucket to store the docker images.
+	gsutil("mb", "-p", *flagProject, "gs://"+id)
+	defer func() {
+		kubectlOK("delete", "namespace", id)
+		gsutil("-m", "rm", "-R", "-a", "gs://"+id+"/*")
+		gsutil("rb", "gs://"+id)
+	}()
+
+	// Create app's docker image and configs.
+	dockerDir, err := setupDockerDirectory(workdir)
+	if err != nil {
+		t.Fatal(err)
+	}
+	appImage := "b.gcr.io/" + id + "/tunneld:latest"
+	docker("build", "-t", appImage, dockerDir)
+	gcloud("docker", "push", appImage)
+	appConf1 := filepath.Join(workdir, "app1.json")
+	if err := createAppConfig(appConf1, id, appImage, "1"); err != nil {
+		t.Fatal(err)
+	}
+	appConf2 := filepath.Join(workdir, "app2.json")
+	if err := createAppConfig(appConf2, id, appImage, "2"); err != nil {
+		t.Fatal(err)
+	}
+
+	vkubeOK("build-docker-images", "-v", "-tag=test")
+	// Clean up local docker images.
+	docker(
+		"rmi",
+		"b.gcr.io/"+id+"/cluster-agent",
+		"b.gcr.io/"+id+"/cluster-agent:test",
+		"b.gcr.io/"+id+"/pod-agent",
+		"b.gcr.io/"+id+"/pod-agent:test",
+		"b.gcr.io/"+id+"/tunneld",
+	)
+
+	// Run the actual tests.
+	vkubeOK("start-cluster-agent", "--wait")
+	kubectlOK("get", "service", "cluster-agent")
+	kubectlOK("get", "rc", "cluster-agentd-latest")
+	vkubeOK("claim-cluster-agent")
+	vkubeFail("start-cluster-agent") // Already running
+	vkubeFail("claim-cluster-agent") // Already claimed
+
+	vkubeOK("start", "-f", appConf1, "--wait", "my-app")
+	kubectlOK("get", "rc", "tunneld-1")
+	vkubeFail("start", "-f", appConf1, "my-app") // Already running
+
+	vkubeOK("update", "-f", appConf2, "--wait")
+	kubectlOK("get", "rc", "tunneld-2")
+
+	// Find the pod running tunneld, get the server's addr from its stdout.
+	podName := kubectlOK("get", "pod", "-l", "application=tunneld", "--template={{range .items}}{{.metadata.name}}{{end}}")
+	addr := strings.TrimPrefix(strings.TrimSpace(kubectlOK("logs", podName, "-c", "tunneld")), "NAME=")
+	if got, expected := vshOK(addr, "echo", "hello", "world"), "hello world\n"; got != expected {
+		t.Errorf("Unexpected output. Got %q, expected %q", got, expected)
+	}
+
+	vkubeOK("stop", "-f", appConf2)
+	kubectlFail("get", "rc", "tunneld-2") // No longer running
+	vkubeFail("stop", "-f", appConf2)     // No longer running
+
+	vkubeOK("stop-cluster-agent")
+	kubectlFail("get", "service", "cluster-agent")
+	kubectlFail("get", "rc", "cluster-agentd-latest")
+}
+
+// An io.Writer that sends everything to stdout, each line prefixed with
+// "name> ", and also captures all the output.
+type writer struct {
+	sync.Mutex
+	name string
+	line bytes.Buffer
+	out  bytes.Buffer
+}
+
+func (w *writer) Write(p []byte) (n int, err error) {
+	w.Lock()
+	defer w.Unlock()
+	n = len(p)
+	w.out.Write(p)
+	for len(p) > 0 {
+		if w.line.Len() == 0 {
+			fmt.Fprintf(&w.line, "%s> ", w.name)
+		}
+		if off := bytes.IndexByte(p, '\n'); off != -1 {
+			off += 1
+			w.line.Write(p[:off])
+			w.line.WriteTo(os.Stdout)
+			p = p[off:]
+			continue
+		}
+		w.line.Write(p)
+		break
+	}
+	return
+}
+
+func (w *writer) output() string {
+	w.Lock()
+	defer w.Unlock()
+	if w.line.Len() != 0 {
+		w.line.WriteString(" [no \\n at EOL]\n")
+		w.line.WriteTo(os.Stdout)
+	}
+	return w.out.String()
+}
+
+func createVkubeConfig(path, id string) error {
+	f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0644)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	params := struct{ Project, Zone, Cluster, ID string }{*flagProject, *flagZone, *flagCluster, id}
+	return template.Must(template.New("cfg").Parse(`{
+  "project": "{{.Project}}",
+  "zone": "{{.Zone}}",
+  "cluster": "{{.Cluster}}",
+  "clusterAgent": {
+    "namespace": "{{.ID}}",
+    "image": "b.gcr.io/{{.ID}}/cluster-agent:latest",
+    "blessing": "root/alice/cluster-agent",
+    "admin": "root/alice",
+    "cpu": "0.1",
+    "memory": "100M"
+  },
+  "podAgent": {
+    "image": "b.gcr.io/{{.ID}}/pod-agent:latest"
+  }
+}`)).Execute(f, params)
+}
+
+func setupDockerDirectory(workdir string) (string, error) {
+	dockerDir := filepath.Join(workdir, "docker")
+	if err := os.Mkdir(dockerDir, 0755); err != nil {
+		return "", err
+	}
+	if err := ioutil.WriteFile(filepath.Join(dockerDir, "Dockerfile"), []byte(
+		`FROM google/debian:wheezy
+RUN apt-get update && apt-get install -y -qq --no-install-recommends libssl1.0.0 && apt-get clean
+ADD tunneld /usr/local/bin/
+`), 0644); err != nil {
+		return "", err
+	}
+	if out, err := exec.Command("jiri", "go", "build", "-o", filepath.Join(dockerDir, "tunneld"), "v.io/x/ref/examples/tunnel/tunneld").CombinedOutput(); err != nil {
+		return "", fmt.Errorf("build failed: %v: %s", err, string(out))
+	}
+	return dockerDir, nil
+}
+
+func createAppConfig(path, id, image, version string) error {
+	f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0644)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	params := struct{ ID, Image, Version string }{id, image, version}
+	return template.Must(template.New("appcfg").Parse(`{
+  "apiVersion": "v1",
+  "kind": "ReplicationController",
+  "metadata": {
+    "name": "tunneld-{{.Version}}",
+    "namespace": "{{.ID}}",
+    "labels": {
+      "application": "tunneld"
+    }
+  },
+  "spec": {
+    "replicas": 1,
+    "template": {
+      "metadata": {
+        "labels": {
+          "application": "tunneld",
+          "deployment": "{{.Version}}"
+        }
+      },
+      "spec": {
+        "containers": [
+          {
+            "name": "tunneld",
+            "image": "{{.Image}}",
+            "command": [
+              "tunneld",
+              "--v23.tcp.address=:8193",
+              "--v23.permissions.literal={\"Admin\":{\"In\":[\"root/alice\"]}}",
+	      "--alsologtostderr=false"
+            ],
+            "ports": [
+              { "containerPort": 8193, "hostPort": 8193 }
+            ],
+            "resources": {
+              "limits": { "cpu": "0.1", "memory": "100M" }
+            }
+          }
+        ]
+      }
+    }
+  }
+}`)).Execute(f, params)
+}
diff --git a/services/device/deviced/internal/impl/perms/debug_perms_test.go b/services/device/deviced/internal/impl/perms/debug_perms_test.go
index 5affa34..ed3b063 100644
--- a/services/device/deviced/internal/impl/perms/debug_perms_test.go
+++ b/services/device/deviced/internal/impl/perms/debug_perms_test.go
@@ -25,17 +25,17 @@
 	accessStub := permissions.ObjectClient(naming.Join(name...))
 	perms, version, err := accessStub.GetPermissions(ctx)
 	if err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "GetPermissions(%v) failed %v", name, err))
+		t.Fatal(testutil.FormatLogLine(2, "GetPermissions(%v) failed %v", name, err))
 	}
 	perms.Add(security.BlessingPattern(blessing), right)
 	if err = accessStub.SetPermissions(ctx, perms, version); err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "SetPermissions(%v, %v, %v) failed: %v", name, blessing, right, err))
+		t.Fatal(testutil.FormatLogLine(2, "SetPermissions(%v, %v, %v) failed: %v", name, blessing, right, err))
 	}
 }
 
 func testAccessFail(t *testing.T, expected verror.ID, ctx *context.T, who string, name ...string) {
 	if _, err := utiltest.StatsStub(name...).Value(ctx); verror.ErrorID(err) != expected {
-		t.Fatalf(testutil.FormatLogLine(2, "%s got error %v but expected %v", who, err, expected))
+		t.Fatal(testutil.FormatLogLine(2, "%s got error %v but expected %v", who, err, expected))
 	}
 }
 
diff --git a/services/device/deviced/internal/impl/utiltest/app.go b/services/device/deviced/internal/impl/utiltest/app.go
index 3e10ea9..7e64b30 100644
--- a/services/device/deviced/internal/impl/utiltest/app.go
+++ b/services/device/deviced/internal/impl/utiltest/app.go
@@ -185,7 +185,7 @@
 	select {
 	case args = <-p.ing:
 	case <-time.After(pingTimeout):
-		t.Fatalf(testutil.FormatLogLine(2, "failed to get ping"))
+		t.Fatal(testutil.FormatLogLine(2, "failed to get ping"))
 	}
 	return args
 }
@@ -193,7 +193,7 @@
 func (p PingServer) VerifyPingArgs(t *testing.T, username, flagValue, envValue string) PingArgs {
 	args := p.WaitForPingArgs(t)
 	if args.Username != username || args.FlagValue != flagValue || args.EnvValue != envValue {
-		t.Fatalf(testutil.FormatLogLine(2, "got ping args %q, expected [username = %v, flag value = %v, env value = %v]", args, username, flagValue, envValue))
+		t.Fatal(testutil.FormatLogLine(2, "got ping args %q, expected [username = %v, flag value = %v, env value = %v]", args, username, flagValue, envValue))
 	}
 	return args // Useful for tests that want to check other values in the PingArgs result
 }
diff --git a/services/device/deviced/internal/impl/utiltest/helpers.go b/services/device/deviced/internal/impl/utiltest/helpers.go
index 1558095..1026035 100644
--- a/services/device/deviced/internal/impl/utiltest/helpers.go
+++ b/services/device/deviced/internal/impl/utiltest/helpers.go
@@ -125,9 +125,9 @@
 				continue
 			}
 			if err == nil {
-				f.Fatalf(testutil.FormatLogLine(2, "Resolve(%v) succeeded with results %v when it was expected to fail", name, me.Names()))
+				f.Fatal(testutil.FormatLogLine(2, "Resolve(%v) succeeded with results %v when it was expected to fail", name, me.Names()))
 			} else {
-				f.Fatalf(testutil.FormatLogLine(2, "Resolve(%v) failed with error %v, expected error ID %v", name, err, expectErr))
+				f.Fatal(testutil.FormatLogLine(2, "Resolve(%v) failed with error %v, expected error ID %v", name, err, expectErr))
 			}
 		} else {
 			return
@@ -178,7 +178,7 @@
 	// Call the Claim RPC: Skip server authorization because the unclaimed
 	// device presents nothing that can be used to recognize it.
 	if err := device.ClaimableClient(claimableName).Claim(ctx, pairingToken, g, s); err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "%q.Claim(%q) failed: %v [%v]", claimableName, pairingToken, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "%q.Claim(%q) failed: %v [%v]", claimableName, pairingToken, verror.ErrorID(err), err))
 	}
 	// Wait for the device to remount itself with the device service after
 	// being claimed.
@@ -202,43 +202,43 @@
 	s := options.ServerAuthorizer{security.AllowEveryone()}
 	// Call the Claim RPC
 	if err := device.ClaimableClient(name).Claim(ctx, pairingToken, g, s); verror.ErrorID(err) != errID {
-		t.Fatalf(testutil.FormatLogLine(2, "%q.Claim(%q) expected to fail with %v, got %v [%v]", name, pairingToken, errID, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "%q.Claim(%q) expected to fail with %v, got %v [%v]", name, pairingToken, errID, verror.ErrorID(err), err))
 	}
 }
 
 func UpdateDeviceExpectError(t *testing.T, ctx *context.T, name string, errID verror.ID) {
 	if err := DeviceStub(name).Update(ctx); verror.ErrorID(err) != errID {
-		t.Fatalf(testutil.FormatLogLine(2, "%q.Update expected to fail with %v, got %v [%v]", name, errID, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "%q.Update expected to fail with %v, got %v [%v]", name, errID, verror.ErrorID(err), err))
 	}
 }
 
 func UpdateDevice(t *testing.T, ctx *context.T, name string) {
 	if err := DeviceStub(name).Update(ctx); err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "%q.Update() failed: %v [%v]", name, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "%q.Update() failed: %v [%v]", name, verror.ErrorID(err), err))
 	}
 }
 
 func RevertDeviceExpectError(t *testing.T, ctx *context.T, name string, errID verror.ID) {
 	if err := DeviceStub(name).Revert(ctx); verror.ErrorID(err) != errID {
-		t.Fatalf(testutil.FormatLogLine(2, "%q.Revert() expected to fail with %v, got %v [%v]", name, errID, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "%q.Revert() expected to fail with %v, got %v [%v]", name, errID, verror.ErrorID(err), err))
 	}
 }
 
 func RevertDevice(t *testing.T, ctx *context.T, name string) {
 	if err := DeviceStub(name).Revert(ctx); err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "%q.Revert() failed: %v [%v]", name, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "%q.Revert() failed: %v [%v]", name, verror.ErrorID(err), err))
 	}
 }
 
 func KillDevice(t *testing.T, ctx *context.T, name string) {
 	if err := DeviceStub(name).Kill(ctx, killTimeout); err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "%q.Kill(%v) failed: %v [%v]", name, killTimeout, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "%q.Kill(%v) failed: %v [%v]", name, killTimeout, verror.ErrorID(err), err))
 	}
 }
 
 func ShutdownDevice(t *testing.T, ctx *context.T, name string) {
 	if err := DeviceStub(name).Delete(ctx); err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "%q.Delete() failed: %v [%v]", name, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "%q.Delete() failed: %v [%v]", name, verror.ErrorID(err), err))
 	}
 }
 
@@ -277,14 +277,14 @@
 func InstallApp(t *testing.T, ctx *context.T, opt ...interface{}) string {
 	appID, err := AppStub().Install(ctx, MockApplicationRepoName, Ocfg(opt), Opkg(opt))
 	if err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "Install failed: %v [%v]", verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "Install failed: %v [%v]", verror.ErrorID(err), err))
 	}
 	return appID
 }
 
 func InstallAppExpectError(t *testing.T, ctx *context.T, expectedError verror.ID, opt ...interface{}) {
 	if _, err := AppStub().Install(ctx, MockApplicationRepoName, Ocfg(opt), Opkg(opt)); err == nil || verror.ErrorID(err) != expectedError {
-		t.Fatalf(testutil.FormatLogLine(2, "Install expected to fail with %v, got %v [%v]", expectedError, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "Install expected to fail with %v, got %v [%v]", expectedError, verror.ErrorID(err), err))
 	}
 }
 
@@ -342,96 +342,96 @@
 func LaunchApp(t *testing.T, ctx *context.T, appID string) string {
 	instanceID, err := LaunchAppImpl(t, ctx, appID, "forapp")
 	if err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "launching %v failed: %v [%v]", appID, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "launching %v failed: %v [%v]", appID, verror.ErrorID(err), err))
 	}
 	return instanceID
 }
 
 func LaunchAppExpectError(t *testing.T, ctx *context.T, appID string, expectedError verror.ID) {
 	if _, err := LaunchAppImpl(t, ctx, appID, "forapp"); err == nil || verror.ErrorID(err) != expectedError {
-		t.Fatalf(testutil.FormatLogLine(2, "launching %v expected to fail with %v, got %v [%v]", appID, expectedError, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "launching %v expected to fail with %v, got %v [%v]", appID, expectedError, verror.ErrorID(err), err))
 	}
 }
 
 func TerminateApp(t *testing.T, ctx *context.T, appID, instanceID string) {
 	if err := AppStub(appID, instanceID).Kill(ctx, killTimeout); err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "Kill(%v/%v) failed: %v [%v]", appID, instanceID, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "Kill(%v/%v) failed: %v [%v]", appID, instanceID, verror.ErrorID(err), err))
 	}
 	if err := AppStub(appID, instanceID).Delete(ctx); err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "Delete(%v/%v) failed: %v [%v]", appID, instanceID, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "Delete(%v/%v) failed: %v [%v]", appID, instanceID, verror.ErrorID(err), err))
 	}
 }
 
 func KillApp(t *testing.T, ctx *context.T, appID, instanceID string) {
 	if err := AppStub(appID, instanceID).Kill(ctx, killTimeout); err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "Kill(%v/%v) failed: %v [%v]", appID, instanceID, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "Kill(%v/%v) failed: %v [%v]", appID, instanceID, verror.ErrorID(err), err))
 	}
 }
 
 func DeleteApp(t *testing.T, ctx *context.T, appID, instanceID string) {
 	if err := AppStub(appID, instanceID).Delete(ctx); err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "Delete(%v/%v) failed: %v [%v]", appID, instanceID, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "Delete(%v/%v) failed: %v [%v]", appID, instanceID, verror.ErrorID(err), err))
 	}
 }
 
 func RunApp(t *testing.T, ctx *context.T, appID, instanceID string) {
 	if err := AppStub(appID, instanceID).Run(ctx); err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "Run(%v/%v) failed: %v [%v]", appID, instanceID, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "Run(%v/%v) failed: %v [%v]", appID, instanceID, verror.ErrorID(err), err))
 	}
 }
 
 func RunAppExpectError(t *testing.T, ctx *context.T, appID, instanceID string, expectedError verror.ID) {
 	if err := AppStub(appID, instanceID).Run(ctx); err == nil || verror.ErrorID(err) != expectedError {
-		t.Fatalf(testutil.FormatLogLine(2, "Run(%v/%v) expected to fail with %v, got %v [%v]", appID, instanceID, expectedError, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "Run(%v/%v) expected to fail with %v, got %v [%v]", appID, instanceID, expectedError, verror.ErrorID(err), err))
 	}
 }
 
 func UpdateInstance(t *testing.T, ctx *context.T, appID, instanceID string) {
 	if err := AppStub(appID, instanceID).Update(ctx); err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "Update(%v/%v) failed: %v [%v]", appID, instanceID, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "Update(%v/%v) failed: %v [%v]", appID, instanceID, verror.ErrorID(err), err))
 	}
 }
 
 func UpdateInstanceExpectError(t *testing.T, ctx *context.T, appID, instanceID string, expectedError verror.ID) {
 	if err := AppStub(appID, instanceID).Update(ctx); err == nil || verror.ErrorID(err) != expectedError {
-		t.Fatalf(testutil.FormatLogLine(2, "Update(%v/%v) expected to fail with %v, got %v [%v]", appID, instanceID, expectedError, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "Update(%v/%v) expected to fail with %v, got %v [%v]", appID, instanceID, expectedError, verror.ErrorID(err), err))
 	}
 }
 
 func UpdateApp(t *testing.T, ctx *context.T, appID string) {
 	if err := AppStub(appID).Update(ctx); err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "Update(%v) failed: %v [%v]", appID, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "Update(%v) failed: %v [%v]", appID, verror.ErrorID(err), err))
 	}
 }
 
 func UpdateAppExpectError(t *testing.T, ctx *context.T, appID string, expectedError verror.ID) {
 	if err := AppStub(appID).Update(ctx); err == nil || verror.ErrorID(err) != expectedError {
-		t.Fatalf(testutil.FormatLogLine(2, "Update(%v) expected to fail with %v, got %v [%v]", appID, expectedError, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "Update(%v) expected to fail with %v, got %v [%v]", appID, expectedError, verror.ErrorID(err), err))
 	}
 }
 
 func RevertApp(t *testing.T, ctx *context.T, appID string) {
 	if err := AppStub(appID).Revert(ctx); err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "Revert(%v) failed: %v [%v]", appID, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "Revert(%v) failed: %v [%v]", appID, verror.ErrorID(err), err))
 	}
 }
 
 func RevertAppExpectError(t *testing.T, ctx *context.T, appID string, expectedError verror.ID) {
 	if err := AppStub(appID).Revert(ctx); err == nil || verror.ErrorID(err) != expectedError {
-		t.Fatalf(testutil.FormatLogLine(2, "Revert(%v) expected to fail with %v, got %v [%v]", appID, expectedError, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "Revert(%v) expected to fail with %v, got %v [%v]", appID, expectedError, verror.ErrorID(err), err))
 	}
 }
 
 func UninstallApp(t *testing.T, ctx *context.T, appID string) {
 	if err := AppStub(appID).Uninstall(ctx); err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "Uninstall(%v) failed: %v [%v]", appID, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "Uninstall(%v) failed: %v [%v]", appID, verror.ErrorID(err), err))
 	}
 }
 
 func Debug(t *testing.T, ctx *context.T, nameComponents ...string) string {
 	dbg, err := AppStub(nameComponents...).Debug(ctx)
 	if err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "Debug(%v) failed: %v [%v]", nameComponents, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "Debug(%v) failed: %v [%v]", nameComponents, verror.ErrorID(err), err))
 	}
 	return dbg
 }
@@ -439,14 +439,14 @@
 func VerifyDeviceState(t *testing.T, ctx *context.T, want device.InstanceState, name string) string {
 	s, err := DeviceStub(name).Status(ctx)
 	if err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "Status(%v) failed: %v [%v]", name, verror.ErrorID(err), err))
+		t.Fatal(testutil.FormatLogLine(2, "Status(%v) failed: %v [%v]", name, verror.ErrorID(err), err))
 	}
 	status, ok := s.(device.StatusDevice)
 	if !ok {
-		t.Fatalf(testutil.FormatLogLine(2, "Status(%v) returned unknown type: %T", name, s))
+		t.Fatal(testutil.FormatLogLine(2, "Status(%v) returned unknown type: %T", name, s))
 	}
 	if status.Value.State != want {
-		t.Fatalf(testutil.FormatLogLine(2, "Status(%v) state: wanted %v, got %v", name, want, status.Value.State))
+		t.Fatal(testutil.FormatLogLine(2, "Status(%v) state: wanted %v, got %v", name, want, status.Value.State))
 	}
 	return status.Value.Version
 }
@@ -454,7 +454,7 @@
 func Status(t *testing.T, ctx *context.T, nameComponents ...string) device.Status {
 	s, err := AppStub(nameComponents...).Status(ctx)
 	if err != nil {
-		t.Errorf(testutil.FormatLogLine(3, "Status(%v) failed: %v [%v]", nameComponents, verror.ErrorID(err), err))
+		t.Error(testutil.FormatLogLine(3, "Status(%v) failed: %v [%v]", nameComponents, verror.ErrorID(err), err))
 	}
 	return s
 }
@@ -473,10 +473,10 @@
 		state = s.Value.State
 		version = s.Value.Version
 	default:
-		t.Errorf(testutil.FormatLogLine(2, "Status(%v) returned unknown type: %T", nameComponents, s))
+		t.Error(testutil.FormatLogLine(2, "Status(%v) returned unknown type: %T", nameComponents, s))
 	}
 	if state != want {
-		t.Errorf(testutil.FormatLogLine(2, "Status(%v) state: wanted %v (%T), got %v (%T)", nameComponents, want, want, state, state))
+		t.Error(testutil.FormatLogLine(2, "Status(%v) state: wanted %v (%T), got %v (%T)", nameComponents, want, want, state, state))
 	}
 	return version
 }
@@ -549,10 +549,10 @@
 func CtxWithNewPrincipal(t *testing.T, ctx *context.T, idp *testutil.IDProvider, extension string) *context.T {
 	ret, err := v23.WithPrincipal(ctx, testutil.NewPrincipal())
 	if err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "v23.WithPrincipal failed: %v", err))
+		t.Fatal(testutil.FormatLogLine(2, "v23.WithPrincipal failed: %v", err))
 	}
 	if err := idp.Bless(v23.GetPrincipal(ret), extension); err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "idp.Bless(?, %q) failed: %v", extension, err))
+		t.Fatal(testutil.FormatLogLine(2, "idp.Bless(?, %q) failed: %v", extension, err))
 	}
 	return ret
 }
@@ -614,7 +614,7 @@
 	for _, tc := range testcases {
 		results, _, err := testutil.GlobName(ctx, tc.Name, tc.Pattern)
 		if err != nil {
-			t.Errorf(testutil.FormatLogLine(2, "unexpected glob error for (%q, %q): %v", tc.Name, tc.Pattern, err))
+			t.Error(testutil.FormatLogLine(2, "unexpected glob error for (%q, %q): %v", tc.Name, tc.Pattern, err))
 			continue
 		}
 		filteredResults := []string{}
@@ -635,7 +635,7 @@
 		sort.Strings(filteredResults)
 		sort.Strings(tc.Expected)
 		if !reflect.DeepEqual(filteredResults, tc.Expected) {
-			t.Errorf(testutil.FormatLogLine(2, "unexpected result for (%q, %q). Got %q, want %q", tc.Name, tc.Pattern, filteredResults, tc.Expected))
+			t.Error(testutil.FormatLogLine(2, "unexpected result for (%q, %q). Got %q, want %q", tc.Name, tc.Pattern, filteredResults, tc.Expected))
 		}
 	}
 }
@@ -646,7 +646,7 @@
 	for _, tc := range testcases {
 		results, _, _ := testutil.GlobName(ctx, tc.Name, tc.Pattern)
 		if len(results) != 0 {
-			t.Errorf(testutil.FormatLogLine(2, "verifyFailGlob should have failed for %q, %q", tc.Name, tc.Pattern))
+			t.Error(testutil.FormatLogLine(2, "verifyFailGlob should have failed for %q, %q", tc.Name, tc.Pattern))
 		}
 	}
 }
@@ -660,16 +660,16 @@
 	path := naming.Join(prefix...)
 	files, _, err := testutil.GlobName(ctx, path, pattern)
 	if err != nil {
-		t.Errorf(testutil.FormatLogLine(2, "unexpected glob error: %v", err))
+		t.Error(testutil.FormatLogLine(2, "unexpected glob error: %v", err))
 	}
 	if want, got := 4, len(files); got < want {
-		t.Errorf(testutil.FormatLogLine(2, "Unexpected number of matches. Got %d, want at least %d", got, want))
+		t.Error(testutil.FormatLogLine(2, "Unexpected number of matches. Got %d, want at least %d", got, want))
 	}
 	for _, file := range files {
 		name := naming.Join(path, file)
 		c := logreader.LogFileClient(name)
 		if _, err := c.Size(ctx); err != nil {
-			t.Errorf(testutil.FormatLogLine(2, "Size(%q) failed: %v", name, err))
+			t.Error(testutil.FormatLogLine(2, "Size(%q) failed: %v", name, err))
 		}
 	}
 }
@@ -683,16 +683,16 @@
 	objects, _, err := testutil.GlobName(ctx, path, pattern)
 
 	if err != nil {
-		t.Errorf(testutil.FormatLogLine(2, "unexpected glob error: %v", err))
+		t.Error(testutil.FormatLogLine(2, "unexpected glob error: %v", err))
 	}
 	if want, got := 2, len(objects); got != want {
-		t.Errorf(testutil.FormatLogLine(2, "Unexpected number of matches. Got %d, want %d", got, want))
+		t.Error(testutil.FormatLogLine(2, "Unexpected number of matches. Got %d, want %d", got, want))
 	}
 	for _, obj := range objects {
 		name := naming.Join(path, obj)
 		c := stats.StatsClient(name)
 		if _, err := c.Value(ctx); err != nil {
-			t.Errorf(testutil.FormatLogLine(2, "Value(%q) failed: %v", name, err))
+			t.Error(testutil.FormatLogLine(2, "Value(%q) failed: %v", name, err))
 		}
 	}
 }
@@ -704,13 +704,13 @@
 	c := pprof.PProfClient(name)
 	v, err := c.CmdLine(ctx)
 	if err != nil {
-		t.Errorf(testutil.FormatLogLine(2, "CmdLine(%q) failed: %v", name, err))
+		t.Error(testutil.FormatLogLine(2, "CmdLine(%q) failed: %v", name, err))
 	}
 	if len(v) == 0 {
 		t.Errorf("Unexpected empty cmdline: %v", v)
 	}
 	if got, want := filepath.Base(v[0]), appName; got != want {
-		t.Errorf(testutil.FormatLogLine(2, "Unexpected value for argv[0]. Got %v, want %v", got, want))
+		t.Error(testutil.FormatLogLine(2, "Unexpected value for argv[0]. Got %v, want %v", got, want))
 	}
 
 }
@@ -804,6 +804,7 @@
 }
 
 type Fatalist interface {
+	Fatal(args ...interface{})
 	Fatalf(format string, args ...interface{})
 }
 
diff --git a/services/device/deviced/internal/impl/utiltest/modules.go b/services/device/deviced/internal/impl/utiltest/modules.go
index d834eaf..073b837 100644
--- a/services/device/deviced/internal/impl/utiltest/modules.go
+++ b/services/device/deviced/internal/impl/utiltest/modules.go
@@ -144,7 +144,6 @@
 }, "DeviceManagerV10")
 
 func TestMainImpl(m *testing.M) {
-	test.Init()
 	isSuidHelper := len(os.Getenv("V23_SUIDHELPER_TEST")) > 0
 	if modules.IsChildProcess() && !isSuidHelper {
 		if err := modules.Dispatch(); err != nil {
diff --git a/services/groups/groupsd/groupsd_v23_test.go b/services/groups/groupsd/groupsd_v23_test.go
index cc319f0..15f6ced 100644
--- a/services/groups/groupsd/groupsd_v23_test.go
+++ b/services/groups/groupsd/groupsd_v23_test.go
@@ -23,6 +23,7 @@
 	"v.io/v23/verror"
 	"v.io/x/lib/set"
 	"v.io/x/ref/services/groups/groupsd/testdata/kvstore"
+	"v.io/x/ref/test"
 	"v.io/x/ref/test/modules"
 	"v.io/x/ref/test/v23tests"
 )
@@ -160,7 +161,7 @@
 )
 
 var runServer = modules.Register(func(env *modules.Env, args ...string) error {
-	ctx, shutdown := v23.Init()
+	ctx, shutdown := test.V23Init()
 	defer shutdown()
 	// Use a shorter timeout to reduce the test overall runtime as the
 	// permission authorizer will attempt to connect to a non-existing
@@ -188,7 +189,7 @@
 		return fmt.Errorf("unexpected number of arguments: got %v, want at least %v", got, want)
 	}
 	command := args[0]
-	client := kvstore.StoreClient("key-value-store")
+	client := kvstore.StoreClient(kvServerName)
 	switch command {
 	case "get":
 		if got, want := len(args), 2; got != want {
diff --git a/services/internal/binarylib/util_test.go b/services/internal/binarylib/util_test.go
index cd3d785..6e7fc6a 100644
--- a/services/internal/binarylib/util_test.go
+++ b/services/internal/binarylib/util_test.go
@@ -83,7 +83,7 @@
 func prepDirectory(t *testing.T, rootDir string) {
 	path, perm := filepath.Join(rootDir, binarylib.VersionFile), os.FileMode(0600)
 	if err := ioutil.WriteFile(path, []byte(binarylib.Version), perm); err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "WriteFile(%v, %v, %v) failed: %v", path, binarylib.Version, perm, err))
+		t.Fatal(testutil.FormatLogLine(2, "WriteFile(%v, %v, %v) failed: %v", path, binarylib.Version, perm, err))
 	}
 }
 
diff --git a/services/internal/servicetest/modules.go b/services/internal/servicetest/modules.go
index ae44db1..8279ad5 100644
--- a/services/internal/servicetest/modules.go
+++ b/services/internal/servicetest/modules.go
@@ -69,7 +69,7 @@
 func setNSRoots(t *testing.T, ctx *context.T, roots ...string) {
 	ns := v23.GetNamespace(ctx)
 	if err := ns.SetRoots(roots...); err != nil {
-		t.Fatalf(testutil.FormatLogLine(3, "SetRoots(%v) failed with %v", roots, err))
+		t.Fatal(testutil.FormatLogLine(3, "SetRoots(%v) failed with %v", roots, err))
 	}
 }
 
@@ -98,7 +98,7 @@
 		ctx.VI(1).Info("---------------------------------")
 		ctx.VI(1).Info("--(cleaning up shell)------------")
 		if err := sh.Cleanup(os.Stdout, os.Stderr); err != nil {
-			t.Errorf(testutil.FormatLogLine(2, "sh.Cleanup failed with %v", err))
+			t.Error(testutil.FormatLogLine(2, "sh.Cleanup failed with %v", err))
 		}
 		ctx.VI(1).Info("--(done cleaning up shell)-------")
 		setNSRoots(t, ctx, oldNamespaceRoots...)
@@ -125,7 +125,7 @@
 		ctx.VI(1).Info("---------------------------------")
 		ctx.VI(1).Info("--(cleaning up shell)------------")
 		if err := sh.Cleanup(os.Stdout, os.Stderr); err != nil {
-			t.Errorf(testutil.FormatLogLine(2, "sh.Cleanup failed with %v", err))
+			t.Error(testutil.FormatLogLine(2, "sh.Cleanup failed with %v", err))
 		}
 		ctx.VI(1).Info("--(done cleaning up shell)-------")
 	}
@@ -141,7 +141,7 @@
 func RunCommand(t *testing.T, sh *modules.Shell, env []string, prog modules.Program, args ...string) modules.Handle {
 	h, err := sh.StartWithOpts(sh.DefaultStartOpts(), env, prog, args...)
 	if err != nil {
-		t.Fatalf(testutil.FormatLogLine(2, "failed to start %q: %s", prog, err))
+		t.Fatal(testutil.FormatLogLine(2, "failed to start %q: %s", prog, err))
 		return nil
 	}
 	h.SetVerbosity(testing.Verbose())
@@ -155,11 +155,11 @@
 	if len(m) == 1 && len(m[0]) == 2 {
 		pid, err := strconv.Atoi(m[0][1])
 		if err != nil {
-			t.Fatalf(testutil.FormatLogLine(2, "Atoi(%q) failed: %v", m[0][1], err))
+			t.Fatal(testutil.FormatLogLine(2, "Atoi(%q) failed: %v", m[0][1], err))
 		}
 		return pid
 	}
-	t.Fatalf(testutil.FormatLogLine(2, "failed to extract pid: %v", m))
+	t.Fatal(testutil.FormatLogLine(2, "failed to extract pid: %v", m))
 	return 0
 }
 
diff --git a/services/mounttable/mounttablelib/mounttable.go b/services/mounttable/mounttablelib/mounttable.go
index e2e7743..71055a5 100644
--- a/services/mounttable/mounttablelib/mounttable.go
+++ b/services/mounttable/mounttablelib/mounttable.go
@@ -40,6 +40,7 @@
 	errTooManyNodes       = verror.Register(pkgPath+".errTooManyNodes", verror.NoRetry, "{1:}{2:} User has exceeded his node limit {:_}")
 	errNoSharedRoot       = verror.Register(pkgPath+".errNoSharedRoot", verror.NoRetry, "{1:}{2:} Server and User share no blessing root {:_}")
 	errNameElementTooLong = verror.Register(pkgPath+".errNameElementTooLong", verror.NoRetry, "{1:}{2:} path element {3}: too long {:_}")
+	errInvalidPermsFile   = verror.Register(pkgPath+".errInvalidPermsFile", verror.NoRetry, "{1:}{2:} perms file {3} invalid {:_}")
 )
 
 var (
@@ -144,7 +145,8 @@
 		mt.persisting = mt.persist != nil
 	}
 	if err := mt.parsePermFile(ctx, permsFile); err != nil && !os.IsNotExist(err) {
-		return nil, err
+		return nil, verror.New(errInvalidPermsFile, ctx, permsFile, err)
+
 	}
 	return mt, nil
 }
diff --git a/services/mounttable/mounttablelib/mounttable_test.go b/services/mounttable/mounttablelib/mounttable_test.go
index 114fd36..a85a2ce 100644
--- a/services/mounttable/mounttablelib/mounttable_test.go
+++ b/services/mounttable/mounttablelib/mounttable_test.go
@@ -34,10 +34,6 @@
 	"v.io/x/ref/test/testutil"
 )
 
-func init() {
-	test.Init()
-}
-
 // Simulate different processes with different runtimes.
 // rootCtx is the one running the mounttable service.
 const ttlSecs = 60 * 60
@@ -889,7 +885,6 @@
 }
 
 func initTest() (rootCtx *context.T, aliceCtx *context.T, bobCtx *context.T, shutdown v23.Shutdown) {
-	test.Init()
 	ctx, shutdown := test.V23Init()
 	var err error
 	if rootCtx, err = v23.WithPrincipal(ctx, testutil.NewPrincipal("root")); err != nil {
diff --git a/services/proxy/proxyd/proxyd_v23_test.go b/services/proxy/proxyd/proxyd_v23_test.go
index 2915e51..9efde9c 100644
--- a/services/proxy/proxyd/proxyd_v23_test.go
+++ b/services/proxy/proxyd/proxyd_v23_test.go
@@ -57,6 +57,8 @@
 var runServer = modules.Register(func(env *modules.Env, args ...string) error {
 	ctx, shutdown := v23.Init()
 	defer shutdown()
+	// Set the listen spec to listen only via the proxy.
+	ctx = v23.WithListenSpec(ctx, rpc.ListenSpec{Proxy: proxyName})
 	if _, _, err := v23.WithNewServer(ctx, serverName, service{}, security.AllowEveryone()); err != nil {
 		return err
 	}
diff --git a/services/syncbase/server/db_info_test.go b/services/syncbase/server/db_info_test.go
index 386cd4c..0268f82 100644
--- a/services/syncbase/server/db_info_test.go
+++ b/services/syncbase/server/db_info_test.go
@@ -14,7 +14,7 @@
 		dbName  string
 		stKey   string
 	}{
-		{"app1", "db1", "$dbInfo\xfeapp1\xfedb1"},
+		{"app1", "db1", "i\xfeapp1\xfedb1"},
 	}
 	for _, test := range tests {
 		got, want := dbInfoStKey(&app{name: test.appName}, test.dbName), test.stKey
diff --git a/services/syncbase/server/interfaces/sync_types.vdl b/services/syncbase/server/interfaces/sync_types.vdl
index aa79fa5..7b695fb 100644
--- a/services/syncbase/server/interfaces/sync_types.vdl
+++ b/services/syncbase/server/interfaces/sync_types.vdl
@@ -17,14 +17,14 @@
 // TODO(hpucha): These are not final yet. This is an intermediate step.
 
 const (
-        // NodeRec type log record adds a new node in the dag.
-        NodeRec = byte(0)
+	// NodeRec type log record adds a new node in the dag.
+	NodeRec = byte(0)
 
-        // LinkRec type log record adds a new link in the dag. Link records are
-        // added when a conflict is resolved by picking the local or the remote
-        // version as the resolution of a conflict, instead of creating a new
-        // version.
-        LinkRec = byte(1)
+	// LinkRec type log record adds a new link in the dag. Link records are
+	// added when a conflict is resolved by picking the local or the remote
+	// version as the resolution of a conflict, instead of creating a new
+	// version.
+	LinkRec = byte(1)
 )
 
 // PrefixGenVector is the generation vector for a data prefix, which maps each
@@ -44,26 +44,26 @@
 // TODO(hpucha): Add readset/scanset. Look into sending tx metadata only once
 // per transaction.
 type LogRecMetadata struct {
-        // Log related information.
-        Id        uint64 // device id that created the log record.
-	Gen       uint64 // generation number for the log record.
-	RecType   byte   // type of log record.
+	// Log related information.
+	Id      uint64 // device id that created the log record.
+	Gen     uint64 // generation number for the log record.
+	RecType byte   // type of log record.
 
-        // Object related information.
+	// Object related information.
 
 	// Id of the object that was updated. This id is relative to Application
 	// and Database names and is the store key for a particular row in a
 	// table.
-        ObjId       string
-        CurVers     string      // current version number of the object.
-        Parents     []string    // 0, 1 or 2 parent versions that the current version is derived from.
-	UpdTime     time.Time   // timestamp when the update is generated.
-	PermId      string      // id of the permissions object controlling this version.
-	PermVers    string      // current version of the permissions object.
-	Shell       bool        // true when the mutation data is hidden due to permissions.
-	Delete      bool        // indicates whether the update resulted in object being deleted from the store.
-	BatchId     uint64      // unique id of the Batch this update belongs to.
-	BatchCount  uint64      // number of objects in the Batch.
+	ObjId      string
+	CurVers    string    // current version number of the object.
+	Parents    []string  // 0, 1 or 2 parent versions that the current version is derived from.
+	UpdTime    time.Time // timestamp when the update is generated.
+	PermId     string    // id of the permissions object controlling this version.
+	PermVers   string    // current version of the permissions object.
+	Shell      bool      // true when the mutation data is hidden due to permissions.
+	Delete     bool      // indicates whether the update resulted in object being deleted from the store.
+	BatchId    uint64    // unique id of the Batch this update belongs to.
+	BatchCount uint64    // number of objects in the Batch.
 }
 
 // LogRec represents the on-wire representation of an entire log record: its
@@ -107,7 +107,7 @@
 // DeltaReq contains a request to sync either data or syncgroup metadata for a
 // Database.
 type DeltaReq union {
-	Sgs SgDeltaReq
+	Sgs  SgDeltaReq
 	Data DataDeltaReq
 }
 
@@ -116,7 +116,7 @@
 // requesting deltas for that Database.
 type DataDeltaReq struct {
 	AppName string
-	DbName string
+	DbName  string
 	SgIds   set[GroupId]
 	InitVec GenVector
 }
@@ -126,7 +126,7 @@
 // requesting deltas for those syncgroups.
 type SgDeltaReq struct {
 	AppName string
-	DbName string
+	DbName  string
 	InitVec GenVector // Contains a genvector per syncgroup.
 }
 
diff --git a/services/syncbase/server/mojo_impl.go b/services/syncbase/server/mojo_impl.go
index f9b8bb2..d276923 100644
--- a/services/syncbase/server/mojo_impl.go
+++ b/services/syncbase/server/mojo_impl.go
@@ -21,6 +21,8 @@
 	mojom "mojom/syncbase"
 
 	"v.io/v23/context"
+	"v.io/v23/glob"
+	"v.io/v23/naming"
 	"v.io/v23/rpc"
 	"v.io/v23/security/access"
 	"v.io/v23/services/permissions"
@@ -28,6 +30,7 @@
 	nosqlwire "v.io/v23/services/syncbase/nosql"
 	watchwire "v.io/v23/services/watch"
 	"v.io/v23/syncbase/nosql"
+	"v.io/v23/syncbase/util"
 	"v.io/v23/vdl"
 	"v.io/v23/verror"
 	"v.io/v23/vom"
@@ -103,10 +106,10 @@
 	if err != nil {
 		return nosqlwire.SyncgroupSpec{}, err
 	}
-	prefixes := make([]nosqlwire.SyncgroupPrefix, len(mSpec.Prefixes))
+	prefixes := make([]nosqlwire.TableRow, len(mSpec.Prefixes))
 	for i, v := range mSpec.Prefixes {
 		prefixes[i].TableName = v.TableName
-		prefixes[i].RowPrefix = v.RowPrefix
+		prefixes[i].Row = v.Row
 	}
 	return nosqlwire.SyncgroupSpec{
 		Description: mSpec.Description,
@@ -122,10 +125,10 @@
 	if err != nil {
 		return mojom.SyncgroupSpec{}, err
 	}
-	prefixes := make([]mojom.SyncgroupPrefix, len(vSpec.Prefixes))
+	prefixes := make([]mojom.TableRow, len(vSpec.Prefixes))
 	for i, v := range vSpec.Prefixes {
 		prefixes[i].TableName = v.TableName
-		prefixes[i].RowPrefix = v.RowPrefix
+		prefixes[i].Row = v.Row
 	}
 	return mojom.SyncgroupSpec{
 		Description: vSpec.Description,
@@ -186,6 +189,20 @@
 	}
 }
 
+func (m *mojoImpl) getGlobber(ctx *context.T, call rpc.ServerCall, name string) (rpc.ChildrenGlobber, error) {
+	resInt, err := m.lookupAndAuthorize(ctx, call, name)
+	if err != nil {
+		return nil, err
+	}
+	if res, ok := resInt.(rpc.Globber); !ok {
+		return nil, verror.NewErrInternal(ctx)
+	} else if res.Globber() == nil || res.Globber().ChildrenGlobber == nil {
+		return nil, verror.NewErrInternal(ctx)
+	} else {
+		return res.Globber().ChildrenGlobber, nil
+	}
+}
+
 func (m *mojoImpl) getTable(ctx *context.T, call rpc.ServerCall, name string) (nosqlwire.TableServerStubMethods, error) {
 	resInt, err := m.lookupAndAuthorize(ctx, call, name)
 	if err != nil {
@@ -211,6 +228,54 @@
 }
 
 ////////////////////////////////////////
+// Glob utils
+
+func (m *mojoImpl) listChildren(name string) (mojom.Error, []string, error) {
+
+	ctx, call := m.newCtxCall(name, rpc.MethodDesc{
+		Name: "GlobChildren__",
+	})
+	stub, err := m.getGlobber(ctx, call, name)
+	if err != nil {
+		return toMojoError(err), nil, nil
+	}
+	gcsCall := &globChildrenServerCall{call, ctx, make([]string, 0)}
+	g, err := glob.Parse("*")
+	if err != nil {
+		return toMojoError(err), nil, nil
+	}
+	err = stub.GlobChildren__(ctx, gcsCall, g.Head())
+	return toMojoError(err), gcsCall.Results, nil
+}
+
+type globChildrenServerCall struct {
+	rpc.ServerCall
+	ctx     *context.T
+	Results []string
+}
+
+func (g *globChildrenServerCall) SendStream() interface {
+	Send(naming.GlobChildrenReply) error
+} {
+	return g
+}
+
+func (g *globChildrenServerCall) Send(reply naming.GlobChildrenReply) error {
+	if v, ok := reply.(naming.GlobChildrenReplyName); ok {
+		escName := v.Value[strings.LastIndex(v.Value, "/")+1:]
+		// Component names within object names are always escaped. See comment in
+		// server/nosql/dispatcher.go for explanation.
+		name, ok := util.Unescape(escName)
+		if !ok {
+			return verror.New(verror.ErrInternal, g.ctx, escName)
+		}
+		g.Results = append(g.Results, name)
+	}
+
+	return nil
+}
+
+////////////////////////////////////////
 // Service
 
 // TODO(sadovsky): All stub implementations return a nil error (the last return
@@ -248,6 +313,10 @@
 	return toMojoError(err), nil
 }
 
+func (m *mojoImpl) ServiceListApps() (mojom.Error, []string, error) {
+	return m.listChildren("")
+}
+
 ////////////////////////////////////////
 // App
 
@@ -302,6 +371,10 @@
 	return toMojoError(err), mPerms, version, nil
 }
 
+func (m *mojoImpl) AppListDatabases(name string) (mojom.Error, []string, error) {
+	return m.listChildren(name)
+}
+
 func (m *mojoImpl) AppSetPermissions(name string, mPerms mojom.Perms, version string) (mojom.Error, error) {
 	ctx, call := m.newCtxCall(name, methodDesc(permissions.ObjectDesc, "SetPermissions"))
 	stub, err := m.getApp(ctx, call, name)
diff --git a/services/syncbase/server/service.go b/services/syncbase/server/service.go
index 23ab144..f6bd7f6 100644
--- a/services/syncbase/server/service.go
+++ b/services/syncbase/server/service.go
@@ -9,16 +9,22 @@
 // preserve privacy.
 
 import (
+	"bytes"
+	"fmt"
 	"path"
+	"reflect"
 	"sync"
 
+	"v.io/v23"
 	"v.io/v23/context"
 	"v.io/v23/glob"
 	"v.io/v23/rpc"
+	"v.io/v23/security"
 	"v.io/v23/security/access"
 	wire "v.io/v23/services/syncbase"
 	"v.io/v23/verror"
 	"v.io/v23/vom"
+	"v.io/x/lib/vlog"
 	"v.io/x/ref/services/syncbase/clock"
 	"v.io/x/ref/services/syncbase/server/interfaces"
 	"v.io/x/ref/services/syncbase/server/nosql"
@@ -46,7 +52,7 @@
 
 // ServiceOptions configures a service.
 type ServiceOptions struct {
-	// Service-level permissions.
+	// Service-level permissions.  Used only when creating a brand-new storage instance.
 	Perms access.Permissions
 	// Root dir for data storage.
 	RootDir string
@@ -54,12 +60,31 @@
 	Engine string
 }
 
+// defaultPerms returns a permissions object that grants all permissions to the
+// provided blessing patterns.
+func defaultPerms(blessingPatterns []security.BlessingPattern) access.Permissions {
+	perms := access.Permissions{}
+	for _, tag := range access.AllTypicalTags() {
+		for _, bp := range blessingPatterns {
+			perms.Add(bp, string(tag))
+		}
+	}
+	return perms
+}
+
+// PermsString returns a JSON-based string representation of the permissions.
+func PermsString(perms access.Permissions) string {
+	var buf bytes.Buffer
+	if err := access.WritePermissions(&buf, perms); err != nil {
+		vlog.Errorf("Failed to serialize permissions %+v: %v", perms, err)
+		return fmt.Sprintf("[unserializable] %+v", perms)
+	}
+	return buf.String()
+}
+
 // NewService creates a new service instance and returns it.
 // TODO(sadovsky): If possible, close all stores when the server is stopped.
 func NewService(ctx *context.T, call rpc.ServerCall, opts ServiceOptions) (*service, error) {
-	if opts.Perms == nil {
-		return nil, verror.New(verror.ErrInternal, ctx, "perms must be specified")
-	}
 	st, err := util.OpenStore(opts.Engine, path.Join(opts.RootDir, opts.Engine), util.OpenOptions{CreateIfMissing: true, ErrorIfExists: false})
 	if err != nil {
 		return nil, err
@@ -69,13 +94,18 @@
 		opts: opts,
 		apps: map[string]*app{},
 	}
-	data := &serviceData{
-		Perms: opts.Perms,
-	}
-	if err := util.Get(ctx, st, s.stKey(), &serviceData{}); verror.ErrorID(err) != verror.ErrNoExist.ID {
+	var sd serviceData
+	if err := util.Get(ctx, st, s.stKey(), &sd); verror.ErrorID(err) != verror.ErrNoExist.ID {
 		if err != nil {
 			return nil, err
 		}
+		readPerms := sd.Perms.Normalize()
+		if opts.Perms != nil {
+			if givenPerms := opts.Perms.Copy().Normalize(); !reflect.DeepEqual(givenPerms, readPerms) {
+				vlog.Infof("Warning: configured permissions will be ignored: %v", PermsString(givenPerms))
+			}
+		}
+		vlog.Infof("Using persisted permissions: %v", PermsString(readPerms))
 		// Service exists. Initialize in-memory data structures.
 		// Read all apps, populate apps map.
 		aIt := st.Scan(util.ScanPrefixArgs(util.AppPrefix, ""))
@@ -122,7 +152,16 @@
 			return nil, verror.New(verror.ErrInternal, ctx, err)
 		}
 	} else {
+		perms := opts.Perms
 		// Service does not exist.
+		if perms == nil {
+			vlog.Info("Permissions flag not set. Giving local principal all permissions.")
+			perms = defaultPerms(security.DefaultBlessingPatterns(v23.GetPrincipal(ctx)))
+		}
+		vlog.Infof("Using permissions: %v", PermsString(perms))
+		data := &serviceData{
+			Perms: perms,
+		}
 		if err := util.Put(ctx, st, s.stKey(), data); err != nil {
 			return nil, err
 		}
diff --git a/services/syncbase/server/util/constants.go b/services/syncbase/server/util/constants.go
index 272233e..fc0b6a5 100644
--- a/services/syncbase/server/util/constants.go
+++ b/services/syncbase/server/util/constants.go
@@ -10,21 +10,19 @@
 
 // Constants related to storage engine keys.
 // Note, these are persisted and therefore must not be modified.
-// TODO(sadovsky): Use one-byte strings. Changing these prefixes breaks various
-// tests. Tests generally shouldn't depend on the values of these constants.
 const (
-	AppPrefix        = "$app"
-	ClockPrefix      = "$clock"
-	DatabasePrefix   = "$database"
-	DbInfoPrefix     = "$dbInfo"
-	LogPrefix        = "$log"
-	PermsPrefix      = "$perms"
-	PermsIndexPrefix = "$iperms"
-	RowPrefix        = "$row"
-	ServicePrefix    = "$service"
-	SyncPrefix       = "$sync"
-	TablePrefix      = "$table"
-	VersionPrefix    = "$version"
+	AppPrefix        = "a"
+	ClockPrefix      = "c"
+	DatabasePrefix   = "d"
+	DbInfoPrefix     = "i"
+	LogPrefix        = "l"
+	PermsPrefix      = "p"
+	RowPrefix        = "r"
+	ServicePrefix    = "s"
+	TablePrefix      = "t"
+	VersionPrefix    = "v"
+	PermsIndexPrefix = "x"
+	SyncPrefix       = "y"
 
 	// KeyPartSep is a separator for parts of storage engine keys, e.g. separating
 	// table name from row key.
diff --git a/services/syncbase/server/watchable/watcher.go b/services/syncbase/server/watchable/watcher.go
index ab24d81..80db6eb 100644
--- a/services/syncbase/server/watchable/watcher.go
+++ b/services/syncbase/server/watchable/watcher.go
@@ -146,6 +146,7 @@
 }
 
 func parseResumeMarker(resumeMarker string) (uint64, error) {
+	// See logEntryKey() for the structure of a resume marker key.
 	parts := util.SplitNKeyParts(resumeMarker, 2)
 	if len(parts) != 2 {
 		return 0, verror.New(watch.ErrUnknownResumeMarker, nil, resumeMarker)
diff --git a/services/syncbase/syncbased/main.go b/services/syncbase/syncbased/main.go
index ebe3fb7..c6a9e85 100644
--- a/services/syncbase/syncbased/main.go
+++ b/services/syncbase/syncbased/main.go
@@ -10,8 +10,6 @@
 	"v.io/v23"
 	"v.io/v23/context"
 	"v.io/v23/rpc"
-	"v.io/v23/security"
-	"v.io/v23/security/access"
 	"v.io/x/lib/vlog"
 	"v.io/x/ref/lib/security/securityflag"
 	_ "v.io/x/ref/runtime/factories/roaming"
@@ -24,18 +22,6 @@
 	engine  = flag.String("engine", "leveldb", "Storage engine to use. Currently supported: memstore and leveldb.")
 )
 
-// defaultPerms returns a permissions object that grants all permissions to the
-// provided blessing patterns.
-func defaultPerms(blessingPatterns []security.BlessingPattern) access.Permissions {
-	perms := access.Permissions{}
-	for _, tag := range access.AllTypicalTags() {
-		for _, bp := range blessingPatterns {
-			perms.Add(bp, string(tag))
-		}
-	}
-	return perms
-}
-
 // TODO(sadovsky): We return rpc.Server and rpc.Dispatcher as a quick hack to
 // support Mojo.
 func Serve(ctx *context.T) (rpc.Server, rpc.Dispatcher, func()) {
@@ -44,12 +30,8 @@
 		vlog.Fatal("securityflag.PermissionsFromFlag() failed: ", err)
 	}
 	if perms != nil {
-		vlog.Info("Using perms from command line flag.")
-	} else {
-		vlog.Info("Perms flag not set. Giving local principal all perms.")
-		perms = defaultPerms(security.DefaultBlessingPatterns(v23.GetPrincipal(ctx)))
+		vlog.Infof("Read permissions from command line flag: %v", server.PermsString(perms))
 	}
-	vlog.Infof("Perms: %v", perms)
 	service, err := server.NewService(ctx, nil, server.ServiceOptions{
 		Perms:   perms,
 		RootDir: *rootDir,
@@ -65,6 +47,9 @@
 	if err != nil {
 		vlog.Fatal("v23.WithNewDispatchingServer() failed: ", err)
 	}
+	if eps := s.Status().Endpoints; len(eps) > 0 {
+		vlog.Info("Serving as: ", eps[0].Name())
+	}
 	if *name != "" {
 		vlog.Info("Mounted at: ", *name)
 	}
diff --git a/services/syncbase/testutil/util.go b/services/syncbase/testutil/util.go
index a3a8c72..6b7d2c9 100644
--- a/services/syncbase/testutil/util.go
+++ b/services/syncbase/testutil/util.go
@@ -71,11 +71,7 @@
 }
 
 func SetupOrDieCustom(clientSuffix, serverSuffix string, perms access.Permissions) (ctx, clientCtx *context.T, serverName string, rootp security.Principal, cleanup func()) {
-	// TODO(mattr): Instead of SetDefaultHostPort the arguably more correct thing
-	// would be to call v.io/x/ref/test.Init() from the test packages that import
-	// the profile.  Note you should only call that from the package that imports
-	// the profile, not from libraries like this.  Also, it would be better if
-	// v23.Init was test.V23Init().
+	// TODO(mattr): It would be better if v23.Init was test.V23Init().
 	flags.SetDefaultHostPort("127.0.0.1:0")
 	ctx, shutdown := v23.Init()
 
diff --git a/services/syncbase/vsync/constants.go b/services/syncbase/vsync/constants.go
new file mode 100644
index 0000000..ddefac5
--- /dev/null
+++ b/services/syncbase/vsync/constants.go
@@ -0,0 +1,26 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vsync
+
+import "v.io/x/ref/services/syncbase/server/util"
+
+// Key prefixes for sync-related metadata.
+var (
+	dagNodePrefix  = util.JoinKeyParts(util.SyncPrefix, "a")
+	dagHeadPrefix  = util.JoinKeyParts(util.SyncPrefix, "b")
+	dagBatchPrefix = util.JoinKeyParts(util.SyncPrefix, "c")
+	dbssKey        = util.JoinKeyParts(util.SyncPrefix, "d") // database sync state
+	sgIdPrefix     = util.JoinKeyParts(util.SyncPrefix, "i") // syncgroup ID --> syncgroup local state
+	logPrefix      = util.JoinKeyParts(util.SyncPrefix, "l") // log state
+	sgNamePrefix   = util.JoinKeyParts(util.SyncPrefix, "n") // syncgroup name --> syncgroup ID
+	sgDataPrefix   = util.JoinKeyParts(util.SyncPrefix, "s") // syncgroup (ID, version) --> syncgroup synced state
+)
+
+const (
+	// The sync log contains <logPrefix>:<logDataPrefix> records (for data) and
+	// <logPrefix>:<sgoid> records (for syncgroup metadata), where <logDataPrefix>
+	// is defined below, and <sgoid> is <sgDataPrefix>:<GroupId>.
+	logDataPrefix = "d"
+)
diff --git a/services/syncbase/vsync/dag.go b/services/syncbase/vsync/dag.go
index 9739330..55b0043 100644
--- a/services/syncbase/vsync/dag.go
+++ b/services/syncbase/vsync/dag.go
@@ -723,7 +723,7 @@
 
 // nodeKey returns the key used to access a DAG node (oid, version).
 func nodeKey(oid, version string) string {
-	return util.JoinKeyParts(util.SyncPrefix, dagPrefix, "n", oid, version)
+	return util.JoinKeyParts(dagNodePrefix, oid, version)
 }
 
 // setNode stores the DAG node entry.
@@ -769,7 +769,7 @@
 
 // headKey returns the key used to access the DAG object head.
 func headKey(oid string) string {
-	return util.JoinKeyParts(util.SyncPrefix, dagPrefix, "h", oid)
+	return util.JoinKeyParts(dagHeadPrefix, oid)
 }
 
 // setHead stores version as the DAG object head.
@@ -798,7 +798,7 @@
 
 // batchKey returns the key used to access the DAG batch info.
 func batchKey(btid uint64) string {
-	return util.JoinKeyParts(util.SyncPrefix, dagPrefix, "b", fmt.Sprintf("%d", btid))
+	return util.JoinKeyParts(dagBatchPrefix, fmt.Sprintf("%d", btid))
 }
 
 // setBatch stores the DAG batch entry.
diff --git a/services/syncbase/vsync/dag_test.go b/services/syncbase/vsync/dag_test.go
index 8ec653b..05c841e 100644
--- a/services/syncbase/vsync/dag_test.go
+++ b/services/syncbase/vsync/dag_test.go
@@ -331,7 +331,7 @@
 			oid, isConflict, newHead, oldHead, ancestor, errConflict)
 	}
 
-	if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe11\xfe3" {
+	if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "y\xfel\xfed\xfe11\xfe3" {
 		t.Errorf("invalid logrec for newhead object %s:%s: %v", oid, newHead, logrec)
 	}
 
@@ -407,10 +407,10 @@
 			oid, isConflict, newHead, oldHead, ancestor, errConflict)
 	}
 
-	if logrec, err := getLogRecKey(nil, st, oid, oldHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe10\xfe3" {
+	if logrec, err := getLogRecKey(nil, st, oid, oldHead); err != nil || logrec != "y\xfel\xfed\xfe10\xfe3" {
 		t.Errorf("invalid logrec for oldhead object %s:%s: %v", oid, oldHead, logrec)
 	}
-	if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe11\xfe3" {
+	if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "y\xfel\xfed\xfe11\xfe3" {
 		t.Errorf("invalid logrec for newhead object %s:%s: %v", oid, newHead, logrec)
 	}
 
@@ -490,13 +490,13 @@
 			oid, isConflict, newHead, oldHead, ancestor, errConflict)
 	}
 
-	if logrec, err := getLogRecKey(nil, st, oid, oldHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe10\xfe3" {
+	if logrec, err := getLogRecKey(nil, st, oid, oldHead); err != nil || logrec != "y\xfel\xfed\xfe10\xfe3" {
 		t.Errorf("invalid logrec for oldhead object %s:%s: %s", oid, oldHead, logrec)
 	}
-	if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe11\xfe3" {
+	if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "y\xfel\xfed\xfe11\xfe3" {
 		t.Errorf("invalid logrec for newhead object %s:%s: %s", oid, newHead, logrec)
 	}
-	if logrec, err := getLogRecKey(nil, st, oid, ancestor); err != nil || logrec != "$sync\xfelog\xfedata\xfe10\xfe2" {
+	if logrec, err := getLogRecKey(nil, st, oid, ancestor); err != nil || logrec != "y\xfel\xfed\xfe10\xfe2" {
 		t.Errorf("invalid logrec for ancestor object %s:%s: %s", oid, ancestor, logrec)
 	}
 
@@ -583,13 +583,13 @@
 			oid, isConflict, newHead, oldHead, ancestor, errConflict)
 	}
 
-	if logrec, err := getLogRecKey(nil, st, oid, oldHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe10\xfe3" {
+	if logrec, err := getLogRecKey(nil, st, oid, oldHead); err != nil || logrec != "y\xfel\xfed\xfe10\xfe3" {
 		t.Errorf("invalid logrec for oldhead object %s:%s: %s", oid, oldHead, logrec)
 	}
-	if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe11\xfe2" {
+	if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "y\xfel\xfed\xfe11\xfe2" {
 		t.Errorf("invalid logrec for newhead object %s:%s: %s", oid, newHead, logrec)
 	}
-	if logrec, err := getLogRecKey(nil, st, oid, ancestor); err != nil || logrec != "$sync\xfelog\xfedata\xfe10\xfe2" {
+	if logrec, err := getLogRecKey(nil, st, oid, ancestor); err != nil || logrec != "y\xfel\xfed\xfe10\xfe2" {
 		t.Errorf("invalid logrec for ancestor object %s:%s: %s", oid, ancestor, logrec)
 	}
 
@@ -670,10 +670,10 @@
 			oid, isConflict, newHead, oldHead, ancestor, errConflict)
 	}
 
-	if logrec, err := getLogRecKey(nil, st, oid, oldHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe10\xfe3" {
+	if logrec, err := getLogRecKey(nil, st, oid, oldHead); err != nil || logrec != "y\xfel\xfed\xfe10\xfe3" {
 		t.Errorf("invalid logrec for oldhead object %s:%s: %s", oid, oldHead, logrec)
 	}
-	if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe11\xfe3" {
+	if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "y\xfel\xfed\xfe11\xfe3" {
 		t.Errorf("invalid logrec for newhead object %s:%s: %s", oid, newHead, logrec)
 	}
 
@@ -989,11 +989,11 @@
 }
 
 // TestRemoteLinkedConflict tests sync of remote updates that contain linked
-// nodes (conflict resolution by selecting an existing version) on top of a local
-// initial state triggering a local conflict.  An object is created locally and
-// updated twice (v1 -> v2 -> v3).  Another device has along the way learned
+// nodes (conflict resolution by selecting an existing version) on top of a
+// local initial state triggering a local conflict. An object is created locally
+// and updated twice (v1 -> v2 -> v3). Another device has along the way learned
 // about v1, created (v1 -> v4), then learned about (v1 -> v2) and resolved that
-// conflict by selecting v4 over v2.  Now it sends that new info (v4 and the
+// conflict by selecting v4 over v2. Now it sends that new info (v4 and the
 // v4/v2 link) back to the original (local) device which sees a v3/v4 conflict.
 func TestRemoteLinkedConflict(t *testing.T) {
 	svc := createService(t)
diff --git a/services/syncbase/vsync/initiator.go b/services/syncbase/vsync/initiator.go
index d39590a..16b217b 100644
--- a/services/syncbase/vsync/initiator.go
+++ b/services/syncbase/vsync/initiator.go
@@ -1059,7 +1059,7 @@
 			if state.SyncPending {
 				curgv := genvec[rpfx]
 				res := curgv.Compare(state.PendingGenVec)
-				vlog.VI(4).Infof("sync: updateSyncSt:: checking join pending %v, curgv %v, res %v", state.PendingGenVec, curgv, res)
+				vlog.VI(4).Infof("sync: updateSyncSt: checking join pending %v, curgv %v, res %v", state.PendingGenVec, curgv, res)
 				if res >= 0 {
 					state.SyncPending = false
 					if err := setSGIdEntry(ctx, iSt.tx, gid, state); err != nil {
diff --git a/services/syncbase/vsync/initiator_test.go b/services/syncbase/vsync/initiator_test.go
index fd37fb6..41f5272 100644
--- a/services/syncbase/vsync/initiator_test.go
+++ b/services/syncbase/vsync/initiator_test.go
@@ -402,7 +402,7 @@
 		Creator:     "mockCreator",
 		SpecVersion: "etag-0",
 		Spec: wire.SyncgroupSpec{
-			Prefixes:    []wire.SyncgroupPrefix{{TableName: "foo", RowPrefix: ""}, {TableName: "bar", RowPrefix: ""}},
+			Prefixes:    []wire.TableRow{{TableName: "foo", Row: ""}, {TableName: "bar", Row: ""}},
 			MountTables: []string{"1/2/3/4", "5/6/7/8"},
 		},
 		Joiners: map[string]wire.SyncgroupMemberInfo{
diff --git a/services/syncbase/vsync/replay_test.go b/services/syncbase/vsync/replay_test.go
index 2547966..02337fe 100644
--- a/services/syncbase/vsync/replay_test.go
+++ b/services/syncbase/vsync/replay_test.go
@@ -397,28 +397,21 @@
 	if len(parts) != 5 && len(parts) != 7 {
 		return "", 0, 0, verr
 	}
-	if parts[0] != util.SyncPrefix || parts[1] != logPrefix {
+	if util.JoinKeyParts(parts[:2]...) != logPrefix {
 		return "", 0, 0, verr
 	}
 
 	var idStr, genStr, prefix string
-	if parts[2] == logDataPrefix {
-		if len(parts) != 5 {
+	if len(parts) == 5 {
+		if parts[2] != logDataPrefix {
 			return "", 0, 0, verr
 		}
-		prefix = parts[2]
-		idStr = parts[3]
-		genStr = parts[4]
-	} else {
-		if len(parts) != 7 {
+		prefix, idStr, genStr = parts[2], parts[3], parts[4]
+	} else { // len(parts) == 7
+		if _, err := strconv.ParseUint(parts[4], 10, 64); err != nil { // GroupId
 			return "", 0, 0, verr
 		}
-		prefix = util.JoinKeyParts(parts[2:5]...)
-		if _, err := strconv.ParseUint(parts[4], 10, 64); err != nil {
-			return "", 0, 0, verr
-		}
-		idStr = parts[5]
-		genStr = parts[6]
+		prefix, idStr, genStr = util.JoinKeyParts(parts[2:5]...), parts[5], parts[6]
 	}
 	id, err := strconv.ParseUint(idStr, 10, 64)
 	if err != nil {
@@ -433,13 +426,14 @@
 
 func TestSplitLogRecKey(t *testing.T) {
 	invalid := []string{
-		"$sync\xfe100\xfebb",
-		"log\xfe100\xfebb",
-		"$sync\xfelog\xfedata\xfe100\xfexx",
-		"$sync\xfelog\xfedata\xfeaa\xfebb",
-		"$sync\xfelog\xfexx\xfe100\xfebb",
-		"$sync\xfelog\xfedata\xfeaa\xfe100\xfebb",
-		"$sync\xfelog\xfe$sync\xfesgd\xfexx\xfe100\xfebb",
+		"y\xfe100\xfebb",
+		"l\xfe100\xfebb",
+		"y\xfel\xfed\xfe100\xfexx",
+		"y\xfel\xfed\xfeaa\xfe100",
+		"y\xfel\xfex\xfe100\xfe100",
+		"y\xfel\xfed\xfe100",
+		"y\xfel\xfed\xfe100\xfe100\xfebb",
+		"y\xfel\xfey\xfes\xfexx\xfe100\xfe100",
 	}
 
 	for _, k := range invalid {
@@ -454,14 +448,14 @@
 		gen uint64
 	}{
 		{logDataPrefix, 10, 20},
-		{"$sync\xfesgd\xfe2500", 190, 540},
-		{"$sync\xfesgd\xfe4200", 9999, 999999},
+		{"y\xfes\xfe2500", 190, 540},
+		{"y\xfes\xfe4200", 9999, 999999},
 	}
 
 	for _, v := range valid {
 		gotPfx, gotId, gotGen, err := splitLogRecKey(nil, logRecKey(v.pfx, v.id, v.gen))
 		if gotPfx != v.pfx || gotId != v.id || gotGen != v.gen || err != nil {
-			t.Fatalf("failed key conversion pfx got %v want %v, id got %v want %v, gen got %v want %v, err %v", gotPfx, v.pfx, gotId, v.id, gotGen, v.gen, err)
+			t.Fatalf("failed key conversion: pfx got %v want %v, id got %v want %v, gen got %v want %v, err %v", gotPfx, v.pfx, gotId, v.id, gotGen, v.gen, err)
 		}
 	}
 }
diff --git a/services/syncbase/vsync/sync_state.go b/services/syncbase/vsync/sync_state.go
index fe389d1..7b571c4 100644
--- a/services/syncbase/vsync/sync_state.go
+++ b/services/syncbase/vsync/sync_state.go
@@ -35,11 +35,10 @@
 // the data log records, these log records are used to sync syncgroup metadata.
 //
 // The generations for the data mutations and mutations for each syncgroup are
-// in separate spaces. Data mutations in a Database start at gen 1, and
-// grow. Mutations for each syncgroup start at gen 1, and grow. Thus, for the
-// local data log records, the keys are of the form
-// $sync:log:data:<devid>:<gen>, and the keys for local syncgroup log record are
-// of the form $sync:log:<sgid>:<devid>:<gen>.
+// in separate spaces. Data mutations in a Database start at gen 1, and grow.
+// Mutations for each syncgroup start at gen 1, and grow. Thus, for the local
+// data log records, the keys are of the form y:l:d:<devid>:<gen>, and the keys
+// for local syncgroup log record are of the form y:l:<sgoid>:<devid>:<gen>.
 
 // TODO(hpucha): Should this space be separate from the data or not? If it is
 // not, it can provide consistency between data and syncgroup metadata. For
@@ -85,7 +84,7 @@
 	data *localGenInfoInMem // info for data.
 
 	// Info for syncgroups. The key here is the syncgroup oid of the form
-	// $sync:sgd:<group id>. More details in syncgroup.go.
+	// y:s:<groupId>. More details in syncgroup.go.
 	sgs map[string]*localGenInfoInMem
 
 	// Note: Generation vector contains state from remote devices only.
@@ -126,8 +125,8 @@
 // c) republish names in mount tables for all syncgroups.
 // d) in-memory queue of syncgroups to be published.
 func (s *syncService) initSync(ctx *context.T) error {
-	vlog.VI(2).Infof("sync: initSync:: begin")
-	defer vlog.VI(2).Infof("sync: initSync:: end")
+	vlog.VI(2).Infof("sync: initSync: begin")
+	defer vlog.VI(2).Infof("sync: initSync: end")
 	s.syncStateLock.Lock()
 	defer s.syncStateLock.Unlock()
 
@@ -154,7 +153,7 @@
 			dsInMem.sggenvec = ds.SgGenVec
 		}
 
-		vlog.VI(2).Infof("sync: initSync:: initing app %v db %v, dsInMem %v", appName, dbName, dsInMem)
+		vlog.VI(2).Infof("sync: initSync: initing app %v db %v, dsInMem %v", appName, dbName, dsInMem)
 
 		sgCount := 0
 		name := appDbName(appName, dbName)
@@ -201,13 +200,13 @@
 			}
 			info.checkptGen = info.gen - 1
 
-			vlog.VI(4).Infof("sync: initSync:: initing app %v db %v sg %v info %v", appName, dbName, sgoid, info)
+			vlog.VI(4).Infof("sync: initSync: initing app %v db %v sg %v info %v", appName, dbName, sgoid, info)
 
 			return false
 		})
 
 		if sgCount == 0 {
-			vlog.VI(2).Infof("sync: initSync:: initing app %v db %v done (no sgs found)", appName, dbName)
+			vlog.VI(2).Infof("sync: initSync: initing app %v db %v done (no sgs found)", appName, dbName)
 			return false
 		}
 
@@ -231,7 +230,7 @@
 
 		s.syncState[name] = dsInMem
 
-		vlog.VI(2).Infof("sync: initSync:: initing app %v db %v done dsInMem %v (data %v)", appName, dbName, dsInMem, dsInMem.data)
+		vlog.VI(2).Infof("sync: initSync: initing app %v db %v done dsInMem %v (data %v)", appName, dbName, dsInMem, dsInMem.data)
 
 		return false
 	})
@@ -517,20 +516,15 @@
 ////////////////////////////////////////////////////////////
 // Low-level utility functions to access sync state.
 
-// dbSyncStateKey returns the key used to access the sync state of a Database.
-func dbSyncStateKey() string {
-	return util.JoinKeyParts(util.SyncPrefix, dbssPrefix)
-}
-
 // putDbSyncState persists the sync state object for a given Database.
 func putDbSyncState(ctx *context.T, tx store.Transaction, ds *dbSyncState) error {
-	return util.Put(ctx, tx, dbSyncStateKey(), ds)
+	return util.Put(ctx, tx, dbssKey, ds)
 }
 
 // getDbSyncState retrieves the sync state object for a given Database.
 func getDbSyncState(ctx *context.T, st store.StoreReader) (*dbSyncState, error) {
 	var ds dbSyncState
-	if err := util.Get(ctx, st, dbSyncStateKey(), &ds); err != nil {
+	if err := util.Get(ctx, st, dbssKey, &ds); err != nil {
 		return nil, err
 	}
 	return &ds, nil
@@ -541,12 +535,12 @@
 
 // logRecsPerDeviceScanPrefix returns the prefix used to scan log records for a particular device.
 func logRecsPerDeviceScanPrefix(pfx string, id uint64) string {
-	return util.JoinKeyParts(util.SyncPrefix, logPrefix, pfx, fmt.Sprintf("%d", id))
+	return util.JoinKeyParts(logPrefix, pfx, fmt.Sprintf("%d", id))
 }
 
 // logRecKey returns the key used to access a specific log record.
 func logRecKey(pfx string, id, gen uint64) string {
-	return util.JoinKeyParts(util.SyncPrefix, logPrefix, pfx, fmt.Sprintf("%d", id), fmt.Sprintf("%016x", gen))
+	return util.JoinKeyParts(logPrefix, pfx, fmt.Sprintf("%d", id), fmt.Sprintf("%016x", gen))
 }
 
 // hasLogRec returns true if the log record for (devid, gen) exists.
diff --git a/services/syncbase/vsync/syncgroup.go b/services/syncbase/vsync/syncgroup.go
index 243fc01..4ded8b1 100644
--- a/services/syncbase/vsync/syncgroup.go
+++ b/services/syncbase/vsync/syncgroup.go
@@ -125,10 +125,10 @@
 	prefixes := make(map[string]bool, len(spec.Prefixes))
 	for _, p := range spec.Prefixes {
 		if !pubutil.ValidTableName(p.TableName) {
-			return verror.New(verror.ErrBadArg, ctx, fmt.Sprintf("group has a SyncgroupPrefix with invalid table name %q", p.TableName))
+			return verror.New(verror.ErrBadArg, ctx, fmt.Sprintf("group has a TableRow with invalid table name %q", p.TableName))
 		}
-		if p.RowPrefix != "" && !pubutil.ValidRowKey(p.RowPrefix) {
-			return verror.New(verror.ErrBadArg, ctx, fmt.Sprintf("group has a SyncgroupPrefix with invalid row prefix %q", p.RowPrefix))
+		if p.Row != "" && !pubutil.ValidRowKey(p.Row) {
+			return verror.New(verror.ErrBadArg, ctx, fmt.Sprintf("group has a TableRow with invalid row prefix %q", p.Row))
 		}
 		prefixes[toTableRowPrefixStr(p)] = true
 	}
@@ -139,7 +139,7 @@
 }
 
 // samePrefixes returns true if the two sets of prefixes are the same.
-func samePrefixes(pfx1, pfx2 []wire.SyncgroupPrefix) bool {
+func samePrefixes(pfx1, pfx2 []wire.TableRow) bool {
 	pfxMap := make(map[string]uint8)
 	for _, p := range pfx1 {
 		pfxMap[toTableRowPrefixStr(p)] |= 0x01
@@ -419,7 +419,7 @@
 // make forEachSyncgroup() stop the iteration earlier; otherwise the function
 // loops across all Syncgroups in the Database.
 func forEachSyncgroup(st store.StoreReader, callback func(*interfaces.Syncgroup) bool) {
-	stream := st.Scan(util.ScanPrefixArgs(sgNameKeyPrefix, ""))
+	stream := st.Scan(util.ScanPrefixArgs(sgNamePrefix, ""))
 	defer stream.Cancel()
 
 	for stream.Advance() {
@@ -497,47 +497,34 @@
 // relationships.
 // Use the functions above to manipulate syncgroups.
 
-var (
-	// Prefixes used to store the different mappings of a syncgroup:
-	// sgNameKeyPrefix: name --> ID
-	// sgIdKeyPrefix: ID --> syncgroup local state
-	// sgDataKeyPrefix: (ID, version) --> syncgroup data (synchronized)
-	//
-	// Note: as with other syncable objects, the DAG "heads" table contains
-	// a reference to the current syncgroup version, and the DAG "nodes"
-	// table tracks its history of mutations.
-	sgNameKeyPrefix = util.JoinKeyParts(util.SyncPrefix, sgPrefix, "n")
-	sgIdKeyPrefix   = util.JoinKeyParts(util.SyncPrefix, sgPrefix, "i")
-	sgDataKeyPrefix = util.JoinKeyParts(util.SyncPrefix, sgDataPrefix)
-)
+// Note: as with other syncable objects, the DAG "heads" table contains a
+// reference to the current syncgroup version, and the DAG "nodes" table tracks
+// its history of mutations.
 
 // sgNameKey returns the key used to access the syncgroup name entry.
 func sgNameKey(name string) string {
-	return util.JoinKeyParts(sgNameKeyPrefix, name)
+	return util.JoinKeyParts(sgNamePrefix, name)
 }
 
 // sgIdKey returns the key used to access the syncgroup ID entry.
 func sgIdKey(gid interfaces.GroupId) string {
-	return util.JoinKeyParts(sgIdKeyPrefix, fmt.Sprintf("%d", gid))
+	return util.JoinKeyParts(sgIdPrefix, fmt.Sprintf("%d", gid))
 }
 
 // sgOID converts a group id into an oid string.
 func sgOID(gid interfaces.GroupId) string {
-	return util.JoinKeyParts(sgDataKeyPrefix, fmt.Sprintf("%d", gid))
+	return util.JoinKeyParts(sgDataPrefix, fmt.Sprintf("%d", gid))
 }
 
-// sgID is approximately the inverse of sgOID: it converts an oid string into a
-// group id, but assumes that oid is prefixed with util.SyncPrefix (whereas
-// sgOID does not prepend util.SyncPrefix).
-// TODO(hpucha): Add unittests that cover sgOID/sgID (and other such helpers).
-// In CL v.io/c/16919, an incorrect change to the implementation of sgID was
-// only caught by integration tests.
+// sgID is the inverse of sgOID.
+// TODO(hpucha): Add unittests for sgOID/sgID and other such helpers. In CLs
+// v.io/c/16919 and v.io/c/17043, bugs in sgID were only caught by integration
+// tests.
 func sgID(oid string) (interfaces.GroupId, error) {
-	parts := util.SplitKeyParts(oid)
+	parts := util.SplitNKeyParts(oid, 3)
 	if len(parts) != 3 {
 		return 0, fmt.Errorf("invalid sgoid %s", oid)
 	}
-
 	id, err := strconv.ParseUint(parts[2], 10, 64)
 	if err != nil {
 		return 0, err
@@ -547,7 +534,7 @@
 
 // sgDataKey returns the key used to access a version of the syncgroup data.
 func sgDataKey(gid interfaces.GroupId, version string) string {
-	return util.JoinKeyParts(sgDataKeyPrefix, fmt.Sprintf("%d", gid), version)
+	return sgDataKeyByOID(sgOID(gid), version)
 }
 
 // sgDataKeyByOID returns the key used to access a version of the syncgroup
@@ -560,7 +547,7 @@
 func splitSgNameKey(ctx *context.T, key string) (string, error) {
 	// Note that the actual syncgroup name may contain ":" as a separator.
 	// So don't split the key on the separator, instead trim its prefix.
-	prefix := util.JoinKeyParts(sgNameKeyPrefix, "")
+	prefix := util.JoinKeyParts(sgNamePrefix, "")
 	name := strings.TrimPrefix(key, prefix)
 	if name == key {
 		return "", verror.New(verror.ErrInternal, ctx, "invalid sgNamekey", key)
@@ -618,11 +605,7 @@
 
 // getSGDataEntry retrieves the syncgroup data for a given group ID and version.
 func getSGDataEntry(ctx *context.T, st store.StoreReader, gid interfaces.GroupId, version string) (*interfaces.Syncgroup, error) {
-	var sg interfaces.Syncgroup
-	if err := util.Get(ctx, st, sgDataKey(gid, version), &sg); err != nil {
-		return nil, err
-	}
-	return &sg, nil
+	return getSGDataEntryByOID(ctx, st, sgOID(gid), version)
 }
 
 // getSGDataEntryByOID retrieves the syncgroup data for a given group OID and
@@ -845,7 +828,7 @@
 	}
 
 	// Scan all the syncgroup names found in the Database.
-	stream := sn.Scan(util.ScanPrefixArgs(sgNameKeyPrefix, ""))
+	stream := sn.Scan(util.ScanPrefixArgs(sgNamePrefix, ""))
 	var sgNames []string
 	var key []byte
 	for stream.Advance() {
@@ -1082,7 +1065,7 @@
 // be time consuming.  Consider doing it asynchronously and letting the server
 // reply to the client earlier.  However it must happen within the scope of this
 // transaction (and its snapshot view).
-func (sd *syncDatabase) bootstrapSyncgroup(ctx *context.T, tx store.Transaction, sgId interfaces.GroupId, prefixes []wire.SyncgroupPrefix) error {
+func (sd *syncDatabase) bootstrapSyncgroup(ctx *context.T, tx store.Transaction, sgId interfaces.GroupId, prefixes []wire.TableRow) error {
 	if len(prefixes) == 0 {
 		return verror.New(verror.ErrInternal, ctx, "no prefixes specified")
 	}
diff --git a/services/syncbase/vsync/syncgroup_test.go b/services/syncbase/vsync/syncgroup_test.go
index 6975eff..c26ac2a 100644
--- a/services/syncbase/vsync/syncgroup_test.go
+++ b/services/syncbase/vsync/syncgroup_test.go
@@ -69,7 +69,7 @@
 		Creator:     "mockCreator",
 		SpecVersion: "etag-0",
 		Spec: wire.SyncgroupSpec{
-			Prefixes: []wire.SyncgroupPrefix{{TableName: "foo", RowPrefix: ""}, {TableName: "bar", RowPrefix: ""}},
+			Prefixes: []wire.TableRow{{TableName: "foo", Row: ""}, {TableName: "bar", Row: ""}},
 		},
 		Joiners: map[string]wire.SyncgroupMemberInfo{
 			"phone":  wire.SyncgroupMemberInfo{SyncPriority: 10},
@@ -210,7 +210,7 @@
 			Creator:     "mockCreator",
 			SpecVersion: "etag-0",
 			Spec: wire.SyncgroupSpec{
-				Prefixes: []wire.SyncgroupPrefix{{TableName: "foo", RowPrefix: ""}, {TableName: "bar", RowPrefix: ""}},
+				Prefixes: []wire.TableRow{{TableName: "foo", Row: ""}, {TableName: "bar", Row: ""}},
 			},
 			Joiners: map[string]wire.SyncgroupMemberInfo{
 				"phone":  wire.SyncgroupMemberInfo{SyncPriority: 10},
@@ -253,15 +253,15 @@
 	checkBadAddSyncgroup(t, st, sg, "SG w/o Prefixes")
 
 	sg = mkSg()
-	sg.Spec.Prefixes = []wire.SyncgroupPrefix{{TableName: "foo", RowPrefix: ""}, {TableName: "bar", RowPrefix: ""}, {TableName: "foo", RowPrefix: ""}}
+	sg.Spec.Prefixes = []wire.TableRow{{TableName: "foo", Row: ""}, {TableName: "bar", Row: ""}, {TableName: "foo", Row: ""}}
 	checkBadAddSyncgroup(t, st, sg, "SG with duplicate Prefixes")
 
 	sg = mkSg()
-	sg.Spec.Prefixes = []wire.SyncgroupPrefix{{TableName: "", RowPrefix: ""}}
+	sg.Spec.Prefixes = []wire.TableRow{{TableName: "", Row: ""}}
 	checkBadAddSyncgroup(t, st, sg, "SG with invalid (empty) table name")
 
 	sg = mkSg()
-	sg.Spec.Prefixes = []wire.SyncgroupPrefix{{TableName: "a", RowPrefix: "\xfe"}}
+	sg.Spec.Prefixes = []wire.TableRow{{TableName: "a", Row: "\xfe"}}
 	checkBadAddSyncgroup(t, st, sg, "SG with invalid row prefix")
 }
 
@@ -300,7 +300,7 @@
 		Creator:     "mockCreator",
 		SpecVersion: "etag-0",
 		Spec: wire.SyncgroupSpec{
-			Prefixes: []wire.SyncgroupPrefix{{TableName: "foo", RowPrefix: ""}, {TableName: "bar", RowPrefix: ""}},
+			Prefixes: []wire.TableRow{{TableName: "foo", Row: ""}, {TableName: "bar", Row: ""}},
 		},
 		Joiners: map[string]wire.SyncgroupMemberInfo{
 			"phone":  wire.SyncgroupMemberInfo{SyncPriority: 10},
@@ -385,7 +385,7 @@
 		SpecVersion: "etag-1",
 		Spec: wire.SyncgroupSpec{
 			MountTables: []string{"mt1"},
-			Prefixes:    []wire.SyncgroupPrefix{{TableName: "foo", RowPrefix: ""}},
+			Prefixes:    []wire.TableRow{{TableName: "foo", Row: ""}},
 		},
 		Joiners: map[string]wire.SyncgroupMemberInfo{
 			"phone":  wire.SyncgroupMemberInfo{SyncPriority: 10},
@@ -402,7 +402,7 @@
 		SpecVersion: "etag-2",
 		Spec: wire.SyncgroupSpec{
 			MountTables: []string{"mt2", "mt3"},
-			Prefixes:    []wire.SyncgroupPrefix{{TableName: "bar", RowPrefix: ""}},
+			Prefixes:    []wire.TableRow{{TableName: "bar", Row: ""}},
 		},
 		Joiners: map[string]wire.SyncgroupMemberInfo{
 			"tablet": wire.SyncgroupMemberInfo{SyncPriority: 111},
@@ -568,14 +568,14 @@
 
 // TestPrefixCompare tests the prefix comparison utility.
 func TestPrefixCompare(t *testing.T) {
-	mksgps := func(strs []string) []wire.SyncgroupPrefix {
-		res := make([]wire.SyncgroupPrefix, len(strs))
+	mksgps := func(strs []string) []wire.TableRow {
+		res := make([]wire.TableRow, len(strs))
 		for i, v := range strs {
 			parts := strings.SplitN(v, ":", 2)
 			if len(parts) != 2 {
-				t.Fatalf("invalid SyncgroupPrefix string: %s", v)
+				t.Fatalf("invalid TableRow string: %s", v)
 			}
-			res[i] = wire.SyncgroupPrefix{TableName: parts[0], RowPrefix: parts[1]}
+			res[i] = wire.TableRow{TableName: parts[0], Row: parts[1]}
 		}
 		return res
 	}
diff --git a/services/syncbase/vsync/testdata/local-init-00.log.sync b/services/syncbase/vsync/testdata/local-init-00.log.sync
index 59fb856..543536f 100644
--- a/services/syncbase/vsync/testdata/local-init-00.log.sync
+++ b/services/syncbase/vsync/testdata/local-init-00.log.sync
@@ -1,6 +1,6 @@
 # Create an object locally and update it twice (linked-list).
 # The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
 
-addl|tb\xfefoo1|1|||$sync\xfelog\xfedata\xfe10\xfe1|0|1|false
-addl|tb\xfefoo1|2|1||$sync\xfelog\xfedata\xfe10\xfe2|0|1|false
-addl|tb\xfefoo1|3|2||$sync\xfelog\xfedata\xfe10\xfe3|0|1|false
+addl|tb\xfefoo1|1|||y\xfel\xfed\xfe10\xfe1|0|1|false
+addl|tb\xfefoo1|2|1||y\xfel\xfed\xfe10\xfe2|0|1|false
+addl|tb\xfefoo1|3|2||y\xfel\xfed\xfe10\xfe3|0|1|false
diff --git a/services/syncbase/vsync/testdata/remote-conf-00.log.sync b/services/syncbase/vsync/testdata/remote-conf-00.log.sync
index 261f520..985885b 100644
--- a/services/syncbase/vsync/testdata/remote-conf-00.log.sync
+++ b/services/syncbase/vsync/testdata/remote-conf-00.log.sync
@@ -3,6 +3,6 @@
 # it from the local sync at v2, then updated separately).
 # The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
 
-addr|tb\xfefoo1|4|2||$sync\xfelog\xfedata\xfe11\xfe1|0|1|false
-addr|tb\xfefoo1|5|4||$sync\xfelog\xfedata\xfe11\xfe2|0|1|false
-addr|tb\xfefoo1|6|5||$sync\xfelog\xfedata\xfe11\xfe3|0|1|false
+addr|tb\xfefoo1|4|2||y\xfel\xfed\xfe11\xfe1|0|1|false
+addr|tb\xfefoo1|5|4||y\xfel\xfed\xfe11\xfe2|0|1|false
+addr|tb\xfefoo1|6|5||y\xfel\xfed\xfe11\xfe3|0|1|false
diff --git a/services/syncbase/vsync/testdata/remote-conf-01.log.sync b/services/syncbase/vsync/testdata/remote-conf-01.log.sync
index ee1b836..a2f230f 100644
--- a/services/syncbase/vsync/testdata/remote-conf-01.log.sync
+++ b/services/syncbase/vsync/testdata/remote-conf-01.log.sync
@@ -5,6 +5,6 @@
 # sees 2 graft points: v1-v4 and v2-v5.
 # The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
 
-addr|tb\xfefoo1|4|1||$sync\xfelog\xfedata\xfe12\xfe1|0|1|false
-addr|tb\xfefoo1|5|2|4|$sync\xfelog\xfedata\xfe11\xfe1|0|1|false
-addr|tb\xfefoo1|6|5||$sync\xfelog\xfedata\xfe11\xfe2|0|1|false
+addr|tb\xfefoo1|4|1||y\xfel\xfed\xfe12\xfe1|0|1|false
+addr|tb\xfefoo1|5|2|4|y\xfel\xfed\xfe11\xfe1|0|1|false
+addr|tb\xfefoo1|6|5||y\xfel\xfed\xfe11\xfe2|0|1|false
diff --git a/services/syncbase/vsync/testdata/remote-conf-03.log.sync b/services/syncbase/vsync/testdata/remote-conf-03.log.sync
index 8aed9dc..2a68ea7 100644
--- a/services/syncbase/vsync/testdata/remote-conf-03.log.sync
+++ b/services/syncbase/vsync/testdata/remote-conf-03.log.sync
@@ -1,6 +1,6 @@
 # Create the same object remotely from scratch and update it twice (linked-list).
 # The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
 
-addr|tb\xfefoo1|4|||$sync\xfelog\xfedata\xfe11\xfe1|0|1|false
-addr|tb\xfefoo1|5|4||$sync\xfelog\xfedata\xfe11\xfe2|0|1|false
-addr|tb\xfefoo1|6|5||$sync\xfelog\xfedata\xfe11\xfe3|0|1|false
+addr|tb\xfefoo1|4|||y\xfel\xfed\xfe11\xfe1|0|1|false
+addr|tb\xfefoo1|5|4||y\xfel\xfed\xfe11\xfe2|0|1|false
+addr|tb\xfefoo1|6|5||y\xfel\xfed\xfe11\xfe3|0|1|false
diff --git a/services/syncbase/vsync/testdata/remote-conf-link.log.sync b/services/syncbase/vsync/testdata/remote-conf-link.log.sync
index d3be9e4..10d083c 100644
--- a/services/syncbase/vsync/testdata/remote-conf-link.log.sync
+++ b/services/syncbase/vsync/testdata/remote-conf-link.log.sync
@@ -1,5 +1,5 @@
 # Update an object remotely, detect conflict, and bless the local version.
 # The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
 
-addr|tb\xfefoo1|4|1||$sync\xfelog\xfe11\xfe1|0|1|false
-linkr|tb\xfefoo1|4|2||$sync\xfelog\xfe11\xfe2
+addr|tb\xfefoo1|4|1||logrec-01|0|1|false
+linkr|tb\xfefoo1|4|2||logrec-02
diff --git a/services/syncbase/vsync/testdata/remote-init-00.log.sync b/services/syncbase/vsync/testdata/remote-init-00.log.sync
index 1683d08..d77bf85 100644
--- a/services/syncbase/vsync/testdata/remote-init-00.log.sync
+++ b/services/syncbase/vsync/testdata/remote-init-00.log.sync
@@ -2,7 +2,7 @@
 # The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
 # TODO(rdaoud): The above comment is incorrect for the 'genvec' line.
 
-addr|tb\xfefoo1|1|||$sync\xfelog\xfedata\xfe11\xfe1|0|1|false
-addr|tb\xfefoo1|2|1||$sync\xfelog\xfedata\xfe11\xfe2|0|1|false
-addr|tb\xfefoo1|3|2||$sync\xfelog\xfedata\xfe11\xfe3|0|1|false
+addr|tb\xfefoo1|1|||y\xfel\xfed\xfe11\xfe1|0|1|false
+addr|tb\xfefoo1|2|1||y\xfel\xfed\xfe11\xfe2|0|1|false
+addr|tb\xfefoo1|3|2||y\xfel\xfed\xfe11\xfe3|0|1|false
 genvec|tb\xfefoo1|10:0,11:3|tb\xfebar|11:0
diff --git a/services/syncbase/vsync/testdata/remote-noconf-00.log.sync b/services/syncbase/vsync/testdata/remote-noconf-00.log.sync
index e54a5ec..4f0b2e4 100644
--- a/services/syncbase/vsync/testdata/remote-noconf-00.log.sync
+++ b/services/syncbase/vsync/testdata/remote-noconf-00.log.sync
@@ -4,7 +4,7 @@
 # The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
 # TODO(rdaoud): The above comment is incorrect for the 'genvec' line.
 
-addr|tb\xfefoo1|4|3||$sync\xfelog\xfedata\xfe11\xfe1|0|1|false
-addr|tb\xfefoo1|5|4||$sync\xfelog\xfedata\xfe11\xfe2|0|1|false
-addr|tb\xfefoo1|6|5||$sync\xfelog\xfedata\xfe11\xfe3|0|1|false
+addr|tb\xfefoo1|4|3||y\xfel\xfed\xfe11\xfe1|0|1|false
+addr|tb\xfefoo1|5|4||y\xfel\xfed\xfe11\xfe2|0|1|false
+addr|tb\xfefoo1|6|5||y\xfel\xfed\xfe11\xfe3|0|1|false
 genvec|tb\xfefoo1|10:0,11:3|tb\xfebar|11:0
diff --git a/services/syncbase/vsync/testdata/remote-noconf-link-00.log.sync b/services/syncbase/vsync/testdata/remote-noconf-link-00.log.sync
index 11e0df5..c9b6b25 100644
--- a/services/syncbase/vsync/testdata/remote-noconf-link-00.log.sync
+++ b/services/syncbase/vsync/testdata/remote-noconf-link-00.log.sync
@@ -1,5 +1,5 @@
 # Update an object remotely, detect conflict, and bless the remote version.
 # The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
 
-addr|tb\xfefoo1|4|1||$sync\xfelog\xfe11\xfe1|0|1|false
-linkr|tb\xfefoo1|2|4||$sync\xfelog\xfe11\xfe2
+addr|tb\xfefoo1|4|1||logrec-01|0|1|false
+linkr|tb\xfefoo1|2|4||logrec-02
diff --git a/services/syncbase/vsync/testdata/remote-noconf-link-01.log.sync b/services/syncbase/vsync/testdata/remote-noconf-link-01.log.sync
index 840514c..3606a13 100644
--- a/services/syncbase/vsync/testdata/remote-noconf-link-01.log.sync
+++ b/services/syncbase/vsync/testdata/remote-noconf-link-01.log.sync
@@ -1,5 +1,5 @@
 # Update an object remotely, detect conflict, and bless the local version.
 # The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
 
-addr|tb\xfefoo1|4|1||$sync\xfelog\xfe11\xfe1|0|1|false
-linkr|tb\xfefoo1|4|3||$sync\xfelog\xfe11\xfe2
+addr|tb\xfefoo1|4|1||logrec-01|0|1|false
+linkr|tb\xfefoo1|4|3||logrec-02
diff --git a/services/syncbase/vsync/testdata/remote-noconf-link-02.log.sync b/services/syncbase/vsync/testdata/remote-noconf-link-02.log.sync
index cd42033..a1db71a 100644
--- a/services/syncbase/vsync/testdata/remote-noconf-link-02.log.sync
+++ b/services/syncbase/vsync/testdata/remote-noconf-link-02.log.sync
@@ -1,6 +1,6 @@
 # Update an object remotely, detect conflict, and bless the remote version, and continue updating.
 # The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
 
-addr|tb\xfefoo1|4|1||$sync\xfelog\xfe11\xfe1|0|1|false
-linkr|tb\xfefoo1|3|4||$sync\xfelog\xfe11\xfe2
-addr|tb\xfefoo1|5|3||$sync\xfelog\xfe11\xfe3|0|1|false
+addr|tb\xfefoo1|4|1||logrec-01|0|1|false
+linkr|tb\xfefoo1|3|4||logrec-02
+addr|tb\xfefoo1|5|3||logrec-03|0|1|false
diff --git a/services/syncbase/vsync/testdata/remote-noconf-link-repeat.log.sync b/services/syncbase/vsync/testdata/remote-noconf-link-repeat.log.sync
index cbcdd58..6c08648 100644
--- a/services/syncbase/vsync/testdata/remote-noconf-link-repeat.log.sync
+++ b/services/syncbase/vsync/testdata/remote-noconf-link-repeat.log.sync
@@ -1,4 +1,4 @@
 # Resolve the same conflict on two different devices.
 # The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
 
-linkr|tb\xfefoo1|3|4||$sync\xfelog\xfe12\xfe1
+linkr|tb\xfefoo1|3|4||logrec-01
diff --git a/services/syncbase/vsync/types.vdl b/services/syncbase/vsync/types.vdl
index 419d59a..12f8cbf 100644
--- a/services/syncbase/vsync/types.vdl
+++ b/services/syncbase/vsync/types.vdl
@@ -8,17 +8,6 @@
 	"v.io/x/ref/services/syncbase/server/interfaces"
 )
 
-// Key prefixes for sync data structures. All these prefixes are prepended with
-// util.SyncPrefix.
-const (
-	logPrefix     = "log"  // log state.
-	logDataPrefix = "data" // data log state.
-	dbssPrefix    = "dbss" // database sync state.
-	dagPrefix     = "dag"  // dag state.
-	sgPrefix      = "sg"   // local syncgroup state.
-	sgDataPrefix  = "sgd"  // synced syncgroup state.
-)
-
 // syncData represents the persistent state of the sync module.
 type syncData struct {
 	Id uint64
@@ -105,7 +94,7 @@
 // - The total count of batch objects, including non-syncable ones.
 // TODO(rdaoud): add support to track the read and scan sets.
 type batchInfo struct {
-	Objects map[string]string
+	Objects       map[string]string
 	LinkedObjects map[string]string
-	Count   uint64
+	Count         uint64
 }
diff --git a/services/syncbase/vsync/types.vdl.go b/services/syncbase/vsync/types.vdl.go
index c61e061..ec04be8 100644
--- a/services/syncbase/vsync/types.vdl.go
+++ b/services/syncbase/vsync/types.vdl.go
@@ -141,15 +141,3 @@
 	vdl.Register((*dagNode)(nil))
 	vdl.Register((*batchInfo)(nil))
 }
-
-const logPrefix = "log" // log state.
-
-const logDataPrefix = "data" // data log state.
-
-const dbssPrefix = "dbss" // database sync state.
-
-const dagPrefix = "dag" // dag state.
-
-const sgPrefix = "sg" // local syncgroup state.
-
-const sgDataPrefix = "sgd" // synced syncgroup state.
diff --git a/services/syncbase/vsync/util.go b/services/syncbase/vsync/util.go
index 4842a0c..b41887c 100644
--- a/services/syncbase/vsync/util.go
+++ b/services/syncbase/vsync/util.go
@@ -90,17 +90,17 @@
 	return time.Unix(timestamp/nanoPerSec, timestamp%nanoPerSec)
 }
 
-// toTableRowPrefixStr converts a SyncgroupPrefix (tableName-rowPrefix pair) to
-// a string of the form used for storing perms and row data in the underlying
-// storage engine.
-func toTableRowPrefixStr(p wire.SyncgroupPrefix) string {
-	return util.JoinKeyParts(p.TableName, p.RowPrefix)
+// toTableRowPrefixStr converts a TableRow (table name and row key or prefix
+// pair) to a string of the form used for storing perms and row data in the
+// underlying storage engine.
+func toTableRowPrefixStr(p wire.TableRow) string {
+	return util.JoinKeyParts(p.TableName, p.Row)
 }
 
 // toRowKey prepends RowPrefix to what is presumably a "<table>:<row>" string,
 // yielding a storage engine key for a row.
 // TODO(sadovsky): Only used by CR code. Should go away once CR stores table
-// name and row key as separate fields in a "TableAndRow" struct.
+// name and row key as separate fields in a "TableRow" struct.
 func toRowKey(tableRow string) string {
 	return util.JoinKeyParts(util.RowPrefix, tableRow)
 }
diff --git a/services/syncbase/vsync/watcher.go b/services/syncbase/vsync/watcher.go
index c61ff55..e43d8d1 100644
--- a/services/syncbase/vsync/watcher.go
+++ b/services/syncbase/vsync/watcher.go
@@ -203,7 +203,7 @@
 
 	if err != nil {
 		// TODO(rdaoud): don't crash, quarantine this app database.
-		vlog.Fatalf("sync: processWatchLogBatch:: %s, %s: watcher cannot process batch: %v", appName, dbName, err)
+		vlog.Fatalf("sync: processWatchLogBatch: %s, %s: watcher cannot process batch: %v", appName, dbName, err)
 	}
 }
 
diff --git a/services/syncbase/vsync/watcher_test.go b/services/syncbase/vsync/watcher_test.go
index 66a6c55..86d9bb2 100644
--- a/services/syncbase/vsync/watcher_test.go
+++ b/services/syncbase/vsync/watcher_test.go
@@ -306,7 +306,7 @@
 	// batch is an application batch with 3 keys of which 2 are syncable.
 	// The 3rd batch is also a syncgroup snapshot.
 	count := 0
-	start, limit := util.ScanPrefixArgs(util.JoinKeyParts(util.SyncPrefix, "dag", "b"), "")
+	start, limit := util.ScanPrefixArgs(dagBatchPrefix, "")
 	stream := st.Scan(start, limit)
 	for stream.Advance() {
 		count++
@@ -323,6 +323,6 @@
 		}
 	}
 	if count != 1 {
-		t.Errorf("wrong count of batches: got %d instead of 2", count)
+		t.Errorf("wrong count of batches: got %d instead of 1", count)
 	}
 }
diff --git a/test/doc.go b/test/doc.go
index 8352f95..35bee24 100644
--- a/test/doc.go
+++ b/test/doc.go
@@ -4,17 +4,9 @@
 
 // Package test implements initalization for unit and integration tests.
 //
-// Init configures logging, random number generators and other global state.
-// Typical usage in _test.go files:
-//
-// import "v.io/x/ref/test"
-//
-// func TestMain(m *testing.M) {
-//     test.Init()
-//     os.Exit(m.Run())
-// }
-//
 // V23Init can be used within test functions as a safe alternative to v23.Init.
+// It sets up the context so that only localhost ports are used for
+// communication.
 //
 // func TestFoo(t *testing.T) {
 //    ctx, shutdown := test.V23Init()
diff --git a/test/init.go b/test/init.go
index e1812e9..8605a86 100644
--- a/test/init.go
+++ b/test/init.go
@@ -7,15 +7,14 @@
 import (
 	"flag"
 	"os"
-	"runtime"
 	"sync"
 
 	"v.io/v23"
 	"v.io/v23/context"
 	"v.io/v23/naming"
 	"v.io/v23/options"
+	"v.io/v23/rpc"
 	"v.io/x/ref/internal/logger"
-	"v.io/x/ref/lib/flags"
 	"v.io/x/ref/services/mounttable/mounttablelib"
 	"v.io/x/ref/test/testutil"
 )
@@ -36,31 +35,10 @@
 	flag.BoolVar(&IntegrationTestsDebugShellOnError, IntegrationTestsDebugShellOnErrorFlag, false, "Drop into a debug shell if an integration test fails.")
 }
 
-// Init sets up state for running tests: Adjusting GOMAXPROCS,
-// configuring the logging library, setting up the random number generator
-// etc.
-//
-// Doing so requires flags to be parsed, so this function explicitly parses
-// flags. Thus, it is NOT a good idea to call this from the init() function
-// of any module except "main" or _test.go files.
+// Init does nothing.
+// TODO(ashankar): Remove this after updating "jiri-test generate" so that it
+// doesn't add a call to test.Init in generated v23_test.go files.
 func Init() {
-	init := func() {
-		if os.Getenv("GOMAXPROCS") == "" {
-			// Set the number of logical processors to the number of CPUs,
-			// if GOMAXPROCS is not set in the environment.
-			runtime.GOMAXPROCS(runtime.NumCPU())
-		}
-		flags.SetDefaultProtocol("tcp")
-		flags.SetDefaultHostPort("127.0.0.1:0")
-		flags.SetDefaultNamespaceRoot("/127.0.0.1:8101")
-		// At this point all of the flags that we're going to use for
-		// tests must be defined.
-		// This will be the case if this is called from the init()
-		// function of a _test.go file.
-		flag.Parse()
-		logger.Manager(logger.Global()).ConfigureFromFlags()
-	}
-	once.Do(init)
 }
 
 // V23Init initializes the runtime and sets up some convenient infrastructure for tests:
@@ -87,6 +65,7 @@
 // Specific aspects of initialization can be controlled via the params struct.
 func initWithParams(params initParams) (*context.T, v23.Shutdown) {
 	ctx, shutdown := v23.Init()
+	ctx = v23.WithListenSpec(ctx, rpc.ListenSpec{Addrs: rpc.ListenAddrs{{Protocol: "tcp", Address: "127.0.0.1:0"}}})
 	if params.CreatePrincipal {
 		var err error
 		if ctx, err = v23.WithPrincipal(ctx, testutil.NewPrincipal(TestBlessing)); err != nil {
diff --git a/test/modules/modules_test.go b/test/modules/modules_test.go
index cff9ce6..57509fa 100644
--- a/test/modules/modules_test.go
+++ b/test/modules/modules_test.go
@@ -35,7 +35,6 @@
 // We must call TestMain ourselves because using jiri test generate
 // creates an import cycle for this package.
 func TestMain(m *testing.M) {
-	test.Init()
 	modules.DispatchAndExitIfChild()
 	os.Exit(m.Run())
 }
diff --git a/test/v23tests/internal/cached_test.go b/test/v23tests/internal/cached_test.go
index 6f14bfd..99951d7 100644
--- a/test/v23tests/internal/cached_test.go
+++ b/test/v23tests/internal/cached_test.go
@@ -14,7 +14,6 @@
 	"time"
 
 	_ "v.io/x/ref/runtime/factories/generic"
-	"v.io/x/ref/test"
 	"v.io/x/ref/test/v23tests"
 )
 
@@ -90,7 +89,6 @@
 }
 
 func TestMain(m *testing.M) {
-	test.Init()
 	r := m.Run()
 	if len(tmpDir) > 0 {
 		os.RemoveAll(tmpDir)
diff --git a/test/v23tests/v23tests_test.go b/test/v23tests/v23tests_test.go
index c6ece5a..a54d111 100644
--- a/test/v23tests/v23tests_test.go
+++ b/test/v23tests/v23tests_test.go
@@ -21,7 +21,6 @@
 	"v.io/x/ref"
 	"v.io/x/ref/internal/logger"
 	_ "v.io/x/ref/runtime/factories/generic"
-	"v.io/x/ref/test"
 	"v.io/x/ref/test/modules"
 	"v.io/x/ref/test/testutil"
 	"v.io/x/ref/test/v23tests"
@@ -114,10 +113,6 @@
 	return nil
 }, "RunIntegrationTestInChild")
 
-func init() {
-	test.Init()
-}
-
 func TestDeferHandling(t *testing.T) {
 	t.Skip("https://v.io/i/686 -- test is flaky in Go1.5")
 	sh, _ := modules.NewShell(nil, nil, testing.Verbose(), t)
@@ -299,7 +294,7 @@
 		msg := recover().(string)
 		// this, and the tests below are intended to ensure that line #s
 		// are captured and reported correctly.
-		if got, want := msg, "v23tests_test.go:309"; !strings.Contains(got, want) {
+		if got, want := msg, "v23tests_test.go:304"; !strings.Contains(got, want) {
 			t.Fatalf("%q does not contain %q", got, want)
 		}
 		if got, want := msg, "fork/exec /bin/echox: no such file or directory"; !strings.Contains(got, want) {
@@ -321,7 +316,7 @@
 	sh.SetDefaultStartOpts(opts)
 	defer func() {
 		msg := recover().(string)
-		if got, want := msg, "v23tests_test.go:331"; !strings.Contains(got, want) {
+		if got, want := msg, "v23tests_test.go:326"; !strings.Contains(got, want) {
 			t.Fatalf("%q does not contain %q", got, want)
 		}
 		if got, want := msg, "StartWithOpts"; !strings.Contains(got, want) {
@@ -345,7 +340,7 @@
 		if iterations == 0 {
 			t.Fatalf("our sleeper didn't get to run")
 		}
-		if got, want := recover().(string), "v23tests_test.go:352: timed out"; !strings.Contains(got, want) {
+		if got, want := recover().(string), "v23tests_test.go:347: timed out"; !strings.Contains(got, want) {
 			t.Fatalf("%q does not contain %q", got, want)
 		}
 	}()
@@ -367,7 +362,7 @@
 		if iterations != 0 {
 			t.Fatalf("our sleeper got to run")
 		}
-		if got, want := recover().(string), "v23tests_test.go:374: timed out"; !strings.Contains(got, want) {
+		if got, want := recover().(string), "v23tests_test.go:369: timed out"; !strings.Contains(got, want) {
 			t.Fatalf("%q does not contain %q", got, want)
 		}
 	}()