Merge "syncbase/tests: close the test service properly"
diff --git a/lib/discovery/discovery_test.go b/lib/discovery/discovery_test.go
index 28984cd..4718703 100644
--- a/lib/discovery/discovery_test.go
+++ b/lib/discovery/discovery_test.go
@@ -168,12 +168,12 @@
 	// Open a new scan channel and consume expected advertisements first.
 	scan, scanStop, err := startScan(ctx, ds, "v.io/v23/a")
 	if err != nil {
-		t.Error(err)
+		t.Fatal(err)
 	}
 	defer scanStop()
 	update := <-scan
 	if !matchFound([]discovery.Update{update}, services[0]) {
-		t.Errorf("Unexpected scan: %v", update)
+		t.Errorf("unexpected scan: %v", update)
 	}
 
 	// Make sure scan returns the lost advertisement when advertising is stopped.
@@ -181,7 +181,7 @@
 
 	update = <-scan
 	if !matchLost([]discovery.Update{update}, services[0]) {
-		t.Errorf("Unexpected scan: %v", update)
+		t.Errorf("unexpected scan: %v", update)
 	}
 
 	// Also it shouldn't affect the other.
@@ -269,6 +269,68 @@
 	}
 }
 
+func TestMerge(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+
+	p1, p2 := mock.New(), mock.New()
+	ds := idiscovery.NewWithPlugins([]idiscovery.Plugin{p1, p2})
+	defer ds.Close()
+
+	service := discovery.Service{
+		InstanceUuid:  idiscovery.NewInstanceUUID(),
+		InterfaceName: "v.io/v23/a",
+		Addrs:         []string{"/h1:123/x"},
+	}
+	ad := idiscovery.Advertisement{
+		ServiceUuid: idiscovery.NewServiceUUID(service.InterfaceName),
+		Service:     service,
+	}
+
+	scan, scanStop, err := startScan(ctx, ds, "v.io/v23/a")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer scanStop()
+
+	// A plugin returns an advertisement and we should see it.
+	p1.RegisterAdvertisement(ad)
+	update := <-scan
+	if !matchFound([]discovery.Update{update}, service) {
+		t.Errorf("unexpected scan: %v", update)
+	}
+
+	// The other plugin returns the same advertisement, but we should not see it.
+	p2.RegisterAdvertisement(ad)
+	select {
+	case update = <-scan:
+		t.Errorf("unexpected scan: %v", update)
+	case <-time.After(5 * time.Millisecond):
+	}
+
+	// Two plugins update the service, but we should see the update only once,
+	// as a single 'Lost' followed by a 'Found'.
+	service.Addrs = []string{"/h2:123/x"}
+	ad.Service = service
+
+	go func() { p1.RegisterAdvertisement(ad) }()
+	go func() { p2.RegisterAdvertisement(ad) }()
+
+	// Should see 'Lost' first.
+	update = <-scan
+	if !matchLost([]discovery.Update{update}, service) {
+		t.Errorf("unexpected scan: %v", update)
+	}
+	update = <-scan
+	if !matchFound([]discovery.Update{update}, service) {
+		t.Errorf("unexpected scan: %v", update)
+	}
+	select {
+	case update = <-scan:
+		t.Errorf("unexpected scan: %v", update)
+	case <-time.After(5 * time.Millisecond):
+	}
+}
+
 func TestClose(t *testing.T) {
 	ctx, shutdown := test.V23Init()
 	defer shutdown()
diff --git a/lib/discovery/factory/factory.go b/lib/discovery/factory/factory.go
index 30e28fd..4a90c96 100644
--- a/lib/discovery/factory/factory.go
+++ b/lib/discovery/factory/factory.go
@@ -11,8 +11,6 @@
 	"v.io/v23/discovery"
 
 	idiscovery "v.io/x/ref/lib/discovery"
-	"v.io/x/ref/lib/discovery/plugins/ble"
-	"v.io/x/ref/lib/discovery/plugins/mdns"
 )
 
 // New returns a new Discovery instance with the given protocols.
@@ -37,10 +35,8 @@
 
 	// Verify protocols.
 	for _, p := range protocols {
-		switch p {
-		case "mdns", "ble":
-		default:
-			return nil, fmt.Errorf("not supported discovery protocol: %s\n", p)
+		if _, exists := pluginFactories[p]; !exists {
+			return nil, fmt.Errorf("discovery protocol %q is not supported", p)
 		}
 	}
 
@@ -50,20 +46,11 @@
 func newInstance(host string, protocols []string) (discovery.T, error) {
 	plugins := make([]idiscovery.Plugin, 0, len(protocols))
 	for _, p := range protocols {
-		switch p {
-		case "mdns":
-			mdns, err := mdns.New(host)
-			if err != nil {
-				return nil, err
-			}
-			plugins = append(plugins, mdns)
-		case "ble":
-			ble, err := ble.NewPlugin(host)
-			if err != nil {
-				return nil, err
-			}
-			plugins = append(plugins, ble)
+		plugin, err := pluginFactories[p](host)
+		if err != nil {
+			return nil, err
 		}
+		plugins = append(plugins, plugin)
 	}
 	return idiscovery.NewWithPlugins(plugins), nil
 }
diff --git a/lib/discovery/factory/plugins_ios.go b/lib/discovery/factory/plugins_ios.go
new file mode 100644
index 0000000..96d34b4
--- /dev/null
+++ b/lib/discovery/factory/plugins_ios.go
@@ -0,0 +1,19 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ios
+
+// The paypal gatt library, which the ble plugin currently depends on, doesn't
+// work on iOS, so the ble plugin is excluded entirely for now.
+
+package factory
+
+import (
+	"v.io/x/ref/lib/discovery"
+	"v.io/x/ref/lib/discovery/plugins/mdns"
+)
+
+var pluginFactories = map[string]func(host string) (discovery.Plugin, error){
+	"mdns": mdns.New,
+}
diff --git a/lib/discovery/factory/plugins_other.go b/lib/discovery/factory/plugins_other.go
new file mode 100644
index 0000000..fd08c8d
--- /dev/null
+++ b/lib/discovery/factory/plugins_other.go
@@ -0,0 +1,18 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !ios
+
+package factory
+
+import (
+	"v.io/x/ref/lib/discovery"
+	"v.io/x/ref/lib/discovery/plugins/ble"
+	"v.io/x/ref/lib/discovery/plugins/mdns"
+)
+
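+// pluginFactories maps each supported protocol name to a plugin factory.
+// The factory package consults this map both to validate the requested
+// protocols and to instantiate the corresponding plugins.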
+var pluginFactories = map[string]func(host string) (discovery.Plugin, error){
+	"mdns": mdns.New,
+	"ble":  ble.NewPlugin,
+}
diff --git a/lib/discovery/plugins/ble/neighborhood.go b/lib/discovery/plugins/ble/neighborhood.go
index e6a1d37..0cbaea8 100644
--- a/lib/discovery/plugins/ble/neighborhood.go
+++ b/lib/discovery/plugins/ble/neighborhood.go
@@ -126,7 +126,7 @@
 					delete(b.knownNeighbors, entry.id)
 					for id, adv := range entry.advertisements {
 						for _, scanner := range b.scannersById {
-							scanner.handleChange(uuid.Parse(id), adv, nil)
+							scanner.handleLost(uuid.Parse(id), adv)
 						}
 					}
 				}
@@ -425,11 +425,11 @@
 			hash)
 		return
 	}
-	oldAdvs := map[string]*bleAdv{}
+
+	var oldAdvs map[string]*bleAdv
 	if oldEntry, ok := b.knownNeighbors[id]; ok {
 		oldAdvs = oldEntry.advertisements
 	}
-
 	newEntry := &bleCacheEntry{
 		id:             id,
 		hash:           hash,
@@ -438,21 +438,21 @@
 	}
 	b.neighborsHashCache[hash] = newEntry
 	b.knownNeighbors[id] = newEntry
-	for id, oldAdv := range oldAdvs {
-		newValue := services[id]
-		if !reflect.DeepEqual(oldAdv, newValue) {
+
+	for id, newAdv := range services {
+		oldAdv := oldAdvs[id]
+		if !reflect.DeepEqual(oldAdv, newAdv) {
 			uid := uuid.Parse(id)
 			for _, s := range b.scannersByService[id] {
-				s.handleChange(uid, oldAdv, newValue)
+				s.handleUpdate(uid, oldAdv, newAdv)
 			}
 		}
 	}
-
-	for id, newAdv := range newEntry.advertisements {
-		if _, ok := oldAdvs[id]; !ok {
+	for id, oldAdv := range oldAdvs {
+		if _, exist := services[id]; !exist {
 			uid := uuid.Parse(id)
 			for _, s := range b.scannersByService[id] {
-				s.handleChange(uid, nil, newAdv)
+				s.handleLost(uid, oldAdv)
 			}
 		}
 	}
diff --git a/lib/discovery/plugins/ble/scanner.go b/lib/discovery/plugins/ble/scanner.go
index a05e809..3983043 100644
--- a/lib/discovery/plugins/ble/scanner.go
+++ b/lib/discovery/plugins/ble/scanner.go
@@ -21,31 +21,34 @@
 	done bool
 }
 
-func (s *scanner) handleChange(id uuid.UUID, oldAdv *bleAdv, newAdv *bleAdv) error {
+func (s *scanner) handleLost(id uuid.UUID, oldAdv *bleAdv) error {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	if s.done {
 		return nil
 	}
 
-	// TODO(bjornick,jhahn): Revisit this strategy to provide the consistent behavior
-	// for updated advertisements across all plugins.
-	if oldAdv != nil {
-		s.ch <- &discovery.Advertisement{
-			Service: vdiscovery.Service{
-				InstanceUuid: oldAdv.instanceID,
-			},
-			Lost: true,
-		}
+	s.ch <- &discovery.Advertisement{
+		Service: vdiscovery.Service{
+			InstanceUuid: oldAdv.instanceID,
+		},
+		Lost: true,
+	}
+	return nil
+}
+
+func (s *scanner) handleUpdate(id uuid.UUID, oldAdv *bleAdv, newAdv *bleAdv) error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.done {
+		return nil
 	}
 
-	if newAdv != nil {
-		a, err := newAdv.toDiscoveryAdvertisement()
-		if err != nil {
-			return err
-		}
-		s.ch <- a
+	a, err := newAdv.toDiscoveryAdvertisement()
+	if err != nil {
+		return err
 	}
+	s.ch <- a
 	return nil
 }
 
diff --git a/lib/discovery/plugins/mock/mock.go b/lib/discovery/plugins/mock/mock.go
index d12caef..229a11a 100644
--- a/lib/discovery/plugins/mock/mock.go
+++ b/lib/discovery/plugins/mock/mock.go
@@ -5,11 +5,10 @@
 package mock
 
 import (
+	"bytes"
 	"reflect"
 	"sync"
 
-	"github.com/pborman/uuid"
-
 	"v.io/v23/context"
 
 	"v.io/x/ref/lib/discovery"
@@ -20,38 +19,16 @@
 	services map[string][]discovery.Advertisement // GUARDED_BY(mu)
 
 	updated   *sync.Cond
-	updateSeq int
+	updateSeq int // GUARDED_BY(mu)
 }
 
 func (p *plugin) Advertise(ctx *context.T, ad discovery.Advertisement, done func()) error {
-	p.mu.Lock()
-	key := string(ad.ServiceUuid)
-	ads := p.services[key]
-	p.services[key] = append(ads, ad)
-	p.updateSeq++
-	p.mu.Unlock()
-	p.updated.Broadcast()
+	p.RegisterAdvertisement(ad)
 
 	go func() {
 		defer done()
 		<-ctx.Done()
-
-		p.mu.Lock()
-		ads := p.services[key]
-		for i, a := range ads {
-			if uuid.Equal(uuid.UUID(a.Service.InstanceUuid), uuid.UUID(ad.Service.InstanceUuid)) {
-				ads = append(ads[:i], ads[i+1:]...)
-				break
-			}
-		}
-		if len(ads) > 0 {
-			p.services[key] = ads
-		} else {
-			delete(p.services, key)
-		}
-		p.updateSeq++
-		p.mu.Unlock()
-		p.updated.Broadcast()
+		p.UnregisterAdvertisement(ad)
 	}()
 	return nil
 }
@@ -61,12 +38,12 @@
 	go func() {
 		var updateSeqSeen int
 		for {
-			p.updated.L.Lock()
+			p.mu.Lock()
 			for updateSeqSeen == p.updateSeq {
 				p.updated.Wait()
 			}
 			updateSeqSeen = p.updateSeq
-			p.updated.L.Unlock()
+			p.mu.Unlock()
 			select {
 			case rescan <- struct{}{}:
 			case <-ctx.Done():
@@ -128,9 +105,53 @@
 	return nil
 }
 
-func New() discovery.Plugin {
-	return &plugin{
-		services: make(map[string][]discovery.Advertisement),
-		updated:  sync.NewCond(&sync.Mutex{}),
+// RegisterAdvertisement registers an advertisement with the plugin. If an
+// advertisement with the same instance uuid already exists, it is replaced
+// with the given advertisement.
+func (p *plugin) RegisterAdvertisement(ad discovery.Advertisement) {
+	p.mu.Lock()
+	key := string(ad.ServiceUuid)
+	ads := p.services[key]
+	if i := findAd(ads, ad.Service.InstanceUuid); i >= 0 {
+		ads[i] = ad
+	} else {
+		ads = append(ads, ad)
 	}
+	p.services[key] = ads
+	p.updateSeq++
+	p.mu.Unlock()
+	p.updated.Broadcast()
+}
+
+// UnregisterAdvertisement unregisters a previously registered advertisement
+// from the plugin.
+func (p *plugin) UnregisterAdvertisement(ad discovery.Advertisement) {
+	p.mu.Lock()
+	key := string(ad.ServiceUuid)
+	ads := p.services[key]
+	if i := findAd(ads, ad.Service.InstanceUuid); i >= 0 {
+		ads = append(ads[:i], ads[i+1:]...)
+		if len(ads) > 0 {
+			p.services[key] = ads
+		} else {
+			delete(p.services, key)
+		}
+		p.updateSeq++
+	}
+	p.mu.Unlock()
+	p.updated.Broadcast()
+}
+
+func findAd(ads []discovery.Advertisement, instanceUuid []byte) int {
+	for i, ad := range ads {
+		if bytes.Equal(ad.Service.InstanceUuid, instanceUuid) {
+			return i
+		}
+	}
+	return -1
+}
+
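+// New returns a new mock plugin. It is exported as a concrete type so that
+// tests can register and unregister advertisements directly.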
+func New() *plugin {
+	p := &plugin{services: make(map[string][]discovery.Advertisement)}
+	p.updated = sync.NewCond(&p.mu)
+	return p
 }
diff --git a/lib/discovery/scan.go b/lib/discovery/scan.go
index 1f5953f..773f370 100644
--- a/lib/discovery/scan.go
+++ b/lib/discovery/scan.go
@@ -5,6 +5,8 @@
 package discovery
 
 import (
+	"reflect"
+
 	"v.io/v23"
 	"v.io/v23/context"
 	"v.io/v23/discovery"
@@ -56,10 +58,7 @@
 		names = security.BlessingNames(principal, principal.BlessingStore().Default())
 	}
 
-	// A plugin may returns a Lost event with clearing all attributes including encryption
-	// keys. Thus, we have to keep what we've found so far so that we can ignore the Lost
-	// events for instances that we ignored due to permission.
-	found := make(map[string]struct{})
+	found := make(map[string]*Advertisement)
 	for {
 		select {
 		case ad := <-scanCh:
@@ -70,25 +69,44 @@
 				}
 				continue
 			}
-			// TODO(jhahn): Merge scanData based on InstanceUuid.
-			var update discovery.Update
-			id := string(ad.Service.InstanceUuid)
-			if ad.Lost {
-				if _, ok := found[id]; ok {
-					delete(found, id)
-					update = discovery.UpdateLost{discovery.Lost{InstanceUuid: ad.Service.InstanceUuid}}
+			for _, update := range mergeAdvertisement(found, &ad) {
+				select {
+				case updateCh <- update:
+				case <-ctx.Done():
+					return
 				}
-			} else {
-				found[id] = struct{}{}
-				update = discovery.UpdateFound{discovery.Found{Service: ad.Service}}
-			}
-			select {
-			case updateCh <- update:
-			case <-ctx.Done():
-				return
 			}
 		case <-ctx.Done():
 			return
 		}
 	}
 }
+
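+// mergeAdvertisement merges the given advertisement into 'found' and returns
+// the updates, if any, that should be delivered to the scan channel.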
+func mergeAdvertisement(found map[string]*Advertisement, ad *Advertisement) (updates []discovery.Update) {
+	// Multiple plugins may return the same advertisement. We ignore an update
+	// that has already been sent through the update channel.
+	id := string(ad.Service.InstanceUuid)
+	prev := found[id]
+	if ad.Lost {
+		// TODO(jhahn): If some plugins return 'Lost' events for an advertisement update, we may
+		// generate multiple 'Lost' and 'Found' events for the same update. In order to minimize
+		// this flakiness, we may need to delay the handling of 'Lost'.
+		if prev != nil {
+			delete(found, id)
+			updates = []discovery.Update{discovery.UpdateLost{discovery.Lost{InstanceUuid: ad.Service.InstanceUuid}}}
+		}
+	} else {
+		// TODO(jhahn): Need to compare the proximity as well.
+		switch {
+		case prev == nil:
+			updates = []discovery.Update{discovery.UpdateFound{discovery.Found{Service: ad.Service}}}
+		case !reflect.DeepEqual(prev.Service, ad.Service):
+			updates = []discovery.Update{
+				discovery.UpdateLost{discovery.Lost{InstanceUuid: ad.Service.InstanceUuid}},
+				discovery.UpdateFound{discovery.Found{Service: ad.Service}},
+			}
+		}
+		found[id] = ad
+	}
+	return
+}
diff --git a/lib/discovery/util/advertise.go b/lib/discovery/util/advertise.go
new file mode 100644
index 0000000..5835552
--- /dev/null
+++ b/lib/discovery/util/advertise.go
@@ -0,0 +1,91 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package util
+
+import (
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/discovery"
+	"v.io/v23/naming"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+
+	idiscovery "v.io/x/ref/lib/discovery"
+)
+
+// AdvertiseServer advertises the server with the given service. It uses the
+// server's endpoints and the given suffix as the service addresses, and the
+// addresses are updated automatically when the underlying network changes.
+// Advertising continues until the context is canceled or exceeds its
+// deadline; the returned channel is closed when advertising stops.
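+//
+// A minimal usage sketch (the interface name and suffix are illustrative):
+//
+//	service := discovery.Service{InterfaceName: "v.io/v23/a"}
+//	done, err := AdvertiseServer(ctx, server, "suffix", service, nil)
+//	if err != nil {
+//		// handle the error
+//	}
+//	<-done // closed once advertising has stopped (e.g., ctx was canceled)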
+func AdvertiseServer(ctx *context.T, server rpc.Server, suffix string, service discovery.Service, visibility []security.BlessingPattern) (<-chan struct{}, error) {
+	// Assign the instance UUID if not set in order to keep the same instance UUID
+	// when the advertisement is updated.
+	if len(service.InstanceUuid) == 0 {
+		service.InstanceUuid = idiscovery.NewInstanceUUID()
+	}
+
+	watcher := make(chan rpc.NetworkChange, 3)
+	server.WatchNetwork(watcher)
+
+	stop, err := advertise(ctx, service, getEndpoints(server), suffix, visibility)
+	if err != nil {
+		server.UnwatchNetwork(watcher)
+		close(watcher)
+		return nil, err
+	}
+
+	done := make(chan struct{})
+	go func() {
+		for {
+			select {
+			case <-watcher:
+				if stop != nil {
+					stop() // Stop the previous advertisement.
+				}
+				stop, err = advertise(ctx, service, getEndpoints(server), suffix, visibility)
+				if err != nil {
+					ctx.Error(err)
+				}
+			case <-ctx.Done():
+				server.UnwatchNetwork(watcher)
+				close(watcher)
+				close(done)
+				return
+			}
+		}
+	}()
+
+	return done, nil
+}
+
+func advertise(ctx *context.T, service discovery.Service, eps []naming.Endpoint, suffix string, visibility []security.BlessingPattern) (func(), error) {
+	service.Addrs = make([]string, len(eps))
+	for i, ep := range eps {
+		service.Addrs[i] = naming.JoinAddressName(ep.Name(), suffix)
+	}
+	ds := v23.GetDiscovery(ctx)
+	ctx, cancel := context.WithCancel(ctx)
+	done, err := ds.Advertise(ctx, service, visibility)
+	if err != nil {
+		cancel()
+		return nil, err
+	}
+	stop := func() {
+		cancel()
+		<-done
+	}
+	return stop, nil
+}
+
+// TODO(suharshs): Use server.Status().Endpoints only when migrating to a new server.
+func getEndpoints(server rpc.Server) []naming.Endpoint {
+	status := server.Status()
+	eps := status.Endpoints
+	for _, p := range status.Proxies {
+		eps = append(eps, p.Endpoint)
+	}
+	return eps
+}
diff --git a/lib/discovery/util/advertise_test.go b/lib/discovery/util/advertise_test.go
new file mode 100644
index 0000000..b3ff757
--- /dev/null
+++ b/lib/discovery/util/advertise_test.go
@@ -0,0 +1,179 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package util_test
+
+import (
+	"fmt"
+	"reflect"
+	"testing"
+	"time"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/discovery"
+	"v.io/v23/naming"
+	"v.io/v23/rpc"
+
+	idiscovery "v.io/x/ref/lib/discovery"
+	fdiscovery "v.io/x/ref/lib/discovery/factory"
+	"v.io/x/ref/lib/discovery/plugins/mock"
+	"v.io/x/ref/lib/discovery/util"
+	_ "v.io/x/ref/runtime/factories/generic"
+	"v.io/x/ref/test"
+)
+
+type mockServer struct {
+	eps             []naming.Endpoint
+	watcher         chan<- rpc.NetworkChange
+	watcherClosedCh chan struct{}
+}
+
+func (s *mockServer) AddName(string) error     { return nil }
+func (s *mockServer) RemoveName(string)        {}
+func (s *mockServer) Stop() error              { return nil }
+func (s *mockServer) Closed() <-chan struct{}  { return nil }
+func (s *mockServer) Status() rpc.ServerStatus { return rpc.ServerStatus{Endpoints: s.eps} }
+
+func (s *mockServer) WatchNetwork(ch chan<- rpc.NetworkChange) {
+	s.watcher = ch
+	s.watcherClosedCh = make(chan struct{})
+}
+
+func (s *mockServer) UnwatchNetwork(ch chan<- rpc.NetworkChange) {
+	s.watcher = nil
+	close(s.watcherClosedCh)
+}
+
+func (s *mockServer) updateNetwork(eps []naming.Endpoint) {
+	s.eps = eps
+	if s.watcher != nil {
+		s.watcher <- rpc.NetworkChange{Changed: eps}
+	}
+}
+
+func (s *mockServer) watcherClosed() <-chan struct{} {
+	return s.watcherClosedCh
+}
+
+func newMockServer(eps []naming.Endpoint) *mockServer {
+	return &mockServer{eps: eps}
+}
+
+func newEndpoints(addrs ...string) []naming.Endpoint {
+	eps := make([]naming.Endpoint, len(addrs))
+	for i, a := range addrs {
+		eps[i], _ = v23.NewEndpoint(a)
+	}
+	return eps
+}
+
+func TestNetworkChange(t *testing.T) {
+	fdiscovery.InjectDiscovery(idiscovery.NewWithPlugins([]idiscovery.Plugin{mock.New()}))
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+
+	service := discovery.Service{
+		InstanceUuid:  idiscovery.NewInstanceUUID(),
+		InterfaceName: "v.io/v23/a",
+		Attrs:         discovery.Attributes{"a1": "v1"},
+	}
+
+	const suffix = "test"
+	eps := newEndpoints("addr1:123")
+	mock := newMockServer(eps)
+
+	ctx, cancel := context.WithCancel(ctx)
+	util.AdvertiseServer(ctx, mock, suffix, service, nil)
+	if err := scanAndMatch(ctx, service, eps, suffix); err != nil {
+		t.Error(err)
+	}
+
+	tests := [][]naming.Endpoint{
+		newEndpoints("addr2:123", "addr3:456"),
+		newEndpoints("addr4:123"),
+		newEndpoints("addr5:123", "addr6:456"),
+	}
+	for _, eps := range tests {
+		mock.updateNetwork(eps)
+		if err := scanAndMatch(ctx, service, eps, suffix); err != nil {
+			t.Error(err)
+		}
+	}
+
+	// Make sure that the network watcher is unregistered when the context
+	// is canceled.
+	cancel()
+
+	select {
+	case <-mock.watcherClosed():
+	case <-time.After(3 * time.Second):
+		t.Error("watcher not closed")
+	}
+}
+
+func TestNetworkChangeInstanceUuid(t *testing.T) {
+	fdiscovery.InjectDiscovery(idiscovery.NewWithPlugins([]idiscovery.Plugin{mock.New()}))
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+
+	mock := newMockServer(newEndpoints("addr1:123"))
+	util.AdvertiseServer(ctx, mock, "", discovery.Service{InterfaceName: "v.io/v23/a"}, nil)
+
+	// Scan the advertised service.
+	service, err := scan(ctx, 3*time.Second)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(service.InstanceUuid) == 0 {
+		t.Fatal("couldn't scan")
+	}
+
+	// Make sure the instance uuid has not been changed.
+	eps := newEndpoints("addr2:123")
+	mock.updateNetwork(eps)
+	if err := scanAndMatch(ctx, service, eps, ""); err != nil {
+		t.Error(err)
+	}
+}
+
+func scanAndMatch(ctx *context.T, want discovery.Service, eps []naming.Endpoint, suffix string) error {
+	want.Addrs = make([]string, len(eps))
+	for i, ep := range eps {
+		want.Addrs[i] = naming.JoinAddressName(ep.Name(), suffix)
+	}
+
+	const timeout = 3 * time.Second
+
+	var found discovery.Service
+	for now := time.Now(); time.Since(now) < timeout; {
+		var err error
+		found, err = scan(ctx, 5*time.Millisecond)
+		if err != nil {
+			return err
+		}
+		if reflect.DeepEqual(found, want) {
+			return nil
+		}
+	}
+	return fmt.Errorf("match failed; got %v, but wanted %v", found, want)
+}
+
+func scan(ctx *context.T, timeout time.Duration) (discovery.Service, error) {
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	ds := v23.GetDiscovery(ctx)
+	scan, err := ds.Scan(ctx, "")
+	if err != nil {
+		return discovery.Service{}, fmt.Errorf("scan failed: %v", err)
+	}
+
+	select {
+	case update := <-scan:
+		return update.Interface().(discovery.Found).Service, nil
+	case <-time.After(timeout):
+		return discovery.Service{}, nil
+	}
+}
diff --git a/lib/security/audit/principal_test.go b/lib/security/audit/principal_test.go
index 75148b6..48124f4 100644
--- a/lib/security/audit/principal_test.go
+++ b/lib/security/audit/principal_test.go
@@ -25,7 +25,7 @@
 )
 
 func TestAuditingPrincipal(t *testing.T) {
-	ctx, shutdown := test.V23InitAnon()
+	ctx, shutdown := test.V23InitSimple()
 	defer shutdown()
 	var (
 		thirdPartyCaveat, discharge = newThirdPartyCaveatAndDischarge(t)
@@ -129,7 +129,7 @@
 }
 
 func TestUnauditedMethodsOnPrincipal(t *testing.T) {
-	ctx, shutdown := test.V23InitAnon()
+	ctx, shutdown := test.V23InitSimple()
 	defer shutdown()
 	var (
 		auditor = new(mockAuditor)
@@ -199,9 +199,9 @@
 	return d, p.NextError
 }
 
-func (p *mockPrincipal) PublicKey() security.PublicKey               { return p.NextResult.(security.PublicKey) }
-func (p *mockPrincipal) Roots() security.BlessingRoots               { return nil }
-func (p *mockPrincipal) BlessingStore() security.BlessingStore       { return nil }
+func (p *mockPrincipal) PublicKey() security.PublicKey         { return p.NextResult.(security.PublicKey) }
+func (p *mockPrincipal) Roots() security.BlessingRoots         { return nil }
+func (p *mockPrincipal) BlessingStore() security.BlessingStore { return nil }
 
 type mockAuditor struct {
 	LastEntry audit.Entry
diff --git a/lib/security/bcrypter/crypter.go b/lib/security/bcrypter/crypter.go
index 5e93126..5b59829 100644
--- a/lib/security/bcrypter/crypter.go
+++ b/lib/security/bcrypter/crypter.go
@@ -88,15 +88,15 @@
 	keys []ibe.PrivateKey
 }
 
-// Encrypt encrypts the provided fixed-length 'plaintext' so that it can
-// only be decrypted by a crypter possessing a private key for a blessing
-// matching the provided blessing pattern.
+// Encrypt encrypts the provided 'plaintext' so that it can only be decrypted
+// by a crypter possessing a private key for a blessing matching the provided
+// blessing pattern.
 //
 // Encryption makes use of the public parameters of the identity provider
 // that is authoritative on the set of blessings that match the provided
 // blessing pattern. These paramaters must have been previously added to
 // this crypter via AddParams.
-func (c *Crypter) Encrypt(ctx *context.T, forPattern security.BlessingPattern, plaintext *[32]byte) (*Ciphertext, error) {
+func (c *Crypter) Encrypt(ctx *context.T, forPattern security.BlessingPattern, plaintext []byte) (*Ciphertext, error) {
 	if !forPattern.IsValid() {
 		return nil, fmt.Errorf("provided blessing pattern %v is invalid", forPattern)
 	}
@@ -109,8 +109,8 @@
 			continue
 		}
 		for _, ibeParams := range ibeParamsList {
-			ctxt := make([]byte, ibe.CiphertextSize)
-			if err := ibeParams.Encrypt(string(forPattern), (*plaintext)[:], ctxt); err != nil {
+			ctxt := make([]byte, len(plaintext)+ibeParams.CiphertextOverhead())
+			if err := ibeParams.Encrypt(string(forPattern), plaintext, ctxt); err != nil {
 				return nil, NewErrInternal(ctx, err)
 			}
 			paramsId, err := idParams(ibeParams)
@@ -127,16 +127,24 @@
 	return ciphertext, nil
 }
 
+func decrypt(key ibe.PrivateKey, ciphertext []byte) ([]byte, error) {
+	overhead := key.Params().CiphertextOverhead()
+	if got := len(ciphertext); got < overhead {
+		return nil, fmt.Errorf("ciphertext is of size %v bytes, want at least %v bytes", got, overhead)
+	}
+	plaintext := make([]byte, len(ciphertext)-overhead)
+	if err := key.Decrypt(ciphertext, plaintext); err != nil {
+		return nil, err
+	}
+	return plaintext, nil
+}
+
 // Decrypt decrypts the provided 'ciphertext' and returns the corresponding
 // plaintext.
 //
 // Decryption succeeds only if this crypter possesses a private key for a
 // blessing that matches the blessing pattern corresponding to the ciphertext.
-func (c *Crypter) Decrypt(ctx *context.T, ciphertext *Ciphertext) (*[32]byte, error) {
-	var (
-		plaintext [32]byte
-		keyFound  bool
-	)
+func (c *Crypter) Decrypt(ctx *context.T, ciphertext *Ciphertext) ([]byte, error) {
 	c.mu.RLock()
 	defer c.mu.RUnlock()
 	for paramsId, cbytes := range ciphertext.wire.Bytes {
@@ -144,16 +152,13 @@
 			continue
 		} else if key, found := keys[ciphertext.wire.PatternId]; !found {
 			continue
-		} else if err := key.Decrypt(cbytes, plaintext[:]); err != nil {
-			return nil, NewErrInternal(ctx, err)
+		} else if ptxt, err := decrypt(key, cbytes); err != nil {
+			return nil, err
+		} else {
+			return ptxt, nil
 		}
-		keyFound = true
-		break
 	}
-	if !keyFound {
-		return nil, NewErrPrivateKeyNotFound(ctx)
-	}
-	return &plaintext, nil
+	return nil, NewErrPrivateKeyNotFound(ctx)
 }
 
 // AddKey adds the provided private key 'key' and the associated public
diff --git a/lib/security/bcrypter/crypter_test.go b/lib/security/bcrypter/crypter_test.go
index cea179c..7efaf28 100644
--- a/lib/security/bcrypter/crypter_test.go
+++ b/lib/security/bcrypter/crypter_test.go
@@ -25,12 +25,8 @@
 	return NewRoot(name, master)
 }
 
-func newPlaintext() [32]byte {
-	var m [32]byte
-	if n := copy(m[:], []byte("AThirtyTwoBytePieceOfTextThisIs!")); n != len(m) {
-		panic(fmt.Errorf("plaintext string must be %d bytes, not %d", len(m), n))
-	}
-	return m
+func newPlaintext() []byte {
+	return []byte("AThirtyTwoBytePieceOfTextThisIs!")
 }
 
 func TextExtract(t *testing.T) {
@@ -79,7 +75,7 @@
 	)
 
 	// empty encrypter should not be able to encrypt for any pattern.
-	if _, err := encrypter.Encrypt(ctx, "google/youtube/alice", &ptxt); verror.ErrorID(err) != ErrNoParams.ID {
+	if _, err := encrypter.Encrypt(ctx, "google/youtube/alice", ptxt); verror.ErrorID(err) != ErrNoParams.ID {
 		t.Fatalf("Got error %v, wanted error with ID %v", err, ErrNoParams.ID)
 	}
 
@@ -88,7 +84,7 @@
 		t.Fatal(err)
 	}
 	// encrypting for "google/youtube/alice" should now succeed.
-	if _, err := encrypter.Encrypt(ctx, "google/youtube/alice", &ptxt); err != nil {
+	if _, err := encrypter.Encrypt(ctx, "google/youtube/alice", ptxt); err != nil {
 		t.Fatal(err)
 	}
 
@@ -96,7 +92,7 @@
 	// does not have params that are authoritative on all blessings matching
 	// the pattern "google" (the googleYoutube params are authoritative on
 	// blessings matching "google/youtube").
-	if _, err := encrypter.Encrypt(ctx, "google", &ptxt); verror.ErrorID(err) != ErrNoParams.ID {
+	if _, err := encrypter.Encrypt(ctx, "google", ptxt); verror.ErrorID(err) != ErrNoParams.ID {
 		t.Fatalf("Got error %v, wanted error with ID %v", err, ErrNoParams.ID)
 	}
 	// add google's params to the encrypter.
@@ -104,21 +100,21 @@
 		t.Fatal(err)
 	}
 	// encrypting for "google" should now succeed.
-	if _, err := encrypter.Encrypt(ctx, "google", &ptxt); err != nil {
+	if _, err := encrypter.Encrypt(ctx, "google", ptxt); err != nil {
 		t.Fatal(err)
 	}
 
 	// Encryption should succeed for all of the following patterns
 	patterns := []security.BlessingPattern{"google", "google/$", "google/alice", "google/bob", "google/bob/phone"}
 	for _, p := range patterns {
-		if _, err := encrypter.Encrypt(ctx, p, &ptxt); err != nil {
+		if _, err := encrypter.Encrypt(ctx, p, ptxt); err != nil {
 			t.Fatal(err)
 		}
 	}
 
 	// Every ciphertext should be unique.
-	ctxt1, _ := encrypter.Encrypt(ctx, "google", &ptxt)
-	ctxt2, _ := encrypter.Encrypt(ctx, "google", &ptxt)
+	ctxt1, _ := encrypter.Encrypt(ctx, "google", ptxt)
+	ctxt2, _ := encrypter.Encrypt(ctx, "google", ptxt)
 	if reflect.DeepEqual(ctxt1, ctxt2) {
 		t.Fatal("Two Encrypt operations yielded the same Ciphertext")
 	}
@@ -156,7 +152,7 @@
 	addParams(encrypter, google1.Params())
 	addParams(encrypter, google2.Params())
 	// encrypt for the pattern "google/alice"
-	ctxt, err := encrypter.Encrypt(ctx, "google/alice", &ptxt)
+	ctxt, err := encrypter.Encrypt(ctx, "google/alice", ptxt)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -175,8 +171,8 @@
 	// Decryption should now succeed.
 	if got, err := decrypter.Decrypt(ctx, ctxt); err != nil {
 		t.Fatal(err)
-	} else if !bytes.Equal((*got)[:], ptxt[:]) {
-		t.Fatalf("Got plaintext %v, want %v", *got, ptxt)
+	} else if !bytes.Equal(got, ptxt) {
+		t.Fatalf("Got plaintext %v, want %v", got, ptxt)
 	}
 
 	// Decryption should have succeeded had the decrypter only contained
@@ -187,8 +183,8 @@
 	}
 	if got, err := decrypter.Decrypt(ctx, ctxt); err != nil {
 		t.Fatal(err)
-	} else if !bytes.Equal((*got)[:], ptxt[:]) {
-		t.Fatalf("Got plaintext %v, want %v", *got, ptxt)
+	} else if !bytes.Equal(got, ptxt) {
+		t.Fatalf("Got plaintext %v, want %v", got, ptxt)
 	}
 
 	// Decryption should fail for ciphertexts encrypted for the following
@@ -196,7 +192,7 @@
 	// "google/alice/tablet/app" from the root google2).
 	patterns := []security.BlessingPattern{"google/alice/$", "google/bob", "google/alice/tablet/$", "google/bob/tablet"}
 	for _, p := range patterns {
-		ctxt, err := encrypter.Encrypt(ctx, p, &ptxt)
+		ctxt, err := encrypter.Encrypt(ctx, p, ptxt)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -210,14 +206,14 @@
 	// patterns should succeed.
 	patterns = []security.BlessingPattern{"google", "google/$", "google/alice", "google/bob", "google/bob/phone"}
 	for _, p := range patterns {
-		if _, err := decrypter.Encrypt(ctx, p, &ptxt); err != nil {
+		if _, err := decrypter.Encrypt(ctx, p, ptxt); err != nil {
 			t.Fatal(err)
 		}
 	}
 	// But encrypting for the following patterns should fail.
 	patterns = []security.BlessingPattern{"youtube", "youtube/$", "youtube/alice"}
 	for _, p := range patterns {
-		if _, err := decrypter.Encrypt(ctx, p, &ptxt); verror.ErrorID(err) != ErrNoParams.ID {
+		if _, err := decrypter.Encrypt(ctx, p, ptxt); verror.ErrorID(err) != ErrNoParams.ID {
 			t.Fatalf("Got error %v, wanted error with ID %v", err, ErrNoParams.ID)
 		}
 	}
@@ -233,7 +229,7 @@
 		if err := enc.AddParams(ctx, params); err != nil {
 			t.Fatal(err)
 		}
-		ctxt, err := enc.Encrypt(ctx, pattern, &ptxt)
+		ctxt, err := enc.Encrypt(ctx, pattern, ptxt)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -246,8 +242,8 @@
 		}
 		if got, err := dec.Decrypt(ctx, ctxt); err != nil {
 			return err
-		} else if !bytes.Equal((*got)[:], ptxt[:]) {
-			return fmt.Errorf("got plaintext %v, want %v", *got, ptxt)
+		} else if !bytes.Equal(got, ptxt) {
+			return fmt.Errorf("got plaintext %v, want %v", got, ptxt)
 		}
 		return nil
 	}
diff --git a/runtime/factories/android/android.go b/runtime/factories/android/android.go
new file mode 100644
index 0000000..c254cc2
--- /dev/null
+++ b/runtime/factories/android/android.go
@@ -0,0 +1,187 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin
+
+// Package android implements a RuntimeFactory suitable for Android.  It is
+// based on the roaming package.
+//
+// The pubsub.Publisher mechanism is used to communicate networking settings
+// to the rpc.Server implementation of the runtime; the factory publishes the
+// Settings that the server expects.
+package android
+
+import (
+	"flag"
+
+	"v.io/x/lib/netconfig"
+	"v.io/x/lib/netstate"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/flow"
+	"v.io/v23/rpc"
+
+	"v.io/x/ref/internal/logger"
+	dfactory "v.io/x/ref/lib/discovery/factory"
+	"v.io/x/ref/lib/flags"
+	"v.io/x/ref/lib/pubsub"
+	"v.io/x/ref/lib/security/securityflag"
+	"v.io/x/ref/runtime/internal"
+	_ "v.io/x/ref/runtime/internal/flow/protocols/tcp"
+	_ "v.io/x/ref/runtime/internal/flow/protocols/ws"
+	_ "v.io/x/ref/runtime/internal/flow/protocols/wsh"
+	"v.io/x/ref/runtime/internal/lib/appcycle"
+	"v.io/x/ref/runtime/internal/lib/websocket"
+	"v.io/x/ref/runtime/internal/lib/xwebsocket"
+	irpc "v.io/x/ref/runtime/internal/rpc"
+	"v.io/x/ref/runtime/internal/rt"
+	"v.io/x/ref/services/debug/debuglib"
+
+	// TODO(suharshs): Remove these once we switch to the flow protocols.
+	_ "v.io/x/ref/runtime/internal/rpc/protocols/tcp"
+	_ "v.io/x/ref/runtime/internal/rpc/protocols/ws"
+	_ "v.io/x/ref/runtime/internal/rpc/protocols/wsh"
+)
+
+const (
+	SettingsStreamName = "roaming"
+	SettingsStreamDesc = "pubsub stream used by the roaming RuntimeFactory"
+)
+
+var commonFlags *flags.Flags
+
+func init() {
+	v23.RegisterRuntimeFactory(Init)
+	rpc.RegisterUnknownProtocol("wsh", websocket.HybridDial, websocket.HybridResolve, websocket.HybridListener)
+	flow.RegisterUnknownProtocol("wsh", xwebsocket.WSH{})
+	commonFlags = flags.CreateAndRegister(flag.CommandLine, flags.Runtime, flags.Listen)
+}
+
+func Init(ctx *context.T) (v23.Runtime, *context.T, v23.Shutdown, error) {
+	if err := internal.ParseFlagsAndConfigureGlobalLogger(commonFlags); err != nil {
+		return nil, nil, nil, err
+	}
+
+	ac := appcycle.New()
+	discovery, err := dfactory.New()
+	if err != nil {
+		ac.Shutdown()
+		return nil, nil, nil, err
+	}
+
+	lf := commonFlags.ListenFlags()
+	listenSpec := rpc.ListenSpec{
+		Addrs:          rpc.ListenAddrs(lf.Addrs),
+		Proxy:          lf.Proxy,
+		AddressChooser: internal.NewAddressChooser(logger.Global()),
+	}
+	reservedDispatcher := debuglib.NewDispatcher(securityflag.NewAuthorizerOrDie())
+
+	ishutdown := func() {
+		ac.Shutdown()
+		discovery.Close()
+	}
+
+	publisher := pubsub.NewPublisher()
+
+	// Create stream in Init function to avoid a race between any
+	// goroutines started here and consumers started after Init returns.
+	ch := make(chan pubsub.Setting)
+	// TODO(cnicolaou): use stop to shut down this stream when the RuntimeFactory shuts down.
+	stop, err := publisher.CreateStream(SettingsStreamName, SettingsStreamDesc, ch)
+	if err != nil {
+		ishutdown()
+		return nil, nil, nil, err
+	}
+
+	prev, err := netstate.GetAccessibleIPs()
+	if err != nil {
+		ishutdown()
+		return nil, nil, nil, err
+	}
+
+	// Start the dhcp watcher.
+	watcher, err := netconfig.NewNetConfigWatcher()
+	if err != nil {
+		ishutdown()
+		return nil, nil, nil, err
+	}
+
+	cleanupCh := make(chan struct{})
+	watcherCh := make(chan struct{})
+
+	runtime, ctx, shutdown, err := rt.Init(ctx, ac, discovery, nil, &listenSpec, publisher, SettingsStreamName, commonFlags.RuntimeFlags(), reservedDispatcher)
+	if err != nil {
+		ishutdown()
+		return nil, nil, nil, err
+	}
+
+	go monitorNetworkSettingsX(runtime, ctx, watcher, prev, stop, cleanupCh, watcherCh, ch)
+	runtimeFactoryShutdown := func() {
+		close(cleanupCh)
+		ishutdown()
+		shutdown()
+		<-watcherCh
+	}
+	return runtime, ctx, runtimeFactoryShutdown, nil
+}
+
+// monitorNetworkSettingsX monitors network configuration changes and
+// publishes subsequent Settings to reflect any changes detected.
+func monitorNetworkSettingsX(
+	runtime *rt.Runtime,
+	ctx *context.T,
+	watcher netconfig.NetConfigWatcher,
+	prev netstate.AddrList,
+	pubStop, cleanup <-chan struct{},
+	watcherLoop chan<- struct{},
+	ch chan<- pubsub.Setting) {
+	defer close(ch)
+
+	listenSpec := runtime.GetListenSpec(ctx)
+
+	// TODO(cnicolaou): add support for listening on multiple network addresses.
+
+done:
+	for {
+		select {
+		case <-watcher.Channel():
+			netstate.InvalidateCache()
+			cur, err := netstate.GetAccessibleIPs()
+			if err != nil {
+				ctx.Errorf("failed to read network state: %s", err)
+				continue
+			}
+			removed := netstate.FindRemoved(prev, cur)
+			added := netstate.FindAdded(prev, cur)
+			ctx.VI(2).Infof("Previous: %d: %s", len(prev), prev)
+			ctx.VI(2).Infof("Current : %d: %s", len(cur), cur)
+			ctx.VI(2).Infof("Added   : %d: %s", len(added), added)
+			ctx.VI(2).Infof("Removed : %d: %s", len(removed), removed)
+			if len(removed) == 0 && len(added) == 0 {
+				ctx.VI(2).Infof("Network event that led to no address changes since our last 'baseline'")
+				continue
+			}
+			if len(removed) > 0 {
+				ctx.VI(2).Infof("Sending removed: %s", removed)
+				ch <- irpc.NewRmAddrsSetting(removed.AsNetAddrs())
+			}
+			// We will always send the best currently available address.
+			if chosen, err := listenSpec.AddressChooser.ChooseAddresses(listenSpec.Addrs[0].Protocol, cur.AsNetAddrs()); err == nil && chosen != nil {
+				ctx.VI(2).Infof("Sending added and chosen: %s", chosen)
+				ch <- irpc.NewNewAddrsSetting(chosen)
+			} else {
+				ctx.VI(2).Infof("Ignoring added %s", added)
+			}
+			prev = cur
+		case <-cleanup:
+			break done
+		case <-pubStop:
+			goto done
+		}
+	}
+	watcher.Stop()
+	close(watcherLoop)
+}
diff --git a/runtime/factories/android/proxy.go b/runtime/factories/android/proxy.go
new file mode 100644
index 0000000..3b4f880
--- /dev/null
+++ b/runtime/factories/android/proxy.go
@@ -0,0 +1,23 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package android
+
+import (
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+
+	"v.io/x/ref/runtime/internal/rpc/stream/proxy"
+)
+
+// NewProxy creates a new Proxy that listens for network connections on the provided
+// (network, address) pair and routes VC traffic between accepted connections.
+//
+// auth encapsulates the authorization policy of the proxy, i.e., which
+// servers it is willing to proxy for.
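+//
+// A minimal usage sketch (the proxy name is illustrative; a nil authorizer
+// is assumed to fall back to the default policy):
+//
+//	shutdown, ep, err := NewProxy(ctx, v23.GetListenSpec(ctx), nil, "proxy")
+//	if err != nil {
+//		// handle the error
+//	}
+//	defer shutdown()
+//	ctx.Infof("proxy listening on %v", ep)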
+func NewProxy(ctx *context.T, spec rpc.ListenSpec, auth security.Authorizer, names ...string) (shutdown func(), endpoint naming.Endpoint, err error) {
+	return proxy.New(ctx, spec, auth, names...)
+}
diff --git a/runtime/internal/address_chooser.go b/runtime/internal/address_chooser.go
index 500465c..a317972 100644
--- a/runtime/internal/address_chooser.go
+++ b/runtime/internal/address_chooser.go
@@ -21,7 +21,9 @@
 
 func (c *addressChooser) setGCEPublicAddress() {
 	c.gcePublicAddressOnce.Do(func() {
-		c.gcePublicAddress = GCEPublicAddress(c.logger)
+		if ipaddr := GCEPublicAddress(c.logger); ipaddr != nil {
+			c.gcePublicAddress = ipaddr
+		}
 	})
 }
 
diff --git a/runtime/internal/flow/conn/auth.go b/runtime/internal/flow/conn/auth.go
index 16ac858..030ce8e 100644
--- a/runtime/internal/flow/conn/auth.go
+++ b/runtime/internal/flow/conn/auth.go
@@ -85,7 +85,10 @@
 	// Resolve of the discharge server name.  The two resolve calls may be to
 	// the same mounttable.
 	c.loopWG.Add(1)
-	go c.refreshDischarges(ctx)
+	go func() {
+		c.refreshDischarges(ctx)
+		c.loopWG.Done()
+	}()
 	return nil
 }
 
@@ -105,7 +108,6 @@
 	lAuth := &message.Auth{
 		ChannelBinding: signedBinding,
 	}
-	c.loopWG.Add(1)
 	if lAuth.BlessingsKey, lAuth.DischargeKey, err = c.refreshDischarges(ctx); err != nil {
 		return err
 	}
@@ -211,7 +213,6 @@
 }
 
 func (c *Conn) refreshDischarges(ctx *context.T) (bkey, dkey uint64, err error) {
-	defer c.loopWG.Done()
 	dis := slib.PrepareDischarges(ctx, c.lBlessings,
 		security.DischargeImpetus{}, time.Minute)
 	// Schedule the next update.
@@ -221,6 +222,7 @@
 		c.loopWG.Add(1)
 		c.dischargeTimer = time.AfterFunc(dur, func() {
 			c.refreshDischarges(ctx)
+			c.loopWG.Done()
 		})
 	}
 	c.mu.Unlock()
diff --git a/runtime/internal/flow/conn/conn.go b/runtime/internal/flow/conn/conn.go
index 80d0034..4806bef 100644
--- a/runtime/internal/flow/conn/conn.go
+++ b/runtime/internal/flow/conn/conn.go
@@ -340,7 +340,7 @@
 	}
 	c.status = Closing
 
-	go func() {
+	go func(c *Conn) {
 		if verror.ErrorID(err) != ErrConnClosedRemotely.ID {
 			msg := ""
 			if err != nil {
@@ -373,7 +373,7 @@
 		c.status = Closed
 		close(c.closed)
 		c.mu.Unlock()
-	}()
+	}(c)
 }
 
 func (c *Conn) release(ctx *context.T, fid, count uint64) {
diff --git a/runtime/internal/flow/conn/flow.go b/runtime/internal/flow/conn/flow.go
index e4127a3..1cad267 100644
--- a/runtime/internal/flow/conn/flow.go
+++ b/runtime/internal/flow/conn/flow.go
@@ -6,6 +6,7 @@
 
 import (
 	"io"
+	"sync"
 	"time"
 
 	"v.io/v23/context"
@@ -47,6 +48,9 @@
 	// borrowed indicates the number of tokens we have borrowed from the shared pool for
 	// sending on newly dialed flows.
 	borrowed uint64
+	// borrowCond is a condition variable that we can use to wait for shared
+	// counters to be released.
+	borrowCond *sync.Cond
 	// borrowing indicates whether this flow is using borrowed counters for a newly
 	// dialed flow.  This will be set to false after we first receive a
 	// release from the remote end.  This is always false for accepted flows.
@@ -60,14 +64,15 @@
 
 func (c *Conn) newFlowLocked(ctx *context.T, id uint64, bkey, dkey uint64, remote naming.Endpoint, dialed, preopen bool) *flw {
 	f := &flw{
-		id:        id,
-		dialed:    dialed,
-		conn:      c,
-		q:         newReadQ(c, id),
-		bkey:      bkey,
-		dkey:      dkey,
-		opened:    preopen,
-		borrowing: dialed,
+		id:         id,
+		dialed:     dialed,
+		conn:       c,
+		q:          newReadQ(c, id),
+		bkey:       bkey,
+		dkey:       dkey,
+		opened:     preopen,
+		borrowing:  dialed,
+		borrowCond: sync.NewCond(&c.mu),
 		// It's important that this channel has a non-zero buffer.  Sometimes this
 		// flow will be notifying itself, so if there's no buffer a deadlock will
 		// occur.
@@ -144,12 +149,16 @@
 		return int(max), func(used int) {
 			f.conn.lshared -= uint64(used)
 			f.borrowed += uint64(used)
+			f.ctx.VI(2).Infof("deducting %d borrowed tokens on flow %d(%p), total: %d", used, f.id, f, f.borrowed)
 		}
 	}
 	if f.released < max {
 		max = f.released
 	}
-	return int(max), func(used int) { f.released -= uint64(used) }
+	return int(max), func(used int) {
+		f.released -= uint64(used)
+		f.ctx.VI(2).Infof("flow %d(%p) deducting %d tokens, %d left", f.id, f, used, f.released)
+	}
 }
 
 // releaseLocked releases some counters from a remote reader to the local
@@ -161,12 +170,16 @@
 		if f.borrowed < tokens {
 			n = f.borrowed
 		}
+		f.ctx.VI(2).Infof("Returning %d tokens borrowed by %d(%p)", n, f.id, f)
 		tokens -= n
 		f.borrowed -= n
 		f.conn.lshared += n
+		f.borrowCond.Broadcast()
 	}
 	f.released += tokens
+	f.ctx.VI(2).Infof("Tokens released to %d(%p): %d => %d", f.id, f, tokens, f.released)
 	if f.writing {
+		f.ctx.VI(2).Infof("Activating writing flow %d(%p) now that we have tokens.", f.id, f)
 		f.conn.activateWriterLocked(f)
 		f.conn.notifyNextWriterLocked(nil)
 	}
@@ -176,6 +189,7 @@
 	if err = f.checkBlessings(); err != nil {
 		return 0, err
 	}
+	f.ctx.VI(2).Infof("starting write on flow %d(%p)", f.id, f)
 	select {
 	// Catch cancellations early.  If we caught a cancel when waiting
 	// our turn below its possible that we were notified simultaneously.
@@ -212,6 +226,7 @@
 		if tokens == 0 {
 			// Oops, we really don't have data to send, probably because we've exhausted
 			// the remote buffer.  deactivate ourselves but keep trying.
+			f.ctx.VI(2).Infof("Deactivating write on flow %d(%p) due to lack of tokens", f.id, f)
 			f.conn.deactivateWriterLocked(f)
 			continue
 		}
@@ -250,6 +265,7 @@
 		f.opened = true
 	}
 	f.writing = false
+	f.ctx.VI(2).Infof("finishing write on %d(%p): %v", f.id, f, err)
 	f.conn.deactivateWriterLocked(f)
 	f.conn.notifyNextWriterLocked(f)
 	f.conn.mu.Unlock()
@@ -394,8 +410,19 @@
 }
 
 func (f *flw) close(ctx *context.T, err error) {
+	closedRemotely := verror.ErrorID(err) == ErrFlowClosedRemotely.ID
+	f.conn.mu.Lock()
+	if closedRemotely {
+		// When the other side closes a flow, it implicitly releases all the
+		// counters used by that flow.  That means we should return the
+		// borrowed counters to the shared pool so they can be used by new flows.
+		f.conn.lshared += f.borrowed
+		f.borrowed = 0
+	}
+	f.borrowCond.Broadcast()
+	f.conn.mu.Unlock()
 	if f.q.close(ctx) {
-		eid := verror.ErrorID(err)
+		f.ctx.VI(2).Infof("closing %d(%p): %v", f.id, f, err)
 		f.cancel()
 		// After cancel has been called no new writes will begin for this
 		// flow.  There may be a write in progress, but it must finish
@@ -403,13 +430,12 @@
 		// can simply use sendMessageLocked to send the close flow
 		// message.
 		f.conn.mu.Lock()
-		delete(f.conn.flows, f.id)
 		connClosing := f.conn.status == Closing
 		var serr error
 		if !f.opened {
 			// Closing a flow that was never opened.
 			f.conn.unopenedFlows.Done()
-		} else if eid != ErrFlowClosedRemotely.ID && !connClosing {
+		} else if !closedRemotely && !connClosing {
 			// Note: If the conn is closing there is no point in trying to
 			// send the flow close message as it will fail.  This is racy
 			// with the connection closing, but there are no ill-effects
@@ -419,6 +445,18 @@
 				Flags: message.CloseFlag,
 			})
 		}
+		if f.borrowed > 0 && f.conn.status < Closing {
+			// Wait for the borrowed counters to be returned before removing
+			// the flow from the conn's map.
+			f.conn.loopWG.Add(1)
+			go func() {
+				defer f.conn.loopWG.Done()
+				f.conn.mu.Lock()
+				for f.borrowed > 0 && f.conn.status < Closing {
+					f.borrowCond.Wait()
+				}
+				delete(f.conn.flows, f.id)
+				f.conn.mu.Unlock()
+			}()
+		} else {
+			delete(f.conn.flows, f.id)
+		}
 		f.conn.mu.Unlock()
 		if serr != nil {
 			ctx.Errorf("Could not send close flow message: %v", err)
diff --git a/runtime/internal/flow/manager/conncache_test.go b/runtime/internal/flow/manager/conncache_test.go
index ff1f83c..186a40e 100644
--- a/runtime/internal/flow/manager/conncache_test.go
+++ b/runtime/internal/flow/manager/conncache_test.go
@@ -18,9 +18,11 @@
 	"v.io/x/ref/runtime/internal/flow/flowtest"
 	_ "v.io/x/ref/runtime/internal/flow/protocols/local"
 	inaming "v.io/x/ref/runtime/internal/naming"
+	"v.io/x/ref/test/goroutines"
 )
 
 func TestCache(t *testing.T) {
+	defer goroutines.NoLeaks(t, leakWaitTime)()
 	ctx, shutdown := v23.Init()
 	defer shutdown()
 
@@ -31,7 +33,9 @@
 		RID:       naming.FixedRoutingID(0x5555),
 		Blessings: []string{"A", "B", "C"},
 	}
-	conn := makeConnAndFlow(t, ctx, remote).c
+	caf := makeConnAndFlow(t, ctx, remote)
+	defer caf.stop(ctx)
+	conn := caf.c
 	if err := c.Insert(conn, remote.Protocol, remote.Address); err != nil {
 		t.Fatal(err)
 	}
@@ -72,7 +76,9 @@
 		RID:       naming.FixedRoutingID(0x1111),
 		Blessings: []string{"ridonly"},
 	}
-	ridConn := makeConnAndFlow(t, ctx, ridEP).c
+	caf = makeConnAndFlow(t, ctx, ridEP)
+	defer caf.stop(ctx)
+	ridConn := caf.c
 	if err := c.InsertWithRoutingID(ridConn); err != nil {
 		t.Fatal(err)
 	}
@@ -90,7 +96,9 @@
 		RID:       naming.FixedRoutingID(0x2222),
 		Blessings: []string{"other"},
 	}
-	otherConn := makeConnAndFlow(t, ctx, otherEP).c
+	caf = makeConnAndFlow(t, ctx, otherEP)
+	defer caf.stop(ctx)
+	otherConn := caf.c
 
 	// Looking up a not yet inserted endpoint should fail.
 	if got, err := c.ReservedFind(otherEP.Protocol, otherEP.Address, otherEP.Blessings); err != nil || got != nil {
@@ -117,7 +125,9 @@
 	}
 
 	// Insert a duplicate conn to ensure that replaced conns still get closed.
-	dupConn := makeConnAndFlow(t, ctx, remote).c
+	caf = makeConnAndFlow(t, ctx, remote)
+	defer caf.stop(ctx)
+	dupConn := caf.c
 	if err := c.Insert(dupConn, remote.Protocol, remote.Address); err != nil {
 		t.Fatal(err)
 	}
@@ -136,17 +146,20 @@
 	c.Close(ctx)
 	// Now the connections should be closed.
 	<-conn.Closed()
+	<-ridConn.Closed()
 	<-dupConn.Closed()
 	<-otherConn.Closed()
 }
 
 func TestLRU(t *testing.T) {
+	defer goroutines.NoLeaks(t, leakWaitTime)()
 	ctx, shutdown := v23.Init()
 	defer shutdown()
 
 	// Ensure that the least recently created conns are killed by KillConnections.
 	c := NewConnCache()
-	conns := nConnAndFlows(t, ctx, 10)
+	conns, stop := nConnAndFlows(t, ctx, 10)
+	defer stop()
 	for _, conn := range conns {
 		addr := conn.c.RemoteEndpoint().Addr()
 		if err := c.Insert(conn.c, addr.Network(), addr.String()); err != nil {
@@ -178,7 +191,8 @@
 
 	// Ensure that writing to conns marks conns as more recently used.
 	c = NewConnCache()
-	conns = nConnAndFlows(t, ctx, 10)
+	conns, stop = nConnAndFlows(t, ctx, 10)
+	defer stop()
 	for _, conn := range conns {
 		addr := conn.c.RemoteEndpoint().Addr()
 		if err := c.Insert(conn.c, addr.Network(), addr.String()); err != nil {
@@ -213,7 +227,8 @@
 
 	// Ensure that reading from conns marks conns as more recently used.
 	c = NewConnCache()
-	conns = nConnAndFlows(t, ctx, 10)
+	conns, stop = nConnAndFlows(t, ctx, 10)
+	defer stop()
 	for _, conn := range conns {
 		addr := conn.c.RemoteEndpoint().Addr()
 		if err := c.Insert(conn.c, addr.Network(), addr.String()); err != nil {
@@ -267,6 +282,7 @@
 
 type connAndFlow struct {
 	c *connpackage.Conn
+	a *connpackage.Conn
 	f flow.Flow
 }
 
@@ -284,7 +300,12 @@
 	}
 }
 
-func nConnAndFlows(t *testing.T, ctx *context.T, n int) []connAndFlow {
+func (c connAndFlow) stop(ctx *context.T) {
+	c.c.Close(ctx, nil)
+	c.a.Close(ctx, nil)
+}
+
+func nConnAndFlows(t *testing.T, ctx *context.T, n int) ([]connAndFlow, func()) {
 	cfs := make([]connAndFlow, n)
 	for i := 0; i < n; i++ {
 		cfs[i] = makeConnAndFlow(t, ctx, &inaming.Endpoint{
@@ -292,7 +313,11 @@
 			RID:      naming.FixedRoutingID(uint64(i + 1)), // We need to have a nonzero rid for bidi.
 		})
 	}
-	return cfs
+	return cfs, func() {
+		for _, conn := range cfs {
+			conn.stop(ctx)
+		}
+	}
 }
 
 func makeConnAndFlow(t *testing.T, ctx *context.T, ep naming.Endpoint) connAndFlow {
@@ -318,7 +343,7 @@
 		ach <- a
 	}()
 	conn := <-dch
-	<-ach
+	aconn := <-ach
 	f, err := conn.Dial(ctx, flowtest.AllowAllPeersAuthorizer{}, nil)
 	if err != nil {
 		t.Fatal(err)
@@ -328,7 +353,7 @@
 		t.Fatal(err)
 	}
 	<-fh.ch
-	return connAndFlow{conn, f}
+	return connAndFlow{conn, aconn, f}
 }
 
 type fh struct {
diff --git a/runtime/internal/rpc/stream/proxy/proxy.go b/runtime/internal/rpc/stream/proxy/proxy.go
index a010f18..5e3a32a 100644
--- a/runtime/internal/rpc/stream/proxy/proxy.go
+++ b/runtime/internal/rpc/stream/proxy/proxy.go
@@ -607,9 +607,10 @@
 				dst.Process.queue.Put(m)
 			}
 		case *message.HealthCheckResponse:
-			// Note that the proxy never sends health check requests, so responses
-			// should always be forwarded.
-			if dst := p.Route(m.VCI); dst != nil {
+			if svc := p.ServerVC(m.VCI); svc != nil {
+				// If the response is for the proxy, pass it to the VC.
+				svc.HandleHealthCheckResponse()
+			} else if dst := p.Route(m.VCI); dst != nil {
 				m.VCI = dst.VCI
 				dst.Process.queue.Put(m)
 			}
diff --git a/runtime/internal/rpc/stream/vc/vc_cache.go b/runtime/internal/rpc/stream/vc/vc_cache.go
index d962cfa..d78098c 100644
--- a/runtime/internal/rpc/stream/vc/vc_cache.go
+++ b/runtime/internal/rpc/stream/vc/vc_cache.go
@@ -5,6 +5,7 @@
 package vc
 
 import (
+	"strings"
 	"sync"
 
 	"v.io/v23/naming"
@@ -17,17 +18,19 @@
 // VCCache implements a set of VIFs keyed by the endpoint of the remote end and the
 // local principal. Multiple goroutines can invoke methods on the VCCache simultaneously.
 type VCCache struct {
-	mu      sync.Mutex
-	cache   map[vcKey]*VC  // GUARDED_BY(mu)
-	started map[vcKey]bool // GUARDED_BY(mu)
-	cond    *sync.Cond
+	mu       sync.Mutex
+	cache    map[vcKey]*VC  // GUARDED_BY(mu)
+	ridCache map[ridKey]*VC // GUARDED_BY(mu)
+	started  map[vcKey]bool // GUARDED_BY(mu)
+	cond     *sync.Cond
 }
 
 // NewVCCache returns a new cache for VCs.
 func NewVCCache() *VCCache {
 	c := &VCCache{
-		cache:   make(map[vcKey]*VC),
-		started: make(map[vcKey]bool),
+		cache:    make(map[vcKey]*VC),
+		ridCache: make(map[ridKey]*VC),
+		started:  make(map[vcKey]bool),
 	}
 	c.cond = sync.NewCond(&c.mu)
 	return c
@@ -53,6 +56,9 @@
 		return nil, verror.New(errVCCacheClosed, nil)
 	}
 	c.started[k] = true
+	if vc, ok := c.ridCache[c.ridkey(ep, p)]; ok {
+		return vc, nil
+	}
 	return c.cache[k], nil
 }
 
@@ -72,7 +78,11 @@
 	if c.cache == nil {
 		return verror.New(errVCCacheClosed, nil)
 	}
-	c.cache[c.key(vc.RemoteEndpoint(), vc.LocalPrincipal())] = vc
+	ep, principal := vc.RemoteEndpoint(), vc.LocalPrincipal()
+	c.cache[c.key(ep, principal)] = vc
+	if ep.RoutingID() != naming.NullRoutingID {
+		c.ridCache[c.ridkey(ep, principal)] = vc
+	}
 	return nil
 }
 
@@ -85,6 +95,7 @@
 	}
 	c.cache = nil
 	c.started = nil
+	c.ridCache = nil
 	c.mu.Unlock()
 	return vcs
 }
@@ -96,10 +107,18 @@
 	if c.cache == nil {
 		return verror.New(errVCCacheClosed, nil)
 	}
-	delete(c.cache, c.key(vc.RemoteEndpoint(), vc.LocalPrincipal()))
+	ep, principal := vc.RemoteEndpoint(), vc.LocalPrincipal()
+	delete(c.cache, c.key(ep, principal))
+	delete(c.ridCache, c.ridkey(ep, principal))
 	return nil
 }
 
+type ridKey struct {
+	rid            naming.RoutingID
+	localPublicKey string
+	blessingNames  string
+}
+
 type vcKey struct {
 	remoteEP       string
 	localPublicKey string // localPublicKey = "" means we are running unencrypted (i.e. SecurityNone)
@@ -112,3 +131,12 @@
 	}
 	return k
 }
+
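+// ridkey keys a VC by the remote endpoint's routing ID rather than its full
+// network address, so that a cached VC can be found for any endpoint that
+// carries the same routing ID.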
+func (c *VCCache) ridkey(ep naming.Endpoint, p security.Principal) ridKey {
+	k := ridKey{rid: ep.RoutingID()}
+	if p != nil {
+		k.localPublicKey = p.PublicKey().String()
+		k.blessingNames = strings.Join(ep.BlessingNames(), ",")
+	}
+	return k
+}
diff --git a/runtime/internal/rpc/stream/vc/vc_cache_test.go b/runtime/internal/rpc/stream/vc/vc_cache_test.go
index b096e21..1d85ff9 100644
--- a/runtime/internal/rpc/stream/vc/vc_cache_test.go
+++ b/runtime/internal/rpc/stream/vc/vc_cache_test.go
@@ -7,6 +7,7 @@
 import (
 	"testing"
 
+	"v.io/v23/naming"
 	inaming "v.io/x/ref/runtime/internal/naming"
 	"v.io/x/ref/test/testutil"
 )
@@ -101,6 +102,24 @@
 	if cachedVC := <-ch; cachedVC != otherVC {
 		t.Errorf("got %v, want %v", cachedVC, otherVC)
 	}
+
+	// If we add an endpoint with a non-zero routing ID and then search for
+	// another endpoint with the same routing ID, we should get the VC cached
+	// for the first endpoint.
+	ridep, err := inaming.NewEndpoint("oink:8888")
+	if err != nil {
+		t.Fatal(err)
+	}
+	ridep.RID = naming.FixedRoutingID(0x1111)
+	vc = &VC{remoteEP: ridep, localPrincipal: p}
+	cache.Insert(vc)
+	otherEP, err = inaming.NewEndpoint("moo:7777")
+	if err != nil {
+		t.Fatal(err)
+	}
+	otherEP.RID = ridep.RID
+	if got, err := cache.ReservedFind(otherEP, p); err != nil || got != vc {
+		t.Errorf("got %v, want %v, err: %v", got, vc, err)
+	}
 }
 
 func vcsEqual(a, b []*VC) bool {
diff --git a/runtime/internal/rpc/stream/vif/set_test.go b/runtime/internal/rpc/stream/vif/set_test.go
index b79d8d5..f634c64 100644
--- a/runtime/internal/rpc/stream/vif/set_test.go
+++ b/runtime/internal/rpc/stream/vif/set_test.go
@@ -111,7 +111,7 @@
 }
 
 func TestSetBasic(t *testing.T) {
-	ctx, shutdown := test.V23InitAnon()
+	ctx, shutdown := test.V23InitSimple()
 	defer shutdown()
 	sockdir, err := ioutil.TempDir("", "TestSetBasic")
 	if err != nil {
@@ -184,7 +184,7 @@
 }
 
 func TestSetWithPipes(t *testing.T) {
-	ctx, shutdown := test.V23InitAnon()
+	ctx, shutdown := test.V23InitSimple()
 	defer shutdown()
 	c1, s1 := net.Pipe()
 	c2, s2 := net.Pipe()
@@ -233,7 +233,7 @@
 }
 
 func TestSetWithUnixSocket(t *testing.T) {
-	ctx, shutdown := test.V23InitAnon()
+	ctx, shutdown := test.V23InitSimple()
 	defer shutdown()
 	dir, err := ioutil.TempDir("", "TestSetWithUnixSocket")
 	if err != nil {
@@ -296,7 +296,7 @@
 }
 
 func TestSetInsertDelete(t *testing.T) {
-	ctx, shutdown := test.V23InitAnon()
+	ctx, shutdown := test.V23InitSimple()
 	defer shutdown()
 	c1, s1 := net.Pipe()
 	vf1, _, err := newVIF(ctx, c1, s1)
@@ -319,7 +319,7 @@
 }
 
 func TestBlockingFind(t *testing.T) {
-	ctx, shutdown := test.V23InitAnon()
+	ctx, shutdown := test.V23InitSimple()
 	defer shutdown()
 	network, address := "tcp", "127.0.0.1:1234"
 	set := vif.NewSet()
diff --git a/runtime/internal/rpc/stream/vif/vif_test.go b/runtime/internal/rpc/stream/vif/vif_test.go
index a15b081..40114c0 100644
--- a/runtime/internal/rpc/stream/vif/vif_test.go
+++ b/runtime/internal/rpc/stream/vif/vif_test.go
@@ -37,7 +37,7 @@
 //go:generate jiri test generate
 
 func TestSingleFlowCreatedAtClient(t *testing.T) {
-	ctx, shutdown := test.V23InitAnon()
+	ctx, shutdown := test.V23InitSimple()
 	defer shutdown()
 	pclient := testutil.NewPrincipal("client")
 	pserver := testutil.NewPrincipal("server")
@@ -65,7 +65,7 @@
 }
 
 func TestSingleFlowCreatedAtServer(t *testing.T) {
-	ctx, shutdown := test.V23InitAnon()
+	ctx, shutdown := test.V23InitSimple()
 	defer shutdown()
 	pclient := testutil.NewPrincipal("client")
 	pserver := testutil.NewPrincipal("server")
@@ -96,7 +96,7 @@
 
 func testMultipleVCsAndMultipleFlows(t *testing.T, gomaxprocs int) {
 	testutil.InitRandGenerator(t.Logf)
-	ctx, shutdown := test.V23InitAnon()
+	ctx, shutdown := test.V23InitSimple()
 	defer shutdown()
 	// This test dials multiple VCs from the client to the server.
 	// On each VC, it creates multiple flows, writes to them and verifies
@@ -255,7 +255,7 @@
 }
 
 func TestClose(t *testing.T) {
-	ctx, shutdown := test.V23InitAnon()
+	ctx, shutdown := test.V23InitSimple()
 	defer shutdown()
 	pclient := testutil.NewPrincipal("client")
 	pserver := testutil.NewPrincipal("server")
@@ -294,7 +294,7 @@
 }
 
 func TestOnClose(t *testing.T) {
-	ctx, shutdown := test.V23InitAnon()
+	ctx, shutdown := test.V23InitSimple()
 	defer shutdown()
 	pclient := testutil.NewPrincipal("client")
 	pserver := testutil.NewPrincipal("server")
@@ -335,7 +335,7 @@
 	const (
 		waitTime = 5 * time.Millisecond
 	)
-	ctx, shutdown := test.V23InitAnon()
+	ctx, shutdown := test.V23InitSimple()
 	defer shutdown()
 	pclient := testutil.NewPrincipal("client")
 	pserver := testutil.NewPrincipal("server")
@@ -424,7 +424,7 @@
 		// connection of the other side to be closed especially in race testing.
 		waitTime = 150 * time.Millisecond
 	)
-	ctx, shutdown := test.V23InitAnon()
+	ctx, shutdown := test.V23InitSimple()
 	defer shutdown()
 	pclient := testutil.NewPrincipal("client")
 	pserver := testutil.NewPrincipal("server")
@@ -489,7 +489,7 @@
 		idleTime = 10 * time.Millisecond
 		waitTime = idleTime * 2
 	)
-	ctx, shutdown := test.V23InitAnon()
+	ctx, shutdown := test.V23InitSimple()
 	defer shutdown()
 	pclient := testutil.NewPrincipal("client")
 	pserver := testutil.NewPrincipal("server")
@@ -607,7 +607,7 @@
 func TestIdleTimeoutServer(t *testing.T) { testIdleTimeout(t, true) }
 
 func TestShutdownVCs(t *testing.T) {
-	ctx, shutdown := test.V23InitAnon()
+	ctx, shutdown := test.V23InitSimple()
 	defer shutdown()
 	pclient := testutil.NewPrincipal("client")
 	pserver := testutil.NewPrincipal("server")
@@ -674,7 +674,7 @@
 }
 
 func (tc *versionTestCase) Run(t *testing.T) {
-	ctx, shutdown := test.V23InitAnon()
+	ctx, shutdown := test.V23InitSimple()
 	defer shutdown()
 	pclient := testutil.NewPrincipal("client")
 	pserver := testutil.NewPrincipal("server")
@@ -734,7 +734,7 @@
 }
 
 func TestNetworkFailure(t *testing.T) {
-	ctx, shutdown := test.V23InitAnon()
+	ctx, shutdown := test.V23InitSimple()
 	defer shutdown()
 	pclient := testutil.NewPrincipal("client")
 	pserver := testutil.NewPrincipal("server")
@@ -768,7 +768,7 @@
 }
 
 func TestPreAuthentication(t *testing.T) {
-	ctx, shutdown := test.V23InitAnon()
+	ctx, shutdown := test.V23InitSimple()
 	defer shutdown()
 	pclient := testutil.NewPrincipal("client")
 	pserver := testutil.NewPrincipal("server")
diff --git a/runtime/internal/rpc/testutil_test.go b/runtime/internal/rpc/testutil_test.go
index 105638d..1221ff5 100644
--- a/runtime/internal/rpc/testutil_test.go
+++ b/runtime/internal/rpc/testutil_test.go
@@ -79,7 +79,7 @@
 }
 
 func initForTest() (*context.T, v23.Shutdown) {
-	ctx, shutdown := test.V23InitAnon()
+	ctx, shutdown := test.V23InitSimple()
 	ctx, err := ivtrace.Init(ctx, flags.VtraceFlags{})
 	if err != nil {
 		panic(err)
diff --git a/services/device/deviced/internal/impl/app_service.go b/services/device/deviced/internal/impl/app_service.go
index ac637e4..e3f75ee 100644
--- a/services/device/deviced/internal/impl/app_service.go
+++ b/services/device/deviced/internal/impl/app_service.go
@@ -1050,24 +1050,6 @@
 		ctx.Error(err)
 		return
 	}
-	// TODO(caprita): Putting the StopServing call here means that the
-	// socket is still serving after the app instance has been transitioned
-	// in state 'not running'.  This creates the possibility of a Run()
-	// happening after the app state has changed to 'not running' in the
-	// reaper, but before restartAppIfNecessary has a chance to execute
-	// (resulting int he ServePrincipal call failing).  We should either
-	// move the StopServing call before we transition the instance to 'not
-	// running', or make the ServePrincipal robust w.r.t. already serving
-	// state.
-	if sa := i.securityAgent; sa != nil && sa.keyMgr != nil {
-		info, err := loadInstanceInfo(ctx, instanceDir)
-		if err != nil {
-			ctx.Errorf("Failed to load instance info: %v", err)
-		}
-		if err := sa.keyMgr.StopServing(info.handle()); err != nil {
-			ctx.Errorf("StopServing failed: %v", err)
-		}
-	}
 	shouldRestart := synchronizedShouldRestart(ctx, instanceDir)
 
 	if err := transitionInstance(instanceDir, device.InstanceStateLaunching, device.InstanceStateNotRunning); err != nil {
@@ -1182,22 +1164,54 @@
 	return nil
 }
 
-func (i *appService) stop(ctx *context.T, instanceDir string, reap *reaper, deadline time.Duration) error {
-	info, err := loadInstanceInfo(ctx, instanceDir)
-	if err != nil {
-		return err
-	}
-	err = stopAppRemotely(ctx, info.AppCycleMgrName, deadline)
-	reap.forciblySuspend(instanceDir)
-	if err == nil {
-		reap.stopWatching(instanceDir)
-		if sa := i.runner.securityAgent; sa != nil && sa.keyMgr != nil {
-			if err := sa.keyMgr.StopServing(info.handle()); err != nil {
-				ctx.Errorf("StopServing failed: %v", err)
+// stop attempts to stop the instance's process; returns true if successful, or
+// false if the process is still running.
+func (i *appService) stop(ctx *context.T, instanceDir string, info *instanceInfo, reap *reaper, deadline time.Duration) (bool, error) {
+	pid := info.Pid
+	// The reaper should stop tracking this instance, and, in particular,
+	// not attempt to restart it.
+	reap.stopWatching(instanceDir)
+	processExited, stopGoroutine := make(chan struct{}), make(chan struct{})
+	defer close(stopGoroutine)
+	go func() {
+		for {
+			if !isAlive(ctx, pid) {
+				close(processExited)
+				return
 			}
+			select {
+			case <-stopGoroutine:
+				return
+			default:
+			}
+			time.Sleep(time.Millisecond)
 		}
+	}()
+	deadlineExpired := time.After(deadline)
+	err := stopAppRemotely(ctx, info.AppCycleMgrName, deadline)
+	select {
+	case <-processExited:
+		if err != nil {
+			err = verror.New(errStoppedWithErrors, ctx, fmt.Sprintf("process exited uncleanly upon remote stop: %v", err))
+		}
+		return true, err
+	case <-deadlineExpired:
 	}
-	return err
+	reap.forciblySuspend(instanceDir)
+	// Give it an extra 500 ms of grace period for the process to die after
+	// forceful shutdown.
+	deadlineExpired = time.After(500 * time.Millisecond)
+	select {
+	case <-processExited:
+		return true, verror.New(errStoppedWithErrors, ctx, fmt.Sprintf("process failed to exit cleanly upon remote stop (%v) and was forcefully terminated", err))
+	case <-deadlineExpired:
+		// The process just won't die.  We'll declare the stop operation
+		// unsuccessful and switch the instance back to running
+		// state. We let the reaper deal with it going forward
+		// (including restarting it if restarts are enabled).
+		reap.startWatching(instanceDir, pid)
+		return false, verror.New(errStopFailed, ctx, "process failed to exit after force stop")
+	}
 }
 
 func (i *appService) Delete(ctx *context.T, _ rpc.ServerCall) error {
@@ -1216,10 +1230,28 @@
 	if err := transitionInstance(instanceDir, device.InstanceStateRunning, device.InstanceStateDying); err != nil {
 		return err
 	}
-	if err := i.stop(ctx, instanceDir, i.runner.reap, deadline); err != nil {
-		transitionInstance(instanceDir, device.InstanceStateDying, device.InstanceStateRunning)
+	info, err := loadInstanceInfo(ctx, instanceDir)
+	if err != nil {
 		return err
 	}
+	if exited, err := i.stop(ctx, instanceDir, info, i.runner.reap, deadline); !exited {
+		// If the process failed to terminate, it goes back to state
+		// running (as if the Kill never happened).  The client may try
+		// again.
+		if err := transitionInstance(instanceDir, device.InstanceStateDying, device.InstanceStateRunning); err != nil {
+			ctx.Errorf("transitionInstance(%v, %v, %v): %v", instanceDir, device.InstanceStateDying, device.InstanceStateRunning, err)
+		}
+		// Return the stop error.
+		return err
+	} else if err != nil {
+		ctx.Errorf("stop %v ultimately succeeded, but had encountered an error: %v", instanceDir, err)
+	}
+	// The app exited, so we can stop serving the principal.
+	if sa := i.runner.securityAgent; sa != nil && sa.keyMgr != nil {
+		if err := sa.keyMgr.StopServing(info.handle()); err != nil {
+			ctx.Errorf("StopServing failed: %v", err)
+		}
+	}
 	return transitionInstance(instanceDir, device.InstanceStateDying, device.InstanceStateNotRunning)
 }
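
Note: the new stop flow amounts to "ask nicely, wait, force, wait again". The inline wait can be read as the following primitive (waitForExit is an illustrative name, not part of this change; it reuses isAlive from instance_reaping.go):

	// waitForExit polls until pid exits or the deadline passes;
	// it returns true iff the process exited in time.
	func waitForExit(ctx *context.T, pid int, deadline time.Duration) bool {
		timeout := time.After(deadline)
		for {
			if !isAlive(ctx, pid) {
				return true
			}
			select {
			case <-timeout:
				return false
			case <-time.After(time.Millisecond):
			}
		}
	}
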
 
diff --git a/services/device/deviced/internal/impl/dispatcher.go b/services/device/deviced/internal/impl/dispatcher.go
index 5d12e66..98af665 100644
--- a/services/device/deviced/internal/impl/dispatcher.go
+++ b/services/device/deviced/internal/impl/dispatcher.go
@@ -83,6 +83,8 @@
 	errCantCreateAccountStore = verror.Register(pkgPath+".errCantCreateAccountStore", verror.NoRetry, "{1:}{2:} cannot create persistent store for identity to system account associations{:_}")
 	errCantCreateAppWatcher   = verror.Register(pkgPath+".errCantCreateAppWatcher", verror.NoRetry, "{1:}{2:} cannot create app status watcher{:_}")
 	errNewAgentFailed         = verror.Register(pkgPath+".errNewAgentFailed", verror.NoRetry, "{1:}{2:} NewAgent() failed{:_}")
+	errStoppedWithErrors      = verror.Register(pkgPath+".errStoppedWithErrors", verror.NoRetry, "{1:}{2:} instance killed uncleanly{:_}")
+	errStopFailed             = verror.Register(pkgPath+".errStopFailed", verror.NoRetry, "{1:}{2:} instance couldn't be killed{:_}")
 )
 
 // NewDispatcher is the device manager dispatcher factory.  It returns a new
diff --git a/services/device/deviced/internal/impl/instance_reaping.go b/services/device/deviced/internal/impl/instance_reaping.go
index ac75f61..33ba9eb 100644
--- a/services/device/deviced/internal/impl/instance_reaping.go
+++ b/services/device/deviced/internal/impl/instance_reaping.go
@@ -72,7 +72,17 @@
 	return r, nil
 }
 
-func markNotRunning(ctx *context.T, idir string) {
+func markNotRunning(ctx *context.T, runner *appRunner, idir string) {
+	if sa := runner.securityAgent; sa != nil && sa.keyMgr != nil {
+		info, err := loadInstanceInfo(ctx, idir)
+		if err != nil {
+			ctx.Errorf("Failed to load instance info: %v", err)
+		} else if err := sa.keyMgr.StopServing(info.handle()); err != nil {
+			ctx.Errorf("StopServing failed: %v", err)
+		}
+	}
+
 	if err := transitionInstance(idir, device.InstanceStateRunning, device.InstanceStateNotRunning); err != nil {
 		// This may fail under two circumstances.
 		// 1. The app has crashed between where startCmd invokes
@@ -84,6 +94,22 @@
 	}
 }
 
+func isAlive(ctx *context.T, pid int) bool {
+	switch err := syscall.Kill(pid, 0); err {
+	case syscall.ESRCH:
+		// No such PID.
+		return false
+	case nil, syscall.EPERM:
+		return true
+	default:
+		// The kill system call manpage says that this can only happen
+		// if the kernel claims that 0 is an invalid signal.  Only a
+		// deeply confused kernel would say this so just give up.
+		ctx.Panicf("isAlive: unanticipated result from syscall.Kill: %v", err)
+		return true
+	}
+}
+
 // processStatusPolling polls for the continued existence of a set of
 // tracked pids. TODO(rjkroege): There are nicer ways to provide this
 // functionality. For example, use the kevent facility in darwin or
@@ -92,14 +118,12 @@
 func (r *reaper) processStatusPolling(ctx *context.T, trackedPids map[string]int, appRunner *appRunner) {
 	poll := func(ctx *context.T) {
 		for idir, pid := range trackedPids {
-			switch err := syscall.Kill(pid, 0); err {
-			case syscall.ESRCH:
-				// No such PID.
+			if !isAlive(ctx, pid) {
 				ctx.VI(2).Infof("processStatusPolling discovered pid %d ended", pid)
-				markNotRunning(ctx, idir)
+				markNotRunning(ctx, appRunner, idir)
 				go appRunner.restartAppIfNecessary(ctx, idir)
 				delete(trackedPids, idir)
-			case nil, syscall.EPERM:
+			} else {
 				ctx.VI(2).Infof("processStatusPolling saw live pid: %d", pid)
 				// The task exists and is running under the same uid as
 				// the device manager or the task exists and is running
@@ -117,12 +141,6 @@
 				// the appcycle manager, the app was probably started under
 				// a different agent and cannot be managed. Perhaps we should
 				// then kill the app and restart it?
-			default:
-				// The kill system call manpage says that this can only happen
-				// if the kernel claims that 0 is an invalid signal.
-				// Only a deeply confused kernel would say this so just give
-				// up.
-				ctx.Panicf("processStatusPolling: unanticpated result from sys.Kill: %v", err)
 			}
 		}
 	}
@@ -137,10 +155,17 @@
 				delete(trackedPids, p.instanceDir)
 				poll(ctx)
 			case p.pid == -2: // kill the process
-				if pid, ok := trackedPids[p.instanceDir]; ok {
-					if err := suidHelper.terminatePid(ctx, pid, nil, nil); err != nil {
-						ctx.Errorf("Failure to kill: %v", err)
-					}
+				info, err := loadInstanceInfo(ctx, p.instanceDir)
+				if err != nil {
+					ctx.Errorf("loadInstanceInfo(%v) failed: %v", p.instanceDir, err)
+					continue
+				}
+				if info.Pid <= 0 {
+					ctx.Errorf("invalid pid in %v: %v", p.instanceDir, info.Pid)
+					continue
+				}
+				if err := suidHelper.terminatePid(ctx, info.Pid, nil, nil); err != nil {
+					ctx.Errorf("Failure to kill pid %d: %v", info.Pid, err)
 				}
 			case p.pid < 0:
 				ctx.Panicf("invalid pid %v", p.pid)
diff --git a/services/device/dmrun/backend/backend_ssh.go b/services/device/dmrun/backend/backend_ssh.go
index 0ddb7f0..3b4e8fe 100644
--- a/services/device/dmrun/backend/backend_ssh.go
+++ b/services/device/dmrun/backend/backend_ssh.go
@@ -59,6 +59,9 @@
 	}
 
 	const workingDir = "/tmp/dmrun"
+	if _, err := g.RunCommand("test", "!", "-e", workingDir); err != nil {
+		return nil, fmt.Errorf("working dir %v already exists on target; please clean up any previous dmrun instance", workingDir)
+	}
 	output, err := g.RunCommand("mkdir", workingDir)
 	if err != nil {
 		return nil, fmt.Errorf("failed to make working dir: %v (output: %s)", err, output)
diff --git a/services/device/dmrun/backend/backend_vcloud.go b/services/device/dmrun/backend/backend_vcloud.go
index e1b33f5..36ccec9 100644
--- a/services/device/dmrun/backend/backend_vcloud.go
+++ b/services/device/dmrun/backend/backend_vcloud.go
@@ -66,8 +66,7 @@
 	if g.isDeleted {
 		return fmt.Errorf("trying to delete a deleted VcloudVM")
 	}
-
-	cmd := exec.Command(g.vcloud, "node", "delete", g.projectArg, g.zoneArg, g.name)
+	cmd := g.generateDeleteCmd(false)
 	output, err := cmd.CombinedOutput()
 	if err != nil {
 		err = fmt.Errorf("failed deleting GCE instance (%s): %v\nOutput:%v\n", strings.Join(cmd.Args, " "), err, string(output))
@@ -79,6 +78,10 @@
 	return err
 }
 
+func (g *VcloudVM) generateDeleteCmd(forUser bool) *exec.Cmd {
+	return exec.Command(g.vcloudCmd(forUser), "node", "delete", g.projectArg, g.zoneArg, g.name)
+}
+
 func (g *VcloudVM) Name() string {
 	return g.name
 }
@@ -92,7 +95,7 @@
 		return nil, fmt.Errorf("RunCommand called on deleted VcloudVM")
 	}
 
-	cmd := g.generateExecCmdForRun(args...)
+	cmd := g.generateExecCmdForRun(false, args...)
 	output, err := cmd.CombinedOutput()
 	if err != nil {
 		err = fmt.Errorf("failed running [%s] on VM %s", strings.Join(args, " "), g.name)
@@ -104,17 +107,29 @@
 	if g.isDeleted {
 		return ""
 	}
-	cmd := g.generateExecCmdForRun(args...)
+	return cmdLine(g.generateExecCmdForRun(true, args...))
+}
 
+func cmdLine(cmd *exec.Cmd) string {
 	result := cmd.Path
-	for i := 1; i < len(cmd.Args); i++ {
-		result = fmt.Sprintf("%s %q", result, cmd.Args[i])
+	for _, arg := range cmd.Args[1:] {
+		result = fmt.Sprintf("%s %q", result, arg)
 	}
 	return result
 }
 
-func (g *VcloudVM) generateExecCmdForRun(args ...string) *exec.Cmd {
-	return exec.Command(g.vcloud, append([]string{"sh", g.projectArg, g.name, "cd", g.workingDir, "&&"}, args...)...)
+func (g *VcloudVM) vcloudCmd(forUser bool) string {
+	if forUser {
+		// We can't return the vcloud binary that we ran for the steps
+		// above, as that one is deleted after use. For now, we assume
+		// the user will have a vcloud binary on their path.
+		return "vcloud"
+	}
+	return g.vcloud
+}
+
+func (g *VcloudVM) generateExecCmdForRun(forUser bool, args ...string) *exec.Cmd {
+	return exec.Command(g.vcloudCmd(forUser), append([]string{"sh", g.projectArg, g.name, "cd", g.workingDir, "&&"}, args...)...)
 }
 
 func (g *VcloudVM) CopyFile(infile, destination string) error {
@@ -122,7 +137,7 @@
 		return fmt.Errorf("CopyFile called on deleted VcloudVM")
 	}
 
-	cmd := exec.Command("gcloud", "compute", g.projectArg, "copy-files", infile, fmt.Sprintf("%s@%s:/%s", g.sshUser, g.Name(), path.Join(g.workingDir, destination)), g.zoneArg)
+	cmd := exec.Command("gcloud", "compute", g.projectArg, "copy-files", infile, fmt.Sprintf("%s@%s:%s", g.sshUser, g.Name(), path.Join(g.workingDir, destination)), g.zoneArg)
 	output, err := cmd.CombinedOutput()
 	if err != nil {
 		err = fmt.Errorf("failed copying %s to %s:%s - %v\nOutput:\n%v", infile, g.name, destination, err, string(output))
@@ -134,8 +149,5 @@
 	if g.isDeleted {
 		return ""
 	}
-
-	// We can't return the vcloud binary that we ran for the steps above, as that one is deleted
-	// after use. For now, we assume the user will have a vcloud binary on his path to use.
-	return strings.Join([]string{"vcloud", "node", "delete", g.projectArg, g.zoneArg, g.name}, " ")
+	return cmdLine(g.generateDeleteCmd(true))
 }
diff --git a/services/device/dmrun/dmrun.go b/services/device/dmrun/dmrun.go
index e378295..5d011f2 100644
--- a/services/device/dmrun/dmrun.go
+++ b/services/device/dmrun/dmrun.go
@@ -122,24 +122,21 @@
 	fmt.Printf("Working dir: %s\n", workDir)
 }
 
-// buildV23Binary builds the specified binary and returns the path to the
-// executable.
-func buildV23Binary(pkg string) string {
-	fmt.Println("Building", pkg)
-	dest := filepath.Join(workDir, path.Base(pkg))
-	cmd := exec.Command("jiri", "go", "build", "-x", "-o", dest, pkg)
+// buildV23Binaries builds the specified binaries and returns the paths to the
+// executables.
+func buildV23Binaries(pkg ...string) []string {
+	fmt.Print("Building ", pkg, " ...")
+	defer fmt.Println("Done.")
+	args := append([]string{"go", "install", "-x"}, pkg...)
+	cmd := exec.Command("jiri", args...)
+	cmd.Env = append(os.Environ(), "GOBIN="+workDir)
 	output, err := cmd.CombinedOutput()
 	dieIfErr(err, "Running build command %v failed. Output:\n%v", strings.Join(cmd.Args, " "), string(output))
-	return dest
-}
-
-// buildDMBinaries builds the binaries required for a device manager
-// installation and returns the paths to the executables.
-func buildDMBinaries() (ret []string) {
-	for _, b := range dmBins {
-		ret = append(ret, buildV23Binary(b))
+	dest := make([]string, len(pkg))
+	for i, p := range pkg {
+		dest[i] = filepath.Join(workDir, path.Base(p))
 	}
-	return
+	return dest
 }
 
 // createArchive creates a zip archive from the given files.
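
Note on buildV23Binaries above: replacing per-package "go build -o" with a single "jiri go install" run under GOBIN=workDir compiles all packages in one invocation; the returned paths mirror the package base names. A usage sketch (the package paths are placeholders, not the real deviceBin/dmBins values):

	bins := buildV23Binaries("v.io/x/example/cmd/foo", "v.io/x/example/cmd/bar")
	// bins[0] == filepath.Join(workDir, "foo")
	// bins[1] == filepath.Join(workDir, "bar")
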
@@ -169,11 +166,19 @@
 
 // setupInstance creates a new VM instance and returns its name and IP address.
 func setupInstance(vmOptions interface{}) (backend.CloudVM, string, string) {
+	fmt.Println("Setting up instance ...")
 	currUser, err := user.Current()
 	dieIfErr(err, "Couldn't obtain current user")
 	instanceName := fmt.Sprintf("%s-%s", currUser.Username, time.Now().UTC().Format("20060102-150405"))
 	vm, err = backend.CreateCloudVM(instanceName, vmOptions)
 	dieIfErr(err, "VM Instance Creation Failed: %v", err)
+
+	// Make sure nothing is using the ports we plan to give to deviced.
+	// TODO(caprita): Don't hardcode the ports and all that.
+	if output, err := vm.RunCommand("!", "netstat", "-tulpn", "2>/dev/null", "|", "grep", "'LISTEN'", "|", "grep", "-E", "':8150 |:8160 |:8151 '"); err != nil {
+		die("device manager ports are already in use:\n" + string(output))
+	}
+
 	instanceIP := vm.IP()
 	// Install unzip so we can unpack the archive.
 	// TODO(caprita): Use tar instead.
@@ -185,7 +190,7 @@
 
 // installArchive ships the archive to the VM instance and unpacks it.
 func installArchive(archive, instance string) {
-	err := vm.CopyFile(archive, "/")
+	err := vm.CopyFile(archive, "")
 	dieIfErr(err, "Copying archive failed: %v", err)
 	output, err := vm.RunCommand("unzip", path.Join("./", filepath.Base(archive)), "-d", "./unpacked")
 	dieIfErr(err, "Extracting archive failed. Output:\n%v", string(output))
@@ -195,7 +200,7 @@
 // and pairing token needed for claiming.
 func installDevice(instance string) (string, string) {
 	fmt.Println("Installing device manager...")
-	defer fmt.Println("Done installing device manager...")
+	defer fmt.Println("Done installing device manager.")
 	output, err := vm.RunCommand("V23_DEVICE_DIR=`pwd`/dm", "./unpacked/devicex", "install", "./unpacked", "--single_user", "--", "--v23.tcp.address=:8151", "--deviced-port=8150", "--proxy-port=8160", "--use-pairing-token")
 	dieIfErr(err, "Installing device manager failed. Output:\n%v", string(output))
 	output, err = vm.RunCommand("V23_DEVICE_DIR=`pwd`/dm", "./unpacked/devicex", "start")
@@ -317,6 +322,15 @@
 	flag.StringVar(&awsImageID, "aws-image-id", "", "ID of AWS vm image to use")
 
 	flag.Parse()
+	if len(flag.Args()) == 0 {
+		die("Usage: %s [--ssh [user@]ip] [--sshoptions \"<options>\"]  <app> <arguments ... >\n", os.Args[0])
+	}
+	binPath := flag.Args()[0]
+	if fi, err := os.Stat(binPath); err != nil {
+		die("failed to stat %v: %v", binPath, err)
+	} else if fi.IsDir() {
+		die("%v is a directory", binPath)
+	}
 
 	var dbg backend.DebugPrinter = backend.NoopDebugPrinter{}
 	if debug {
@@ -327,7 +341,7 @@
 	switch {
 	default:
 		// Vcloud backend
-		vcloud = buildV23Binary(vcloudBin)
+		vcloud = buildV23Binaries(vcloudBin)[0]
 		vmOpts = backend.VcloudVMOptions{VcloudBinary: vcloud}
 
 	case sshTarget != "":
@@ -360,23 +374,19 @@
 		}
 	}
 
-	if len(flag.Args()) == 0 {
-		die("Usage: %s [--ssh [user@]ip] [--sshoptions \"<options>\"]  <app> <arguments ... >\n", os.Args[0])
-	}
-
 	return vmOpts
 }
 
 func main() {
-	vmOpts := handleFlags()
 	setupWorkDir()
 	cleanupOnDeath = func() {
 		os.RemoveAll(workDir)
 	}
 	defer os.RemoveAll(workDir)
-	device = buildV23Binary(deviceBin)
-	dmBins := buildDMBinaries()
-	archive := createArchive(append(dmBins, getPath(devicexRepo, devicex)))
+	vmOpts := handleFlags()
+	dmBinaries := buildV23Binaries(append([]string{deviceBin}, dmBins[:]...)...)
+	device, dmBinaries = dmBinaries[0], dmBinaries[1:]
+	archive := createArchive(append(dmBinaries, getPath(devicexRepo, devicex)))
 
 	vm, vmInstanceName, vmInstanceIP := setupInstance(vmOpts)
 	cleanupOnDeath = func() {
diff --git a/services/device/mgmt_v23_test.go b/services/device/mgmt_v23_test.go
index a2fefec..6652839 100644
--- a/services/device/mgmt_v23_test.go
+++ b/services/device/mgmt_v23_test.go
@@ -294,7 +294,7 @@
 	adminDeviceBin.Run("acl", "set", mtName+"/devmgr/device", "root/u/alice", "Read,Resolve,Write")
 
 	if withSuid {
-		adminDeviceBin.Start("associate", "add", mtName+"/devmgr/device", appUser, "root/u/alice")
+		adminDeviceBin.Run("associate", "add", mtName+"/devmgr/device", appUser, "root/u/alice")
 
 		aai := adminDeviceBin.Start("associate", "list", mtName+"/devmgr/device")
 		if got, expected := strings.Trim(aai.Output(), "\n "), "root/u/alice "+appUser; got != expected {
diff --git a/services/syncbase/clock/syncservice.go b/services/syncbase/clock/syncservice.go
index ece678a..bd6a90b 100644
--- a/services/syncbase/clock/syncservice.go
+++ b/services/syncbase/clock/syncservice.go
@@ -22,40 +22,43 @@
 //    the difference btw the two clocks is < 1 minute.
 // 4) num hops for peer's NTP sync data is < 2, i.e. either the peer has
 //    synced with NTP itself or it synced with another peer that did NTP.
-func (c *VClock) ProcessPeerClockData(tx store.Transaction, resp *PeerSyncData, localData *ClockData) error {
+// Returns true if the local clock was updated.
+func (c *VClock) ProcessPeerClockData(tx store.Transaction, resp *PeerSyncData, localData *ClockData) bool {
 	offset := (resp.RecvTs.Sub(resp.MySendTs) + resp.SendTs.Sub(resp.MyRecvTs)) / 2
 	vlog.VI(2).Infof("clock: ProcessPeerClockData: offset between two clocks: %v", offset)
 	if math.Abs(float64(offset.Nanoseconds())) <= util.PeerSyncDiffThreshold {
 		vlog.VI(2).Info("clock: ProcessPeerClockData: the two clocks are synced within PeerSyncDiffThreshold.")
-		return nil
+		return false
 	}
 	if resp.LastNtpTs == nil {
 		vlog.VI(2).Info("clock: ProcessPeerClockData: peer clock has not synced to NTP. Ignoring peer's clock.")
-		return nil
+		return false
 	}
 	if (localData != nil) && !isPeerNtpSyncMoreRecent(localData.LastNtpTs, resp.LastNtpTs) {
 		vlog.VI(2).Info("clock: ProcessPeerClockData: peer NTP sync is less recent than local.")
-		return nil
+		return false
 	}
 	if isOverRebootTolerance(offset, resp.NumReboots) {
 		vlog.VI(2).Info("clock: ProcessPeerClockData: peer clock is over reboot tolerance.")
-		return nil
+		return false
 	}
 	if resp.NumHops >= util.HopTolerance {
 		vlog.VI(2).Info("clock: ProcessPeerClockData: peer clock is over hop tolerance.")
-		return nil
+		return false
 	}
 	vlog.VI(2).Info("clock: ProcessPeerClockData: peer's clock is more accurate than local clock. Syncing to peer's clock.")
-	c.updateClockData(tx, resp, offset, localData)
-	return nil
+	return c.updateClockData(tx, resp, offset, localData)
 }
 
-func (c *VClock) updateClockData(tx store.Transaction, peerResp *PeerSyncData, offset time.Duration, localData *ClockData) {
+// updateClockData updates the clock data for the local clock based on the
+// peer clock's data. Returns true if the update succeeds.
+func (c *VClock) updateClockData(tx store.Transaction, peerResp *PeerSyncData, offset time.Duration, localData *ClockData) bool {
 	systemTime := c.SysClock.Now()
 	elapsedTime, err := c.SysClock.ElapsedTime()
 	if err != nil {
 		vlog.Errorf("clock: ProcessPeerClockData: error while fetching elapsed time: %v", err)
-		return
+		return false
 	}
 	systemTimeAtBoot := systemTime.Add(-elapsedTime)
 	var skew time.Duration
@@ -78,7 +81,9 @@
 	}
 	if err := c.SetClockData(tx, newClockData); err != nil {
 		vlog.Errorf("clock: ProcessPeerClockData: error while setting new clock data: %v", err)
+		return false
 	}
+	return true
 }
 
 func isPeerNtpSyncMoreRecent(localNtpTs, peerNtpTs *time.Time) bool {
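
The offset at the top of ProcessPeerClockData is the standard NTP-style two-way estimate: averaging the two one-way deltas cancels symmetric network delay. A worked example with illustrative timestamps (in seconds):

	MySendTs = 100.0  (local send,    local clock)
	RecvTs   = 107.0  (peer receive,  peer clock)
	SendTs   = 108.0  (peer reply,    peer clock)
	MyRecvTs = 102.0  (local receive, local clock)

	offset = ((107.0 - 100.0) + (108.0 - 102.0)) / 2 = (7 + 6) / 2 = +6.5

That is, the peer clock is estimated to be about 6.5s ahead; only offsets whose magnitude exceeds PeerSyncDiffThreshold proceed to the remaining checks.
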
diff --git a/services/syncbase/server/db_info_test.go b/services/syncbase/server/db_info_test.go
index 7bc0870..386cd4c 100644
--- a/services/syncbase/server/db_info_test.go
+++ b/services/syncbase/server/db_info_test.go
@@ -14,7 +14,7 @@
 		dbName  string
 		stKey   string
 	}{
-		{"app1", "db1", "$dbInfo:app1:db1"},
+		{"app1", "db1", "$dbInfo\xfeapp1\xfedb1"},
 	}
 	for _, test := range tests {
 		got, want := dbInfoStKey(&app{name: test.appName}, test.dbName), test.stKey
diff --git a/services/syncbase/server/nosql/database.go b/services/syncbase/server/nosql/database.go
index b1d4990..32740d7 100644
--- a/services/syncbase/server/nosql/database.go
+++ b/services/syncbase/server/nosql/database.go
@@ -15,8 +15,8 @@
 	"v.io/v23/rpc"
 	"v.io/v23/security/access"
 	wire "v.io/v23/services/syncbase/nosql"
-	"v.io/v23/syncbase/nosql/query"
-	"v.io/v23/syncbase/nosql/query/exec"
+	"v.io/v23/query/engine"
+	ds "v.io/v23/query/engine/datasource"
 	pubutil "v.io/v23/syncbase/util"
 	"v.io/v23/vdl"
 	"v.io/v23/verror"
@@ -265,7 +265,7 @@
 			req:  d,
 			sntx: sntx,
 		}
-		headers, rs, err := exec.Exec(db, q)
+		headers, rs, err := engine.Exec(db, q)
 		if err != nil {
 			return err
 		}
@@ -351,13 +351,13 @@
 			return nil, err
 		}
 		it := sntx.Scan(util.ScanPrefixArgs(util.TablePrefix, ""))
-		key := []byte{}
+		keyBytes := []byte{}
 		res := []string{}
 		for it.Advance() {
-			key = it.Key(key)
-			parts := util.SplitKeyParts(string(key))
+			keyBytes = it.Key(keyBytes)
+			parts := util.SplitNKeyParts(string(keyBytes), 2)
 			// For explanation of Escape(), see comment in server/nosql/dispatcher.go.
-			res = append(res, pubutil.Escape(parts[len(parts)-1]))
+			res = append(res, pubutil.Escape(parts[1]))
 		}
 		if err := it.Err(); err != nil {
 			return nil, err
@@ -439,7 +439,7 @@
 ////////////////////////////////////////
 // query interface implementations
 
-// queryDb implements query.Database.
+// queryDb implements ds.Database.
 type queryDb struct {
 	ctx  *context.T
 	call wire.DatabaseExecServerCall
@@ -451,7 +451,7 @@
 	return db.ctx
 }
 
-func (db *queryDb) GetTable(name string) (query.Table, error) {
+func (db *queryDb) GetTable(name string) (ds.Table, error) {
 	tDb := &tableDb{
 		qdb: db,
 		req: &tableReq{
@@ -466,13 +466,13 @@
 	return tDb, nil
 }
 
-// tableDb implements query.Table.
+// tableDb implements ds.Table.
 type tableDb struct {
 	qdb *queryDb
 	req *tableReq
 }
 
-func (t *tableDb) Scan(keyRanges query.KeyRanges) (query.KeyValueStream, error) {
+func (t *tableDb) Scan(keyRanges ds.KeyRanges) (ds.KeyValueStream, error) {
 	streams := []store.Stream{}
 	for _, keyRange := range keyRanges {
 		// TODO(jkline): For now, acquire all of the streams at once to minimize the
@@ -489,7 +489,7 @@
 	}, nil
 }
 
-// kvs implements query.KeyValueStream.
+// kvs implements ds.KeyValueStream.
 type kvs struct {
 	t         *tableDb
 	curr      int
@@ -508,9 +508,9 @@
 		if s.it[s.curr].Advance() {
 			// key
 			keyBytes := s.it[s.curr].Key(nil)
-			parts := util.SplitKeyParts(string(keyBytes))
+			parts := util.SplitNKeyParts(string(keyBytes), 3)
 			// TODO(rogulenko): Check access for the key.
-			s.currKey = parts[len(parts)-1]
+			s.currKey = parts[2]
 			// value
 			valueBytes := s.it[s.curr].Value(nil)
 			var currValue *vdl.Value
diff --git a/services/syncbase/server/nosql/database_watch.go b/services/syncbase/server/nosql/database_watch.go
index e9e1921..b0f61a5 100644
--- a/services/syncbase/server/nosql/database_watch.go
+++ b/services/syncbase/server/nosql/database_watch.go
@@ -170,13 +170,12 @@
 		default:
 			continue
 		}
-		parts := util.SplitKeyParts(opKey)
 		// TODO(rogulenko): Currently we only process rows, i.e. keys of the form
-		// <rowPrefix>:xxx:yyy. Consider processing other keys.
-		if len(parts) != 3 || parts[0] != util.RowPrefix {
+		// <RowPrefix><KeyPartSep>xxx<KeyPartSep>yyy. Consider processing other keys.
+		if !util.IsRowKey(opKey) {
 			continue
 		}
-		table, row := parts[1], parts[2]
+		table, row := util.ParseTableAndRowOrDie(opKey)
 		// Filter out unnecessary rows and rows that we can't access.
 		if table != t.name || !strings.HasPrefix(row, prefix) {
 			continue
diff --git a/services/syncbase/server/nosql/row.go b/services/syncbase/server/nosql/row.go
index 97cfc98..e95746e 100644
--- a/services/syncbase/server/nosql/row.go
+++ b/services/syncbase/server/nosql/row.go
@@ -126,6 +126,8 @@
 	if err != nil {
 		return err
 	}
+	// TODO(rogulenko): Avoid the redundant lookups since in theory we have all
+	// we need from the checkAccess.
 	permsKey := r.t.prefixPermsKey(permsPrefix)
 	if err := watchable.PutWithPerms(tx, []byte(r.stKey()), value, permsKey); err != nil {
 		return verror.New(verror.ErrInternal, ctx, err)
@@ -140,6 +142,8 @@
 	if err != nil {
 		return err
 	}
+	// TODO(rogulenko): Avoid the redundant lookups since in theory we have all
+	// we need from the checkAccess.
 	permsKey := r.t.prefixPermsKey(permsPrefix)
 	if err := watchable.DeleteWithPerms(tx, []byte(r.stKey()), permsKey); err != nil {
 		return verror.New(verror.ErrInternal, ctx, err)
diff --git a/services/syncbase/server/nosql/table.go b/services/syncbase/server/nosql/table.go
index a43dfe3..34a9407 100644
--- a/services/syncbase/server/nosql/table.go
+++ b/services/syncbase/server/nosql/table.go
@@ -166,8 +166,9 @@
 		for it.Advance() {
 			key = it.Key(key)
 			// Check perms.
-			parts := util.SplitKeyParts(string(key))
-			externalKey := parts[len(parts)-1]
+			// See comment in util/constants.go for why we use SplitNKeyParts.
+			parts := util.SplitNKeyParts(string(key), 3)
+			externalKey := parts[2]
 			permsPrefix, err := t.checkAccess(ctx, call, tx, externalKey)
 			if err != nil {
 				// TODO(rogulenko): Revisit this behavior. Probably we should
@@ -211,8 +212,9 @@
 		for it.Advance() {
 			key, value = it.Key(key), it.Value(value)
 			// Check perms.
-			parts := util.SplitKeyParts(string(key))
-			externalKey := parts[len(parts)-1]
+			// See comment in util/constants.go for why we use SplitNKeyParts.
+			parts := util.SplitNKeyParts(string(key), 3)
+			externalKey := parts[2]
 			if _, err := t.checkAccess(ctx, call, sntx, externalKey); err != nil {
 				it.Cancel()
 				return err
@@ -413,7 +415,7 @@
 	if err := t.UpdatePrefixPermsIndexForSet(ctx, tx, key); err != nil {
 		return err
 	}
-	return watchable.PutVOMWithPerms(ctx, tx, t.prefixPermsKey(key), perms, t.prefixPermsKey(parent))
+	return watchable.PutVomWithPerms(ctx, tx, t.prefixPermsKey(key), perms, t.prefixPermsKey(parent))
 }
 
 func (t *tableReq) deletePrefixPerms(ctx *context.T, tx store.Transaction, key string) error {
@@ -534,8 +536,10 @@
 		return "", "", nil
 	}
 	defer it.Cancel()
-	parts := util.SplitKeyParts(string(it.Key(nil)))
-	prefix = strings.TrimSuffix(parts[len(parts)-1], util.PrefixRangeLimitSuffix)
+	// See comment in util/constants.go for why we use SplitNKeyParts.
+	parts := util.SplitNKeyParts(string(it.Key(nil)), 3)
+	externalKey := parts[2]
+	prefix = strings.TrimSuffix(externalKey, util.PrefixRangeLimitSuffix)
 	value := it.Value(nil)
 	if err = vom.Decode(value, &parent); err != nil {
 		return "", "", verror.New(verror.ErrInternal, ctx, err)
diff --git a/services/syncbase/server/util/constants.go b/services/syncbase/server/util/constants.go
index fa89fa8..272233e 100644
--- a/services/syncbase/server/util/constants.go
+++ b/services/syncbase/server/util/constants.go
@@ -9,6 +9,9 @@
 )
 
 // Constants related to storage engine keys.
+// Note, these are persisted and therefore must not be modified.
+// TODO(sadovsky): Use one-byte strings. Changing these prefixes breaks various
+// tests. Tests generally shouldn't depend on the values of these constants.
 const (
 	AppPrefix        = "$app"
 	ClockPrefix      = "$clock"
@@ -23,28 +26,9 @@
 	TablePrefix      = "$table"
 	VersionPrefix    = "$version"
 
-	// Note, these are persisted and therefore must not be modified.
-	// Below, they are ordered lexicographically by value.
-	// TODO(sadovsky): Changing these prefixes breaks various tests. Tests
-	// generally shouldn't depend on the values of these constants.
-	/*
-		AppPrefix      = "a"
-		ClockPrefix    = "c"
-		DatabasePrefix = "d"
-		DbInfoPrefix   = "i"
-		LogPrefix      = "l"
-		PermsPrefix    = "p"
-		RowPrefix      = "r"
-		ServicePrefix  = "s"
-		TablePrefix    = "t"
-		VersionPrefix  = "v"
-		SyncPrefix     = "y"
-	*/
-
-	// Separator for parts of storage engine keys.
-	// TODO(sadovsky): Switch to \xff or \x00, both of which are disallowed in
-	// client-specified names and keys.
-	KeyPartSep = ":"
+	// KeyPartSep is a separator for parts of storage engine keys, e.g. separating
+	// table name from row key.
+	KeyPartSep = "\xfe"
 
 	// PrefixRangeLimitSuffix is a key suffix that indicates the end of a prefix
 	// range. Must be greater than any character allowed in client-specified keys.
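
Note on the new separator: "\xfe" is safe because client-supplied row keys reject bytes "\xfc" through "\xff" (see the testutil changes below), so the separator can never occur inside a key part, and "\xff" remains strictly greater than any legal key byte for use as a range limit. A layout sketch (values illustrative):

	util.JoinKeyParts(util.RowPrefix, "mytable", "mykey")
	// -> "$row\xfemytable\xfemykey"
	util.ScanPrefixArgs("$row\xfemytable", "")
	// -> start: "$row\xfemytable\xfe", limit: "$row\xfemytable\xff"
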
diff --git a/services/syncbase/server/util/key_util.go b/services/syncbase/server/util/key_util.go
index e1b9272..c5736ec 100644
--- a/services/syncbase/server/util/key_util.go
+++ b/services/syncbase/server/util/key_util.go
@@ -5,6 +5,7 @@
 package util
 
 import (
+	"fmt"
 	"strconv"
 	"strings"
 
@@ -18,22 +19,65 @@
 	return strings.Join(parts, KeyPartSep)
 }
 
-// SplitKeyParts is the inverse of JoinKeyParts.
+// SplitKeyParts is the inverse of JoinKeyParts. Clients are generally
+// encouraged to use SplitNKeyParts.
 func SplitKeyParts(key string) []string {
 	return strings.Split(key, KeyPartSep)
 }
 
-// StripFirstPartOrDie strips off the first part of the given key. Typically
+// SplitNKeyParts is to SplitKeyParts as strings.SplitN is to strings.Split.
+func SplitNKeyParts(key string, n int) []string {
+	return strings.SplitN(key, KeyPartSep, n)
+}
+
+// StripFirstKeyPartOrDie strips off the first part of the given key. Typically
 // used to strip off the key prefixes defined in constants.go. Panics if the
 // input string has fewer than two parts.
-func StripFirstPartOrDie(key string) string {
-	parts := strings.SplitN(key, KeyPartSep, 2)
+func StripFirstKeyPartOrDie(key string) string {
+	parts := SplitNKeyParts(key, 2)
 	if len(parts) < 2 {
-		vlog.Fatalf("StripFirstPartOrDie: invalid key: %q", key)
+		vlog.Fatalf("StripFirstKeyPartOrDie: invalid key %q", key)
 	}
 	return parts[1]
 }
 
+// FirstKeyPart returns the first part of 'key', typically a key prefix defined
+// in constants.go.
+func FirstKeyPart(key string) string {
+	return SplitNKeyParts(key, 2)[0]
+}
+
+// IsRowKey returns true iff 'key' is a storage engine key for a row.
+func IsRowKey(key string) bool {
+	return FirstKeyPart(key) == RowPrefix
+}
+
+// IsPermsKey returns true iff 'key' is a storage engine key for perms.
+func IsPermsKey(key string) bool {
+	return FirstKeyPart(key) == PermsPrefix
+}
+
+// ParseTableAndRow extracts table and row parts from the given storage engine
+// key for a row or perms. Returns an error if the given key is not a storage
+// engine key for a row or perms.
+func ParseTableAndRow(key string) (table string, row string, err error) {
+	parts := SplitNKeyParts(key, 3)
+	pfx := parts[0]
+	if len(parts) < 3 || (pfx != RowPrefix && pfx != PermsPrefix) {
+		return "", "", fmt.Errorf("ParseTableAndRow: invalid key %q", key)
+	}
+	return parts[1], parts[2], nil
+}
+
+// ParseTableAndRowOrDie calls ParseTableAndRow and panics on error.
+func ParseTableAndRowOrDie(key string) (table string, row string) {
+	table, row, err := ParseTableAndRow(key)
+	if err != nil {
+		vlog.Fatal(err)
+	}
+	return table, row
+}
+
 // ScanPrefixArgs returns args for sn.Scan() for the specified prefix.
 func ScanPrefixArgs(stKeyPrefix, prefix string) ([]byte, []byte) {
 	return ScanRangeArgs(stKeyPrefix, util.PrefixRangeStart(prefix), util.PrefixRangeLimit(prefix))
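
Quick reference for the new helpers (keys illustrative; SplitNKeyParts follows strings.SplitN, so a 3-way split leaves any later separators intact in the final part):

	util.FirstKeyPart("$row\xfetb\xferow")          // "$row"
	util.SplitNKeyParts("$row\xfetb\xferow", 3)     // ["$row" "tb" "row"]
	util.ParseTableAndRowOrDie("$row\xfetb\xferow") // ("tb", "row")
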
diff --git a/services/syncbase/server/util/key_util_test.go b/services/syncbase/server/util/key_util_test.go
index 69db9fa..c8b6a43 100644
--- a/services/syncbase/server/util/key_util_test.go
+++ b/services/syncbase/server/util/key_util_test.go
@@ -11,15 +11,13 @@
 	"v.io/x/ref/services/syncbase/server/util"
 )
 
-type kpt struct {
+var keyPartTests = []struct {
 	parts []string
 	key   string
-}
-
-var keyPartTests []kpt = []kpt{
-	{[]string{"a", "b"}, "a:b"},
-	{[]string{"aa", "bb"}, "aa:bb"},
-	{[]string{"a", "b", "c"}, "a:b:c"},
+}{
+	{[]string{"a", "b"}, "a\xfeb"},
+	{[]string{"aa", "bb"}, "aa\xfebb"},
+	{[]string{"a", "b", "c"}, "a\xfeb\xfec"},
 }
 
 func TestJoinKeyParts(t *testing.T) {
@@ -40,13 +38,157 @@
 	}
 }
 
+func TestSplitNKeyParts(t *testing.T) {
+	for _, test := range keyPartTests {
+		got, want := util.SplitNKeyParts(test.key, 1), []string{test.key}
+		if !reflect.DeepEqual(got, want) {
+			t.Errorf("%q: got %v, want %v", test.key, got, want)
+		}
+	}
+	for _, test := range keyPartTests {
+		// Note, all test cases in keyPartTests have <= 3 parts.
+		got, want := util.SplitNKeyParts(test.key, 3), test.parts
+		if !reflect.DeepEqual(got, want) {
+			t.Errorf("%q: got %v, want %v", test.key, got, want)
+		}
+	}
+}
+
+func TestStripFirstKeyPartOrDie(t *testing.T) {
+	tests := []struct {
+		in  string
+		out string
+	}{
+		{"a\xfe", ""},
+		{"a\xfeb", "b"},
+		{"a\xfe\xfe", "\xfe"},
+		{"a\xfeb\xfe", "b\xfe"},
+		{"a\xfeb\xfec", "b\xfec"},
+	}
+	for _, test := range tests {
+		got, want := util.StripFirstKeyPartOrDie(test.in), test.out
+		if !reflect.DeepEqual(got, want) {
+			t.Errorf("%q: got %v, want %v", test.in, got, want)
+		}
+	}
+}
+
+func TestFirstKeyPart(t *testing.T) {
+	tests := []struct {
+		in  string
+		out string
+	}{
+		{"", ""},
+		{"a", "a"},
+		{"a\xfe", "a"},
+		{"a\xfeb", "a"},
+		{"\xfe", ""},
+		{"\xfeb", ""},
+	}
+	for _, test := range tests {
+		got, want := util.FirstKeyPart(test.in), test.out
+		if !reflect.DeepEqual(got, want) {
+			t.Errorf("%q: got %v, want %v", test.in, got, want)
+		}
+	}
+}
+
+func TestIsRowKey(t *testing.T) {
+	tests := []struct {
+		in  string
+		out bool
+	}{
+		{"", false},
+		{"a", false},
+		{"a\xfe", false},
+		{"a\xfeb", false},
+		{util.RowPrefix, true},
+		{util.RowPrefix + "\xfe", true},
+		{util.RowPrefix + "\xfeb", true},
+		{util.PermsPrefix, false},
+		{util.PermsPrefix + "\xfe", false},
+		{util.PermsPrefix + "\xfeb", false},
+	}
+	for _, test := range tests {
+		got, want := util.IsRowKey(test.in), test.out
+		if !reflect.DeepEqual(got, want) {
+			t.Errorf("%q: got %v, want %v", test.in, got, want)
+		}
+	}
+}
+
+func TestIsPermsKey(t *testing.T) {
+	tests := []struct {
+		in  string
+		out bool
+	}{
+		{"", false},
+		{"a", false},
+		{"a\xfe", false},
+		{"a\xfeb", false},
+		{util.RowPrefix, false},
+		{util.RowPrefix + "\xfe", false},
+		{util.RowPrefix + "\xfeb", false},
+		{util.PermsPrefix, true},
+		{util.PermsPrefix + "\xfe", true},
+		{util.PermsPrefix + "\xfeb", true},
+	}
+	for _, test := range tests {
+		got, want := util.IsPermsKey(test.in), test.out
+		if !reflect.DeepEqual(got, want) {
+			t.Errorf("%q: got %v, want %v", test.in, got, want)
+		}
+	}
+}
+
+func TestParseTableAndRow(t *testing.T) {
+	tests := []struct {
+		key   string
+		table string
+		row   string
+		err   bool
+	}{
+		{util.RowPrefix + "\xfetb\xferow", "tb", "row", false},
+		{util.RowPrefix + "\xfetb\xfe", "tb", "", false},
+		{util.RowPrefix + "\xfe\xferow", "", "row", false},
+		{util.RowPrefix + "\xfe\xfe", "", "", false},
+		{util.PermsPrefix + "\xfetb\xferow", "tb", "row", false},
+		{util.PermsPrefix + "\xfetb\xfe", "tb", "", false},
+		{util.PermsPrefix + "\xfe\xferow", "", "row", false},
+		{util.PermsPrefix + "\xfe\xfe", "", "", false},
+		{"pfx\xfetb\xferow", "", "", true},
+		{"pfx\xfetb\xfe", "", "", true},
+		{"pfx\xfe\xferow", "", "", true},
+		{"pfx\xfe\xfe", "", "", true},
+		{"\xfetb\xferow", "", "", true},
+		{"\xfetb\xfe", "", "", true},
+		{"\xfe\xferow", "", "", true},
+		{"\xfe\xfe", "", "", true},
+		{util.RowPrefix, "", "", true},
+		{util.RowPrefix + "\xfetb", "", "", true},
+		{util.RowPrefix + "\xfe", "", "", true},
+	}
+	for _, test := range tests {
+		table, row, err := util.ParseTableAndRow(test.key)
+		if !reflect.DeepEqual(table, test.table) {
+			t.Errorf("%q: got %v, want %v", test.key, table, test.table)
+		}
+		if !reflect.DeepEqual(row, test.row) {
+			t.Errorf("%q: got %v, want %v", test.key, row, test.row)
+		}
+		if !reflect.DeepEqual(err != nil, test.err) {
+			t.Errorf("%q: got %v, want %v", test.key, err != nil, test.err)
+		}
+	}
+}
+
 func TestScanPrefixArgs(t *testing.T) {
 	tests := []struct {
 		stKeyPrefix, prefix, wantStart, wantLimit string
 	}{
-		{"x", "", "x:", "x;"},
-		{"x", "a", "x:a", "x:b"},
-		{"x", "a\xff", "x:a\xff", "x:b"},
+		{"x", "", "x\xfe", "x\xff"},
+		{"x", "a", "x\xfea", "x\xfeb"},
+		{"x", "a\xfe", "x\xfea\xfe", "x\xfea\xff"},
 	}
 	for _, test := range tests {
 		start, limit := util.ScanPrefixArgs(test.stKeyPrefix, test.prefix)
@@ -64,11 +206,11 @@
 	tests := []struct {
 		stKeyPrefix, start, limit, wantStart, wantLimit string
 	}{
-		{"x", "", "", "x:", "x;"},   // limit "" means "no limit"
-		{"x", "a", "", "x:a", "x;"}, // limit "" means "no limit"
-		{"x", "a", "b", "x:a", "x:b"},
-		{"x", "a", "a", "x:a", "x:a"}, // empty range
-		{"x", "b", "a", "x:b", "x:a"}, // empty range
+		{"x", "", "", "x\xfe", "x\xff"},   // limit "" means "no limit"
+		{"x", "a", "", "x\xfea", "x\xff"}, // limit "" means "no limit"
+		{"x", "a", "b", "x\xfea", "x\xfeb"},
+		{"x", "a", "a", "x\xfea", "x\xfea"}, // empty range
+		{"x", "b", "a", "x\xfeb", "x\xfea"}, // empty range
 	}
 	for _, test := range tests {
 		start, limit := util.ScanRangeArgs(test.stKeyPrefix, test.start, test.limit)
diff --git a/services/syncbase/server/watchable/stream.go b/services/syncbase/server/watchable/stream.go
index 3448c02..b6ba752 100644
--- a/services/syncbase/server/watchable/stream.go
+++ b/services/syncbase/server/watchable/stream.go
@@ -7,6 +7,7 @@
 import (
 	"sync"
 
+	"v.io/x/ref/services/syncbase/server/util"
 	"v.io/x/ref/services/syncbase/store"
 )
 
@@ -44,7 +45,7 @@
 		return false
 	}
 	versionKey, version := s.iit.Key(nil), s.iit.Value(nil)
-	s.key = []byte(join(split(string(versionKey))[1:]...)) // drop "$version" prefix
+	s.key = []byte(util.StripFirstKeyPartOrDie(string(versionKey))) // drop "$version" prefix
 	s.value, s.err = s.sntx.Get(makeAtVersionKey(s.key, version), nil)
 	if s.err != nil {
 		return false
diff --git a/services/syncbase/server/watchable/transaction.go b/services/syncbase/server/watchable/transaction.go
index 49092dd..fdabb29 100644
--- a/services/syncbase/server/watchable/transaction.go
+++ b/services/syncbase/server/watchable/transaction.go
@@ -349,10 +349,10 @@
 	return nil
 }
 
-// PutWithPerms puts a VOM-encoded value for the managed key, recording the key
-// and version of the prefix permissions object that granted access to this put
-// operation.
-func PutVOMWithPerms(ctx *context.T, tx store.Transaction, k string, v interface{}, permsKey string) error {
+// PutVomWithPerms puts a VOM-encoded value for the managed key, recording
+// the key and the version of the prefix permissions object that granted access
+// to this put operation.
+func PutVomWithPerms(ctx *context.T, tx store.Transaction, k string, v interface{}, permsKey string) error {
 	bytes, err := vom.Encode(v)
 	if err != nil {
 		return verror.New(verror.ErrInternal, ctx, err)
diff --git a/services/syncbase/server/watchable/util.go b/services/syncbase/server/watchable/util.go
index b49530e..4661978 100644
--- a/services/syncbase/server/watchable/util.go
+++ b/services/syncbase/server/watchable/util.go
@@ -84,10 +84,6 @@
 	return util.JoinKeyParts(parts...)
 }
 
-func split(key string) []string {
-	return util.SplitKeyParts(key)
-}
-
 func convertError(err error) error {
 	return verror.Convert(verror.IDAction{}, nil, err)
 }
diff --git a/services/syncbase/server/watchable/watcher.go b/services/syncbase/server/watchable/watcher.go
index b78f86a..ab24d81 100644
--- a/services/syncbase/server/watchable/watcher.go
+++ b/services/syncbase/server/watchable/watcher.go
@@ -146,7 +146,7 @@
 }
 
 func parseResumeMarker(resumeMarker string) (uint64, error) {
-	parts := split(resumeMarker)
+	parts := util.SplitNKeyParts(resumeMarker, 2)
 	if len(parts) != 2 {
 		return 0, verror.New(watch.ErrUnknownResumeMarker, nil, resumeMarker)
 	}
diff --git a/services/syncbase/testutil/constants.go b/services/syncbase/testutil/constants.go
index 3f65999..57be396 100644
--- a/services/syncbase/testutil/constants.go
+++ b/services/syncbase/testutil/constants.go
@@ -7,7 +7,13 @@
 var invalidIdentifiers []string = []string{
 	"/",
 	"a/b",
+	":",
+	"a:b",
 	"*",
+	"\x00",
+	"\x01",
+	"\xfa",
+	"\xfb",
 	"@@",
 	"dev.v.io/a/admin@myapp.com",
 	"안녕하세요",
@@ -27,11 +33,13 @@
 
 var NotOkAppRowNames []string = []string{
 	"",
-	":",
-	"\x00",
+	"\xfc",
+	"\xfd",
+	"\xfe",
 	"\xff",
-	"a:b",
-	"a\x00b",
+	"a\xfcb",
+	"a\xfdb",
+	"a\xfeb",
 	"a\xffb",
 }
 
diff --git a/services/syncbase/vsync/clock.go b/services/syncbase/vsync/clock.go
index cddd285..1ac9ebf 100644
--- a/services/syncbase/vsync/clock.go
+++ b/services/syncbase/vsync/clock.go
@@ -157,9 +157,13 @@
 		}
 
 		vlog.VI(4).Infof("sync: syncClock: connection established on %s", absPeerName)
-		vclock.ProcessPeerClockData(tx, toPeerSyncData(&timeResp, recvTs), localData)
-		if commitErr := tx.Commit(); commitErr != nil {
-			vlog.Errorf("sync: syncClock: error while commiting tx: %v", commitErr)
+		updated := vclock.ProcessPeerClockData(tx, toPeerSyncData(&timeResp, recvTs), localData)
+		if updated {
+			if commitErr := tx.Commit(); commitErr != nil {
+				vlog.VI(2).Infof("sync: syncClock: error while committing tx: %v", commitErr)
+			}
+		} else {
+			tx.Abort()
 		}
 	} else if (verror.ErrorID(reqErr) == verror.ErrNoExist.ID) || (verror.ErrorID(reqErr) == verror.ErrInternal.ID) {
 		vlog.Errorf("sync: syncClock: error returned by peer %s: %v", absPeerName, err)
diff --git a/services/syncbase/vsync/cr_app_resolves.go b/services/syncbase/vsync/cr_app_resolves.go
index 89f6c0e..640dc00 100644
--- a/services/syncbase/vsync/cr_app_resolves.go
+++ b/services/syncbase/vsync/cr_app_resolves.go
@@ -13,6 +13,7 @@
 	"v.io/v23/verror"
 	"v.io/x/lib/vlog"
 	"v.io/x/ref/services/syncbase/server/interfaces"
+	"v.io/x/ref/services/syncbase/server/util"
 	"v.io/x/ref/services/syncbase/server/watchable"
 	"v.io/x/ref/services/syncbase/store"
 )
@@ -295,7 +296,7 @@
 
 func createRowConflictInfo(ctx *context.T, iSt *initiationState, oid string, batches []uint64, contd bool) *wire.ConflictInfo {
 	op := wire.RowOp{}
-	op.Key = extractAppKey(oid)
+	op.Key = util.StripFirstKeyPartOrDie(oid)
 	objSt := iSt.updObjects[oid]
 	ancestorVer := objSt.ancestor
 	if ancestorVer != NoVersion {
diff --git a/services/syncbase/vsync/cr_app_resolves_test.go b/services/syncbase/vsync/cr_app_resolves_test.go
index 3b3c846..640c96a 100644
--- a/services/syncbase/vsync/cr_app_resolves_test.go
+++ b/services/syncbase/vsync/cr_app_resolves_test.go
@@ -11,6 +11,7 @@
 
 	wire "v.io/v23/services/syncbase/nosql"
 	"v.io/x/ref/services/syncbase/server/interfaces"
+	"v.io/x/ref/services/syncbase/server/util"
 	"v.io/x/ref/services/syncbase/server/watchable"
 	"v.io/x/ref/services/syncbase/store"
 )
@@ -379,7 +380,7 @@
 		}
 	}
 	rInfo := wire.ResolutionInfo{
-		Key:       extractAppKey(oid),
+		Key:       util.StripFirstKeyPartOrDie(oid),
 		Selection: sel,
 		Result:    valRes,
 		Continued: cntd,
diff --git a/services/syncbase/vsync/cr_policy_filter.go b/services/syncbase/vsync/cr_policy_filter.go
index 139a5a4..f8a7a2f 100644
--- a/services/syncbase/vsync/cr_policy_filter.go
+++ b/services/syncbase/vsync/cr_policy_filter.go
@@ -10,6 +10,7 @@
 	"v.io/v23/context"
 	wire "v.io/v23/services/syncbase/nosql"
 	"v.io/x/lib/vlog"
+	"v.io/x/ref/services/syncbase/server/util"
 )
 
 // getDbSchema returns the SchemaMetadata for the db.
@@ -44,7 +45,7 @@
 //  2. Else, if only one match specifies a type, take that one.
 //  3. Else, the two matches are identical; take the last one in the Rules array.
 func getResolutionType(oid string, schema *wire.SchemaMetadata) wire.ResolverType {
-	if !isRowKey(oid) {
+	if !util.IsRowKey(oid) {
 		// This is a perms object key. Handle perms using LastWins policy till a
 		// better policy is available.
 		return wire.ResolverTypeLastWins
@@ -69,7 +70,7 @@
 // applies to the given oid.
 // TODO(jlodhia): Implement Type based matching.
 func isRuleApplicable(oid string, rule *wire.CrRule) bool {
-	tableName, rowKey := extractComponentsFromKey(oid)
+	tableName, rowKey := util.ParseTableAndRowOrDie(oid)
 	if rule.TableName != "" && tableName != rule.TableName {
 		return false
 	}
diff --git a/services/syncbase/vsync/dag_test.go b/services/syncbase/vsync/dag_test.go
index fedf762..8ec653b 100644
--- a/services/syncbase/vsync/dag_test.go
+++ b/services/syncbase/vsync/dag_test.go
@@ -108,7 +108,7 @@
 	st := svc.St()
 	s := svc.sync
 
-	oid, version := "tb:foo1", "7"
+	oid, version := "tb\xfefoo1", "7"
 
 	tx := st.NewTransaction()
 	if err := s.addParent(nil, tx, oid, version, "haha", nil); err == nil {
@@ -236,7 +236,7 @@
 	st := svc.St()
 	s := svc.sync
 
-	oid := "tb:foo1"
+	oid := "tb\xfefoo1"
 
 	if _, err := s.dagReplayCommands(nil, "local-init-00.log.sync"); err != nil {
 		t.Fatal(err)
@@ -258,21 +258,21 @@
 	tx := st.NewTransaction()
 
 	// Make sure a new node cannot have more than 2 parents.
-	if err := s.addNode(nil, tx, oid, "4", "tb:foo", false, []string{"1", "2", "3"}, NoBatchId, nil); err == nil {
+	if err := s.addNode(nil, tx, oid, "4", "tb\xfefoo", false, []string{"1", "2", "3"}, NoBatchId, nil); err == nil {
 		t.Errorf("addNode() did not fail when given 3 parents")
 	}
 
 	// Make sure a new node cannot have an invalid parent.
-	if err := s.addNode(nil, tx, oid, "4", "tb:foo", false, []string{"1", "555"}, NoBatchId, nil); err == nil {
+	if err := s.addNode(nil, tx, oid, "4", "tb\xfefoo", false, []string{"1", "555"}, NoBatchId, nil); err == nil {
 		t.Errorf("addNode() did not fail when using an invalid parent")
 	}
 
 	// Make sure a new root node (no parents) can be added once a root exists.
 	// For the parents array, check both the "nil" and the empty array as input.
-	if err := s.addNode(nil, tx, oid, "6789", "tb:foo", false, nil, NoBatchId, nil); err != nil {
+	if err := s.addNode(nil, tx, oid, "6789", "tb\xfefoo", false, nil, NoBatchId, nil); err != nil {
 		t.Errorf("cannot add another root node (nil parents) for object %s: %v", oid, err)
 	}
-	if err := s.addNode(nil, tx, oid, "9999", "tb:foo", false, []string{}, NoBatchId, nil); err != nil {
+	if err := s.addNode(nil, tx, oid, "9999", "tb\xfefoo", false, []string{}, NoBatchId, nil); err != nil {
 		t.Errorf("cannot add another root node (empty parents) for object %s: %v", oid, err)
 	}
 
@@ -289,7 +289,7 @@
 	st := svc.St()
 	s := svc.sync
 
-	oid := "tb:foo1"
+	oid := "tb\xfefoo1"
 
 	graft, err := s.dagReplayCommands(nil, "remote-init-00.log.sync")
 	if err != nil {
@@ -331,7 +331,7 @@
 			oid, isConflict, newHead, oldHead, ancestor, errConflict)
 	}
 
-	if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "$sync:log:data:11:3" {
+	if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe11\xfe3" {
 		t.Errorf("invalid logrec for newhead object %s:%s: %v", oid, newHead, logrec)
 	}
 
@@ -363,7 +363,7 @@
 	st := svc.St()
 	s := svc.sync
 
-	oid := "tb:foo1"
+	oid := "tb\xfefoo1"
 
 	if _, err := s.dagReplayCommands(nil, "local-init-00.log.sync"); err != nil {
 		t.Fatal(err)
@@ -407,10 +407,10 @@
 			oid, isConflict, newHead, oldHead, ancestor, errConflict)
 	}
 
-	if logrec, err := getLogRecKey(nil, st, oid, oldHead); err != nil || logrec != "$sync:log:data:10:3" {
+	if logrec, err := getLogRecKey(nil, st, oid, oldHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe10\xfe3" {
 		t.Errorf("invalid logrec for oldhead object %s:%s: %v", oid, oldHead, logrec)
 	}
-	if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "$sync:log:data:11:3" {
+	if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe11\xfe3" {
 		t.Errorf("invalid logrec for newhead object %s:%s: %v", oid, newHead, logrec)
 	}
 
@@ -446,7 +446,7 @@
 	st := svc.St()
 	s := svc.sync
 
-	oid := "tb:foo1"
+	oid := "tb\xfefoo1"
 
 	if _, err := s.dagReplayCommands(nil, "local-init-00.log.sync"); err != nil {
 		t.Fatal(err)
@@ -490,13 +490,13 @@
 			oid, isConflict, newHead, oldHead, ancestor, errConflict)
 	}
 
-	if logrec, err := getLogRecKey(nil, st, oid, oldHead); err != nil || logrec != "$sync:log:data:10:3" {
+	if logrec, err := getLogRecKey(nil, st, oid, oldHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe10\xfe3" {
 		t.Errorf("invalid logrec for oldhead object %s:%s: %s", oid, oldHead, logrec)
 	}
-	if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "$sync:log:data:11:3" {
+	if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe11\xfe3" {
 		t.Errorf("invalid logrec for newhead object %s:%s: %s", oid, newHead, logrec)
 	}
-	if logrec, err := getLogRecKey(nil, st, oid, ancestor); err != nil || logrec != "$sync:log:data:10:2" {
+	if logrec, err := getLogRecKey(nil, st, oid, ancestor); err != nil || logrec != "$sync\xfelog\xfedata\xfe10\xfe2" {
 		t.Errorf("invalid logrec for ancestor object %s:%s: %s", oid, ancestor, logrec)
 	}
 
@@ -539,7 +539,7 @@
 	st := svc.St()
 	s := svc.sync
 
-	oid := "tb:foo1"
+	oid := "tb\xfefoo1"
 
 	if _, err := s.dagReplayCommands(nil, "local-init-00.log.sync"); err != nil {
 		t.Fatal(err)
@@ -583,13 +583,13 @@
 			oid, isConflict, newHead, oldHead, ancestor, errConflict)
 	}
 
-	if logrec, err := getLogRecKey(nil, st, oid, oldHead); err != nil || logrec != "$sync:log:data:10:3" {
+	if logrec, err := getLogRecKey(nil, st, oid, oldHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe10\xfe3" {
 		t.Errorf("invalid logrec for oldhead object %s:%s: %s", oid, oldHead, logrec)
 	}
-	if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "$sync:log:data:11:2" {
+	if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe11\xfe2" {
 		t.Errorf("invalid logrec for newhead object %s:%s: %s", oid, newHead, logrec)
 	}
-	if logrec, err := getLogRecKey(nil, st, oid, ancestor); err != nil || logrec != "$sync:log:data:10:2" {
+	if logrec, err := getLogRecKey(nil, st, oid, ancestor); err != nil || logrec != "$sync\xfelog\xfedata\xfe10\xfe2" {
 		t.Errorf("invalid logrec for ancestor object %s:%s: %s", oid, ancestor, logrec)
 	}
 
@@ -626,7 +626,7 @@
 	st := svc.St()
 	s := svc.sync
 
-	oid := "tb:foo1"
+	oid := "tb\xfefoo1"
 
 	if _, err := s.dagReplayCommands(nil, "local-init-00.log.sync"); err != nil {
 		t.Fatal(err)
@@ -670,10 +670,10 @@
 			oid, isConflict, newHead, oldHead, ancestor, errConflict)
 	}
 
-	if logrec, err := getLogRecKey(nil, st, oid, oldHead); err != nil || logrec != "$sync:log:data:10:3" {
+	if logrec, err := getLogRecKey(nil, st, oid, oldHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe10\xfe3" {
 		t.Errorf("invalid logrec for oldhead object %s:%s: %s", oid, oldHead, logrec)
 	}
-	if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "$sync:log:data:11:3" {
+	if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe11\xfe3" {
 		t.Errorf("invalid logrec for newhead object %s:%s: %s", oid, newHead, logrec)
 	}
 
@@ -931,7 +931,7 @@
 	st := svc.St()
 	s := svc.sync
 
-	oid := "tb:foo1"
+	oid := "tb\xfefoo1"
 
 	if _, err := s.dagReplayCommands(nil, "local-init-00.log.sync"); err != nil {
 		t.Fatal(err)
@@ -1001,7 +1001,7 @@
 	st := svc.St()
 	s := svc.sync
 
-	oid := "tb:foo1"
+	oid := "tb\xfefoo1"
 
 	if _, err := s.dagReplayCommands(nil, "local-init-00.log.sync"); err != nil {
 		t.Fatal(err)
@@ -1061,7 +1061,7 @@
 	st := svc.St()
 	s := svc.sync
 
-	oid := "tb:foo1"
+	oid := "tb\xfefoo1"
 
 	if _, err := s.dagReplayCommands(nil, "local-init-00.log.sync"); err != nil {
 		t.Fatal(err)
@@ -1123,7 +1123,7 @@
 	st := svc.St()
 	s := svc.sync
 
-	oid := "tb:foo1"
+	oid := "tb\xfefoo1"
 
 	if _, err := s.dagReplayCommands(nil, "local-init-00.log.sync"); err != nil {
 		t.Fatal(err)
diff --git a/services/syncbase/vsync/initiator.go b/services/syncbase/vsync/initiator.go
index 9b607a2..d39590a 100644
--- a/services/syncbase/vsync/initiator.go
+++ b/services/syncbase/vsync/initiator.go
@@ -670,7 +670,7 @@
 			// managed namespaces (e.g. "$row", "$perms"). Remove that prefix before
 			// comparing it with the syncgroup prefixes which are defined by the
 			// application.
-			if strings.HasPrefix(util.StripFirstPartOrDie(objid), p) {
+			if strings.HasPrefix(util.StripFirstKeyPartOrDie(objid), p) {
 				for sg := range sgs {
 					sgIds[sg] = struct{}{}
 				}
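
StripFirstKeyPartOrDie is defined outside this diff, so as a point of reference, here is a minimal standalone sketch of its presumed behavior, assuming the new "\xfe" key separator and die-on-bad-input semantics (everything beyond the helper's name is an assumption):

    package main

    import (
        "fmt"
        "strings"
    )

    // stripFirstKeyPart drops everything up to and including the first key
    // separator, e.g. "$row\xfetb\xfefoo" becomes "tb\xfefoo".
    func stripFirstKeyPart(key string) string {
        i := strings.Index(key, "\xfe")
        if i < 0 {
            panic(fmt.Sprintf("invalid key %q", key)) // the real helper presumably dies via vlog
        }
        return key[i+1:]
    }

    func main() {
        fmt.Printf("%q\n", stripFirstKeyPart("$row\xfetb\xfefoo")) // "tb\xfefoo"
    }
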
@@ -903,17 +903,14 @@
 			}
 
 			// If this is a perms key, update the local store index.
-			parts := util.SplitKeyParts(objid)
-			if len(parts) < 3 {
-				vlog.Fatalf("sync: updateDbAndSyncSt: bad key %s", objid)
-			}
-			if parts[0] == util.PermsPrefix {
-				tb := iSt.config.db.Table(ctx, parts[1])
+			if util.IsPermsKey(objid) {
+				table, row := util.ParseTableAndRowOrDie(objid)
+				tb := iSt.config.db.Table(ctx, table)
 				var err error
 				if !newVersDeleted {
-					err = tb.UpdatePrefixPermsIndexForSet(ctx, iSt.tx, parts[2])
+					err = tb.UpdatePrefixPermsIndexForSet(ctx, iSt.tx, row)
 				} else {
-					err = tb.UpdatePrefixPermsIndexForDelete(ctx, iSt.tx, parts[2])
+					err = tb.UpdatePrefixPermsIndexForDelete(ctx, iSt.tx, row)
 				}
 				if err != nil {
 					return err
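
util.IsPermsKey and util.ParseTableAndRowOrDie also live outside this diff. A hedged sketch of what they presumably do, given that perms keys here have the shape "$perms\xfe<table>\xfe<row>" (the prefix value and the error handling are assumptions):

    package main

    import (
        "fmt"
        "strings"
    )

    const permsPrefix = "$perms" // assumed value of util.PermsPrefix

    func isPermsKey(key string) bool {
        return strings.HasPrefix(key, permsPrefix+"\xfe")
    }

    // parseTableAndRow splits "<prefix>\xfe<table>\xfe<row>", keeping any later
    // separators inside the row part (hence SplitN, not Split).
    func parseTableAndRow(key string) (table, row string) {
        parts := strings.SplitN(key, "\xfe", 3)
        if len(parts) != 3 {
            panic("invalid key: " + key) // the real helper presumably dies via vlog
        }
        return parts[1], parts[2]
    }

    func main() {
        fmt.Println(isPermsKey("$perms\xfetb\xfefoo")) // true
        t, r := parseTableAndRow("$perms\xfetb\xfefoo")
        fmt.Println(t, r) // tb foo
    }
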
diff --git a/services/syncbase/vsync/initiator_test.go b/services/syncbase/vsync/initiator_test.go
index d77bbf5..fd37fb6 100644
--- a/services/syncbase/vsync/initiator_test.go
+++ b/services/syncbase/vsync/initiator_test.go
@@ -143,8 +143,8 @@
 
 	// Verify genvec state.
 	wantVec := interfaces.GenVector{
-		"tb:foo1": interfaces.PrefixGenVector{11: 3},
-		"tb:bar":  interfaces.PrefixGenVector{11: 0},
+		"tb\xfefoo1": interfaces.PrefixGenVector{11: 3},
+		"tb\xfebar":  interfaces.PrefixGenVector{11: 0},
 	}
 	if !reflect.DeepEqual(iSt.updLocal, wantVec) {
 		t.Fatalf("Final local gen vec mismatch got %v, want %v", iSt.updLocal, wantVec)
@@ -179,7 +179,7 @@
 	svc, iSt, cleanup := testInit(t, "local-init-00.log.sync", "remote-noconf-00.log.sync", false)
 	defer cleanup()
 
-	objid := util.JoinKeyParts(util.RowPrefix, "tb:foo1")
+	objid := util.JoinKeyParts(util.RowPrefix, "tb\xfefoo1")
 
 	// Check all log records.
 	var version uint64 = 1
@@ -240,8 +240,8 @@
 
 	// Verify genvec state.
 	wantVec := interfaces.GenVector{
-		"tb:foo1": interfaces.PrefixGenVector{11: 3},
-		"tb:bar":  interfaces.PrefixGenVector{11: 0},
+		"tb\xfefoo1": interfaces.PrefixGenVector{11: 3},
+		"tb\xfebar":  interfaces.PrefixGenVector{11: 0},
 	}
 	if !reflect.DeepEqual(iSt.updLocal, wantVec) {
 		t.Fatalf("Final local gen vec failed got %v, want %v", iSt.updLocal, wantVec)
@@ -276,7 +276,7 @@
 	svc, iSt, cleanup := testInit(t, "local-init-00.log.sync", "remote-conf-00.log.sync", false)
 	defer cleanup()
 
-	objid := util.JoinKeyParts(util.RowPrefix, "tb:foo1")
+	objid := util.JoinKeyParts(util.RowPrefix, "tb\xfefoo1")
 
 	// Verify conflict state.
 	if len(iSt.updObjects) != 1 {
@@ -323,7 +323,7 @@
 	svc, iSt, cleanup := testInit(t, "local-init-00.log.sync", "remote-conf-03.log.sync", false)
 	defer cleanup()
 
-	objid := util.JoinKeyParts(util.RowPrefix, "tb:foo1")
+	objid := util.JoinKeyParts(util.RowPrefix, "tb\xfefoo1")
 
 	// Verify conflict state.
 	if len(iSt.updObjects) != 1 {
@@ -365,6 +365,9 @@
 //////////////////////////////
 // Helpers.
 
+// TODO(sadovsky): If any of the various t.Fatalf()'s below get triggered,
+// cleanup() is not run, and subsequent tests panic with "A runtime has already
+// been initialized".
 func testInit(t *testing.T, lfile, rfile string, sg bool) (*mockService, *initiationState, func()) {
 	// Set a large value to prevent the initiator from running.
 	peerSyncInterval = 1 * time.Hour
@@ -434,7 +437,7 @@
 	if !sg {
 		iSt.peerSgInfo(nil)
 		// sg1.Spec.Prefixes
-		testIfSgPfxsEqual(t, iSt.config.sgPfxs, []string{"foo:", "bar:"})
+		testIfSgPfxsEqual(t, iSt.config.sgPfxs, []string{"foo\xfe", "bar\xfe"})
 	}
 
 	sort.Strings(iSt.config.mtTables)
@@ -462,8 +465,8 @@
 		}
 
 		wantVec = interfaces.GenVector{
-			"foo:": interfaces.PrefixGenVector{10: 0},
-			"bar:": interfaces.PrefixGenVector{10: 0},
+			"foo\xfe": interfaces.PrefixGenVector{10: 0},
+			"bar\xfe": interfaces.PrefixGenVector{10: 0},
 		}
 	}
 
diff --git a/services/syncbase/vsync/replay_test.go b/services/syncbase/vsync/replay_test.go
index 55ae422..2547966 100644
--- a/services/syncbase/vsync/replay_test.go
+++ b/services/syncbase/vsync/replay_test.go
@@ -69,6 +69,16 @@
 			continue
 		}
 
+		// The current line encodes a command, i.e. it is not a comment line.
+		// Use strconv.Unquote to convert escape sequences such as \xfe into the
+		// bytes they denote. Note that we must wrap the original line in quotes
+		// before passing it to strconv.Unquote, since strconv.Unquote expects its
+		// input to look like a Go string literal (quoted).
+		qline := "\"" + line + "\""
+		if line, err = strconv.Unquote(qline); err != nil {
+			return nil, fmt.Errorf("%s: %s", err, qline)
+		}
+
 		args := strings.Split(line, "|")
 		nargs := len(args)
 
@@ -422,7 +432,15 @@
 }
 
 func TestSplitLogRecKey(t *testing.T) {
-	invalid := []string{"$sync:100:bb", "log:100:bb", "$sync:log:data:100:xx", "$sync:log:data:aa:bb", "$sync:log:xx:100:bb", "$sync:log:data:aa:100:bb", "$sync:log:$sync:sgd:xx:100:bb"}
+	invalid := []string{
+		"$sync\xfe100\xfebb",
+		"log\xfe100\xfebb",
+		"$sync\xfelog\xfedata\xfe100\xfexx",
+		"$sync\xfelog\xfedata\xfeaa\xfebb",
+		"$sync\xfelog\xfexx\xfe100\xfebb",
+		"$sync\xfelog\xfedata\xfeaa\xfe100\xfebb",
+		"$sync\xfelog\xfe$sync\xfesgd\xfexx\xfe100\xfebb",
+	}
 
 	for _, k := range invalid {
 		if _, _, _, err := splitLogRecKey(nil, k); err == nil {
@@ -436,8 +454,8 @@
 		gen uint64
 	}{
 		{logDataPrefix, 10, 20},
-		{"$sync:sgd:2500", 190, 540},
-		{"$sync:sgd:4200", 9999, 999999},
+		{"$sync\xfesgd\xfe2500", 190, 540},
+		{"$sync\xfesgd\xfe4200", 9999, 999999},
 	}
 
 	for _, v := range valid {
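
From these cases, a log record key presumably has the shape <pfx>\xfe<id>\xfe<gen>, where <pfx> is either logDataPrefix ("$sync\xfelog\xfedata") or a per-syncgroup "$sync\xfesgd\xfe<gid>" prefix, and <id> and <gen> are decimal. A checker consistent with the table above, sketched under those assumptions (the real splitLogRecKey is not shown in this diff):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    const logDataPrefix = "$sync\xfelog\xfedata" // assumed value

    func splitLogRecKey(key string) (string, uint64, uint64, error) {
        bad := fmt.Errorf("invalid logrec key %q", key)
        parts := strings.Split(key, "\xfe")
        if len(parts) != 5 {
            return "", 0, 0, bad
        }
        pfx := strings.Join(parts[:3], "\xfe")
        if pfx != logDataPrefix && !(parts[0] == "$sync" && parts[1] == "sgd") {
            return "", 0, 0, bad
        }
        id, err1 := strconv.ParseUint(parts[3], 10, 64)
        gen, err2 := strconv.ParseUint(parts[4], 10, 64)
        if err1 != nil || err2 != nil {
            return "", 0, 0, bad
        }
        return pfx, id, gen, nil
    }

    func main() {
        fmt.Println(splitLogRecKey("$sync\xfesgd\xfe2500\xfe190\xfe540")) // pfx, 190, 540, <nil>
    }
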
diff --git a/services/syncbase/vsync/responder.go b/services/syncbase/vsync/responder.go
index b7cd392..16d9f92 100644
--- a/services/syncbase/vsync/responder.go
+++ b/services/syncbase/vsync/responder.go
@@ -72,7 +72,7 @@
 		for oid := range rSt.initVec {
 			gid, err := sgID(oid)
 			if err != nil {
-				vlog.Fatalf("sync: newResponderState: invalid syncgroup key", oid)
+				vlog.Fatalf("sync: newResponderState: invalid syncgroup key %s", oid)
 			}
 			rSt.sgIds[interfaces.GroupId(gid)] = struct{}{}
 		}
@@ -482,7 +482,7 @@
 	// managed namespaces (e.g. "$row", "$perms"). Remove that prefix before
 	// comparing it with the syncgroup prefixes which are defined by the
 	// application.
-	key := util.StripFirstPartOrDie(rec.Metadata.ObjId)
+	key := util.StripFirstKeyPartOrDie(rec.Metadata.ObjId)
 
 	filter := true
 	var maxGen uint64
diff --git a/services/syncbase/vsync/sync_state.go b/services/syncbase/vsync/sync_state.go
index 53eb098..fe389d1 100644
--- a/services/syncbase/vsync/sync_state.go
+++ b/services/syncbase/vsync/sync_state.go
@@ -507,7 +507,7 @@
 // splitAppDbName is the inverse of appDbName and returns app and db name from a
 // globally unique name for a Database.
 func splitAppDbName(ctx *context.T, name string) (string, string, error) {
-	parts := util.SplitKeyParts(name)
+	parts := util.SplitNKeyParts(name, 2)
 	if len(parts) != 2 {
 		return "", "", verror.New(verror.ErrInternal, ctx, "invalid appDbName", name)
 	}
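
util.SplitNKeyParts presumably behaves like strings.SplitN with the key separator: at most n parts, any later separators kept inside the last part. Under that assumption, the call above amounts to:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        name := "mockapp\xfemockdb" // hypothetical appDbName output
        parts := strings.SplitN(name, "\xfe", 2)
        fmt.Println(len(parts), parts[0], parts[1]) // 2 mockapp mockdb
    }
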
diff --git a/services/syncbase/vsync/syncgroup.go b/services/syncbase/vsync/syncgroup.go
index 3d1f5fc..243fc01 100644
--- a/services/syncbase/vsync/syncgroup.go
+++ b/services/syncbase/vsync/syncgroup.go
@@ -526,7 +526,12 @@
 	return util.JoinKeyParts(sgDataKeyPrefix, fmt.Sprintf("%d", gid))
 }
 
-// sgID is the inverse of sgOID and converts an oid string into a group id.
+// sgID is approximately the inverse of sgOID: it converts an oid string into a
+// group id, but assumes that oid is prefixed with util.SyncPrefix (whereas
+// sgOID does not prepend util.SyncPrefix).
+// TODO(hpucha): Add unittests that cover sgOID/sgID (and other such helpers).
+// In CL v.io/c/16919, an incorrect change to the implementation of sgID was
+// caught only by integration tests.
 func sgID(oid string) (interfaces.GroupId, error) {
 	parts := util.SplitKeyParts(oid)
 	if len(parts) != 3 {
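
To make the asymmetry concrete, here is a self-contained sketch; the constant values ("$sync", "sgd") and the decimal gid encoding are assumptions, and only the overall shape of sgOID/sgID comes from this diff:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    const (
        syncPrefix      = "$sync" // assumed value of util.SyncPrefix
        sgDataKeyPrefix = "sgd"   // assumed value of sgDataKeyPrefix
        sep             = "\xfe"
    )

    func sgOID(gid uint64) string {
        return sgDataKeyPrefix + sep + strconv.FormatUint(gid, 10) // no syncPrefix here
    }

    func sgID(oid string) (uint64, error) {
        parts := strings.Split(oid, sep)
        if len(parts) != 3 || parts[0] != syncPrefix || parts[1] != sgDataKeyPrefix {
            return 0, fmt.Errorf("invalid sgoid %q", oid)
        }
        return strconv.ParseUint(parts[2], 10, 64)
    }

    func main() {
        oid := sgOID(1234) // "sgd\xfe1234"
        _, err := sgID(oid) // fails: the "$sync" prefix is missing
        gid, _ := sgID(syncPrefix + sep + oid)
        fmt.Println(err != nil, gid) // true 1234
    }
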
@@ -545,7 +550,8 @@
 	return util.JoinKeyParts(sgDataKeyPrefix, fmt.Sprintf("%d", gid), version)
 }
 
-// sgDataKeyByOID returns the key used to access a version of the syncgroup data.
+// sgDataKeyByOID returns the key used to access a version of the syncgroup
+// data.
 func sgDataKeyByOID(oid, version string) string {
 	return util.JoinKeyParts(oid, version)
 }
@@ -1111,12 +1117,8 @@
 			stream := tx.Scan(start, limit)
 			for stream.Advance() {
 				k, v := stream.Key(nil), stream.Value(nil)
-				parts := util.SplitKeyParts(string(k))
-				if len(parts) < 2 {
-					vlog.Fatalf("sync: bootstrapSyncgroup: invalid version key %s", string(k))
-
-				}
-				key := []byte(util.JoinKeyParts(parts[1:]...))
+				// Remove version prefix.
+				key := []byte(util.StripFirstKeyPartOrDie(string(k)))
 				if err := watchable.AddSyncSnapshotOp(ctx, tx, key, v); err != nil {
 					return err
 				}
diff --git a/services/syncbase/vsync/syncgroup_test.go b/services/syncbase/vsync/syncgroup_test.go
index 2a56f6e..6975eff 100644
--- a/services/syncbase/vsync/syncgroup_test.go
+++ b/services/syncbase/vsync/syncgroup_test.go
@@ -261,7 +261,7 @@
 	checkBadAddSyncgroup(t, st, sg, "SG with invalid (empty) table name")
 
 	sg = mkSg()
-	sg.Spec.Prefixes = []wire.SyncgroupPrefix{{TableName: "a", RowPrefix: "\xff"}}
+	sg.Spec.Prefixes = []wire.SyncgroupPrefix{{TableName: "a", RowPrefix: "\xfe"}}
 	checkBadAddSyncgroup(t, st, sg, "SG with invalid row prefix")
 }
 
@@ -448,7 +448,7 @@
 	expMemberInfo := map[string]*memberInfo{
 		"phone": &memberInfo{
 			db2sg: map[string]sgMemberInfo{
-				"mockapp:mockdb": sgMemberInfo{
+				"mockapp\xfemockdb": sgMemberInfo{
 					sgId1: sg1.Joiners["phone"],
 				},
 			},
@@ -456,7 +456,7 @@
 		},
 		"tablet": &memberInfo{
 			db2sg: map[string]sgMemberInfo{
-				"mockapp:mockdb": sgMemberInfo{
+				"mockapp\xfemockdb": sgMemberInfo{
 					sgId1: sg1.Joiners["tablet"],
 					sgId2: sg2.Joiners["tablet"],
 				},
@@ -469,7 +469,7 @@
 		},
 		"cloud": &memberInfo{
 			db2sg: map[string]sgMemberInfo{
-				"mockapp:mockdb": sgMemberInfo{
+				"mockapp\xfemockdb": sgMemberInfo{
 					sgId1: sg1.Joiners["cloud"],
 				},
 			},
@@ -477,7 +477,7 @@
 		},
 		"door": &memberInfo{
 			db2sg: map[string]sgMemberInfo{
-				"mockapp:mockdb": sgMemberInfo{
+				"mockapp\xfemockdb": sgMemberInfo{
 					sgId2: sg2.Joiners["door"],
 				},
 			},
@@ -485,7 +485,7 @@
 		},
 		"lamp": &memberInfo{
 			db2sg: map[string]sgMemberInfo{
-				"mockapp:mockdb": sgMemberInfo{
+				"mockapp\xfemockdb": sgMemberInfo{
 					sgId2: sg2.Joiners["lamp"],
 				},
 			},
@@ -529,7 +529,7 @@
 	expMemberInfo = map[string]*memberInfo{
 		"tablet": &memberInfo{
 			db2sg: map[string]sgMemberInfo{
-				"mockapp:mockdb": sgMemberInfo{
+				"mockapp\xfemockdb": sgMemberInfo{
 					sgId2: sg2.Joiners["tablet"],
 				},
 			},
@@ -537,7 +537,7 @@
 		},
 		"door": &memberInfo{
 			db2sg: map[string]sgMemberInfo{
-				"mockapp:mockdb": sgMemberInfo{
+				"mockapp\xfemockdb": sgMemberInfo{
 					sgId2: sg2.Joiners["door"],
 				},
 			},
@@ -545,7 +545,7 @@
 		},
 		"lamp": &memberInfo{
 			db2sg: map[string]sgMemberInfo{
-				"mockapp:mockdb": sgMemberInfo{
+				"mockapp\xfemockdb": sgMemberInfo{
 					sgId2: sg2.Joiners["lamp"],
 				},
 			},
diff --git a/services/syncbase/vsync/testdata/local-init-00.log.sync b/services/syncbase/vsync/testdata/local-init-00.log.sync
index c34695b..59fb856 100644
--- a/services/syncbase/vsync/testdata/local-init-00.log.sync
+++ b/services/syncbase/vsync/testdata/local-init-00.log.sync
@@ -1,6 +1,6 @@
 # Create an object locally and update it twice (linked-list).
 # The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
 
-addl|tb:foo1|1|||$sync:log:data:10:1|0|1|false
-addl|tb:foo1|2|1||$sync:log:data:10:2|0|1|false
-addl|tb:foo1|3|2||$sync:log:data:10:3|0|1|false
+addl|tb\xfefoo1|1|||$sync\xfelog\xfedata\xfe10\xfe1|0|1|false
+addl|tb\xfefoo1|2|1||$sync\xfelog\xfedata\xfe10\xfe2|0|1|false
+addl|tb\xfefoo1|3|2||$sync\xfelog\xfedata\xfe10\xfe3|0|1|false
diff --git a/services/syncbase/vsync/testdata/local-resolve-00.sync b/services/syncbase/vsync/testdata/local-resolve-00.sync
index 383b38f..28e1e9d 100644
--- a/services/syncbase/vsync/testdata/local-resolve-00.sync
+++ b/services/syncbase/vsync/testdata/local-resolve-00.sync
@@ -1,4 +1,4 @@
 # Create an object locally and update it twice (linked-list).
 # The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
 
-addl|tb:foo1|7|3|6|logrec-06|0|1|false
+addl|tb\xfefoo1|7|3|6|logrec-06|0|1|false
diff --git a/services/syncbase/vsync/testdata/remote-conf-00.log.sync b/services/syncbase/vsync/testdata/remote-conf-00.log.sync
index 1d90e2c..261f520 100644
--- a/services/syncbase/vsync/testdata/remote-conf-00.log.sync
+++ b/services/syncbase/vsync/testdata/remote-conf-00.log.sync
@@ -3,6 +3,6 @@
 # it from the local sync at v2, then updated separately).
 # The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
 
-addr|tb:foo1|4|2||$sync:log:data:11:1|0|1|false
-addr|tb:foo1|5|4||$sync:log:data:11:2|0|1|false
-addr|tb:foo1|6|5||$sync:log:data:11:3|0|1|false
+addr|tb\xfefoo1|4|2||$sync\xfelog\xfedata\xfe11\xfe1|0|1|false
+addr|tb\xfefoo1|5|4||$sync\xfelog\xfedata\xfe11\xfe2|0|1|false
+addr|tb\xfefoo1|6|5||$sync\xfelog\xfedata\xfe11\xfe3|0|1|false
diff --git a/services/syncbase/vsync/testdata/remote-conf-01.log.sync b/services/syncbase/vsync/testdata/remote-conf-01.log.sync
index 4e0d2de..ee1b836 100644
--- a/services/syncbase/vsync/testdata/remote-conf-01.log.sync
+++ b/services/syncbase/vsync/testdata/remote-conf-01.log.sync
@@ -5,6 +5,6 @@
 # sees 2 graft points: v1-v4 and v2-v5.
 # The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
 
-addr|tb:foo1|4|1||$sync:log:data:12:1|0|1|false
-addr|tb:foo1|5|2|4|$sync:log:data:11:1|0|1|false
-addr|tb:foo1|6|5||$sync:log:data:11:2|0|1|false
+addr|tb\xfefoo1|4|1||$sync\xfelog\xfedata\xfe12\xfe1|0|1|false
+addr|tb\xfefoo1|5|2|4|$sync\xfelog\xfedata\xfe11\xfe1|0|1|false
+addr|tb\xfefoo1|6|5||$sync\xfelog\xfedata\xfe11\xfe2|0|1|false
diff --git a/services/syncbase/vsync/testdata/remote-conf-03.log.sync b/services/syncbase/vsync/testdata/remote-conf-03.log.sync
index d96bf3f..8aed9dc 100644
--- a/services/syncbase/vsync/testdata/remote-conf-03.log.sync
+++ b/services/syncbase/vsync/testdata/remote-conf-03.log.sync
@@ -1,6 +1,6 @@
 # Create the same object remotely from scratch and update it twice (linked-list).
 # The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
 
-addr|tb:foo1|4|||$sync:log:data:11:1|0|1|false
-addr|tb:foo1|5|4||$sync:log:data:11:2|0|1|false
-addr|tb:foo1|6|5||$sync:log:data:11:3|0|1|false
+addr|tb\xfefoo1|4|||$sync\xfelog\xfedata\xfe11\xfe1|0|1|false
+addr|tb\xfefoo1|5|4||$sync\xfelog\xfedata\xfe11\xfe2|0|1|false
+addr|tb\xfefoo1|6|5||$sync\xfelog\xfedata\xfe11\xfe3|0|1|false
diff --git a/services/syncbase/vsync/testdata/remote-conf-link.log.sync b/services/syncbase/vsync/testdata/remote-conf-link.log.sync
index c46d319..d3be9e4 100644
--- a/services/syncbase/vsync/testdata/remote-conf-link.log.sync
+++ b/services/syncbase/vsync/testdata/remote-conf-link.log.sync
@@ -1,5 +1,5 @@
 # Update an object remotely, detect conflict, and bless the local version.
 # The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
 
-addr|tb:foo1|4|1||$sync:log:11:1|0|1|false
-linkr|tb:foo1|4|2||$sync:log:11:2
+addr|tb\xfefoo1|4|1||$sync\xfelog\xfe11\xfe1|0|1|false
+linkr|tb\xfefoo1|4|2||$sync\xfelog\xfe11\xfe2
diff --git a/services/syncbase/vsync/testdata/remote-init-00.log.sync b/services/syncbase/vsync/testdata/remote-init-00.log.sync
index 6189873..1683d08 100644
--- a/services/syncbase/vsync/testdata/remote-init-00.log.sync
+++ b/services/syncbase/vsync/testdata/remote-init-00.log.sync
@@ -1,7 +1,8 @@
 # Create an object remotely and update it twice (linked-list).
 # The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
+# TODO(rdaoud): The above comment is incorrect for the 'genvec' line.
 
-addr|tb:foo1|1|||$sync:log:data:11:1|0|1|false
-addr|tb:foo1|2|1||$sync:log:data:11:2|0|1|false
-addr|tb:foo1|3|2||$sync:log:data:11:3|0|1|false
-genvec|tb:foo1|10:0,11:3|tb:bar|11:0
+addr|tb\xfefoo1|1|||$sync\xfelog\xfedata\xfe11\xfe1|0|1|false
+addr|tb\xfefoo1|2|1||$sync\xfelog\xfedata\xfe11\xfe2|0|1|false
+addr|tb\xfefoo1|3|2||$sync\xfelog\xfedata\xfe11\xfe3|0|1|false
+genvec|tb\xfefoo1|10:0,11:3|tb\xfebar|11:0
diff --git a/services/syncbase/vsync/testdata/remote-noconf-00.log.sync b/services/syncbase/vsync/testdata/remote-noconf-00.log.sync
index 9bdc88c..e54a5ec 100644
--- a/services/syncbase/vsync/testdata/remote-noconf-00.log.sync
+++ b/services/syncbase/vsync/testdata/remote-noconf-00.log.sync
@@ -2,7 +2,9 @@
 # after it was created locally up to v3 (i.e. assume the remote sync
 # received it from the local sync first, then updated it).
 # The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
-addr|tb:foo1|4|3||$sync:log:data:11:1|0|1|false
-addr|tb:foo1|5|4||$sync:log:data:11:2|0|1|false
-addr|tb:foo1|6|5||$sync:log:data:11:3|0|1|false
-genvec|tb:foo1|10:0,11:3|tb:bar|11:0
+# TODO(rdaoud): The above comment is incorrect for the 'genvec' line.
+
+addr|tb\xfefoo1|4|3||$sync\xfelog\xfedata\xfe11\xfe1|0|1|false
+addr|tb\xfefoo1|5|4||$sync\xfelog\xfedata\xfe11\xfe2|0|1|false
+addr|tb\xfefoo1|6|5||$sync\xfelog\xfedata\xfe11\xfe3|0|1|false
+genvec|tb\xfefoo1|10:0,11:3|tb\xfebar|11:0
diff --git a/services/syncbase/vsync/testdata/remote-noconf-link-00.log.sync b/services/syncbase/vsync/testdata/remote-noconf-link-00.log.sync
index 546dfb0..11e0df5 100644
--- a/services/syncbase/vsync/testdata/remote-noconf-link-00.log.sync
+++ b/services/syncbase/vsync/testdata/remote-noconf-link-00.log.sync
@@ -1,5 +1,5 @@
 # Update an object remotely, detect conflict, and bless the remote version.
 # The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
 
-addr|tb:foo1|4|1||$sync:log:11:1|0|1|false
-linkr|tb:foo1|2|4||$sync:log:11:2
+addr|tb\xfefoo1|4|1||$sync\xfelog\xfe11\xfe1|0|1|false
+linkr|tb\xfefoo1|2|4||$sync\xfelog\xfe11\xfe2
diff --git a/services/syncbase/vsync/testdata/remote-noconf-link-01.log.sync b/services/syncbase/vsync/testdata/remote-noconf-link-01.log.sync
index 4e91506..840514c 100644
--- a/services/syncbase/vsync/testdata/remote-noconf-link-01.log.sync
+++ b/services/syncbase/vsync/testdata/remote-noconf-link-01.log.sync
@@ -1,5 +1,5 @@
 # Update an object remotely, detect conflict, and bless the local version.
 # The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
 
-addr|tb:foo1|4|1||$sync:log:11:1|0|1|false
-linkr|tb:foo1|4|3||$sync:log:11:2
+addr|tb\xfefoo1|4|1||$sync\xfelog\xfe11\xfe1|0|1|false
+linkr|tb\xfefoo1|4|3||$sync\xfelog\xfe11\xfe2
diff --git a/services/syncbase/vsync/testdata/remote-noconf-link-02.log.sync b/services/syncbase/vsync/testdata/remote-noconf-link-02.log.sync
index 2b75980..cd42033 100644
--- a/services/syncbase/vsync/testdata/remote-noconf-link-02.log.sync
+++ b/services/syncbase/vsync/testdata/remote-noconf-link-02.log.sync
@@ -1,6 +1,6 @@
 # Update an object remotely, detect conflict, and bless the remote version, and continue updating.
 # The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
 
-addr|tb:foo1|4|1||$sync:log:11:1|0|1|false
-linkr|tb:foo1|3|4||$sync:log:11:2
-addr|tb:foo1|5|3||$sync:log:11:3|0|1|false
+addr|tb\xfefoo1|4|1||$sync\xfelog\xfe11\xfe1|0|1|false
+linkr|tb\xfefoo1|3|4||$sync\xfelog\xfe11\xfe2
+addr|tb\xfefoo1|5|3||$sync\xfelog\xfe11\xfe3|0|1|false
diff --git a/services/syncbase/vsync/testdata/remote-noconf-link-repeat.log.sync b/services/syncbase/vsync/testdata/remote-noconf-link-repeat.log.sync
index eff36cd..cbcdd58 100644
--- a/services/syncbase/vsync/testdata/remote-noconf-link-repeat.log.sync
+++ b/services/syncbase/vsync/testdata/remote-noconf-link-repeat.log.sync
@@ -1,4 +1,4 @@
 # Resolve the same conflict on two different devices.
 # The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
 
-linkr|tb:foo1|3|4||$sync:log:12:1
+linkr|tb\xfefoo1|3|4||$sync\xfelog\xfe12\xfe1
diff --git a/services/syncbase/vsync/util.go b/services/syncbase/vsync/util.go
index 4e0c523..4842a0c 100644
--- a/services/syncbase/vsync/util.go
+++ b/services/syncbase/vsync/util.go
@@ -7,7 +7,6 @@
 // Sync utility functions
 
 import (
-	"strings"
 	"time"
 
 	"v.io/v23/context"
@@ -98,41 +97,10 @@
 	return util.JoinKeyParts(p.TableName, p.RowPrefix)
 }
 
-// TODO(jlodhia): extractAppKey() method is temporary for conflict resolution.
-// Will be removed once SyncgroupPrefix is refactored into a generic
-// TableRow struct.
-// extractAppKey extracts the app key from the key sent over the wire between
-// two Syncbases. The on-wire key starts with one of the store's reserved
-// prefixes for managed namespaces (e.g. $row, $perms). This function removes
-// that prefix and returns the application component of the key. This is done
-// typically before comparing keys with the SyncGroup prefixes which are defined
-// by the application.
-func extractAppKey(key string) string {
-	parts := splitKeyIntoParts(key, 2)
-	return util.JoinKeyParts(parts[1:]...)
-}
-
-// isRowKey checks if the given key belongs to a data row.
-func isRowKey(key string) bool {
-	return strings.HasPrefix(key, util.RowPrefix)
-}
-
-// makeRowKey takes an app key, whose structure is <table>:<row>, and converts
-// it into store's representation of row key with structure $row:<table>:<row>
-func toRowKey(appKey string) string {
-	return util.JoinKeyParts(util.RowPrefix, appKey)
-}
-
-// Returns the table name and key within the table from the given row key.
-func extractComponentsFromKey(key string) (table string, row string) {
-	parts := splitKeyIntoParts(key, 3)
-	return parts[1], parts[2]
-}
-
-func splitKeyIntoParts(key string, minCount int) []string {
-	parts := util.SplitKeyParts(key)
-	if len(parts) < minCount {
-		vlog.Fatalf("sync: extractKeyParts: invalid entry key %s (expected %d parts)", key, minCount)
-	}
-	return parts
+// toRowKey prepends RowPrefix to what is presumably a "<table>\xfe<row>" string,
+// yielding a storage engine key for a row.
+// TODO(sadovsky): Only used by CR code. Should go away once CR stores table
+// name and row key as separate fields in a "TableAndRow" struct.
+func toRowKey(tableRow string) string {
+	return util.JoinKeyParts(util.RowPrefix, tableRow)
 }
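
Assuming util.RowPrefix is "$row" and util.JoinKeyParts joins with "\xfe", the helper above reduces to simple concatenation:

    package main

    import "fmt"

    func toRowKey(tableRow string) string {
        return "$row" + "\xfe" + tableRow // i.e. util.JoinKeyParts(util.RowPrefix, tableRow)
    }

    func main() {
        fmt.Printf("%q\n", toRowKey("tb\xfefoo1")) // "$row\xfetb\xfefoo1"
    }
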
diff --git a/services/syncbase/vsync/watcher.go b/services/syncbase/vsync/watcher.go
index 0d5e5d6..c61ff55 100644
--- a/services/syncbase/vsync/watcher.go
+++ b/services/syncbase/vsync/watcher.go
@@ -422,13 +422,9 @@
 	}
 
 	// The key starts with one of the store's reserved prefixes for managed
-	// namespaced (e.g. $row or $perm).  Remove that prefix before comparing
-	// it with the syncgroup prefixes which are defined by the application.
-	parts := util.SplitKeyParts(key)
-	if len(parts) < 2 {
-		vlog.Fatalf("sync: syncable: %s: invalid entry key %s: %v", appdb, key, logEnt)
-	}
-	key = util.JoinKeyParts(parts[1:]...)
+	// namespaces (e.g. "$row", "$perms"). Remove that prefix before comparing it
+	// with the syncgroup prefixes which are defined by the application.
+	key = util.StripFirstKeyPartOrDie(key)
 
 	for prefix := range watchPrefixes[appdb] {
 		if strings.HasPrefix(key, prefix) {
diff --git a/services/syncbase/vsync/watcher_test.go b/services/syncbase/vsync/watcher_test.go
index b09a9e8..66a6c55 100644
--- a/services/syncbase/vsync/watcher_test.go
+++ b/services/syncbase/vsync/watcher_test.go
@@ -86,9 +86,9 @@
 	}
 
 	expPrefixes := map[string]sgPrefixes{
-		"app1:db1": sgPrefixes{"foo": 2, "bar": 1},
-		"app2:db1": sgPrefixes{"xyz": 1},
-		"app3:db1": sgPrefixes{"haha": 1},
+		"app1\xfedb1": sgPrefixes{"foo": 2, "bar": 1},
+		"app2\xfedb1": sgPrefixes{"xyz": 1},
+		"app3\xfedb1": sgPrefixes{"haha": 1},
 	}
 	if !reflect.DeepEqual(watchPrefixes, expPrefixes) {
 		t.Errorf("invalid watch prefixes: got %v instead of %v", watchPrefixes, expPrefixes)
diff --git a/services/wspr/internal/lib/signature_manager_test.go b/services/wspr/internal/lib/signature_manager_test.go
index 97240ab..9173521 100644
--- a/services/wspr/internal/lib/signature_manager_test.go
+++ b/services/wspr/internal/lib/signature_manager_test.go
@@ -23,7 +23,7 @@
 )
 
 func initRuntime(t *testing.T) (*context.T, clientWithTimesCalled, v23.Shutdown) {
-	ctx, shutdown := test.V23InitAnon()
+	ctx, shutdown := test.V23InitSimple()
 	initialSig := []signature.Interface{
 		{
 			Methods: []signature.Method{
diff --git a/test/doc.go b/test/doc.go
index 206164e..8352f95 100644
--- a/test/doc.go
+++ b/test/doc.go
@@ -28,20 +28,20 @@
 // --v23.tests.shell-on-fail - drop into a debug shell if the test fails.
 //
 // Typical usage is:
-// $ v23 go test . --v23.tests
+// $ jiri go test . --v23.tests
 //
 // Note that, like all flags not recognised by the go testing package, the
 // v23.tests flags must follow the package spec.
 //
 // The sub-directories of this package provide either functionality that
 // can be used within traditional go tests, or support for the v23 integration
-// test framework. The v23 command is able to generate boilerplate code
-// to support these tests. In summary, v23 test generate will generate
+// test framework. The jiri command is able to generate boilerplate code
+// to support these tests. In summary, 'jiri test generate' will generate
 // go files to be checked in that include appropriate TestMain functions,
 // registration calls for modules commands and wrapper functions for v23test
 // tests. More detailed documentation is available via:
 //
-// $ v23 test generate --help
+// $ jiri test generate --help
 //
 // Vanadium tests often need to run subprocesses to provide either common
 // services that they depend (e.g. a mount table) and/or services that are
diff --git a/test/init.go b/test/init.go
index ef625eb..e1812e9 100644
--- a/test/init.go
+++ b/test/init.go
@@ -117,10 +117,11 @@
 	return context.WithLogger(ctx, logger.Global()), cancel
 }
 
-// V23InitEmpty initializes a runtime but with no principal.
-func V23InitAnon() (*context.T, v23.Shutdown) {
+// V23InitSimple is like V23Init, except that it does not set up a
+// mounttable.
+func V23InitSimple() (*context.T, v23.Shutdown) {
 	return initWithParams(initParams{
-		CreatePrincipal:  false,
+		CreatePrincipal:  true,
 		CreateMounttable: false,
 	})
 }
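
A typical call site mirrors the signature_manager_test.go change earlier in this CL; a hypothetical usage sketch (package name and test body made up, and a runtime factory import is still needed in practice):

    package mypkg_test

    import (
        "testing"

        "v.io/x/ref/test"
    )

    func TestSomething(t *testing.T) {
        // A principal is created, but no mounttable is set up.
        ctx, shutdown := test.V23InitSimple()
        defer shutdown()
        _ = ctx // use ctx to set up clients and servers as needed
    }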