Merge "add builtin VDLROOT support to vdl"
diff --git a/README.md b/README.md
index 4c8c809..21f18d6 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,8 @@
 # Vanadium
 
-This repository contains a reference implementation of the [Vanadium] APIs.
+This repository contains a reference implementation of the Vanadium APIs.
 
 Unlike the APIs in https://github.com/vanadium/go.v23, which promise to
 provide [backward compatibility], this repository makes no such promises.
 
-[Vanadium]: https://v.io
-[backward compatibility]: https://godoc.v.io/pkg/v.io/v23
+[backward compatibility]: https://godoc.org/v.io/v23
diff --git a/envvar.go b/envvar.go
index 0c153db..d6c074a 100644
--- a/envvar.go
+++ b/envvar.go
@@ -4,8 +4,6 @@
 
 // Package ref defines constants used throughout the Vanadium reference
 // implementation, which is implemented in its subdirectories.
-//
-// For more details about the Vanadium project, please visit https://v.io.
 package ref
 
 import (
@@ -52,6 +50,11 @@
 	// to the url of the OAuth identity provider used by the principal
 	// seekblessings command.
 	EnvOAuthIdentityProvider = "V23_OAUTH_IDENTITY_PROVIDER"
+
+	// RPCTransitionStateVar names a temporary environment variable that determines how
+	// far along we are in the transition from the old to the new RPC system.  It should
+	// be removed when the transition is complete.
+	RPCTransitionStateVar = "V23_RPC_TRANSITION_STATE"
 )
 
 // EnvNamespaceRoots returns the set of namespace roots to be used by the
@@ -89,3 +92,24 @@
 	}
 	return nil
 }
+
+type TransitionState int
+
+const (
+	None TransitionState = iota
+	XClients
+	XServers
+)
+
+func RPCTransitionState() TransitionState {
+	switch ts := os.Getenv(RPCTransitionStateVar); ts {
+	case "xclients":
+		return XClients
+	case "xservers":
+		return XServers
+	case "":
+		return None
+	default:
+		panic("Unknown transition state: " + ts)
+	}
+}
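For illustration, callers are expected to gate new-RPC code paths on this value, as the benchmark change later in this patch does with ref.RPCTransitionState() >= ref.XServers. The following standalone program is a hypothetical sketch of that pattern and is not part of this change:

package main

import (
	"fmt"

	"v.io/x/ref"
)

func main() {
	// ref.RPCTransitionState reads V23_RPC_TRANSITION_STATE ("" -> None,
	// "xclients" -> XClients, "xservers" -> XServers) and panics on anything else.
	if ref.RPCTransitionState() >= ref.XServers {
		fmt.Println("servers (and clients) use the new flow-based RPC stack")
	} else {
		fmt.Println("still on the old stream-based RPC stack")
	}
}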
diff --git a/lib/discovery/plugins/mdns/mdns.go b/lib/discovery/plugins/mdns/mdns.go
index 4b489ab..a02c322 100644
--- a/lib/discovery/plugins/mdns/mdns.go
+++ b/lib/discovery/plugins/mdns/mdns.go
@@ -85,8 +85,8 @@
 		return err
 	}
 	stop := func() {
-		p.mdns.RemoveService(serviceName, hostName)
-		p.mdns.RemoveService(v23ServiceName, hostName)
+		p.mdns.RemoveService(serviceName, hostName, 0)
+		p.mdns.RemoveService(v23ServiceName, hostName, 0)
 	}
 	p.adStopper.Add(stop, ctx.Done())
 	return nil
@@ -101,10 +101,10 @@
 	}
 
 	go func() {
+		// Subscribe to the service if not subscribed yet or if we haven't refreshed in a while.
 		p.subscriptionMu.Lock()
 		sub := p.subscription[serviceName]
 		sub.count++
-		// If we haven't refreshed in a while, do it now.
 		if time.Since(sub.lastSubscription) > p.subscriptionRefreshTime {
 			p.mdns.SubscribeToService(serviceName)
 			// Wait a bit to learn about neighborhood.
@@ -114,7 +114,10 @@
 		p.subscription[serviceName] = sub
 		p.subscriptionMu.Unlock()
 
+		// Watch for membership changes.
+		watcher, stopWatcher := p.mdns.ServiceMemberWatch(serviceName)
 		defer func() {
+			stopWatcher()
 			p.subscriptionMu.Lock()
 			sub := p.subscription[serviceName]
 			sub.count--
@@ -127,9 +130,13 @@
 			p.subscriptionMu.Unlock()
 		}()
 
-		// TODO(jhahn): Handle "Lost" case.
-		services := p.mdns.ServiceDiscovery(serviceName)
-		for _, service := range services {
+		for {
+			var service mdns.ServiceInstance
+			select {
+			case service = <-watcher:
+			case <-ctx.Done():
+				return
+			}
 			ad, err := decodeAdvertisement(service)
 			if err != nil {
 				ctx.Error(err)
@@ -185,6 +192,7 @@
 			Attrs:        make(discovery.Attributes),
 		},
 	}
+	// TODO(jhahn): Handle lost service.
 	for _, rr := range service.TxtRRs {
 		for _, txt := range rr.Txt {
 			kv := strings.SplitN(txt, "=", 2)
@@ -214,7 +222,13 @@
 		// is set. Use a default one if not given.
 		host = "v23()"
 	}
-	m, err := mdns.NewMDNS(host, "", "", loopback, false)
+	var v4addr, v6addr string
+	if loopback {
+		// To avoid interference from other mDNS servers in unit tests.
+		v4addr = "224.0.0.251:9999"
+		v6addr = "[FF02::FB]:9999"
+	}
+	m, err := mdns.NewMDNS(host, v4addr, v6addr, loopback, false)
 	if err != nil {
 		// The name may not have been unique. Try one more time with a unique
 		// name. NewMDNS will replace the "()" with "(hardware mac address)".
diff --git a/runtime/factories/roaming/.api b/runtime/factories/roaming/.api
index c15af37..7443e1d 100644
--- a/runtime/factories/roaming/.api
+++ b/runtime/factories/roaming/.api
@@ -1,4 +1,4 @@
 pkg roaming, const SettingsStreamDesc ideal-string
 pkg roaming, const SettingsStreamName ideal-string
 pkg roaming, func Init(*context.T) (v23.Runtime, *context.T, v23.Shutdown, error)
-pkg roaming, func NewProxy(*context.T, rpc.ListenSpec, ...string) (func(), naming.Endpoint, error)
+pkg roaming, func NewProxy(*context.T, rpc.ListenSpec, security.Authorizer, ...string) (func(), naming.Endpoint, error)
diff --git a/runtime/factories/static/.api b/runtime/factories/static/.api
index 2d3ebb3..c6a2583 100644
--- a/runtime/factories/static/.api
+++ b/runtime/factories/static/.api
@@ -1,2 +1,2 @@
 pkg static, func Init(*context.T) (v23.Runtime, *context.T, v23.Shutdown, error)
-pkg static, func NewProxy(*context.T, rpc.ListenSpec, ...string) (func(), naming.Endpoint, error)
+pkg static, func NewProxy(*context.T, rpc.ListenSpec, security.Authorizer, ...string) (func(), naming.Endpoint, error)
diff --git a/runtime/internal/flow/conn/conn.go b/runtime/internal/flow/conn/conn.go
index a7991c7..ecbd8a9 100644
--- a/runtime/internal/flow/conn/conn.go
+++ b/runtime/internal/flow/conn/conn.go
@@ -346,3 +346,8 @@
 	c.lastUsedTime = time.Now()
 	c.mu.Unlock()
 }
+
+func (c *Conn) IsEncapsulated() bool {
+	_, ok := c.mp.rw.(*flw)
+	return ok
+}
diff --git a/runtime/internal/flow/conn/conn_test.go b/runtime/internal/flow/conn/conn_test.go
index b5b6eeb..c9ba7c0 100644
--- a/runtime/internal/flow/conn/conn_test.go
+++ b/runtime/internal/flow/conn/conn_test.go
@@ -19,7 +19,7 @@
 	"v.io/x/ref/test/goroutines"
 )
 
-const leakWaitTime = 100 * time.Millisecond
+const leakWaitTime = 250 * time.Millisecond
 
 var randData []byte
 
diff --git a/runtime/internal/flow/conn/flow.go b/runtime/internal/flow/conn/flow.go
index dd6ffc6..3bfd000 100644
--- a/runtime/internal/flow/conn/flow.go
+++ b/runtime/internal/flow/conn/flow.go
@@ -56,7 +56,7 @@
 // Read and ReadMsg should not be called concurrently with themselves
 // or each other.
 func (f *flw) Read(p []byte) (n int, err error) {
-	f.conn.markUsed()
+	f.markUsed()
 	if n, err = f.q.read(f.ctx, p); err != nil {
 		f.close(f.ctx, err)
 	}
@@ -68,7 +68,7 @@
 // Read and ReadMsg should not be called concurrently with themselves
 // or each other.
 func (f *flw) ReadMsg() (buf []byte, err error) {
-	f.conn.markUsed()
+	f.markUsed()
 	// TODO(mattr): Currently we only ever release counters when some flow
 	// reads.  We may need to do it more or less often.  Currently
 	// we'll send counters whenever a new flow is opened.
@@ -86,7 +86,7 @@
 }
 
 func (f *flw) writeMsg(alsoClose bool, parts ...[]byte) (int, error) {
-	f.conn.markUsed()
+	f.markUsed()
 	sent := 0
 	var left []byte
 	err := f.worker.Run(f.ctx, func(tokens int) (int, bool, error) {
@@ -285,3 +285,9 @@
 	f.close(f.ctx, nil)
 	return nil
 }
+
+func (f *flw) markUsed() {
+	if f.id >= reservedFlows {
+		f.conn.markUsed()
+	}
+}
diff --git a/runtime/internal/flow/errors.vdl b/runtime/internal/flow/errors.vdl
new file mode 100644
index 0000000..8019811
--- /dev/null
+++ b/runtime/internal/flow/errors.vdl
@@ -0,0 +1,15 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flow
+
+// These messages are constructed so as to avoid embedding a component/method name
+// and are thus more suitable for inclusion in other verrors.
+// This practice of omitting {1}{2} should be used throughout the flow implementations
+// since all of their errors are intended to be used as arguments to higher level errors.
+// TODO(suharshs,toddw): Allow skipping of {1}{2} in vdl generated errors.
+error (
+  WrongObjectInContext(typ string) {"en":
+  "context passed to method of {typ} object, but that object is not attached to the context."}
+)
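For illustration, an error written this way is meant to nest as the detail argument of a higher-level verror. The sketch below is hypothetical but mirrors the validateContext check added to the flow manager later in this patch; the imports and the iflow alias are assumptions taken from that file.

package flowexample

import (
	"v.io/v23"
	"v.io/v23/context"
	"v.io/v23/flow"

	iflow "v.io/x/ref/runtime/internal/flow"
)

// validateManager is a hypothetical helper: because WrongObjectInContext omits
// any component/method prefix, its text reads naturally when wrapped by
// flow.NewErrBadArg (or any other higher-level verror).
func validateManager(ctx *context.T, m flow.Manager) error {
	if v23.ExperimentalGetFlowManager(ctx) != m {
		return flow.NewErrBadArg(ctx, iflow.NewErrWrongObjectInContext(ctx, "manager"))
	}
	return nil
}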
diff --git a/runtime/internal/flow/errors.vdl.go b/runtime/internal/flow/errors.vdl.go
new file mode 100644
index 0000000..3f4d3b2
--- /dev/null
+++ b/runtime/internal/flow/errors.vdl.go
@@ -0,0 +1,28 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated by the vanadium vdl tool.
+// Source: errors.vdl
+
+package flow
+
+import (
+	// VDL system imports
+	"v.io/v23/context"
+	"v.io/v23/i18n"
+	"v.io/v23/verror"
+)
+
+var (
+	ErrWrongObjectInContext = verror.Register("v.io/x/ref/runtime/internal/flow.WrongObjectInContext", verror.NoRetry, "{1:}{2:} context passed to method of {3} object, but that object is not attached to the context.")
+)
+
+func init() {
+	i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(ErrWrongObjectInContext.ID), "{1:}{2:} context passed to method of {3} object, but that object is not attached to the context.")
+}
+
+// NewErrWrongObjectInContext returns an error with the ErrWrongObjectInContext ID.
+func NewErrWrongObjectInContext(ctx *context.T, typ string) error {
+	return verror.New(ErrWrongObjectInContext, ctx, typ)
+}
diff --git a/runtime/internal/flow/manager/conncache.go b/runtime/internal/flow/manager/conncache.go
index 92f8348..b3975e2 100644
--- a/runtime/internal/flow/manager/conncache.go
+++ b/runtime/internal/flow/manager/conncache.go
@@ -151,6 +151,10 @@
 			delete(c.ridCache, e.rid)
 			continue
 		}
+		if e.conn.IsEncapsulated() {
+			// Killing a proxied connection doesn't save us any FD resources, just memory.
+			continue
+		}
 		pq = append(pq, e)
 	}
 	sort.Sort(pq)
diff --git a/runtime/internal/flow/manager/errors.vdl b/runtime/internal/flow/manager/errors.vdl
index dbb38ae..af017a7 100644
--- a/runtime/internal/flow/manager/errors.vdl
+++ b/runtime/internal/flow/manager/errors.vdl
@@ -16,4 +16,5 @@
   CacheClosed() {"en":"cache is closed"}
   ConnKilledToFreeResources() {"en": "Connection killed to free resources."}
   InvalidProxyResponse(typ string) {"en": "Invalid proxy response{:typ}"}
+  ManagerDialingSelf() {"en": "manager cannot be used to dial itself"}
 )
diff --git a/runtime/internal/flow/manager/errors.vdl.go b/runtime/internal/flow/manager/errors.vdl.go
index 438e6fa..a437afd 100644
--- a/runtime/internal/flow/manager/errors.vdl.go
+++ b/runtime/internal/flow/manager/errors.vdl.go
@@ -21,6 +21,7 @@
 	ErrCacheClosed               = verror.Register("v.io/x/ref/runtime/internal/flow/manager.CacheClosed", verror.NoRetry, "{1:}{2:} cache is closed")
 	ErrConnKilledToFreeResources = verror.Register("v.io/x/ref/runtime/internal/flow/manager.ConnKilledToFreeResources", verror.NoRetry, "{1:}{2:} Connection killed to free resources.")
 	ErrInvalidProxyResponse      = verror.Register("v.io/x/ref/runtime/internal/flow/manager.InvalidProxyResponse", verror.NoRetry, "{1:}{2:} Invalid proxy response{:3}")
+	ErrManagerDialingSelf        = verror.Register("v.io/x/ref/runtime/internal/flow/manager.ManagerDialingSelf", verror.NoRetry, "{1:}{2:} manager cannot be used to dial itself")
 )
 
 func init() {
@@ -30,6 +31,7 @@
 	i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(ErrCacheClosed.ID), "{1:}{2:} cache is closed")
 	i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(ErrConnKilledToFreeResources.ID), "{1:}{2:} Connection killed to free resources.")
 	i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(ErrInvalidProxyResponse.ID), "{1:}{2:} Invalid proxy response{:3}")
+	i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(ErrManagerDialingSelf.ID), "{1:}{2:} manager cannot be used to dial itself")
 }
 
 // NewErrUnknownProtocol returns an error with the ErrUnknownProtocol ID.
@@ -61,3 +63,8 @@
 func NewErrInvalidProxyResponse(ctx *context.T, typ string) error {
 	return verror.New(ErrInvalidProxyResponse, ctx, typ)
 }
+
+// NewErrManagerDialingSelf returns an error with the ErrManagerDialingSelf ID.
+func NewErrManagerDialingSelf(ctx *context.T) error {
+	return verror.New(ErrManagerDialingSelf, ctx)
+}
diff --git a/runtime/internal/flow/manager/manager.go b/runtime/internal/flow/manager/manager.go
index f6432bc..60fa43e 100644
--- a/runtime/internal/flow/manager/manager.go
+++ b/runtime/internal/flow/manager/manager.go
@@ -20,12 +20,18 @@
 	"v.io/v23/security"
 	"v.io/v23/verror"
 
+	iflow "v.io/x/ref/runtime/internal/flow"
 	"v.io/x/ref/runtime/internal/flow/conn"
 	"v.io/x/ref/runtime/internal/lib/upcqueue"
 	inaming "v.io/x/ref/runtime/internal/naming"
 	"v.io/x/ref/runtime/internal/rpc/version"
 )
 
+const (
+	reconnectDelay    = 50 * time.Millisecond
+	reapCacheInterval = 5 * time.Minute
+)
+
 type manager struct {
 	rid    naming.RoutingID
 	closed chan struct{}
@@ -34,33 +40,43 @@
 
 	mu              *sync.Mutex
 	listenEndpoints []naming.Endpoint
+	proxyEndpoints  map[string][]naming.Endpoint // keyed by proxy address
 	listeners       []flow.Listener
 	wg              sync.WaitGroup
 }
 
 func New(ctx *context.T, rid naming.RoutingID) flow.Manager {
 	m := &manager{
-		rid:       rid,
-		closed:    make(chan struct{}),
-		q:         upcqueue.New(),
-		cache:     NewConnCache(),
-		mu:        &sync.Mutex{},
-		listeners: []flow.Listener{},
+		rid:            rid,
+		closed:         make(chan struct{}),
+		q:              upcqueue.New(),
+		cache:          NewConnCache(),
+		mu:             &sync.Mutex{},
+		proxyEndpoints: make(map[string][]naming.Endpoint),
+		listeners:      []flow.Listener{},
 	}
 	go func() {
-		select {
-		case <-ctx.Done():
-			m.mu.Lock()
-			listeners := m.listeners
-			m.listeners = nil
-			m.mu.Unlock()
-			for _, ln := range listeners {
-				ln.Close()
+		ticker := time.NewTicker(reapCacheInterval)
+		for {
+			select {
+			case <-ctx.Done():
+				m.mu.Lock()
+				listeners := m.listeners
+				m.listeners = nil
+				m.mu.Unlock()
+				for _, ln := range listeners {
+					ln.Close()
+				}
+				m.cache.Close(ctx)
+				m.q.Close()
+				m.wg.Wait()
+				ticker.Stop()
+				close(m.closed)
+				return
+			case <-ticker.C:
+				// Periodically kill closed connections.
+				m.cache.KillConnections(ctx, 0)
 			}
-			m.cache.Close(ctx)
-			m.q.Close()
-			m.wg.Wait()
-			close(m.closed)
 		}
 	}()
 	return m
@@ -72,28 +88,19 @@
 // The flow.Manager associated with ctx must be the receiver of the method,
 // otherwise an error is returned.
 func (m *manager) Listen(ctx *context.T, protocol, address string) error {
-	var (
-		eps []naming.Endpoint
-		err error
-	)
-	if protocol == inaming.Network {
-		eps, err = m.proxyListen(ctx, address)
-	} else {
-		eps, err = m.listen(ctx, protocol, address)
-	}
-	if err != nil {
+	if err := m.validateContext(ctx); err != nil {
 		return err
 	}
-	m.mu.Lock()
-	m.listenEndpoints = append(m.listenEndpoints, eps...)
-	m.mu.Unlock()
-	return nil
+	if protocol == inaming.Network {
+		return m.proxyListen(ctx, address)
+	}
+	return m.listen(ctx, protocol, address)
 }
 
-func (m *manager) listen(ctx *context.T, protocol, address string) ([]naming.Endpoint, error) {
+func (m *manager) listen(ctx *context.T, protocol, address string) error {
 	ln, err := listen(ctx, protocol, address)
 	if err != nil {
-		return nil, flow.NewErrNetwork(ctx, err)
+		return flow.NewErrNetwork(ctx, err)
 	}
 	local := &inaming.Endpoint{
 		Protocol: protocol,
@@ -102,33 +109,69 @@
 	}
 	m.mu.Lock()
 	if m.listeners == nil {
-		return nil, flow.NewErrBadState(ctx, NewErrManagerClosed(ctx))
+		return flow.NewErrBadState(ctx, NewErrManagerClosed(ctx))
 	}
 	m.listeners = append(m.listeners, ln)
 	m.mu.Unlock()
 	m.wg.Add(1)
 	go m.lnAcceptLoop(ctx, ln, local)
-	return []naming.Endpoint{local}, nil
+	m.mu.Lock()
+	m.listenEndpoints = append(m.listenEndpoints, local)
+	m.mu.Unlock()
+	return nil
 }
 
-func (m *manager) proxyListen(ctx *context.T, address string) ([]naming.Endpoint, error) {
+func (m *manager) proxyListen(ctx *context.T, address string) error {
 	ep, err := inaming.NewEndpoint(address)
 	if err != nil {
-		return nil, flow.NewErrBadArg(ctx, err)
+		return flow.NewErrBadArg(ctx, err)
 	}
-	f, err := m.internalDial(ctx, ep, proxyBlessingsForPeer{}.run, &proxyFlowHandler{ctx: ctx, m: m})
-	if err != nil {
-		return nil, flow.NewErrNetwork(ctx, err)
-	}
-	w, err := message.Append(ctx, &message.ProxyServerRequest{}, nil)
-	if err != nil {
-		return nil, flow.NewErrBadArg(ctx, err)
-	}
-	if _, err := f.WriteMsg(w); err != nil {
-		return nil, flow.NewErrBadArg(ctx, err)
-	}
+	m.wg.Add(1)
+	go m.connectToProxy(ctx, address, ep)
+	return nil
+}
 
-	return m.readProxyResponse(ctx, f)
+func (m *manager) connectToProxy(ctx *context.T, address string, ep naming.Endpoint) {
+	defer m.wg.Done()
+	for delay := reconnectDelay; ; delay *= 2 {
+		time.Sleep(delay - reconnectDelay)
+		select {
+		case <-ctx.Done():
+			return
+		default:
+		}
+		f, err := m.internalDial(ctx, ep, proxyBlessingsForPeer{}.run, &proxyFlowHandler{ctx: ctx, m: m})
+		if err != nil {
+			ctx.Error(err)
+			continue
+		}
+		w, err := message.Append(ctx, &message.ProxyServerRequest{}, nil)
+		if err != nil {
+			ctx.Error(err)
+			continue
+		}
+		if _, err = f.WriteMsg(w); err != nil {
+			ctx.Error(err)
+			continue
+		}
+		eps, err := m.readProxyResponse(ctx, f)
+		if err != nil {
+			ctx.Error(err)
+			continue
+		}
+		m.mu.Lock()
+		m.proxyEndpoints[address] = eps
+		m.mu.Unlock()
+		select {
+		case <-ctx.Done():
+			return
+		case <-f.Closed():
+			m.mu.Lock()
+			delete(m.proxyEndpoints, address)
+			m.mu.Unlock()
+			delay = reconnectDelay
+		}
+	}
 }
 
 func (m *manager) readProxyResponse(ctx *context.T, f flow.Flow) ([]naming.Endpoint, error) {
@@ -176,29 +219,37 @@
 			ctx.Errorf("ln.Accept on localEP %v failed: %v", local, err)
 			return
 		}
+		cached := make(chan struct{})
 		c, err := conn.NewAccepted(
 			ctx,
 			flowConn,
 			local,
 			version.Supported,
-			&flowHandler{q: m.q},
+			&flowHandler{q: m.q, cached: cached},
 		)
 		if err != nil {
+			close(cached)
 			flowConn.Close()
 			ctx.Errorf("failed to accept flow.Conn on localEP %v failed: %v", local, err)
 			continue
 		}
 		if err := m.cache.InsertWithRoutingID(c); err != nil {
-			ctx.VI(2).Infof("failed to cache conn %v: %v", c, err)
+			close(cached)
+			ctx.Errorf("failed to cache conn %v: %v", c, err)
 		}
+		close(cached)
 	}
 }
 
 type flowHandler struct {
-	q *upcqueue.T
+	q      *upcqueue.T
+	cached chan struct{}
 }
 
 func (h *flowHandler) HandleFlow(f flow.Flow) error {
+	if h.cached != nil {
+		<-h.cached
+	}
 	return h.q.Put(f)
 }
 
@@ -236,6 +287,9 @@
 	m.mu.Lock()
 	ret := make([]naming.Endpoint, len(m.listenEndpoints))
 	copy(ret, m.listenEndpoints)
+	for _, peps := range m.proxyEndpoints {
+		ret = append(ret, peps...)
+	}
 	m.mu.Unlock()
 	if len(ret) == 0 {
 		ret = append(ret, &inaming.Endpoint{RID: m.rid})
@@ -259,7 +313,9 @@
 // The flow.Manager associated with ctx must be the receiver of the method,
 // otherwise an error is returned.
 func (m *manager) Accept(ctx *context.T) (flow.Flow, error) {
-	// TODO(suharshs): Ensure that m is attached to ctx.
+	if err := m.validateContext(ctx); err != nil {
+		return nil, err
+	}
 	item, err := m.q.Get(ctx.Done())
 	switch {
 	case err == upcqueue.ErrQueueIsClosed:
@@ -280,6 +336,9 @@
 // The flow.Manager associated with ctx must be the receiver of the method,
 // otherwise an error is returned.
 func (m *manager) Dial(ctx *context.T, remote naming.Endpoint, fn flow.BlessingsForPeer) (flow.Flow, error) {
+	if err := m.validateContext(ctx); err != nil {
+		return nil, err
+	}
 	var fh conn.FlowHandler
 	if m.rid != naming.NullRoutingID {
 		fh = &flowHandler{q: m.q}
@@ -288,6 +347,12 @@
 }
 
 func (m *manager) internalDial(ctx *context.T, remote naming.Endpoint, fn flow.BlessingsForPeer, fh conn.FlowHandler) (flow.Flow, error) {
+	// Disallow making connections to ourselves.
+	// TODO(suharshs): Figure out the right thing to do here. We could create a "localflow"
+	// that bypasses auth and is added to the accept queue immediately.
+	if remote.RoutingID() == m.rid {
+		return nil, flow.NewErrBadArg(ctx, NewErrManagerDialingSelf(ctx))
+	}
 	// Look up the connection based on RoutingID first.
 	c, err := m.cache.FindWithRoutingID(remote.RoutingID())
 	if err != nil {
@@ -334,6 +399,7 @@
 			fh,
 		)
 		if err != nil {
+			flowConn.Close()
 			if verror.ErrorID(err) == message.ErrWrongProtocol.ID {
 				return nil, err
 			}
@@ -350,16 +416,17 @@
 
 	// If we are dialing out to a Proxy, we need to dial a conn on this flow, and
 	// return a flow on that corresponding conn.
-	if remote.RoutingID() != c.RemoteEndpoint().RoutingID() {
+	if proxyConn := c; remote.RoutingID() != proxyConn.RemoteEndpoint().RoutingID() {
 		c, err = conn.NewDialed(
 			ctx,
 			f,
-			c.LocalEndpoint(),
+			proxyConn.LocalEndpoint(),
 			remote,
 			version.Supported,
 			fh,
 		)
 		if err != nil {
+			proxyConn.Close(ctx, err)
 			if verror.ErrorID(err) == message.ErrWrongProtocol.ID {
 				return nil, err
 			}
@@ -370,6 +437,7 @@
 		}
 		f, err = c.Dial(ctx, fn)
 		if err != nil {
+			proxyConn.Close(ctx, err)
 			return nil, flow.NewErrDialFailed(ctx, err)
 		}
 	}
@@ -388,6 +456,13 @@
 	return m.closed
 }
 
+func (m *manager) validateContext(ctx *context.T) error {
+	if v23.ExperimentalGetFlowManager(ctx) != m {
+		return flow.NewErrBadArg(ctx, iflow.NewErrWrongObjectInContext(ctx, "manager"))
+	}
+	return nil
+}
+
 func dial(ctx *context.T, p flow.Protocol, protocol, address string) (flow.Conn, error) {
 	if p != nil {
 		var timeout time.Duration
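The Listen, Accept, and Dial doc comments above now require that the flow.Manager associated with the context be the method's receiver. A hypothetical sketch of honoring that contract, using fake.SetFlowManager the same way the tests below do (the routing ID and listen address are arbitrary):

package main

import (
	"v.io/v23"
	"v.io/v23/naming"

	"v.io/x/ref/runtime/factories/fake"
	"v.io/x/ref/runtime/internal/flow/manager"
)

func main() {
	ctx, shutdown := v23.Init()
	defer shutdown()

	m := manager.New(ctx, naming.FixedRoutingID(0x5555))
	// Attach m to the context first; calling m.Listen with a context that carries
	// a different (or no) flow manager now fails with WrongObjectInContext.
	mctx := fake.SetFlowManager(ctx, m)
	if err := m.Listen(mctx, "tcp", "127.0.0.1:0"); err != nil {
		panic(err)
	}
}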
diff --git a/runtime/internal/flow/manager/manager_test.go b/runtime/internal/flow/manager/manager_test.go
index d0a9a41..02a96d3 100644
--- a/runtime/internal/flow/manager/manager_test.go
+++ b/runtime/internal/flow/manager/manager_test.go
@@ -15,7 +15,7 @@
 	"v.io/v23/flow"
 	"v.io/v23/naming"
 
-	_ "v.io/x/ref/runtime/factories/fake"
+	"v.io/x/ref/runtime/factories/fake"
 	"v.io/x/ref/runtime/internal/flow/conn"
 	"v.io/x/ref/runtime/internal/flow/flowtest"
 	"v.io/x/ref/test"
@@ -26,107 +26,127 @@
 	test.Init()
 }
 
-const leakWaitTime = 100 * time.Millisecond
+const leakWaitTime = 250 * time.Millisecond
 
 func TestDirectConnection(t *testing.T) {
 	defer goroutines.NoLeaks(t, leakWaitTime)()
 	ctx, shutdown := v23.Init()
-	defer shutdown()
 
-	rid := naming.FixedRoutingID(0x5555)
-	m := New(ctx, rid)
-
-	if err := m.Listen(ctx, "tcp", "127.0.0.1:0"); err != nil {
+	am := New(ctx, naming.FixedRoutingID(0x5555))
+	actx := fake.SetFlowManager(ctx, am)
+	if err := am.Listen(actx, "tcp", "127.0.0.1:0"); err != nil {
 		t.Fatal(err)
 	}
+	dm := New(ctx, naming.FixedRoutingID(0x1111))
+	dctx := fake.SetFlowManager(ctx, dm)
 
-	testFlows(t, ctx, m, m, flowtest.BlessingsForPeer)
+	testFlows(t, dctx, actx, flowtest.BlessingsForPeer)
+
+	shutdown()
+	<-am.Closed()
+	<-dm.Closed()
 }
 
 func TestDialCachedConn(t *testing.T) {
 	defer goroutines.NoLeaks(t, leakWaitTime)()
 	ctx, shutdown := v23.Init()
-	defer shutdown()
 
 	am := New(ctx, naming.FixedRoutingID(0x5555))
-	if err := am.Listen(ctx, "tcp", "127.0.0.1:0"); err != nil {
+	actx := fake.SetFlowManager(ctx, am)
+	if err := am.Listen(actx, "tcp", "127.0.0.1:0"); err != nil {
 		t.Fatal(err)
 	}
 
 	dm := New(ctx, naming.FixedRoutingID(0x1111))
+	dctx := fake.SetFlowManager(ctx, dm)
 	// At first the cache should be empty.
 	if got, want := len(dm.(*manager).cache.addrCache), 0; got != want {
 		t.Fatalf("got cache size %v, want %v", got, want)
 	}
 	// After dialing a connection the cache should hold one connection.
-	testFlows(t, ctx, dm, am, flowtest.BlessingsForPeer)
+	testFlows(t, dctx, actx, flowtest.BlessingsForPeer)
 	if got, want := len(dm.(*manager).cache.addrCache), 1; got != want {
 		t.Fatalf("got cache size %v, want %v", got, want)
 	}
+	old := dm.(*manager).cache.ridCache[am.RoutingID()]
 	// After dialing another connection the cache should still hold one connection
 	// because the connections should be reused.
-	testFlows(t, ctx, dm, am, flowtest.BlessingsForPeer)
+	testFlows(t, dctx, actx, flowtest.BlessingsForPeer)
 	if got, want := len(dm.(*manager).cache.addrCache), 1; got != want {
-		t.Fatalf("got cache size %v, want %v", got, want)
+		t.Errorf("got cache size %v, want %v", got, want)
 	}
+	if c := dm.(*manager).cache.ridCache[am.RoutingID()]; c != old {
+		t.Errorf("got %v want %v", c, old)
+	}
+
+	shutdown()
+	<-am.Closed()
+	<-dm.Closed()
 }
 
 func TestBidirectionalListeningEndpoint(t *testing.T) {
 	defer goroutines.NoLeaks(t, leakWaitTime)()
 	ctx, shutdown := v23.Init()
-	defer shutdown()
 
 	am := New(ctx, naming.FixedRoutingID(0x5555))
-	if err := am.Listen(ctx, "tcp", "127.0.0.1:0"); err != nil {
+	actx := fake.SetFlowManager(ctx, am)
+	if err := am.Listen(actx, "tcp", "127.0.0.1:0"); err != nil {
 		t.Fatal(err)
 	}
-	eps := am.ListeningEndpoints()
-	if len(eps) == 0 {
-		t.Fatalf("no endpoints listened on")
-	}
+
 	dm := New(ctx, naming.FixedRoutingID(0x1111))
-	testFlows(t, ctx, dm, am, flowtest.BlessingsForPeer)
+	dctx := fake.SetFlowManager(ctx, dm)
+	testFlows(t, dctx, actx, flowtest.BlessingsForPeer)
 	// Now am should be able to make a flow to dm even though dm is not listening.
-	testFlows(t, ctx, am, dm, flowtest.BlessingsForPeer)
+	testFlows(t, actx, dctx, flowtest.BlessingsForPeer)
+
+	shutdown()
+	<-am.Closed()
+	<-dm.Closed()
 }
 
 func TestNullClientBlessings(t *testing.T) {
+	defer goroutines.NoLeaks(t, leakWaitTime)()
 	ctx, shutdown := v23.Init()
-	defer shutdown()
 
 	am := New(ctx, naming.FixedRoutingID(0x5555))
-	if err := am.Listen(ctx, "tcp", "127.0.0.1:0"); err != nil {
+	actx := fake.SetFlowManager(ctx, am)
+	if err := am.Listen(actx, "tcp", "127.0.0.1:0"); err != nil {
 		t.Fatal(err)
 	}
-	dm := New(ctx, naming.NullRoutingID)
-	_, af := testFlows(t, ctx, dm, am, flowtest.BlessingsForPeer)
+	nulldm := New(ctx, naming.NullRoutingID)
+	nctx := fake.SetFlowManager(ctx, nulldm)
+	_, af := testFlows(t, nctx, actx, flowtest.BlessingsForPeer)
 	// Ensure that the remote blessings of the underlying conn of the accepted flow are zero.
 	if rBlessings := af.Conn().(*conn.Conn).RemoteBlessings(); !rBlessings.IsZero() {
 		t.Errorf("got %v, want zero-value blessings", rBlessings)
 	}
-	dm = New(ctx, naming.FixedRoutingID(0x1111))
-	_, af = testFlows(t, ctx, dm, am, flowtest.BlessingsForPeer)
+	dm := New(ctx, naming.FixedRoutingID(0x1111))
+	dctx := fake.SetFlowManager(ctx, dm)
+	_, af = testFlows(t, dctx, actx, flowtest.BlessingsForPeer)
 	// Ensure that the remote blessings of the underlying conn of the accepted flow are
 	// non-zero if we did specify a RoutingID.
 	if rBlessings := af.Conn().(*conn.Conn).RemoteBlessings(); rBlessings.IsZero() {
 		t.Errorf("got %v, want non-zero blessings", rBlessings)
 	}
+
+	shutdown()
+	<-am.Closed()
+	<-dm.Closed()
+	<-nulldm.Closed()
 }
 
-func testFlows(t *testing.T, ctx *context.T, dm, am flow.Manager, bFn flow.BlessingsForPeer) (df, af flow.Flow) {
-	eps := am.ListeningEndpoints()
-	if len(eps) == 0 {
-		t.Fatalf("no endpoints listened on")
-	}
-	ep := eps[0]
+func testFlows(t *testing.T, dctx, actx *context.T, bFn flow.BlessingsForPeer) (df, af flow.Flow) {
+	am := v23.ExperimentalGetFlowManager(actx)
+	ep := am.ListeningEndpoints()[0]
 	var err error
-	df, err = dm.Dial(ctx, ep, bFn)
+	df, err = v23.ExperimentalGetFlowManager(dctx).Dial(dctx, ep, bFn)
 	if err != nil {
 		t.Fatal(err)
 	}
 	want := "do you read me?"
 	writeLine(df, want)
-	af, err = am.Accept(ctx)
+	af, err = am.Accept(actx)
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/runtime/internal/naming/namespace/namespace.go b/runtime/internal/naming/namespace/namespace.go
index 1a9f109..eaa20c4 100644
--- a/runtime/internal/naming/namespace/namespace.go
+++ b/runtime/internal/naming/namespace/namespace.go
@@ -159,9 +159,9 @@
 	case verror.ErrBadArg.ID:
 		// This should cover "rpc: wrong number of in-args".
 		return true
-	case verror.ErrNoExist.ID, verror.ErrUnknownMethod.ID, verror.ErrUnknownSuffix.ID:
+	case verror.ErrNoExist.ID, verror.ErrUnknownMethod.ID, verror.ErrUnknownSuffix.ID, errNoServers.ID:
 		// This should cover "rpc: unknown method", "rpc: dispatcher not
-		// found", and dispatcher Lookup not found errors.
+		// found", dispatcher Lookup not found, and "No servers found to resolve query "errors.
 		return true
 	case verror.ErrBadProtocol.ID:
 		// This covers "rpc: response decoding failed: EOF".
diff --git a/runtime/internal/rpc/benchmark/simple/main.go b/runtime/internal/rpc/benchmark/simple/main.go
index c904a7d..fb1d52c 100644
--- a/runtime/internal/rpc/benchmark/simple/main.go
+++ b/runtime/internal/rpc/benchmark/simple/main.go
@@ -14,13 +14,13 @@
 	"v.io/v23"
 	"v.io/v23/context"
 	"v.io/v23/naming"
+	"v.io/x/ref"
 	"v.io/x/ref/lib/security/securityflag"
 	_ "v.io/x/ref/runtime/factories/static"
 	"v.io/x/ref/runtime/internal/flow/flowtest"
 	fmanager "v.io/x/ref/runtime/internal/flow/manager"
 	"v.io/x/ref/runtime/internal/rpc/benchmark/internal"
 	"v.io/x/ref/runtime/internal/rpc/stream/manager"
-	"v.io/x/ref/runtime/internal/rt"
 	"v.io/x/ref/test"
 	"v.io/x/ref/test/benchmark"
 	"v.io/x/ref/test/testutil"
@@ -55,7 +55,7 @@
 	b.StopTimer()
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		if rt.TransitionState >= rt.XServers {
+		if ref.RPCTransitionState() >= ref.XServers {
 			m := fmanager.New(nctx, naming.FixedRoutingID(0xc))
 			b.StartTimer()
 			_, err := m.Dial(nctx, serverEP, flowtest.BlessingsForPeer)
diff --git a/runtime/internal/rpc/cancel_test.go b/runtime/internal/rpc/cancel_test.go
deleted file mode 100644
index 9a11c07..0000000
--- a/runtime/internal/rpc/cancel_test.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2015 The Vanadium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package rpc
-
-import (
-	"testing"
-
-	"v.io/v23"
-	"v.io/v23/context"
-	"v.io/v23/namespace"
-	"v.io/v23/naming"
-	"v.io/v23/rpc"
-	"v.io/v23/security"
-	"v.io/v23/verror"
-
-	"v.io/x/ref/runtime/internal/rpc/stream"
-	"v.io/x/ref/runtime/internal/rpc/stream/manager"
-	tnaming "v.io/x/ref/runtime/internal/testing/mocks/naming"
-)
-
-type canceld struct {
-	sm       stream.Manager
-	ns       namespace.T
-	name     string
-	child    string
-	started  chan struct{}
-	canceled chan struct{}
-	stop     func() error
-}
-
-func (c *canceld) Run(ctx *context.T, _ rpc.StreamServerCall) error {
-	close(c.started)
-
-	client, err := InternalNewClient(c.sm, c.ns)
-	if err != nil {
-		ctx.Error(err)
-		return err
-	}
-
-	ctx.Infof("Run: %s", c.child)
-	if c.child != "" {
-		if _, err = client.StartCall(ctx, c.child, "Run", []interface{}{}); err != nil {
-			ctx.Error(err)
-			return err
-		}
-	}
-
-	<-ctx.Done()
-	close(c.canceled)
-	return nil
-}
-
-func makeCanceld(ctx *context.T, ns namespace.T, name, child string) (*canceld, error) {
-	sm := manager.InternalNew(ctx, naming.FixedRoutingID(0x111111111))
-	s, err := testInternalNewServer(ctx, sm, ns)
-	if err != nil {
-		return nil, err
-	}
-
-	if _, err := s.Listen(listenSpec); err != nil {
-		return nil, err
-	}
-
-	c := &canceld{
-		sm:       sm,
-		ns:       ns,
-		name:     name,
-		child:    child,
-		started:  make(chan struct{}, 0),
-		canceled: make(chan struct{}, 0),
-		stop:     s.Stop,
-	}
-
-	if err := s.Serve(name, c, security.AllowEveryone()); err != nil {
-		return nil, err
-	}
-	ctx.Infof("Serving: %q", name)
-	return c, nil
-}
-
-// TestCancellationPropagation tests that cancellation propogates along an
-// RPC call chain without user intervention.
-func TestCancellationPropagation(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-	var (
-		sm               = manager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
-		ns               = tnaming.NewSimpleNamespace()
-		pclient, pserver = newClientServerPrincipals()
-		serverCtx, _     = v23.WithPrincipal(ctx, pserver)
-		clientCtx, _     = v23.WithPrincipal(ctx, pclient)
-	)
-	client, err := InternalNewClient(sm, ns)
-	if err != nil {
-		t.Error(err)
-	}
-
-	c1, err := makeCanceld(serverCtx, ns, "c1", "c2")
-	if err != nil {
-		t.Fatal("Can't start server:", err, verror.DebugString(err))
-	}
-	defer c1.stop()
-	c2, err := makeCanceld(serverCtx, ns, "c2", "")
-	if err != nil {
-		t.Fatal("Can't start server:", err)
-	}
-	defer c2.stop()
-
-	clientCtx, cancel := context.WithCancel(clientCtx)
-	_, err = client.StartCall(clientCtx, "c1", "Run", []interface{}{})
-	if err != nil {
-		t.Fatal("can't call: ", err)
-	}
-
-	<-c1.started
-	<-c2.started
-
-	ctx.Info("cancelling initial call")
-	cancel()
-
-	ctx.Info("waiting for children to be canceled")
-	<-c1.canceled
-	<-c2.canceled
-}
diff --git a/runtime/internal/rpc/client.go b/runtime/internal/rpc/client.go
index 64312d0..a7f427f 100644
--- a/runtime/internal/rpc/client.go
+++ b/runtime/internal/rpc/client.go
@@ -93,7 +93,7 @@
 
 var _ rpc.Client = (*client)(nil)
 
-func InternalNewClient(streamMgr stream.Manager, ns namespace.T, opts ...rpc.ClientOpt) (rpc.Client, error) {
+func DeprecatedNewClient(streamMgr stream.Manager, ns namespace.T, opts ...rpc.ClientOpt) (rpc.Client, error) {
 	c := &client{
 		streamMgr: streamMgr,
 		ns:        ns,
diff --git a/runtime/internal/rpc/debug_test.go b/runtime/internal/rpc/debug_test.go
deleted file mode 100644
index 24873ad..0000000
--- a/runtime/internal/rpc/debug_test.go
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright 2015 The Vanadium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package rpc
-
-import (
-	"io"
-	"reflect"
-	"sort"
-	"testing"
-
-	"v.io/v23"
-	"v.io/v23/context"
-	"v.io/v23/naming"
-	"v.io/v23/options"
-	"v.io/v23/rpc"
-
-	"v.io/x/ref/lib/stats"
-	"v.io/x/ref/runtime/internal/rpc/stream/manager"
-	tnaming "v.io/x/ref/runtime/internal/testing/mocks/naming"
-	"v.io/x/ref/services/debug/debuglib"
-	"v.io/x/ref/test/testutil"
-)
-
-func TestDebugServer(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-	// Setup the client and server principals, with the client willing to share its
-	// blessing with the server.
-	var (
-		pclient = testutil.NewPrincipal("client")
-		pserver = testutil.NewPrincipal("server")
-		bclient = bless(pserver, pclient, "client") // server/client blessing.
-		sctx, _ = v23.WithPrincipal(ctx, pserver)
-		cctx, _ = v23.WithPrincipal(ctx, pclient)
-	)
-	pclient.AddToRoots(bclient)                    // Client recognizes "server" as a root of blessings.
-	pclient.BlessingStore().Set(bclient, "server") // Client presents bclient to server
-
-	debugDisp := debuglib.NewDispatcher(nil)
-
-	sm := manager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
-	defer sm.Shutdown()
-	ns := tnaming.NewSimpleNamespace()
-
-	server, err := testInternalNewServer(sctx, sm, ns, ReservedNameDispatcher{debugDisp})
-	if err != nil {
-		t.Fatalf("InternalNewServer failed: %v", err)
-	}
-	defer server.Stop()
-	eps, err := server.Listen(listenSpec)
-	if err != nil {
-		t.Fatalf("server.Listen failed: %v", err)
-	}
-	if err := server.Serve("", &testObject{}, nil); err != nil {
-		t.Fatalf("server.Serve failed: %v", err)
-	}
-
-	client, err := InternalNewClient(sm, ns)
-	if err != nil {
-		t.Fatalf("InternalNewClient failed: %v", err)
-	}
-	defer client.Close()
-	ep := eps[0]
-	// Call the Foo method on ""
-	{
-		var value string
-		if err := client.Call(cctx, ep.Name(), "Foo", nil, []interface{}{&value}); err != nil {
-			t.Fatalf("client.Call failed: %v", err)
-		}
-		if want := "BAR"; value != want {
-			t.Errorf("unexpected value: Got %v, want %v", value, want)
-		}
-	}
-	// Call Value on __debug/stats/testing/foo
-	{
-		foo := stats.NewString("testing/foo")
-		foo.Set("The quick brown fox jumps over the lazy dog")
-		addr := naming.JoinAddressName(ep.String(), "__debug/stats/testing/foo")
-		var value string
-		if err := client.Call(cctx, addr, "Value", nil, []interface{}{&value}, options.Preresolved{}); err != nil {
-			t.Fatalf("client.Call failed: %v", err)
-		}
-		if want := foo.Value(); value != want {
-			t.Errorf("unexpected result: Got %v, want %v", value, want)
-		}
-	}
-
-	// Call Glob
-	testcases := []struct {
-		name, pattern string
-		expected      []string
-	}{
-		{"", "*", []string{}},
-		{"", "__*", []string{"__debug"}},
-		{"", "__*/*", []string{"__debug/logs", "__debug/pprof", "__debug/stats", "__debug/vtrace"}},
-		{"__debug", "*", []string{"logs", "pprof", "stats", "vtrace"}},
-	}
-	for _, tc := range testcases {
-		addr := naming.JoinAddressName(ep.String(), tc.name)
-		call, err := client.StartCall(cctx, addr, rpc.GlobMethod, []interface{}{tc.pattern}, options.Preresolved{})
-		if err != nil {
-			t.Fatalf("client.StartCall failed for %q: %v", tc.name, err)
-		}
-		results := []string{}
-		for {
-			var gr naming.GlobReply
-			if err := call.Recv(&gr); err != nil {
-				if err != io.EOF {
-					t.Fatalf("Recv failed for %q: %v. Results received thus far: %q", tc.name, err, results)
-				}
-				break
-			}
-			switch v := gr.(type) {
-			case naming.GlobReplyEntry:
-				results = append(results, v.Value.Name)
-			}
-		}
-		if err := call.Finish(); err != nil {
-			t.Fatalf("call.Finish failed for %q: %v", tc.name, err)
-		}
-		sort.Strings(results)
-		if !reflect.DeepEqual(tc.expected, results) {
-			t.Errorf("unexpected results for %q. Got %v, want %v", tc.name, results, tc.expected)
-		}
-	}
-}
-
-type testObject struct {
-}
-
-func (o testObject) Foo(*context.T, rpc.ServerCall) (string, error) {
-	return "BAR", nil
-}
diff --git a/runtime/internal/rpc/full_test.go b/runtime/internal/rpc/full_test.go
index cb24d94..4d06e25 100644
--- a/runtime/internal/rpc/full_test.go
+++ b/runtime/internal/rpc/full_test.go
@@ -5,11 +5,8 @@
 package rpc
 
 import (
-	"encoding/hex"
-	"errors"
 	"fmt"
 	"io"
-	"net"
 	"path/filepath"
 	"reflect"
 	"runtime"
@@ -19,8 +16,6 @@
 	"testing"
 	"time"
 
-	"v.io/x/lib/netstate"
-
 	"v.io/v23"
 	"v.io/v23/context"
 	"v.io/v23/i18n"
@@ -34,18 +29,14 @@
 	"v.io/v23/vdl"
 	"v.io/v23/verror"
 	"v.io/v23/vtrace"
-
 	"v.io/x/ref/lib/pubsub"
 	"v.io/x/ref/lib/stats"
-	"v.io/x/ref/runtime/internal/lib/publisher"
 	"v.io/x/ref/runtime/internal/lib/websocket"
-	inaming "v.io/x/ref/runtime/internal/naming"
 	_ "v.io/x/ref/runtime/internal/rpc/protocols/tcp"
 	_ "v.io/x/ref/runtime/internal/rpc/protocols/ws"
 	_ "v.io/x/ref/runtime/internal/rpc/protocols/wsh"
 	"v.io/x/ref/runtime/internal/rpc/stream"
 	imanager "v.io/x/ref/runtime/internal/rpc/stream/manager"
-	"v.io/x/ref/runtime/internal/rpc/stream/vc"
 	tnaming "v.io/x/ref/runtime/internal/testing/mocks/naming"
 	"v.io/x/ref/test/testutil"
 )
@@ -79,11 +70,11 @@
 }
 
 func testInternalNewServerWithPubsub(ctx *context.T, streamMgr stream.Manager, ns namespace.T, settingsPublisher *pubsub.Publisher, settingsStreamName string, opts ...rpc.ServerOpt) (DeprecatedServer, error) {
-	client, err := InternalNewClient(streamMgr, ns)
+	client, err := DeprecatedNewClient(streamMgr, ns)
 	if err != nil {
 		return nil, err
 	}
-	return InternalNewServer(ctx, streamMgr, ns, settingsPublisher, settingsStreamName, client, opts...)
+	return DeprecatedNewServer(ctx, streamMgr, ns, settingsPublisher, settingsStreamName, client, opts...)
 }
 
 func testInternalNewServer(ctx *context.T, streamMgr stream.Manager, ns namespace.T, opts ...rpc.ServerOpt) (DeprecatedServer, error) {
@@ -376,8 +367,8 @@
 		b.ep, b.server = startServerWS(t, ctx, server, b.sm, b.ns, b.name, testServerDisp{ts}, shouldUseWebsocket)
 	}
 	var err error
-	if b.client, err = InternalNewClient(b.sm, b.ns); err != nil {
-		t.Fatalf("InternalNewClient failed: %v", err)
+	if b.client, err = DeprecatedNewClient(b.sm, b.ns); err != nil {
+		t.Fatalf("DeprecatedNewClient failed: %v", err)
 	}
 	return
 }
@@ -411,268 +402,6 @@
 	return sm
 }
 
-func TestMultipleCallsToServeAndName(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-	sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
-	ns := tnaming.NewSimpleNamespace()
-	sctx, _ := v23.WithPrincipal(ctx, testutil.NewPrincipal("server"))
-	server, err := testInternalNewServer(sctx, sm, ns)
-	if err != nil {
-		t.Errorf("InternalNewServer failed: %v", err)
-	}
-	_, err = server.Listen(listenSpec)
-	if err != nil {
-		t.Errorf("server.Listen failed: %v", err)
-	}
-
-	disp := &testServerDisp{&testServer{}}
-	if err := server.ServeDispatcher("mountpoint/server", disp); err != nil {
-		t.Errorf("server.ServeDispatcher failed: %v", err)
-	}
-
-	n1 := "mountpoint/server"
-	n2 := "should_appear_in_mt/server"
-	n3 := "should_appear_in_mt/server"
-	n4 := "should_not_appear_in_mt/server"
-
-	verifyMount(t, ctx, ns, n1)
-
-	if server.ServeDispatcher(n2, disp) == nil {
-		t.Errorf("server.ServeDispatcher should have failed")
-	}
-
-	if err := server.Serve(n2, &testServer{}, nil); err == nil {
-		t.Errorf("server.Serve should have failed")
-	}
-
-	if err := server.AddName(n3); err != nil {
-		t.Errorf("server.AddName failed: %v", err)
-	}
-
-	if err := server.AddName(n3); err != nil {
-		t.Errorf("server.AddName failed: %v", err)
-	}
-	verifyMount(t, ctx, ns, n2)
-	verifyMount(t, ctx, ns, n3)
-
-	server.RemoveName(n1)
-	verifyMountMissing(t, ctx, ns, n1)
-
-	server.RemoveName("some randome name")
-
-	if err := server.ServeDispatcher(n4, &testServerDisp{&testServer{}}); err == nil {
-		t.Errorf("server.ServeDispatcher should have failed")
-	}
-	verifyMountMissing(t, ctx, ns, n4)
-
-	if err := server.Stop(); err != nil {
-		t.Errorf("server.Stop failed: %v", err)
-	}
-
-	verifyMountMissing(t, ctx, ns, n1)
-	verifyMountMissing(t, ctx, ns, n2)
-	verifyMountMissing(t, ctx, ns, n3)
-}
-
-func TestRPCServerAuthorization(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-
-	const (
-		publicKeyErr        = "not matched by server key"
-		missingDischargeErr = "missing discharge"
-		expiryErr           = "is after expiry"
-		allowedErr          = "do not match any allowed server patterns"
-	)
-	type O []rpc.CallOpt // shorthand
-	var (
-		pprovider, pclient, pserver = testutil.NewPrincipal("root"), testutil.NewPrincipal(), testutil.NewPrincipal()
-		pdischarger                 = pprovider
-		now                         = time.Now()
-		noErrID                     verror.IDAction
-
-		// Third-party caveats on blessings presented by server.
-		cavTPValid   = mkThirdPartyCaveat(pdischarger.PublicKey(), "mountpoint/dischargeserver", mkCaveat(security.NewExpiryCaveat(now.Add(24*time.Hour))))
-		cavTPExpired = mkThirdPartyCaveat(pdischarger.PublicKey(), "mountpoint/dischargeserver", mkCaveat(security.NewExpiryCaveat(now.Add(-1*time.Second))))
-
-		// Server blessings.
-		bServer          = bless(pprovider, pserver, "server")
-		bServerExpired   = bless(pprovider, pserver, "expiredserver", mkCaveat(security.NewExpiryCaveat(time.Now().Add(-1*time.Second))))
-		bServerTPValid   = bless(pprovider, pserver, "serverWithTPCaveats", cavTPValid)
-		bServerTPExpired = bless(pprovider, pserver, "serverWithExpiredTPCaveats", cavTPExpired)
-		bOther           = bless(pprovider, pserver, "other")
-		bTwoBlessings, _ = security.UnionOfBlessings(bServer, bOther)
-
-		mgr   = imanager.InternalNew(ctx, naming.FixedRoutingID(0x1111111))
-		ns    = tnaming.NewSimpleNamespace()
-		tests = []struct {
-			server security.Blessings // blessings presented by the server to the client.
-			name   string             // name provided by the client to StartCall
-			opts   O                  // options provided to StartCall.
-			errID  verror.IDAction
-			err    string
-		}{
-			// Client accepts talking to the server only if the
-			// server presents valid blessings (and discharges)
-			// consistent with the ones published in the endpoint.
-			{bServer, "mountpoint/server", nil, noErrID, ""},
-			{bServerTPValid, "mountpoint/server", nil, noErrID, ""},
-
-			// Client will not talk to a server that presents
-			// expired blessings or is missing discharges.
-			{bServerExpired, "mountpoint/server", nil, verror.ErrNotTrusted, expiryErr},
-			{bServerTPExpired, "mountpoint/server", nil, verror.ErrNotTrusted, missingDischargeErr},
-
-			// Testing the AllowedServersPolicy option.
-			{bServer, "mountpoint/server", O{options.AllowedServersPolicy{"otherroot"}}, verror.ErrNotTrusted, allowedErr},
-			{bServer, "mountpoint/server", O{options.AllowedServersPolicy{"root"}}, noErrID, ""},
-			{bTwoBlessings, "mountpoint/server", O{options.AllowedServersPolicy{"root/other"}}, noErrID, ""},
-
-			// Test the ServerPublicKey option.
-			{bOther, "mountpoint/server", O{options.SkipServerEndpointAuthorization{}, options.ServerPublicKey{
-				PublicKey: bOther.PublicKey(),
-			}}, noErrID, ""},
-			{bOther, "mountpoint/server", O{options.SkipServerEndpointAuthorization{}, options.ServerPublicKey{
-				PublicKey: testutil.NewPrincipal("irrelevant").PublicKey(),
-			}}, verror.ErrNotTrusted, publicKeyErr},
-
-			// Test the "paranoid" names, where the pattern is provided in the name.
-			{bServer, "__(root/server)/mountpoint/server", nil, noErrID, ""},
-			{bServer, "__(root/other)/mountpoint/server", nil, verror.ErrNotTrusted, allowedErr},
-			{bTwoBlessings, "__(root/server)/mountpoint/server", O{options.AllowedServersPolicy{"root/other"}}, noErrID, ""},
-		}
-	)
-	// Start the discharge server.
-	_, dischargeServer := startServer(t, ctx, pdischarger, mgr, ns, "mountpoint/dischargeserver", testutil.LeafDispatcher(&dischargeServer{}, security.AllowEveryone()))
-	defer stopServer(t, ctx, dischargeServer, ns, "mountpoint/dischargeserver")
-
-	// Make the client and server principals trust root certificates from
-	// pprovider
-	pclient.AddToRoots(pprovider.BlessingStore().Default())
-	pserver.AddToRoots(pprovider.BlessingStore().Default())
-	// Set a blessing that the client is willing to share with servers
-	// (that are blessed by pprovider).
-	pclient.BlessingStore().Set(bless(pprovider, pclient, "client"), "root")
-
-	clientCtx, _ := v23.WithPrincipal(ctx, pclient)
-	client, err := InternalNewClient(mgr, ns)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer client.Close()
-
-	var server rpc.Server
-	stop := func() {
-		if server != nil {
-			stopServer(t, ctx, server, ns, "mountpoint/server")
-		}
-	}
-	defer stop()
-	for i, test := range tests {
-		stop() // Stop any server started in the previous test.
-		name := fmt.Sprintf("(#%d: Name:%q, Server:%q, opts:%v)", i, test.name, test.server, test.opts)
-		if err := pserver.BlessingStore().SetDefault(test.server); err != nil {
-			t.Fatalf("SetDefault failed on server's BlessingStore: %v", err)
-		}
-		if _, err := pserver.BlessingStore().Set(test.server, "root"); err != nil {
-			t.Fatalf("Set failed on server's BlessingStore: %v", err)
-		}
-		_, server = startServer(t, ctx, pserver, mgr, ns, "mountpoint/server", testServerDisp{&testServer{}})
-		clientCtx, cancel := context.WithCancel(clientCtx)
-		call, err := client.StartCall(clientCtx, test.name, "Method", nil, test.opts...)
-		if !matchesErrorPattern(err, test.errID, test.err) {
-			t.Errorf(`%s: client.StartCall: got error "%v", want to match "%v"`, name, err, test.err)
-		} else if call != nil {
-			blessings, proof := call.RemoteBlessings()
-			if proof.IsZero() {
-				t.Errorf("%s: Returned zero value for remote blessings", name)
-			}
-			// Currently all tests are configured so that the only
-			// blessings presented by the server that are
-			// recognized by the client match the pattern
-			// "root"
-			if len(blessings) < 1 || !security.BlessingPattern("root").MatchedBy(blessings...) {
-				t.Errorf("%s: Client sees server as %v, expected a single blessing matching root", name, blessings)
-			}
-		}
-		cancel()
-	}
-}
-
-func TestServerManInTheMiddleAttack(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-	// Test scenario: A server mounts itself, but then some other service
-	// somehow "takes over" the network endpoint (a naughty router
-	// perhaps), thus trying to steal traffic.
-	var (
-		pclient        = testutil.NewPrincipal("client")
-		pserver        = testutil.NewPrincipal("server")
-		pattacker      = testutil.NewPrincipal("attacker")
-		attackerCtx, _ = v23.WithPrincipal(ctx, pattacker)
-		cctx, _        = v23.WithPrincipal(ctx, pclient)
-	)
-	// Client recognizes both the server and the attacker's blessings.
-	// (Though, it doesn't need to do the latter for the purposes of this
-	// test).
-	pclient.AddToRoots(pserver.BlessingStore().Default())
-	pclient.AddToRoots(pattacker.BlessingStore().Default())
-
-	// Start up the attacker's server.
-	attacker, err := testInternalNewServer(
-		attackerCtx,
-		imanager.InternalNew(ctx, naming.FixedRoutingID(0xaaaaaaaaaaaaaaaa)),
-		// (To prevent the attacker for legitimately mounting on the
-		// namespace that the client will use, provide it with a
-		// different namespace).
-		tnaming.NewSimpleNamespace(),
-	)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if _, err := attacker.Listen(listenSpec); err != nil {
-		t.Fatal(err)
-	}
-	if err := attacker.ServeDispatcher("mountpoint/server", testServerDisp{&testServer{}}); err != nil {
-		t.Fatal(err)
-	}
-	var ep naming.Endpoint
-	if status := attacker.Status(); len(status.Endpoints) < 1 {
-		t.Fatalf("Attacker server does not have an endpoint: %+v", status)
-	} else {
-		ep = status.Endpoints[0]
-	}
-
-	// The legitimate server would have mounted the same endpoint on the
-	// namespace, but with different blessings.
-	ns := tnaming.NewSimpleNamespace()
-	ep.(*inaming.Endpoint).Blessings = []string{"server"}
-	if err := ns.Mount(ctx, "mountpoint/server", ep.Name(), time.Hour); err != nil {
-		t.Fatal(err)
-	}
-
-	// The RPC call should fail because the blessings presented by the
-	// (attacker's) server are not consistent with the ones registered in
-	// the mounttable trusted by the client.
-	client, err := InternalNewClient(
-		imanager.InternalNew(cctx, naming.FixedRoutingID(0xcccccccccccccccc)),
-		ns)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer client.Close()
-	ctx, _ = v23.WithPrincipal(cctx, pclient)
-	if _, err := client.StartCall(cctx, "mountpoint/server", "Closure", nil); verror.ErrorID(err) != verror.ErrNotTrusted.ID {
-		t.Errorf("Got error %v (errorid=%v), want errorid=%v", err, verror.ErrorID(err), verror.ErrNotTrusted.ID)
-	}
-	// But the RPC should succeed if the client explicitly
-	// decided to skip server authorization.
-	if _, err := client.StartCall(cctx, "mountpoint/server", "Closure", nil, options.SkipServerEndpointAuthorization{}); err != nil {
-		t.Errorf("Unexpected error(%v) when skipping server authorization", err)
-	}
-}
-
 type websocketMode bool
 type closeSendMode bool
 
@@ -684,206 +413,6 @@
 	noCloseSend closeSendMode = false
 )
 
-func TestRPC(t *testing.T) {
-	testRPC(t, closeSend, noWebsocket)
-}
-
-func TestRPCWithWebsocket(t *testing.T) {
-	testRPC(t, closeSend, useWebsocket)
-}
-
-// TestCloseSendOnFinish tests that Finish informs the server that no more
-// inputs will be sent by the client if CloseSend has not already done so.
-func TestRPCCloseSendOnFinish(t *testing.T) {
-	testRPC(t, noCloseSend, noWebsocket)
-}
-
-func TestRPCCloseSendOnFinishWithWebsocket(t *testing.T) {
-	testRPC(t, noCloseSend, useWebsocket)
-}
-
-func testRPC(t *testing.T, shouldCloseSend closeSendMode, shouldUseWebsocket websocketMode) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-	type v []interface{}
-	type testcase struct {
-		name       string
-		method     string
-		args       v
-		streamArgs v
-		startErr   error
-		results    v
-		finishErr  error
-	}
-	var (
-		tests = []testcase{
-			{"mountpoint/server/suffix", "Closure", nil, nil, nil, nil, nil},
-			{"mountpoint/server/suffix", "Error", nil, nil, nil, nil, errMethod},
-
-			{"mountpoint/server/suffix", "Echo", v{"foo"}, nil, nil, v{`method:"Echo",suffix:"suffix",arg:"foo"`}, nil},
-			{"mountpoint/server/suffix/abc", "Echo", v{"bar"}, nil, nil, v{`method:"Echo",suffix:"suffix/abc",arg:"bar"`}, nil},
-
-			{"mountpoint/server/suffix", "EchoUser", v{"foo", userType("bar")}, nil, nil, v{`method:"EchoUser",suffix:"suffix",arg:"foo"`, userType("bar")}, nil},
-			{"mountpoint/server/suffix/abc", "EchoUser", v{"baz", userType("bla")}, nil, nil, v{`method:"EchoUser",suffix:"suffix/abc",arg:"baz"`, userType("bla")}, nil},
-			{"mountpoint/server/suffix", "Stream", v{"foo"}, v{userType("bar"), userType("baz")}, nil, v{`method:"Stream",suffix:"suffix",arg:"foo" bar baz`}, nil},
-			{"mountpoint/server/suffix/abc", "Stream", v{"123"}, v{userType("456"), userType("789")}, nil, v{`method:"Stream",suffix:"suffix/abc",arg:"123" 456 789`}, nil},
-			{"mountpoint/server/suffix", "EchoBlessings", nil, nil, nil, v{"[server]", "[client]"}, nil},
-			{"mountpoint/server/suffix", "EchoAndError", v{"bugs bunny"}, nil, nil, v{`method:"EchoAndError",suffix:"suffix",arg:"bugs bunny"`}, nil},
-			{"mountpoint/server/suffix", "EchoAndError", v{"error"}, nil, nil, nil, errMethod},
-			{"mountpoint/server/suffix", "EchoLang", nil, nil, nil, v{"foolang"}, nil},
-		}
-		name = func(t testcase) string {
-			return fmt.Sprintf("%s.%s(%v)", t.name, t.method, t.args)
-		}
-
-		pclient, pserver = newClientServerPrincipals()
-		b                = createBundleWS(t, ctx, pserver, &testServer{}, shouldUseWebsocket)
-	)
-	defer b.cleanup(t, ctx)
-	ctx, _ = v23.WithPrincipal(ctx, pclient)
-	ctx = i18n.WithLangID(ctx, "foolang")
-	for _, test := range tests {
-		ctx.VI(1).Infof("%s client.StartCall", name(test))
-		vname := test.name
-		if shouldUseWebsocket {
-			var err error
-			vname, err = fakeWSName(ctx, b.ns, vname)
-			if err != nil && err != test.startErr {
-				t.Errorf(`%s ns.Resolve got error "%v", want "%v"`, name(test), err, test.startErr)
-				continue
-			}
-		}
-		call, err := b.client.StartCall(ctx, vname, test.method, test.args)
-		if err != test.startErr {
-			t.Errorf(`%s client.StartCall got error "%v", want "%v"`, name(test), err, test.startErr)
-			continue
-		}
-		for _, sarg := range test.streamArgs {
-			ctx.VI(1).Infof("%s client.Send(%v)", name(test), sarg)
-			if err := call.Send(sarg); err != nil {
-				t.Errorf(`%s call.Send(%v) got unexpected error "%v"`, name(test), sarg, err)
-			}
-			var u userType
-			if err := call.Recv(&u); err != nil {
-				t.Errorf(`%s call.Recv(%v) got unexpected error "%v"`, name(test), sarg, err)
-			}
-			if !reflect.DeepEqual(u, sarg) {
-				t.Errorf("%s call.Recv got value %v, want %v", name(test), u, sarg)
-			}
-		}
-		if shouldCloseSend {
-			ctx.VI(1).Infof("%s call.CloseSend", name(test))
-			// When the method does not involve streaming
-			// arguments, the server gets all the arguments in
-			// StartCall and then sends a response without
-			// (unnecessarily) waiting for a CloseSend message from
-			// the client.  If the server responds before the
-			// CloseSend call is made at the client, the CloseSend
-			// call will fail.  Thus, only check for errors on
-			// CloseSend if there are streaming arguments to begin
-			// with (i.e., only if the server is expected to wait
-			// for the CloseSend notification).
-			if err := call.CloseSend(); err != nil && len(test.streamArgs) > 0 {
-				t.Errorf(`%s call.CloseSend got unexpected error "%v"`, name(test), err)
-			}
-		}
-		ctx.VI(1).Infof("%s client.Finish", name(test))
-		results := makeResultPtrs(test.results)
-		err = call.Finish(results...)
-		if got, want := err, test.finishErr; (got == nil) != (want == nil) {
-			t.Errorf(`%s call.Finish got error "%v", want "%v'`, name(test), got, want)
-		} else if want != nil && verror.ErrorID(got) != verror.ErrorID(want) {
-			t.Errorf(`%s call.Finish got error "%v", want "%v"`, name(test), got, want)
-		}
-		checkResultPtrs(t, name(test), results, test.results)
-	}
-}
-
-func TestMultipleFinish(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-	type v []interface{}
-	var (
-		pclient, pserver = newClientServerPrincipals()
-		b                = createBundle(t, ctx, pserver, &testServer{})
-	)
-	defer b.cleanup(t, ctx)
-	ctx, _ = v23.WithPrincipal(ctx, pclient)
-	call, err := b.client.StartCall(ctx, "mountpoint/server/suffix", "Echo", v{"foo"})
-	if err != nil {
-		t.Fatalf(`client.StartCall got error "%v"`, err)
-	}
-	var results string
-	err = call.Finish(&results)
-	if err != nil {
-		t.Fatalf(`call.Finish got error "%v"`, err)
-	}
-	// Calling Finish a second time should result in a useful error.
-	if err = call.Finish(&results); !matchesErrorPattern(err, verror.ErrBadState, "Finish has already been called") {
-		t.Fatalf(`got "%v", want "%v"`, err, verror.ErrBadState)
-	}
-}
-
-// granter implements rpc.Granter.
-//
-// It returns the specified (security.Blessings, error) pair if either the
-// blessing or the error is specified. Otherwise it returns a blessing
-// derived from the local blessings of the current call.
-type granter struct {
-	rpc.CallOpt
-	b   security.Blessings
-	err error
-}
-
-func (g granter) Grant(ctx *context.T, call security.Call) (security.Blessings, error) {
-	if !g.b.IsZero() || g.err != nil {
-		return g.b, g.err
-	}
-	return call.LocalPrincipal().Bless(call.RemoteBlessings().PublicKey(), call.LocalBlessings(), "blessed", security.UnconstrainedUse())
-}
-
-func TestGranter(t *testing.T) {
-	var (
-		pclient, pserver = newClientServerPrincipals()
-		ctx, shutdown    = initForTest()
-		b                = createBundle(t, ctx, pserver, &testServer{})
-	)
-	defer shutdown()
-	defer b.cleanup(t, ctx)
-
-	ctx, _ = v23.WithPrincipal(ctx, pclient)
-	tests := []struct {
-		granter                       rpc.Granter
-		startErrID, finishErrID       verror.IDAction
-		blessing, starterr, finisherr string
-	}{
-		{blessing: ""},
-		{granter: granter{b: bless(pclient, pserver, "blessed")}, blessing: "client/blessed"},
-		{granter: granter{err: errors.New("hell no")}, startErrID: verror.ErrNotTrusted, starterr: "hell no"},
-		{granter: granter{}, blessing: "client/blessed"},
-		{granter: granter{b: pclient.BlessingStore().Default()}, finishErrID: verror.ErrNoAccess, finisherr: "blessing granted not bound to this server"},
-	}
-	for i, test := range tests {
-		call, err := b.client.StartCall(ctx, "mountpoint/server/suffix", "EchoGrantedBlessings", []interface{}{"argument"}, test.granter)
-		if !matchesErrorPattern(err, test.startErrID, test.starterr) {
-			t.Errorf("%d: %+v: StartCall returned error %v", i, test, err)
-		}
-		if err != nil {
-			continue
-		}
-		var result, blessing string
-		if err = call.Finish(&result, &blessing); !matchesErrorPattern(err, test.finishErrID, test.finisherr) {
-			t.Errorf("%+v: Finish returned error %v", test, err)
-		}
-		if err != nil {
-			continue
-		}
-		if result != "argument" || blessing != test.blessing {
-			t.Errorf("%+v: Got (%q, %q)", test, result, blessing)
-		}
-	}
-}
-
 // dischargeTestServer implements the discharge service. Always fails to
 // issue a discharge, but records the impetus and traceid of the RPC call.
 type dischargeTestServer struct {
@@ -904,276 +433,6 @@
 	return impetus, traceid
 }
 
-func TestDischargeImpetusAndContextPropagation(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-	var (
-		pserver     = testutil.NewPrincipal("server")
-		pdischarger = testutil.NewPrincipal("discharger")
-		pclient     = testutil.NewPrincipal("client")
-		pctx, _     = v23.WithPrincipal(ctx, pdischarger)
-		sctx, _     = v23.WithPrincipal(ctx, pserver)
-
-		sm = imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
-		ns = tnaming.NewSimpleNamespace()
-	)
-
-	// Setup the client so that it shares a blessing with a third-party caveat with the server.
-	setClientBlessings := func(req security.ThirdPartyRequirements) security.Principal {
-		cav, err := security.NewPublicKeyCaveat(pdischarger.PublicKey(), "mountpoint/discharger", req, security.UnconstrainedUse())
-		if err != nil {
-			t.Fatalf("Failed to create ThirdPartyCaveat(%+v): %v", req, err)
-		}
-		b, err := pclient.BlessSelf("client_for_server", cav)
-		if err != nil {
-			t.Fatalf("BlessSelf failed: %v", err)
-		}
-		pclient.BlessingStore().Set(b, "server")
-		return pclient
-	}
-
-	// Initialize the client principal.
-	// It trusts both the application server and the discharger.
-	pclient.AddToRoots(pserver.BlessingStore().Default())
-	pclient.AddToRoots(pdischarger.BlessingStore().Default())
-
-	// Setup the discharge server.
-	var tester dischargeTestServer
-	dischargeServer, err := testInternalNewServer(pctx, sm, ns)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer dischargeServer.Stop()
-	if _, err := dischargeServer.Listen(listenSpec); err != nil {
-		t.Fatal(err)
-	}
-	if err := dischargeServer.Serve("mountpoint/discharger", &tester, &testServerAuthorizer{}); err != nil {
-		t.Fatal(err)
-	}
-
-	// Setup the application server.
-	appServer, err := testInternalNewServer(sctx, sm, ns)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer appServer.Stop()
-	eps, err := appServer.Listen(listenSpec)
-	if err != nil {
-		t.Fatal(err)
-	}
-	// TODO(bjornick,cnicolaou,ashankar): This is a hack to workaround the
-	// fact that a single Listen on the "tcp" protocol followed by a call
-	// to Serve(<name>, ...) transparently creates two endpoints (one for
-	// tcp, one for websockets) and maps both to <name> via a mount.
-	// Because all endpoints to a name are tried in parallel, this
-	// transparency makes this test hard to follow (many discharge fetch
-	// attempts are made - one for VIF authentication, one for VC
-	// authentication and one for the actual RPC - and having them be made
-	// to two different endpoints in parallel leads to a lot of
-	// non-determinism). The last plan of record known by the author of
-	// this comment was to stop this sly creation of two endpoints and
-	// require that they be done explicitly. When that happens, this hack
-	// can go away, but till then, this workaround allows the test to be
-	// more predictable by ensuring there is only one VIF/VC/Flow to the
-	// server.
-	object := naming.JoinAddressName(eps[0].String(), "object") // instead of "mountpoint/object"
-	if err := appServer.Serve("mountpoint/object", &testServer{}, &testServerAuthorizer{}); err != nil {
-		t.Fatal(err)
-	}
-	tests := []struct {
-		Requirements security.ThirdPartyRequirements
-		Impetus      security.DischargeImpetus
-	}{
-		{ // No requirements, no impetus
-			Requirements: security.ThirdPartyRequirements{},
-			Impetus:      security.DischargeImpetus{},
-		},
-		{ // Require everything
-			Requirements: security.ThirdPartyRequirements{ReportServer: true, ReportMethod: true, ReportArguments: true},
-			Impetus:      security.DischargeImpetus{Server: []security.BlessingPattern{"server"}, Method: "Method", Arguments: []*vdl.Value{vdl.StringValue("argument")}},
-		},
-		{ // Require only the method name
-			Requirements: security.ThirdPartyRequirements{ReportMethod: true},
-			Impetus:      security.DischargeImpetus{Method: "Method"},
-		},
-	}
-
-	for _, test := range tests {
-		pclient := setClientBlessings(test.Requirements)
-		cctx, _ := v23.WithPrincipal(ctx, pclient)
-		client, err := InternalNewClient(sm, ns)
-		if err != nil {
-			t.Fatalf("InternalNewClient(%+v) failed: %v", test.Requirements, err)
-		}
-		defer client.Close()
-		tid := vtrace.GetSpan(cctx).Trace()
-		// StartCall should fetch the discharge; finishing the RPC is irrelevant for this test.
-		if _, err := client.StartCall(cctx, object, "Method", []interface{}{"argument"}); err != nil {
-			t.Errorf("StartCall(%+v) failed: %v", test.Requirements, err)
-			continue
-		}
-		impetus, traceid := tester.Release()
-		// There should have been exactly 1 attempt to fetch discharges when making
-		// the RPC to the remote object.
-		if len(impetus) != 1 || len(traceid) != 1 {
-			t.Errorf("Test %+v: Got (%d, %d) (#impetus, #traceid), wanted exactly one", test.Requirements, len(impetus), len(traceid))
-			continue
-		}
-		// VC creation does not have any "impetus"; it is established without
-		// knowledge of the context of the RPC, so ignore it.
-		//
-		// TODO(ashankar): Should the impetus of the RPC that initiated the
-		// VIF/VC creation be propagated?
-		if got, want := impetus[len(impetus)-1], test.Impetus; !reflect.DeepEqual(got, want) {
-			t.Errorf("Test %+v: Got impetus %v, want %v", test.Requirements, got, want)
-		}
-		// But the context used for all of this should be the same
-		// (thereby allowing debug traces to link VIF/VC creation with
-		// the RPC that initiated them).
-		for idx, got := range traceid {
-			if !reflect.DeepEqual(got, tid) {
-				t.Errorf("Test %+v: %d - Got trace id %q, want %q", test.Requirements, idx, hex.EncodeToString(got[:]), hex.EncodeToString(tid[:]))
-			}
-		}
-	}
-}
-
-func TestRPCClientAuthorization(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-
-	type v []interface{}
-	var (
-		// Principals
-		pclient, pserver = testutil.NewPrincipal("client"), testutil.NewPrincipal("server")
-		pdischarger      = testutil.NewPrincipal("discharger")
-
-		now = time.Now()
-
-		serverName          = "mountpoint/server"
-		dischargeServerName = "mountpoint/dischargeserver"
-
-		// Caveats on blessings to the client: First-party caveats
-		cavOnlyEcho = mkCaveat(security.NewMethodCaveat("Echo"))
-		cavExpired  = mkCaveat(security.NewExpiryCaveat(now.Add(-1 * time.Second)))
-		// Caveats on blessings to the client: Third-party caveats
-		cavTPValid   = mkThirdPartyCaveat(pdischarger.PublicKey(), dischargeServerName, mkCaveat(security.NewExpiryCaveat(now.Add(24*time.Hour))))
-		cavTPExpired = mkThirdPartyCaveat(pdischarger.PublicKey(), dischargeServerName, mkCaveat(security.NewExpiryCaveat(now.Add(-1*time.Second))))
-
-		// Client blessings that will be tested.
-		bServerClientOnlyEcho  = bless(pserver, pclient, "onlyecho", cavOnlyEcho)
-		bServerClientExpired   = bless(pserver, pclient, "expired", cavExpired)
-		bServerClientTPValid   = bless(pserver, pclient, "dischargeable_third_party_caveat", cavTPValid)
-		bServerClientTPExpired = bless(pserver, pclient, "expired_third_party_caveat", cavTPExpired)
-		bClient                = pclient.BlessingStore().Default()
-		bRandom, _             = pclient.BlessSelf("random")
-
-		mgr   = imanager.InternalNew(ctx, naming.FixedRoutingID(0x1111111))
-		ns    = tnaming.NewSimpleNamespace()
-		tests = []struct {
-			blessings  security.Blessings // Blessings used by the client
-			name       string             // object name on which the method is invoked
-			method     string
-			args       v
-			results    v
-			authorized bool // Whether or not the RPC should be authorized by the server.
-		}{
-			// There are three different authorization policies (security.Authorizer implementations)
-			// used by the server, depending on the suffix (see testServerDisp.Lookup):
-			// - nilAuth suffix: the default authorization policy (only delegates of or delegators of the server can call RPCs)
-			// - aclAuth suffix: the AccessList only allows blessings matching the patterns "server" or "client"
-			// - other suffixes: testServerAuthorizer allows any principal to call any method except "Unauthorized"
-
-			// Expired blessings should fail nilAuth and aclAuth (which care about names), but should succeed on
-			// other suffixes (which allow all blessings), unless calling the Unauthorized method.
-			{bServerClientExpired, "mountpoint/server/nilAuth", "Echo", v{"foo"}, v{""}, false},
-			{bServerClientExpired, "mountpoint/server/aclAuth", "Echo", v{"foo"}, v{""}, false},
-			{bServerClientExpired, "mountpoint/server/suffix", "Echo", v{"foo"}, v{""}, true},
-			{bServerClientExpired, "mountpoint/server/suffix", "Unauthorized", nil, v{""}, false},
-
-			// Same for blessings that should fail to obtain a discharge for the third party caveat.
-			{bServerClientTPExpired, "mountpoint/server/nilAuth", "Echo", v{"foo"}, v{""}, false},
-			{bServerClientTPExpired, "mountpoint/server/aclAuth", "Echo", v{"foo"}, v{""}, false},
-			{bServerClientTPExpired, "mountpoint/server/suffix", "Echo", v{"foo"}, v{""}, true},
-			{bServerClientTPExpired, "mountpoint/server/suffix", "Unauthorized", nil, v{""}, false},
-
-			// The "server/client" blessing (with MethodCaveat("Echo")) should satisfy all authorization policies
-			// when "Echo" is called.
-			{bServerClientOnlyEcho, "mountpoint/server/nilAuth", "Echo", v{"foo"}, v{""}, true},
-			{bServerClientOnlyEcho, "mountpoint/server/aclAuth", "Echo", v{"foo"}, v{""}, true},
-			{bServerClientOnlyEcho, "mountpoint/server/suffix", "Echo", v{"foo"}, v{""}, true},
-
-			// The "server/client" blessing (with MethodCaveat("Echo")) should satisfy no authorization policy
-			// when any other method is invoked, except for the testServerAuthorizer policy (which will
-			// not recognize the blessing "server/onlyecho", but it would authorize anyone anyway).
-			{bServerClientOnlyEcho, "mountpoint/server/nilAuth", "Closure", nil, nil, false},
-			{bServerClientOnlyEcho, "mountpoint/server/aclAuth", "Closure", nil, nil, false},
-			{bServerClientOnlyEcho, "mountpoint/server/suffix", "Closure", nil, nil, true},
-
-			// The "client" blessing doesn't satisfy the default authorization policy, but does satisfy
-			// the AccessList and the testServerAuthorizer policy.
-			{bClient, "mountpoint/server/nilAuth", "Echo", v{"foo"}, v{""}, false},
-			{bClient, "mountpoint/server/aclAuth", "Echo", v{"foo"}, v{""}, true},
-			{bClient, "mountpoint/server/suffix", "Echo", v{"foo"}, v{""}, true},
-			{bClient, "mountpoint/server/suffix", "Unauthorized", nil, v{""}, false},
-
-			// The "random" blessing does not satisfy either the default policy or the AccessList, but does
-			// satisfy testServerAuthorizer.
-			{bRandom, "mountpoint/server/nilAuth", "Echo", v{"foo"}, v{""}, false},
-			{bRandom, "mountpoint/server/aclAuth", "Echo", v{"foo"}, v{""}, false},
-			{bRandom, "mountpoint/server/suffix", "Echo", v{"foo"}, v{""}, true},
-			{bRandom, "mountpoint/server/suffix", "Unauthorized", nil, v{""}, false},
-
-			// The "server/dischargeable_third_party_caveat" blessing satisfies all policies.
-			// (the discharges should be fetched).
-			{bServerClientTPValid, "mountpoint/server/nilAuth", "Echo", v{"foo"}, v{""}, true},
-			{bServerClientTPValid, "mountpoint/server/aclAuth", "Echo", v{"foo"}, v{""}, true},
-			{bServerClientTPValid, "mountpoint/server/suffix", "Echo", v{"foo"}, v{""}, true},
-			{bServerClientTPValid, "mountpoint/server/suffix", "Unauthorized", nil, v{""}, false},
-		}
-	)
-
-	// Start the main server.
-	_, server := startServer(t, ctx, pserver, mgr, ns, serverName, testServerDisp{&testServer{}})
-	defer stopServer(t, ctx, server, ns, serverName)
-
-	// Start the discharge server.
-	_, dischargeServer := startServer(t, ctx, pdischarger, mgr, ns, dischargeServerName, testutil.LeafDispatcher(&dischargeServer{}, security.AllowEveryone()))
-	defer stopServer(t, ctx, dischargeServer, ns, dischargeServerName)
-
-	// The server should recognize the client principal as an authority on "client" and "random" blessings.
-	pserver.AddToRoots(bClient)
-	pserver.AddToRoots(bRandom)
-	// And the client needs to recognize the server's and discharger's blessings to decide which of its
-	// own blessings to share.
-	pclient.AddToRoots(pserver.BlessingStore().Default())
-	pclient.AddToRoots(pdischarger.BlessingStore().Default())
-	// Set a blessing on the client's blessing store to be presented to the discharge server.
-	pclient.BlessingStore().Set(pclient.BlessingStore().Default(), "discharger")
-	// testutil.NewPrincipal sets up a principal that shares blessings with all servers; undo that.
-	pclient.BlessingStore().Set(security.Blessings{}, security.AllPrincipals)
-
-	for i, test := range tests {
-		name := fmt.Sprintf("#%d: %q.%s(%v) by %v", i, test.name, test.method, test.args, test.blessings)
-		client, err := InternalNewClient(mgr, ns)
-		if err != nil {
-			t.Fatalf("InternalNewClient failed: %v", err)
-		}
-		defer client.Close()
-
-		pclient.BlessingStore().Set(test.blessings, "server")
-		ctx, _ := v23.WithPrincipal(ctx, pclient)
-		err = client.Call(ctx, test.name, test.method, test.args, makeResultPtrs(test.results))
-		if err != nil && test.authorized {
-			t.Errorf(`%s client.Call got error: "%v", wanted the RPC to succeed`, name, err)
-		} else if err == nil && !test.authorized {
-			t.Errorf("%s client.Call succeeded, expected authorization failure", name)
-		} else if !test.authorized && verror.ErrorID(err) != verror.ErrNoAccess.ID {
-			t.Errorf("%s. call.Finish returned error %v(%v), wanted %v", name, verror.ErrorID(verror.Convert(verror.ErrNoAccess, nil, err)), err, verror.ErrNoAccess)
-		}
-	}
-}
-
 // singleBlessingStore implements security.BlessingStore. It is a
 // BlessingStore that marks the last blessing that was set on it as
 // shareable with any peer. It does not care about the public key that
@@ -1226,410 +485,6 @@
 	return &p.b
 }
 
-func TestRPCClientBlessingsPublicKey(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-	var (
-		pprovider, pserver = testutil.NewPrincipal("root"), testutil.NewPrincipal("server")
-		pclient            = &singleBlessingPrincipal{Principal: testutil.NewPrincipal("client")}
-
-		bserver = bless(pprovider, pserver, "server")
-		bclient = bless(pprovider, pclient, "client")
-		bvictim = bless(pprovider, testutil.NewPrincipal("victim"), "victim")
-	)
-	// Make the client and server trust blessings from pprovider.
-	pclient.AddToRoots(pprovider.BlessingStore().Default())
-	pserver.AddToRoots(pprovider.BlessingStore().Default())
-
-	// Make the server present bserver to all clients and start the server.
-	pserver.BlessingStore().SetDefault(bserver)
-	b := createBundle(t, ctx, pserver, &testServer{})
-	defer b.cleanup(t, ctx)
-
-	ctx, _ = v23.WithPrincipal(ctx, pclient)
-	tests := []struct {
-		blessings security.Blessings
-		errID     verror.IDAction
-		err       string
-	}{
-		{blessings: bclient},
-		// server disallows clients from authenticating with blessings not bound to
-		// the client principal's public key
-		{blessings: bvictim, errID: verror.ErrNoAccess, err: "bound to a different public key"},
-		{blessings: bserver, errID: verror.ErrNoAccess, err: "bound to a different public key"},
-	}
-	for i, test := range tests {
-		name := fmt.Sprintf("%d: Client RPCing with blessings %v", i, test.blessings)
-		pclient.BlessingStore().Set(test.blessings, "root")
-		if err := b.client.Call(ctx, "mountpoint/server/suffix", "Closure", nil, nil); !matchesErrorPattern(err, test.errID, test.err) {
-			t.Errorf("%v: client.Call returned error %v", name, err)
-			continue
-		}
-	}
-}
-
-func TestServerLocalBlessings(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-	var (
-		pprovider, pclient, pserver = testutil.NewPrincipal("root"), testutil.NewPrincipal("client"), testutil.NewPrincipal("server")
-		pdischarger                 = pprovider
-
-		mgr = imanager.InternalNew(ctx, naming.FixedRoutingID(0x1111111))
-		ns  = tnaming.NewSimpleNamespace()
-
-		tpCav = mkThirdPartyCaveat(pdischarger.PublicKey(), "mountpoint/dischargeserver", mkCaveat(security.NewExpiryCaveat(time.Now().Add(time.Hour))))
-
-		bserver = bless(pprovider, pserver, "server", tpCav)
-		bclient = bless(pprovider, pclient, "client")
-	)
-	// Make the client and server principals trust root certificates from
-	// pprovider.
-	pclient.AddToRoots(pprovider.BlessingStore().Default())
-	pserver.AddToRoots(pprovider.BlessingStore().Default())
-
-	// Make the server present bserver to all clients.
-	pserver.BlessingStore().SetDefault(bserver)
-
-	// Start the server and the discharger.
-	_, server := startServer(t, ctx, pserver, mgr, ns, "mountpoint/server", testServerDisp{&testServer{}})
-	defer stopServer(t, ctx, server, ns, "mountpoint/server")
-
-	_, dischargeServer := startServer(t, ctx, pdischarger, mgr, ns, "mountpoint/dischargeserver", testutil.LeafDispatcher(&dischargeServer{}, security.AllowEveryone()))
-	defer stopServer(t, ctx, dischargeServer, ns, "mountpoint/dischargeserver")
-
-	// Make the client present bclient to all servers that are blessed
-	// by pprovider.
-	pclient.BlessingStore().Set(bclient, "root")
-	client, err := InternalNewClient(mgr, ns)
-	if err != nil {
-		t.Fatalf("InternalNewClient failed: %v", err)
-	}
-	defer client.Close()
-
-	ctx, _ = v23.WithPrincipal(ctx, pclient)
-	var gotServer, gotClient string
-	if err := client.Call(ctx, "mountpoint/server/suffix", "EchoBlessings", nil, []interface{}{&gotServer, &gotClient}); err != nil {
-		t.Fatalf("Finish failed: %v", err)
-	}
-	if wantServer, wantClient := "[root/server]", "[root/client]"; gotServer != wantServer || gotClient != wantClient {
-		t.Fatalf("EchoBlessings: got %v, %v want %v, %v", gotServer, gotClient, wantServer, wantClient)
-	}
-}
-
-func TestDischargePurgeFromCache(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-
-	var (
-		pserver     = testutil.NewPrincipal("server")
-		pdischarger = pserver // In general, the discharger can be a separate principal. In this test, it happens to be the server.
-		pclient     = testutil.NewPrincipal("client")
-		// Client is blessed with a third-party caveat. The discharger service issues discharges with a fakeTimeCaveat.
-		// This blessing is presented to "server".
-		bclient = bless(pserver, pclient, "client", mkThirdPartyCaveat(pdischarger.PublicKey(), "mountpoint/server/discharger", security.UnconstrainedUse()))
-
-		b = createBundle(t, ctx, pserver, &testServer{})
-	)
-	defer b.cleanup(t, ctx)
-	// Setup the client to recognize the server's blessing and present bclient to it.
-	pclient.AddToRoots(pserver.BlessingStore().Default())
-	pclient.BlessingStore().Set(bclient, "server")
-
-	var err error
-	if b.client, err = InternalNewClient(b.sm, b.ns); err != nil {
-		t.Fatalf("InternalNewClient failed: %v", err)
-	}
-	ctx, _ = v23.WithPrincipal(ctx, pclient)
-	call := func() error {
-		var got string
-		if err := b.client.Call(ctx, "mountpoint/server/aclAuth", "Echo", []interface{}{"batman"}, []interface{}{&got}); err != nil {
-			return err
-		}
-		if want := `method:"Echo",suffix:"aclAuth",arg:"batman"`; got != want {
-			return verror.Convert(verror.ErrBadArg, nil, fmt.Errorf("Got [%v] want [%v]", got, want))
-		}
-		return nil
-	}
-
-	// First call should succeed
-	if err := call(); err != nil {
-		t.Fatal(err)
-	}
-	// Advance virtual clock, which will invalidate the discharge
-	clock.Advance(1)
-	if err, want := call(), "not authorized"; !matchesErrorPattern(err, verror.ErrNoAccess, want) {
-		t.Errorf("Got error [%v] wanted to match pattern %q", err, want)
-	}
-	// But retrying will succeed since the discharge should be purged from cache and refreshed
-	if err := call(); err != nil {
-		t.Fatal(err)
-	}
-}
-
-type cancelTestServer struct {
-	started   chan struct{}
-	cancelled chan struct{}
-	t         *testing.T
-}
-
-func newCancelTestServer(t *testing.T) *cancelTestServer {
-	return &cancelTestServer{
-		started:   make(chan struct{}),
-		cancelled: make(chan struct{}),
-		t:         t,
-	}
-}
-
-func (s *cancelTestServer) CancelStreamReader(ctx *context.T, call rpc.StreamServerCall) error {
-	close(s.started)
-	var b []byte
-	if err := call.Recv(&b); err != io.EOF {
-		s.t.Errorf("Got error %v, want io.EOF", err)
-	}
-	<-ctx.Done()
-	close(s.cancelled)
-	return nil
-}
-
-// CancelStreamIgnorer doesn't read from its input stream so all its
-// buffers fill.  The intention is to show that call.Done() is closed
-// even when the stream is stalled.
-func (s *cancelTestServer) CancelStreamIgnorer(ctx *context.T, _ rpc.StreamServerCall) error {
-	close(s.started)
-	<-ctx.Done()
-	close(s.cancelled)
-	return nil
-}
-
-func waitForCancel(t *testing.T, ts *cancelTestServer, cancel context.CancelFunc) {
-	<-ts.started
-	cancel()
-	<-ts.cancelled
-}
-
-// TestCancel tests cancellation while the server is reading from a stream.
-func TestCancel(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-	var (
-		ts               = newCancelTestServer(t)
-		pclient, pserver = newClientServerPrincipals()
-		b                = createBundle(t, ctx, pserver, ts)
-	)
-	defer b.cleanup(t, ctx)
-
-	ctx, _ = v23.WithPrincipal(ctx, pclient)
-	ctx, cancel := context.WithCancel(ctx)
-	_, err := b.client.StartCall(ctx, "mountpoint/server/suffix", "CancelStreamReader", []interface{}{})
-	if err != nil {
-		t.Fatalf("Start call failed: %v", err)
-	}
-	waitForCancel(t, ts, cancel)
-}
-
-// TestCancelWithFullBuffers tests that the cancel message gets through even if the
-// writer has filled the buffers and the server is not reading.
-func TestCancelWithFullBuffers(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-	var (
-		ts               = newCancelTestServer(t)
-		pclient, pserver = newClientServerPrincipals()
-		b                = createBundle(t, ctx, pserver, ts)
-	)
-	defer b.cleanup(t, ctx)
-
-	ctx, _ = v23.WithPrincipal(ctx, pclient)
-	ctx, cancel := context.WithCancel(ctx)
-	call, err := b.client.StartCall(ctx, "mountpoint/server/suffix", "CancelStreamIgnorer", []interface{}{})
-	if err != nil {
-		t.Fatalf("Start call failed: %v", err)
-	}
-	// Fill up all the write buffers to ensure that cancelling works even when the stream
-	// is blocked.
-	call.Send(make([]byte, vc.MaxSharedBytes))
-	call.Send(make([]byte, vc.DefaultBytesBufferedPerFlow))
-
-	waitForCancel(t, ts, cancel)
-}
-
-type streamRecvInGoroutineServer struct{ c chan error }
-
-func (s *streamRecvInGoroutineServer) RecvInGoroutine(_ *context.T, call rpc.StreamServerCall) error {
-	// Spawn a goroutine to read streaming data from the client.
-	go func() {
-		var i interface{}
-		for {
-			err := call.Recv(&i)
-			if err != nil {
-				s.c <- err
-				return
-			}
-		}
-	}()
-	// Imagine the server did some processing here and now that it is done,
-	// it does not care to see what else the client has to say.
-	return nil
-}
-
-func TestStreamReadTerminatedByServer(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-	var (
-		pclient, pserver = newClientServerPrincipals()
-		s                = &streamRecvInGoroutineServer{c: make(chan error, 1)}
-		b                = createBundle(t, ctx, pserver, s)
-	)
-	defer b.cleanup(t, ctx)
-
-	ctx, _ = v23.WithPrincipal(ctx, pclient)
-	call, err := b.client.StartCall(ctx, "mountpoint/server/suffix", "RecvInGoroutine", []interface{}{})
-	if err != nil {
-		t.Fatalf("StartCall failed: %v", err)
-	}
-
-	c := make(chan error, 1)
-	go func() {
-		for i := 0; true; i++ {
-			if err := call.Send(i); err != nil {
-				c <- err
-				return
-			}
-		}
-	}()
-
-	// The goroutine at the server executing "Recv" should have terminated
-	// with EOF.
-	if err := <-s.c; err != io.EOF {
-		t.Errorf("Got %v at server, want io.EOF", err)
-	}
-	// The client Send should have failed since the RPC has been
-	// terminated.
-	if err := <-c; err == nil {
-		t.Errorf("Client Send should fail as the server should have closed the flow")
-	}
-}
-
-// TestConnectWithIncompatibleServers tests that clients ignore incompatible endpoints.
-func TestConnectWithIncompatibleServers(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-	var (
-		pclient, pserver = newClientServerPrincipals()
-		b                = createBundle(t, ctx, pserver, &testServer{})
-	)
-	defer b.cleanup(t, ctx)
-
-	// Publish some incompatible endpoints.
-	publisher := publisher.New(ctx, b.ns, publishPeriod)
-	defer publisher.WaitForStop()
-	defer publisher.Stop()
-	publisher.AddName("incompatible", false, false)
-	publisher.AddServer("/@2@tcp@localhost:10000@@1000000@2000000@@")
-	publisher.AddServer("/@2@tcp@localhost:10001@@2000000@3000000@@")
-
-	ctx, _ = v23.WithPrincipal(ctx, pclient)
-	_, err := b.client.StartCall(ctx, "incompatible/suffix", "Echo", []interface{}{"foo"}, options.NoRetry{})
-	if verror.ErrorID(err) != verror.ErrNoServers.ID {
-		t.Errorf("Expected error %v, found: %v", verror.ErrNoServers, err)
-	}
-
-	// Now add a server with a compatible endpoint and try again.
-	publisher.AddServer("/" + b.ep.String())
-	publisher.AddName("incompatible", false, false)
-
-	call, err := b.client.StartCall(ctx, "incompatible/suffix", "Echo", []interface{}{"foo"})
-	if err != nil {
-		t.Fatal(err)
-	}
-	var result string
-	if err = call.Finish(&result); err != nil {
-		t.Errorf("Unexpected error finishing call %v", err)
-	}
-	expected := `method:"Echo",suffix:"suffix",arg:"foo"`
-	if result != expected {
-		t.Errorf("Wrong result returned.  Got %s, wanted %s", result, expected)
-	}
-}
-
-func TestPreferredAddress(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-	sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
-	defer sm.Shutdown()
-	ns := tnaming.NewSimpleNamespace()
-	pa := netstate.AddressChooserFunc(func(string, []net.Addr) ([]net.Addr, error) {
-		return []net.Addr{netstate.NewNetAddr("tcp", "1.1.1.1")}, nil
-	})
-	ctx, _ = v23.WithPrincipal(ctx, testutil.NewPrincipal("server"))
-	server, err := testInternalNewServer(ctx, sm, ns)
-	if err != nil {
-		t.Errorf("InternalNewServer failed: %v", err)
-	}
-	defer server.Stop()
-
-	spec := rpc.ListenSpec{
-		Addrs:          rpc.ListenAddrs{{"tcp", ":0"}},
-		AddressChooser: pa,
-	}
-	eps, err := server.Listen(spec)
-	if err != nil {
-		t.Errorf("unexpected error: %s", err)
-	}
-	iep := eps[0].(*inaming.Endpoint)
-	host, _, err := net.SplitHostPort(iep.Address)
-	if err != nil {
-		t.Errorf("unexpected error: %s", err)
-	}
-	if got, want := host, "1.1.1.1"; got != want {
-		t.Errorf("got %q, want %q", got, want)
-	}
-	// Won't override the specified address.
-	eps, err = server.Listen(listenSpec)
-	iep = eps[0].(*inaming.Endpoint)
-	host, _, err = net.SplitHostPort(iep.Address)
-	if err != nil {
-		t.Errorf("unexpected error: %s", err)
-	}
-	if got, want := host, "127.0.0.1"; got != want {
-		t.Errorf("got %q, want %q", got, want)
-	}
-}
-
-func TestPreferredAddressErrors(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-	sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
-	defer sm.Shutdown()
-	ns := tnaming.NewSimpleNamespace()
-	paerr := netstate.AddressChooserFunc(func(_ string, a []net.Addr) ([]net.Addr, error) {
-		return nil, fmt.Errorf("oops")
-	})
-	ctx, _ = v23.WithPrincipal(ctx, testutil.NewPrincipal("server"))
-	server, err := testInternalNewServer(ctx, sm, ns)
-	if err != nil {
-		t.Errorf("InternalNewServer failed: %v", err)
-	}
-	defer server.Stop()
-	spec := rpc.ListenSpec{
-		Addrs:          rpc.ListenAddrs{{"tcp", ":0"}},
-		AddressChooser: paerr,
-	}
-	eps, err := server.Listen(spec)
-
-	if got, want := len(eps), 0; got != want {
-		t.Errorf("got %d, want %d", got, want)
-	}
-	status := server.Status()
-	if got, want := len(status.Errors), 1; got != want {
-		t.Errorf("got %d, want %d", got, want)
-	}
-	if got, want := status.Errors[0].Error(), "oops"; got != want {
-		t.Errorf("got %q, want %q", got, want)
-	}
-}
-
 func TestSecurityNone(t *testing.T) {
 	ctx, shutdown := initForTest()
 	defer shutdown()
@@ -1647,9 +502,9 @@
 	if err := server.ServeDispatcher("mp/server", disp); err != nil {
 		t.Fatalf("server.Serve failed: %v", err)
 	}
-	client, err := InternalNewClient(sm, ns)
+	client, err := DeprecatedNewClient(sm, ns)
 	if err != nil {
-		t.Fatalf("InternalNewClient failed: %v", err)
+		t.Fatalf("DeprecatedNewClient failed: %v", err)
 	}
 	// When using SecurityNone, all authorization checks should be skipped, so
 	// unauthorized methods should be callable.
@@ -1680,9 +535,9 @@
 	if err := server.ServeDispatcher("mp/server", disp); err != nil {
 		t.Fatalf("server.Serve failed: %v", err)
 	}
-	client, err := InternalNewClient(sm, ns)
+	client, err := DeprecatedNewClient(sm, ns)
 	if err != nil {
-		t.Fatalf("InternalNewClient failed: %v", err)
+		t.Fatalf("DeprecatedNewClient failed: %v", err)
 	}
 
 	// A call should fail if the principal in the ctx is nil and SecurityNone is not specified.
@@ -1696,25 +551,6 @@
 	}
 }
 
-func TestCallWithNilContext(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-	sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x66666666))
-	defer sm.Shutdown()
-	ns := tnaming.NewSimpleNamespace()
-	client, err := InternalNewClient(sm, ns)
-	if err != nil {
-		t.Fatalf("InternalNewClient failed: %v", err)
-	}
-	call, err := client.StartCall(nil, "foo", "bar", []interface{}{}, options.SecurityNone)
-	if call != nil {
-		t.Errorf("Expected nil interface got: %#v", call)
-	}
-	if verror.ErrorID(err) != verror.ErrBadArg.ID {
-		t.Errorf("Expected a BadArg error, got: %s", err.Error())
-	}
-}
-
 func TestServerBlessingsOpt(t *testing.T) {
 	ctx, shutdown := initForTest()
 	defer shutdown()
@@ -1747,7 +583,7 @@
 	runClient := func(server string) ([]string, error) {
 		smc := imanager.InternalNew(ctx, naming.FixedRoutingID(0xc))
 		defer smc.Shutdown()
-		client, err := InternalNewClient(
+		client, err := DeprecatedNewClient(
 			smc,
 			ns)
 		if err != nil {
@@ -1817,7 +653,7 @@
 		}
 		smc := imanager.InternalNew(ctx, rid)
 		defer smc.Shutdown()
-		client, err := InternalNewClient(smc, ns)
+		client, err := DeprecatedNewClient(smc, ns)
 		if err != nil {
 			t.Fatalf("failed to create client: %v", err)
 		}
@@ -1882,7 +718,7 @@
 	}
 	sm := imanager.InternalNew(ctx, rid)
 
-	c, err := InternalNewClient(sm, ns)
+	c, err := DeprecatedNewClient(sm, ns)
 	if err != nil {
 		t.Fatalf("failed to create client: %v", err)
 	}
@@ -1932,7 +768,7 @@
 		}
 		smc := imanager.InternalNew(sctx, rid)
 		defer smc.Shutdown()
-		client, err := InternalNewClient(smc, ns)
+		client, err := DeprecatedNewClient(smc, ns)
 		if err != nil {
 			t.Fatalf("failed to create client: %v", err)
 		}
@@ -2011,7 +847,7 @@
 	defer runServer(t, sctx, ns, mountName, &testServer{}).Shutdown()
 
 	smc := imanager.InternalNew(sctx, naming.FixedRoutingID(0xc))
-	client, err := InternalNewClient(smc, ns)
+	client, err := DeprecatedNewClient(smc, ns)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -2082,7 +918,7 @@
 	}
 	smc := imanager.InternalNew(ctx, rid)
 	defer smc.Shutdown()
-	client, err := InternalNewClient(smc, ns)
+	client, err := DeprecatedNewClient(smc, ns)
 	if err != nil {
 		t.Fatalf("failed to create client: %v", err)
 	}
@@ -2134,3 +970,71 @@
 		return nil
 	})
 }
+
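+// TestServerStates verifies the server's state reporting and the ordering
+// constraints on its operations: Serve and AddName fail with ErrBadState
+// before Listen, a second Serve fails, and Stop is idempotent.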
+func TestServerStates(t *testing.T) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
+	defer sm.Shutdown()
+	ns := tnaming.NewSimpleNamespace()
+	sctx, _ := v23.WithPrincipal(ctx, testutil.NewPrincipal("test"))
+	expectBadState := func(err error) {
+		if verror.ErrorID(err) != verror.ErrBadState.ID {
+			t.Fatalf("%s: unexpected error: %v", loc(1), err)
+		}
+	}
+
+	expectNoError := func(err error) {
+		if err != nil {
+			t.Fatalf("%s: unexpected error: %v", loc(1), err)
+		}
+	}
+
+	server, err := testInternalNewServer(sctx, sm, ns)
+	expectNoError(err)
+	defer server.Stop()
+
+	expectState := func(s rpc.ServerState) {
+		if got, want := server.Status().State, s; got != want {
+			t.Fatalf("%s: got %s, want %s", loc(1), got, want)
+		}
+	}
+
+	expectState(rpc.ServerActive)
+
+	// Need to call Listen first.
+	err = server.Serve("", &testServer{}, nil)
+	expectBadState(err)
+	err = server.AddName("a")
+	expectBadState(err)
+
+	_, err = server.Listen(rpc.ListenSpec{Addrs: rpc.ListenAddrs{{"tcp", "127.0.0.1:0"}}})
+	expectNoError(err)
+
+	expectState(rpc.ServerActive)
+
+	err = server.Serve("", &testServer{}, nil)
+	expectNoError(err)
+
+	err = server.Serve("", &testServer{}, nil)
+	expectBadState(err)
+
+	expectState(rpc.ServerActive)
+
+	err = server.AddName("a")
+	expectNoError(err)
+
+	expectState(rpc.ServerActive)
+
+	server.RemoveName("a")
+
+	expectState(rpc.ServerActive)
+
+	err = server.Stop()
+	expectNoError(err)
+	err = server.Stop()
+	expectNoError(err)
+
+	err = server.AddName("a")
+	expectBadState(err)
+}
diff --git a/runtime/internal/rpc/roaming_test.go b/runtime/internal/rpc/roaming_test.go
new file mode 100644
index 0000000..95cecdd
--- /dev/null
+++ b/runtime/internal/rpc/roaming_test.go
@@ -0,0 +1,303 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+	"net"
+	"reflect"
+	"sort"
+	"testing"
+	"time"
+
+	"v.io/v23"
+	"v.io/v23/naming"
+	"v.io/v23/rpc"
+	"v.io/x/lib/netstate"
+	"v.io/x/lib/set"
+	"v.io/x/ref/lib/pubsub"
+	inaming "v.io/x/ref/runtime/internal/naming"
+	_ "v.io/x/ref/runtime/internal/rpc/protocols/tcp"
+	_ "v.io/x/ref/runtime/internal/rpc/protocols/ws"
+	_ "v.io/x/ref/runtime/internal/rpc/protocols/wsh"
+	imanager "v.io/x/ref/runtime/internal/rpc/stream/manager"
+	tnaming "v.io/x/ref/runtime/internal/testing/mocks/naming"
+	"v.io/x/ref/test/testutil"
+)
+
+// TODO(mattr): Transition this to using public API.
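+// TestRoaming verifies that the server updates its endpoints and mount table
+// entries as addresses are added and removed on the roaming pubsub stream.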
+func TestRoaming(t *testing.T) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
+	defer sm.Shutdown()
+	ns := tnaming.NewSimpleNamespace()
+
+	publisher := pubsub.NewPublisher()
+	roaming := make(chan pubsub.Setting)
+	stop, err := publisher.CreateStream("TestRoaming", "TestRoaming", roaming)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer func() { publisher.Shutdown(); <-stop }()
+
+	nctx, _ := v23.WithPrincipal(ctx, testutil.NewPrincipal("test"))
+	server, err := testInternalNewServerWithPubsub(nctx, sm, ns, publisher, "TestRoaming")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer server.Stop()
+
+	ipv4And6 := netstate.AddressChooserFunc(func(network string, addrs []net.Addr) ([]net.Addr, error) {
+		accessible := netstate.ConvertToAddresses(addrs)
+		ipv4 := accessible.Filter(netstate.IsUnicastIPv4)
+		ipv6 := accessible.Filter(netstate.IsUnicastIPv6)
+		return append(ipv4.AsNetAddrs(), ipv6.AsNetAddrs()...), nil
+	})
+	spec := rpc.ListenSpec{
+		Addrs: rpc.ListenAddrs{
+			{"tcp", "*:0"},
+			{"tcp", ":0"},
+			{"tcp", ":0"},
+		},
+		AddressChooser: ipv4And6,
+	}
+
+	eps, err := server.Listen(spec)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(eps) == 0 {
+		t.Fatal("no endpoints listened on.")
+	}
+
+	if err = server.Serve("foo", &testServer{}, nil); err != nil {
+		t.Fatal(err)
+	}
+	setLeafEndpoints(eps)
+	if err = server.AddName("bar"); err != nil {
+		t.Fatal(err)
+	}
+
+	status := server.Status()
+	if got, want := status.Endpoints, eps; !cmpEndpoints(got, want) {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+
+	if got, want := len(status.Mounts), len(eps)*2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+
+	n1 := netstate.NewNetAddr("ip", "1.1.1.1")
+	n2 := netstate.NewNetAddr("ip", "2.2.2.2")
+
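+	// Register a buffered watcher to observe network changes.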
+	watcher := make(chan rpc.NetworkChange, 10)
+	server.WatchNetwork(watcher)
+	defer close(watcher)
+
+	roaming <- NewAddAddrsSetting([]net.Addr{n1, n2})
+
+	waitForChange := func() *rpc.NetworkChange {
+		ctx.Infof("Waiting on %p", watcher)
+		select {
+		case c := <-watcher:
+			return &c
+		case <-time.After(time.Minute):
+			t.Fatalf("timed out: %s", loc(1))
+		}
+		return nil
+	}
+
+	// We expect 4 changes, one for each IP per usable listen spec addr.
+	change := waitForChange()
+	if got, want := len(change.Changed), 4; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+
+	nepsA := make([]naming.Endpoint, len(eps))
+	copy(nepsA, eps)
+	for _, p := range getUniqPorts(eps) {
+		nep1 := updateHost(eps[0], net.JoinHostPort("1.1.1.1", p))
+		nep2 := updateHost(eps[0], net.JoinHostPort("2.2.2.2", p))
+		nepsA = append(nepsA, []naming.Endpoint{nep1, nep2}...)
+	}
+
+	status = server.Status()
+	if got, want := status.Endpoints, nepsA; !cmpEndpoints(got, want) {
+		t.Fatalf("got %v, want %v [%d, %d]", got, want, len(got), len(want))
+	}
+
+	if got, want := len(status.Mounts), len(nepsA)*2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+	if got, want := len(status.Mounts.Servers()), len(nepsA); got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+
+	roaming <- NewRmAddrsSetting([]net.Addr{n1})
+
+	// We expect 2 changes, one for each usable listen spec addr.
+	change = waitForChange()
+	if got, want := len(change.Changed), 2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+
+	nepsR := make([]naming.Endpoint, len(eps))
+	copy(nepsR, eps)
+	for _, p := range getUniqPorts(eps) {
+		nep2 := updateHost(eps[0], net.JoinHostPort("2.2.2.2", p))
+		nepsR = append(nepsR, nep2)
+	}
+
+	status = server.Status()
+	if got, want := status.Endpoints, nepsR; !cmpEndpoints(got, want) {
+		t.Fatalf("got %v, want %v [%d, %d]", got, want, len(got), len(want))
+	}
+
+	// Remove all addresses to mimic losing all connectivity.
+	roaming <- NewRmAddrsSetting(getIPAddrs(nepsR))
+
+	// We expect changes for all of the current endpoints.
+	change = waitForChange()
+	if got, want := len(change.Changed), len(nepsR); got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+
+	status = server.Status()
+	if got, want := len(status.Mounts), 0; got != want {
+		t.Fatalf("got %d, want %d: %v", got, want, status.Mounts)
+	}
+
+	roaming <- NewAddAddrsSetting([]net.Addr{n1})
+	// We expect 2 changes, one for each usable listen spec addr.
+	change = waitForChange()
+	if got, want := len(change.Changed), 2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+
+}
+
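+// TestWatcherDeadlock verifies that the server keeps applying network changes
+// even when a registered watcher channel is never read from.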
+func TestWatcherDeadlock(t *testing.T) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
+	defer sm.Shutdown()
+	ns := tnaming.NewSimpleNamespace()
+
+	publisher := pubsub.NewPublisher()
+	roaming := make(chan pubsub.Setting)
+	stop, err := publisher.CreateStream("TestWatcherDeadlock", "TestWatcherDeadlock", roaming)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer func() { publisher.Shutdown(); <-stop }()
+
+	nctx, _ := v23.WithPrincipal(ctx, testutil.NewPrincipal("test"))
+	server, err := testInternalNewServerWithPubsub(nctx, sm, ns, publisher, "TestWatcherDeadlock")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer server.Stop()
+
+	spec := rpc.ListenSpec{
+		Addrs: rpc.ListenAddrs{
+			{"tcp", ":0"},
+		},
+	}
+	eps, err := server.Listen(spec)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err = server.Serve("foo", &testServer{}, nil); err != nil {
+		t.Fatal(err)
+	}
+	setLeafEndpoints(eps)
+
+	// Set a watcher that we never read from - the intent is to make sure
+	// that the listener still listens to changes even though there is no
+	// goroutine to read from the watcher channel.
+	watcher := make(chan rpc.NetworkChange, 0)
+	server.WatchNetwork(watcher)
+	defer close(watcher)
+
+	// Remove all addresses to mimic losing all connectivity.
+	roaming <- NewRmAddrsSetting(getIPAddrs(eps))
+
+	// Add in two new addresses
+	n1 := netstate.NewNetAddr("ip", "1.1.1.1")
+	n2 := netstate.NewNetAddr("ip", "2.2.2.2")
+	roaming <- NewAddAddrsSetting([]net.Addr{n1, n2})
+
+	neps := make([]naming.Endpoint, 0, len(eps))
+	for _, p := range getUniqPorts(eps) {
+		nep1 := updateHost(eps[0], net.JoinHostPort("1.1.1.1", p))
+		nep2 := updateHost(eps[0], net.JoinHostPort("2.2.2.2", p))
+		neps = append(neps, []naming.Endpoint{nep1, nep2}...)
+	}
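+	// Poll until the server's endpoints reflect the new addresses.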
+	then := time.Now()
+	for {
+		status := server.Status()
+		if got, want := status.Endpoints, neps; cmpEndpoints(got, want) {
+			break
+		}
+		time.Sleep(100 * time.Millisecond)
+		if time.Since(then) > time.Minute {
+			t.Fatalf("timed out waiting for changes to take effect")
+		}
+	}
+}
+
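+// updateHost returns a copy of ep with its address replaced by the given host:port.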
+func updateHost(ep naming.Endpoint, address string) naming.Endpoint {
+	niep := *(ep).(*inaming.Endpoint)
+	niep.Address = address
+	return &niep
+}
+
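+// getIPAddrs returns the distinct host addresses of eps as net.Addrs.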
+func getIPAddrs(eps []naming.Endpoint) []net.Addr {
+	hosts := map[string]struct{}{}
+	for _, ep := range eps {
+		iep := (ep).(*inaming.Endpoint)
+		h, _, _ := net.SplitHostPort(iep.Address)
+		if len(h) > 0 {
+			hosts[h] = struct{}{}
+		}
+	}
+	addrs := []net.Addr{}
+	for h := range hosts {
+		addrs = append(addrs, netstate.NewNetAddr("ip", h))
+	}
+	return addrs
+}
+
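+// cmpEndpoints reports whether got and want contain the same endpoints, ignoring order.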
+func cmpEndpoints(got, want []naming.Endpoint) bool {
+	if len(got) != len(want) {
+		return false
+	}
+	return reflect.DeepEqual(endpointToStrings(got), endpointToStrings(want))
+}
+
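+// getUniqPorts returns the distinct ports that eps are listening on.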
+func getUniqPorts(eps []naming.Endpoint) []string {
+	ports := map[string]struct{}{}
+	for _, ep := range eps {
+		iep := ep.(*inaming.Endpoint)
+		_, p, _ := net.SplitHostPort(iep.Address)
+		ports[p] = struct{}{}
+	}
+	return set.String.ToSlice(ports)
+}
+
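+// endpointToStrings returns the string forms of eps, sorted.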
+func endpointToStrings(eps []naming.Endpoint) []string {
+	r := []string{}
+	for _, ep := range eps {
+		r = append(r, ep.String())
+	}
+	sort.Strings(r)
+	return r
+}
+
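+// setLeafEndpoints marks each endpoint as a leaf endpoint.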
+func setLeafEndpoints(eps []naming.Endpoint) {
+	for i := range eps {
+		eps[i].(*inaming.Endpoint).IsLeaf = true
+	}
+}
diff --git a/runtime/internal/rpc/server.go b/runtime/internal/rpc/server.go
index ecd7d00..c72439d 100644
--- a/runtime/internal/rpc/server.go
+++ b/runtime/internal/rpc/server.go
@@ -242,7 +242,7 @@
 
 var _ DeprecatedServer = (*server)(nil)
 
-func InternalNewServer(
+func DeprecatedNewServer(
 	ctx *context.T,
 	streamMgr stream.Manager,
 	ns namespace.T,
diff --git a/runtime/internal/rpc/server_test.go b/runtime/internal/rpc/server_test.go
deleted file mode 100644
index 3e8c3fc..0000000
--- a/runtime/internal/rpc/server_test.go
+++ /dev/null
@@ -1,687 +0,0 @@
-// Copyright 2015 The Vanadium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package rpc
-
-import (
-	"net"
-	"reflect"
-	"sort"
-	"testing"
-	"time"
-
-	"v.io/x/lib/netstate"
-	"v.io/x/lib/set"
-
-	"v.io/v23"
-	"v.io/v23/context"
-	"v.io/v23/naming"
-	"v.io/v23/options"
-	"v.io/v23/rpc"
-	"v.io/v23/security"
-	"v.io/v23/verror"
-
-	"v.io/x/ref/lib/pubsub"
-	inaming "v.io/x/ref/runtime/internal/naming"
-	imanager "v.io/x/ref/runtime/internal/rpc/stream/manager"
-	tnaming "v.io/x/ref/runtime/internal/testing/mocks/naming"
-	"v.io/x/ref/test/testutil"
-)
-
-type noMethodsType struct{ Field string }
-
-type fieldType struct {
-	unexported string
-}
-type noExportedFieldsType struct{}
-
-func (noExportedFieldsType) F(_ *context.T, _ rpc.ServerCall, f fieldType) error { return nil }
-
-type badObjectDispatcher struct{}
-
-func (badObjectDispatcher) Lookup(_ *context.T, suffix string) (interface{}, security.Authorizer, error) {
-	return noMethodsType{}, nil, nil
-}
-
-// TestBadObject ensures that Serve handles bad receiver objects gracefully (in
-// particular, it doesn't panic).
-func TestBadObject(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-	sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
-	defer sm.Shutdown()
-	ns := tnaming.NewSimpleNamespace()
-	pclient, pserver := newClientServerPrincipals()
-	cctx, _ := v23.WithPrincipal(ctx, pclient)
-	sctx, _ := v23.WithPrincipal(ctx, pserver)
-	server, err := testInternalNewServer(sctx, sm, ns)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer server.Stop()
-
-	if _, err := server.Listen(listenSpec); err != nil {
-		t.Fatalf("Listen failed: %v", err)
-	}
-	if err := server.Serve("", nil, nil); err == nil {
-		t.Fatal("should have failed")
-	}
-	if err := server.Serve("", new(noMethodsType), nil); err == nil {
-		t.Fatal("should have failed")
-	}
-	if err := server.Serve("", new(noExportedFieldsType), nil); err == nil {
-		t.Fatal("should have failed")
-	}
-	if err := server.ServeDispatcher("servername", badObjectDispatcher{}); err != nil {
-		t.Fatalf("ServeDispatcher failed: %v", err)
-	}
-	client, err := InternalNewClient(sm, ns)
-	if err != nil {
-		t.Fatalf("InternalNewClient failed: %v", err)
-	}
-	ctx, _ = context.WithDeadline(cctx, time.Now().Add(10*time.Second))
-	var result string
-	if err := client.Call(cctx, "servername", "SomeMethod", nil, []interface{}{&result}); err == nil {
-		// TODO(caprita): Check the error type rather than
-		// merely ensuring the test doesn't panic.
-		t.Fatalf("Call should have failed")
-	}
-}
-
-func TestServerArgs(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-	sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
-	defer sm.Shutdown()
-	ns := tnaming.NewSimpleNamespace()
-	sctx, _ := v23.WithPrincipal(ctx, testutil.NewPrincipal("test"))
-	server, err := testInternalNewServer(sctx, sm, ns)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer server.Stop()
-	_, err = server.Listen(rpc.ListenSpec{})
-	if verror.ErrorID(err) != verror.ErrBadArg.ID {
-		t.Fatalf("expected a BadArg error: got %v", err)
-	}
-	_, err = server.Listen(rpc.ListenSpec{Addrs: rpc.ListenAddrs{{"tcp", "*:0"}}})
-	if verror.ErrorID(err) != verror.ErrBadArg.ID {
-		t.Fatalf("expected a BadArg error: got %v", err)
-	}
-	_, err = server.Listen(rpc.ListenSpec{
-		Addrs: rpc.ListenAddrs{
-			{"tcp", "*:0"},
-			{"tcp", "127.0.0.1:0"},
-		}})
-	if verror.ErrorID(err) == verror.ErrBadArg.ID {
-		t.Fatalf("expected a BadArg error: got %v", err)
-	}
-	status := server.Status()
-	if got, want := len(status.Errors), 1; got != want {
-		t.Fatalf("got %v, want %v", got, want)
-	}
-	_, err = server.Listen(rpc.ListenSpec{Addrs: rpc.ListenAddrs{{"tcp", "*:0"}}})
-	if verror.ErrorID(err) != verror.ErrBadArg.ID {
-		t.Fatalf("expected a BadArg error: got %v", err)
-	}
-	status = server.Status()
-	if got, want := len(status.Errors), 1; got != want {
-		t.Fatalf("got %v, want %v", got, want)
-	}
-}
-
-type statusServer struct{ ch chan struct{} }
-
-func (s *statusServer) Hang(*context.T, rpc.ServerCall) error {
-	s.ch <- struct{}{} // Notify the server has received a call.
-	<-s.ch             // Wait for the server to be ready to go.
-	return nil
-}
-
-func TestServerStatus(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-	sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
-	defer sm.Shutdown()
-	ns := tnaming.NewSimpleNamespace()
-	principal := testutil.NewPrincipal("testServerStatus")
-	ctx, _ = v23.WithPrincipal(ctx, principal)
-	server, err := testInternalNewServer(ctx, sm, ns)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer server.Stop()
-
-	status := server.Status()
-	if got, want := status.State, rpc.ServerActive; got != want {
-		t.Fatalf("got %s, want %s", got, want)
-	}
-	server.Listen(rpc.ListenSpec{Addrs: rpc.ListenAddrs{{"tcp", "127.0.0.1:0"}}})
-	status = server.Status()
-	if got, want := status.State, rpc.ServerActive; got != want {
-		t.Fatalf("got %s, want %s", got, want)
-	}
-	serverChan := make(chan struct{})
-	err = server.Serve("test", &statusServer{serverChan}, nil)
-	if err != nil {
-		t.Fatalf(err.Error())
-	}
-	status = server.Status()
-	if got, want := status.State, rpc.ServerActive; got != want {
-		t.Fatalf("got %s, want %s", got, want)
-	}
-
-	progress := make(chan error)
-
-	client, err := InternalNewClient(sm, ns)
-	makeCall := func(ctx *context.T) {
-		call, err := client.StartCall(ctx, "test", "Hang", nil)
-		progress <- err
-		progress <- call.Finish()
-	}
-	go makeCall(ctx)
-
-	// Wait for RPC to start and the server has received the call.
-	if err := <-progress; err != nil {
-		t.Fatalf(err.Error())
-	}
-	<-serverChan
-
-	// Stop server asynchronously
-	go func() {
-		err = server.Stop()
-		if err != nil {
-			t.Fatalf(err.Error())
-		}
-	}()
-
-	// Server should enter 'ServerStopping' state.
-	then := time.Now()
-	for {
-		status = server.Status()
-		if got, want := status.State, rpc.ServerStopping; got != want {
-			if time.Now().Sub(then) > time.Minute {
-				t.Fatalf("got %s, want %s", got, want)
-			}
-		} else {
-			break
-		}
-		time.Sleep(100 * time.Millisecond)
-	}
-	// Server won't stop until the statusServer's hung method completes.
-	close(serverChan)
-	// Wait for RPC to finish
-	if err := <-progress; err != nil {
-		t.Fatalf(err.Error())
-	}
-
-	// Now that the RPC is done, the server should be able to stop.
-	then = time.Now()
-	for {
-		status = server.Status()
-		if got, want := status.State, rpc.ServerStopped; got != want {
-			if time.Now().Sub(then) > time.Minute {
-				t.Fatalf("got %s, want %s", got, want)
-			}
-		} else {
-			break
-		}
-		time.Sleep(100 * time.Millisecond)
-	}
-}
-
-func TestServerStates(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-	sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
-	defer sm.Shutdown()
-	ns := tnaming.NewSimpleNamespace()
-	sctx, _ := v23.WithPrincipal(ctx, testutil.NewPrincipal("test"))
-	expectBadState := func(err error) {
-		if verror.ErrorID(err) != verror.ErrBadState.ID {
-			t.Fatalf("%s: unexpected error: %v", loc(1), err)
-		}
-	}
-
-	expectNoError := func(err error) {
-		if err != nil {
-			t.Fatalf("%s: unexpected error: %v", loc(1), err)
-		}
-	}
-
-	server, err := testInternalNewServer(sctx, sm, ns)
-	expectNoError(err)
-	defer server.Stop()
-
-	expectState := func(s rpc.ServerState) {
-		if got, want := server.Status().State, s; got != want {
-			t.Fatalf("%s: got %s, want %s", loc(1), got, want)
-		}
-	}
-
-	expectState(rpc.ServerActive)
-
-	// Need to call Listen first.
-	err = server.Serve("", &testServer{}, nil)
-	expectBadState(err)
-	err = server.AddName("a")
-	expectBadState(err)
-
-	_, err = server.Listen(rpc.ListenSpec{Addrs: rpc.ListenAddrs{{"tcp", "127.0.0.1:0"}}})
-	expectNoError(err)
-
-	expectState(rpc.ServerActive)
-
-	err = server.Serve("", &testServer{}, nil)
-	expectNoError(err)
-
-	err = server.Serve("", &testServer{}, nil)
-	expectBadState(err)
-
-	expectState(rpc.ServerActive)
-
-	err = server.AddName("a")
-	expectNoError(err)
-
-	expectState(rpc.ServerActive)
-
-	server.RemoveName("a")
-
-	expectState(rpc.ServerActive)
-
-	err = server.Stop()
-	expectNoError(err)
-	err = server.Stop()
-	expectNoError(err)
-
-	err = server.AddName("a")
-	expectBadState(err)
-}
-
-func TestMountStatus(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-	sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
-	defer sm.Shutdown()
-	ns := tnaming.NewSimpleNamespace()
-	sctx, _ := v23.WithPrincipal(ctx, testutil.NewPrincipal("test"))
-
-	server, err := testInternalNewServer(sctx, sm, ns)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer server.Stop()
-
-	eps, err := server.Listen(rpc.ListenSpec{
-		Addrs: rpc.ListenAddrs{
-			{"tcp", "127.0.0.1:0"},
-			{"tcp", "127.0.0.1:0"},
-		}})
-	if err != nil {
-		t.Fatal(err)
-	}
-	if got, want := len(eps), 2; got != want {
-		t.Fatalf("got %d, want %d", got, want)
-	}
-	if err = server.Serve("foo", &testServer{}, nil); err != nil {
-		t.Fatal(err)
-	}
-	setLeafEndpoints(eps)
-	status := server.Status()
-	if got, want := len(status.Mounts), 2; got != want {
-		t.Fatalf("got %d, want %d", got, want)
-	}
-	servers := status.Mounts.Servers()
-	if got, want := len(servers), 2; got != want {
-		t.Fatalf("got %d, want %d", got, want)
-	}
-	if got, want := servers, endpointToStrings(eps); !reflect.DeepEqual(got, want) {
-		t.Fatalf("got %v, want %v", got, want)
-	}
-
-	// Add a second name and we should now see 4 mounts, 2 for each name.
-	if err := server.AddName("bar"); err != nil {
-		t.Fatal(err)
-	}
-	status = server.Status()
-	if got, want := len(status.Mounts), 4; got != want {
-		t.Fatalf("got %d, want %d", got, want)
-	}
-	servers = status.Mounts.Servers()
-	if got, want := len(servers), 2; got != want {
-		t.Fatalf("got %d, want %d", got, want)
-	}
-	if got, want := servers, endpointToStrings(eps); !reflect.DeepEqual(got, want) {
-		t.Fatalf("got %v, want %v", got, want)
-	}
-	names := status.Mounts.Names()
-	if got, want := len(names), 2; got != want {
-		t.Fatalf("got %d, want %d", got, want)
-	}
-	serversPerName := map[string][]string{}
-	for _, ms := range status.Mounts {
-		serversPerName[ms.Name] = append(serversPerName[ms.Name], ms.Server)
-	}
-	if got, want := len(serversPerName), 2; got != want {
-		t.Fatalf("got %d, want %d", got, want)
-	}
-	for _, name := range []string{"foo", "bar"} {
-		if got, want := len(serversPerName[name]), 2; got != want {
-			t.Fatalf("got %d, want %d", got, want)
-		}
-	}
-}
-
-func updateHost(ep naming.Endpoint, address string) naming.Endpoint {
-	niep := *(ep).(*inaming.Endpoint)
-	niep.Address = address
-	return &niep
-}
-
-func getIPAddrs(eps []naming.Endpoint) []net.Addr {
-	hosts := map[string]struct{}{}
-	for _, ep := range eps {
-		iep := (ep).(*inaming.Endpoint)
-		h, _, _ := net.SplitHostPort(iep.Address)
-		if len(h) > 0 {
-			hosts[h] = struct{}{}
-		}
-	}
-	addrs := []net.Addr{}
-	for h, _ := range hosts {
-		addrs = append(addrs, netstate.NewNetAddr("ip", h))
-	}
-	return addrs
-}
-
-func endpointToStrings(eps []naming.Endpoint) []string {
-	r := []string{}
-	for _, ep := range eps {
-		r = append(r, ep.String())
-	}
-	sort.Strings(r)
-	return r
-}
-
-func cmpEndpoints(got, want []naming.Endpoint) bool {
-	if len(got) != len(want) {
-		return false
-	}
-	return reflect.DeepEqual(endpointToStrings(got), endpointToStrings(want))
-}
-
-func getUniqPorts(eps []naming.Endpoint) []string {
-	ports := map[string]struct{}{}
-	for _, ep := range eps {
-		iep := ep.(*inaming.Endpoint)
-		_, p, _ := net.SplitHostPort(iep.Address)
-		ports[p] = struct{}{}
-	}
-	return set.String.ToSlice(ports)
-}
-
-func TestRoaming(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-	sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
-	defer sm.Shutdown()
-	ns := tnaming.NewSimpleNamespace()
-
-	publisher := pubsub.NewPublisher()
-	roaming := make(chan pubsub.Setting)
-	stop, err := publisher.CreateStream("TestRoaming", "TestRoaming", roaming)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer func() { publisher.Shutdown(); <-stop }()
-
-	nctx, _ := v23.WithPrincipal(ctx, testutil.NewPrincipal("test"))
-	server, err := testInternalNewServerWithPubsub(nctx, sm, ns, publisher, "TestRoaming")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer server.Stop()
-
-	ipv4And6 := netstate.AddressChooserFunc(func(network string, addrs []net.Addr) ([]net.Addr, error) {
-		accessible := netstate.ConvertToAddresses(addrs)
-		ipv4 := accessible.Filter(netstate.IsUnicastIPv4)
-		ipv6 := accessible.Filter(netstate.IsUnicastIPv6)
-		return append(ipv4.AsNetAddrs(), ipv6.AsNetAddrs()...), nil
-	})
-	spec := rpc.ListenSpec{
-		Addrs: rpc.ListenAddrs{
-			{"tcp", "*:0"},
-			{"tcp", ":0"},
-			{"tcp", ":0"},
-		},
-		AddressChooser: ipv4And6,
-	}
-
-	eps, err := server.Listen(spec)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if len(eps) == 0 {
-		t.Fatal(err)
-	}
-
-	if err = server.Serve("foo", &testServer{}, nil); err != nil {
-		t.Fatal(err)
-	}
-	setLeafEndpoints(eps)
-	if err = server.AddName("bar"); err != nil {
-		t.Fatal(err)
-	}
-
-	status := server.Status()
-	if got, want := status.Endpoints, eps; !cmpEndpoints(got, want) {
-		t.Fatalf("got %v, want %v", got, want)
-	}
-
-	if got, want := len(status.Mounts), len(eps)*2; got != want {
-		t.Fatalf("got %d, want %d", got, want)
-	}
-
-	n1 := netstate.NewNetAddr("ip", "1.1.1.1")
-	n2 := netstate.NewNetAddr("ip", "2.2.2.2")
-
-	watcher := make(chan rpc.NetworkChange, 10)
-	server.WatchNetwork(watcher)
-	defer close(watcher)
-
-	roaming <- NewAddAddrsSetting([]net.Addr{n1, n2})
-
-	waitForChange := func() *rpc.NetworkChange {
-		ctx.Infof("Waiting on %p", watcher)
-		select {
-		case c := <-watcher:
-			return &c
-		case <-time.After(time.Minute):
-			t.Fatalf("timedout: %s", loc(1))
-		}
-		return nil
-	}
-
-	// We expect 4 changes, one for each IP per usable listen spec addr.
-	change := waitForChange()
-	if got, want := len(change.Changed), 4; got != want {
-		t.Fatalf("got %d, want %d", got, want)
-	}
-
-	nepsA := make([]naming.Endpoint, len(eps))
-	copy(nepsA, eps)
-	for _, p := range getUniqPorts(eps) {
-		nep1 := updateHost(eps[0], net.JoinHostPort("1.1.1.1", p))
-		nep2 := updateHost(eps[0], net.JoinHostPort("2.2.2.2", p))
-		nepsA = append(nepsA, []naming.Endpoint{nep1, nep2}...)
-	}
-
-	status = server.Status()
-	if got, want := status.Endpoints, nepsA; !cmpEndpoints(got, want) {
-		t.Fatalf("got %v, want %v [%d, %d]", got, want, len(got), len(want))
-	}
-
-	if got, want := len(status.Mounts), len(nepsA)*2; got != want {
-		t.Fatalf("got %d, want %d", got, want)
-	}
-	if got, want := len(status.Mounts.Servers()), len(nepsA); got != want {
-		t.Fatalf("got %d, want %d", got, want)
-	}
-
-	roaming <- NewRmAddrsSetting([]net.Addr{n1})
-
-	// We expect 2 changes, one for each usable listen spec addr.
-	change = waitForChange()
-	if got, want := len(change.Changed), 2; got != want {
-		t.Fatalf("got %d, want %d", got, want)
-	}
-
-	nepsR := make([]naming.Endpoint, len(eps))
-	copy(nepsR, eps)
-	for _, p := range getUniqPorts(eps) {
-		nep2 := updateHost(eps[0], net.JoinHostPort("2.2.2.2", p))
-		nepsR = append(nepsR, nep2)
-	}
-
-	status = server.Status()
-	if got, want := status.Endpoints, nepsR; !cmpEndpoints(got, want) {
-		t.Fatalf("got %v, want %v [%d, %d]", got, want, len(got), len(want))
-	}
-
-	// Remove all addresses to mimic losing all connectivity.
-	roaming <- NewRmAddrsSetting(getIPAddrs(nepsR))
-
-	// We expect changes for all of the current endpoints
-	change = waitForChange()
-	if got, want := len(change.Changed), len(nepsR); got != want {
-		t.Fatalf("got %d, want %d", got, want)
-	}
-
-	status = server.Status()
-	if got, want := len(status.Mounts), 0; got != want {
-		t.Fatalf("got %d, want %d: %v", got, want, status.Mounts)
-	}
-
-	roaming <- NewAddAddrsSetting([]net.Addr{n1})
-	// We expect 2 changes, one for each usable listen spec addr.
-	change = waitForChange()
-	if got, want := len(change.Changed), 2; got != want {
-		t.Fatalf("got %d, want %d", got, want)
-	}
-
-}
-
-func TestWatcherDeadlock(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-	sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
-	defer sm.Shutdown()
-	ns := tnaming.NewSimpleNamespace()
-
-	publisher := pubsub.NewPublisher()
-	roaming := make(chan pubsub.Setting)
-	stop, err := publisher.CreateStream("TestWatcherDeadlock", "TestWatcherDeadlock", roaming)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer func() { publisher.Shutdown(); <-stop }()
-
-	nctx, _ := v23.WithPrincipal(ctx, testutil.NewPrincipal("test"))
-	server, err := testInternalNewServerWithPubsub(nctx, sm, ns, publisher, "TestWatcherDeadlock")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer server.Stop()
-
-	spec := rpc.ListenSpec{
-		Addrs: rpc.ListenAddrs{
-			{"tcp", ":0"},
-		},
-	}
-	eps, err := server.Listen(spec)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err = server.Serve("foo", &testServer{}, nil); err != nil {
-		t.Fatal(err)
-	}
-	setLeafEndpoints(eps)
-
-	// Set a watcher that we never read from - the intent is to make sure
-	// that the listener still listens to changes even though there is no
-	// goroutine to read from the watcher channel.
-	watcher := make(chan rpc.NetworkChange, 0)
-	server.WatchNetwork(watcher)
-	defer close(watcher)
-
-	// Remove all addresses to mimic losing all connectivity.
-	roaming <- NewRmAddrsSetting(getIPAddrs(eps))
-
-	// Add in two new addresses
-	n1 := netstate.NewNetAddr("ip", "1.1.1.1")
-	n2 := netstate.NewNetAddr("ip", "2.2.2.2")
-	roaming <- NewAddAddrsSetting([]net.Addr{n1, n2})
-
-	neps := make([]naming.Endpoint, 0, len(eps))
-	for _, p := range getUniqPorts(eps) {
-		nep1 := updateHost(eps[0], net.JoinHostPort("1.1.1.1", p))
-		nep2 := updateHost(eps[0], net.JoinHostPort("2.2.2.2", p))
-		neps = append(neps, []naming.Endpoint{nep1, nep2}...)
-	}
-	then := time.Now()
-	for {
-		status := server.Status()
-		if got, want := status.Endpoints, neps; cmpEndpoints(got, want) {
-			break
-		}
-		time.Sleep(100 * time.Millisecond)
-		if time.Now().Sub(then) > time.Minute {
-			t.Fatalf("timed out waiting for changes to take effect")
-		}
-	}
-}
-
-func TestIsLeafServerOption(t *testing.T) {
-	ctx, shutdown := initForTest()
-	defer shutdown()
-	sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
-	defer sm.Shutdown()
-	ns := tnaming.NewSimpleNamespace()
-	pclient, pserver := newClientServerPrincipals()
-	cctx, _ := v23.WithPrincipal(ctx, pclient)
-	sctx, _ := v23.WithPrincipal(ctx, pserver)
-	server, err := testInternalNewServer(sctx, sm, ns, options.IsLeaf(true))
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer server.Stop()
-
-	disp := &testServerDisp{&testServer{}}
-
-	if _, err := server.Listen(listenSpec); err != nil {
-		t.Fatalf("Listen failed: %v", err)
-	}
-
-	if err := server.ServeDispatcher("leafserver", disp); err != nil {
-		t.Fatalf("ServeDispatcher failed: %v", err)
-	}
-	client, err := InternalNewClient(sm, ns)
-	if err != nil {
-		t.Fatalf("InternalNewClient failed: %v", err)
-	}
-	cctx, _ = context.WithDeadline(cctx, time.Now().Add(10*time.Second))
-	var result string
-	// we have set IsLeaf to true, sending any suffix to leafserver should result
-	// in an suffix was not expected error.
-	callErr := client.Call(cctx, "leafserver/unwantedSuffix", "Echo", []interface{}{"Mirror on the wall"}, []interface{}{&result})
-	if callErr == nil {
-		t.Fatalf("Call should have failed with suffix was not expected error")
-	}
-}
-
-func setLeafEndpoints(eps []naming.Endpoint) {
-	for i := range eps {
-		eps[i].(*inaming.Endpoint).IsLeaf = true
-	}
-}
diff --git a/runtime/internal/rpc/stream/manager/listener.go b/runtime/internal/rpc/stream/manager/listener.go
index a7afc98..6ef5453 100644
--- a/runtime/internal/rpc/stream/manager/listener.go
+++ b/runtime/internal/rpc/stream/manager/listener.go
@@ -293,7 +293,7 @@
 	ln.connsMu.Lock()
 	var vifs []*vif.VIF
 	if ln.vifs != nil {
-		vifs, ln.vifs = ln.vifs.List(), nil
+		vifs = ln.vifs.List()
 	}
 	ln.connsMu.Unlock()
 	if len(vifs) > 0 {
diff --git a/runtime/internal/rpc/stream/manager/manager_test.go b/runtime/internal/rpc/stream/manager/manager_test.go
index 12f7bd5..f1c0377 100644
--- a/runtime/internal/rpc/stream/manager/manager_test.go
+++ b/runtime/internal/rpc/stream/manager/manager_test.go
@@ -431,6 +431,7 @@
 		pserver = testutil.NewPrincipal("server")
 		lopts   = []stream.ListenerOpt{vc.StartTimeout{Duration: startTime}}
 	)
+	defer server.Shutdown()
 
 	sctx, _ := v23.WithPrincipal(ctx, pserver)
 
@@ -452,10 +453,11 @@
 	// Arrange for the above goroutine to exit when the test finishes.
 	defer ln.Close()
 
-	_, err = net.Dial(ep.Addr().Network(), ep.Addr().String())
+	conn, err := net.Dial(ep.Addr().Network(), ep.Addr().String())
 	if err != nil {
 		t.Fatalf("net.Dial failed: %v", err)
 	}
+	defer conn.Close()
 
 	// Trigger the start timers.
 	triggerTimers()
diff --git a/runtime/internal/rpc/test/cancel_test.go b/runtime/internal/rpc/test/cancel_test.go
new file mode 100644
index 0000000..b363a33
--- /dev/null
+++ b/runtime/internal/rpc/test/cancel_test.go
@@ -0,0 +1,178 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+	"io"
+	"testing"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+	"v.io/x/ref/test"
+)
+
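+// canceld is a test service that signals on started when its Run method has
+// begun and on canceled when its context has been canceled; it optionally
+// chains a call to a child service of the same type.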
+type canceld struct {
+	name     string
+	child    string
+	started  chan struct{}
+	canceled chan struct{}
+}
+
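+// Run signals that the call has started, optionally starts a call to the
+// child service, and then blocks until the context is canceled.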
+func (c *canceld) Run(ctx *context.T, _ rpc.ServerCall) error {
+	close(c.started)
+	client := v23.GetClient(ctx)
+	ctx.Infof("Run: %s", c.child)
+	if c.child != "" {
+		if _, err := client.StartCall(ctx, c.child, "Run", []interface{}{}); err != nil {
+			ctx.Error(err)
+			return err
+		}
+	}
+	<-ctx.Done()
+	close(c.canceled)
+	return nil
+}
+
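+// makeCanceld serves a new canceld instance under the given name, configured
+// to call the named child when it runs.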
+func makeCanceld(ctx *context.T, name, child string) (*canceld, error) {
+	c := &canceld{
+		name:     name,
+		child:    child,
+		started:  make(chan struct{}, 0),
+		canceled: make(chan struct{}, 0),
+	}
+	_, _, err := v23.WithNewServer(ctx, name, c, security.AllowEveryone())
+	if err != nil {
+		return nil, err
+	}
+	ctx.Infof("Serving: %q", name)
+	return c, nil
+}
+
+// TestCancellationPropagation tests that cancellation propagates along an
+// RPC call chain without user intervention.
+func TestCancellationPropagation(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+
+	c1, err := makeCanceld(ctx, "c1", "c2")
+	if err != nil {
+		t.Fatalf("Can't start server: %v (%s)", err, verror.DebugString(err))
+	}
+	c2, err := makeCanceld(ctx, "c2", "")
+	if err != nil {
+		t.Fatalf("Can't start server: %v", err)
+	}
+
+	ctx, cancel := context.WithCancel(ctx)
+	_, err = v23.GetClient(ctx).StartCall(ctx, "c1", "Run", []interface{}{})
+	if err != nil {
+		t.Fatalf("can't call: %v", err)
+	}
+
+	<-c1.started
+	<-c2.started
+
+	ctx.Info("cancelling initial call")
+	cancel()
+
+	ctx.Info("waiting for children to be canceled")
+	<-c1.canceled
+	<-c2.canceled
+}
+
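+// cancelTestServer signals on started when a streaming method begins and on
+// cancelled when that method's context is canceled.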
+type cancelTestServer struct {
+	started   chan struct{}
+	cancelled chan struct{}
+	t         *testing.T
+}
+
+func newCancelTestServer(t *testing.T) *cancelTestServer {
+	return &cancelTestServer{
+		started:   make(chan struct{}),
+		cancelled: make(chan struct{}),
+		t:         t,
+	}
+}
+
+func (s *cancelTestServer) CancelStreamReader(ctx *context.T, call rpc.StreamServerCall) error {
+	close(s.started)
+	var b []byte
+	if err := call.Recv(&b); err != io.EOF {
+		s.t.Errorf("Got error %v, want io.EOF", err)
+	}
+	<-ctx.Done()
+	close(s.cancelled)
+	return nil
+}
+
+// CancelStreamIgnorer doesn't read from its input stream, so all its
+// buffers fill.  The intention is to show that ctx.Done() is closed
+// even when the stream is stalled.
+func (s *cancelTestServer) CancelStreamIgnorer(ctx *context.T, _ rpc.StreamServerCall) error {
+	close(s.started)
+	<-ctx.Done()
+	close(s.cancelled)
+	return nil
+}
+
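+// waitForCancel waits for the server method to start, cancels the client's
+// context, and then waits for the server to observe the cancellation.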
+func waitForCancel(t *testing.T, ts *cancelTestServer, cancel context.CancelFunc) {
+	<-ts.started
+	cancel()
+	<-ts.cancelled
+}
+
+// TestCancel tests cancellation while the server is reading from a stream.
+func TestCancel(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+	var (
+		sctx = withPrincipal(t, ctx, "server")
+		cctx = withPrincipal(t, ctx, "client")
+		ts   = newCancelTestServer(t)
+	)
+	_, _, err := v23.WithNewServer(sctx, "cancel", ts, security.AllowEveryone())
+	if err != nil {
+		t.Fatal(err)
+	}
+	cctx, cancel := context.WithCancel(cctx)
+	_, err = v23.GetClient(cctx).StartCall(cctx, "cancel", "CancelStreamReader", []interface{}{})
+	if err != nil {
+		t.Fatalf("Start call failed: %v", err)
+	}
+	waitForCancel(t, ts, cancel)
+}
+
+// TestCancelWithFullBuffers tests that the cancel message gets through even
+// if the writer has filled the buffers and the server is not reading.
+func TestCancelWithFullBuffers(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+	var (
+		sctx = withPrincipal(t, ctx, "server")
+		cctx = withPrincipal(t, ctx, "client")
+		ts   = newCancelTestServer(t)
+	)
+	_, _, err := v23.WithNewServer(sctx, "cancel", ts, security.AllowEveryone())
+	if err != nil {
+		t.Fatal(err)
+	}
+	cctx, cancel := context.WithCancel(cctx)
+	call, err := v23.GetClient(cctx).StartCall(cctx, "cancel", "CancelStreamIgnorer", []interface{}{})
+	if err != nil {
+		t.Fatalf("Start call failed: %v", err)
+	}
+
+	// Fill up all the write buffers to ensure that cancelling works even when the stream
+	// is blocked.
+	// TODO(mattr): Update for new RPC system.
+	call.Send(make([]byte, vc.MaxSharedBytes))
+	call.Send(make([]byte, vc.DefaultBytesBufferedPerFlow))
+
+	waitForCancel(t, ts, cancel)
+}
diff --git a/runtime/internal/rpc/test/debug_test.go b/runtime/internal/rpc/test/debug_test.go
new file mode 100644
index 0000000..f5a4155
--- /dev/null
+++ b/runtime/internal/rpc/test/debug_test.go
@@ -0,0 +1,115 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+	"io"
+	"reflect"
+	"sort"
+	"testing"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/rpc"
+	"v.io/x/ref/lib/stats"
+	irpc "v.io/x/ref/runtime/internal/rpc"
+	"v.io/x/ref/services/debug/debuglib"
+	"v.io/x/ref/test"
+	"v.io/x/ref/test/testutil"
+)
+
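+// TestDebugServer verifies that a server created with a ReservedNameDispatcher
+// exposes the reserved __debug namespace (logs, pprof, stats, vtrace) alongside
+// its own methods, and that those names are visible via Glob.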
+func TestDebugServer(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+
+	// Setup the client and server principals, with the client willing to share its
+	// blessing with the server.
+	var (
+		pclient = testutil.NewPrincipal()
+		cctx, _ = v23.WithPrincipal(ctx, pclient)
+	)
+	idp := testutil.IDProviderFromPrincipal(v23.GetPrincipal(ctx))
+	if err := idp.Bless(pclient, "client"); err != nil {
+		t.Fatal(err)
+	}
+	name := "testserver"
+	debugDisp := debuglib.NewDispatcher(nil)
+	_, _, err := v23.WithNewServer(ctx, name, &testObject{}, nil,
+		irpc.ReservedNameDispatcher{debugDisp})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Call the Foo method on ""
+	{
+		var value string
+		if err := v23.GetClient(cctx).Call(cctx, name, "Foo", nil, []interface{}{&value}); err != nil {
+			t.Fatalf("client.Call failed: %v", err)
+		}
+		if want := "BAR"; value != want {
+			t.Errorf("unexpected value: Got %v, want %v", value, want)
+		}
+	}
+	// Call Value on __debug/stats/testing/foo
+	{
+		foo := stats.NewString("testing/foo")
+		foo.Set("The quick brown fox jumps over the lazy dog")
+		fullname := naming.Join(name, "__debug/stats/testing/foo")
+		var value string
+		if err := v23.GetClient(cctx).Call(cctx, fullname, "Value", nil, []interface{}{&value}); err != nil {
+			t.Fatalf("client.Call failed: %v", err)
+		}
+		if want := foo.Value(); value != want {
+			t.Errorf("unexpected result: Got %v, want %v", value, want)
+		}
+	}
+
+	// Call Glob
+	testcases := []struct {
+		name, pattern string
+		expected      []string
+	}{
+		{"", "*", []string{}},
+		{"", "__*", []string{"__debug"}},
+		{"", "__*/*", []string{"__debug/logs", "__debug/pprof", "__debug/stats", "__debug/vtrace"}},
+		{"__debug", "*", []string{"logs", "pprof", "stats", "vtrace"}},
+	}
+	for _, tc := range testcases {
+		fullname := naming.Join(name, tc.name)
+		call, err := v23.GetClient(ctx).StartCall(cctx, fullname, rpc.GlobMethod, []interface{}{tc.pattern})
+		if err != nil {
+			t.Fatalf("client.StartCall failed for %q: %v", tc.name, err)
+		}
+		results := []string{}
+		for {
+			var gr naming.GlobReply
+			if err := call.Recv(&gr); err != nil {
+				if err != io.EOF {
+					t.Fatalf("Recv failed for %q: %v. Results received thus far: %q", tc.name, err, results)
+				}
+				break
+			}
+			switch v := gr.(type) {
+			case naming.GlobReplyEntry:
+				results = append(results, v.Value.Name)
+			}
+		}
+		if err := call.Finish(); err != nil {
+			t.Fatalf("call.Finish failed for %q: %v", tc.name, err)
+		}
+		sort.Strings(results)
+		if !reflect.DeepEqual(tc.expected, results) {
+			t.Errorf("unexpected results for %q. Got %v, want %v", tc.name, results, tc.expected)
+		}
+	}
+}
+
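+// testObject is a trivial service whose Foo method returns "BAR".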
+type testObject struct {
+}
+
+func (o testObject) Foo(*context.T, rpc.ServerCall) (string, error) {
+	return "BAR", nil
+}
diff --git a/runtime/internal/rpc/test/full_test.go b/runtime/internal/rpc/test/full_test.go
new file mode 100644
index 0000000..7b5fe8c
--- /dev/null
+++ b/runtime/internal/rpc/test/full_test.go
@@ -0,0 +1,884 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"reflect"
+	"testing"
+	"time"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/i18n"
+	"v.io/v23/naming"
+	"v.io/v23/options"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+	"v.io/v23/vdl"
+	"v.io/v23/verror"
+	"v.io/v23/vtrace"
+	"v.io/x/lib/netstate"
+	vsecurity "v.io/x/ref/lib/security"
+	inaming "v.io/x/ref/runtime/internal/naming"
+	"v.io/x/ref/test"
+	"v.io/x/ref/test/testutil"
+)
+
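+// TestAddRemoveName exercises AddName and RemoveName on a running server,
+// waiting for the namespace to reflect each change.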
+func TestAddRemoveName(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+
+	_, s, err := v23.WithNewServer(ctx, "one", &testServer{}, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	waitForNames(t, ctx, true, "one")
+	s.AddName("two")
+	s.AddName("three")
+	waitForNames(t, ctx, true, "one", "two", "three")
+	s.RemoveName("one")
+	waitForNames(t, ctx, false, "one")
+	s.RemoveName("two")
+	s.RemoveName("three")
+	waitForNames(t, ctx, false, "one", "two", "three")
+}
+
+func TestCallWithNilContext(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+	call, err := v23.GetClient(ctx).StartCall(nil, "foo", "bar", []interface{}{}, options.SecurityNone)
+	if call != nil {
+		t.Errorf("Expected nil interface got: %#v", call)
+	}
+	if verror.ErrorID(err) != verror.ErrBadArg.ID {
+		t.Errorf("Expected a BadArg error, got: %s", err.Error())
+	}
+}
+
+func TestRPC(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+	ctx = v23.WithListenSpec(ctx, rpc.ListenSpec{
+		Addrs: rpc.ListenAddrs{{"tcp", "127.0.0.1:0"}},
+	})
+	testRPC(t, ctx, true)
+}
+
+func TestRPCWithWebsocket(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+	ctx = v23.WithListenSpec(ctx, rpc.ListenSpec{
+		Addrs: rpc.ListenAddrs{{"ws", "127.0.0.1:0"}},
+	})
+	testRPC(t, ctx, true)
+}
+
+// TestRPCCloseSendOnFinish tests that Finish informs the server that no more
+// inputs will be sent by the client if CloseSend has not already done so.
+func TestRPCCloseSendOnFinish(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+	ctx = v23.WithListenSpec(ctx, rpc.ListenSpec{
+		Addrs: rpc.ListenAddrs{{"tcp", "127.0.0.1:0"}},
+	})
+	testRPC(t, ctx, false)
+}
+
+func TestRPCCloseSendOnFinishWithWebsocket(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+	ctx = v23.WithListenSpec(ctx, rpc.ListenSpec{
+		Addrs: rpc.ListenAddrs{{"ws", "127.0.0.1:0"}},
+	})
+	testRPC(t, ctx, false)
+}
+
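+// testRPC runs a table of streaming and non-streaming calls against a
+// dispatching test server; shouldCloseSend controls whether CloseSend is
+// called explicitly before Finish.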
+func testRPC(t *testing.T, ctx *context.T, shouldCloseSend bool) {
+	ctx = i18n.WithLangID(ctx, "foolang")
+	type v []interface{}
+	type testcase struct {
+		name       string
+		method     string
+		args       v
+		streamArgs v
+		startErr   error
+		results    v
+		finishErr  error
+	}
+	var (
+		tests = []testcase{
+			{"mountpoint/server/suffix", "Closure", nil, nil, nil, nil, nil},
+			{"mountpoint/server/suffix", "Error", nil, nil, nil, nil, errMethod},
+
+			{"mountpoint/server/suffix", "Echo", v{"foo"}, nil, nil, v{`method:"Echo",suffix:"suffix",arg:"foo"`}, nil},
+			{"mountpoint/server/suffix/abc", "Echo", v{"bar"}, nil, nil, v{`method:"Echo",suffix:"suffix/abc",arg:"bar"`}, nil},
+
+			{"mountpoint/server/suffix", "EchoUser", v{"foo", userType("bar")}, nil, nil, v{`method:"EchoUser",suffix:"suffix",arg:"foo"`, userType("bar")}, nil},
+			{"mountpoint/server/suffix/abc", "EchoUser", v{"baz", userType("bla")}, nil, nil, v{`method:"EchoUser",suffix:"suffix/abc",arg:"baz"`, userType("bla")}, nil},
+			{"mountpoint/server/suffix", "Stream", v{"foo"}, v{userType("bar"), userType("baz")}, nil, v{`method:"Stream",suffix:"suffix",arg:"foo" bar baz`}, nil},
+			{"mountpoint/server/suffix/abc", "Stream", v{"123"}, v{userType("456"), userType("789")}, nil, v{`method:"Stream",suffix:"suffix/abc",arg:"123" 456 789`}, nil},
+			{"mountpoint/server/suffix", "EchoBlessings", nil, nil, nil, v{"[test-blessing/server]", "[test-blessing/client]"}, nil},
+			{"mountpoint/server/suffix", "EchoAndError", v{"bugs bunny"}, nil, nil, v{`method:"EchoAndError",suffix:"suffix",arg:"bugs bunny"`}, nil},
+			{"mountpoint/server/suffix", "EchoAndError", v{"error"}, nil, nil, nil, errMethod},
+			{"mountpoint/server/suffix", "EchoLang", nil, nil, nil, v{"foolang"}, nil},
+		}
+		name = func(t testcase) string {
+			return fmt.Sprintf("%s.%s(%v)", t.name, t.method, t.args)
+		}
+		cctx = withPrincipal(t, ctx, "client")
+		sctx = withPrincipal(t, ctx, "server")
+	)
+	_, _, err := v23.WithNewDispatchingServer(sctx, "mountpoint/server", &testServerDisp{&testServer{}})
+	if err != nil {
+		t.Fatal(err)
+	}
+	client := v23.GetClient(cctx)
+	for _, test := range tests {
+		cctx.VI(1).Infof("%s client.StartCall", name(test))
+		call, err := client.StartCall(cctx, test.name, test.method, test.args)
+		if err != test.startErr {
+			t.Errorf(`%s client.StartCall got error "%v", want "%v"`,
+				name(test), err, test.startErr)
+			continue
+		}
+		for _, sarg := range test.streamArgs {
+			cctx.VI(1).Infof("%s client.Send(%v)", name(test), sarg)
+			if err := call.Send(sarg); err != nil {
+				t.Errorf(`%s call.Send(%v) got unexpected error "%v"`, name(test), sarg, err)
+			}
+			var u userType
+			if err := call.Recv(&u); err != nil {
+				t.Errorf(`%s call.Recv(%v) got unexpected error "%v"`, name(test), sarg, err)
+			}
+			if !reflect.DeepEqual(u, sarg) {
+				t.Errorf("%s call.Recv got value %v, want %v", name(test), u, sarg)
+			}
+		}
+		if shouldCloseSend {
+			cctx.VI(1).Infof("%s call.CloseSend", name(test))
+			// When the method does not involve streaming
+			// arguments, the server gets all the arguments in
+			// StartCall and then sends a response without
+			// (unnecessarily) waiting for a CloseSend message from
+			// the client.  If the server responds before the
+			// CloseSend call is made at the client, the CloseSend
+			// call will fail.  Thus, only check for errors on
+			// CloseSend if there are streaming arguments to begin
+			// with (i.e., only if the server is expected to wait
+			// for the CloseSend notification).
+			if err := call.CloseSend(); err != nil && len(test.streamArgs) > 0 {
+				t.Errorf(`%s call.CloseSend got unexpected error "%v"`, name(test), err)
+			}
+		}
+		cctx.VI(1).Infof("%s client.Finish", name(test))
+		results := makeResultPtrs(test.results)
+		err = call.Finish(results...)
+		if got, want := err, test.finishErr; (got == nil) != (want == nil) {
+			t.Errorf(`%s call.Finish got error "%v", want "%v"`, name(test), got, want)
+		} else if want != nil && verror.ErrorID(got) != verror.ErrorID(want) {
+			t.Errorf(`%s call.Finish got error "%v", want "%v"`, name(test), got, want)
+		}
+		checkResultPtrs(t, name(test), results, test.results)
+
+		// Calling Finish a second time should result in a useful error.
+		err = call.Finish(results...)
+		if !matchesErrorPattern(err, verror.ErrBadState, "FinishAlreadyCalled") {
+			t.Fatalf(`got "%v", want "%v"`, err, verror.ErrBadState)
+		}
+	}
+}
+
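+// TestStreamReadTerminatedByServer checks that a client's stream Send fails
+// once the server has finished the call and closed the flow, rather than
+// blocking forever.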
+func TestStreamReadTerminatedByServer(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+	cctx := withPrincipal(t, ctx, "client")
+	sctx := withPrincipal(t, ctx, "server")
+
+	s := &streamRecvInGoroutineServer{c: make(chan error, 1)}
+	_, _, err := v23.WithNewDispatchingServer(sctx, "mountpoint/server", testServerDisp{s})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	call, err := v23.GetClient(cctx).StartCall(cctx, "mountpoint/server/suffix", "RecvInGoroutine", []interface{}{})
+	if err != nil {
+		t.Fatalf("StartCall failed: %v", err)
+	}
+
+	c := make(chan error, 1)
+	go func() {
+		for i := 0; true; i++ {
+			if err := call.Send(i); err != nil {
+				c <- err
+				return
+			}
+		}
+	}()
+
+	// The goroutine at the server executing "Recv" should have terminated
+	// with EOF.
+	if err := <-s.c; err != io.EOF {
+		t.Errorf("Got %v at server, want io.EOF", err)
+	}
+	// The client Send should have failed since the RPC has been
+	// terminated.
+	if err := <-c; err == nil {
+		t.Errorf("Client Send should fail as the server should have closed the flow")
+	}
+}
+
+func TestPreferredAddress(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+
+	sctx := withPrincipal(t, ctx, "server")
+	pa := netstate.AddressChooserFunc(func(string, []net.Addr) ([]net.Addr, error) {
+		return []net.Addr{netstate.NewNetAddr("tcp", "1.1.1.1")}, nil
+	})
+	sctx = v23.WithListenSpec(sctx, rpc.ListenSpec{
+		Addrs:          rpc.ListenAddrs{{"tcp", ":0"}},
+		AddressChooser: pa,
+	})
+	_, server, err := v23.WithNewServer(sctx, "", &testServer{}, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	iep := server.Status().Endpoints[0].(*inaming.Endpoint)
+	host, _, err := net.SplitHostPort(iep.Address)
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	if got, want := host, "1.1.1.1"; got != want {
+		t.Errorf("got %q, want %q", got, want)
+	}
+}
+
+func TestPreferredAddressErrors(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+
+	sctx := withPrincipal(t, ctx, "server")
+	paerr := netstate.AddressChooserFunc(func(_ string, a []net.Addr) ([]net.Addr, error) {
+		return nil, fmt.Errorf("oops")
+	})
+	sctx = v23.WithListenSpec(sctx, rpc.ListenSpec{
+		Addrs:          rpc.ListenAddrs{{"tcp", ":0"}},
+		AddressChooser: paerr,
+	})
+	_, server, err := v23.WithNewServer(sctx, "", &testServer{}, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	status := server.Status()
+	if got, want := len(status.Endpoints), 0; got != want {
+		t.Errorf("got %d, want %d", got, want)
+	}
+	if got, want := len(status.Errors), 1; got != want {
+		t.Errorf("got %d, want %d", got, want)
+	}
+	if got, want := status.Errors[0].Error(), "oops"; got != want {
+		t.Errorf("got %q, want %q", got, want)
+	}
+}
+
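+// TestGranter verifies that blessings produced by an rpc.Granter option are
+// conveyed to the server, and that granter errors surface at StartCall or
+// Finish as appropriate.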
+func TestGranter(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+	cctx := withPrincipal(t, ctx, "client")
+	sctx := withPrincipal(t, ctx, "server")
+	_, _, err := v23.WithNewDispatchingServer(sctx, "mountpoint/server", &testServerDisp{&testServer{}})
+	if err != nil {
+		t.Fatal(err)
+	}
+	tests := []struct {
+		granter                       rpc.Granter
+		startErrID, finishErrID       verror.IDAction
+		blessing, starterr, finisherr string
+	}{
+		{blessing: ""},
+		{granter: granter{}, blessing: "test-blessing/client/blessed"},
+		{
+			granter:  granter{b: bless(t, cctx, sctx, "blessed")},
+			blessing: "test-blessing/client/blessed",
+		},
+		{
+			granter:    granter{err: errors.New("hell no")},
+			startErrID: verror.ErrNotTrusted,
+			starterr:   "hell no",
+		},
+		{
+			granter:     granter{b: v23.GetPrincipal(cctx).BlessingStore().Default()},
+			finishErrID: verror.ErrNoAccess,
+			finisherr:   "blessing granted not bound to this server",
+		},
+	}
+	for i, test := range tests {
+		call, err := v23.GetClient(cctx).StartCall(cctx,
+			"mountpoint/server/suffix",
+			"EchoGrantedBlessings",
+			[]interface{}{"argument"},
+			test.granter)
+		if !matchesErrorPattern(err, test.startErrID, test.starterr) {
+			t.Errorf("%d: %+v: StartCall returned error %v", i, test, err)
+		}
+		if err != nil {
+			continue
+		}
+		var result, blessing string
+		if err = call.Finish(&result, &blessing); !matchesErrorPattern(err, test.finishErrID, test.finisherr) {
+			t.Errorf("%+v: Finish returned error %v", test, err)
+		}
+		if err != nil {
+			continue
+		}
+		if result != "argument" || blessing != test.blessing {
+			t.Errorf("%+v: Got (%q, %q)", test, result, blessing)
+		}
+	}
+}
+
+func TestRPCClientAuthorization(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+
+	type v []interface{}
+	var (
+		cctx                = withPrincipal(t, ctx, "client")
+		sctx                = withPrincipal(t, ctx, "server")
+		now                 = time.Now()
+		serverName          = "mountpoint/server"
+		dischargeServerName = "mountpoint/dischargeserver"
+
+		// Caveats on blessings to the client: First-party caveats
+		cavOnlyEcho = mkCaveat(security.NewMethodCaveat("Echo"))
+		cavExpired  = mkCaveat(security.NewExpiryCaveat(now.Add(-1 * time.Second)))
+		// Caveats on blessings to the client: Third-party caveats
+		cavTPValid = mkThirdPartyCaveat(
+			v23.GetPrincipal(ctx).PublicKey(),
+			dischargeServerName,
+			mkCaveat(security.NewExpiryCaveat(now.Add(24*time.Hour))))
+		cavTPExpired = mkThirdPartyCaveat(v23.GetPrincipal(ctx).PublicKey(),
+			dischargeServerName,
+			mkCaveat(security.NewExpiryCaveat(now.Add(-1*time.Second))))
+
+		// Client blessings that will be tested.
+		bServerClientOnlyEcho  = bless(t, sctx, cctx, "onlyecho", cavOnlyEcho)
+		bServerClientExpired   = bless(t, sctx, cctx, "expired", cavExpired)
+		bServerClientTPValid   = bless(t, sctx, cctx, "dischargeable_third_party_caveat", cavTPValid)
+		bServerClientTPExpired = bless(t, sctx, cctx, "expired_third_party_caveat", cavTPExpired)
+		bClient                = v23.GetPrincipal(cctx).BlessingStore().Default()
+		bRandom, _             = v23.GetPrincipal(cctx).BlessSelf("random")
+
+		tests = []struct {
+			blessings  security.Blessings // Blessings used by the client
+			name       string             // object name on which the method is invoked
+			method     string
+			args       v
+			results    v
+			authorized bool // Whether or not the RPC should be authorized by the server.
+		}{
+			// There are three different authorization policies
+			// (security.Authorizer implementations) used by the server,
+			// depending on the suffix (see testServerDisp.Lookup):
+			//
+			// - nilAuth suffix: the default authorization policy (only
+			// delegates of or delegators of the server can call RPCs)
+			//
+			// - aclAuth suffix: the AccessList only allows blessings
+			// matching the patterns "server" or "client"
+			//
+			// - other suffixes: testServerAuthorizer allows any principal
+			// to call any method except "Unauthorized"
+			//
+			// Expired blessings should fail nilAuth and aclAuth (which care
+			// about names), but should succeed on other suffixes (which
+			// allow all blessings), unless calling the Unauthorized method.
+			{bServerClientExpired, "mountpoint/server/nilAuth", "Echo", v{"foo"}, v{""}, false},
+			{bServerClientExpired, "mountpoint/server/aclAuth", "Echo", v{"foo"}, v{""}, false},
+			{bServerClientExpired, "mountpoint/server/suffix", "Echo", v{"foo"}, v{""}, true},
+			{bServerClientExpired, "mountpoint/server/suffix", "Unauthorized", nil, v{""}, false},
+
+			// Same for blessings that should fail to obtain a discharge for
+			// the third party caveat.
+			{bServerClientTPExpired, "mountpoint/server/nilAuth", "Echo", v{"foo"}, v{""}, false},
+			{bServerClientTPExpired, "mountpoint/server/aclAuth", "Echo", v{"foo"}, v{""}, false},
+			{bServerClientTPExpired, "mountpoint/server/suffix", "Echo", v{"foo"}, v{""}, true},
+			{bServerClientTPExpired, "mountpoint/server/suffix", "Unauthorized", nil, v{""}, false},
+
+			// The "server/client" blessing (with MethodCaveat("Echo"))
+			// should satisfy all authorization policies when "Echo" is
+			// called.
+			{bServerClientOnlyEcho, "mountpoint/server/nilAuth", "Echo", v{"foo"}, v{""}, true},
+			{bServerClientOnlyEcho, "mountpoint/server/aclAuth", "Echo", v{"foo"}, v{""}, true},
+			{bServerClientOnlyEcho, "mountpoint/server/suffix", "Echo", v{"foo"}, v{""}, true},
+
+			// The "server/client" blessing (with MethodCaveat("Echo"))
+			// should satisfy no authorization policy when any other method
+			// is invoked, except for the testServerAuthorizer policy (which
+			// will not recognize the blessing "server/onlyecho", but it
+			// would authorize anyone anyway).
+			{bServerClientOnlyEcho, "mountpoint/server/nilAuth", "Closure", nil, nil, false},
+			{bServerClientOnlyEcho, "mountpoint/server/aclAuth", "Closure", nil, nil, false},
+			{bServerClientOnlyEcho, "mountpoint/server/suffix", "Closure", nil, nil, true},
+
+			// The "client" blessing doesn't satisfy the default
+			// authorization policy, but does satisfy the AccessList and the
+			// testServerAuthorizer policy.
+			{bClient, "mountpoint/server/nilAuth", "Echo", v{"foo"}, v{""}, false},
+			{bClient, "mountpoint/server/aclAuth", "Echo", v{"foo"}, v{""}, true},
+			{bClient, "mountpoint/server/suffix", "Echo", v{"foo"}, v{""}, true},
+			{bClient, "mountpoint/server/suffix", "Unauthorized", nil, v{""}, false},
+
+			// The "random" blessing does not satisfy either the default
+			// policy or the AccessList, but does satisfy
+			// testServerAuthorizer.
+			{bRandom, "mountpoint/server/nilAuth", "Echo", v{"foo"}, v{""}, false},
+			{bRandom, "mountpoint/server/aclAuth", "Echo", v{"foo"}, v{""}, false},
+			{bRandom, "mountpoint/server/suffix", "Echo", v{"foo"}, v{""}, true},
+			{bRandom, "mountpoint/server/suffix", "Unauthorized", nil, v{""}, false},
+
+			// The "server/dischargeable_third_party_caveat" blessing satisfies all policies.
+			// (the discharges should be fetched).
+			{bServerClientTPValid, "mountpoint/server/nilAuth", "Echo", v{"foo"}, v{""}, true},
+			{bServerClientTPValid, "mountpoint/server/aclAuth", "Echo", v{"foo"}, v{""}, true},
+			{bServerClientTPValid, "mountpoint/server/suffix", "Echo", v{"foo"}, v{""}, true},
+			{bServerClientTPValid, "mountpoint/server/suffix", "Unauthorized", nil, v{""}, false},
+		}
+	)
+	// Start the discharge server.
+	_, _, err := v23.WithNewServer(ctx, dischargeServerName, &dischargeServer{}, security.AllowEveryone())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Start the main server.
+	_, _, err = v23.WithNewDispatchingServer(sctx, serverName, testServerDisp{&testServer{}})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// The server should recognize the client principal as an authority
+	// on "random" blessings.
+	v23.GetPrincipal(sctx).AddToRoots(bRandom)
+
+	// Set a blessing on the client's blessing store to be presented to
+	// the discharge server.
+	v23.GetPrincipal(cctx).BlessingStore().Set(
+		v23.GetPrincipal(cctx).BlessingStore().Default(), "test-blessing/$")
+
+	// testutil.NewPrincipal sets up a principal that shares blessings
+	// with all servers, undo that.
+	v23.GetPrincipal(cctx).BlessingStore().Set(
+		security.Blessings{}, security.AllPrincipals)
+
+	for i, test := range tests {
+		name := fmt.Sprintf("#%d: %q.%s(%v) by %v", i, test.name, test.method, test.args, test.blessings)
+		client := v23.GetClient(cctx)
+
+		v23.GetPrincipal(cctx).BlessingStore().Set(test.blessings, "test-blessing/server")
+		err = client.Call(cctx, test.name, test.method, test.args, makeResultPtrs(test.results))
+		if err != nil && test.authorized {
+			t.Errorf(`%s client.Call got error: "%v", wanted the RPC to succeed`, name, err)
+		} else if err == nil && !test.authorized {
+			t.Errorf("%s call.Finish succeeded, expected authorization failure", name)
+		} else if !test.authorized && verror.ErrorID(err) != verror.ErrNoAccess.ID {
+			t.Errorf("%s. call.Finish returned error %v(%v), wanted %v", name, verror.ErrorID(verror.Convert(verror.ErrNoAccess, nil, err)), err, verror.ErrNoAccess)
+		}
+	}
+}
+
+func TestRPCServerAuthorization(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+
+	const (
+		publicKeyErr        = "not matched by server key"
+		missingDischargeErr = "missing discharge"
+		expiryErr           = "is after expiry"
+		allowedErr          = "do not match any allowed server patterns"
+	)
+	type O []rpc.CallOpt // shorthand
+	var (
+		sctx    = withPrincipal(t, ctx, "server")
+		now     = time.Now()
+		noErrID verror.IDAction
+
+		// Third-party caveats on blessings presented by server.
+		cavTPValid = mkThirdPartyCaveat(
+			v23.GetPrincipal(ctx).PublicKey(),
+			"mountpoint/dischargeserver",
+			mkCaveat(security.NewExpiryCaveat(now.Add(24*time.Hour))))
+
+		cavTPExpired = mkThirdPartyCaveat(
+			v23.GetPrincipal(ctx).PublicKey(),
+			"mountpoint/dischargeserver",
+			mkCaveat(security.NewExpiryCaveat(now.Add(-1*time.Second))))
+
+		// Server blessings.
+		bServer          = bless(t, ctx, sctx, "server")
+		bServerExpired   = bless(t, ctx, sctx, "expiredserver", mkCaveat(security.NewExpiryCaveat(time.Now().Add(-1*time.Second))))
+		bServerTPValid   = bless(t, ctx, sctx, "serverWithTPCaveats", cavTPValid)
+		bServerTPExpired = bless(t, ctx, sctx, "serverWithExpiredTPCaveats", cavTPExpired)
+		bOther           = bless(t, ctx, sctx, "other")
+		bTwoBlessings, _ = security.UnionOfBlessings(bServer, bOther)
+
+		tests = []struct {
+			server security.Blessings // blessings presented by the server to the client.
+			name   string             // name provided by the client to StartCall
+			opts   O                  // options provided to StartCall.
+			errID  verror.IDAction
+			err    string
+		}{
+			// Client accepts talking to the server only if the
+			// server presents valid blessings (and discharges)
+			// consistent with the ones published in the endpoint.
+			{bServer, "mountpoint/server", nil, noErrID, ""},
+			{bServerTPValid, "mountpoint/server", nil, noErrID, ""},
+
+			// Client will not talk to a server that presents
+			// expired blessings or is missing discharges.
+			{bServerExpired, "mountpoint/server", nil, verror.ErrNotTrusted, expiryErr},
+			{bServerTPExpired, "mountpoint/server", nil, verror.ErrNotTrusted, missingDischargeErr},
+
+			// Testing the AllowedServersPolicy option.
+			{bServer, "mountpoint/server", O{options.AllowedServersPolicy{"otherroot"}}, verror.ErrNotTrusted, allowedErr},
+			{bServer, "mountpoint/server", O{options.AllowedServersPolicy{"test-blessing"}}, noErrID, ""},
+			{bTwoBlessings, "mountpoint/server", O{options.AllowedServersPolicy{"test-blessing/other"}}, noErrID, ""},
+
+			// Test the ServerPublicKey option.
+			{bOther, "mountpoint/server", O{options.SkipServerEndpointAuthorization{}, options.ServerPublicKey{
+				PublicKey: bOther.PublicKey(),
+			}}, noErrID, ""},
+			{bOther, "mountpoint/server", O{options.SkipServerEndpointAuthorization{}, options.ServerPublicKey{
+				PublicKey: testutil.NewPrincipal("irrelevant").PublicKey(),
+			}}, verror.ErrNotTrusted, publicKeyErr},
+
+			// Test the "paranoid" names, where the pattern is provided in the name.
+			{bServer, "__(test-blessing/server)/mountpoint/server", nil, noErrID, ""},
+			{bServer, "__(test-blessing/other)/mountpoint/server", nil, verror.ErrNotTrusted, allowedErr},
+			{bTwoBlessings, "__(test-blessing/server)/mountpoint/server", O{options.AllowedServersPolicy{"test-blessing/other"}}, noErrID, ""},
+		}
+	)
+	// Start the discharge server.
+	_, _, err := v23.WithNewServer(ctx, "mountpoint/dischargeserver", &dischargeServer{}, security.AllowEveryone())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for i, test := range tests {
+		scctx, cancel := context.WithCancel(sctx)
+		name := fmt.Sprintf("(#%d: Name:%q, Server:%q, opts:%v)",
+			i, test.name, test.server, test.opts)
+		if err := vsecurity.SetDefaultBlessings(v23.GetPrincipal(sctx), test.server); err != nil {
+			t.Fatal(err)
+		}
+		_, s, err := v23.WithNewDispatchingServer(scctx, "mountpoint/server", &testServerDisp{&testServer{}})
+		if err != nil {
+			t.Fatal(err, v23.GetPrincipal(scctx).BlessingStore().Default())
+		}
+		call, err := v23.GetClient(ctx).StartCall(ctx, test.name, "Method", nil, test.opts...)
+		if !matchesErrorPattern(err, test.errID, test.err) {
+			t.Errorf(`%s: client.StartCall: got error "%v", want to match "%v"`,
+				name, err, test.err)
+		} else if call != nil {
+			blessings, proof := call.RemoteBlessings()
+			if proof.IsZero() {
+				t.Errorf("%s: Returned zero value for remote blessings", name)
+			}
+			// Currently all tests are configured so that the only
+			// blessings presented by the server that are
+			// recognized by the client match the pattern
+			// "test-blessing".
+			if len(blessings) < 1 || !security.BlessingPattern("test-blessing").MatchedBy(blessings...) {
+				t.Errorf("%s: Client sees server as %v, expected a single blessing matching test-blessing", name, blessings)
+			}
+		}
+		s.Stop()
+		cancel()
+	}
+}
+
+func TestServerManInTheMiddleAttack(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+	// Test scenario: A server mounts itself, but then some other service
+	// somehow "takes over" the network endpoint (a naughty router
+	// perhaps), thus trying to steal traffic.
+	var (
+		cctx = withPrincipal(t, ctx, "client")
+		actx = withPrincipal(t, ctx, "attacker")
+	)
+	name := "mountpoint/server"
+	_, aserver, err := v23.WithNewDispatchingServer(actx, "", testServerDisp{&testServer{}})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// The legitimate server would have mounted the same endpoint on the
+	// namespace, but with different blessings.
+	ep := aserver.Status().Endpoints[0]
+	ep.(*inaming.Endpoint).Blessings = []string{"test-blessings/server"}
+	if err := v23.GetNamespace(actx).Mount(ctx, name, ep.Name(), time.Hour); err != nil {
+		t.Fatal(err)
+	}
+
+	// The RPC call should fail because the blessings presented by the
+	// (attacker's) server are not consistent with the ones registered in
+	// the mounttable trusted by the client.
+	if _, err := v23.GetClient(cctx).StartCall(cctx, "mountpoint/server", "Closure", nil); verror.ErrorID(err) != verror.ErrNotTrusted.ID {
+		t.Errorf("Got error %v (errorid=%v), want errorid=%v", err, verror.ErrorID(err), verror.ErrNotTrusted.ID)
+	}
+	// But the RPC should succeed if the client explicitly
+	// decided to skip server authorization.
+	if err := v23.GetClient(ctx).Call(cctx, "mountpoint/server", "Closure", nil, nil, options.SkipServerEndpointAuthorization{}); err != nil {
+		t.Errorf("Unexpected error(%v) when skipping server authorization", err)
+	}
+}
+
+func TestDischargeImpetusAndContextPropagation(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+	sctx := withPrincipal(t, ctx, "server")
+	cctx := withPrincipal(t, ctx, "client")
+
+	// Setup the client so that it shares a blessing with a third-party caveat with the server.
+	setClientBlessings := func(req security.ThirdPartyRequirements) {
+		cav, err := security.NewPublicKeyCaveat(
+			v23.GetPrincipal(ctx).PublicKey(),
+			"mountpoint/discharger",
+			req,
+			security.UnconstrainedUse())
+		if err != nil {
+			t.Fatalf("Failed to create ThirdPartyCaveat(%+v): %v", req, err)
+		}
+		b, err := v23.GetPrincipal(cctx).BlessSelf("client_for_server", cav)
+		if err != nil {
+			t.Fatalf("BlessSelf failed: %v", err)
+		}
+		v23.GetPrincipal(cctx).BlessingStore().Set(b, "test-blessing/server")
+	}
+
+	// Setup the discharge server.
+	var tester dischargeTestServer
+	_, _, err := v23.WithNewServer(ctx, "mountpoint/discharger", &tester, &testServerAuthorizer{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Setup the application server.
+	sctx = v23.WithListenSpec(sctx, rpc.ListenSpec{
+		Addrs: rpc.ListenAddrs{{"tcp", "127.0.0.1:0"}},
+	})
+	object := "mountpoint/object"
+	_, _, err = v23.WithNewServer(sctx, object, &testServer{}, &testServerAuthorizer{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	tests := []struct {
+		Requirements security.ThirdPartyRequirements
+		Impetus      security.DischargeImpetus
+	}{
+		{ // No requirements, no impetus
+			Requirements: security.ThirdPartyRequirements{},
+			Impetus:      security.DischargeImpetus{},
+		},
+		{ // Require everything
+			Requirements: security.ThirdPartyRequirements{ReportServer: true, ReportMethod: true, ReportArguments: true},
+			Impetus:      security.DischargeImpetus{Server: []security.BlessingPattern{"test-blessing/server"}, Method: "Method", Arguments: []*vdl.Value{vdl.StringValue("argument")}},
+		},
+		{ // Require only the method name
+			Requirements: security.ThirdPartyRequirements{ReportMethod: true},
+			Impetus:      security.DischargeImpetus{Method: "Method"},
+		},
+	}
+
+	for _, test := range tests {
+		setClientBlessings(test.Requirements)
+		tid := vtrace.GetSpan(cctx).Trace()
+		// StartCall should fetch the discharge; finishing the RPC is not needed for this test.
+		if _, err := v23.GetClient(cctx).StartCall(cctx, object, "Method", []interface{}{"argument"}); err != nil {
+			t.Errorf("StartCall(%+v) failed: %v", test.Requirements, err)
+			continue
+		}
+		impetus, traceid := tester.Release()
+		// There should have been exactly 1 attempt to fetch discharges when making
+		// the RPC to the remote object.
+		if len(impetus) != 1 || len(traceid) != 1 {
+			t.Errorf("Test %+v: Got (%d, %d) (#impetus, #traceid), wanted exactly one", test.Requirements, len(impetus), len(traceid))
+			continue
+		}
+		// VC creation does not have any "impetus", it is established without
+		// knowledge of the context of the RPC. So ignore that.
+		//
+		// TODO(ashankar): Should the impetus of the RPC that initiated the
+		// VIF/VC creation be propagated?
+		if got, want := impetus[len(impetus)-1], test.Impetus; !reflect.DeepEqual(got, want) {
+			t.Errorf("Test %+v: Got impetus %v, want %v", test.Requirements, got, want)
+		}
+		// But the context used for all of this should be the same
+		// (thereby allowing debug traces to link VIF/VC creation with
+		// the RPC that initiated them).
+		for idx, got := range traceid {
+			if !reflect.DeepEqual(got, tid) {
+				t.Errorf("Test %+v: %d - Got trace id %q, want %q", test.Requirements, idx, hex.EncodeToString(got[:]), hex.EncodeToString(tid[:]))
+			}
+		}
+	}
+}
+
+func TestRPCClientBlessingsPublicKey(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+
+	sctx := withPrincipal(t, ctx, "server")
+	bserver := v23.GetPrincipal(sctx).BlessingStore().Default()
+	cctx := withPrincipal(t, ctx, "client")
+	bclient := v23.GetPrincipal(cctx).BlessingStore().Default()
+	cctx, err := v23.WithPrincipal(cctx,
+		&singleBlessingPrincipal{Principal: v23.GetPrincipal(cctx)})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	bvictim := v23.GetPrincipal(withPrincipal(t, ctx, "victim")).BlessingStore().Default()
+
+	_, s, err := v23.WithNewDispatchingServer(sctx, "", testServerDisp{&testServer{}})
+	if err != nil {
+		t.Fatal(err)
+	}
+	object := naming.Join(s.Status().Endpoints[0].Name(), "suffix")
+
+	tests := []struct {
+		blessings security.Blessings
+		errID     verror.IDAction
+		err       string
+	}{
+		{blessings: bclient},
+		// server disallows clients from authenticating with blessings not bound to
+		// the client principal's public key
+		{blessings: bvictim, errID: verror.ErrNoAccess, err: "bound to a different public key"},
+		{blessings: bserver, errID: verror.ErrNoAccess, err: "bound to a different public key"},
+	}
+	for i, test := range tests {
+		name := fmt.Sprintf("%d: Client RPCing with blessings %v", i, test.blessings)
+		v23.GetPrincipal(cctx).BlessingStore().Set(test.blessings, "test-blessings")
+		if err := v23.GetClient(cctx).Call(cctx, object, "Closure", nil, nil); !matchesErrorPattern(err, test.errID, test.err) {
+			t.Errorf("%v: client.Call returned error %v", name, err)
+			continue
+		}
+	}
+}
+
+func TestServerLocalBlessings(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+	tpCav := mkThirdPartyCaveat(
+		v23.GetPrincipal(ctx).PublicKey(),
+		"mountpoint/dischargeserver",
+		mkCaveat(security.NewExpiryCaveat(time.Now().Add(time.Hour))))
+	sctx := withPrincipal(t, ctx, "server", tpCav)
+	cctx := withPrincipal(t, ctx, "client")
+
+	_, _, err := v23.WithNewServer(ctx, "mountpoint/dischargeserver", &dischargeServer{}, security.AllowEveryone())
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, _, err = v23.WithNewDispatchingServer(sctx, "mountpoint/server", testServerDisp{&testServer{}})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var gotServer, gotClient string
+	if err := v23.GetClient(cctx).Call(cctx, "mountpoint/server/suffix", "EchoBlessings", nil, []interface{}{&gotServer, &gotClient}); err != nil {
+		t.Fatalf("Finish failed: %v", err)
+	}
+	if wantServer, wantClient := "[test-blessing/server]", "[test-blessing/client]"; gotServer != wantServer || gotClient != wantClient {
+		t.Fatalf("EchoBlessings: got %v, %v want %v, %v", gotServer, gotClient, wantServer, wantClient)
+	}
+}
+
+func TestDischargePurgeFromCache(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+
+	sctx := withPrincipal(t, ctx, "server")
+	// Client is blessed with a third-party caveat. The discharger
+	// service issues discharges with a fakeTimeCaveat.  This blessing
+	// is presented to "server".
+	cctx := withPrincipal(t, ctx, "client",
+		mkThirdPartyCaveat(
+			v23.GetPrincipal(ctx).PublicKey(),
+			"mountpoint/dischargeserver",
+			security.UnconstrainedUse()))
+
+	_, _, err := v23.WithNewServer(ctx, "mountpoint/dischargeserver", &dischargeServer{}, security.AllowEveryone())
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, _, err = v23.WithNewDispatchingServer(sctx, "mountpoint/server", testServerDisp{&testServer{}})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	call := func() error {
+		var got string
+		if err := v23.GetClient(cctx).Call(cctx, "mountpoint/server/aclAuth", "Echo", []interface{}{"batman"}, []interface{}{&got}); err != nil {
+			return err
+		}
+		if want := `method:"Echo",suffix:"aclAuth",arg:"batman"`; got != want {
+			return verror.Convert(verror.ErrBadArg, nil, fmt.Errorf("Got [%v] want [%v]", got, want))
+		}
+		return nil
+	}
+
+	// First call should succeed
+	if err := call(); err != nil {
+		t.Fatal(err)
+	}
+	// Advance virtual clock, which will invalidate the discharge
+	clock.Advance(1)
+	if err, want := call(), "not authorized"; !matchesErrorPattern(err, verror.ErrNoAccess, want) {
+		t.Errorf("Got error [%v] wanted to match pattern %q", err, want)
+	}
+	// But retrying will succeed since the discharge should be purged
+	// from cache and refreshed
+	if err := call(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestServerPublicKeyOpt(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+
+	sctx := withPrincipal(t, ctx, "server")
+	cctx := withPrincipal(t, ctx, "client")
+	octx := withPrincipal(t, ctx, "other")
+
+	mountName := "mountpoint/default"
+	_, _, err := v23.WithNewDispatchingServer(sctx, mountName, testServerDisp{&testServer{}})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// The call should succeed when the server presents the same public key as the opt...
+	if _, err = v23.GetClient(cctx).StartCall(cctx, mountName, "Closure", nil, options.SkipServerEndpointAuthorization{}, options.ServerPublicKey{
+		PublicKey: v23.GetPrincipal(sctx).PublicKey(),
+	}); err != nil {
+		t.Errorf("Expected call to succeed but got %v", err)
+	}
+	// ...but fail if they differ.
+	if _, err = v23.GetClient(cctx).StartCall(cctx, mountName, "Closure", nil, options.SkipServerEndpointAuthorization{}, options.ServerPublicKey{
+		PublicKey: v23.GetPrincipal(octx).PublicKey(),
+	}); verror.ErrorID(err) != verror.ErrNotTrusted.ID {
+		t.Errorf("got %v, want %v", verror.ErrorID(err), verror.ErrNotTrusted.ID)
+	}
+}
diff --git a/runtime/internal/rpc/test/proxy_test.go b/runtime/internal/rpc/test/proxy_test.go
index 46c5078..d2689f6 100644
--- a/runtime/internal/rpc/test/proxy_test.go
+++ b/runtime/internal/rpc/test/proxy_test.go
@@ -21,31 +21,16 @@
 	"v.io/v23/rpc"
 	"v.io/v23/security"
 	"v.io/v23/verror"
-	"v.io/v23/vtrace"
-	"v.io/x/ref/lib/flags"
+	"v.io/x/ref"
 	_ "v.io/x/ref/runtime/factories/generic"
 	"v.io/x/ref/runtime/internal/lib/publisher"
 	inaming "v.io/x/ref/runtime/internal/naming"
-	irpc "v.io/x/ref/runtime/internal/rpc"
-	imanager "v.io/x/ref/runtime/internal/rpc/stream/manager"
 	"v.io/x/ref/runtime/internal/rpc/stream/proxy"
-	tnaming "v.io/x/ref/runtime/internal/testing/mocks/naming"
-	ivtrace "v.io/x/ref/runtime/internal/vtrace"
+	"v.io/x/ref/test"
 	"v.io/x/ref/test/modules"
 	"v.io/x/ref/test/testutil"
 )
 
-func testContext() (*context.T, func()) {
-	ctx, shutdown := v23.Init()
-	ctx, _ = context.WithTimeout(ctx, 20*time.Second)
-	var err error
-	if ctx, err = ivtrace.Init(ctx, flags.VtraceFlags{}); err != nil {
-		panic(err)
-	}
-	ctx, _ = vtrace.WithNewTrace(ctx)
-	return ctx, shutdown
-}
-
 var proxyServer = modules.Register(func(env *modules.Env, args ...string) error {
 	ctx, shutdown := v23.Init()
 	defer shutdown()
@@ -87,24 +72,6 @@
 	return nil
 }, "")
 
-type testServer struct{}
-
-func (*testServer) Echo(_ *context.T, call rpc.ServerCall, arg string) (string, error) {
-	return fmt.Sprintf("method:%q,suffix:%q,arg:%q", "Echo", call.Suffix(), arg), nil
-}
-
-type testServerAuthorizer struct{}
-
-func (testServerAuthorizer) Authorize(*context.T, security.Call) error {
-	return nil
-}
-
-type testServerDisp struct{ server interface{} }
-
-func (t testServerDisp) Lookup(_ *context.T, suffix string) (interface{}, security.Authorizer, error) {
-	return t.server, testServerAuthorizer{}, nil
-}
-
 type proxyHandle struct {
 	ns    namespace.T
 	sh    *modules.Shell
@@ -166,33 +133,41 @@
 }
 
 func testProxy(t *testing.T, spec rpc.ListenSpec, args ...string) {
-	ctx, shutdown := testContext()
+	if ref.RPCTransitionState() >= ref.XServers {
+		// This test cannot pass under the new RPC system.  It expects
+		// to distinguish between proxy and non-proxy endpoints,
+		// which the new system does not support.
+		t.SkipNow()
+	}
+	ctx, shutdown := test.V23Init()
 	defer shutdown()
 
 	var (
-		pserver   = testutil.NewPrincipal("server")
-		pclient   = testutil.NewPrincipal("client")
+		pserver   = testutil.NewPrincipal()
+		pclient   = testutil.NewPrincipal()
 		serverKey = pserver.PublicKey()
-		// We use different stream managers for the client and server
-		// to prevent VIF re-use (in other words, we want to test VIF
-		// creation from both the client and server end).
-		smserver = imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
-		smclient = imanager.InternalNew(ctx, naming.FixedRoutingID(0x444444444))
-		ns       = tnaming.NewSimpleNamespace()
+		ns        = v23.GetNamespace(ctx)
 	)
-	defer smserver.Shutdown()
-	defer smclient.Shutdown()
-	client, err := irpc.InternalNewClient(smserver, ns)
+	idp := testutil.IDProviderFromPrincipal(v23.GetPrincipal(ctx))
+	idp.Bless(pserver, "server")
+	idp.Bless(pclient, "client")
+	clientCtx, err := v23.WithPrincipal(ctx, pclient)
 	if err != nil {
 		t.Fatal(err)
 	}
-	defer client.Close()
-	serverCtx, _ := v23.WithPrincipal(ctx, pserver)
-	server, err := irpc.InternalNewServer(serverCtx, smserver, ns, nil, "", nil)
+	client := v23.GetClient(clientCtx)
+
+	serverCtx, err := v23.WithPrincipal(v23.WithListenSpec(ctx, spec), pserver)
 	if err != nil {
 		t.Fatal(err)
 	}
-	defer server.Stop()
+	_, server, err := v23.WithNewDispatchingServer(
+		serverCtx,
+		"mountpoint/server",
+		testServerDisp{&testServer{}})
+	if err != nil {
+		t.Fatal(err)
+	}
 
 	// The client must recognize the server's blessings, otherwise it won't
 	// communicate with it.
@@ -228,16 +203,21 @@
 		t.Fatalf("failed to lookup proxy")
 	}
 
-	eps, err := server.Listen(spec)
+	proxyAddr, _ := naming.SplitAddressName(addrs[0])
+	proxyEP, err := inaming.NewEndpoint(proxyAddr)
 	if err != nil {
-		t.Fatal(err)
-	}
-	if err := server.ServeDispatcher("mountpoint/server", testServerDisp{&testServer{}}); err != nil {
-		t.Fatal(err)
+		t.Fatalf("unexpected error for %q: %s", proxyAddr, err)
 	}
 
+	// Proxy connections are created asynchronously, so we wait for the
+	// expected number of endpoints to appear for the specified service name.
+	ch := make(chan struct{})
+	numNames := 1
+	if hasLocalListener {
+		numNames = 2
+	}
 	// Proxy connections are started asynchronously, so we need to wait..
-	waitForMountTable := func(ch chan int, expect int) {
+	go func() {
 		then := time.Now().Add(time.Minute)
 		for {
 			me, err := ns.Resolve(ctx, name)
@@ -247,64 +227,36 @@
 			for i, s := range me.Servers {
 				ctx.Infof("%d: %s", i, s)
 			}
-			if err == nil && len(me.Servers) == expect {
-				ch <- 1
+			if err == nil && len(me.Servers) == numNames {
+				close(ch)
 				return
 			}
 			if time.Now().After(then) {
-				t.Fatalf("timed out waiting for %d servers, found %d", expect, len(me.Servers))
+				t.Fatalf("timed out waiting for %d servers, found %d", numNames, len(me.Servers))
 			}
 			time.Sleep(100 * time.Millisecond)
 		}
-	}
-	waitForServerStatus := func(ch chan int, proxy string) {
-		then := time.Now().Add(time.Minute)
-		for {
-			status := server.Status()
-			if len(status.Proxies) == 1 && status.Proxies[0].Proxy == proxy {
-				ch <- 2
-				return
-			}
-			if time.Now().After(then) {
-				t.Fatalf("timed out")
-			}
-			time.Sleep(100 * time.Millisecond)
-		}
-	}
-	proxyEP, _ := naming.SplitAddressName(addrs[0])
-	proxiedEP, err := inaming.NewEndpoint(proxyEP)
-	if err != nil {
-		t.Fatalf("unexpected error for %q: %s", proxyEP, err)
-	}
-	proxiedEP.RID = naming.FixedRoutingID(0x555555555)
-	proxiedEP.Blessings = []string{"server"}
-	expectedNames := []string{naming.JoinAddressName(proxiedEP.String(), "suffix")}
-	if hasLocalListener {
-		expectedNames = append(expectedNames, naming.JoinAddressName(eps[0].String(), "suffix"))
-	}
+	}()
 
-	// Proxy connetions are created asynchronously, so we wait for the
-	// expected number of endpoints to appear for the specified service name.
-	ch := make(chan int, 2)
-	go waitForMountTable(ch, len(expectedNames))
-	go waitForServerStatus(ch, spec.Proxy)
 	select {
 	case <-time.After(time.Minute):
 		t.Fatalf("timedout waiting for two entries in the mount table and server status")
-	case i := <-ch:
-		select {
-		case <-time.After(time.Minute):
-			t.Fatalf("timedout waiting for two entries in the mount table or server status")
-		case j := <-ch:
-			if !((i == 1 && j == 2) || (i == 2 && j == 1)) {
-				t.Fatalf("unexpected return values from waiters")
-			}
-		}
+	case <-ch:
 	}
 
 	status := server.Status()
-	if got, want := status.Proxies[0].Endpoint, proxiedEP; !reflect.DeepEqual(got, want) {
-		t.Fatalf("got %q, want %q", got, want)
+	proxiedEP := status.Proxies[0].Endpoint
+	if proxiedEP.Network() != proxyEP.Network() ||
+		proxiedEP.Addr().String() != proxyEP.Addr().String() ||
+		proxiedEP.ServesMountTable() || proxiedEP.ServesLeaf() ||
+		proxiedEP.BlessingNames()[0] != "test-blessing/server" {
+		t.Fatalf("got %q, want (tcp, %s, s, test-blessing/server)",
+			proxiedEP, proxyEP.Addr().String())
+	}
+	expectedNames := []string{naming.JoinAddressName(proxiedEP.String(), "suffix")}
+	if hasLocalListener {
+		normalEP := status.Endpoints[0]
+		expectedNames = append(expectedNames, naming.JoinAddressName(normalEP.String(), "suffix"))
 	}
 
 	got := []string{}
@@ -322,7 +274,7 @@
 		// mount table, given that we're trying to test the proxy, we remove
 		// the local endpoint from the mount table entry!  We have to remove both
 		// the tcp and the websocket address.
-		sep := eps[0].String()
+		sep := status.Endpoints[0].String()
 		ns.Unmount(ctx, "mountpoint/server", sep)
 	}
 
diff --git a/runtime/internal/rpc/test/server_test.go b/runtime/internal/rpc/test/server_test.go
new file mode 100644
index 0000000..3c078af
--- /dev/null
+++ b/runtime/internal/rpc/test/server_test.go
@@ -0,0 +1,268 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+	"reflect"
+	"sort"
+	"testing"
+	"time"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/options"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+	inaming "v.io/x/ref/runtime/internal/naming"
+	"v.io/x/ref/test"
+)
+
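+// The types below are deliberately unsuitable to be served over RPC;
+// TestBadObject uses them to verify that serving them fails gracefully
+// instead of panicking.
+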
+type noMethodsType struct{ Field string }
+
+type fieldType struct {
+	unexported string
+}
+type noExportedFieldsType struct{}
+
+func (noExportedFieldsType) F(_ *context.T, _ rpc.ServerCall, f fieldType) error { return nil }
+
+type badObjectDispatcher struct{}
+
+func (badObjectDispatcher) Lookup(_ *context.T, suffix string) (interface{}, security.Authorizer, error) {
+	return noMethodsType{}, nil, nil
+}
+
+// TestBadObject ensures that Serve handles bad receiver objects gracefully (in
+// particular, it doesn't panic).
+func TestBadObject(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+
+	sctx := withPrincipal(t, ctx, "server")
+	cctx := withPrincipal(t, ctx, "client")
+
+	if _, _, err := v23.WithNewServer(sctx, "", nil, nil); err == nil {
+		t.Fatal("should have failed")
+	}
+	if _, _, err := v23.WithNewServer(sctx, "", new(noMethodsType), nil); err == nil {
+		t.Fatal("should have failed")
+	}
+	if _, _, err := v23.WithNewServer(sctx, "", new(noExportedFieldsType), nil); err == nil {
+		t.Fatal("should have failed")
+	}
+	if _, _, err := v23.WithNewDispatchingServer(sctx, "", badObjectDispatcher{}); err != nil {
+		t.Fatalf("ServeDispatcher failed: %v", err)
+	}
+	// TODO(mattr): It doesn't necessarily make sense to me that a bad object from
+	// the dispatcher results in a retry.
+	cctx, _ = context.WithTimeout(ctx, time.Second)
+	var result string
+	if err := v23.GetClient(cctx).Call(cctx, "servername", "SomeMethod", nil, []interface{}{&result}); err == nil {
+		// TODO(caprita): Check the error type rather than
+		// merely ensuring the test doesn't panic.
+		t.Fatalf("Call should have failed")
+	}
+}
+
+func TestServerArgs(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+	sctx := withPrincipal(t, ctx, "server")
+
+	esctx := v23.WithListenSpec(sctx, rpc.ListenSpec{})
+	_, _, err := v23.WithNewServer(esctx, "", &testServer{}, nil)
+	if verror.ErrorID(err) != verror.ErrBadArg.ID {
+		t.Fatalf("expected a BadArg error: got %v", err)
+	}
+
+	esctx = v23.WithListenSpec(sctx, rpc.ListenSpec{
+		Addrs: rpc.ListenAddrs{{"tcp", "*:0"}},
+	})
+	_, _, err = v23.WithNewServer(esctx, "", &testServer{}, nil)
+	if verror.ErrorID(err) != verror.ErrBadArg.ID {
+		t.Fatalf("expected a BadArg error: got %v", err)
+	}
+
+	esctx = v23.WithListenSpec(sctx, rpc.ListenSpec{
+		Addrs: rpc.ListenAddrs{
+			{"tcp", "*:0"},
+			{"tcp", "127.0.0.1:0"},
+		},
+	})
+	_, _, err = v23.WithNewServer(esctx, "", &testServer{}, nil)
+	if verror.ErrorID(err) == verror.ErrBadArg.ID {
+		t.Fatalf("did not expect a BadArg error: got %v", err)
+	}
+}
+
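+// statusServer blocks in Hang until the test closes its channel, letting
+// TestServerStatus observe the server in each of its states.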
+type statusServer struct{ ch chan struct{} }
+
+func (s *statusServer) Hang(*context.T, rpc.ServerCall) error {
+	s.ch <- struct{}{} // Notify that the server has received a call.
+	<-s.ch             // Wait for the server to be ready to go.
+	return nil
+}
+
+func TestServerStatus(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+
+	serverChan := make(chan struct{})
+	_, server, err := v23.WithNewServer(ctx, "test", &statusServer{serverChan}, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	status := server.Status()
+	if got, want := status.State, rpc.ServerActive; got != want {
+		t.Fatalf("got %s, want %s", got, want)
+	}
+
+	progress := make(chan error)
+	makeCall := func(ctx *context.T) {
+		call, err := v23.GetClient(ctx).StartCall(ctx, "test", "Hang", nil)
+		progress <- err
+		progress <- call.Finish()
+	}
+	go makeCall(ctx)
+
+	// Wait for the RPC to start and for the server to receive the call.
+	if err := <-progress; err != nil {
+		t.Fatal(err)
+	}
+	<-serverChan
+
+	// Stop the server asynchronously.
+	go func() {
+		if err := server.Stop(); err != nil {
+			t.Error(err)
+		}
+	}()
+
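+	// waitForStatus polls the server's status until it reaches the wanted
+	// state, failing the test if that does not happen within a minute.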
+	waitForStatus := func(want rpc.ServerState) {
+		then := time.Now()
+		for {
+			status = server.Status()
+			if got := status.State; got != want {
+				if time.Since(then) > time.Minute {
+					t.Fatalf("got %s, want %s", got, want)
+				}
+			} else {
+				break
+			}
+			time.Sleep(100 * time.Millisecond)
+		}
+	}
+
+	// Server should enter 'ServerStopping' state.
+	waitForStatus(rpc.ServerStopping)
+	// Server won't stop until the statusServer's hung method completes.
+	close(serverChan)
+	// Wait for RPC to finish
+	if err := <-progress; err != nil {
+		t.Fatal(err)
+	}
+	// Now that the RPC is done, the server should be able to stop.
+	waitForStatus(rpc.ServerStopped)
+}
+
+func TestMountStatus(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+
+	sctx := withPrincipal(t, ctx, "server")
+	sctx = v23.WithListenSpec(sctx, rpc.ListenSpec{
+		Addrs: rpc.ListenAddrs{
+			{"tcp", "127.0.0.1:0"},
+			{"tcp", "127.0.0.1:0"},
+		},
+	})
+	_, server, err := v23.WithNewServer(sctx, "foo", &testServer{}, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	status := server.Status()
+	eps := status.Endpoints
+	if got, want := len(eps), 2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+	setLeafEndpoints(eps)
+	if got, want := len(status.Mounts), 2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+	servers := status.Mounts.Servers()
+	if got, want := len(servers), 2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+	if got, want := servers, endpointToStrings(eps); !reflect.DeepEqual(got, want) {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+
+	// Add a second name and we should now see 4 mounts, 2 for each name.
+	if err := server.AddName("bar"); err != nil {
+		t.Fatal(err)
+	}
+	status = server.Status()
+	if got, want := len(status.Mounts), 4; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+	servers = status.Mounts.Servers()
+	if got, want := len(servers), 2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+	if got, want := servers, endpointToStrings(eps); !reflect.DeepEqual(got, want) {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+	names := status.Mounts.Names()
+	if got, want := len(names), 2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+	serversPerName := map[string][]string{}
+	for _, ms := range status.Mounts {
+		serversPerName[ms.Name] = append(serversPerName[ms.Name], ms.Server)
+	}
+	if got, want := len(serversPerName), 2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+	for _, name := range []string{"foo", "bar"} {
+		if got, want := len(serversPerName[name]), 2; got != want {
+			t.Fatalf("got %d, want %d", got, want)
+		}
+	}
+}
+
+func TestIsLeafServerOption(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+
+	_, _, err := v23.WithNewDispatchingServer(ctx, "leafserver",
+		&testServerDisp{&testServer{}}, options.IsLeaf(true))
+	if err != nil {
+		t.Fatal(err)
+	}
+	// We have set IsLeaf to true, so sending any suffix to leafserver should
+	// result in a "suffix was not expected" error.
+	var result string
+	callErr := v23.GetClient(ctx).Call(ctx, "leafserver/unwantedSuffix", "Echo", []interface{}{"Mirror on the wall"}, []interface{}{&result})
+	if callErr == nil {
+		t.Fatalf("Call should have failed with suffix was not expected error")
+	}
+}
+
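+// endpointToStrings returns the string form of each endpoint, sorted.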
+func endpointToStrings(eps []naming.Endpoint) []string {
+	r := []string{}
+	for _, ep := range eps {
+		r = append(r, ep.String())
+	}
+	sort.Strings(r)
+	return r
+}
+
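+// setLeafEndpoints marks each endpoint as a leaf endpoint so it matches the
+// form the server publishes to the mount table.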
+func setLeafEndpoints(eps []naming.Endpoint) {
+	for i := range eps {
+		eps[i].(*inaming.Endpoint).IsLeaf = true
+	}
+}
diff --git a/runtime/internal/rpc/test/testserver_test.go b/runtime/internal/rpc/test/testserver_test.go
new file mode 100644
index 0000000..6137cd5
--- /dev/null
+++ b/runtime/internal/rpc/test/testserver_test.go
@@ -0,0 +1,251 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+	"fmt"
+	"io"
+	"sync"
+	"time"
+
+	"v.io/v23/context"
+	"v.io/v23/i18n"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+	"v.io/v23/security/access"
+	"v.io/v23/uniqueid"
+	"v.io/v23/vtrace"
+)
+
+// This file contains a test server and dispatcher which are used by other
+// tests, especially those in full_test.
+
+type userType string
+
+type testServer struct{}
+
+func (*testServer) Closure(*context.T, rpc.ServerCall) error {
+	return nil
+}
+
+func (*testServer) Error(*context.T, rpc.ServerCall) error {
+	return errMethod
+}
+
+func (*testServer) Echo(_ *context.T, call rpc.ServerCall, arg string) (string, error) {
+	return fmt.Sprintf("method:%q,suffix:%q,arg:%q", "Echo", call.Suffix(), arg), nil
+}
+
+func (*testServer) EchoUser(_ *context.T, call rpc.ServerCall, arg string, u userType) (string, userType, error) {
+	return fmt.Sprintf("method:%q,suffix:%q,arg:%q", "EchoUser", call.Suffix(), arg), u, nil
+}
+
+func (*testServer) EchoLang(ctx *context.T, call rpc.ServerCall) (string, error) {
+	return string(i18n.GetLangID(ctx)), nil
+}
+
+func (*testServer) EchoBlessings(ctx *context.T, call rpc.ServerCall) (server, client string, _ error) {
+	local := security.LocalBlessingNames(ctx, call.Security())
+	remote, _ := security.RemoteBlessingNames(ctx, call.Security())
+	return fmt.Sprintf("%v", local), fmt.Sprintf("%v", remote), nil
+}
+
+func (*testServer) EchoGrantedBlessings(_ *context.T, call rpc.ServerCall, arg string) (result, blessing string, _ error) {
+	return arg, fmt.Sprintf("%v", call.GrantedBlessings()), nil
+}
+
+func (*testServer) EchoAndError(_ *context.T, call rpc.ServerCall, arg string) (string, error) {
+	result := fmt.Sprintf("method:%q,suffix:%q,arg:%q", "EchoAndError", call.Suffix(), arg)
+	if arg == "error" {
+		return result, errMethod
+	}
+	return result, nil
+}
+
+func (*testServer) Stream(_ *context.T, call rpc.StreamServerCall, arg string) (string, error) {
+	result := fmt.Sprintf("method:%q,suffix:%q,arg:%q", "Stream", call.Suffix(), arg)
+	var u userType
+	var err error
+	for err = call.Recv(&u); err == nil; err = call.Recv(&u) {
+		result += " " + string(u)
+		if err := call.Send(u); err != nil {
+			return "", err
+		}
+	}
+	if err == io.EOF {
+		err = nil
+	}
+	return result, err
+}
+
+func (*testServer) Unauthorized(*context.T, rpc.StreamServerCall) (string, error) {
+	return "UnauthorizedResult", nil
+}
+
+type testServerAuthorizer struct{}
+
+func (testServerAuthorizer) Authorize(ctx *context.T, call security.Call) error {
+	// Verify that the Call object seen by the authorizer
+	// has the necessary fields.
+	lb := call.LocalBlessings()
+	if lb.IsZero() {
+		return fmt.Errorf("testServerAuthorizer: Call object %v has no LocalBlessings", call)
+	}
+	if tpcavs := lb.ThirdPartyCaveats(); len(tpcavs) > 0 && call.LocalDischarges() == nil {
+		return fmt.Errorf("testServerAuthorizer: Call object %v has no LocalDischarges even when LocalBlessings have third-party caveats", call)
+	}
+	if call.LocalPrincipal() == nil {
+		return fmt.Errorf("testServerAuthorizer: Call object %v has no LocalPrincipal", call)
+	}
+	if call.Method() == "" {
+		return fmt.Errorf("testServerAuthorizer: Call object %v has no Method", call)
+	}
+	if call.LocalEndpoint() == nil {
+		return fmt.Errorf("testServerAuthorizer: Call object %v has no LocalEndpoint", call)
+	}
+	if call.RemoteEndpoint() == nil {
+		return fmt.Errorf("testServerAuthorizer: Call object %v has no RemoteEndpoint", call)
+	}
+
+	// Do not authorize the method "Unauthorized".
+	if call.Method() == "Unauthorized" {
+		return fmt.Errorf("testServerAuthorizer denied access")
+	}
+	return nil
+}
+
+type testServerDisp struct{ server interface{} }
+
+func (t testServerDisp) Lookup(_ *context.T, suffix string) (interface{}, security.Authorizer, error) {
+	// If suffix is "nilAuth" we use default authorization, if it is "aclAuth" we
+	// use an AccessList-based authorizer, and otherwise we use the custom testServerAuthorizer.
+	var authorizer security.Authorizer
+	switch suffix {
+	case "discharger":
+		return &dischargeServer{}, testServerAuthorizer{}, nil
+	case "nilAuth":
+		authorizer = nil
+	case "aclAuth":
+		authorizer = &access.AccessList{
+			In: []security.BlessingPattern{"test-blessing/client", "test-blessing/server"},
+		}
+	default:
+		authorizer = testServerAuthorizer{}
+	}
+	return t.server, authorizer, nil
+}
+
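+// dischargeServer issues discharges for third-party caveats, adding a
+// fakeTimeCaveat so that tests can control expiry via 'clock'.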
+type dischargeServer struct {
+	mu     sync.Mutex
+	called bool
+}
+
+func (ds *dischargeServer) Discharge(ctx *context.T, call rpc.StreamServerCall, cav security.Caveat, _ security.DischargeImpetus) (security.Discharge, error) {
+	ds.mu.Lock()
+	ds.called = true
+	ds.mu.Unlock()
+	tp := cav.ThirdPartyDetails()
+	if tp == nil {
+		return security.Discharge{}, fmt.Errorf("discharger: %v does not represent a third-party caveat", cav)
+	}
+	if err := tp.Dischargeable(ctx, call.Security()); err != nil {
+		return security.Discharge{}, fmt.Errorf("third-party caveat %v cannot be discharged for this context: %v", cav, err)
+	}
+	// Add a fakeTimeCaveat to be able to control discharge expiration via 'clock'.
+	expiry, err := security.NewCaveat(fakeTimeCaveat, clock.Now())
+	if err != nil {
+		return security.Discharge{}, fmt.Errorf("failed to create an expiration on the discharge: %v", err)
+	}
+	return call.Security().LocalPrincipal().MintDischarge(cav, expiry)
+}
+
+// granter implements rpc.Granter.
+//
+// It returns the specified (security.Blessings, error) pair if either the
+// blessing or the error is specified. Otherwise it returns a blessing
+// derived from the local blessings of the current call.
+type granter struct {
+	rpc.CallOpt
+	b   security.Blessings
+	err error
+}
+
+func (g granter) Grant(ctx *context.T, call security.Call) (security.Blessings, error) {
+	if !g.b.IsZero() || g.err != nil {
+		return g.b, g.err
+	}
+	return call.LocalPrincipal().Bless(
+		call.RemoteBlessings().PublicKey(),
+		call.LocalBlessings(),
+		"blessed",
+		security.UnconstrainedUse())
+}
+
+// dischargeTestServer implements the discharge service. Always fails to
+// issue a discharge, but records the impetus and traceid of the RPC call.
+type dischargeTestServer struct {
+	p       security.Principal
+	impetus []security.DischargeImpetus
+	traceid []uniqueid.Id
+}
+
+func (s *dischargeTestServer) Discharge(ctx *context.T, _ rpc.ServerCall, cav security.Caveat, impetus security.DischargeImpetus) (security.Discharge, error) {
+	s.impetus = append(s.impetus, impetus)
+	s.traceid = append(s.traceid, vtrace.GetSpan(ctx).Trace())
+	return security.Discharge{}, fmt.Errorf("discharges not issued")
+}
+
+func (s *dischargeTestServer) Release() ([]security.DischargeImpetus, []uniqueid.Id) {
+	impetus, traceid := s.impetus, s.traceid
+	s.impetus, s.traceid = nil, nil
+	return impetus, traceid
+}
+
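+// streamRecvInGoroutineServer reads the client's stream from a separate
+// goroutine and reports the final Recv error on c.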
+type streamRecvInGoroutineServer struct{ c chan error }
+
+func (s *streamRecvInGoroutineServer) RecvInGoroutine(_ *context.T, call rpc.StreamServerCall) error {
+	// Spawn a goroutine to read streaming data from the client.
+	go func() {
+		var i interface{}
+		for {
+			err := call.Recv(&i)
+			if err != nil {
+				s.c <- err
+				return
+			}
+		}
+	}()
+	// Imagine the server did some processing here and now that it is done,
+	// it does not care to see what else the client has to say.
+	return nil
+}
+
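+// expiryDischarger issues a discharge with a very short expiry on the first
+// call and a longer one on subsequent calls, so tests can exercise discharge
+// refresh behavior.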
+type expiryDischarger struct {
+	called bool
+}
+
+func (ed *expiryDischarger) Discharge(ctx *context.T, call rpc.StreamServerCall, cav security.Caveat, _ security.DischargeImpetus) (security.Discharge, error) {
+	tp := cav.ThirdPartyDetails()
+	if tp == nil {
+		return security.Discharge{}, fmt.Errorf("discharger: %v does not represent a third-party caveat", cav)
+	}
+	if err := tp.Dischargeable(ctx, call.Security()); err != nil {
+		return security.Discharge{}, fmt.Errorf("third-party caveat %v cannot be discharged for this context: %v", cav, err)
+	}
+	expDur := 10 * time.Millisecond
+	if ed.called {
+		expDur = time.Second
+	}
+	expiry, err := security.NewExpiryCaveat(time.Now().Add(expDur))
+	if err != nil {
+		return security.Discharge{}, fmt.Errorf("failed to create an expiration on the discharge: %v", err)
+	}
+	d, err := call.Security().LocalPrincipal().MintDischarge(cav, expiry)
+	if err != nil {
+		return security.Discharge{}, err
+	}
+	ed.called = true
+	return d, nil
+}
diff --git a/runtime/internal/rpc/test/testutil_test.go b/runtime/internal/rpc/test/testutil_test.go
new file mode 100644
index 0000000..5b32e71
--- /dev/null
+++ b/runtime/internal/rpc/test/testutil_test.go
@@ -0,0 +1,208 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/security"
+	"v.io/v23/uniqueid"
+	"v.io/v23/vdl"
+	"v.io/v23/verror"
+	"v.io/x/ref/test/testutil"
+)
+
+var errMethod = verror.New(verror.ErrAborted, nil)
+var fakeTimeCaveat = security.CaveatDescriptor{
+	Id:        uniqueid.Id{0x18, 0xba, 0x6f, 0x84, 0xd5, 0xec, 0xdb, 0x9b, 0xf2, 0x32, 0x19, 0x5b, 0x53, 0x92, 0x80, 0x0},
+	ParamType: vdl.TypeOf(int64(0)),
+}
+
+func init() {
+	security.RegisterCaveatValidator(fakeTimeCaveat, func(_ *context.T, _ security.Call, t int64) error {
+		if now := clock.Now(); now > t {
+			return fmt.Errorf("fakeTimeCaveat expired: now=%d > then=%d", now, t)
+		}
+		return nil
+	})
+}
+
+var clock = new(fakeClock)
+
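+// fakeClock is a manually advanced clock used by the fakeTimeCaveat validator
+// to control discharge expiry in tests.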
+type fakeClock struct {
+	sync.Mutex
+	time int64
+}
+
+func (c *fakeClock) Now() int64 {
+	c.Lock()
+	defer c.Unlock()
+	return c.time
+}
+
+func (c *fakeClock) Advance(steps uint) {
+	c.Lock()
+	c.time += int64(steps)
+	c.Unlock()
+}
+
+// singleBlessingStore implements security.BlessingStore. It is a
+// BlessingStore that marks the last blessing that was set on it as
+// shareable with any peer. It does not care about the public key that the
+// blessing being set is bound to.
+type singleBlessingStore struct {
+	b security.Blessings
+}
+
+func (s *singleBlessingStore) Set(b security.Blessings, _ security.BlessingPattern) (security.Blessings, error) {
+	s.b = b
+	return security.Blessings{}, nil
+}
+func (s *singleBlessingStore) ForPeer(...string) security.Blessings {
+	return s.b
+}
+func (*singleBlessingStore) SetDefault(b security.Blessings) error {
+	return nil
+}
+func (*singleBlessingStore) Default() security.Blessings {
+	return security.Blessings{}
+}
+func (*singleBlessingStore) PublicKey() security.PublicKey {
+	return nil
+}
+func (*singleBlessingStore) DebugString() string {
+	return ""
+}
+func (*singleBlessingStore) PeerBlessings() map[security.BlessingPattern]security.Blessings {
+	return nil
+}
+func (*singleBlessingStore) CacheDischarge(security.Discharge, security.Caveat, security.DischargeImpetus) {
+	return
+}
+func (*singleBlessingStore) ClearDischarges(...security.Discharge) {
+	return
+}
+func (*singleBlessingStore) Discharge(security.Caveat, security.DischargeImpetus) security.Discharge {
+	return security.Discharge{}
+}
+
+// singleBlessingPrincipal implements security.Principal. It is a wrapper over
+// a security.Principal that intercepts all invocations on the
+// principal's BlessingStore and serves them via a singleBlessingStore.
+type singleBlessingPrincipal struct {
+	security.Principal
+	b singleBlessingStore
+}
+
+func (p *singleBlessingPrincipal) BlessingStore() security.BlessingStore {
+	return &p.b
+}
+
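+// withPrincipal returns a copy of ctx whose principal is a new principal
+// blessed with the given name and caveats by ctx's principal acting as the
+// identity provider.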
+func withPrincipal(t *testing.T, ctx *context.T, name string, caveats ...security.Caveat) *context.T {
+	idp := testutil.IDProviderFromPrincipal(v23.GetPrincipal(ctx))
+	p := testutil.NewPrincipal()
+	if err := idp.Bless(p, name, caveats...); err != nil {
+		t.Fatal(err)
+	}
+	ctx, err := v23.WithPrincipal(ctx, p)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return ctx
+}
+
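+// bless uses the principal in pctx as an identity provider to create blessings
+// for the principal in whoctx, extended with extension and the given caveats.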
+func bless(t *testing.T, pctx, whoctx *context.T, extension string, cavs ...security.Caveat) security.Blessings {
+	idp := testutil.IDProviderFromPrincipal(v23.GetPrincipal(pctx))
+	b, err := idp.NewBlessings(v23.GetPrincipal(whoctx), extension, cavs...)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return b
+}
+
+func mkCaveat(cav security.Caveat, err error) security.Caveat {
+	if err != nil {
+		panic(err)
+	}
+	return cav
+}
+
+func mkThirdPartyCaveat(discharger security.PublicKey, location string, c security.Caveat) security.Caveat {
+	tpc, err := security.NewPublicKeyCaveat(discharger, location, security.ThirdPartyRequirements{}, c)
+	if err != nil {
+		panic(err)
+	}
+	return tpc
+}
+
+func matchesErrorPattern(err error, id verror.IDAction, pattern string) bool {
+	if len(pattern) > 0 && err != nil && !strings.Contains(err.Error(), pattern) {
+		return false
+	}
+	if err == nil && id.ID == "" {
+		return true
+	}
+	return verror.ErrorID(err) == id.ID
+}
+
+func waitForNames(t *testing.T, ctx *context.T, exist bool, names ...string) {
+	for _, n := range names {
+		for {
+			me, err := v23.GetNamespace(ctx).Resolve(ctx, n)
+			if err == nil && exist && len(me.Names()) > 0 {
+				break
+			}
+			if (err != nil && !exist) || (err == nil && len(me.Names()) == 0) {
+				break
+			}
+			ctx.Infof("Still waiting for %v, %v: %#v, %v", exist, n, me, err)
+			time.Sleep(10 * time.Millisecond)
+		}
+	}
+}
+
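+// makeResultPtrs allocates a pointer for each input value so the results of a
+// call can be received into values of the same types.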
+func makeResultPtrs(ins []interface{}) []interface{} {
+	outs := make([]interface{}, len(ins))
+	for ix, in := range ins {
+		typ := reflect.TypeOf(in)
+		if typ == nil {
+			// Nil indicates interface{}.
+			var empty interface{}
+			typ = reflect.ValueOf(&empty).Elem().Type()
+		}
+		outs[ix] = reflect.New(typ).Interface()
+	}
+	return outs
+}
+
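+// checkResultPtrs verifies that the values pointed to by gotptrs match want,
+// comparing verror values by error ID only.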
+func checkResultPtrs(t *testing.T, name string, gotptrs, want []interface{}) {
+	for ix, res := range gotptrs {
+		got := reflect.ValueOf(res).Elem().Interface()
+		want := want[ix]
+		switch g := got.(type) {
+		case verror.E:
+			w, ok := want.(verror.E)
+			// Don't use reflect.DeepEqual on verror values since they contain
+			// a list of stack PCs which will be different.
+			if !ok {
+				t.Errorf("%s result %d got type %T, want %T", name, ix, g, w)
+			}
+			if verror.ErrorID(g) != w.ID {
+				t.Errorf("%s result %d got %v, want %v", name, ix, g, w)
+			}
+		default:
+			if !reflect.DeepEqual(got, want) {
+				t.Errorf("%s result %d got %v, want %v", name, ix, got, want)
+			}
+		}
+	}
+}
diff --git a/runtime/internal/rpc/transition/transition_test.go b/runtime/internal/rpc/transition/transition_test.go
index d50c239..13ab953 100644
--- a/runtime/internal/rpc/transition/transition_test.go
+++ b/runtime/internal/rpc/transition/transition_test.go
@@ -38,7 +38,7 @@
 
 	sp := testutil.NewPrincipal()
 	testutil.IDProviderFromPrincipal(v23.GetPrincipal(ctx)).Bless(sp, "server")
-	server, err := irpc.InternalNewServer(ctx, sm, v23.GetNamespace(ctx),
+	server, err := irpc.DeprecatedNewServer(ctx, sm, v23.GetNamespace(ctx),
 		nil, "", v23.GetClient(ctx))
 	if err != nil {
 		t.Fatal(err)
diff --git a/runtime/internal/rpc/transitionclient.go b/runtime/internal/rpc/transitionclient.go
index 0c41746..ade2e3a 100644
--- a/runtime/internal/rpc/transitionclient.go
+++ b/runtime/internal/rpc/transitionclient.go
@@ -26,7 +26,7 @@
 	if ret.xc, err = NewXClient(ctx, flowMgr, ns, opts...); err != nil {
 		return nil, err
 	}
-	if ret.c, err = InternalNewClient(streamMgr, ns, opts...); err != nil {
+	if ret.c, err = DeprecatedNewClient(streamMgr, ns, opts...); err != nil {
 		ret.xc.Close()
 		return nil, err
 	}
diff --git a/runtime/internal/rpc/x_test.go b/runtime/internal/rpc/x_test.go
index e36c3f9..0c4bf18 100644
--- a/runtime/internal/rpc/x_test.go
+++ b/runtime/internal/rpc/x_test.go
@@ -34,6 +34,7 @@
 	if err != nil {
 		t.Fatal(verror.DebugString(err))
 	}
+	ctx = fake.SetFlowManager(ctx, manager.New(ctx, naming.FixedRoutingID(0x2)))
 	client, err := NewXClient(ctx, v23.ExperimentalGetFlowManager(ctx), v23.GetNamespace(ctx))
 	if err != nil {
 		t.Fatal(verror.DebugString(err))
@@ -64,6 +65,7 @@
 	if err != nil {
 		t.Fatal(verror.DebugString(err))
 	}
+	ctx = fake.SetFlowManager(ctx, manager.New(ctx, naming.FixedRoutingID(0x2)))
 	client, err := NewXClient(ctx, v23.ExperimentalGetFlowManager(ctx), v23.GetNamespace(ctx))
 	if err != nil {
 		t.Fatal(verror.DebugString(err))
diff --git a/runtime/internal/rpc/xserver.go b/runtime/internal/rpc/xserver.go
index 8b3b892..e5c7f6f 100644
--- a/runtime/internal/rpc/xserver.go
+++ b/runtime/internal/rpc/xserver.go
@@ -55,9 +55,6 @@
 	chosenEndpoints   []*inaming.Endpoint
 	typeCache         *typeCache
 
-	// state of proxies keyed by the name of the proxy
-	proxies map[string]proxyState
-
 	disp               rpc.Dispatcher // dispatcher to serve RPCs
 	dispReserved       rpc.Dispatcher // dispatcher for reserved methods
 	active             sync.WaitGroup // active goroutines we've spawned.
@@ -103,7 +100,6 @@
 		principal:         principal,
 		blessings:         principal.BlessingStore().Default(),
 		publisher:         publisher.New(ctx, ns, publishPeriod),
-		proxies:           make(map[string]proxyState),
 		stoppedChan:       make(chan struct{}),
 		ns:                ns,
 		stats:             newRPCStats(statsPrefix),
@@ -247,10 +243,24 @@
 	s.Lock()
 	defer s.Unlock()
 	var lastErr error
+	var ep string
+	if len(listenSpec.Proxy) > 0 {
+		ep, lastErr = s.resolveToEndpoint(listenSpec.Proxy)
+		if lastErr != nil {
+			s.ctx.VI(2).Infof("resolveToEndpoint(%q) failed: %v", listenSpec.Proxy, lastErr)
+		} else {
+			lastErr = s.flowMgr.Listen(ctx, inaming.Network, ep)
+			if lastErr != nil {
+				s.ctx.VI(2).Infof("Listen(%q, %q, ...) failed: %v", inaming.Network, ep, lastErr)
+			}
+		}
+	}
 	for _, addr := range listenSpec.Addrs {
 		if len(addr.Address) > 0 {
 			lastErr = s.flowMgr.Listen(ctx, addr.Protocol, addr.Address)
-			s.ctx.VI(2).Infof("Listen(%q, %q, ...) failed: %v", addr.Protocol, addr.Address, lastErr)
+			if lastErr != nil {
+				s.ctx.VI(2).Infof("Listen(%q, %q, ...) failed: %v", addr.Protocol, addr.Address, lastErr)
+			}
 		}
 	}
 
diff --git a/runtime/internal/rt/runtime.go b/runtime/internal/rt/runtime.go
index 02e919a..77f07f9 100644
--- a/runtime/internal/rt/runtime.go
+++ b/runtime/internal/rt/runtime.go
@@ -27,6 +27,7 @@
 	"v.io/v23/verror"
 	"v.io/v23/vtrace"
 
+	"v.io/x/ref"
 	"v.io/x/ref/internal/logger"
 	"v.io/x/ref/lib/apilog"
 	"v.io/x/ref/lib/flags"
@@ -43,27 +44,6 @@
 	ivtrace "v.io/x/ref/runtime/internal/vtrace"
 )
 
-const (
-	None = iota
-	XClients
-	XServers
-)
-
-var TransitionState = None
-
-func init() {
-	switch ts := os.Getenv("V23_RPC_TRANSITION_STATE"); ts {
-	case "xclients":
-		TransitionState = XClients
-	case "xservers":
-		TransitionState = XServers
-	case "":
-		TransitionState = None
-	default:
-		panic("Unknown transition state: " + ts)
-	}
-}
-
 type contextKey int
 
 const (
@@ -288,7 +268,7 @@
 			Blessings: principal.BlessingStore().Default(),
 		})
 	}
-	server, err := irpc.InternalNewServer(ctx, sm, ns, id.settingsPublisher, id.settingsName, r.GetClient(ctx), otherOpts...)
+	server, err := irpc.DeprecatedNewServer(ctx, sm, ns, id.settingsPublisher, id.settingsName, r.GetClient(ctx), otherOpts...)
 	if err != nil {
 		return nil, err
 	}
@@ -442,11 +422,11 @@
 	var err error
 	deps := []interface{}{vtraceDependency{}}
 
-	if fm != nil && TransitionState >= XClients {
+	if fm != nil && ref.RPCTransitionState() >= ref.XClients {
 		client, err = irpc.NewTransitionClient(ctx, sm, fm, ns, otherOpts...)
 		deps = append(deps, fm, sm)
 	} else {
-		client, err = irpc.InternalNewClient(sm, ns, otherOpts...)
+		client, err = irpc.DeprecatedNewClient(sm, ns, otherOpts...)
 		deps = append(deps, sm)
 	}
 
@@ -601,7 +581,7 @@
 
 func (r *Runtime) WithNewServer(ctx *context.T, name string, object interface{}, auth security.Authorizer, opts ...rpc.ServerOpt) (*context.T, rpc.Server, error) {
 	defer apilog.LogCall(ctx)(ctx) // gologcop: DO NOT EDIT, MUST BE FIRST STATEMENT
-	if TransitionState >= XServers {
+	if ref.RPCTransitionState() >= ref.XServers {
 		// TODO(mattr): Deal with shutdown deps.
 		newctx, spub, sname, opts, err := r.commonServerInit(ctx, opts...)
 		if err != nil {
@@ -631,7 +611,7 @@
 
 func (r *Runtime) WithNewDispatchingServer(ctx *context.T, name string, disp rpc.Dispatcher, opts ...rpc.ServerOpt) (*context.T, rpc.Server, error) {
 	defer apilog.LogCall(ctx)(ctx) // gologcop: DO NOT EDIT, MUST BE FIRST STATEMENT
-	if TransitionState >= XServers {
+	if ref.RPCTransitionState() >= ref.XServers {
 		// TODO(mattr): Deal with shutdown deps.
 		newctx, spub, sname, opts, err := r.commonServerInit(ctx, opts...)
 		if err != nil {
diff --git a/services/agent/internal/server/sock_len.go b/services/agent/internal/server/sock_len.go
index 7706d58..211bf7d 100644
--- a/services/agent/internal/server/sock_len.go
+++ b/services/agent/internal/server/sock_len.go
@@ -1,3 +1,7 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
 package server
 
 // #include <sys/un.h>
diff --git a/services/agent/internal/server/sock_len_darwin_test.go b/services/agent/internal/server/sock_len_darwin_test.go
index ca8fc74..87ddbb9 100644
--- a/services/agent/internal/server/sock_len_darwin_test.go
+++ b/services/agent/internal/server/sock_len_darwin_test.go
@@ -1,3 +1,7 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
 package server
 
 import (
diff --git a/services/agent/internal/server/sock_len_linux_test.go b/services/agent/internal/server/sock_len_linux_test.go
index e592f12..0ec18b1 100644
--- a/services/agent/internal/server/sock_len_linux_test.go
+++ b/services/agent/internal/server/sock_len_linux_test.go
@@ -1,3 +1,7 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
 package server
 
 import (
diff --git a/services/agent/vbecome/vbecome.go b/services/agent/vbecome/vbecome.go
index 15abcc0..1b91eb2 100644
--- a/services/agent/vbecome/vbecome.go
+++ b/services/agent/vbecome/vbecome.go
@@ -109,6 +109,11 @@
 		}
 	}
 
+	// Clear out the environment variable before starting the child.
+	if err = ref.EnvClearCredentials(); err != nil {
+		return err
+	}
+
 	// Start an agent server.
 	i := ipc.NewIPC()
 	if err := server.ServeAgent(i, principal); err != nil {
diff --git a/services/device/deviced/internal/starter/starter.go b/services/device/deviced/internal/starter/starter.go
index 3ad0a74..0849d6a 100644
--- a/services/device/deviced/internal/starter/starter.go
+++ b/services/device/deviced/internal/starter/starter.go
@@ -173,16 +173,30 @@
 	}
 	var epName string
 	if args.Device.ListenSpec.Proxy != "" {
-		for {
-			p := server.Status().Proxies
-			if len(p) == 0 {
+		if os.Getenv("V23_RPC_TRANSITION_STATE") == "xservers" {
+			for {
+				eps := server.Status().Endpoints
+				if len(eps) > 0 && len(eps[0].Addr().Network()) > 0 {
+					epName = eps[0].Name()
+					ctx.Infof("Proxied address: %s", epName)
+					break
+				}
 				ctx.Infof("Waiting for proxy address to appear...")
 				time.Sleep(time.Second)
-				continue
 			}
-			epName = p[0].Endpoint.Name()
-			ctx.Infof("Proxied address: %s", epName)
-			break
+		} else {
+			// TODO(suharshs): Remove this else block once the transition is complete.
+			for {
+				p := server.Status().Proxies
+				if len(p) == 0 {
+					ctx.Infof("Waiting for proxy address to appear...")
+					time.Sleep(time.Second)
+					continue
+				}
+				epName = p[0].Endpoint.Name()
+				ctx.Infof("Proxied address: %s", epName)
+				break
+			}
 		}
 	} else {
 		if len(endpoints) == 0 {
diff --git a/services/device/dmrun/backend/backend.go b/services/device/dmrun/backend/backend.go
new file mode 100644
index 0000000..da8ca3e
--- /dev/null
+++ b/services/device/dmrun/backend/backend.go
@@ -0,0 +1,37 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package backend
+
+import "fmt"
+
+type CloudVM interface {
+	// Name of the VM instance that the object talks to
+	Name() string
+
+	// IP address (as a string) of the VM instance
+	IP() string
+
+	// Execute a command on the VM instance
+	RunCommand(...string) (output []byte, err error)
+
+	// Copy a file to the VM instance
+	CopyFile(infile, destination string) error
+
+	// Delete the VM instance
+	Delete() error
+
+	// Provide the command that the user can use to delete a VM instance for which Delete()
+	// was not called
+	DeleteCommandForUser() string
+}
+
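+// CreateCloudVM creates a new VM instance with the given name, selecting the
+// backend based on the concrete type of options.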
+func CreateCloudVM(instanceName string, options interface{}) (CloudVM, error) {
+	switch options.(type) {
+	default:
+		return nil, fmt.Errorf("unknown options type %T", options)
+	case VcloudVMOptions:
+		return newVcloudVM(instanceName, options.(VcloudVMOptions))
+	}
+}
diff --git a/services/device/dmrun/backend/backend_vcloud.go b/services/device/dmrun/backend/backend_vcloud.go
new file mode 100644
index 0000000..38a68b4
--- /dev/null
+++ b/services/device/dmrun/backend/backend_vcloud.go
@@ -0,0 +1,114 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package backend
+
+import (
+	"fmt"
+	"net"
+	"os/exec"
+	"strings"
+)
+
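+// VcloudVM is a CloudVM implementation backed by the "vcloud" command-line
+// tool.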
+type VcloudVM struct {
+	vcloud              string // path to vcloud command
+	sshUser             string // ssh into the VM as this user
+	projectArg, zoneArg string // common flags used with the vcloud command
+	name, ip            string
+	isDeleted           bool
+}
+
+type VcloudVMOptions struct {
+	VcloudBinary string // path to the "vcloud" command
+}
+
+func newVcloudVM(instanceName string, opt VcloudVMOptions) (vm *VcloudVM, err error) {
+	// TODO: Make sshUser, zone, and project configurable
+	g := &VcloudVM{
+		vcloud:     opt.VcloudBinary,
+		sshUser:    "veyron",
+		projectArg: "--project=google.com:veyron",
+		zoneArg:    "--zone=us-central1-c",
+		isDeleted:  false,
+	}
+
+	cmd := exec.Command(g.vcloud, "node", "create", g.projectArg, g.zoneArg, instanceName)
+	if output, err := cmd.CombinedOutput(); err != nil {
+		return nil, fmt.Errorf("setting up new GCE instance (%v) failed. Error: (%v) Output:\n%v", strings.Join(cmd.Args, " "), err, string(output))
+	}
+
+	cmd = exec.Command(g.vcloud, "list", g.projectArg, "--noheader", "--fields=EXTERNAL_IP", instanceName)
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		return nil, fmt.Errorf("listing instances (%v) failed. Error: (%v) Output:\n%v", strings.Join(cmd.Args, " "), err, string(output))
+	}
+	tmpIP := strings.TrimSpace(string(output))
+	if net.ParseIP(tmpIP) == nil {
+		return nil, fmt.Errorf("IP of new instance is not a valid IP address: %v", tmpIP)
+	}
+	g.ip = tmpIP
+	g.name = instanceName
+	return g, nil
+}
+
+func (g *VcloudVM) Delete() error {
+	if g.isDeleted {
+		return fmt.Errorf("trying to delete a deleted VcloudVM")
+	}
+
+	cmd := exec.Command(g.vcloud, "node", "delete", g.projectArg, g.zoneArg, g.name)
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		err = fmt.Errorf("failed deleting GCE instance (%s): %v\nOutput:%v\n", strings.Join(cmd.Args, " "), err, string(output))
+	} else {
+		g.isDeleted = true
+		g.name = ""
+		g.ip = ""
+	}
+	return err
+}
+
+func (g *VcloudVM) Name() string {
+	return g.name
+}
+
+func (g *VcloudVM) IP() string {
+	return g.ip
+}
+
+func (g *VcloudVM) RunCommand(args ...string) ([]byte, error) {
+	if g.isDeleted {
+		return nil, fmt.Errorf("RunCommand called on deleted VcloudVM")
+	}
+
+	cmd := exec.Command(g.vcloud, append([]string{"sh", g.projectArg, g.name}, args...)...)
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		err = fmt.Errorf("failed running [%s] on VM %s", strings.Join(args, " "), g.name)
+	}
+	return output, err
+}
+
+func (g *VcloudVM) CopyFile(infile, destination string) error {
+	if g.isDeleted {
+		return fmt.Errorf("CopyFile called on deleted VcloudVM")
+	}
+
+	cmd := exec.Command("gcloud", "compute", g.projectArg, "copy-files", infile, fmt.Sprintf("%s@%s:/%s", g.sshUser, g.Name(), destination), g.zoneArg)
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		err = fmt.Errorf("failed copying %s to %s:%s - %v\nOutput:\n%v", infile, g.name, destination, err, string(output))
+	}
+	return err
+}
+
+func (g *VcloudVM) DeleteCommandForUser() string {
+	if g.isDeleted {
+		return ""
+	}
+
+	// We can't return the vcloud binary that we ran for the steps above, as that one is deleted
+	// after use. For now, we assume the user will have a vcloud binary on their path.
+	return strings.Join([]string{"vcloud", "node", "delete", g.projectArg, g.zoneArg, g.name}, " ")
+}
diff --git a/services/device/dmrun/dmrun.go b/services/device/dmrun/dmrun.go
index d816542..d722b58 100644
--- a/services/device/dmrun/dmrun.go
+++ b/services/device/dmrun/dmrun.go
@@ -2,9 +2,9 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Command dmrun runs a binary on a remote GCE instance using device manager.
+// Command dmrun runs a binary on a remote VM instance using device manager.
 //
-// dmrun creates the GCE instance, installs and starts device manager on it, and
+// dmrun creates the VM instance, installs and starts device manager on it, and
 // then installs and starts an app from the specified binary.
 //
 // dmrun uses the credentials it is running with in order to claim the device
@@ -35,12 +35,14 @@
 	"time"
 
 	"v.io/x/ref"
+	"v.io/x/ref/services/device/dmrun/backend"
 )
 
 var (
 	workDir        string
 	vcloud         string
 	device         string
+	vm             backend.CloudVM
 	cleanupOnDeath func()
 )
 
@@ -148,39 +150,28 @@
 	return zipFile
 }
 
-// setupInstance creates a new GCE instance and returns its name and IP address.
-func setupInstance() (string, string) {
+// setupInstance creates a new VM instance and returns its name and IP address.
+func setupInstance(vmOptions interface{}) (backend.CloudVM, string, string) {
 	currUser, err := user.Current()
 	dieIfErr(err, "Couldn't obtain current user")
 	instanceName := fmt.Sprintf("%s-%s", currUser.Username, time.Now().UTC().Format("20060102-150405"))
-	// TODO(caprita): Allow project and zone to be customized.
-	cmd := exec.Command(vcloud, "node", "create", "--project=google.com:veyron", "--zone=us-central1-c", instanceName)
-	output, err := cmd.CombinedOutput()
-	dieIfErr(err, "Setting up new GCE instance (%v) failed. Output:\n%v", strings.Join(cmd.Args, " "), string(output))
-	cmd = exec.Command(vcloud, "list", "--project=google.com:veyron", "--noheader", "--fields=EXTERNAL_IP", instanceName)
-	output, err = cmd.CombinedOutput()
-	dieIfErr(err, "Listing instances (%v) failed. Output:\n%v", strings.Join(cmd.Args, " "), string(output))
-	instanceIP := strings.TrimSpace(string(output))
-	if net.ParseIP(instanceIP) == nil {
-		die("Not a valid IP address: %v", instanceIP)
-	}
+	vm, err = backend.CreateCloudVM(instanceName, vmOptions)
+	dieIfErr(err, "VM Instance Creation Failed: %v", err)
+	instanceIP := vm.IP()
 	// Install unzip so we can unpack the archive.
 	// TODO(caprita): Use tar instead.
-	cmd = exec.Command(vcloud, "sh", "--project=google.com:veyron", instanceName, "sudo", "apt-get", "install", "unzip")
-	output, err = cmd.CombinedOutput()
-	dieIfErr(err, "Installing unzip (%v) failed. Output:\n%v", strings.Join(cmd.Args, " "), string(output))
-	fmt.Println("Created GCE instance", instanceName, "with IP", instanceIP)
-	return instanceName, instanceIP
+	output, err := vm.RunCommand("sudo", "apt-get", "install", "unzip")
+	dieIfErr(err, "Installing unzip failed. Output:\n%v", string(output))
+	fmt.Println("Created VM instance", instanceName, "with IP", instanceIP)
+	return vm, instanceName, instanceIP
 }
 
-// installArchive ships the archive to the GCE instance and unpacks it.
+// installArchive ships the archive to the VM instance and unpacks it.
 func installArchive(archive, instance string) {
-	cmd := exec.Command("gcloud", "compute", "--project=google.com:veyron", "copy-files", archive, fmt.Sprintf("veyron@%s:/tmp/", instance), "--zone=us-central1-c")
-	output, err := cmd.CombinedOutput()
-	dieIfErr(err, "Copying archive (%v) failed. Output:\n%v", strings.Join(cmd.Args, " "), string(output))
-	cmd = exec.Command(vcloud, "sh", "--project=google.com:veyron", instance, "unzip", path.Join("/tmp", filepath.Base(archive)), "-d", "/tmp/unpacked")
-	output, err = cmd.CombinedOutput()
-	dieIfErr(err, "Extracting archive (%v) failed. Output:\n%v", strings.Join(cmd.Args, " "), string(output))
+	err := vm.CopyFile(archive, "/tmp/")
+	dieIfErr(err, "Copying archive failed: %v", err)
+	output, err := vm.RunCommand("unzip", path.Join("/tmp", filepath.Base(archive)), "-d", "/tmp/unpacked")
+	dieIfErr(err, "Extracting archive failed. Output:\n%v", string(output))
 }
 
 // installDevice installs and starts device manager, and returns the public key
@@ -188,12 +179,10 @@
 func installDevice(instance string) (string, string) {
 	fmt.Println("Installing device manager...")
 	defer fmt.Println("Done installing device manager...")
-	cmd := exec.Command(vcloud, "sh", "--project=google.com:veyron", instance, "V23_DEVICE_DIR=/tmp/dm", "/tmp/unpacked/devicex", "install", "/tmp/unpacked", "--single_user", "--", "--v23.tcp.address=:8151", "--deviced-port=8150", "--proxy-port=8160", "--use-pairing-token")
-	output, err := cmd.CombinedOutput()
-	dieIfErr(err, "Installing device manager (%v) failed. Output:\n%v", strings.Join(cmd.Args, " "), string(output))
-	cmd = exec.Command(vcloud, "sh", "--project=google.com:veyron", instance, "V23_DEVICE_DIR=/tmp/dm", "/tmp/unpacked/devicex", "start")
-	output, err = cmd.CombinedOutput()
-	dieIfErr(err, "Starting device manager (%v) failed. Output:\n%v", strings.Join(cmd.Args, " "), string(output))
+	output, err := vm.RunCommand("V23_DEVICE_DIR=/tmp/dm", "/tmp/unpacked/devicex", "install", "/tmp/unpacked", "--single_user", "--", "--v23.tcp.address=:8151", "--deviced-port=8150", "--proxy-port=8160", "--use-pairing-token")
+	dieIfErr(err, "Installing device manager failed. Output:\n%v", string(output))
+	output, err = vm.RunCommand("V23_DEVICE_DIR=/tmp/dm", "/tmp/unpacked/devicex", "start")
+	dieIfErr(err, "Starting device manager failed. Output:\n%v", string(output))
 	// Grab the token and public key from the device manager log.
 	dieAfter := time.After(5 * time.Second)
 	firstIteration := true
@@ -207,9 +196,8 @@
 		} else {
 			firstIteration = false
 		}
-		cmd = exec.Command(vcloud, "sh", "--project=google.com:veyron", instance, "cat", "/tmp/dm/dmroot/device-manager/logs/deviced.INFO")
-		output, err = cmd.CombinedOutput()
-		dieIfErr(err, "Reading device manager log (%v) failed. Output:\n%v", strings.Join(cmd.Args, " "), string(output))
+		output, err = vm.RunCommand("cat", "/tmp/dm/dmroot/device-manager/logs/deviced.INFO")
+		dieIfErr(err, "Reading device manager log failed. Output:\n%v", string(output))
 		pairingTokenRE := regexp.MustCompile("Device manager pairing token: (.*)")
 		matches := pairingTokenRE.FindSubmatch(output)
 		if matches == nil {
@@ -301,21 +289,21 @@
 	device = buildV23Binary(deviceBin)
 	dmBins := buildDMBinaries()
 	archive := createArchive(append(dmBins, getPath(devicexRepo, devicex)))
-	gceInstanceName, gceInstanceIP := setupInstance()
+	vmOpts := backend.VcloudVMOptions{VcloudBinary: vcloud}
+	vm, vmInstanceName, vmInstanceIP := setupInstance(vmOpts)
 	cleanupOnDeath = func() {
-		fmt.Fprintf(os.Stderr, "Deleting GCE instance ...\n")
-		cmd := exec.Command(vcloud, "node", "delete", "--project=google.com:veyron", "--zone=us-central1-c", gceInstanceName)
-		output, err := cmd.CombinedOutput()
+		fmt.Fprintf(os.Stderr, "Deleting VM instance ...\n")
+		err := vm.Delete()
 		fmt.Fprintf(os.Stderr, "Removing tmp files ...\n")
 		os.RemoveAll(workDir)
-		dieIfErr(err, "Deleting GCE instance (%v) failed. Output:\n%v", strings.Join(cmd.Args, " "), string(output))
+		dieIfErr(err, "Deleting VM instance failed")
 	}
-	installArchive(archive, gceInstanceName)
-	publicKey, pairingToken := installDevice(gceInstanceName)
-	deviceAddr := net.JoinHostPort(gceInstanceIP, "8150")
+	installArchive(archive, vmInstanceName)
+	publicKey, pairingToken := installDevice(vmInstanceName)
+	deviceAddr := net.JoinHostPort(vmInstanceIP, "8150")
 	deviceName := "/" + deviceAddr
-	claimDevice(deviceName, gceInstanceIP, publicKey, pairingToken, gceInstanceName)
-	installationName := installApp(deviceName, gceInstanceIP)
+	claimDevice(deviceName, vmInstanceIP, publicKey, pairingToken, vmInstanceName)
+	installationName := installApp(deviceName, vmInstanceIP)
 	instanceName := startApp(installationName, "app")
 	fmt.Println("Launched app.")
 	fmt.Println("-------------")
@@ -325,6 +313,6 @@
 	fmt.Printf("\t${V23_ROOT}/release/go/bin/debug glob %s/logs/*\n", instanceName)
 	fmt.Println("Dump e.g. the INFO log:")
 	fmt.Printf("\t${V23_ROOT}/release/go/bin/debug logs read %s/logs/app.INFO\n", instanceName)
-	fmt.Println("Clean up by deleting the GCE instance:")
-	fmt.Printf("\t${V23_ROOT}/release/go/bin/vcloud node delete --project=google.com:veyron --zone=us-central1-c %s\n", gceInstanceName)
+	fmt.Println("Clean up by deleting the VM instance:")
+	fmt.Printf("\t%s\n", vm.DeleteCommandForUser())
 }
diff --git a/services/groups/groupsd/main.go b/services/groups/groupsd/main.go
index 401ef1c..ba6b33b 100644
--- a/services/groups/groupsd/main.go
+++ b/services/groups/groupsd/main.go
@@ -11,21 +11,14 @@
 
 import (
 	"fmt"
-	"strings"
 
 	"v.io/v23"
 	"v.io/v23/context"
-	"v.io/v23/conventions"
-	"v.io/v23/rpc"
-	"v.io/v23/security"
-	"v.io/v23/verror"
 	"v.io/x/lib/cmdline"
 	"v.io/x/ref/lib/signals"
 	"v.io/x/ref/lib/v23cmd"
 	_ "v.io/x/ref/runtime/factories/roaming"
-	"v.io/x/ref/services/groups/internal/server"
-	"v.io/x/ref/services/groups/internal/store/leveldb"
-	"v.io/x/ref/services/groups/internal/store/mem"
+	"v.io/x/ref/services/groups/lib"
 )
 
 var (
@@ -33,8 +26,6 @@
 	flagEngine  string
 	flagRootDir string
 	flagPersist string
-
-	errNotAuthorizedToCreate = verror.Register("v.io/x/ref/services/groups/groupsd.errNotAuthorizedToCreate", verror.NoRetry, "{1} {2} Creator user ids {3} are not authorized to create group {4}, group name must begin with one of the user ids")
 )
 
 func main() {
@@ -46,29 +37,6 @@
 	cmdline.Main(cmdGroupsD)
 }
 
-// Authorizer implementing the authorization policy for Create operations.
-//
-// A user is allowed to create any group that begins with the user id.
-//
-// TODO(ashankar): This is experimental use of the "conventions" API and of a
-// creation policy. This policy was thought of in a 5 minute period. Think
-// about this more!
-type createAuthorizer struct{}
-
-func (createAuthorizer) Authorize(ctx *context.T, call security.Call) error {
-	userids := conventions.GetClientUserIds(ctx, call)
-	for _, uid := range userids {
-		if strings.HasPrefix(call.Suffix(), uid+"/") {
-			return nil
-		}
-	}
-	// Revert to the default authorization policy.
-	if err := security.DefaultAuthorizer().Authorize(ctx, call); err == nil {
-		return nil
-	}
-	return verror.New(errNotAuthorizedToCreate, ctx, userids, call.Suffix())
-}
-
 var cmdGroupsD = &cmdline.Command{
 	Runner: v23cmd.RunnerFunc(runGroupsD),
 	Name:   "groupsd",
@@ -80,18 +48,9 @@
 }
 
 func runGroupsD(ctx *context.T, env *cmdline.Env, args []string) error {
-	var dispatcher rpc.Dispatcher
-	switch flagEngine {
-	case "leveldb":
-		store, err := leveldb.Open(flagRootDir)
-		if err != nil {
-			ctx.Fatalf("Open(%v) failed: %v", flagRootDir, err)
-		}
-		dispatcher = server.NewManager(store, createAuthorizer{})
-	case "memstore":
-		dispatcher = server.NewManager(mem.New(), createAuthorizer{})
-	default:
-		return fmt.Errorf("unknown storage engine %v", flagEngine)
+	dispatcher, err := lib.NewGroupsDispatcher(flagRootDir, flagEngine)
+	if err != nil {
+		return err
 	}
 	ctx, server, err := v23.WithNewDispatchingServer(ctx, flagName, dispatcher)
 	if err != nil {
diff --git a/services/groups/lib/dispatcher.go b/services/groups/lib/dispatcher.go
new file mode 100644
index 0000000..ce3f45b
--- /dev/null
+++ b/services/groups/lib/dispatcher.go
@@ -0,0 +1,67 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lib
+
+import (
+	"fmt"
+	"strings"
+
+	"v.io/v23/context"
+	"v.io/v23/conventions"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+	"v.io/x/ref/services/groups/internal/server"
+	"v.io/x/ref/services/groups/internal/store/leveldb"
+	"v.io/x/ref/services/groups/internal/store/mem"
+)
+
+var (
+	errNotAuthorizedToCreate = verror.Register("v.io/x/ref/services/groups/groupsd.errNotAuthorizedToCreate", verror.NoRetry, "{1} {2} Creator user ids {3} are not authorized to create group {4}, group name must begin with one of the user ids")
+)
+
+// Authorizer implementing the authorization policy for Create operations.
+//
+// A user is allowed to create any group that begins with the user id.
+//
+// TODO(ashankar): This is experimental use of the "conventions" API and of a
+// creation policy. This policy was thought of in a 5 minute period. Think
+// about this more!
+type createAuthorizer struct{}
+
+func (createAuthorizer) Authorize(ctx *context.T, call security.Call) error {
+	userids := conventions.GetClientUserIds(ctx, call)
+	for _, uid := range userids {
+		if strings.HasPrefix(call.Suffix(), uid+"/") {
+			return nil
+		}
+	}
+	// Revert to the default authorization policy.
+	if err := security.DefaultAuthorizer().Authorize(ctx, call); err == nil {
+		return nil
+	}
+	return verror.New(errNotAuthorizedToCreate, ctx, userids, call.Suffix())
+}
+
+// NewGroupsDispatcher creates a new dispatcher for the groups service.
+//
+// rootDir is the directory for persisting groups.
+//
+// engine is the storage engine for groups.  Currently, only "leveldb" and
+// "memstore" are supported.
+func NewGroupsDispatcher(rootDir, engine string) (rpc.Dispatcher, error) {
+	switch engine {
+	case "leveldb":
+		store, err := leveldb.Open(rootDir)
+		if err != nil {
+			return nil, fmt.Errorf("Open(%v) failed: %v", rootDir, err)
+		}
+		return server.NewManager(store, createAuthorizer{}), nil
+	case "memstore":
+		return server.NewManager(mem.New(), createAuthorizer{}), nil
+	default:
+		return nil, fmt.Errorf("unknown storage engine %v", engine)
+	}
+}
diff --git a/services/identity/README.md b/services/identity/README.md
index 37f5e2c..6815464 100644
--- a/services/identity/README.md
+++ b/services/identity/README.md
@@ -3,4 +3,4 @@
 This package and its sub-packages implement the identity service
 at https://dev.v.io/auth.
 
-The design is described in https://v.io/designdocs/identity-service.html
+The design is described in https://github.com/vanadium/docs/blob/master/designdocs/identity-service.md
diff --git a/services/identity/identityd/doc.go b/services/identity/identityd/doc.go
index 2ecc2a2..e2bfac9 100644
--- a/services/identity/identityd/doc.go
+++ b/services/identity/identityd/doc.go
@@ -22,7 +22,7 @@
   https://developers.google.com/accounts/docs/OAuth2Login
 
 More details on the design of identityd at:
-  https://v.io/designdocs/identity-service.html
+  https://github.com/vanadium/docs/blob/master/designdocs/identity-service.md
 
 Usage:
    identityd [flags]
diff --git a/services/identity/identityd/main.go b/services/identity/identityd/main.go
index 3e05bef..415b382 100644
--- a/services/identity/identityd/main.go
+++ b/services/identity/identityd/main.go
@@ -76,7 +76,7 @@
   https://developers.google.com/accounts/docs/OAuth2Login
 
 More details on the design of identityd at:
-  https://v.io/designdocs/identity-service.html
+  https://github.com/vanadium/docs/blob/master/designdocs/identity-service.md
 `,
 }
 
diff --git a/services/identity/internal/templates/caveats.go b/services/identity/internal/templates/caveats.go
index 79baa0c..51ff8c4 100644
--- a/services/identity/internal/templates/caveats.go
+++ b/services/identity/internal/templates/caveats.go
@@ -50,7 +50,7 @@
             Using Vanadium in production applications is discouraged at this
           time.</strong><br>
           During this preview, the
-          <a href="https://v.io/glossary.html#blessing-root" target="_">
+          <a href="https://github.com/vanadium/docs/blob/master/glossary.md#blessing-root" target="_">
             blessing root
           </a>
           may change without notice.
diff --git a/services/identity/internal/templates/home.go b/services/identity/internal/templates/home.go
index 03d8d67..96abd5e 100644
--- a/services/identity/internal/templates/home.go
+++ b/services/identity/internal/templates/home.go
@@ -23,7 +23,7 @@
     <h1 class="page-head">Authorize Vanadium apps with Google</h1>
     <p>
       The Vanadium Identity Provider authorizes Vanadium blessings based on your Google Account.<br>
-      <a href="http://v.io/glossary.html#identity-provider">Learn more</a>
+      <a href="https://github.com/vanadium/docs/blob/master/glossary.md#identity-provider">Learn more</a>
     </p>
     <p>
       <a href="/auth/google/{{.ListBlessingsRoute}}" class="button-passive">
diff --git a/services/identity/internal/templates/list_blessings.go b/services/identity/internal/templates/list_blessings.go
index 481f0a6..c653c8f 100644
--- a/services/identity/internal/templates/list_blessings.go
+++ b/services/identity/internal/templates/list_blessings.go
@@ -24,7 +24,7 @@
     <h1 class="page-head">Authorize Vanadium apps with Google</h1>
     <p>
       The Vanadium Identity Provider authorizes Vanadium blessings based on your Google Account.<br>
-      <a href="http://v.io/glossary.html#identity-provider">Learn more</a>
+      <a href="https://github.com/vanadium/docs/blob/master/glossary.md#identity-provider">Learn more</a>
     </p>
 
     <div class="blessings-list">
@@ -69,7 +69,7 @@
       {{end}} {{/* if .Error */}}
     {{else}} {{/* range .Log */}}
       <p>
-        <a href="http://v.io/installation">Install Vanadium</a> to set up your first blessing.
+        <a href="https://github.com/vanadium/docs/blob/master/installation.md">Install Vanadium</a> to set up your first blessing.
       </p>
     {{end}} {{/* range .Log */}}
     </div>
diff --git a/services/identity/internal/templates/partials.go b/services/identity/internal/templates/partials.go
index d2a8d11..dfde489 100644
--- a/services/identity/internal/templates/partials.go
+++ b/services/identity/internal/templates/partials.go
@@ -107,9 +107,8 @@
   <div class="provider-info-section">
     <h5>Learn more</h5>
     <p>
-    Vanadium Concepts: <a href="https://v.io/concepts/security.html">Security</a><br>
-    <a href="https://v.io/tutorials/security">Tutorials</a><br>
-    <a href="https://v.io/tools/identity-service-faq.html">FAQ</a><br>
+    Vanadium Concepts: <a href="https://github.com/vanadium/docs/blob/master/concepts/security.md">Security</a><br>
+    <a href="https://github.com/vanadium/docs/blob/master/tools/identity-service-faq.md">FAQ</a><br>
     </p>
   </div>
 </section>
diff --git a/services/syncbase/server/interfaces/sync_types.vdl b/services/syncbase/server/interfaces/sync_types.vdl
index 65a7f32..849e8ec 100644
--- a/services/syncbase/server/interfaces/sync_types.vdl
+++ b/services/syncbase/server/interfaces/sync_types.vdl
@@ -58,6 +58,9 @@
         CurVers     string      // current version number of the object.
         Parents     []string    // 0, 1 or 2 parent versions that the current version is derived from.
 	UpdTime     time.Time   // timestamp when the update is generated.
+	PermId      string      // id of the permissions object controlling this version.
+	PermVers    string      // current version of the permissions object.
+	Shell       bool        // true when the mutation data is hidden due to permissions.
 	Delete      bool        // indicates whether the update resulted in object being deleted from the store.
 	BatchId     uint64      // unique id of the Batch this update belongs to.
 	BatchCount  uint64      // number of objects in the Batch.
diff --git a/services/syncbase/server/interfaces/sync_types.vdl.go b/services/syncbase/server/interfaces/sync_types.vdl.go
index ca29abc..a00591c 100644
--- a/services/syncbase/server/interfaces/sync_types.vdl.go
+++ b/services/syncbase/server/interfaces/sync_types.vdl.go
@@ -56,6 +56,9 @@
 	CurVers    string    // current version number of the object.
 	Parents    []string  // 0, 1 or 2 parent versions that the current version is derived from.
 	UpdTime    time.Time // timestamp when the update is generated.
+	PermId     string    // id of the permissions object controlling this version.
+	PermVers   string    // current version of the permissions object.
+	Shell      bool      // true when the mutation data is hidden due to permissions.
 	Delete     bool      // indicates whether the update resulted in object being deleted from the store.
 	BatchId    uint64    // unique id of the Batch this update belongs to.
 	BatchCount uint64    // number of objects in the Batch.
diff --git a/services/syncbase/server/watchable/types.vdl b/services/syncbase/server/watchable/types.vdl
index 3f5181b..984a8df 100644
--- a/services/syncbase/server/watchable/types.vdl
+++ b/services/syncbase/server/watchable/types.vdl
@@ -17,15 +17,23 @@
 
 // PutOp represents a store put operation.  The new version is written instead
 // of the value to avoid duplicating the user data in the store.  The version
-// is used to access the user data of that specific mutation.
+// is used to access the user data of that specific mutation.  The key and the
+// version of the permissions entry that was checked to allow this put operation
+// are also tracked to secure access to this history.
 type PutOp struct {
-	Key     []byte
-	Version []byte
+	Key         []byte
+	Version     []byte
+	PermKey     []byte
+	PermVersion []byte
 }
 
-// DeleteOp represents a store delete operation.
+// DeleteOp represents a store delete operation.  The key and the version of the
+// permissions entry that was checked to allow this delete operation are also
+// tracked to secure access to this history.
 type DeleteOp struct {
-	Key []byte
+	Key         []byte
+	PermKey     []byte
+	PermVersion []byte
 }
 
 // SyncGroupOp represents a change in SyncGroup tracking, adding or removing
@@ -43,9 +51,13 @@
 // allows sync to initialize its metadata at the correct versions of the objects
 // when they become syncable.  These log entries should be filtered by the
 // client-facing Watch interface because the user data did not actually change.
+// The key and the version of the permissions entry that was checked when the
+// key was accessed are also tracked to secure access to this history.
 type SyncSnapshotOp struct {
-	Key     []byte
-	Version []byte
+	Key         []byte
+	Version     []byte
+	PermKey     []byte
+	PermVersion []byte
 }
 
 // Op represents a store operation.
diff --git a/services/syncbase/server/watchable/types.vdl.go b/services/syncbase/server/watchable/types.vdl.go
index 71c4b90..f4114fa 100644
--- a/services/syncbase/server/watchable/types.vdl.go
+++ b/services/syncbase/server/watchable/types.vdl.go
@@ -35,10 +35,14 @@
 
 // PutOp represents a store put operation.  The new version is written instead
 // of the value to avoid duplicating the user data in the store.  The version
-// is used to access the user data of that specific mutation.
+// is used to access the user data of that specific mutation.  The key and the
+// version of the permissions entry that was checked to allow this put operation
+// are also tracked to secure access to this history.
 type PutOp struct {
-	Key     []byte
-	Version []byte
+	Key         []byte
+	Version     []byte
+	PermKey     []byte
+	PermVersion []byte
 }
 
 func (PutOp) __VDLReflect(struct {
@@ -46,9 +50,13 @@
 }) {
 }
 
-// DeleteOp represents a store delete operation.
+// DeleteOp represents a store delete operation.  The key and the version of the
+// permissions entry that was checked to allow this delete operation are also
+// tracked to secure access to this history.
 type DeleteOp struct {
-	Key []byte
+	Key         []byte
+	PermKey     []byte
+	PermVersion []byte
 }
 
 func (DeleteOp) __VDLReflect(struct {
@@ -76,9 +84,13 @@
 // allows sync to initialize its metadata at the correct versions of the objects
 // when they become syncable.  These log entries should be filtered by the
 // client-facing Watch interface because the user data did not actually change.
+// The key and the version of the permissions entry that was checked when the
+// key was accessed are also tracked to secure access to this history.
 type SyncSnapshotOp struct {
-	Key     []byte
-	Version []byte
+	Key         []byte
+	Version     []byte
+	PermKey     []byte
+	PermVersion []byte
 }
 
 func (SyncSnapshotOp) __VDLReflect(struct {
diff --git a/services/syncbase/store/ptrie/benchmark_test.go b/services/syncbase/store/ptrie/benchmark_test.go
new file mode 100644
index 0000000..a24fc0d
--- /dev/null
+++ b/services/syncbase/store/ptrie/benchmark_test.go
@@ -0,0 +1,171 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Compare ptrie with map[string]interface{}.
+// The benchmark was executed on random 16 byte keys.
+//
+// Results (v23 go test v.io/x/ref/services/syncbase/store/ptrie -bench . -benchtime 0.1s -run Benchmark):
+// map:
+// BenchmarkMapPut-12        	 1000000	       655 ns/op
+// BenchmarkMapOverwrite-12  	 1000000	       186 ns/op
+// BenchmarkMapDelete-12     	 1000000	       111 ns/op
+// BenchmarkMapGet-12        	 2000000	        91.5 ns/op
+// BenchmarkMapScan-12       	10000000	        19.9 ns/op
+//
+// ptrie with copyOnWrite = false
+// BenchmarkPtriePut-12      	  200000	      1742 ns/op
+// BenchmarkPtrieOverwrite-12	  200000	      1596 ns/op
+// BenchmarkPtrieDelete-12   	  200000	      1557 ns/op
+// BenchmarkPtrieGet-12      	  300000	      1473 ns/op
+// BenchmarkPtrieScan-12     	  300000	       940 ns/op
+//
+// ptrie with copyOnWrite = true
+// BenchmarkPtriePut-12      	   50000	      4026 ns/op
+// BenchmarkPtrieOverwrite-12	   50000	      4015 ns/op
+// BenchmarkPtrieDelete-12   	   50000	      3367 ns/op
+// BenchmarkPtrieGet-12      	  300000	      1207 ns/op
+// BenchmarkPtrieScan-12     	  200000	       879 ns/op
+package ptrie
+
+import (
+	"math/rand"
+	"testing"
+)
+
+const (
+	keyLength   = 16
+	seed        = 23917
+	copyOnWrite = false
+)
+
+func randomKey(r *rand.Rand) []byte {
+	key := make([]byte, keyLength)
+	for i := 0; i < keyLength; i++ {
+		key[i] = byte(r.Intn(256))
+	}
+	return key
+}
+
+func generatePtrieKeys(b *testing.B) [][]byte {
+	keys := make([][]byte, b.N)
+	r := rand.New(rand.NewSource(seed))
+	for i := 0; i < b.N; i++ {
+		keys[i] = randomKey(r)
+	}
+	return keys
+}
+
+func generateMapKeys(b *testing.B) []string {
+	keys := make([]string, b.N)
+	r := rand.New(rand.NewSource(seed))
+	for i := 0; i < b.N; i++ {
+		keys[i] = string(randomKey(r))
+	}
+	return keys
+}
+
+func fillPtrie(b *testing.B, t *T, keys [][]byte) {
+	for i := 0; i < b.N; i++ {
+		t.Put(keys[i], true)
+	}
+}
+
+func fillMap(b *testing.B, m map[string]interface{}, keys []string) {
+	for i := 0; i < b.N; i++ {
+		m[keys[i]] = true
+	}
+}
+
+func BenchmarkPtriePut(b *testing.B) {
+	keys := generatePtrieKeys(b)
+	t := New(copyOnWrite)
+	b.ResetTimer()
+	fillPtrie(b, t, keys)
+}
+
+func BenchmarkMapPut(b *testing.B) {
+	keys := generateMapKeys(b)
+	m := make(map[string]interface{})
+	b.ResetTimer()
+	fillMap(b, m, keys)
+}
+
+func BenchmarkPtrieOverwrite(b *testing.B) {
+	keys := generatePtrieKeys(b)
+	t := New(copyOnWrite)
+	fillPtrie(b, t, keys)
+	b.ResetTimer()
+	fillPtrie(b, t, keys)
+}
+
+func BenchmarkMapOverwrite(b *testing.B) {
+	keys := generateMapKeys(b)
+	m := make(map[string]interface{})
+	fillMap(b, m, keys)
+	b.ResetTimer()
+	fillMap(b, m, keys)
+}
+
+func BenchmarkPtrieDelete(b *testing.B) {
+	keys := generatePtrieKeys(b)
+	t := New(copyOnWrite)
+	fillPtrie(b, t, keys)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		t.Delete(keys[i])
+	}
+}
+
+func BenchmarkMapDelete(b *testing.B) {
+	keys := generateMapKeys(b)
+	m := make(map[string]interface{})
+	fillMap(b, m, keys)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		delete(m, keys[i])
+	}
+}
+
+func BenchmarkPtrieGet(b *testing.B) {
+	keys := generatePtrieKeys(b)
+	t := New(copyOnWrite)
+	fillPtrie(b, t, keys)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		_ = t.Get(keys[i])
+	}
+}
+
+func BenchmarkMapGet(b *testing.B) {
+	keys := generateMapKeys(b)
+	m := make(map[string]interface{})
+	fillMap(b, m, keys)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		_ = m[keys[i]]
+	}
+}
+
+func BenchmarkPtrieScan(b *testing.B) {
+	keys := generatePtrieKeys(b)
+	t := New(copyOnWrite)
+	fillPtrie(b, t, keys)
+	b.ResetTimer()
+	s := t.Scan(nil, nil)
+	for i := 0; i < b.N; i++ {
+		s.Advance()
+	}
+	if s.Advance() {
+		b.Fatal("scan stream advanced past the b.N inserted keys")
+	}
+}
+
+func BenchmarkMapScan(b *testing.B) {
+	keys := generateMapKeys(b)
+	m := make(map[string]interface{})
+	fillMap(b, m, keys)
+	b.ResetTimer()
+	for range m {
+	}
+}
diff --git a/services/syncbase/store/ptrie/ptrie.go b/services/syncbase/store/ptrie/ptrie.go
index a3e5be4..c445ad9 100644
--- a/services/syncbase/store/ptrie/ptrie.go
+++ b/services/syncbase/store/ptrie/ptrie.go
@@ -53,10 +53,6 @@
 // the current one.
 package ptrie
 
-import (
-	"v.io/x/ref/services/syncbase/store"
-)
-
 // T represents a ptrie.
 type T struct {
 	root        *pnode
@@ -164,6 +160,9 @@
 		return nil
 	}
 	newNode, _ := deleteInternal(node, 0, key, makeCopy)
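+	// If the resulting root holds no value and has no children, the ptrie is
+	// empty; represent it with a nil root.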
+	if newNode.value == nil && newNode.child[0] == nil && newNode.child[1] == nil {
+		return nil
+	}
 	return newNode
 }
 
@@ -208,7 +207,7 @@
 	//      o - newChild.node
 	newChild := &pchild{
 		node:   &pnode{value: value},
-		bitstr: store.CopyBytes(nil, key[(bitIndex+lcp)>>3:]),
+		bitstr: copyBytes(key[(bitIndex+lcp)>>3:]),
 		bitlen: bitlen(key) - bitIndex - lcp,
 	}
 	if child == nil {
@@ -224,7 +223,7 @@
 	// Update the child of the node, i.e. the A part.
 	node.child[currBit] = &pchild{
 		node:   middleNode,
-		bitstr: store.CopyBytes(nil, child.bitstr[:((bitIndex&7)+lcp+7)>>3]),
+		bitstr: child.bitstr[:((bitIndex&7)+lcp+7)>>3],
 		bitlen: lcp,
 	}
 	// Pick the first bit on path C. Since C can be empty, we pick the first
@@ -233,11 +232,13 @@
 	// Set the C part only if C is not empty.
 	if bitIndex+lcp < bitlen(key) {
 		middleNode.child[nextBit] = newChild
+	} else {
+		middleNode.value = value
 	}
 	// Set the B part.
 	middleNode.child[nextBit^1] = &pchild{
 		node:   child.node,
-		bitstr: store.CopyBytes(nil, child.bitstr[((bitIndex&7)+lcp)>>3:]),
+		bitstr: child.bitstr[((bitIndex&7)+lcp)>>3:],
 		bitlen: child.bitlen - lcp,
 	}
 	return node
@@ -249,15 +250,17 @@
 // Invariant: the first bitIndex bits of the key represent the path from
 // the root to the current node.
 func getInternal(node *pnode, bitIndex uint32, key []byte) interface{} {
-	if bitlen(key) == bitIndex {
-		return node.value
+	keybitlen := bitlen(key)
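+	// Descend the ptrie iteratively, consuming the longest common prefix of
+	// the remaining key bits at each child.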
+	for keybitlen > bitIndex {
+		child := node.child[getBit(key, bitIndex)]
+		lcp := bitLCP(child, key[bitIndex>>3:], bitIndex&7)
+		if child == nil || lcp != child.bitlen {
+			return nil
+		}
+		bitIndex += lcp
+		node = child.node
 	}
-	child := node.child[getBit(key, bitIndex)]
-	lcp := bitLCP(child, key[bitIndex>>3:], bitIndex&7)
-	if child == nil || lcp != child.bitlen {
-		return nil
-	}
-	return getInternal(child.node, bitIndex+lcp, key)
+	return node.value
 }
 
 // deleteInternal does a DFS through the ptrie to find a node corresponding to
@@ -315,6 +318,7 @@
 			child = newNode.child[1]
 		}
 		node.child[currBit].node = child.node
+		node.child[currBit].bitstr = copyBytes(node.child[currBit].bitstr)
 		node.child[currBit].bitstr = appendBits(node.child[currBit].bitstr, (bitIndex&7)+node.child[currBit].bitlen, child.bitstr)
 		node.child[currBit].bitlen += child.bitlen
 	} else {
diff --git a/services/syncbase/store/ptrie/ptrie_test.go b/services/syncbase/store/ptrie/ptrie_test.go
index e0da1dc..0d4d388 100644
--- a/services/syncbase/store/ptrie/ptrie_test.go
+++ b/services/syncbase/store/ptrie/ptrie_test.go
@@ -2,21 +2,53 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// TODO(rogulenko): Add a benchmark to compare a ptrie with
-// a map[[]byte]interface{}.
 package ptrie
 
 import (
+	"bytes"
+	"math/rand"
+	"reflect"
+	"runtime/debug"
+	"sort"
 	"testing"
 )
 
-// TestPutGetDelete verifies basic functionality of Put/Get/Delete.
-// More Put/Get/Delete/Scan tests can be found in store/memstore.
-// TODO(rogulenko): Add more tests, don't rely on tests in store/memstore.
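+// deepCopy returns a full recursive copy of the ptrie rooted at node,
+// including child bitstrings, so that tests can detect in-place modification
+// of old versions.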
+func deepCopy(node *pnode) *pnode {
+	if node == nil {
+		return nil
+	}
+	result := copyNode(node)
+	for i := 0; i < 2; i++ {
+		if result.child[i] != nil {
+			result.child[i] = copyChild(result.child[i])
+			result.child[i].node = deepCopy(result.child[i].node)
+			result.child[i].bitstr = copyBytes(result.child[i].bitstr)
+		}
+	}
+	return result
+}
+
+// TestPutGetDelete verifies basic functionality of Put/Get/Delete/Scan.
 func TestPutGetDelete(t *testing.T) {
 	data := New(true)
 	data.Put([]byte("a"), "a")
 	data.Put([]byte("ab"), "ab")
+	data.Put([]byte("aaa"), nil)
+	s := data.Scan([]byte("a"), []byte("ab"))
+	if !s.Advance() {
+		t.Fatalf("the stream didn't advance")
+	}
+	if got, want := s.Key(nil), []byte("a"); !bytes.Equal(got, want) {
+		Fatalf(t, "unexpected key: got %q, want %q", got, want)
+	}
+	if got, want := s.Value().(string), "a"; got != want {
+		Fatalf(t, "unexpected value: got %q, want %q", got, want)
+	}
+	for i := 0; i < 2; i++ {
+		if s.Advance() {
+			Fatalf(t, "the stream advanced")
+		}
+	}
 	if got, want := data.Get([]byte("a")).(string), "a"; got != want {
 		t.Fatalf("unexpected Get result: got %q, want %q", got, want)
 	}
@@ -47,3 +79,155 @@
 		t.Fatal("path was not contracted after Delete()")
 	}
 }
+
+// TestEmptyPtrie tests behavior of an empty ptrie.
+func TestEmptyPtrie(t *testing.T) {
+	data := New(false)
+	if data.Get([]byte("abc")) != nil {
+		t.Fatalf("Get() returned non-nil value")
+	}
+	data.Put([]byte("abc"), "abc")
+	for i := 0; i < 2; i++ {
+		data.Delete([]byte("abc"))
+		if data.root != nil {
+			t.Fatalf("non-nil root for an empty ptrie")
+		}
+	}
+	s := data.Scan(nil, nil)
+	for i := 0; i < 2; i++ {
+		if s.Advance() {
+			t.Fatalf("an empty stream advanced")
+		}
+	}
+	s = data.Scan([]byte("abc"), nil)
+	for i := 0; i < 2; i++ {
+		if s.Advance() {
+			t.Fatalf("an empty stream advanced")
+		}
+	}
+}
+
+// TestLongPath verifies functionality of Put/Get/Delete/Scan on a ptrie with
+// a long path.
+func TestLongPath(t *testing.T) {
+	r := rand.New(rand.NewSource(seed))
+	depth := 16
+	prefix := make([]byte, depth)
+	for i := 0; i < depth; i++ {
+		prefix[i] = byte(r.Intn(256))
+	}
+	var keys1, keys2 []string
+	for i := 0; i < depth; i++ {
+		keys1 = append(keys1, string(prefix[:i+1]))
+		for j := 0; j < 8; j++ {
+			key := copyBytes(prefix[:i+1])
+			key[i] ^= 1 << uint32(j)
+			keys2 = append(keys2, string(key))
+		}
+	}
+	runPutGetDeleteTest(t, keys1)
+	runScanTest(t, keys1)
+	runPutGetDeleteTest(t, keys2)
+	runScanTest(t, keys2)
+	allKeys := append(keys2, keys1...)
+	runPutGetDeleteTest(t, allKeys)
+	runScanTest(t, allKeys)
+}
+
+// runPutGetDeleteTest adds and then deletes the keys in random order, checking
+// Put/Get/Delete behavior and verifying that copy-on-write mode doesn't modify
+// previous versions.
+func runPutGetDeleteTest(t *testing.T, keys []string) {
+	noCopyOnWrite := New(false)
+	copyOnWrite := New(true)
+	r := rand.New(rand.NewSource(seed))
+	perm := r.Perm(len(keys))
+	// Add keys.
+	for i := 0; i < len(keys); i++ {
+		key := []byte(keys[perm[i]])
+		oldVersion := copyOnWrite.Copy().root
+		oldVersionCopy := deepCopy(oldVersion)
+		copyOnWrite.Put(key, key)
+		if !reflect.DeepEqual(oldVersion, oldVersionCopy) {
+			Fatalf(t, "old version is modified after adding key %v", key)
+		}
+		noCopyOnWrite.Put(key, key)
+		if !reflect.DeepEqual(noCopyOnWrite.root, copyOnWrite.root) {
+			Fatalf(t, "ptrie with copyOnWrite and without are different after adding key %v", key)
+		}
+		for j := 0; j <= i; j++ {
+			key := []byte(keys[perm[j]])
+			if got, want := copyOnWrite.Get(key).([]byte), key; !bytes.Equal(got, want) {
+				Fatalf(t, "unexpected value: got %v, want %v", got, want)
+			}
+		}
+	}
+	perm = r.Perm(len(keys))
+	// Now delete keys.
+	for i := len(keys) - 1; i >= 0; i-- {
+		key := []byte(keys[perm[i]])
+		oldVersion := copyOnWrite.Copy().root
+		oldVersionCopy := deepCopy(oldVersion)
+		copyOnWrite.Delete(key)
+		if !reflect.DeepEqual(oldVersion, oldVersionCopy) {
+			Fatalf(t, "old version is modified after deleting key %v", key)
+		}
+		noCopyOnWrite.Delete(key)
+		if !reflect.DeepEqual(noCopyOnWrite.root, copyOnWrite.root) {
+			Fatalf(t, "ptrie with copyOnWrite and without are different after deleting key %v", key)
+		}
+		for j := 0; j < i; j++ {
+			key := []byte(keys[perm[j]])
+			if got, want := copyOnWrite.Get(key).([]byte), key; !bytes.Equal(got, want) {
+				Fatalf(t, "unexpected value: got %v, want %v", got, want)
+			}
+		}
+	}
+	if copyOnWrite.root != nil {
+		t.Fatalf("non-nil root for an empty ptrie")
+	}
+	if noCopyOnWrite.root != nil {
+		t.Fatalf("non-nil root for an empty ptrie")
+	}
+}
+
+// runScanTest adds a random half of the keys and verifies streams started from
+// every key in the keys slice.
+func runScanTest(t *testing.T, keys []string) {
+	sort.Strings(keys)
+	r := rand.New(rand.NewSource(seed))
+	perm := r.Perm(len(keys))
+	used := make([]bool, len(keys))
+	trie := New(false)
+	for i := 0; i*2 < len(keys); i++ {
+		j := perm[i]
+		used[j] = true
+		key := []byte(keys[j])
+		trie.Put(key, key)
+	}
+	for l := 0; l < len(keys); l++ {
+		s := trie.Scan([]byte(keys[l]), nil)
+		for cur := l; cur < len(keys); cur++ {
+			if !used[cur] {
+				continue
+			}
+			if !s.Advance() {
+				Fatalf(t, "the stream didn't advance")
+			}
+			if got, want := s.Key(nil), []byte(keys[cur]); !bytes.Equal(got, want) {
+				Fatalf(t, "unexpected key: got %v, want %v", got, want)
+			}
+			if got, want := s.Value().([]byte), []byte(keys[cur]); !bytes.Equal(got, want) {
+				Fatalf(t, "unexpected value: got %v, want %v", got, want)
+			}
+		}
+		if s.Advance() {
+			Fatalf(t, "the stream advanced")
+		}
+	}
+}
+
+func Fatalf(t *testing.T, format string, args ...interface{}) {
+	debug.PrintStack()
+	t.Fatalf(format, args...)
+}
diff --git a/services/syncbase/store/ptrie/stream.go b/services/syncbase/store/ptrie/stream.go
index a61a7ef..73ce855 100644
--- a/services/syncbase/store/ptrie/stream.go
+++ b/services/syncbase/store/ptrie/stream.go
@@ -35,7 +35,7 @@
 // A nil node is treated as an empty ptrie.
 func (node *pnode) Scan(start, limit []byte) *Stream {
 	if node == nil {
-		node = &pnode{}
+		return &Stream{}
 	}
 	s := &Stream{
 		limit: store.CopyBytes(nil, limit),
diff --git a/services/syncbase/store/ptrie/util.go b/services/syncbase/store/ptrie/util.go
index d1fa68d..660fe34 100644
--- a/services/syncbase/store/ptrie/util.go
+++ b/services/syncbase/store/ptrie/util.go
@@ -107,3 +107,10 @@
 	a[bitlen>>3] = (bitmask & a[bitlen>>3]) | (^bitmask & oldByte)
 	return a
 }
+
+// copyBytes returns a copy of the provided slice.
+func copyBytes(slice []byte) []byte {
+	result := make([]byte, len(slice))
+	copy(result, slice)
+	return result
+}
diff --git a/services/syncbase/vsync/dag.go b/services/syncbase/vsync/dag.go
index 086296c..1d0eacd 100644
--- a/services/syncbase/vsync/dag.go
+++ b/services/syncbase/vsync/dag.go
@@ -110,34 +110,18 @@
 	NoBatchId = uint64(0)
 )
 
-// dagNode holds the information on a object mutation in the DAG.
-// Note: the batch ID and deleted flag are copies of information in the log
-// record.  They are also stored in the DAG node to improve DAG traversal for
-// conflict resolution and pruning without having to fetch the full log record
-// every time.
-type dagNode struct {
-	Level   uint64   // node distance from root
-	Parents []string // references to parent versions
-	Logrec  string   // reference to log record
-	BatchId uint64   // ID of a write batch
-	Deleted bool     // true if the change was a delete
-}
-
 // batchSet holds information on a set of write batches.
 type batchSet map[uint64]*batchInfo
 
-// batchInfo holds the information on a write batch:
-// - The map of syncable (versioned) objects: {oid: version}
-// - The total count of batch objects, including non-syncable ones.
-// TODO(rdaoud): add support to track the read and scan sets.
-type batchInfo struct {
-	Objects map[string]string
-	Count   uint64
+// graftMap holds the state of DAG node grafting (attaching) per object.  It
+// holds a store handle to use when reading the object heads during grafting
+// operations.  This avoids contaminating the transaction read-set of the
+// caller (typically the Initiator storing newly received deltas).
+type graftMap struct {
+	objGraft map[string]*graftInfo
+	st       store.Store
 }
 
-// graftMap holds the state of DAG node grafting (attaching) per object.
-type graftMap map[string]*graftInfo
-
 // graftInfo holds the state of an object's node grafting in the DAG.
 // It is ephemeral (in-memory), used during a single sync operation to track
 // where the new DAG fragments are attached to the existing DAG for the object:
@@ -269,7 +253,7 @@
 //
 // The grafting structure is not needed when nodes are being added locally by
 // the Watcher, passing a nil grafting structure.
-func (s *syncService) addNode(ctx *context.T, tx store.Transaction, oid, version, logrec string, deleted bool, parents []string, btid uint64, graft graftMap) error {
+func (s *syncService) addNode(ctx *context.T, tx store.Transaction, oid, version, logrec string, deleted bool, parents []string, btid uint64, graft *graftMap) error {
 	if parents != nil {
 		if len(parents) > 2 {
 			return verror.New(verror.ErrInternal, ctx, "cannot have more than 2 parents")
@@ -369,7 +353,7 @@
 // to track DAG attachements during a sync operation.  It is not needed if the
 // parent linkage is due to a local change (from conflict resolution selecting
 // an existing version).
-func (s *syncService) addParent(ctx *context.T, tx store.Transaction, oid, version, parent string, graft graftMap) error {
+func (s *syncService) addParent(ctx *context.T, tx store.Transaction, oid, version, parent string, graft *graftMap) error {
 	if version == parent {
 		return verror.New(verror.ErrInternal, ctx, "object", oid, version, "cannot be its own parent")
 	}
@@ -457,7 +441,7 @@
 // head is different from the current local head. If there is a single new-head
 // and the snapshot head is the same as the current local head, the object
 // changes were applied without triggering a conflict.
-func hasConflict(ctx *context.T, st store.StoreReader, oid string, graft graftMap) (isConflict bool, newHead, oldHead, ancestor string, err error) {
+func hasConflict(ctx *context.T, st store.StoreReader, oid string, graft *graftMap) (isConflict bool, newHead, oldHead, ancestor string, err error) {
 	isConflict = false
 	oldHead = NoVersion
 	newHead = NoVersion
@@ -469,7 +453,7 @@
 		return
 	}
 
-	info := graft[oid]
+	info := graft.objGraft[oid]
 	if info == nil {
 		err = verror.New(verror.ErrInternal, ctx, "node", oid, "has no DAG graft info")
 		return
@@ -543,18 +527,22 @@
 	return
 }
 
-// newGraft allocates a graftMap to track DAG node grafting during sync.
-func newGraft() graftMap {
-	return make(graftMap)
+// newGraft allocates a graftMap to track DAG node grafting during sync.  It is
+// given a handle to a store to use for its own reading of object head nodes.
+func newGraft(st store.Store) *graftMap {
+	return &graftMap{
+		objGraft: make(map[string]*graftInfo),
+		st:       st,
+	}
 }
 
 // getObjectGraft returns the graftInfo for an object ID.  If the graftMap is
 // nil, a nil graftInfo is returned because grafting is not being tracked.
-func getObjectGraftInfo(ctx *context.T, sntx store.SnapshotOrTransaction, graft graftMap, oid string) *graftInfo {
+func getObjectGraftInfo(ctx *context.T, sntx store.SnapshotOrTransaction, graft *graftMap, oid string) *graftInfo {
 	if graft == nil {
 		return nil
 	}
-	if info := graft[oid]; info != nil {
+	if info := graft.objGraft[oid]; info != nil {
 		return info
 	}
 
@@ -565,12 +553,20 @@
 	}
 
 	// If the object has a head node, include it in the set of new heads.
-	if head, err := getHead(ctx, sntx, oid); err == nil {
+	// Note: use the store handle of the graftMap if available to avoid
+	// contaminating the read-set of the caller's transaction.  Otherwise
+	// use the caller's transaction.
+	var st store.StoreReader
+	st = graft.st
+	if st == nil {
+		st = sntx
+	}
+	if head, err := getHead(ctx, st, oid); err == nil {
 		info.newHeads[head] = true
 		info.oldHeadSnap = head
 	}
 
-	graft[oid] = info
+	graft.objGraft[oid] = info
 	return info
 }
 
@@ -840,15 +836,15 @@
 // getParentMap is a testing and debug helper function that returns for an
 // object a map of its DAG (node-to-parents relations).  If a graft structure
 // is given, include its fragments in the map.
-func getParentMap(ctx *context.T, st store.StoreReader, oid string, graft graftMap) map[string][]string {
+func getParentMap(ctx *context.T, st store.StoreReader, oid string, graft *graftMap) map[string][]string {
 	parentMap := make(map[string][]string)
 	var start []string
 
 	if head, err := getHead(ctx, st, oid); err == nil {
 		start = append(start, head)
 	}
-	if graft != nil && graft[oid] != nil {
-		for v := range graft[oid].newHeads {
+	if graft != nil && graft.objGraft[oid] != nil {
+		for v := range graft.objGraft[oid].newHeads {
 			start = append(start, v)
 		}
 	}
diff --git a/services/syncbase/vsync/dag_test.go b/services/syncbase/vsync/dag_test.go
index 57291a4..709df1c 100644
--- a/services/syncbase/vsync/dag_test.go
+++ b/services/syncbase/vsync/dag_test.go
@@ -128,7 +128,7 @@
 	}
 	tx.Commit()
 
-	graft := newGraft()
+	graft := newGraft(st)
 	tx = st.NewTransaction()
 	if err := s.addParent(nil, tx, oid, version, version, graft); err == nil {
 		t.Errorf("addParent() did not fail on a self-parent for object %s:%s", oid, version)
@@ -154,7 +154,7 @@
 		}
 		tx.Commit()
 
-		var g graftMap
+		var g *graftMap
 		if remote {
 			g = graft
 		}
@@ -311,7 +311,7 @@
 	}
 
 	// Verify the grafting of remote nodes.
-	g := graft[oid]
+	g := graft.objGraft[oid]
 
 	expNewHeads := map[string]bool{"3": true}
 
@@ -388,7 +388,7 @@
 	}
 
 	// Verify the grafting of remote nodes.
-	g := graft[oid]
+	g := graft.objGraft[oid]
 
 	expNewHeads := map[string]bool{"6": true}
 	if !reflect.DeepEqual(g.newHeads, expNewHeads) {
@@ -471,7 +471,7 @@
 	}
 
 	// Verify the grafting of remote nodes.
-	g := graft[oid]
+	g := graft.objGraft[oid]
 
 	expNewHeads := map[string]bool{"3": true, "6": true}
 	if !reflect.DeepEqual(g.newHeads, expNewHeads) {
@@ -564,7 +564,7 @@
 	}
 
 	// Verify the grafting of remote nodes.
-	g := graft[oid]
+	g := graft.objGraft[oid]
 
 	expNewHeads := map[string]bool{"3": true, "6": true}
 	if !reflect.DeepEqual(g.newHeads, expNewHeads) {
@@ -651,7 +651,7 @@
 	}
 
 	// Verify the grafting of remote nodes.
-	g := graft[oid]
+	g := graft.objGraft[oid]
 
 	expNewHeads := map[string]bool{"3": true, "6": true}
 	if !reflect.DeepEqual(g.newHeads, expNewHeads) {
@@ -956,7 +956,7 @@
 	}
 
 	// Verify the grafting of remote nodes.
-	g := graft[oid]
+	g := graft.objGraft[oid]
 
 	expNewHeads := map[string]bool{"3": true}
 	if !reflect.DeepEqual(g.newHeads, expNewHeads) {
@@ -981,7 +981,7 @@
 		t.Errorf("hasConflict() on %v did not fail with a nil graft map: flag %t, newHead %s, oldHead %s, ancestor %s, err %v",
 			oid, isConflict, newHead, oldHead, ancestor, errConflict)
 	}
-	isConflict, newHead, oldHead, ancestor, errConflict = hasConflict(nil, st, oid, newGraft())
+	isConflict, newHead, oldHead, ancestor, errConflict = hasConflict(nil, st, oid, newGraft(st))
 	if errConflict == nil {
 		t.Errorf("hasConflict() on %v did not fail with an empty graft map: flag %t, newHead %s, oldHead %s, ancestor %s, err %v",
 			oid, isConflict, newHead, oldHead, ancestor, errConflict)
@@ -1026,7 +1026,7 @@
 	}
 
 	// Verify the grafting of remote nodes.
-	g := graft[oid]
+	g := graft.objGraft[oid]
 
 	expNewHeads := map[string]bool{"3": true, "4": true}
 	if !reflect.DeepEqual(g.newHeads, expNewHeads) {
@@ -1086,7 +1086,7 @@
 	}
 
 	// Verify the grafting of remote nodes.
-	g := graft[oid]
+	g := graft.objGraft[oid]
 
 	expNewHeads := map[string]bool{"4": true}
 	if !reflect.DeepEqual(g.newHeads, expNewHeads) {
@@ -1148,7 +1148,7 @@
 	}
 
 	// Verify the grafting of remote nodes.
-	g := graft[oid]
+	g := graft.objGraft[oid]
 
 	expNewHeads := map[string]bool{"5": true}
 	if !reflect.DeepEqual(g.newHeads, expNewHeads) {
@@ -1185,7 +1185,7 @@
 		t.Errorf("object %s has wrong head: %s", oid, head)
 	}
 
-	g = graft[oid]
+	g = graft.objGraft[oid]
 
 	if !reflect.DeepEqual(g.newHeads, expNewHeads) {
 		t.Errorf("object %s has invalid newHeads: (%v) instead of (%v)", oid, g.newHeads, expNewHeads)
diff --git a/services/syncbase/vsync/initiator.go b/services/syncbase/vsync/initiator.go
index 5875e21..94f79e1 100644
--- a/services/syncbase/vsync/initiator.go
+++ b/services/syncbase/vsync/initiator.go
@@ -180,7 +180,7 @@
 	remote     interfaces.GenVector         // generation vector from the remote peer.
 	updLocal   interfaces.GenVector         // updated local generation vector at the end of sync round.
 	updObjects map[string]*objConflictState // tracks updated objects during a log replay.
-	dagGraft   graftMap                     // DAG state that tracks conflicts and common ancestors.
+	dagGraft   *graftMap                    // DAG state that tracks conflicts and common ancestors.
 
 	req    interfaces.DeltaReq                // GetDeltas RPC request.
 	stream interfaces.SyncGetDeltasClientCall // stream handle for the GetDeltas RPC.
@@ -243,7 +243,7 @@
 	iSt := &initiationState{}
 	iSt.config = c
 	iSt.updObjects = make(map[string]*objConflictState)
-	iSt.dagGraft = newGraft()
+	iSt.dagGraft = newGraft(c.st)
 	iSt.sg = sg
 	return iSt
 }
@@ -436,18 +436,6 @@
 // resolution during replay.  This avoids resolving conflicts that have already
 // been resolved by other devices.
 func (iSt *initiationState) recvAndProcessDeltas(ctx *context.T) error {
-	// This is to handle issues with graftMap in DAG. Ideally, the
-	// transaction created to store all the deltas received over the network
-	// should not contend with any other store changes since this is all
-	// brand new information. However, as log records are received over the
-	// network, they are also incrementally processed. To enable incremental
-	// processing, the current head of each dirty object is read to populate
-	// the graftMap. This read can potentially contend with the watcher
-	// updating the head of an object. This lock prevents that contention in
-	// order to avoid retrying the whole transaction.
-	iSt.config.sync.thLock.Lock()
-	defer iSt.config.sync.thLock.Unlock()
-
 	// TODO(hpucha): This works for now, but figure out a long term solution
 	// as this may be implementation dependent. It currently works because
 	// the RecvStream call is stateless, and grabbing a handle to it
diff --git a/services/syncbase/vsync/replay_test.go b/services/syncbase/vsync/replay_test.go
index e13eab6..272b945 100644
--- a/services/syncbase/vsync/replay_test.go
+++ b/services/syncbase/vsync/replay_test.go
@@ -179,14 +179,14 @@
 
 // dagReplayCommands parses a sync test file and replays its commands, updating
 // the DAG structures associated with the sync service.
-func (s *syncService) dagReplayCommands(ctx *context.T, syncfile string) (graftMap, error) {
+func (s *syncService) dagReplayCommands(ctx *context.T, syncfile string) (*graftMap, error) {
 	cmds, err := parseSyncCommands(syncfile)
 	if err != nil {
 		return nil, err
 	}
 
 	st := s.sv.St()
-	graft := newGraft()
+	graft := newGraft(st)
 
 	for _, cmd := range cmds {
 		tx := st.NewTransaction()
diff --git a/services/syncbase/vsync/types.vdl b/services/syncbase/vsync/types.vdl
index a0c8731..820dec6 100644
--- a/services/syncbase/vsync/types.vdl
+++ b/services/syncbase/vsync/types.vdl
@@ -84,3 +84,27 @@
 	SyncPending   bool
 	PendingGenVec interfaces.PrefixGenVector
 }
+
+// dagNode holds the information on an object mutation in the DAG.  This
+// information is extracted from the log records exchanged between Syncbases
+// and is also stored in the DAG node to improve DAG traversal for conflict
+// resolution and pruning without having to fetch the full log record.
+type dagNode struct {
+	Level    uint64   // node distance from root
+	Parents  []string // references to parent versions
+	Logrec   string   // reference to log record
+	BatchId  uint64   // ID of a write batch
+	Shell    bool     // true when the data is hidden due to permissions
+	Deleted  bool     // true if the change was a delete
+	PermId   string   // ID of the permissions controlling this version
+	PermVers string   // current version of the permissions object
+}
+
+// batchInfo holds the information on a write batch:
+// - The map of syncable (versioned) objects: {oid: version}
+// - The total count of batch objects, including non-syncable ones.
+// TODO(rdaoud): add support to track the read and scan sets.
+type batchInfo struct {
+	Objects map[string]string
+	Count   uint64
+}
diff --git a/services/syncbase/vsync/types.vdl.go b/services/syncbase/vsync/types.vdl.go
index 34f425f..e352de4 100644
--- a/services/syncbase/vsync/types.vdl.go
+++ b/services/syncbase/vsync/types.vdl.go
@@ -104,12 +104,48 @@
 }) {
 }
 
+// dagNode holds the information on an object mutation in the DAG.  This
+// information is extracted from the log records exchanged between Syncbases
+// and is also stored in the DAG node to improve DAG traversal for conflict
+// resolution and pruning without having to fetch the full log record.
+type dagNode struct {
+	Level    uint64   // node distance from root
+	Parents  []string // references to parent versions
+	Logrec   string   // reference to log record
+	BatchId  uint64   // ID of a write batch
+	Shell    bool     // true when the data is hidden due to permissions
+	Deleted  bool     // true if the change was a delete
+	PermId   string   // ID of the permissions controlling this version
+	PermVers string   // current version of the permissions object
+}
+
+func (dagNode) __VDLReflect(struct {
+	Name string `vdl:"v.io/x/ref/services/syncbase/vsync.dagNode"`
+}) {
+}
+
+// batchInfo holds the information on a write batch:
+// - The map of syncable (versioned) objects: {oid: version}
+// - The total count of batch objects, including non-syncable ones.
+// TODO(rdaoud): add support to track the read and scan sets.
+type batchInfo struct {
+	Objects map[string]string
+	Count   uint64
+}
+
+func (batchInfo) __VDLReflect(struct {
+	Name string `vdl:"v.io/x/ref/services/syncbase/vsync.batchInfo"`
+}) {
+}
+
 func init() {
 	vdl.Register((*syncData)(nil))
 	vdl.Register((*localGenInfo)(nil))
 	vdl.Register((*dbSyncState)(nil))
 	vdl.Register((*localLogRec)(nil))
 	vdl.Register((*sgLocalState)(nil))
+	vdl.Register((*dagNode)(nil))
+	vdl.Register((*batchInfo)(nil))
 }
 
 const logPrefix = "log" // log state.
diff --git a/services/xproxyd/proxy_test.go b/services/xproxyd/proxy_test.go
index ebeb262..960e533 100644
--- a/services/xproxyd/proxy_test.go
+++ b/services/xproxyd/proxy_test.go
@@ -6,7 +6,9 @@
 
 import (
 	"bufio"
+	"fmt"
 	"strings"
+	"sync"
 	"testing"
 	"time"
 
@@ -22,10 +24,15 @@
 	"v.io/v23/security"
 )
 
-const leakWaitTime = 100 * time.Millisecond
+const (
+	leakWaitTime = 250 * time.Millisecond
+	pollTime     = 50 * time.Millisecond
+)
 
-func TestProxiedConnection(t *testing.T) {
+func TestSingleProxy(t *testing.T) {
 	defer goroutines.NoLeaks(t, leakWaitTime)()
+	kp := newKillProtocol()
+	flow.RegisterProtocol("kill", kp)
 	pctx, shutdown := v23.Init()
 	defer shutdown()
 	actx, am, err := v23.ExperimentalWithNewFlowManager(pctx)
@@ -37,16 +44,23 @@
 		t.Fatal(err)
 	}
 
-	pep := startProxy(t, pctx, address{"tcp", "127.0.0.1:0"})
+	pep := startProxy(t, pctx, address{"kill", "127.0.0.1:0"})
 
 	if err := am.Listen(actx, "v23", pep.String()); err != nil {
 		t.Fatal(err)
 	}
-	testEndToEndConnections(t, dctx, actx, dm, am)
+
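+	// Wait until the proxied endpoint has been filled in; its network stays
+	// empty until the proxy responds.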
+	for am.ListeningEndpoints()[0].Addr().Network() == "" {
+		time.Sleep(pollTime)
+	}
+
+	testEndToEndConnections(t, dctx, actx, dm, am, kp)
 }
 
 func TestMultipleProxies(t *testing.T) {
 	defer goroutines.NoLeaks(t, leakWaitTime)()
+	kp := newKillProtocol()
+	flow.RegisterProtocol("kill", kp)
 	pctx, shutdown := v23.Init()
 	defer shutdown()
 	actx, am, err := v23.ExperimentalWithNewFlowManager(pctx)
@@ -58,59 +72,79 @@
 		t.Fatal(err)
 	}
 
-	pep := startProxy(t, pctx, address{"tcp", "127.0.0.1:0"})
+	pep := startProxy(t, pctx, address{"kill", "127.0.0.1:0"})
 
-	p2ep := startProxy(t, pctx, address{"v23", pep.String()}, address{"tcp", "127.0.0.1:0"})
+	p2ep := startProxy(t, pctx, address{"v23", pep.String()}, address{"kill", "127.0.0.1:0"})
 
-	p3ep := startProxy(t, pctx, address{"v23", p2ep.String()}, address{"tcp", "127.0.0.1:0"})
+	p3ep := startProxy(t, pctx, address{"v23", p2ep.String()}, address{"kill", "127.0.0.1:0"})
 
 	if err := am.Listen(actx, "v23", p3ep.String()); err != nil {
 		t.Fatal(err)
 	}
-	testEndToEndConnections(t, dctx, actx, dm, am)
+
+	// Wait for am.Listen to get 3 endpoints.
+	for len(am.ListeningEndpoints()) != 3 {
+		time.Sleep(pollTime)
+	}
+
+	testEndToEndConnections(t, dctx, actx, dm, am, kp)
 }
 
-func testEndToEndConnections(t *testing.T, dctx, actx *context.T, dm, am flow.Manager) {
+func testEndToEndConnections(t *testing.T, dctx, actx *context.T, dm, am flow.Manager, kp *killProtocol) {
 	aeps := am.ListeningEndpoints()
 	if len(aeps) == 0 {
 		t.Fatal("acceptor not listening on any endpoints")
 	}
 	for _, aep := range aeps {
-		testEndToEndConnection(t, dctx, actx, dm, am, aep)
+		// Kill the connections; subsequent connections should still eventually succeed.
+		kp.KillConnections()
+		for {
+			if err := testEndToEndConnection(t, dctx, actx, dm, am, aep); err != nil {
+				t.Log(err)
+				time.Sleep(pollTime)
+				continue
+			}
+			break
+		}
 	}
 }
 
-func testEndToEndConnection(t *testing.T, dctx, actx *context.T, dm, am flow.Manager, aep naming.Endpoint) {
+func testEndToEndConnection(t *testing.T, dctx, actx *context.T, dm, am flow.Manager, aep naming.Endpoint) error {
 	// The dialing flow.Manager dials a flow to the accepting flow.Manager.
 	want := "Do you read me?"
 	df, err := dm.Dial(dctx, aep, bfp)
 	if err != nil {
-		t.Fatal(err)
+		return err
 	}
 	// We write before accepting to ensure that the openFlow message is sent.
-	writeLine(df, want)
+	if err := writeLine(df, want); err != nil {
+		return err
+	}
 	af, err := am.Accept(actx)
 	if err != nil {
-		t.Fatal(err)
+		return err
 	}
 	got, err := readLine(af)
 	if err != nil {
-		t.Fatal(err)
+		return err
 	}
 	if got != want {
-		t.Errorf("got %v, want %v", got, want)
+		return fmt.Errorf("got %v, want %v", got, want)
 	}
 
 	// Writes in the opposite direction should work as well.
 	want = "I read you loud and clear."
-	writeLine(af, want)
+	if err := writeLine(af, want); err != nil {
+		return err
+	}
 	got, err = readLine(df)
 	if err != nil {
-		t.Fatal(err)
+		return err
 	}
 	if got != want {
-		t.Errorf("got %v, want %v", got, want)
+		return fmt.Errorf("got %v, want %v", got, want)
 	}
+	return nil
 }
 
 // TODO(suharshs): Add test for bidirectional RPC.
@@ -141,20 +175,69 @@
 
 func startProxy(t *testing.T, ctx *context.T, addrs ...address) naming.Endpoint {
 	var ls rpc.ListenSpec
+	hasProxies := false
 	for _, addr := range addrs {
 		ls.Addrs = append(ls.Addrs, addr)
+		if addr.Protocol == "v23" {
+			hasProxies = true
+		}
 	}
 	ctx = v23.WithListenSpec(ctx, ls)
 	proxy, _, err := xproxyd.New(ctx)
 	if err != nil {
 		t.Fatal(err)
 	}
+	// Wait for the proxy to connect to its proxies.
+	if hasProxies {
+		for len(proxy.MultipleProxyEndpoints()) == 0 {
+			time.Sleep(pollTime)
+		}
+	}
 	peps := proxy.ListeningEndpoints()
 	for _, pep := range peps {
-		if pep.Addr().Network() == "tcp" {
+		if pep.Addr().Network() == "tcp" || pep.Addr().Network() == "kill" {
 			return pep
 		}
 	}
 	t.Fatal("Proxy not listening on network address.")
 	return nil
 }
+
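+// killProtocol wraps the registered tcp protocol and records every connection
+// it dials so that tests can close ("kill") them all and exercise
+// reconnection.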
+type killProtocol struct {
+	protocol flow.Protocol
+	mu       sync.Mutex
+	conns    []flow.Conn
+}
+
+func newKillProtocol() *killProtocol {
+	p, _ := flow.RegisteredProtocol("tcp")
+	return &killProtocol{protocol: p}
+}
+
+func (p *killProtocol) KillConnections() {
+	p.mu.Lock()
+	for _, c := range p.conns {
+		c.Close()
+	}
+	p.conns = nil
+	p.mu.Unlock()
+}
+
+func (p *killProtocol) Dial(ctx *context.T, protocol, address string, timeout time.Duration) (flow.Conn, error) {
+	c, err := p.protocol.Dial(ctx, "tcp", address, timeout)
+	if err != nil {
+		return nil, err
+	}
+	p.mu.Lock()
+	p.conns = append(p.conns, c)
+	p.mu.Unlock()
+	return c, nil
+}
+
+func (p *killProtocol) Listen(ctx *context.T, protocol, address string) (flow.Listener, error) {
+	return p.protocol.Listen(ctx, "tcp", address)
+}
+
+func (p *killProtocol) Resolve(ctx *context.T, protocol, address string) (string, string, error) {
+	return p.protocol.Resolve(ctx, "tcp", address)
+}
diff --git a/services/xproxyd/proxyd.go b/services/xproxyd/proxyd.go
index d05a561..85d66f7 100644
--- a/services/xproxyd/proxyd.go
+++ b/services/xproxyd/proxyd.go
@@ -5,9 +5,9 @@
 package xproxyd
 
 import (
-	"fmt"
 	"io"
 	"sync"
+	"time"
 
 	"v.io/v23"
 	"v.io/v23/context"
@@ -16,12 +16,12 @@
 	"v.io/v23/naming"
 )
 
-// TODO(suharshs): Make sure that we don't leak any goroutines.
+const reconnectDelay = 50 * time.Millisecond
 
 type proxy struct {
 	m              flow.Manager
 	mu             sync.Mutex
-	proxyEndpoints []naming.Endpoint
+	proxyEndpoints map[string][]naming.Endpoint // keyed by proxy address
 }
 
 func New(ctx *context.T) (*proxy, *context.T, error) {
@@ -30,7 +30,8 @@
 		return nil, nil, err
 	}
 	p := &proxy{
-		m: mgr,
+		m:              mgr,
+		proxyEndpoints: make(map[string][]naming.Endpoint),
 	}
 	for _, addr := range v23.GetListenSpec(ctx).Addrs {
 		if addr.Protocol == "v23" {
@@ -38,25 +39,7 @@
 			if err != nil {
 				return nil, nil, err
 			}
-			f, err := p.m.Dial(ctx, ep, proxyBlessingsForPeer{}.run)
-			if err != nil {
-				return nil, nil, err
-			}
-			// Send a byte telling the acceptor that we are a proxy.
-			if err := writeMessage(ctx, &message.MultiProxyRequest{}, f); err != nil {
-				return nil, nil, err
-			}
-			msg, err := readMessage(ctx, f)
-			if err != nil {
-				return nil, nil, err
-			}
-			m, ok := msg.(*message.ProxyResponse)
-			if !ok {
-				return nil, nil, NewErrUnexpectedMessage(ctx, fmt.Sprintf("%t", m))
-			}
-			p.mu.Lock()
-			p.proxyEndpoints = append(p.proxyEndpoints, m.Endpoints...)
-			p.mu.Unlock()
+			go p.connectToProxy(ctx, addr.Address, ep)
 		} else if err := p.m.Listen(ctx, addr.Protocol, addr.Address); err != nil {
 			return nil, nil, err
 		}
@@ -69,6 +52,16 @@
 	return p.m.ListeningEndpoints()
 }
 
+func (p *proxy) MultipleProxyEndpoints() []naming.Endpoint {
+	var eps []naming.Endpoint
+	p.mu.Lock()
+	for _, v := range p.proxyEndpoints {
+		eps = append(eps, v...)
+	}
+	p.mu.Unlock()
+	return eps
+}
+
 func (p *proxy) listenLoop(ctx *context.T) {
 	for {
 		f, err := p.m.Accept(ctx)
@@ -99,6 +92,7 @@
 func (p *proxy) startRouting(ctx *context.T, f flow.Flow, m *message.Setup) error {
 	fout, err := p.dialNextHop(ctx, f, m)
 	if err != nil {
+		f.Close()
 		return err
 	}
 	go p.forwardLoop(ctx, f, fout)
@@ -108,10 +102,9 @@
 
 func (p *proxy) forwardLoop(ctx *context.T, fin, fout flow.Flow) {
 	for {
-		_, err := io.Copy(fin, fout)
-		if err == io.EOF {
-			return
-		} else if err != nil {
+		if _, err := io.Copy(fin, fout); err != nil {
+			fin.Close()
+			fout.Close()
 			ctx.Errorf("f.Read failed: %v", err)
 			return
 		}
@@ -124,7 +117,10 @@
 		ep  naming.Endpoint
 		err error
 	)
-	if routes := m.PeerRemoteEndpoint.Routes(); len(routes) > 0 {
+	if ep, err = removeNetworkAddress(m.PeerRemoteEndpoint); err != nil {
+		return nil, err
+	}
+	if routes := ep.Routes(); len(routes) > 0 {
 		if err := rid.FromString(routes[0]); err != nil {
 			return nil, err
 		}
@@ -133,15 +129,13 @@
 		// TODO(suharshs): Make sure that the routingID from the route belongs to a
 		// connection that is stored in the manager's cache. (i.e. a Server has connected
 		// with the routingID before)
-		if ep, err = setEndpointRoutingID(m.PeerRemoteEndpoint, rid); err != nil {
+		if ep, err = setEndpointRoutingID(ep, rid); err != nil {
 			return nil, err
 		}
 		// Remove the read route from the setup message endpoint.
 		if m.PeerRemoteEndpoint, err = setEndpointRoutes(m.PeerRemoteEndpoint, routes[1:]); err != nil {
 			return nil, err
 		}
-	} else {
-		ep = m.PeerRemoteEndpoint
 	}
 	fout, err := p.m.Dial(ctx, ep, proxyBlessingsForPeer{}.run)
 	if err != nil {
@@ -175,7 +169,10 @@
 
 func (p *proxy) returnEndpoints(ctx *context.T, rid naming.RoutingID, route string) ([]naming.Endpoint, error) {
 	p.mu.Lock()
-	eps := append(p.m.ListeningEndpoints(), p.proxyEndpoints...)
+	eps := p.m.ListeningEndpoints()
+	for _, peps := range p.proxyEndpoints {
+		eps = append(eps, peps...)
+	}
 	p.mu.Unlock()
 	if len(eps) == 0 {
 		return nil, NewErrNotListening(ctx)
@@ -201,3 +198,41 @@
 	}
 	return eps, nil
 }
+
+func (p *proxy) connectToProxy(ctx *context.T, address string, ep naming.Endpoint) {
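+	// Retry with exponential backoff; the first attempt happens immediately
+	// because delay starts at reconnectDelay.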
+	for delay := reconnectDelay; ; delay *= 2 {
+		time.Sleep(delay - reconnectDelay)
+		select {
+		case <-ctx.Done():
+			return
+		default:
+		}
+		f, err := p.m.Dial(ctx, ep, proxyBlessingsForPeer{}.run)
+		if err != nil {
+			ctx.Error(err)
+			continue
+		}
+		// Send a byte telling the acceptor that we are a proxy.
+		if err := writeMessage(ctx, &message.MultiProxyRequest{}, f); err != nil {
+			ctx.Error(err)
+			continue
+		}
+		eps, err := readProxyResponse(ctx, f)
+		if err != nil {
+			ctx.Error(err)
+			continue
+		}
+		p.mu.Lock()
+		p.proxyEndpoints[address] = eps
+		p.mu.Unlock()
+		select {
+		case <-ctx.Done():
+			return
+		case <-f.Closed():
+			p.mu.Lock()
+			delete(p.proxyEndpoints, address)
+			p.mu.Unlock()
+			delay = reconnectDelay
+		}
+	}
+}
diff --git a/services/xproxyd/util.go b/services/xproxyd/util.go
index 4ccad31..e14e23e 100644
--- a/services/xproxyd/util.go
+++ b/services/xproxyd/util.go
@@ -5,6 +5,8 @@
 package xproxyd
 
 import (
+	"fmt"
+
 	"v.io/v23"
 	"v.io/v23/context"
 	"v.io/v23/flow"
@@ -13,6 +15,16 @@
 	"v.io/v23/security"
 )
 
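+// removeNetworkAddress returns a copy of ep with its network protocol and
+// address cleared, preserving routes, routing id, blessings, and
+// mountability.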
+func removeNetworkAddress(ep naming.Endpoint) (naming.Endpoint, error) {
+	_, _, routes, rid, bnames, mountable := getEndpointParts(ep)
+	opts := routes
+	opts = append(opts, bnames...)
+	opts = append(opts, rid)
+	opts = append(opts, mountable)
+	epString := naming.FormatEndpoint("", "", opts...)
+	return v23.NewEndpoint(epString)
+}
+
 // setEndpointRoutingID returns a copy of ep with RoutingId changed to rid.
 func setEndpointRoutingID(ep naming.Endpoint, rid naming.RoutingID) (naming.Endpoint, error) {
 	network, address, routes, _, bnames, mountable := getEndpointParts(ep)
@@ -87,3 +99,15 @@
 	}
 	return message.Read(ctx, b)
 }
+
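+// readProxyResponse reads a message from f and returns the endpoints carried
+// by the expected ProxyResponse, or an error for any other message type.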
+func readProxyResponse(ctx *context.T, f flow.Flow) ([]naming.Endpoint, error) {
+	msg, err := readMessage(ctx, f)
+	if err != nil {
+		return nil, err
+	}
+	res, ok := msg.(*message.ProxyResponse)
+	if !ok {
+		return nil, NewErrUnexpectedMessage(ctx, fmt.Sprintf("%T", msg))
+	}
+	return res.Endpoints, nil
+}
diff --git a/test/v23tests/v23tests_test.go b/test/v23tests/v23tests_test.go
index c8d655f..c6ece5a 100644
--- a/test/v23tests/v23tests_test.go
+++ b/test/v23tests/v23tests_test.go
@@ -119,7 +119,7 @@
 }
 
 func TestDeferHandling(t *testing.T) {
-	t.Skip("http://v.io/i/686 -- test is flaky in Go1.5")
+	t.Skip("https://v.io/i/686 -- test is flaky in Go1.5")
 	sh, _ := modules.NewShell(nil, nil, testing.Verbose(), t)
 	child, err := sh.Start(nil, RunIntegrationTestInChild, "--test.run=TestHelperProcess", "--v23.tests")
 	if err != nil {