Merge " veyron/services/mgmt/device: reduce dispatcher log spam"
diff --git a/profiles/roaming/roaming_server.go b/profiles/roaming/roaming_server.go
index 30874be..e6b6276 100644
--- a/profiles/roaming/roaming_server.go
+++ b/profiles/roaming/roaming_server.go
@@ -4,12 +4,13 @@
 
 import (
 	"fmt"
+	"log"
 
 	"v.io/core/veyron2"
 	"v.io/core/veyron2/ipc"
 	"v.io/core/veyron2/vlog"
 
-	"v.io/core/veyron/profiles/roaming"
+	_ "v.io/core/veyron/profiles/roaming"
 )
 
 func main() {
@@ -22,25 +23,36 @@
 	}
 
 	listenSpec := veyron2.GetListenSpec(ctx)
-
 	fmt.Printf("listen spec: %v\n", listenSpec)
-	ep, err := server.Listen(listenSpec)
+
+	_, err = server.Listen(listenSpec)
 	if err != nil {
 		vlog.Fatalf("unexpected error: %q", err)
 	}
-	if ep != nil {
-		fmt.Println(ep)
+	err = server.Serve("roamer", &dummy{}, nil)
+	if err != nil {
+		log.Fatalf("unexpected error: %q", err)
 	}
-	if err := server.Serve("roamer", &receiver{}, nil); err != nil {
-		vlog.Fatalf("unexpected error: %q", err)
-	}
+	watcher := make(chan ipc.NetworkChange, 1)
+	server.WatchNetwork(watcher)
 
-	done := make(chan struct{})
-	<-done
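+	// Loop forever, printing the server's current endpoints and mounts and then blocking until the next network change.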
+	for {
+		status := server.Status()
+		fmt.Printf("Endpoints: %d created:\n", len(status.Endpoints))
+		for _, ep := range status.Endpoints {
+			fmt.Printf("  %s\n", ep)
+		}
+		fmt.Printf("Mounts: %d:\n", len(status.Mounts))
+		for _, ms := range status.Mounts {
+			fmt.Printf("  %s\n", ms)
+		}
+		change := <-watcher
+		fmt.Printf("Network change: %s", change.DebugString())
+	}
 }
 
-type receiver struct{}
+type dummy struct{}
 
-func (d *receiver) Echo(call ipc.ServerContext, arg string) (string, error) {
+func (d *dummy) Echo(call ipc.ServerContext, arg string) (string, error) {
 	return arg, nil
 }
diff --git a/profiles/roaming/roaminginit.go b/profiles/roaming/roaminginit.go
index 45a5f56..ae37ee9 100644
--- a/profiles/roaming/roaminginit.go
+++ b/profiles/roaming/roaminginit.go
@@ -168,7 +168,10 @@
 			}
 			// We will always send the best currently available address
 			if chosen, err := listenSpec.AddressChooser(listenSpec.Addrs[0].Protocol, cur); err == nil && chosen != nil {
+				vlog.VI(2).Infof("Sending added and chosen: %s", chosen)
 				ch <- ipc.NewAddAddrsSetting(chosen)
+			} else {
+				vlog.VI(2).Infof("Ignoring added %s", added)
 			}
 			prev = cur
 		case <-cleanup:
diff --git a/runtimes/google/ipc/benchmark/benchmark_test.go b/runtimes/google/ipc/benchmark/benchmark_test.go
index 7bd0db8..67e36c5 100644
--- a/runtimes/google/ipc/benchmark/benchmark_test.go
+++ b/runtimes/google/ipc/benchmark/benchmark_test.go
@@ -6,8 +6,7 @@
 
 	"v.io/core/veyron/lib/testutil"
 	"v.io/core/veyron/lib/testutil/benchmark"
-	tsecurity "v.io/core/veyron/lib/testutil/security"
-	_ "v.io/core/veyron/profiles"
+	_ "v.io/core/veyron/profiles/static"
 
 	"v.io/core/veyron2"
 	"v.io/core/veyron2/context"
@@ -107,12 +106,6 @@
 	var shutdown veyron2.Shutdown
 	ctx, shutdown = testutil.InitForTest()
 
-	var err error
-	ctx, err = veyron2.SetPrincipal(ctx, tsecurity.NewPrincipal("test-blessing"))
-	if err != nil {
-		panic(err)
-	}
-
 	var serverStop func()
 	serverAddr, serverStop = StartServer(ctx, veyron2.GetListenSpec(ctx))
 
diff --git a/runtimes/google/ipc/cancel_test.go b/runtimes/google/ipc/cancel_test.go
index d5e2e01..2022ad7 100644
--- a/runtimes/google/ipc/cancel_test.go
+++ b/runtimes/google/ipc/cancel_test.go
@@ -21,13 +21,13 @@
 }
 
 type canceld struct {
-	sm        stream.Manager
-	ns        naming.Namespace
-	name      string
-	child     string
-	started   chan struct{}
-	cancelled chan struct{}
-	stop      func() error
+	sm       stream.Manager
+	ns       naming.Namespace
+	name     string
+	child    string
+	started  chan struct{}
+	canceled chan struct{}
+	stop     func() error
 }
 
 func (c *canceld) Run(ctx ipc.ServerCall) error {
@@ -48,8 +48,8 @@
 
 	vlog.Info(c.name, " waiting for cancellation")
 	<-ctx.Context().Done()
-	vlog.Info(c.name, " cancelled")
-	close(c.cancelled)
+	vlog.Info(c.name, " canceled")
+	close(c.canceled)
 	return nil
 }
 
@@ -65,13 +65,13 @@
 	}
 
 	c := &canceld{
-		sm:        sm,
-		ns:        ns,
-		name:      name,
-		child:     child,
-		started:   make(chan struct{}, 0),
-		cancelled: make(chan struct{}, 0),
-		stop:      s.Stop,
+		sm:       sm,
+		ns:       ns,
+		name:     name,
+		child:    child,
+		started:  make(chan struct{}, 0),
+		canceled: make(chan struct{}, 0),
+		stop:     s.Stop,
 	}
 
 	if err := s.Serve(name, c, fakeAuthorizer(0)); err != nil {
@@ -116,7 +116,7 @@
 	vlog.Info("cancelling initial call")
 	cancel()
 
-	vlog.Info("waiting for children to be cancelled")
-	<-c1.cancelled
-	<-c2.cancelled
+	vlog.Info("waiting for children to be canceled")
+	<-c1.canceled
+	<-c2.canceled
 }
diff --git a/runtimes/google/ipc/client.go b/runtimes/google/ipc/client.go
index c0d41df..a534864 100644
--- a/runtimes/google/ipc/client.go
+++ b/runtimes/google/ipc/client.go
@@ -53,7 +53,6 @@
 	errAuthError = verror.Register(pkgPath+".authError", verror.RetryRefetch, "authentication error from server {3}{:4}")
 
 	errSystemRetry = verror.Register(pkgPath+".sysErrorRetryConnection", verror.RetryConnection, "{:3:}")
-	errClosingFlow = verror.Register(pkgPath+".errClosingFlow", verror.NoRetry, "{:3:}")
 
 	errVomEncoder = verror.Register(pkgPath+".vomEncoder", verror.NoRetry, "failed to create vom encoder {:3}")
 	errVomDecoder = verror.Register(pkgPath+".vomDecoder", verror.NoRetry, "failed to create vom decoder {:3}")
@@ -296,15 +295,6 @@
 	return
 }
 
-func allowCancel(opts []ipc.CallOpt) bool {
-	for _, o := range opts {
-		if _, ok := o.(inaming.NoCancel); ok {
-			return false
-		}
-	}
-	return true
-}
-
 func mkDischargeImpetus(serverBlessings []string, method string, args []interface{}) security.DischargeImpetus {
 	var impetus security.DischargeImpetus
 	if len(serverBlessings) > 0 {
@@ -324,7 +314,7 @@
 }
 
 // startCall ensures StartCall always returns verror.E.
-func (c *client) startCall(ctx *context.T, name, method string, args []interface{}, opts []ipc.CallOpt) (ipc.Call, verror.E) {
+func (c *client) startCall(ctx *context.T, name, method string, args []interface{}, opts []ipc.CallOpt) (ipc.Call, error) {
 	if !ctx.Initialized() {
 		return nil, verror.ExplicitMake(verror.BadArg, i18n.NoLangID, "ipc.Client", "StartCall")
 	}
@@ -343,7 +333,7 @@
 		deadline = time.Now().Add(time.Duration(r))
 	}
 
-	var lastErr verror.E
+	var lastErr error
 	for retries := 0; ; retries++ {
 		if retries != 0 {
 			if !backoff(retries, deadline) {
@@ -421,16 +411,19 @@
 // (all that serve "name"), but will invoke the method on at most one of them
 // (the server running on the most preferred protocol and network amongst all
 // the servers that were successfully connected to and authorized).
-func (c *client) tryCall(ctx *context.T, name, method string, args []interface{}, opts []ipc.CallOpt) (ipc.Call, verror.ActionCode, verror.E) {
+func (c *client) tryCall(ctx *context.T, name, method string, args []interface{}, opts []ipc.CallOpt) (ipc.Call, verror.ActionCode, error) {
 	var resolved *naming.MountEntry
 	var pattern security.BlessingPattern
 	var err error
 	if resolved, err = c.ns.Resolve(ctx, name, getResolveOpts(opts)...); err != nil {
 		vlog.Errorf("Resolve: %v", err)
+		// We always return NoServers as the error so that the caller knows
+		// that it's ok to retry the operation since the name may be registered
+		// in the near future.
 		if verror.Is(err, naming.ErrNoSuchName.ID) {
 			return nil, verror.RetryRefetch, verror.Make(verror.NoServers, ctx, name)
 		}
-		return nil, verror.NoRetry, verror.Make(verror.NoExist, ctx, name, err)
+		return nil, verror.NoRetry, verror.Make(verror.NoServers, ctx, name, err)
 	} else {
 		pattern = security.BlessingPattern(resolved.Pattern)
 		if len(resolved.Servers) == 0 {
@@ -500,10 +493,7 @@
 				continue
 			}
 
-			var doneChan <-chan struct{}
-			if allowCancel(opts) {
-				doneChan = ctx.Done()
-			}
+			doneChan := ctx.Done()
 			r.flow.SetDeadline(doneChan)
 
 			var (
@@ -743,14 +733,33 @@
 	return fc, nil
 }
 
-func (fc *flowClient) close(verr verror.E) verror.E {
-	if err := fc.flow.Close(); err != nil && verr == nil {
-		verr = verror.Make(errClosingFlow, fc.ctx, err)
+func (fc *flowClient) close(err error) error {
+	if cerr := fc.flow.Close(); cerr != nil && err == nil {
+		return verror.Make(verror.Internal, fc.ctx, err)
 	}
-	return verr
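+	// Prefer Timeout or Canceled over a low-level protocol error (and Canceled over Timeout) based on the state of the call's context.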
+	switch {
+	case verror.Is(err, verror.BadProtocol.ID):
+		switch fc.ctx.Err() {
+		case context.DeadlineExceeded:
+			// TODO(cnicolaou,m3b): reintroduce 'append' when the new verror API is done.
+			//return verror.Append(verror.Make(verror.Timeout, fc.ctx), verr)
+			return verror.Make(verror.Timeout, fc.ctx, err.Error())
+		case context.Canceled:
+			// TODO(cnicolaou,m3b): reintroduce 'append' when the new verror API is done.
+			//return verror.Append(verror.Make(verror.Canceled, fc.ctx), verr)
+			return verror.Make(verror.Canceled, fc.ctx, err.Error())
+		}
+	case verror.Is(err, verror.Timeout.ID):
+		// Canceled trumps timeout.
+		if fc.ctx.Err() == context.Canceled {
+			// TODO(cnicolaou,m3b): reintroduce 'append' when the new verror API is done.
+			return verror.Make(verror.Canceled, fc.ctx, err.Error())
+		}
+	}
+	return err
 }
 
-func (fc *flowClient) start(suffix, method string, args []interface{}, timeout time.Duration, blessings security.Blessings) verror.E {
+func (fc *flowClient) start(suffix, method string, args []interface{}, timeout time.Duration, blessings security.Blessings) error {
 	// Fetch any discharges for third-party caveats on the client's blessings
 	// if this client owns a discharge-client.
 	if self := fc.flow.LocalBlessings(); self != nil && fc.dc != nil {
@@ -812,10 +821,10 @@
 func decodeNetError(ctx *context.T, err error) verror.IDAction {
 	if neterr, ok := err.(net.Error); ok {
 		if neterr.Timeout() || neterr.Temporary() {
-			// If a read is cancelled in the lower levels we see
+			// If a read is canceled in the lower levels we see
 			// a timeout error - see readLocked in vc/reader.go
 			if ctx.Err() == context.Canceled {
-				return verror.Cancelled
+				return verror.Canceled
 			}
 			return verror.Timeout
 		}
@@ -900,7 +909,7 @@
 }
 
 // finish ensures Finish always returns verror.E.
-func (fc *flowClient) finish(resultptrs ...interface{}) verror.E {
+func (fc *flowClient) finish(resultptrs ...interface{}) error {
 	if fc.finished {
 		err := verror.Make(errClientFinishAlreadyCalled, fc.ctx)
 		return fc.close(verror.Make(verror.BadState, fc.ctx, err))
diff --git a/runtimes/google/ipc/client_test.go b/runtimes/google/ipc/client_test.go
index ad6eca7..b91f8c0 100644
--- a/runtimes/google/ipc/client_test.go
+++ b/runtimes/google/ipc/client_test.go
@@ -205,17 +205,10 @@
 	return eps[0].Name(), deferFn
 }
 
-func testForVerror(t *testing.T, err error, verr ...verror.IDAction) {
+func testForVerror(t *testing.T, err error, verr verror.IDAction) {
 	_, file, line, _ := runtime.Caller(1)
 	loc := fmt.Sprintf("%s:%d", filepath.Base(file), line)
-	found := false
-	for _, v := range verr {
-		if verror.Is(err, v.ID) {
-			found = true
-			break
-		}
-	}
-	if !found {
+	if !verror.Is(err, verr.ID) {
 		if _, ok := err.(verror.E); !ok {
 			t.Fatalf("%s: err %v not a verror", loc, err)
 		}
@@ -235,12 +228,11 @@
 	ctx, _ = context.WithTimeout(ctx, 100*time.Millisecond)
 	call, err := veyron2.GetClient(ctx).StartCall(ctx, name, "Sleep", nil)
 	if err != nil {
-		testForVerror(t, err, verror.Timeout, verror.BadProtocol)
+		testForVerror(t, err, verror.Timeout)
 		return
 	}
 	verr := call.Finish(&err)
-	// TODO(cnicolaou): this should be Timeout only.
-	testForVerror(t, verr, verror.Timeout, verror.BadProtocol)
+	testForVerror(t, verr, verror.Timeout)
 }
 
 func TestArgsAndResponses(t *testing.T) {
@@ -291,7 +283,7 @@
 	testForVerror(t, verr, verror.NoAccess)
 }
 
-func TestCancelledBeforeFinish(t *testing.T) {
+func TestCanceledBeforeFinish(t *testing.T) {
 	ctx, shutdown := newCtx()
 	defer shutdown()
 	name, fn := initServer(t, ctx)
@@ -305,11 +297,11 @@
 	// Cancel before we call finish.
 	cancel()
 	verr := call.Finish(&err)
-	// TOO(cnicolaou): this should be Cancelled only.
-	testForVerror(t, verr, verror.Cancelled, verror.BadProtocol)
+	// TODO(cnicolaou): this should be Canceled only.
+	testForVerror(t, verr, verror.Canceled)
 }
 
-func TestCancelledDuringFinish(t *testing.T) {
+func TestCanceledDuringFinish(t *testing.T) {
 	ctx, shutdown := newCtx()
 	defer shutdown()
 	name, fn := initServer(t, ctx)
@@ -326,8 +318,7 @@
 		cancel()
 	}()
 	verr := call.Finish(&err)
-	// TOO(cnicolaou): this should be Cancelled only.
-	testForVerror(t, verr, verror.Cancelled, verror.BadProtocol)
+	testForVerror(t, verr, verror.Canceled)
 }
 
 func TestRendezvous(t *testing.T) {
@@ -358,7 +349,7 @@
 	response := ""
 	verr := call.Finish(&response, &err)
 	if verr != nil {
-		testForVerror(t, verr, verror.Cancelled)
+		testForVerror(t, verr, verror.Canceled)
 		return
 	}
 	if got, want := response, "message: hello\n"; got != want {
@@ -395,8 +386,8 @@
 	ctx, _ = context.WithTimeout(ctx, 300*time.Millisecond)
 	call, err := veyron2.GetClient(ctx).StartCall(ctx, name, "Source", []interface{}{want})
 	if err != nil {
-		if !verror.Is(err, verror.Timeout.ID) && !verror.Is(err, verror.BadProtocol.ID) {
-			t.Fatalf("verror should be a timeout or badprotocol, not %s: stack %s",
+		if !verror.Is(err, verror.Timeout.ID) {
+			t.Fatalf("verror should be a timeout not %s: stack %s",
 				err, err.(verror.E).Stack())
 		}
 		return
@@ -413,11 +404,11 @@
 			continue
 		}
 		// TODO(cnicolaou): this should be Timeout only.
-		testForVerror(t, err, verror.Timeout, verror.BadProtocol)
+		testForVerror(t, err, verror.Timeout)
 		break
 	}
 	verr := call.Finish(&err)
-	testForVerror(t, verr, verror.Timeout, verror.BadProtocol)
+	testForVerror(t, verr, verror.Timeout)
 }
 
 func TestStreamAbort(t *testing.T) {
@@ -465,14 +456,14 @@
 	_, fn := runMountTable(t, ctx)
 	defer fn()
 	name := "noservers"
-	ctx, _ = context.WithTimeout(ctx, 300*time.Millisecond)
+	ctx, _ = context.WithTimeout(ctx, 1000*time.Millisecond)
 	call, verr := veyron2.GetClient(ctx).StartCall(ctx, name, "Sleep", nil)
 	if verr != nil {
-		testForVerror(t, verr, verror.Timeout, verror.BadProtocol, verror.NoExist)
+		testForVerror(t, verr, verror.NoServers)
 		return
 	}
 	err := call.Finish(&verr)
-	testForVerror(t, err, verror.Timeout, verror.BadProtocol, verror.NoExist)
+	testForVerror(t, err, verror.NoServers)
 }
 
 func TestNoMountTable(t *testing.T) {
diff --git a/runtimes/google/ipc/full_test.go b/runtimes/google/ipc/full_test.go
index f9859b2..2db17e4 100644
--- a/runtimes/google/ipc/full_test.go
+++ b/runtimes/google/ipc/full_test.go
@@ -88,7 +88,7 @@
 }
 
 func testContextWithoutDeadline() *context.T {
-	var ctx *context.T
+	ctx, _ := context.RootContext()
 	ctx, err := ivtrace.Init(ctx, flags.VtraceFlags{})
 	if err != nil {
 		panic(err)
diff --git a/runtimes/google/ipc/server.go b/runtimes/google/ipc/server.go
index 936402c..11802cd 100644
--- a/runtimes/google/ipc/server.go
+++ b/runtimes/google/ipc/server.go
@@ -36,20 +36,53 @@
 	// for communicating from server to client.
 )
 
+// state for each requested listen address
+type listenState struct {
+	protocol, address string
+	ln                stream.Listener
+	lep               naming.Endpoint
+	lnerr, eperr      error
+	roaming           bool
+	// We keep track of all of the endpoints, the port and a copy of
+	// the original listen endpoint for use with roaming network changes.
+	ieps     []*inaming.Endpoint // list of currently active eps
+	port     string              // port to use for creating new eps
+	protoIEP inaming.Endpoint    // endpoint to use as template for new eps (includes rid, versions etc)
+}
+
+// state for each requested proxy
+type proxyState struct {
+	endpoint naming.Endpoint
+	err      verror.E
+}
+
+type dhcpState struct {
+	name      string
+	publisher *config.Publisher
+	stream    *config.Stream
+	ch        chan config.Setting // channel to receive dhcp settings over
+	err       error               // error status.
+	watchers  map[chan<- ipc.NetworkChange]struct{}
+}
+
 type server struct {
 	sync.Mutex
+	// context used by the server to make internal RPCs, error messages etc.
+	ctx          *context.T
+	cancel       context.CancelFunc   // function to cancel the above context.
 	state        serverState          // track state of the server.
-	ctx          *context.T           // context used by the server to make internal RPCs.
 	streamMgr    stream.Manager       // stream manager to listen for new flows.
 	publisher    publisher.Publisher  // publisher to publish mounttable mounts.
 	listenerOpts []stream.ListenerOpt // listener opts for Listen.
+	dhcpState    *dhcpState           // dhcpState, nil if not using dhcp
 
-	// listeners created by Listen for servers and proxies
-	listeners map[stream.Listener]struct{}
-	// dhcpListeners created by Listen.
-	dhcpListeners map[*dhcpListener]struct{}
+	// maps that contain state on listeners.
+	listenState map[*listenState]struct{}
+	listeners   map[stream.Listener]struct{}
+
 	// state of proxies keyed by the name of the proxy
 	proxies map[string]proxyState
+
 	// all endpoints generated and returned by this server
 	endpoints []naming.Endpoint
 
@@ -64,6 +97,7 @@
 	ipNets           []*net.IPNet
 	ns               naming.Namespace
 	servesMountTable bool
+
 	// TODO(cnicolaou): add roaming stats to ipcStats
 	stats *ipcStats // stats for this server.
 }
@@ -117,21 +151,6 @@
 
 var _ ipc.Server = (*server)(nil)
 
-type dhcpListener struct {
-	sync.Mutex
-	publisher *config.Publisher   // publisher used to fork the stream
-	name      string              // name of the publisher stream
-	eps       []*inaming.Endpoint // endpoint returned after listening
-	pubAddrs  []ipc.Address       // addresses to publish
-	pubPort   string              // port to use with the publish addresses
-	ch        chan config.Setting // channel to receive settings over
-}
-
-type proxyState struct {
-	endpoint naming.Endpoint
-	err      verror.E
-}
-
 // This option is used to sort and filter the endpoints when resolving the
 // proxy name from a mounttable.
 type PreferredServerResolveProtocols []string
@@ -147,19 +166,22 @@
 func (ReservedNameDispatcher) IPCServerOpt() {}
 
 func InternalNewServer(ctx *context.T, streamMgr stream.Manager, ns naming.Namespace, client ipc.Client, opts ...ipc.ServerOpt) (ipc.Server, error) {
+	ctx, cancel := context.WithRootCancel(ctx)
 	ctx, _ = vtrace.SetNewSpan(ctx, "NewServer")
 	statsPrefix := naming.Join("ipc", "server", "routing-id", streamMgr.RoutingID().String())
 	s := &server{
-		ctx:           ctx,
-		streamMgr:     streamMgr,
-		publisher:     publisher.New(ctx, ns, publishPeriod),
-		listeners:     make(map[stream.Listener]struct{}),
-		proxies:       make(map[string]proxyState),
-		dhcpListeners: make(map[*dhcpListener]struct{}),
-		stoppedChan:   make(chan struct{}),
-		ipNets:        ipNetworks(),
-		ns:            ns,
-		stats:         newIPCStats(statsPrefix),
+
+		ctx:         ctx,
+		cancel:      cancel,
+		streamMgr:   streamMgr,
+		publisher:   publisher.New(ctx, ns, publishPeriod),
+		listenState: make(map[*listenState]struct{}),
+		listeners:   make(map[stream.Listener]struct{}),
+		proxies:     make(map[string]proxyState),
+		stoppedChan: make(chan struct{}),
+		ipNets:      ipNetworks(),
+		ns:          ns,
+		stats:       newIPCStats(statsPrefix),
 	}
 	var (
 		principal security.Principal
@@ -207,8 +229,18 @@
 	status.State = externalStates[s.state]
 	status.ServesMountTable = s.servesMountTable
 	status.Mounts = s.publisher.Status()
-	status.Endpoints = make([]naming.Endpoint, len(s.endpoints))
-	copy(status.Endpoints, s.endpoints)
+	status.Endpoints = []naming.Endpoint{}
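+	// Gather the currently active endpoints, and any listen or endpoint errors, from the per-listener state.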
+	for ls, _ := range s.listenState {
+		if ls.eperr != nil {
+			status.Errors = append(status.Errors, ls.eperr)
+		}
+		if ls.lnerr != nil {
+			status.Errors = append(status.Errors, ls.lnerr)
+		}
+		for _, iep := range ls.ieps {
+			status.Endpoints = append(status.Endpoints, iep)
+		}
+	}
 	status.Proxies = make([]ipc.ProxyStatus, 0, len(s.proxies))
 	for k, v := range s.proxies {
 		status.Proxies = append(status.Proxies, ipc.ProxyStatus{k, v.endpoint, v.err})
@@ -216,6 +248,24 @@
 	return status
 }
 
+func (s *server) WatchNetwork(ch chan<- ipc.NetworkChange) {
+	defer vlog.LogCall()()
+	s.Lock()
+	defer s.Unlock()
+	if s.dhcpState != nil {
+		s.dhcpState.watchers[ch] = struct{}{}
+	}
+}
+
+func (s *server) UnwatchNetwork(ch chan<- ipc.NetworkChange) {
+	defer vlog.LogCall()()
+	s.Lock()
+	defer s.Unlock()
+	if s.dhcpState != nil {
+		delete(s.dhcpState.watchers, ch)
+	}
+}
+
 // resolveToEndpoint resolves an object name or address to an endpoint.
 func (s *server) resolveToEndpoint(address string) (string, error) {
 	var resolved *naming.MountEntry
@@ -246,56 +296,24 @@
 	return "", fmt.Errorf("unable to resolve %q to an endpoint", address)
 }
 
-func addrFromIP(ip net.IP) ipc.Address {
-	return &netstate.AddrIfc{
-		Addr: &net.IPAddr{IP: ip},
-	}
-}
-
-/*
-// getIPRoamingAddrs finds an appropriate set of addresss to publish
-// externally and also determines if it's sensible to allow roaming.
-// It returns the host address of the first suitable address that
-// can be used and the port number that can be used with all addresses.
-// The host is required to allow the caller to construct an endpoint
-// that can be returned to the caller of Listen.
-func (s *server) getIPRoamingAddrs(chooser ipc.AddressChooser, iep *inaming.Endpoint) (addresses []ipc.Address, host string, port string, roaming bool, err error) {
-	host, port, err = net.SplitHostPort(iep.Address)
-	if err != nil {
-		return nil, "", "", false, err
-	}
-	ip := net.ParseIP(host)
-	if ip == nil {
-		return nil, "", "", false, fmt.Errorf("failed to parse %q as an IP host", host)
-	}
-	if ip.IsUnspecified() && chooser != nil {
-		// Need to find a usable IP address since the call to listen
-		// didn't specify one.
-		if addrs, err := netstate.GetAccessibleIPs(); err == nil {
-			if a, err := chooser(iep.Protocol, addrs); err == nil && len(a) > 0 {
-				phost := a[0].Address().String()
-				iep.Address = net.JoinHostPort(phost, port)
-				return a, phost, port, true, nil
-			}
-		}
-		return []ipc.Address{addrFromIP(ip)}, host, port, true, nil
-	}
-	// Listen used a fixed IP address, which we take to mean that
-	// roaming is not desired.
-	return []ipc.Address{addrFromIP(ip)}, host, port, false, nil
-}
-*/
-
 // getPossibleAddrs returns an appropriate set of addresses that could be used
 // to contact the supplied protocol, host, port parameters using the supplied
 // chooser function. It returns an indication of whether the supplied address
 // was fully specified or not, returning false if the address was fully
 // specified, and true if it was not.
 func getPossibleAddrs(protocol, host, port string, chooser ipc.AddressChooser) ([]ipc.Address, bool, error) {
+
 	ip := net.ParseIP(host)
 	if ip == nil {
 		return nil, false, fmt.Errorf("failed to parse %q as an IP host", host)
 	}
+
+	addrFromIP := func(ip net.IP) ipc.Address {
+		return &netstate.AddrIfc{
+			Addr: &net.IPAddr{IP: ip},
+		}
+	}
+
 	if ip.IsUnspecified() {
 		if chooser != nil {
 			// Need to find a usable IP address since the call to listen
@@ -315,128 +333,57 @@
 }
 
 // createEndpoints creates appropriate inaming.Endpoint instances for
-// all of the externally accessible networrk addresses that can be used
+// all of the externally accessible network addresses that can be used
 // to reach this server.
-func (s *server) createEndpoints(lep naming.Endpoint, chooser ipc.AddressChooser) ([]*inaming.Endpoint, bool, error) {
+func (s *server) createEndpoints(lep naming.Endpoint, chooser ipc.AddressChooser) ([]*inaming.Endpoint, string, bool, error) {
 	iep, ok := lep.(*inaming.Endpoint)
 	if !ok {
-		return nil, false, fmt.Errorf("internal type conversion error for %T", lep)
+		return nil, "", false, fmt.Errorf("internal type conversion error for %T", lep)
 	}
 	if !strings.HasPrefix(iep.Protocol, "tcp") &&
 		!strings.HasPrefix(iep.Protocol, "ws") {
-		// If not tcp or ws, just return the endpoint we were given.
-		return []*inaming.Endpoint{iep}, false, nil
+		// If not tcp, ws, or wsh, just return the endpoint we were given.
+		return []*inaming.Endpoint{iep}, "", false, nil
 	}
 
 	host, port, err := net.SplitHostPort(iep.Address)
 	if err != nil {
-		return nil, false, err
+		return nil, "", false, err
 	}
 	addrs, unspecified, err := getPossibleAddrs(iep.Protocol, host, port, chooser)
 	if err != nil {
-		return nil, false, err
+		return nil, port, false, err
 	}
 	ieps := make([]*inaming.Endpoint, 0, len(addrs))
 	for _, addr := range addrs {
 		n, err := inaming.NewEndpoint(lep.String())
 		if err != nil {
-			return nil, false, err
+			return nil, port, false, err
 		}
 		n.IsMountTable = s.servesMountTable
-		//n.Protocol = addr.Address().Network()
 		n.Address = net.JoinHostPort(addr.Address().String(), port)
 		ieps = append(ieps, n)
 	}
-	return ieps, unspecified, nil
-}
-
-/*
-// configureEPAndRoaming configures the endpoint by filling in its Address
-// portion with the appropriately selected network address, it also
-// returns an indication of whether this endpoint is appropriate for
-// roaming and the set of addresses that should be published.
-func (s *server) configureEPAndRoaming(spec ipc.ListenSpec, ep naming.Endpoint) (bool, []ipc.Address, *inaming.Endpoint, error) {
-	iep, ok := ep.(*inaming.Endpoint)
-	if !ok {
-		return false, nil, nil, fmt.Errorf("internal type conversion error for %T", ep)
-	}
-	if !strings.HasPrefix(spec.Addrs[0].Protocol, "tcp") &&
-		!strings.HasPrefix(spec.Addrs[0].Protocol, "ws") {
-		return false, nil, iep, nil
-	}
-	pubAddrs, pubHost, pubPort, roaming, err := s.getIPRoamingAddrs(spec.AddressChooser, iep)
-	if err != nil {
-		return false, nil, iep, err
-	}
-	iep.Address = net.JoinHostPort(pubHost, pubPort)
-	return roaming, pubAddrs, iep, nil
-}
-*/
-
-// TODO(cnicolaou): get rid of this in a subsequent CL - it's not used
-// and it's not clear it's needed.
-type listenError struct {
-	err    verror.E
-	errors map[struct{ Protocol, Address string }]error
-}
-
-func newError() *listenError {
-	return &listenError{errors: make(map[struct{ Protocol, Address string }]error)}
-}
-
-func ErrorDetails(le *listenError) map[struct{ Protocol, Address string }]error {
-	return le.errors
-}
-
-// Implements error
-func (le *listenError) Error() string {
-	s := le.err.Error()
-	for k, v := range le.errors {
-		s += fmt.Sprintf("(%s,%s:%s) ", k.Protocol, k.Address, v)
-	}
-	return strings.TrimRight(s, " ")
-}
-
-func (le *listenError) ErrorID() old_verror.ID {
-	return le.err.ErrorID()
-}
-
-func (le *listenError) Action() verror.ActionCode {
-	return le.err.Action()
-}
-
-func (le *listenError) Params() []interface{} {
-	return le.err.Params()
-}
-
-func (le *listenError) HasMessage() bool {
-	return le.err.HasMessage()
-}
-
-func (le *listenError) Stack() verror.PCs {
-	return le.err.Stack()
-}
-
-func (s *server) newBadState(m string) *listenError {
-	return &listenError{err: verror.Make(verror.BadState, s.ctx, m)}
-}
-
-func (s *server) newBadArg(m string) *listenError {
-	return &listenError{err: verror.Make(verror.BadArg, s.ctx, m)}
+	return ieps, port, unspecified, nil
 }
 
 func (s *server) Listen(listenSpec ipc.ListenSpec) ([]naming.Endpoint, error) {
 	defer vlog.LogCall()()
+	useProxy := len(listenSpec.Proxy) > 0
+	if !useProxy && len(listenSpec.Addrs) == 0 {
+		return nil, verror.Make(verror.BadArg, s.ctx, "ListenSpec contains no proxy or addresses to listen on")
+	}
+
 	s.Lock()
 	defer s.Unlock()
+
 	if err := s.allowed(listening, "Listen"); err != nil {
 		return nil, err
 	}
 
-	useProxy := len(listenSpec.Proxy) > 0
-
-	// Start the proxy as early as possible.
-	if useProxy {
+	// Start the proxy as early as possible and ignore duplicate requests
+	// for the same proxy.
+	if _, inuse := s.proxies[listenSpec.Proxy]; useProxy && !inuse {
 		// We have a goroutine for listening on proxy connections.
 		s.active.Add(1)
 		go func() {
@@ -445,96 +392,78 @@
 		}()
 	}
 
-	var ieps []*inaming.Endpoint
-
-	type lnInfo struct {
-		ln stream.Listener
-		ep naming.Endpoint
-	}
-	linfo := []lnInfo{}
-	closeAll := func(lni []lnInfo) {
-		for _, li := range lni {
-			li.ln.Close()
-		}
-	}
-
 	roaming := false
+	lnState := make([]*listenState, 0, len(listenSpec.Addrs))
 	for _, addr := range listenSpec.Addrs {
 		if len(addr.Address) > 0 {
-			// Listen if we have a local address to listen on. Some situations
-			// just need a proxy (e.g. a browser extension).
-			tmpln, lep, err := s.streamMgr.Listen(addr.Protocol, addr.Address, s.listenerOpts...)
-			if err != nil {
-				closeAll(linfo)
-				vlog.Errorf("ipc: Listen on %s failed: %s", addr, err)
-				return nil, err
+			// Listen if we have a local address to listen on.
+			ls := &listenState{
+				protocol: addr.Protocol,
+				address:  addr.Address,
 			}
-			linfo = append(linfo, lnInfo{tmpln, lep})
-			tmpieps, tmpRoaming, err := s.createEndpoints(lep, listenSpec.AddressChooser)
-			if err != nil {
-				closeAll(linfo)
-				return nil, err
+			ls.ln, ls.lep, ls.lnerr = s.streamMgr.Listen(addr.Protocol, addr.Address, s.listenerOpts...)
+			lnState = append(lnState, ls)
+			if ls.lnerr != nil {
+				continue
 			}
-			ieps = append(ieps, tmpieps...)
-			if tmpRoaming {
+			ls.ieps, ls.port, ls.roaming, ls.eperr = s.createEndpoints(ls.lep, listenSpec.AddressChooser)
+			if ls.roaming && ls.eperr == nil {
+				ls.protoIEP = *ls.lep.(*inaming.Endpoint)
 				roaming = true
 			}
 		}
 	}
 
-	// TODO(cnicolaou): write a test for all of these error cases.
-	if len(ieps) == 0 {
-		if useProxy {
-			return nil, nil
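+	// At least one listener must have been created, unless a proxy was requested.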
+	found := false
+	for _, ls := range lnState {
+		if ls.ln != nil {
+			found = true
+			break
 		}
-		// no proxy.
-		if len(listenSpec.Addrs) > 0 {
-			// TODO(cnicolaou): should be verror2
-			return nil, fmt.Errorf("no endpoints")
-		}
-		// TODO(cnicolaou): should be verror2
-		return nil, fmt.Errorf("no proxy and no addresses requested")
+	}
+	if !found && !useProxy {
+		return nil, verror.Make(verror.BadArg, s.ctx, "failed to create any listeners")
 	}
 
-	// TODO(cnicolaou): return all of the eps and their errors, then again,
-	// it's not clear we need to....
-
-	if roaming && listenSpec.StreamPublisher != nil {
-		// TODO(cnicolaou): renable roaming in a followup CL.
-		/*
-			var dhcpl *dhcpListener
-			streamName := listenSpec.StreamName
-			ch := make(chan config.Setting)
-			if _, err := publisher.ForkStream(streamName, ch); err != nil {
-				return ieps[0], fmt.Errorf("failed to fork stream %q: %s", streamName, err)
-			}
-			dhcpl = &dhcpListener{eps: ieps, pubAddrs: pubAddrs, ch: ch, name: streamName, publisher: publisher}, iep, nil
+	if roaming && s.dhcpState == nil && listenSpec.StreamPublisher != nil {
+		// Create a dhcp listener if we haven't already done so.
+		dhcp := &dhcpState{
+			name:      listenSpec.StreamName,
+			publisher: listenSpec.StreamPublisher,
+			watchers:  make(map[chan<- ipc.NetworkChange]struct{}),
+		}
+		s.dhcpState = dhcp
+		dhcp.ch = make(chan config.Setting, 10)
+		dhcp.stream, dhcp.err = dhcp.publisher.ForkStream(dhcp.name, dhcp.ch)
+		if dhcp.err == nil {
 			// We have a goroutine to listen for dhcp changes.
 			s.active.Add(1)
 			go func() {
-				s.dhcpLoop(dhcpl)
+				s.dhcpLoop(dhcp.ch)
 				s.active.Done()
 			}()
-			s.dhcpListeners[dhcpl] = struct{}{}
-		*/
+		}
 	}
 
-	for _, li := range linfo {
-		s.listeners[li.ln] = struct{}{}
-		// We have a goroutine per listener to accept new flows.
-		// Each flow is served from its own goroutine.
-		s.active.Add(1)
-		go func(ln stream.Listener, ep naming.Endpoint) {
-			s.listenLoop(ln, ep)
-			s.active.Done()
-		}(li.ln, li.ep)
+	eps := make([]naming.Endpoint, 0, 10)
+	for _, ls := range lnState {
+		s.listenState[ls] = struct{}{}
+		if ls.ln != nil {
+			// We have a goroutine per listener to accept new flows.
+			// Each flow is served from its own goroutine.
+			s.active.Add(1)
+			go func(ln stream.Listener, ep naming.Endpoint) {
+				s.listenLoop(ln, ep)
+				s.active.Done()
+			}(ls.ln, ls.lep)
+		}
+
+		for _, iep := range ls.ieps {
+			s.publisher.AddServer(iep.String(), s.servesMountTable)
+			eps = append(eps, iep)
+		}
 	}
-	eps := make([]naming.Endpoint, len(ieps))
-	for i, iep := range ieps {
-		s.publisher.AddServer(iep.String(), s.servesMountTable)
-		eps[i] = iep
-		s.endpoints = append(s.endpoints, iep)
-	}
+
 	return eps, nil
 }
 
@@ -553,7 +482,6 @@
 		return nil, nil, fmt.Errorf("internal type conversion error for %T", ep)
 	}
 	s.Lock()
-	s.listeners[ln] = struct{}{}
 	s.proxies[proxy] = proxyState{iep, nil}
 	s.Unlock()
 	s.publisher.AddServer(iep.String(), s.servesMountTable)
@@ -587,7 +515,14 @@
 			// (1) Unpublish its name
 			s.publisher.RemoveServer(iep.String())
 			s.Lock()
-			s.proxies[proxy] = proxyState{iep, verror.Make(verror.NoServers, nil, err)}
+			if err != nil {
+				s.proxies[proxy] = proxyState{iep, verror.Make(verror.NoServers, s.ctx, err)}
+			} else {
+				// err will be nil if we're stopping.
+				s.proxies[proxy] = proxyState{iep, nil}
+				s.Unlock()
+				return
+			}
 			s.Unlock()
 		}
 
@@ -621,14 +556,44 @@
 	}
 }
 
+// addListener adds the supplied listener taking care to
+// check to see if we're already stopping. It returns true
+// if the listener was added.
+func (s *server) addListener(ln stream.Listener) bool {
+	s.Lock()
+	defer s.Unlock()
+	if s.isStopState() {
+		return false
+	}
+	s.listeners[ln] = struct{}{}
+	return true
+}
+
+// rmListener removes the supplied listener taking care to
+// check if we're already stopping. It returns true if the
+// listener was removed.
+func (s *server) rmListener(ln stream.Listener) bool {
+	s.Lock()
+	defer s.Unlock()
+	if s.isStopState() {
+		return false
+	}
+	delete(s.listeners, ln)
+	return true
+}
+
 func (s *server) listenLoop(ln stream.Listener, ep naming.Endpoint) error {
 	defer vlog.VI(1).Infof("ipc: Stopped listening on %s", ep)
 	var calls sync.WaitGroup
+
+	if !s.addListener(ln) {
+		// We're stopping.
+		return nil
+	}
+
 	defer func() {
 		calls.Wait()
-		s.Lock()
-		delete(s.listeners, ln)
-		s.Unlock()
+		s.rmListener(ln)
 	}()
 	for {
 		flow, err := ln.Accept()
@@ -655,56 +620,117 @@
 	}
 }
 
-/*
-func (s *server) applyChange(dhcpl *dhcpListener, addrs []net.Addr, fn func(string)) {
-	dhcpl.Lock()
-	defer dhcpl.Unlock()
-	for _, a := range addrs {
-		if ip := netstate.AsIP(a); ip != nil {
-			dhcpl.ep.Address = net.JoinHostPort(ip.String(), dhcpl.pubPort)
-			fn(dhcpl.ep.String())
-		}
-	}
-}
-
-func (s *server) dhcpLoop(dhcpl *dhcpListener) {
-	defer vlog.VI(1).Infof("ipc: Stopped listen for dhcp changes on %v", dhcpl.ep)
+func (s *server) dhcpLoop(ch chan config.Setting) {
+	defer vlog.VI(1).Infof("ipc: Stopped listen for dhcp changes")
 	vlog.VI(2).Infof("ipc: dhcp loop")
-
-	ep := *dhcpl.ep
-	// Publish all of the addresses
-	for _, pubAddr := range dhcpl.pubAddrs {
-		ep.Address = net.JoinHostPort(pubAddr.Address().String(), dhcpl.pubPort)
-		s.publisher.AddServer(ep.String(), s.servesMountTable)
-	}
-
-	for setting := range dhcpl.ch {
+	for setting := range ch {
 		if setting == nil {
 			return
 		}
 		switch v := setting.Value().(type) {
-		case bool:
-			return
-		case []net.Addr:
+		case []ipc.Address:
 			s.Lock()
-			if s.stopped {
+			if s.isStopState() {
 				s.Unlock()
 				return
 			}
-			publisher := s.publisher
-			s.Unlock()
+			var err error
+			var changed []naming.Endpoint
 			switch setting.Name() {
 			case ipc.NewAddrsSetting:
-				vlog.Infof("Added some addresses: %q", v)
-				s.applyChange(dhcpl, v, func(name string) { publisher.AddServer(name, s.servesMountTable) })
+				changed = s.addAddresses(v)
 			case ipc.RmAddrsSetting:
-				vlog.Infof("Removed some addresses: %q", v)
-				s.applyChange(dhcpl, v, publisher.RemoveServer)
+				changed, err = s.removeAddresses(v)
 			}
+			change := ipc.NetworkChange{
+				Time:    time.Now(),
+				State:   externalStates[s.state],
+				Setting: setting,
+				Changed: changed,
+				Error:   err,
+			}
+			vlog.VI(2).Infof("ipc: dhcp: change %v", change)
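+			// Notify watchers without blocking; the change is dropped if a watcher's channel is full.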
+			for ch, _ := range s.dhcpState.watchers {
+				select {
+				case ch <- change:
+				default:
+				}
+			}
+			s.Unlock()
+		default:
+			vlog.Errorf("ipc: dhcpLoop: unhandled setting type %T", v)
 		}
 	}
 }
-*/
+
+func getHost(address ipc.Address) string {
+	host, _, err := net.SplitHostPort(address.Address().String())
+	if err == nil {
+		return host
+	}
+	return address.Address().String()
+
+}
+
+// Remove all endpoints that have the same host address as the supplied
+// address parameter.
+func (s *server) removeAddresses(addresses []ipc.Address) ([]naming.Endpoint, error) {
+	var removed []naming.Endpoint
+	for _, address := range addresses {
+		host := getHost(address)
+		for ls, _ := range s.listenState {
+			if ls != nil && ls.roaming && len(ls.ieps) > 0 {
+				remaining := make([]*inaming.Endpoint, 0, len(ls.ieps))
+				for _, iep := range ls.ieps {
+					lnHost, _, err := net.SplitHostPort(iep.Address)
+					if err != nil {
+						lnHost = iep.Address
+					}
+					if lnHost == host {
+						vlog.VI(2).Infof("ipc: dhcp removing: %s", iep)
+						removed = append(removed, iep)
+						s.publisher.RemoveServer(iep.String())
+						continue
+					}
+					remaining = append(remaining, iep)
+				}
+				ls.ieps = remaining
+			}
+		}
+	}
+	return removed, nil
+}
+
+// Add new endpoints for the new address. There is no way to know with
+// 100% confidence which new endpoints to publish without shutting down
+// all network connections and reinitializing everything from scratch.
+// Instead, we find all roaming listeners with at least one endpoint
+// and create a new endpoint with the same port as the existing ones
+// but with the new address supplied to us by the dhcp code. As
+// an additional safeguard we reject the new address if it is not
+// externally accessible.
+// This places the onus on the dhcp/roaming code that sends us addresses
+// to ensure that those addresses are externally reachable.
+func (s *server) addAddresses(addresses []ipc.Address) []naming.Endpoint {
+	var added []naming.Endpoint
+	for _, address := range addresses {
+		if !netstate.IsAccessibleIP(address) {
+			return added
+		}
+		host := getHost(address)
+		for ls, _ := range s.listenState {
+			if ls != nil && ls.roaming {
+				niep := ls.protoIEP
+				niep.Address = net.JoinHostPort(host, ls.port)
+				ls.ieps = append(ls.ieps, &niep)
+				vlog.VI(2).Infof("ipc: dhcp adding: %s", niep)
+				s.publisher.AddServer(niep.String(), s.servesMountTable)
+				added = append(added, &niep)
+			}
+		}
+	}
+	return added
+}
 
 type leafDispatcher struct {
 	invoker ipc.Invoker
@@ -721,11 +747,11 @@
 func (s *server) Serve(name string, obj interface{}, authorizer security.Authorizer) error {
 	defer vlog.LogCall()()
 	if obj == nil {
-		return s.newBadArg("nil object")
+		return verror.Make(verror.BadArg, s.ctx, "nil object")
 	}
 	invoker, err := objectToInvoker(obj)
 	if err != nil {
-		return s.newBadArg(fmt.Sprintf("bad object: %v", err))
+		return verror.Make(verror.BadArg, s.ctx, fmt.Sprintf("bad object: %v", err))
 	}
 	return s.ServeDispatcher(name, &leafDispatcher{invoker, authorizer})
 }
@@ -733,7 +759,7 @@
 func (s *server) ServeDispatcher(name string, disp ipc.Dispatcher) error {
 	defer vlog.LogCall()()
 	if disp == nil {
-		return s.newBadArg("nil dispatcher")
+		return verror.Make(verror.BadArg, s.ctx, "nil dispatcher")
 	}
 	s.Lock()
 	defer s.Unlock()
@@ -751,7 +777,7 @@
 func (s *server) AddName(name string) error {
 	defer vlog.LogCall()()
 	if len(name) == 0 {
-		return s.newBadArg("name is empty")
+		return verror.Make(verror.BadArg, s.ctx, "name is empty")
 	}
 	s.Lock()
 	defer s.Unlock()
@@ -815,14 +841,27 @@
 		}(ln)
 	}
 
-	for dhcpl, _ := range s.dhcpListeners {
-		dhcpl.Lock()
-		dhcpl.publisher.CloseFork(dhcpl.name, dhcpl.ch)
-		dhcpl.ch <- config.NewBool("EOF", "stop", true)
-		dhcpl.Unlock()
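+	// Drain any settings still buffered in the dhcp channel, closing it if it hasn't been closed already.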
+	drain := func(ch chan config.Setting) {
+		for {
+			select {
+			case v := <-ch:
+				if v == nil {
+					return
+				}
+			default:
+				close(ch)
+				return
+			}
+		}
+	}
+
+	if dhcp := s.dhcpState; dhcp != nil {
+		dhcp.publisher.CloseFork(dhcp.name, dhcp.ch)
+		drain(dhcp.ch)
 	}
 
 	s.Unlock()
+
 	var firstErr error
 	for i := 0; i < nListeners; i++ {
 		if err := <-errCh; err != nil && firstErr == nil {
@@ -833,7 +872,22 @@
 	// accepted.
 
 	// Wait for the publisher and active listener + flows to finish.
-	s.active.Wait()
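+	// Wait for the goroutines to finish; if they take more than 5 minutes, log diagnostics and keep waiting.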
+	done := make(chan struct{}, 1)
+	go func() { s.active.Wait(); done <- struct{}{} }()
+
+	select {
+	case <-done:
+	case <-time.After(5 * time.Minute):
+		vlog.Errorf("Listener Close Error: %v", firstErr)
+		vlog.Errorf("Timed out waiting for goroutines to stop: %d listeners, %d still registered", nListeners, len(s.listeners))
+		for ln, _ := range s.listeners {
+			vlog.Errorf("Listener: %p", ln)
+		}
+		for ls, _ := range s.listenState {
+			vlog.Errorf("ListenState: %v", ls)
+		}
+		<-done
+	}
 
 	s.Lock()
 	defer s.Unlock()
@@ -842,6 +896,7 @@
 		return verror.Make(verror.Internal, s.ctx, firstErr)
 	}
 	s.state = stopped
+	s.cancel()
 	return nil
 }
 
diff --git a/runtimes/google/ipc/server_test.go b/runtimes/google/ipc/server_test.go
index 9caf0d3..d83367a 100644
--- a/runtimes/google/ipc/server_test.go
+++ b/runtimes/google/ipc/server_test.go
@@ -2,24 +2,26 @@
 
 import (
 	"fmt"
+	"net"
 	"os"
-	"path/filepath"
 	"reflect"
-	"runtime"
 	"sort"
 	"strings"
 	"testing"
 	"time"
 
+	"v.io/core/veyron2/config"
 	"v.io/core/veyron2/context"
 	"v.io/core/veyron2/ipc"
 	"v.io/core/veyron2/naming"
 	"v.io/core/veyron2/security"
 	verror "v.io/core/veyron2/verror2"
+	"v.io/core/veyron2/vlog"
 
 	"v.io/core/veyron/lib/expect"
 	"v.io/core/veyron/lib/modules"
 	"v.io/core/veyron/lib/modules/core"
+	"v.io/core/veyron/lib/netstate"
 	tsecurity "v.io/core/veyron/lib/testutil/security"
 	imanager "v.io/core/veyron/runtimes/google/ipc/stream/manager"
 	"v.io/core/veyron/runtimes/google/ipc/stream/vc"
@@ -46,14 +48,15 @@
 // particular, it doesn't panic).
 func TestBadObject(t *testing.T) {
 	sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+	defer sm.Shutdown()
 	ns := tnaming.NewSimpleNamespace()
-
 	ctx := testContext()
 	server, err := testInternalNewServer(ctx, sm, ns)
 	if err != nil {
 		t.Fatal(err)
 	}
 	defer server.Stop()
+
 	if _, err := server.Listen(listenSpec); err != nil {
 		t.Fatalf("Listen failed: %v", err)
 	}
@@ -102,7 +105,6 @@
 	h.sh = sh
 	p, err := sh.Start(core.ProxyServerCommand, nil, args...)
 	if err != nil {
-		p.Shutdown(os.Stderr, os.Stderr)
 		t.Fatalf("unexpected error: %s", err)
 	}
 	h.proxy = p
@@ -146,6 +148,7 @@
 
 func testProxy(t *testing.T, spec ipc.ListenSpec, args ...string) {
 	sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+	defer sm.Shutdown()
 	ns := tnaming.NewSimpleNamespace()
 	client, err := InternalNewClient(sm, ns, vc.LocalPrincipal{tsecurity.NewPrincipal("client")})
 	if err != nil {
@@ -333,6 +336,45 @@
 	}
 }
 
+func TestServerArgs(t *testing.T) {
+	sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+	defer sm.Shutdown()
+	ns := tnaming.NewSimpleNamespace()
+	server, err := InternalNewServer(testContext(), sm, ns, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer server.Stop()
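+	// An empty ListenSpec, or one whose only address can't be listened on, should fail with BadArg;
+	// mixing a bad address with a good one succeeds but records the failure in Status().Errors.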
+	_, err = server.Listen(ipc.ListenSpec{})
+	if !verror.Is(err, verror.BadArg.ID) {
+		t.Fatalf("expected a BadArg error: got %v", err)
+	}
+	_, err = server.Listen(ipc.ListenSpec{Addrs: ipc.ListenAddrs{{"tcp", "*:0"}}})
+	if !verror.Is(err, verror.BadArg.ID) {
+		t.Fatalf("expected a BadArg error: got %v", err)
+	}
+	_, err = server.Listen(ipc.ListenSpec{
+		Addrs: ipc.ListenAddrs{
+			{"tcp", "*:0"},
+			{"tcp", "127.0.0.1:0"},
+		}})
+	if verror.Is(err, verror.BadArg.ID) {
+		t.Fatalf("expected a BadArg error: got %v", err)
+	}
+	status := server.Status()
+	if got, want := len(status.Errors), 1; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+	_, err = server.Listen(ipc.ListenSpec{Addrs: ipc.ListenAddrs{{"tcp", "*:0"}}})
+	if !verror.Is(err, verror.BadArg.ID) {
+		t.Fatalf("expected a BadArg error: got %v", err)
+	}
+	status = server.Status()
+	if got, want := len(status.Errors), 1; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+}
+
 type statusServer struct{ ch chan struct{} }
 
 func (s *statusServer) Hang(ctx ipc.ServerContext) {
@@ -341,12 +383,15 @@
 
 func TestServerStatus(t *testing.T) {
 	sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+	defer sm.Shutdown()
 	ns := tnaming.NewSimpleNamespace()
 	principal := vc.LocalPrincipal{tsecurity.NewPrincipal("testServerStatus")}
 	server, err := testInternalNewServer(testContext(), sm, ns, principal)
 	if err != nil {
 		t.Fatal(err)
 	}
+	defer server.Stop()
+
 	status := server.Status()
 	if got, want := status.State, ipc.ServerInit; got != want {
 		t.Fatalf("got %s, want %s", got, want)
@@ -426,31 +471,28 @@
 
 func TestServerStates(t *testing.T) {
 	sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+	defer sm.Shutdown()
 	ns := tnaming.NewSimpleNamespace()
 
-	loc := func() string {
-		_, file, line, _ := runtime.Caller(2)
-		return fmt.Sprintf("%s:%d", filepath.Base(file), line)
-	}
-
 	expectBadState := func(err error) {
 		if !verror.Is(err, verror.BadState.ID) {
-			t.Fatalf("%s: unexpected error: %v", loc(), err)
+			t.Fatalf("%s: unexpected error: %v", loc(1), err)
 		}
 	}
 
 	expectNoError := func(err error) {
 		if err != nil {
-			t.Fatalf("%s: unexpected error: %v", loc(), err)
+			t.Fatalf("%s: unexpected error: %v", loc(1), err)
 		}
 	}
 
 	server, err := testInternalNewServer(testContext(), sm, ns)
 	expectNoError(err)
+	defer server.Stop()
 
 	expectState := func(s ipc.ServerState) {
 		if got, want := server.Status().State, s; got != want {
-			t.Fatalf("%s: got %s, want %s", loc(), got, want)
+			t.Fatalf("%s: got %s, want %s", loc(1), got, want)
 		}
 	}
 
@@ -493,6 +535,345 @@
 	expectBadState(err)
 }
 
+func TestMountStatus(t *testing.T) {
+	sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+	defer sm.Shutdown()
+	ns := tnaming.NewSimpleNamespace()
+	server, err := testInternalNewServer(testContext(), sm, ns)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer server.Stop()
+
+	eps, err := server.Listen(ipc.ListenSpec{
+		Addrs: ipc.ListenAddrs{
+			{"tcp", "127.0.0.1:0"},
+			{"tcp", "127.0.0.1:0"},
+		}})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := len(eps), 2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+	if err = server.Serve("foo", &testServer{}, nil); err != nil {
+		t.Fatal(err)
+	}
+	status := server.Status()
+	if got, want := len(status.Mounts), 2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+	servers := status.Mounts.Servers()
+	if got, want := len(servers), 2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+	if got, want := servers, endpointToStrings(eps); !reflect.DeepEqual(got, want) {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+
+	// Add a second name and we should now see 4 mounts, 2 for each name.
+	if err := server.AddName("bar"); err != nil {
+		t.Fatal(err)
+	}
+	status = server.Status()
+	if got, want := len(status.Mounts), 4; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+	servers = status.Mounts.Servers()
+	if got, want := len(servers), 2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+	if got, want := servers, endpointToStrings(eps); !reflect.DeepEqual(got, want) {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+	names := status.Mounts.Names()
+	if got, want := len(names), 2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+	serversPerName := map[string][]string{}
+	for _, ms := range status.Mounts {
+		serversPerName[ms.Name] = append(serversPerName[ms.Name], ms.Server)
+	}
+	if got, want := len(serversPerName), 2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+	for _, name := range []string{"foo", "bar"} {
+		if got, want := len(serversPerName[name]), 2; got != want {
+			t.Fatalf("got %d, want %d", got, want)
+		}
+	}
+}
+
+func updateHost(ep naming.Endpoint, address string) naming.Endpoint {
+	niep := *(ep).(*inaming.Endpoint)
+	niep.Address = address
+	return &niep
+}
+
+func getIPAddrs(eps []naming.Endpoint) []ipc.Address {
+	hosts := map[string]struct{}{}
+	for _, ep := range eps {
+		iep := (ep).(*inaming.Endpoint)
+		h, _, _ := net.SplitHostPort(iep.Address)
+		if len(h) > 0 {
+			hosts[h] = struct{}{}
+		}
+	}
+	addrs := []ipc.Address{}
+	for h, _ := range hosts {
+		a := &netstate.AddrIfc{Addr: &net.IPAddr{IP: net.ParseIP(h)}}
+		addrs = append(addrs, a)
+	}
+	return addrs
+}
+
+func endpointToStrings(eps []naming.Endpoint) []string {
+	r := []string{}
+	for _, ep := range eps {
+		r = append(r, ep.String())
+	}
+	sort.Strings(r)
+	return r
+}
+
+func cmpEndpoints(got, want []naming.Endpoint) bool {
+	if len(got) != len(want) {
+		return false
+	}
+	return reflect.DeepEqual(endpointToStrings(got), endpointToStrings(want))
+}
+
+func getUniqPorts(eps []naming.Endpoint) []string {
+	ports := map[string]struct{}{}
+	for _, ep := range eps {
+		iep := ep.(*inaming.Endpoint)
+		_, p, _ := net.SplitHostPort(iep.Address)
+		ports[p] = struct{}{}
+	}
+	r := []string{}
+	for p, _ := range ports {
+		r = append(r, p)
+	}
+	return r
+}
+
+func TestRoaming(t *testing.T) {
+	sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+	defer sm.Shutdown()
+	ns := tnaming.NewSimpleNamespace()
+	server, err := testInternalNewServer(testContext(), sm, ns)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer server.Stop()
+
+	publisher := config.NewPublisher()
+	roaming := make(chan config.Setting)
+	stop, err := publisher.CreateStream("roaming", "roaming", roaming)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer func() { publisher.Shutdown(); <-stop }()
+
+	ipv4And6 := func(network string, addrs []ipc.Address) ([]ipc.Address, error) {
+		accessible := netstate.AddrList(addrs)
+		ipv4 := accessible.Filter(netstate.IsUnicastIPv4)
+		ipv6 := accessible.Filter(netstate.IsUnicastIPv6)
+		return append(ipv4, ipv6...), nil
+	}
+	spec := ipc.ListenSpec{
+		Addrs: ipc.ListenAddrs{
+			{"tcp", "*:0"},
+			{"tcp", ":0"},
+			{"tcp", ":0"},
+		},
+		StreamName:      "roaming",
+		StreamPublisher: publisher,
+		AddressChooser:  ipv4And6,
+	}
+
+	eps, err := server.Listen(spec)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(eps) == 0 {
+		t.Fatal(err)
+	}
+
+	if err = server.Serve("foo", &testServer{}, nil); err != nil {
+		t.Fatal(err)
+	}
+	if err = server.AddName("bar"); err != nil {
+		t.Fatal(err)
+	}
+
+	status := server.Status()
+	if got, want := status.Endpoints, eps; !cmpEndpoints(got, want) {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+
+	if got, want := len(status.Mounts), len(eps)*2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+
+	n1 := &netstate.AddrIfc{Addr: &net.IPAddr{IP: net.ParseIP("1.1.1.1")}}
+	n2 := &netstate.AddrIfc{Addr: &net.IPAddr{IP: net.ParseIP("2.2.2.2")}}
+
+	watcher := make(chan ipc.NetworkChange, 10)
+	server.WatchNetwork(watcher)
+	defer close(watcher)
+
+	roaming <- ipc.NewAddAddrsSetting([]ipc.Address{n1, n2})
+
+	waitForChange := func() *ipc.NetworkChange {
+		vlog.Infof("Waiting on %p", watcher)
+		select {
+		case c := <-watcher:
+			return &c
+		case <-time.After(time.Minute):
+			t.Fatalf("timed out: %s", loc(1))
+		}
+		return nil
+	}
+
+	// We expect 4 changes, one for each IP per usable listen spec addr.
+	change := waitForChange()
+	if got, want := len(change.Changed), 4; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+
+	nepsA := make([]naming.Endpoint, len(eps))
+	copy(nepsA, eps)
+	for _, p := range getUniqPorts(eps) {
+		nep1 := updateHost(eps[0], net.JoinHostPort("1.1.1.1", p))
+		nep2 := updateHost(eps[0], net.JoinHostPort("2.2.2.2", p))
+		nepsA = append(nepsA, []naming.Endpoint{nep1, nep2}...)
+	}
+
+	status = server.Status()
+	if got, want := status.Endpoints, nepsA; !cmpEndpoints(got, want) {
+		t.Fatalf("got %v, want %v [%d, %d]", got, want, len(got), len(want))
+	}
+
+	if got, want := len(status.Mounts), len(nepsA)*2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+	if got, want := len(status.Mounts.Servers()), len(nepsA); got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+
+	roaming <- ipc.NewRmAddrsSetting([]ipc.Address{n1})
+
+	// We expect 2 changes, one for each usable listen spec addr.
+	change = waitForChange()
+	if got, want := len(change.Changed), 2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+
+	nepsR := make([]naming.Endpoint, len(eps))
+	copy(nepsR, eps)
+	for _, p := range getUniqPorts(eps) {
+		nep2 := updateHost(eps[0], net.JoinHostPort("2.2.2.2", p))
+		nepsR = append(nepsR, nep2)
+	}
+
+	status = server.Status()
+	if got, want := status.Endpoints, nepsR; !cmpEndpoints(got, want) {
+		t.Fatalf("got %v, want %v [%d, %d]", got, want, len(got), len(want))
+	}
+
+	// Remove all addresses to mimic losing all connectivity.
+	roaming <- ipc.NewRmAddrsSetting(getIPAddrs(nepsR))
+
+	// We expect changes for all of the current endpoints
+	change = waitForChange()
+	if got, want := len(change.Changed), len(nepsR); got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+
+	status = server.Status()
+	if got, want := len(status.Mounts), 0; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+
+	roaming <- ipc.NewAddAddrsSetting([]ipc.Address{n1})
+	// We expect 2 changes, one for each usable listen spec addr.
+	change = waitForChange()
+	if got, want := len(change.Changed), 2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+}
+
+func TestWatcherDeadlock(t *testing.T) {
+	sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+	defer sm.Shutdown()
+	ns := tnaming.NewSimpleNamespace()
+	server, err := testInternalNewServer(testContext(), sm, ns)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer server.Stop()
+
+	publisher := config.NewPublisher()
+	roaming := make(chan config.Setting)
+	stop, err := publisher.CreateStream("roaming", "roaming", roaming)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer func() { publisher.Shutdown(); <-stop }()
+
+	spec := ipc.ListenSpec{
+		Addrs: ipc.ListenAddrs{
+			{"tcp", ":0"},
+		},
+		StreamName:      "roaming",
+		StreamPublisher: publisher,
+	}
+	eps, err := server.Listen(spec)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err = server.Serve("foo", &testServer{}, nil); err != nil {
+		t.Fatal(err)
+	}
+
+	// Set a watcher that we never read from - the intent is to make sure
+	// that the listener still listens to changes even though there is no
+	// goroutine to read from the watcher channel.
+	watcher := make(chan ipc.NetworkChange, 0)
+	server.WatchNetwork(watcher)
+	defer close(watcher)
+
+	// Remove all addresses to mimic losing all connectivity.
+	roaming <- ipc.NewRmAddrsSetting(getIPAddrs(eps))
+
+	// Add in two new addresses
+	n1 := &netstate.AddrIfc{Addr: &net.IPAddr{IP: net.ParseIP("1.1.1.1")}}
+	n2 := &netstate.AddrIfc{Addr: &net.IPAddr{IP: net.ParseIP("2.2.2.2")}}
+	roaming <- ipc.NewAddAddrsSetting([]ipc.Address{n1, n2})
+
+	neps := make([]naming.Endpoint, 0, len(eps))
+	for _, p := range getUniqPorts(eps) {
+		nep1 := updateHost(eps[0], net.JoinHostPort("1.1.1.1", p))
+		nep2 := updateHost(eps[0], net.JoinHostPort("2.2.2.2", p))
+		neps = append(neps, []naming.Endpoint{nep1, nep2}...)
+	}
+	then := time.Now()
+	for {
+		status := server.Status()
+		if got, want := status.Endpoints, neps; cmpEndpoints(got, want) {
+			break
+		}
+		time.Sleep(100 * time.Millisecond)
+		if time.Now().Sub(then) > time.Minute {
+			t.Fatalf("timed out waiting for changes to take effect")
+		}
+	}
+
+}
+
 // Required by modules framework.
 func TestHelperProcess(t *testing.T) {
 	modules.DispatchInTest()
diff --git a/runtimes/google/lib/publisher/publisher_test.go b/runtimes/google/lib/publisher/publisher_test.go
index 1fe0992..a70d1e2 100644
--- a/runtimes/google/lib/publisher/publisher_test.go
+++ b/runtimes/google/lib/publisher/publisher_test.go
@@ -23,7 +23,7 @@
 }
 
 func testContext() *context.T {
-	var ctx *context.T
+	ctx, _ := context.RootContext()
 	ctx, err := ivtrace.Init(ctx, flags.VtraceFlags{})
 	if err != nil {
 		panic(err)
diff --git a/runtimes/google/naming/namespace/mount.go b/runtimes/google/naming/namespace/mount.go
index 7b7ce3a..0cab494 100644
--- a/runtimes/google/naming/namespace/mount.go
+++ b/runtimes/google/naming/namespace/mount.go
@@ -37,7 +37,7 @@
 func unmountFromMountTable(ctx *context.T, client ipc.Client, name, server string, id string) (s status) {
 	s.id = id
 	ctx, _ = context.WithTimeout(ctx, callTimeout)
-	call, err := client.StartCall(ctx, name, "Unmount", []interface{}{server}, options.NoResolve{}, inaming.NoCancel{})
+	call, err := client.StartCall(ctx, name, "Unmount", []interface{}{server}, options.NoResolve{})
 	s.err = err
 	if err != nil {
 		return
@@ -139,7 +139,7 @@
 	f := func(context *context.T, mt, id string) status {
 		return unmountFromMountTable(ctx, client, mt, server, id)
 	}
-	err := ns.dispatch(ctx, name, f, inaming.NoCancel{})
+	err := ns.dispatch(ctx, name, f)
 	vlog.VI(1).Infof("Unmount(%s, %s) -> %v", name, server, err)
 	return err
 }
diff --git a/runtimes/google/naming/nocancel.go b/runtimes/google/naming/nocancel.go
deleted file mode 100644
index 85e1658..0000000
--- a/runtimes/google/naming/nocancel.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package naming
-
-// NoCancel is an option passed to a resolve or an IPC call that
-// instructs it to ignore cancellation for that call.
-// This is used to allow servers to unmount even after their context
-// has been cancelled.
-// TODO(mattr): Find a better mechanism for this.
-type NoCancel struct{}
-
-func (NoCancel) IPCCallOpt()   {}
-func (NoCancel) NSResolveOpt() {}
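
This change drops the NoCancel escape hatch described in the deleted file above; how unmount now outlives a cancelled context is not shown in this diff. As general background only, one common Go pattern for letting cleanup calls survive cancellation of the originating request is to derive the cleanup context from a background context with its own bounded timeout. A standalone sketch using the standard library's context package (not the veyron2 context type used in this codebase):

package main

import (
	"context"
	"fmt"
	"time"
)

// cleanup stands in for an unmount RPC that must run even after the request's
// own context has been cancelled.
func cleanup(ctx context.Context) error {
	select {
	case <-time.After(10 * time.Millisecond): // pretend to do the work
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	reqCtx, cancel := context.WithCancel(context.Background())
	cancel() // the caller has gone away

	// Deriving the cleanup context from reqCtx would fail immediately;
	// deriving it from Background with its own timeout lets cleanup proceed.
	cleanupCtx, cancelCleanup := context.WithTimeout(context.Background(), time.Second)
	defer cancelCleanup()

	fmt.Println("from cancelled ctx:", cleanup(reqCtx))
	fmt.Println("from detached ctx: ", cleanup(cleanupCtx))
}
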
diff --git a/runtimes/google/rt/runtime.go b/runtimes/google/rt/runtime.go
index fceb78e..3614268 100644
--- a/runtimes/google/rt/runtime.go
+++ b/runtimes/google/rt/runtime.go
@@ -21,6 +21,7 @@
 	"v.io/core/veyron2/vtrace"
 
 	"v.io/core/veyron/lib/flags"
+	"v.io/core/veyron/lib/stats"
 	_ "v.io/core/veyron/lib/stats/sysstats"
 	iipc "v.io/core/veyron/runtimes/google/ipc"
 	imanager "v.io/core/veyron/runtimes/google/ipc/stream/manager"
@@ -60,7 +61,6 @@
 	opts       []ipc.ServerOpt
 }
 
-// TODO(mattr,suharshs): Decide if Options would be better than this.
 func Init(ctx *context.T, appCycle veyron2.AppCycle, protocols []string, listenSpec *ipc.ListenSpec, flags flags.RuntimeFlags,
 	reservedDispatcher ipc.Dispatcher, dispatcherOpts ...ipc.ServerOpt) (*Runtime, *context.T, veyron2.Shutdown, error) {
 	r := &Runtime{deps: dependency.NewGraph()}
@@ -139,7 +139,7 @@
 	if err != nil {
 		return nil, nil, nil, err
 	}
-	ctx = context.WithValue(ctx, principalKey, principal)
+	ctx = r.setPrincipal(ctx, principal)
 
 	// Set up secure client.
 	ctx, _, err = r.SetNewClient(ctx)
@@ -149,9 +149,6 @@
 
 	ctx = r.SetBackgroundContext(ctx)
 
-	// TODO(suharshs,mattr): Go through the rt.Cleanup function and make sure everything
-	// gets cleaned up.
-
 	return r, ctx, r.shutdown, nil
 }
 
@@ -281,16 +278,20 @@
 	return cl
 }
 
+func (*Runtime) setPrincipal(ctx *context.T, principal security.Principal) *context.T {
+	// We uniquely identify a principal with "security/principal/<publicKey>"
+	principalName := "security/principal/" + principal.PublicKey().String()
+	stats.NewStringFunc(principalName+"/blessingstore", principal.BlessingStore().DebugString)
+	stats.NewStringFunc(principalName+"/blessingroots", principal.Roots().DebugString)
+	return context.WithValue(ctx, principalKey, principal)
+}
+
 func (r *Runtime) SetPrincipal(ctx *context.T, principal security.Principal) (*context.T, error) {
 	var err error
 	newctx := ctx
 
-	newctx = context.WithValue(newctx, principalKey, principal)
+	newctx = r.setPrincipal(ctx, principal)
 
-	// TODO(mattr, suharshs): The stream manager holds a cache of vifs
-	// which were negotiated with the principal, so we replace it here when the
-	// principal changes.  However we should negotiate the vif with a
-	// random principal and then we needn't replace this here.
 	if newctx, _, err = r.setNewStreamManager(newctx); err != nil {
 		return ctx, err
 	}
@@ -312,10 +313,6 @@
 func (r *Runtime) SetNewClient(ctx *context.T, opts ...ipc.ClientOpt) (*context.T, ipc.Client, error) {
 	otherOpts := append([]ipc.ClientOpt{}, opts...)
 
-	// TODO(mattr, suharshs):  Currently there are a lot of things that can come in as opts.
-	// Some of them will be removed as opts and simply be pulled from the context instead
-	// these are:
-	// stream.Manager, Namespace, LocalPrincipal, preferred protocols.
 	sm, _ := ctx.Value(streamManagerKey).(stream.Manager)
 	ns, _ := ctx.Value(namespaceKey).(naming.Namespace)
 	p, _ := ctx.Value(principalKey).(security.Principal)
@@ -341,9 +338,13 @@
 	return cl
 }
 
-func (*Runtime) setNewNamespace(ctx *context.T, roots ...string) (*context.T, naming.Namespace, error) {
+func (r *Runtime) setNewNamespace(ctx *context.T, roots ...string) (*context.T, naming.Namespace, error) {
 	ns, err := namespace.New(roots...)
-	// TODO(mattr): Copy cache settings.
+
+	if oldNS := r.GetNamespace(ctx); oldNS != nil {
+		ns.CacheCtl(oldNS.CacheCtl()...)
+	}
+
 	if err == nil {
 		ctx = context.WithValue(ctx, namespaceKey, ns)
 	}
diff --git a/runtimes/google/rt/runtime_test.go b/runtimes/google/rt/runtime_test.go
index 4f41e20..cb6b000 100644
--- a/runtimes/google/rt/runtime_test.go
+++ b/runtimes/google/rt/runtime_test.go
@@ -5,6 +5,7 @@
 
 	"v.io/core/veyron2"
 	"v.io/core/veyron2/context"
+	"v.io/core/veyron2/naming"
 
 	"v.io/core/veyron/lib/flags"
 	tsecurity "v.io/core/veyron/lib/testutil/security"
@@ -14,7 +15,7 @@
 
 // InitForTest creates a context for use in a test.
 func InitForTest(t *testing.T) (*rt.Runtime, *context.T, veyron2.Shutdown) {
-	ctx, cancel := context.WithCancel(nil)
+	ctx, cancel := context.RootContext()
 	r, ctx, shutdown, err := rt.Init(ctx, nil, nil, nil, flags.RuntimeFlags{}, nil)
 	if err != nil {
 		t.Fatal(err)
@@ -104,6 +105,7 @@
 	defer shutdown()
 
 	orig := r.GetNamespace(ctx)
+	orig.CacheCtl(naming.DisableCache(true))
 
 	newroots := []string{"/newroot1", "/newroot2"}
 	c2, ns, err := r.SetNewNamespace(ctx, newroots...)
@@ -125,6 +127,13 @@
 			t.Errorf("root %s found in ns, but we expected: %v", root, newroots)
 		}
 	}
+	opts := ns.CacheCtl()
+	if len(opts) != 1 {
+		t.Fatalf("Expected one option for cache control, got %v", opts)
+	}
+	if disable, ok := opts[0].(naming.DisableCache); !ok || !bool(disable) {
+		t.Errorf("expected a disable(true) message got %#v", opts[0])
+	}
 }
 
 func TestBackgroundContext(t *testing.T) {
diff --git a/runtimes/google/rt/security.go b/runtimes/google/rt/security.go
index b4a3aef..f06f9ed 100644
--- a/runtimes/google/rt/security.go
+++ b/runtimes/google/rt/security.go
@@ -13,7 +13,6 @@
 	"v.io/core/veyron2/security"
 
 	"v.io/core/veyron/lib/exec"
-	"v.io/core/veyron/lib/stats"
 	vsecurity "v.io/core/veyron/security"
 	"v.io/core/veyron/security/agent"
 )
@@ -24,10 +23,6 @@
 		return nil, err
 	}
 
-	// TODO(suharshs,mattr): Move this code to SetNewPrincipal and determine what their string should be.
-	stats.NewString("security/principal/key").Set(principal.PublicKey().String())
-	stats.NewStringFunc("security/principal/blessingstore", principal.BlessingStore().DebugString)
-	stats.NewStringFunc("security/principal/blessingroots", principal.Roots().DebugString)
 	return principal, nil
 }
 
diff --git a/runtimes/google/testing/mocks/ipc/simple_client_test.go b/runtimes/google/testing/mocks/ipc/simple_client_test.go
index a4bd4f0..f14eb7d 100644
--- a/runtimes/google/testing/mocks/ipc/simple_client_test.go
+++ b/runtimes/google/testing/mocks/ipc/simple_client_test.go
@@ -7,11 +7,8 @@
 )
 
 func testContext() *context.T {
-	// The nil context is not directly usable, we need to create
-	// a context specially.
-	type key struct{}
-	var ctx *context.T
-	return context.WithValue(ctx, key{}, nil)
+	ctx, _ := context.RootContext()
+	return ctx
 }
 
 func TestSuccessfulCalls(t *testing.T) {
diff --git a/services/mgmt/device/impl/device_installer.go b/services/mgmt/device/impl/device_installer.go
index ee59788..25e0e8a 100644
--- a/services/mgmt/device/impl/device_installer.go
+++ b/services/mgmt/device/impl/device_installer.go
@@ -15,6 +15,9 @@
 //       base/                 - initial installation of device manager
 //         deviced             - link to deviced (self)
 //         deviced.sh          - script to start the device manager
+//       device-data/
+//         persistent-args     - list of persistent arguments for the device
+//                               manager (json encoded)
 //       logs/                 - device manager logs will go here
 //     current                 - set as <Config.CurrentLink>
 //     agent_deviced.sh        - script to launch device manager under agent
@@ -156,6 +159,9 @@
 		// device manager in a different way.
 		Env: VeyronEnvironment(env),
 	}
+	if err := savePersistentArgs(root, envelope.Args); err != nil {
+		return err
+	}
 	if err := linkSelf(deviceDir, "deviced"); err != nil {
 		return err
 	}
diff --git a/services/mgmt/device/impl/device_service.go b/services/mgmt/device/impl/device_service.go
index d570d3e..629a283 100644
--- a/services/mgmt/device/impl/device_service.go
+++ b/services/mgmt/device/impl/device_service.go
@@ -21,6 +21,8 @@
 //         data
 //         signature
 //	 associated.accounts
+//       persistent-args       - list of persistent arguments for the device
+//                               manager (json encoded)
 //
 // The device manager is always expected to be started through the symbolic link
 // passed in as config.CurrentLink, which is monitored by an init daemon. This
@@ -139,6 +141,32 @@
 	return info, nil
 }
 
+func savePersistentArgs(root string, args []string) error {
+	dir := filepath.Join(root, "device-manager", "device-data")
+	if err := os.MkdirAll(dir, 0700); err != nil {
+		return fmt.Errorf("MkdirAll(%q) failed: %v", dir)
+	}
+	data, err := json.Marshal(args)
+	if err != nil {
+		return fmt.Errorf("Marshal(%v) failed: %v", args, err)
+	}
+	fileName := filepath.Join(dir, "persistent-args")
+	return ioutil.WriteFile(fileName, data, 0600)
+}
+
+func loadPersistentArgs(root string) ([]string, error) {
+	fileName := filepath.Join(root, "device-manager", "device-data", "persistent-args")
+	bytes, err := ioutil.ReadFile(fileName)
+	if err != nil {
+		return nil, err
+	}
+	args := []string{}
+	if err := json.Unmarshal(bytes, &args); err != nil {
+		return nil, fmt.Errorf("json.Unmarshal(%v) failed: %v", bytes, err)
+	}
+	return args, nil
+}
+
 func (s *deviceService) Claim(ctx ipc.ServerContext) error {
 	return s.disp.claimDeviceManager(ctx)
 }
@@ -355,6 +383,10 @@
 		vlog.Errorf("app title mismatch. Got %q, expected %q.", envelope.Title, application.DeviceManagerTitle)
 		return verror2.Make(ErrAppTitleMismatch, ctx)
 	}
+	// Read and merge persistent args, if present.
+	if args, err := loadPersistentArgs(s.config.Root); err == nil {
+		envelope.Args = append(envelope.Args, args...)
+	}
 	if s.config.Envelope != nil && reflect.DeepEqual(envelope, s.config.Envelope) {
 		return verror2.Make(ErrUpdateNoOp, ctx)
 	}
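
To make the new persistence mechanism concrete, here is a self-contained sketch of the same round trip: the argument list is json-encoded into device-manager/device-data/persistent-args, read back, and appended to a freshly fetched envelope's args, mirroring savePersistentArgs, loadPersistentArgs, and the merge in the update path above. The root directory and argument values are hypothetical examples, not taken from this change.

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

func main() {
	// Stand-in for the device manager's config.Root.
	root, err := ioutil.TempDir("", "device-data-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(root)

	// Save side (install time): json-encode the args and write them out.
	dir := filepath.Join(root, "device-manager", "device-data")
	if err := os.MkdirAll(dir, 0700); err != nil {
		panic(err)
	}
	installArgs := []string{"--veyron.tcp.address=127.0.0.1:0"} // example values
	data, err := json.Marshal(installArgs)
	if err != nil {
		panic(err)
	}
	file := filepath.Join(dir, "persistent-args")
	if err := ioutil.WriteFile(file, data, 0600); err != nil {
		panic(err)
	}

	// Load-and-merge side (update time): read the persisted args and append
	// them to the new envelope's args.
	persisted, err := ioutil.ReadFile(file)
	if err != nil {
		panic(err)
	}
	var args []string
	if err := json.Unmarshal(persisted, &args); err != nil {
		panic(err)
	}
	envelopeArgs := []string{"--some-new-flag"} // hypothetical args from the new envelope
	envelopeArgs = append(envelopeArgs, args...)
	fmt.Println(envelopeArgs)
}
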
diff --git a/services/mgmt/logreader/impl/reader.go b/services/mgmt/logreader/impl/reader.go
index e2cefb9..2d1908a 100644
--- a/services/mgmt/logreader/impl/reader.go
+++ b/services/mgmt/logreader/impl/reader.go
@@ -47,7 +47,7 @@
 		if f.ctx != nil {
 			select {
 			case <-f.ctx.Context().Done():
-				return 0, verror.Make(verror.Cancelled, nil)
+				return 0, verror.Make(verror.Canceled, nil)
 			default:
 			}
 		}
diff --git a/services/mgmt/profile/profiled/testdata/integration_test.go b/services/mgmt/profile/profiled/testdata/integration_test.go
index d331fd0..c20c8e3 100644
--- a/services/mgmt/profile/profiled/testdata/integration_test.go
+++ b/services/mgmt/profile/profiled/testdata/integration_test.go
@@ -1,7 +1,6 @@
 package integration_test
 
 import (
-	"io/ioutil"
 	"os"
 	"strings"
 	"syscall"
@@ -9,14 +8,12 @@
 
 	"v.io/core/veyron/lib/modules"
 	"v.io/core/veyron/lib/testutil/integration"
-	"v.io/core/veyron/lib/testutil/security"
 	_ "v.io/core/veyron/profiles"
 	"v.io/core/veyron2/naming"
 )
 
-func profileCommandOutput(t *testing.T, env integration.TestEnvironment, profileBin integration.TestBinary, expectError bool, command, credentials, name, suffix string) string {
+func profileCommandOutput(t *testing.T, env integration.TestEnvironment, profileBin integration.TestBinary, expectError bool, command, name, suffix string) string {
 	labelArgs := []string{
-		"-veyron.credentials=" + credentials,
 		"-veyron.namespace.root=" + env.RootMT(),
 		command, naming.Join(name, suffix),
 	}
@@ -32,18 +29,16 @@
 	return strings.TrimSpace(out)
 }
 
-func putProfile(t *testing.T, env integration.TestEnvironment, profileBin integration.TestBinary, credentials, name, suffix string) {
+func putProfile(t *testing.T, env integration.TestEnvironment, profileBin integration.TestBinary, name, suffix string) {
 	putArgs := []string{
-		"-veyron.credentials=" + credentials,
 		"-veyron.namespace.root=" + env.RootMT(),
 		"put", naming.Join(name, suffix),
 	}
 	profileBin.Start(putArgs...).WaitOrDie(os.Stdout, os.Stderr)
 }
 
-func removeProfile(t *testing.T, env integration.TestEnvironment, profileBin integration.TestBinary, credentials, name, suffix string) {
+func removeProfile(t *testing.T, env integration.TestEnvironment, profileBin integration.TestBinary, name, suffix string) {
 	removeArgs := []string{
-		"-veyron.credentials=" + credentials,
 		"-veyron.namespace.root=" + env.RootMT(),
 		"remove", naming.Join(name, suffix),
 	}
@@ -58,23 +53,12 @@
 	env := integration.NewTestEnvironment(t)
 	defer env.Cleanup()
 
-	// Generate credentials.
-	serverCred, serverPrin := security.NewCredentials("server")
-	defer os.RemoveAll(serverCred)
-	clientCred, _ := security.ForkCredentials(serverPrin, "client")
-	defer os.RemoveAll(clientCred)
-
 	// Start the profile repository.
 	profileRepoName := "test-profile-repo"
-	profileRepoStore, err := ioutil.TempDir("", "")
-	if err != nil {
-		t.Fatalf("TempDir() failed: %v", err)
-	}
-	defer os.RemoveAll(profileRepoStore)
+	profileRepoStore := env.TempDir()
 	args := []string{
 		"-name=" + profileRepoName, "-store=" + profileRepoStore,
 		"-veyron.tcp.address=127.0.0.1:0",
-		"-veyron.credentials=" + serverCred,
 		"-veyron.namespace.root=" + env.RootMT(),
 	}
 	serverBin := env.BuildGoPkg("v.io/core/veyron/services/mgmt/profile/profiled")
@@ -85,34 +69,34 @@
 
 	// Create a profile.
 	const profile = "test-profile"
-	putProfile(t, env, clientBin, clientCred, profileRepoName, profile)
+	putProfile(t, env, clientBin, profileRepoName, profile)
 
 	// Retrieve the profile label and check it matches the
 	// expected label.
-	profileLabel := profileCommandOutput(t, env, clientBin, false, "label", clientCred, profileRepoName, profile)
+	profileLabel := profileCommandOutput(t, env, clientBin, false, "label", profileRepoName, profile)
 	if got, want := profileLabel, "example"; got != want {
 		t.Fatalf("unexpected output: got %v, want %v", got, want)
 	}
 
 	// Retrieve the profile description and check it matches the
 	// expected description.
-	profileDesc := profileCommandOutput(t, env, clientBin, false, "description", clientCred, profileRepoName, profile)
+	profileDesc := profileCommandOutput(t, env, clientBin, false, "description", profileRepoName, profile)
 	if got, want := profileDesc, "Example profile to test the profile manager implementation."; got != want {
 		t.Fatalf("unexpected output: got %v, want %v", got, want)
 	}
 
 	// Retrieve the profile specification and check it matches the
 	// expected specification.
-	profileSpec := profileCommandOutput(t, env, clientBin, false, "specification", clientCred, profileRepoName, profile)
+	profileSpec := profileCommandOutput(t, env, clientBin, false, "specification", profileRepoName, profile)
 	if got, want := profileSpec, `profile.Specification{Label:"example", Description:"Example profile to test the profile manager implementation.", Arch:"amd64", OS:"linux", Format:"ELF", Libraries:map[profile.Library]struct {}{profile.Library{Name:"foo", MajorVersion:"1", MinorVersion:"0"}:struct {}{}}}`; got != want {
 		t.Fatalf("unexpected output: got %v, want %v", got, want)
 	}
 
 	// Remove the profile.
-	removeProfile(t, env, clientBin, clientCred, profileRepoName, profile)
+	removeProfile(t, env, clientBin, profileRepoName, profile)
 
 	// Check that the profile no longer exists.
-	profileCommandOutput(t, env, clientBin, true, "label", clientCred, profileRepoName, profile)
-	profileCommandOutput(t, env, clientBin, true, "description", clientCred, profileRepoName, profile)
-	profileCommandOutput(t, env, clientBin, true, "specification", clientCred, profileRepoName, profile)
+	profileCommandOutput(t, env, clientBin, true, "label", profileRepoName, profile)
+	profileCommandOutput(t, env, clientBin, true, "description", profileRepoName, profile)
+	profileCommandOutput(t, env, clientBin, true, "specification", profileRepoName, profile)
 }
diff --git a/tools/debug/testdata/integration_test.go b/tools/debug/testdata/integration_test.go
index 8216925..ed17bcc 100644
--- a/tools/debug/testdata/integration_test.go
+++ b/tools/debug/testdata/integration_test.go
@@ -185,9 +185,9 @@
 	}
 	traceId := fields[2]
 
-	// Do a sanity check on the trace ID: it should be a 32-character hex ID.
-	if match, _ := regexp.MatchString("[0-9a-f]{32}", traceId); !match {
-		t.Fatalf("wanted a 32-character hex ID, got %s", traceId)
+	// Do a sanity check on the trace ID: it should be a 32-character hex ID prefixed with 0x
+	if match, _ := regexp.MatchString("0x[0-9a-f]{32}", traceId); !match {
+		t.Fatalf("wanted a 32-character hex ID prefixed with 0x, got %s", traceId)
 	}
 
 	// Do another traced read, this will generate a new trace entry.
diff --git a/tools/mgmt/test.sh b/tools/mgmt/test.sh
index 6441eb7..7a28463 100755
--- a/tools/mgmt/test.sh
+++ b/tools/mgmt/test.sh
@@ -113,6 +113,9 @@
   cd "${WORKDIR}"
   build
 
+  local -r APPLICATIOND_NAME="applicationd"
+  local -r DEVICED_APP_NAME="${APPLICATIOND_NAME}/deviced/test"
+
   BIN_STAGING_DIR=$(shell::tmp_dir)
   cp "${AGENTD_BIN}" "${SUIDHELPER_BIN}" "${INITHELPER_BIN}" "${DEVICEMANAGER_BIN}" "${BIN_STAGING_DIR}"
   shell_test::setup_server_test
@@ -123,9 +126,9 @@
   export VANADIUM_DEVICE_DIR="${DM_INSTALL_DIR}/dm"
 
   if [[ "${WITH_SUID}" == "--with_suid" ]]; then
-    "${DEVICE_SCRIPT}" install "${BIN_STAGING_DIR}" --veyron.tcp.address=127.0.0.1:0
+    "${DEVICE_SCRIPT}" install "${BIN_STAGING_DIR}" --origin="${DEVICED_APP_NAME}" -- --veyron.tcp.address=127.0.0.1:0
   else
-      "${DEVICE_SCRIPT}" install "${BIN_STAGING_DIR}" --single_user -- --veyron.tcp.address=127.0.0.1:0
+    "${DEVICE_SCRIPT}" install "${BIN_STAGING_DIR}" --single_user --origin="${DEVICED_APP_NAME}" -- --veyron.tcp.address=127.0.0.1:0
   fi
 
   "${VRUN}" "${DEVICE_SCRIPT}" start
@@ -147,6 +150,7 @@
     shell_test::fail "line ${LINENO}: store set failed"
 
   # Claim the device as "alice/myworkstation".
+  echo ">> Claiming the device manager"
   "${DEVICE_BIN}" claim "${DM_NAME}/device" myworkstation
 
   if [[ "${WITH_SUID}" == "--with_suid" ]]; then
@@ -156,8 +160,8 @@
   fi
 
   # Verify the device's default blessing is as expected.
-  shell_test::assert_eq "$("${DEBUG_BIN}" stats read "${DM_NAME}/__debug/stats/security/principal/blessingstore" | head -1 | sed -e 's/^.*Default blessings: '//)" \
-    "alice/myworkstation" "${LINENO}"
+  shell_test::assert_contains "$("${DEBUG_BIN}" stats read "${DM_NAME}/__debug/stats/security/principal/*/blessingstore" | head -1)" \
+    "Default blessings: alice/myworkstation" "${LINENO}"
 
   # Get the device's profile.
   local -r DEVICE_PROFILE=$("${DEVICE_BIN}" describe "${DM_NAME}/device" | sed -e 's/{Profiles:map\[\(.*\):{}]}/\1/')
@@ -172,6 +176,7 @@
   # Upload a binary to the binary server.  The binary we upload is binaryd
   # itself.
   local -r SAMPLE_APP_BIN_NAME="${BINARYD_NAME}/testapp"
+  echo ">> Uploading ${SAMPLE_APP_BIN_NAME}"
   "${BINARY_BIN}" upload "${SAMPLE_APP_BIN_NAME}" "${BINARYD_BIN}"
 
   # Verify that the binary we uploaded is shown by glob.
@@ -180,7 +185,6 @@
 
   # Start an application server under the blessing "alice/myworkstation/applicationd" so that
   # the device ("alice/myworkstation") can talk to it.
-  local -r APPLICATIOND_NAME="applicationd"
   shell_test::start_server "${VRUN}" --name=myworkstation/applicationd "${APPLICATIOND_BIN}" --name="${APPLICATIOND_NAME}" \
     --store="$(shell::tmp_dir)" --veyron.tcp.address=127.0.0.1:0 \
     || shell_test::fail "line ${LINENO} failed to start applicationd"
@@ -188,6 +192,7 @@
   # Upload an envelope for our test app.
   local -r SAMPLE_APP_NAME="${APPLICATIOND_NAME}/testapp/v0"
   local -r APP_PUBLISH_NAME="testbinaryd"
+  echo ">> Uploading ${SAMPLE_APP_NAME}"
   echo "{\"Title\":\"BINARYD\", \"Args\":[\"--name=${APP_PUBLISH_NAME}\", \"--root_dir=./binstore\", \"--veyron.tcp.address=127.0.0.1:0\"], \"Binary\":\"${SAMPLE_APP_BIN_NAME}\", \"Env\":[]}" > ./app.envelope && \
     "${APPLICATION_BIN}" put "${SAMPLE_APP_NAME}" "${DEVICE_PROFILE}" ./app.envelope && rm ./app.envelope
 
@@ -196,6 +201,7 @@
     "BINARYD" "${LINENO}"
 
   # Install the app on the device.
+  echo ">> Installing ${SAMPLE_APP_NAME}"
   local -r INSTALLATION_NAME=$("${DEVICE_BIN}" install "${DM_NAME}/apps" "${SAMPLE_APP_NAME}" | sed -e 's/Successfully installed: "//' | sed -e 's/"//')
 
   # Verify that the installation shows up when globbing the device manager.
@@ -203,6 +209,7 @@
     "${INSTALLATION_NAME}" "${LINENO}"
 
   # Start an instance of the app, granting it blessing extension myapp.
+  echo ">> Starting ${INSTALLATION_NAME}"
   local -r INSTANCE_NAME=$("${DEVICE_BIN}" start "${INSTALLATION_NAME}" myapp | sed -e 's/Successfully started: "//' | sed -e 's/"//')
   wait_for_mountentry "${NAMESPACE_BIN}" "5" "${APP_PUBLISH_NAME}"
 
@@ -212,16 +219,45 @@
   # TODO(rjkroege): Verify that the app is actually running as ${SUID_USER}
 
   # Verify the app's default blessing.
-  shell_test::assert_eq "$("${DEBUG_BIN}" stats read "${INSTANCE_NAME}/stats/security/principal/blessingstore" | head -1 | sed -e 's/^.*Default blessings: '//)" \
-    "alice/myapp/BINARYD" "${LINENO}"
+  shell_test::assert_contains "$("${DEBUG_BIN}" stats read "${INSTANCE_NAME}/stats/security/principal/*/blessingstore" | head -1)" \
+    "Default blessings: alice/myapp/BINARYD" "${LINENO}"
 
   # Stop the instance.
+  echo ">> Stopping ${INSTANCE_NAME}"
   "${DEVICE_BIN}" stop "${INSTANCE_NAME}"
 
   # Verify that logs, but not stats, show up when globbing the stopped instance.
   shell_test::assert_eq "$("${NAMESPACE_BIN}" glob "${INSTANCE_NAME}/stats/...")" "" "${LINENO}"
   shell_test::assert_ne "$("${NAMESPACE_BIN}" glob "${INSTANCE_NAME}/logs/...")" "" "${LINENO}"
 
+  # Upload a deviced binary.
+  local -r DEVICED_APP_BIN_NAME="${BINARYD_NAME}/deviced"
+  echo ">> Uploading ${DEVICEMANAGER_BIN}"
+  "${BINARY_BIN}" upload "${DEVICED_APP_BIN_NAME}" "${DEVICEMANAGER_BIN}"
+
+  # Upload a device manager envelope.
+  echo ">> Uploading ${DEVICED_APP_NAME}"
+  echo "{\"Title\":\"device manager\", \"Binary\":\"${DEVICED_APP_BIN_NAME}\"}" > ./deviced.envelope && \
+    "${APPLICATION_BIN}" put "${DEVICED_APP_NAME}" "${DEVICE_PROFILE}" ./deviced.envelope && rm ./deviced.envelope
+
+  # Update the device manager.
+  echo ">> Updating device manager"
+  "${DEVICE_BIN}" update "${DM_NAME}/device"
+  DM_EP=$(wait_for_mountentry "${NAMESPACE_BIN}" 5 "${DM_NAME}" "${DM_EP}")
+
+  # Verify that device manager is still published under the expected name
+  # (hostname).
+  shell_test::assert_ne "$("${NAMESPACE_BIN}" glob "${DM_NAME}")" "" "${LINENO}"
+
+  # Revert the device manager.
+  echo ">> Reverting device manager"
+  "${DEVICE_BIN}" revert "${DM_NAME}/device"
+  DM_EP=$(wait_for_mountentry "${NAMESPACE_BIN}" 5 "${DM_NAME}" "${DM_EP}")
+
+  # Verify that device manager is still published under the expected name
+  # (hostname).
+  shell_test::assert_ne "$("${NAMESPACE_BIN}" glob "${DM_NAME}")" "" "${LINENO}"
+
   # Restart the device manager.
   "${DEVICE_BIN}" suspend "${DM_NAME}/device"
   wait_for_mountentry "${NAMESPACE_BIN}" "5" "${DM_NAME}" "{DM_EP}"
diff --git a/tools/naming/simulator/driver.go b/tools/naming/simulator/driver.go
index 34c6e91..faa02f8 100644
--- a/tools/naming/simulator/driver.go
+++ b/tools/naming/simulator/driver.go
@@ -33,12 +33,14 @@
 
 var (
 	interactive bool
+	filename    string
 	handles     map[string]*cmdState
 	jsonDict    map[string]string
 )
 
 func init() {
 	flag.BoolVar(&interactive, "interactive", true, "set interactive/batch mode")
+	flag.StringVar(&filename, "file", "", "command file")
 	handles = make(map[string]*cmdState)
 	jsonDict = make(map[string]string)
 	flag.Usage = usage
@@ -101,6 +103,17 @@
 	var shutdown veyron2.Shutdown
 	ctx, shutdown = veyron2.Init()
 
+	input := os.Stdin
+	if len(filename) > 0 {
+		f, err := os.Open(filename)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "unexpected error: %s\n", err)
+			os.Exit(1)
+		}
+		input = f
+		interactive = false
+	}
+
 	// Subprocesses commands are run by fork/execing this binary
 	// so we must test to see if this instance is a subprocess or the
 	// the original command line instance.
@@ -122,7 +135,7 @@
 	}
 	defer shell.Cleanup(os.Stderr, os.Stderr)
 
-	scanner := bufio.NewScanner(os.Stdin)
+	scanner := bufio.NewScanner(input)
 	lineno := 1
 	prompt(lineno)
 	for scanner.Scan() {
diff --git a/tools/naming/simulator/test.sh b/tools/naming/simulator/test.sh
deleted file mode 100755
index ff515d2..0000000
--- a/tools/naming/simulator/test.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-
-# Test the simulator command-line tool.
-
-source "$(go list -f {{.Dir}} v.io/core/shell/lib)/shell_test.sh"
-
-# Run the test under the security agent.
-shell_test::enable_agent "$@"
-
-readonly WORKDIR="${shell_test_WORK_DIR}"
-
-main() {
-  # Build binaries.
-  cd "${WORKDIR}"
-  PKG="v.io/core/veyron/tools/naming/simulator"
-  SIMULATOR_BIN="$(shell_test::build_go_binary ${PKG})"
-
-  local -r DIR=$(go list -f {{.Dir}} "${PKG}")
-  local file
-  for file in "${DIR}"/*.scr; do
-    echo "${file}"
-    "${VRUN}" "${SIMULATOR_BIN}" --interactive=false < "${file}" &> output || shell_test::fail "line ${LINENO}: failed for ${file}: $(cat output)"
-  done
-  shell_test::pass
-}
-
-main "$@"
diff --git a/tools/naming/simulator/ambiguity.scr b/tools/naming/simulator/testdata/ambiguity.scr
similarity index 100%
rename from tools/naming/simulator/ambiguity.scr
rename to tools/naming/simulator/testdata/ambiguity.scr
diff --git a/tools/naming/simulator/echo.scr b/tools/naming/simulator/testdata/echo.scr
similarity index 100%
rename from tools/naming/simulator/echo.scr
rename to tools/naming/simulator/testdata/echo.scr
diff --git a/tools/naming/simulator/testdata/integration_test.go b/tools/naming/simulator/testdata/integration_test.go
new file mode 100644
index 0000000..9cee3bf
--- /dev/null
+++ b/tools/naming/simulator/testdata/integration_test.go
@@ -0,0 +1,45 @@
+package testdata
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"regexp"
+	"testing"
+
+	"v.io/core/veyron/lib/modules"
+	"v.io/core/veyron/lib/testutil/integration"
+
+	_ "v.io/core/veyron/profiles/static"
+)
+
+func TestHelperProcess(t *testing.T) {
+	modules.DispatchInTest()
+}
+
+func TestSimulator(t *testing.T) {
+	env := integration.NewTestEnvironment(t)
+	defer env.Cleanup()
+	binary := env.BuildGoPkg("v.io/core/veyron/tools/naming/simulator")
+	files, err := ioutil.ReadDir(".")
+	if err != nil {
+		t.Fatal(err)
+	}
+	scripts := []string{}
+	re := regexp.MustCompile(`.*\.scr`)
+	for _, f := range files {
+		if !f.IsDir() && re.MatchString(f.Name()) {
+			scripts = append(scripts, f.Name())
+		}
+	}
+	for _, script := range scripts {
+		invocation := binary.Start("--file", script)
+		output, errorOutput := invocation.Output(), invocation.ErrorOutput()
+		if err := invocation.Wait(nil, nil); err != nil {
+			fmt.Fprintf(os.Stderr, "Script %v failed\n", script)
+			fmt.Fprintln(os.Stderr, output)
+			fmt.Fprintln(os.Stderr, errorOutput)
+			t.Fatal(err)
+		}
+	}
+}
diff --git a/tools/naming/simulator/json_example.scr b/tools/naming/simulator/testdata/json_example.scr
similarity index 100%
rename from tools/naming/simulator/json_example.scr
rename to tools/naming/simulator/testdata/json_example.scr
diff --git a/tools/naming/simulator/mt_complex.scr b/tools/naming/simulator/testdata/mt_complex.scr
similarity index 100%
rename from tools/naming/simulator/mt_complex.scr
rename to tools/naming/simulator/testdata/mt_complex.scr
diff --git a/tools/naming/simulator/mt_simple.scr b/tools/naming/simulator/testdata/mt_simple.scr
similarity index 100%
rename from tools/naming/simulator/mt_simple.scr
rename to tools/naming/simulator/testdata/mt_simple.scr
diff --git a/tools/naming/simulator/proxy.scr b/tools/naming/simulator/testdata/proxy.scr
similarity index 100%
rename from tools/naming/simulator/proxy.scr
rename to tools/naming/simulator/testdata/proxy.scr
diff --git a/tools/naming/simulator/public_echo.scr b/tools/naming/simulator/testdata/public_echo.scr
similarity index 100%
rename from tools/naming/simulator/public_echo.scr
rename to tools/naming/simulator/testdata/public_echo.scr
diff --git a/tools/uniqueid/doc.go b/tools/uniqueid/doc.go
new file mode 100644
index 0000000..5fdbcab
--- /dev/null
+++ b/tools/uniqueid/doc.go
@@ -0,0 +1,57 @@
+// This file was auto-generated via go generate.
+// DO NOT UPDATE MANUALLY
+
+/*
+The uniqueid tool generates unique ids. It also has an option of automatically
+substituting unique ids with placeholders in files.
+
+Usage:
+   uniqueid <command>
+
+The uniqueid commands are:
+   generate    Generates UniqueIds
+   inject      Injects UniqueIds into existing files
+   help        Display help for commands or topics
+Run "uniqueid help [command]" for command usage.
+
+Uniqueid Generate
+
+Generates unique ids and outputs them to standard out.
+
+Usage:
+   uniqueid generate
+
+Uniqueid Inject
+
+Injects UniqueIds into existing files. Strings of the form "$UNIQUEID$" will be
+replaced with generated ids.
+
+Usage:
+   uniqueid inject <filenames>
+
+<filenames> List of files to inject unique ids into
+
+Uniqueid Help
+
+Help with no args displays the usage of the parent command.
+
+Help with args displays the usage of the specified sub-command or help topic.
+
+"help ..." recursively displays help for all commands and topics.
+
+The output is formatted to a target width in runes.  The target width is
+determined by checking the environment variable CMDLINE_WIDTH, falling back on
+the terminal width from the OS, falling back on 80 chars.  By setting
+CMDLINE_WIDTH=x, if x > 0 the width is x, if x < 0 the width is unlimited, and
+if x == 0 or is unset one of the fallbacks is used.
+
+Usage:
+   uniqueid help [flags] [command/topic ...]
+
+[command/topic ...] optionally identifies a specific sub-command or help topic.
+
+The uniqueid help flags are:
+ -style=text
+   The formatting style for help output, either "text" or "godoc".
+*/
+package main
diff --git a/tools/uniqueid/main.go b/tools/uniqueid/main.go
new file mode 100644
index 0000000..ce003d0
--- /dev/null
+++ b/tools/uniqueid/main.go
@@ -0,0 +1,110 @@
+// The following enables go generate to generate the doc.go file.
+//go:generate go run $VANADIUM_ROOT/release/go/src/v.io/lib/cmdline/testdata/gendoc.go .
+
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"regexp"
+
+	"v.io/core/veyron2/uniqueid"
+	"v.io/lib/cmdline"
+)
+
+func main() {
+	os.Exit(cmdUniqueId.Main())
+}
+
+var cmdUniqueId = &cmdline.Command{
+	Name:  "uniqueid",
+	Short: "Generates UniqueIds.",
+	Long: `
+The uniqueid tool generates unique ids.
+It also has an option of automatically substituting unique ids with placeholders in files.
+`,
+	Children: []*cmdline.Command{cmdGenerate, cmdInject},
+	Topics:   []cmdline.Topic{},
+}
+
+var cmdGenerate = &cmdline.Command{
+	Run:   runGenerate,
+	Name:  "generate",
+	Short: "Generates UniqueIds",
+	Long: `
+Generates unique ids and outputs them to standard out.
+`,
+	ArgsName: "",
+	ArgsLong: "",
+}
+
+var cmdInject = &cmdline.Command{
+	Run:   runInject,
+	Name:  "inject",
+	Short: "Injects UniqueIds into existing files",
+	Long: `
+Injects UniqueIds into existing files.
+Strings of the form "$UNIQUEID$" will be replaced with generated ids.
+`,
+	ArgsName: "<filenames>",
+	ArgsLong: "<filenames> List of files to inject unique ids into",
+}
+
+// runGenerate implements the generate command which outputs generated ids to stdout.
+func runGenerate(command *cmdline.Command, args []string) error {
+	if len(args) > 0 {
+		return command.UsageErrorf("expected 0 args, got %d", len(args))
+	}
+	id, err := uniqueid.Random()
+	if err != nil {
+		return err
+	}
+	fmt.Printf("%#v", id)
+	return nil
+}
+
+// runInject implements the inject command which replaces $UNIQUEID$ strings with generated ids.
+func runInject(command *cmdline.Command, args []string) error {
+	if len(args) == 0 {
+		return command.UsageErrorf("expected at least one file arg, got 0")
+	}
+	for _, arg := range args {
+		if err := injectIntoFile(arg); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// injectIntoFile replaces $UNIQUEID$ strings when they exist in the specified file.
+func injectIntoFile(filename string) error {
+	inbytes, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return err
+	}
+
+	// Replace $UNIQUEID$ with generated ids.
+	re, err := regexp.Compile("[$]UNIQUEID")
+	if err != nil {
+		return err
+	}
+	replaced := re.ReplaceAllFunc(inbytes, func(match []byte) []byte {
+		id, randErr := uniqueid.Random()
+		if randErr != nil {
+			err = randErr
+		}
+		return []byte(fmt.Sprintf("%#v", id))
+	})
+	if err != nil {
+		return err
+	}
+
+	// If the file with injections is different, write it to disk.
+	if !bytes.Equal(inbytes, replaced) {
+		fmt.Printf("Updated: %s\n", filename)
+		return ioutil.WriteFile(filename, replaced, 0)
+	}
+	return nil
+}
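
To see what inject produces without building against the v.io tree, the sketch below applies the same [$]UNIQUEID replacement to an in-memory buffer, with a fixed 16-byte array standing in for uniqueid.Random (so the printed literal's type differs from the real uniqueid.Id, but the substitution mechanics are the same). The placeholder text and the stand-in id bytes are illustrative.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// A file body containing the placeholder the inject command looks for.
	src := []byte(`var requestId = $UNIQUEID$`)

	// Same pattern as injectIntoFile; note it matches "$UNIQUEID" and leaves
	// any trailing "$" in place.
	re := regexp.MustCompile(`[$]UNIQUEID`)

	// Stand-in for uniqueid.Random(): a fixed 16-byte id rendered with %#v,
	// the same verb the real tool uses to print generated ids.
	fakeId := [16]byte{0x01, 0x02}
	replaced := re.ReplaceAllFunc(src, func([]byte) []byte {
		return []byte(fmt.Sprintf("%#v", fakeId))
	})
	fmt.Println(string(replaced))
}
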