Merge "services/device/mgmt_v23_test: Use "device publish" and check publisher blessings"
diff --git a/lib/mgmt/model.go b/lib/mgmt/model.go
index e1f3032..1aa425f 100644
--- a/lib/mgmt/model.go
+++ b/lib/mgmt/model.go
@@ -14,4 +14,10 @@
 	ParentBlessingConfigKey        = "MGMT_PARENT_BLESSING_PEER_PATTERN"
 	SecurityAgentEndpointConfigKey = "MGMT_SECURITY_AGENT_EP"
 	AppOriginConfigKey             = "MGMT_APP_ORIGIN"
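+	// PublisherBlessingPrefixesKey holds a comma-separated list of blessing
+	// prefixes under which the app's publisher blessings are expected to fall.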
+	PublisherBlessingPrefixesKey   = "MGMT_PUBLISHER_BLESSING_PREFIXES"
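+	// InstanceNameKey holds the name of the app instance, rooted at the
+	// device manager's app service name.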
+	InstanceNameKey                = "MGMT_INSTANCE_NAME"
 )
diff --git a/runtime/internal/flow/flowcontrol/flowcontrol.go b/runtime/internal/flow/flowcontrol/flowcontrol.go
new file mode 100644
index 0000000..1a47ada
--- /dev/null
+++ b/runtime/internal/flow/flowcontrol/flowcontrol.go
@@ -0,0 +1,372 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flowcontrol
+
+import (
+	"bytes"
+	"fmt"
+	"sync"
+
+	"v.io/v23/context"
+	"v.io/v23/verror"
+)
+
+const pkgPath = "v.io/x/ref/runtime/internal/flow/flowcontrol"
+
+var ErrConcurrentRun = verror.Register(
+	verror.ID(pkgPath+".ErrConcurrentRun"),
+	verror.NoRetry, "Run called concurrently.")
+var ErrWrongFlowController = verror.Register(
+	verror.ID(pkgPath+".ErrWrongFlowController"),
+	verror.NoRetry, "Release called for worker from different flow controller.")
+
+// Runners are called by Workers.  For a given flow controller
+// only one Runner will be running at a time.  tokens specifies
+// the number of tokens available for this call.  Implementors
+// should return the number of tokens used, whether they are done
+// with all their work, and any error encountered.
+// Runners will be called repeatedly within a single Run call until
+// either err != nil or done is true.
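+//
+// A sketch of a typical Runner that drains a pending buffer (illustrative
+// only; buf and write are assumed to be captured by the closure):
+//
+//	func(tokens int) (used int, done bool, err error) {
+//		if tokens > len(buf) {
+//			tokens = len(buf)
+//		}
+//		err = write(buf[:tokens])
+//		buf = buf[tokens:]
+//		return tokens, len(buf) == 0, err
+//	}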
+type Runner func(tokens int) (used int, done bool, err error)
+
+type counterState struct {
+	// TODO(mattr): Add deficit if we allow multi-slice writes.
+	borrowed     int  // Number of tokens borrowed from the shared pool.
+	released     int  // Number of tokens available via our flow control counters.
+	everReleased bool // True if tokens have ever been released to this worker.
+}
+
+type state int
+
+const (
+	idle = state(iota)
+	running
+	active
+)
+
+// Worker represents a single flow-controlled worker.
+// Workers keep track of flow control counters to ensure
+// producers do not overwhelm consumers.  Only one Worker
+// will be executing at a time.
+type Worker struct {
+	fc       *FlowController
+	priority int
+	work     chan struct{}
+
+	// These variables are protected by fc.mu.
+	counters   *counterState // State related to the flow control counters.
+	state      state
+	next, prev *Worker // Used as a list when in an active queue.
+}
+
+// Run runs r potentially multiple times.
+// Only one worker's r function will run at a time for a given FlowController.
+// A single worker's Run function should not be called concurrently from multiple
+// goroutines.
+func (w *Worker) Run(ctx *context.T, r Runner) (err error) {
+	w.fc.mu.Lock()
+	if w.state != idle {
+		w.fc.mu.Unlock()
+		return verror.New(ErrConcurrentRun, ctx)
+	}
+
+	w.state = running
+	if w.readyLocked() {
+		w.fc.activateLocked(w)
+		w.state = active
+	}
+
+	for {
+		next := w.fc.nextWorkerLocked()
+		for w.fc.writing != w && err == nil {
+			w.fc.mu.Unlock()
+			if next != nil {
+				next.work <- struct{}{}
+			}
+			select {
+			case <-ctx.Done():
+				err = ctx.Err()
+			case <-w.work:
+			}
+			w.fc.mu.Lock()
+		}
+		if err != nil {
+			break
+		}
+
+		toWrite := w.fc.mtu
+		if w.counters != nil {
+			if !w.counters.everReleased {
+				toWrite = min(w.fc.shared, w.fc.mtu)
+				w.counters.released += toWrite
+				w.counters.borrowed += toWrite
+				w.fc.shared -= toWrite
+			} else {
+				toWrite = min(w.counters.released, w.fc.mtu)
+			}
+		}
+
+		w.fc.mu.Unlock()
+		var written int
+		var done bool
+		written, done, err = r(toWrite)
+		w.fc.mu.Lock()
+
+		if w.counters != nil {
+			w.counters.released -= written
+			if w.counters.released > 0 && w.counters.borrowed > 0 {
+				toReturn := min(w.counters.released, w.counters.borrowed)
+				w.counters.borrowed -= toReturn
+				w.counters.released -= toReturn
+				w.fc.shared += toReturn
+			}
+		}
+
+		w.fc.writing = nil
+		if err != nil || done {
+			break
+		}
+		if !w.readyLocked() {
+			w.fc.deactivateLocked(w)
+			w.state = running
+		}
+	}
+
+	w.state = idle
+	w.fc.deactivateLocked(w)
+	next := w.fc.nextWorkerLocked()
+	w.fc.mu.Unlock()
+	if next != nil {
+		next.work <- struct{}{}
+	}
+	return err
+}
+
+func (w *Worker) releaseLocked(tokens int) {
+	if w.counters == nil {
+		return
+	}
+	w.counters.everReleased = true
+	if w.counters.borrowed > 0 {
+		n := min(w.counters.borrowed, tokens)
+		w.counters.borrowed -= n
+		w.fc.shared += n
+		tokens -= n
+	}
+	w.counters.released += tokens
+	if w.state == running && w.readyLocked() {
+		w.fc.activateLocked(w)
+		w.state = active
+	}
+}
+
+// Release releases tokens to this worker.
+// Workers will first repay any debts to the flow controller's shared pool
+// and use any surplus in subsequent calls to Run.
+func (w *Worker) Release(tokens int) {
+	w.fc.mu.Lock()
+	w.releaseLocked(tokens)
+	next := w.fc.nextWorkerLocked()
+	w.fc.mu.Unlock()
+	if next != nil {
+		next.work <- struct{}{}
+	}
+}
+
+func (w *Worker) readyLocked() bool {
+	if w.counters == nil {
+		return true
+	}
+	return w.counters.released > 0 || (!w.counters.everReleased && w.fc.shared > 0)
+}
+
+// FlowController manages multiple Workers to ensure only one runs at a time.
+// The workers also obey counters so that producers don't overwhelm consumers.
+type FlowController struct {
+	mtu int
+
+	mu      sync.Mutex
+	shared  int
+	active  []*Worker
+	writing *Worker
+}
+
+// New creates a new FlowController.  Shared is the number of shared tokens
+// that flows can borrow from before they receive their first Release.
+// Mtu is the maximum number of tokens to be consumed by a single Runner
+// invocation.
+func New(shared, mtu int) *FlowController {
+	return &FlowController{shared: shared, mtu: mtu}
+}
+
+// NewWorker creates a new worker.  Workers keep track of token counters
+// for a flow controlled process.  The order that workers
+// execute is controlled by priority.  Higher priority
+// workers that are ready will run before any lower priority
+// workers.
+func (fc *FlowController) NewWorker(priority int) *Worker {
+	w := &Worker{
+		fc:       fc,
+		priority: priority,
+		work:     make(chan struct{}),
+		counters: &counterState{},
+	}
+	w.next, w.prev = w, w
+	return w
+}
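+
+// A minimal end-to-end usage sketch (illustrative; ctx, conn, and data are
+// assumptions, not part of this package):
+//
+//	fc := flowcontrol.New(1<<20, 1<<16) // 1MB shared pool, 64KB per Runner call
+//	w := fc.NewWorker(0)
+//	go w.Release(len(data)) // grant tokens up front; normally a consumer does this
+//	err := w.Run(ctx, func(tokens int) (int, bool, error) {
+//		if tokens > len(data) {
+//			tokens = len(data)
+//		}
+//		n, err := conn.Write(data[:tokens])
+//		data = data[n:]
+//		return n, len(data) == 0, err
+//	})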
+
+type Release struct {
+	Worker *Worker
+	Tokens int
+}
+
+// Release releases tokens to many Workers atomically.  It is conceptually
+// the same as calling Release on each worker independently.
+func (fc *FlowController) Release(to []Release) error {
+	fc.mu.Lock()
+	for _, t := range to {
+		if t.Worker.fc != fc {
+			fc.mu.Unlock()
+			return verror.New(ErrWrongFlowController, nil)
+		}
+		t.Worker.releaseLocked(t.Tokens)
+	}
+	next := fc.nextWorkerLocked()
+	fc.mu.Unlock()
+	if next != nil {
+		next.work <- struct{}{}
+	}
+	return nil
+}
+
+// Run runs the given runner on a non-flow controlled Worker.  This
+// worker does not wait for any flow control tokens and is limited
+// only by the MTU.
+func (fc *FlowController) Run(ctx *context.T, p int, r Runner) error {
+	w := &Worker{
+		fc:       fc,
+		priority: p,
+		work:     make(chan struct{}),
+	}
+	w.next, w.prev = w, w
+	return w.Run(ctx, r)
+}
+
+func (fc *FlowController) nextWorkerLocked() *Worker {
+	if fc.writing == nil {
+		for p, head := range fc.active {
+			if head != nil {
+				fc.active[p] = head.next
+				fc.writing = head
+				return head
+			}
+		}
+	}
+	return nil
+}
+
+func (fc *FlowController) activateLocked(w *Worker) {
+	if w.priority >= len(fc.active) {
+		newActive := make([]*Worker, int(w.priority)+1)
+		copy(newActive, fc.active)
+		fc.active = newActive
+	}
+	head := fc.active[w.priority]
+	if head == nil {
+		fc.active[w.priority] = w
+	} else {
+		w.prev, w.next = head.prev, head
+		w.prev.next, w.next.prev = w, w
+	}
+}
+
+func (fc *FlowController) deactivateLocked(w *Worker) {
+	if w.priority >= len(fc.active) {
+		// Nothing to do: this worker was never activated.
+		return
+	}
+	if head := fc.active[w.priority]; head == w {
+		if w.next == w {
+			fc.active[w.priority] = nil
+		} else {
+			fc.active[w.priority] = w.next
+		}
+	}
+	w.next.prev, w.prev.next = w.prev, w.next
+	w.next, w.prev = w, w
+}
+
+func (fc *FlowController) numActive() int {
+	n := 0
+	fc.mu.Lock()
+	for _, head := range fc.active {
+		if head != nil {
+			n++
+			for cur := head.next; cur != head; cur = cur.next {
+				n++
+			}
+		}
+	}
+	fc.mu.Unlock()
+	return n
+}
+
+// String writes a string representation of the flow controller.
+// This can be helpful in debugging.
+func (fc *FlowController) String() string {
+	buf := &bytes.Buffer{}
+	fmt.Fprintf(buf, "FlowController %p: \n", fc)
+
+	fc.mu.Lock()
+	fmt.Fprintf(buf, "writing: %p\n", fc.writing)
+	fmt.Fprintln(buf, "active:")
+	for p, head := range fc.active {
+		fmt.Fprintf(buf, "  %v: %p", p, head)
+		if head != nil {
+			for cur := head.next; cur != head; cur = cur.next {
+				fmt.Fprintf(buf, " %p", cur)
+			}
+		}
+		fmt.Fprintln(buf, "")
+	}
+	fc.mu.Unlock()
+	return buf.String()
+}
+
+func min(head int, rest ...int) int {
+	for _, r := range rest {
+		if r < head {
+			head = r
+		}
+	}
+	return head
+}
diff --git a/runtime/internal/flow/flowcontrol/flowcontrol_test.go b/runtime/internal/flow/flowcontrol/flowcontrol_test.go
new file mode 100644
index 0000000..7388b8f
--- /dev/null
+++ b/runtime/internal/flow/flowcontrol/flowcontrol_test.go
@@ -0,0 +1,293 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flowcontrol
+
+import (
+	"bytes"
+	"crypto/rand"
+	"fmt"
+	"io"
+	"net"
+	"sync"
+	"testing"
+	"time"
+
+	"v.io/v23/context"
+	"v.io/v23/verror"
+	"v.io/x/ref/test"
+)
+
+var testdata = make([]byte, 1<<20)
+
+func init() {
+	test.Init()
+	_, err := io.ReadFull(rand.Reader, testdata)
+	if err != nil {
+		panic(err)
+	}
+}
+
+func TestFlowControl(t *testing.T) {
+	const (
+		workers  = 10
+		messages = 10
+	)
+
+	msgs := make(map[int][]byte)
+	fc := New(256, 64)
+
+	ctx, cancel := context.RootContext()
+	defer cancel()
+
+	var wg sync.WaitGroup
+	wg.Add(workers)
+	for i := 0; i < workers; i++ {
+		go func(idx int) {
+			el := fc.NewWorker(0)
+			go el.Release(messages * 5) // Try to make races happen
+			j := 0
+			el.Run(ctx, func(tokens int) (used int, done bool, err error) {
+				msgs[idx] = append(msgs[idx], []byte(fmt.Sprintf("%d-%d,", idx, j))...)
+				j++
+				return 3, j >= messages, nil
+			})
+			wg.Done()
+		}(i)
+	}
+	wg.Wait()
+
+	for i := 0; i < workers; i++ {
+		buf := &bytes.Buffer{}
+		for j := 0; j < messages; j++ {
+			fmt.Fprintf(buf, "%d-%d,", i, j)
+		}
+		if want, got := buf.String(), string(msgs[i]); want != got {
+			t.Errorf("Got %s, want %s for %d", got, want, i)
+		}
+	}
+}
+
+func expect(t *testing.T, work chan interface{}, values ...interface{}) {
+	for i, w := range values {
+		if got := <-work; got != w {
+			t.Errorf("expected %p in pos %d got %p", w, i, got)
+		}
+	}
+}
+
+func TestOrdering(t *testing.T) {
+	const mtu = 10
+
+	ctx, cancel := context.RootContext()
+	defer cancel()
+	fc := New(0, mtu)
+
+	work := make(chan interface{})
+	worker := func(p int) *Worker {
+		w := fc.NewWorker(p)
+		go w.Run(ctx, func(t int) (int, bool, error) {
+			work <- w
+			return t, false, nil
+		})
+		w.Release(mtu)
+		<-work
+		return w
+	}
+
+	w0 := worker(0)
+	w1a := worker(1)
+	w1b := worker(1)
+	w1c := worker(1)
+	w2 := worker(2)
+
+	// Release to all the flows at once and ensure the writes
+	// happen in the correct order.
+	fc.Release([]Release{{w0, 2 * mtu}, {w1a, 2 * mtu}, {w1b, 3 * mtu}, {w1c, 0}, {w2, mtu}})
+	expect(t, work, w0, w0, w1a, w1b, w1a, w1b, w1b, w2)
+}
+
+func TestSharedCounters(t *testing.T) {
+	const (
+		mtu    = 10
+		shared = 2 * mtu
+	)
+
+	ctx, cancel := context.RootContext()
+	defer cancel()
+
+	fc := New(shared, mtu)
+
+	work := make(chan interface{})
+
+	worker := func(p int) *Worker {
+		w := fc.NewWorker(p)
+		go w.Run(ctx, func(t int) (int, bool, error) {
+			work <- w
+			return t, false, nil
+		})
+		return w
+	}
+
+	// w0 should run twice on shared counters.
+	w0 := worker(0)
+	expect(t, work, w0, w0)
+
+	w1 := worker(1)
+	// Now Release to w0, which shouldn't allow it to run since it's just repaying
+	// its debt, but should allow w1 to run on the returned shared counters.
+	w0.Release(2 * mtu)
+	expect(t, work, w1, w1)
+
+	// Releasing again will allow w0 to run.
+	w0.Release(mtu)
+	expect(t, work, w0)
+}
+
+func TestConcurrentRun(t *testing.T) {
+	ctx, cancel := context.RootContext()
+	defer cancel()
+	const mtu = 10
+	fc := New(mtu, mtu)
+
+	ready, wait := make(chan struct{}), make(chan struct{})
+	w := fc.NewWorker(0)
+	go w.Run(ctx, func(t int) (int, bool, error) {
+		close(ready)
+		<-wait
+		return t, true, nil
+	})
+	<-ready
+	if err := w.Run(ctx, nil); verror.ErrorID(err) != ErrConcurrentRun.ID {
+		t.Errorf("expected concurrent run error got: %v", err)
+	}
+	close(wait)
+}
+
+func TestNonFlowControlledRun(t *testing.T) {
+	ctx, cancel := context.RootContext()
+	defer cancel()
+	const mtu = 10
+	fc := New(0, mtu)
+
+	work := make(chan interface{})
+	ready, wait := make(chan struct{}), make(chan struct{})
+	// Start one worker running
+	go fc.Run(ctx, 0, func(t int) (int, bool, error) {
+		close(ready)
+		<-wait
+		return t, true, nil
+	})
+	<-ready
+	// Now queue up several workers and make sure they execute in order.
+	go fc.Run(ctx, 2, func(t int) (int, bool, error) {
+		work <- "c"
+		return t, true, nil
+	})
+	go fc.Run(ctx, 1, func(t int) (int, bool, error) {
+		work <- "b"
+		return t, true, nil
+	})
+	go fc.Run(ctx, 0, func(t int) (int, bool, error) {
+		work <- "a"
+		return t, true, nil
+	})
+	for fc.numActive() < 4 {
+		time.Sleep(time.Millisecond)
+	}
+	close(wait)
+	expect(t, work, "a", "b", "c")
+}
+
+func newNullConn(mtu int) net.Conn {
+	ln, err := net.Listen("tcp", ":0")
+	if err != nil {
+		panic(err)
+	}
+	addr := ln.Addr()
+
+	go func() {
+		conn, err := ln.Accept()
+		if err != nil {
+			panic(err)
+		}
+		ln.Close()
+		buf := make([]byte, mtu)
+		for {
+			_, err := conn.Read(buf)
+			if err == io.EOF {
+				break
+			}
+			if err != nil {
+				panic(err)
+			}
+		}
+		conn.Close()
+	}()
+
+	conn, err := net.Dial(addr.Network(), addr.String())
+	if err != nil {
+		panic(err)
+	}
+	return conn
+}
+
+func BenchmarkWithFlowControl(b *testing.B) {
+	const (
+		mtu     = 1 << 16
+		shared  = 1 << 20
+		workers = 100
+	)
+	ctx, cancel := context.RootContext()
+	defer cancel()
+	s := newNullConn(mtu)
+
+	for n := 0; n < b.N; n++ {
+		fc := New(shared, mtu)
+		var wg sync.WaitGroup
+		wg.Add(workers)
+		for i := 0; i < workers; i++ {
+			go func(idx int) {
+				w := fc.NewWorker(0)
+				w.Release(len(testdata))
+				t := testdata
+				err := w.Run(ctx, func(tokens int) (used int, done bool, err error) {
+					towrite := min(tokens, len(t))
+					written, err := s.Write(t[:min(tokens, len(t))])
+					t = t[written:]
+					return towrite, len(t) == 0, err
+				})
+				if err != nil {
+					panic(err)
+				}
+				wg.Done()
+			}(i)
+		}
+		wg.Wait()
+	}
+	if err := s.Close(); err != nil {
+		panic(err)
+	}
+}
+
+func BenchmarkWithoutFlowControl(b *testing.B) {
+	const (
+		workers = 100
+		mtu     = 1 << 16
+	)
+	s := newNullConn(mtu)
+	for n := 0; n < b.N; n++ {
+		for cursor := 0; cursor < len(testdata); cursor += mtu {
+			for i := 0; i < workers; i++ {
+				_, err := s.Write(testdata[cursor : cursor+mtu])
+				if err != nil {
+					panic(err)
+				}
+			}
+		}
+	}
+	if err := s.Close(); err != nil {
+		panic(err)
+	}
+}
diff --git a/services/application/applicationd/perms_test.go b/services/application/applicationd/perms_test.go
index c4446f9..207c0a2 100644
--- a/services/application/applicationd/perms_test.go
+++ b/services/application/applicationd/perms_test.go
@@ -39,8 +39,6 @@
 	publishName := args[0]
 	storedir := args[1]
 
-	v23.GetNamespace(ctx).CacheCtl(naming.DisableCache(true))
-
 	defer fmt.Fprintf(env.Stdout, "%v terminating\n", publishName)
 	defer ctx.VI(1).Infof("%v terminating", publishName)
 
@@ -50,7 +48,7 @@
 	}
 	server, err := xrpc.NewDispatchingServer(ctx, publishName, dispatcher)
 	if err != nil {
-		ctx.Fatalf("Serve(%v) failed: %v", publishName, err)
+		ctx.Fatalf("NewDispatchingServer(%v) failed: %v", publishName, err)
 	}
 	ctx.VI(1).Infof("applicationd name: %v", server.Status().Endpoints[0].Name())
 
@@ -63,17 +61,18 @@
 func TestApplicationUpdatePermissions(t *testing.T) {
 	ctx, shutdown := test.V23Init()
 	defer shutdown()
-	v23.GetNamespace(ctx).CacheCtl(naming.DisableCache(true))
 
-	// By default, all principals in this test will have blessings generated based
-	// on the username/machine running this process. Give them recognizable names
-	// ("root/self" etc.), so the Permissions can be set deterministically.
-	idp := testutil.NewIDProvider("root")
+	// V23Init sets the context up with a self-signed principal, whose
+	// blessing (test-blessing) will act as the root blessing for the test.
+	const rootBlessing = test.TestBlessing
+	idp := testutil.IDProviderFromPrincipal(v23.GetPrincipal(ctx))
+	// Call ourselves test-blessing/self, distinct from test-blessing/other
+	// which we'll give to the 'other' context.
 	if err := idp.Bless(v23.GetPrincipal(ctx), "self"); err != nil {
 		t.Fatal(err)
 	}
 
-	sh, deferFn := servicetest.CreateShellAndMountTable(t, ctx, v23.GetPrincipal(ctx))
+	sh, deferFn := servicetest.CreateShell(t, ctx, nil)
 	defer deferFn()
 
 	// setup mock up directory to put state in
@@ -122,29 +121,29 @@
 	}
 	expected := access.Permissions{
 		"Admin": access.AccessList{
-			In:    []security.BlessingPattern{"root/$", "root/self/$", "root/self/child"},
+			In:    []security.BlessingPattern{test.TestBlessing + "/$", rootBlessing + "/self/$", rootBlessing + "/self/child"},
 			NotIn: []string(nil)},
 		"Read": access.AccessList{
-			In:    []security.BlessingPattern{"root/$", "root/self/$", "root/self/child"},
+			In:    []security.BlessingPattern{rootBlessing + "/$", rootBlessing + "/self/$", rootBlessing + "/self/child"},
 			NotIn: []string(nil)},
 		"Write": access.AccessList{
-			In:    []security.BlessingPattern{"root/$", "root/self/$", "root/self/child"},
+			In:    []security.BlessingPattern{rootBlessing + "/$", rootBlessing + "/self/$", rootBlessing + "/self/child"},
 			NotIn: []string(nil)},
 		"Debug": access.AccessList{
-			In:    []security.BlessingPattern{"root/$", "root/self/$", "root/self/child"},
+			In:    []security.BlessingPattern{rootBlessing + "/$", rootBlessing + "/self/$", rootBlessing + "/self/child"},
 			NotIn: []string(nil)},
 		"Resolve": access.AccessList{
-			In:    []security.BlessingPattern{"root/$", "root/self/$", "root/self/child"},
+			In:    []security.BlessingPattern{rootBlessing + "/$", rootBlessing + "/self/$", rootBlessing + "/self/child"},
 			NotIn: []string(nil)}}
 	if got := perms; !reflect.DeepEqual(expected.Normalize(), got.Normalize()) {
-		t.Errorf("got %#v, exected %#v ", got, expected)
+		t.Errorf("got %#v, expected %#v ", got, expected)
 	}
 
 	ctx.VI(2).Infof("self attempting to give other permission to update application")
 	newPerms := make(access.Permissions)
 	for _, tag := range access.AllTypicalTags() {
-		newPerms.Add("root/self", string(tag))
-		newPerms.Add("root/other", string(tag))
+		newPerms.Add(rootBlessing+"/self", string(tag))
+		newPerms.Add(rootBlessing+"/other", string(tag))
 	}
 	if err := repostub.SetPermissions(ctx, newPerms, ""); err != nil {
 		t.Fatalf("SetPermissions failed: %v", err)
@@ -170,7 +169,7 @@
 		t.Fatalf("GetPermissions 2 should not have failed: %v", err)
 	}
 	perms["Admin"] = access.AccessList{
-		In:    []security.BlessingPattern{"root/other"},
+		In:    []security.BlessingPattern{rootBlessing + "/other"},
 		NotIn: []string{}}
 	if err = repostub.SetPermissions(otherCtx, perms, version); err != nil {
 		t.Fatalf("SetPermissions failed: %v", err)
@@ -186,19 +185,19 @@
 	}
 	expected = access.Permissions{
 		"Admin": access.AccessList{
-			In:    []security.BlessingPattern{"root/other"},
+			In:    []security.BlessingPattern{rootBlessing + "/other"},
 			NotIn: []string{}},
-		"Read": access.AccessList{In: []security.BlessingPattern{"root/other",
-			"root/self"},
+		"Read": access.AccessList{In: []security.BlessingPattern{rootBlessing + "/other",
+			rootBlessing + "/self"},
 			NotIn: []string{}},
-		"Write": access.AccessList{In: []security.BlessingPattern{"root/other",
-			"root/self"},
+		"Write": access.AccessList{In: []security.BlessingPattern{rootBlessing + "/other",
+			rootBlessing + "/self"},
 			NotIn: []string{}},
-		"Debug": access.AccessList{In: []security.BlessingPattern{"root/other",
-			"root/self"},
+		"Debug": access.AccessList{In: []security.BlessingPattern{rootBlessing + "/other",
+			rootBlessing + "/self"},
 			NotIn: []string{}},
-		"Resolve": access.AccessList{In: []security.BlessingPattern{"root/other",
-			"root/self"},
+		"Resolve": access.AccessList{In: []security.BlessingPattern{rootBlessing + "/other",
+			rootBlessing + "/self"},
 			NotIn: []string{}}}
 
 	if got := perms; !reflect.DeepEqual(expected.Normalize(), got.Normalize()) {
diff --git a/services/device/internal/impl/app_service.go b/services/device/internal/impl/app_service.go
index fabd4cd..4712114 100644
--- a/services/device/internal/impl/app_service.go
+++ b/services/device/internal/impl/app_service.go
@@ -127,6 +127,7 @@
 	"path"
 	"path/filepath"
 	"reflect"
+	"regexp"
 	"strconv"
 	"strings"
 	"text/template"
@@ -204,6 +205,8 @@
 	reap *reaper
 	// mtAddress is the address of the local mounttable.
 	mtAddress string
+	// appServiceName is a name by which the appService can be reached.
+	appServiceName string
 }
 
 // appService implements the Device manager's Application interface.
@@ -337,6 +340,12 @@
 // title, and thereby being installed in the same app dir.  Do we want to
 // prevent that for the same user or across users?
 
+const (
+	appDirPrefix       = "app-"
+	installationPrefix = "installation-"
+	instancePrefix     = "instance-"
+)
+
 // applicationDirName generates a cryptographic hash of the application title,
 // to be used as a directory name for installations of the application with the
 // given title.
@@ -344,15 +353,15 @@
 	h := md5.New()
 	h.Write([]byte(title))
 	hash := strings.TrimRight(base64.URLEncoding.EncodeToString(h.Sum(nil)), "=")
-	return "app-" + hash
+	return appDirPrefix + hash
 }
 
 func installationDirName(installationID string) string {
-	return "installation-" + installationID
+	return installationPrefix + installationID
 }
 
 func instanceDirName(instanceID string) string {
-	return "instance-" + instanceID
+	return instancePrefix + instanceID
 }
 
 func mkdir(dir string) error {
@@ -837,6 +846,20 @@
 	return suidHelper.getAppCmd(&saArgs)
 }
 
+// instanceNameFromDir returns the instance name, given the instanceDir.
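+// The returned name has the form <envelope title>/<installation>/<instance>,
+// e.g. (hypothetically) "google naps/123/456".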
+func instanceNameFromDir(ctx *context.T, instanceDir string) (string, error) {
+	_, _, installation, instance := parseInstanceDir(instanceDir)
+	if installation == "" || instance == "" {
+		return "", fmt.Errorf("Unable to parse instanceDir %v", instanceDir)
+	}
+
+	env, err := loadEnvelopeForInstance(ctx, instanceDir)
+	if err != nil {
+		return "", err
+	}
+	return env.Title + "/" + installation + "/" + instance, nil
+}
+
 func (i *appRunner) startCmd(ctx *context.T, instanceDir string, cmd *exec.Cmd) (int, error) {
 	info, err := loadInstanceInfo(ctx, instanceDir)
 	if err != nil {
@@ -863,6 +886,14 @@
 	cfg.Set(mgmt.ProtocolConfigKey, "tcp")
 	cfg.Set(mgmt.AddressConfigKey, "127.0.0.1:0")
 	cfg.Set(mgmt.ParentBlessingConfigKey, info.DeviceManagerPeerPattern)
+	cfg.Set(mgmt.PublisherBlessingPrefixesKey,
+		v23.GetPrincipal(ctx).BlessingStore().Default().String())
+
+	if instanceName, err := instanceNameFromDir(ctx, instanceDir); err != nil {
+		return 0, err
+	} else {
+		cfg.Set(mgmt.InstanceNameKey, naming.Join(i.appServiceName, instanceName))
+	}
 
 	appPermsDir := filepath.Join(instanceDir, "debugacls", "data")
 	cfg.Set("v23.permissions.file", "runtime:"+appPermsDir)
@@ -990,7 +1021,7 @@
 // device manager then crashes between the reaper marking the app not
 // running and the go routine invoking this function having a chance to
 // complete.
-func (i *appRunner) restartAppIfNecessary(instanceDir string) {
+func (i *appRunner) restartAppIfNecessary(ctx *context.T, instanceDir string) {
 	if err := transitionInstance(instanceDir, device.InstanceStateNotRunning, device.InstanceStateLaunching); err != nil {
 		vlog.Error(err)
 		return
@@ -1007,7 +1038,7 @@
 		return
 	}
 
-	if err := i.run(nil, instanceDir); err != nil {
+	if err := i.run(ctx, instanceDir); err != nil {
 		vlog.Error(err)
 	}
 }
@@ -1040,6 +1071,17 @@
 	return instanceDir, nil
 }
 
+// parseInstanceDir is a partial inverse of instanceDir.  It cannot recover the
+// app title, as that has been hashed, so it returns the hashed appDir instead.
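+// For example, a (hypothetical) dir such as
+// "/root/app-XYZ/installation-123/instances/instance-456" parses to
+// ("/root", "/app-XYZ", "123", "456").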
+func parseInstanceDir(dir string) (prefix, appDir, installation, instance string) {
+	dirRE := regexp.MustCompile("(/.*)(/" + appDirPrefix + "[^/]+)/" + installationPrefix + "([^/]+)/" + "instances/" + instancePrefix + "([^/]+)$")
+	matches := dirRE.FindStringSubmatch(dir)
+	if len(matches) < 5 {
+		return "", "", "", ""
+	}
+	return matches[1], matches[2], matches[3], matches[4]
+}
+
 // instanceDir returns the path to the directory containing the app instance
 // referred to by the invoker's suffix, as well as the corresponding not-running
 // instance dir.  Returns an error if the suffix does not name an instance.
@@ -1322,7 +1364,7 @@
 
 func (i *appService) scanEnvelopes(ctx *context.T, tree *treeNode, appDir string) {
 	// Find all envelopes, extract installID.
-	envGlob := []string{i.config.Root, appDir, "installation-*", "*", "envelope"}
+	envGlob := []string{i.config.Root, appDir, installationPrefix + "*", "*", "envelope"}
 	envelopes, err := filepath.Glob(filepath.Join(envGlob...))
 	if err != nil {
 		vlog.Errorf("unexpected error: %v", err)
@@ -1339,7 +1381,7 @@
 			vlog.Errorf("unexpected number of path components: %q (%q)", elems, path)
 			continue
 		}
-		installID := strings.TrimPrefix(elems[1], "installation-")
+		installID := strings.TrimPrefix(elems[1], installationPrefix)
 		tree.find([]string{env.Title, installID}, true)
 	}
 	return
@@ -1357,7 +1399,7 @@
 	// Add the node corresponding to the installation itself.
 	tree.find(i.suffix[:2], true)
 	// Find all instances.
-	infoGlob := []string{installDir, "instances", "instance-*", "info"}
+	infoGlob := []string{installDir, "instances", instancePrefix + "*", "info"}
 	instances, err := filepath.Glob(filepath.Join(infoGlob...))
 	if err != nil {
 		vlog.Errorf("unexpected error: %v", err)
@@ -1374,14 +1416,12 @@
 	if _, err := loadInstanceInfo(ctx, instanceDir); err != nil {
 		return
 	}
-	relpath, _ := filepath.Rel(i.config.Root, instanceDir)
-	elems := strings.Split(relpath, string(filepath.Separator))
-	if len(elems) < 4 {
-		vlog.Errorf("unexpected number of path components: %q (%q)", elems, instanceDir)
+	rootDir, _, installID, instanceID := parseInstanceDir(instanceDir)
+	if installID == "" || instanceID == "" || filepath.Clean(i.config.Root) != filepath.Clean(rootDir) {
+		vlog.Errorf("failed to parse instanceDir %v (got: %v %v %v)", instanceDir, rootDir, installID, instanceID)
 		return
 	}
-	installID := strings.TrimPrefix(elems[1], "installation-")
-	instanceID := strings.TrimPrefix(elems[3], "instance-")
+
 	tree.find([]string{title, installID, instanceID, "logs"}, true)
 	if instanceStateIs(instanceDir, device.InstanceStateRunning) {
 		for _, obj := range []string{"pprof", "stats"} {
@@ -1394,7 +1434,7 @@
 	tree := newTreeNode()
 	switch len(i.suffix) {
 	case 0:
-		i.scanEnvelopes(ctx, tree, "app-*")
+		i.scanEnvelopes(ctx, tree, appDirPrefix+"*")
 	case 1:
 		appDir := applicationDirName(i.suffix[0])
 		i.scanEnvelopes(ctx, tree, appDir)
diff --git a/services/device/internal/impl/applife/app_life_test.go b/services/device/internal/impl/applife/app_life_test.go
index cde8c9a..0e42790 100644
--- a/services/device/internal/impl/applife/app_life_test.go
+++ b/services/device/internal/impl/applife/app_life_test.go
@@ -161,10 +161,32 @@
 	verifyAppPeerBlessings(t, ctx, pubCtx, instanceDebug, envelope)
 
 	// Wait until the app pings us that it's ready.
-	pingCh.VerifyPingArgs(t, utiltest.UserName(t), "flag-val-install", "env-val-envelope")
-
+	pingResult := pingCh.VerifyPingArgs(t, utiltest.UserName(t), "flag-val-install", "env-val-envelope")
 	v1EP1 := utiltest.Resolve(t, ctx, "appV1", 1, true)[0]
 
+	// Check that the instance name handed to the app looks plausible
+	nameRE := regexp.MustCompile(".*/apps/google naps/[^/]+/[^/]+$")
+	if nameRE.FindString(pingResult.InstanceName) == "" {
+		t.Fatalf("Unexpected instance name: %v", pingResult.InstanceName)
+	}
+
+	// There should be at least one publisher blessing prefix, and all prefixes
+	// should end in "/mydevice" because they are just the device manager's blessings.
+	prefixes := strings.Split(pingResult.PubBlessingPrefixes, ",")
+	if pingResult.PubBlessingPrefixes == "" {
+		t.Fatalf("No publisher blessing prefixes found: %v", pingResult)
+	}
+	for _, p := range prefixes {
+		if !strings.HasSuffix(p, "/mydevice") {
+			t.Fatalf("publisher Blessing prefixes don't look right: %v", pingResult.PubBlessingPrefixes)
+		}
+	}
+
+	// We used a signed envelope, so there should have been some publisher blessings.
+	if !hasPrefixMatches(pingResult.PubBlessingPrefixes, pingResult.DefaultPeerBlessings) {
+		t.Fatalf("Publisher Blessing Prefixes are not as expected: %v vs %v", pingResult.PubBlessingPrefixes, pingResult.DefaultPeerBlessings)
+	}
+
 	// Stop the app instance.
 	utiltest.KillApp(t, ctx, appID, instance1ID)
 	utiltest.VerifyState(t, ctx, device.InstanceStateNotRunning, appID, instance1ID)
@@ -265,10 +287,21 @@
 	}
 	// Resume the first instance and verify it's running v2 now.
 	utiltest.RunApp(t, ctx, appID, instance1ID)
-	pingCh.VerifyPingArgs(t, utiltest.UserName(t), "flag-val-install", "env-val-envelope")
+	pingResult = pingCh.VerifyPingArgs(t, utiltest.UserName(t), "flag-val-install", "env-val-envelope")
 	utiltest.Resolve(t, ctx, "appV1", 1, false)
 	utiltest.Resolve(t, ctx, "appV2", 1, false)
 
+	// Although v2 does not have a signed envelope, this was an update of v1, which did.
+	// This app's config still includes publisher blessing prefixes, and it should still
+	// have the publisher blessing it acquired earlier.
+	//
+	// TODO: This behavior is non-ideal. A reasonable requirement in the future would
+	// be that the publisher blessing string remain unchanged on updates to an
+	// installation, just as the title is not allowed to change.
+	if !hasPrefixMatches(pingResult.PubBlessingPrefixes, pingResult.DefaultPeerBlessings) {
+		t.Fatalf("Publisher Blessing Prefixes are not as expected: %v vs %v", pingResult.PubBlessingPrefixes, pingResult.DefaultPeerBlessings)
+	}
+
 	// Reverting first instance fails since it's still running.
 	utiltest.RevertAppExpectError(t, ctx, appID+"/"+instance1ID, errors.ErrInvalidOperation.ID)
 	// Stop first instance and try again.
@@ -289,7 +322,12 @@
 		t.Fatalf("Instance version expected to be %v, got %v instead", v2, v)
 	}
 	// Wait until the app pings us that it's ready.
-	pingCh.VerifyPingArgs(t, utiltest.UserName(t), "flag-val-install", "env-val-envelope")
+	pingResult = pingCh.VerifyPingArgs(t, utiltest.UserName(t), "flag-val-install", "env-val-envelope")
+	// This app should not have publisher blessings. It was started from an installation
+	// that did not have a signed envelope.
+	if hasPrefixMatches(pingResult.PubBlessingPrefixes, pingResult.DefaultPeerBlessings) {
+		t.Fatalf("Publisher Blessing Prefixes are not as expected: %v vs %v", pingResult.PubBlessingPrefixes, pingResult.DefaultPeerBlessings)
+	}
 
 	utiltest.Resolve(t, ctx, "appV2", 1, true)
 
@@ -561,6 +599,22 @@
 	return pubCtx, nil
 }
 
+// hasPrefixMatches takes a set of comma-separated prefixes and a set of
+// comma-separated strings, and reports whether any string matches any prefix.
+func hasPrefixMatches(prefixList, stringList string) bool {
+	prefixes := strings.Split(prefixList, ",")
+	inStrings := strings.Split(stringList, ",")
+
+	for _, s := range inStrings {
+		for _, p := range prefixes {
+			if strings.HasPrefix(s, p) {
+				return true
+			}
+		}
+	}
+	return false
+}
+
 // verifyAppPeerBlessings checks the instanceDebug string to ensure that the app is running with
 // the expected blessings for peer "..." (i.e. security.AllPrincipals) .
 //
diff --git a/services/device/internal/impl/dispatcher.go b/services/device/internal/impl/dispatcher.go
index 2afb044..f016313 100644
--- a/services/device/internal/impl/dispatcher.go
+++ b/services/device/internal/impl/dispatcher.go
@@ -135,8 +135,9 @@
 		}
 	}
 	reap, err := newReaper(ctx, config.Root, &appRunner{
-		callback:      d.internal.callback,
-		securityAgent: d.internal.securityAgent,
+		callback:       d.internal.callback,
+		securityAgent:  d.internal.securityAgent,
+		appServiceName: naming.Join(d.config.Name, appsSuffix),
 	})
 	if err != nil {
 		return nil, verror.New(errCantCreateAppWatcher, ctx, err)
@@ -324,10 +325,11 @@
 			uat:        d.uat,
 			permsStore: d.permsStore,
 			runner: &appRunner{
-				reap:          d.internal.reap,
-				callback:      d.internal.callback,
-				securityAgent: d.internal.securityAgent,
-				mtAddress:     d.mtAddress,
+				reap:           d.internal.reap,
+				callback:       d.internal.callback,
+				securityAgent:  d.internal.securityAgent,
+				mtAddress:      d.mtAddress,
+				appServiceName: naming.Join(d.config.Name, appsSuffix),
 			},
 		})
 		appSpecificAuthorizer, err := newAppSpecificAuthorizer(auth, d.config, components[1:], d.permsStore)
diff --git a/services/device/internal/impl/instance_reaping.go b/services/device/internal/impl/instance_reaping.go
index 9bfb4e1..7f17aed 100644
--- a/services/device/internal/impl/instance_reaping.go
+++ b/services/device/internal/impl/instance_reaping.go
@@ -46,6 +46,7 @@
 type reaper struct {
 	c          chan pidInstanceDirPair
 	startState *appRunner
+	ctx        *context.T
 }
 
 var stashedPidMap map[string]int
@@ -63,13 +64,14 @@
 	r := &reaper{
 		c:          make(chan pidInstanceDirPair),
 		startState: startState,
+		ctx:        ctx,
 	}
 	r.startState.reap = r
 	go r.processStatusPolling(pidMap)
 
 	// Restart daemon jobs if they're not running (say because the machine crashed.)
 	for _, idir := range restartCandidates {
-		go r.startState.restartAppIfNecessary(idir)
+		go r.startState.restartAppIfNecessary(ctx, idir)
 	}
 	return r, nil
 }
@@ -99,7 +101,7 @@
 				// No such PID.
 				vlog.VI(2).Infof("processStatusPolling discovered pid %d ended", pid)
 				markNotRunning(idir)
-				go r.startState.restartAppIfNecessary(idir)
+				go r.startState.restartAppIfNecessary(r.ctx, idir)
 				delete(trackedPids, idir)
 			case nil, syscall.EPERM:
 				vlog.VI(2).Infof("processStatusPolling saw live pid: %d", pid)
diff --git a/services/device/internal/impl/utiltest/app.go b/services/device/internal/impl/utiltest/app.go
index 1f959b8..093796d 100644
--- a/services/device/internal/impl/utiltest/app.go
+++ b/services/device/internal/impl/utiltest/app.go
@@ -11,7 +11,6 @@
 	"io/ioutil"
 	"os"
 	"path/filepath"
-	"reflect"
 	"strings"
 	"testing"
 	"time"
@@ -22,6 +21,8 @@
 	"v.io/v23/rpc"
 	"v.io/v23/security"
 	"v.io/x/lib/vlog"
+	"v.io/x/ref/lib/exec"
+	"v.io/x/ref/lib/mgmt"
 	"v.io/x/ref/lib/signals"
 	"v.io/x/ref/lib/xrpc"
 	"v.io/x/ref/services/device/internal/suid"
@@ -63,8 +64,9 @@
 }
 
 type PingArgs struct {
-	Username, FlagValue, EnvValue string
-	Pid                           int
+	Username, FlagValue, EnvValue,
+	DefaultPeerBlessings, PubBlessingPrefixes, InstanceName string
+	Pid int
 }
 
 // ping makes a RPC from the App back to the invoking device manager
@@ -82,11 +84,20 @@
 	args := &PingArgs{
 		// TODO(rjkroege): Consider validating additional parameters
 		// from helper.
-		Username:  savedArgs.Uname,
-		FlagValue: flagValue,
-		EnvValue:  os.Getenv(TestEnvVarName),
-		Pid:       os.Getpid(),
+		Username:             savedArgs.Uname,
+		FlagValue:            flagValue,
+		EnvValue:             os.Getenv(TestEnvVarName),
+		Pid:                  os.Getpid(),
+		DefaultPeerBlessings: v23.GetPrincipal(ctx).BlessingStore().ForPeer("nonexistent").String(),
 	}
+
+	if handle, err := exec.GetChildHandle(); err != nil {
+		vlog.Fatalf("Couldn't get Child Handle: %v", err)
+	} else {
+		args.PubBlessingPrefixes, _ = handle.Config.Get(mgmt.PublisherBlessingPrefixesKey)
+		args.InstanceName, _ = handle.Config.Get(mgmt.InstanceNameKey)
+	}
+
 	client := v23.GetClient(ctx)
 	if call, err := client.StartCall(ctx, "pingserver", "Ping", []interface{}{args}); err != nil {
 		vlog.Fatalf("StartCall failed: %v", err)
@@ -179,17 +190,12 @@
 	return args
 }
 
-func (p PingServer) VerifyPingArgs(t *testing.T, username, flagValue, envValue string) {
+func (p PingServer) VerifyPingArgs(t *testing.T, username, flagValue, envValue string) PingArgs {
 	args := p.WaitForPingArgs(t)
-	wantArgs := PingArgs{
-		Username:  username,
-		FlagValue: flagValue,
-		EnvValue:  envValue,
-		Pid:       args.Pid, // We are not checking for a value of Pid
+	if args.Username != username || args.FlagValue != flagValue || args.EnvValue != envValue {
+		t.Fatalf(testutil.FormatLogLine(2, "got ping args %q, expected [username = %v, flag value = %v, env value = %v]", args, username, flagValue, envValue))
 	}
-	if !reflect.DeepEqual(args, wantArgs) {
-		t.Fatalf(testutil.FormatLogLine(2, "got ping args %q, expected %q", args, wantArgs))
-	}
+	return args // Useful for tests that want to check other values in the PingArgs result
 }
 
 // HangingApp is the same as App, except that it does not exit properly after
diff --git a/services/internal/servicetest/modules.go b/services/internal/servicetest/modules.go
index ee13fa5..56b4264 100644
--- a/services/internal/servicetest/modules.go
+++ b/services/internal/servicetest/modules.go
@@ -109,6 +109,35 @@
 	return sh, fn
 }
 
+// CreateShell builds a new modules shell.
+func CreateShell(t *testing.T, ctx *context.T, p security.Principal) (*modules.Shell, func()) {
+	sh, err := modules.NewShell(ctx, p, testing.Verbose(), t)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	opts := sh.DefaultStartOpts()
+	opts.ExpectTimeout = ExpectTimeout
+	sh.SetDefaultStartOpts(opts)
+	// The shell will, by default, share credentials with its children.
+	sh.ClearVar(ref.EnvCredentials)
+
+	fn := func() {
+		ctx.VI(1).Info("------------ CLEANUP ------------")
+		ctx.VI(1).Info("---------------------------------")
+		ctx.VI(1).Info("--(cleaning up shell)------------")
+		if err := sh.Cleanup(os.Stdout, os.Stderr); err != nil {
+			t.Errorf(testutil.FormatLogLine(2, "sh.Cleanup failed with %v", err))
+		}
+		ctx.VI(1).Info("--(done cleaning up shell)-------")
+	}
+	nsRoots := v23.GetNamespace(ctx).Roots()
+	if len(nsRoots) == 0 {
+		t.Fatalf("shell context has no namespace roots")
+	}
+	sh.SetVar(ref.EnvNamespacePrefix, nsRoots[0])
+	return sh, fn
+}
+
 // RunCommand runs a modules command.
 func RunCommand(t *testing.T, sh *modules.Shell, env []string, prog modules.Program, args ...string) modules.Handle {
 	h, err := sh.StartWithOpts(sh.DefaultStartOpts(), env, prog, args...)
diff --git a/test/testutil/security.go b/test/testutil/security.go
index a59c5f8..2c673dd 100644
--- a/test/testutil/security.go
+++ b/test/testutil/security.go
@@ -78,6 +78,12 @@
 	return &IDProvider{p, b}
 }
 
+// IDProviderFromPrincipal creates an IDProvider for the given principal.  It
+// will bless other principals with extensions of its default blessing.
+func IDProviderFromPrincipal(p security.Principal) *IDProvider {
+	return &IDProvider{p, p.BlessingStore().Default()}
+}
+
 // Bless sets up the provided principal to use blessings from idp as its
 // default. It is shorthand for:
 //    b, _ := idp.NewBlessings(who, extension, caveats...)
@@ -89,16 +95,7 @@
 	if err != nil {
 		return err
 	}
-	if err := who.BlessingStore().SetDefault(b); err != nil {
-		return err
-	}
-	if _, err := who.BlessingStore().Set(b, security.AllPrincipals); err != nil {
-		return err
-	}
-	if err := who.AddToRoots(b); err != nil {
-		return err
-	}
-	return nil
+	return vsecurity.SetDefaultBlessings(who, b)
 }
 
 // NewBlessings returns Blessings that extend the identity provider's blessing