profiles/internal: Add IsLeaf

When a server uses a LeafDispatcher, it now publishes itself with a
flag indicating that it is a leaf. Namespace clients can use this
information to avoid looking for objects beyond the root of the
server.

The MountEntry carried in glob replies now also includes the IsLeaf
bit, to indicate which internal objects are leaves.
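
For illustration, a minimal sketch of what a namespace client can do
with the new bit (skipIfLeaf is a hypothetical helper; ns.Resolve and
MountEntry.IsLeaf are the only API surface assumed from this change):

	// skipIfLeaf reports whether a Glob below name can be skipped
	// because the server mounted there is a leaf, i.e. hosts
	// nothing beyond its root object.
	func skipIfLeaf(ctx *context.T, ns ns.Namespace, name string) (bool, error) {
		entry, err := ns.Resolve(ctx, name)
		if err != nil {
			return false, err
		}
		return entry.IsLeaf, nil
	}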

BUG=https://github.com/veyron/release-issues/issues/1495

MultiPart: 2/2
Change-Id: Idbae6ad68d19da1d41df7da68d39167e45dcfa10
diff --git a/cmd/mounttable/impl_test.go b/cmd/mounttable/impl_test.go
index ba378d3..bacf877 100644
--- a/cmd/mounttable/impl_test.go
+++ b/cmd/mounttable/impl_test.go
@@ -38,8 +38,8 @@
 func (s *server) Glob__(call rpc.ServerCall, pattern string) (<-chan naming.GlobReply, error) {
 	vlog.VI(2).Infof("Glob() was called. suffix=%v pattern=%q", s.suffix, pattern)
 	ch := make(chan naming.GlobReply, 2)
-	ch <- naming.GlobReplyEntry{naming.MountEntry{"name1", []naming.MountedServer{{"server1", nil, deadline(1)}}, false}}
-	ch <- naming.GlobReplyEntry{naming.MountEntry{"name2", []naming.MountedServer{{"server2", nil, deadline(2)}, {"server3", nil, deadline(3)}}, false}}
+	ch <- naming.GlobReplyEntry{naming.MountEntry{"name1", []naming.MountedServer{{"server1", nil, deadline(1)}}, false, false}}
+	ch <- naming.GlobReplyEntry{naming.MountEntry{"name2", []naming.MountedServer{{"server2", nil, deadline(2)}, {"server3", nil, deadline(3)}}, false, false}}
 	close(ch)
 	return ch, nil
 }
diff --git a/profiles/internal/lib/publisher/publisher.go b/profiles/internal/lib/publisher/publisher.go
index 25c9419..0d60f03 100644
--- a/profiles/internal/lib/publisher/publisher.go
+++ b/profiles/internal/lib/publisher/publisher.go
@@ -19,11 +19,11 @@
 // Publisher manages the publishing of servers in mounttable.
 type Publisher interface {
 	// AddServer adds a new server to be mounted.
-	AddServer(server string, ServesMountTable bool)
+	AddServer(server string)
 	// RemoveServer removes a server from the list of mounts.
 	RemoveServer(server string)
 	// AddName adds a new name for all servers to be mounted as.
-	AddName(name string)
+	AddName(name string, ServesMountTable bool, IsLeaf bool)
 	// RemoveName removes a name.
 	RemoveName(name string)
 	// Status returns a snapshot of the publisher's current state.
@@ -54,7 +54,6 @@
 
 type addServerCmd struct {
 	server string        // server to add
-	mt     bool          // true if server serves a mount table
 	done   chan struct{} // closed when the cmd is done
 }
 
@@ -65,6 +64,8 @@
 
 type addNameCmd struct {
 	name string        // name to add
+	mt   bool          // true if server serves a mount table
+	leaf bool          // true if server is a leaf
 	done chan struct{} // closed when the cmd is done
 }
 
@@ -98,9 +99,9 @@
 	}
 }
 
-func (p *publisher) AddServer(server string, mt bool) {
+func (p *publisher) AddServer(server string) {
 	done := make(chan struct{})
-	if p.sendCmd(addServerCmd{server, mt, done}) {
+	if p.sendCmd(addServerCmd{server, done}) {
 		<-done
 	}
 }
@@ -112,9 +113,9 @@
 	}
 }
 
-func (p *publisher) AddName(name string) {
+func (p *publisher) AddName(name string, mt bool, leaf bool) {
 	done := make(chan struct{})
-	if p.sendCmd(addNameCmd{name, done}) {
+	if p.sendCmd(addNameCmd{name, mt, leaf, done}) {
 		<-done
 	}
 }
@@ -175,13 +176,13 @@
 				vlog.VI(2).Info("rpc pub: exit runLoop")
 				return
 			case addServerCmd:
-				state.addServer(tcmd.server, tcmd.mt)
+				state.addServer(tcmd.server)
 				close(tcmd.done)
 			case removeServerCmd:
 				state.removeServer(tcmd.server)
 				close(tcmd.done)
 			case addNameCmd:
-				state.addName(tcmd.name)
+				state.addName(tcmd.name, tcmd.mt, tcmd.leaf)
 				close(tcmd.done)
 			case removeNameCmd:
 				state.removeName(tcmd.name)
@@ -210,20 +211,25 @@
 	ctx      *context.T
 	ns       ns.Namespace
 	period   time.Duration
-	deadline time.Time       // deadline for the next sync call
-	names    map[string]bool // names that have been added
-	servers  map[string]bool // servers that have been added, true
+	deadline time.Time           // deadline for the next sync call
+	names    map[string]nameAttr // names that have been added
+	servers  map[string]bool     // servers that have been added
 	// map each (name,server) to its status.
 	mounts map[mountKey]*rpc.MountStatus
 }
 
+type nameAttr struct {
+	servesMT bool
+	isLeaf   bool
+}
+
 func newPubState(ctx *context.T, ns ns.Namespace, period time.Duration) *pubState {
 	return &pubState{
 		ctx:      ctx,
 		ns:       ns,
 		period:   period,
 		deadline: time.Now().Add(period),
-		names:    make(map[string]bool),
+		names:    make(map[string]nameAttr),
 		servers:  make(map[string]bool),
 		mounts:   make(map[mountKey]*rpc.MountStatus),
 	}
@@ -233,22 +239,23 @@
 	return time.After(ps.deadline.Sub(time.Now()))
 }
 
-func (ps *pubState) addName(name string) {
+func (ps *pubState) addName(name string, mt bool, leaf bool) {
 	// Each non-dup name that is added causes new mounts to be created for all
 	// existing servers.
-	if ps.names[name] {
+	if _, exists := ps.names[name]; exists {
 		return
 	}
-	ps.names[name] = true
-	for server, servesMT := range ps.servers {
+	attr := nameAttr{mt, leaf}
+	ps.names[name] = attr
+	for server, _ := range ps.servers {
 		status := new(rpc.MountStatus)
 		ps.mounts[mountKey{name, server}] = status
-		ps.mount(name, server, status, servesMT)
+		ps.mount(name, server, status, attr)
 	}
 }
 
 func (ps *pubState) removeName(name string) {
-	if !ps.names[name] {
+	if _, exists := ps.names[name]; !exists {
 		return
 	}
 	for server, _ := range ps.servers {
@@ -259,15 +266,15 @@
 	delete(ps.names, name)
 }
 
-func (ps *pubState) addServer(server string, servesMT bool) {
+func (ps *pubState) addServer(server string) {
 	// Each non-dup server that is added causes new mounts to be created for all
 	// existing names.
-	if !ps.servers[server] {
-		ps.servers[server] = servesMT
-		for name, _ := range ps.names {
+	if _, exists := ps.servers[server]; !exists {
+		ps.servers[server] = true
+		for name, attr := range ps.names {
 			status := new(rpc.MountStatus)
 			ps.mounts[mountKey{name, server}] = status
-			ps.mount(name, server, status, servesMT)
+			ps.mount(name, server, status, attr)
 		}
 	}
 }
@@ -284,13 +291,13 @@
 	}
 }
 
-func (ps *pubState) mount(name, server string, status *rpc.MountStatus, servesMT bool) {
+func (ps *pubState) mount(name, server string, status *rpc.MountStatus, attr nameAttr) {
 	// Always mount with ttl = period + slack, regardless of whether this is
 	// triggered by a newly added server or name, or by sync.  The next call
 	// to sync will occur within the next period, and refresh all mounts.
 	ttl := ps.period + mountTTLSlack
 	status.LastMount = time.Now()
-	status.LastMountErr = ps.ns.Mount(ps.ctx, name, server, ttl, naming.ServesMountTableOpt(servesMT))
+	status.LastMountErr = ps.ns.Mount(ps.ctx, name, server, ttl, naming.ServesMountTableOpt(attr.servesMT), naming.IsLeafOpt(attr.isLeaf))
 	status.TTL = ttl
 	if status.LastMountErr != nil {
 		vlog.Errorf("rpc pub: couldn't mount(%v, %v, %v): %v", name, server, ttl, status.LastMountErr)
@@ -306,7 +313,7 @@
 			// Desired state is "unmounted", failed at previous attempt. Retry.
 			ps.unmount(key.name, key.server, status)
 		} else {
-			ps.mount(key.name, key.server, status, ps.servers[key.server])
+			ps.mount(key.name, key.server, status, ps.names[key.name])
 		}
 	}
 }
@@ -328,7 +335,18 @@
 	}
 }
 
-func copyToSlice(sl map[string]bool) []string {
+func copyNamesToSlice(sl map[string]nameAttr) []string {
+	var ret []string
+	for s, _ := range sl {
+		if len(s) == 0 {
+			continue
+		}
+		ret = append(ret, s)
+	}
+	return ret
+}
+
+func copyServersToSlice(sl map[string]bool) []string {
 	var ret []string
 	for s, _ := range sl {
 		if len(s) == 0 {
@@ -341,8 +359,8 @@
 
 func (ps *pubState) getStatus() rpc.MountState {
 	st := make([]rpc.MountStatus, 0, len(ps.mounts))
-	names := copyToSlice(ps.names)
-	servers := copyToSlice(ps.servers)
+	names := copyNamesToSlice(ps.names)
+	servers := copyServersToSlice(ps.servers)
 	sort.Strings(names)
 	sort.Strings(servers)
 	for _, name := range names {
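
For callers, the mount-table and leaf attributes now travel with the
name instead of the server. A minimal sketch of the new calling
convention, mirroring publisher_test.go below (ctx, ns and the names
are placeholders):

	pub := publisher.New(ctx, ns, time.Second)
	// Attributes are per-name now: neither a mount table nor a leaf.
	pub.AddName("foo", false, false)
	// Servers no longer carry a ServesMountTable flag.
	pub.AddServer("foo-addr")
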
diff --git a/profiles/internal/lib/publisher/publisher_test.go b/profiles/internal/lib/publisher/publisher_test.go
index 58ed04b..f672d16 100644
--- a/profiles/internal/lib/publisher/publisher_test.go
+++ b/profiles/internal/lib/publisher/publisher_test.go
@@ -41,18 +41,18 @@
 func TestAddAndRemove(t *testing.T) {
 	ns := tnaming.NewSimpleNamespace()
 	pub := publisher.New(testContext(), ns, time.Second)
-	pub.AddName("foo")
-	pub.AddServer("foo-addr", false)
+	pub.AddName("foo", false, false)
+	pub.AddServer("foo-addr")
 	if got, want := resolve(t, ns, "foo"), []string{"/foo-addr"}; !reflect.DeepEqual(got, want) {
 		t.Errorf("got %q, want %q", got, want)
 	}
-	pub.AddServer("bar-addr", false)
+	pub.AddServer("bar-addr")
 	got, want := resolve(t, ns, "foo"), []string{"/bar-addr", "/foo-addr"}
 	sort.Strings(got)
 	if !reflect.DeepEqual(got, want) {
 		t.Errorf("got %q, want %q", got, want)
 	}
-	pub.AddName("baz")
+	pub.AddName("baz", false, false)
 	got = resolve(t, ns, "baz")
 	sort.Strings(got)
 	if !reflect.DeepEqual(got, want) {
@@ -67,12 +67,12 @@
 func TestStatus(t *testing.T) {
 	ns := tnaming.NewSimpleNamespace()
 	pub := publisher.New(testContext(), ns, time.Second)
-	pub.AddName("foo")
+	pub.AddName("foo", false, false)
 	status := pub.Status()
 	if got, want := len(status), 0; got != want {
 		t.Errorf("got %d, want %d", got, want)
 	}
-	pub.AddServer("foo-addr", false)
+	pub.AddServer("foo-addr")
 
 	// Wait for the publisher to asynchronously publish server the
 	// requisite number of servers.
@@ -99,8 +99,8 @@
 		t.Fatalf("%s", err)
 	}
 
-	pub.AddServer("bar-addr", false)
-	pub.AddName("baz")
+	pub.AddServer("bar-addr")
+	pub.AddName("baz", false, false)
 	status = pub.Status()
 	names := status.Names()
 	if got, want := names, []string{"baz", "foo"}; !reflect.DeepEqual(got, want) {
diff --git a/profiles/internal/naming/endpoint.go b/profiles/internal/naming/endpoint.go
index cf3477d..eddf369 100644
--- a/profiles/internal/naming/endpoint.go
+++ b/profiles/internal/naming/endpoint.go
@@ -140,7 +140,10 @@
 		switch f := input[0]; f {
 		case 'm':
 			return true, nil
-		case 's':
+		case 's', 'l':
+			// TODO(rthellend): 'l' will be used to indicate leaf
+			// servers in a future version. For now, treat it the
+			// same as 's'.
 			return false, nil
 		default:
 			return false, fmt.Errorf("%c is not one of 'm' or 's'", f)
diff --git a/profiles/internal/naming/namespace/all_test.go b/profiles/internal/naming/namespace/all_test.go
index a981d4b..738eb09 100644
--- a/profiles/internal/naming/namespace/all_test.go
+++ b/profiles/internal/naming/namespace/all_test.go
@@ -725,3 +725,37 @@
 		t.Errorf("Delete failed: %s", err)
 	}
 }
+
+type leafObject struct{}
+
+func (leafObject) Foo(rpc.ServerCall) error {
+	return nil
+}
+
+func TestLeaf(t *testing.T) {
+	_, ctx, cleanup := createContexts(t)
+	defer cleanup()
+	root := runMT(t, ctx, "")
+	ns := v23.GetNamespace(ctx)
+	ns.SetRoots(root.name)
+
+	server, err := v23.NewServer(ctx)
+	if err != nil {
+		boom(t, "v23.NewServer: %s", err)
+	}
+	ls := rpc.ListenSpec{Addrs: rpc.ListenAddrs{{"tcp", "127.0.0.1:0"}}}
+	if _, err := server.Listen(ls); err != nil {
+		boom(t, "Failed to Listen: %s", err)
+	}
+	if err := server.Serve("leaf", &leafObject{}, nil); err != nil {
+		boom(t, "server.Serve failed: %s", err)
+	}
+
+	mountEntry, err := ns.Resolve(ctx, "leaf")
+	if err != nil {
+		boom(t, "ns.Resolve failed: %v", err)
+	}
+	if expected := true; mountEntry.IsLeaf != expected {
+		boom(t, "unexpected mountEntry.IsLeaf value. Got %v, expected %v", mountEntry.IsLeaf, expected)
+	}
+}
diff --git a/profiles/internal/naming/namespace/mount.go b/profiles/internal/naming/namespace/mount.go
index b39697f..e625f51 100644
--- a/profiles/internal/naming/namespace/mount.go
+++ b/profiles/internal/naming/namespace/mount.go
@@ -43,6 +43,10 @@
 			if v {
 				flags |= naming.MountFlag(naming.MT)
 			}
+		case naming.IsLeafOpt:
+			if v {
+				flags |= naming.MountFlag(naming.Leaf)
+			}
 		case naming.MountedServerBlessingsOpt:
 			patterns = str2pattern([]string(v))
 		}
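
The new option composes with ServesMountTableOpt for direct Mount
callers as well; a sketch mirroring the publisher's own call in
pubState.mount (ctx, name, server and ttl are placeholders):

	// IsLeafOpt sets naming.Leaf in the mount flags, just as
	// ServesMountTableOpt sets naming.MT.
	err := ns.Mount(ctx, name, server, ttl, naming.IsLeafOpt(true))
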
diff --git a/profiles/internal/rpc/full_test.go b/profiles/internal/rpc/full_test.go
index 6789fda..4a86966 100644
--- a/profiles/internal/rpc/full_test.go
+++ b/profiles/internal/rpc/full_test.go
@@ -1496,9 +1496,9 @@
 	publisher := publisher.New(ctx, b.ns, publishPeriod)
 	defer publisher.WaitForStop()
 	defer publisher.Stop()
-	publisher.AddName("incompatible")
-	publisher.AddServer("/@2@tcp@localhost:10000@@1000000@2000000@@", false)
-	publisher.AddServer("/@2@tcp@localhost:10001@@2000000@3000000@@", false)
+	publisher.AddName("incompatible", false, false)
+	publisher.AddServer("/@2@tcp@localhost:10000@@1000000@2000000@@")
+	publisher.AddServer("/@2@tcp@localhost:10001@@2000000@3000000@@")
 
 	ctx, _ = v23.SetPrincipal(ctx, pclient)
 	_, err := b.client.StartCall(ctx, "incompatible/suffix", "Echo", []interface{}{"foo"}, options.NoRetry{})
@@ -1507,8 +1507,8 @@
 	}
 
 	// Now add a server with a compatible endpoint and try again.
-	publisher.AddServer("/"+b.ep.String(), false)
-	publisher.AddName("incompatible")
+	publisher.AddServer("/" + b.ep.String())
+	publisher.AddName("incompatible", false, false)
 
 	call, err := b.client.StartCall(ctx, "incompatible/suffix", "Echo", []interface{}{"foo"})
 	if err != nil {
diff --git a/profiles/internal/rpc/reserved.go b/profiles/internal/rpc/reserved.go
index 2e6ba34..262014d 100644
--- a/profiles/internal/rpc/reserved.go
+++ b/profiles/internal/rpc/reserved.go
@@ -268,7 +268,7 @@
 		gs := invoker.Globber()
 		if gs == nil || (gs.AllGlobber == nil && gs.ChildrenGlobber == nil) {
 			if state.glob.Len() == 0 {
-				call.Send(naming.GlobReplyEntry{naming.MountEntry{Name: state.name}})
+				call.Send(naming.GlobReplyEntry{naming.MountEntry{Name: state.name, IsLeaf: true}})
 			} else {
 				call.Send(naming.GlobReplyError{
 					naming.GlobError{Name: state.name, Error: reserved.NewErrGlobNotImplemented(call.Context())},
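
With this, globbing an object that implements no Globber yields an
entry marked as a leaf instead of a bare name. A hypothetical
client-side check, assuming the usual VDL union shape in which
GlobReplyEntry carries the entry in its Value field:

	for reply := range ch {
		if entry, ok := reply.(naming.GlobReplyEntry); ok && entry.Value.IsLeaf {
			continue // internal object, nothing mounted below it
		}
		// descend into non-leaf entries as before
	}
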
diff --git a/profiles/internal/rpc/server.go b/profiles/internal/rpc/server.go
index 79c97e0..3e9b57f 100644
--- a/profiles/internal/rpc/server.go
+++ b/profiles/internal/rpc/server.go
@@ -98,6 +98,7 @@
 	ipNets           []*net.IPNet
 	ns               ns.Namespace
 	servesMountTable bool
+	isLeaf           bool
 
 	// TODO(cnicolaou): add roaming stats to rpcStats
 	stats *rpcStats // stats for this server.
@@ -459,7 +460,7 @@
 		}
 
 		for _, iep := range ls.ieps {
-			s.publisher.AddServer(iep.String(), s.servesMountTable)
+			s.publisher.AddServer(iep.String())
 			eps = append(eps, iep)
 		}
 	}
@@ -485,7 +486,7 @@
 	s.proxies[proxy] = proxyState{iep, nil}
 	s.Unlock()
 	iep.IsMountTable = s.servesMountTable
-	s.publisher.AddServer(iep.String(), s.servesMountTable)
+	s.publisher.AddServer(iep.String())
 	return iep, ln, nil
 }
 
@@ -727,7 +728,7 @@
 				niep.Address = net.JoinHostPort(host, ls.port)
 				ls.ieps = append(ls.ieps, &niep)
 				vlog.VI(2).Infof("rpc: dhcp adding: %s", niep)
-				s.publisher.AddServer(niep.String(), s.servesMountTable)
+				s.publisher.AddServer(niep.String())
 				added = append(added, &niep)
 			}
 		}
@@ -756,6 +757,7 @@
 	if err != nil {
 		return verror.New(verror.ErrBadArg, s.ctx, fmt.Sprintf("bad object: %v", err))
 	}
+	s.isLeaf = true
 	return s.ServeDispatcher(name, &leafDispatcher{invoker, authorizer})
 }
 
@@ -772,7 +774,7 @@
 	vtrace.GetSpan(s.ctx).Annotate("Serving under name: " + name)
 	s.disp = disp
 	if len(name) > 0 {
-		s.publisher.AddName(name)
+		s.publisher.AddName(name, s.servesMountTable, s.isLeaf)
 	}
 	return nil
 }
@@ -788,7 +790,7 @@
 		return err
 	}
 	vtrace.GetSpan(s.ctx).Annotate("Serving under name: " + name)
-	s.publisher.AddName(name)
+	s.publisher.AddName(name, s.servesMountTable, s.isLeaf)
 	return nil
 }
 
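
In short, Serve marks the server as a leaf because a leafDispatcher
serves exactly one object, while ServeDispatcher leaves the flag
unset. A sketch of the two alternatives (obj and disp are
placeholders; a given server uses one call or the other):

	// Single object: the name is published with IsLeaf=true, since
	// nothing can exist below the root.
	err := server.Serve("leaf", obj, nil)

	// Dispatcher: the name is published with IsLeaf=false, since
	// objects may exist below the root.
	err = server.ServeDispatcher("svc", disp)
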
diff --git a/profiles/internal/rpc/stream/proxy/proxy.go b/profiles/internal/rpc/stream/proxy/proxy.go
index 11f76c8..e6ab0b1 100644
--- a/profiles/internal/rpc/stream/proxy/proxy.go
+++ b/profiles/internal/rpc/stream/proxy/proxy.go
@@ -176,9 +176,9 @@
 		}
 		if pub == nil {
 			pub = publisher.New(ctx, v23.GetNamespace(ctx), time.Minute)
-			pub.AddServer(endpoint.String(), false)
+			pub.AddServer(endpoint.String())
 		}
-		pub.AddName(name)
+		pub.AddName(name, false, true)
 	}
 
 	shutdown = func() {
diff --git a/profiles/internal/rpc/test/proxy_test.go b/profiles/internal/rpc/test/proxy_test.go
index 15ff799..203b41d 100644
--- a/profiles/internal/rpc/test/proxy_test.go
+++ b/profiles/internal/rpc/test/proxy_test.go
@@ -63,12 +63,12 @@
 		pub := publisher.New(ctx, v23.GetNamespace(ctx), time.Minute)
 		defer pub.WaitForStop()
 		defer pub.Stop()
-		pub.AddServer(proxyEp.String(), false)
+		pub.AddServer(proxyEp.String())
 		for _, name := range args {
 			if len(name) == 0 {
 				return fmt.Errorf("empty name specified on the command line")
 			}
-			pub.AddName(name)
+			pub.AddName(name, false, false)
 		}
 		// Wait for all the entries to be published.
 		for {
diff --git a/services/mounttable/lib/mounttable.go b/services/mounttable/lib/mounttable.go
index cfe9e4d..1259455 100644
--- a/services/mounttable/lib/mounttable.go
+++ b/services/mounttable/lib/mounttable.go
@@ -56,6 +56,7 @@
 type mount struct {
 	servers *serverList
 	mt      bool
+	leaf    bool
 }
 
 // node is a single point in the tree representing the mount table.
@@ -402,6 +403,7 @@
 	entry.Servers = n.mount.servers.copyToSlice()
 	entry.Name = strings.Join(elems, "/")
 	entry.ServesMountTable = n.mount.mt
+	entry.IsLeaf = n.mount.leaf
 	return
 }
 
@@ -409,6 +411,10 @@
 	return (flags & naming.MT) == naming.MT
 }
 
+func hasLeafFlag(flags naming.MountFlag) bool {
+	return (flags & naming.Leaf) == naming.Leaf
+}
+
 func hasReplaceFlag(flags naming.MountFlag) bool {
 	return (flags & naming.Replace) == naming.Replace
 }
@@ -459,12 +465,16 @@
 		n.mount = nil
 	}
 	wantMT := hasMTFlag(flags)
+	wantLeaf := hasLeafFlag(flags)
 	if n.mount == nil {
-		n.mount = &mount{servers: newServerList(), mt: wantMT}
+		n.mount = &mount{servers: newServerList(), mt: wantMT, leaf: wantLeaf}
 	} else {
 		if wantMT != n.mount.mt {
 			return fmt.Errorf("MT doesn't match")
 		}
+		if wantLeaf != n.mount.leaf {
+			return fmt.Errorf("Leaf doesn't match")
+		}
 	}
 	n.mount.servers.add(server, patterns, time.Duration(ttlsecs)*time.Second)
 	return nil
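
On the wire, the leaf attribute is just one more bit in the MountFlag
word, tested the same way as naming.MT; a short sketch:

	flags := naming.MountFlag(naming.Leaf)
	if hasLeafFlag(flags) {
		// A re-mount that flips this bit is rejected above with
		// "Leaf doesn't match".
	}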