x.ref/runtime: convert to context-based logging.
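
This converts the runtime, its factories, and the commands and libraries
built on them from the package-level vlog logger to the logger carried on
*context.T. Wherever a context is already in scope the change is mechanical,
e.g.:

    // Before: process-global logger.
    vlog.VI(1).Infof("Mount(%s, %q) -> %v", name, server, err)

    // After: the logger travels with the context.
    ctx.VI(1).Infof("Mount(%s, %q) -> %v", name, server, err)

Code with no context in scope (iobuf, the websocket listener, some tests)
falls back to logger.Global() from v.io/x/ref/internal/logger instead.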
MultiPart: 1/2
Change-Id: Ia337148d3e5958c3f2b9e614f22ad0a90fbedace
diff --git a/cmd/mounttable/impl.go b/cmd/mounttable/impl.go
index 80ba494..3653e16 100644
--- a/cmd/mounttable/impl.go
+++ b/cmd/mounttable/impl.go
@@ -13,14 +13,15 @@
"regexp"
"time"
+ "v.io/x/lib/cmdline"
+
"v.io/v23"
"v.io/v23/context"
"v.io/v23/naming"
"v.io/v23/options"
"v.io/v23/rpc"
"v.io/v23/security"
- "v.io/x/lib/cmdline"
- "v.io/x/lib/vlog"
+
"v.io/x/ref/lib/v23cmd"
_ "v.io/x/ref/runtime/factories/generic"
)
@@ -202,7 +203,7 @@
}
func blessingPatternsFromServer(ctx *context.T, server string) ([]security.BlessingPattern, error) {
- vlog.Infof("Contacting %q to determine the blessings presented by it", server)
+ ctx.Infof("Contacting %q to determine the blessings presented by it", server)
ctx, cancel := context.WithTimeout(ctx, time.Minute)
defer cancel()
call, err := v23.GetClient(ctx).StartCall(ctx, server, rpc.ReservedSignature, nil)
diff --git a/cmd/mounttable/impl_test.go b/cmd/mounttable/impl_test.go
index f4fa627..2f918de 100644
--- a/cmd/mounttable/impl_test.go
+++ b/cmd/mounttable/impl_test.go
@@ -11,6 +11,8 @@
"testing"
"time"
+ "v.io/x/lib/cmdline"
+
"v.io/v23"
"v.io/v23/context"
"v.io/v23/naming"
@@ -19,8 +21,7 @@
"v.io/v23/security/access"
"v.io/v23/services/mounttable"
vdltime "v.io/v23/vdlroot/time"
- "v.io/x/lib/cmdline"
- "v.io/x/lib/vlog"
+
"v.io/x/ref/lib/v23cmd"
"v.io/x/ref/lib/xrpc"
_ "v.io/x/ref/runtime/factories/generic"
@@ -41,8 +42,8 @@
suffix string
}
-func (s *server) Glob__(_ *context.T, _ rpc.ServerCall, pattern string) (<-chan naming.GlobReply, error) {
- vlog.VI(2).Infof("Glob() was called. suffix=%v pattern=%q", s.suffix, pattern)
+func (s *server) Glob__(ctx *context.T, _ rpc.ServerCall, pattern string) (<-chan naming.GlobReply, error) {
+ ctx.VI(2).Infof("Glob() was called. suffix=%v pattern=%q", s.suffix, pattern)
ch := make(chan naming.GlobReply, 2)
ch <- naming.GlobReplyEntry{naming.MountEntry{"name1", []naming.MountedServer{{"server1", deadline(1)}}, false, false}}
ch <- naming.GlobReplyEntry{naming.MountEntry{"name2", []naming.MountedServer{{"server2", deadline(2)}, {"server3", deadline(3)}}, false, false}}
@@ -50,34 +51,34 @@
return ch, nil
}
-func (s *server) Mount(_ *context.T, _ rpc.ServerCall, server string, ttl uint32, flags naming.MountFlag) error {
- vlog.VI(2).Infof("Mount() was called. suffix=%v server=%q ttl=%d", s.suffix, server, ttl)
+func (s *server) Mount(ctx *context.T, _ rpc.ServerCall, server string, ttl uint32, flags naming.MountFlag) error {
+ ctx.VI(2).Infof("Mount() was called. suffix=%v server=%q ttl=%d", s.suffix, server, ttl)
return nil
}
-func (s *server) Unmount(_ *context.T, _ rpc.ServerCall, server string) error {
- vlog.VI(2).Infof("Unmount() was called. suffix=%v server=%q", s.suffix, server)
+func (s *server) Unmount(ctx *context.T, _ rpc.ServerCall, server string) error {
+ ctx.VI(2).Infof("Unmount() was called. suffix=%v server=%q", s.suffix, server)
return nil
}
-func (s *server) ResolveStep(*context.T, rpc.ServerCall) (entry naming.MountEntry, err error) {
- vlog.VI(2).Infof("ResolveStep() was called. suffix=%v", s.suffix)
+func (s *server) ResolveStep(ctx *context.T, _ rpc.ServerCall) (entry naming.MountEntry, err error) {
+ ctx.VI(2).Infof("ResolveStep() was called. suffix=%v", s.suffix)
entry.Servers = []naming.MountedServer{{"server1", deadline(1)}}
entry.Name = s.suffix
return
}
-func (s *server) Delete(*context.T, rpc.ServerCall, bool) error {
- vlog.VI(2).Infof("Delete() was called. suffix=%v", s.suffix)
+func (s *server) Delete(ctx *context.T, _ rpc.ServerCall, _ bool) error {
+ ctx.VI(2).Infof("Delete() was called. suffix=%v", s.suffix)
return nil
}
-func (s *server) SetPermissions(*context.T, rpc.ServerCall, access.Permissions, string) error {
- vlog.VI(2).Infof("SetPermissions() was called. suffix=%v", s.suffix)
+func (s *server) SetPermissions(ctx *context.T, _ rpc.ServerCall, _ access.Permissions, _ string) error {
+ ctx.VI(2).Infof("SetPermissions() was called. suffix=%v", s.suffix)
return nil
}
-func (s *server) GetPermissions(*context.T, rpc.ServerCall) (access.Permissions, string, error) {
- vlog.VI(2).Infof("GetPermissions() was called. suffix=%v", s.suffix)
+func (s *server) GetPermissions(ctx *context.T, _ rpc.ServerCall) (access.Permissions, string, error) {
+ ctx.VI(2).Infof("GetPermissions() was called. suffix=%v", s.suffix)
return nil, "", nil
}
@@ -135,7 +136,7 @@
stdout.Reset()
// Test the 'resolvestep' command.
- vlog.Infof("resovestep %s", naming.JoinAddressName(endpoint.String(), "name"))
+ ctx.Infof("resovestep %s", naming.JoinAddressName(endpoint.String(), "name"))
if err := v23cmd.ParseAndRunForTest(cmdRoot, ctx, env, []string{"resolvestep", naming.JoinAddressName(endpoint.String(), "name")}); err != nil {
t.Fatalf("%v", err)
}
diff --git a/cmd/namespace/impl.go b/cmd/namespace/impl.go
index f12917e..ca17f5c 100644
--- a/cmd/namespace/impl.go
+++ b/cmd/namespace/impl.go
@@ -15,15 +15,16 @@
"sort"
"time"
+ "v.io/x/lib/cmdline"
+ "v.io/x/lib/set"
+
"v.io/v23"
"v.io/v23/context"
"v.io/v23/naming"
"v.io/v23/options"
"v.io/v23/security/access"
"v.io/v23/verror"
- "v.io/x/lib/cmdline"
- "v.io/x/lib/set"
- "v.io/x/lib/vlog"
+
"v.io/x/ref/lib/v23cmd"
_ "v.io/x/ref/runtime/factories/generic"
)
@@ -72,7 +73,7 @@
c, err := ns.Glob(ctx, pattern)
if err != nil {
- vlog.Infof("ns.Glob(%q) failed: %v", pattern, err)
+ ctx.Infof("ns.Glob(%q) failed: %v", pattern, err)
return err
}
if flagLongGlob {
@@ -148,7 +149,7 @@
ns := v23.GetNamespace(ctx)
if err = ns.Mount(ctx, name, server, ttl); err != nil {
- vlog.Infof("ns.Mount(%q, %q, %s) failed: %v", name, server, ttl, err)
+ ctx.Infof("ns.Mount(%q, %q, %s) failed: %v", name, server, ttl, err)
return err
}
fmt.Fprintln(env.Stdout, "Server mounted successfully.")
@@ -180,7 +181,7 @@
ns := v23.GetNamespace(ctx)
if err := ns.Unmount(ctx, name, server); err != nil {
- vlog.Infof("ns.Unmount(%q, %q) failed: %v", name, server, err)
+ ctx.Infof("ns.Unmount(%q, %q) failed: %v", name, server, err)
return err
}
fmt.Fprintln(env.Stdout, "Server unmounted successfully.")
@@ -213,7 +214,7 @@
}
me, err := ns.Resolve(ctx, name, opts...)
if err != nil {
- vlog.Infof("ns.Resolve(%q) failed: %v", name, err)
+ ctx.Infof("ns.Resolve(%q) failed: %v", name, err)
return err
}
for _, n := range me.Names() {
@@ -247,7 +248,7 @@
}
e, err := ns.ResolveToMountTable(ctx, name, opts...)
if err != nil {
- vlog.Infof("ns.ResolveToMountTable(%q) failed: %v", name, err)
+ ctx.Infof("ns.ResolveToMountTable(%q) failed: %v", name, err)
return err
}
for _, s := range e.Servers {
@@ -311,7 +312,7 @@
return err
}
if err = ns.SetPermissions(ctx, name, perms, etag); verror.ErrorID(err) == verror.ErrBadVersion.ID {
- vlog.Infof("SetPermissions(%q, %q) failed: %v, retrying...", name, etag, err)
+ ctx.Infof("SetPermissions(%q, %q) failed: %v, retrying...", name, etag, err)
continue
}
return err
diff --git a/examples/tunnel/tunneld/impl.go b/examples/tunnel/tunneld/impl.go
index 020707e..64f68a7 100644
--- a/examples/tunnel/tunneld/impl.go
+++ b/examples/tunnel/tunneld/impl.go
@@ -19,7 +19,6 @@
"v.io/v23/context"
"v.io/v23/logging"
"v.io/v23/security"
-
"v.io/x/ref/examples/tunnel"
"v.io/x/ref/examples/tunnel/internal"
)
diff --git a/internal/logger/logger.go b/internal/logger/logger.go
index 4f07b0d..c8a84af 100644
--- a/internal/logger/logger.go
+++ b/internal/logger/logger.go
@@ -72,3 +72,9 @@
}
return &dummy{}
}
+
+// IsAlreadyConfiguredError returns true if err indicates that the logger
+// has already been configured.
+func IsAlreadyConfiguredError(err error) bool {
+ return err == vlog.ErrConfigured
+}
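Note: IsAlreadyConfiguredError exists so callers can test for
vlog.ErrConfigured without importing vlog directly. A sketch of the
intended use, mirroring the fake runtime's Init below (error handling
illustrative):

    if err := logger.Manager(ctx).ConfigureFromFlags(); err != nil {
        if !logger.IsAlreadyConfiguredError(err) {
            return err
        }
        // The logger was configured earlier; safe to ignore.
    }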
diff --git a/lib/signals/signals.go b/lib/signals/signals.go
index bc645d4..61600cb 100644
--- a/lib/signals/signals.go
+++ b/lib/signals/signals.go
@@ -74,7 +74,7 @@
sawStop = true
if ctx != nil {
stopWaiter := make(chan string, 1)
- v23.GetAppCycle(ctx).WaitForStop(stopWaiter)
+ v23.GetAppCycle(ctx).WaitForStop(ctx, stopWaiter)
go func() {
for {
ch <- stopSignal(<-stopWaiter)
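Note: AppCycle.WaitForStop now takes the context as its first argument (the
signature change is in runtime/internal/lib/appcycle below). Caller-side
sketch:

    stopWaiter := make(chan string, 1)
    v23.GetAppCycle(ctx).WaitForStop(ctx, stopWaiter)
    ctx.Infof("stop requested: %v", <-stopWaiter) // e.g. v23.LocalStop or v23.RemoteStop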
diff --git a/lib/signals/signals_test.go b/lib/signals/signals_test.go
index 2acdad1..46bbefb 100644
--- a/lib/signals/signals_test.go
+++ b/lib/signals/signals_test.go
@@ -47,7 +47,9 @@
func program(stdin io.Reader, stdout io.Writer, signals ...os.Signal) {
ctx, shutdown := test.V23Init()
closeStopLoop := make(chan struct{})
- go stopLoop(v23.GetAppCycle(ctx).Stop, stdin, closeStopLoop)
+ // Obtain ac here, since stopLoop may execute after shutdown is called below.
+ ac := v23.GetAppCycle(ctx)
+ go stopLoop(func() { ac.Stop(ctx) }, stdin, closeStopLoop)
wait := ShutdownOnSignals(ctx, signals...)
fmt.Fprintf(stdout, "ready\n")
fmt.Fprintf(stdout, "received signal %s\n", <-wait)
@@ -73,9 +75,10 @@
var handleDefaultsIgnoreChan = modules.Register(func(env *modules.Env, args ...string) error {
ctx, shutdown := test.V23Init()
defer shutdown()
-
closeStopLoop := make(chan struct{})
- go stopLoop(v23.GetAppCycle(ctx).Stop, env.Stdin, closeStopLoop)
+ // Obtain ac here, since stopLoop may execute after shutdown is called below.
+ ac := v23.GetAppCycle(ctx)
+ go stopLoop(func() { ac.Stop(ctx) }, env.Stdin, closeStopLoop)
ShutdownOnSignals(ctx)
fmt.Fprintf(env.Stdout, "ready\n")
<-closeStopLoop
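Note: with Stop now taking a context, the bare method value
v23.GetAppCycle(ctx).Stop no longer matches stopLoop's func() parameter, so
it is wrapped in a closure; ac is resolved eagerly because the closure may
run after shutdown. Minimal illustration of the same trap (names
hypothetical):

    ac := v23.GetAppCycle(ctx) // evaluate while the runtime is still up
    trigger := make(chan struct{})
    go func() {
        <-trigger    // may fire after shutdown() has run
        ac.Stop(ctx) // ac was captured before shutdown
    }()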
diff --git a/runtime/factories/chrome/chrome.go b/runtime/factories/chrome/chrome.go
index 5108bbe..8245a77 100644
--- a/runtime/factories/chrome/chrome.go
+++ b/runtime/factories/chrome/chrome.go
@@ -12,7 +12,6 @@
"v.io/v23"
"v.io/v23/context"
"v.io/v23/rpc"
- "v.io/x/lib/vlog"
"v.io/x/ref/lib/flags"
"v.io/x/ref/runtime/internal"
@@ -41,6 +40,6 @@
if err != nil {
return nil, nil, shutdown, err
}
- vlog.Log.VI(1).Infof("Initializing chrome RuntimeFactory.")
+ ctx.VI(1).Infof("Initializing chrome RuntimeFactory.")
return runtime, ctx, shutdown, nil
}
diff --git a/runtime/factories/fake/runtime.go b/runtime/factories/fake/runtime.go
index 2282a47..b5ea1e9 100644
--- a/runtime/factories/fake/runtime.go
+++ b/runtime/factories/fake/runtime.go
@@ -10,6 +10,7 @@
"v.io/v23/namespace"
"v.io/v23/rpc"
"v.io/v23/security"
+ "v.io/x/ref/internal/logger"
"v.io/x/ref/lib/apilog"
vsecurity "v.io/x/ref/lib/security"
tnaming "v.io/x/ref/runtime/internal/testing/mocks/naming"
@@ -38,8 +39,8 @@
}
func (r *Runtime) Init(ctx *context.T) error {
- defer apilog.LogCall(ctx)(ctx) // gologcop: DO NOT EDIT, MUST BE FIRST STATEMENT
- return nil
+ // nologcall
+ return logger.Manager(ctx).ConfigureFromFlags()
}
func (r *Runtime) WithPrincipal(ctx *context.T, principal security.Principal) (*context.T, error) {
diff --git a/runtime/factories/gce/gce.go b/runtime/factories/gce/gce.go
index b38548b..a0d25e8 100644
--- a/runtime/factories/gce/gce.go
+++ b/runtime/factories/gce/gce.go
@@ -16,7 +16,6 @@
"v.io/v23"
"v.io/v23/context"
"v.io/v23/rpc"
- "v.io/x/lib/vlog"
"v.io/x/lib/netstate"
"v.io/x/ref/lib/flags"
@@ -68,7 +67,7 @@
return nil, nil, shutdown, err
}
- vlog.Log.VI(1).Infof("Initializing GCE RuntimeFactory.")
+ ctx.VI(1).Infof("Initializing GCE RuntimeFactory.")
runtimeFactoryShutdown := func() {
ac.Shutdown()
diff --git a/runtime/factories/generic/generic.go b/runtime/factories/generic/generic.go
index eabda93..54e4149 100644
--- a/runtime/factories/generic/generic.go
+++ b/runtime/factories/generic/generic.go
@@ -12,7 +12,6 @@
"v.io/v23"
"v.io/v23/context"
"v.io/v23/rpc"
- "v.io/x/lib/vlog"
"v.io/x/ref/lib/flags"
"v.io/x/ref/runtime/internal"
@@ -58,7 +57,7 @@
if err != nil {
return nil, nil, nil, err
}
- vlog.Log.VI(1).Infof("Initializing generic RuntimeFactory.")
+ ctx.VI(1).Infof("Initializing generic RuntimeFactory.")
runtimeFactoryShutdown := func() {
ac.Shutdown()
diff --git a/runtime/factories/roaming/roaming.go b/runtime/factories/roaming/roaming.go
index 5424691..9dd781e 100644
--- a/runtime/factories/roaming/roaming.go
+++ b/runtime/factories/roaming/roaming.go
@@ -20,7 +20,6 @@
"v.io/x/lib/netconfig"
"v.io/x/lib/netstate"
"v.io/x/lib/pubsub"
- "v.io/x/lib/vlog"
"v.io/v23"
"v.io/v23/context"
@@ -157,29 +156,29 @@
netstate.InvalidateCache()
cur, err := netstate.GetAccessibleIPs()
if err != nil {
- vlog.Errorf("failed to read network state: %s", err)
+ ctx.Errorf("failed to read network state: %s", err)
continue
}
removed := netstate.FindRemoved(prev, cur)
added := netstate.FindAdded(prev, cur)
- vlog.VI(2).Infof("Previous: %d: %s", len(prev), prev)
- vlog.VI(2).Infof("Current : %d: %s", len(cur), cur)
- vlog.VI(2).Infof("Added : %d: %s", len(added), added)
- vlog.VI(2).Infof("Removed : %d: %s", len(removed), removed)
+ ctx.VI(2).Infof("Previous: %d: %s", len(prev), prev)
+ ctx.VI(2).Infof("Current : %d: %s", len(cur), cur)
+ ctx.VI(2).Infof("Added : %d: %s", len(added), added)
+ ctx.VI(2).Infof("Removed : %d: %s", len(removed), removed)
if len(removed) == 0 && len(added) == 0 {
- vlog.VI(2).Infof("Network event that lead to no address changes since our last 'baseline'")
+ ctx.VI(2).Infof("Network event that lead to no address changes since our last 'baseline'")
continue
}
if len(removed) > 0 {
- vlog.VI(2).Infof("Sending removed: %s", removed)
+ ctx.VI(2).Infof("Sending removed: %s", removed)
ch <- irpc.NewRmAddrsSetting(removed.AsNetAddrs())
}
// We will always send the best currently available address
if chosen, err := listenSpec.AddressChooser.ChooseAddress(listenSpec.Addrs[0].Protocol, cur.AsNetAddrs()); err == nil && chosen != nil {
- vlog.VI(2).Infof("Sending added and chosen: %s", chosen)
+ ctx.VI(2).Infof("Sending added and chosen: %s", chosen)
ch <- irpc.NewAddAddrsSetting(chosen)
} else {
- vlog.VI(2).Infof("Ignoring added %s", added)
+ ctx.VI(2).Infof("Ignoring added %s", added)
}
prev = cur
case <-cleanup:
diff --git a/runtime/factories/roaming/roaming_server.go b/runtime/factories/roaming/roaming_server.go
index 10e801a..c87d37a 100644
--- a/runtime/factories/roaming/roaming_server.go
+++ b/runtime/factories/roaming/roaming_server.go
@@ -8,12 +8,9 @@
import (
"fmt"
- "log"
"v.io/v23"
"v.io/v23/rpc"
- "v.io/x/lib/vlog"
-
"v.io/x/ref/lib/xrpc"
_ "v.io/x/ref/runtime/factories/roaming"
)
@@ -24,7 +21,7 @@
server, err := xrpc.NewServer(ctx, "roamer", &dummy{}, nil)
if err != nil {
- vlog.Fatalf("unexpected error: %q", err)
+ ctx.Fatalf("unexpected error: %q", err)
}
watcher := make(chan rpc.NetworkChange, 1)
server.WatchNetwork(watcher)
diff --git a/runtime/internal/lib/appcycle/appcycle.go b/runtime/internal/lib/appcycle/appcycle.go
index 9356afc..d35bb0f 100644
--- a/runtime/internal/lib/appcycle/appcycle.go
+++ b/runtime/internal/lib/appcycle/appcycle.go
@@ -13,7 +13,6 @@
"v.io/v23/context"
"v.io/v23/rpc"
"v.io/v23/security"
- "v.io/x/lib/vlog"
"v.io/x/ref/lib/apilog"
public "v.io/v23/services/appcycle"
@@ -51,13 +50,13 @@
m.taskTrackers = nil
}
-func (m *AppCycle) stop(msg string) {
- vlog.Infof("stop(%v)", msg)
- defer vlog.Infof("stop(%v) done", msg)
+func (m *AppCycle) stop(ctx *context.T, msg string) {
+ ctx.Infof("stop(%v)", msg)
+ defer ctx.Infof("stop(%v) done", msg)
m.RLock()
defer m.RUnlock()
if len(m.waiters) == 0 {
- vlog.Infof("Unhandled stop. Exiting.")
+ ctx.Infof("Unhandled stop. Exiting.")
os.Exit(v23.UnhandledStopExitCode)
}
for _, w := range m.waiters {
@@ -68,17 +67,17 @@
}
}
-func (m *AppCycle) Stop() {
+func (m *AppCycle) Stop(ctx *context.T) {
defer apilog.LogCall(nil)(nil) // gologcop: DO NOT EDIT, MUST BE FIRST STATEMENT
- m.stop(v23.LocalStop)
+ m.stop(ctx, v23.LocalStop)
}
-func (*AppCycle) ForceStop() {
+func (*AppCycle) ForceStop(ctx *context.T) {
defer apilog.LogCall(nil)(nil) // gologcop: DO NOT EDIT, MUST BE FIRST STATEMENT
os.Exit(v23.ForceStopExitCode)
}
-func (m *AppCycle) WaitForStop(ch chan<- string) {
+func (m *AppCycle) WaitForStop(_ *context.T, ch chan<- string) {
defer apilog.LogCallf(nil, "ch=")(nil, "") // gologcop: DO NOT EDIT, MUST BE FIRST STATEMENT
m.Lock()
defer m.Unlock()
@@ -137,13 +136,13 @@
func (d *invoker) Stop(ctx *context.T, call public.AppCycleStopServerCall) error {
defer apilog.LogCallf(ctx, "call=")(ctx, "") // gologcop: DO NOT EDIT, MUST BE FIRST STATEMENT
blessings, _ := security.RemoteBlessingNames(ctx, call.Security())
- vlog.Infof("AppCycle Stop request from %v", blessings)
+ ctx.Infof("AppCycle Stop request from %v", blessings)
// The size of the channel should be reasonably sized to expect not to
// miss updates while we're waiting for the stream to unblock.
ch := make(chan v23.Task, 10)
d.ac.TrackTask(ch)
// TODO(caprita): Include identity of Stop issuer in message.
- d.ac.stop(v23.RemoteStop)
+ d.ac.stop(ctx, v23.RemoteStop)
for {
task, ok := <-ch
if !ok {
@@ -151,15 +150,15 @@
break
}
actask := public.Task{Progress: task.Progress, Goal: task.Goal}
- vlog.Infof("AppCycle Stop progress %d/%d", task.Progress, task.Goal)
+ ctx.Infof("AppCycle Stop progress %d/%d", task.Progress, task.Goal)
call.SendStream().Send(actask)
}
- vlog.Infof("AppCycle Stop done")
+ ctx.Infof("AppCycle Stop done")
return nil
}
-func (d *invoker) ForceStop(*context.T, rpc.ServerCall) error {
+func (d *invoker) ForceStop(ctx *context.T, _ rpc.ServerCall) error {
defer apilog.LogCall(nil)(nil) // gologcop: DO NOT EDIT, MUST BE FIRST STATEMENT
- d.ac.ForceStop()
+ d.ac.ForceStop(ctx)
return fmt.Errorf("ForceStop should not reply as the process should be dead")
}
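Note: the invoker's Stop streams progress gathered via TrackTask. A
standalone sketch of that consumption pattern (channel size illustrative):

    ch := make(chan v23.Task, 10)
    ac.TrackTask(ch)
    for task := range ch {
        ctx.Infof("shutdown progress %d/%d", task.Progress, task.Goal)
    }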
diff --git a/runtime/internal/lib/iobuf/allocator.go b/runtime/internal/lib/iobuf/allocator.go
index d059564..f586c39 100644
--- a/runtime/internal/lib/iobuf/allocator.go
+++ b/runtime/internal/lib/iobuf/allocator.go
@@ -4,7 +4,7 @@
package iobuf
-import "v.io/x/lib/vlog"
+import "v.io/x/ref/internal/logger"
// Allocator is an allocator for Slices that tries to allocate contiguously.
// That is, sequential allocations will tend to be contiguous, which means
@@ -51,7 +51,7 @@
n := bytes + a.reserve
if a.iobuf == nil {
if a.pool == nil {
- vlog.Info("iobuf.Allocator has already been closed")
+ logger.Global().Info("iobuf.Allocator has already been closed")
return nil
}
a.iobuf = a.pool.alloc(n)
diff --git a/runtime/internal/lib/iobuf/iobuf.go b/runtime/internal/lib/iobuf/iobuf.go
index 2b1542f..7c64eda 100644
--- a/runtime/internal/lib/iobuf/iobuf.go
+++ b/runtime/internal/lib/iobuf/iobuf.go
@@ -31,7 +31,7 @@
"sync"
"sync/atomic"
- "v.io/x/lib/vlog"
+ "v.io/x/ref/internal/logger"
)
// A iobuf is a storage space for memory read from the network. The data should
@@ -92,7 +92,7 @@
pool.mutex.Lock()
defer pool.mutex.Unlock()
if pool.freelist == nil {
- vlog.Info("iobuf.Pool is closed")
+ logger.Global().Info("iobuf.Pool is closed")
return nil
}
@@ -131,7 +131,7 @@
func (iobuf *buf) release() {
refcount := atomic.AddInt32(&iobuf.refcount, -1)
if refcount < 0 {
- vlog.Infof("Refcount is negative: %d. This is a bug in the program.", refcount)
+ logger.Global().Infof("Refcount is negative: %d. This is a bug in the program.", refcount)
}
if refcount == 0 {
iobuf.pool.release(iobuf)
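Note: iobuf has no *context.T at these call sites, so it logs through the
process-global logger rather than growing a context parameter. Minimal
sketch of the fallback (function name hypothetical):

    import "v.io/x/ref/internal/logger"

    func warnNegativeRefcount(n int32) {
        // No context in scope; fall back to the global logger.
        logger.Global().Infof("refcount is negative: %d", n)
    }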
diff --git a/runtime/internal/lib/pcqueue/pcqueue_test.go b/runtime/internal/lib/pcqueue/pcqueue_test.go
index a8858d5..49c13fb 100644
--- a/runtime/internal/lib/pcqueue/pcqueue_test.go
+++ b/runtime/internal/lib/pcqueue/pcqueue_test.go
@@ -10,7 +10,7 @@
"testing"
"time"
- "v.io/x/lib/vlog"
+ "v.io/x/ref/internal/logger"
)
//go:generate v23 test generate
@@ -81,7 +81,7 @@
cancel := make(chan struct{})
// Check that the queue elements are sequentially increasing ints.
- vlog.VI(1).Infof("Start consumer")
+ logger.Global().VI(1).Infof("Start consumer")
go func() {
for i := 0; i != elementCount; i++ {
item, err := queue.Get(cancel)
@@ -100,27 +100,27 @@
}()
// Generate the sequential ints.
- vlog.VI(1).Infof("Put values")
+ logger.Global().VI(1).Infof("Put values")
for i := 0; i != elementCount; i++ {
queue.Put(i, nil)
}
// Wait for the consumer.
- vlog.VI(1).Infof("Waiting for consumer")
+ logger.Global().VI(1).Infof("Waiting for consumer")
<-done
// Any subsequent read should timeout.
- vlog.VI(1).Infof("Start consumer")
+ logger.Global().VI(1).Infof("Start consumer")
go func() {
_, err := queue.Get(cancel)
if err != ErrCancelled {
t.Errorf("Expected timeout: %v", err)
}
- vlog.VI(1).Infof("Consumer done")
+ logger.Global().VI(1).Infof("Consumer done")
done <- struct{}{}
}()
- vlog.VI(1).Infof("Sleep a little")
+ logger.Global().VI(1).Infof("Sleep a little")
time.Sleep(100 * time.Millisecond)
select {
case <-done:
@@ -128,10 +128,10 @@
default:
}
- vlog.VI(1).Infof("Cancel")
+ logger.Global().VI(1).Infof("Cancel")
close(cancel)
- vlog.VI(1).Infof("Wait for consumer")
+ logger.Global().VI(1).Infof("Wait for consumer")
<-done
}
@@ -141,7 +141,7 @@
done := make(chan struct{}, 1)
cancel := make(chan struct{})
- vlog.VI(1).Infof("Put values")
+ logger.Global().VI(1).Infof("Put values")
for i := 0; i != queueSize; i++ {
err := queue.Put(i, nil)
if err != nil {
@@ -149,7 +149,7 @@
}
}
- vlog.VI(1).Infof("Start producer")
+ logger.Global().VI(1).Infof("Start producer")
go func() {
err := queue.Put(0, cancel)
if err != ErrCancelled {
@@ -158,7 +158,7 @@
done <- struct{}{}
}()
- vlog.VI(1).Infof("Sleep a little")
+ logger.Global().VI(1).Infof("Sleep a little")
time.Sleep(100 * time.Millisecond)
select {
case <-done:
@@ -166,10 +166,10 @@
default:
}
- vlog.VI(1).Infof("Cancel")
+ logger.Global().VI(1).Infof("Cancel")
close(cancel)
- vlog.VI(1).Infof("Wait for producer")
+ logger.Global().VI(1).Infof("Wait for producer")
<-done
}
@@ -213,7 +213,7 @@
go func() {
err := queue.Put(1, nil)
if err != nil {
- vlog.VI(1).Infof("Put: %v", err)
+ logger.Global().VI(1).Infof("Put: %v", err)
}
pending.Done()
}()
@@ -241,7 +241,7 @@
}
readers++
}
- vlog.VI(1).Infof("%d operations completed", readers)
+ logger.Global().VI(1).Infof("%d operations completed", readers)
if readers > writerCount {
t.Errorf("Too many readers")
}
@@ -317,7 +317,7 @@
// Sum up the results and compare.
sum := uint32(0)
count := uint32(0)
- vlog.VI(1).Infof("Start consumers")
+ logger.Global().VI(1).Infof("Start consumers")
for i := 0; i != readerCount; i++ {
pid := i
go func() {
@@ -335,13 +335,13 @@
atomic.AddUint32(&sum, uint32(item.(int)))
atomic.AddUint32(&count, 1)
}
- vlog.VI(1).Infof("Consumer %d done", pid)
+ logger.Global().VI(1).Infof("Consumer %d done", pid)
pending.Done()
}()
}
// Generate the sequential ints.
- vlog.VI(1).Infof("Start producers")
+ logger.Global().VI(1).Infof("Start producers")
for i := 0; i != writerCount; i++ {
pid := i
go func() {
@@ -351,18 +351,18 @@
t.Errorf("Put: %v", err)
}
}
- vlog.VI(1).Infof("Producer %d done", pid)
+ logger.Global().VI(1).Infof("Producer %d done", pid)
pending.Done()
}()
}
- vlog.VI(1).Infof("Start termination checker")
+ logger.Global().VI(1).Infof("Start termination checker")
go func() {
pending.Wait()
done <- struct{}{}
}()
- vlog.VI(1).Infof("Wait for processes")
+ logger.Global().VI(1).Infof("Wait for processes")
stop := false
for !stop {
time.Sleep(100 * time.Millisecond)
@@ -374,7 +374,7 @@
}
}
- vlog.VI(1).Infof("Checking the sum")
+ logger.Global().VI(1).Infof("Checking the sum")
expected := writerCount * elementCount * (elementCount - 1) / 2
s := atomic.LoadUint32(&sum)
if s != uint32(expected) {
diff --git a/runtime/internal/lib/publisher/publisher.go b/runtime/internal/lib/publisher/publisher.go
index d254fc8..270fec2 100644
--- a/runtime/internal/lib/publisher/publisher.go
+++ b/runtime/internal/lib/publisher/publisher.go
@@ -19,7 +19,6 @@
"v.io/v23/options"
"v.io/v23/rpc"
"v.io/v23/verror"
- "v.io/x/lib/vlog"
)
// Publisher manages the publishing of servers in mounttable.
@@ -57,6 +56,7 @@
cmdchan chan interface{} // value is one of {server,name,debug}Cmd
stopchan chan struct{} // closed when no longer accepting commands.
donechan chan struct{} // closed when the publisher is done
+ ctx *context.T
}
type addServerCmd struct {
@@ -89,6 +89,7 @@
cmdchan: make(chan interface{}),
stopchan: make(chan struct{}),
donechan: make(chan struct{}),
+ ctx: ctx,
}
go runLoop(ctx, p.cmdchan, p.donechan, ns, period)
return p
@@ -158,7 +159,7 @@
}
func runLoop(ctx *context.T, cmdchan chan interface{}, donechan chan struct{}, ns namespace.T, period time.Duration) {
- vlog.VI(2).Info("rpc pub: start runLoop")
+ ctx.VI(2).Info("rpc pub: start runLoop")
state := newPubState(ctx, ns, period)
for {
select {
@@ -167,7 +168,7 @@
case stopCmd:
state.unmountAll()
close(donechan)
- vlog.VI(2).Info("rpc pub: exit runLoop")
+ ctx.VI(2).Info("rpc pub: exit runLoop")
return
case addServerCmd:
state.addServer(tcmd.server)
@@ -292,12 +293,12 @@
status.TTL = ttl
// If the mount status changed, log it.
if status.LastMountErr != nil {
- if verror.ErrorID(last.LastMountErr) != verror.ErrorID(status.LastMountErr) || vlog.V(2) {
- vlog.Errorf("rpc pub: couldn't mount(%v, %v, %v): %v", name, server, ttl, status.LastMountErr)
+ if verror.ErrorID(last.LastMountErr) != verror.ErrorID(status.LastMountErr) || ps.ctx.V(2) {
+ ps.ctx.Errorf("rpc pub: couldn't mount(%v, %v, %v): %v", name, server, ttl, status.LastMountErr)
}
} else {
- if last.LastMount.IsZero() || last.LastMountErr != nil || vlog.V(2) {
- vlog.Infof("rpc pub: mount(%v, %v, %v)", name, server, ttl)
+ if last.LastMount.IsZero() || last.LastMountErr != nil || ps.ctx.V(2) {
+ ps.ctx.Infof("rpc pub: mount(%v, %v, %v)", name, server, ttl)
}
}
}
@@ -322,9 +323,9 @@
}
status.LastUnmountErr = ps.ns.Unmount(ps.ctx, name, server, opts...)
if status.LastUnmountErr != nil {
- vlog.Errorf("rpc pub: couldn't unmount(%v, %v): %v", name, server, status.LastUnmountErr)
+ ps.ctx.Errorf("rpc pub: couldn't unmount(%v, %v): %v", name, server, status.LastUnmountErr)
} else {
- vlog.VI(1).Infof("rpc pub: unmount(%v, %v)", name, server)
+ ps.ctx.VI(1).Infof("rpc pub: unmount(%v, %v)", name, server)
delete(ps.mounts, mountKey{name, server})
}
}
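Note: publisher now carries the construction-time ctx on the struct
(pubState already did) so that methods driven by the run loop can log
without threading a context through every call. Stripped-down form of the
pattern, names hypothetical:

    type worker struct {
        ctx *context.T // captured at construction, used for logging
    }

    func newWorker(ctx *context.T) *worker { return &worker{ctx: ctx} }

    func (w *worker) step(err error) {
        if err != nil {
            w.ctx.Errorf("step failed: %v", err)
        }
    }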
diff --git a/runtime/internal/lib/upcqueue/upcqueue_test.go b/runtime/internal/lib/upcqueue/upcqueue_test.go
index 1b9ea84..3f5220c 100644
--- a/runtime/internal/lib/upcqueue/upcqueue_test.go
+++ b/runtime/internal/lib/upcqueue/upcqueue_test.go
@@ -10,7 +10,7 @@
"testing"
"time"
- "v.io/x/lib/vlog"
+ "v.io/x/ref/internal/logger"
vsync "v.io/x/ref/runtime/internal/lib/sync"
)
@@ -75,7 +75,7 @@
cancel := make(chan struct{})
// Check that the queue elements are sequentially increasing ints.
- vlog.VI(1).Infof("Start consumer")
+ logger.Global().VI(1).Infof("Start consumer")
go func() {
for i := 0; i != elementCount; i++ {
item, err := queue.Get(cancel)
@@ -94,27 +94,27 @@
}()
// Generate the sequential ints.
- vlog.VI(1).Infof("Put values")
+ logger.Global().VI(1).Infof("Put values")
for i := 0; i != elementCount; i++ {
queue.Put(i)
}
// Wait for the consumer.
- vlog.VI(1).Infof("Waiting for consumer")
+ logger.Global().VI(1).Infof("Waiting for consumer")
<-done
// Any subsequent read should timeout.
- vlog.VI(1).Infof("Start consumer")
+ logger.Global().VI(1).Infof("Start consumer")
go func() {
_, err := queue.Get(cancel)
if err != vsync.ErrCanceled {
t.Errorf("Expected timeout: %v", err)
}
- vlog.VI(1).Infof("Consumer done")
+ logger.Global().VI(1).Infof("Consumer done")
done <- struct{}{}
}()
- vlog.VI(1).Infof("Sleep a little")
+ logger.Global().VI(1).Infof("Sleep a little")
time.Sleep(100 * time.Millisecond)
select {
case <-done:
@@ -122,26 +122,26 @@
default:
}
- vlog.VI(1).Infof("Cancel")
+ logger.Global().VI(1).Infof("Cancel")
close(cancel)
- vlog.VI(1).Infof("Wait for consumer")
+ logger.Global().VI(1).Infof("Wait for consumer")
<-done
}
// Test that Get() returns an error when the queue is closed.
func TestSequentialClose(t *testing.T) {
- vlog.VI(1).Infof("Put")
+ logger.Global().VI(1).Infof("Put")
queue := New()
err := queue.Put(0)
if err != nil {
t.Errorf("Put: %v", err)
}
- vlog.VI(1).Infof("Close")
+ logger.Global().VI(1).Infof("Close")
queue.Close()
// Check that Get() returns the element.
- vlog.VI(1).Infof("Get")
+ logger.Global().VI(1).Infof("Get")
item, err := queue.Get(nil)
if err != nil {
t.Errorf("Get: %v", err)
@@ -151,14 +151,14 @@
}
// Check that Get() returns an error.
- vlog.VI(1).Infof("Get")
+ logger.Global().VI(1).Infof("Get")
_, err = queue.Get(nil)
if err != ErrQueueIsClosed {
t.Errorf("Expected queue to be closed: %v", err)
}
// Check that Put() returns an error.
- vlog.VI(1).Infof("Put")
+ logger.Global().VI(1).Infof("Put")
err = queue.Put(0)
if err != ErrQueueIsClosed {
t.Errorf("Expected queue to be closed: %v", err)
@@ -174,7 +174,7 @@
go func() {
err := queue.Put(1)
if err != nil {
- vlog.VI(1).Infof("Put: %v", err)
+ logger.Global().VI(1).Infof("Put: %v", err)
}
pending.Done()
}()
@@ -202,7 +202,7 @@
}
readers++
}
- vlog.VI(1).Infof("%d operations completed", readers)
+ logger.Global().VI(1).Infof("%d operations completed", readers)
if readers > writerCount {
t.Errorf("Too many readers")
}
@@ -213,24 +213,24 @@
func TestSequentialShutdown(t *testing.T) {
queue := New()
- vlog.VI(1).Infof("Put")
+ logger.Global().VI(1).Infof("Put")
err := queue.Put(0)
if err != nil {
t.Errorf("Put: %v", err)
}
- vlog.VI(1).Infof("Shutdown")
+ logger.Global().VI(1).Infof("Shutdown")
queue.Shutdown()
// Check that Get() returns an error.
- vlog.VI(1).Infof("Get")
+ logger.Global().VI(1).Infof("Get")
_, err = queue.Get(nil)
if err != ErrQueueIsClosed {
t.Errorf("Expected queue to be closed: %v", err)
}
// Check that Put() returns an error.
- vlog.VI(1).Infof("Put")
+ logger.Global().VI(1).Infof("Put")
err = queue.Put(0)
if err != ErrQueueIsClosed {
t.Errorf("Expected queue to be closed: %v", err)
@@ -284,7 +284,7 @@
// Sum up the results and compare.
sum := uint32(0)
count := uint32(0)
- vlog.VI(1).Infof("Start consumers")
+ logger.Global().VI(1).Infof("Start consumers")
for i := 0; i != readerCount; i++ {
pid := i
go func() {
@@ -302,13 +302,13 @@
atomic.AddUint32(&sum, uint32(item.(int)))
atomic.AddUint32(&count, 1)
}
- vlog.VI(1).Infof("Consumer %d done", pid)
+ logger.Global().VI(1).Infof("Consumer %d done", pid)
pending.Done()
}()
}
// Generate the sequential ints.
- vlog.VI(1).Infof("Start producers")
+ logger.Global().VI(1).Infof("Start producers")
for i := 0; i != writerCount; i++ {
pid := i
go func() {
@@ -318,18 +318,18 @@
t.Errorf("Put: %v", err)
}
}
- vlog.VI(1).Infof("Producer %d done", pid)
+ logger.Global().VI(1).Infof("Producer %d done", pid)
pending.Done()
}()
}
- vlog.VI(1).Infof("Start termination checker")
+ logger.Global().VI(1).Infof("Start termination checker")
go func() {
pending.Wait()
done <- struct{}{}
}()
- vlog.VI(1).Infof("Wait for processes")
+ logger.Global().VI(1).Infof("Wait for processes")
stop := false
for !stop {
time.Sleep(100 * time.Millisecond)
@@ -341,7 +341,7 @@
}
}
- vlog.VI(1).Infof("Checking the sum")
+ logger.Global().VI(1).Infof("Checking the sum")
expected := writerCount * elementCount * (elementCount - 1) / 2
s := atomic.LoadUint32(&sum)
if s != uint32(expected) {
diff --git a/runtime/internal/lib/websocket/listener.go b/runtime/internal/lib/websocket/listener.go
index 23af7d7..e32a116 100644
--- a/runtime/internal/lib/websocket/listener.go
+++ b/runtime/internal/lib/websocket/listener.go
@@ -16,8 +16,7 @@
"github.com/gorilla/websocket"
- "v.io/x/lib/vlog"
-
+ "v.io/x/ref/internal/logger"
"v.io/x/ref/runtime/internal/lib/tcputil"
)
@@ -87,7 +86,7 @@
case error:
return nil, v
default:
- vlog.Errorf("Unexpected type %T in channel (%v)", v, v)
+ logger.Global().Errorf("Unexpected type %T in channel (%v)", v, v)
}
}
}
@@ -102,7 +101,7 @@
ln.mu.Unlock()
addr := ln.netLn.Addr()
err := ln.netLn.Close()
- vlog.VI(1).Infof("Closed net.Listener on (%q, %q): %v", addr.Network(), addr, err)
+ logger.Global().VI(1).Infof("Closed net.Listener on (%q, %q): %v", addr.Network(), addr, err)
// netAcceptLoop might be trying to push new TCP connections that
// arrived while the listener was being closed. Drop those.
drainChan(ln.acceptQ)
@@ -137,9 +136,9 @@
ln.acceptQ <- err
continue
}
- vlog.VI(1).Infof("New net.Conn accepted from %s (local address: %s)", conn.RemoteAddr(), conn.LocalAddr())
+ logger.Global().VI(1).Infof("New net.Conn accepted from %s (local address: %s)", conn.RemoteAddr(), conn.LocalAddr())
if err := tcputil.EnableTCPKeepAlive(conn); err != nil {
- vlog.Errorf("Failed to enable TCP keep alive: %v", err)
+ logger.Global().Errorf("Failed to enable TCP keep alive: %v", err)
}
classifications.Add(1)
go ln.classify(conn, &classifications)
@@ -160,7 +159,7 @@
n, err := io.ReadFull(conn, magic[:])
if err != nil {
// Unable to classify, ignore this connection.
- vlog.VI(1).Infof("Shutting down connection from %v since the magic bytes could not be read: %v", conn.RemoteAddr(), err)
+ logger.Global().VI(1).Infof("Shutting down connection from %v since the magic bytes could not be read: %v", conn.RemoteAddr(), err)
conn.Close()
return
}
@@ -184,12 +183,12 @@
ws, err := websocket.Upgrade(w, r, nil, bufferSize, bufferSize)
if _, ok := err.(websocket.HandshakeError); ok {
http.Error(w, "Not a websocket handshake", http.StatusBadRequest)
- vlog.Errorf("Rejected a non-websocket request: %v", err)
+ logger.Global().Errorf("Rejected a non-websocket request: %v", err)
return
}
if err != nil {
http.Error(w, "Internal Error", http.StatusInternalServerError)
- vlog.Errorf("Rejected a non-websocket request: %v", err)
+ logger.Global().Errorf("Rejected a non-websocket request: %v", err)
return
}
ln.acceptQ <- WebsocketConn(ws)
diff --git a/runtime/internal/naming/namespace/all_test.go b/runtime/internal/naming/namespace/all_test.go
index a27c68e..381ad6b 100644
--- a/runtime/internal/naming/namespace/all_test.go
+++ b/runtime/internal/naming/namespace/all_test.go
@@ -20,7 +20,6 @@
"v.io/v23/rpc"
"v.io/v23/security"
"v.io/v23/verror"
- "v.io/x/lib/vlog"
"v.io/x/ref/lib/xrpc"
_ "v.io/x/ref/runtime/factories/generic"
@@ -91,11 +90,11 @@
a, foundA := contains(got, want)
b, foundB := contains(want, got)
if !foundA {
- vlog.Infof("%s: %q: failed to find %q: got %v, want %v", caller, name, a, got, want)
+ t.Logf("%s: %q: failed to find %q: got %v, want %v", caller, name, a, got, want)
boom(t, "%s: %q: failed to find %q: got %v, want %v", caller, name, a, got, want)
}
if !foundB {
- vlog.Infof("%s: %q: failed to find %q: got %v, want %v", caller, name, a, got, want)
+ t.Logf("%s: %q: failed to find %q: got %v, want %v", caller, name, a, got, want)
boom(t, "%s: %q: failed to find %q: got %v, want %v", caller, name, b, got, want)
}
}
@@ -615,11 +614,11 @@
_, _, _, stopper := createNamespace(t, sc)
defer func() {
- vlog.Infof("%d goroutines:", runtime.NumGoroutine())
+ sc.Infof("%d goroutines:", runtime.NumGoroutine())
}()
defer stopper()
defer func() {
- vlog.Infof("%d goroutines:", runtime.NumGoroutine())
+ sc.Infof("%d goroutines:", runtime.NumGoroutine())
}()
//panic("this will show up lots of goroutine+channel leaks!!!!")
}
diff --git a/runtime/internal/naming/namespace/cache.go b/runtime/internal/naming/namespace/cache.go
index 9479055..f7dc89f 100644
--- a/runtime/internal/naming/namespace/cache.go
+++ b/runtime/internal/naming/namespace/cache.go
@@ -10,9 +10,9 @@
"sync"
"time"
+ "v.io/v23/context"
"v.io/v23/naming"
"v.io/v23/verror"
- "v.io/x/lib/vlog"
)
// maxCacheEntries is the max number of cache entries to keep. It exists only so that we
@@ -24,9 +24,9 @@
// cache is a generic interface to the resolution cache.
type cache interface {
- remember(prefix string, entry *naming.MountEntry)
- forget(names []string)
- lookup(name string) (naming.MountEntry, error)
+ remember(ctx *context.T, prefix string, entry *naming.MountEntry)
+ forget(ctx *context.T, names []string)
+ lookup(ctx *context.T, name string) (naming.MountEntry, error)
}
// ttlCache is an instance of cache that obeys ttl from the mount points.
@@ -81,7 +81,7 @@
}
// remember the servers associated with name with suffix removed.
-func (c *ttlCache) remember(prefix string, entry *naming.MountEntry) {
+func (c *ttlCache) remember(ctx *context.T, prefix string, entry *naming.MountEntry) {
// Remove suffix. We only care about the name that gets us
// to the mounttable from the last mounttable.
prefix = naming.Clean(prefix)
@@ -106,7 +106,7 @@
// forget cache entries whose index begins with an element of names. If names is nil
// forget all cached entries.
-func (c *ttlCache) forget(names []string) {
+func (c *ttlCache) forget(ctx *context.T, names []string) {
c.Lock()
defer c.Unlock()
for key := range c.entries {
@@ -123,7 +123,7 @@
// lookup searches the cache for a maximal prefix of name and returns the associated servers,
// prefix, and suffix. If any of the associated servers is expired, don't return anything
// since that would reduce availability.
-func (c *ttlCache) lookup(name string) (naming.MountEntry, error) {
+func (c *ttlCache) lookup(ctx *context.T, name string) (naming.MountEntry, error) {
name = naming.Clean(name)
c.Lock()
defer c.Unlock()
@@ -136,7 +136,7 @@
if isStale(now, e) {
return e, verror.New(naming.ErrNoSuchName, nil, name)
}
- vlog.VI(2).Infof("namespace cache %s -> %v %s", name, e.Servers, e.Name)
+ ctx.VI(2).Infof("namespace cache %s -> %v %s", name, e.Servers, e.Name)
e.Name = suffix
return e, nil
}
@@ -159,9 +159,9 @@
// nullCache is an instance of cache that does nothing.
type nullCache int
-func newNullCache() cache { return nullCache(1) }
-func (nullCache) remember(prefix string, entry *naming.MountEntry) {}
-func (nullCache) forget(names []string) {}
-func (nullCache) lookup(name string) (e naming.MountEntry, err error) {
+func newNullCache() cache { return nullCache(1) }
+func (nullCache) remember(ctx *context.T, prefix string, entry *naming.MountEntry) {}
+func (nullCache) forget(ctx *context.T, names []string) {}
+func (nullCache) lookup(ctx *context.T, name string) (e naming.MountEntry, err error) {
return e, verror.New(naming.ErrNoSuchName, nil, name)
}
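Note: the cache methods take ctx purely so implementations can log
(ttlCache logs hits at V(2); nullCache ignores it). Implementations just
accept and forward the context; a hypothetical wrapper that adds lookup
logging:

    type loggingCache struct {
        cache // embed any existing implementation, e.g. newTTLCache()
    }

    func (c *loggingCache) lookup(ctx *context.T, name string) (naming.MountEntry, error) {
        e, err := c.cache.lookup(ctx, name)
        ctx.VI(2).Infof("cache lookup %q -> err=%v", name, err)
        return e, err
    }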
diff --git a/runtime/internal/naming/namespace/cache_test.go b/runtime/internal/naming/namespace/cache_test.go
index ae8c391..17fc42d 100644
--- a/runtime/internal/naming/namespace/cache_test.go
+++ b/runtime/internal/naming/namespace/cache_test.go
@@ -11,6 +11,7 @@
"v.io/v23/naming"
vdltime "v.io/v23/vdlroot/time"
+ "v.io/x/ref/test"
)
func compatible(server string, servers []naming.MountedServer) bool {
@@ -26,6 +27,8 @@
// TestCache tests the cache directly rather than via the namespace methods.
func TestCache(t *testing.T) {
+ ctx, shutdown := test.V23InitWithParams(test.InitParams{})
+ defer shutdown()
preload := []struct {
name string
suffix string
@@ -38,7 +41,7 @@
c := newTTLCache()
for _, p := range preload {
e := &naming.MountEntry{Name: p.suffix, Servers: []naming.MountedServer{{Server: p.server, Deadline: future(30)}}}
- c.remember(p.name, e)
+ c.remember(ctx, p.name, e)
}
tests := []struct {
@@ -55,7 +58,7 @@
{"/h3//d//e", "e", "/h4:1234", true},
}
for _, p := range tests {
- e, err := c.lookup(p.name)
+ e, err := c.lookup(ctx, p.name)
if (err == nil) != p.succeed {
t.Errorf("%s: lookup failed", p.name)
}
@@ -66,28 +69,32 @@
}
func TestCacheLimit(t *testing.T) {
+ ctx, shutdown := test.V23InitWithParams(test.InitParams{})
+ defer shutdown()
c := newTTLCache().(*ttlCache)
e := &naming.MountEntry{Servers: []naming.MountedServer{naming.MountedServer{Server: "the rain in spain", Deadline: future(3000)}}}
for i := 0; i < maxCacheEntries; i++ {
- c.remember(fmt.Sprintf("%d", i), e)
+ c.remember(ctx, fmt.Sprintf("%d", i), e)
if len(c.entries) > maxCacheEntries {
t.Errorf("unexpected cache size: got %d not %d", len(c.entries), maxCacheEntries)
}
}
// Adding one more element should reduce us to 3/4 full.
- c.remember(fmt.Sprintf("%d", maxCacheEntries), e)
+ c.remember(ctx, fmt.Sprintf("%d", maxCacheEntries), e)
if len(c.entries) != cacheHisteresisSize {
t.Errorf("cache shrunk wrong amount: got %d not %d", len(c.entries), cacheHisteresisSize)
}
}
func TestCacheTTL(t *testing.T) {
+ ctx, shutdown := test.V23InitWithParams(test.InitParams{})
+ defer shutdown()
before := vdltime.Deadline{time.Now()}
c := newTTLCache().(*ttlCache)
// Fill cache.
e := &naming.MountEntry{Servers: []naming.MountedServer{naming.MountedServer{Server: "the rain in spain", Deadline: future(3000)}}}
for i := 0; i < maxCacheEntries; i++ {
- c.remember(fmt.Sprintf("%d", i), e)
+ c.remember(ctx, fmt.Sprintf("%d", i), e)
}
// Time out half the entries.
i := len(c.entries) / 2
@@ -99,13 +106,15 @@
i--
}
// Add an entry and make sure we now have room.
- c.remember(fmt.Sprintf("%d", maxCacheEntries+2), e)
+ c.remember(ctx, fmt.Sprintf("%d", maxCacheEntries+2), e)
if len(c.entries) > cacheHisteresisSize {
t.Errorf("entries did not timeout: got %d not %d", len(c.entries), cacheHisteresisSize)
}
}
func TestFlushCacheEntry(t *testing.T) {
+ ctx, shutdown := test.V23InitWithParams(test.InitParams{})
+ defer shutdown()
preload := []struct {
name string
server string
@@ -118,25 +127,25 @@
c := ns.resolutionCache.(*ttlCache)
for _, p := range preload {
e := &naming.MountEntry{Servers: []naming.MountedServer{naming.MountedServer{Server: "p.server", Deadline: future(3000)}}}
- c.remember(p.name, e)
+ c.remember(ctx, p.name, e)
}
toflush := "/h1/xyzzy"
- if ns.FlushCacheEntry(toflush) {
+ if ns.FlushCacheEntry(ctx, toflush) {
t.Errorf("%s should not have caused anything to flush", toflush)
}
toflush = "/h1/a/b/d/e"
- if !ns.FlushCacheEntry(toflush) {
+ if !ns.FlushCacheEntry(ctx, toflush) {
t.Errorf("%s should have caused something to flush", toflush)
}
name := preload[2].name
- if _, err := c.lookup(name); err != nil {
+ if _, err := c.lookup(ctx, name); err != nil {
t.Errorf("%s should not have been flushed", name)
}
if len(c.entries) != 2 {
t.Errorf("%s flushed too many entries", toflush)
}
toflush = preload[1].name
- if !ns.FlushCacheEntry(toflush) {
+ if !ns.FlushCacheEntry(ctx, toflush) {
t.Errorf("%s should have caused something to flush", toflush)
}
if _, ok := c.entries[toflush]; ok {
@@ -157,6 +166,8 @@
}
func TestCacheDisableEnable(t *testing.T) {
+ ctx, shutdown := test.V23InitWithParams(test.InitParams{})
+ defer shutdown()
ns, _ := New()
// Default should be working resolution cache.
@@ -164,8 +175,8 @@
serverName := "/h2//"
c := ns.resolutionCache.(*ttlCache)
e := &naming.MountEntry{Servers: []naming.MountedServer{naming.MountedServer{Server: serverName, Deadline: future(3000)}}}
- c.remember(name, e)
- if ne, err := c.lookup(name); err != nil || ne.Servers[0].Server != serverName {
+ c.remember(ctx, name, e)
+ if ne, err := c.lookup(ctx, name); err != nil || ne.Servers[0].Server != serverName {
t.Errorf("should have found the server in the cache")
}
@@ -175,8 +186,8 @@
t.Errorf("caching not disabled")
}
nc := ns.resolutionCache.(nullCache)
- nc.remember(name, e)
- if _, err := nc.lookup(name); err == nil {
+ nc.remember(ctx, name, e)
+ if _, err := nc.lookup(ctx, name); err == nil {
t.Errorf("should not have found the server in the cache")
}
@@ -186,8 +197,8 @@
t.Errorf("caching disabled")
}
c = ns.resolutionCache.(*ttlCache)
- c.remember(name, e)
- if ne, err := c.lookup(name); err != nil || ne.Servers[0].Server != serverName {
+ c.remember(ctx, name, e)
+ if ne, err := c.lookup(ctx, name); err != nil || ne.Servers[0].Server != serverName {
t.Errorf("should have found the server in the cache")
}
}
diff --git a/runtime/internal/naming/namespace/glob.go b/runtime/internal/naming/namespace/glob.go
index ce073ef..6e334a2 100644
--- a/runtime/internal/naming/namespace/glob.go
+++ b/runtime/internal/naming/namespace/glob.go
@@ -9,8 +9,6 @@
"strings"
"sync"
- "v.io/x/lib/vlog"
-
"v.io/v23"
"v.io/v23/context"
"v.io/v23/naming"
@@ -62,7 +60,7 @@
}()
client := v23.GetClient(ctx)
pstr := t.pattern.String()
- vlog.VI(2).Infof("globAtServer(%v, %v)", *t.me, pstr)
+ ctx.VI(2).Infof("globAtServer(%v, %v)", *t.me, pstr)
servers := []string{}
for _, s := range t.me.Servers {
diff --git a/runtime/internal/naming/namespace/mount.go b/runtime/internal/naming/namespace/mount.go
index 9bf8494..884cec1 100644
--- a/runtime/internal/naming/namespace/mount.go
+++ b/runtime/internal/naming/namespace/mount.go
@@ -13,7 +13,6 @@
"v.io/v23/options"
"v.io/v23/rpc"
"v.io/v23/security"
- "v.io/x/lib/vlog"
"v.io/x/ref/lib/apilog"
)
@@ -53,7 +52,7 @@
return mountIntoMountTable(ctx, client, mt, server, ttl, flags, id, getCallOpts(opts)...)
}
err := ns.dispatch(ctx, name, f, opts)
- vlog.VI(1).Infof("Mount(%s, %q) -> %v", name, server, err)
+ ctx.VI(1).Infof("Mount(%s, %q) -> %v", name, server, err)
return err
}
@@ -74,7 +73,7 @@
return unmountFromMountTable(ctx, client, mt, server, id, getCallOpts(opts)...)
}
err := ns.dispatch(ctx, name, f, opts)
- vlog.VI(1).Infof("Unmount(%s, %s) -> %v", name, server, err)
+ ctx.VI(1).Infof("Unmount(%s, %s) -> %v", name, server, err)
return err
}
@@ -96,7 +95,7 @@
return deleteFromMountTable(ctx, client, mt, deleteSubtree, id, getCallOpts(opts)...)
}
err := ns.dispatch(ctx, name, f, opts)
- vlog.VI(1).Infof("Remove(%s, %v) -> %v", name, deleteSubtree, err)
+ ctx.VI(1).Infof("Remove(%s, %v) -> %v", name, deleteSubtree, err)
return err
}
diff --git a/runtime/internal/naming/namespace/parallelstartcall.go b/runtime/internal/naming/namespace/parallelstartcall.go
index 246e4eb..77525a2 100644
--- a/runtime/internal/naming/namespace/parallelstartcall.go
+++ b/runtime/internal/naming/namespace/parallelstartcall.go
@@ -117,6 +117,6 @@
}
finalerr := collectStati(c, len(mts))
// Forget any previous cached information about these names.
- ns.resolutionCache.forget(mts)
+ ns.resolutionCache.forget(ctx, mts)
return finalerr
}
diff --git a/runtime/internal/naming/namespace/perms.go b/runtime/internal/naming/namespace/perms.go
index 7ee0b16..df7302c 100644
--- a/runtime/internal/naming/namespace/perms.go
+++ b/runtime/internal/naming/namespace/perms.go
@@ -11,7 +11,6 @@
"v.io/v23/options"
"v.io/v23/rpc"
"v.io/v23/security/access"
- "v.io/x/lib/vlog"
"v.io/x/ref/lib/apilog"
)
@@ -32,7 +31,7 @@
return setPermsInMountTable(ctx, client, mt, perms, version, id, getCallOpts(opts))
}
err := ns.dispatch(ctx, name, f, opts)
- vlog.VI(1).Infof("SetPermissions(%s, %v, %s) -> %v", name, perms, version, err)
+ ctx.VI(1).Infof("SetPermissions(%s, %v, %s) -> %v", name, perms, version, err)
return err
}
diff --git a/runtime/internal/naming/namespace/resolve.go b/runtime/internal/naming/namespace/resolve.go
index f0abc5e..85a3aab 100644
--- a/runtime/internal/naming/namespace/resolve.go
+++ b/runtime/internal/naming/namespace/resolve.go
@@ -14,7 +14,6 @@
"v.io/v23/options"
"v.io/v23/rpc"
"v.io/v23/verror"
- "v.io/x/lib/vlog"
"v.io/x/ref/lib/apilog"
)
@@ -25,8 +24,8 @@
for _, s := range e.Servers {
name := naming.JoinAddressName(s.Server, e.Name)
// First check the cache.
- if ne, err := ns.resolutionCache.lookup(name); err == nil {
- vlog.VI(2).Infof("resolveAMT %s from cache -> %v", name, convertServersToStrings(ne.Servers, ne.Name))
+ if ne, err := ns.resolutionCache.lookup(ctx, name); err == nil {
+ ctx.VI(2).Infof("resolveAMT %s from cache -> %v", name, convertServersToStrings(ne.Servers, ne.Name))
return &ne, nil
}
// Not in cache, call the real server.
@@ -44,15 +43,15 @@
}
// Keep track of the final error and continue with next server.
finalErr = err
- vlog.VI(2).Infof("resolveAMT: Finish %s failed: %s", name, err)
+ ctx.VI(2).Infof("resolveAMT: Finish %s failed: %s", name, err)
continue
}
// Add result to cache.
- ns.resolutionCache.remember(name, entry)
- vlog.VI(2).Infof("resolveAMT %s -> %v", name, entry)
+ ns.resolutionCache.remember(ctx, name, entry)
+ ctx.VI(2).Infof("resolveAMT %s -> %v", name, entry)
return entry, nil
}
- vlog.VI(2).Infof("resolveAMT %v -> %v", e.Servers, finalErr)
+ ctx.VI(2).Infof("resolveAMT %v -> %v", e.Servers, finalErr)
return nil, finalErr
}
@@ -64,10 +63,10 @@
func (ns *namespace) Resolve(ctx *context.T, name string, opts ...naming.NamespaceOpt) (*naming.MountEntry, error) {
defer apilog.LogCallf(ctx, "name=%.10s...,opts...=%v", name, opts)(ctx, "") // gologcop: DO NOT EDIT, MUST BE FIRST STATEMENT
e, _ := ns.rootMountEntry(name, opts...)
- if vlog.V(2) {
+ if ctx.V(2) {
_, file, line, _ := runtime.Caller(1)
- vlog.Infof("Resolve(%s) called from %s:%d", name, file, line)
- vlog.Infof("Resolve(%s) -> rootMountEntry %v", name, *e)
+ ctx.Infof("Resolve(%s) called from %s:%d", name, file, line)
+ ctx.Infof("Resolve(%s) -> rootMountEntry %v", name, *e)
}
if skipResolve(opts) {
return e, nil
@@ -80,9 +79,9 @@
// Iterate walking through mount table servers.
for remaining := ns.maxResolveDepth; remaining > 0; remaining-- {
- vlog.VI(2).Infof("Resolve(%s) loop %v", name, *e)
+ ctx.VI(2).Infof("Resolve(%s) loop %v", name, *e)
if !e.ServesMountTable || terminal(e) {
- vlog.VI(1).Infof("Resolve(%s) -> %v", name, *e)
+ ctx.VI(1).Infof("Resolve(%s) -> %v", name, *e)
return e, nil
}
var err error
@@ -91,13 +90,13 @@
// Lots of reasons why another error can happen. We are trying
// to single out "this isn't a mount table".
if notAnMT(err) {
- vlog.VI(1).Infof("Resolve(%s) -> %v", name, curr)
+ ctx.VI(1).Infof("Resolve(%s) -> %v", name, curr)
return curr, nil
}
if verror.ErrorID(err) == naming.ErrNoSuchNameRoot.ID {
err = verror.New(naming.ErrNoSuchName, ctx, name)
}
- vlog.VI(1).Infof("Resolve(%s) -> (%s: %v)", err, name, curr)
+ ctx.VI(1).Infof("Resolve(%s) -> (%s: %v)", err, name, curr)
return nil, err
}
}
@@ -108,10 +107,10 @@
func (ns *namespace) ResolveToMountTable(ctx *context.T, name string, opts ...naming.NamespaceOpt) (*naming.MountEntry, error) {
defer apilog.LogCallf(ctx, "name=%.10s...,opts...=%v", name, opts)(ctx, "") // gologcop: DO NOT EDIT, MUST BE FIRST STATEMENT
e, _ := ns.rootMountEntry(name, opts...)
- if vlog.V(2) {
+ if ctx.V(2) {
_, file, line, _ := runtime.Caller(1)
- vlog.Infof("ResolveToMountTable(%s) called from %s:%d", name, file, line)
- vlog.Infof("ResolveToMountTable(%s) -> rootNames %v", name, e)
+ ctx.Infof("ResolveToMountTable(%s) called from %s:%d", name, file, line)
+ ctx.Infof("ResolveToMountTable(%s) -> rootNames %v", name, e)
}
if len(e.Servers) == 0 {
return nil, verror.New(naming.ErrNoMountTable, ctx)
@@ -120,33 +119,33 @@
client := v23.GetClient(ctx)
last := e
for remaining := ns.maxResolveDepth; remaining > 0; remaining-- {
- vlog.VI(2).Infof("ResolveToMountTable(%s) loop %v", name, e)
+ ctx.VI(2).Infof("ResolveToMountTable(%s) loop %v", name, e)
var err error
curr := e
// If the next name to resolve doesn't point to a mount table, we're done.
if !e.ServesMountTable || terminal(e) {
- vlog.VI(1).Infof("ResolveToMountTable(%s) -> %v", name, last)
+ ctx.VI(1).Infof("ResolveToMountTable(%s) -> %v", name, last)
return last, nil
}
if e, err = ns.resolveAgainstMountTable(ctx, client, e, callOpts...); err != nil {
if verror.ErrorID(err) == naming.ErrNoSuchNameRoot.ID {
- vlog.VI(1).Infof("ResolveToMountTable(%s) -> %v (NoSuchRoot: %v)", name, last, curr)
+ ctx.VI(1).Infof("ResolveToMountTable(%s) -> %v (NoSuchRoot: %v)", name, last, curr)
return last, nil
}
if verror.ErrorID(err) == naming.ErrNoSuchName.ID {
- vlog.VI(1).Infof("ResolveToMountTable(%s) -> %v (NoSuchName: %v)", name, curr, curr)
+ ctx.VI(1).Infof("ResolveToMountTable(%s) -> %v (NoSuchName: %v)", name, curr, curr)
return curr, nil
}
// Lots of reasons why another error can happen. We are trying
// to single out "this isn't a mount table".
if notAnMT(err) {
- vlog.VI(1).Infof("ResolveToMountTable(%s) -> %v", name, last)
+ ctx.VI(1).Infof("ResolveToMountTable(%s) -> %v", name, last)
return last, nil
}
// TODO(caprita): If the server is unreachable for
// example, we may still want to return its parent
// mounttable rather than an error.
- vlog.VI(1).Infof("ResolveToMountTable(%s) -> %v", name, err)
+ ctx.VI(1).Infof("ResolveToMountTable(%s) -> %v", name, err)
return nil, err
}
last = curr
@@ -156,21 +155,21 @@
// FlushCache flushes the most specific entry found for name. It returns true if anything was
// actually flushed.
-func (ns *namespace) FlushCacheEntry(name string) bool {
+func (ns *namespace) FlushCacheEntry(ctx *context.T, name string) bool {
defer apilog.LogCallf(nil, "name=%.10s...", name)(nil, "") // gologcop: DO NOT EDIT, MUST BE FIRST STATEMENT
flushed := false
for _, n := range ns.rootName(name) {
// Walk the cache as we would in a resolution. Unlike a resolution, we have to follow
// all branches since we want to flush all entries at which we might end up whereas in a resolution,
// we stop with the first branch that works.
- if e, err := ns.resolutionCache.lookup(n); err == nil {
+ if e, err := ns.resolutionCache.lookup(ctx, n); err == nil {
// Recurse.
for _, s := range e.Servers {
- flushed = flushed || ns.FlushCacheEntry(naming.Join(s.Server, e.Name))
+ flushed = flushed || ns.FlushCacheEntry(ctx, naming.Join(s.Server, e.Name))
}
if !flushed {
// Forget the entry we just used.
- ns.resolutionCache.forget([]string{naming.TrimSuffix(n, e.Name)})
+ ns.resolutionCache.forget(ctx, []string{naming.TrimSuffix(n, e.Name)})
flushed = true
}
}
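Note: ctx.V(2) plays the same role the old vlog.V(2) guard did: skip
building log arguments unless verbosity is enabled. Sketch reusing this
file's helper:

    if ctx.V(2) {
        // Only materialize the server list at verbosity >= 2.
        ctx.Infof("resolveAMT %s -> %v", name, convertServersToStrings(e.Servers, e.Name))
    }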
diff --git a/runtime/internal/rpc/benchmark/benchmark_test.go b/runtime/internal/rpc/benchmark/benchmark_test.go
index 6859361..22f63b9 100644
--- a/runtime/internal/rpc/benchmark/benchmark_test.go
+++ b/runtime/internal/rpc/benchmark/benchmark_test.go
@@ -10,7 +10,6 @@
"v.io/v23"
"v.io/v23/context"
- "v.io/x/lib/vlog"
"v.io/x/ref/lib/security/securityflag"
"v.io/x/ref/lib/xrpc"
_ "v.io/x/ref/runtime/factories/static"
@@ -116,7 +115,7 @@
server, err := xrpc.NewServer(ctx, "", internal.NewService(), securityflag.NewAuthorizerOrDie())
if err != nil {
- vlog.Fatalf("NewServer failed: %v", err)
+ ctx.Fatalf("NewServer failed: %v", err)
}
serverAddr = server.Status().Endpoints[0].Name()
diff --git a/runtime/internal/rpc/benchmark/simple/main.go b/runtime/internal/rpc/benchmark/simple/main.go
index fabc662..fc5beaa 100644
--- a/runtime/internal/rpc/benchmark/simple/main.go
+++ b/runtime/internal/rpc/benchmark/simple/main.go
@@ -15,7 +15,6 @@
"v.io/v23/context"
"v.io/v23/naming"
- "v.io/x/lib/vlog"
"v.io/x/ref/lib/security/securityflag"
"v.io/x/ref/lib/xrpc"
@@ -47,6 +46,9 @@
// rpc.Client doesn't export an interface for closing connection. So we
// use the stream manager directly here.
func benchmarkRPCConnection(b *testing.B) {
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
+
mp := runtime.GOMAXPROCS(numCPUs)
defer runtime.GOMAXPROCS(mp)
@@ -55,11 +57,10 @@
b.ResetTimer()
for i := 0; i < b.N; i++ {
- client := manager.InternalNew(naming.FixedRoutingID(0xc))
+ client := manager.InternalNew(ctx, naming.FixedRoutingID(0xc))
b.StartTimer()
-
- _, err := client.Dial(serverEP, v23.GetPrincipal(nctx))
+ _, err := client.Dial(nctx, serverEP)
if err != nil {
ctx.Fatalf("Dial failed: %v", err)
}
@@ -130,7 +131,7 @@
server, err := xrpc.NewServer(ctx, "", internal.NewService(), securityflag.NewAuthorizerOrDie())
if err != nil {
- vlog.Fatalf("NewServer failed: %v", err)
+ ctx.Fatalf("NewServer failed: %v", err)
}
serverEP = server.Status().Endpoints[0]
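Note: the stream manager API now takes a context up front: InternalNew
gains a ctx and Dial drops its explicit principal argument (presumably
recovering the principal from the context). Call-site sketch:

    client := manager.InternalNew(ctx, naming.FixedRoutingID(0xc))
    _, err := client.Dial(ctx, serverEP)
    if err != nil {
        ctx.Fatalf("Dial failed: %v", err)
    }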
diff --git a/runtime/internal/rpc/cancel_test.go b/runtime/internal/rpc/cancel_test.go
index 716a456..9a11c07 100644
--- a/runtime/internal/rpc/cancel_test.go
+++ b/runtime/internal/rpc/cancel_test.go
@@ -13,7 +13,8 @@
"v.io/v23/naming"
"v.io/v23/rpc"
"v.io/v23/security"
- "v.io/x/lib/vlog"
+ "v.io/v23/verror"
+
"v.io/x/ref/runtime/internal/rpc/stream"
"v.io/x/ref/runtime/internal/rpc/stream/manager"
tnaming "v.io/x/ref/runtime/internal/testing/mocks/naming"
@@ -34,30 +35,30 @@
client, err := InternalNewClient(c.sm, c.ns)
if err != nil {
- vlog.Error(err)
+ ctx.Error(err)
return err
}
+ ctx.Infof("Run: %s", c.child)
if c.child != "" {
if _, err = client.StartCall(ctx, c.child, "Run", []interface{}{}); err != nil {
- vlog.Error(err)
+ ctx.Error(err)
return err
}
}
- vlog.Info(c.name, " waiting for cancellation")
<-ctx.Done()
- vlog.Info(c.name, " canceled")
close(c.canceled)
return nil
}
func makeCanceld(ctx *context.T, ns namespace.T, name, child string) (*canceld, error) {
- sm := manager.InternalNew(naming.FixedRoutingID(0x111111111))
- s, err := testInternalNewServer(ctx, sm, ns, v23.GetPrincipal(ctx))
+ sm := manager.InternalNew(ctx, naming.FixedRoutingID(0x111111111))
+ s, err := testInternalNewServer(ctx, sm, ns)
if err != nil {
return nil, err
}
+
if _, err := s.Listen(listenSpec); err != nil {
return nil, err
}
@@ -75,7 +76,7 @@
if err := s.Serve(name, c, security.AllowEveryone()); err != nil {
return nil, err
}
-
+ ctx.Infof("Serving: %q", name)
return c, nil
}
@@ -85,7 +86,7 @@
ctx, shutdown := initForTest()
defer shutdown()
var (
- sm = manager.InternalNew(naming.FixedRoutingID(0x555555555))
+ sm = manager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
ns = tnaming.NewSimpleNamespace()
pclient, pserver = newClientServerPrincipals()
serverCtx, _ = v23.WithPrincipal(ctx, pserver)
@@ -98,10 +99,9 @@
c1, err := makeCanceld(serverCtx, ns, "c1", "c2")
if err != nil {
- t.Fatal("Can't start server:", err)
+ t.Fatal("Can't start server:", err, verror.DebugString(err))
}
defer c1.stop()
-
c2, err := makeCanceld(serverCtx, ns, "c2", "")
if err != nil {
t.Fatal("Can't start server:", err)
@@ -117,10 +117,10 @@
<-c1.started
<-c2.started
- vlog.Info("cancelling initial call")
+ ctx.Info("cancelling initial call")
cancel()
- vlog.Info("waiting for children to be canceled")
+ ctx.Info("waiting for children to be canceled")
<-c1.canceled
<-c2.canceled
}
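
The cancellation flow above is the core of the test: a server method logs through the request context, blocks on ctx.Done(), and signals its own channel once canceled. As a standalone sketch (waitForCancel is an illustrative name):

func waitForCancel(ctx *context.T, canceled chan struct{}) {
	ctx.Info("waiting for cancellation")
	<-ctx.Done() // released when the caller's cancel() runs
	ctx.Info("canceled")
	close(canceled)
}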
diff --git a/runtime/internal/rpc/client.go b/runtime/internal/rpc/client.go
index 9c81d74..ba83eb7 100644
--- a/runtime/internal/rpc/client.go
+++ b/runtime/internal/rpc/client.go
@@ -13,8 +13,6 @@
"sync"
"time"
- "v.io/x/lib/vlog"
-
"v.io/v23"
"v.io/v23/context"
"v.io/v23/i18n"
@@ -94,9 +92,14 @@
c := &client{
streamMgr: streamMgr,
ns: ns,
- ipNets: ipNetworks(),
- vcCache: vc.NewVCCache(),
+
+ vcCache: vc.NewVCCache(),
}
+ ipNets, err := ipNetworks()
+ if err != nil {
+ return nil, err
+ }
+ c.ipNets = ipNets
c.dc = InternalNewDischargeClient(nil, c, 0)
for _, opt := range opts {
// Collect all client opts that are also vc opts.
@@ -145,7 +148,7 @@
}
sm := c.streamMgr
- v, err := sm.Dial(ep, principal, vcOpts...)
+ v, err := sm.Dial(ctx, ep, vcOpts...)
if err != nil {
return nil, suberr(err)
}
@@ -209,13 +212,13 @@
// RetryConnection and RetryRefetch required actions by the client before
// retrying.
if !shouldRetryBackoff(verror.Action(lastErr), deadline, opts) {
- vlog.VI(4).Infof("Cannot retry after error: %s", lastErr)
+ ctx.VI(4).Infof("Cannot retry after error: %s", lastErr)
break
}
if !backoff(retries, deadline) {
break
}
- vlog.VI(4).Infof("Retrying due to error: %s", lastErr)
+ ctx.VI(4).Infof("Retrying due to error: %s", lastErr)
}
return lastErr
}
@@ -373,7 +376,7 @@
}
if status.flow, status.serverErr = c.createFlow(ctx, principal, ep, append(vcOpts, &vc.ServerAuthorizer{Suffix: status.suffix, Method: method, Policy: auth})); status.serverErr != nil {
status.serverErr.Name = suberrName(server, name, method)
- vlog.VI(2).Infof("rpc: Failed to create Flow with %v: %v", server, status.serverErr.Err)
+ ctx.VI(2).Infof("rpc: Failed to create Flow with %v: %v", server, status.serverErr.Err)
return
}
@@ -398,7 +401,7 @@
// We will test for errServerAuthorizeFailed in failedTryCall and report
// verror.ErrNotTrusted
status.serverErr = suberr(verror.New(errServerAuthorizeFailed, ctx, status.flow.RemoteBlessings(), err))
- vlog.VI(2).Infof("rpc: Failed to authorize Flow created with server %v: %s", server, status.serverErr.Err)
+ ctx.VI(2).Infof("rpc: Failed to authorize Flow created with server %v: %s", server, status.serverErr.Err)
status.flow.Close()
status.flow = nil
return
@@ -473,7 +476,7 @@
responses := make([]*serverStatus, attempts)
ch := make(chan *serverStatus, attempts)
- vcOpts := append(getVCOpts(opts), c.vcOpts...)
+ vcOpts := append(translateVCOpts(opts), c.vcOpts...)
authorizer := newServerAuthorizer(blessingPattern, opts...)
for i, server := range resolved.Names() {
// Create a copy of vcOpts for each call to tryCreateFlow
@@ -505,7 +508,7 @@
}
}
case <-timeoutChan:
- vlog.VI(2).Infof("rpc: timeout on connection to server %v ", name)
+ ctx.VI(2).Infof("rpc: timeout on connection to server %v ", name)
_, _, _, err := c.failedTryCall(ctx, name, method, responses, ch)
if verror.ErrorID(err) != verror.ErrTimeout.ID {
return nil, verror.NoRetry, false, verror.New(verror.ErrTimeout, ctx, err)
@@ -540,7 +543,7 @@
Options: verror.Print,
Err: verror.New(verror.ErrNotTrusted, nil, verror.New(errPrepareBlessingsAndDischarges, ctx, r.flow.RemoteBlessings(), err)),
}
- vlog.VI(2).Infof("rpc: err: %s", r.serverErr)
+ ctx.VI(2).Infof("rpc: err: %s", r.serverErr)
r.flow.Close()
r.flow = nil
continue
@@ -616,7 +619,7 @@
// calls in tryCall failed or we timed out if we get here.
func (c *client) failedTryCall(ctx *context.T, name, method string, responses []*serverStatus, ch chan *serverStatus) (rpc.ClientCall, verror.ActionCode, bool, error) {
go cleanupTryCall(nil, responses, ch)
- c.ns.FlushCacheEntry(name)
+ c.ns.FlushCacheEntry(ctx, name)
suberrs := []verror.SubErr{}
topLevelError := verror.ErrNoServers
topLevelAction := verror.RetryRefetch
@@ -1017,7 +1020,7 @@
// with retrying again and again with this discharge. As there is no direct way
// to detect it, we conservatively flush all discharges we used from the cache.
// TODO(ataly,andreser): add verror.BadDischarge and handle it explicitly?
- vlog.VI(3).Infof("Discarding %d discharges as RPC failed with %v", len(fc.discharges), fc.response.Error)
+ fc.ctx.VI(3).Infof("Discarding %d discharges as RPC failed with %v", len(fc.discharges), fc.response.Error)
fc.dc.Invalidate(fc.ctx, fc.discharges...)
}
if id == errBadNumInputArgs.ID || id == errBadInputArg.ID {
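
The client.go changes combine two independent fixes: verbosity-gated logging moves onto the context, and ipNetworks failures now abort client construction instead of being swallowed. A sketch of the logging half (logRetry is an illustrative wrapper, not in the source):

func logRetry(ctx *context.T, lastErr error) {
	// ctx.VI(n) replaces vlog.VI(n): both the verbosity check and the
	// log destination come from the logger carried by the context.
	ctx.VI(4).Infof("Retrying due to error: %s", lastErr)
}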
diff --git a/runtime/internal/rpc/debug_test.go b/runtime/internal/rpc/debug_test.go
index 6d9445c..98dac91 100644
--- a/runtime/internal/rpc/debug_test.go
+++ b/runtime/internal/rpc/debug_test.go
@@ -15,7 +15,8 @@
"v.io/v23/naming"
"v.io/v23/options"
"v.io/v23/rpc"
- "v.io/x/lib/vlog"
+
+ "v.io/x/ref/internal/logger"
"v.io/x/ref/lib/stats"
"v.io/x/ref/runtime/internal/rpc/stream/manager"
tnaming "v.io/x/ref/runtime/internal/testing/mocks/naming"
@@ -32,16 +33,19 @@
pclient = testutil.NewPrincipal("client")
pserver = testutil.NewPrincipal("server")
bclient = bless(pserver, pclient, "client") // server/client blessing.
+ sctx, _ = v23.WithPrincipal(ctx, pserver)
+ cctx, _ = v23.WithPrincipal(ctx, pclient)
)
pclient.AddToRoots(bclient) // Client recognizes "server" as a root of blessings.
pclient.BlessingStore().Set(bclient, "server") // Client presents bclient to server
- debugDisp := debuglib.NewDispatcher(vlog.Log.LogDir, nil)
+ debugDisp := debuglib.NewDispatcher(logger.Manager(ctx).LogDir, nil)
- sm := manager.InternalNew(naming.FixedRoutingID(0x555555555))
+ sm := manager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
defer sm.Shutdown()
ns := tnaming.NewSimpleNamespace()
- server, err := testInternalNewServer(ctx, sm, ns, pserver, ReservedNameDispatcher{debugDisp})
+
+ server, err := testInternalNewServer(sctx, sm, ns, ReservedNameDispatcher{debugDisp})
if err != nil {
t.Fatalf("InternalNewServer failed: %v", err)
}
@@ -53,7 +57,7 @@
if err := server.Serve("", &testObject{}, nil); err != nil {
t.Fatalf("server.Serve failed: %v", err)
}
- ctx, _ = v23.WithPrincipal(ctx, pclient)
+
client, err := InternalNewClient(sm, ns)
if err != nil {
t.Fatalf("InternalNewClient failed: %v", err)
@@ -63,7 +67,7 @@
// Call the Foo method on ""
{
var value string
- if err := client.Call(ctx, ep.Name(), "Foo", nil, []interface{}{&value}); err != nil {
+ if err := client.Call(cctx, ep.Name(), "Foo", nil, []interface{}{&value}); err != nil {
t.Fatalf("client.Call failed: %v", err)
}
if want := "BAR"; value != want {
@@ -76,7 +80,7 @@
foo.Set("The quick brown fox jumps over the lazy dog")
addr := naming.JoinAddressName(ep.String(), "__debug/stats/testing/foo")
var value string
- if err := client.Call(ctx, addr, "Value", nil, []interface{}{&value}, options.NoResolve{}); err != nil {
+ if err := client.Call(cctx, addr, "Value", nil, []interface{}{&value}, options.NoResolve{}); err != nil {
t.Fatalf("client.Call failed: %v", err)
}
if want := foo.Value(); value != want {
@@ -96,7 +100,7 @@
}
for _, tc := range testcases {
addr := naming.JoinAddressName(ep.String(), tc.name)
- call, err := client.StartCall(ctx, addr, rpc.GlobMethod, []interface{}{tc.pattern}, options.NoResolve{})
+ call, err := client.StartCall(cctx, addr, rpc.GlobMethod, []interface{}{tc.pattern}, options.NoResolve{})
if err != nil {
t.Fatalf("client.StartCall failed for %q: %v", tc.name, err)
}
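
The test restructuring here follows one recipe, repeated throughout this change: derive one context per role from the root context, each carrying its own principal, and hand the right one to each component. Roughly (a sketch using the principals defined above):

sctx, _ := v23.WithPrincipal(ctx, pserver) // server-side context
cctx, _ := v23.WithPrincipal(ctx, pclient) // client-side context
server, err := testInternalNewServer(sctx, sm, ns)
if err != nil {
	t.Fatalf("InternalNewServer failed: %v", err)
}
// All client calls then use cctx, e.g. client.Call(cctx, name, ...).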
diff --git a/runtime/internal/rpc/discharges.go b/runtime/internal/rpc/discharges.go
index d69e349..f0b3446 100644
--- a/runtime/internal/rpc/discharges.go
+++ b/runtime/internal/rpc/discharges.go
@@ -17,7 +17,6 @@
"v.io/v23/security"
"v.io/v23/vdl"
"v.io/v23/vtrace"
- "v.io/x/lib/vlog"
)
// NoDischarges specifies that the RPC call should not fetch discharges.
@@ -150,9 +149,9 @@
defer wg.Done()
tp := cav.ThirdPartyDetails()
var dis security.Discharge
- vlog.VI(3).Infof("Fetching discharge for %v", tp)
+ ctx.VI(3).Infof("Fetching discharge for %v", tp)
if err := d.c.Call(ctx, tp.Location(), "Discharge", []interface{}{cav, impetuses[i]}, []interface{}{&dis}, NoDischarges{}); err != nil {
- vlog.VI(3).Infof("Discharge fetch for %v failed: %v", tp, err)
+ ctx.VI(3).Infof("Discharge fetch for %v failed: %v", tp, err)
return
}
discharges <- fetched{i, dis, caveats[i], impetuses[i]}
@@ -167,7 +166,7 @@
got++
}
if want > 0 {
- vlog.VI(3).Infof("fetchDischarges: got %d of %d discharge(s) (total %d caveats)", got, want, len(caveats))
+ ctx.VI(3).Infof("fetchDischarges: got %d of %d discharge(s) (total %d caveats)", got, want, len(caveats))
}
if got == 0 || got == want {
return
diff --git a/runtime/internal/rpc/full_test.go b/runtime/internal/rpc/full_test.go
index 8224776..4ad5369 100644
--- a/runtime/internal/rpc/full_test.go
+++ b/runtime/internal/rpc/full_test.go
@@ -21,7 +21,6 @@
"v.io/x/lib/netstate"
"v.io/x/lib/pubsub"
- "v.io/x/lib/vlog"
"v.io/v23"
"v.io/v23/context"
@@ -79,16 +78,16 @@
c.Unlock()
}
-func testInternalNewServerWithPubsub(ctx *context.T, streamMgr stream.Manager, ns namespace.T, settingsPublisher *pubsub.Publisher, settingsStreamName string, principal security.Principal, opts ...rpc.ServerOpt) (rpc.Server, error) {
+func testInternalNewServerWithPubsub(ctx *context.T, streamMgr stream.Manager, ns namespace.T, settingsPublisher *pubsub.Publisher, settingsStreamName string, opts ...rpc.ServerOpt) (rpc.Server, error) {
client, err := InternalNewClient(streamMgr, ns)
if err != nil {
return nil, err
}
- return InternalNewServer(ctx, streamMgr, ns, settingsPublisher, settingsStreamName, client, principal, opts...)
+ return InternalNewServer(ctx, streamMgr, ns, settingsPublisher, settingsStreamName, client, opts...)
}
-func testInternalNewServer(ctx *context.T, streamMgr stream.Manager, ns namespace.T, principal security.Principal, opts ...rpc.ServerOpt) (rpc.Server, error) {
- return testInternalNewServerWithPubsub(ctx, streamMgr, ns, nil, "", principal, opts...)
+func testInternalNewServer(ctx *context.T, streamMgr stream.Manager, ns namespace.T, opts ...rpc.ServerOpt) (rpc.Server, error) {
+ return testInternalNewServerWithPubsub(ctx, streamMgr, ns, nil, "", opts...)
}
type userType string
@@ -245,13 +244,13 @@
}
func startServerWS(t *testing.T, ctx *context.T, principal security.Principal, sm stream.Manager, ns namespace.T, name string, disp rpc.Dispatcher, shouldUseWebsocket websocketMode, opts ...rpc.ServerOpt) (naming.Endpoint, rpc.Server) {
- vlog.VI(1).Info("InternalNewServer")
+ ctx.VI(1).Info("InternalNewServer")
ctx, _ = v23.WithPrincipal(ctx, principal)
- server, err := testInternalNewServer(ctx, sm, ns, principal, opts...)
+ server, err := testInternalNewServer(ctx, sm, ns, opts...)
if err != nil {
t.Errorf("InternalNewServer failed: %v", err)
}
- vlog.VI(1).Info("server.Listen")
+ ctx.VI(1).Info("server.Listen")
spec := listenSpec
if shouldUseWebsocket {
spec = listenWSSpec
@@ -260,7 +259,7 @@
if err != nil {
t.Errorf("server.Listen failed: %v", err)
}
- vlog.VI(1).Info("server.Serve")
+ ctx.VI(1).Info("server.Serve")
if err := server.ServeDispatcher(name, disp); err != nil {
t.Errorf("server.ServeDispatcher failed: %v", err)
}
@@ -303,7 +302,7 @@
}
func stopServer(t *testing.T, ctx *context.T, server rpc.Server, ns namespace.T, name string) {
- vlog.VI(1).Info("server.Stop")
+ ctx.VI(1).Info("server.Stop")
new_name := "should_appear_in_mt/server"
verifyMount(t, ctx, ns, name)
@@ -325,7 +324,7 @@
if err == nil || verror.ErrorID(err) != verror.ErrBadState.ID {
t.Errorf("either no error, or a wrong error was returned: %v", err)
}
- vlog.VI(1).Info("server.Stop DONE")
+ ctx.VI(1).Info("server.Stop DONE")
}
// fakeWSName creates a name containing an endpoint address that forces
@@ -370,7 +369,7 @@
}
func createBundleWS(t *testing.T, ctx *context.T, server security.Principal, ts interface{}, shouldUseWebsocket websocketMode) (b bundle) {
- b.sm = imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+ b.sm = imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
b.ns = tnaming.NewSimpleNamespace()
b.name = "mountpoint/server"
if server != nil {
@@ -393,13 +392,13 @@
return verror.ErrorID(err) == id.ID
}
-func runServer(t *testing.T, ctx *context.T, ns namespace.T, principal security.Principal, name string, obj interface{}, opts ...rpc.ServerOpt) stream.Manager {
+func runServer(t *testing.T, ctx *context.T, ns namespace.T, name string, obj interface{}, opts ...rpc.ServerOpt) stream.Manager {
rid, err := naming.NewRoutingID()
if err != nil {
t.Fatal(err)
}
- sm := imanager.InternalNew(rid)
- server, err := testInternalNewServer(ctx, sm, ns, principal, opts...)
+ sm := imanager.InternalNew(ctx, rid)
+ server, err := testInternalNewServer(ctx, sm, ns, opts...)
if err != nil {
t.Fatal(err)
}
@@ -413,11 +412,12 @@
}
func TestMultipleCallsToServeAndName(t *testing.T) {
- sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
- ns := tnaming.NewSimpleNamespace()
ctx, shutdown := initForTest()
defer shutdown()
- server, err := testInternalNewServer(ctx, sm, ns, testutil.NewPrincipal("server"))
+ sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
+ ns := tnaming.NewSimpleNamespace()
+ sctx, _ := v23.WithPrincipal(ctx, testutil.NewPrincipal("server"))
+ server, err := testInternalNewServer(sctx, sm, ns)
if err != nil {
t.Errorf("InternalNewServer failed: %v", err)
}
@@ -504,7 +504,7 @@
bOther = bless(pprovider, pserver, "other")
bTwoBlessings, _ = security.UnionOfBlessings(bServer, bOther)
- mgr = imanager.InternalNew(naming.FixedRoutingID(0x1111111))
+ mgr = imanager.InternalNew(ctx, naming.FixedRoutingID(0x1111111))
ns = tnaming.NewSimpleNamespace()
tests = []struct {
server security.Blessings // blessings presented by the server to the client.
@@ -603,9 +603,11 @@
// somehow "takes over" the network endpoint (a naughty router
// perhaps), thus trying to steal traffic.
var (
- pclient = testutil.NewPrincipal("client")
- pserver = testutil.NewPrincipal("server")
- pattacker = testutil.NewPrincipal("attacker")
+ pclient = testutil.NewPrincipal("client")
+ pserver = testutil.NewPrincipal("server")
+ pattacker = testutil.NewPrincipal("attacker")
+ attackerCtx, _ = v23.WithPrincipal(ctx, pattacker)
+ cctx, _ = v23.WithPrincipal(ctx, pclient)
)
// Client recognizes both the server and the attacker's blessings.
// (Though, it doesn't need to do the latter for the purposes of this
@@ -615,13 +617,13 @@
// Start up the attacker's server.
attacker, err := testInternalNewServer(
- ctx,
- imanager.InternalNew(naming.FixedRoutingID(0xaaaaaaaaaaaaaaaa)),
+ attackerCtx,
+ imanager.InternalNew(ctx, naming.FixedRoutingID(0xaaaaaaaaaaaaaaaa)),
// (To prevent the attacker from legitimately mounting on the
// namespace that the client will use, provide it with a
// different namespace).
tnaming.NewSimpleNamespace(),
- pattacker)
+ )
if err != nil {
t.Fatal(err)
}
@@ -650,19 +652,19 @@
// (attacker's) server are not consistent with the ones registered in
// the mounttable trusted by the client.
client, err := InternalNewClient(
- imanager.InternalNew(naming.FixedRoutingID(0xcccccccccccccccc)),
+ imanager.InternalNew(cctx, naming.FixedRoutingID(0xcccccccccccccccc)),
ns)
if err != nil {
t.Fatal(err)
}
defer client.Close()
- ctx, _ = v23.WithPrincipal(ctx, pclient)
- if _, err := client.StartCall(ctx, "mountpoint/server", "Closure", nil); verror.ErrorID(err) != verror.ErrNotTrusted.ID {
+ if _, err := client.StartCall(cctx, "mountpoint/server", "Closure", nil); verror.ErrorID(err) != verror.ErrNotTrusted.ID {
t.Errorf("Got error %v (errorid=%v), want errorid=%v", err, verror.ErrorID(err), verror.ErrNotTrusted.ID)
}
// But the RPC should succeed if the client explicitly
// decided to skip server authorization.
- if _, err := client.StartCall(ctx, "mountpoint/server", "Closure", nil, options.SkipServerEndpointAuthorization{}); err != nil {
+ if _, err := client.StartCall(cctx, "mountpoint/server", "Closure", nil, options.SkipServerEndpointAuthorization{}); err != nil {
t.Errorf("Unexpected error(%v) when skipping server authorization", err)
}
}
@@ -737,7 +739,7 @@
ctx, _ = v23.WithPrincipal(ctx, pclient)
ctx = i18n.WithLangID(ctx, "foolang")
for _, test := range tests {
- vlog.VI(1).Infof("%s client.StartCall", name(test))
+ ctx.VI(1).Infof("%s client.StartCall", name(test))
vname := test.name
if shouldUseWebsocket {
var err error
@@ -753,7 +755,7 @@
continue
}
for _, sarg := range test.streamArgs {
- vlog.VI(1).Infof("%s client.Send(%v)", name(test), sarg)
+ ctx.VI(1).Infof("%s client.Send(%v)", name(test), sarg)
if err := call.Send(sarg); err != nil {
t.Errorf(`%s call.Send(%v) got unexpected error "%v"`, name(test), sarg, err)
}
@@ -766,7 +768,7 @@
}
}
if shouldCloseSend {
- vlog.VI(1).Infof("%s call.CloseSend", name(test))
+ ctx.VI(1).Infof("%s call.CloseSend", name(test))
// When the method does not involve streaming
// arguments, the server gets all the arguments in
// StartCall and then sends a response without
@@ -781,7 +783,7 @@
t.Errorf(`%s call.CloseSend got unexpected error "%v"`, name(test), err)
}
}
- vlog.VI(1).Infof("%s client.Finish", name(test))
+ ctx.VI(1).Infof("%s client.Finish", name(test))
results := makeResultPtrs(test.results)
err = call.Finish(results...)
if got, want := err, test.finishErr; (got == nil) != (want == nil) {
@@ -905,23 +907,27 @@
pserver = testutil.NewPrincipal("server")
pdischarger = testutil.NewPrincipal("discharger")
pclient = testutil.NewPrincipal("client")
- sm = imanager.InternalNew(naming.FixedRoutingID(0x555555555))
- ns = tnaming.NewSimpleNamespace()
+ pctx, _ = v23.WithPrincipal(ctx, pdischarger)
+ sctx, _ = v23.WithPrincipal(ctx, pserver)
- // Setup the client so that it shares a blessing with a third-party caveat with the server.
- setClientBlessings = func(req security.ThirdPartyRequirements) security.Principal {
- cav, err := security.NewPublicKeyCaveat(pdischarger.PublicKey(), "mountpoint/discharger", req, security.UnconstrainedUse())
- if err != nil {
- t.Fatalf("Failed to create ThirdPartyCaveat(%+v): %v", req, err)
- }
- b, err := pclient.BlessSelf("client_for_server", cav)
- if err != nil {
- t.Fatalf("BlessSelf failed: %v", err)
- }
- pclient.BlessingStore().Set(b, "server")
- return pclient
- }
+ sm = imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
+ ns = tnaming.NewSimpleNamespace()
)
+
+ // Set up the client so that it shares with the server a blessing carrying a third-party caveat.
+ setClientBlessings := func(req security.ThirdPartyRequirements) security.Principal {
+ cav, err := security.NewPublicKeyCaveat(pdischarger.PublicKey(), "mountpoint/discharger", req, security.UnconstrainedUse())
+ if err != nil {
+ t.Fatalf("Failed to create ThirdPartyCaveat(%+v): %v", req, err)
+ }
+ b, err := pclient.BlessSelf("client_for_server", cav)
+ if err != nil {
+ t.Fatalf("BlessSelf failed: %v", err)
+ }
+ pclient.BlessingStore().Set(b, "server")
+ return pclient
+ }
+
// Initialize the client principal.
// It trusts both the application server and the discharger.
pclient.AddToRoots(pserver.BlessingStore().Default())
@@ -929,7 +935,7 @@
// Setup the discharge server.
var tester dischargeTestServer
- dischargeServer, err := testInternalNewServer(ctx, sm, ns, pdischarger)
+ dischargeServer, err := testInternalNewServer(pctx, sm, ns)
if err != nil {
t.Fatal(err)
}
@@ -942,7 +948,7 @@
}
// Setup the application server.
- appServer, err := testInternalNewServer(ctx, sm, ns, pserver)
+ appServer, err := testInternalNewServer(sctx, sm, ns)
if err != nil {
t.Fatal(err)
}
@@ -990,15 +996,15 @@
for _, test := range tests {
pclient := setClientBlessings(test.Requirements)
- ctx, _ = v23.WithPrincipal(ctx, pclient)
+ cctx, _ := v23.WithPrincipal(ctx, pclient)
client, err := InternalNewClient(sm, ns)
if err != nil {
t.Fatalf("InternalNewClient(%+v) failed: %v", test.Requirements, err)
}
defer client.Close()
- tid := vtrace.GetSpan(ctx).Trace()
+ tid := vtrace.GetSpan(cctx).Trace()
// StartCall should fetch the discharge; finishing the RPC is irrelevant to this test.
- if _, err := client.StartCall(ctx, object, "Method", []interface{}{"argument"}); err != nil {
+ if _, err := client.StartCall(cctx, object, "Method", []interface{}{"argument"}); err != nil {
t.Errorf("StartCall(%+v) failed: %v", test.Requirements, err)
continue
}
@@ -1029,6 +1035,9 @@
}
func TestRPCClientAuthorization(t *testing.T) {
+ ctx, shutdown := initForTest()
+ defer shutdown()
+
type v []interface{}
var (
// Principals
@@ -1055,7 +1064,7 @@
bClient = pclient.BlessingStore().Default()
bRandom, _ = pclient.BlessSelf("random")
- mgr = imanager.InternalNew(naming.FixedRoutingID(0x1111111))
+ mgr = imanager.InternalNew(ctx, naming.FixedRoutingID(0x1111111))
ns = tnaming.NewSimpleNamespace()
tests = []struct {
blessings security.Blessings // Blessings used by the client
@@ -1120,8 +1129,6 @@
}
)
- ctx, shutdown := initForTest()
- defer shutdown()
// Start the main server.
_, server := startServer(t, ctx, pserver, mgr, ns, serverName, testServerDisp{&testServer{}})
defer stopServer(t, ctx, server, ns, serverName)
@@ -1264,7 +1271,7 @@
pprovider, pclient, pserver = testutil.NewPrincipal("root"), testutil.NewPrincipal("client"), testutil.NewPrincipal("server")
pdischarger = pprovider
- mgr = imanager.InternalNew(naming.FixedRoutingID(0x1111111))
+ mgr = imanager.InternalNew(ctx, naming.FixedRoutingID(0x1111111))
ns = tnaming.NewSimpleNamespace()
tpCav = mkThirdPartyCaveat(pdischarger.PublicKey(), "mountpoint/dischargeserver", mkCaveat(security.NewExpiryCaveat(time.Now().Add(time.Hour))))
@@ -1545,13 +1552,14 @@
func TestPreferredAddress(t *testing.T) {
ctx, shutdown := initForTest()
defer shutdown()
- sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+ sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
defer sm.Shutdown()
ns := tnaming.NewSimpleNamespace()
pa := netstate.AddressChooserFunc(func(string, []net.Addr) ([]net.Addr, error) {
return []net.Addr{netstate.NewNetAddr("tcp", "1.1.1.1")}, nil
})
- server, err := testInternalNewServer(ctx, sm, ns, testutil.NewPrincipal("server"))
+ ctx, _ = v23.WithPrincipal(ctx, testutil.NewPrincipal("server"))
+ server, err := testInternalNewServer(ctx, sm, ns)
if err != nil {
t.Errorf("InternalNewServer failed: %v", err)
}
@@ -1588,13 +1596,14 @@
func TestPreferredAddressErrors(t *testing.T) {
ctx, shutdown := initForTest()
defer shutdown()
- sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+ sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
defer sm.Shutdown()
ns := tnaming.NewSimpleNamespace()
paerr := netstate.AddressChooserFunc(func(_ string, a []net.Addr) ([]net.Addr, error) {
return nil, fmt.Errorf("oops")
})
- server, err := testInternalNewServer(ctx, sm, ns, testutil.NewPrincipal("server"))
+ ctx, _ = v23.WithPrincipal(ctx, testutil.NewPrincipal("server"))
+ server, err := testInternalNewServer(ctx, sm, ns)
if err != nil {
t.Errorf("InternalNewServer failed: %v", err)
}
@@ -1620,7 +1629,7 @@
func TestSecurityNone(t *testing.T) {
ctx, shutdown := initForTest()
defer shutdown()
- sm := imanager.InternalNew(naming.FixedRoutingID(0x66666666))
+ sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x66666666))
defer sm.Shutdown()
ns := tnaming.NewSimpleNamespace()
- server, err := testInternalNewServer(ctx, sm, ns, nil, options.SecurityNone)
+ server, err := testInternalNewServer(ctx, sm, ns, options.SecurityNone)
@@ -1652,10 +1661,11 @@
func TestNoPrincipal(t *testing.T) {
ctx, shutdown := initForTest()
defer shutdown()
- sm := imanager.InternalNew(naming.FixedRoutingID(0x66666666))
+ sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x66666666))
defer sm.Shutdown()
ns := tnaming.NewSimpleNamespace()
- server, err := testInternalNewServer(ctx, sm, ns, testutil.NewPrincipal("server"))
+ ctx, _ = v23.WithPrincipal(ctx, testutil.NewPrincipal("server"))
+ server, err := testInternalNewServer(ctx, sm, ns)
if err != nil {
t.Fatalf("InternalNewServer failed: %v", err)
}
@@ -1683,7 +1693,9 @@
}
func TestCallWithNilContext(t *testing.T) {
- sm := imanager.InternalNew(naming.FixedRoutingID(0x66666666))
+ ctx, shutdown := initForTest()
+ defer shutdown()
+ sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x66666666))
defer sm.Shutdown()
ns := tnaming.NewSimpleNamespace()
client, err := InternalNewClient(sm, ns)
@@ -1700,13 +1712,17 @@
}
func TestServerBlessingsOpt(t *testing.T) {
+ ctx, shutdown := initForTest()
+ defer shutdown()
+
var (
pserver = testutil.NewPrincipal("server")
pclient = testutil.NewPrincipal("client")
batman, _ = pserver.BlessSelf("batman")
+ cctx, _ = v23.WithPrincipal(ctx, pclient)
+ sctx, _ = v23.WithPrincipal(ctx, pserver)
)
- ctx, shutdown := initForTest()
- defer shutdown()
+
// Client and server recognize the servers blessings
for _, p := range []security.Principal{pserver, pclient} {
if err := p.AddToRoots(pserver.BlessingStore().Default()); err != nil {
@@ -1720,12 +1736,12 @@
// to act as batman (as opposed to using the default blessing).
ns := tnaming.NewSimpleNamespace()
- defer runServer(t, ctx, ns, pserver, "mountpoint/batman", &testServer{}, options.ServerBlessings{batman}).Shutdown()
- defer runServer(t, ctx, ns, pserver, "mountpoint/default", &testServer{}).Shutdown()
+ defer runServer(t, sctx, ns, "mountpoint/batman", &testServer{}, options.ServerBlessings{batman}).Shutdown()
+ defer runServer(t, sctx, ns, "mountpoint/default", &testServer{}).Shutdown()
// And finally, make an RPC and see that the client sees "batman"
runClient := func(server string) ([]string, error) {
- smc := imanager.InternalNew(naming.FixedRoutingID(0xc))
+ smc := imanager.InternalNew(ctx, naming.FixedRoutingID(0xc))
defer smc.Shutdown()
client, err := InternalNewClient(
smc,
@@ -1734,8 +1750,8 @@
return nil, err
}
defer client.Close()
- ctx, _ = v23.WithPrincipal(ctx, pclient)
- call, err := client.StartCall(ctx, server, "Closure", nil)
+ call, err := client.StartCall(cctx, server, "Closure", nil)
if err != nil {
return nil, err
}
@@ -1754,13 +1770,17 @@
}
func TestNoDischargesOpt(t *testing.T) {
+ ctx, shutdown := initForTest()
+ defer shutdown()
var (
pdischarger = testutil.NewPrincipal("discharger")
pserver = testutil.NewPrincipal("server")
pclient = testutil.NewPrincipal("client")
+ cctx, _ = v23.WithPrincipal(ctx, pclient)
+ sctx, _ = v23.WithPrincipal(ctx, pserver)
+ pctx, _ = v23.WithPrincipal(ctx, pdischarger)
)
- ctx, shutdown := initForTest()
- defer shutdown()
+
// Make the client recognize all server blessings
if err := pclient.AddToRoots(pserver.BlessingStore().Default()); err != nil {
t.Fatal(err)
@@ -1783,15 +1803,15 @@
// Set up the discharger and test server.
discharger := &dischargeServer{}
- defer runServer(t, ctx, ns, pdischarger, "mountpoint/discharger", discharger).Shutdown()
- defer runServer(t, ctx, ns, pserver, "mountpoint/testServer", &testServer{}).Shutdown()
+ defer runServer(t, pctx, ns, "mountpoint/discharger", discharger).Shutdown()
+ defer runServer(t, sctx, ns, "mountpoint/testServer", &testServer{}).Shutdown()
runClient := func(noDischarges bool) {
rid, err := naming.NewRoutingID()
if err != nil {
t.Fatal(err)
}
- smc := imanager.InternalNew(rid)
+ smc := imanager.InternalNew(ctx, rid)
defer smc.Shutdown()
client, err := InternalNewClient(smc, ns)
if err != nil {
@@ -1802,8 +1822,7 @@
if noDischarges {
opts = append(opts, NoDischarges{})
}
- ctx, _ = v23.WithPrincipal(ctx, pclient)
- if _, err = client.StartCall(ctx, "mountpoint/testServer", "Closure", nil, opts...); err != nil {
+ if _, err = client.StartCall(cctx, "mountpoint/testServer", "Closure", nil, opts...); err != nil {
t.Fatalf("failed to StartCall: %v", err)
}
}
@@ -1820,14 +1839,18 @@
}
func TestNoImplicitDischargeFetching(t *testing.T) {
+ ctx, shutdown := initForTest()
+ defer shutdown()
// This test ensures that discharge clients fetch discharges only for the specified third-party caveats, not their own.
var (
pdischarger1 = testutil.NewPrincipal("discharger1")
pdischarger2 = testutil.NewPrincipal("discharger2")
pdischargeClient = testutil.NewPrincipal("dischargeClient")
+ p1ctx, _ = v23.WithPrincipal(ctx, pdischarger1)
+ p2ctx, _ = v23.WithPrincipal(ctx, pdischarger2)
+ cctx, _ = v23.WithPrincipal(ctx, pdischargeClient)
)
- ctx, shutdown := initForTest()
- defer shutdown()
+
// Bless the client with a ThirdPartyCaveat from discharger1.
tpcav1 := mkThirdPartyCaveat(pdischarger1.PublicKey(), "mountpoint/discharger1", mkCaveat(security.NewExpiryCaveat(time.Now().Add(time.Hour))))
blessings, err := pdischarger1.Bless(pdischargeClient.PublicKey(), pdischarger1.BlessingStore().Default(), "tpcav1", tpcav1)
@@ -1846,14 +1869,14 @@
// Set up the discharger and test server.
discharger1 := &dischargeServer{}
discharger2 := &dischargeServer{}
- defer runServer(t, ctx, ns, pdischarger1, "mountpoint/discharger1", discharger1).Shutdown()
- defer runServer(t, ctx, ns, pdischarger2, "mountpoint/discharger2", discharger2).Shutdown()
+ defer runServer(t, p1ctx, ns, "mountpoint/discharger1", discharger1).Shutdown()
+ defer runServer(t, p2ctx, ns, "mountpoint/discharger2", discharger2).Shutdown()
rid, err := naming.NewRoutingID()
if err != nil {
t.Fatal(err)
}
- sm := imanager.InternalNew(rid)
+ sm := imanager.InternalNew(ctx, rid)
c, err := InternalNewClient(sm, ns)
if err != nil {
@@ -1864,8 +1887,7 @@
if err != nil {
t.Error(err)
}
- ctx, _ = v23.WithPrincipal(ctx, pdischargeClient)
- dc.PrepareDischarges(ctx, []security.Caveat{tpcav2}, security.DischargeImpetus{})
+ dc.PrepareDischarges(cctx, []security.Caveat{tpcav2}, security.DischargeImpetus{})
// Ensure that discharger1 was not called and discharger2 was called.
if discharger1.called {
@@ -1879,12 +1901,15 @@
// TestBlessingsCache tests that the VCCache successfully caches the
// blessings of duplicate calls.
func TestBlessingsCache(t *testing.T) {
+ ctx, shutdown := initForTest()
+ defer shutdown()
var (
pserver = testutil.NewPrincipal("server")
pclient = testutil.NewPrincipal("client")
+ cctx, _ = v23.WithPrincipal(ctx, pclient)
+ sctx, _ = v23.WithPrincipal(ctx, pserver)
)
- ctx, shutdown := initForTest()
- defer shutdown()
+
// Make the client recognize all server blessings
if err := pclient.AddToRoots(pserver.BlessingStore().Default()); err != nil {
t.Fatal(err)
@@ -1892,18 +1917,16 @@
ns := tnaming.NewSimpleNamespace()
- serverSM := runServer(t, ctx, ns, pserver, "mountpoint/testServer", &testServer{})
+ serverSM := runServer(t, sctx, ns, "mountpoint/testServer", &testServer{})
defer serverSM.Shutdown()
rid := serverSM.RoutingID()
- ctx, _ = v23.WithPrincipal(ctx, pclient)
-
newClient := func() rpc.Client {
rid, err := naming.NewRoutingID()
if err != nil {
t.Fatal(err)
}
- smc := imanager.InternalNew(rid)
+ smc := imanager.InternalNew(sctx, rid)
defer smc.Shutdown()
client, err := InternalNewClient(smc, ns)
if err != nil {
@@ -1913,7 +1936,7 @@
}
runClient := func(client rpc.Client) {
- if err := client.Call(ctx, "mountpoint/testServer", "Closure", nil, nil); err != nil {
+ if err := client.Call(cctx, "mountpoint/testServer", "Closure", nil, nil); err != nil {
t.Fatalf("failed to Call: %v", err)
}
}
@@ -1967,20 +1990,23 @@
}
func TestServerPublicKeyOpt(t *testing.T) {
+ ctx, shutdown := initForTest()
+ defer shutdown()
var (
pserver = testutil.NewPrincipal("server")
pother = testutil.NewPrincipal("other")
pclient = testutil.NewPrincipal("client")
+ cctx, _ = v23.WithPrincipal(ctx, pclient)
+ sctx, _ = v23.WithPrincipal(ctx, pserver)
)
- ctx, shutdown := initForTest()
- defer shutdown()
+
ns := tnaming.NewSimpleNamespace()
mountName := "mountpoint/default"
// Start a server with pserver.
- defer runServer(t, ctx, ns, pserver, mountName, &testServer{}).Shutdown()
+ defer runServer(t, sctx, ns, mountName, &testServer{}).Shutdown()
- smc := imanager.InternalNew(naming.FixedRoutingID(0xc))
+ smc := imanager.InternalNew(sctx, naming.FixedRoutingID(0xc))
client, err := InternalNewClient(smc, ns)
if err != nil {
t.Fatal(err)
@@ -1988,13 +2014,12 @@
defer smc.Shutdown()
defer client.Close()
- ctx, _ = v23.WithPrincipal(ctx, pclient)
// The call should succeed when the server presents the same public as the opt...
- if _, err = client.StartCall(ctx, mountName, "Closure", nil, options.SkipServerEndpointAuthorization{}, options.ServerPublicKey{pserver.PublicKey()}); err != nil {
+ if _, err = client.StartCall(cctx, mountName, "Closure", nil, options.SkipServerEndpointAuthorization{}, options.ServerPublicKey{pserver.PublicKey()}); err != nil {
t.Errorf("Expected call to succeed but got %v", err)
}
// ...but fail if they differ.
- if _, err = client.StartCall(ctx, mountName, "Closure", nil, options.SkipServerEndpointAuthorization{}, options.ServerPublicKey{pother.PublicKey()}); verror.ErrorID(err) != verror.ErrNotTrusted.ID {
+ if _, err = client.StartCall(cctx, mountName, "Closure", nil, options.SkipServerEndpointAuthorization{}, options.ServerPublicKey{pother.PublicKey()}); verror.ErrorID(err) != verror.ErrNotTrusted.ID {
t.Errorf("got %v, want %v", verror.ErrorID(err), verror.ErrNotTrusted.ID)
}
}
@@ -2035,24 +2060,26 @@
tpcav = mkThirdPartyCaveat(pdischarger.PublicKey(), "mountpoint/discharger", mkCaveat(security.NewExpiryCaveat(time.Now().Add(time.Hour))))
ns = tnaming.NewSimpleNamespace()
discharger = &expiryDischarger{}
+ pctx, _ = v23.WithPrincipal(ctx, pdischarger)
)
+ ctx, _ = v23.WithPrincipal(ctx, pclient)
// Set up the discharge server.
- defer runServer(t, ctx, ns, pdischarger, "mountpoint/discharger", discharger).Shutdown()
+ defer runServer(t, pctx, ns, "mountpoint/discharger", discharger).Shutdown()
// Create a discharge client.
rid, err := naming.NewRoutingID()
if err != nil {
t.Fatal(err)
}
- smc := imanager.InternalNew(rid)
+ smc := imanager.InternalNew(ctx, rid)
defer smc.Shutdown()
client, err := InternalNewClient(smc, ns)
if err != nil {
t.Fatalf("failed to create client: %v", err)
}
defer client.Close()
- ctx, _ = v23.WithPrincipal(ctx, pclient)
+
dc := InternalNewDischargeClient(ctx, client, 0)
// Fetch discharges for tpcav.
diff --git a/runtime/internal/rpc/options.go b/runtime/internal/rpc/options.go
index d046e32..4971e1c 100644
--- a/runtime/internal/rpc/options.go
+++ b/runtime/internal/rpc/options.go
@@ -78,10 +78,18 @@
return false
}
-func getVCOpts(opts []rpc.CallOpt) (vcOpts []stream.VCOpt) {
+func translateVCOpts(opts []rpc.CallOpt) (vcOpts []stream.VCOpt) {
for _, o := range opts {
- if v, ok := o.(stream.VCOpt); ok {
+ switch v := o.(type) {
+ case stream.VCOpt:
vcOpts = append(vcOpts, v)
+ case options.SecurityLevel:
+ switch v {
+ case options.SecurityNone:
+ vcOpts = append(vcOpts, stream.AuthenticatedVC(false))
+ case options.SecurityConfidential:
+ vcOpts = append(vcOpts, stream.AuthenticatedVC(true))
+ }
}
}
return
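
translateVCOpts does more than the getVCOpts it replaces: besides passing through stream.VCOpt values, it folds options.SecurityLevel into a VC-level authentication switch. A usage sketch:

// A caller passing options.SecurityNone...
vcOpts := translateVCOpts([]rpc.CallOpt{options.SecurityNone})
// ...now receives stream.AuthenticatedVC(false) among the VC options, so
// the dialed VC skips authentication; options.SecurityConfidential yields
// stream.AuthenticatedVC(true) instead.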
diff --git a/runtime/internal/rpc/protocols/tcp/init.go b/runtime/internal/rpc/protocols/tcp/init.go
index 23ec14d..55897a1 100644
--- a/runtime/internal/rpc/protocols/tcp/init.go
+++ b/runtime/internal/rpc/protocols/tcp/init.go
@@ -5,11 +5,10 @@
package tcp
import (
+ "fmt"
"net"
"time"
- "v.io/x/lib/vlog"
-
"v.io/v23/rpc"
"v.io/x/ref/runtime/internal/lib/tcputil"
@@ -62,7 +61,7 @@
return nil, err
}
if err := tcputil.EnableTCPKeepAlive(conn); err != nil {
- vlog.Errorf("Failed to enable TCP keep alive: %v", err)
+ return nil, fmt.Errorf("Failed to enable TCP keep alive: %v", err)
}
return conn, nil
}
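
This hunk changes behavior, not just logging: a keep-alive failure used to be logged and tolerated, and now fails the dial outright. The resulting control flow in full (net.DialTimeout stands in for the surrounding dial logic, which this hunk does not show):

conn, err := net.DialTimeout("tcp", address, timeout)
if err != nil {
	return nil, err
}
if err := tcputil.EnableTCPKeepAlive(conn); err != nil {
	// Previously logged via vlog.Errorf and ignored; the connection
	// was returned anyway.
	return nil, fmt.Errorf("Failed to enable TCP keep alive: %v", err)
}
return conn, nil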
diff --git a/runtime/internal/rpc/reserved.go b/runtime/internal/rpc/reserved.go
index 57b582f..94b30f8 100644
--- a/runtime/internal/rpc/reserved.go
+++ b/runtime/internal/rpc/reserved.go
@@ -16,7 +16,6 @@
"v.io/v23/vdl"
"v.io/v23/vdlroot/signature"
"v.io/v23/verror"
- "v.io/x/lib/vlog"
"v.io/x/ref/lib/apilog"
"v.io/x/ref/lib/glob"
@@ -191,7 +190,7 @@
const maxRecursiveGlobDepth = 10
func (i *globInternal) Glob(ctx *context.T, call rpc.StreamServerCall, pattern string) error {
- vlog.VI(3).Infof("rpc Glob: Incoming request: %q.Glob(%q)", i.receiver, pattern)
+ ctx.VI(3).Infof("rpc Glob: Incoming request: %q.Glob(%q)", i.receiver, pattern)
g, err := glob.Parse(pattern)
if err != nil {
return err
@@ -228,7 +227,7 @@
subcall := callWithSuffix(ctx, call, naming.Join(i.receiver, state.name))
suffix := subcall.Suffix()
if state.depth > maxRecursiveGlobDepth {
- vlog.Errorf("rpc Glob: exceeded recursion limit (%d): %q", maxRecursiveGlobDepth, suffix)
+ ctx.Errorf("rpc Glob: exceeded recursion limit (%d): %q", maxRecursiveGlobDepth, suffix)
call.Send(naming.GlobReplyError{
naming.GlobError{Name: state.name, Error: reserved.NewErrGlobMaxRecursionReached(ctx)},
})
@@ -236,14 +235,14 @@
}
obj, auth, err := disp.Lookup(suffix)
if err != nil {
- vlog.VI(3).Infof("rpc Glob: Lookup failed for %q: %v", suffix, err)
+ ctx.VI(3).Infof("rpc Glob: Lookup failed for %q: %v", suffix, err)
call.Send(naming.GlobReplyError{
naming.GlobError{Name: state.name, Error: verror.Convert(verror.ErrNoExist, ctx, err)},
})
continue
}
if obj == nil {
- vlog.VI(3).Infof("rpc Glob: object not found for %q", suffix)
+ ctx.VI(3).Infof("rpc Glob: object not found for %q", suffix)
call.Send(naming.GlobReplyError{
naming.GlobError{Name: state.name, Error: verror.New(verror.ErrNoExist, ctx, "nil object")},
})
@@ -253,7 +252,7 @@
// Verify that the requester is authorized for the current object.
if err := authorize(ctx, call.Security(), auth); err != nil {
someMatchesOmitted = true
- vlog.VI(3).Infof("rpc Glob: client is not authorized for %q: %v", suffix, err)
+ ctx.VI(3).Infof("rpc Glob: client is not authorized for %q: %v", suffix, err)
continue
}
@@ -261,7 +260,7 @@
// use AllGlobber.
invoker, err := objectToInvoker(obj)
if err != nil {
- vlog.VI(3).Infof("rpc Glob: object for %q cannot be converted to invoker: %v", suffix, err)
+ ctx.VI(3).Infof("rpc Glob: object for %q cannot be converted to invoker: %v", suffix, err)
call.Send(naming.GlobReplyError{
naming.GlobError{Name: state.name, Error: verror.Convert(verror.ErrInternal, ctx, err)},
})
@@ -279,10 +278,10 @@
continue
}
if gs.AllGlobber != nil {
- vlog.VI(3).Infof("rpc Glob: %q implements AllGlobber", suffix)
+ ctx.VI(3).Infof("rpc Glob: %q implements AllGlobber", suffix)
ch, err := gs.AllGlobber.Glob__(ctx, subcall, state.glob.String())
if err != nil {
- vlog.VI(3).Infof("rpc Glob: %q.Glob(%q) failed: %v", suffix, state.glob, err)
+ ctx.VI(3).Infof("rpc Glob: %q.Glob(%q) failed: %v", suffix, state.glob, err)
subcall.Send(naming.GlobReplyError{naming.GlobError{Name: state.name, Error: verror.Convert(verror.ErrInternal, ctx, err)}})
continue
}
@@ -301,7 +300,7 @@
}
continue
}
- vlog.VI(3).Infof("rpc Glob: %q implements ChildrenGlobber", suffix)
+ ctx.VI(3).Infof("rpc Glob: %q implements ChildrenGlobber", suffix)
children, err := gs.ChildrenGlobber.GlobChildren__(ctx, subcall)
// The requested object doesn't exist.
if err != nil {
@@ -323,7 +322,7 @@
}
for child := range children {
if len(child) == 0 || strings.Contains(child, "/") {
- vlog.Errorf("rpc Glob: %q.GlobChildren__() sent an invalid child name: %q", suffix, child)
+ ctx.Errorf("rpc Glob: %q.GlobChildren__() sent an invalid child name: %q", suffix, child)
continue
}
if ok, _, left := state.glob.MatchInitialSegment(child); ok {
diff --git a/runtime/internal/rpc/server.go b/runtime/internal/rpc/server.go
index fdaa655..3ca1ee8 100644
--- a/runtime/internal/rpc/server.go
+++ b/runtime/internal/rpc/server.go
@@ -16,8 +16,8 @@
"v.io/x/lib/netstate"
"v.io/x/lib/pubsub"
- "v.io/x/lib/vlog"
+ "v.io/v23"
"v.io/v23/context"
"v.io/v23/i18n"
"v.io/v23/namespace"
@@ -53,6 +53,7 @@
errInternalTypeConversion = reg(".errInternalTypeConversion", "failed to convert {3} to v.io/x/ref/runtime/internal/naming.Endpoint")
errFailedToParseIP = reg(".errFailedToParseIP", "failed to parse {3} as an IP host")
errUnexpectedSuffix = reg(".errUnexpectedSuffix", "suffix {3} was not expected because either server has the option IsLeaf set to true or it served an object and not a dispatcher")
+ errNoListeners = reg(".errNoListeners", "failed to ceate any listeners{:3}")
)
// state for each requested listen address
@@ -180,22 +181,20 @@
settingsPublisher *pubsub.Publisher,
settingsName string,
client rpc.Client,
- principal security.Principal,
opts ...rpc.ServerOpt) (rpc.Server, error) {
ctx, cancel := context.WithRootCancel(ctx)
ctx, _ = vtrace.WithNewSpan(ctx, "NewServer")
statsPrefix := naming.Join("rpc", "server", "routing-id", streamMgr.RoutingID().String())
s := &server{
- ctx: ctx,
- cancel: cancel,
- streamMgr: streamMgr,
- principal: principal,
- publisher: publisher.New(ctx, ns, publishPeriod),
- listenState: make(map[*listenState]struct{}),
- listeners: make(map[stream.Listener]struct{}),
- proxies: make(map[string]proxyState),
- stoppedChan: make(chan struct{}),
- ipNets: ipNetworks(),
+ ctx: ctx,
+ cancel: cancel,
+ streamMgr: streamMgr,
+ publisher: publisher.New(ctx, ns, publishPeriod),
+ listenState: make(map[*listenState]struct{}),
+ listeners: make(map[stream.Listener]struct{}),
+ proxies: make(map[string]proxyState),
+ stoppedChan: make(chan struct{}),
+
ns: ns,
stats: newRPCStats(statsPrefix),
settingsPublisher: settingsPublisher,
@@ -205,6 +204,12 @@
dischargeExpiryBuffer = vc.DefaultServerDischargeExpiryBuffer
securityLevel options.SecurityLevel
)
+ ipNets, err := ipNetworks()
+ if err != nil {
+ return nil, err
+ }
+ s.ipNets = ipNets
+
for _, opt := range opts {
switch opt := opt.(type) {
case stream.ListenerOpt:
@@ -226,28 +231,36 @@
s.preferredProtocols = []string(opt)
case options.SecurityLevel:
securityLevel = opt
+
}
}
- if s.blessings.IsZero() && principal != nil {
- s.blessings = principal.BlessingStore().Default()
- }
+
+ authenticateVC := true
+
if securityLevel == options.SecurityNone {
- s.principal = nil
+ authenticateVC = false
s.blessings = security.Blessings{}
s.dispReserved = nil
}
+ if authenticateVC {
+ s.principal = v23.GetPrincipal(ctx)
+ if s.blessings.IsZero() && s.principal != nil {
+ s.blessings = s.principal.BlessingStore().Default()
+ }
+ }
+
// Make dischargeExpiryBuffer shorter than the VC discharge buffer to ensure we have fetched
- // the discharges by the time the VC asks for them.`
+ // the discharges by the time the VC asks for them.
s.dc = InternalNewDischargeClient(ctx, client, dischargeExpiryBuffer-(5*time.Second))
s.listenerOpts = append(s.listenerOpts, s.dc)
- s.listenerOpts = append(s.listenerOpts, vc.DialContext{ctx})
+ s.listenerOpts = append(s.listenerOpts, stream.AuthenticatedVC(authenticateVC))
blessingsStatsName := naming.Join(statsPrefix, "security", "blessings")
// TODO(caprita): revisit printing the blessings with %s, and
// instead expose them as a list.
stats.NewString(blessingsStatsName).Set(fmt.Sprintf("%s", s.blessings))
- if principal != nil {
+ if s.principal != nil {
stats.NewStringFunc(blessingsStatsName, func() string {
- return fmt.Sprintf("%s (default)", principal.BlessingStore().Default())
+ return fmt.Sprintf("%s (default)", s.principal.BlessingStore().Default())
})
}
return s, nil
@@ -403,10 +416,10 @@
protocol: addr.Protocol,
address: addr.Address,
}
- ls.ln, ls.lep, ls.lnerr = s.streamMgr.Listen(addr.Protocol, addr.Address, s.principal, s.blessings, s.listenerOpts...)
+ ls.ln, ls.lep, ls.lnerr = s.streamMgr.Listen(s.ctx, addr.Protocol, addr.Address, s.blessings, s.listenerOpts...)
lnState = append(lnState, ls)
if ls.lnerr != nil {
- vlog.VI(2).Infof("Listen(%q, %q, ...) failed: %v", addr.Protocol, addr.Address, ls.lnerr)
+ s.ctx.VI(2).Infof("Listen(%q, %q, ...) failed: %v", addr.Protocol, addr.Address, ls.lnerr)
continue
}
ls.ieps, ls.port, ls.roaming, ls.eperr = s.createEndpoints(ls.lep, listenSpec.AddressChooser)
@@ -418,14 +431,18 @@
}
found := false
+ var lastErr error
for _, ls := range lnState {
if ls.ln != nil {
found = true
break
}
+ if ls.lnerr != nil {
+ lastErr = ls.lnerr
+ }
}
if !found && !useProxy {
- return nil, verror.New(verror.ErrBadArg, s.ctx, "failed to create any listeners")
+ return nil, verror.New(verror.ErrBadArg, s.ctx, verror.New(errNoListeners, s.ctx, lastErr))
}
if roaming && s.dhcpState == nil && s.settingsPublisher != nil {
@@ -475,7 +492,7 @@
return nil, nil, verror.New(errFailedToResolveProxy, s.ctx, proxy, err)
}
opts := append([]stream.ListenerOpt{proxyAuth{s}}, s.listenerOpts...)
- ln, ep, err := s.streamMgr.Listen(inaming.Network, resolved, s.principal, s.blessings, opts...)
+ ln, ep, err := s.streamMgr.Listen(s.ctx, inaming.Network, resolved, s.blessings, opts...)
if err != nil {
return nil, nil, verror.New(errFailedToListenForProxy, s.ctx, resolved, err)
}
@@ -501,7 +518,7 @@
iep, ln, err := s.reconnectAndPublishProxy(proxy)
if err != nil {
- vlog.Errorf("Failed to connect to proxy: %s", err)
+ s.ctx.Errorf("Failed to connect to proxy: %s", err)
}
// the initial connection may have failed, but we enter the retry
// loop anyway so that we will continue to try and connect to the
@@ -552,9 +569,9 @@
}
// (3) reconnect, publish new address
if iep, ln, err = s.reconnectAndPublishProxy(proxy); err != nil {
- vlog.Errorf("Failed to reconnect to proxy %q: %s", proxy, err)
+ s.ctx.Errorf("Failed to reconnect to proxy %q: %s", proxy, err)
} else {
- vlog.VI(1).Infof("Reconnected to proxy %q, %s", proxy, iep)
+ s.ctx.VI(1).Infof("Reconnected to proxy %q, %s", proxy, iep)
break
}
}
@@ -588,7 +605,7 @@
}
func (s *server) listenLoop(ln stream.Listener, ep naming.Endpoint) error {
- defer vlog.VI(1).Infof("rpc: Stopped listening on %s", ep)
+ defer s.ctx.VI(1).Infof("rpc: Stopped listening on %s", ep)
var calls sync.WaitGroup
if !s.addListener(ln) {
@@ -603,7 +620,7 @@
for {
flow, err := ln.Accept()
if err != nil {
- vlog.VI(10).Infof("rpc: Accept on %v failed: %v", ep, err)
+ s.ctx.VI(10).Infof("rpc: Accept on %v failed: %v", ep, err)
return err
}
calls.Add(1)
@@ -611,7 +628,7 @@
defer calls.Done()
fs, err := newFlowServer(flow, s)
if err != nil {
- vlog.VI(1).Infof("newFlowServer on %v failed: %v", ep, err)
+ s.ctx.VI(1).Infof("newFlowServer on %v failed: %v", ep, err)
return
}
if err := fs.serve(); err != nil {
@@ -620,7 +637,7 @@
// TODO(cnicolaou): revisit this when verror2 transition is
// done.
if err != io.EOF {
- vlog.VI(2).Infof("Flow.serve on %v failed: %v", ep, err)
+ s.ctx.VI(2).Infof("Flow.serve on %v failed: %v", ep, err)
}
}
}(flow)
@@ -628,8 +645,8 @@
}
func (s *server) dhcpLoop(ch chan pubsub.Setting) {
- defer vlog.VI(1).Infof("rpc: Stopped listen for dhcp changes")
- vlog.VI(2).Infof("rpc: dhcp loop")
+ defer s.ctx.VI(1).Infof("rpc: Stopped listen for dhcp changes")
+ s.ctx.VI(2).Infof("rpc: dhcp loop")
for setting := range ch {
if setting == nil {
return
@@ -653,7 +670,7 @@
change.Changed, change.Error = s.removeAddresses(v)
change.RemovedAddrs = v
}
- vlog.VI(2).Infof("rpc: dhcp: change %v", change)
+ s.ctx.VI(2).Infof("rpc: dhcp: change %v", change)
for ch, _ := range s.dhcpState.watchers {
select {
case ch <- change:
@@ -662,7 +679,7 @@
}
s.Unlock()
default:
- vlog.Errorf("rpc: dhcpLoop: unhandled setting type %T", v)
+ s.ctx.Errorf("rpc: dhcpLoop: unhandled setting type %T", v)
}
}
}
@@ -691,7 +708,7 @@
lnHost = iep.Address
}
if lnHost == host {
- vlog.VI(2).Infof("rpc: dhcp removing: %s", iep)
+ s.ctx.VI(2).Infof("rpc: dhcp removing: %s", iep)
removed = append(removed, iep)
s.publisher.RemoveServer(iep.String())
continue
@@ -729,7 +746,7 @@
niep.IsMountTable = s.servesMountTable
niep.IsLeaf = s.isLeaf
ls.ieps = append(ls.ieps, &niep)
- vlog.VI(2).Infof("rpc: dhcp adding: %s", niep)
+ s.ctx.VI(2).Infof("rpc: dhcp adding: %s", niep)
s.publisher.AddServer(niep.String())
added = append(added, &niep)
}
@@ -827,8 +844,8 @@
func (s *server) Stop() error {
defer apilog.LogCall(nil)(nil) // gologcop: DO NOT EDIT, MUST BE FIRST STATEMENT
serverDebug := fmt.Sprintf("Dispatcher: %T, Status:[%v]", s.disp, s.Status())
- vlog.VI(1).Infof("Stop: %s", serverDebug)
- defer vlog.VI(1).Infof("Stop done: %s", serverDebug)
+ s.ctx.VI(1).Infof("Stop: %s", serverDebug)
+ defer s.ctx.VI(1).Infof("Stop done: %s", serverDebug)
s.Lock()
if s.isStopState() {
s.Unlock()
@@ -910,16 +927,16 @@
select {
case <-done:
case <-time.After(5 * time.Second):
- vlog.Errorf("%s: Listener Close Error: %v", serverDebug, firstErr)
- vlog.Errorf("%s: Timedout waiting for goroutines to stop: listeners: %d (currently: %d)", serverDebug, nListeners, len(s.listeners))
+ s.ctx.Errorf("%s: Listener Close Error: %v", serverDebug, firstErr)
+ s.ctx.Errorf("%s: Timedout waiting for goroutines to stop: listeners: %d (currently: %d)", serverDebug, nListeners, len(s.listeners))
for ln, _ := range s.listeners {
- vlog.Errorf("%s: Listener: %v", serverDebug, ln)
+ s.ctx.Errorf("%s: Listener: %v", serverDebug, ln)
}
for ls, _ := range s.listenState {
- vlog.Errorf("%s: ListenState: %v", serverDebug, ls)
+ s.ctx.Errorf("%s: ListenState: %v", serverDebug, ls)
}
<-done
- vlog.Infof("%s: Done waiting.", serverDebug)
+ s.ctx.Infof("%s: Done waiting.", serverDebug)
}
s.Lock()
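
Two server.go changes are easy to miss among the logging edits: the server's principal now comes from the context (unless options.SecurityNone disables VC authentication), and listener failures are threaded into the returned error. The principal wiring, condensed into an equivalent sketch:

authenticateVC := securityLevel != options.SecurityNone
if authenticateVC {
	s.principal = v23.GetPrincipal(ctx)
	if s.blessings.IsZero() && s.principal != nil {
		s.blessings = s.principal.BlessingStore().Default()
	}
}
// Every listener then learns whether to authenticate its VCs:
s.listenerOpts = append(s.listenerOpts, stream.AuthenticatedVC(authenticateVC))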
diff --git a/runtime/internal/rpc/server_test.go b/runtime/internal/rpc/server_test.go
index e46aa50..a5904a7 100644
--- a/runtime/internal/rpc/server_test.go
+++ b/runtime/internal/rpc/server_test.go
@@ -11,6 +11,10 @@
"testing"
"time"
+ "v.io/x/lib/netstate"
+ "v.io/x/lib/pubsub"
+ "v.io/x/lib/set"
+
"v.io/v23"
"v.io/v23/context"
"v.io/v23/naming"
@@ -18,10 +22,7 @@
"v.io/v23/rpc"
"v.io/v23/security"
"v.io/v23/verror"
- "v.io/x/lib/netstate"
- "v.io/x/lib/pubsub"
- "v.io/x/lib/set"
- "v.io/x/lib/vlog"
+
inaming "v.io/x/ref/runtime/internal/naming"
imanager "v.io/x/ref/runtime/internal/rpc/stream/manager"
tnaming "v.io/x/ref/runtime/internal/testing/mocks/naming"
@@ -48,11 +49,13 @@
func TestBadObject(t *testing.T) {
ctx, shutdown := initForTest()
defer shutdown()
- sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+ sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
defer sm.Shutdown()
ns := tnaming.NewSimpleNamespace()
pclient, pserver := newClientServerPrincipals()
- server, err := testInternalNewServer(ctx, sm, ns, pserver)
+ cctx, _ := v23.WithPrincipal(ctx, pclient)
+ sctx, _ := v23.WithPrincipal(ctx, pserver)
+ server, err := testInternalNewServer(sctx, sm, ns)
if err != nil {
t.Fatal(err)
}
@@ -77,10 +80,9 @@
if err != nil {
t.Fatalf("InternalNewClient failed: %v", err)
}
- ctx, _ = v23.WithPrincipal(ctx, pclient)
- ctx, _ = context.WithDeadline(ctx, time.Now().Add(10*time.Second))
+ cctx, _ = context.WithDeadline(cctx, time.Now().Add(10*time.Second))
var result string
- if err := client.Call(ctx, "servername", "SomeMethod", nil, []interface{}{&result}); err == nil {
+ if err := client.Call(cctx, "servername", "SomeMethod", nil, []interface{}{&result}); err == nil {
// TODO(caprita): Check the error type rather than
// merely ensuring the test doesn't panic.
t.Fatalf("Call should have failed")
@@ -88,12 +90,13 @@
}
func TestServerArgs(t *testing.T) {
- sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
- defer sm.Shutdown()
- ns := tnaming.NewSimpleNamespace()
ctx, shutdown := initForTest()
defer shutdown()
- server, err := testInternalNewServer(ctx, sm, ns, testutil.NewPrincipal("test"))
+ sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
+ defer sm.Shutdown()
+ ns := tnaming.NewSimpleNamespace()
+ sctx, _ := v23.WithPrincipal(ctx, testutil.NewPrincipal("test"))
+ server, err := testInternalNewServer(sctx, sm, ns)
if err != nil {
t.Fatal(err)
}
@@ -139,11 +142,12 @@
func TestServerStatus(t *testing.T) {
ctx, shutdown := initForTest()
defer shutdown()
- sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+ sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
defer sm.Shutdown()
ns := tnaming.NewSimpleNamespace()
principal := testutil.NewPrincipal("testServerStatus")
- server, err := testInternalNewServer(ctx, sm, ns, principal)
+ ctx, _ = v23.WithPrincipal(ctx, principal)
+ server, err := testInternalNewServer(ctx, sm, ns)
if err != nil {
t.Fatal(err)
}
@@ -171,7 +175,6 @@
progress := make(chan error)
client, err := InternalNewClient(sm, ns)
- ctx, _ = v23.WithPrincipal(ctx, principal)
makeCall := func(ctx *context.T) {
call, err := client.StartCall(ctx, "test", "Hang", nil)
progress <- err
@@ -229,12 +232,12 @@
}
func TestServerStates(t *testing.T) {
- sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
- defer sm.Shutdown()
- ns := tnaming.NewSimpleNamespace()
ctx, shutdown := initForTest()
defer shutdown()
-
+ sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
+ defer sm.Shutdown()
+ ns := tnaming.NewSimpleNamespace()
+ sctx, _ := v23.WithPrincipal(ctx, testutil.NewPrincipal("test"))
expectBadState := func(err error) {
if verror.ErrorID(err) != verror.ErrBadState.ID {
t.Fatalf("%s: unexpected error: %v", loc(1), err)
@@ -247,7 +250,7 @@
}
}
- server, err := testInternalNewServer(ctx, sm, ns, testutil.NewPrincipal("test"))
+ server, err := testInternalNewServer(sctx, sm, ns)
expectNoError(err)
defer server.Stop()
@@ -297,12 +300,14 @@
}
func TestMountStatus(t *testing.T) {
- sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
- defer sm.Shutdown()
- ns := tnaming.NewSimpleNamespace()
ctx, shutdown := initForTest()
defer shutdown()
- server, err := testInternalNewServer(ctx, sm, ns, testutil.NewPrincipal("test"))
+ sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
+ defer sm.Shutdown()
+ ns := tnaming.NewSimpleNamespace()
+ sctx, _ := v23.WithPrincipal(ctx, testutil.NewPrincipal("test"))
+
+ server, err := testInternalNewServer(sctx, sm, ns)
if err != nil {
t.Fatal(err)
}
@@ -417,11 +422,11 @@
}
func TestRoaming(t *testing.T) {
- sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
- defer sm.Shutdown()
- ns := tnaming.NewSimpleNamespace()
ctx, shutdown := initForTest()
defer shutdown()
+ sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
+ defer sm.Shutdown()
+ ns := tnaming.NewSimpleNamespace()
publisher := pubsub.NewPublisher()
roaming := make(chan pubsub.Setting)
@@ -431,7 +436,8 @@
}
defer func() { publisher.Shutdown(); <-stop }()
- server, err := testInternalNewServerWithPubsub(ctx, sm, ns, publisher, "TestRoaming", testutil.NewPrincipal("test"))
+ nctx, _ := v23.WithPrincipal(ctx, testutil.NewPrincipal("test"))
+ server, err := testInternalNewServerWithPubsub(nctx, sm, ns, publisher, "TestRoaming")
if err != nil {
t.Fatal(err)
}
@@ -487,7 +493,7 @@
roaming <- NewAddAddrsSetting([]net.Addr{n1, n2})
waitForChange := func() *rpc.NetworkChange {
- vlog.Infof("Waiting on %p", watcher)
+ ctx.Infof("Waiting on %p", watcher)
select {
case c := <-watcher:
return &c
@@ -567,11 +573,11 @@
}
func TestWatcherDeadlock(t *testing.T) {
- sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
- defer sm.Shutdown()
- ns := tnaming.NewSimpleNamespace()
ctx, shutdown := initForTest()
defer shutdown()
+ sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
+ defer sm.Shutdown()
+ ns := tnaming.NewSimpleNamespace()
publisher := pubsub.NewPublisher()
roaming := make(chan pubsub.Setting)
@@ -581,7 +587,8 @@
}
defer func() { publisher.Shutdown(); <-stop }()
- server, err := testInternalNewServerWithPubsub(ctx, sm, ns, publisher, "TestWatcherDeadlock", testutil.NewPrincipal("test"))
+ nctx, _ := v23.WithPrincipal(ctx, testutil.NewPrincipal("test"))
+ server, err := testInternalNewServerWithPubsub(nctx, sm, ns, publisher, "TestWatcherDeadlock")
if err != nil {
t.Fatal(err)
}
@@ -638,11 +645,13 @@
func TestIsLeafServerOption(t *testing.T) {
ctx, shutdown := initForTest()
defer shutdown()
- sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+ sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
defer sm.Shutdown()
ns := tnaming.NewSimpleNamespace()
pclient, pserver := newClientServerPrincipals()
- server, err := testInternalNewServer(ctx, sm, ns, pserver, options.IsLeaf(true))
+ cctx, _ := v23.WithPrincipal(ctx, pclient)
+ sctx, _ := v23.WithPrincipal(ctx, pserver)
+ server, err := testInternalNewServer(sctx, sm, ns, options.IsLeaf(true))
if err != nil {
t.Fatal(err)
}
@@ -661,12 +670,11 @@
if err != nil {
t.Fatalf("InternalNewClient failed: %v", err)
}
- ctx, _ = v23.WithPrincipal(ctx, pclient)
- ctx, _ = context.WithDeadline(ctx, time.Now().Add(10*time.Second))
+ cctx, _ = context.WithDeadline(cctx, time.Now().Add(10*time.Second))
var result string
// we have set IsLeaf to true, so sending any suffix to leafserver should
// result in a "suffix was not expected" error.
- callErr := client.Call(ctx, "leafserver/unwantedSuffix", "Echo", []interface{}{"Mirror on the wall"}, []interface{}{&result})
+ callErr := client.Call(cctx, "leafserver/unwantedSuffix", "Echo", []interface{}{"Mirror on the wall"}, []interface{}{&result})
if callErr == nil {
t.Fatalf("Call should have failed with suffix was not expected error")
}
diff --git a/runtime/internal/rpc/sort_endpoints.go b/runtime/internal/rpc/sort_endpoints.go
index ee7193c..3b5b896 100644
--- a/runtime/internal/rpc/sort_endpoints.go
+++ b/runtime/internal/rpc/sort_endpoints.go
@@ -9,8 +9,6 @@
"net"
"sort"
- "v.io/x/lib/vlog"
-
"v.io/v23/naming"
"v.io/v23/verror"
@@ -85,7 +83,6 @@
// these protocols will be returned also, but following the default
// preferences.
func filterAndOrderServers(servers []naming.MountedServer, protocols []string, ipnets []*net.IPNet) ([]naming.MountedServer, error) {
- vlog.VI(3).Infof("filterAndOrderServers%v: %v", protocols, servers)
var (
errs = verror.SubErrs{}
list = make(sortableServerList, 0, len(servers))
@@ -193,20 +190,18 @@
}
// ipNetworks returns the IP networks on this machine.
-func ipNetworks() []*net.IPNet {
+func ipNetworks() ([]*net.IPNet, error) {
ifcs, err := netstate.GetAllAddresses()
if err != nil {
- vlog.VI(5).Infof("netstate.GetAllAddresses failed: %v", err)
- return nil
+ return nil, err
}
ret := make([]*net.IPNet, 0, len(ifcs))
for _, a := range ifcs {
_, ipnet, err := net.ParseCIDR(a.String())
if err != nil {
- vlog.VI(5).Infof("net.ParseCIDR(%q) failed: %v", a, err)
- continue
+ return nil, err
}
ret = append(ret, ipnet)
}
- return ret
+ return ret, nil
}
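Note: ipNetworks now surfaces errors instead of logging and returning nil, and a
single unparsable address now fails the whole call where it was previously
skipped. A hypothetical call site (the real caller is outside this hunk),
matching the filterAndOrderServers signature above:

    ipnets, err := ipNetworks()
    if err != nil {
        return nil, err
    }
    return filterAndOrderServers(servers, protocols, ipnets)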
diff --git a/runtime/internal/rpc/stream/benchmark/dial_vc.go b/runtime/internal/rpc/stream/benchmark/dial_vc.go
index fd7ed2d..e0cbe02 100644
--- a/runtime/internal/rpc/stream/benchmark/dial_vc.go
+++ b/runtime/internal/rpc/stream/benchmark/dial_vc.go
@@ -8,23 +8,28 @@
"testing"
"time"
- _ "v.io/x/ref/runtime/factories/static"
- "v.io/x/ref/runtime/internal/rpc/stream/manager"
- "v.io/x/ref/runtime/internal/rpc/stream/vc"
- "v.io/x/ref/test/benchmark"
- "v.io/x/ref/test/testutil"
-
+ "v.io/v23"
"v.io/v23/naming"
"v.io/v23/options"
"v.io/v23/security"
+
+ _ "v.io/x/ref/runtime/factories/static"
+ "v.io/x/ref/runtime/internal/rpc/stream/manager"
+ "v.io/x/ref/runtime/internal/rpc/stream/vc"
+ "v.io/x/ref/test"
+ "v.io/x/ref/test/benchmark"
+ "v.io/x/ref/test/testutil"
)
// benchmarkDialVC measures VC creation time over the underlying VIF.
func benchmarkDialVC(b *testing.B, mode options.SecurityLevel) {
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
+
stats := benchmark.AddStats(b, 16)
- server := manager.InternalNew(naming.FixedRoutingID(0x5))
- client := manager.InternalNew(naming.FixedRoutingID(0xc))
+ server := manager.InternalNew(ctx, naming.FixedRoutingID(0x5))
+ client := manager.InternalNew(ctx, naming.FixedRoutingID(0xc))
var (
principal security.Principal
blessings security.Blessings
@@ -32,16 +37,17 @@
if mode == securityDefault {
principal = testutil.NewPrincipal("test")
blessings = principal.BlessingStore().Default()
+ ctx, _ = v23.WithPrincipal(ctx, principal)
}
- _, ep, err := server.Listen("tcp", "127.0.0.1:0", principal, blessings)
+ _, ep, err := server.Listen(ctx, "tcp", "127.0.0.1:0", blessings)
if err != nil {
b.Fatal(err)
}
// Create one VC to prevent the underlying VIF from being closed.
- _, err = client.Dial(ep, principal, vc.IdleTimeout{0})
+ _, err = client.Dial(ctx, ep, vc.IdleTimeout{0})
if err != nil {
b.Fatal(err)
}
@@ -52,7 +58,7 @@
b.StartTimer()
start := time.Now()
- VC, err := client.Dial(ep, principal)
+ VC, err := client.Dial(ctx, ep)
if err != nil {
b.Fatal(err)
}
diff --git a/runtime/internal/rpc/stream/benchmark/dial_vif.go b/runtime/internal/rpc/stream/benchmark/dial_vif.go
index 52ed62f..20e6528 100644
--- a/runtime/internal/rpc/stream/benchmark/dial_vif.go
+++ b/runtime/internal/rpc/stream/benchmark/dial_vif.go
@@ -9,17 +9,21 @@
"testing"
"time"
- "v.io/x/ref/runtime/internal/rpc/stream/vif"
- "v.io/x/ref/test/benchmark"
- "v.io/x/ref/test/testutil"
-
+ "v.io/v23"
"v.io/v23/naming"
"v.io/v23/options"
"v.io/v23/security"
+
+ "v.io/x/ref/runtime/internal/rpc/stream/vif"
+ "v.io/x/ref/test"
+ "v.io/x/ref/test/benchmark"
+ "v.io/x/ref/test/testutil"
)
// benchmarkDialVIF measures VIF creation time over the underlying net connection.
func benchmarkDialVIF(b *testing.B, mode options.SecurityLevel) {
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
stats := benchmark.AddStats(b, 16)
var (
principal security.Principal
@@ -28,6 +32,7 @@
if mode == securityDefault {
principal = testutil.NewPrincipal("test")
blessings = principal.BlessingStore().Default()
+ ctx, _ = v23.WithPrincipal(ctx, principal)
}
b.ResetTimer() // Exclude setup time from measurement.
@@ -38,14 +43,14 @@
serverch := make(chan *vif.VIF)
go func() {
- server, _ := vif.InternalNewAcceptedVIF(ns, naming.FixedRoutingID(0x5), principal, blessings, nil, nil)
+ server, _ := vif.InternalNewAcceptedVIF(ctx, ns, naming.FixedRoutingID(0x5), blessings, nil, nil)
serverch <- server
}()
b.StartTimer()
start := time.Now()
- client, err := vif.InternalNewDialedVIF(nc, naming.FixedRoutingID(0xc), principal, nil, nil)
+ client, err := vif.InternalNewDialedVIF(ctx, nc, naming.FixedRoutingID(0xc), nil, nil)
if err != nil {
b.Fatal(err)
}
diff --git a/runtime/internal/rpc/stream/benchmark/throughput_flow.go b/runtime/internal/rpc/stream/benchmark/throughput_flow.go
index 605ebbe..20a715f 100644
--- a/runtime/internal/rpc/stream/benchmark/throughput_flow.go
+++ b/runtime/internal/rpc/stream/benchmark/throughput_flow.go
@@ -8,12 +8,15 @@
"io"
"testing"
- "v.io/x/ref/runtime/internal/rpc/stream/manager"
-
+ "v.io/v23"
+ "v.io/v23/context"
"v.io/v23/naming"
"v.io/v23/options"
"v.io/v23/security"
+
"v.io/x/ref/runtime/internal/rpc/stream"
+ "v.io/x/ref/runtime/internal/rpc/stream/manager"
+ "v.io/x/ref/test"
"v.io/x/ref/test/testutil"
)
@@ -31,7 +34,7 @@
// createListeners returns N (stream.Listener, naming.Endpoint) pairs, such
// that calling stream.Manager.Dial to each of the endpoints will end up
// creating a new VIF.
-func createListeners(mode options.SecurityLevel, m stream.Manager, N int) (servers []listener, err error) {
+func createListeners(ctx *context.T, mode options.SecurityLevel, m stream.Manager, N int) (servers []listener, err error) {
for i := 0; i < N; i++ {
var (
l listener
@@ -42,7 +45,7 @@
principal = testutil.NewPrincipal("test")
blessings = principal.BlessingStore().Default()
}
- if l.ln, l.ep, err = m.Listen("tcp", "127.0.0.1:0", principal, blessings); err != nil {
+ if l.ln, l.ep, err = m.Listen(ctx, "tcp", "127.0.0.1:0", blessings); err != nil {
return
}
servers = append(servers, l)
@@ -51,15 +54,18 @@
}
func benchmarkFlow(b *testing.B, mode options.SecurityLevel, nVIFs, nVCsPerVIF, nFlowsPerVC int) {
- client := manager.InternalNew(naming.FixedRoutingID(0xcccccccc))
- server := manager.InternalNew(naming.FixedRoutingID(0x55555555))
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
+ client := manager.InternalNew(ctx, naming.FixedRoutingID(0xcccccccc))
+ server := manager.InternalNew(ctx, naming.FixedRoutingID(0x55555555))
var principal security.Principal
if mode == securityDefault {
principal = testutil.NewPrincipal("test")
+ ctx, _ = v23.WithPrincipal(ctx, principal)
}
- lns, err := createListeners(mode, server, nVIFs)
+ lns, err := createListeners(ctx, mode, server, nVIFs)
if err != nil {
b.Fatal(err)
}
@@ -75,7 +81,7 @@
for i := 0; i < nVIFs; i++ {
ep := lns[i].ep
for j := 0; j < nVCsPerVIF; j++ {
- vc, err := client.Dial(ep, principal)
+ vc, err := client.Dial(ctx, ep)
if err != nil {
b.Error(err)
return
diff --git a/runtime/internal/rpc/stream/manager/error_test.go b/runtime/internal/rpc/stream/manager/error_test.go
index a2fbf01..fe6f76d 100644
--- a/runtime/internal/rpc/stream/manager/error_test.go
+++ b/runtime/internal/rpc/stream/manager/error_test.go
@@ -9,6 +9,7 @@
"testing"
"time"
+ "v.io/v23"
"v.io/v23/naming"
"v.io/v23/rpc"
"v.io/v23/security"
@@ -25,39 +26,43 @@
)
func TestListenErrors(t *testing.T) {
- server := manager.InternalNew(naming.FixedRoutingID(0x1))
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
+ server := manager.InternalNew(ctx, naming.FixedRoutingID(0x1))
pserver := testutil.NewPrincipal("server")
+ ctx, _ = v23.WithPrincipal(ctx, pserver)
// principal, no blessings
- _, _, err := server.Listen("tcp", "127.0.0.1:0", pserver, security.Blessings{}, nil)
+ _, _, err := server.Listen(ctx, "tcp", "127.0.0.1:0", security.Blessings{}, nil)
if verror.ErrorID(err) != stream.ErrBadArg.ID {
t.Fatalf("wrong error: %s", err)
}
t.Log(err)
// blessings, no principal
- _, _, err = server.Listen("tcp", "127.0.0.1:0", nil, pserver.BlessingStore().Default(), nil)
+ nilctx, _ := v23.WithPrincipal(ctx, nil)
+ _, _, err = server.Listen(nilctx, "tcp", "127.0.0.1:0", pserver.BlessingStore().Default(), nil)
if verror.ErrorID(err) != stream.ErrBadArg.ID {
t.Fatalf("wrong error: %s", err)
}
t.Log(err)
// bad protocol
- _, _, err = server.Listen("foo", "127.0.0.1:0", pserver, pserver.BlessingStore().Default())
+ _, _, err = server.Listen(ctx, "foo", "127.0.0.1:0", pserver.BlessingStore().Default())
if verror.ErrorID(err) != stream.ErrBadArg.ID {
t.Fatalf("wrong error: %s", err)
}
t.Log(err)
// bad address
- _, _, err = server.Listen("tcp", "xx.0.0.1:0", pserver, pserver.BlessingStore().Default())
+ _, _, err = server.Listen(ctx, "tcp", "xx.0.0.1:0", pserver.BlessingStore().Default())
if verror.ErrorID(err) != stream.ErrNetwork.ID {
t.Fatalf("wrong error: %s", err)
}
t.Log(err)
// bad address for proxy
- _, _, err = server.Listen("v23", "127x.0.0.1", pserver, pserver.BlessingStore().Default())
+ _, _, err = server.Listen(ctx, "v23", "127x.0.0.1", pserver.BlessingStore().Default())
if verror.ErrorID(err) != stream.ErrBadArg.ID {
t.Fatalf("wrong error: %s", err)
}
@@ -68,7 +73,7 @@
for {
f, err := ln.Accept()
if err != nil {
- return
+ break
}
f.Close()
}
@@ -94,16 +99,18 @@
}
func TestDialErrors(t *testing.T) {
- _, shutdown := test.V23Init()
+ ctx, shutdown := test.V23Init()
defer shutdown()
- server := manager.InternalNew(naming.FixedRoutingID(0x55555555))
- client := manager.InternalNew(naming.FixedRoutingID(0xcccccccc))
+ server := manager.InternalNew(ctx, naming.FixedRoutingID(0x55555555))
+ client := manager.InternalNew(ctx, naming.FixedRoutingID(0xcccccccc))
pclient := testutil.NewPrincipal("client")
pserver := testutil.NewPrincipal("server")
+ cctx, _ := v23.WithPrincipal(ctx, pclient)
+ sctx, _ := v23.WithPrincipal(ctx, pserver)
// bad protocol
ep, _ := inaming.NewEndpoint(naming.FormatEndpoint("x", "127.0.0.1:2"))
- _, err := client.Dial(ep, pclient)
+ _, err := client.Dial(cctx, ep)
// A bad protocol should result in a Resolve Error.
if verror.ErrorID(err) != stream.ErrResolveFailed.ID {
t.Errorf("wrong error: %v", err)
@@ -112,7 +119,7 @@
// no server
ep, _ = inaming.NewEndpoint(naming.FormatEndpoint("tcp", "127.0.0.1:2"))
- _, err = client.Dial(ep, pclient)
+ _, err = client.Dial(cctx, ep)
if verror.ErrorID(err) != stream.ErrDialFailed.ID {
t.Errorf("wrong error: %v", err)
}
@@ -120,7 +127,7 @@
rpc.RegisterProtocol("dropData", dropDataDialer, simpleResolver, net.Listen)
- ln, sep, err := server.Listen("tcp", "127.0.0.1:0", pserver, pserver.BlessingStore().Default())
+ ln, sep, err := server.Listen(sctx, "tcp", "127.0.0.1:0", pserver.BlessingStore().Default())
if err != nil {
t.Fatal(err)
}
@@ -132,7 +139,7 @@
if err != nil {
t.Fatal(err)
}
- _, err = client.Dial(cep, pclient)
+ _, err = client.Dial(cctx, cep)
if verror.ErrorID(err) != stream.ErrNetwork.ID {
t.Errorf("wrong error: %v", err)
}
diff --git a/runtime/internal/rpc/stream/manager/listener.go b/runtime/internal/rpc/stream/manager/listener.go
index 387752e..5659fad 100644
--- a/runtime/internal/rpc/stream/manager/listener.go
+++ b/runtime/internal/rpc/stream/manager/listener.go
@@ -13,18 +13,18 @@
"syscall"
"time"
- "v.io/x/ref/runtime/internal/lib/upcqueue"
- inaming "v.io/x/ref/runtime/internal/naming"
- "v.io/x/ref/runtime/internal/rpc/stream/proxy"
- "v.io/x/ref/runtime/internal/rpc/stream/vc"
- "v.io/x/ref/runtime/internal/rpc/stream/vif"
-
+ "v.io/v23/context"
"v.io/v23/naming"
"v.io/v23/security"
"v.io/v23/verror"
"v.io/v23/vom"
- "v.io/x/lib/vlog"
+
+ "v.io/x/ref/runtime/internal/lib/upcqueue"
+ inaming "v.io/x/ref/runtime/internal/naming"
"v.io/x/ref/runtime/internal/rpc/stream"
+ "v.io/x/ref/runtime/internal/rpc/stream/proxy"
+ "v.io/x/ref/runtime/internal/rpc/stream/vc"
+ "v.io/x/ref/runtime/internal/rpc/stream/vif"
)
// ProxyAuthenticator is a stream.ListenerOpt that is used when listening via a
@@ -71,6 +71,7 @@
netLn net.Listener
manager *manager
vifs *vif.Set
+ ctx *context.T
connsMu sync.Mutex
conns map[net.Conn]bool
@@ -88,19 +89,21 @@
proxyEP naming.Endpoint
manager *manager
vif *vif.VIF
+ ctx *context.T
vifLoop sync.WaitGroup
}
var _ stream.Listener = (*proxyListener)(nil)
-func newNetListener(m *manager, netLn net.Listener, principal security.Principal, blessings security.Blessings, opts []stream.ListenerOpt) listener {
+func newNetListener(ctx *context.T, m *manager, netLn net.Listener, blessings security.Blessings, opts []stream.ListenerOpt) listener {
ln := &netListener{
q: upcqueue.New(),
manager: m,
netLn: netLn,
vifs: vif.NewSet(),
conns: make(map[net.Conn]bool),
+ ctx: ctx,
}
// Set the default idle timeout for VC. But for "unixfd", we do not set
@@ -110,7 +113,7 @@
}
ln.netLoop.Add(1)
- go ln.netAcceptLoop(principal, blessings, opts)
+ go ln.netAcceptLoop(blessings, opts)
return ln
}
@@ -140,7 +143,7 @@
removed := remaining[:n]
ln.connsMu.Unlock()
- vlog.Infof("Killing %d Conns", n)
+ ln.ctx.Infof("Killing %d Conns", n)
var wg sync.WaitGroup
wg.Add(n)
@@ -148,7 +151,7 @@
idx := rand.Intn(len(remaining))
conn := remaining[idx]
go func(conn net.Conn) {
- vlog.Infof("Killing connection (%s, %s)", conn.LocalAddr(), conn.RemoteAddr())
+ ln.ctx.Infof("Killing connection (%s, %s)", conn.LocalAddr(), conn.RemoteAddr())
conn.Close()
ln.manager.killedConns.Incr(1)
wg.Done()
@@ -166,7 +169,7 @@
wg.Wait()
}
-func (ln *netListener) netAcceptLoop(principal security.Principal, blessings security.Blessings, opts []stream.ListenerOpt) {
+func (ln *netListener) netAcceptLoop(blessings security.Blessings, opts []stream.ListenerOpt) {
defer ln.netLoop.Done()
opts = append([]stream.ListenerOpt{vc.StartTimeout{defaultStartTimeout}}, opts...)
for {
@@ -175,7 +178,7 @@
// Use Info instead of Error to reduce the chances that
// the log library will cause the process to abort on
// failing to create a new file.
- vlog.Infof("net.Listener.Accept() failed on %v with %v", ln.netLn, err)
+ ln.ctx.Infof("net.Listener.Accept() failed on %v with %v", ln.netLn, err)
for tokill := 1; isTemporaryError(err); tokill *= 2 {
if isTooManyOpenFiles(err) {
ln.killConnections(tokill)
@@ -194,26 +197,26 @@
// how I noticed). The right solution is to lock these data structures, but
// that can wait until a bigger overhaul occurs. For now, we leave this at
// VI(1) knowing that it's basically harmless.
- vlog.VI(1).Infof("Exiting netAcceptLoop: net.Listener.Accept() failed on %v with %v", ln.netLn, err)
+ ln.ctx.VI(1).Infof("Exiting netAcceptLoop: net.Listener.Accept() failed on %v with %v", ln.netLn, err)
return
}
ln.connsMu.Lock()
ln.conns[conn] = true
ln.connsMu.Unlock()
- vlog.VI(1).Infof("New net.Conn accepted from %s (local address: %s)", conn.RemoteAddr(), conn.LocalAddr())
+ ln.ctx.VI(1).Infof("New net.Conn accepted from %s (local address: %s)", conn.RemoteAddr(), conn.LocalAddr())
+
go func() {
- vf, err := vif.InternalNewAcceptedVIF(conn, ln.manager.rid, principal, blessings, nil, ln.deleteVIF, opts...)
+ vf, err := vif.InternalNewAcceptedVIF(ln.ctx, conn, ln.manager.rid, blessings, nil, ln.deleteVIF, opts...)
if err != nil {
- vlog.Infof("Shutting down conn from %s (local address: %s) as a VIF could not be created: %v", conn.RemoteAddr(), conn.LocalAddr(), err)
+ ln.ctx.Infof("Shutting down conn from %s (local address: %s) as a VIF could not be created: %v", conn.RemoteAddr(), conn.LocalAddr(), err)
conn.Close()
return
}
+ ln.vifLoops.Add(1)
ln.vifs.Insert(vf, conn.RemoteAddr().Network(), conn.RemoteAddr().String())
ln.manager.vifs.Insert(vf, conn.RemoteAddr().Network(), conn.RemoteAddr().String())
-
- ln.vifLoops.Add(1)
- vifLoop(vf, ln.q, func() {
+ vifLoop(ln.ctx, vf, ln.q, func() {
ln.connsMu.Lock()
delete(ln.conns, conn)
ln.connsMu.Unlock()
@@ -224,7 +227,7 @@
}
func (ln *netListener) deleteVIF(vf *vif.VIF) {
- vlog.VI(2).Infof("VIF %v is closed, removing from cache", vf)
+ ln.ctx.VI(2).Infof("VIF %v is closed, removing from cache", vf)
ln.vifs.Delete(vf)
ln.manager.vifs.Delete(vf)
}
@@ -242,19 +245,19 @@
}
func (ln *netListener) Close() error {
- closeNetListener(ln.netLn)
+ closeNetListener(ln.ctx, ln.netLn)
ln.netLoop.Wait()
for _, vif := range ln.vifs.List() {
// NOTE(caprita): We do not actually Close down the vifs, as
// that would require knowing when all outstanding requests are
// finished. For now, do not worry about it, since we expect
// shut down to immediately precede process exit.
vif.StopAccepting()
}
ln.q.Shutdown()
ln.manager.removeListener(ln)
ln.vifLoops.Wait()
- vlog.VI(3).Infof("Closed stream.Listener %s", ln)
return nil
}
@@ -275,26 +278,27 @@
return strings.Join(ret, "\n")
}
-func newProxyListener(m *manager, proxyEP naming.Endpoint, principal security.Principal, opts []stream.ListenerOpt) (listener, *inaming.Endpoint, error) {
+func newProxyListener(ctx *context.T, m *manager, proxyEP naming.Endpoint, opts []stream.ListenerOpt) (listener, *inaming.Endpoint, error) {
ln := &proxyListener{
q: upcqueue.New(),
proxyEP: proxyEP,
manager: m,
+ ctx: ctx,
}
- vf, ep, err := ln.connect(principal, opts)
+ vf, ep, err := ln.connect(opts)
if err != nil {
return nil, nil, err
}
ln.vif = vf
ln.vifLoop.Add(1)
- go vifLoop(ln.vif, ln.q, func() {
+ go vifLoop(ctx, ln.vif, ln.q, func() {
ln.vifLoop.Done()
})
return ln, ep, nil
}
-func (ln *proxyListener) connect(principal security.Principal, opts []stream.ListenerOpt) (*vif.VIF, *inaming.Endpoint, error) {
- vlog.VI(1).Infof("Connecting to proxy at %v", ln.proxyEP)
+func (ln *proxyListener) connect(opts []stream.ListenerOpt) (*vif.VIF, *inaming.Endpoint, error) {
+ ln.ctx.VI(1).Infof("Connecting to proxy at %v", ln.proxyEP)
// Requires dialing a VC to the proxy, need to extract options from ln.opts to do so.
var dialOpts []stream.VCOpt
var auth ProxyAuthenticator
@@ -310,7 +314,7 @@
// this covered by opts?)
// TODO(ashankar): Authorize the proxy server as well (similar to how
// clients authorize servers in RPCs).
- vf, err := ln.manager.FindOrDialVIF(ln.proxyEP, principal, dialOpts...)
+ vf, err := ln.manager.FindOrDialVIF(ln.ctx, ln.proxyEP, dialOpts...)
if err != nil {
return nil, nil, err
}
@@ -322,7 +326,7 @@
// Proxy protocol: See v.io/x/ref/runtime/internal/rpc/stream/proxy/protocol.vdl
//
// We don't need idle timeout for this VC, since one flow will be kept alive.
- vc, err := vf.Dial(ln.proxyEP, principal, dialOpts...)
+ vc, err := vf.Dial(ln.ctx, ln.proxyEP, dialOpts...)
if err != nil {
vf.StopAccepting()
if verror.ErrorID(err) == verror.ErrAborted.ID {
@@ -392,7 +396,7 @@
ln.q.Shutdown()
ln.manager.removeListener(ln)
ln.vifLoop.Wait()
- vlog.VI(3).Infof("Closed stream.Listener %s", ln)
+ ln.ctx.VI(3).Infof("Closed stream.Listener %s", ln)
return nil
}
@@ -404,19 +408,19 @@
return fmt.Sprintf("stream.Listener: PROXY:%v RoutingID:%v", ln.proxyEP, ln.manager.rid)
}
-func vifLoop(vf *vif.VIF, q *upcqueue.T, cleanup func()) {
+func vifLoop(ctx *context.T, vf *vif.VIF, q *upcqueue.T, cleanup func()) {
defer cleanup()
for {
cAndf, err := vf.Accept()
switch {
case err != nil:
- vlog.VI(2).Infof("Shutting down listener on VIF %v: %v", vf, err)
+ ctx.VI(2).Infof("Shutting down listener on VIF %v: %v", vf, err)
return
case cAndf.Flow == nil:
- vlog.VI(1).Infof("New VC %v on VIF %v", cAndf.Connector, vf)
+ ctx.VI(1).Infof("New VC %v on VIF %v", cAndf.Connector, vf)
default:
if err := q.Put(cAndf); err != nil {
- vlog.VI(1).Infof("Closing new flow on VC %v (VIF %v) as Put failed in vifLoop: %v", cAndf.Connector, vf, err)
+ ctx.VI(1).Infof("Closing new flow on VC %v (VIF %v) as Put failed in vifLoop: %v", cAndf.Connector, vf, err)
cAndf.Flow.Close()
}
}
diff --git a/runtime/internal/rpc/stream/manager/manager.go b/runtime/internal/rpc/stream/manager/manager.go
index bfa3deb..e5aa37d 100644
--- a/runtime/internal/rpc/stream/manager/manager.go
+++ b/runtime/internal/rpc/stream/manager/manager.go
@@ -12,11 +12,12 @@
"sync"
"time"
+ "v.io/v23/context"
"v.io/v23/naming"
"v.io/v23/rpc"
"v.io/v23/security"
"v.io/v23/verror"
- "v.io/x/lib/vlog"
"v.io/x/ref/lib/apilog"
"v.io/x/ref/lib/stats"
@@ -54,9 +55,10 @@
// As the name suggests, this method is intended for use only within packages
// placed inside v.io/x/ref/runtime/internal. Code outside the
// v.io/x/ref/runtime/internal/* packages should never call this method.
-func InternalNew(rid naming.RoutingID) stream.Manager {
+func InternalNew(ctx *context.T, rid naming.RoutingID) stream.Manager {
statsPrefix := naming.Join("rpc", "stream", "routing-id", rid.String())
m := &manager{
+ ctx: ctx,
rid: rid,
vifs: vif.NewSet(),
listeners: make(map[listener]bool),
@@ -68,6 +70,7 @@
}
type manager struct {
+ ctx *context.T
rid naming.RoutingID
vifs *vif.Set
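Note: with the ctx parameter and field added here, every InternalNew call site
must supply a context that carries a logger; in the tests below it comes from
test.V23Init. A condensed sketch of the updated construction pattern:

    ctx, shutdown := test.V23Init()
    defer shutdown()
    sm := manager.InternalNew(ctx, naming.FixedRoutingID(0x1))
    defer sm.Shutdown()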
@@ -113,7 +116,7 @@
// FindOrDialVIF returns the network connection (VIF) to the provided address
// from the cache in the manager. If not already present in the cache, a new
// connection will be created using net.Dial.
-func (m *manager) FindOrDialVIF(remote naming.Endpoint, principal security.Principal, opts ...stream.VCOpt) (*vif.VIF, error) {
+func (m *manager) FindOrDialVIF(ctx *context.T, remote naming.Endpoint, opts ...stream.VCOpt) (*vif.VIF, error) {
// Extract options.
var timeout time.Duration
for _, o := range opts {
@@ -137,19 +140,19 @@
}
vf, unblock := m.vifs.BlockingFind(network, address)
if vf != nil {
- vlog.VI(1).Infof("(%q, %q) resolved to (%q, %q) which exists in the VIF cache.", addr.Network(), addr.String(), network, address)
+ ctx.VI(1).Infof("(%q, %q) resolved to (%q, %q) which exists in the VIF cache.", addr.Network(), addr.String(), network, address)
return vf, nil
}
defer unblock()
- vlog.VI(1).Infof("(%q, %q) not in VIF cache. Dialing", network, address)
+ ctx.VI(1).Infof("(%q, %q) not in VIF cache. Dialing", network, address)
conn, err := dial(d, network, address, timeout)
if err != nil {
return nil, err
}
opts = append([]stream.VCOpt{vc.StartTimeout{defaultStartTimeout}}, opts...)
- vf, err = vif.InternalNewDialedVIF(conn, m.rid, principal, nil, m.deleteVIF, opts...)
+ vf, err = vif.InternalNewDialedVIF(ctx, conn, m.rid, nil, m.deleteVIF, opts...)
if err != nil {
conn.Close()
return nil, err
@@ -158,16 +161,16 @@
return vf, nil
}
-func (m *manager) Dial(remote naming.Endpoint, principal security.Principal, opts ...stream.VCOpt) (stream.VC, error) {
+func (m *manager) Dial(ctx *context.T, remote naming.Endpoint, opts ...stream.VCOpt) (stream.VC, error) {
// If vif.Dial fails because the cached network connection was broken, remove from
// the cache and try once more.
for retry := true; true; retry = false {
- vf, err := m.FindOrDialVIF(remote, principal, opts...)
+ vf, err := m.FindOrDialVIF(ctx, remote, opts...)
if err != nil {
return nil, err
}
opts = append([]stream.VCOpt{vc.IdleTimeout{defaultIdleTimeout}}, opts...)
- vc, err := vf.Dial(remote, principal, opts...)
+ vc, err := vf.Dial(ctx, remote, opts...)
if !retry || verror.ErrorID(err) != stream.ErrAborted.ID {
return vc, err
}
@@ -177,7 +180,7 @@
}
func (m *manager) deleteVIF(vf *vif.VIF) {
- vlog.VI(2).Infof("%p: VIF %v is closed, removing from cache", m, vf)
+ m.ctx.VI(2).Infof("%p: VIF %v is closed, removing from cache", m, vf)
m.vifs.Delete(vf)
}
@@ -192,12 +195,13 @@
return nil, verror.New(stream.ErrBadArg, nil, verror.New(errUnknownNetwork, nil, protocol))
}
-func (m *manager) Listen(protocol, address string, principal security.Principal, blessings security.Blessings, opts ...stream.ListenerOpt) (stream.Listener, naming.Endpoint, error) {
+func (m *manager) Listen(ctx *context.T, protocol, address string, blessings security.Blessings, opts ...stream.ListenerOpt) (stream.Listener, naming.Endpoint, error) {
+ principal := stream.GetPrincipalListenerOpts(ctx, opts...)
bNames, err := extractBlessingNames(principal, blessings)
if err != nil {
return nil, nil, err
}
- ln, ep, err := m.internalListen(protocol, address, principal, blessings, opts...)
+ ln, ep, err := m.internalListen(ctx, protocol, address, blessings, opts...)
if err != nil {
return nil, nil, err
}
@@ -205,7 +209,7 @@
return ln, ep, nil
}
-func (m *manager) internalListen(protocol, address string, principal security.Principal, blessings security.Blessings, opts ...stream.ListenerOpt) (stream.Listener, *inaming.Endpoint, error) {
+func (m *manager) internalListen(ctx *context.T, protocol, address string, blessings security.Blessings, opts ...stream.ListenerOpt) (stream.Listener, *inaming.Endpoint, error) {
m.muListeners.Lock()
if m.shutdown {
m.muListeners.Unlock()
@@ -219,7 +223,7 @@
if err != nil {
return nil, nil, verror.New(stream.ErrBadArg, nil, verror.New(errEndpointParseError, nil, address, err))
}
- return m.remoteListen(ep, principal, opts)
+ return m.remoteListen(ctx, ep, opts)
}
netln, err := listen(protocol, address)
if err != nil {
@@ -229,11 +233,11 @@
m.muListeners.Lock()
if m.shutdown {
m.muListeners.Unlock()
- closeNetListener(netln)
+ closeNetListener(ctx, netln)
return nil, nil, verror.New(stream.ErrBadState, nil, verror.New(errAlreadyShutdown, nil))
}
- ln := newNetListener(m, netln, principal, blessings, opts)
+ ln := newNetListener(ctx, m, netln, blessings, opts)
m.listeners[ln] = true
m.muListeners.Unlock()
ep := &inaming.Endpoint{
@@ -244,8 +248,8 @@
return ln, ep, nil
}
-func (m *manager) remoteListen(proxy naming.Endpoint, principal security.Principal, listenerOpts []stream.ListenerOpt) (stream.Listener, *inaming.Endpoint, error) {
- ln, ep, err := newProxyListener(m, proxy, principal, listenerOpts)
+func (m *manager) remoteListen(ctx *context.T, proxy naming.Endpoint, listenerOpts []stream.ListenerOpt) (stream.Listener, *inaming.Endpoint, error) {
+ ln, ep, err := newProxyListener(ctx, m, proxy, listenerOpts)
if err != nil {
return nil, nil, err
}
@@ -265,13 +269,13 @@
for _, vf := range vifs {
total += vf.ShutdownVCs(remote)
}
- vlog.VI(1).Infof("ShutdownEndpoint(%q) closed %d VCs", remote, total)
+ m.ctx.VI(1).Infof("ShutdownEndpoint(%q) closed %d VCs", remote, total)
}
-func closeNetListener(ln net.Listener) {
+func closeNetListener(ctx *context.T, ln net.Listener) {
addr := ln.Addr()
err := ln.Close()
- vlog.VI(1).Infof("Closed net.Listener on (%q, %q): %v", addr.Network(), addr, err)
+ ctx.VI(1).Infof("Closed net.Listener on (%q, %q): %v", addr.Network(), addr, err)
}
func (m *manager) removeListener(ln listener) {
diff --git a/runtime/internal/rpc/stream/manager/manager_test.go b/runtime/internal/rpc/stream/manager/manager_test.go
index 6fe2ddd..2ec4eb4 100644
--- a/runtime/internal/rpc/stream/manager/manager_test.go
+++ b/runtime/internal/rpc/stream/manager/manager_test.go
@@ -15,12 +15,12 @@
"sort"
"strconv"
"strings"
+ "sync"
"syscall"
"testing"
"time"
- "v.io/x/lib/vlog"
-
+ "v.io/v23"
"v.io/v23/naming"
"v.io/v23/rpc"
"v.io/v23/security"
@@ -41,8 +41,9 @@
// we need to set runtime.GOMAXPROCS.
func TestMain(m *testing.M) {
test.Init()
+
// test.Init sets GOMAXPROCS to NumCPU. We want to force
// GOMAXPROCS to remain at 1, in order to trigger a particular race
// condition that occurs when closing the server; also, using 1 cpu
// introduces less variance in the behavior of the test.
runtime.GOMAXPROCS(1)
@@ -51,13 +52,16 @@
}
func testSimpleFlow(t *testing.T, protocol string) {
- server := InternalNew(naming.FixedRoutingID(0x55555555))
- client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
+ server := InternalNew(ctx, naming.FixedRoutingID(0x55555555))
+ client := InternalNew(ctx, naming.FixedRoutingID(0xcccccccc))
pclient := testutil.NewPrincipal("client")
pclient2 := testutil.NewPrincipal("client2")
pserver := testutil.NewPrincipal("server")
+ ctx, _ = v23.WithPrincipal(ctx, pserver)
- ln, ep, err := server.Listen(protocol, "127.0.0.1:0", pserver, pserver.BlessingStore().Default())
+ ln, ep, err := server.Listen(ctx, protocol, "127.0.0.1:0", pserver.BlessingStore().Default())
if err != nil {
t.Fatal(err)
}
@@ -66,7 +70,8 @@
var clientVC stream.VC
var clientF1 stream.Flow
go func() {
- if clientVC, err = client.Dial(ep, pclient); err != nil {
+ cctx, _ := v23.WithPrincipal(ctx, pclient)
+ if clientVC, err = client.Dial(cctx, ep); err != nil {
t.Errorf("Dial(%q) failed: %v", ep, err)
return
}
@@ -134,7 +139,8 @@
// Opening a new VC should fail fast. Note that we need to use a different
// principal since the client doesn't expect a response from a server
// when re-using VIF authentication.
- if _, err := client.Dial(ep, pclient2); err == nil {
+ cctx, _ := v23.WithPrincipal(ctx, pclient2)
+ if _, err := client.Dial(cctx, ep); err == nil {
t.Errorf("Should not be able to Dial after listener is closed")
}
}
@@ -148,13 +154,16 @@
}
func TestConnectionTimeout(t *testing.T) {
- client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
+ client := InternalNew(ctx, naming.FixedRoutingID(0xcccccccc))
ch := make(chan error)
go func() {
// 203.0.113.0 is TEST-NET-3 from RFC5737
ep, _ := inaming.NewEndpoint(naming.FormatEndpoint("tcp", "203.0.113.10:80"))
- _, err := client.Dial(ep, testutil.NewPrincipal("client"), DialTimeout(time.Second))
+ nctx, _ := v23.WithPrincipal(ctx, testutil.NewPrincipal("client"))
+ _, err := client.Dial(nctx, ep, DialTimeout(time.Second))
ch <- err
}()
@@ -169,16 +178,21 @@
}
func testAuthenticatedByDefault(t *testing.T, protocol string) {
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
var (
- server = InternalNew(naming.FixedRoutingID(0x55555555))
- client = InternalNew(naming.FixedRoutingID(0xcccccccc))
+ server = InternalNew(ctx, naming.FixedRoutingID(0x55555555))
+ client = InternalNew(ctx, naming.FixedRoutingID(0xcccccccc))
clientPrincipal = testutil.NewPrincipal("client")
serverPrincipal = testutil.NewPrincipal("server")
clientKey = clientPrincipal.PublicKey()
serverBlessings = serverPrincipal.BlessingStore().Default()
+ cctx, _ = v23.WithPrincipal(ctx, clientPrincipal)
+ sctx, _ = v23.WithPrincipal(ctx, serverPrincipal)
)
- ln, ep, err := server.Listen(protocol, "127.0.0.1:0", serverPrincipal, serverPrincipal.BlessingStore().Default())
+
+ ln, ep, err := server.Listen(sctx, protocol, "127.0.0.1:0", serverPrincipal.BlessingStore().Default())
if err != nil {
t.Fatal(err)
}
@@ -216,7 +230,7 @@
}()
go func() {
- vc, err := client.Dial(ep, clientPrincipal)
+ vc, err := client.Dial(cctx, ep)
if err != nil {
errs <- err
return
@@ -251,11 +265,14 @@
func numVIFs(m stream.Manager) int { return len(m.(*manager).vifs.List()) }
func TestListenEndpoints(t *testing.T) {
- server := InternalNew(naming.FixedRoutingID(0xcafe))
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
+ server := InternalNew(ctx, naming.FixedRoutingID(0xcafe))
principal := testutil.NewPrincipal("test")
+ ctx, _ = v23.WithPrincipal(ctx, principal)
blessings := principal.BlessingStore().Default()
- ln1, ep1, err1 := server.Listen("tcp", "127.0.0.1:0", principal, blessings)
- ln2, ep2, err2 := server.Listen("tcp", "127.0.0.1:0", principal, blessings)
+ ln1, ep1, err1 := server.Listen(ctx, "tcp", "127.0.0.1:0", blessings)
+ ln2, ep2, err2 := server.Listen(ctx, "tcp", "127.0.0.1:0", blessings)
// Since "127.0.0.1:0" was used as the network address, a random port will be
// assigned in each case. The endpoint should include that random port.
if err1 != nil {
@@ -280,14 +297,15 @@
}
}
-func acceptLoop(ln stream.Listener) {
+func acceptLoop(wg *sync.WaitGroup, ln stream.Listener) {
for {
f, err := ln.Accept()
if err != nil {
- return
+ break
}
f.Close()
}
+ wg.Done()
}
func TestCloseListener(t *testing.T) {
@@ -299,48 +317,62 @@
}
func testCloseListener(t *testing.T, protocol string) {
- server := InternalNew(naming.FixedRoutingID(0x5e97e9))
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
+ server := InternalNew(ctx, naming.FixedRoutingID(0x5e97e9))
pclient := testutil.NewPrincipal("client")
pserver := testutil.NewPrincipal("server")
+ cctx, _ := v23.WithPrincipal(ctx, pclient)
+ sctx, _ := v23.WithPrincipal(ctx, pserver)
blessings := pserver.BlessingStore().Default()
-
- ln, ep, err := server.Listen(protocol, "127.0.0.1:0", pserver, blessings)
+ ln, ep, err := server.Listen(sctx, protocol, "127.0.0.1:0", blessings)
if err != nil {
t.Fatal(err)
}
+ var wg sync.WaitGroup
+ wg.Add(1)
// Server will just listen for flows and close them.
- go acceptLoop(ln)
- client := InternalNew(naming.FixedRoutingID(0xc1e41))
- if _, err = client.Dial(ep, pclient); err != nil {
+ go acceptLoop(&wg, ln)
+ client := InternalNew(ctx, naming.FixedRoutingID(0xc1e41))
+ if _, err = client.Dial(cctx, ep); err != nil {
t.Fatal(err)
}
ln.Close()
- client = InternalNew(naming.FixedRoutingID(0xc1e42))
- if _, err := client.Dial(ep, pclient); err == nil {
+ client = InternalNew(ctx, naming.FixedRoutingID(0xc1e42))
+ if _, err := client.Dial(cctx, ep); err == nil {
t.Errorf("client.Dial(%q) should have failed", ep)
}
+ wg.Wait()
}
func TestShutdown(t *testing.T) {
- server := InternalNew(naming.FixedRoutingID(0x5e97e9))
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
+ server := InternalNew(ctx, naming.FixedRoutingID(0x5e97e9))
principal := testutil.NewPrincipal("test")
+ ctx, _ = v23.WithPrincipal(ctx, principal)
blessings := principal.BlessingStore().Default()
- ln, _, err := server.Listen("tcp", "127.0.0.1:0", principal, blessings)
+ ln, _, err := server.Listen(ctx, "tcp", "127.0.0.1:0", blessings)
if err != nil {
t.Fatal(err)
}
+ var wg sync.WaitGroup
+ wg.Add(1)
// Server will just listen for flows and close them.
- go acceptLoop(ln)
+ go acceptLoop(&wg, ln)
if n, expect := numListeners(server), 1; n != expect {
t.Errorf("expecting %d listeners, got %d for %s", n, expect, debugString(server))
}
server.Shutdown()
- if _, _, err := server.Listen("tcp", "127.0.0.1:0", principal, blessings); err == nil {
+ if _, _, err := server.Listen(ctx, "tcp", "127.0.0.1:0", blessings); err == nil {
t.Error("server should have shut down")
}
if n, expect := numListeners(server), 0; n != expect {
t.Errorf("expecting %d listeners, got %d for %s", n, expect, debugString(server))
}
+ wg.Wait()
+ fmt.Fprintf(os.Stderr, "DONE\n")
}
func TestShutdownEndpoint(t *testing.T) {
@@ -352,19 +384,25 @@
}
func testShutdownEndpoint(t *testing.T, protocol string) {
- server := InternalNew(naming.FixedRoutingID(0x55555555))
- client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
+ server := InternalNew(ctx, naming.FixedRoutingID(0x55555555))
+ client := InternalNew(ctx, naming.FixedRoutingID(0xcccccccc))
principal := testutil.NewPrincipal("test")
+ ctx, _ = v23.WithPrincipal(ctx, principal)
- ln, ep, err := server.Listen(protocol, "127.0.0.1:0", principal, principal.BlessingStore().Default())
+ ln, ep, err := server.Listen(ctx, protocol, "127.0.0.1:0", principal.BlessingStore().Default())
if err != nil {
t.Fatal(err)
}
+ var wg sync.WaitGroup
+ wg.Add(1)
// Server will just listen for flows and close them.
- go acceptLoop(ln)
+ go acceptLoop(&wg, ln)
- vc, err := client.Dial(ep, testutil.NewPrincipal("client"))
+ cctx, _ := v23.WithPrincipal(ctx, testutil.NewPrincipal("client"))
+ vc, err := client.Dial(cctx, ep)
if err != nil {
t.Fatal(err)
}
@@ -375,23 +413,29 @@
if f, err := vc.Connect(); f != nil || err == nil {
t.Errorf("vc.Connect unexpectedly succeeded: (%v, %v)", f, err)
}
+ ln.Close()
+ wg.Wait()
}
func TestStartTimeout(t *testing.T) {
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
const (
startTime = 5 * time.Millisecond
)
var (
- server = InternalNew(naming.FixedRoutingID(0x55555555))
+ server = InternalNew(ctx, naming.FixedRoutingID(0x55555555))
pserver = testutil.NewPrincipal("server")
lopts = []stream.ListenerOpt{vc.StartTimeout{startTime}}
)
+ sctx, _ := v23.WithPrincipal(ctx, pserver)
+
// Pause the start timers.
triggerTimers := vif.SetFakeTimers()
- ln, ep, err := server.Listen("tcp", "127.0.0.1:0", pserver, pserver.BlessingStore().Default(), lopts...)
+ ln, ep, err := server.Listen(sctx, "tcp", "127.0.0.1:0", pserver.BlessingStore().Default(), lopts...)
if err != nil {
t.Fatal(err)
}
@@ -421,6 +465,8 @@
}
func testIdleTimeout(t *testing.T, testServer bool) {
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
const (
idleTime = 10 * time.Millisecond
// We use a long wait time here since it takes some time to handle VC close
@@ -429,14 +475,17 @@
)
var (
- server = InternalNew(naming.FixedRoutingID(0x55555555))
- client = InternalNew(naming.FixedRoutingID(0xcccccccc))
+ server = InternalNew(ctx, naming.FixedRoutingID(0x55555555))
+ client = InternalNew(ctx, naming.FixedRoutingID(0xcccccccc))
pclient = testutil.NewPrincipal("client")
pserver = testutil.NewPrincipal("server")
+ cctx, _ = v23.WithPrincipal(ctx, pclient)
+ sctx, _ = v23.WithPrincipal(ctx, pserver)
opts []stream.VCOpt
lopts []stream.ListenerOpt
)
+
if testServer {
lopts = []stream.ListenerOpt{vc.IdleTimeout{idleTime}}
} else {
@@ -446,7 +495,7 @@
// Pause the idle timers.
triggerTimers := vif.SetFakeTimers()
- ln, ep, err := server.Listen("tcp", "127.0.0.1:0", pserver, pserver.BlessingStore().Default(), lopts...)
+ ln, ep, err := server.Listen(sctx, "tcp", "127.0.0.1:0", pserver.BlessingStore().Default(), lopts...)
if err != nil {
t.Fatal(err)
}
@@ -458,7 +507,7 @@
}
}()
- vc, err := client.Dial(ep, pclient, opts...)
+ vc, err := client.Dial(cctx, ep, opts...)
if err != nil {
t.Fatalf("client.Dial(%q) failed: %v", ep, err)
}
@@ -516,16 +565,19 @@
*/
func testMultipleVCs(t *testing.T, protocol string) {
- server := InternalNew(naming.FixedRoutingID(0x55555555))
- client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
+ server := InternalNew(ctx, naming.FixedRoutingID(0x55555555))
+ client := InternalNew(ctx, naming.FixedRoutingID(0xcccccccc))
principal := testutil.NewPrincipal("test")
+ sctx, _ := v23.WithPrincipal(ctx, principal)
const nVCs = 2
const data = "bugs bunny"
// Have the server read from each flow and write to rchan.
rchan := make(chan string)
- ln, ep, err := server.Listen(protocol, "127.0.0.1:0", principal, principal.BlessingStore().Default())
+ ln, ep, err := server.Listen(sctx, protocol, "127.0.0.1:0", principal.BlessingStore().Default())
if err != nil {
t.Fatal(err)
}
@@ -562,7 +614,9 @@
var vcs [nVCs]stream.VC
for i := 0; i < nVCs; i++ {
var err error
- vcs[i], err = client.Dial(ep, testutil.NewPrincipal("client"))
+ pclient := testutil.NewPrincipal("client")
+ cctx, _ := v23.WithPrincipal(ctx, pclient)
+ vcs[i], err = client.Dial(cctx, ep)
if err != nil {
t.Fatal(err)
}
@@ -605,20 +659,25 @@
}
func TestAddressResolution(t *testing.T) {
- server := InternalNew(naming.FixedRoutingID(0x55555555))
- client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
+ server := InternalNew(ctx, naming.FixedRoutingID(0x55555555))
+ client := InternalNew(ctx, naming.FixedRoutingID(0xcccccccc))
principal := testutil.NewPrincipal("test")
+ sctx, _ := v23.WithPrincipal(ctx, principal)
// Using "tcp4" instead of "tcp" because the latter can end up with IPv6
// addresses and our Google Compute Engine integration test machines cannot
// resolve IPv6 addresses.
// As of April 2014, https://developers.google.com/compute/docs/networking
// said that IPv6 is not yet supported.
- ln, ep, err := server.Listen("tcp4", "127.0.0.1:0", principal, principal.BlessingStore().Default())
+ ln, ep, err := server.Listen(sctx, "tcp4", "127.0.0.1:0", principal.BlessingStore().Default())
if err != nil {
t.Fatal(err)
}
- go acceptLoop(ln)
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go acceptLoop(&wg, ln)
// We'd like an endpoint that contains an address that's different than the
// one used for the connection. In practice this is awkward to achieve since
@@ -634,7 +693,9 @@
// Dial multiple VCs
for i := 0; i < 2; i++ {
- if _, err = client.Dial(nep, testutil.NewPrincipal("client")); err != nil {
+ pclient := testutil.NewPrincipal("client")
+ cctx, _ := v23.WithPrincipal(ctx, pclient)
+ if _, err = client.Dial(cctx, nep); err != nil {
t.Fatalf("Dial #%d failed: %v", i, err)
}
}
@@ -642,6 +703,8 @@
if n := numVIFs(client); n != 1 {
t.Errorf("Client has %d VIFs, want 1\n%v", n, debugString(client))
}
+ ln.Close()
+ wg.Wait()
// TODO(ashankar): While a VIF can be re-used to Dial from the server
// to the client, currently there is no way to have the client "listen"
// on the same VIF. It can listen on a VC for new flows, but it cannot
@@ -657,9 +720,14 @@
}
func testServerRestartDuringClientLifetime(t *testing.T, protocol string) {
- client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
+ client := InternalNew(ctx, naming.FixedRoutingID(0xcccccccc))
pclient := testutil.NewPrincipal("client")
pclient2 := testutil.NewPrincipal("client2")
+ ctx1, _ := v23.WithPrincipal(ctx, pclient)
+ ctx2, _ := v23.WithPrincipal(ctx, pclient2)
+
sh, err := modules.NewShell(nil, nil, testing.Verbose(), t)
if err != nil {
t.Fatalf("unexpected error: %s", err)
@@ -674,7 +742,7 @@
if err != nil {
t.Fatalf("inaming.NewEndpoint(%q): %v", epstr, err)
}
- if _, err := client.Dial(ep, pclient); err != nil {
+ if _, err := client.Dial(ctx1, ep); err != nil {
t.Fatal(err)
}
h.Shutdown(nil, os.Stderr)
@@ -682,7 +750,7 @@
// A new VC cannot be created since the server is dead. Note that we need to
// use a different principal since the client doesn't expect a response from
// a server when re-using VIF authentication.
- if _, err := client.Dial(ep, pclient2); err == nil {
+ if _, err := client.Dial(ctx2, ep); err == nil {
t.Fatal("Expected client.Dial to fail since server is dead")
}
@@ -698,7 +766,7 @@
if got, want := ep.Addr().String(), ep2.Addr().String(); got != want {
t.Fatalf("Got %q, want %q", got, want)
}
- if _, err := client.Dial(ep2, pclient); err != nil {
+ if _, err := client.Dial(ctx1, ep2); err != nil {
t.Fatal(err)
}
}
@@ -706,9 +774,12 @@
var runServer = modules.Register(runServerFunc, "runServer")
func runServerFunc(env *modules.Env, args ...string) error {
- server := InternalNew(naming.FixedRoutingID(0x55555555))
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
+ server := InternalNew(ctx, naming.FixedRoutingID(0x55555555))
principal := testutil.NewPrincipal("test")
- _, ep, err := server.Listen(args[0], args[1], principal, principal.BlessingStore().Default())
+ ctx, _ = v23.WithPrincipal(ctx, principal)
+ _, ep, err := server.Listen(ctx, args[0], args[1], principal.BlessingStore().Default())
if err != nil {
fmt.Fprintln(env.Stderr, err)
return err
@@ -751,7 +822,6 @@
func writeLine(f stream.Flow, data string) error {
data = data + "\n"
- vlog.VI(1).Infof("write sending %d bytes", len(data))
if n, err := f.Write([]byte(data)); err != nil {
return fmt.Errorf("Write returned (%d, %v)", n, err)
}
@@ -759,10 +829,13 @@
}
func TestRegistration(t *testing.T) {
- server := InternalNew(naming.FixedRoutingID(0x55555555))
- client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
+ server := InternalNew(ctx, naming.FixedRoutingID(0x55555555))
+ client := InternalNew(ctx, naming.FixedRoutingID(0xcccccccc))
principal := testutil.NewPrincipal("server")
blessings := principal.BlessingStore().Default()
+ ctx, _ = v23.WithPrincipal(ctx, principal)
dialer := func(_, _ string, _ time.Duration) (net.Conn, error) {
return nil, fmt.Errorf("tn.Dial")
@@ -775,12 +848,12 @@
}
rpc.RegisterProtocol("tn", dialer, resolver, listener)
- _, _, err := server.Listen("tnx", "127.0.0.1:0", principal, blessings)
+ _, _, err := server.Listen(ctx, "tnx", "127.0.0.1:0", blessings)
if err == nil || !strings.Contains(err.Error(), "unknown network: tnx") {
t.Fatalf("expected error is missing (%v)", err)
}
- _, _, err = server.Listen("tn", "127.0.0.1:0", principal, blessings)
+ _, _, err = server.Listen(ctx, "tn", "127.0.0.1:0", blessings)
if err == nil || !strings.Contains(err.Error(), "tn.Listen") {
t.Fatalf("expected error is missing (%v)", err)
}
@@ -794,23 +867,26 @@
t.Errorf("got %t, want %t", got, want)
}
- _, ep, err := server.Listen("tn", "127.0.0.1:0", principal, blessings)
+ _, ep, err := server.Listen(ctx, "tn", "127.0.0.1:0", blessings)
if err != nil {
t.Errorf("unexpected error %s", err)
}
- _, err = client.Dial(ep, testutil.NewPrincipal("client"))
+ cctx, _ := v23.WithPrincipal(ctx, testutil.NewPrincipal("client"))
+ _, err = client.Dial(cctx, ep)
if err == nil || !strings.Contains(err.Error(), "tn.Resolve") {
t.Fatalf("expected error is missing (%v)", err)
}
}
func TestBlessingNamesInEndpoint(t *testing.T) {
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
var (
p = testutil.NewPrincipal("default")
b, _ = p.BlessSelf("dev.v.io/users/foo@bar.com/devices/desktop/app/myapp")
- server = InternalNew(naming.FixedRoutingID(0x1))
+ server = InternalNew(ctx, naming.FixedRoutingID(0x1))
tests = []struct {
principal security.Principal
@@ -838,10 +914,12 @@
},
}
)
+
// p must recognize its own blessings!
p.AddToRoots(b)
for idx, test := range tests {
- ln, ep, err := server.Listen("tcp", "127.0.0.1:0", test.principal, test.blessings)
+ sctx, _ := v23.WithPrincipal(ctx, test.principal)
+ ln, ep, err := server.Listen(sctx, "tcp", "127.0.0.1:0", test.blessings)
if (err != nil) != test.err {
t.Errorf("test #%d: Got error %v, wanted error: %v", idx, err, test.err)
}
@@ -859,6 +937,8 @@
}
func TestVIFCleanupWhenFDLimitIsReached(t *testing.T) {
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
sh, err := modules.NewShell(nil, nil, testing.Verbose(), t)
if err != nil {
t.Fatal(err)
@@ -890,9 +970,10 @@
// has reached its file descriptor limit.
nattempts := 0
for i := 0; i < 2*nfiles; i++ {
- client := InternalNew(naming.FixedRoutingID(uint64(i)))
+ client := InternalNew(ctx, naming.FixedRoutingID(uint64(i)))
defer client.Shutdown()
principal := testutil.NewPrincipal(fmt.Sprintf("client%d", i))
+ cctx, _ := v23.WithPrincipal(ctx, principal)
connected := false
for !connected {
nattempts++
@@ -900,7 +981,7 @@
// was at its limit, it might fail. However, this
// failure will trigger the "kill connections" logic at
// the server and eventually the client should succeed.
- vc, err := client.Dial(ep, principal)
+ vc, err := client.Dial(cctx, ep)
if err != nil {
continue
}
@@ -920,6 +1001,7 @@
t.Logf("%s", stderr.String())
t.Fatal(err)
}
+ fmt.Fprintf(os.Stderr, "11\n")
if log := expect.NewSession(t, bytes.NewReader(stderr.Bytes()), time.Minute).ExpectSetEventuallyRE("listener.go.*Killing [1-9][0-9]* Conns"); len(log) == 0 {
t.Errorf("Failed to find log message talking about killing Conns in:\n%v", stderr.String())
}
@@ -928,21 +1010,26 @@
}
func TestConcurrentDials(t *testing.T) {
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
// Concurrent Dials to the same network and address should result in only one VIF.
- server := InternalNew(naming.FixedRoutingID(0x55555555))
- client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+ server := InternalNew(ctx, naming.FixedRoutingID(0x55555555))
+ client := InternalNew(ctx, naming.FixedRoutingID(0xcccccccc))
principal := testutil.NewPrincipal("test")
+ ctx, _ = v23.WithPrincipal(ctx, principal)
// Using "tcp4" instead of "tcp" because the latter can end up with IPv6
// addresses and our Google Compute Engine integration test machines cannot
// resolve IPv6 addresses.
// As of April 2014, https://developers.google.com/compute/docs/networking
// said that IPv6 is not yet supported.
- ln, ep, err := server.Listen("tcp4", "127.0.0.1:0", principal, principal.BlessingStore().Default())
+ ln, ep, err := server.Listen(ctx, "tcp4", "127.0.0.1:0", principal.BlessingStore().Default())
if err != nil {
t.Fatal(err)
}
- go acceptLoop(ln)
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go acceptLoop(&wg, ln)
nep := &inaming.Endpoint{
Protocol: ep.Addr().Network(),
@@ -954,7 +1041,8 @@
errCh := make(chan error, 10)
for i := 0; i < 10; i++ {
go func() {
- _, err := client.Dial(nep, testutil.NewPrincipal("client"))
+ cctx, _ := v23.WithPrincipal(ctx, testutil.NewPrincipal("client"))
+ _, err := client.Dial(cctx, nep)
errCh <- err
}()
}
@@ -967,4 +1055,6 @@
if n := numVIFs(client); n != 1 {
t.Errorf("Client has %d VIFs, want 1\n%v", n, debugString(client))
}
+ ln.Close()
+ wg.Wait()
}
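Note: the WaitGroup threading added throughout this file follows one shutdown
ordering, condensed below: closing the listener makes Accept fail, the accept
loop breaks and signals the WaitGroup, and Wait then guarantees the goroutine
has exited before the test returns.

    var wg sync.WaitGroup
    wg.Add(1)
    go acceptLoop(&wg, ln) // wg.Done() runs once ln.Accept() returns an error
    // ... exercise the connection ...
    ln.Close() // causes ln.Accept() to fail in the goroutine
    wg.Wait()  // the goroutine has exited; safe to end the test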
diff --git a/runtime/internal/rpc/stream/message/message.go b/runtime/internal/rpc/stream/message/message.go
index 5e4b790..48f8fd2 100644
--- a/runtime/internal/rpc/stream/message/message.go
+++ b/runtime/internal/rpc/stream/message/message.go
@@ -67,10 +67,8 @@
"fmt"
"io"
- "v.io/x/lib/vlog"
-
"v.io/v23/verror"
-
+ "v.io/x/ref/internal/logger"
"v.io/x/ref/runtime/internal/lib/iobuf"
"v.io/x/ref/runtime/internal/rpc/stream/crypto"
"v.io/x/ref/runtime/internal/rpc/stream/id"
@@ -247,7 +245,7 @@
if slice.ExpandFront(space) {
return slice
}
- vlog.VI(10).Infof("Failed to expand slice by %d bytes. Copying", space)
+ logger.Global().VI(10).Infof("Failed to expand slice by %d bytes. Copying", space)
contents := make([]byte, slice.Size()+int(space))
copy(contents[space:], slice.Contents)
slice.Release()
diff --git a/runtime/internal/rpc/stream/model.go b/runtime/internal/rpc/stream/model.go
index baf4077..aac6c1c 100644
--- a/runtime/internal/rpc/stream/model.go
+++ b/runtime/internal/rpc/stream/model.go
@@ -7,6 +7,7 @@
import (
"io"
+ "v.io/v23/context"
"v.io/v23/naming"
"v.io/v23/security"
)
@@ -116,13 +117,18 @@
RPCStreamVCOpt()
}
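+// AuthenticatedVC is an option for Dial and Listen that controls whether VC
+// authentication is performed; see the Manager documentation below.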
+type AuthenticatedVC bool
+
+func (AuthenticatedVC) RPCStreamVCOpt() {}
+func (AuthenticatedVC) RPCStreamListenerOpt() {}
+
// Manager is the interface for managing the creation of VCs.
type Manager interface {
// Listen creates a Listener that can be used to accept Flows initiated
// with the provided network address.
//
// For example:
- // ln, ep, err := Listen("tcp", ":0", principal)
+ // ln, ep, err := Listen(ctx, "tcp", ":0")
// for {
// flow, err := ln.Accept()
// // process flow
@@ -130,15 +136,14 @@
// can be used to accept Flows initiated by remote processes to the endpoint
// identified by the returned Endpoint.
//
- // principal is used during authentication. If principal is nil, then the Listener
- // expects to be used for unauthenticated, unencrypted communication.
- // blessings are the Blessings presented to the Client during authentication.
- Listen(protocol, address string, principal security.Principal, blessings security.Blessings, opts ...ListenerOpt) (Listener, naming.Endpoint, error)
+ // ctx carries the principal to be used during authentication; blessings are
+ // the blessings presented to the client during authentication.
+ Listen(ctx *context.T, protocol, address string, blessings security.Blessings, opts ...ListenerOpt) (Listener, naming.Endpoint, error)
// Dial creates a VC to the provided remote endpoint.
- // principal is used during authentication. If principal is nil, then the VC expects
- // to be used for unauthenticated, unencrypted communication.
- Dial(remote naming.Endpoint, principal security.Principal, opts ...VCOpt) (VC, error)
+ // ctx contains the principal to be used during authentication.
+ // Authentication may be disabled via the AuthenticatedVC option.
+ Dial(ctx *context.T, remote naming.Endpoint, opts ...VCOpt) (VC, error)
// ShutdownEndpoint closes all VCs (and Flows and Listeners over it)
// involving the provided remote endpoint.
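Note: putting the new Manager contract together, a hedged end-to-end sketch of
the updated Listen/Dial flow (names follow the tests earlier in this CL):

    sctx, _ := v23.WithPrincipal(ctx, pserver)
    ln, ep, err := sm.Listen(sctx, "tcp", "127.0.0.1:0", pserver.BlessingStore().Default())
    // ...
    cctx, _ := v23.WithPrincipal(ctx, pclient)
    vc, err := cm.Dial(cctx, ep)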
diff --git a/runtime/internal/rpc/stream/proxy/proxy.go b/runtime/internal/rpc/stream/proxy/proxy.go
index 4707042..5d30d3c 100644
--- a/runtime/internal/rpc/stream/proxy/proxy.go
+++ b/runtime/internal/rpc/stream/proxy/proxy.go
@@ -12,11 +12,9 @@
"time"
"v.io/x/lib/netstate"
- "v.io/x/lib/vlog"
"v.io/v23"
"v.io/v23/context"
- "v.io/v23/logging"
"v.io/v23/naming"
"v.io/v23/rpc"
"v.io/v23/security"
@@ -94,6 +92,7 @@
// associated with the process at the other end of the network connection.
type process struct {
proxy *Proxy
+ ctx *context.T
conn net.Conn
pool *iobuf.Pool
reader *iobuf.Reader
@@ -138,8 +137,9 @@
// servermap is a concurrent-access safe map from the RoutingID of a server exporting itself
// through the proxy to the underlying network connection that the server is found on.
type servermap struct {
- mu sync.Mutex
- m map[naming.RoutingID]*server
+ ctx *context.T
+ mu sync.Mutex
+ m map[naming.RoutingID]*server
}
func (m *servermap) Add(server *server) error {
@@ -150,7 +150,7 @@
return verror.New(stream.ErrProxy, nil, verror.New(errAlreadyProxied, nil, key))
}
m.m[key] = server
- proxyLog().Infof("Started proxying server: %v", server)
+ proxyLog(m.ctx, "Started proxying server: %v", server)
return nil
}
@@ -159,7 +159,7 @@
m.mu.Lock()
if m.m[key] != nil {
delete(m.m, key)
- proxyLog().Infof("Stopped proxying server: %v", server)
+ proxyLog(m.ctx, "Stopped proxying server: %v", server)
}
m.mu.Unlock()
}
@@ -239,7 +239,6 @@
return nil, verror.New(stream.ErrProxy, nil, verror.New(errListenFailed, nil, network, address, err))
}
pub, _, err := netstate.PossibleAddresses(ln.Addr().Network(), ln.Addr().String(), spec.AddressChooser)
- vlog.Infof("PUB: %s", pub)
if err != nil {
ln.Close()
return nil, verror.New(stream.ErrProxy, nil, verror.New(errAccessibleAddresses, nil, err))
@@ -256,7 +255,7 @@
ln: ln,
rid: rid,
authorizer: authorizer,
- servers: &servermap{m: make(map[naming.RoutingID]*server)},
+ servers: &servermap{ctx: ctx, m: make(map[naming.RoutingID]*server)},
processes: make(map[*process]struct{}),
// TODO(cnicolaou): should use all of the available addresses
pubAddress: pub[0].String(),
@@ -273,11 +272,11 @@
}
func (p *Proxy) listenLoop() {
- proxyLog().Infof("Proxy listening on (%q, %q): %v", p.ln.Addr().Network(), p.ln.Addr(), p.endpoint())
+ proxyLog(p.ctx, "Proxy listening on (%q, %q): %v", p.ln.Addr().Network(), p.ln.Addr(), p.endpoint())
for {
conn, err := p.ln.Accept()
if err != nil {
- proxyLog().Infof("Exiting listenLoop of proxy %q: %v", p.endpoint(), err)
+ proxyLog(p.ctx, "Exiting listenLoop of proxy %q: %v", p.endpoint(), err)
return
}
go p.acceptProcess(conn)
@@ -295,11 +294,12 @@
cipher, _, err := vif.AuthenticateAsServer(conn, reader, nil, nil, p.principal, blessings, nil)
if err != nil {
- processLog().Infof("Process %v failed to authenticate: %s", p, err)
+ processLog(p.ctx, "Process %v failed to authenticate: %s", p, err)
return
}
process := &process{
+ ctx: p.ctx,
proxy: p,
conn: conn,
pool: pool,
@@ -324,7 +324,7 @@
go process.writeLoop()
go process.readLoop()
- processLog().Infof("Started process %v", process)
+ processLog(p.ctx, "Started process %v", process)
}
func (p *Proxy) removeProcess(process *process) {
@@ -367,7 +367,7 @@
}
enc := vom.NewEncoder(conn)
if err := enc.Encode(response); err != nil {
- proxyLog().Infof("Failed to encode response %#v for server %v", response, server)
+ proxyLog(p.ctx, "Failed to encode response %#v for server %v", response, server)
server.Close(verror.New(stream.ErrProxy, nil, verror.New(errVomEncodeResponse, nil, err)))
return
}
@@ -432,10 +432,10 @@
}
}
-func startRoutingVC(srcVCI, dstVCI id.VC, srcProcess, dstProcess *process) {
+func startRoutingVC(ctx *context.T, srcVCI, dstVCI id.VC, srcProcess, dstProcess *process) {
dstProcess.AddRoute(dstVCI, &destination{VCI: srcVCI, Process: srcProcess})
srcProcess.AddRoute(srcVCI, &destination{VCI: dstVCI, Process: dstProcess})
- vcLog().Infof("Routing (VCI %d @ [%s]) <-> (VCI %d @ [%s])", srcVCI, srcProcess, dstVCI, dstProcess)
+ vcLog(ctx, "Routing (VCI %d @ [%s]) <-> (VCI %d @ [%s])", srcVCI, srcProcess, dstVCI, dstProcess)
}
// Endpoint returns the endpoint of the proxy service. By Dialing a VC to this
@@ -476,7 +476,7 @@
}
vci, fid := unpackIDs(w.ID())
if vc := p.ServerVC(vci); vc != nil {
- queueDataMessages(bufs, vc, fid, p.queue)
+ queueDataMessages(p.ctx, bufs, vc, fid, p.queue)
if len(bufs) == 0 {
m := &message.Data{VCI: vci, Flow: fid}
m.SetClose()
@@ -495,17 +495,17 @@
}
}
-func queueDataMessages(bufs []*iobuf.Slice, vc *vc.VC, fid id.Flow, q *upcqueue.T) {
+func queueDataMessages(ctx *context.T, bufs []*iobuf.Slice, vc *vc.VC, fid id.Flow, q *upcqueue.T) {
for ix, b := range bufs {
m := &message.Data{VCI: vc.VCI(), Flow: fid}
var err error
if m.Payload, err = vc.Encrypt(fid, b); err != nil {
- msgLog().Infof("vc.Encrypt failed. VC:%v Flow:%v Error:%v", vc, fid, err)
+ msgLog(ctx, "vc.Encrypt failed. VC:%v Flow:%v Error:%v", vc, fid, err)
releaseBufs(ix+1, bufs)
return
}
if err = q.Put(m); err != nil {
- msgLog().Infof("Failed to enqueue data message %v: %v", m, err)
+ msgLog(ctx, "Failed to enqueue data message %v: %v", m, err)
m.Release()
releaseBufs(ix+1, bufs)
return
@@ -514,40 +514,40 @@
}
func (p *process) writeLoop() {
- defer processLog().Infof("Exited writeLoop for %v", p)
+ defer processLog(p.ctx, "Exited writeLoop for %v", p)
defer p.Close()
for {
item, err := p.queue.Get(nil)
if err != nil {
if err != upcqueue.ErrQueueIsClosed {
- processLog().Infof("upcqueue.Get failed on %v: %v", p, err)
+ processLog(p.ctx, "upcqueue.Get failed on %v: %v", p, err)
}
return
}
if err = message.WriteTo(p.conn, item.(message.T), p.ctrlCipher); err != nil {
- processLog().Infof("message.WriteTo on %v failed: %v", p, err)
+ processLog(p.ctx, "message.WriteTo on %v failed: %v", p, err)
return
}
}
}
func (p *process) readLoop() {
- defer processLog().Infof("Exited readLoop for %v", p)
+ defer processLog(p.ctx, "Exited readLoop for %v", p)
defer p.Close()
for {
msg, err := message.ReadFrom(p.reader, p.ctrlCipher)
if err != nil {
- processLog().Infof("Read on %v failed: %v", p, err)
+ processLog(p.ctx, "Read on %v failed: %v", p, err)
return
}
- msgLog().Infof("Received msg: %T = %v", msg, msg)
+ msgLog(p.ctx, "Received msg: %T = %v", msg, msg)
switch m := msg.(type) {
case *message.Data:
if vc := p.ServerVC(m.VCI); vc != nil {
if err := vc.DispatchPayload(m.Flow, m.Payload); err != nil {
- processLog().Infof("Ignoring data message %v from process %v: %v", m, p, err)
+ processLog(p.ctx, "Ignoring data message %v from process %v: %v", m, p, err)
}
if m.Close() {
vc.ShutdownFlow(m.Flow)
@@ -568,7 +568,7 @@
case *message.OpenFlow:
if vc := p.ServerVC(m.VCI); vc != nil {
if err := vc.AcceptFlow(m.Flow); err != nil {
- processLog().Infof("OpenFlow %+v on process %v failed: %v", m, p, err)
+ processLog(p.ctx, "OpenFlow %+v on process %v failed: %v", m, p, err)
cm := &message.Data{VCI: m.VCI, Flow: m.Flow}
cm.SetClose()
p.queue.Put(cm)
@@ -613,7 +613,7 @@
if naming.Compare(dstrid, p.proxy.rid) || naming.Compare(dstrid, naming.NullRoutingID) {
// VC that terminates at the proxy.
// See protocol.vdl for details on the protocol between the server and the proxy.
- vcObj := p.NewServerVC(m)
+ vcObj := p.NewServerVC(p.ctx, m)
// route counters after creating the VC so counters to vc are not lost.
p.proxy.routeCounters(p, m.Counters)
if vcObj != nil {
@@ -662,7 +662,7 @@
break
}
dstVCI := dstprocess.AllocVCI()
- startRoutingVC(srcVCI, dstVCI, p, dstprocess)
+ startRoutingVC(p.ctx, srcVCI, dstVCI, p, dstprocess)
if d = p.Route(srcVCI); d == nil {
p.SendCloseVC(srcVCI, verror.New(stream.ErrProxy, nil, verror.New(errServerVanished, nil, dstrid)))
p.proxy.routeCounters(p, m.Counters)
@@ -691,7 +691,7 @@
p.proxy.routeCounters(p, counters)
default:
- processLog().Infof("Closing %v because of invalid message %T", p, m)
+ processLog(p.ctx, "Closing %v because of invalid message %T", p, m)
return
}
}
@@ -766,14 +766,14 @@
return p.servers[vci]
}
-func (p *process) NewServerVC(m *message.SetupVC) *vc.VC {
+func (p *process) NewServerVC(ctx *context.T, m *message.SetupVC) *vc.VC {
p.mu.Lock()
defer p.mu.Unlock()
if vc := p.servers[m.VCI]; vc != nil {
vc.Close(verror.New(stream.ErrProxy, nil, verror.New(errDuplicateSetupVC, nil)))
return nil
}
- vc := vc.InternalNew(vc.Params{
+ vc := vc.InternalNew(ctx, vc.Params{
VCI: m.VCI,
LocalEP: m.RemoteEndpoint,
RemoteEP: m.LocalEndpoint,
@@ -782,7 +782,7 @@
Helper: p,
})
p.servers[m.VCI] = vc
- proxyLog().Infof("Registered VC %v from server on process %v", vc, p)
+ proxyLog(p.ctx, "Registered VC %v from server on process %v", vc, p)
return vc
}
@@ -791,7 +791,7 @@
defer p.mu.Unlock()
if vc := p.servers[vci]; vc != nil {
delete(p.servers, vci)
- proxyLog().Infof("Unregistered server VC %v from process %v", vc, p)
+ proxyLog(p.ctx, "Unregistered server VC %v from process %v", vc, p)
return vc
}
return nil
@@ -801,7 +801,7 @@
func (p *process) NotifyOfNewFlow(vci id.VC, fid id.Flow, bytes uint) {
msg := &message.OpenFlow{VCI: vci, Flow: fid, InitialCounters: uint32(bytes)}
if err := p.queue.Put(msg); err != nil {
- processLog().Infof("Failed to send OpenFlow(%+v) on process %v: %v", msg, p, err)
+ processLog(p.ctx, "Failed to send OpenFlow(%+v) on process %v: %v", msg, p, err)
}
}
@@ -812,7 +812,7 @@
msg := &message.AddReceiveBuffers{Counters: message.NewCounters()}
msg.Counters.Add(vci, fid, uint32(bytes))
if err := p.queue.Put(msg); err != nil {
- processLog().Infof("Failed to send AddReceiveBuffers(%+v) on process %v: %v", msg, p, err)
+ processLog(p.ctx, "Failed to send AddReceiveBuffers(%+v) on process %v: %v", msg, p, err)
}
}
@@ -821,10 +821,19 @@
}
// Convenience functions to assist with the logging convention.
-func proxyLog() logging.InfoLog { return vlog.VI(1) }
-func processLog() logging.InfoLog { return vlog.VI(2) }
-func vcLog() logging.InfoLog { return vlog.VI(3) }
-func msgLog() logging.InfoLog { return vlog.VI(4) }
+func proxyLog(ctx *context.T, format string, args ...interface{}) {
+ ctx.VI(1).Infof(format, args...)
+}
+func processLog(ctx *context.T, format string, args ...interface{}) {
+ ctx.VI(2).Infof(format, args...)
+}
+func vcLog(ctx *context.T, format string, args ...interface{}) {
+ ctx.VI(3).Infof(format, args...)
+}
+func msgLog(ctx *context.T, format string, args ...interface{}) {
+ ctx.VI(4).Infof(format, args...)
+}
+
func packIDs(vci id.VC, fid id.Flow) bqueue.ID {
return bqueue.ID(message.MakeCounterID(vci, fid))
}
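A quick sketch (not part of the CL) of how the leveled helpers above are meant to be used: each helper fixes a verbosity level, so call sites pick a level by picking a helper rather than a number:

    proxyLog(ctx, "proxy-level event")     // ctx.VI(1).Infof(...)
    processLog(ctx, "process-level event") // ctx.VI(2).Infof(...)
    vcLog(ctx, "vc-level event")           // ctx.VI(3).Infof(...)
    msgLog(ctx, "message-level event")     // ctx.VI(4).Infof(...)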
diff --git a/runtime/internal/rpc/stream/proxy/proxy_test.go b/runtime/internal/rpc/stream/proxy/proxy_test.go
index f583e9f..a16a51f 100644
--- a/runtime/internal/rpc/stream/proxy/proxy_test.go
+++ b/runtime/internal/rpc/stream/proxy/proxy_test.go
@@ -35,21 +35,21 @@
func TestProxy(t *testing.T) {
ctx, shutdown := v23Init()
defer shutdown()
-
_, shutdown, proxyEp, err := proxy.InternalNew(naming.FixedRoutingID(0xbbbbbbbbbbbbbbbb), ctx, security.AllowEveryone())
if err != nil {
t.Fatal(err)
}
defer shutdown()
principal := testutil.NewPrincipal("test")
+ ctx, _ = v23.WithPrincipal(ctx, principal)
blessings := principal.BlessingStore().Default()
// Create the stream.Manager for the server.
- server1 := manager.InternalNew(naming.FixedRoutingID(0x1111111111111111))
+ server1 := manager.InternalNew(ctx, naming.FixedRoutingID(0x1111111111111111))
defer server1.Shutdown()
// Setup a stream.Listener that will accept VCs and Flows routed
// through the proxy.
- ln1, ep1, err := server1.Listen(proxyEp.Network(), proxyEp.String(), principal, blessings)
+ ln1, ep1, err := server1.Listen(ctx, proxyEp.Network(), proxyEp.String(), blessings)
if err != nil {
t.Logf(verror.DebugString(err))
t.Fatal(err)
@@ -57,18 +57,18 @@
defer ln1.Close()
// Create the stream.Manager for a second server.
- server2 := manager.InternalNew(naming.FixedRoutingID(0x2222222222222222))
+ server2 := manager.InternalNew(ctx, naming.FixedRoutingID(0x2222222222222222))
defer server2.Shutdown()
// Setup a stream.Listener that will accept VCs and Flows routed
// through the proxy.
- ln2, ep2, err := server2.Listen(proxyEp.Network(), proxyEp.String(), principal, blessings)
+ ln2, ep2, err := server2.Listen(ctx, proxyEp.Network(), proxyEp.String(), blessings)
if err != nil {
t.Fatal(err)
}
defer ln2.Close()
// Create the stream.Manager for a client.
- client := manager.InternalNew(naming.FixedRoutingID(0xcccccccccccccccc))
+ client := manager.InternalNew(ctx, naming.FixedRoutingID(0xcccccccccccccccc))
defer client.Shutdown()
cases := []struct {
@@ -87,7 +87,7 @@
// Accept a single flow and write out what is read to readChan
readChan := make(chan string)
go readFlow(t, c.ln, readChan)
- if err := writeFlow(c.client, c.ep, written); err != nil {
+ if err := writeFlow(ctx, c.client, c.ep, written); err != nil {
t.Errorf("%s: %v", name, err)
continue
}
@@ -101,7 +101,6 @@
func TestProxyAuthorization(t *testing.T) {
ctx, shutdown := v23Init()
defer shutdown()
-
_, shutdown, proxyEp, err := proxy.InternalNew(naming.FixedRoutingID(0xbbbbbbbbbbbbbbbb), ctx, testAuth{"alice", "carol"})
if err != nil {
t.Fatal(err)
@@ -129,8 +128,9 @@
{dave, false}, // not recognized, thus doesn't pass the auth policy
}
for idx, test := range testcases {
- server := manager.InternalNew(naming.FixedRoutingID(uint64(idx)))
- _, ep, err := server.Listen(proxyEp.Network(), proxyEp.String(), test.p, test.p.BlessingStore().Default(), proxyAuth{test.p})
+ server := manager.InternalNew(ctx, naming.FixedRoutingID(uint64(idx)))
+ nctx, _ := v23.WithPrincipal(ctx, test.p)
+ _, ep, err := server.Listen(nctx, proxyEp.Network(), proxyEp.String(), test.p.BlessingStore().Default(), proxyAuth{test.p})
if (err == nil) != test.ok {
t.Errorf("Got ep=%v, err=%v - wanted error:%v", ep, err, !test.ok)
}
@@ -159,22 +159,23 @@
// Create the stream.Manager for server1 and server2, both with the same routing ID
serverRID := naming.FixedRoutingID(0x5555555555555555)
- server1 := manager.InternalNew(serverRID)
- server2 := manager.InternalNew(serverRID)
+ server1 := manager.InternalNew(ctx, serverRID)
+ server2 := manager.InternalNew(ctx, serverRID)
defer server1.Shutdown()
defer server2.Shutdown()
principal := testutil.NewPrincipal("test")
+ ctx, _ = v23.WithPrincipal(ctx, principal)
blessings := principal.BlessingStore().Default()
// First server to claim serverRID should win.
- ln1, ep1, err := server1.Listen(proxyEp.Network(), proxyEp.String(), principal, blessings)
+ ln1, ep1, err := server1.Listen(ctx, proxyEp.Network(), proxyEp.String(), blessings)
if err != nil {
t.Fatal(err)
}
defer ln1.Close()
- ln2, ep2, err := server2.Listen(proxyEp.Network(), proxyEp.String(), principal, blessings)
+ ln2, ep2, err := server2.Listen(ctx, proxyEp.Network(), proxyEp.String(), blessings)
if pattern := "routing id 00000000000000005555555555555555 is already being proxied"; err == nil || !strings.Contains(err.Error(), pattern) {
t.Errorf("Got (%v, %v, %v) want error \"...%v\" (ep1:%v)", ln2, ep2, err, pattern, ep1)
}
@@ -194,10 +195,11 @@
t.Errorf("Proxy endpoint blessing names: got %v, want %v", got, want)
}
- other := manager.InternalNew(naming.FixedRoutingID(0xcccccccccccccccc))
+ other := manager.InternalNew(ctx, naming.FixedRoutingID(0xcccccccccccccccc))
defer other.Shutdown()
- vc, err := other.Dial(proxyEp, testutil.NewPrincipal("other"))
+ nctx, _ := v23.WithPrincipal(ctx, testutil.NewPrincipal("other"))
+ vc, err := other.Dial(nctx, proxyEp)
if err != nil {
t.Fatal(err)
}
@@ -219,6 +221,8 @@
pserver = testutil.NewPrincipal("server")
pclient = testutil.NewPrincipal("client")
)
+ cctx, _ := v23.WithPrincipal(ctx, pclient)
+ sctx, _ := v23.WithPrincipal(ctx, pserver)
_, shutdown, proxyEp, err := proxy.InternalNew(naming.FixedRoutingID(0xbbbbbbbbbbbbbbbb), ctx, security.AllowEveryone())
if err != nil {
@@ -229,10 +233,10 @@
t.Errorf("Proxy endpoint blessing names: got %v, want %v", got, want)
}
- server := manager.InternalNew(naming.FixedRoutingID(0x5555555555555555))
+ server := manager.InternalNew(ctx, naming.FixedRoutingID(0x5555555555555555))
defer server.Shutdown()
- ln, ep, err := server.Listen(proxyEp.Network(), proxyEp.String(), pserver, pserver.BlessingStore().Default())
+ ln, ep, err := server.Listen(sctx, proxyEp.Network(), proxyEp.String(), pserver.BlessingStore().Default())
if err != nil {
t.Fatal(err)
}
@@ -248,9 +252,9 @@
}
}()
- client := manager.InternalNew(naming.FixedRoutingID(0xcccccccccccccccc))
+ client := manager.InternalNew(ctx, naming.FixedRoutingID(0xcccccccccccccccc))
defer client.Shutdown()
- vc, err := client.Dial(ep, pclient)
+ vc, err := client.Dial(cctx, ep)
if err != nil {
t.Fatal(err)
}
@@ -272,13 +276,14 @@
t.Fatal(err)
}
defer shutdown()
- server := manager.InternalNew(naming.FixedRoutingID(0x5555555555555555))
+ server := manager.InternalNew(ctx, naming.FixedRoutingID(0x5555555555555555))
defer server.Shutdown()
addr := proxyEp.Addr().String()
port := addr[strings.LastIndex(addr, ":"):]
principal := testutil.NewPrincipal("test")
+ ctx, _ = v23.WithPrincipal(ctx, principal)
blessings := principal.BlessingStore().Default()
- ln, _, err := server.Listen(inaming.Network, "127.0.0.1"+port, principal, blessings)
+ ln, _, err := server.Listen(ctx, inaming.Network, "127.0.0.1"+port, blessings)
if err != nil {
t.Fatal(err)
}
@@ -293,17 +298,18 @@
if err != nil {
t.Fatal(err)
}
- server := manager.InternalNew(naming.FixedRoutingID(0x5555555555555555))
- client1 := manager.InternalNew(naming.FixedRoutingID(0x1111111111111111))
- client2 := manager.InternalNew(naming.FixedRoutingID(0x2222222222222222))
+ server := manager.InternalNew(ctx, naming.FixedRoutingID(0x5555555555555555))
+ client1 := manager.InternalNew(ctx, naming.FixedRoutingID(0x1111111111111111))
+ client2 := manager.InternalNew(ctx, naming.FixedRoutingID(0x2222222222222222))
defer shutdown()
defer server.Shutdown()
defer client1.Shutdown()
defer client2.Shutdown()
principal := testutil.NewPrincipal("test")
+ sctx, _ := v23.WithPrincipal(ctx, principal)
blessings := principal.BlessingStore().Default()
- lnS, epS, err := server.Listen(proxyEp.Network(), proxyEp.String(), principal, blessings)
+ lnS, epS, err := server.Listen(sctx, proxyEp.Network(), proxyEp.String(), blessings)
if err != nil {
t.Fatal(err)
}
@@ -311,13 +317,14 @@
rchan := make(chan string)
pclient1 := testutil.NewPrincipal("client1")
+ cctx, _ := v23.WithPrincipal(ctx, pclient1)
// client1 must connect to the proxy to speak to the server.
// Keep a VC and Flow open to the server, to ensure that the proxy
// maintains routing information (at some point, inactive VIFs
// should be garbage collected, so this ensures that the VIF
// is "active")
- if vc, err := client1.Dial(epS, pclient1); err != nil {
+ if vc, err := client1.Dial(cctx, epS); err != nil {
t.Fatal(err)
} else if flow, err := vc.Connect(); err != nil {
t.Fatal(err)
@@ -326,7 +333,7 @@
}
// Now client1 becomes a server
- lnC, epC, err := client1.Listen(proxyEp.Network(), proxyEp.String(), pclient1, pclient1.BlessingStore().Default())
+ lnC, epC, err := client1.Listen(cctx, proxyEp.Network(), proxyEp.String(), pclient1.BlessingStore().Default())
if err != nil {
t.Fatal(err)
}
@@ -334,7 +341,7 @@
// client2 should be able to talk to client1 through the proxy
rchan = make(chan string)
go readFlow(t, lnC, rchan)
- if err := writeFlow(client2, epC, "daffy duck"); err != nil {
+ if err := writeFlow(ctx, client2, epC, "daffy duck"); err != nil {
t.Fatalf("client2 failed to chat with client1: %v", err)
}
if got, want := <-rchan, "daffy duck"; got != want {
@@ -365,6 +372,8 @@
} else {
opts = []stream.VCOpt{vc.IdleTimeout{idleTime}}
}
+ cctx, _ := v23.WithPrincipal(ctx, pclient)
+ sctx, _ := v23.WithPrincipal(ctx, pserver)
// Pause the idle timers.
triggerTimers := vif.SetFakeTimers()
@@ -376,22 +385,22 @@
defer shutdown()
// Create the stream.Manager for the server.
- server := manager.InternalNew(naming.FixedRoutingID(0x1111111111111111))
+ server := manager.InternalNew(ctx, naming.FixedRoutingID(0x1111111111111111))
defer server.Shutdown()
// Setup a stream.Listener that will accept VCs and Flows routed
// through the proxy.
- ln, ep, err := server.Listen(proxyEp.Network(), proxyEp.String(), pserver, pserver.BlessingStore().Default(), lopts...)
+ ln, ep, err := server.Listen(sctx, proxyEp.Network(), proxyEp.String(), pserver.BlessingStore().Default(), lopts...)
if err != nil {
t.Fatal(err)
}
defer ln.Close()
// Create the stream.Manager for a client.
- client := manager.InternalNew(naming.FixedRoutingID(0xcccccccccccccccc))
+ client := manager.InternalNew(ctx, naming.FixedRoutingID(0xcccccccccccccccc))
defer client.Shutdown()
// Open a VC and a Flow.
- VC, err := client.Dial(ep, pclient, opts...)
+ VC, err := client.Dial(cctx, ep, opts...)
if err != nil {
t.Fatal(err)
}
@@ -434,7 +443,7 @@
//
// We use fake timers here again to avoid idle timeout during dialing.
defer vif.SetFakeTimers()()
- if _, err := client.Dial(ep, pclient, opts...); err != nil {
+ if _, err := client.Dial(cctx, ep, opts...); err != nil {
t.Errorf("Want to dial to the server; can't dial: %v", err)
}
}
@@ -442,8 +451,9 @@
func TestProxyIdleTimeout(t *testing.T) { testProxyIdleTimeout(t, false) }
func TestProxyIdleTimeoutServer(t *testing.T) { testProxyIdleTimeout(t, true) }
-func writeFlow(mgr stream.Manager, ep naming.Endpoint, data string) error {
- vc, err := mgr.Dial(ep, testutil.NewPrincipal("test"))
+func writeFlow(ctx *context.T, mgr stream.Manager, ep naming.Endpoint, data string) error {
+ ctx, _ = v23.WithPrincipal(ctx, testutil.NewPrincipal("test"))
+ vc, err := mgr.Dial(ctx, ep)
if err != nil {
return fmt.Errorf("manager.Dial(%v) failed: %v", ep, err)
}
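The test updates in this file all follow one pattern: the principal moves off the argument list and onto the context. In sketch form (names as in the tests above):

    // before: vc, err := mgr.Dial(ep, principal)
    nctx, _ := v23.WithPrincipal(ctx, principal)
    vc, err := mgr.Dial(nctx, ep)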
diff --git a/runtime/internal/rpc/stream/util.go b/runtime/internal/rpc/stream/util.go
new file mode 100644
index 0000000..e79cc3d
--- /dev/null
+++ b/runtime/internal/rpc/stream/util.go
@@ -0,0 +1,42 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package stream
+
+import (
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/security"
+)
+
+// IMPORTANT:
+// It's essential that the ctx not be accessed when authentication
+// is not requested. This is because the context initialization code
+// uses an unauthenticated connection to obtain a principal!
+
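+// GetPrincipalVCOpts returns the principal attached to ctx, unless an
+// AuthenticatedVC(false) option is present, in which case it returns nil
+// without consulting ctx (see the note above).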
+func GetPrincipalVCOpts(ctx *context.T, opts ...VCOpt) security.Principal {
+ for _, opt := range opts {
+ switch v := opt.(type) {
+ case AuthenticatedVC:
+ if !v {
+ return nil
+ }
+ return v23.GetPrincipal(ctx)
+ }
+ }
+ return v23.GetPrincipal(ctx)
+}
+
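+// GetPrincipalListenerOpts is the ListenerOpt counterpart of
+// GetPrincipalVCOpts: it returns nil when AuthenticatedVC(false) is present,
+// and the principal attached to ctx otherwise.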
+func GetPrincipalListenerOpts(ctx *context.T, opts ...ListenerOpt) security.Principal {
+ for _, opt := range opts {
+ switch v := opt.(type) {
+ case AuthenticatedVC:
+ if !v {
+ return nil
+ }
+ return v23.GetPrincipal(ctx)
+ }
+ }
+ return v23.GetPrincipal(ctx)
+}
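A minimal usage sketch for these helpers (assuming AuthenticatedVC is the bool-typed option already defined elsewhere in this package; not part of this CL):

    // Opting out of authentication: the helper returns nil and never asks
    // the context for a principal, per the IMPORTANT note above.
    p := stream.GetPrincipalVCOpts(ctx, stream.AuthenticatedVC(false)) // p == nil
    // Default path: no AuthenticatedVC option, so ctx's principal is used.
    p = stream.GetPrincipalVCOpts(ctx) // same as v23.GetPrincipal(ctx)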
diff --git a/runtime/internal/rpc/stream/vc/listener.go b/runtime/internal/rpc/stream/vc/listener.go
index 72479ac..7e2ed0f 100644
--- a/runtime/internal/rpc/stream/vc/listener.go
+++ b/runtime/internal/rpc/stream/vc/listener.go
@@ -7,6 +7,7 @@
import (
"v.io/v23/verror"
+ "v.io/x/ref/internal/logger"
"v.io/x/ref/runtime/internal/lib/upcqueue"
"v.io/x/ref/runtime/internal/rpc/stream"
)
@@ -16,7 +17,7 @@
// level errors and hence {1}{2} is omitted from their format
// strings to avoid repeating these n-times in the final error
// message visible to the user.
- errListenerClosed = reg(".errListenerClosed", "Listener has been closed")
+ errListenerClosed = reg(".errListenerClosed", "listener has been closed")
errGetFromQueue = reg(".errGetFromQueue", "upcqueue.Get failed{:3}")
)
@@ -31,6 +32,7 @@
func (l *listener) Enqueue(f stream.Flow) error {
err := l.q.Put(f)
if err == upcqueue.ErrQueueIsClosed {
+ logger.Global().Infof("Listener closed: %p, %p", l, l.q)
return verror.New(stream.ErrBadState, nil, verror.New(errListenerClosed, nil))
}
return err
@@ -48,6 +50,7 @@
}
func (l *listener) Close() error {
+ logger.Global().Infof("Listener being closed: %p, %p", l, l.q)
l.q.Close()
return nil
}
diff --git a/runtime/internal/rpc/stream/vc/vc.go b/runtime/internal/rpc/stream/vc/vc.go
index 805a07f..6249c58 100644
--- a/runtime/internal/rpc/stream/vc/vc.go
+++ b/runtime/internal/rpc/stream/vc/vc.go
@@ -23,7 +23,6 @@
"v.io/v23/verror"
"v.io/v23/vom"
- "v.io/x/lib/vlog"
"v.io/x/ref/lib/apilog"
"v.io/x/ref/runtime/internal/lib/bqueue"
"v.io/x/ref/runtime/internal/lib/iobuf"
@@ -90,6 +89,7 @@
// queue (v.io/x/ref/runtime/internal/lib/bqueue) to provide flow control on Write
// operations.
type VC struct {
+ ctx *context.T
vci id.VC
localEP, remoteEP naming.Endpoint
localPrincipal security.Principal
@@ -142,12 +142,6 @@
return a.Policy.Authorize(ctx, security.NewCall(&params))
}
-// DialContext establishes the context under which a VC Dial was initiated.
-type DialContext struct{ *context.T }
-
-func (DialContext) RPCStreamVCOpt() {}
-func (DialContext) RPCStreamListenerOpt() {}
-
// StartTimeout specifies the time after which the underlying VIF is closed
// if no VC is opened.
type StartTimeout struct{ time.Duration }
@@ -219,12 +213,13 @@
// As the name suggests, this method is intended for use only within packages
// placed inside v.io/x/ref/runtime/internal. Code outside the
// v.io/x/ref/runtime/internal/* packages should never call this method.
-func InternalNew(p Params) *VC {
+func InternalNew(ctx *context.T, p Params) *VC {
fidOffset := 1
if p.Dialed {
fidOffset = 0
}
return &VC{
+ ctx: ctx,
vci: p.VCI,
localEP: p.LocalEP,
remoteEP: p.RemoteEP,
@@ -314,7 +309,6 @@
var err error
if payload, err = vc.crypter.Decrypt(payload); err != nil {
vc.mu.Unlock()
- vlog.Errorf("failed to decrypt payload on VC %v failed: %v", vc, err)
return verror.New(stream.ErrSecurity, nil, verror.New(errFailedToDecryptPayload, nil, err))
}
}
@@ -375,7 +369,7 @@
// Do it in a goroutine in case the implementation of AddReceiveBuffers
// ends up attempting to lock vc.mu
go vc.helper.AddReceiveBuffers(vc.vci, fid, DefaultBytesBufferedPerFlow)
- vlog.VI(2).Infof("Added flow %d@%d to listener", fid, vc.vci)
+ vc.ctx.VI(2).Infof("Added flow %d@%d to listener", fid, vc.vci)
return nil
}
@@ -391,7 +385,7 @@
delete(vc.flowMap, fid)
vc.mu.Unlock()
f.Shutdown()
- vlog.VI(2).Infof("Shutdown flow %d@%d", fid, vc.vci)
+ vc.ctx.VI(2).Infof("Shutdown flow %d@%d", fid, vc.vci)
}
// ReleaseCounters informs the Flow (identified by fid) that the remote end is
@@ -408,14 +402,14 @@
}
vc.mu.Unlock()
if f == nil {
- vlog.VI(2).Infof("Ignoring ReleaseCounters(%d, %d) on VCI %d as the flow does not exist", fid, bytes, vc.vci)
+ vc.ctx.VI(2).Infof("Ignoring ReleaseCounters(%d, %d) on VCI %d as the flow does not exist", fid, bytes, vc.vci)
return
}
f.Release(int(bytes))
}
func (vc *VC) Close(reason error) error {
- vlog.VI(1).Infof("Closing VC %v. Reason:%q", vc, reason)
+ vc.ctx.VI(1).Infof("Closing VC %v. Reason:%q", vc, reason)
vc.mu.Lock()
if vc.closed {
vc.mu.Unlock()
@@ -434,7 +428,7 @@
vc.sharedCounters.Close()
for fid, flow := range flows {
- vlog.VI(2).Infof("Closing flow %d on VC %v as VC is being closed(%q)", fid, vc, reason)
+ vc.ctx.VI(2).Infof("Closing flow %d on VC %v as VC is being closed(%q)", fid, vc, reason)
flow.Close()
}
return nil
@@ -527,7 +521,7 @@
if err = vc.connectSystemFlows(); err != nil {
return vc.appendCloseReason(err)
}
- vlog.VI(1).Infof("Client VC %v authenticated. RemoteBlessings:%v, LocalBlessings:%v", vc, rBlessings, lBlessings)
+ vc.ctx.VI(1).Infof("Client VC %v authenticated. RemoteBlessings:%v, LocalBlessings:%v", vc, rBlessings, lBlessings)
return nil
}
@@ -572,7 +566,7 @@
if err := vc.connectSystemFlows(); err != nil {
return vc.appendCloseReason(err)
}
- vlog.VI(1).Infof("Client VC %v authenticated. RemoteBlessings:%v, LocalBlessings:%v", vc, params.RemoteBlessings, params.LocalBlessings)
+ vc.ctx.VI(1).Infof("Client VC %v authenticated. RemoteBlessings:%v, LocalBlessings:%v", vc, params.RemoteBlessings, params.LocalBlessings)
return nil
}
@@ -607,7 +601,7 @@
return vc.appendCloseReason(err)
}
}
- vlog.VI(1).Infof("Client VC %v handshaked with no authentication.", vc)
+ vc.ctx.VI(1).Infof("Client VC %v handshaked with no authentication.", vc)
return nil
}
@@ -680,7 +674,7 @@
vc.abortHandshakeAcceptedVC(verror.New(stream.ErrNetwork, nil, verror.New(errFailedToAcceptSystemFlows, nil, err)), ln, result)
return
}
- vlog.VI(1).Infof("Server VC %v authenticated. RemoteBlessings:%v, LocalBlessings:%v", vc, rBlessings, lBlessings)
+ vc.ctx.VI(1).Infof("Server VC %v authenticated. RemoteBlessings:%v, LocalBlessings:%v", vc, rBlessings, lBlessings)
result <- HandshakeResult{ln, nil}
}()
return result
@@ -722,7 +716,7 @@
vc.abortHandshakeAcceptedVC(verror.New(stream.ErrNetwork, nil, verror.New(errFailedToAcceptSystemFlows, nil, err)), ln, result)
return
}
- vlog.VI(1).Infof("Server VC %v authenticated. RemoteBlessings:%v, LocalBlessings:%v", vc, params.RemoteBlessings, params.LocalBlessings)
+ vc.ctx.VI(1).Infof("Server VC %v authenticated. RemoteBlessings:%v, LocalBlessings:%v", vc, params.RemoteBlessings, params.LocalBlessings)
result <- HandshakeResult{ln, nil}
}()
return result
@@ -762,7 +756,7 @@
return
}
}
- vlog.VI(1).Infof("Server VC %v handshaked with no authentication.", vc)
+ vc.ctx.VI(1).Infof("Server VC %v handshaked with no authentication.", vc)
result <- HandshakeResult{ln, err}
}()
return result
@@ -816,7 +810,7 @@
case <-time.After(fetchDuration(expiry, dischargeExpiryBuffer)):
discharges = dc.PrepareDischarges(nil, tpCavs, security.DischargeImpetus{})
if err := enc.Encode(discharges); err != nil {
- vlog.Errorf("encoding discharges on VC %v failed: %v", vc, err)
+ vc.ctx.Errorf("encoding discharges on VC %v failed: %v", vc, err)
return
}
if len(discharges) == 0 {
@@ -831,7 +825,7 @@
}
vc.mu.Unlock()
case <-vc.closeCh:
- vlog.VI(3).Infof("closing sendDischargesLoop on VC %v", vc)
+ vc.ctx.VI(3).Infof("closing sendDischargesLoop on VC %v", vc)
return
}
}
@@ -863,7 +857,7 @@
for {
var discharges []security.Discharge
if err := dec.Decode(&discharges); err != nil {
- vlog.VI(3).Infof("decoding discharges on %v failed: %v", vc, err)
+ vc.ctx.VI(3).Infof("decoding discharges on %v failed: %v", vc, err)
return
}
if len(discharges) == 0 {
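For orientation, the construction pattern after this change, as exercised by the updated tests below (a sketch; Params trimmed to what the tests pass):

    ctx, shutdown := test.V23Init()
    defer shutdown()
    v := vc.InternalNew(ctx, vc.Params{VCI: 12}) // v logs through ctx from here on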
diff --git a/runtime/internal/rpc/stream/vc/vc_test.go b/runtime/internal/rpc/stream/vc/vc_test.go
index 7246383..7e12aa6 100644
--- a/runtime/internal/rpc/stream/vc/vc_test.go
+++ b/runtime/internal/rpc/stream/vc/vc_test.go
@@ -24,8 +24,7 @@
"v.io/v23/security"
"v.io/v23/verror"
- "v.io/x/lib/vlog"
-
+ _ "v.io/x/ref/runtime/factories/generic"
"v.io/x/ref/runtime/internal/lib/bqueue"
"v.io/x/ref/runtime/internal/lib/bqueue/drrqueue"
"v.io/x/ref/runtime/internal/lib/iobuf"
@@ -34,6 +33,7 @@
"v.io/x/ref/runtime/internal/rpc/stream/id"
"v.io/x/ref/runtime/internal/rpc/stream/vc"
iversion "v.io/x/ref/runtime/internal/rpc/version"
+ "v.io/x/ref/test"
"v.io/x/ref/test/testutil"
)
@@ -95,8 +95,10 @@
}
func TestHandshakeNoSecurity(t *testing.T) {
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
// When the principals are nil, no blessings should be sent over the wire.
- clientH, serverH := newVC()
+ clientH, serverH := newVC(ctx)
if err := handshakeVCNoAuthentication(LatestVersion, clientH.VC, serverH.VC); err != nil {
t.Fatal(err)
}
@@ -178,6 +180,8 @@
var _ vc.DischargeClient = (mockDischargeClient)(nil)
func testHandshake(t *testing.T, securityLevel testSecurityLevel) {
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
matchesError := func(got error, want string) error {
if (got == nil) && len(want) == 0 {
return nil
@@ -254,7 +258,7 @@
},
}
for i, d := range testdata {
- clientH, serverH := newVC()
+ clientH, serverH := newVC(ctx)
var err error
switch securityLevel {
case SecurityPreAuthenticated:
@@ -462,7 +466,9 @@
// the one that initiated the VC). The "server" end (the one that "accepted" the
// VC) listens for flows and simply echoes data read.
func NewSimple(v version.RPCVersion, securityLevel testSecurityLevel) (*helper, stream.VC, error) {
- clientH, serverH := newVC()
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
+ clientH, serverH := newVC(ctx)
pclient := testutil.NewPrincipal("client")
pserver := testutil.NewPrincipal("server")
var err error
@@ -482,7 +488,7 @@
return clientH, clientH.VC, err
}
-func newVC() (clientH, serverH *helper) {
+func newVC(ctx *context.T) (clientH, serverH *helper) {
clientH = &helper{bq: drrqueue.New(vc.MaxPayloadSizeBytes)}
serverH = &helper{bq: drrqueue.New(vc.MaxPayloadSizeBytes)}
clientH.otherEnd = serverH
@@ -506,12 +512,12 @@
Helper: serverH,
}
- clientH.VC = vc.InternalNew(clientParams)
- serverH.VC = vc.InternalNew(serverParams)
+ clientH.VC = vc.InternalNew(ctx, clientParams)
+ serverH.VC = vc.InternalNew(ctx, serverParams)
clientH.AddReceiveBuffers(vci, vc.SharedFlowID, vc.DefaultBytesBufferedPerFlow)
- go clientH.pipeLoop(serverH.VC)
- go serverH.pipeLoop(clientH.VC)
+ go clientH.pipeLoop(ctx, serverH.VC)
+ go serverH.pipeLoop(ctx, clientH.VC)
return
}
@@ -615,7 +621,7 @@
}
// pipeLoop forwards slices written to h.bq to dst.
-func (h *helper) pipeLoop(dst *vc.VC) {
+func (h *helper) pipeLoop(ctx *context.T, dst *vc.VC) {
for {
w, bufs, err := h.bq.Get(nil)
if err != nil {
@@ -625,10 +631,10 @@
for _, b := range bufs {
cipher, err := h.VC.Encrypt(fid, b)
if err != nil {
- vlog.Infof("vc encrypt failed: %v", err)
+ ctx.Infof("vc encrypt failed: %v", err)
}
if err := dst.DispatchPayload(fid, cipher); err != nil {
- vlog.Infof("dispatch payload failed: %v", err)
+ ctx.Infof("dispatch payload failed: %v", err)
return
}
}
diff --git a/runtime/internal/rpc/stream/vif/set_test.go b/runtime/internal/rpc/stream/vif/set_test.go
index 68f98f9..66925dc 100644
--- a/runtime/internal/rpc/stream/vif/set_test.go
+++ b/runtime/internal/rpc/stream/vif/set_test.go
@@ -13,11 +13,17 @@
"testing"
"time"
+ "v.io/x/lib/set"
+
+ "v.io/v23"
+ "v.io/v23/context"
"v.io/v23/naming"
"v.io/v23/rpc"
- "v.io/x/lib/set"
+ "v.io/v23/verror"
+
_ "v.io/x/ref/runtime/factories/generic"
"v.io/x/ref/runtime/internal/rpc/stream/vif"
+ "v.io/x/ref/test"
"v.io/x/ref/test/testutil"
)
@@ -64,19 +70,22 @@
return conn, <-done, nil
}
-func newVIF(c, s net.Conn) (*vif.VIF, *vif.VIF, error) {
+func newVIF(ctx *context.T, c, s net.Conn) (*vif.VIF, *vif.VIF, error) {
done := make(chan *vif.VIF)
go func() {
principal := testutil.NewPrincipal("accepted")
+ ctx, _ = v23.WithPrincipal(ctx, principal)
blessings := principal.BlessingStore().Default()
- vf, err := vif.InternalNewAcceptedVIF(s, naming.FixedRoutingID(0x5), principal, blessings, nil, nil)
+ vf, err := vif.InternalNewAcceptedVIF(ctx, s, naming.FixedRoutingID(0x5), blessings, nil, nil)
if err != nil {
+ fmt.Fprintf(os.Stderr, "InternalNewAcceptedVIF failed: %s\n", verror.DebugString(err))
panic(err)
}
done <- vf
}()
- vf, err := vif.InternalNewDialedVIF(c, naming.FixedRoutingID(0xc), testutil.NewPrincipal("dialed"), nil, nil)
+ ctx, _ = v23.WithPrincipal(ctx, testutil.NewPrincipal("dialed"))
+ vf, err := vif.InternalNewDialedVIF(ctx, c, naming.FixedRoutingID(0xc), nil, nil)
if err != nil {
return nil, nil, err
}
@@ -96,6 +105,8 @@
}
func TestSetBasic(t *testing.T) {
+ ctx, shutdown := test.V23InitWithParams(test.InitParams{})
+ defer shutdown()
sockdir, err := ioutil.TempDir("", "TestSetBasic")
if err != nil {
t.Fatal(err)
@@ -138,7 +149,7 @@
if err != nil {
t.Fatal(err)
}
- vf, _, err := newVIF(c, s)
+ vf, _, err := newVIF(ctx, c, s)
if err != nil {
t.Fatal(err)
}
@@ -167,6 +178,8 @@
}
func TestSetWithPipes(t *testing.T) {
+ ctx, shutdown := test.V23InitWithParams(test.InitParams{})
+ defer shutdown()
c1, s1 := net.Pipe()
c2, s2 := net.Pipe()
a1 := c1.RemoteAddr()
@@ -177,11 +190,11 @@
a1.Network(), a1, a2.Network(), a2)
}
- vf1, _, err := newVIF(c1, s1)
+ vf1, _, err := newVIF(ctx, c1, s1)
if err != nil {
t.Fatal(err)
}
- vf2, _, err := newVIF(c2, s2)
+ vf2, _, err := newVIF(ctx, c2, s2)
if err != nil {
t.Fatal(err)
}
@@ -214,6 +227,8 @@
}
func TestSetWithUnixSocket(t *testing.T) {
+ ctx, shutdown := test.V23InitWithParams(test.InitParams{})
+ defer shutdown()
dir, err := ioutil.TempDir("", "TestSetWithUnixSocket")
if err != nil {
t.Fatal(err)
@@ -238,11 +253,11 @@
a1.Network(), a1, a2.Network(), a2)
}
- _, vf1, err := newVIF(c1, s1)
+ _, vf1, err := newVIF(ctx, c1, s1)
if err != nil {
t.Fatal(err)
}
- _, vf2, err := newVIF(c2, s2)
+ _, vf2, err := newVIF(ctx, c2, s2)
if err != nil {
t.Fatal(err)
}
@@ -275,8 +290,10 @@
}
func TestSetInsertDelete(t *testing.T) {
+ ctx, shutdown := test.V23InitWithParams(test.InitParams{})
+ defer shutdown()
c1, s1 := net.Pipe()
- vf1, _, err := newVIF(c1, s1)
+ vf1, _, err := newVIF(ctx, c1, s1)
if err != nil {
t.Fatal(err)
}
@@ -296,6 +313,8 @@
}
func TestBlockingFind(t *testing.T) {
+ ctx, shutdown := test.V23InitWithParams(test.InitParams{})
+ defer shutdown()
network, address := "tcp", "127.0.0.1:1234"
set := vif.NewSet()
@@ -318,7 +337,7 @@
if err != nil {
t.Fatal(err)
}
- vf, _, err := newVIF(c, s)
+ vf, _, err := newVIF(ctx, c, s)
if err != nil {
t.Fatal(err)
}
diff --git a/runtime/internal/rpc/stream/vif/vcmap_test.go b/runtime/internal/rpc/stream/vif/vcmap_test.go
index 83c503f..bc29468 100644
--- a/runtime/internal/rpc/stream/vif/vcmap_test.go
+++ b/runtime/internal/rpc/stream/vif/vcmap_test.go
@@ -9,14 +9,17 @@
"testing"
"v.io/x/ref/runtime/internal/rpc/stream/vc"
+ "v.io/x/ref/test"
)
func TestVCMap(t *testing.T) {
+ ctx, shutdown := test.V23InitWithParams(test.InitParams{})
+ defer shutdown()
m := newVCMap()
- vc12 := vc.InternalNew(vc.Params{VCI: 12})
- vc34 := vc.InternalNew(vc.Params{VCI: 34})
- vc45 := vc.InternalNew(vc.Params{VCI: 45})
+ vc12 := vc.InternalNew(ctx, vc.Params{VCI: 12})
+ vc34 := vc.InternalNew(ctx, vc.Params{VCI: 34})
+ vc45 := vc.InternalNew(ctx, vc.Params{VCI: 45})
if vc, _, _ := m.Find(12); vc != nil {
t.Errorf("Unexpected VC found: %+v", vc)
@@ -43,9 +46,11 @@
}
func TestVCMapFreeze(t *testing.T) {
+ ctx, shutdown := test.V23InitWithParams(test.InitParams{})
+ defer shutdown()
m := newVCMap()
- vc1 := vc.InternalNew(vc.Params{VCI: 1})
- vc2 := vc.InternalNew(vc.Params{VCI: 2})
+ vc1 := vc.InternalNew(ctx, vc.Params{VCI: 1})
+ vc2 := vc.InternalNew(ctx, vc.Params{VCI: 2})
if ok, _, _ := m.Insert(vc1); !ok {
t.Fatal("Should be able to insert the VC")
}
@@ -63,10 +68,12 @@
}
func TestVCMapDelete(t *testing.T) {
+ ctx, shutdown := test.V23InitWithParams(test.InitParams{})
+ defer shutdown()
m := newVCMap()
- vc1 := vc.InternalNew(vc.Params{VCI: 1})
- vc2 := vc.InternalNew(vc.Params{VCI: 2})
+ vc1 := vc.InternalNew(ctx, vc.Params{VCI: 1})
+ vc2 := vc.InternalNew(ctx, vc.Params{VCI: 2})
m.Insert(vc1)
if empty := m.Delete(vc1.VCI()); !empty {
diff --git a/runtime/internal/rpc/stream/vif/vif.go b/runtime/internal/rpc/stream/vif/vif.go
index 1dfd9c6..d1b1b92 100644
--- a/runtime/internal/rpc/stream/vif/vif.go
+++ b/runtime/internal/rpc/stream/vif/vif.go
@@ -5,9 +5,9 @@
package vif
// Logging guidelines:
-// vlog.VI(1) for per-net.Conn information
-// vlog.VI(2) for per-VC information
-// vlog.VI(3) for per-Flow information
+// ctx.VI(1) for per-net.Conn information
+// ctx.VI(2) for per-VC information
+// ctx.VI(3) for per-Flow information
import (
"bytes"
@@ -27,7 +27,6 @@
"v.io/v23/verror"
"v.io/v23/vtrace"
- "v.io/x/lib/vlog"
"v.io/x/ref/runtime/internal/lib/bqueue"
"v.io/x/ref/runtime/internal/lib/bqueue/drrqueue"
"v.io/x/ref/runtime/internal/lib/iobuf"
@@ -77,6 +76,8 @@
// single physical interface, multiple Virtual Circuits (VCs) can be
// established over a single VIF.
type VIF struct {
+ ctx *context.T
+
// All reads must be performed through reader, and not directly through conn.
conn net.Conn
pool *iobuf.Pool
@@ -180,14 +181,14 @@
// As the name suggests, this method is intended for use only within packages
// placed inside v.io/x/ref/runtime/internal. Code outside the
// v.io/x/ref/runtime/internal/* packages should never call this method.
-func InternalNewDialedVIF(conn net.Conn, rid naming.RoutingID, principal security.Principal, versions *iversion.Range, onClose func(*VIF), opts ...stream.VCOpt) (*VIF, error) {
- ctx := getDialContext(opts)
+func InternalNewDialedVIF(ctx *context.T, conn net.Conn, rid naming.RoutingID, versions *iversion.Range, onClose func(*VIF), opts ...stream.VCOpt) (*VIF, error) {
if ctx != nil {
var span vtrace.Span
ctx, span = vtrace.WithNewSpan(ctx, "InternalNewDialedVIF")
span.Annotatef("(%v, %v)", conn.RemoteAddr().Network(), conn.RemoteAddr())
defer span.Finish()
}
+ principal := stream.GetPrincipalVCOpts(ctx, opts...)
pool := iobuf.NewPool(0)
reader := iobuf.NewReader(pool, conn)
localEP := localEndpoint(conn, rid, versions)
@@ -211,7 +212,7 @@
startTimeout = v.Duration
}
}
- return internalNew(conn, pool, reader, localEP, id.VC(vc.NumReservedVCs), versions, principal, blessings, startTimeout, onClose, nil, nil, c, authr)
+ return internalNew(ctx, conn, pool, reader, localEP, id.VC(vc.NumReservedVCs), versions, principal, blessings, startTimeout, onClose, nil, nil, c, authr)
}
// InternalNewAcceptedVIF creates a new virtual interface over the provided
@@ -225,12 +226,12 @@
// As the name suggests, this method is intended for use only within packages
// placed inside v.io/x/ref/runtime/internal. Code outside the
// v.io/x/ref/runtime/internal/* packages should never call this method.
-func InternalNewAcceptedVIF(conn net.Conn, rid naming.RoutingID, principal security.Principal, blessings security.Blessings, versions *iversion.Range, onClose func(*VIF), lopts ...stream.ListenerOpt) (*VIF, error) {
+func InternalNewAcceptedVIF(ctx *context.T, conn net.Conn, rid naming.RoutingID, blessings security.Blessings, versions *iversion.Range, onClose func(*VIF), lopts ...stream.ListenerOpt) (*VIF, error) {
pool := iobuf.NewPool(0)
reader := iobuf.NewReader(pool, conn)
localEP := localEndpoint(conn, rid, versions)
dischargeClient := getDischargeClient(lopts)
-
+ principal := stream.GetPrincipalListenerOpts(ctx, lopts...)
c, authr, err := AuthenticateAsServer(conn, reader, localEP, versions, principal, blessings, dischargeClient)
if err != nil {
return nil, err
@@ -243,10 +244,10 @@
startTimeout = v.Duration
}
}
- return internalNew(conn, pool, reader, localEP, id.VC(vc.NumReservedVCs)+1, versions, principal, blessings, startTimeout, onClose, upcqueue.New(), lopts, c, authr)
+ return internalNew(ctx, conn, pool, reader, localEP, id.VC(vc.NumReservedVCs)+1, versions, principal, blessings, startTimeout, onClose, upcqueue.New(), lopts, c, authr)
}
-func internalNew(conn net.Conn, pool *iobuf.Pool, reader *iobuf.Reader, localEP naming.Endpoint, initialVCI id.VC, versions *iversion.Range, principal security.Principal, blessings security.Blessings, startTimeout time.Duration, onClose func(*VIF), acceptor *upcqueue.T, listenerOpts []stream.ListenerOpt, c crypto.ControlCipher, authr *AuthenticationResult) (*VIF, error) {
+func internalNew(ctx *context.T, conn net.Conn, pool *iobuf.Pool, reader *iobuf.Reader, localEP naming.Endpoint, initialVCI id.VC, versions *iversion.Range, principal security.Principal, blessings security.Blessings, startTimeout time.Duration, onClose func(*VIF), acceptor *upcqueue.T, listenerOpts []stream.ListenerOpt, c crypto.ControlCipher, authr *AuthenticationResult) (*VIF, error) {
var (
// Choose IDs that will not conflict with any other (VC, Flow)
// pairs. VCI 0 is never used by the application (it is
@@ -281,6 +282,7 @@
}
vif := &VIF{
+ ctx: ctx,
conn: conn,
pool: pool,
reader: reader,
@@ -318,7 +320,7 @@
// Dial creates a new VC to the provided remote identity, authenticating the VC
// with the provided local identity.
-func (vif *VIF) Dial(remoteEP naming.Endpoint, principal security.Principal, opts ...stream.VCOpt) (stream.VC, error) {
+func (vif *VIF) Dial(ctx *context.T, remoteEP naming.Endpoint, opts ...stream.VCOpt) (stream.VC, error) {
var idleTimeout time.Duration
for _, o := range opts {
switch v := o.(type) {
@@ -326,7 +328,8 @@
idleTimeout = v.Duration
}
}
- vc, err := vif.newVC(vif.allocVCI(), vif.localEP, remoteEP, idleTimeout, true)
+ principal := stream.GetPrincipalVCOpts(ctx, opts...)
+ vc, err := vif.newVC(ctx, vif.allocVCI(), vif.localEP, remoteEP, idleTimeout, true)
if err != nil {
return nil, err
}
@@ -334,6 +337,7 @@
counters.Add(vc.VCI(), sharedFlowID, defaultBytesBufferedPerFlow)
usePreauth := vif.useVIFAuthForVC(vif.versions.Max, vif.localEP, remoteEP, dialedVIF) &&
+ principal != nil &&
reflect.DeepEqual(principal.PublicKey(), vif.principal.PublicKey())
switch {
case usePreauth:
@@ -415,10 +419,10 @@
return vc, nil
}
-func (vif *VIF) acceptVC(m *message.SetupVC) error {
+func (vif *VIF) acceptVC(ctx *context.T, m *message.SetupVC) error {
vrange, err := vif.versions.Intersect(&m.Setup.Versions)
if err != nil {
- vlog.VI(2).Infof("SetupVC message %+v to VIF %s did not present compatible versions: %v", m, vif, err)
+ ctx.VI(2).Infof("SetupVC message %+v to VIF %s did not present compatible versions: %v", m, vif, err)
return err
}
vif.muListen.Lock()
@@ -426,7 +430,7 @@
lopts := vif.listenerOpts
vif.muListen.Unlock()
if closed {
- vlog.VI(2).Infof("Ignoring SetupVC message %+v as VIF %s does not accept VCs", m, vif)
+ ctx.VI(2).Infof("Ignoring SetupVC message %+v as VIF %s does not accept VCs", m, vif)
return errors.New("VCs not accepted")
}
var idleTimeout time.Duration
@@ -436,7 +440,7 @@
idleTimeout = v.Duration
}
}
- vcobj, err := vif.newVC(m.VCI, m.RemoteEndpoint, m.LocalEndpoint, idleTimeout, false)
+ vcobj, err := vif.newVC(ctx, m.VCI, m.RemoteEndpoint, m.LocalEndpoint, idleTimeout, false)
if err != nil {
return err
}
@@ -451,7 +455,7 @@
switch {
case len(sigPreauth) > 0:
if !vif.useVIFAuthForVC(vrange.Max, m.RemoteEndpoint, m.LocalEndpoint, acceptedVIF) {
- vlog.VI(2).Infof("Ignoring SetupVC message %+v as VIF %s does not allow re-using VIF authentication for this VC", m, vif)
+ ctx.VI(2).Infof("Ignoring SetupVC message %+v as VIF %s does not allow re-using VIF authentication for this VC", m, vif)
return errors.New("VCs not accepted: cannot re-use VIF authentication for this VC")
}
preauth := vif.authResult
@@ -535,7 +539,7 @@
vif.isClosed = true
vif.isClosedMu.Unlock()
- vlog.VI(1).Infof("Closing VIF %s", vif)
+ vif.ctx.VI(1).Infof("Closing VIF %s", vif)
// Stop accepting new VCs.
vif.StopAccepting()
// Close local datastructures for all existing VCs.
@@ -553,7 +557,7 @@
// the remote end should know to close all VCs when the VIF's
// connection breaks.
if err := vif.conn.Close(); err != nil {
- vlog.VI(1).Infof("net.Conn.Close failed on VIF %s: %v", vif, err)
+ vif.ctx.VI(1).Infof("net.Conn.Close failed on VIF %s: %v", vif, err)
}
// Notify that the VIF has been closed.
if vif.onClose != nil {
@@ -633,12 +637,12 @@
// lock is not required here.
msg, err := message.ReadFrom(vif.reader, vif.ctrlCipher)
if err != nil {
- vlog.VI(1).Infof("Exiting readLoop of VIF %s because of read error: %v", vif, err)
+ vif.ctx.VI(1).Infof("Exiting readLoop of VIF %s because of read error: %v", vif, err)
return
}
- vlog.VI(3).Infof("Received %T = [%v] on VIF %s", msg, msg, vif)
+ vif.ctx.VI(3).Infof("Received %T = [%v] on VIF %s", msg, msg, vif)
if err := vif.handleMessage(msg); err != nil {
- vlog.VI(1).Infof("Exiting readLoop of VIF %s because of message error: %v", vif, err)
+ vif.ctx.VI(1).Infof("Exiting readLoop of VIF %s because of message error: %v", vif, err)
return
}
}
@@ -657,12 +661,12 @@
case *message.Data:
_, rq, _ := vif.vcMap.Find(m.VCI)
if rq == nil {
- vlog.VI(2).Infof("Ignoring message of %d bytes for unrecognized VCI %d on VIF %s", m.Payload.Size(), m.VCI, vif)
+ vif.ctx.VI(2).Infof("Ignoring message of %d bytes for unrecognized VCI %d on VIF %s", m.Payload.Size(), m.VCI, vif)
m.Release()
return nil
}
if err := rq.Put(m, nil); err != nil {
- vlog.VI(2).Infof("Failed to put message(%v) on VC queue on VIF %v: %v", m, vif, err)
+ vif.ctx.VI(2).Infof("Failed to put message(%v) on VC queue on VIF %v: %v", m, vif, err)
m.Release()
}
@@ -672,7 +676,7 @@
case *message.OpenFlow:
if vc, _, _ := vif.vcMap.Find(m.VCI); vc != nil {
if err := vc.AcceptFlow(m.Flow); err != nil {
- vlog.VI(3).Infof("OpenFlow %+v on VIF %v failed:%v", m, vif, err)
+ vif.ctx.VI(3).Infof("OpenFlow %+v on VIF %v failed:%v", m, vif, err)
cm := &message.Data{VCI: m.VCI, Flow: m.Flow}
cm.SetClose()
vif.sendOnExpressQ(cm)
@@ -681,7 +685,7 @@
vc.ReleaseCounters(m.Flow, m.InitialCounters)
return nil
}
- vlog.VI(2).Infof("Ignoring OpenFlow(%+v) for unrecognized VCI on VIF %s", m, m, vif)
+ vif.ctx.VI(2).Infof("Ignoring OpenFlow(%+v) for unrecognized VCI on VIF %s", m, m, vif)
case *message.SetupVC:
// If we dialed this VC, then this is a response and we should finish
@@ -690,7 +694,7 @@
vif.distributeCounters(m.Counters)
vc, _, _ := vif.vcMap.Find(m.VCI)
if vc == nil {
- vlog.VI(2).Infof("Ignoring SetupVC message %+v for unknown dialed VC", m)
+ vif.ctx.VI(2).Infof("Ignoring SetupVC message %+v for unknown dialed VC", m)
return nil
}
vrange, err := vif.versions.Intersect(&m.Setup.Versions)
@@ -708,7 +712,7 @@
return nil
}
// This is an accepted VC.
- if err := vif.acceptVC(m); err != nil {
+ if err := vif.acceptVC(vif.ctx, m); err != nil {
vif.sendOnExpressQ(&message.CloseVC{VCI: m.VCI, Error: err.Error()})
}
return nil
@@ -716,7 +720,7 @@
case *message.CloseVC:
if vc, _, _ := vif.vcMap.Find(m.VCI); vc != nil {
vif.deleteVC(vc.VCI())
- vlog.VI(2).Infof("CloseVC(%+v) on VIF %s", m, vif)
+ vif.ctx.VI(2).Infof("CloseVC(%+v) on VIF %s", m, vif)
// TODO(cnicolaou): it would be nice to have a method on VC
// to indicate a 'remote close' rather than a 'local one'. This helps
// with error reporting since we expect reads/writes to occur
@@ -724,19 +728,19 @@
vc.Close(verror.New(stream.ErrNetwork, nil, verror.New(errRemoteEndClosedVC, nil, m.Error)))
return nil
}
- vlog.VI(2).Infof("Ignoring CloseVC(%+v) for unrecognized VCI on VIF %s", m, vif)
+ vif.ctx.VI(2).Infof("Ignoring CloseVC(%+v) for unrecognized VCI on VIF %s", m, vif)
case *message.Setup:
- vlog.Infof("Ignoring redundant Setup message %T on VIF %s", m, vif)
+ vif.ctx.Infof("Ignoring redundant Setup message %T on VIF %s", m, vif)
default:
- vlog.Infof("Ignoring unrecognized message %T on VIF %s", m, vif)
+ vif.ctx.Infof("Ignoring unrecognized message %T on VIF %s", m, vif)
}
return nil
}
-func (vif *VIF) vcDispatchLoop(vc *vc.VC, messages *pcqueue.T) {
- defer vlog.VI(2).Infof("Exiting vcDispatchLoop(%v) on VIF %v", vc, vif)
+func (vif *VIF) vcDispatchLoop(ctx *context.T, vc *vc.VC, messages *pcqueue.T) {
+ defer ctx.VI(2).Infof("Exiting vcDispatchLoop(%v) on VIF %v", vc, vif)
defer vif.rpending.Done()
for {
qm, err := messages.Get(nil)
@@ -745,7 +749,7 @@
}
m := qm.(*message.Data)
if err := vc.DispatchPayload(m.Flow, m.Payload); err != nil {
- vlog.VI(2).Infof("Ignoring data message %v for on VIF %s: %v", m, vif, err)
+ ctx.VI(2).Infof("Ignoring data message %v for on VIF %s: %v", m, vif, err)
}
if m.Close() {
vif.shutdownFlow(vc, m.Flow)
@@ -788,15 +792,15 @@
return
}
- vlog.VI(2).Infof("Running acceptFlowsLoop for VC %v on VIF %v", vc, vif)
+ vif.ctx.VI(2).Infof("Running acceptFlowsLoop for VC %v on VIF %v", vc, vif)
for {
f, err := hr.Listener.Accept()
if err != nil {
- vlog.VI(2).Infof("Accept failed on VC %v on VIF %v: %v", vc, vif, err)
+ vif.ctx.VI(2).Infof("Accept failed on VC %v on VIF %v: %v", vc, vif, err)
return
}
if err := acceptor.Put(ConnectorAndFlow{vc, f}); err != nil {
- vlog.VI(2).Infof("vif.acceptor.Put(%v, %T) on VIF %v failed: %v", vc, f, vif, err)
+ vif.ctx.VI(2).Infof("vif.acceptor.Put(%v, %T) on VIF %v failed: %v", vc, f, vif, err)
f.Close()
return
}
@@ -807,7 +811,7 @@
for cid, bytes := range counters {
vc, _, _ := vif.vcMap.Find(cid.VCI())
if vc == nil {
- vlog.VI(2).Infof("Ignoring counters for non-existent VCI %d on VIF %s", cid.VCI(), vif)
+ vif.ctx.VI(2).Infof("Ignoring counters for non-existent VCI %d on VIF %s", cid.VCI(), vif)
continue
}
vc.ReleaseCounters(cid.Flow(), bytes)
@@ -820,7 +824,7 @@
for {
writer, bufs, err := vif.outgoing.Get(nil)
if err != nil {
- vlog.VI(1).Infof("Exiting writeLoop of VIF %s because of bqueue.Get error: %v", vif, err)
+ vif.ctx.VI(1).Infof("Exiting writeLoop of VIF %s because of bqueue.Get error: %v", vif, err)
return
}
wtype := reflect.TypeOf(writer)
@@ -831,7 +835,7 @@
case vif.expressQ:
for _, b := range bufs {
if err := vif.writeSerializedMessage(b.Contents); err != nil {
- vlog.VI(1).Infof("Exiting writeLoop of VIF %s because Control message write failed: %s", vif, err)
+ vif.ctx.VI(1).Infof("Exiting writeLoop of VIF %s because Control message write failed: %s", vif, err)
releaseBufs(bufs)
return
}
@@ -848,9 +852,9 @@
}
vif.flowMu.Unlock()
if len(msg.Counters) > 0 {
- vlog.VI(3).Infof("Sending counters %v on VIF %s", msg.Counters, vif)
+ vif.ctx.VI(3).Infof("Sending counters %v on VIF %s", msg.Counters, vif)
if err := vif.writeMessage(msg); err != nil {
- vlog.VI(1).Infof("Exiting writeLoop of VIF %s because AddReceiveBuffers message write failed: %v", vif, err)
+ vif.ctx.VI(1).Infof("Exiting writeLoop of VIF %s because AddReceiveBuffers message write failed: %v", vif, err)
return
}
}
@@ -864,8 +868,8 @@
}
}
-func (vif *VIF) vcWriteLoop(vc *vc.VC, messages *pcqueue.T) {
- defer vlog.VI(2).Infof("Exiting vcWriteLoop(%v) on VIF %v", vc, vif)
+func (vif *VIF) vcWriteLoop(ctx *context.T, vc *vc.VC, messages *pcqueue.T) {
+ defer ctx.VI(2).Infof("Exiting vcWriteLoop(%v) on VIF %v", vc, vif)
defer vif.wpending.Done()
for {
qm, err := messages.Get(nil)
@@ -875,7 +879,7 @@
m := qm.(*message.Data)
m.Payload, err = vc.Encrypt(m.Flow, m.Payload)
if err != nil {
- vlog.Infof("Encryption failed. Flow:%v VC:%v Error:%v", m.Flow, vc, err)
+ ctx.Infof("Encryption failed. Flow:%v VC:%v Error:%v", m.Flow, vc, err)
}
if m.Close() {
// The last bytes written on the flow will be sent out
@@ -923,7 +927,7 @@
// sendOnExpressQ adds 'msg' to the expressQ (highest priority queue) of messages to write on the wire.
func (vif *VIF) sendOnExpressQ(msg message.T) error {
- vlog.VI(2).Infof("sendOnExpressQ(%T = %+v) on VIF %s", msg, msg, vif)
+ vif.ctx.VI(2).Infof("sendOnExpressQ(%T = %+v) on VIF %s", msg, msg, vif)
var buf bytes.Buffer
// Don't encrypt yet, because the message ordering isn't yet determined.
// Encryption is performed by vif.writeSerializedMessage() when the
@@ -972,7 +976,7 @@
_, _, wq := vif.vcMap.Find(vci)
if wq == nil {
// VC has been removed, stop sending messages
- vlog.VI(2).Infof("VCI %d on VIF %s was shutdown, dropping %d messages that were pending a write", vci, vif, len(bufs))
+ vif.ctx.VI(2).Infof("VCI %d on VIF %s was shutdown, dropping %d messages that were pending a write", vci, vif, len(bufs))
releaseBufs(bufs)
return
}
@@ -1012,14 +1016,14 @@
return ret
}
-func (vif *VIF) newVC(vci id.VC, localEP, remoteEP naming.Endpoint, idleTimeout time.Duration, side vifSide) (*vc.VC, error) {
+func (vif *VIF) newVC(ctx *context.T, vci id.VC, localEP, remoteEP naming.Endpoint, idleTimeout time.Duration, side vifSide) (*vc.VC, error) {
vif.muStartTimer.Lock()
if vif.startTimer != nil {
vif.startTimer.Stop()
vif.startTimer = nil
}
vif.muStartTimer.Unlock()
- vc := vc.InternalNew(vc.Params{
+ vc := vc.InternalNew(ctx, vc.Params{
VCI: vci,
Dialed: side == dialedVIF,
LocalEP: localEP,
@@ -1034,11 +1038,11 @@
}
// Start vcWriteLoop
if added = added && vif.wpending.TryAdd(); added {
- go vif.vcWriteLoop(vc, wq)
+ go vif.vcWriteLoop(ctx, vc, wq)
}
// Start vcDispatchLoop
if added = added && vif.rpending.TryAdd(); added {
- go vif.vcDispatchLoop(vc, rq)
+ go vif.vcDispatchLoop(ctx, vc, rq)
}
if !added {
if rq != nil {
@@ -1062,7 +1066,7 @@
}
func (vif *VIF) closeVCAndSendMsg(vc *vc.VC, clientVCClosed bool, errMsg error) {
- vlog.VI(2).Infof("Shutting down VCI %d on VIF %v due to: %v", vc.VCI(), vif, errMsg)
+ vif.ctx.VI(2).Infof("Shutting down VCI %d on VIF %v due to: %v", vc.VCI(), vif, errMsg)
vif.deleteVC(vc.VCI())
vc.Close(errMsg)
if clientVCClosed {
@@ -1077,7 +1081,7 @@
VCI: vc.VCI(),
Error: msg,
}); err != nil {
- vlog.VI(2).Infof("sendOnExpressQ(CloseVC{VCI:%d,...}) on VIF %v failed: %v", vc.VCI(), vif, err)
+ vif.ctx.VI(2).Infof("sendOnExpressQ(CloseVC{VCI:%d,...}) on VIF %v failed: %v", vc.VCI(), vif, err)
}
}
@@ -1097,7 +1101,7 @@
n := 0
for _, vc := range vcs {
if naming.Compare(vc.RemoteEndpoint().RoutingID(), remote.RoutingID()) {
- vlog.VI(1).Infof("VCI %d on VIF %s being closed because of ShutdownVCs call", vc.VCI(), vif)
+ vif.ctx.VI(1).Infof("VCI %d on VIF %s being closed because of ShutdownVCs call", vc.VCI(), vif)
vif.closeVCAndSendMsg(vc, false, nil)
n++
}
@@ -1232,14 +1236,3 @@
}
return ep
}
-
-// getDialContext returns the DialContext for this call.
-func getDialContext(vopts []stream.VCOpt) *context.T {
- for _, o := range vopts {
- switch v := o.(type) {
- case vc.DialContext:
- return v.T
- }
- }
- return nil
-}
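With vc.DialContext gone, callers pass the context explicitly; a sketch of the migration, based on the updated call sites in the tests below:

    // before: the ctx rode along inside the options via vc.DialContext{ctx}
    // after:  the ctx (now carrying the principal) is the first argument
    ctx, _ = v23.WithPrincipal(ctx, principal)
    vf, err := vif.InternalNewDialedVIF(ctx, conn, rid, nil, nil)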
diff --git a/runtime/internal/rpc/stream/vif/vif_test.go b/runtime/internal/rpc/stream/vif/vif_test.go
index b73ed57..2c5c2f7 100644
--- a/runtime/internal/rpc/stream/vif/vif_test.go
+++ b/runtime/internal/rpc/stream/vif/vif_test.go
@@ -20,27 +20,33 @@
"testing"
"time"
+ "v.io/v23"
+ "v.io/v23/context"
"v.io/v23/naming"
"v.io/v23/rpc/version"
- "v.io/v23/security"
inaming "v.io/x/ref/runtime/internal/naming"
"v.io/x/ref/runtime/internal/rpc/stream"
"v.io/x/ref/runtime/internal/rpc/stream/vc"
"v.io/x/ref/runtime/internal/rpc/stream/vif"
iversion "v.io/x/ref/runtime/internal/rpc/version"
+ "v.io/x/ref/test"
"v.io/x/ref/test/testutil"
)
//go:generate v23 test generate
func TestSingleFlowCreatedAtClient(t *testing.T) {
+ ctx, shutdown := test.V23InitWithParams(test.InitParams{})
+ defer shutdown()
pclient := testutil.NewPrincipal("client")
pserver := testutil.NewPrincipal("server")
- client, server := NewClientServer(pclient, pserver)
+ cctx, _ := v23.WithPrincipal(ctx, pclient)
+ sctx, _ := v23.WithPrincipal(ctx, pserver)
+ client, server := NewClientServer(cctx, sctx)
defer client.Close()
- clientVC, _, err := createVC(client, server, pclient, makeEP(0x5))
+ clientVC, _, err := createVC(cctx, client, server, makeEP(0x5))
if err != nil {
t.Fatal(err)
}
@@ -59,12 +65,16 @@
}
func TestSingleFlowCreatedAtServer(t *testing.T) {
+ ctx, shutdown := test.V23InitWithParams(test.InitParams{})
+ defer shutdown()
pclient := testutil.NewPrincipal("client")
pserver := testutil.NewPrincipal("server")
- client, server := NewClientServer(pclient, pserver)
+ cctx, _ := v23.WithPrincipal(ctx, pclient)
+ sctx, _ := v23.WithPrincipal(ctx, pserver)
+ client, server := NewClientServer(cctx, sctx)
defer client.Close()
- clientVC, serverConnector, err := createVC(client, server, pclient, makeEP(0x5))
+ clientVC, serverConnector, err := createVC(cctx, client, server, makeEP(0x5))
if err != nil {
t.Fatal(err)
}
@@ -86,6 +96,8 @@
func testMultipleVCsAndMultipleFlows(t *testing.T, gomaxprocs int) {
testutil.InitRandGenerator(t.Logf)
+ ctx, shutdown := test.V23InitWithParams(test.InitParams{})
+ defer shutdown()
// This test dials multiple VCs from the client to the server.
// On each VC, it creates multiple flows, writes to them and verifies
// that the other process received what was written.
@@ -109,13 +121,15 @@
pclient := testutil.NewPrincipal("client")
pserver := testutil.NewPrincipal("server")
- client, server := NewClientServer(pclient, pserver)
+ cctx, _ := v23.WithPrincipal(ctx, pclient)
+ sctx, _ := v23.WithPrincipal(ctx, pserver)
+ client, server := NewClientServer(cctx, sctx)
defer client.Close()
// Create all the VCs
// clientVCs[i] is the VC at the client process
// serverConnectors[i] is the corresponding VC at the server process.
- clientVCs, serverConnectors, err := createNVCs(client, server, pclient, 0, nVCs)
+ clientVCs, serverConnectors, err := createNVCs(cctx, client, server, 0, nVCs)
if err != nil {
t.Fatal(err)
}
@@ -241,10 +255,14 @@
}
func TestClose(t *testing.T) {
+ ctx, shutdown := test.V23InitWithParams(test.InitParams{})
+ defer shutdown()
pclient := testutil.NewPrincipal("client")
pserver := testutil.NewPrincipal("server")
- client, server := NewClientServer(pclient, pserver)
- vc, _, err := createVC(client, server, pclient, makeEP(0x5))
+ cctx, _ := v23.WithPrincipal(ctx, pclient)
+ sctx, _ := v23.WithPrincipal(ctx, pserver)
+ client, server := NewClientServer(cctx, sctx)
+ vc, _, err := createVC(cctx, client, server, makeEP(0x5))
if err != nil {
t.Fatal(err)
}
@@ -276,14 +294,18 @@
}
func TestOnClose(t *testing.T) {
+ ctx, shutdown := test.V23InitWithParams(test.InitParams{})
+ defer shutdown()
pclient := testutil.NewPrincipal("client")
pserver := testutil.NewPrincipal("server")
+ cctx, _ := v23.WithPrincipal(ctx, pclient)
+ sctx, _ := v23.WithPrincipal(ctx, pserver)
notifyC, notifyS := make(chan *vif.VIF), make(chan *vif.VIF)
notifyFuncC := func(vf *vif.VIF) { notifyC <- vf }
notifyFuncS := func(vf *vif.VIF) { notifyS <- vf }
// Close the client VIF. Both client and server should be notified.
- client, server, err := New(nil, nil, pclient, pserver, notifyFuncC, notifyFuncS, nil, nil)
+ client, server, err := New(nil, nil, cctx, sctx, notifyFuncC, notifyFuncS, nil, nil)
if err != nil {
t.Fatal(err)
}
@@ -296,7 +318,7 @@
}
// Same as above, but close the server VIF at this time.
- client, server, err = New(nil, nil, pclient, pserver, notifyFuncC, notifyFuncS, nil, nil)
+ client, server, err = New(nil, nil, cctx, sctx, notifyFuncC, notifyFuncS, nil, nil)
if err != nil {
t.Fatal(err)
}
@@ -313,15 +335,18 @@
const (
waitTime = 5 * time.Millisecond
)
-
+ ctx, shutdown := test.V23InitWithParams(test.InitParams{})
+ defer shutdown()
pclient := testutil.NewPrincipal("client")
pserver := testutil.NewPrincipal("server")
+ cctx, _ := v23.WithPrincipal(ctx, pclient)
+ sctx, _ := v23.WithPrincipal(ctx, pserver)
notify := make(chan interface{})
notifyFunc := func(vf *vif.VIF) { notify <- vf }
newVIF := func() (vf, remote *vif.VIF) {
var err error
- vf, remote, err = New(nil, nil, pclient, pserver, notifyFunc, notifyFunc, nil, nil)
+ vf, remote, err = New(nil, nil, cctx, sctx, notifyFunc, notifyFunc, nil, nil)
if err != nil {
t.Fatal(err)
}
@@ -342,7 +367,7 @@
// Open one VC. Should not be closed.
vf, remote = newVIF()
- if _, _, err := createVC(vf, remote, pclient, makeEP(0x10)); err != nil {
+ if _, _, err := createVC(cctx, vf, remote, makeEP(0x10)); err != nil {
t.Fatal(err)
}
if err := vif.WaitWithTimeout(notify, waitTime); err != nil {
@@ -357,7 +382,7 @@
// Same as above, but open a VC from the remote side.
vf, remote = newVIF()
- _, _, err := createVC(remote, vf, pclient, makeEP(0x10))
+ _, _, err := createVC(cctx, remote, vf, makeEP(0x10))
if err != nil {
t.Fatal(err)
}
@@ -371,7 +396,7 @@
// Create two VCs.
vf, remote = newVIF()
- if _, _, err := createNVCs(vf, remote, pclient, 0x10, 2); err != nil {
+ if _, _, err := createNVCs(cctx, vf, remote, 0x10, 2); err != nil {
t.Fatal(err)
}
@@ -399,9 +424,12 @@
// connection of the other side to be closed, especially in race testing.
waitTime = 150 * time.Millisecond
)
-
+ ctx, shutdown := test.V23InitWithParams(test.InitParams{})
+ defer shutdown()
pclient := testutil.NewPrincipal("client")
pserver := testutil.NewPrincipal("server")
+ cctx, _ := v23.WithPrincipal(ctx, pclient)
+ sctx, _ := v23.WithPrincipal(ctx, pserver)
notify := make(chan interface{})
notifyFunc := func(vf *vif.VIF) { notify <- vf }
@@ -412,7 +440,7 @@
vfStartTime, remoteStartTime = remoteStartTime, vfStartTime
}
var err error
- vf, remote, err = New(nil, nil, pclient, pserver, notifyFunc, notifyFunc, []stream.VCOpt{vc.StartTimeout{vfStartTime}}, []stream.ListenerOpt{vc.StartTimeout{remoteStartTime}})
+ vf, remote, err = New(nil, nil, cctx, sctx, notifyFunc, notifyFunc, []stream.VCOpt{vc.StartTimeout{vfStartTime}}, []stream.ListenerOpt{vc.StartTimeout{remoteStartTime}})
if err != nil {
t.Fatal(err)
}
@@ -434,7 +462,7 @@
// Open one VC. Should not be closed.
vf, remote, triggerTimers = newVIF()
- if _, _, err := createVC(vf, remote, pclient, makeEP(0x10)); err != nil {
+ if _, _, err := createVC(cctx, vf, remote, makeEP(0x10)); err != nil {
t.Fatal(err)
}
triggerTimers()
@@ -457,15 +485,18 @@
idleTime = 10 * time.Millisecond
waitTime = idleTime * 2
)
-
+ ctx, shutdown := test.V23InitWithParams(test.InitParams{})
+ defer shutdown()
pclient := testutil.NewPrincipal("client")
pserver := testutil.NewPrincipal("server")
+ cctx, _ := v23.WithPrincipal(ctx, pclient)
+ sctx, _ := v23.WithPrincipal(ctx, pserver)
notify := make(chan interface{})
notifyFunc := func(vf *vif.VIF) { notify <- vf }
newVIF := func() (vf, remote *vif.VIF) {
var err error
- if vf, remote, err = New(nil, nil, pclient, pserver, notifyFunc, notifyFunc, nil, nil); err != nil {
+ if vf, remote, err = New(nil, nil, cctx, sctx, notifyFunc, notifyFunc, nil, nil); err != nil {
t.Fatal(err)
}
if err = vf.StartAccepting(); err != nil {
@@ -479,7 +510,7 @@
newVC := func(vf, remote *vif.VIF) (VC stream.VC, ln stream.Listener, remoteVC stream.Connector, triggerTimers func()) {
triggerTimers = vif.SetFakeTimers()
var err error
- VC, remoteVC, err = createVC(vf, remote, pclient, makeEP(0x10), vc.IdleTimeout{idleTime})
+ VC, remoteVC, err = createVC(cctx, vf, remote, makeEP(0x10), vc.IdleTimeout{idleTime})
if err != nil {
t.Fatal(err)
}
@@ -508,7 +539,7 @@
// Same as above, but with multiple VCs.
vf, remote = newVIF()
triggerTimers = vif.SetFakeTimers()
- if _, _, err := createNVCs(vf, remote, pclient, 0x10, 5, vc.IdleTimeout{idleTime}); err != nil {
+ if _, _, err := createNVCs(cctx, vf, remote, 0x10, 5, vc.IdleTimeout{idleTime}); err != nil {
t.Fatal(err)
}
triggerTimers()
@@ -572,9 +603,13 @@
func TestIdleTimeoutServer(t *testing.T) { testIdleTimeout(t, true) }
func TestShutdownVCs(t *testing.T) {
+ ctx, shutdown := test.V23InitWithParams(test.InitParams{})
+ defer shutdown()
pclient := testutil.NewPrincipal("client")
pserver := testutil.NewPrincipal("server")
- client, server := NewClientServer(pclient, pserver)
+ cctx, _ := v23.WithPrincipal(ctx, pclient)
+ sctx, _ := v23.WithPrincipal(ctx, pserver)
+ client, server := NewClientServer(cctx, sctx)
defer server.Close()
defer client.Close()
@@ -586,19 +621,19 @@
return nil
}
- if _, _, err := createVC(client, server, pclient, makeEP(0x5)); err != nil {
+ if _, _, err := createVC(cctx, client, server, makeEP(0x5)); err != nil {
t.Fatal(err)
}
if err := testN(1); err != nil {
t.Error(err)
}
- if _, _, err := createVC(client, server, pclient, makeEP(0x5)); err != nil {
+ if _, _, err := createVC(cctx, client, server, makeEP(0x5)); err != nil {
t.Fatal(err)
}
if err := testN(2); err != nil {
t.Error(err)
}
- if _, _, err := createVC(client, server, pclient, makeEP(0x7)); err != nil {
+ if _, _, err := createVC(cctx, client, server, makeEP(0x7)); err != nil {
t.Fatal(err)
}
if err := testN(3); err != nil {
@@ -635,9 +670,13 @@
}
func (tc *versionTestCase) Run(t *testing.T) {
+ ctx, shutdown := test.V23InitWithParams(test.InitParams{})
+ defer shutdown()
pclient := testutil.NewPrincipal("client")
pserver := testutil.NewPrincipal("server")
- client, server, err := NewVersionedClientServer(tc.client, tc.server, pclient, pserver)
+ cctx, _ := v23.WithPrincipal(ctx, pclient)
+ sctx, _ := v23.WithPrincipal(ctx, pserver)
+ client, server, err := NewVersionedClientServer(tc.client, tc.server, cctx, sctx)
if (err != nil) != tc.expectVIFError {
t.Errorf("Error mismatch. Wanted error: %v, got %v; client: %v, server: %v", tc.expectVIFError, err, tc.client, tc.server)
}
@@ -651,7 +690,7 @@
Address: "addr",
RID: naming.FixedRoutingID(0x5),
}
- clientVC, _, err := createVC(client, server, pclient, ep)
+ clientVC, _, err := createVC(cctx, client, server, ep)
if (err != nil) != tc.expectError {
t.Errorf("Error mismatch. Wanted error: %v, got %v (client:%v, server:%v ep:%v)", tc.expectError, err, tc.client, tc.server, tc.ep)
@@ -691,19 +730,23 @@
}
func TestNetworkFailure(t *testing.T) {
+ ctx, shutdown := test.V23InitWithParams(test.InitParams{})
+ defer shutdown()
pclient := testutil.NewPrincipal("client")
pserver := testutil.NewPrincipal("server")
+ cctx, _ := v23.WithPrincipal(ctx, pclient)
+ sctx, _ := v23.WithPrincipal(ctx, pserver)
c1, c2 := pipe()
result := make(chan *vif.VIF)
closed := make(chan struct{})
go func() {
- client, err := vif.InternalNewDialedVIF(c1, naming.FixedRoutingID(0xc), pclient, nil, func(vf *vif.VIF) { close(closed) })
+ client, err := vif.InternalNewDialedVIF(sctx, c1, naming.FixedRoutingID(0xc), nil, func(vf *vif.VIF) { close(closed) })
if err != nil {
t.Fatal(err)
}
result <- client
}()
- server, err := vif.InternalNewAcceptedVIF(c2, naming.FixedRoutingID(0x5), pserver, pserver.BlessingStore().Default(), nil, nil)
+ server, err := vif.InternalNewAcceptedVIF(sctx, c2, naming.FixedRoutingID(0x5), pserver.BlessingStore().Default(), nil, nil)
if err != nil {
t.Fatal(err)
}
@@ -712,7 +755,7 @@
c1.Close()
// Wait until the VIF is closed, since Dial() may run before the underlying VC is closed.
<-closed
- if _, err := client.Dial(makeEP(0x5), pclient); err == nil {
+ if _, err := client.Dial(cctx, makeEP(0x5)); err == nil {
t.Errorf("Expected client.Dial to fail")
}
if _, err := server.Accept(); err == nil {
@@ -721,9 +764,13 @@
}
func TestPreAuthentication(t *testing.T) {
+ ctx, shutdown := test.V23InitWithParams(test.InitParams{})
+ defer shutdown()
pclient := testutil.NewPrincipal("client")
pserver := testutil.NewPrincipal("server")
- client, server := NewClientServer(pclient, pserver)
+ cctx, _ := v23.WithPrincipal(ctx, pclient)
+ sctx, _ := v23.WithPrincipal(ctx, pserver)
+ client, server := NewClientServer(cctx, sctx)
defer client.Close()
check := func(numVCs, numPreAuth uint) error {
@@ -745,7 +792,7 @@
}
// Use a different routing ID. Should not use pre-auth.
- _, _, err := createVC(client, server, pclient, makeEP(0x55))
+ _, _, err := createVC(cctx, client, server, makeEP(0x55))
if err != nil {
t.Fatal(err)
}
@@ -754,7 +801,7 @@
}
// Use the same routing ID. Should use pre-auth.
- _, _, err = createVC(client, server, pclient, makeEP(0x5))
+ _, _, err = createVC(cctx, client, server, makeEP(0x5))
if err != nil {
t.Fatal(err)
}
@@ -763,7 +810,7 @@
}
// Use the null routing ID. Should use VIF pre-auth.
- _, _, err = createVC(client, server, pclient, makeEP(0x0))
+ _, _, err = createVC(cctx, client, server, makeEP(0x0))
if err != nil {
t.Fatal(err)
}
@@ -772,7 +819,8 @@
}
// Use a different principal. Should not use pre-auth.
- _, _, err = createVC(client, server, testutil.NewPrincipal("client2"), makeEP(0x5))
+ nctx, _ := v23.WithPrincipal(ctx, testutil.NewPrincipal("client2"))
+ _, _, err = createVC(nctx, client, server, makeEP(0x5))
if err != nil {
t.Fatal(err)
}
@@ -859,25 +907,25 @@
return p1, p2
}
-func NewClientServer(pclient, pserver security.Principal) (client, server *vif.VIF) {
+func NewClientServer(cctx, sctx *context.T) (client, server *vif.VIF) {
var err error
- client, server, err = New(nil, nil, pclient, pserver, nil, nil, nil, nil)
+ client, server, err = New(nil, nil, cctx, sctx, nil, nil, nil, nil)
if err != nil {
panic(err)
}
return
}
-func NewVersionedClientServer(clientVersions, serverVersions *iversion.Range, pclient, pserver security.Principal) (client, server *vif.VIF, verr error) {
- return New(clientVersions, serverVersions, pclient, pserver, nil, nil, nil, nil)
+func NewVersionedClientServer(clientVersions, serverVersions *iversion.Range, cctx, sctx *context.T) (client, server *vif.VIF, verr error) {
+ return New(clientVersions, serverVersions, cctx, sctx, nil, nil, nil, nil)
}
-func New(clientVersions, serverVersions *iversion.Range, pclient, pserver security.Principal, clientOnClose, serverOnClose func(*vif.VIF), opts []stream.VCOpt, lopts []stream.ListenerOpt) (client, server *vif.VIF, verr error) {
+func New(clientVersions, serverVersions *iversion.Range, cctx, sctx *context.T, clientOnClose, serverOnClose func(*vif.VIF), opts []stream.VCOpt, lopts []stream.ListenerOpt) (client, server *vif.VIF, verr error) {
c1, c2 := pipe()
var cerr error
cl := make(chan *vif.VIF)
go func() {
- c, err := vif.InternalNewDialedVIF(c1, naming.FixedRoutingID(0xc), pclient, clientVersions, clientOnClose, opts...)
+ c, err := vif.InternalNewDialedVIF(cctx, c1, naming.FixedRoutingID(0xc), clientVersions, clientOnClose, opts...)
if err != nil {
cerr = err
close(cl)
@@ -885,7 +933,8 @@
cl <- c
}
}()
- s, err := vif.InternalNewAcceptedVIF(c2, naming.FixedRoutingID(0x5), pserver, pserver.BlessingStore().Default(), serverVersions, serverOnClose, lopts...)
+ blessings := v23.GetPrincipal(sctx).BlessingStore().Default()
+ s, err := vif.InternalNewAcceptedVIF(sctx, c2, naming.FixedRoutingID(0x5), blessings, serverVersions, serverOnClose, lopts...)
c, ok := <-cl
if err != nil {
verr = err
@@ -929,12 +978,12 @@
// createVC creates a VC by dialing from the client process to the server
// process. It returns the VC at the client and the Connector at the server
// (which the server can use to create flows over the VC).
-func createVC(client, server *vif.VIF, pclient security.Principal, ep naming.Endpoint, opts ...stream.VCOpt) (clientVC stream.VC, serverConnector stream.Connector, err error) {
+func createVC(ctx *context.T, client, server *vif.VIF, ep naming.Endpoint, opts ...stream.VCOpt) (clientVC stream.VC, serverConnector stream.Connector, err error) {
vcChan := make(chan stream.VC)
scChan := make(chan stream.Connector)
errChan := make(chan error)
go func() {
- vc, err := client.Dial(ep, pclient, opts...)
+ vc, err := client.Dial(ctx, ep, opts...)
errChan <- err
vcChan <- vc
}()
@@ -956,11 +1005,11 @@
return
}
-func createNVCs(client, server *vif.VIF, pclient security.Principal, startRID uint64, N int, opts ...stream.VCOpt) (clientVCs []stream.VC, serverConnectors []stream.Connector, err error) {
+func createNVCs(ctx *context.T, client, server *vif.VIF, startRID uint64, N int, opts ...stream.VCOpt) (clientVCs []stream.VC, serverConnectors []stream.Connector, err error) {
var c stream.VC
var s stream.Connector
for i := 0; i < N; i++ {
- c, s, err = createVC(client, server, pclient, makeEP(startRID+uint64(i)), opts...)
+ c, s, err = createVC(ctx, client, server, makeEP(startRID+uint64(i)), opts...)
if err != nil {
return
}
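
The vif tests above all migrate the same way: a root context from test.V23InitWithParams, one derived context per principal, and the context (rather than a security.Principal) threaded into the dial/accept helpers. A minimal sketch of the pattern, assuming the helpers defined in this file (NewClientServer, createVC, makeEP); the test name is illustrative:

func TestContextSetupSketch(t *testing.T) {
	ctx, shutdown := test.V23InitWithParams(test.InitParams{})
	defer shutdown()
	// Each side carries its own principal in its context; the context also
	// carries the logger, which is what lets the vlog import go away.
	cctx, err := v23.WithPrincipal(ctx, testutil.NewPrincipal("client"))
	if err != nil {
		t.Fatal(err)
	}
	sctx, err := v23.WithPrincipal(ctx, testutil.NewPrincipal("server"))
	if err != nil {
		t.Fatal(err)
	}
	client, server := NewClientServer(cctx, sctx)
	defer client.Close()
	defer server.Close()
	// The dialing side's context is the one passed to createVC.
	if _, _, err := createVC(cctx, client, server, makeEP(0x5)); err != nil {
		t.Fatal(err)
	}
}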
diff --git a/runtime/internal/rpc/stress/internal/client.go b/runtime/internal/rpc/stress/internal/client.go
index d17b1b0..2263c50 100644
--- a/runtime/internal/rpc/stress/internal/client.go
+++ b/runtime/internal/rpc/stress/internal/client.go
@@ -12,7 +12,6 @@
"v.io/v23/context"
- "v.io/x/lib/vlog"
"v.io/x/ref/runtime/internal/rpc/stress"
)
@@ -30,10 +29,10 @@
for {
got, err := stub.Echo(ctx, payload)
if err != nil {
- vlog.Fatalf("Echo failed: %v", err)
+ ctx.Fatalf("Echo failed: %v", err)
}
if !bytes.Equal(got, payload) {
- vlog.Fatalf("Echo returned %v, but expected %v", got, payload)
+ ctx.Fatalf("Echo returned %v, but expected %v", got, payload)
}
iterations++
@@ -49,17 +48,17 @@
stub := stress.StressClient(server)
arg, err := newSumArg(maxPayloadSize)
if err != nil {
- vlog.Fatalf("new arg failed: %v", err)
+ ctx.Fatalf("new arg failed: %v", err)
}
got, err := stub.Sum(ctx, arg)
if err != nil {
- vlog.Fatalf("Sum failed: %v", err)
+ ctx.Fatalf("Sum failed: %v", err)
}
wanted, _ := doSum(&arg)
if !bytes.Equal(got, wanted) {
- vlog.Fatalf("Sum returned %v, but expected %v", got, wanted)
+ ctx.Fatalf("Sum returned %v, but expected %v", got, wanted)
}
stats.SumCount++
stats.BytesSent += uint64(lenSumArg(&arg))
@@ -73,7 +72,7 @@
stub := stress.StressClient(server)
stream, err := stub.SumStream(ctx)
if err != nil {
- vlog.Fatalf("Stream failed: %v", err)
+ ctx.Fatalf("Stream failed: %v", err)
}
chunkCnt := rand.Intn(maxChunkCnt) + 1
@@ -107,23 +106,23 @@
for i := 0; i < chunkCnt; i++ {
arg, err := newSumArg(maxPayloadSize)
if err != nil {
- vlog.Fatalf("new arg failed: %v", err)
+ ctx.Fatalf("new arg failed: %v", err)
}
args[i] = arg
if err = sendS.Send(arg); err != nil {
- vlog.Fatalf("SendStream failed to send: %v", err)
+ ctx.Fatalf("SendStream failed to send: %v", err)
}
stats.BytesSent += uint64(lenSumArg(&arg))
}
if err = sendS.Close(); err != nil {
- vlog.Fatalf("SendStream failed to close: %v", err)
+ ctx.Fatalf("SendStream failed to close: %v", err)
}
if err = <-done; err != nil {
- vlog.Fatalf("%v", err)
+ ctx.Fatalf("%v", err)
}
if err = stream.Finish(); err != nil {
- vlog.Fatalf("Stream failed to finish: %v", err)
+ ctx.Fatalf("Stream failed to finish: %v", err)
}
stats.SumStreamCount++
}
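
The conversion rule in this file is purely mechanical: *context.T carries the logger, so each vlog call becomes the same method on a context already in scope. A self-contained, hedged sketch (doWork is a hypothetical stand-in for the stress RPCs above, not part of the CL):

package main

import (
	"v.io/v23"
	"v.io/v23/context"

	_ "v.io/x/ref/runtime/factories/generic"
)

// doWork stands in for the Echo/Sum calls above.
func doWork(ctx *context.T) error {
	ctx.VI(2).Infof("doing work") // was: vlog.VI(2).Infof(...)
	return nil
}

func main() {
	ctx, shutdown := v23.Init()
	defer shutdown()
	if err := doWork(ctx); err != nil {
		ctx.Fatalf("work failed: %v", err) // was: vlog.Fatalf(...)
	}
}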
diff --git a/runtime/internal/rpc/stress/mtstress/run.go b/runtime/internal/rpc/stress/mtstress/run.go
index f7f1596..c357311 100644
--- a/runtime/internal/rpc/stress/mtstress/run.go
+++ b/runtime/internal/rpc/stress/mtstress/run.go
@@ -14,7 +14,6 @@
"v.io/v23"
"v.io/v23/context"
- "v.io/x/lib/vlog"
"v.io/x/ref/lib/stats/histogram"
)
@@ -90,12 +89,12 @@
MinValue: int64(float64(avgms) * 0.95),
GrowthFactor: 0.20,
}
- vlog.Infof("Creating histogram after %d samples (%vms avg latency): %+v", ret.Count, avgms, opts)
+ p.Context.Infof("Creating histogram after %d samples (%vms avg latency): %+v", ret.Count, avgms, opts)
ret.HistMS = histogram.New(opts)
}
case sig := <-interrupt:
if time.Since(lastInterrupt) < time.Second {
- vlog.Infof("Multiple %v signals received, aborting test", sig)
+ p.Context.Infof("Multiple %v signals received, aborting test", sig)
stopped = true
break
}
@@ -119,7 +118,7 @@
func warmup(ctx *context.T, f func(*context.T) (time.Duration, error)) {
const nWarmup = 10
- vlog.Infof("Sending %d requests as warmup", nWarmup)
+ ctx.Infof("Sending %d requests as warmup", nWarmup)
var wg sync.WaitGroup
for i := 0; i < nWarmup; i++ {
wg.Add(1)
@@ -129,7 +128,7 @@
}()
}
wg.Wait()
- vlog.Infof("Done with warmup")
+ ctx.Infof("Done with warmup")
}
func call(ctx *context.T, f func(*context.T) (time.Duration, error), reauth bool, d chan<- time.Duration) {
@@ -143,7 +142,7 @@
// change!
var err error
if ctx, err = v23.WithPrincipal(ctx, v23.GetPrincipal(ctx)); err != nil {
- vlog.Infof("%v", err)
+ ctx.Infof("%v", err)
return
}
client = v23.GetClient(ctx)
@@ -151,7 +150,7 @@
}
sample, err := f(ctx)
if err != nil {
- vlog.Infof("%v", err)
+ ctx.Infof("%v", err)
return
}
d <- sample
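
One non-logging detail in the hunk above deserves a note: when reauth is requested, the code derives a context carrying the same principal, which appears to give the call fresh security state so it re-authenticates without changing identity. A hedged fragment of the same move:

	// Re-wrap the current principal; the derived context gets fresh
	// security state (per the reauth branch above).
	nctx, err := v23.WithPrincipal(ctx, v23.GetPrincipal(ctx))
	if err != nil {
		ctx.Infof("%v", err)
		return
	}
	client := v23.GetClient(nctx) // use this client for subsequent calls
	_ = client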
diff --git a/runtime/internal/rpc/stress/stressd/main.go b/runtime/internal/rpc/stress/stressd/main.go
index 8a27535..1adbceb 100644
--- a/runtime/internal/rpc/stress/stressd/main.go
+++ b/runtime/internal/rpc/stress/stressd/main.go
@@ -14,7 +14,6 @@
"v.io/v23/context"
"v.io/v23/security"
"v.io/x/lib/cmdline"
- "v.io/x/lib/vlog"
"v.io/x/ref/lib/signals"
"v.io/x/ref/lib/v23cmd"
"v.io/x/ref/lib/xrpc"
@@ -43,9 +42,9 @@
service, stop := internal.NewService()
server, err := xrpc.NewServer(ctx, "", service, security.AllowEveryone())
if err != nil {
- vlog.Fatalf("NewServer failed: %v", err)
+ ctx.Fatalf("NewServer failed: %v", err)
}
- vlog.Infof("listening on %s", server.Status().Endpoints[0].Name())
+ ctx.Infof("listening on %s", server.Status().Endpoints[0].Name())
var timeout <-chan time.Time
if duration > 0 {
diff --git a/runtime/internal/rpc/test/client_test.go b/runtime/internal/rpc/test/client_test.go
index 7ea8425..ceb3a16 100644
--- a/runtime/internal/rpc/test/client_test.go
+++ b/runtime/internal/rpc/test/client_test.go
@@ -144,6 +144,9 @@
root.ExpectVar("PID")
rootName := root.ExpectVar("MT_NAME")
+ if len(rootName) == 0 {
+ root.Shutdown(nil, os.Stderr)
+ }
sh.SetVar(ref.EnvNamespacePrefix, rootName)
if err = v23.GetNamespace(ctx).SetRoots(rootName); err != nil {
@@ -426,7 +429,7 @@
nctx, _ := context.WithTimeout(ctx, time.Minute)
call, err := client.StartCall(nctx, "name", "noname", nil, options.NoRetry{}, options.SecurityNone)
if verror.ErrorID(err) != verror.ErrNoServers.ID {
- t.Fatalf("wrong error: %s", err)
+ t.Fatalf("wrong error: %s", verror.DebugString(err))
}
if call != nil {
t.Fatalf("expected call to be nil")
diff --git a/runtime/internal/rpc/test/proxy_test.go b/runtime/internal/rpc/test/proxy_test.go
index 9a6c7b8..82add66 100644
--- a/runtime/internal/rpc/test/proxy_test.go
+++ b/runtime/internal/rpc/test/proxy_test.go
@@ -13,8 +13,6 @@
"testing"
"time"
- "v.io/x/lib/vlog"
-
"v.io/v23"
"v.io/v23/context"
"v.io/v23/namespace"
@@ -178,8 +176,8 @@
// We use different stream managers for the client and server
// to prevent VIF re-use (in other words, we want to test VIF
// creation from both the client and server end).
- smserver = imanager.InternalNew(naming.FixedRoutingID(0x555555555))
- smclient = imanager.InternalNew(naming.FixedRoutingID(0x444444444))
+ smserver = imanager.InternalNew(ctx, naming.FixedRoutingID(0x555555555))
+ smclient = imanager.InternalNew(ctx, naming.FixedRoutingID(0x444444444))
ns = tnaming.NewSimpleNamespace()
)
defer smserver.Shutdown()
@@ -190,7 +188,7 @@
}
defer client.Close()
serverCtx, _ := v23.WithPrincipal(ctx, pserver)
- server, err := irpc.InternalNewServer(serverCtx, smserver, ns, nil, "", nil, pserver)
+ server, err := irpc.InternalNewServer(serverCtx, smserver, ns, nil, "", nil)
if err != nil {
t.Fatal(err)
}
@@ -247,7 +245,7 @@
continue
}
for i, s := range me.Servers {
- vlog.Infof("%d: %s", i, s)
+ ctx.Infof("%d: %s", i, s)
}
if err == nil && len(me.Servers) == expect {
ch <- 1
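
Two signature changes ripple through this test besides logging: imanager.InternalNew now takes the context first, and irpc.InternalNewServer drops its trailing principal argument (the principal rides in serverCtx instead). A one-line sketch of the manager half, using the imanager alias from this file:

	sm := imanager.InternalNew(ctx, naming.FixedRoutingID(0x444444444))
	defer sm.Shutdown()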
diff --git a/runtime/internal/rt/mgmt_test.go b/runtime/internal/rt/mgmt_test.go
index be8c552..2c59c05 100644
--- a/runtime/internal/rt/mgmt_test.go
+++ b/runtime/internal/rt/mgmt_test.go
@@ -37,9 +37,9 @@
m := v23.GetAppCycle(ctx)
ch := make(chan string, 1)
- m.WaitForStop(ch)
+ m.WaitForStop(ctx, ch)
for i := 0; i < 10; i++ {
- m.Stop()
+ m.Stop(ctx)
if want, got := v23.LocalStop, <-ch; want != got {
t.Errorf("WaitForStop want %q got %q", want, got)
}
@@ -59,11 +59,11 @@
m := v23.GetAppCycle(ctx)
ch1 := make(chan string, 1)
- m.WaitForStop(ch1)
+ m.WaitForStop(ctx, ch1)
ch2 := make(chan string, 1)
- m.WaitForStop(ch2)
+ m.WaitForStop(ctx, ch2)
for i := 0; i < 10; i++ {
- m.Stop()
+ m.Stop(ctx)
if want, got := v23.LocalStop, <-ch1; want != got {
t.Errorf("WaitForStop want %q got %q", want, got)
}
@@ -82,9 +82,9 @@
m := v23.GetAppCycle(ctx)
ch := make(chan string, 1)
- m.WaitForStop(ch)
+ m.WaitForStop(ctx, ch)
for i := 0; i < 10; i++ {
- m.Stop()
+ m.Stop(ctx)
}
if want, got := v23.LocalStop, <-ch; want != got {
t.Errorf("WaitForStop want %q got %q", want, got)
@@ -103,7 +103,7 @@
m := v23.GetAppCycle(ctx)
fmt.Fprintf(env.Stdout, "ready\n")
modules.WaitForEOF(env.Stdin)
- m.Stop()
+ m.Stop(ctx)
os.Exit(42) // This should not be reached.
return nil
}, "noWaiters")
@@ -134,8 +134,8 @@
m := v23.GetAppCycle(ctx)
fmt.Fprintf(env.Stdout, "ready\n")
modules.WaitForEOF(env.Stdin)
- m.WaitForStop(make(chan string, 1))
- m.ForceStop()
+ m.WaitForStop(ctx, make(chan string, 1))
+ m.ForceStop(ctx)
os.Exit(42) // This should not be reached.
return nil
}, "forceStop")
@@ -249,7 +249,7 @@
m := v23.GetAppCycle(ctx)
ch := make(chan string, 1)
- m.WaitForStop(ch)
+ m.WaitForStop(ctx, ch)
fmt.Fprintf(env.Stdout, "Got %s\n", <-ch)
m.AdvanceGoal(10)
fmt.Fprintf(env.Stdout, "Doing some work\n")
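
AppCycle's Stop, ForceStop, and WaitForStop now take a context so the implementation can log through it. A condensed sketch of the updated protocol, written as it would appear inside a test like those above:

	m := v23.GetAppCycle(ctx)
	ch := make(chan string, 1)
	m.WaitForStop(ctx, ch) // register before stopping; was m.WaitForStop(ch)
	m.Stop(ctx)            // was m.Stop()
	if want, got := v23.LocalStop, <-ch; want != got {
		t.Errorf("WaitForStop want %q got %q", want, got)
	}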
diff --git a/runtime/internal/rt/rt_test.go b/runtime/internal/rt/rt_test.go
index 94bf51a..81f93d5 100644
--- a/runtime/internal/rt/rt_test.go
+++ b/runtime/internal/rt/rt_test.go
@@ -12,13 +12,12 @@
"testing"
"time"
- "v.io/x/lib/vlog"
-
"v.io/v23"
"v.io/v23/context"
"v.io/v23/security"
"v.io/x/ref"
+ "v.io/x/ref/internal/logger"
vsecurity "v.io/x/ref/lib/security"
"v.io/x/ref/test"
"v.io/x/ref/test/expect"
@@ -32,10 +31,11 @@
ctx, shutdown := v23.Init()
defer shutdown()
- l := vlog.Log
- fmt.Println(l)
- args := fmt.Sprintf("%s", l)
- expected := regexp.MustCompile("name=vlog logdirs=\\[/tmp\\] logtostderr=true|false alsologtostderr=false|true max_stack_buf_size=4292608 v=[0-9] stderrthreshold=2 vmodule= vfilepath= log_backtrace_at=:0")
+ mgr := logger.Manager(ctx)
+ fmt.Println(mgr)
+ args := fmt.Sprintf("%s", mgr)
+ expected := regexp.MustCompile("name=vanadium logdirs=\\[/tmp\\] logtostderr=true|false alsologtostderr=false|true max_stack_buf_size=4292608 v=[0-9] stderrthreshold=2 vmodule= vfilepath= log_backtrace_at=:0")
+
if !expected.MatchString(args) {
t.Errorf("unexpected default args: %s, want %s", args, expected)
}
@@ -55,12 +55,12 @@
}
var child = modules.Register(func(env *modules.Env, args ...string) error {
- _, shutdown := test.V23Init()
+ ctx, shutdown := test.V23Init()
defer shutdown()
- logger := vlog.Log
- vlog.Infof("%s\n", logger)
- fmt.Fprintf(env.Stdout, "%s\n", logger)
+ mgr := logger.Manager(ctx)
+ ctx.Infof("%s\n", mgr)
+ fmt.Fprintf(env.Stdout, "%s\n", mgr)
modules.WaitForEOF(env.Stdin)
fmt.Fprintf(env.Stdout, "done\n")
return nil
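
Where a test previously inspected the package-level vlog.Log, it now pulls the manager out of the context via v.io/x/ref/internal/logger. A two-line sketch:

	mgr := logger.Manager(ctx) // was: l := vlog.Log
	fmt.Println(mgr)           // prints the flag-style settings matched by the regexp above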
diff --git a/runtime/internal/rt/runtime.go b/runtime/internal/rt/runtime.go
index e3d8a1b..2e5d5ab 100644
--- a/runtime/internal/rt/runtime.go
+++ b/runtime/internal/rt/runtime.go
@@ -15,7 +15,6 @@
"v.io/x/lib/metadata"
"v.io/x/lib/pubsub"
- "v.io/x/lib/vlog"
"v.io/v23"
"v.io/v23/context"
@@ -70,6 +69,7 @@
// Please see the interface definition for documentation of the
// individual methods.
type Runtime struct {
+ ctx *context.T
deps *dependency.Graph
}
@@ -100,7 +100,7 @@
}
err := logger.Manager(logger.Global()).ConfigureFromFlags()
- if err != nil && err != vlog.ErrConfigured {
+ if err != nil && !logger.IsAlreadyConfiguredError(err) {
return nil, nil, nil, err
}
@@ -169,7 +169,7 @@
if err != nil {
return nil, nil, nil, err
}
-
+ r.ctx = ctx
return r, r.WithBackgroundContext(ctx), r.shutdown, nil
}
@@ -195,7 +195,7 @@
func (r *Runtime) shutdown() {
r.deps.CloseAndWaitForAll()
- vlog.FlushLog()
+ r.ctx.FlushLog()
}
func (r *Runtime) initSignalHandling(ctx *context.T) {
@@ -214,7 +214,7 @@
if !ok {
break
}
- vlog.Infof("Received signal %v", sig)
+ r.ctx.Infof("Received signal %v", sig)
}
}()
r.addChild(ctx, signals, func() {
@@ -231,7 +231,7 @@
func (r *Runtime) NewServer(ctx *context.T, opts ...rpc.ServerOpt) (rpc.Server, error) {
defer apilog.LogCallf(ctx, "opts...=%v", opts)(ctx, "") // gologcop: DO NOT EDIT, MUST BE FIRST STATEMENT
// Create a new RoutingID (and StreamManager) for each server.
- sm, err := newStreamManager()
+ sm, err := newStreamManager(ctx)
if err != nil {
return nil, fmt.Errorf("failed to create rpc/stream/Manager: %v", err)
}
@@ -257,13 +257,13 @@
Blessings: principal.BlessingStore().Default(),
})
}
- server, err := irpc.InternalNewServer(ctx, sm, ns, id.settingsPublisher, id.settingsName, r.GetClient(ctx), principal, otherOpts...)
+ server, err := irpc.InternalNewServer(ctx, sm, ns, id.settingsPublisher, id.settingsName, r.GetClient(ctx), otherOpts...)
if err != nil {
return nil, err
}
stop := func() {
if err := server.Stop(); err != nil {
- vlog.Errorf("A server could not be stopped: %v", err)
+ r.ctx.Errorf("A server could not be stopped: %v", err)
}
sm.Shutdown()
}
@@ -286,17 +286,17 @@
return false
}
-func newStreamManager() (stream.Manager, error) {
+func newStreamManager(ctx *context.T) (stream.Manager, error) {
rid, err := naming.NewRoutingID()
if err != nil {
return nil, err
}
- sm := imanager.InternalNew(rid)
+ sm := imanager.InternalNew(ctx, rid)
return sm, nil
}
func (r *Runtime) setNewStreamManager(ctx *context.T) (*context.T, error) {
- sm, err := newStreamManager()
+ sm, err := newStreamManager(ctx)
if err != nil {
return nil, err
}
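
The new Runtime.ctx field exists because shutdown, signal handling, and server-stop callbacks can run after every user context is gone; caching the root context keeps a logger reachable. A hedged sketch of the same idea (myRuntime is illustrative, not the real type):

	type myRuntime struct {
		ctx *context.T // root context, retained for late logging
	}

	func (r *myRuntime) shutdown(cleanup func()) {
		cleanup()
		r.ctx.FlushLog() // was: vlog.FlushLog()
	}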
diff --git a/runtime/internal/rt/runtime_test.go b/runtime/internal/rt/runtime_test.go
index 342da80..27ae0d9 100644
--- a/runtime/internal/rt/runtime_test.go
+++ b/runtime/internal/rt/runtime_test.go
@@ -10,7 +10,9 @@
"v.io/v23"
"v.io/v23/context"
"v.io/v23/naming"
- "v.io/x/lib/vlog"
+ "v.io/v23/options"
+
+ "v.io/x/ref/internal/logger"
"v.io/x/ref/lib/flags"
"v.io/x/ref/runtime/internal/rt"
"v.io/x/ref/services/debug/debuglib"
@@ -37,7 +39,11 @@
r, ctx, shutdown := initForTest(t)
defer shutdown()
- if s, err := r.NewServer(ctx); err != nil || s == nil {
+ // Use options.SecurityNone to avoid calling back into the
+ // v23 runtime, which is not set up in these tests.
+ // TODO(cnicolaou): this can be undone when the security agent
+ // no longer uses rpc as its communication mechanism.
+ if s, err := r.NewServer(ctx, options.SecurityNone); err != nil || s == nil {
t.Fatalf("Could not create server: %v", err)
}
}
@@ -137,7 +143,7 @@
defer shutdown()
oldDebugDisp := r.GetReservedNameDispatcher(ctx)
- newDebugDisp := debuglib.NewDispatcher(vlog.Log.LogDir, nil)
+ newDebugDisp := debuglib.NewDispatcher(logger.Manager(ctx).LogDir, nil)
nctx := r.WithReservedNameDispatcher(ctx, newDebugDisp)
debugDisp := r.GetReservedNameDispatcher(nctx)
diff --git a/runtime/internal/rt/security.go b/runtime/internal/rt/security.go
index ef1d822..5e439b1 100644
--- a/runtime/internal/rt/security.go
+++ b/runtime/internal/rt/security.go
@@ -72,6 +72,9 @@
// during runtime shutdown.
ctx, shutdown = context.WithRootCancel(ctx)
+ // TODO(cnicolaou): the agentlib can call back into the runtime to get the principal,
+ // which will be a problem if the runtime is not initialized, hence this code
+ // path is fragile. We should ideally provide an option to work around this case.
if principal, err = agentlib.NewAgentPrincipal(ctx, ep, client); err != nil {
shutdown()
client.Close()
diff --git a/runtime/internal/rt/shutdown_servers_test.go b/runtime/internal/rt/shutdown_servers_test.go
index 8514017..b6612e4 100644
--- a/runtime/internal/rt/shutdown_servers_test.go
+++ b/runtime/internal/rt/shutdown_servers_test.go
@@ -16,7 +16,7 @@
"v.io/v23"
"v.io/v23/context"
"v.io/v23/rpc"
- "v.io/x/lib/vlog"
+
"v.io/x/ref/lib/signals"
"v.io/x/ref/lib/xrpc"
_ "v.io/x/ref/runtime/factories/generic"
@@ -37,10 +37,10 @@
for scanner.Scan() {
switch scanner.Text() {
case "stop":
- v23.GetAppCycle(ctx).Stop()
+ v23.GetAppCycle(ctx).Stop(ctx)
case "forcestop":
fmt.Println("straight exit")
- v23.GetAppCycle(ctx).ForceStop()
+ v23.GetAppCycle(ctx).ForceStop(ctx)
case "close":
close(done)
return
@@ -70,11 +70,11 @@
// Create a couple servers, and start serving.
server1, err := xrpc.NewServer(ctx, "", &dummy{}, nil)
if err != nil {
- vlog.Fatalf("r.NewServer error: %s", err)
+ ctx.Fatalf("r.NewServer error: %s", err)
}
server2, err := xrpc.NewServer(ctx, "", &dummy{}, nil)
if err != nil {
- vlog.Fatalf("r.NewServer error: %s", err)
+ ctx.Fatalf("r.NewServer error: %s", err)
}
// This is how to wait for a shutdown. In this example, a shutdown
@@ -89,7 +89,7 @@
// This is how to configure handling of stop commands to allow clean
// shutdown.
stopChan := make(chan string, 2)
- v23.GetAppCycle(ctx).WaitForStop(stopChan)
+ v23.GetAppCycle(ctx).WaitForStop(ctx, stopChan)
// Blocking is used to prevent the process from exiting upon receiving a
// second signal or stop command while critical cleanup code is
@@ -222,7 +222,7 @@
// Create a server, and start serving.
server, err := xrpc.NewServer(ctx, "", &dummy{}, nil)
if err != nil {
- vlog.Fatalf("r.NewServer error: %s", err)
+ ctx.Fatalf("r.NewServer error: %s", err)
}
// This is how to wait for a shutdown. In this example, a shutdown
diff --git a/runtime/internal/testing/concurrency/mutex_test.go b/runtime/internal/testing/concurrency/mutex_test.go
index 7069526..13aa722 100644
--- a/runtime/internal/testing/concurrency/mutex_test.go
+++ b/runtime/internal/testing/concurrency/mutex_test.go
@@ -13,7 +13,7 @@
"testing"
"time"
- "v.io/x/lib/vlog"
+ "v.io/x/ref/internal/logger"
"v.io/x/ref/runtime/internal/testing/concurrency"
"v.io/x/ref/runtime/internal/testing/concurrency/sync"
)
@@ -97,7 +97,7 @@
expectedOutputs := generateMutexOutputs(createMutexSet(n))
checkExpectedOutputs(t, outputs, expectedOutputs)
checkUnexpectedOutputs(t, outputs, expectedOutputs)
- vlog.VI(1).Infof("Explored %v iterations.", niterations)
+ logger.Global().VI(1).Infof("Explored %v iterations.", niterations)
}
}
@@ -129,7 +129,7 @@
if niterations > stopAfter {
t.Fatalf("Unexpected number of iterations: expected at most %v, got %v", stopAfter, niterations)
}
- vlog.VI(1).Infof("Explored %v iterations.", niterations)
+ logger.Global().VI(1).Infof("Explored %v iterations.", niterations)
}
}
@@ -160,6 +160,6 @@
if start.Add(deadline).After(end) {
checkExpectedOutputs(t, outputs, expectedOutputs)
}
- vlog.VI(1).Infof("Explored %v iterations.", niterations)
+ logger.Global().VI(1).Infof("Explored %v iterations.", niterations)
}
}
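
These concurrency tests have no context in scope, which exposes the CL's fallback rule: log through a context when one is available, otherwise through the global manager. A sketch of both forms (the ctx line is hypothetical here):

	// With a *context.T in hand:
	ctx.VI(1).Infof("Explored %v iterations.", niterations)
	// Without one (helpers, init code, these tests):
	logger.Global().VI(1).Infof("Explored %v iterations.", niterations)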
diff --git a/runtime/internal/testing/mocks/naming/namespace.go b/runtime/internal/testing/mocks/naming/namespace.go
index 811a313..fd0e407 100644
--- a/runtime/internal/testing/mocks/naming/namespace.go
+++ b/runtime/internal/testing/mocks/naming/namespace.go
@@ -141,7 +141,7 @@
panic("ResolveToMountTable not implemented")
}
-func (ns *namespaceMock) FlushCacheEntry(name string) bool {
+func (ns *namespaceMock) FlushCacheEntry(ctx *context.T, name string) bool {
defer apilog.LogCallf(nil, "name=%.10s...", name)(nil, "") // gologcop: DO NOT EDIT, MUST BE FIRST STATEMENT
return false
}
diff --git a/runtime/internal/vtrace/store_test.go b/runtime/internal/vtrace/store_test.go
index fcc473f..3d38700 100644
--- a/runtime/internal/vtrace/store_test.go
+++ b/runtime/internal/vtrace/store_test.go
@@ -120,7 +120,10 @@
newSpan(traces[0], "foo", traces[0], st)
newSpan(traces[1], "foobar", traces[1], st)
- sp := newSpan(traces[2], "baz", traces[2], st)
+ sp, err := newSpan(traces[2], "baz", traces[2], st)
+ if err != nil {
+ t.Fatal(err)
+ }
sp.Annotate("foobang")
compare(t, traceids(test.results...), st.TraceRecords())
diff --git a/runtime/internal/vtrace/vtrace.go b/runtime/internal/vtrace/vtrace.go
index 2dfc987..11bfd0c 100644
--- a/runtime/internal/vtrace/vtrace.go
+++ b/runtime/internal/vtrace/vtrace.go
@@ -14,7 +14,6 @@
"v.io/v23/context"
"v.io/v23/uniqueid"
"v.io/v23/vtrace"
- "v.io/x/lib/vlog"
"v.io/x/ref/lib/flags"
)
@@ -29,10 +28,10 @@
store *Store
}
-func newSpan(parent uniqueid.Id, name string, trace uniqueid.Id, store *Store) *span {
+func newSpan(parent uniqueid.Id, name string, trace uniqueid.Id, store *Store) (*span, error) {
id, err := uniqueid.Random()
if err != nil {
- vlog.Errorf("vtrace: Couldn't generate Span ID, debug data may be lost: %v", err)
+ return nil, fmt.Errorf("vtrace: Couldn't generate Span ID, debug data may be lost: %v", err)
}
s := &span{
id: id,
@@ -43,7 +42,7 @@
store: store,
}
store.start(s)
- return s
+ return s, nil
}
func (s *span) ID() uniqueid.Id {
@@ -97,9 +96,12 @@
// nologcall
id, err := uniqueid.Random()
if err != nil {
- vlog.Errorf("vtrace: Couldn't generate Trace Id, debug data may be lost: %v", err)
+ ctx.Errorf("vtrace: Couldn't generate Trace Id, debug data may be lost: %v", err)
}
- s := newSpan(id, "", id, getStore(ctx))
+ s, err := newSpan(id, "", id, getStore(ctx))
+ if err != nil {
+ ctx.Error(err)
+ }
return context.WithValue(ctx, spanKey, s), s
}
@@ -114,7 +116,10 @@
if req.Flags&vtrace.CollectInMemory != 0 {
st.ForceCollect(req.TraceId)
}
- newSpan := newSpan(req.SpanId, name, req.TraceId, st)
+ newSpan, err := newSpan(req.SpanId, name, req.TraceId, st)
+ if err != nil {
+ ctx.Error(err)
+ }
return context.WithValue(ctx, spanKey, newSpan), newSpan
}
@@ -126,11 +131,14 @@
if curSpan.store == nil {
panic("nil store")
}
- s := newSpan(curSpan.ID(), name, curSpan.trace, curSpan.store)
+ s, err := newSpan(curSpan.ID(), name, curSpan.trace, curSpan.store)
+ if err != nil {
+ ctx.Error(err)
+ }
return context.WithValue(ctx, spanKey, s), s
}
- vlog.Error("vtrace: Creating a new child span from context with no existing span.")
+ ctx.Error("vtrace: Creating a new child span from context with no existing span.")
return m.WithNewTrace(ctx)
}
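
With vlog gone, newSpan reports its only failure mode (uniqueid.Random failing) as an error, and each caller decides how loudly to complain. The resulting call-site pattern, extracted from the hunks above (parent, name, trace, and store stand for the caller's values):

	s, err := newSpan(parent, name, trace, store)
	if err != nil {
		// Non-fatal, matching the code above: the span may be unusable,
		// but execution continues.
		ctx.Error(err)
	}
	return context.WithValue(ctx, spanKey, s), s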
diff --git a/services/device/deviced/internal/impl/device_service.go b/services/device/deviced/internal/impl/device_service.go
index 295ed07..480b8aa 100644
--- a/services/device/deviced/internal/impl/device_service.go
+++ b/services/device/deviced/internal/impl/device_service.go
@@ -217,7 +217,7 @@
if s.restartHandler != nil {
s.restartHandler()
}
- v23.GetAppCycle(ctx).Stop()
+ v23.GetAppCycle(ctx).Stop(ctx)
return nil
}
@@ -482,7 +482,7 @@
if s.restartHandler != nil {
s.restartHandler()
}
- v23.GetAppCycle(ctx).Stop()
+ v23.GetAppCycle(ctx).Stop(ctx)
deferrer = nil
return nil
}
@@ -515,7 +515,7 @@
}
func (*deviceService) Delete(ctx *context.T, _ rpc.ServerCall) error {
- v23.GetAppCycle(ctx).Stop()
+ v23.GetAppCycle(ctx).Stop(ctx)
return nil
}
@@ -523,7 +523,7 @@
if s.restartHandler != nil {
s.restartHandler()
}
- v23.GetAppCycle(ctx).Stop()
+ v23.GetAppCycle(ctx).Stop(ctx)
return nil
}
diff --git a/services/device/deviced/internal/starter/starter.go b/services/device/deviced/internal/starter/starter.go
index 848eccc..c6c972a 100644
--- a/services/device/deviced/internal/starter/starter.go
+++ b/services/device/deviced/internal/starter/starter.go
@@ -208,7 +208,7 @@
shutdown, err := startClaimedDevice(ctx, args)
if err != nil {
ctx.Errorf("Failed to start device service after it was claimed: %v", err)
- v23.GetAppCycle(ctx).Stop()
+ v23.GetAppCycle(ctx).Stop(ctx)
return
}
defer shutdown()
diff --git a/services/wspr/internal/namespace/request_handler.go b/services/wspr/internal/namespace/request_handler.go
index c2cbc34..c262a50 100644
--- a/services/wspr/internal/namespace/request_handler.go
+++ b/services/wspr/internal/namespace/request_handler.go
@@ -70,8 +70,8 @@
return me.Names(), nil
}
-func (s *Server) FlushCacheEntry(_ *context.T, _ rpc.ServerCall, name string) (bool, error) {
- return s.ns.FlushCacheEntry(name), nil
+func (s *Server) FlushCacheEntry(ctx *context.T, _ rpc.ServerCall, name string) (bool, error) {
+ return s.ns.FlushCacheEntry(ctx, name), nil
}
func (s *Server) DisableCache(_ *context.T, _ rpc.ServerCall, disable bool) error {
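
The namespace cache API picks up the context as well; the mock earlier and this wspr handler show the new FlushCacheEntry(ctx, name) shape. A hedged sketch of a client-side call, assuming the namespace.T interface was updated to match the mock above:

	flushed := v23.GetNamespace(ctx).FlushCacheEntry(ctx, "some/name")
	ctx.VI(1).Infof("flushed=%v", flushed)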