veyron.go.core: Make context.T a concrete type.
Change-Id: I1f4ce09c8a302214e13da6939a20dbf625d31511
MultiPart: 1/6
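
Note on the migration pattern in this CL: signatures move from the interface type context.T to the concrete pointer type *context.T, nil checks on contexts become ctx.Initialized(), and test code now builds contexts with context.NewUninitializedContext(...) instead of the deleted ipc.InternalNewContext. The sketch below is illustrative only; the function doCall and its error message are hypothetical, and the assumption that WithTimeout still returns a context.CancelFunc (as the old interface did) is inferred from the updated call sites in this diff, not confirmed elsewhere.

    package example

    import (
    	"errors"
    	"time"

    	"v.io/core/veyron2/context"
    )

    // doCall is a hypothetical caller showing the before/after shape.
    //
    // Before: func doCall(ctx context.T) error { if ctx == nil { ... } }
    // After:  ctx is the concrete *context.T and is checked with
    //         Initialized rather than compared against nil.
    func doCall(ctx *context.T) error {
    	if !ctx.Initialized() {
    		return errors.New("uninitialized context")
    	}
    	// Derived contexts are still created the same way, e.g. with a
    	// timeout, as the updated tests in this CL do.
    	ctx, cancel := ctx.WithTimeout(20 * time.Second)
    	defer cancel()
    	_ = ctx // the derived context would be passed on to StartCall etc.
    	return nil
    }
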
diff --git a/runtimes/google/ipc/benchmarks/client.go b/runtimes/google/ipc/benchmarks/client.go
index f6558ea..c920458 100644
--- a/runtimes/google/ipc/benchmarks/client.go
+++ b/runtimes/google/ipc/benchmarks/client.go
@@ -14,7 +14,7 @@
// CallEcho calls 'Echo' method 'iterations' times with the given payload
// size, and optionally updates the stats.
-func CallEcho(b *testing.B, ctx context.T, address string, iterations, payloadSize int, stats *testutil.BenchStats) {
+func CallEcho(b *testing.B, ctx *context.T, address string, iterations, payloadSize int, stats *testutil.BenchStats) {
stub := BenchmarkClient(address)
payload := make([]byte, payloadSize)
for i := range payload {
@@ -53,7 +53,7 @@
// CallEchoStream calls 'EchoStream' method 'iterations' times. Each iteration
// sends 'chunkCnt' chunks on the stream and receives the same number of chunks
// back. Each chunk has the given payload size. Optionally updates the stats.
-func CallEchoStream(b *testing.B, ctx context.T, address string, iterations, chunkCnt, payloadSize int, stats *testutil.BenchStats) {
+func CallEchoStream(b *testing.B, ctx *context.T, address string, iterations, chunkCnt, payloadSize int, stats *testutil.BenchStats) {
done, _ := StartEchoStream(b, ctx, address, iterations, chunkCnt, payloadSize, stats)
<-done
}
@@ -64,7 +64,7 @@
// the streaming. Each iteration requests 'chunkCnt' chunks on the stream and
// receives that number of chunks back. Each chunk has the given payload size.
// Optionally updates the stats. Zero 'iterations' means unlimited.
-func StartEchoStream(b *testing.B, ctx context.T, address string, iterations, chunkCnt, payloadSize int, stats *testutil.BenchStats) (<-chan int, func()) {
+func StartEchoStream(b *testing.B, ctx *context.T, address string, iterations, chunkCnt, payloadSize int, stats *testutil.BenchStats) (<-chan int, func()) {
stub := BenchmarkClient(address)
payload := make([]byte, payloadSize)
for i := range payload {
diff --git a/runtimes/google/ipc/benchmarks/service.vdl.go b/runtimes/google/ipc/benchmarks/service.vdl.go
index 301bd4e..6613195 100644
--- a/runtimes/google/ipc/benchmarks/service.vdl.go
+++ b/runtimes/google/ipc/benchmarks/service.vdl.go
@@ -26,9 +26,9 @@
// containing Benchmark methods.
type BenchmarkClientMethods interface {
// Echo returns the payload that it receives.
- Echo(ctx __context.T, Payload []byte, opts ...__ipc.CallOpt) ([]byte, error)
+ Echo(ctx *__context.T, Payload []byte, opts ...__ipc.CallOpt) ([]byte, error)
// EchoStream returns the payload that it receives via the stream.
- EchoStream(__context.T, ...__ipc.CallOpt) (BenchmarkEchoStreamCall, error)
+ EchoStream(*__context.T, ...__ipc.CallOpt) (BenchmarkEchoStreamCall, error)
}
// BenchmarkClientStub adds universal methods to BenchmarkClientMethods.
@@ -53,14 +53,14 @@
client __ipc.Client
}
-func (c implBenchmarkClientStub) c(ctx __context.T) __ipc.Client {
+func (c implBenchmarkClientStub) c(ctx *__context.T) __ipc.Client {
if c.client != nil {
return c.client
}
return __veyron2.RuntimeFromContext(ctx).Client()
}
-func (c implBenchmarkClientStub) Echo(ctx __context.T, i0 []byte, opts ...__ipc.CallOpt) (o0 []byte, err error) {
+func (c implBenchmarkClientStub) Echo(ctx *__context.T, i0 []byte, opts ...__ipc.CallOpt) (o0 []byte, err error) {
var call __ipc.Call
if call, err = c.c(ctx).StartCall(ctx, c.name, "Echo", []interface{}{i0}, opts...); err != nil {
return
@@ -71,7 +71,7 @@
return
}
-func (c implBenchmarkClientStub) EchoStream(ctx __context.T, opts ...__ipc.CallOpt) (ocall BenchmarkEchoStreamCall, err error) {
+func (c implBenchmarkClientStub) EchoStream(ctx *__context.T, opts ...__ipc.CallOpt) (ocall BenchmarkEchoStreamCall, err error) {
var call __ipc.Call
if call, err = c.c(ctx).StartCall(ctx, c.name, "EchoStream", nil, opts...); err != nil {
return
@@ -80,7 +80,7 @@
return
}
-func (c implBenchmarkClientStub) Signature(ctx __context.T, opts ...__ipc.CallOpt) (o0 __ipc.ServiceSignature, err error) {
+func (c implBenchmarkClientStub) Signature(ctx *__context.T, opts ...__ipc.CallOpt) (o0 __ipc.ServiceSignature, err error) {
var call __ipc.Call
if call, err = c.c(ctx).StartCall(ctx, c.name, "Signature", nil, opts...); err != nil {
return
diff --git a/runtimes/google/ipc/client.go b/runtimes/google/ipc/client.go
index 83b2de3..4269466 100644
--- a/runtimes/google/ipc/client.go
+++ b/runtimes/google/ipc/client.go
@@ -137,7 +137,7 @@
return c, nil
}
-func (c *client) createFlow(ctx context.T, ep naming.Endpoint, noDischarges bool) (stream.Flow, verror.E) {
+func (c *client) createFlow(ctx *context.T, ep naming.Endpoint, noDischarges bool) (stream.Flow, verror.E) {
c.vcMapMu.Lock()
defer c.vcMapMu.Unlock()
if c.vcMap == nil {
@@ -194,7 +194,7 @@
// a flow to the endpoint, returning the parsed suffix.
// The server name passed in should be a rooted name, of the form "/ep/suffix" or
// "/ep//suffix", or just "/ep".
-func (c *client) connectFlow(ctx context.T, server string, noDischarges bool) (stream.Flow, string, verror.E) {
+func (c *client) connectFlow(ctx *context.T, server string, noDischarges bool) (stream.Flow, string, verror.E) {
address, suffix := naming.SplitAddressName(server)
if len(address) == 0 {
return nil, "", verror.Make(errNonRootedName, ctx, server)
@@ -244,7 +244,7 @@
return 0, false
}
-func (c *client) StartCall(ctx context.T, name, method string, args []interface{}, opts ...ipc.CallOpt) (ipc.Call, error) {
+func (c *client) StartCall(ctx *context.T, name, method string, args []interface{}, opts ...ipc.CallOpt) (ipc.Call, error) {
defer vlog.LogCall()()
return c.startCall(ctx, name, method, args, opts)
}
@@ -286,8 +286,8 @@
}
// startCall ensures StartCall always returns verror.E.
-func (c *client) startCall(ctx context.T, name, method string, args []interface{}, opts []ipc.CallOpt) (ipc.Call, verror.E) {
- if ctx == nil {
+func (c *client) startCall(ctx *context.T, name, method string, args []interface{}, opts []ipc.CallOpt) (ipc.Call, verror.E) {
+ if !ctx.Initialized() {
return nil, verror.ExplicitMake(verror.BadArg, i18n.NoLangID, "ipc.Client", "StartCall")
}
@@ -348,7 +348,7 @@
}
// TODO(cnicolaou): implement real, configurable load balancing.
-func (c *client) tryServer(ctx context.T, index int, server string, ch chan<- *serverStatus, noDischarges bool) {
+func (c *client) tryServer(ctx *context.T, index int, server string, ch chan<- *serverStatus, noDischarges bool) {
status := &serverStatus{index: index}
var err verror.E
var span vtrace.Span
@@ -364,7 +364,7 @@
}
// tryCall makes a single attempt at a call, against possibly multiple servers.
-func (c *client) tryCall(ctx context.T, name, method string, args []interface{}, skipResolve bool, opts []ipc.CallOpt) (ipc.Call, verror.ActionCode, verror.E) {
+func (c *client) tryCall(ctx *context.T, name, method string, args []interface{}, skipResolve bool, opts []ipc.CallOpt) (ipc.Call, verror.ActionCode, verror.E) {
noDischarges := shouldNotFetchDischarges(opts)
// Resolve name unless told not to.
var servers []string
@@ -557,7 +557,7 @@
// failedTryCall performs asynchronous cleanup for tryCall, and returns an
// appropriate error from the responses we've already received. All parallel
// calls in tryCall failed or we timed out if we get here.
-func (c *client) failedTryCall(ctx context.T, name, method string, servers []string, responses []*serverStatus, ch chan *serverStatus) (ipc.Call, verror.ActionCode, verror.E) {
+func (c *client) failedTryCall(ctx *context.T, name, method string, servers []string, responses []*serverStatus, ch chan *serverStatus) (ipc.Call, verror.ActionCode, verror.E) {
go cleanupTryCall(nil, responses, ch)
c.ns.FlushCacheEntry(name)
noconn, untrusted := []string{}, []string{}
@@ -594,7 +594,7 @@
// the RPC name.method for the client (local end of the flow). It returns the blessings at the
// server that are authorized for this purpose and any blessings that are to be granted to
// the server (via ipc.Granter implementations in opts.)
-func (c *client) authorizeServer(ctx context.T, flow stream.Flow, name, method string, serverPattern security.BlessingPattern, opts []ipc.CallOpt) (serverBlessings []string, grantedBlessings security.Blessings, err verror.E) {
+func (c *client) authorizeServer(ctx *context.T, flow stream.Flow, name, method string, serverPattern security.BlessingPattern, opts []ipc.CallOpt) (serverBlessings []string, grantedBlessings security.Blessings, err verror.E) {
if flow.RemoteBlessings() == nil {
return nil, nil, verror.Make(errNoBlessings, ctx)
}
@@ -648,7 +648,7 @@
// flowClient implements the RPC client-side protocol for a single RPC, over a
// flow that's already connected to the server.
type flowClient struct {
- ctx context.T // context to annotate with call details
+ ctx *context.T // context to annotate with call details
dec vomDecoder // to decode responses and results from the server
enc vomEncoder // to encode requests and args to the server
server []string // Blessings bound to the server that authorize it to receive the IPC request from the client.
@@ -668,7 +668,7 @@
var _ ipc.Call = (*flowClient)(nil)
var _ ipc.Stream = (*flowClient)(nil)
-func newFlowClient(ctx context.T, server []string, flow stream.Flow, dc vc.DischargeClient) (*flowClient, error) {
+func newFlowClient(ctx *context.T, server []string, flow stream.Flow, dc vc.DischargeClient) (*flowClient, error) {
fc := &flowClient{
ctx: ctx,
dec: vom.NewDecoder(flow),
@@ -760,7 +760,7 @@
return nil
}
-func decodeNetError(ctx context.T, err error) verror.IDAction {
+func decodeNetError(ctx *context.T, err error) verror.IDAction {
if neterr, ok := err.(net.Error); ok {
if neterr.Timeout() || neterr.Temporary() {
// If a read is cancelled in the lower levels we see
diff --git a/runtimes/google/ipc/context.go b/runtimes/google/ipc/context.go
deleted file mode 100644
index a2e916d..0000000
--- a/runtimes/google/ipc/context.go
+++ /dev/null
@@ -1,272 +0,0 @@
-package ipc
-
-import (
- "sync"
- "time"
-
- "v.io/core/veyron2"
- "v.io/core/veyron2/context"
-)
-
-const nilRuntimeMessage = "attempting to create a context with a nil runtime"
-
-// InternalNewContext creates a new context.T. This function should only
-// be called from within the runtime implementation.
-func InternalNewContext(runtime veyron2.Runtime) context.T {
- if runtime == nil {
- panic(nilRuntimeMessage)
- }
- return rootContext{runtime}
-}
-
-// cancellable is an interface to cancellable contexts.
-type cancellable interface {
- // cancel cancels the context and records the given error.
- cancel(err error)
- addChild(child cancellable)
- removeChild(parent cancellable)
-}
-
-// child is an interface that allows you to find the nearest cancelContext ancestor.
-type child interface {
- parent() context.T
-}
-
-// rootContext is an empty root context. It has no deadline or values
-// and can't be canceled.
-type rootContext struct {
- runtime veyron2.Runtime
-}
-
-func (r rootContext) parent() context.T { return nil }
-func (r rootContext) Deadline() (deadline time.Time, ok bool) { return }
-func (r rootContext) Done() <-chan struct{} { return nil }
-func (r rootContext) Err() error { return nil }
-func (r rootContext) Value(key interface{}) interface{} { return nil }
-func (r rootContext) Runtime() interface{} { return r.runtime }
-func (r rootContext) WithCancel() (ctx context.T, cancel context.CancelFunc) {
- return newCancelContext(r)
-}
-func (r rootContext) WithDeadline(deadline time.Time) (context.T, context.CancelFunc) {
- return newDeadlineContext(r, deadline)
-}
-func (r rootContext) WithTimeout(timeout time.Duration) (context.T, context.CancelFunc) {
- return newDeadlineContext(r, time.Now().Add(timeout))
-}
-func (r rootContext) WithValue(key interface{}, val interface{}) context.T {
- return newValueContext(r, key, val)
-}
-
-// A valueContext contains a single key/value mapping.
-type valueContext struct {
- context.T
- key, value interface{}
-}
-
-func newValueContext(parent context.T, key, val interface{}) *valueContext {
- return &valueContext{parent, key, val}
-}
-
-func (v *valueContext) parent() context.T {
- return v.T
-}
-func (v *valueContext) Value(key interface{}) interface{} {
- if key == v.key {
- return v.value
- }
- return v.T.Value(key)
-}
-func (v *valueContext) WithCancel() (ctx context.T, cancel context.CancelFunc) {
- return newCancelContext(v)
-}
-func (v *valueContext) WithDeadline(deadline time.Time) (context.T, context.CancelFunc) {
- return newDeadlineContext(v, deadline)
-}
-func (v *valueContext) WithTimeout(timeout time.Duration) (context.T, context.CancelFunc) {
- return newDeadlineContext(v, time.Now().Add(timeout))
-}
-func (v *valueContext) WithValue(key interface{}, val interface{}) context.T {
- return newValueContext(v, key, val)
-}
-
-// A cancelContext provides a mechanism for cancellation and a
-// done channel that allows it's status to be monitored.
-type cancelContext struct {
- context.T
- done chan struct{}
- err error
-
- // children is used to keep track of descendant cancellable
- // contexts. This is an optimization to prevent excessive
- // goroutines.
- children map[cancellable]bool
-
- mu sync.Mutex
-}
-
-func newCancelContext(parent context.T) (ctx *cancelContext, cancel context.CancelFunc) {
- ctx = &cancelContext{
- T: parent,
- done: make(chan struct{}),
- }
-
- cancel = func() { ctx.cancel(context.Canceled) }
- if parent.Done() == nil {
- return
- }
-
- if ancestor, nonStandardAncestor := ctx.findCancellableAncestor(); !nonStandardAncestor {
- if ancestor != nil {
- ancestor.addChild(ctx)
- }
- return
- }
-
- // If neither the parent nor the child are canceled then both the
- // parent and the child will leak. Note this will only happen for
- // non-standard implementations of the context.T interface.
- go func() {
- select {
- case <-parent.Done():
- ctx.cancel(parent.Err())
- case <-ctx.Done():
- }
- }()
-
- return
-}
-
-// addChild sets child as a descendant cancellable context. This
-// allows us to propagate cancellations through the context tree.
-func (c *cancelContext) addChild(child cancellable) {
- c.mu.Lock()
- if c.err != nil {
- // If the parent is already canceled, just cancel the child.
- c.mu.Unlock()
- child.cancel(c.err)
- return
- }
- defer c.mu.Unlock()
- if c.children == nil {
- c.children = make(map[cancellable]bool)
- }
- c.children[child] = true
-}
-
-// removeChild is called by descendant contexts when they are
-// canceled. This prevents old contexts which are no longer relevant
-// from consuming resources.
-func (c *cancelContext) removeChild(child cancellable) {
- c.mu.Lock()
- defer c.mu.Unlock()
- delete(c.children, child)
-}
-
-// cancelChildren cancels all descendant cancellable contexts. This
-// is called during cancel but while mu is NOT held. Children may try
-// to make calls to parents, which would result in a deadlock.
-func cancelChildren(children map[cancellable]bool, err error) {
- for child, _ := range children {
- child.cancel(err)
- }
-}
-
-// cancel cancels the context, propagates that signal to children,
-// and updates parents.
-func (c *cancelContext) cancel(err error) {
- if err == nil {
- panic("Context canceled with nil error.")
- }
- c.mu.Lock()
- // cancelChilren should be called after mu is released.
- defer cancelChildren(c.children, err)
- defer c.mu.Unlock()
- if c.err != nil {
- return
- }
- c.err = err
- c.children = nil
- if ancestor, nonStandardAncestor := c.findCancellableAncestor(); !nonStandardAncestor {
- if ancestor != nil {
- ancestor.removeChild(c)
- }
- }
- close(c.done)
-}
-
-// findCancelAncestor finds the nearest ancestor that supports cancellation.
-// nonStandardAncestor will be true if we cannot determine if there is a cancellable
-// ancestor due to the presence of an unknown context implementation. In this
-// case ancestor will always be nil.
-func (c *cancelContext) findCancellableAncestor() (ancestor cancellable, nonStandardAncestor bool) {
- parent := c.T
- for {
- if c, ok := parent.(cancellable); ok {
- return c, false
- }
- c, ok := parent.(child)
- if !ok {
- return nil, true
- }
- parent = c.parent()
- }
- return nil, false // Unreachable.
-}
-
-func (c *cancelContext) Done() <-chan struct{} { return c.done }
-func (c *cancelContext) Err() error {
- c.mu.Lock()
- defer c.mu.Unlock()
- return c.err
-}
-func (c *cancelContext) WithCancel() (ctx context.T, cancel context.CancelFunc) {
- return newCancelContext(c)
-}
-func (c *cancelContext) WithDeadline(deadline time.Time) (context.T, context.CancelFunc) {
- return newDeadlineContext(c, deadline)
-}
-func (c *cancelContext) WithTimeout(timeout time.Duration) (context.T, context.CancelFunc) {
- return newDeadlineContext(c, time.Now().Add(timeout))
-}
-func (c *cancelContext) WithValue(key interface{}, val interface{}) context.T {
- return newValueContext(c, key, val)
-}
-
-// A deadlineContext automatically cancels itself when the deadline is reached.
-type deadlineContext struct {
- *cancelContext
- deadline time.Time
- timer *time.Timer
-}
-
-// newDeadlineContext returns a new deadlineContext.
-func newDeadlineContext(parent context.T, deadline time.Time) (*deadlineContext, context.CancelFunc) {
- cancel, _ := newCancelContext(parent)
- ctx := &deadlineContext{
- cancelContext: cancel,
- deadline: deadline,
- }
- delta := deadline.Sub(time.Now())
- ctx.timer = time.AfterFunc(delta, func() { cancel.cancel(context.DeadlineExceeded) })
- return ctx, func() { ctx.cancel(context.Canceled) }
-}
-
-// cancel cancels the deadlineContext, forwards the signal to
-// descendants, and notifies parents.
-func (d *deadlineContext) cancel(err error) {
- d.timer.Stop()
- d.cancelContext.cancel(err)
-}
-func (d *deadlineContext) Deadline() (deadline time.Time, ok bool) { return d.deadline, true }
-func (d *deadlineContext) WithCancel() (ctx context.T, cancel context.CancelFunc) {
- return newCancelContext(d)
-}
-func (d *deadlineContext) WithDeadline(deadline time.Time) (context.T, context.CancelFunc) {
- return newDeadlineContext(d, deadline)
-}
-func (d *deadlineContext) WithTimeout(timeout time.Duration) (context.T, context.CancelFunc) {
- return newDeadlineContext(d, time.Now().Add(timeout))
-}
-func (d *deadlineContext) WithValue(key interface{}, val interface{}) context.T {
- return newValueContext(d, key, val)
-}
diff --git a/runtimes/google/ipc/context_test.go b/runtimes/google/ipc/context_test.go
deleted file mode 100644
index 469e954..0000000
--- a/runtimes/google/ipc/context_test.go
+++ /dev/null
@@ -1,228 +0,0 @@
-package ipc
-
-import (
- "sync"
- "testing"
- "time"
-
- "v.io/core/veyron/runtimes/google/testing/mocks/runtime"
- "v.io/core/veyron/runtimes/google/vtrace"
-
- "v.io/core/veyron2/context"
-)
-
-// We need a special way to create contexts for tests. We
-// can't create a real runtime in the runtime implementation
-// so we use a fake one that panics if used. The runtime
-// implementation should not ever use the Runtime from a context.
-func testContext() context.T {
- ctx, _ := testContextWithoutDeadline().WithTimeout(20 * time.Second)
- return ctx
-}
-
-func testContextWithoutDeadline() context.T {
- ctx := InternalNewContext(&runtime.PanicRuntime{})
- ctx, _ = vtrace.WithNewRootSpan(ctx, nil, false)
- return ctx
-}
-
-func testCancel(t *testing.T, ctx context.T, cancel context.CancelFunc) {
- select {
- case <-ctx.Done():
- t.Errorf("Done closed when deadline not yet passed")
- default:
- }
- ch := make(chan bool, 0)
- go func() {
- cancel()
- close(ch)
- }()
- select {
- case <-ch:
- case <-time.After(3 * time.Second):
- t.Fatal("timed out witing for cancel.")
- }
-
- select {
- case <-ctx.Done():
- case <-time.After(3 * time.Second):
- t.Fatal("timed out witing for cancellation.")
- }
- if err := ctx.Err(); err != context.Canceled {
- t.Errorf("Unexpected error want %v, got %v", context.Canceled, err)
- }
-}
-
-func TestRootContext(t *testing.T) {
- r := &runtime.PanicRuntime{}
- ctx := InternalNewContext(r)
-
- if got := ctx.Runtime(); got != r {
- t.Errorf("Expected runtime %v, but found %v", r, got)
- }
-
- if got := ctx.Err(); got != nil {
- t.Errorf("Expected nil error, got: %v", got)
- }
-
- defer func() {
- r := recover()
- if r != nilRuntimeMessage {
- t.Errorf("Unexpected recover value: %s", r)
- }
- }()
- InternalNewContext(nil)
-}
-
-func TestCancelContext(t *testing.T) {
- ctx, cancel := testContext().WithCancel()
- testCancel(t, ctx, cancel)
-
- // Test cancelling a cancel context which is the child
- // of a cancellable context.
- parent, _ := testContext().WithCancel()
- child, cancel := parent.WithCancel()
- cancel()
- <-child.Done()
-
- // Test adding a cancellable child context after the parent is
- // already cancelled.
- parent, cancel = testContext().WithCancel()
- cancel()
- child, _ = parent.WithCancel()
- <-child.Done() // The child should have been cancelled right away.
-}
-
-func TestMultiLevelCancelContext(t *testing.T) {
- c0, c0Cancel := testContext().WithCancel()
- c1, _ := c0.WithCancel()
- c2, _ := c1.WithCancel()
- c3, _ := c2.WithCancel()
- testCancel(t, c3, c0Cancel)
-}
-
-type nonStandardContext struct {
- context.T
-}
-
-func (n *nonStandardContext) WithCancel() (ctx context.T, cancel context.CancelFunc) {
- return newCancelContext(n)
-}
-func (n *nonStandardContext) WithDeadline(deadline time.Time) (context.T, context.CancelFunc) {
- return newDeadlineContext(n, deadline)
-}
-func (n *nonStandardContext) WithTimeout(timeout time.Duration) (context.T, context.CancelFunc) {
- return newDeadlineContext(n, time.Now().Add(timeout))
-}
-func (n *nonStandardContext) WithValue(key interface{}, val interface{}) context.T {
- return newValueContext(n, key, val)
-}
-
-func TestCancelContextWithNonStandard(t *testing.T) {
- // Test that cancellation flows properly through non-standard intermediates.
- ctx := testContext()
- c0 := &nonStandardContext{ctx}
- c1, c1Cancel := c0.WithCancel()
- c2 := &nonStandardContext{c1}
- c3 := &nonStandardContext{c2}
- c4, _ := c3.WithCancel()
- testCancel(t, c4, c1Cancel)
-}
-
-func testDeadline(t *testing.T, ctx context.T, start time.Time, desiredTimeout time.Duration) {
- <-ctx.Done()
- if delta := time.Now().Sub(start); delta < desiredTimeout {
- t.Errorf("Deadline too short want %s got %s", desiredTimeout, delta)
- }
- if err := ctx.Err(); err != context.DeadlineExceeded {
- t.Errorf("Unexpected error want %s, got %s", context.DeadlineExceeded, err)
- }
-}
-
-func TestDeadlineContext(t *testing.T) {
- cases := []time.Duration{
- 3 * time.Millisecond,
- 0,
- }
- rootCtx := InternalNewContext(&runtime.PanicRuntime{})
- cancelCtx, _ := rootCtx.WithCancel()
- deadlineCtx, _ := rootCtx.WithDeadline(time.Now().Add(time.Hour))
-
- for _, desiredTimeout := range cases {
- // Test all the various ways of getting deadline contexts.
- start := time.Now()
- ctx, _ := rootCtx.WithDeadline(start.Add(desiredTimeout))
- testDeadline(t, ctx, start, desiredTimeout)
-
- start = time.Now()
- ctx, _ = cancelCtx.WithDeadline(start.Add(desiredTimeout))
- testDeadline(t, ctx, start, desiredTimeout)
-
- start = time.Now()
- ctx, _ = deadlineCtx.WithDeadline(start.Add(desiredTimeout))
- testDeadline(t, ctx, start, desiredTimeout)
-
- start = time.Now()
- ctx, _ = rootCtx.WithTimeout(desiredTimeout)
- testDeadline(t, ctx, start, desiredTimeout)
-
- start = time.Now()
- ctx, _ = cancelCtx.WithTimeout(desiredTimeout)
- testDeadline(t, ctx, start, desiredTimeout)
-
- start = time.Now()
- ctx, _ = deadlineCtx.WithTimeout(desiredTimeout)
- testDeadline(t, ctx, start, desiredTimeout)
- }
-
- ctx, cancel := testContext().WithDeadline(time.Now().Add(100 * time.Hour))
- testCancel(t, ctx, cancel)
-}
-
-func TestDeadlineContextWithRace(t *testing.T) {
- ctx, cancel := testContext().WithDeadline(time.Now().Add(100 * time.Hour))
- var wg sync.WaitGroup
- wg.Add(10)
- for i := 0; i < 10; i++ {
- go func() {
- cancel()
- wg.Done()
- }()
- }
- wg.Wait()
- <-ctx.Done()
- if err := ctx.Err(); err != context.Canceled {
- t.Errorf("Unexpected error want %v, got %v", context.Canceled, err)
- }
-}
-
-func TestValueContext(t *testing.T) {
- type testContextKey int
- const (
- key1 = testContextKey(iota)
- key2
- key3
- key4
- )
- const (
- val1 = iota
- val2
- val3
- )
- ctx1 := testContext().WithValue(key1, val1)
- ctx2 := ctx1.WithValue(key2, val2)
- ctx3 := ctx2.WithValue(key3, val3)
-
- expected := map[interface{}]interface{}{
- key1: val1,
- key2: val2,
- key3: val3,
- key4: nil,
- }
- for k, v := range expected {
- if got := ctx3.Value(k); got != v {
- t.Errorf("Got wrong value for %v: want %v got %v", k, v, got)
- }
- }
-
-}
diff --git a/runtimes/google/ipc/discharges.go b/runtimes/google/ipc/discharges.go
index 3e1b3dd..be7641d 100644
--- a/runtimes/google/ipc/discharges.go
+++ b/runtimes/google/ipc/discharges.go
@@ -20,7 +20,7 @@
// discharger implements vc.DischargeClient.
type dischargeClient struct {
c ipc.Client
- defaultCtx context.T
+ defaultCtx *context.T
cache dischargeCache
}
@@ -31,7 +31,7 @@
// PrepareDischarges call. This typically happens when fetching discharges on
// behalf of a server accepting connections, i.e., before any notion of the
// "context" of an API call has been established.
-func InternalNewDischargeClient(streamMgr stream.Manager, ns naming.Namespace, defaultCtx context.T, opts ...ipc.ClientOpt) (vc.DischargeClient, error) {
+func InternalNewDischargeClient(streamMgr stream.Manager, ns naming.Namespace, defaultCtx *context.T, opts ...ipc.ClientOpt) (vc.DischargeClient, error) {
if defaultCtx == nil {
return nil, fmt.Errorf("must provide a non-nil context to InternalNewDischargeClient")
}
@@ -56,7 +56,7 @@
// options, or requested from the discharge issuer indicated on the caveat.
// Note that requesting a discharge is an ipc call, so one copy of this
// function must be able to successfully terminate while another is blocked.
-func (d *dischargeClient) PrepareDischarges(ctx context.T, forcaveats []security.ThirdPartyCaveat, impetus security.DischargeImpetus) (ret []security.Discharge) {
+func (d *dischargeClient) PrepareDischarges(ctx *context.T, forcaveats []security.ThirdPartyCaveat, impetus security.DischargeImpetus) (ret []security.Discharge) {
if len(forcaveats) == 0 {
return
}
@@ -98,7 +98,7 @@
// caveats, fetchDischarges keeps retrying until either all discharges can be
// fetched or no new discharges are fetched.
// REQUIRES: len(caveats) == len(out)
-func (d *dischargeClient) fetchDischarges(ctx context.T, caveats []security.ThirdPartyCaveat, impetus security.DischargeImpetus, out []security.Discharge) {
+func (d *dischargeClient) fetchDischarges(ctx *context.T, caveats []security.ThirdPartyCaveat, impetus security.DischargeImpetus, out []security.Discharge) {
var wg sync.WaitGroup
for {
type fetched struct {
@@ -111,7 +111,7 @@
continue
}
wg.Add(1)
- go func(i int, ctx context.T, cav security.ThirdPartyCaveat) {
+ go func(i int, ctx *context.T, cav security.ThirdPartyCaveat) {
defer wg.Done()
vlog.VI(3).Infof("Fetching discharge for %v", cav)
call, err := d.c.StartCall(ctx, cav.Location(), "Discharge", []interface{}{cav, filteredImpetus(cav.Requirements(), impetus)}, vc.NoDischarges{})
diff --git a/runtimes/google/ipc/full_test.go b/runtimes/google/ipc/full_test.go
index d5612b0..cd6ac56 100644
--- a/runtimes/google/ipc/full_test.go
+++ b/runtimes/google/ipc/full_test.go
@@ -37,6 +37,7 @@
"v.io/core/veyron/runtimes/google/lib/publisher"
inaming "v.io/core/veyron/runtimes/google/naming"
tnaming "v.io/core/veyron/runtimes/google/testing/mocks/naming"
+ truntime "v.io/core/veyron/runtimes/google/testing/mocks/runtime"
ivtrace "v.io/core/veyron/runtimes/google/vtrace"
)
@@ -76,6 +77,21 @@
return nil
}
+// We need a special way to create contexts for tests. We
+// can't create a real runtime in the runtime implementation
+// so we use a fake one that panics if used. The runtime
+// implementation should not ever use the Runtime from a context.
+func testContext() *context.T {
+ ctx, _ := testContextWithoutDeadline().WithTimeout(20 * time.Second)
+ return ctx
+}
+
+func testContextWithoutDeadline() *context.T {
+ ctx := context.NewUninitializedContext(&truntime.PanicRuntime{})
+ ctx, _ = ivtrace.WithNewRootSpan(ctx, nil, false)
+ return ctx
+}
+
type userType string
type testServer struct{}
diff --git a/runtimes/google/ipc/glob.go b/runtimes/google/ipc/glob.go
index b626f49..08480f0 100644
--- a/runtimes/google/ipc/glob.go
+++ b/runtimes/google/ipc/glob.go
@@ -314,7 +314,7 @@
// mutableContext is like mutableCall but only provides the context portion.
type mutableContext struct {
- context.T
+ *context.T
M struct {
security.ContextParams
Blessings security.Blessings
@@ -322,7 +322,7 @@
}
}
-func (c *mutableContext) Context() context.T { return c.T }
+func (c *mutableContext) Context() *context.T { return c.T }
func (c *mutableContext) Timestamp() time.Time { return c.M.Timestamp }
func (c *mutableContext) Method() string { return c.M.Method }
func (c *mutableContext) MethodTags() []interface{} { return c.M.MethodTags }
diff --git a/runtimes/google/ipc/server.go b/runtimes/google/ipc/server.go
index 12fb6cf..ca3b7c8 100644
--- a/runtimes/google/ipc/server.go
+++ b/runtimes/google/ipc/server.go
@@ -44,7 +44,7 @@
type server struct {
sync.Mutex
- ctx context.T // context used by the server to make internal RPCs.
+ ctx *context.T // context used by the server to make internal RPCs.
streamMgr stream.Manager // stream manager to listen for new flows.
publisher publisher.Publisher // publisher to publish mounttable mounts.
listenerOpts []stream.ListenerOpt // listener opts passed to Listen.
@@ -86,7 +86,7 @@
func (PreferredServerResolveProtocols) IPCServerOpt() {}
-func InternalNewServer(ctx context.T, streamMgr stream.Manager, ns naming.Namespace, store *ivtrace.Store, opts ...ipc.ServerOpt) (ipc.Server, error) {
+func InternalNewServer(ctx *context.T, streamMgr stream.Manager, ns naming.Namespace, store *ivtrace.Store, opts ...ipc.ServerOpt) (ipc.Server, error) {
ctx, _ = ivtrace.WithNewSpan(ctx, "NewServer")
statsPrefix := naming.Join("ipc", "server", "routing-id", streamMgr.RoutingID().String())
s := &server{
@@ -787,7 +787,7 @@
// flowServer implements the RPC server-side protocol for a single RPC, over a
// flow that's already connected to the client.
type flowServer struct {
- context.T
+ *context.T
server *server // ipc.Server that this flow server belongs to
disp ipc.Dispatcher // ipc.Dispatcher that will serve RPCs on this flow
dec vomDecoder // to decode requests and args from the client
@@ -885,11 +885,11 @@
results, err := fs.processRequest()
- ivtrace.FromContext(fs).Finish()
+ ivtrace.FromContext(fs.T).Finish()
var traceResponse vtrace.Response
if fs.allowDebug {
- traceResponse = ivtrace.Response(fs)
+ traceResponse = ivtrace.Response(fs.T)
}
// Respond to the client with the response header and positional results.
@@ -955,7 +955,7 @@
if verr != nil {
// We don't know what the ipc call was supposed to be, but we'll create
// a placeholder span so we can capture annotations.
- fs.T, _ = ivtrace.WithNewSpan(fs, fmt.Sprintf("\"%s\".UNKNOWN", fs.Name()))
+ fs.T, _ = ivtrace.WithNewSpan(fs.T, fmt.Sprintf("\"%s\".UNKNOWN", fs.Name()))
return nil, verr
}
fs.method = req.Method
@@ -965,7 +965,7 @@
// on the server even if they will not be allowed to collect the
// results later. This might be considered a DOS vector.
spanName := fmt.Sprintf("\"%s\".%s", fs.Name(), fs.Method())
- fs.T, _ = ivtrace.WithContinuedSpan(fs, spanName, req.TraceRequest, fs.server.traceStore)
+ fs.T, _ = ivtrace.WithContinuedSpan(fs.T, spanName, req.TraceRequest, fs.server.traceStore)
var cancel context.CancelFunc
if req.Timeout != ipc.NoTimeout {
@@ -1187,7 +1187,7 @@
//nologcall
return fs.tags
}
-func (fs *flowServer) Context() context.T {
+func (fs *flowServer) Context() *context.T {
return fs.T
}
diff --git a/runtimes/google/ipc/stream/vc/auth.go b/runtimes/google/ipc/stream/vc/auth.go
index f7cb796..7cfbeee 100644
--- a/runtimes/google/ipc/stream/vc/auth.go
+++ b/runtimes/google/ipc/stream/vc/auth.go
@@ -57,7 +57,7 @@
//
// TODO(ashankar): Seems like there is no way the blessing store
// can say that it does NOT want to share the default blessing with the server?
-func AuthenticateAsClient(ctx context.T, conn io.ReadWriteCloser, principal security.Principal, dc DischargeClient, crypter crypto.Crypter, v version.IPCVersion) (server, client security.Blessings, serverDischarges map[string]security.Discharge, err error) {
+func AuthenticateAsClient(ctx *context.T, conn io.ReadWriteCloser, principal security.Principal, dc DischargeClient, crypter crypto.Crypter, v version.IPCVersion) (server, client security.Blessings, serverDischarges map[string]security.Discharge, err error) {
defer conn.Close()
if server, serverDischarges, err = readBlessings(conn, authServerContextTag, crypter, v); err != nil {
return
diff --git a/runtimes/google/ipc/stream/vc/vc.go b/runtimes/google/ipc/stream/vc/vc.go
index e34344b..a52585b 100644
--- a/runtimes/google/ipc/stream/vc/vc.go
+++ b/runtimes/google/ipc/stream/vc/vc.go
@@ -122,7 +122,7 @@
//
// TODO(ataly, ashankar): What should be the impetus for obtaining the discharges?
type DischargeClient interface {
- PrepareDischarges(ctx context.T, forcaveats []security.ThirdPartyCaveat, impetus security.DischargeImpetus) []security.Discharge
+ PrepareDischarges(ctx *context.T, forcaveats []security.ThirdPartyCaveat, impetus security.DischargeImpetus) []security.Discharge
// Invalidate marks the provided discharges as invalid, and therefore unfit
// for being returned by a subsequent PrepareDischarges call.
Invalidate(discharges ...security.Discharge)
@@ -133,7 +133,7 @@
}
// DialContext establishes the context under which a VC Dial was initiated.
-type DialContext struct{ context.T }
+type DialContext struct{ *context.T }
func (DialContext) IPCStreamVCOpt() {}
@@ -380,7 +380,7 @@
tlsSessionCache crypto.TLSClientSessionCache
securityLevel options.VCSecurityLevel
dischargeClient DischargeClient
- ctx context.T
+ ctx *context.T
noDischarges bool
)
for _, o := range opts {
diff --git a/runtimes/google/ipc/stream/vc/vc_test.go b/runtimes/google/ipc/stream/vc/vc_test.go
index 3728eb4..ab6a300 100644
--- a/runtimes/google/ipc/stream/vc/vc_test.go
+++ b/runtimes/google/ipc/stream/vc/vc_test.go
@@ -165,7 +165,7 @@
type mockDischargeClient []security.Discharge
-func (m mockDischargeClient) PrepareDischarges(_ context.T, forcaveats []security.ThirdPartyCaveat, impetus security.DischargeImpetus) []security.Discharge {
+func (m mockDischargeClient) PrepareDischarges(_ *context.T, forcaveats []security.ThirdPartyCaveat, impetus security.DischargeImpetus) []security.Discharge {
return m
}
func (mockDischargeClient) Invalidate(...security.Discharge) {}
diff --git a/runtimes/google/ipc/stream/vif/auth.go b/runtimes/google/ipc/stream/vif/auth.go
index 681e91c..db659c2 100644
--- a/runtimes/google/ipc/stream/vif/auth.go
+++ b/runtimes/google/ipc/stream/vif/auth.go
@@ -61,7 +61,7 @@
// including a hash of the HopSetup message in the encrypted stream. It is
// likely that this will be addressed in subsequent protocol versions (or it may
// not be addressed at all if IPCVersion6 becomes the only supported version).
-func AuthenticateAsClient(ctx context.T, conn net.Conn, versions *version.Range, principal security.Principal, dc vc.DischargeClient) (crypto.ControlCipher, error) {
+func AuthenticateAsClient(ctx *context.T, conn net.Conn, versions *version.Range, principal security.Principal, dc vc.DischargeClient) (crypto.ControlCipher, error) {
if versions == nil {
versions = version.SupportedRange
}
@@ -117,7 +117,7 @@
}
}
-func authenticateAsClientIPC6(ctx context.T, writer io.Writer, reader *iobuf.Reader, principal security.Principal, dc vc.DischargeClient,
+func authenticateAsClientIPC6(ctx *context.T, writer io.Writer, reader *iobuf.Reader, principal security.Principal, dc vc.DischargeClient,
pvt *privateData, pub, ppub *message.HopSetup) (crypto.ControlCipher, error) {
pbox := ppub.NaclBox()
if pbox == nil {
@@ -230,7 +230,7 @@
// clientAuthOptions extracts the client authentication options from the options
// list.
-func clientAuthOptions(lopts []stream.VCOpt) (ctx context.T, principal security.Principal, dischargeClient vc.DischargeClient, err error) {
+func clientAuthOptions(lopts []stream.VCOpt) (ctx *context.T, principal security.Principal, dischargeClient vc.DischargeClient, err error) {
var securityLevel options.VCSecurityLevel
var noDischarges bool
for _, o := range lopts {
diff --git a/runtimes/google/lib/publisher/publisher.go b/runtimes/google/lib/publisher/publisher.go
index d2a27d5..c5d35ed 100644
--- a/runtimes/google/lib/publisher/publisher.go
+++ b/runtimes/google/lib/publisher/publisher.go
@@ -80,7 +80,7 @@
type stopCmd struct{} // sent to the runloop when we want it to exit.
// New returns a new publisher that updates mounts on ns every period.
-func New(ctx context.T, ns naming.Namespace, period time.Duration) Publisher {
+func New(ctx *context.T, ns naming.Namespace, period time.Duration) Publisher {
p := &publisher{
cmdchan: make(chan interface{}),
donechan: make(chan struct{}),
@@ -165,7 +165,7 @@
<-p.donechan
}
-func runLoop(ctx context.T, cmdchan chan interface{}, donechan chan struct{}, ns naming.Namespace, period time.Duration) {
+func runLoop(ctx *context.T, cmdchan chan interface{}, donechan chan struct{}, ns naming.Namespace, period time.Duration) {
vlog.VI(2).Info("ipc pub: start runLoop")
state := newPubState(ctx, ns, period)
@@ -207,7 +207,7 @@
// pubState maintains the state for our periodic mounts. It is not thread-safe;
// it's only used in the sequential publisher runLoop.
type pubState struct {
- ctx context.T
+ ctx *context.T
ns naming.Namespace
period time.Duration
deadline time.Time // deadline for the next sync call
@@ -229,7 +229,7 @@
lastUnmountErr error
}
-func newPubState(ctx context.T, ns naming.Namespace, period time.Duration) *pubState {
+func newPubState(ctx *context.T, ns naming.Namespace, period time.Duration) *pubState {
return &pubState{
ctx: ctx,
ns: ns,
diff --git a/runtimes/google/lib/publisher/publisher_test.go b/runtimes/google/lib/publisher/publisher_test.go
index 5e15a0f..cf00157 100644
--- a/runtimes/google/lib/publisher/publisher_test.go
+++ b/runtimes/google/lib/publisher/publisher_test.go
@@ -9,15 +9,14 @@
"v.io/core/veyron2/context"
"v.io/core/veyron2/naming"
- iipc "v.io/core/veyron/runtimes/google/ipc"
"v.io/core/veyron/runtimes/google/lib/publisher"
tnaming "v.io/core/veyron/runtimes/google/testing/mocks/naming"
"v.io/core/veyron/runtimes/google/testing/mocks/runtime"
"v.io/core/veyron/runtimes/google/vtrace"
)
-func testContext() context.T {
- ctx := iipc.InternalNewContext(&runtime.PanicRuntime{})
+func testContext() *context.T {
+ ctx := context.NewUninitializedContext(&runtime.PanicRuntime{})
ctx, _ = vtrace.WithNewSpan(ctx, "")
ctx, _ = ctx.WithDeadline(time.Now().Add(20 * time.Second))
return ctx
diff --git a/runtimes/google/naming/namespace/all_test.go b/runtimes/google/naming/namespace/all_test.go
index 9c76b45..f86af6a 100644
--- a/runtimes/google/naming/namespace/all_test.go
+++ b/runtimes/google/naming/namespace/all_test.go
@@ -140,7 +140,7 @@
}
}
-func doResolveTest(t *testing.T, fname string, f func(context.T, string, ...naming.ResolveOpt) ([]string, error), ctx context.T, name string, want []string, opts ...naming.ResolveOpt) {
+func doResolveTest(t *testing.T, fname string, f func(*context.T, string, ...naming.ResolveOpt) ([]string, error), ctx *context.T, name string, want []string, opts ...naming.ResolveOpt) {
servers, err := f(ctx, name, opts...)
if err != nil {
boom(t, "Failed to %s %s: %s", fname, name, err)
diff --git a/runtimes/google/naming/namespace/glob.go b/runtimes/google/naming/namespace/glob.go
index 3dc90e9..6d15ed7 100644
--- a/runtimes/google/naming/namespace/glob.go
+++ b/runtimes/google/naming/namespace/glob.go
@@ -29,7 +29,7 @@
// pelems the pattern to match relative to the mounted subtree.
// l the list to add results to.
// recursive true to continue below the matched pattern
-func (ns *namespace) globAtServer(ctx context.T, qe *queuedEntry, pattern *glob.Glob, l *list.List) error {
+func (ns *namespace) globAtServer(ctx *context.T, qe *queuedEntry, pattern *glob.Glob, l *list.List) error {
server := qe.me
client := veyron2.RuntimeFromContext(ctx).Client()
pstr := pattern.String()
@@ -107,7 +107,7 @@
}
// Glob implements naming.MountTable.Glob.
-func (ns *namespace) Glob(ctx context.T, pattern string) (chan naming.MountEntry, error) {
+func (ns *namespace) Glob(ctx *context.T, pattern string) (chan naming.MountEntry, error) {
defer vlog.LogCall()()
e, patternWasRooted := ns.rootMountEntry(pattern)
if len(e.Servers) == 0 {
@@ -140,7 +140,7 @@
return strings.Count(name, "/") + 1
}
-func (ns *namespace) globLoop(ctx context.T, e *naming.MountEntry, prefix string, pattern *glob.Glob, reply chan naming.MountEntry) {
+func (ns *namespace) globLoop(ctx *context.T, e *naming.MountEntry, prefix string, pattern *glob.Glob, reply chan naming.MountEntry) {
defer close(reply)
// As we encounter new mount tables while traversing the Glob, we add them to the list 'l'. The loop below
diff --git a/runtimes/google/naming/namespace/mount.go b/runtimes/google/naming/namespace/mount.go
index 1546d45..eb126dc 100644
--- a/runtimes/google/naming/namespace/mount.go
+++ b/runtimes/google/naming/namespace/mount.go
@@ -19,7 +19,7 @@
}
// mountIntoMountTable mounts a single server into a single mount table.
-func mountIntoMountTable(ctx context.T, client ipc.Client, name, server string, ttl time.Duration, flags naming.MountFlag, id string) (s status) {
+func mountIntoMountTable(ctx *context.T, client ipc.Client, name, server string, ttl time.Duration, flags naming.MountFlag, id string) (s status) {
s.id = id
ctx, _ = ctx.WithTimeout(callTimeout)
call, err := client.StartCall(ctx, name, "Mount", []interface{}{server, uint32(ttl.Seconds()), flags}, options.NoResolve(true))
@@ -34,7 +34,7 @@
}
// unmountFromMountTable removes a single mounted server from a single mount table.
-func unmountFromMountTable(ctx context.T, client ipc.Client, name, server string, id string) (s status) {
+func unmountFromMountTable(ctx *context.T, client ipc.Client, name, server string, id string) (s status) {
s.id = id
ctx, _ = ctx.WithTimeout(callTimeout)
call, err := client.StartCall(ctx, name, "Unmount", []interface{}{server}, options.NoResolve(true))
@@ -83,7 +83,7 @@
}
// dispatch executes f in parallel for each mount table implementing mTName.
-func (ns *namespace) dispatch(ctx context.T, mTName string, f func(context.T, string, string) status) error {
+func (ns *namespace) dispatch(ctx *context.T, mTName string, f func(*context.T, string, string) status) error {
// Resolve to all the mount tables implementing name.
mts, err := ns.ResolveToMountTable(ctx, mTName)
if err != nil {
@@ -102,7 +102,7 @@
return finalerr
}
-func (ns *namespace) Mount(ctx context.T, name, server string, ttl time.Duration, opts ...naming.MountOpt) error {
+func (ns *namespace) Mount(ctx *context.T, name, server string, ttl time.Duration, opts ...naming.MountOpt) error {
defer vlog.LogCall()()
var flags naming.MountFlag
@@ -123,7 +123,7 @@
client := veyron2.RuntimeFromContext(ctx).Client()
// Mount the server in all the returned mount tables.
- f := func(ctx context.T, mt, id string) status {
+ f := func(ctx *context.T, mt, id string) status {
return mountIntoMountTable(ctx, client, mt, server, ttl, flags, id)
}
err := ns.dispatch(ctx, name, f)
@@ -131,11 +131,11 @@
return err
}
-func (ns *namespace) Unmount(ctx context.T, name, server string) error {
+func (ns *namespace) Unmount(ctx *context.T, name, server string) error {
defer vlog.LogCall()()
// Unmount the server from all the mount tables.
client := veyron2.RuntimeFromContext(ctx).Client()
- f := func(context context.T, mt, id string) status {
+ f := func(context *context.T, mt, id string) status {
return unmountFromMountTable(ctx, client, mt, server, id)
}
err := ns.dispatch(ctx, name, f)
diff --git a/runtimes/google/naming/namespace/resolve.go b/runtimes/google/naming/namespace/resolve.go
index 097359a..1cd6005 100644
--- a/runtimes/google/naming/namespace/resolve.go
+++ b/runtimes/google/naming/namespace/resolve.go
@@ -14,7 +14,7 @@
"v.io/core/veyron2/vlog"
)
-func (ns *namespace) resolveAgainstMountTable(ctx context.T, client ipc.Client, e *naming.MountEntry, pattern string, opts ...ipc.CallOpt) (*naming.MountEntry, error) {
+func (ns *namespace) resolveAgainstMountTable(ctx *context.T, client ipc.Client, e *naming.MountEntry, pattern string, opts ...ipc.CallOpt) (*naming.MountEntry, error) {
// Try each server till one answers.
finalErr := errors.New("no servers to resolve query")
for _, s := range e.Servers {
@@ -69,7 +69,7 @@
}
// ResolveX implements veyron2/naming.Namespace.
-func (ns *namespace) ResolveX(ctx context.T, name string, opts ...naming.ResolveOpt) (*naming.MountEntry, error) {
+func (ns *namespace) ResolveX(ctx *context.T, name string, opts ...naming.ResolveOpt) (*naming.MountEntry, error) {
defer vlog.LogCall()()
e, _ := ns.rootMountEntry(name)
if vlog.V(2) {
@@ -119,7 +119,7 @@
}
// Resolve implements veyron2/naming.Namespace.
-func (ns *namespace) Resolve(ctx context.T, name string, opts ...naming.ResolveOpt) ([]string, error) {
+func (ns *namespace) Resolve(ctx *context.T, name string, opts ...naming.ResolveOpt) ([]string, error) {
defer vlog.LogCall()()
e, err := ns.ResolveX(ctx, name, opts...)
if err != nil {
@@ -129,7 +129,7 @@
}
// ResolveToMountTableX implements veyron2/naming.Namespace.
-func (ns *namespace) ResolveToMountTableX(ctx context.T, name string, opts ...naming.ResolveOpt) (*naming.MountEntry, error) {
+func (ns *namespace) ResolveToMountTableX(ctx *context.T, name string, opts ...naming.ResolveOpt) (*naming.MountEntry, error) {
defer vlog.LogCall()()
e, _ := ns.rootMountEntry(name)
if vlog.V(2) {
@@ -180,7 +180,7 @@
}
// ResolveToMountTable implements veyron2/naming.Namespace.
-func (ns *namespace) ResolveToMountTable(ctx context.T, name string, opts ...naming.ResolveOpt) ([]string, error) {
+func (ns *namespace) ResolveToMountTable(ctx *context.T, name string, opts ...naming.ResolveOpt) ([]string, error) {
defer vlog.LogCall()()
e, err := ns.ResolveToMountTableX(ctx, name, opts...)
if err != nil {
@@ -199,7 +199,7 @@
}
// TODO(caprita): UnresolveStep no longer exists.
-func unresolveAgainstServer(ctx context.T, client ipc.Client, names []string) ([]string, error) {
+func unresolveAgainstServer(ctx *context.T, client ipc.Client, names []string) ([]string, error) {
finalErr := errors.New("no servers to unresolve")
for _, name := range names {
callCtx, _ := ctx.WithTimeout(callTimeout)
@@ -229,7 +229,7 @@
// branches?).
// Unesolve implements veyron2/naming.Namespace.
-func (ns *namespace) Unresolve(ctx context.T, name string) ([]string, error) {
+func (ns *namespace) Unresolve(ctx *context.T, name string) ([]string, error) {
defer vlog.LogCall()()
vlog.VI(2).Infof("Unresolve %s", name)
names, err := ns.Resolve(ctx, name)
diff --git a/runtimes/google/rt/ipc.go b/runtimes/google/rt/ipc.go
index da8b27e..227455e 100644
--- a/runtimes/google/rt/ipc.go
+++ b/runtimes/google/rt/ipc.go
@@ -49,8 +49,8 @@
return rt.client
}
-func (rt *vrt) NewContext() context.T {
- ctx := iipc.InternalNewContext(rt)
+func (rt *vrt) NewContext() *context.T {
+ ctx := context.NewUninitializedContext(rt)
ctx = i18n.ContextWithLangID(ctx, rt.lang)
ctx = verror2.ContextWithComponentName(ctx, rt.program)
@@ -61,11 +61,11 @@
return ctx
}
-func (rt *vrt) WithNewSpan(ctx context.T, name string) (context.T, vtrace.Span) {
+func (rt *vrt) WithNewSpan(ctx *context.T, name string) (*context.T, vtrace.Span) {
return ivtrace.WithNewSpan(ctx, name)
}
-func (rt *vrt) SpanFromContext(ctx context.T) vtrace.Span {
+func (rt *vrt) SpanFromContext(ctx *context.T) vtrace.Span {
return ivtrace.FromContext(ctx)
}
diff --git a/runtimes/google/testing/mocks/ipc/simple_client.go b/runtimes/google/testing/mocks/ipc/simple_client.go
index 40bc9de..23a6b89 100644
--- a/runtimes/google/testing/mocks/ipc/simple_client.go
+++ b/runtimes/google/testing/mocks/ipc/simple_client.go
@@ -44,7 +44,7 @@
}
// StartCall Implements ipc.Client
-func (c *SimpleMockClient) StartCall(ctx context.T, name, method string, args []interface{}, opts ...ipc.CallOpt) (ipc.Call, error) {
+func (c *SimpleMockClient) StartCall(ctx *context.T, name, method string, args []interface{}, opts ...ipc.CallOpt) (ipc.Call, error) {
defer vlog.LogCall()()
results, ok := c.results[method]
if !ok {
diff --git a/runtimes/google/testing/mocks/ipc/simple_client_test.go b/runtimes/google/testing/mocks/ipc/simple_client_test.go
index 9dcbc87..2ef7ba5 100644
--- a/runtimes/google/testing/mocks/ipc/simple_client_test.go
+++ b/runtimes/google/testing/mocks/ipc/simple_client_test.go
@@ -2,31 +2,12 @@
import (
"testing"
- "time"
"v.io/core/veyron2/context"
+
+ "v.io/core/veyron/runtimes/google/testing/mocks/runtime"
)
-type fakeContext struct{}
-
-func (*fakeContext) Deadline() (deadline time.Time, ok bool) { return }
-func (*fakeContext) Done() <-chan struct{} { return nil }
-func (*fakeContext) Err() error { return nil }
-func (*fakeContext) Value(key interface{}) interface{} { return nil }
-func (*fakeContext) Runtime() interface{} { return nil }
-func (*fakeContext) WithCancel() (context.T, context.CancelFunc) {
- return &fakeContext{}, func() {}
-}
-func (*fakeContext) WithDeadline(time.Time) (context.T, context.CancelFunc) {
- return &fakeContext{}, func() {}
-}
-func (*fakeContext) WithTimeout(time.Duration) (context.T, context.CancelFunc) {
- return &fakeContext{}, func() {}
-}
-func (*fakeContext) WithValue(k, v interface{}) context.T {
- return &fakeContext{}
-}
-
func TestSuccessfulCalls(t *testing.T) {
method1ExpectedResult := []interface{}{"one", 2}
@@ -39,7 +20,7 @@
"method3": method3ExpectedResult,
})
- ctx := &fakeContext{}
+ ctx := context.NewUninitializedContext(&runtime.PanicRuntime{})
// method1
method1Call, err := client.StartCall(ctx, "name/obj", "method1", []interface{}{})
@@ -95,7 +76,8 @@
sampleStruct{name: "bar"},
},
})
- call, _ := client.StartCall(&fakeContext{}, "name/obj", "foo", []interface{}{})
+ ctx := context.NewUninitializedContext(&runtime.PanicRuntime{})
+ call, _ := client.StartCall(ctx, "name/obj", "foo", []interface{}{})
var result sampleStruct
call.Finish(&result)
if result.name != "bar" {
@@ -108,7 +90,8 @@
client := NewSimpleClient(map[string][]interface{}{
"bar": []interface{}{},
})
- _, err := client.StartCall(&fakeContext{}, "name/obj", "wrongMethodName", []interface{}{})
+ ctx := context.NewUninitializedContext(&runtime.PanicRuntime{})
+ _, err := client.StartCall(ctx, "name/obj", "wrongMethodName", []interface{}{})
if err == nil {
t.Errorf(`StartCall: should have returned an error on invalid method name`)
return
@@ -122,7 +105,7 @@
})
errMsg := "Expected method to be called %d times but it was called %d"
- ctx := &fakeContext{}
+ ctx := context.NewUninitializedContext(&runtime.PanicRuntime{})
// method 1
if n := client.TimesCalled("method1"); n != 0 {
diff --git a/runtimes/google/testing/mocks/naming/namespace.go b/runtimes/google/testing/mocks/naming/namespace.go
index 6c68c83..b019b8c 100644
--- a/runtimes/google/testing/mocks/naming/namespace.go
+++ b/runtimes/google/testing/mocks/naming/namespace.go
@@ -32,7 +32,7 @@
ns naming.Namespace
}
-func (ns *namespace) Mount(ctx context.T, name, server string, _ time.Duration, _ ...naming.MountOpt) error {
+func (ns *namespace) Mount(ctx *context.T, name, server string, _ time.Duration, _ ...naming.MountOpt) error {
defer vlog.LogCall()()
ns.Lock()
defer ns.Unlock()
@@ -45,7 +45,7 @@
return nil
}
-func (ns *namespace) Unmount(ctx context.T, name, server string) error {
+func (ns *namespace) Unmount(ctx *context.T, name, server string) error {
defer vlog.LogCall()()
var servers []string
ns.Lock()
@@ -64,7 +64,7 @@
return nil
}
-func (ns *namespace) Resolve(ctx context.T, name string, opts ...naming.ResolveOpt) ([]string, error) {
+func (ns *namespace) Resolve(ctx *context.T, name string, opts ...naming.ResolveOpt) ([]string, error) {
defer vlog.LogCall()()
if address, _ := naming.SplitAddressName(name); len(address) > 0 {
return []string{name}, nil
@@ -84,7 +84,7 @@
return nil, verror.Make(naming.ErrNoSuchName, ctx, fmt.Sprintf("Resolve name %q not found in %v", name, ns.mounts))
}
-func (ns *namespace) ResolveX(ctx context.T, name string, opts ...naming.ResolveOpt) (*naming.MountEntry, error) {
+func (ns *namespace) ResolveX(ctx *context.T, name string, opts ...naming.ResolveOpt) (*naming.MountEntry, error) {
defer vlog.LogCall()()
e, err := ns.ns.ResolveX(ctx, name, naming.SkipResolveOpt{})
if err != nil {
@@ -110,21 +110,21 @@
return nil, verror.Make(naming.ErrNoSuchName, ctx, fmt.Sprintf("Resolve name %q not found in %v", name, ns.mounts))
}
-func (ns *namespace) ResolveToMountTableX(ctx context.T, name string, opts ...naming.ResolveOpt) (*naming.MountEntry, error) {
+func (ns *namespace) ResolveToMountTableX(ctx *context.T, name string, opts ...naming.ResolveOpt) (*naming.MountEntry, error) {
defer vlog.LogCall()()
// TODO(mattr): Implement this method for tests that might need it.
panic("ResolveToMountTable not implemented")
return nil, nil
}
-func (ns *namespace) ResolveToMountTable(ctx context.T, name string, opts ...naming.ResolveOpt) ([]string, error) {
+func (ns *namespace) ResolveToMountTable(ctx *context.T, name string, opts ...naming.ResolveOpt) ([]string, error) {
defer vlog.LogCall()()
// TODO(mattr): Implement this method for tests that might need it.
panic("ResolveToMountTable not implemented")
return nil, nil
}
-func (ns *namespace) Unresolve(ctx context.T, name string) ([]string, error) {
+func (ns *namespace) Unresolve(ctx *context.T, name string) ([]string, error) {
defer vlog.LogCall()()
// TODO(mattr): Implement this method for tests that might need it.
panic("Unresolve not implemented")
@@ -141,7 +141,7 @@
return nil
}
-func (ns *namespace) Glob(ctx context.T, pattern string) (chan naming.MountEntry, error) {
+func (ns *namespace) Glob(ctx *context.T, pattern string) (chan naming.MountEntry, error) {
defer vlog.LogCall()()
// TODO(mattr): Implement this method for tests that might need it.
panic("Glob not implemented")
diff --git a/runtimes/google/testing/mocks/runtime/panic_runtime.go b/runtimes/google/testing/mocks/runtime/panic_runtime.go
index 694d3d7..84b8d8b 100644
--- a/runtimes/google/testing/mocks/runtime/panic_runtime.go
+++ b/runtimes/google/testing/mocks/runtime/panic_runtime.go
@@ -30,11 +30,11 @@
func (*PanicRuntime) NewClient(opts ...ipc.ClientOpt) (ipc.Client, error) { panic(badRuntime) }
func (*PanicRuntime) NewServer(opts ...ipc.ServerOpt) (ipc.Server, error) { panic(badRuntime) }
func (*PanicRuntime) Client() ipc.Client { panic(badRuntime) }
-func (*PanicRuntime) NewContext() context.T { panic(badRuntime) }
+func (*PanicRuntime) NewContext() *context.T { panic(badRuntime) }
-func (PanicRuntime) WithNewSpan(c context.T, m string) (context.T, vtrace.Span) { return c, &span{m} }
+func (PanicRuntime) WithNewSpan(c *context.T, m string) (*context.T, vtrace.Span) { return c, &span{m} }
-func (*PanicRuntime) SpanFromContext(context.T) vtrace.Span { return &span{} }
+func (*PanicRuntime) SpanFromContext(*context.T) vtrace.Span { return &span{} }
func (*PanicRuntime) NewStreamManager(opts ...stream.ManagerOpt) (stream.Manager, error) {
panic(badRuntime)
}
diff --git a/runtimes/google/vtrace/collector.go b/runtimes/google/vtrace/collector.go
index aed4e01..ab7000c 100644
--- a/runtimes/google/vtrace/collector.go
+++ b/runtimes/google/vtrace/collector.go
@@ -164,7 +164,7 @@
}
// MergeResponse merges a vtrace.Response into the current trace.
-func MergeResponse(ctx context.T, response *vtrace.Response) {
+func MergeResponse(ctx *context.T, response *vtrace.Response) {
if span := getSpan(ctx); span != nil {
span.collector.merge(span, response)
}
diff --git a/runtimes/google/vtrace/vtrace.go b/runtimes/google/vtrace/vtrace.go
index 9f4c357..84c2825 100644
--- a/runtimes/google/vtrace/vtrace.go
+++ b/runtimes/google/vtrace/vtrace.go
@@ -51,7 +51,7 @@
func (c *span) Finish() { c.collector.finish(c) }
// Request generates a vtrace.Request from the active Span.
-func Request(ctx context.T) vtrace.Request {
+func Request(ctx *context.T) vtrace.Request {
if span := getSpan(ctx); span != nil {
return vtrace.Request{
SpanID: span.id,
@@ -63,7 +63,7 @@
}
// Response captures the vtrace.Response for the active Span.
-func Response(ctx context.T) vtrace.Response {
+func Response(ctx *context.T) vtrace.Response {
if span := getSpan(ctx); span != nil {
return span.collector.response()
}
@@ -76,7 +76,7 @@
// ContinuedSpan creates a span that represents a continuation of a trace from
// a remote server. name is a user readable string that describes the context
// and req contains the parameters needed to connect this span with it's trace.
-func WithContinuedSpan(ctx context.T, name string, req vtrace.Request, store *Store) (context.T, vtrace.Span) {
+func WithContinuedSpan(ctx *context.T, name string, req vtrace.Request, store *Store) (*context.T, vtrace.Span) {
newSpan := newSpan(req.SpanID, name, newCollector(req.TraceID, store))
if req.Method == vtrace.InMemory {
newSpan.collector.ForceCollect()
@@ -84,7 +84,7 @@
return ctx.WithValue(spanKey{}, newSpan), newSpan
}
-func WithNewRootSpan(ctx context.T, store *Store, forceCollect bool) (context.T, vtrace.Span) {
+func WithNewRootSpan(ctx *context.T, store *Store, forceCollect bool) (*context.T, vtrace.Span) {
id, err := uniqueid.Random()
if err != nil {
vlog.Errorf("vtrace: Couldn't generate Trace ID, debug data may be lost: %v", err)
@@ -99,7 +99,7 @@
}
// NewSpan creates a new span.
-func WithNewSpan(parent context.T, name string) (context.T, vtrace.Span) {
+func WithNewSpan(parent *context.T, name string) (*context.T, vtrace.Span) {
if curSpan := getSpan(parent); curSpan != nil {
s := newSpan(curSpan.ID(), name, curSpan.collector)
return parent.WithValue(spanKey{}, s), s
@@ -109,13 +109,13 @@
return WithNewRootSpan(parent, nil, false)
}
-func getSpan(ctx context.T) *span {
+func getSpan(ctx *context.T) *span {
span, _ := ctx.Value(spanKey{}).(*span)
return span
}
// GetSpan returns the active span from the context.
-func FromContext(ctx context.T) vtrace.Span {
+func FromContext(ctx *context.T) vtrace.Span {
span, _ := ctx.Value(spanKey{}).(vtrace.Span)
return span
}
diff --git a/runtimes/google/vtrace/vtrace_test.go b/runtimes/google/vtrace/vtrace_test.go
index 8fc9cf9..398a3ed 100644
--- a/runtimes/google/vtrace/vtrace_test.go
+++ b/runtimes/google/vtrace/vtrace_test.go
@@ -25,8 +25,8 @@
// can't create a real runtime in the runtime implementation
// so we use a fake one that panics if used. The runtime
// implementation should not ever use the Runtime from a context.
-func testContext() context.T {
- return iipc.InternalNewContext(&truntime.PanicRuntime{})
+func testContext() *context.T {
+ return context.NewUninitializedContext(&truntime.PanicRuntime{})
}
func TestNewFromContext(t *testing.T) {
@@ -34,7 +34,7 @@
c1, s1 := ivtrace.WithNewSpan(c0, "s1")
c2, s2 := ivtrace.WithNewSpan(c1, "s2")
c3, s3 := ivtrace.WithNewSpan(c2, "s3")
- expected := map[context.T]vtrace.Span{
+ expected := map[*context.T]vtrace.Span{
c0: nil,
c1: s1,
c2: s2,
@@ -199,7 +199,7 @@
}
}
-func runCallChain(t *testing.T, ctx context.T, force1, force2 bool) {
+func runCallChain(t *testing.T, ctx *context.T, force1, force2 bool) {
sm := manager.InternalNew(naming.FixedRoutingID(0x555555555))
ns := tnaming.NewSimpleNamespace()