Merge "x/ref: Restructure services/agent to follow conventions."
diff --git a/cmd/mounttable/impl.go b/cmd/mounttable/impl.go
index f0adc71..6b851e0 100644
--- a/cmd/mounttable/impl.go
+++ b/cmd/mounttable/impl.go
@@ -123,11 +123,7 @@
ctx, cancel := context.WithTimeout(gctx, time.Minute)
defer cancel()
client := v23.GetClient(ctx)
- call, err := client.StartCall(ctx, name, "Mount", []interface{}{server, seconds, flags}, options.NoResolve{})
- if err != nil {
- return err
- }
- if err := call.Finish(); err != nil {
+ if err := client.Call(ctx, name, "Mount", []interface{}{server, seconds, flags}, nil, options.NoResolve{}); err != nil {
return err
}
fmt.Fprintln(cmd.Stdout(), "Name mounted successfully.")
@@ -153,11 +149,7 @@
ctx, cancel := context.WithTimeout(gctx, time.Minute)
defer cancel()
client := v23.GetClient(ctx)
- call, err := client.StartCall(ctx, args[0], "Unmount", []interface{}{args[1]}, options.NoResolve{})
- if err != nil {
- return err
- }
- if err := call.Finish(); err != nil {
+ if err := client.Call(ctx, args[0], "Unmount", []interface{}{args[1]}, nil, options.NoResolve{}); err != nil {
return err
}
fmt.Fprintln(cmd.Stdout(), "Unmount successful or name not mounted.")
@@ -182,12 +174,8 @@
ctx, cancel := context.WithTimeout(gctx, time.Minute)
defer cancel()
client := v23.GetClient(ctx)
- call, err := client.StartCall(ctx, args[0], "ResolveStep", []interface{}{}, options.NoResolve{})
- if err != nil {
- return err
- }
var entry naming.MountEntry
- if err := call.Finish(&entry); err != nil {
+ if err := client.Call(ctx, args[0], "ResolveStep", nil, []interface{}{&entry}, options.NoResolve{}); err != nil {
return err
}
fmt.Fprintf(cmd.Stdout(), "Servers: %v Suffix: %q MT: %v\n", entry.Servers, entry.Name, entry.ServesMountTable)
diff --git a/cmd/namespace/doc.go b/cmd/namespace/doc.go
index b4ec7fe..9e25dbc 100644
--- a/cmd/namespace/doc.go
+++ b/cmd/namespace/doc.go
@@ -23,6 +23,7 @@
resolve Translates an object name to its object address(es)
resolvetomt Finds the address of the mounttable that holds an object name
permissions Manipulates permissions on an entry in the namespace
+ delete Deletes a name from the namespace
help Display help for commands or topics
Run "namespace help [command]" for command usage.
@@ -169,6 +170,19 @@
<permissions> is the path to a file containing a JSON-encoded Permissions object
(defined in v.io/v23/security/access/types.vdl), or "-" for STDIN.
+Namespace Delete
+
+Deletes a name from the namespace.
+
+Usage:
+ namespace delete [flags] <name>
+
+<name> is a name to delete.
+
+The namespace delete flags are:
+ -r=false
+ Delete all children of the name in addition to the name itself.
+
Namespace Help
Help with no args displays the usage of the parent command.
diff --git a/cmd/namespace/impl.go b/cmd/namespace/impl.go
index 91fca65..8f65e8e 100644
--- a/cmd/namespace/impl.go
+++ b/cmd/namespace/impl.go
@@ -25,6 +25,7 @@
flagLongGlob bool
flagInsecureResolve bool
flagInsecureResolveToMT bool
+ flagDeleteSubtree bool
)
var cmdGlob = &cmdline.Command{
@@ -290,7 +291,7 @@
ns := v23.GetNamespace(ctx)
for {
_, etag, err := ns.GetPermissions(ctx, name)
- if err != nil {
+ if err != nil && verror.ErrorID(err) != naming.ErrNoSuchName.ID {
return err
}
if err = ns.SetPermissions(ctx, name, perms, etag); verror.ErrorID(err) == verror.ErrBadVersion.ID {
@@ -332,10 +333,31 @@
return json.NewEncoder(cmd.Stdout()).Encode(perms)
}
+var cmdDelete = &cmdline.Command{
+ Run: runDelete,
+ Name: "delete",
+ Short: "Deletes a name from the namespace",
+ ArgsName: "<name>",
+ ArgsLong: "<name> is a name to delete.",
+ Long: "Deletes a name from the namespace.",
+}
+
+func runDelete(cmd *cmdline.Command, args []string) error {
+ if expected, got := 1, len(args); expected != got {
+ return cmd.UsageErrorf("delete: incorrect number of arguments, expected %d, got %d", expected, got)
+ }
+ name := args[0]
+ ctx, cancel := context.WithTimeout(gctx, time.Minute)
+ defer cancel()
+
+ return v23.GetNamespace(ctx).Delete(ctx, name, flagDeleteSubtree)
+}
+
func root() *cmdline.Command {
cmdGlob.Flags.BoolVar(&flagLongGlob, "l", false, "Long listing format.")
cmdResolve.Flags.BoolVar(&flagInsecureResolve, "insecure", false, "Insecure mode: May return results from untrusted servers and invoke Resolve on untrusted mounttables")
cmdResolveToMT.Flags.BoolVar(&flagInsecureResolveToMT, "insecure", false, "Insecure mode: May return results from untrusted servers and invoke Resolve on untrusted mounttables")
+ cmdDelete.Flags.BoolVar(&flagDeleteSubtree, "r", false, "Delete all children of the name in addition to the name itself.")
return &cmdline.Command{
Name: "namespace",
Short: "resolves and manages names in the Vanadium namespace",
@@ -347,6 +369,6 @@
with V23_NAMESPACE, e.g. V23_NAMESPACE, V23_NAMESPACE_2, V23_NAMESPACE_GOOGLE,
etc. The command line options override the environment.
`,
- Children: []*cmdline.Command{cmdGlob, cmdMount, cmdUnmount, cmdResolve, cmdResolveToMT, cmdPermissions},
+ Children: []*cmdline.Command{cmdGlob, cmdMount, cmdUnmount, cmdResolve, cmdResolveToMT, cmdPermissions, cmdDelete},
}
}
diff --git a/cmd/principal/doc.go b/cmd/principal/doc.go
index 40aea3c..93a5977 100644
--- a/cmd/principal/doc.go
+++ b/cmd/principal/doc.go
@@ -219,7 +219,11 @@
this tool is running in.
Usage:
- principal dump
+ principal dump [flags]
+
+The principal dump flags are:
+ -s=false
+ If true, show only the default blessing names
Principal Dumpblessings
diff --git a/cmd/principal/main.go b/cmd/principal/main.go
index d706292..e6f2a6b 100644
--- a/cmd/principal/main.go
+++ b/cmd/principal/main.go
@@ -44,6 +44,9 @@
flagBlessRemoteKey string
flagBlessRemoteToken string
+ // Flags for the "dump" command
+ flagDumpShort bool
+
// Flags for the "fork" command
flagForkCaveats caveatsFlag
flagForkFor time.Duration
@@ -82,6 +85,10 @@
defer shutdown()
p := v23.GetPrincipal(ctx)
+ if flagDumpShort {
+ fmt.Printf("%v\n", p.BlessingStore().Default())
+ return nil
+ }
fmt.Printf("Public key : %v\n", p.PublicKey())
fmt.Println("---------------- BlessingStore ----------------")
fmt.Printf("%v", p.BlessingStore().DebugString())
@@ -839,6 +846,8 @@
cmdBlessSelf.Flags.Var(&flagBlessSelfCaveats, "caveat", flagBlessSelfCaveats.usage())
cmdBlessSelf.Flags.DurationVar(&flagBlessSelfFor, "for", 0, "Duration of blessing validity (zero implies no expiration)")
cmdDump.Flags.BoolVar(&flagDumpShort, "s", false, "If true, show only the default blessing names")
+
cmdFork.Flags.BoolVar(&flagCreateOverwrite, "overwrite", false, "If true, any existing principal data in the directory will be overwritten")
cmdFork.Flags.Var(&flagForkCaveats, "caveat", flagForkCaveats.usage())
cmdFork.Flags.DurationVar(&flagForkFor, "for", 0, "Duration of blessing validity (zero implies no expiration caveat)")
@@ -1108,12 +1117,8 @@
// the recipient available to the sender and using
// options.ServerPublicKey instead of providing a "hash" of the
// recipient's public key and verifying in the Granter implementation.
- call, err := client.StartCall(ctx, object, "Grant", []interface{}{remoteToken}, granter, options.SkipServerEndpointAuthorization{})
- if err != nil {
- return fmt.Errorf("failed to start RPC to %q: %v", object, err)
- }
- if err := call.Finish(); err != nil {
- return fmt.Errorf("failed to finish RPC to %q: %v", object, err)
+ if err := client.Call(ctx, object, "Grant", []interface{}{remoteToken}, nil, granter, options.SkipServerEndpointAuthorization{}); err != nil {
+ return fmt.Errorf("failed to make RPC to %q: %v", object, err)
}
return nil
}
diff --git a/cmd/servicerunner/main.go b/cmd/servicerunner/main.go
index fa54314..0832676 100644
--- a/cmd/servicerunner/main.go
+++ b/cmd/servicerunner/main.go
@@ -46,7 +46,7 @@
if err != nil {
return fmt.Errorf("root failed: %v", err)
}
- mt, err := mounttablelib.NewMountTableDispatcher("")
+ mt, err := mounttablelib.NewMountTableDispatcher("", "mounttable")
if err != nil {
return fmt.Errorf("mounttablelib.NewMountTableDispatcher failed: %s", err)
}
diff --git a/cmd/vbash b/cmd/vbash
index 33029ac..e0f5c62 100755
--- a/cmd/vbash
+++ b/cmd/vbash
@@ -190,11 +190,8 @@
if [[ "${SEEK_BLESSING}" -eq "1" ]]; then
"${BIN_INSTALL}/principal" seekblessings
fi
-GREENBOLD="\[\033[1;32m\]"
-default_blessing() {
- "${BIN_INSTALL}/principal" dump | grep "Default blessings" | sed -e 's/Default blessings: //'
-}
-export PS1="\${PS1}(\${GREENBOLD}\$(default_blessing)\[\033[0m\])$ "
+export PROMPT_COMMAND='PS1="\u@\h (\[\e[1;32m\]\$("${BIN_INSTALL}/principal" dump -s)\[\e[0m\]):\w \$ "'
+
EOF
V23_CREDENTIALS="${CREDENTIALS_DIR}" exec "${BIN_INSTALL}/agentd" --additional-principals="${CREDENTIALS_DIR}" bash --rcfile "${INSTALL_DIR}/rcfile"
diff --git a/cmd/vrpc/doc.go b/cmd/vrpc/doc.go
index f281770..ff54449 100644
--- a/cmd/vrpc/doc.go
+++ b/cmd/vrpc/doc.go
@@ -79,6 +79,8 @@
-insecure=false
If true, skip server authentication. This means that the client will reveal
its blessings to servers that it may not recognize.
+ -show-reserved=false
+ If true, also show the signatures of reserved methods
Vrpc Call
diff --git a/cmd/vrpc/vrpc.go b/cmd/vrpc/vrpc.go
index bde1970..590e02d 100644
--- a/cmd/vrpc/vrpc.go
+++ b/cmd/vrpc/vrpc.go
@@ -16,6 +16,7 @@
"v.io/v23"
"v.io/v23/context"
+ "v.io/v23/naming"
"v.io/v23/options"
"v.io/v23/rpc"
"v.io/v23/rpc/reserved"
@@ -30,8 +31,9 @@
)
var (
- gctx *context.T
- flagInsecure bool
+ gctx *context.T
+ flagInsecure bool
+ flagShowReserved bool
)
func main() {
@@ -50,6 +52,8 @@
)
cmdSignature.Flags.BoolVar(&flagInsecure, insecureName, insecureVal, insecureDesc)
cmdIdentify.Flags.BoolVar(&flagInsecure, insecureName, insecureVal, insecureDesc)
+
+ cmdSignature.Flags.BoolVar(&flagShowReserved, "show-reserved", false, "If true, also show the signatures of reserved methods")
}
var cmdVRPC = &cmdline.Command{
@@ -166,6 +170,9 @@
return fmt.Errorf("Signature failed: %v", err)
}
for i, iface := range ifacesSig {
+ if !flagShowReserved && naming.IsReserved(iface.Name) {
+ continue
+ }
if i > 0 {
fmt.Fprintln(cmd.Stdout())
}
diff --git a/cmd/vrpc/vrpc_test.go b/cmd/vrpc/vrpc_test.go
index 5d380c2..190efc4 100644
--- a/cmd/vrpc/vrpc_test.go
+++ b/cmd/vrpc/vrpc_test.go
@@ -136,16 +136,29 @@
return name, shutdown
}
-func TestSignature(t *testing.T) {
+func testSignature(t *testing.T, showReserved bool, wantSig string) {
name, shutdown := initTest(t)
defer shutdown()
var stdout, stderr bytes.Buffer
cmdVRPC.Init(nil, &stdout, &stderr)
- if err := cmdVRPC.Execute([]string{"signature", name}); err != nil {
- t.Errorf("%v", err)
- return
+ args := []string{"signature", name}
+ // The cmdline package won't reparse the flags sent to Execute, so
+ // instead, set the flag variable directly from here.
+ flagShowReserved = showReserved
+ if err := cmdVRPC.Execute(args); err != nil {
+ t.Fatalf("%s: %v", args, err)
}
+
+ if got, want := stdout.String(), wantSig; got != want {
+ t.Errorf("%s: got stdout %s, want %s", args, got, want)
+ }
+ if got, want := stderr.String(), ""; got != want {
+ t.Errorf("%s: got stderr %s, want %s", args, got, want)
+ }
+}
+
+func TestSignatureWithReserved(t *testing.T) {
wantSig := `// TypeTester methods are listed in alphabetical order, to make it easier to
// test Signature output, which sorts methods alphabetically.
type "v.io/x/ref/cmd/vrpc/internal".TypeTester interface {
@@ -219,12 +232,44 @@
Y int32
}
`
- if got, want := stdout.String(), wantSig; got != want {
- t.Errorf("got stdout %q, want %q", got, want)
- }
- if got, want := stderr.String(), ""; got != want {
- t.Errorf("got stderr %q, want %q", got, want)
- }
+ testSignature(t, true, wantSig)
+}
+
+func TestSignatureNoReserved(t *testing.T) {
+ wantSig := `// TypeTester methods are listed in alphabetical order, to make it easier to
+// test Signature output, which sorts methods alphabetically.
+type "v.io/x/ref/cmd/vrpc/internal".TypeTester interface {
+ // Methods to test support for primitive types.
+ EchoBool(I1 bool) (O1 bool | error)
+ EchoByte(I1 byte) (O1 byte | error)
+ EchoFloat32(I1 float32) (O1 float32 | error)
+ EchoFloat64(I1 float64) (O1 float64 | error)
+ EchoInt32(I1 int32) (O1 int32 | error)
+ EchoInt64(I1 int64) (O1 int64 | error)
+ EchoString(I1 string) (O1 string | error)
+ EchoUint32(I1 uint32) (O1 uint32 | error)
+ EchoUint64(I1 uint64) (O1 uint64 | error)
+ // Methods to test support for composite types.
+ XEchoArray(I1 "v.io/x/ref/cmd/vrpc/internal".Array2Int) (O1 "v.io/x/ref/cmd/vrpc/internal".Array2Int | error)
+ XEchoMap(I1 map[int32]string) (O1 map[int32]string | error)
+ XEchoSet(I1 set[int32]) (O1 set[int32] | error)
+ XEchoSlice(I1 []int32) (O1 []int32 | error)
+ XEchoStruct(I1 "v.io/x/ref/cmd/vrpc/internal".Struct) (O1 "v.io/x/ref/cmd/vrpc/internal".Struct | error)
+ // Methods to test support for different number of arguments.
+ YMultiArg(I1 int32, I2 int32) (O1 int32, O2 int32 | error)
+ YNoArgs() error
+ // Methods to test support for streaming.
+ ZStream(NumStreamItems int32, StreamItem bool) stream<_, bool> error
+}
+
+type "v.io/x/ref/cmd/vrpc/internal".Array2Int [2]int32
+
+type "v.io/x/ref/cmd/vrpc/internal".Struct struct {
+ X int32
+ Y int32
+}
+`
+ testSignature(t, false, wantSig)
}
func TestMethodSignature(t *testing.T) {
diff --git a/cmd/vrun/vrun.go b/cmd/vrun/vrun.go
index 4bce13a..5f3d8f1 100644
--- a/cmd/vrun/vrun.go
+++ b/cmd/vrun/vrun.go
@@ -57,7 +57,7 @@
defer shutdown()
if len(args) == 0 {
- return cmd.UsageErrorf("vrun: no command specified")
+ args = []string{"bash", "--norc"}
}
principal, conn, err := createPrincipal(ctx)
if err != nil {
diff --git a/examples/rps/internal/auth.go b/examples/rps/internal/auth.go
new file mode 100644
index 0000000..a01b128
--- /dev/null
+++ b/examples/rps/internal/auth.go
@@ -0,0 +1,18 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "v.io/v23/security"
+ "v.io/v23/security/access"
+)
+
+func NewAuthorizer(fname string) security.Authorizer {
+ a, err := access.PermissionsAuthorizerFromFile(fname, access.TypicalTagType())
+ if err != nil {
+ panic(err)
+ }
+ return a
+}
diff --git a/examples/rps/rpsbot/impl_test.go b/examples/rps/rpsbot/impl_test.go
index d321a31..38796b4 100644
--- a/examples/rps/rpsbot/impl_test.go
+++ b/examples/rps/rpsbot/impl_test.go
@@ -38,7 +38,7 @@
if err != nil {
return fmt.Errorf("root failed: %v", err)
}
- mt, err := mounttablelib.NewMountTableDispatcher("")
+ mt, err := mounttablelib.NewMountTableDispatcher("", "mounttable")
if err != nil {
return fmt.Errorf("mounttablelib.NewMountTableDispatcher failed: %s", err)
}
diff --git a/examples/rps/rpsbot/main.go b/examples/rps/rpsbot/main.go
index 492be26..120f109 100644
--- a/examples/rps/rpsbot/main.go
+++ b/examples/rps/rpsbot/main.go
@@ -19,7 +19,6 @@
"v.io/x/lib/vlog"
"v.io/x/ref/examples/rps"
"v.io/x/ref/examples/rps/internal"
- "v.io/x/ref/lib/security/securityflag"
"v.io/x/ref/lib/signals"
_ "v.io/x/ref/profiles/roaming"
@@ -28,13 +27,14 @@
var (
name = flag.String("name", "", "identifier to publish itself as (defaults to user@hostname)")
numGames = flag.Int("num-games", -1, "number of games to play (-1 means unlimited)")
+ aclFile = flag.String("acl-file", "", "file containing the JSON-encoded ACL")
)
func main() {
ctx, shutdown := v23.Init()
defer shutdown()
- auth := securityflag.NewAuthorizerOrDie()
+ auth := internal.NewAuthorizer(*aclFile)
server, err := v23.NewServer(ctx)
if err != nil {
vlog.Fatalf("NewServer failed: %v", err)
diff --git a/examples/rps/rpsplayer/main.go b/examples/rps/rpsplayer/main.go
index 9f84800..5b50c17 100644
--- a/examples/rps/rpsplayer/main.go
+++ b/examples/rps/rpsplayer/main.go
@@ -24,13 +24,13 @@
"v.io/x/lib/vlog"
"v.io/x/ref/examples/rps"
"v.io/x/ref/examples/rps/internal"
- "v.io/x/ref/lib/security/securityflag"
_ "v.io/x/ref/profiles/roaming"
)
var (
- name = flag.String("name", "", "identifier to publish itself as (defaults to user@hostname)")
+ name = flag.String("name", "", "identifier to publish itself as (defaults to user@hostname)")
+ aclFile = flag.String("acl-file", "", "file containing the JSON-encoded ACL")
)
func main() {
@@ -122,7 +122,7 @@
if *name == "" {
*name = internal.CreateName()
}
- if err := server.Serve(fmt.Sprintf("rps/player/%s", *name), rps.PlayerServer(&impl{ch: ch}), securityflag.NewAuthorizerOrDie()); err != nil {
+ if err := server.Serve(fmt.Sprintf("rps/player/%s", *name), rps.PlayerServer(&impl{ch: ch}), internal.NewAuthorizer(*aclFile)); err != nil {
vlog.Fatalf("Serve failed: %v", err)
}
vlog.Infof("Listening on endpoint /%s", ep)
diff --git a/examples/rps/rpsscorekeeper/main.go b/examples/rps/rpsscorekeeper/main.go
index a5611e5..973609d 100644
--- a/examples/rps/rpsscorekeeper/main.go
+++ b/examples/rps/rpsscorekeeper/main.go
@@ -8,6 +8,7 @@
package main
import (
+ "flag"
"fmt"
"os"
@@ -17,11 +18,14 @@
"v.io/x/lib/vlog"
"v.io/x/ref/examples/rps"
"v.io/x/ref/examples/rps/internal"
- "v.io/x/ref/lib/security/securityflag"
_ "v.io/x/ref/profiles/roaming"
)
+var (
+ aclFile = flag.String("acl-file", "", "file containing the JSON-encoded ACL")
+)
+
type impl struct {
ch chan rps.ScoreCard
}
@@ -55,7 +59,7 @@
if err != nil {
vlog.Fatalf("os.Hostname failed: %v", err)
}
- if err := server.Serve(fmt.Sprintf("rps/scorekeeper/%s", hostname), rps.ScoreKeeperServer(rpsService), securityflag.NewAuthorizerOrDie()); err != nil {
+ if err := server.Serve(fmt.Sprintf("rps/scorekeeper/%s", hostname), rps.ScoreKeeperServer(rpsService), internal.NewAuthorizer(*aclFile)); err != nil {
vlog.Fatalf("Serve failed: %v", err)
}
vlog.Infof("Listening on endpoint /%s", ep)
diff --git a/profiles/internal/lib/reflectutil/all_test.go b/internal/reflectutil/all_test.go
similarity index 100%
rename from profiles/internal/lib/reflectutil/all_test.go
rename to internal/reflectutil/all_test.go
diff --git a/profiles/internal/lib/reflectutil/deepequal.go b/internal/reflectutil/deepequal.go
similarity index 100%
rename from profiles/internal/lib/reflectutil/deepequal.go
rename to internal/reflectutil/deepequal.go
diff --git a/profiles/internal/lib/reflectutil/doc.go b/internal/reflectutil/doc.go
similarity index 100%
rename from profiles/internal/lib/reflectutil/doc.go
rename to internal/reflectutil/doc.go
diff --git a/profiles/internal/lib/reflectutil/sort.go b/internal/reflectutil/sort.go
similarity index 100%
rename from profiles/internal/lib/reflectutil/sort.go
rename to internal/reflectutil/sort.go
diff --git a/lib/security/blessingroots.go b/lib/security/blessingroots.go
index 6ecd0ba..43417b2 100644
--- a/lib/security/blessingroots.go
+++ b/lib/security/blessingroots.go
@@ -20,10 +20,10 @@
persistedData SerializerReaderWriter
signer serialization.Signer
mu sync.RWMutex
- store map[string][]security.BlessingPattern // GUARDED_BY(mu)
+ state blessingRootsState // GUARDED_BY(mu)
}
-func storeMapKey(root security.PublicKey) (string, error) {
+func stateMapKey(root security.PublicKey) (string, error) {
rootBytes, err := root.MarshalBinary()
if err != nil {
return "", err
@@ -32,37 +32,37 @@
}
func (br *blessingRoots) Add(root security.PublicKey, pattern security.BlessingPattern) error {
- key, err := storeMapKey(root)
+ key, err := stateMapKey(root)
if err != nil {
return err
}
br.mu.Lock()
defer br.mu.Unlock()
- patterns := br.store[key]
+ patterns := br.state[key]
for _, p := range patterns {
if p == pattern {
return nil
}
}
- br.store[key] = append(patterns, pattern)
+ br.state[key] = append(patterns, pattern)
if err := br.save(); err != nil {
- br.store[key] = patterns[:len(patterns)-1]
+ br.state[key] = patterns[:len(patterns)-1]
return err
}
return nil
}
func (br *blessingRoots) Recognized(root security.PublicKey, blessing string) error {
- key, err := storeMapKey(root)
+ key, err := stateMapKey(root)
if err != nil {
return err
}
br.mu.RLock()
defer br.mu.RUnlock()
- for _, p := range br.store[key] {
+ for _, p := range br.state[key] {
if p.MatchedBy(blessing) {
return nil
}
@@ -82,7 +82,7 @@
const format = "%-47s %s\n"
b := bytes.NewBufferString(fmt.Sprintf(format, "Public key", "Pattern"))
var s rootSorter
- for keyBytes, patterns := range br.store {
+ for keyBytes, patterns := range br.state {
key, err := security.UnmarshalPublicKey([]byte(keyBytes))
if err != nil {
return fmt.Sprintf("failed to decode public key: %v", err)
@@ -115,7 +115,7 @@
if err != nil {
return err
}
- return encodeAndStore(br.store, data, signature, br.signer)
+ return encodeAndStore(br.state, data, signature, br.signer)
}
// newInMemoryBlessingRoots returns an in-memory security.BlessingRoots.
@@ -123,7 +123,7 @@
// The returned BlessingRoots is initialized with an empty set of keys.
func newInMemoryBlessingRoots() security.BlessingRoots {
return &blessingRoots{
- store: make(map[string][]security.BlessingPattern),
+ state: make(blessingRootsState),
}
}
@@ -135,7 +135,7 @@
return nil, verror.New(errDataOrSignerUnspecified, nil)
}
br := &blessingRoots{
- store: make(map[string][]security.BlessingPattern),
+ state: make(blessingRootsState),
persistedData: persistedData,
signer: signer,
}
@@ -144,7 +144,7 @@
return nil, err
}
if (data != nil) && (signature != nil) {
- if err := decodeFromStorage(&br.store, data, signature, br.signer.PublicKey()); err != nil {
+ if err := decodeFromStorage(&br.state, data, signature, br.signer.PublicKey()); err != nil {
return nil, err
}
}
diff --git a/lib/security/blessingstore.go b/lib/security/blessingstore.go
index 381ec87..8397294 100644
--- a/lib/security/blessingstore.go
+++ b/lib/security/blessingstore.go
@@ -27,24 +27,14 @@
errDataOrSignerUnspecified = verror.Register(pkgPath+".errDataOrSignerUnspecified", verror.NoRetry, "{1:}{2:} persisted data or signer is not specified{:_}")
)
-// TODO(ashankar,ataly): The only reason that Value is encapsulated in a struct
-// is for backward compatibility. We should probably restore "oldState" and
-// get rid of this.
+// TODO(ataly, ashankar): Get rid of this struct once we have switched all
+// credentials directories to the new serialization format.
type blessings struct {
Value security.Blessings
}
-func (w *blessings) Blessings() security.Blessings {
- if w == nil {
- return security.Blessings{}
- }
- return w.Value
-}
-
-func newWireBlessings(b security.Blessings) *blessings {
- return &blessings{Value: b}
-}
-
+// TODO(ataly, ashankar): Get rid of this struct once we have switched all
+// credentials directories to the new serialization format.
type state struct {
// Store maps BlessingPatterns to the Blessings object that is to be shared
// with peers which present blessings of their own that match the pattern.
@@ -62,7 +52,7 @@
serializer SerializerReaderWriter
signer serialization.Signer
mu sync.RWMutex
- state state // GUARDED_BY(mu)
+ state blessingStoreState // GUARDED_BY(mu)
}
func (bs *blessingStore) Set(blessings security.Blessings, forPeers security.BlessingPattern) (security.Blessings, error) {
@@ -74,21 +64,21 @@
}
bs.mu.Lock()
defer bs.mu.Unlock()
- old, hadold := bs.state.Store[forPeers]
+ old, hadold := bs.state.PeerBlessings[forPeers]
if !blessings.IsZero() {
- bs.state.Store[forPeers] = newWireBlessings(blessings)
+ bs.state.PeerBlessings[forPeers] = blessings
} else {
- delete(bs.state.Store, forPeers)
+ delete(bs.state.PeerBlessings, forPeers)
}
if err := bs.save(); err != nil {
if hadold {
- bs.state.Store[forPeers] = old
+ bs.state.PeerBlessings[forPeers] = old
} else {
- delete(bs.state.Store, forPeers)
+ delete(bs.state.PeerBlessings, forPeers)
}
return security.Blessings{}, err
}
- return old.Blessings(), nil
+ return old, nil
}
func (bs *blessingStore) ForPeer(peerBlessings ...string) security.Blessings {
@@ -96,9 +86,8 @@
defer bs.mu.RUnlock()
var ret security.Blessings
- for pattern, wb := range bs.state.Store {
+ for pattern, b := range bs.state.PeerBlessings {
if pattern.MatchedBy(peerBlessings...) {
- b := wb.Blessings()
if union, err := security.UnionOfBlessings(ret, b); err != nil {
vlog.Errorf("UnionOfBlessings(%v, %v) failed: %v, dropping the latter from BlessingStore.ForPeers(%v)", ret, b, err, peerBlessings)
} else {
@@ -112,10 +101,7 @@
func (bs *blessingStore) Default() security.Blessings {
bs.mu.RLock()
defer bs.mu.RUnlock()
- if bs.state.Default != nil {
- return bs.state.Default.Blessings()
- }
- return bs.ForPeer()
+ return bs.state.DefaultBlessings
}
func (bs *blessingStore) SetDefault(blessings security.Blessings) error {
@@ -124,10 +110,11 @@
if !blessings.IsZero() && !reflect.DeepEqual(blessings.PublicKey(), bs.publicKey) {
return verror.New(errStoreAddMismatch, nil)
}
- oldDefault := bs.state.Default
- bs.state.Default = newWireBlessings(blessings)
+ oldDefault := bs.state.DefaultBlessings
+ bs.state.DefaultBlessings = blessings
if err := bs.save(); err != nil {
- bs.state.Default = oldDefault
+ bs.state.DefaultBlessings = oldDefault
+ return err
}
return nil
}
@@ -142,8 +129,8 @@
func (bs *blessingStore) PeerBlessings() map[security.BlessingPattern]security.Blessings {
m := make(map[security.BlessingPattern]security.Blessings)
- for pattern, wb := range bs.state.Store {
- m[pattern] = wb.Blessings()
+ for pattern, b := range bs.state.PeerBlessings {
+ m[pattern] = b
}
return m
}
@@ -157,20 +144,19 @@
// <pattern> <blessings>
func (bs *blessingStore) DebugString() string {
const format = "%-30s %s\n"
- b := bytes.NewBufferString(fmt.Sprintf(format, "Default Blessings", bs.state.Default.Blessings()))
+ buff := bytes.NewBufferString(fmt.Sprintf(format, "Default Blessings", bs.state.DefaultBlessings))
- b.WriteString(fmt.Sprintf(format, "Peer pattern", "Blessings"))
+ buff.WriteString(fmt.Sprintf(format, "Peer pattern", "Blessings"))
- sorted := make([]string, 0, len(bs.state.Store))
- for k, _ := range bs.state.Store {
+ sorted := make([]string, 0, len(bs.state.PeerBlessings))
+ for k, _ := range bs.state.PeerBlessings {
sorted = append(sorted, string(k))
}
sort.Strings(sorted)
for _, pattern := range sorted {
- wb := bs.state.Store[security.BlessingPattern(pattern)]
- b.WriteString(fmt.Sprintf(format, pattern, wb.Blessings()))
+ buff.WriteString(fmt.Sprintf(format, pattern, bs.state.PeerBlessings[security.BlessingPattern(pattern)]))
}
- return b.String()
+ return buff.String()
}
func (bs *blessingStore) save() error {
@@ -191,50 +177,18 @@
func newInMemoryBlessingStore(publicKey security.PublicKey) security.BlessingStore {
return &blessingStore{
publicKey: publicKey,
- state: state{Store: make(map[security.BlessingPattern]*blessings)},
+ state: blessingStoreState{PeerBlessings: make(map[security.BlessingPattern]security.Blessings)},
}
}
-// TODO(ataly, ashankar): Get rid of this struct once we have switched all
-// credentials directories to the new serialization format. Or maybe we should
-// restore this and get rid of "type state". Probably should define the
-// serialization format in VDL!
-type oldState struct {
- Store map[security.BlessingPattern]security.Blessings
- Default security.Blessings
-}
-
-// TODO(ataly, ashankar): Get rid of this method once we have switched all
-// credentials directories to the new serialization format.
-func (bs *blessingStore) tryOldFormat() bool {
- var empty security.Blessings
- if len(bs.state.Store) == 0 {
- return bs.state.Default.Value.IsZero() || reflect.DeepEqual(bs.state.Default.Value, empty)
- }
- for _, wb := range bs.state.Store {
- if wb.Value.IsZero() {
- return true
- }
- }
- return false
-}
-
func (bs *blessingStore) verifyState() error {
- verifyBlessings := func(wb *blessings, key security.PublicKey) error {
- if b := wb.Blessings(); !reflect.DeepEqual(b.PublicKey(), key) {
- return verror.New(errBlessingsNotForKey, nil, b, key)
- }
- return nil
- }
- for _, wb := range bs.state.Store {
- if err := verifyBlessings(wb, bs.publicKey); err != nil {
- return err
+ for _, b := range bs.state.PeerBlessings {
+ if !reflect.DeepEqual(b.PublicKey(), bs.publicKey) {
+ return verror.New(errBlessingsNotForKey, nil, b, bs.publicKey)
}
}
- if bs.state.Default != nil {
- if err := verifyBlessings(bs.state.Default, bs.publicKey); err != nil {
- return err
- }
+ if !bs.state.DefaultBlessings.IsZero() && !reflect.DeepEqual(bs.state.DefaultBlessings.PublicKey(), bs.publicKey) {
+ return verror.New(errBlessingsNotForKey, nil, bs.state.DefaultBlessings, bs.publicKey)
}
return nil
}
@@ -249,24 +203,32 @@
if data == nil && signature == nil {
return nil
}
- var old oldState
+
+ var old state
if err := decodeFromStorage(&old, data, signature, bs.signer.PublicKey()); err != nil {
return err
}
- for p, wire := range old.Store {
- bs.state.Store[p] = &blessings{Value: wire}
+
+ for p, wb := range old.Store {
+ if wb != nil {
+ bs.state.PeerBlessings[p] = wb.Value
+ }
}
- bs.state.Default = &blessings{Value: old.Default}
+ if old.Default != nil {
+ bs.state.DefaultBlessings = old.Default.Value
+ }
if err := bs.verifyState(); err != nil {
return err
}
+
// Save the blessingstore in the new serialization format. This will ensure
// that all credentials directories in the old format will switch to the new
// format.
if err := bs.save(); err != nil {
return err
}
+
return nil
}
@@ -278,13 +240,10 @@
if data == nil && signature == nil {
return nil
}
- if err := decodeFromStorage(&bs.state, data, signature, bs.signer.PublicKey()); err == nil && !bs.tryOldFormat() {
- return bs.verifyState()
+ if err := decodeFromStorage(&bs.state, data, signature, bs.signer.PublicKey()); err != nil {
+ return bs.deserializeOld()
}
- if err := bs.deserializeOld(); err != nil {
- return err
- }
- return nil
+ return bs.verifyState()
}
// newPersistingBlessingStore returns a security.BlessingStore for a principal
@@ -296,7 +255,7 @@
}
bs := &blessingStore{
publicKey: signer.PublicKey(),
- state: state{Store: make(map[security.BlessingPattern]*blessings)},
+ state: blessingStoreState{PeerBlessings: make(map[security.BlessingPattern]security.Blessings)},
serializer: serializer,
signer: signer,
}
diff --git a/lib/security/blessingstore_test.go b/lib/security/blessingstore_test.go
index cd86c37..8177624 100644
--- a/lib/security/blessingstore_test.go
+++ b/lib/security/blessingstore_test.go
@@ -50,10 +50,7 @@
return nil
}
-func (t *storeTester) testSetDefault(s security.BlessingStore, currentDefault security.Blessings) error {
- if got := s.Default(); !reflect.DeepEqual(got, currentDefault) {
- return fmt.Errorf("Default(): got: %v, want: %v", got, currentDefault)
- }
+func (t *storeTester) testSetDefault(s security.BlessingStore) error {
if err := s.SetDefault(security.Blessings{}); err != nil {
return fmt.Errorf("SetDefault({}): %v", err)
}
@@ -140,7 +137,7 @@
if err := tester.testForPeer(s); err != nil {
t.Error(err)
}
- if err := tester.testSetDefault(s, tester.forAll); err != nil {
+ if err := tester.testSetDefault(s); err != nil {
t.Error(err)
}
}
@@ -164,7 +161,7 @@
if err := tester.testForPeer(s); err != nil {
t.Error(err)
}
- if err := tester.testSetDefault(s, tester.forAll); err != nil {
+ if err := tester.testSetDefault(s); err != nil {
t.Error(err)
}
diff --git a/lib/security/type.vdl b/lib/security/type.vdl
new file mode 100644
index 0000000..9d65571
--- /dev/null
+++ b/lib/security/type.vdl
@@ -0,0 +1,21 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package security
+
+import "v.io/v23/security"
+
+type blessingRootsState map[string][]security.BlessingPattern
+
+type blessingStoreState struct {
+ // PeerBlessings maps BlessingPatterns to the Blessings object that is to
+ // be shared with peers which present blessings of their own that match the
+ // pattern.
+ //
+ // All blessings bind to the same public key.
+ PeerBlessings map[security.BlessingPattern]security.WireBlessings
+ // DefaultBlessings is the default Blessings to be shared with peers for which
+ // no other information is available to select blessings.
+ DefaultBlessings security.WireBlessings
+}
diff --git a/lib/security/type.vdl.go b/lib/security/type.vdl.go
new file mode 100644
index 0000000..127698b
--- /dev/null
+++ b/lib/security/type.vdl.go
@@ -0,0 +1,45 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated by the vanadium vdl tool.
+// Source: type.vdl
+
+package security
+
+import (
+ // VDL system imports
+ "v.io/v23/vdl"
+
+ // VDL user imports
+ "v.io/v23/security"
+)
+
+type blessingRootsState map[string][]security.BlessingPattern
+
+func (blessingRootsState) __VDLReflect(struct {
+ Name string "v.io/x/ref/lib/security.blessingRootsState"
+}) {
+}
+
+type blessingStoreState struct {
+ // PeerBlessings maps BlessingPatterns to the Blessings object that is to
+ // be shared with peers which present blessings of their own that match the
+ // pattern.
+ //
+ // All blessings bind to the same public key.
+ PeerBlessings map[security.BlessingPattern]security.Blessings
+ // DefaultBlessings is the default Blessings to be shared with peers for which
+ // no other information is available to select blessings.
+ DefaultBlessings security.Blessings
+}
+
+func (blessingStoreState) __VDLReflect(struct {
+ Name string "v.io/x/ref/lib/security.blessingStoreState"
+}) {
+}
+
+func init() {
+ vdl.Register((*blessingRootsState)(nil))
+ vdl.Register((*blessingStoreState)(nil))
+}
diff --git a/lib/stats/sysstats/sysstats.go b/lib/stats/sysstats/sysstats.go
index 49ad421..eb38f65 100644
--- a/lib/stats/sysstats/sysstats.go
+++ b/lib/stats/sysstats/sysstats.go
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package sysstats exports system statistics, and updates them periodically.
-// The package does not export any symbols, but needs to be imported for its
-// side-effects.
+// Package sysstats implements system statistics and updates them periodically.
+//
+// Importing this package causes the stats to be exported via an init function.
package sysstats
import (
diff --git a/lib/vdl/codegen/javascript/gen.go b/lib/vdl/codegen/javascript/gen.go
index 454ac25..b6104a4 100644
--- a/lib/vdl/codegen/javascript/gen.go
+++ b/lib/vdl/codegen/javascript/gen.go
@@ -361,11 +361,11 @@
res += "\n"
if hasErrors(data.Pkg) {
if data.PathToCoreJS != "" {
- res += "var makeError = require('" + packagePrefix + "/errors/make-errors');\n"
- res += "var actions = require('" + packagePrefix + "/errors/actions');\n"
+ res += "var makeError = require('" + packagePrefix + "/verror/make-errors');\n"
+ res += "var actions = require('" + packagePrefix + "/verror/actions');\n"
} else {
- res += "var makeError = require('vanadium').errors.makeError;\n"
- res += "var actions = require('vanadium').errors.actions;\n"
+ res += "var makeError = require('vanadium').verror.makeError;\n"
+ res += "var actions = require('vanadium').verror.actions;\n"
}
}
diff --git a/profiles/internal/naming/namespace/acl.go b/profiles/internal/naming/namespace/acl.go
index cdb00ce..36b90bb 100644
--- a/profiles/internal/naming/namespace/acl.go
+++ b/profiles/internal/naming/namespace/acl.go
@@ -18,12 +18,7 @@
func setAccessListInMountTable(ctx *context.T, client rpc.Client, name string, acl access.Permissions, version, id string, opts []rpc.CallOpt) (s status) {
s.id = id
ctx, _ = context.WithTimeout(ctx, callTimeout)
- call, err := client.StartCall(ctx, name, "SetPermissions", []interface{}{acl, version}, append(opts, options.NoResolve{})...)
- s.err = err
- if err != nil {
- return
- }
- s.err = call.Finish()
+ s.err = client.Call(ctx, name, "SetPermissions", []interface{}{acl, version}, nil, append(opts, options.NoResolve{})...)
return
}
diff --git a/profiles/internal/naming/namespace/acl_test.go b/profiles/internal/naming/namespace/acl_test.go
index 3cc7cb5..92bec02 100644
--- a/profiles/internal/naming/namespace/acl_test.go
+++ b/profiles/internal/naming/namespace/acl_test.go
@@ -57,7 +57,7 @@
// Create a new mounttable service.
func newMT(t *testing.T, ctx *context.T) (func(), string) {
- estr, stopFunc, err := mounttablelib.StartServers(ctx, v23.GetListenSpec(ctx), "", "", "")
+ estr, stopFunc, err := mounttablelib.StartServers(ctx, v23.GetListenSpec(ctx), "", "", "", "mounttable")
if err != nil {
t.Fatalf("r.NewServer: %s", err)
}
diff --git a/profiles/internal/naming/namespace/all_test.go b/profiles/internal/naming/namespace/all_test.go
index 1349e42..0fa67bb 100644
--- a/profiles/internal/naming/namespace/all_test.go
+++ b/profiles/internal/naming/namespace/all_test.go
@@ -135,13 +135,9 @@
func knockKnock(t *testing.T, ctx *context.T, name string) {
client := v23.GetClient(ctx)
- call, err := client.StartCall(ctx, name, "KnockKnock", nil)
- if err != nil {
- boom(t, "StartCall failed: %s", err)
- }
var result string
- if err := call.Finish(&result); err != nil {
- boom(t, "Finish returned an error: %s", err)
+ if err := client.Call(ctx, name, "KnockKnock", nil, []interface{}{&result}); err != nil {
+ boom(t, "Call failed: %s", err)
}
if result != "Who's there?" {
boom(t, "Wrong result: %v", result)
@@ -184,7 +180,7 @@
}
func runMT(t *testing.T, ctx *context.T, mountPoint string) *serverEntry {
- mtd, err := mounttablelib.NewMountTableDispatcher("")
+ mtd, err := mounttablelib.NewMountTableDispatcher("", "mounttable")
if err != nil {
boom(t, "NewMountTableDispatcher returned error: %v", err)
}
diff --git a/profiles/internal/naming/namespace/glob.go b/profiles/internal/naming/namespace/glob.go
index 451dc5b..e81d052 100644
--- a/profiles/internal/naming/namespace/glob.go
+++ b/profiles/internal/naming/namespace/glob.go
@@ -146,7 +146,7 @@
return strings.Count(name, "/") + 1
}
-// globLoop fires off a go routine for each server and read backs replies.
+// globLoop fires off a go routine for each server and reads back replies.
func (ns *namespace) globLoop(ctx *context.T, e *naming.MountEntry, prefix string, pattern *glob.Glob, reply chan naming.GlobReply, tr *tracks, opts []rpc.CallOpt) {
defer close(reply)
@@ -159,77 +159,72 @@
// root of the search and the full pattern. It will be the first task fired off in the for
// loop that follows.
replies <- &task{me: e, pattern: pattern}
- inFlight := 0
+ replies <- nil
+ inFlight := 1
// Perform a parallel search of the name graph. Each task will send what it learns
// on the replies channel. If the reply is a mount point and the pattern is not completely
// fulfilled, a new task will be fired off to handle it.
- for {
- select {
- case t := <-replies:
- // A nil reply represents a successfully terminated task.
- // If no tasks are running, return.
- if t == nil {
- if inFlight--; inFlight <= 0 {
- return
- }
- continue
- }
-
- // We want to output this entry if there was a real error other than
- // "not a mount table".
- //
- // An error reply is also a terminated task.
- // If no tasks are running, return.
- if t.error != nil {
- if !notAnMT(t.error) {
- reply <- naming.GlobReplyError{naming.GlobError{Name: naming.Join(prefix, t.me.Name), Error: t.error}}
- }
- if inFlight--; inFlight <= 0 {
- return
- }
- continue
- }
-
- // If this is just an error from the mount table, pass it on.
- if t.er != nil {
- x := *t.er
- x.Name = naming.Join(prefix, x.Name)
- reply <- &naming.GlobReplyError{x}
- continue
- }
-
- // Get the pattern elements below the current path.
- suffix := pattern.Split(depth(t.me.Name))
-
- // If we've satisfied the request and this isn't the root,
- // reply to the caller.
- if suffix.Len() == 0 && t.depth != 0 {
- x := *t.me
- x.Name = naming.Join(prefix, x.Name)
- reply <- &naming.GlobReplyEntry{x}
- }
-
- // If the pattern is finished (so we're only querying about the root on the
- // remote server) and the server is not another MT, then we needn't send the
- // query on since we know the server will not supply a new address for the
- // current name.
- if suffix.Finished() {
- if !t.me.ServesMountTable {
- continue
- }
- }
-
- // If this is restricted recursive and not a mount table, don't descend into it.
- if suffix.Restricted() && suffix.Len() == 0 && !t.me.ServesMountTable {
- continue
- }
-
- // Perform a glob at the next server.
- inFlight++
- t.pattern = suffix
- go ns.globAtServer(ctx, t, replies, tr, opts)
+ for inFlight != 0 {
+ t := <-replies
+ // A nil reply represents a successfully terminated task.
+ // If no tasks are running, return.
+ if t == nil {
+ inFlight--
+ continue
}
+
+ // We want to output this entry if there was a real error other than
+ // "not a mount table".
+ //
+ // An error reply is also a terminated task.
+ // If no tasks are running, return.
+ if t.error != nil {
+ if !notAnMT(t.error) {
+ reply <- naming.GlobReplyError{naming.GlobError{Name: naming.Join(prefix, t.me.Name), Error: t.error}}
+ }
+ inFlight--
+ continue
+ }
+
+ // If this is just an error from the mount table, pass it on.
+ if t.er != nil {
+ x := *t.er
+ x.Name = naming.Join(prefix, x.Name)
+ reply <- &naming.GlobReplyError{x}
+ continue
+ }
+
+ // Get the pattern elements below the current path.
+ suffix := pattern.Split(depth(t.me.Name))
+
+ // If we've satisfied the request and this isn't the root,
+ // reply to the caller.
+ if suffix.Len() == 0 && t.depth != 0 {
+ x := *t.me
+ x.Name = naming.Join(prefix, x.Name)
+ reply <- &naming.GlobReplyEntry{x}
+ }
+
+ // If the pattern is finished (so we're only querying about the root on the
+ // remote server) and the server is not another MT, then we needn't send the
+ // query on since we know the server will not supply a new address for the
+ // current name.
+ if suffix.Finished() {
+ if !t.me.ServesMountTable {
+ continue
+ }
+ }
+
+ // If this is restricted recursive and not a mount table, don't descend into it.
+ if suffix.Restricted() && suffix.Len() == 0 && !t.me.ServesMountTable {
+ continue
+ }
+
+ // Perform a glob at the next server.
+ inFlight++
+ t.pattern = suffix
+ go ns.globAtServer(ctx, t, replies, tr, opts)
}
}
diff --git a/profiles/internal/naming/namespace/mount.go b/profiles/internal/naming/namespace/mount.go
index 6e9d87b..b42ddc4 100644
--- a/profiles/internal/naming/namespace/mount.go
+++ b/profiles/internal/naming/namespace/mount.go
@@ -20,12 +20,7 @@
func mountIntoMountTable(ctx *context.T, client rpc.Client, name, server string, ttl time.Duration, flags naming.MountFlag, id string, opts ...rpc.CallOpt) (s status) {
s.id = id
ctx, _ = context.WithTimeout(ctx, callTimeout)
- call, err := client.StartCall(ctx, name, "Mount", []interface{}{server, uint32(ttl.Seconds()), flags}, append(opts, options.NoResolve{})...)
- s.err = err
- if err != nil {
- return
- }
- s.err = call.Finish()
+ s.err = client.Call(ctx, name, "Mount", []interface{}{server, uint32(ttl.Seconds()), flags}, nil, append(opts, options.NoResolve{})...)
return
}
@@ -66,12 +61,7 @@
func unmountFromMountTable(ctx *context.T, client rpc.Client, name, server string, id string, opts ...rpc.CallOpt) (s status) {
s.id = id
ctx, _ = context.WithTimeout(ctx, callTimeout)
- call, err := client.StartCall(ctx, name, "Unmount", []interface{}{server}, append(opts, options.NoResolve{})...)
- s.err = err
- if err != nil {
- return
- }
- s.err = call.Finish()
+ s.err = client.Call(ctx, name, "Unmount", []interface{}{server}, nil, append(opts, options.NoResolve{})...)
return
}
@@ -93,12 +83,7 @@
func deleteFromMountTable(ctx *context.T, client rpc.Client, name string, deleteSubtree bool, id string, opts ...rpc.CallOpt) (s status) {
s.id = id
ctx, _ = context.WithTimeout(ctx, callTimeout)
- call, err := client.StartCall(ctx, name, "Delete", []interface{}{deleteSubtree}, append(opts, options.NoResolve{})...)
- s.err = err
- if err != nil {
- return
- }
- s.err = call.Finish()
+ s.err = client.Call(ctx, name, "Delete", []interface{}{deleteSubtree}, nil, append(opts, options.NoResolve{})...)
return
}
diff --git a/profiles/internal/naming/namespace/namespace.go b/profiles/internal/naming/namespace/namespace.go
index 13311bd..bda109e 100644
--- a/profiles/internal/naming/namespace/namespace.go
+++ b/profiles/internal/naming/namespace/namespace.go
@@ -158,7 +158,7 @@
case verror.ErrBadArg.ID:
// This should cover "rpc: wrong number of in-args".
return true
- case verror.ErrNoExist.ID:
+ case verror.ErrNoExist.ID, verror.ErrUnknownMethod.ID, verror.ErrUnknownSuffix.ID:
// This should cover "rpc: unknown method", "rpc: dispatcher not
// found", and dispatcher Lookup not found errors.
return true
diff --git a/profiles/internal/naming/namespace/resolve.go b/profiles/internal/naming/namespace/resolve.go
index a813577..96b08f8 100644
--- a/profiles/internal/naming/namespace/resolve.go
+++ b/profiles/internal/naming/namespace/resolve.go
@@ -35,21 +35,15 @@
// been set.
callCtx, _ = context.WithTimeout(ctx, callTimeout)
}
- call, err := client.StartCall(callCtx, name, "ResolveStep", nil, opts...)
- if err != nil {
- finalErr = err
- vlog.VI(2).Infof("ResolveStep.StartCall %s failed: %s", name, err)
- continue
- }
entry := new(naming.MountEntry)
- if err := call.Finish(entry); err != nil {
+ if err := client.Call(callCtx, name, "ResolveStep", nil, []interface{}{entry}, opts...); err != nil {
// If any replica says the name doesn't exist, return that fact.
if verror.ErrorID(err) == naming.ErrNoSuchName.ID || verror.ErrorID(err) == naming.ErrNoSuchNameRoot.ID {
return nil, err
}
// Keep track of the final error and continue with next server.
finalErr = err
- vlog.VI(2).Infof("ResolveStep %s failed: %s", name, err)
+ vlog.VI(2).Infof("resolveAMT: Call %s failed: %s", name, err)
continue
}
// Add result to cache.
@@ -57,6 +51,7 @@
vlog.VI(2).Infof("resolveAMT %s -> %v", name, entry)
return entry, nil
}
+ vlog.VI(2).Infof("resolveAMT %v -> %v", e.Servers, finalErr)
return nil, finalErr
}
diff --git a/profiles/internal/rpc/blessings_cache.go b/profiles/internal/rpc/blessings_cache.go
index 2eca000..116f638 100644
--- a/profiles/internal/rpc/blessings_cache.go
+++ b/profiles/internal/rpc/blessings_cache.go
@@ -6,14 +6,24 @@
import (
"crypto/sha256"
- "fmt"
"sync"
"v.io/v23/rpc"
"v.io/v23/security"
+ "v.io/v23/verror"
+
"v.io/x/ref/profiles/internal/rpc/stream"
)
+var (
+ // These errors are intended to be used as arguments to higher
+ // level errors and hence {1}{2} is omitted from their format
+ // strings to avoid repeating these n-times in the final error
+ // message visible to the user.
+ errMissingBlessingsKey = reg(".blessingsKey", "key {3} was not in blessings cache")
+ errInvalidClientBlessings = reg(".invalidClientBlessings", "client sent invalid Blessings")
+)
+
// clientEncodeBlessings gets or inserts the blessings into the cache.
func clientEncodeBlessings(cache stream.VCDataCache, blessings security.Blessings) rpc.BlessingsRequest {
blessingsCacheAny := cache.GetOrInsert(clientBlessingsCacheKey{}, newClientBlessingsCache)
@@ -146,7 +156,7 @@
cached, exists := c.m[req.Key]
c.RUnlock()
if !exists {
- return security.Blessings{}, fmt.Errorf("rpc: key was not in the cache")
+ return security.Blessings{}, verror.New(errMissingBlessingsKey, nil, req.Key)
}
stats.recordBlessingCache(true)
return cached, nil
@@ -160,7 +170,7 @@
defer c.Unlock()
if cached, exists := c.m[req.Key]; exists {
if !cached.Equivalent(recv) {
- return security.Blessings{}, fmt.Errorf("client sent invalid Blessings")
+ return security.Blessings{}, verror.New(errInvalidClientBlessings, nil)
}
return cached, nil
}
diff --git a/profiles/internal/rpc/client.go b/profiles/internal/rpc/client.go
index 84f6f4e..728d527 100644
--- a/profiles/internal/rpc/client.go
+++ b/profiles/internal/rpc/client.go
@@ -10,7 +10,6 @@
"math/rand"
"net"
"reflect"
- "strings"
"sync"
"time"
@@ -37,51 +36,42 @@
const pkgPath = "v.io/x/ref/profiles/internal/rpc"
+func reg(id, msg string) verror.IDAction {
+ // Note: the error action is never used and is instead computed
+ // at a higher level. The errors here are purely for informational
+ // purposes.
+ return verror.Register(verror.ID(pkgPath+id), verror.NoRetry, msg)
+}
+
var (
- // Local errs that are used to provide details to the public ones.
- errClientCloseAlreadyCalled = verror.Register(pkgPath+".closeAlreadyCalled", verror.NoRetry,
- "rpc.Client.Close has already been called")
+ // These errors are intended to be used as arguments to higher
+ // level errors and hence {1}{2} is omitted from their format
+ // strings to avoid repeating these n-times in the final error
+ // message visible to the user.
+ errClientCloseAlreadyCalled = reg(".errCloseAlreadyCalled", "rpc.Client.Close has already been called")
+ errClientFinishAlreadyCalled = reg(".errFinishAlreadyCalled", "rpc.ClientCall.Finish has already been called")
+ errNonRootedName = reg(".errNonRootedName", "{3} does not appear to contain an address")
+ errInvalidEndpoint = reg(".errInvalidEndpoint", "failed to parse endpoint")
+ errIncompatibleEndpoint = reg(".errIncompatibleEndpoint", "incompatible endpoint")
+ errVomEncoder = reg(".errVomEncoder", "failed to create vom encoder{:3}")
+ errVomDecoder = reg(".errVomDecoder", "failed to create vom decoder{:3}")
+ errRequestEncoding = reg(".errRequestEncoding", "failed to encode request {3}{:4}")
+ errDischargeEncoding = reg(".errDischargeEncoding", "failed to encode discharges {:3}")
+ errBlessingEncoding = reg(".errBlessingEncoding", "failed to encode blessing {3}{:4}")
+ errArgEncoding = reg(".errArgEncoding", "failed to encode arg #{3}{:4:}")
+ errMismatchedResults = reg(".errMismatchedResults", "got {3} results, but want {4}")
+ errResultDecoding = reg(".errResultDecoding", "failed to decode result #{3}{:4}")
+ errResponseDecoding = reg(".errResponseDecoding", "failed to decode response{:3}")
+ errRemainingStreamResults = reg(".errRemaingStreamResults", "stream closed with remaining stream results")
+ errNoBlessingsForPeer = reg(".errNoBlessingsForPeer", "no blessings tagged for peer {3}{:4}")
+ errBlessingGrant = reg(".errBlessingGrant", "failed to grant blessing to server with blessings{:3}")
+ errBlessingAdd = reg(".errBlessingAdd", "failed to add blessing granted to server{:3}")
+ errServerAuthorizeFailed = reg(".errServerAuthorizedFailed", "failed to authorize flow with remote blessings{:3} {:4}")
- errClientFinishAlreadyCalled = verror.Register(pkgPath+".finishAlreadyCalled", verror.NoRetry, "rpc.ClientCall.Finish has already been called")
+ errPrepareBlessingsAndDischarges = reg(".prepareBlessingsAndDischarges", "failed to prepare blessings and discharges: remote blessings{:3} {:4}")
- errNonRootedName = verror.Register(pkgPath+".nonRootedName", verror.NoRetry, "{3} does not appear to contain an address")
-
- errInvalidEndpoint = verror.Register(pkgPath+".invalidEndpoint", verror.RetryRefetch, "{3} is an invalid endpoint")
-
- errIncompatibleEndpoint = verror.Register(pkgPath+".invalidEndpoint", verror.RetryRefetch, "{3} is an incompatible endpoint")
-
- errNotTrusted = verror.Register(pkgPath+".notTrusted", verror.NoRetry, "name {3} not trusted using blessings {4}{:5}")
-
- errAuthError = verror.Register(pkgPath+".authError", verror.RetryRefetch, "{3}")
-
- errSystemRetry = verror.Register(pkgPath+".sysErrorRetryConnection", verror.RetryConnection, "{:3:}")
-
- errVomEncoder = verror.Register(pkgPath+".vomEncoder", verror.NoRetry, "failed to create vom encoder {:3}")
- errVomDecoder = verror.Register(pkgPath+".vomDecoder", verror.NoRetry, "failed to create vom decoder {:3}")
-
- errRequestEncoding = verror.Register(pkgPath+".requestEncoding", verror.NoRetry, "failed to encode request {3}{:4}")
-
- errDischargeEncoding = verror.Register(pkgPath+".dischargeEncoding", verror.NoRetry, "failed to encode discharges {:3}")
-
- errBlessingEncoding = verror.Register(pkgPath+".blessingEncoding", verror.NoRetry, "failed to encode blessing {3}{:4}")
-
- errArgEncoding = verror.Register(pkgPath+".argEncoding", verror.NoRetry, "failed to encode arg #{3}{:4:}")
-
- errMismatchedResults = verror.Register(pkgPath+".mismatchedResults", verror.NoRetry, "got {3} results, but want {4}")
-
- errResultDecoding = verror.Register(pkgPath+".resultDecoding", verror.NoRetry, "failed to decode result #{3}{:4}")
-
- errResponseDecoding = verror.Register(pkgPath+".responseDecoding", verror.NoRetry, "failed to decode response{:3}")
-
- errRemainingStreamResults = verror.Register(pkgPath+".remaingStreamResults", verror.NoRetry, "stream closed with remaining stream results")
-
- errNoBlessingsForPeer = verror.Register(pkgPath+".noBlessingsForPeer", verror.NoRetry, "no blessings tagged for peer {3}{:4}")
-
- errBlessingGrant = verror.Register(pkgPath+".blessingGrantFailed", verror.NoRetry, "failed to grant blessing to server with blessings {3}{:4}")
-
- errBlessingAdd = verror.Register(pkgPath+".blessingAddFailed", verror.NoRetry, "failed to add blessing granted to server {3}{:4}")
-
- errNoPrincipal = verror.Register(pkgPath+".noPrincipal", verror.NoRetry, "principal required for secure connections")
+ errDischargeImpetus = reg(".errDischargeImpetus", "couldn't make discharge impetus{:3}")
+ errNoPrincipal = reg(".errNoPrincipal", "principal required for secure connections")
)
type client struct {
@@ -137,12 +127,18 @@
return c, nil
}
-func (c *client) createFlow(ctx *context.T, principal security.Principal, ep naming.Endpoint, vcOpts []stream.VCOpt) (stream.Flow, error) {
+func (c *client) createFlow(ctx *context.T, principal security.Principal, ep naming.Endpoint, vcOpts []stream.VCOpt) (stream.Flow, *verror.SubErr) {
c.vcMapMu.Lock()
defer c.vcMapMu.Unlock()
- if c.vcMap == nil {
- return nil, verror.New(errClientCloseAlreadyCalled, ctx)
+
+ suberr := func(err error) *verror.SubErr {
+ return &verror.SubErr{Err: err, Options: verror.Print}
}
+
+ if c.vcMap == nil {
+ return nil, suberr(verror.New(errClientCloseAlreadyCalled, ctx))
+ }
+
vcKey := vcMapKey{endpoint: ep.String()}
if principal != nil {
vcKey.clientPublicKey = principal.PublicKey().String()
@@ -161,19 +157,14 @@
}
sm := c.streamMgr
c.vcMapMu.Unlock()
-
vc, err := sm.Dial(ep, principal, vcOpts...)
c.vcMapMu.Lock()
if err != nil {
- if strings.Contains(err.Error(), "authentication failed") {
- return nil, verror.New(errAuthError, ctx, err)
- } else {
- return nil, verror.New(errSystemRetry, ctx, err)
- }
+ return nil, suberr(err)
}
if c.vcMap == nil {
sm.ShutdownEndpoint(ep)
- return nil, verror.New(errClientCloseAlreadyCalled, ctx)
+ return nil, suberr(verror.New(errClientCloseAlreadyCalled, ctx))
}
if othervc, exists := c.vcMap[vcKey]; exists {
vc = othervc.vc
@@ -182,7 +173,11 @@
} else {
c.vcMap[vcKey] = &vcInfo{vc: vc, remoteEP: ep}
}
- return vc.Connect()
+ flow, err := vc.Connect()
+ if err != nil {
+ return nil, suberr(err)
+ }
+ return flow, nil
}
// A randomized exponential backoff. The randomness deters error convoys
@@ -232,13 +227,13 @@
// RetryConnection and RetryRefetch required actions by the client before
// retrying.
if !shouldRetryBackoff(verror.Action(lastErr), deadline, opts) {
- vlog.Infof("Cannot retry after error: %s", lastErr)
+ vlog.VI(4).Infof("Cannot retry after error: %s", lastErr)
break
}
if !backoff(retries, deadline) {
break
}
- vlog.Infof("Retrying due to error: %s", lastErr)
+ vlog.VI(4).Infof("Retrying due to error: %s", lastErr)
}
return lastErr
}
@@ -341,11 +336,11 @@
type serverStatus struct {
index int
- suffix string
+ server, suffix string
flow stream.Flow
blessings []string // authorized server blessings
rejectedBlessings []security.RejectedBlessing // rejected server blessings
- err error
+ serverErr *verror.SubErr
}
// tryCreateFlow attempts to establish a Flow to "server" (which must be a
@@ -356,7 +351,7 @@
// flow itself.
// TODO(cnicolaou): implement real, configurable load balancing.
func (c *client) tryCreateFlow(ctx *context.T, principal security.Principal, index int, name, server, method string, auth security.Authorizer, ch chan<- *serverStatus, vcOpts []stream.VCOpt) {
- status := &serverStatus{index: index}
+ status := &serverStatus{index: index, server: server}
var span vtrace.Span
ctx, span = vtrace.SetNewSpan(ctx, "<client>tryCreateFlow")
span.Annotatef("address:%v", server)
@@ -365,24 +360,33 @@
span.Finish()
}()
+ suberr := func(err error) *verror.SubErr {
+ return &verror.SubErr{
+ Name: fmt.Sprintf("%s:%s.%s", server, name, method),
+ Err: err,
+ Options: verror.Print,
+ }
+ }
+
address, suffix := naming.SplitAddressName(server)
if len(address) == 0 {
- status.err = verror.New(errNonRootedName, ctx, server)
+ status.serverErr = suberr(verror.New(errNonRootedName, ctx, server))
return
}
status.suffix = suffix
ep, err := inaming.NewEndpoint(address)
if err != nil {
- status.err = verror.New(errInvalidEndpoint, ctx, address)
+ status.serverErr = suberr(verror.New(errInvalidEndpoint, ctx))
return
}
if err = version.CheckCompatibility(ep); err != nil {
- status.err = verror.New(errIncompatibleEndpoint, ctx, ep)
+ status.serverErr = suberr(verror.New(errIncompatibleEndpoint, ctx))
return
}
- if status.flow, status.err = c.createFlow(ctx, principal, ep, append(vcOpts, &vc.ServerAuthorizer{Suffix: status.suffix, Method: method, Policy: auth})); status.err != nil {
- vlog.VI(2).Infof("rpc: Failed to create Flow with %v: %v", server, status.err)
+ if status.flow, status.serverErr = c.createFlow(ctx, principal, ep, append(vcOpts, &vc.ServerAuthorizer{Suffix: status.suffix, Method: method, Policy: auth})); status.serverErr != nil {
+ status.serverErr.Name = fmt.Sprintf("%s:%s.%s", server, name, method)
+ vlog.VI(2).Infof("rpc: Failed to create Flow with %v: %v", server, status.serverErr.Err)
return
}
@@ -405,8 +409,10 @@
})
ctx = security.SetCall(ctx, seccall)
if err := auth.Authorize(ctx); err != nil {
- status.err = verror.New(verror.ErrNotTrusted, ctx, name, status.flow.RemoteBlessings(), err)
- vlog.VI(2).Infof("rpc: Failed to authorize Flow created with server %v: %s", server, status.err)
+ // We will test for errServerAuthorizeFailed in failedTryCall and report
+ // verror.ErrNotTrusted
+ status.serverErr = suberr(verror.New(errServerAuthorizeFailed, ctx, status.flow.RemoteBlessings(), err))
+ vlog.VI(2).Infof("rpc: Failed to authorize Flow created with server %v: %s", server, status.serverErr.Err)
status.flow.Close()
status.flow = nil
return
@@ -425,7 +431,6 @@
var blessingPattern security.BlessingPattern
blessingPattern, name = security.SplitPatternName(name)
if resolved, err = c.ns.Resolve(ctx, name, getNamespaceOpts(opts)...); err != nil {
- vlog.Errorf("Resolve: %v", err)
// We always return NoServers as the error so that the caller knows
// that's ok to retry the operation since the name may be registered
// in the near future.
@@ -537,15 +542,19 @@
doneChan := ctx.Done()
r.flow.SetDeadline(doneChan)
- // TODO(cnicolaou): continue verror testing from here.
fc, err := newFlowClient(ctx, r.flow, r.blessings, dc)
if err != nil {
return nil, verror.NoRetry, err
}
if err := fc.prepareBlessingsAndDischarges(ctx, method, r.suffix, args, r.rejectedBlessings, opts); err != nil {
- r.err = verror.New(verror.ErrNotTrusted, ctx, name, r.flow.RemoteBlessings(), err)
- vlog.VI(2).Infof("rpc: err: %s", r.err)
+ n := fmt.Sprintf("%s:%s.%s", r.server, name, method)
+ r.serverErr = &verror.SubErr{
+ Name: n,
+ Options: verror.Print,
+ Err: verror.New(verror.ErrNotTrusted, nil, verror.New(errPrepareBlessingsAndDischarges, ctx, r.flow.RemoteBlessings(), err)),
+ }
+ vlog.VI(2).Infof("rpc: err: %s", r.serverErr)
r.flow.Close()
r.flow = nil
continue
@@ -622,17 +631,20 @@
func (c *client) failedTryCall(ctx *context.T, name, method string, responses []*serverStatus, ch chan *serverStatus) (rpc.ClientCall, verror.ActionCode, error) {
go cleanupTryCall(nil, responses, ch)
c.ns.FlushCacheEntry(name)
- noconn, untrusted := []string{}, []string{}
+ suberrs := []verror.SubErr{}
+ topLevelError := verror.ErrNoServers
+ topLevelAction := verror.RetryRefetch
for _, r := range responses {
- if r != nil && r.err != nil {
- switch {
- case verror.ErrorID(r.err) == verror.ErrNotTrusted.ID || verror.ErrorID(r.err) == errAuthError.ID:
- untrusted = append(untrusted, "("+r.err.Error()+") ")
- default:
- noconn = append(noconn, "("+r.err.Error()+") ")
+ if r != nil && r.serverErr != nil && r.serverErr.Err != nil {
+ switch verror.ErrorID(r.serverErr.Err) {
+ case stream.ErrNotTrusted.ID, verror.ErrNotTrusted.ID, errServerAuthorizeFailed.ID:
+ topLevelError = verror.ErrNotTrusted
+ topLevelAction = verror.NoRetry
}
+ suberrs = append(suberrs, *r.serverErr)
}
}
+
// TODO(cnicolaou): we get system errors for things like dialing using
// the 'ws' protocol which can never succeed even if we retry the connection,
// hence we return RetryRefetch below except for the case where the servers
@@ -640,17 +652,8 @@
// TODO(cnicolaou): implementing at-most-once rpc semantics in the future
// will require thinking through all of the cases where the RPC can
// be retried by the client whilst it's actually being executed on the
- // client.
- switch {
- case len(untrusted) > 0 && len(noconn) > 0:
- return nil, verror.RetryRefetch, verror.New(verror.ErrNoServersAndAuth, ctx, append(noconn, untrusted...))
- case len(noconn) > 0:
- return nil, verror.RetryRefetch, verror.New(verror.ErrNoServers, ctx, noconn)
- case len(untrusted) > 0:
- return nil, verror.NoRetry, verror.New(verror.ErrNotTrusted, ctx, untrusted)
- default:
- return nil, verror.RetryRefetch, verror.New(verror.ErrTimeout, ctx)
- }
+ // server.
+ return nil, topLevelAction, verror.AddSubErrs(verror.New(topLevelError, ctx), ctx, suberrs...)
}
// prepareBlessingsAndDischarges prepares blessings and discharges for
@@ -679,8 +682,7 @@
if !fc.blessings.IsZero() && fc.dc != nil {
impetus, err := mkDischargeImpetus(fc.server, method, args)
if err != nil {
- // TODO(toddw): Fix up the internal error.
- return verror.New(verror.ErrBadProtocol, fc.ctx, fmt.Errorf("couldn't make discharge impetus: %v", err))
+ return verror.New(verror.ErrBadProtocol, fc.ctx, verror.New(errDischargeImpetus, nil, err))
}
fc.discharges = fc.dc.PrepareDischarges(fc.ctx, fc.blessings.ThirdPartyCaveats(), impetus)
}
@@ -722,9 +724,9 @@
switch v := o.(type) {
case rpc.Granter:
if b, err := v.Grant(ctx); err != nil {
- return verror.New(errBlessingGrant, fc.ctx, fc.server, err)
+ return verror.New(errBlessingGrant, fc.ctx, err)
} else if fc.grantedBlessings, err = security.UnionOfBlessings(fc.grantedBlessings, b); err != nil {
- return verror.New(errBlessingAdd, fc.ctx, fc.server, err)
+ return verror.New(errBlessingAdd, fc.ctx, err)
}
}
}
@@ -776,21 +778,24 @@
typeenc := flow.VCDataCache().Get(vc.TypeEncoderKey{})
if typeenc == nil {
if fc.enc, err = vom.NewEncoder(flow); err != nil {
- berr := verror.New(verror.ErrBadProtocol, fc.ctx, verror.New(errVomEncoder, fc.ctx, err))
+ // In practice, this will never fail because of a networking
+ // problem since the encoder writes the 'magic byte' which
+ // will be buffered and not written to the network immediately.
+ berr := verror.AddSubErrs(verror.New(errVomEncoder, fc.ctx), fc.ctx, verror.SubErr{Err: err})
return nil, fc.close(berr)
}
if fc.dec, err = vom.NewDecoder(flow); err != nil {
- berr := verror.New(verror.ErrBadProtocol, fc.ctx, verror.New(errVomDecoder, fc.ctx, err))
+ berr := verror.AddSubErrs(verror.New(errVomDecoder, fc.ctx), fc.ctx, verror.SubErr{Err: err})
return nil, fc.close(berr)
}
} else {
if fc.enc, err = vom.NewEncoderWithTypeEncoder(flow, typeenc.(*vom.TypeEncoder)); err != nil {
- berr := verror.New(verror.ErrBadProtocol, fc.ctx, verror.New(errVomEncoder, fc.ctx, err))
+ berr := verror.AddSubErrs(verror.New(errVomEncoder, fc.ctx), fc.ctx, verror.SubErr{Err: err})
return nil, fc.close(berr)
}
typedec := flow.VCDataCache().Get(vc.TypeDecoderKey{})
if fc.dec, err = vom.NewDecoderWithTypeDecoder(flow, typedec.(*vom.TypeDecoder)); err != nil {
- berr := verror.New(verror.ErrBadProtocol, fc.ctx, verror.New(errVomDecoder, fc.ctx, err))
+ berr := verror.AddSubErrs(verror.New(errVomDecoder, fc.ctx), fc.ctx, verror.SubErr{Err: err})
return nil, fc.close(berr)
}
}
@@ -798,18 +803,13 @@
}
func (fc *flowClient) close(err error) error {
- if _, ok := err.(verror.E); err != nil && !ok {
- // TODO(cnicolaou): remove this once the second CL in this
- // series of CLs to use verror consistently is complete.
- vlog.Infof("WARNING: expected %v to be a verror", err)
- }
subErr := verror.SubErr{Err: err, Options: verror.Print}
subErr.Name = "remote=" + fc.flow.RemoteEndpoint().String()
if cerr := fc.flow.Close(); cerr != nil && err == nil {
return verror.New(verror.ErrInternal, fc.ctx, subErr)
}
- switch {
- case verror.ErrorID(err) == verror.ErrBadProtocol.ID:
+ switch verror.ErrorID(err) {
+ case verror.ErrBadProtocol.ID, errRequestEncoding.ID, errArgEncoding.ID:
switch fc.ctx.Err() {
case context.DeadlineExceeded:
timeout := verror.New(verror.ErrTimeout, fc.ctx)
@@ -820,11 +820,13 @@
err := verror.AddSubErrs(canceled, fc.ctx, subErr)
return err
}
- case verror.ErrorID(err) == verror.ErrTimeout.ID:
+ case errVomEncoder.ID, errVomDecoder.ID:
+ badProtocol := verror.New(verror.ErrBadProtocol, fc.ctx)
+ err = verror.AddSubErrs(badProtocol, fc.ctx, subErr)
+ case verror.ErrTimeout.ID:
// Canceled trumps timeout.
if fc.ctx.Err() == context.Canceled {
- // TODO(cnicolaou,m3b): reintroduce 'append' when the new verror API is done.
- return verror.New(verror.ErrCanceled, fc.ctx, err.Error())
+ return verror.AddSubErrs(verror.New(verror.ErrCanceled, fc.ctx), fc.ctx, subErr)
}
}
return err
@@ -847,12 +849,12 @@
TraceRequest: vtrace.GetRequest(fc.ctx),
}
if err := fc.enc.Encode(req); err != nil {
- berr := verror.New(verror.ErrBadProtocol, fc.ctx, verror.New(errRequestEncoding, fc.ctx, fmt.Sprintf("%#v", req), err))
+ berr := verror.New(errRequestEncoding, fc.ctx, fmt.Sprintf("%#v", req), err)
return fc.close(berr)
}
for ix, arg := range args {
if err := fc.enc.Encode(arg); err != nil {
- berr := verror.New(verror.ErrBadProtocol, fc.ctx, verror.New(errArgEncoding, fc.ctx, ix, err))
+ berr := verror.New(errArgEncoding, fc.ctx, ix, err)
return fc.close(berr)
}
}
@@ -1008,9 +1010,8 @@
// Incorporate any VTrace info that was returned.
vtrace.GetStore(fc.ctx).Merge(fc.response.TraceResponse)
if fc.response.Error != nil {
- // TODO(cnicolaou): remove verror.ErrNoAccess with verror version
- // when rpc.Server is converted.
- if verror.ErrorID(fc.response.Error) == verror.ErrNoAccess.ID && fc.dc != nil {
+ id := verror.ErrorID(fc.response.Error)
+ if id == verror.ErrNoAccess.ID && fc.dc != nil {
// In case the error was caused by a bad discharge, we do not want to get stuck
// with retrying again and again with this discharge. As there is no direct way
// to detect it, we conservatively flush all discharges we used from the cache.
@@ -1018,6 +1019,9 @@
vlog.VI(3).Infof("Discarding %d discharges as RPC failed with %v", len(fc.discharges), fc.response.Error)
fc.dc.Invalidate(fc.discharges...)
}
+ if id == errBadNumInputArgs.ID || id == errBadInputArg.ID {
+ return fc.close(verror.New(verror.ErrBadProtocol, fc.ctx, fc.response.Error))
+ }
return fc.close(verror.Convert(verror.ErrInternal, fc.ctx, fc.response.Error))
}
if got, want := fc.response.NumPosResults, uint64(len(resultptrs)); got != want {
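
The failedTryCall change in this file collapses the per-server bookkeeping into verror.SubErr values attached to a single top-level error. A minimal sketch of that aggregation pattern, using a hypothetical aggregate helper and serverErrs input:

package rpc

import (
	"v.io/v23/context"
	"v.io/v23/verror"
)

// aggregate shows the shape of the error returned by failedTryCall: one
// top-level verror (carrying the retry semantics) with per-server SubErrs
// attached for the detail. The helper and its input are hypothetical.
func aggregate(ctx *context.T, serverErrs map[string]error) error {
	suberrs := []verror.SubErr{}
	for server, err := range serverErrs {
		suberrs = append(suberrs, verror.SubErr{
			Name:    "server=" + server,
			Err:     err,
			Options: verror.Print,
		})
	}
	return verror.AddSubErrs(verror.New(verror.ErrNoServers, ctx), ctx, suberrs...)
}
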
diff --git a/profiles/internal/rpc/debug_test.go b/profiles/internal/rpc/debug_test.go
index 0e8e46e..035aef8 100644
--- a/profiles/internal/rpc/debug_test.go
+++ b/profiles/internal/rpc/debug_test.go
@@ -62,13 +62,9 @@
ep := eps[0]
// Call the Foo method on ""
{
- call, err := client.StartCall(ctx, ep.Name(), "Foo", nil)
- if err != nil {
- t.Fatalf("client.StartCall failed: %v", err)
- }
var value string
- if err := call.Finish(&value); err != nil {
- t.Fatalf("call.Finish failed: %v", err)
+ if err := client.Call(ctx, ep.Name(), "Foo", nil, []interface{}{&value}); err != nil {
+ t.Fatalf("client.Call failed: %v", err)
}
if want := "BAR"; value != want {
t.Errorf("unexpected value: Got %v, want %v", value, want)
@@ -79,13 +75,9 @@
foo := stats.NewString("testing/foo")
foo.Set("The quick brown fox jumps over the lazy dog")
addr := naming.JoinAddressName(ep.String(), "__debug/stats/testing/foo")
- call, err := client.StartCall(ctx, addr, "Value", nil, options.NoResolve{})
- if err != nil {
- t.Fatalf("client.StartCall failed: %v", err)
- }
var value string
- if err := call.Finish(&value); err != nil {
- t.Fatalf("call.Finish failed: %v", err)
+ if err := client.Call(ctx, addr, "Value", nil, []interface{}{&value}, options.NoResolve{}); err != nil {
+ t.Fatalf("client.Call failed: %v", err)
}
if want := foo.Value(); value != want {
t.Errorf("unexpected result: Got %v, want %v", value, want)
diff --git a/profiles/internal/rpc/discharges.go b/profiles/internal/rpc/discharges.go
index 60ef4bc..b6c7bf9 100644
--- a/profiles/internal/rpc/discharges.go
+++ b/profiles/internal/rpc/discharges.go
@@ -132,18 +132,13 @@
go func(i int, ctx *context.T, cav security.Caveat) {
defer wg.Done()
tp := cav.ThirdPartyDetails()
+ var dis security.Discharge
vlog.VI(3).Infof("Fetching discharge for %v", tp)
- call, err := d.c.StartCall(ctx, tp.Location(), "Discharge", []interface{}{cav, impetuses[i]}, NoDischarges{})
- if err != nil {
+ if err := d.c.Call(ctx, tp.Location(), "Discharge", []interface{}{cav, impetuses[i]}, []interface{}{&dis}, NoDischarges{}); err != nil {
vlog.VI(3).Infof("Discharge fetch for %v failed: %v", tp, err)
return
}
- var d security.Discharge
- if err := call.Finish(&d); err != nil {
- vlog.VI(3).Infof("Discharge fetch for %v failed: (%v)", cav, err)
- return
- }
- discharges <- fetched{i, &d, impetuses[i]}
+ discharges <- fetched{i, &dis, impetuses[i]}
}(i, ctx, caveats[i])
}
wg.Wait()
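
The conversions in this and the surrounding files replace the two-step StartCall/Finish pattern with the single client.Call method. Roughly, Call behaves like the sketch below; this is a simplification of the calling pattern, not the actual implementation.

package rpc

import (
	"v.io/v23/context"
	"v.io/v23/rpc"
)

// callSketch approximates what rpc.Client.Call does for the call sites
// converted above: start the call, then finish it into the supplied result
// pointers. Error handling in the real method may differ.
func callSketch(ctx *context.T, client rpc.Client, name, method string,
	inArgs, outArgs []interface{}, opts ...rpc.CallOpt) error {
	call, err := client.StartCall(ctx, name, method, inArgs, opts...)
	if err != nil {
		return err
	}
	return call.Finish(outArgs...)
}
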
diff --git a/profiles/internal/rpc/full_test.go b/profiles/internal/rpc/full_test.go
index b6916ce..8d1ee86 100644
--- a/profiles/internal/rpc/full_test.go
+++ b/profiles/internal/rpc/full_test.go
@@ -1136,16 +1136,9 @@
pclient.BlessingStore().Set(test.blessings, "server")
ctx, _ := v23.SetPrincipal(ctx, pclient)
- call, err := client.StartCall(ctx, test.name, test.method, test.args)
- if err != nil {
- t.Errorf(`%s client.StartCall got unexpected error: "%v"`, name, err)
- continue
- }
-
- results := makeResultPtrs(test.results)
- err = call.Finish(results...)
+ err = client.Call(ctx, test.name, test.method, test.args, makeResultPtrs(test.results))
if err != nil && test.authorized {
- t.Errorf(`%s call.Finish got error: "%v", wanted the RPC to succeed`, name, err)
+ t.Errorf(`%s client.Call got error: "%v", wanted the RPC to succeed`, name, err)
} else if err == nil && !test.authorized {
t.Errorf("%s call.Finish succeeded, expected authorization failure", name)
} else if !test.authorized && verror.ErrorID(err) != verror.ErrNoAccess.ID {
@@ -1232,13 +1225,8 @@
for i, test := range tests {
name := fmt.Sprintf("%d: Client RPCing with blessings %v", i, test.blessings)
pclient.BlessingStore().Set(test.blessings, "root")
- call, err := b.client.StartCall(ctx, "mountpoint/server/suffix", "Closure", nil)
- if err != nil {
- t.Errorf("%v: StartCall failed: %v", name, err)
- continue
- }
- if err := call.Finish(); !matchesErrorPattern(err, test.errID, test.err) {
- t.Errorf("%v: Finish returned error %v", name, err)
+ if err := b.client.Call(ctx, "mountpoint/server/suffix", "Closure", nil, nil); !matchesErrorPattern(err, test.errID, test.err) {
+ t.Errorf("%v: client.Call returned error %v", name, err)
continue
}
}
@@ -1284,14 +1272,8 @@
defer client.Close()
ctx, _ = v23.SetPrincipal(ctx, pclient)
- call, err := client.StartCall(ctx, "mountpoint/server/suffix", "EchoBlessings", nil)
- if err != nil {
- t.Fatalf("StartCall failed: %v", err)
- }
-
- type v []interface{}
var gotServer, gotClient string
- if err := call.Finish(&gotServer, &gotClient); err != nil {
+ if err := client.Call(ctx, "mountpoint/server/suffix", "EchoBlessings", nil, []interface{}{&gotServer, &gotClient}); err != nil {
t.Fatalf("Finish failed: %v", err)
}
if wantServer, wantClient := "[root/server]", "[root/client]"; gotServer != wantServer || gotClient != wantClient {
@@ -1324,12 +1306,8 @@
}
ctx, _ = v23.SetPrincipal(ctx, pclient)
call := func() error {
- call, err := b.client.StartCall(ctx, "mountpoint/server/aclAuth", "Echo", []interface{}{"batman"})
- if err != nil {
- return err
- }
var got string
- if err := call.Finish(&got); err != nil {
+ if err := b.client.Call(ctx, "mountpoint/server/aclAuth", "Echo", []interface{}{"batman"}, []interface{}{&got}); err != nil {
return err
}
if want := `method:"Echo",suffix:"aclAuth",arg:"batman"`; got != want {
@@ -1640,13 +1618,9 @@
}
// When using SecurityNone, all authorization checks should be skipped, so
// unauthorized methods should be callable.
- call, err := client.StartCall(ctx, "mp/server", "Unauthorized", nil, options.SecurityNone)
- if err != nil {
- t.Fatalf("client.StartCall failed: %v", err)
- }
var got string
- if err := call.Finish(&got); err != nil {
- t.Errorf("call.Finish failed: %v", err)
+ if err := client.Call(ctx, "mp/server", "Unauthorized", nil, []interface{}{&got}, options.SecurityNone); err != nil {
+ t.Fatalf("client.Call failed: %v", err)
}
if want := "UnauthorizedResult"; got != want {
t.Errorf("got (%v), want (%v)", got, want)
@@ -1917,10 +1891,8 @@
}
runClient := func(client rpc.Client) {
- if call, err := client.StartCall(ctx, "mountpoint/testServer", "Closure", nil); err != nil {
- t.Fatalf("failed to StartCall: %v", err)
- } else if err := call.Finish(); err != nil {
- t.Fatal(err)
+ if err := client.Call(ctx, "mountpoint/testServer", "Closure", nil, nil); err != nil {
+ t.Fatalf("failed to Call: %v", err)
}
}
diff --git a/profiles/internal/rpc/options.go b/profiles/internal/rpc/options.go
index d710939..5a61261 100644
--- a/profiles/internal/rpc/options.go
+++ b/profiles/internal/rpc/options.go
@@ -7,11 +7,11 @@
import (
"time"
- "v.io/x/ref/profiles/internal/rpc/stream"
-
"v.io/v23/naming"
"v.io/v23/options"
"v.io/v23/rpc"
+
+ "v.io/x/ref/profiles/internal/rpc/stream"
)
// PreferredProtocols instructs the Runtime implementation to select
diff --git a/profiles/internal/rpc/reserved.go b/profiles/internal/rpc/reserved.go
index 7152349..c095378 100644
--- a/profiles/internal/rpc/reserved.go
+++ b/profiles/internal/rpc/reserved.go
@@ -83,14 +83,14 @@
disp = r.dispReserved
}
if disp == nil {
- return nil, rpc.NewErrUnknownSuffix(ctx, suffix)
+ return nil, verror.New(verror.ErrUnknownSuffix, ctx, suffix)
}
obj, _, err := disp.Lookup(suffix)
switch {
case err != nil:
return nil, err
case obj == nil:
- return nil, rpc.NewErrUnknownSuffix(ctx, suffix)
+ return nil, verror.New(verror.ErrUnknownSuffix, ctx, suffix)
}
invoker, err := objectToInvoker(obj)
if err != nil {
@@ -126,14 +126,14 @@
disp = r.dispReserved
}
if disp == nil {
- return signature.Method{}, rpc.NewErrUnknownMethod(ctx, "__MethodSignature")
+ return signature.Method{}, verror.New(verror.ErrUnknownMethod, ctx, rpc.ReservedMethodSignature)
}
obj, _, err := disp.Lookup(suffix)
switch {
case err != nil:
return signature.Method{}, err
case obj == nil:
- return signature.Method{}, rpc.NewErrUnknownMethod(ctx, "__MethodSignature")
+ return signature.Method{}, verror.New(verror.ErrUnknownMethod, ctx, rpc.ReservedMethodSignature)
}
invoker, err := objectToInvoker(obj)
if err != nil {
diff --git a/profiles/internal/rpc/resolve_test.go b/profiles/internal/rpc/resolve_test.go
index ef41e60..a9955d9 100644
--- a/profiles/internal/rpc/resolve_test.go
+++ b/profiles/internal/rpc/resolve_test.go
@@ -75,7 +75,7 @@
return fmt.Errorf("root failed: %v", err)
}
mp := ""
- mt, err := mounttablelib.NewMountTableDispatcher("")
+ mt, err := mounttablelib.NewMountTableDispatcher("", "mounttable")
if err != nil {
return fmt.Errorf("mounttablelib.NewMountTableDispatcher failed: %s", err)
}
diff --git a/profiles/internal/rpc/server.go b/profiles/internal/rpc/server.go
index db689b8..76260fa 100644
--- a/profiles/internal/rpc/server.go
+++ b/profiles/internal/rpc/server.go
@@ -33,9 +33,20 @@
inaming "v.io/x/ref/profiles/internal/naming"
"v.io/x/ref/profiles/internal/rpc/stream"
"v.io/x/ref/profiles/internal/rpc/stream/vc"
+)
- // TODO(cnicolaou): finish verror2 -> verror transition, in particular
- // for communicating from server to client.
+var (
+ // These errors are intended to be used as arguments to higher
+ // level errors and hence {1}{2} is omitted from their format
+ // strings to avoid repeating these n-times in the final error
+ // message visible to the user.
+ errResponseEncoding = reg(".errResponseEncoding", "failed to encode RPC response {3} <-> {4}{:5}")
+ errResultEncoding = reg(".errResultEncoding", "failed to encode result #{3} [{4}]{:5}")
+ errFailedToResolveToEndpoint = reg(".errFailedToResolveToEndpoint", "failed to resolve {3} to an endpoint")
+ errFailedToResolveProxy = reg(".errFailedToResolveProxy", "failed to resolve proxy {3}{:4}")
+ errFailedToListenForProxy = reg(".errFailedToListenForProxy", "failed to listen on {3}{:4}")
+ errInternalTypeConversion = reg(".errInternalTypeConversion", "failed to convert {3} to v.io/x/ref/profiles/internal/naming.Endpoint")
+ errFailedToParseIP = reg(".errFailedToParseIP", "failed to parse {3} as an IP host")
)
// state for each requested listen address
@@ -302,7 +313,7 @@
return ep.String(), nil
}
}
- return "", fmt.Errorf("unable to resolve %q to an endpoint", address)
+ return "", verror.New(errFailedToResolveToEndpoint, s.ctx, address)
}
// getPossbileAddrs returns an appropriate set of addresses that could be used
@@ -314,7 +325,7 @@
ip := net.ParseIP(host)
if ip == nil {
- return nil, false, fmt.Errorf("failed to parse %q as an IP host", host)
+ return nil, false, verror.New(errFailedToParseIP, nil, host)
}
addrFromIP := func(ip net.IP) rpc.Address {
@@ -347,7 +358,7 @@
func (s *server) createEndpoints(lep naming.Endpoint, chooser rpc.AddressChooser) ([]*inaming.Endpoint, string, bool, error) {
iep, ok := lep.(*inaming.Endpoint)
if !ok {
- return nil, "", false, fmt.Errorf("internal type conversion error for %T", lep)
+ return nil, "", false, verror.New(errInternalTypeConversion, nil, fmt.Sprintf("%T", lep))
}
if !strings.HasPrefix(iep.Protocol, "tcp") &&
!strings.HasPrefix(iep.Protocol, "ws") {
@@ -479,16 +490,16 @@
func (s *server) reconnectAndPublishProxy(proxy string) (*inaming.Endpoint, stream.Listener, error) {
resolved, err := s.resolveToEndpoint(proxy)
if err != nil {
- return nil, nil, fmt.Errorf("Failed to resolve proxy %q (%v)", proxy, err)
+ return nil, nil, verror.New(errFailedToResolveProxy, s.ctx, proxy, err)
}
ln, ep, err := s.streamMgr.Listen(inaming.Network, resolved, s.principal, s.blessings, s.listenerOpts...)
if err != nil {
- return nil, nil, fmt.Errorf("failed to listen on %q: %s", resolved, err)
+ return nil, nil, verror.New(errFailedToListenForProxy, s.ctx, resolved, err)
}
iep, ok := ep.(*inaming.Endpoint)
if !ok {
ln.Close()
- return nil, nil, fmt.Errorf("internal type conversion error for %T", ep)
+ return nil, nil, verror.New(errInternalTypeConversion, s.ctx, fmt.Sprintf("%T", ep))
}
s.Lock()
s.proxies[proxy] = proxyState{iep, nil}
@@ -617,7 +628,7 @@
defer calls.Done()
fs, err := newFlowServer(flow, s)
if err != nil {
- vlog.Errorf("newFlowServer on %v failed: %v", ep, err)
+ vlog.VI(1).Infof("newFlowServer on %v failed: %v", ep, err)
return
}
if err := fs.serve(); err != nil {
@@ -626,7 +637,7 @@
// TODO(cnicolaou): revisit this when verror2 transition is
// done.
if err != io.EOF {
- vlog.VI(2).Infof("Flow serve on %v failed: %v", ep, err)
+ vlog.VI(2).Infof("Flow.serve on %v failed: %v", ep, err)
}
}
}(flow)
@@ -754,7 +765,7 @@
func (d leafDispatcher) Lookup(suffix string) (interface{}, security.Authorizer, error) {
if suffix != "" {
- return nil, nil, rpc.NewErrUnknownSuffix(nil, suffix)
+ return nil, nil, verror.New(verror.ErrUnknownSuffix, nil, suffix)
}
return d.invoker, d.auth, nil
}
@@ -1044,7 +1055,7 @@
if err == io.EOF {
return err
}
- return fmt.Errorf("rpc: response encoding failed: %v", err)
+ return verror.New(errResponseEncoding, fs.Context(), fs.LocalEndpoint().String(), fs.RemoteEndpoint().String(), err)
}
if response.Error != nil {
return response.Error
@@ -1054,7 +1065,7 @@
if err == io.EOF {
return err
}
- return fmt.Errorf("rpc: result #%d [%T=%v] encoding failed: %v", ix, res, res, err)
+ return verror.New(errResultEncoding, fs.Context(), ix, fmt.Sprintf("%T=%v", res, res), err)
}
}
// TODO(ashankar): Should unread data from the flow be drained?
@@ -1139,11 +1150,11 @@
return nil, err
}
if called, want := req.NumPosArgs, uint64(len(argptrs)); called != want {
- return nil, verror.New(verror.ErrBadProtocol, fs.T, newErrBadNumInputArgs(fs.T, fs.suffix, fs.method, called, want))
+ return nil, newErrBadNumInputArgs(fs.T, fs.suffix, fs.method, called, want)
}
for ix, argptr := range argptrs {
if err := fs.dec.Decode(argptr); err != nil {
- return nil, verror.New(verror.ErrBadProtocol, fs.T, newErrBadInputArg(fs.T, fs.suffix, fs.method, uint64(ix), err))
+ return nil, newErrBadInputArg(fs.T, fs.suffix, fs.method, uint64(ix), err)
}
}
@@ -1199,7 +1210,7 @@
return invoker, auth, nil
}
}
- return nil, nil, rpc.NewErrUnknownSuffix(nil, suffix)
+ return nil, nil, verror.New(verror.ErrUnknownSuffix, fs.T, suffix)
}
func objectToInvoker(obj interface{}) (rpc.Invoker, error) {
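
Several of the error definitions added in this file (and below) note that {1}{2} is omitted from their format strings. With verror, {1} and {2} are filled in from the component and operation, and {3} onwards come from the arguments passed to verror.New; the "{:N}" form prints a leading colon only when that argument is present. A hedged illustration follows; the registered ID and the rendered text are examples, not output copied from the library.

package rpc

import (
	"io"

	"v.io/v23/context"
	"v.io/v23/verror"
)

var errResultEncodingExample = verror.Register(
	verror.ID("v.io/x/ref/profiles/internal/rpc.errResultEncodingExample"),
	verror.NoRetry,
	"failed to encode result #{3} [{4}]{:5}")

func formatExample(ctx *context.T) error {
	// Renders roughly as:
	//   "... failed to encode result #2 [string=oops]: short write"
	// with the component/operation prefix (the omitted {1}{2} part) supplied
	// by verror from the context.
	return verror.New(errResultEncodingExample, ctx, 2, "string=oops", io.ErrShortWrite)
}
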
diff --git a/profiles/internal/rpc/server_authorizer.go b/profiles/internal/rpc/server_authorizer.go
index 3166d1b..2922824 100644
--- a/profiles/internal/rpc/server_authorizer.go
+++ b/profiles/internal/rpc/server_authorizer.go
@@ -19,18 +19,18 @@
const enableSecureServerAuth = false
var (
- errNoBlessings = verror.Register(pkgPath+".noBlessings", verror.NoRetry, "server has not presented any blessings")
-
- errAuthPossibleManInTheMiddle = verror.Register(pkgPath+".authPossibleManInTheMiddle",
- verror.NoRetry, "server blessings {3} do not match expectations set by endpoint {4}, possible man-in-the-middle or the server blessings are not accepted by the client? (endpoint: {5}, rejected blessings: {6})")
-
- errAuthServerNotAllowed = verror.Register(pkgPath+".authServerNotAllowed",
- verror.NoRetry, "server blessings {3} do not match any allowed server patterns {4}{:5}")
-
- errAuthServerKeyNotAllowed = verror.Register(pkgPath+".authServerKeyNotAllowed",
- verror.NoRetry, "remote public key {3} not matched by server key {4}")
-
- errMultiplePublicKeys = verror.Register(pkgPath+".multiplePublicKeyOptions", verror.NoRetry, "multiple ServerPublicKey options supplied to call, at most one is allowed")
+ // These errors are intended to be used as arguments to higher
+ // level errors and hence {1}{2} is omitted from their format
+ // strings to avoid repeating these n-times in the final error
+ // message visible to the user.
+ errNoBlessingsFromServer = reg(".errNoBlessingsFromServer", "server has not presented any blessings")
+ errAuthPossibleManInTheMiddle = reg(".errAuthPossibleManInTheMiddle",
+ "server blessings {3} do not match expectations set by endpoint {4}, possible man-in-the-middle or the server blessings are not accepted by the client? (endpoint: {5}, rejected blessings: {6})")
+ errAuthServerNotAllowed = reg(".errAuthServerNotAllowed",
+ "server blessings {3} do not match any allowed server patterns {4}{:5}")
+ errAuthServerKeyNotAllowed = reg(".errAuthServerKeyNotAllowed",
+ "remote public key {3} not matched by server key {4}")
+ errMultiplePublicKeys = reg(".errMultiplePublicKeyOptions", "multiple ServerPublicKey options supplied to call, at most one is allowed")
)
// serverAuthorizer implements security.Authorizer.
@@ -70,7 +70,7 @@
func (a *serverAuthorizer) Authorize(ctx *context.T) error {
call := security.GetCall(ctx)
if call.RemoteBlessings().IsZero() {
- return verror.New(errNoBlessings, ctx)
+ return verror.New(errNoBlessingsFromServer, ctx)
}
serverBlessings, rejectedBlessings := security.RemoteBlessingNames(ctx)
diff --git a/profiles/internal/rpc/server_test.go b/profiles/internal/rpc/server_test.go
index 3074ed5..d2c72d7 100644
--- a/profiles/internal/rpc/server_test.go
+++ b/profiles/internal/rpc/server_test.go
@@ -78,15 +78,11 @@
}
ctx, _ = v23.SetPrincipal(ctx, pclient)
ctx, _ = context.WithDeadline(ctx, time.Now().Add(10*time.Second))
- call, err := client.StartCall(ctx, "servername", "SomeMethod", nil)
- if err != nil {
- t.Fatalf("StartCall failed: %v", err)
- }
var result string
- if err := call.Finish(&result); err == nil {
+ if err := client.Call(ctx, "servername", "SomeMethod", nil, []interface{}{&result}); err == nil {
// TODO(caprita): Check the error type rather than
// merely ensuring the test doesn't panic.
- t.Fatalf("should have failed")
+ t.Fatalf("Call should have failed")
}
}
diff --git a/profiles/internal/rpc/sort_endpoints.go b/profiles/internal/rpc/sort_endpoints.go
index ef1549d..8ea0bd9 100644
--- a/profiles/internal/rpc/sort_endpoints.go
+++ b/profiles/internal/rpc/sort_endpoints.go
@@ -9,37 +9,26 @@
"net"
"sort"
- "v.io/v23/naming"
"v.io/x/lib/vlog"
+ "v.io/v23/naming"
+ "v.io/v23/verror"
+
"v.io/x/lib/netstate"
inaming "v.io/x/ref/profiles/internal/naming"
"v.io/x/ref/profiles/internal/rpc/version"
)
-type errorAccumulator struct {
- errs []error
-}
-
-func (e *errorAccumulator) add(err error) {
- e.errs = append(e.errs, err)
-}
-
-func (e *errorAccumulator) failed() bool {
- return len(e.errs) > 0
-}
-
-func (e *errorAccumulator) String() string {
- r := ""
- for _, err := range e.errs {
- r += fmt.Sprintf("(%s)", err)
- }
- return r
-}
-
-func newErrorAccumulator() *errorAccumulator {
- return &errorAccumulator{errs: make([]error, 0, 4)}
-}
+var (
+ // These errors are intended to be used as arguments to higher
+ // level errors and hence {1}{2} is omitted from their format
+ // strings to avoid repeating these n-times in the final error
+ // message visible to the user.
+ errMalformedEndpoint = reg(".errMalformedEndpoint", "malformed endpoint{:3}")
+ errUndesiredProtocol = reg(".errUndesiredProtocol", "undesired protocol{:3}")
+ errIncompatibleEndpointVersions = reg(".errIncompatibleEndpointVersions", "incompatible endpoint versions{:3}")
+ errNoCompatibleServers = reg(".errNoCompatibleServers", "failed to find any compatible servers{:3}")
+)
type serverLocality int
@@ -99,27 +88,31 @@
func filterAndOrderServers(servers []naming.MountedServer, protocols []string, ipnets []*net.IPNet) ([]naming.MountedServer, error) {
vlog.VI(3).Infof("filterAndOrderServers%v: %v", protocols, servers)
var (
- errs = newErrorAccumulator()
+ errs = verror.SubErrs{}
list = make(sortableServerList, 0, len(servers))
protoRanks = mkProtocolRankMap(protocols)
)
if len(protoRanks) == 0 {
protoRanks = defaultPreferredProtocolOrder
}
+ adderr := func(name string, err error) {
+ errs = append(errs, verror.SubErr{Name: "server=" + name, Err: err, Options: verror.Print})
+ }
for _, server := range servers {
name := server.Server
ep, err := name2endpoint(name)
if err != nil {
- errs.add(fmt.Errorf("malformed endpoint %q: %v", name, err))
+ adderr(name, verror.New(errMalformedEndpoint, nil, err))
continue
}
if err = version.CheckCompatibility(ep); err != nil {
- errs.add(fmt.Errorf("%q: %v", name, err))
+ // TODO(cnicolaou): convert rpc/version to verror.
+ adderr(name, verror.New(errIncompatibleEndpointVersions, nil, err))
continue
}
rank, err := protocol2rank(ep.Addr().Network(), protoRanks)
if err != nil {
- errs.add(fmt.Errorf("%q: %v", name, err))
+ adderr(name, err)
continue
}
list = append(list, sortableServer{
@@ -129,7 +122,7 @@
})
}
if len(list) == 0 {
- return nil, fmt.Errorf("failed to find any compatible servers: %v", errs)
+ return nil, verror.AddSubErrs(verror.New(errNoCompatibleServers, nil), nil, errs...)
}
// TODO(ashankar): Don't have to use stable sorting, could
// just use sort.Sort. The only problem with that is the
@@ -178,7 +171,7 @@
if protocol == naming.UnknownProtocol {
return -1, nil
}
- return 0, fmt.Errorf("undesired protocol %q", protocol)
+ return 0, verror.New(errUndesiredProtocol, nil, protocol)
}
// locality returns the serverLocality to use given an endpoint and the
diff --git a/profiles/internal/rpc/sort_internal_test.go b/profiles/internal/rpc/sort_internal_test.go
index 67ad39d..a4c7b4d 100644
--- a/profiles/internal/rpc/sort_internal_test.go
+++ b/profiles/internal/rpc/sort_internal_test.go
@@ -23,7 +23,7 @@
servers := []naming.MountedServer{}
_, err := filterAndOrderServers(servers, []string{"tcp"}, nil)
- if err == nil || err.Error() != "failed to find any compatible servers: " {
+ if err == nil || err.Error() != "failed to find any compatible servers" {
t.Errorf("expected a different error: %v", err)
}
@@ -44,10 +44,9 @@
}
_, err = filterAndOrderServers(servers, []string{"foobar"}, nil)
- if err == nil || !strings.HasSuffix(err.Error(), "undesired protocol \"tcp\")") {
+ if err == nil || !strings.HasSuffix(err.Error(), "undesired protocol: tcp]") {
t.Errorf("expected a different error to: %v", err)
}
-
}
func TestOrderingByProtocol(t *testing.T) {
diff --git a/profiles/internal/rpc/stream/crypto/box.go b/profiles/internal/rpc/stream/crypto/box.go
index 95e27a1..0dfba87 100644
--- a/profiles/internal/rpc/stream/crypto/box.go
+++ b/profiles/internal/rpc/stream/crypto/box.go
@@ -14,7 +14,26 @@
"golang.org/x/crypto/nacl/box"
+ "v.io/v23/verror"
+
"v.io/x/ref/profiles/internal/lib/iobuf"
+ "v.io/x/ref/profiles/internal/rpc/stream"
+)
+
+const pkgPath = "v.io/x/ref/profiles/internal/rpc/stream/crypto"
+
+func reg(id, msg string) verror.IDAction {
+ return verror.Register(verror.ID(pkgPath+id), verror.NoRetry, msg)
+}
+
+var (
+ // These errors are intended to be used as arguments to higher
+ // level errors and hence {1}{2} is omitted from their format
+ // strings to avoid repeating these n-times in the final error
+ // message visible to the user.
+ errCipherTextTooShort = reg(".errCipherTextTooShort", "ciphertext too short")
+ errMessageAuthFailed = reg(".errMessageAuthFailed", "message authentication failed")
+ errUnrecognizedCipherText = reg(".errUnrecognizedCipherText", "CipherSuite {3} is not recognized. Must use one that uses Diffie-Hellman as the key exchange algorithm")
)
type boxcrypter struct {
@@ -80,13 +99,13 @@
c.readNonce += 2
retLen := len(src.Contents) - box.Overhead
if retLen < 0 {
- return nil, fmt.Errorf("ciphertext too short")
+ return nil, verror.New(stream.ErrNetwork, nil, verror.New(errCipherTextTooShort, nil))
}
ret := c.alloc.Alloc(uint(retLen))
var ok bool
ret.Contents, ok = box.OpenAfterPrecomputation(ret.Contents[:0], src.Contents, &nonce, &c.sharedKey)
if !ok {
- return nil, fmt.Errorf("message authentication failed")
+ return nil, verror.New(stream.ErrSecurity, nil, verror.New(errMessageAuthFailed, nil))
}
return ret, nil
}
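
With box.go now wrapping its failures in the stream error codes, callers can branch on the category with verror.ErrorID rather than matching message text. A small sketch of that pattern; the returned descriptions are illustrative.

package crypto

import (
	"v.io/v23/verror"

	"v.io/x/ref/profiles/internal/rpc/stream"
)

// classify sketches how a caller distinguishes the error categories that the
// crypto package now returns.
func classify(err error) string {
	switch verror.ErrorID(err) {
	case stream.ErrNetwork.ID:
		return "network-level failure (e.g. ciphertext too short)"
	case stream.ErrSecurity.ID:
		return "security failure (e.g. message authentication failed)"
	default:
		return "other"
	}
}
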
diff --git a/profiles/internal/rpc/stream/crypto/box_cipher.go b/profiles/internal/rpc/stream/crypto/box_cipher.go
index ce85fe4..5dc0cb8 100644
--- a/profiles/internal/rpc/stream/crypto/box_cipher.go
+++ b/profiles/internal/rpc/stream/crypto/box_cipher.go
@@ -6,10 +6,13 @@
import (
"encoding/binary"
- "errors"
"golang.org/x/crypto/nacl/box"
"golang.org/x/crypto/salsa20/salsa"
+
+ "v.io/v23/verror"
+
+ "v.io/x/ref/profiles/internal/rpc/stream"
)
// cbox implements a ControlCipher using go.crypto/nacl/box.
@@ -32,7 +35,11 @@
)
var (
- errMessageTooShort = errors.New("control cipher: message is too short")
+ // These errors are intended to be used as arguments to higher
+ // level errors and hence {1}{2} is omitted from their format
+ // strings to avoid repeating these n-times in the final error
+ // message visible to the user.
+ errMessageTooShort = reg(".errMessageTooShort", "control cipher: message is too short")
)
func (s *cboxStream) alloc(n int) []byte {
@@ -92,7 +99,7 @@
func (c *cbox) Seal(data []byte) error {
n := len(data)
if n < cboxMACSize {
- return errMessageTooShort
+ return verror.New(stream.ErrNetwork, nil, verror.New(errMessageTooShort, nil))
}
tmp := c.enc.alloc(n)
nonce := c.enc.currentNonce()
diff --git a/profiles/internal/rpc/stream/crypto/tls.go b/profiles/internal/rpc/stream/crypto/tls.go
index 2f68b6b..8b371ab 100644
--- a/profiles/internal/rpc/stream/crypto/tls.go
+++ b/profiles/internal/rpc/stream/crypto/tls.go
@@ -9,17 +9,26 @@
import (
"bytes"
"crypto/tls"
- "errors"
"fmt"
"io"
"net"
"sync"
"time"
+ "v.io/v23/verror"
+
"v.io/x/ref/profiles/internal/lib/iobuf"
+ "v.io/x/ref/profiles/internal/rpc/stream"
)
-var errDeadlinesNotSupported = errors.New("deadlines not supported")
+var (
+ // These errors are intended to be used as arguments to higher
+ // level errors and hence {1}{2} is omitted from their format
+ // strings to avoid repeating these n-times in the final error
+ // message visible to the user.
+ errDeadlinesNotSupported = reg(".errDeadlinesNotSupported", "deadlines not supported")
+ errEndOfEncryptedSlice = reg(".errEndOfEncryptedSlice", "end of encrypted slice")
+)
// TLSClientSessionCacheOpt specifies the ClientSessionCache used to resume TLS sessions.
// It adapts tls.ClientSessionCache to the v.io/v23/x/ref/profiles/internal/rpc/stream.VCOpt interface.
@@ -63,7 +72,7 @@
return c.handshakeConn.Read(b)
}
if len(c.in) == 0 {
- return 0, tempError{}
+ return 0, stream.NewNetError(verror.New(stream.ErrNetwork, nil, verror.New(errEndOfEncryptedSlice, nil)), false, true)
}
n = copy(b, c.in)
c.in = c.in[n:]
@@ -77,26 +86,23 @@
return c.out.Write(b)
}
-func (*fakeConn) Close() error { return nil }
-func (c *fakeConn) LocalAddr() net.Addr { return c.laddr }
-func (c *fakeConn) RemoteAddr() net.Addr { return c.raddr }
-func (*fakeConn) SetDeadline(t time.Time) error { return errDeadlinesNotSupported }
-func (*fakeConn) SetReadDeadline(t time.Time) error { return errDeadlinesNotSupported }
-func (*fakeConn) SetWriteDeadline(t time.Time) error { return errDeadlinesNotSupported }
-
-// tempError implements net.Error and returns true for Temporary.
-// Providing this error in fakeConn.Read allows tls.Conn.Read to return with an
-// error without changing underlying state.
-type tempError struct{}
-
-func (tempError) Error() string { return "end of encrypted slice" }
-func (tempError) Timeout() bool { return false }
-func (tempError) Temporary() bool { return true }
+func (*fakeConn) Close() error { return nil }
+func (c *fakeConn) LocalAddr() net.Addr { return c.laddr }
+func (c *fakeConn) RemoteAddr() net.Addr { return c.raddr }
+func (*fakeConn) SetDeadline(t time.Time) error {
+ return verror.New(stream.ErrBadState, nil, verror.New(errDeadlinesNotSupported, nil))
+}
+func (*fakeConn) SetReadDeadline(t time.Time) error {
+ return verror.New(stream.ErrBadState, nil, verror.New(errDeadlinesNotSupported, nil))
+}
+func (*fakeConn) SetWriteDeadline(t time.Time) error {
+ return verror.New(stream.ErrBadState, nil, verror.New(errDeadlinesNotSupported, nil))
+}
// tlsCrypter implements the Crypter interface using crypto/tls.
//
// crypto/tls provides a net.Conn, while the Crypter interface operates on
-// iobuf.Slice objects. In order to adapt to the Crypter interface, the
+// iobuf.Slice objects. In order to adapt to the Crypter interface, the
// strategy is as follows:
//
// - netTLSCrypter wraps a net.Conn with an alternative implementation
@@ -146,7 +152,7 @@
case tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
default:
t.Close()
- return nil, fmt.Errorf("CipherSuite 0x%04x is not recognized. Must use one that uses Diffie-Hellman as the key exchange algorithm", cs)
+ return nil, verror.New(stream.ErrBadArg, nil, verror.New(errUnrecognizedCipherText, nil, fmt.Sprintf("0x%04x", cs)))
}
fc.handshakeConn = nil
return &tlsCrypter{
@@ -183,7 +189,7 @@
for {
n, err := c.tls.Read(out)
if err != nil {
- if _, exit := err.(tempError); exit {
+ if _, exit := err.(*stream.NetError); exit {
break
}
plaintext.Release()
diff --git a/profiles/internal/rpc/stream/error_test.go b/profiles/internal/rpc/stream/error_test.go
new file mode 100644
index 0000000..d576d38
--- /dev/null
+++ b/profiles/internal/rpc/stream/error_test.go
@@ -0,0 +1,36 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package stream_test
+
+import (
+ "net"
+ "testing"
+
+ "v.io/v23/verror"
+
+ "v.io/x/ref/profiles/internal/rpc/stream"
+)
+
+func TestTimeoutError(t *testing.T) {
+ e := verror.Register(".test", verror.NoRetry, "hello{:3}")
+ timeoutErr := stream.NewNetError(verror.New(e, nil, "world"), true, false)
+
+ // NetError implements both error and net.Error. We test that it
+ // implements error by assigning timeoutErr to err, which is of type error.
+ var err error
+ err = timeoutErr
+
+ neterr, ok := err.(net.Error)
+ if !ok {
+ t.Fatalf("%T not a net.Error", err)
+ }
+
+ if got, want := neterr.Timeout(), true; got != want {
+ t.Fatalf("got %v, want %v", got, want)
+ }
+ if got, want := neterr.Error(), "hello: world"; got != want {
+ t.Fatalf("got %v, want %v", got, want)
+ }
+}
diff --git a/profiles/internal/rpc/stream/errors.go b/profiles/internal/rpc/stream/errors.go
index f7d7515..82079e6 100644
--- a/profiles/internal/rpc/stream/errors.go
+++ b/profiles/internal/rpc/stream/errors.go
@@ -5,32 +5,42 @@
package stream
import (
+ "net"
+
"v.io/v23/verror"
)
const pkgPath = "v.io/x/ref/profiles/internal/rpc/stream"
-// The stream family of packages guarantee to return one of the verror codes defined here, their
-// messages are constructed so as to avoid embedding a component/method name and are thus
-// more suitable for inclusion in other verrors.
+// The stream family of packages guarantees to return one of the verror codes defined
+// here; their messages are constructed so as to avoid embedding a component/method name
+// and are thus more suitable for inclusion in other verrors.
+// This practice of omitting {1}{2} is used throughout the stream packages since all
+// of their errors are intended to be used as arguments to higher level errors.
var (
- ErrSecurity = verror.Register(pkgPath+".errSecurity", verror.NoRetry, "{:3}")
- ErrNetwork = verror.Register(pkgPath+".errNetwork", verror.NoRetry, "{:3}")
- ErrProxy = verror.Register(pkgPath+".errProxy", verror.NoRetry, "{:3}")
- ErrBadArg = verror.Register(pkgPath+".errBadArg", verror.NoRetry, "{:3}")
- ErrBadState = verror.Register(pkgPath+".errBadState", verror.NoRetry, "{:3}")
- // TODO(cnicolaou): remove this when the rest of the stream sub packages are converted.
- ErrSecOrNet = verror.Register(pkgPath+".errSecOrNet", verror.NoRetry, "{:3}")
- // Update IsStreamError below if you add any other errors here.
+ // TODO(cnicolaou): rename ErrSecurity to ErrAuth
+ ErrSecurity = verror.Register(pkgPath+".errSecurity", verror.NoRetry, "{:3}")
+ ErrNotTrusted = verror.Register(pkgPath+".errNotTrusted", verror.NoRetry, "{:3}")
+ ErrNetwork = verror.Register(pkgPath+".errNetwork", verror.NoRetry, "{:3}")
+ ErrProxy = verror.Register(pkgPath+".errProxy", verror.NoRetry, "{:3}")
+ ErrBadArg = verror.Register(pkgPath+".errBadArg", verror.NoRetry, "{:3}")
+ ErrBadState = verror.Register(pkgPath+".errBadState", verror.NoRetry, "{:3}")
+ ErrAborted = verror.Register(pkgPath+".errAborted", verror.NoRetry, "{:3}")
)
-// IsStreamError returns true if the err is one of the verror codes defined by this package.
-func IsStreamError(err error) bool {
- id := verror.ErrorID(err)
- switch id {
- case ErrSecurity.ID, ErrNetwork.ID, ErrProxy.ID, ErrBadArg.ID, ErrBadState.ID, ErrSecOrNet.ID:
- return true
- default:
- return false
- }
+// NetError implements net.Error
+type NetError struct {
+ err error
+ timeout, temp bool
}
+
+// NewNetError returns a new net.Error which will return the
+// supplied error, timeout and temporary parameters when the corresponding
+// methods are invoked.
+func NewNetError(err error, timeout, temporary bool) net.Error {
+ return &NetError{err, timeout, temporary}
+}
+
+func (t NetError) Error() string { return t.err.Error() }
+func (t NetError) Timeout() bool { return t.timeout }
+func (t NetError) Temporary() bool { return t.temp }
diff --git a/profiles/internal/rpc/stream/manager/error_test.go b/profiles/internal/rpc/stream/manager/error_test.go
index b2c2f9d..9c33348 100644
--- a/profiles/internal/rpc/stream/manager/error_test.go
+++ b/profiles/internal/rpc/stream/manager/error_test.go
@@ -51,7 +51,7 @@
// bad address
_, _, err = server.Listen("tcp", "xx.0.0.1:0", pserver, pserver.BlessingStore().Default())
- if verror.ErrorID(err) != stream.ErrBadArg.ID {
+ if verror.ErrorID(err) != stream.ErrNetwork.ID {
t.Fatalf("wrong error: %s", err)
}
t.Log(err)
diff --git a/profiles/internal/rpc/stream/manager/listener.go b/profiles/internal/rpc/stream/manager/listener.go
index 16ace2f..1122689 100644
--- a/profiles/internal/rpc/stream/manager/listener.go
+++ b/profiles/internal/rpc/stream/manager/listener.go
@@ -9,6 +9,7 @@
"net"
"strings"
"sync"
+ "time"
"v.io/x/ref/profiles/internal/lib/upcqueue"
inaming "v.io/x/ref/profiles/internal/naming"
@@ -29,17 +30,21 @@
}
var (
- errVomEncoder = reg(".vomEncoder", "failed to create vom encoder{:3}")
- errVomDecoder = reg(".vomDecoder", "failed to create vom decoder{:3}")
- errVomEncodeRequest = reg(".vomEncodeRequest", "failed to encode request to proxy{:3}")
- errVomDecodeResponse = reg(".vomDecodeRequest", "failed to decoded response from proxy{:3}")
- errProxyError = reg(".proxyError", "proxy error {:3}")
- errProxyEndpointError = reg(".proxyEndpointError", "proxy returned an invalid endpoint {:3}{:4}")
- errAlreadyConnected = reg(".alreadyConnected", "already connected to proxy and accepting connections? VIF: {3}, StartAccepting{:_}")
- errFailedToCreateLivenessFlow = reg(".failedToCreateLivenessFlow", "unable to create liveness check flow to proxy{:3}")
- errAcceptFailed = reg(".acceptFailed", "accept failed{:3}")
- errFailedToEstablishVC = reg(".failedToEstablishVC", "VC establishment with proxy failed{:_}")
- errListenerAlreadyClosed = reg(".listenerAlreadyClosed", "listener already closed")
+ // These errors are intended to be used as arguments to higher
+ // level errors and hence {1}{2} is omitted from their format
+ // strings to avoid repeating these n-times in the final error
+ // message visible to the user.
+ errVomEncoder = reg(".errVomEncoder", "failed to create vom encoder{:3}")
+ errVomDecoder = reg(".errVomDecoder", "failed to create vom decoder{:3}")
+ errVomEncodeRequest = reg(".errVomEncodeRequest", "failed to encode request to proxy{:3}")
+ errVomDecodeResponse = reg(".errVomDecodeRequest", "failed to decode response from proxy{:3}")
+ errProxyError = reg(".errProxyError", "proxy error {:3}")
+ errProxyEndpointError = reg(".errProxyEndpointError", "proxy returned an invalid endpoint {:3}{:4}")
+ errAlreadyConnected = reg(".errAlreadyConnected", "already connected to proxy and accepting connections? VIF: {3}, StartAccepting{:_}")
+ errFailedToCreateLivenessFlow = reg(".errFailedToCreateLivenessFlow", "unable to create liveness check flow to proxy{:3}")
+ errAcceptFailed = reg(".errAcceptFailed", "accept failed{:3}")
+ errFailedToEstablishVC = reg(".errFailedToEstablishVC", "VC establishment with proxy failed{:_}")
+ errListenerAlreadyClosed = reg(".errListenerAlreadyClosed", "listener already closed")
)
// listener extends stream.Listener with a DebugString method.
@@ -94,11 +99,34 @@
return ln
}
+func isTemporaryError(err error) bool {
+ if oErr, ok := err.(*net.OpError); ok && oErr.Temporary() {
+ return true
+ }
+ return false
+}
+
func (ln *netListener) netAcceptLoop(principal security.Principal, blessings security.Blessings, opts []stream.ListenerOpt) {
defer ln.netLoop.Done()
+ opts = append([]stream.ListenerOpt{vc.StartTimeout{defaultStartTimeout}}, opts...)
for {
conn, err := ln.netLn.Accept()
+ if isTemporaryError(err) {
+ // TODO(rthellend): Aggressively close other connections?
+ vlog.Errorf("net.Listener.Accept() failed on %v with %v", ln.netLn, err)
+ for isTemporaryError(err) {
+ time.Sleep(10 * time.Millisecond)
+ conn, err = ln.netLn.Accept()
+ }
+ }
if err != nil {
+ // TODO(cnicolaou): closeListener in manager.go writes to ln (by calling
+ // ln.Close()) and we read it here in the Infof output, so there is
+ // an unguarded read here that will fail under --race. This will only show
+ // itself if the Infof below is changed to always be printed (which is
+ // how I noticed). The right solution is to lock these datastructures, but
+ // that can wait until a bigger overhaul occurs. For now, we leave this at
+ // VI(1) knowing that it's basically harmless.
vlog.VI(1).Infof("Exiting netAcceptLoop: net.Listener.Accept() failed on %v with %v", ln.netLn, err)
return
}
@@ -212,10 +240,9 @@
vf.StopAccepting()
if verror.ErrorID(err) == verror.ErrAborted.ID {
ln.manager.vifs.Delete(vf)
+ return nil, nil, verror.New(stream.ErrAborted, nil, err)
}
- // TODO(cnicolaou): use one of ErrSecurity or ErrProtocol when the vif package
- // is converted.
- return nil, nil, verror.New(stream.ErrSecOrNet, nil, verror.New(errFailedToEstablishVC, nil, err))
+ return nil, nil, err
}
flow, err := vc.Connect()
if err != nil {
diff --git a/profiles/internal/rpc/stream/manager/manager.go b/profiles/internal/rpc/stream/manager/manager.go
index 756a479..5ee4fd0 100644
--- a/profiles/internal/rpc/stream/manager/manager.go
+++ b/profiles/internal/rpc/stream/manager/manager.go
@@ -30,15 +30,22 @@
const pkgPath = "v.io/x/ref/profiles/internal/rpc/stream/manager"
var (
- errUnknownNetwork = reg(".unknownNetwork", "unknown network{:3}")
- errEndpointParseError = reg(".endpointParseError", "failed to parse endpoint {3}{:4}")
- errAlreadyShutdown = reg(".alreadyShutdown", "already shutdown")
- errProvidedServerBlessingsWithoutPrincipal = reg(".serverBlessingsWithoutPrincipal", "blessings provided but with no principal")
- errNoBlessingNames = reg(".noBlessingNames", "no blessing names could be extracted for the provided principal")
+ // These errors are intended to be used as arguments to higher
+ // level errors and hence {1}{2} is omitted from their format
+ // strings to avoid repeating these n-times in the final error
+ // message visible to the user.
+ errUnknownNetwork = reg(".errUnknownNetwork", "unknown network{:3}")
+ errEndpointParseError = reg(".errEndpointParseError", "failed to parse endpoint {3}{:4}")
+ errAlreadyShutdown = reg(".errAlreadyShutdown", "already shutdown")
+ errProvidedServerBlessingsWithoutPrincipal = reg(".errServerBlessingsWithoutPrincipal", "blessings provided but with no principal")
+ errNoBlessingNames = reg(".errNoBlessingNames", "no blessing names could be extracted for the provided principal")
)
const (
- defaultIdleTimeout = 30 * time.Minute
+ // The default time after which a VIF is closed if no VC is opened.
+ defaultStartTimeout = 3 * time.Second
+ // The default time after which an idle VC is closed.
+ defaultIdleTimeout = 30 * time.Second
)
// InternalNew creates a new stream.Manager for managing streams where the local
@@ -80,7 +87,11 @@
func dial(network, address string, timeout time.Duration) (net.Conn, error) {
if d, _, _ := rpc.RegisteredProtocol(network); d != nil {
- return d(network, address, timeout)
+ conn, err := d(network, address, timeout)
+ if err != nil {
+ return nil, verror.New(stream.ErrNetwork, nil, err)
+ }
+ return conn, nil
}
return nil, verror.New(stream.ErrBadArg, nil, verror.New(errUnknownNetwork, nil, network))
}
@@ -105,9 +116,6 @@
vlog.VI(1).Infof("(%q, %q) not in VIF cache. Dialing", network, address)
conn, err := dial(network, address, timeout)
if err != nil {
- if !stream.IsStreamError(err) {
- err = verror.New(stream.ErrNetwork, nil, err)
- }
return nil, err
}
// (network, address) in the endpoint might not always match up
@@ -129,10 +137,11 @@
vRange = r
}
}
+ opts = append([]stream.VCOpt{vc.StartTimeout{defaultStartTimeout}}, opts...)
vf, err := vif.InternalNewDialedVIF(conn, m.rid, principal, vRange, m.deleteVIF, opts...)
if err != nil {
conn.Close()
- return nil, verror.New(stream.ErrNetwork, nil, err)
+ return nil, err
}
// TODO(ashankar): If two goroutines are simultaneously invoking
// manager.Dial, it is possible that two VIFs are inserted into m.vifs
@@ -156,7 +165,7 @@
}
opts = append([]stream.VCOpt{m.sessionCache, vc.IdleTimeout{defaultIdleTimeout}}, opts...)
vc, err := vf.Dial(remote, principal, opts...)
- if !retry || verror.ErrorID(err) != verror.ErrAborted.ID {
+ if !retry || verror.ErrorID(err) != stream.ErrAborted.ID {
return vc, err
}
vf.Close()
@@ -166,7 +175,11 @@
func listen(protocol, address string) (net.Listener, error) {
if _, l, _ := rpc.RegisteredProtocol(protocol); l != nil {
- return l(protocol, address)
+ ln, err := l(protocol, address)
+ if err != nil {
+ return nil, verror.New(stream.ErrNetwork, nil, err)
+ }
+ return ln, nil
}
return nil, verror.New(stream.ErrBadArg, nil, verror.New(errUnknownNetwork, nil, protocol))
}
@@ -202,10 +215,6 @@
}
netln, err := listen(protocol, address)
if err != nil {
- if !stream.IsStreamError(err) {
- vlog.Infof("XXXX %v : %s\n", verror.ErrorID(err), err)
- err = verror.New(stream.ErrBadArg, nil, err)
- }
return nil, nil, err
}
diff --git a/profiles/internal/rpc/stream/manager/manager_test.go b/profiles/internal/rpc/stream/manager/manager_test.go
index b26d57e..0581802 100644
--- a/profiles/internal/rpc/stream/manager/manager_test.go
+++ b/profiles/internal/rpc/stream/manager/manager_test.go
@@ -383,6 +383,59 @@
}
}
+func TestStartTimeout(t *testing.T) {
+ const (
+ startTime = 5 * time.Millisecond
+ // We use a long wait time here since it takes some time for the underlying
+ // VIF of the other side to be closed especially in race testing.
+ waitTime = 250 * time.Millisecond
+ )
+
+ var (
+ server = InternalNew(naming.FixedRoutingID(0x55555555))
+ pserver = testutil.NewPrincipal("server")
+ lopts = []stream.ListenerOpt{vc.StartTimeout{startTime}}
+ )
+
+ // Pause the start timers.
+ triggerTimers := vif.SetFakeTimers()
+
+ ln, ep, err := server.Listen("tcp", "127.0.0.1:0", pserver, pserver.BlessingStore().Default(), lopts...)
+ if err != nil {
+ t.Fatal(err)
+ }
+ go func() {
+ for {
+ _, err := ln.Accept()
+ if err != nil {
+ return
+ }
+ }
+ }()
+
+ _, err = net.Dial(ep.Addr().Network(), ep.Addr().String())
+ if err != nil {
+ t.Fatalf("net.Dial failed: %v", err)
+ }
+
+ // Trigger the start timers.
+ triggerTimers()
+
+ // No VC is opened. The VIF should be closed after start timeout.
+ timeout := time.After(waitTime)
+ for done := false; !done; {
+ select {
+ case <-time.After(startTime * 2):
+ done = numVIFs(server) == 0
+ case <-timeout:
+ done = true
+ }
+ }
+ if n := numVIFs(server); n != 0 {
+ t.Errorf("Server has %d VIFs; want 0\n%v", n, debugString(server))
+ }
+}
+
func testIdleTimeout(t *testing.T, testServer bool) {
const (
idleTime = 10 * time.Millisecond
diff --git a/profiles/internal/rpc/stream/message/coding.go b/profiles/internal/rpc/stream/message/coding.go
index 4186a0a..619ebd3 100644
--- a/profiles/internal/rpc/stream/message/coding.go
+++ b/profiles/internal/rpc/stream/message/coding.go
@@ -6,18 +6,31 @@
import (
"encoding/binary"
- "errors"
- "fmt"
"io"
+ "v.io/v23/verror"
+
"v.io/x/ref/profiles/internal/rpc/stream/id"
)
-var errLargerThan3ByteUint = errors.New("integer too large to represent in 3 bytes")
+const pkgPath = "v.io/x/ref/profiles/internal/rpc/stream/message"
+
+func reg(id, msg string) verror.IDAction {
+ return verror.Register(verror.ID(pkgPath+id), verror.NoRetry, msg)
+}
+
+var (
+ // These errors are intended to be used as arguments to higher
+ // level errors and hence {1}{2} is omitted from their format
+ // strings to avoid repeating these n-times in the final error
+ // message visible to the user.
+ errLargerThan3ByteUint = reg(".errLargerThan3ByteUint", "integer too large to represent in 3 bytes")
+ errReadWrongNumBytes = reg(".errReadWrongNumBytes", "read {3} bytes, wanted to read {4}")
+)
func write3ByteUint(dst []byte, n int) error {
if n >= (1<<24) || n < 0 {
- return errLargerThan3ByteUint
+ return verror.New(errLargerThan3ByteUint, nil)
}
dst[0] = byte((n & 0xff0000) >> 16)
dst[1] = byte((n & 0x00ff00) >> 8)
@@ -59,7 +72,7 @@
return err
}
if n != int(size) {
- return io.ErrUnexpectedEOF
+ return verror.New(errReadWrongNumBytes, nil, n, int(size))
}
*s = string(bytes)
return nil
@@ -75,12 +88,13 @@
return err
}
if n != int(size) {
- return io.ErrUnexpectedEOF
+ return verror.New(errReadWrongNumBytes, nil, n, int(size))
}
return nil
}
-// byteReader adapts an io.Reader to an io.ByteReader
+// byteReader adapts an io.Reader to an io.ByteReader so that we can
+// use it with encoding/binary for varint etc.
type byteReader struct{ io.Reader }
func (b byteReader) ReadByte() (byte, error) {
@@ -92,7 +106,7 @@
case err != nil:
return 0, err
default:
- return 0, fmt.Errorf("read %d bytes, wanted to read 1", n)
+ return 0, verror.New(errReadWrongNumBytes, nil, n, 1)
}
}
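
For reference alongside write3ByteUint above, the matching decode is the usual big-endian reassembly. A sketch, not necessarily the package's actual read3ByteUint:

package message

// read3ByteUintSketch reverses write3ByteUint: it reassembles the big-endian
// 3-byte value written above.
func read3ByteUintSketch(src []byte) int {
	return int(src[0])<<16 | int(src[1])<<8 | int(src[2])
}
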
diff --git a/profiles/internal/rpc/stream/message/control.go b/profiles/internal/rpc/stream/message/control.go
index 75afa34..93bfb31 100644
--- a/profiles/internal/rpc/stream/message/control.go
+++ b/profiles/internal/rpc/stream/message/control.go
@@ -10,11 +10,26 @@
"io"
"v.io/v23/naming"
+ "v.io/v23/verror"
+
inaming "v.io/x/ref/profiles/internal/naming"
"v.io/x/ref/profiles/internal/rpc/stream/id"
"v.io/x/ref/profiles/internal/rpc/version"
)
+var (
+ // These errors are intended to be used as arguments to higher
+ // level errors and hence {1}{2} is omitted from their format
+ // strings to avoid repeating these n-times in the final error
+ // message visible to the user.
+ errUnrecognizedVCControlMessageCommand = reg(".errUnrecognizedVCControlMessageCommand",
+ "unrecognized VC control message command({3})")
+ errUnrecognizedVCControlMessageType = reg(".errUnrecognizedVCControlMessageType",
+ "unrecognized VC control message type({3})")
+ errFailedToDeserializedVCControlMessage = reg(".errFailedToDeserializedVCControlMessage", "failed to deserialize control message {3}({4}): {5}")
+ errFailedToWriteHeader = reg(".errFailedToWriteHeader", "failed to write header. Wrote {3} bytes instead of {4}{:5}")
+)
+
// Control is the interface implemented by all control messages.
type Control interface {
readFrom(r *bytes.Buffer) error
@@ -145,12 +160,12 @@
case *SetupVC:
command = setupVCCommand
default:
- return fmt.Errorf("unrecognized VC control message: %T", m)
+ return verror.New(errUnrecognizedVCControlMessageType, nil, fmt.Sprintf("%T", m))
}
var header [1]byte
header[0] = byte(command)
if n, err := w.Write(header[:]); n != len(header) || err != nil {
- return fmt.Errorf("failed to write header. Got (%d, %v) want (%d, nil)", n, err, len(header))
+ return verror.New(errFailedToWriteHeader, nil, n, len(header), err)
}
if err := m.writeTo(w); err != nil {
return err
@@ -162,7 +177,7 @@
var header byte
var err error
if header, err = r.ReadByte(); err != nil {
- return nil, fmt.Errorf("message too small, cannot read control message command (0, %v)", err)
+ return nil, err
}
command := command(header)
var m Control
@@ -182,10 +197,10 @@
case setupVCCommand:
m = new(SetupVC)
default:
- return nil, fmt.Errorf("unrecognized VC control message command(%d)", command)
+ return nil, verror.New(errUnrecognizedVCControlMessageCommand, nil, command)
}
if err := m.readFrom(r); err != nil {
- return nil, fmt.Errorf("failed to deserialize control message %d(%T): %v", command, m, err)
+ return nil, verror.New(errFailedToDeserializeVCControlMessage, nil, command, fmt.Sprintf("%T", m), err)
}
return m, nil
}
diff --git a/profiles/internal/rpc/stream/message/message.go b/profiles/internal/rpc/stream/message/message.go
index b8ef71f..c729949 100644
--- a/profiles/internal/rpc/stream/message/message.go
+++ b/profiles/internal/rpc/stream/message/message.go
@@ -64,11 +64,13 @@
import (
"bytes"
- "errors"
"fmt"
"io"
"v.io/x/lib/vlog"
+
+ "v.io/v23/verror"
+
"v.io/x/ref/profiles/internal/lib/iobuf"
"v.io/x/ref/profiles/internal/rpc/stream/crypto"
"v.io/x/ref/profiles/internal/rpc/stream/id"
@@ -93,8 +95,16 @@
)
var (
- emptyMessageErr = errors.New("message is empty")
- corruptedMessageErr = errors.New("corrupted message")
+ // These errors are intended to be used as arguments to higher
+ // level errors and hence {1}{2} is omitted from their format
+ // strings to avoid repeating these n-times in the final error
+ // message visible to the user.
+ errEmptyMessage = reg(".errEmptyMessage", "message is empty")
+ errCorruptedMessage = reg(".errCorruptedMessage", "corrupted message")
+ errInvalidMessageType = reg("errInvalidMessageType", "invalid message type {3}")
+ errUnrecognizedMessageType = reg("errUrecognizedMessageType", "unrecognized message type {3}")
+ errFailedToReadVCHeader = reg(".errFailedToReadVCHeader", "failed to read VC header{:3}")
+ errFailedToReadPayload = reg(".errFailedToReadPayload", "failed to read payload of {3} bytes for type {4}{:5}")
)
// T is the interface implemented by all messages communicated over a VIF.
@@ -117,7 +127,7 @@
func ReadFrom(r *iobuf.Reader, c crypto.ControlCipher) (T, error) {
header, err := r.Read(commonHeaderSizeBytes)
if err != nil {
- return nil, fmt.Errorf("failed to read VC header: %v", err)
+ return nil, verror.New(errFailedToReadVCHeader, nil, err)
}
c.Decrypt(header.Contents)
msgType := header.Contents[0]
@@ -125,14 +135,14 @@
header.Release()
payload, err := r.Read(msgPayloadSize)
if err != nil {
- return nil, fmt.Errorf("failed to read payload of %d bytes for type %d: %v", msgPayloadSize, msgType, err)
+ return nil, verror.New(errFailedToReadPayload, nil, msgPayloadSize, msgType, err)
}
macSize := c.MACSize()
switch msgType {
case controlType, controlTypeWS:
if !c.Open(payload.Contents) {
payload.Release()
- return nil, corruptedMessageErr
+ return nil, verror.New(errCorruptedMessage, nil)
}
m, err := readControl(bytes.NewBuffer(payload.Contents[:msgPayloadSize-macSize]))
payload.Release()
@@ -140,7 +150,7 @@
case dataType, dataTypeWS:
if !c.Open(payload.Contents[0 : dataHeaderSizeBytes+macSize]) {
payload.Release()
- return nil, corruptedMessageErr
+ return nil, verror.New(errCorruptedMessage, nil)
}
m := &Data{
VCI: id.VC(read4ByteUint(payload.Contents[0:4])),
@@ -152,7 +162,7 @@
return m, nil
default:
payload.Release()
- return nil, fmt.Errorf("unrecognized message type: %d", msgType)
+ return nil, verror.New(errUnrecognizedMessageType, nil, msgType)
}
}
@@ -207,7 +217,7 @@
_, err := w.Write(msg)
return err
default:
- return fmt.Errorf("invalid message type %T", m)
+ return verror.New(errInvalidMessageType, nil, fmt.Sprintf("%T", m))
}
return nil
}
@@ -215,7 +225,7 @@
// EncryptMessage encrypts the message's control data in place.
func EncryptMessage(msg []byte, c crypto.ControlCipher) error {
if len(msg) == 0 {
- return emptyMessageErr
+ return verror.New(errEmptyMessage, nil)
}
n := len(msg)
switch msgType := msg[0]; msgType {
@@ -224,7 +234,7 @@
case dataType:
n = HeaderSizeBytes + c.MACSize()
default:
- return fmt.Errorf("unrecognized message type: %d", msgType)
+ return verror.New(errUnrecognizedMessageType, nil, msgType)
}
c.Encrypt(msg[0:commonHeaderSizeBytes])
c.Seal(msg[commonHeaderSizeBytes:n])
diff --git a/profiles/internal/rpc/stream/proxy/proxy.go b/profiles/internal/rpc/stream/proxy/proxy.go
index 3738181..184b209 100644
--- a/profiles/internal/rpc/stream/proxy/proxy.go
+++ b/profiles/internal/rpc/stream/proxy/proxy.go
@@ -5,7 +5,6 @@
package proxy
import (
- "errors"
"fmt"
"net"
"sync"
@@ -25,6 +24,7 @@
"v.io/x/ref/profiles/internal/lib/iobuf"
"v.io/x/ref/profiles/internal/lib/publisher"
"v.io/x/ref/profiles/internal/lib/upcqueue"
+ "v.io/x/ref/profiles/internal/rpc/stream"
"v.io/x/ref/profiles/internal/rpc/stream/crypto"
"v.io/x/ref/profiles/internal/rpc/stream/id"
"v.io/x/ref/profiles/internal/rpc/stream/message"
@@ -37,13 +37,36 @@
const pkgPath = "v.io/x/ref/profiles/proxy"
-var (
- errNoRoutingTableEntry = errors.New("routing table has no entry for the VC")
- errProcessVanished = errors.New("remote process vanished")
- errDuplicateOpenVC = errors.New("duplicate OpenVC request")
+func reg(id, msg string) verror.IDAction {
+ return verror.Register(verror.ID(pkgPath+id), verror.NoRetry, msg)
+}
- errNoDecoder = verror.Register(pkgPath+".errNoDecoder", verror.NoRetry, "{1:}{2:} proxy: failed to create Decoder{:_}")
- errNoRequest = verror.Register(pkgPath+".errNoRequest", verror.NoRetry, "{1:}{2:} proxy: unable to read Request{:_}")
+var (
+ // These errors are intended to be used as arguments to higher
+ // level errors and hence {1}{2} is omitted from their format
+ // strings to avoid repeating these n-times in the final error
+ // message visible to the user.
+ errNoRoutingTableEntry = reg(".errNoRoutingTableEntry", "routing table has no entry for the VC")
+ errProcessVanished = reg(".errProcessVanished", "remote process vanished")
+ errDuplicateOpenVC = reg(".errDuplicateOpenVC", "duplicate OpenVC request")
+ errVomDecoder = reg(".errVomDecoder", "failed to create vom decoder{:3}")
+ errVomEncoder = reg(".errVomEncoder", "failed to create vom encoder{:3}")
+ errVomEncodeResponse = reg(".errVomEncodeResponse", "failed to encode response from proxy{:3}")
+ errNoRequest = reg(".errNoRequest", "unable to read Request{:3}")
+ errServerClosedByProxy = reg(".errServerClosedByProxy", "server closed by proxy")
+ errRemoveServerVC = reg(".errRemoveServerVC", "failed to remove server VC {3}{:4}")
+ errNetConnClosing = reg(".errNetConnClosing", "net.Conn is closing")
+ errFailedToAcceptHealthCheck = reg(".errFailedToAcceptHealthCheck", "failed to accept health check flow")
+ errIncompatibleVersions = reg(".errIncompatibleVersions", "{:3}")
+ errAlreadyProxied = reg(".errAlreadyProxied", "server with routing id {3} is already being proxied")
+ errUnknownNetwork = reg(".errUnknownNetwork", "unknown network {3}")
+ errListenFailed = reg(".errListenFailed", "net.Listen({3}, {4}) failed{:5}")
+ errFailedToForwardRxBufs = reg(".errFailedToForwardRxBufs", "failed to forward receive buffers{:3}")
+ errFailedToForwardDataMsg = reg(".errFailedToForwardDataMsg", "failed to forward data message{:3}")
+ errFailedToForwardOpenFlow = reg(".errFailedToForwardOpenFlow", "failed to forward open flow{:3}")
+ errUnsupportedSetupVC = reg(".errUnsupportedSetupVC", "proxy support for SetupVC not implemented yet")
+ errServerNotBeingProxied = reg(".errServerNotBeingProxied", "no server with routing id {3} is being proxied")
+ errServerVanished = reg(".errServerVanished", "server with routing id {3} vanished")
)
// Proxy routes virtual circuit (VC) traffic between multiple underlying
@@ -94,9 +117,9 @@
func (s *server) Close(err error) {
if vc := s.Process.RemoveServerVC(s.VC.VCI()); vc != nil {
if err != nil {
- vc.Close(err.Error())
+ vc.Close(verror.New(stream.ErrProxy, nil, verror.New(errRemoveServerVC, nil, s.VC.VCI(), err)))
} else {
- vc.Close("server closed by proxy")
+ vc.Close(verror.New(stream.ErrProxy, nil, verror.New(errServerClosedByProxy, nil)))
}
s.Process.SendCloseVC(s.VC.VCI(), err)
}
@@ -118,7 +141,7 @@
m.mu.Lock()
defer m.mu.Unlock()
if m.m[key] != nil {
- return fmt.Errorf("server with routing id %v is already being proxied", key)
+ return verror.New(stream.ErrProxy, nil, verror.New(errAlreadyProxied, nil, key))
}
m.m[key] = server
proxyLog().Infof("Started proxying server: %v", server)
@@ -197,11 +220,11 @@
func internalNew(rid naming.RoutingID, principal security.Principal, network, address, pubAddress string) (*Proxy, error) {
_, listenFn, _ := rpc.RegisteredProtocol(network)
if listenFn == nil {
- return nil, fmt.Errorf("unknown network %s", network)
+ return nil, verror.New(stream.ErrProxy, nil, verror.New(errUnknownNetwork, nil, network))
}
ln, err := listenFn(network, address)
if err != nil {
- return nil, fmt.Errorf("net.Listen(%q, %q) failed: %v", network, address, err)
+ return nil, verror.New(stream.ErrProxy, nil, verror.New(errListenFailed, nil, network, address, err))
}
if len(pubAddress) == 0 {
pubAddress = ln.Addr().String()
@@ -276,7 +299,7 @@
// See comments in protocol.vdl for the protocol between servers and the proxy.
conn, err := hr.Listener.Accept()
if err != nil {
- server.Close(errors.New("failed to accept health check flow"))
+ server.Close(verror.New(stream.ErrProxy, nil, verror.New(errFailedToAcceptHealthCheck, nil)))
return
}
server.Process.InitVCI(server.VC.VCI())
@@ -284,9 +307,9 @@
var response Response
dec, err := vom.NewDecoder(conn)
if err != nil {
- response.Error = verror.New(errNoDecoder, nil, err)
+ response.Error = verror.New(stream.ErrProxy, nil, verror.New(errVomDecoder, nil, err))
} else if err := dec.Decode(&request); err != nil {
- response.Error = verror.New(errNoRequest, nil, err)
+ response.Error = verror.New(stream.ErrProxy, nil, verror.New(errNoRequest, nil, err))
} else if err := p.servers.Add(server); err != nil {
response.Error = verror.Convert(verror.ErrUnknown, nil, err)
} else {
@@ -302,12 +325,12 @@
enc, err := vom.NewEncoder(conn)
if err != nil {
proxyLog().Infof("Failed to create Encoder for server %v: %v", server, err)
- server.Close(err)
+ server.Close(verror.New(stream.ErrProxy, nil, verror.New(errVomEncoder, nil, err)))
return
}
if err := enc.Encode(response); err != nil {
proxyLog().Infof("Failed to encode response %#v for server %v", response, server)
- server.Close(err)
+ server.Close(verror.New(stream.ErrProxy, nil, verror.New(errVomEncodeResponse, nil, err)))
return
}
// Reject all other flows
@@ -341,7 +364,7 @@
c.Add(d.VCI, cid.Flow(), bytes)
if err := d.Process.queue.Put(&message.AddReceiveBuffers{Counters: c}); err != nil {
process.RemoveRoute(srcVCI)
- process.SendCloseVC(srcVCI, fmt.Errorf("proxy failed to forward receive buffers: %v", err))
+ process.SendCloseVC(srcVCI, verror.New(stream.ErrProxy, nil, verror.New(errFailedToForwardRxBufs, nil, err)))
}
}
}
@@ -470,11 +493,11 @@
if err := d.Process.queue.Put(m); err != nil {
m.Release()
p.RemoveRoute(srcVCI)
- p.SendCloseVC(srcVCI, fmt.Errorf("proxy failed to forward data message: %v", err))
+ p.SendCloseVC(srcVCI, verror.New(stream.ErrProxy, nil, verror.New(errFailedToForwardDataMsg, nil, err)))
}
break
}
- p.SendCloseVC(srcVCI, errNoRoutingTableEntry)
+ p.SendCloseVC(srcVCI, verror.New(stream.ErrProxy, nil, verror.New(errNoRoutingTableEntry, nil)))
case *message.OpenFlow:
if vc := p.ServerVC(m.VCI); vc != nil {
if err := vc.AcceptFlow(m.Flow); err != nil {
@@ -491,14 +514,14 @@
m.VCI = d.VCI
if err := d.Process.queue.Put(m); err != nil {
p.RemoveRoute(srcVCI)
- p.SendCloseVC(srcVCI, fmt.Errorf("proxy failed to forward open flow message: %v", err))
+ p.SendCloseVC(srcVCI, verror.New(stream.ErrProxy, nil, verror.New(errFailedToForwardOpenFlow, nil, err)))
}
break
}
- p.SendCloseVC(srcVCI, errNoRoutingTableEntry)
+ p.SendCloseVC(srcVCI, verror.New(stream.ErrProxy, nil, verror.New(errNoRoutingTableEntry, nil)))
case *message.CloseVC:
if vc := p.RemoveServerVC(m.VCI); vc != nil {
- vc.Close(m.Error)
+ vc.Close(verror.New(stream.ErrProxy, nil, verror.New(errRemoveServerVC, nil, m.VCI, m.Error)))
break
}
srcVCI := m.VCI
@@ -515,13 +538,13 @@
if naming.Compare(dstrid, p.proxy.rid) || naming.Compare(dstrid, naming.NullRoutingID) {
// VC that terminates at the proxy.
// TODO(ashankar,mattr): Implement this!
- p.SendCloseVC(m.VCI, fmt.Errorf("proxy support for SetupVC not implemented yet"))
+ p.SendCloseVC(m.VCI, verror.New(stream.ErrProxy, nil, verror.New(errUnsupportedSetupVC, nil)))
p.proxy.routeCounters(p, m.Counters)
break
}
dstprocess := p.proxy.servers.Process(dstrid)
if dstprocess == nil {
- p.SendCloseVC(m.VCI, fmt.Errorf("no server with routing id %v is being proxied", dstrid))
+ p.SendCloseVC(m.VCI, verror.New(stream.ErrProxy, nil, verror.New(errServerNotBeingProxied, nil, dstrid)))
p.proxy.routeCounters(p, m.Counters)
break
}
@@ -538,7 +561,7 @@
dstVCI := dstprocess.AllocVCI()
startRoutingVC(srcVCI, dstVCI, p, dstprocess)
if d = p.Route(srcVCI); d == nil {
- p.SendCloseVC(srcVCI, fmt.Errorf("server with routing id %v vanished", dstrid))
+ p.SendCloseVC(srcVCI, verror.New(stream.ErrProxy, nil, verror.New(errServerVanished, nil, dstrid)))
p.proxy.routeCounters(p, m.Counters)
break
}
@@ -576,7 +599,7 @@
}
dstprocess := p.proxy.servers.Process(dstrid)
if dstprocess == nil {
- p.SendCloseVC(m.VCI, fmt.Errorf("no server with routing id %v is being proxied", dstrid))
+ p.SendCloseVC(m.VCI, verror.New(stream.ErrProxy, nil, verror.New(errServerNotBeingProxied, nil, dstrid)))
p.proxy.routeCounters(p, m.Counters)
break
}
@@ -672,11 +695,11 @@
rt := p.routingTable
p.routingTable = nil
for _, vc := range p.servers {
- vc.Close("net.Conn is closing")
+ vc.Close(verror.New(stream.ErrProxy, nil, verror.New(errNetConnClosing, nil)))
}
p.mu.Unlock()
for _, d := range rt {
- d.Process.SendCloseVC(d.VCI, errProcessVanished)
+ d.Process.SendCloseVC(d.VCI, verror.New(stream.ErrProxy, nil, verror.New(errProcessVanished, nil)))
}
p.bq.Close()
p.queue.Close()
@@ -695,12 +718,12 @@
p.mu.Lock()
defer p.mu.Unlock()
if vc := p.servers[m.VCI]; vc != nil {
- vc.Close("duplicate OpenVC request")
+ vc.Close(verror.New(stream.ErrProxy, nil, verror.New(errDuplicateOpenVC, nil)))
return nil
}
version, err := version.CommonVersion(m.DstEndpoint, m.SrcEndpoint)
if err != nil {
- p.SendCloseVC(m.VCI, fmt.Errorf("incompatible RPC protocol versions: %v", err))
+ p.SendCloseVC(m.VCI, verror.New(stream.ErrProxy, nil, verror.New(errIncompatibleVersions, nil, err)))
return nil
}
vc := vc.InternalNew(vc.Params{
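Throughout proxy.go the detailed, package-private error is nested inside the exported stream.ErrProxy category, so callers (and tests) can branch on a stable ID instead of matching message text. A standalone sketch of that wrap-and-inspect pattern, with locally registered IDs standing in for stream.ErrProxy and the proxy-internal errors (all names here are illustrative):

package main

import (
	"fmt"

	"v.io/v23/verror"
)

var (
	// errProxy stands in for stream.ErrProxy, the outer "category" error;
	// errNoRoutingTableEntry stands in for one of the detailed inner errors.
	errProxy               = verror.Register("demo.errProxy", verror.NoRetry, "{1:}{2:} proxy error{:3}")
	errNoRoutingTableEntry = verror.Register("demo.errNoRoutingTableEntry", verror.NoRetry, "routing table has no entry for the VC")
)

func closeReason() error {
	// The inner error carries the detail, the outer error carries the category.
	return verror.New(errProxy, nil, verror.New(errNoRoutingTableEntry, nil))
}

func main() {
	err := closeReason()
	if verror.ErrorID(err) == errProxy.ID { // match on the ID, not the text
		fmt.Println("proxy-level failure:", err)
	}
}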
diff --git a/profiles/internal/rpc/stream/vc/auth.go b/profiles/internal/rpc/stream/vc/auth.go
index ff0dee9..b74a351 100644
--- a/profiles/internal/rpc/stream/vc/auth.go
+++ b/profiles/internal/rpc/stream/vc/auth.go
@@ -6,16 +6,16 @@
import (
"bytes"
- "errors"
- "fmt"
"io"
- "v.io/x/ref/profiles/internal/lib/iobuf"
- "v.io/x/ref/profiles/internal/rpc/stream/crypto"
-
"v.io/v23/rpc/version"
"v.io/v23/security"
+ "v.io/v23/verror"
"v.io/v23/vom"
+
+ "v.io/x/ref/profiles/internal/lib/iobuf"
+ "v.io/x/ref/profiles/internal/rpc/stream"
+ "v.io/x/ref/profiles/internal/rpc/stream/crypto"
)
var (
@@ -24,12 +24,17 @@
)
var (
- errSameChannelPublicKey = errors.New("same public keys for both ends of the channel")
- errChannelIDMismatch = errors.New("channel id does not match expectation")
- errChecksumMismatch = errors.New("checksum mismatch")
- errInvalidSignatureInMessage = errors.New("signature does not verify in authentication handshake message")
- errNoCertificatesReceived = errors.New("no certificates received")
- errSingleCertificateRequired = errors.New("exactly one X.509 certificate chain with exactly one certificate is required")
+ // These errors are intended to be used as arguments to higher
+ // level errors and hence {1}{2} is omitted from their format
+ // strings to avoid repeating these n-times in the final error
+ // message visible to the user.
+ errVomDecoder = reg(".errVomDecoder", "failed to create vom decoder{:3}")
+ errVomEncoder = reg(".errVomEncoder", "failed to create vom encoder{:3}")
+ errVomEncodeBlessing = reg(".errVomEncodeRequest", "failed to encode blessing{:3}")
+ errHandshakeMessage = reg(".errHandshakeMessage", "failed to read hanshake message{:3}")
+ errInvalidSignatureInMessage = reg(".errInvalidSignatureInMessage", "signature does not verify in authentication handshake message")
+ errFailedToCreateSelfBlessing = reg(".errFailedToCreateSelfBlessing", "failed to create self blessing{:3}")
+ errNoBlessingsToPresentToServer = reg(".errerrNoBlessingsToPresentToServer ", "no blessings to present as a server")
)
// AuthenticateAsServer executes the authentication protocol at the server.
@@ -37,7 +42,7 @@
// by the server.
func AuthenticateAsServer(conn io.ReadWriteCloser, principal security.Principal, server security.Blessings, dc DischargeClient, crypter crypto.Crypter, v version.RPCVersion) (security.Blessings, map[string]security.Discharge, error) {
if server.IsZero() {
- return security.Blessings{}, nil, errors.New("no blessings to present as a server")
+ return security.Blessings{}, nil, verror.New(stream.ErrSecurity, nil, verror.New(errNoBlessingsToPresentToServer, nil))
}
var serverDischarges []security.Discharge
if tpcavs := server.ThirdPartyCaveats(); len(tpcavs) > 0 && dc != nil {
@@ -71,7 +76,7 @@
params.RemoteBlessings = server
params.RemoteDischarges = serverDischarges
if err := auth.Authorize(params); err != nil {
- return security.Blessings{}, security.Blessings{}, nil, err
+ return security.Blessings{}, security.Blessings{}, nil, verror.New(stream.ErrNotTrusted, nil, err)
}
}
@@ -81,7 +86,7 @@
principal := params.LocalPrincipal
client, err := principal.BlessSelf("vcauth")
if err != nil {
- return security.Blessings{}, security.Blessings{}, nil, fmt.Errorf("failed to created self blessing: %v", err)
+ return security.Blessings{}, security.Blessings{}, nil, verror.New(stream.ErrSecurity, nil, verror.New(errFailedToCreateSelfBlessing, nil, err))
}
if err := writeBlessings(conn, authClientContextTag, crypter, principal, client, nil, v); err != nil {
return security.Blessings{}, security.Blessings{}, nil, err
@@ -97,17 +102,17 @@
var buf bytes.Buffer
enc, err := vom.NewEncoder(&buf)
if err != nil {
- return err
+ return verror.New(stream.ErrNetwork, nil, verror.New(errVomEncoder, nil, err))
}
if err := enc.Encode(signature); err != nil {
- return err
+ return verror.New(stream.ErrNetwork, nil, verror.New(errVomEncodeBlessing, nil, err))
}
if err := enc.Encode(b); err != nil {
- return err
+ return verror.New(stream.ErrNetwork, nil, verror.New(errVomEncodeBlessing, nil, err))
}
if v >= version.RPCVersion5 {
if err := enc.Encode(discharges); err != nil {
- return err
+ return verror.New(stream.ErrNetwork, nil, verror.New(errVomEncodeBlessing, nil, err))
}
}
msg, err := crypter.Encrypt(iobuf.NewSlice(buf.Bytes()))
@@ -117,9 +122,12 @@
defer msg.Release()
enc, err = vom.NewEncoder(w)
if err != nil {
- return err
+ return verror.New(stream.ErrNetwork, nil, verror.New(errVomEncoder, nil, err))
}
- return enc.Encode(msg.Contents)
+ if err := enc.Encode(msg.Contents); err != nil {
+ return verror.New(stream.ErrNetwork, nil, verror.New(errVomEncodeBlessing, nil, err))
+ }
+ return nil
}
func readBlessings(r io.Reader, tag []byte, crypter crypto.Crypter, v version.RPCVersion) (security.Blessings, map[string]security.Discharge, error) {
@@ -127,10 +135,10 @@
var noBlessings security.Blessings
dec, err := vom.NewDecoder(r)
if err != nil {
- return noBlessings, nil, fmt.Errorf("failed to create new decoder: %v", err)
+ return noBlessings, nil, verror.New(stream.ErrNetwork, nil, verror.New(errVomDecoder, nil, err))
}
if err := dec.Decode(&msg); err != nil {
- return noBlessings, nil, fmt.Errorf("failed to read handshake message: %v", err)
+ return noBlessings, nil, verror.New(stream.ErrNetwork, nil, verror.New(errHandshakeMessage, nil, err))
}
buf, err := crypter.Decrypt(iobuf.NewSlice(msg))
if err != nil {
@@ -139,7 +147,7 @@
defer buf.Release()
dec, err = vom.NewDecoder(bytes.NewReader(buf.Contents))
if err != nil {
- return noBlessings, nil, fmt.Errorf("failed to create new decoder: %v", err)
+ return noBlessings, nil, verror.New(stream.ErrNetwork, nil, verror.New(errVomDecoder, nil, err))
}
var (
@@ -147,19 +155,19 @@
sig security.Signature
)
if err = dec.Decode(&sig); err != nil {
- return noBlessings, nil, err
+ return noBlessings, nil, verror.New(stream.ErrNetwork, nil, err)
}
if err = dec.Decode(&blessings); err != nil {
- return noBlessings, nil, err
+ return noBlessings, nil, verror.New(stream.ErrNetwork, nil, err)
}
var discharges []security.Discharge
if v >= version.RPCVersion5 {
if err := dec.Decode(&discharges); err != nil {
- return noBlessings, nil, err
+ return noBlessings, nil, verror.New(stream.ErrNetwork, nil, err)
}
}
if !sig.Verify(blessings.PublicKey(), append(tag, crypter.ChannelBinding()...)) {
- return noBlessings, nil, errInvalidSignatureInMessage
+ return noBlessings, nil, verror.New(stream.ErrSecurity, nil, verror.New(errInvalidSignatureInMessage, nil))
}
return blessings, mkDischargeMap(discharges), nil
}
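readBlessings and writeBlessings now distinguish transport failures (stream.ErrNetwork) from authentication and trust failures (stream.ErrSecurity, stream.ErrNotTrusted). A hedged sketch of how a caller might react to those categories, using locally registered stand-ins because the real IDs live in the internal stream package:

package main

import (
	"fmt"

	"v.io/v23/verror"
)

var (
	errNetwork    = verror.Register("demo.errNetwork", verror.NoRetry, "{1:}{2:} network error{:3}")
	errSecurity   = verror.Register("demo.errSecurity", verror.NoRetry, "{1:}{2:} security error{:3}")
	errNotTrusted = verror.Register("demo.errNotTrusted", verror.NoRetry, "{1:}{2:} untrusted peer{:3}")
)

// describe reacts to the category of a handshake error without parsing text.
func describe(err error) string {
	switch verror.ErrorID(err) {
	case errNetwork.ID:
		return "transport problem; possibly transient"
	case errNotTrusted.ID:
		return "peer not trusted; do not retry blindly"
	case errSecurity.ID:
		return "authentication protocol failure"
	default:
		return "unclassified failure"
	}
}

func main() {
	err := verror.New(errNotTrusted, nil, "signature does not verify")
	fmt.Println(describe(err))
}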
diff --git a/profiles/internal/rpc/stream/vc/flow.go b/profiles/internal/rpc/stream/vc/flow.go
index 4a5af99..88a6b42 100644
--- a/profiles/internal/rpc/stream/vc/flow.go
+++ b/profiles/internal/rpc/stream/vc/flow.go
@@ -7,6 +7,7 @@
import (
"v.io/v23/naming"
"v.io/v23/security"
+
"v.io/x/ref/profiles/internal/rpc/stream"
)
diff --git a/profiles/internal/rpc/stream/vc/listener.go b/profiles/internal/rpc/stream/vc/listener.go
index c4b007b..991d776 100644
--- a/profiles/internal/rpc/stream/vc/listener.go
+++ b/profiles/internal/rpc/stream/vc/listener.go
@@ -5,13 +5,20 @@
package vc
import (
- "errors"
+ "v.io/v23/verror"
"v.io/x/ref/profiles/internal/lib/upcqueue"
"v.io/x/ref/profiles/internal/rpc/stream"
)
-var errListenerClosed = errors.New("Listener has been closed")
+var (
+ // These errors are intended to be used as arguments to higher
+ // level errors and hence {1}{2} is omitted from their format
+ // strings to avoid repeating these n-times in the final error
+ // message visible to the user.
+ errListenerClosed = reg(".errListenerClosed", "Listener has been closed")
+ errGetFromQueue = reg(".errGetFromQueue", "upcqueue.Get failed{:3}")
+)
type listener struct {
q *upcqueue.T
@@ -24,7 +31,7 @@
func (l *listener) Enqueue(f stream.Flow) error {
err := l.q.Put(f)
if err == upcqueue.ErrQueueIsClosed {
- return errListenerClosed
+ return verror.New(stream.ErrBadState, nil, verror.New(errListenerClosed, nil))
}
return err
}
@@ -32,10 +39,10 @@
func (l *listener) Accept() (stream.Flow, error) {
item, err := l.q.Get(nil)
if err == upcqueue.ErrQueueIsClosed {
- return nil, errListenerClosed
+ return nil, verror.New(stream.ErrBadState, nil, verror.New(errListenerClosed, nil))
}
if err != nil {
- return nil, err
+ return nil, verror.New(stream.ErrNetwork, nil, verror.New(errGetFromQueue, nil, err))
}
return item.(stream.Flow), nil
}
diff --git a/profiles/internal/rpc/stream/vc/listener_test.go b/profiles/internal/rpc/stream/vc/listener_test.go
index 6ddc5cf..1aa4899 100644
--- a/profiles/internal/rpc/stream/vc/listener_test.go
+++ b/profiles/internal/rpc/stream/vc/listener_test.go
@@ -5,10 +5,13 @@
package vc
import (
+ "strings"
"testing"
"v.io/v23/naming"
"v.io/v23/security"
+ "v.io/v23/verror"
+
"v.io/x/ref/profiles/internal/rpc/stream"
)
@@ -56,10 +59,10 @@
if err := ln.Close(); err != nil {
t.Error(err)
}
- if err := ln.Enqueue(f1); err != errListenerClosed {
+ if err := ln.Enqueue(f1); verror.ErrorID(err) != stream.ErrBadState.ID || !strings.Contains(err.Error(), "closed") {
t.Error(err)
}
- if f, err := ln.Accept(); f != nil || err != errListenerClosed {
+ if f, err := ln.Accept(); f != nil || verror.ErrorID(err) != stream.ErrBadState.ID || !strings.Contains(err.Error(), "closed") {
t.Errorf("Accept returned (%p, %v) wanted (nil, %v)", f, err, errListenerClosed)
}
}
diff --git a/profiles/internal/rpc/stream/vc/reader.go b/profiles/internal/rpc/stream/vc/reader.go
index be778fb..c90dbdd 100644
--- a/profiles/internal/rpc/stream/vc/reader.go
+++ b/profiles/internal/rpc/stream/vc/reader.go
@@ -5,14 +5,24 @@
package vc
import (
- "fmt"
"io"
"sync"
"sync/atomic"
+ "v.io/v23/verror"
+
"v.io/x/ref/profiles/internal/lib/iobuf"
vsync "v.io/x/ref/profiles/internal/lib/sync"
"v.io/x/ref/profiles/internal/lib/upcqueue"
+ "v.io/x/ref/profiles/internal/rpc/stream"
+)
+
+var (
+ // These errors are intended to be used as arguments to higher
+ // level errors and hence {1}{2} is omitted from their format
+ // strings to avoid repeating these n-times in the final error
+ // message visible to the user.
+ errGetFailed = reg(".errGetFailed", "upcqueue.Get failed:{:3}")
)
// readHandler is the interface used by the reader to notify other components
@@ -63,9 +73,9 @@
return 0, io.EOF
case vsync.ErrCanceled:
// As per net.Conn.Read specification
- return 0, timeoutError{}
+ return 0, stream.NewNetError(verror.New(stream.ErrNetwork, nil, verror.New(errCanceled, nil)), true, false)
default:
- return 0, fmt.Errorf("upcqueue.Get failed: %v", err)
+ return 0, verror.New(stream.ErrNetwork, nil, verror.New(errGetFailed, nil, err))
}
}
r.buf = slice.(*iobuf.Slice)
@@ -103,10 +113,3 @@
func (r *reader) Put(slice *iobuf.Slice) error {
return r.src.Put(slice)
}
-
-// timeoutError implements net.Error with Timeout returning true.
-type timeoutError struct{}
-
-func (t timeoutError) Error() string { return "deadline exceeded" }
-func (t timeoutError) Timeout() bool { return true }
-func (t timeoutError) Temporary() bool { return false }
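The timeoutError type deleted here is replaced by calls to stream.NewNetError, whose definition is not part of this section. Judging only from the call sites in reader.go and writer.go, it appears to wrap an error together with timeout/temporary flags so that deadline-style failures still satisfy net.Error; a plausible shape for such a helper is sketched below (an assumption, not the actual implementation):

package stream // illustrative placement; the real helper lives in the internal stream package

import "net"

// netError pairs an underlying error with the net.Error timeout/temporary
// flags so that callers doing net.Conn-style deadline checks keep working.
type netError struct {
	err                error
	timeout, temporary bool
}

// NewNetError is assumed, from its call sites, to take the wrapped error
// plus the timeout and temporary flags.
func NewNetError(err error, timeout, temporary bool) net.Error {
	return &netError{err, timeout, temporary}
}

func (e *netError) Error() string   { return e.err.Error() }
func (e *netError) Timeout() bool   { return e.timeout }
func (e *netError) Temporary() bool { return e.temporary }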
diff --git a/profiles/internal/rpc/stream/vc/vc.go b/profiles/internal/rpc/stream/vc/vc.go
index 3774761..5ea0a6a 100644
--- a/profiles/internal/rpc/stream/vc/vc.go
+++ b/profiles/internal/rpc/stream/vc/vc.go
@@ -9,7 +9,6 @@
// Verbosity level 2 is for per-Flow messages.
import (
- "errors"
"fmt"
"io"
"sort"
@@ -21,6 +20,7 @@
"v.io/v23/naming"
"v.io/v23/rpc/version"
"v.io/v23/security"
+ "v.io/v23/verror"
"v.io/v23/vom"
"v.io/x/lib/vlog"
@@ -32,10 +32,38 @@
"v.io/x/ref/profiles/internal/rpc/stream/id"
)
+const pkgPath = "v.io/x/ref/profiles/internal/rpc/stream/vc"
+
+func reg(id, msg string) verror.IDAction {
+ return verror.Register(verror.ID(pkgPath+id), verror.NoRetry, msg)
+}
+
var (
- errAlreadyListening = errors.New("Listen has already been called")
- errDuplicateFlow = errors.New("duplicate OpenFlow message")
- errUnrecognizedFlow = errors.New("unrecognized flow")
+ // These errors are intended to be used as arguments to higher
+ // level errors and hence {1}{2} is omitted from their format
+ // strings to avoid repeating these n-times in the final error
+ // message visible to the user.
+ errAlreadyListening = reg(".errAlreadyListening", "Listen has already been called")
+ errDuplicateFlow = reg(".errDuplicateFlow", "duplicate OpenFlow message")
+ errUnrecognizedFlow = reg(".errUnrecognizedFlow", "unrecognized flow")
+ errFailedToCreateWriterForFlow = reg(".errFailedToCreateWriterForFlow", "failed to create writer for Flow{:3}")
+ errConnectOnClosedVC = reg(".errConnectOnClosedVC", "connect on closed VC{:3}")
+ errFailedToDecryptPayload = reg(".errFailedToDecryptPayload", "failed to decrypt payload{:3}")
+ errIgnoringMessageOnClosedVC = reg(".errIgnoringMessageOnClosedVC", "ignoring message for Flow {3} on closed VC {4}")
+ errVomTypedDecoder = reg(".errVomDecoder", "failed to create typed vom decoder{:3}")
+ errVomTypedEncoder = reg(".errVomEncoder", "failed to create vom typed encoder{:3}")
+ errFailedToCreateFlowForWireType = reg(".errFailedToCreateFlowForWireType", "fail to create a Flow for wire type{:3}")
+ errFlowForWireTypeNotAccepted = reg(".errFlowForWireTypeNotAccepted", "Flow for wire type not accepted{:3}")
+ errFailedToCreateTLSFlow = reg(".errFailedToCreateTLSFlow", "failed to create a Flow for setting up TLS{3:}")
+ errFailedToSetupTLS = reg(".errFailedToSetupTLS", "failed to setup TLS{:3}")
+ errFailedToCreateFlowForAuth = reg(".errFailedToCreateFlowForAuth", "failed to create a Flow for authentication{:3}")
+ errAuthFailed = reg(".errAuthFailed", "authentication failed{:3}")
+ errNoActiveListener = reg(".errNoActiveListener", "no active listener on VCI {3}")
+ errFailedToCreateWriterForNewFlow = reg(".errFailedToCreateWriterForNewFlow", "failed to create writer for new flow({3}){:4}")
+ errFailedToEnqueueFlow = reg(".errFailedToEnqueueFlow", "failed to enqueue flow at listener{:3}")
+ errTLSFlowNotAccepted = reg(".errTLSFlowNotAccepted", "TLS handshake Flow not accepted{:3}")
+ errAuthFlowNotAccepted = reg(".errAuthFlowNotAccepted", "authentication Flow not accepted{:3}")
+ errFailedToAcceptSystemFlows = reg(".errFailedToAcceptSystemFlows", "failed to accept system flows{:3}")
)
// DischargeExpiryBuffer specifies how much before discharge expiration we should
@@ -78,7 +106,7 @@
nextConnectFID id.Flow
listener *listener // non-nil iff Listen has been called and the VC has not been closed.
crypter crypto.Crypter
- closeReason string // reason why the VC was closed
+ closeReason error // reason why the VC was closed, possibly nil
closeCh chan struct{}
closed bool
@@ -118,6 +146,13 @@
func (DialContext) RPCStreamVCOpt() {}
func (DialContext) RPCStreamListenerOpt() {}
+// StartTimeout specifies the time after which the underlying VIF is closed
+// if no VC is opened.
+type StartTimeout struct{ time.Duration }
+
+func (StartTimeout) RPCStreamVCOpt() {}
+func (StartTimeout) RPCStreamListenerOpt() {}
+
// IdleTimeout specifies the time after which an idle VC is closed.
type IdleTimeout struct{ time.Duration }
@@ -219,7 +254,7 @@
func (vc *VC) connectFID(fid id.Flow, priority bqueue.Priority, opts ...stream.FlowOpt) (stream.Flow, error) {
writer, err := vc.newWriter(fid, priority)
if err != nil {
- return nil, fmt.Errorf("failed to create writer for Flow: %v", err)
+ return nil, verror.New(stream.ErrNetwork, nil, verror.New(errFailedToCreateWriterForFlow, nil, err))
}
f := &flow{
backingVC: vc,
@@ -230,7 +265,7 @@
if vc.flowMap == nil {
vc.mu.Unlock()
f.Shutdown()
- return nil, fmt.Errorf("Connect on closed VC(%q)", vc.closeReason)
+ return nil, verror.New(stream.ErrNetwork, nil, verror.New(errConnectOnClosedVC, nil, vc.closeReason))
}
vc.flowMap[fid] = f
vc.mu.Unlock()
@@ -244,7 +279,7 @@
vc.mu.Lock()
defer vc.mu.Unlock()
if vc.listener != nil {
- return nil, errAlreadyListening
+ return nil, verror.New(stream.ErrBadState, nil, verror.New(errAlreadyListening, nil))
}
vc.listener = newListener()
return vc.listener, nil
@@ -264,7 +299,7 @@
if vc.flowMap == nil {
vc.mu.Unlock()
payload.Release()
- return fmt.Errorf("ignoring message for Flow %d on closed VC %d", fid, vc.VCI())
+ return verror.New(stream.ErrNetwork, nil, verror.New(errIgnoringMessageOnClosedVC, nil, fid, vc.VCI()))
}
// TLS decryption is stateful, so even if the message will be discarded
// because of other checks further down in this method, go through with
@@ -274,7 +309,7 @@
var err error
if payload, err = vc.crypter.Decrypt(payload); err != nil {
vc.mu.Unlock()
- return fmt.Errorf("failed to decrypt payload: %v", err)
+ return verror.New(stream.ErrSecurity, nil, verror.New(errFailedToDecryptPayload, nil, err))
}
}
if payload.Size() == 0 {
@@ -286,12 +321,12 @@
if f == nil {
vc.mu.Unlock()
payload.Release()
- return errUnrecognizedFlow
+ return verror.New(stream.ErrNetwork, nil, verror.New(errUnrecognizedFlow, nil))
}
vc.mu.Unlock()
if err := f.reader.Put(payload); err != nil {
payload.Release()
- return err
+ return verror.New(stream.ErrNetwork, nil, err)
}
return nil
}
@@ -303,10 +338,10 @@
vc.mu.Lock()
defer vc.mu.Unlock()
if vc.listener == nil {
- return fmt.Errorf("no active listener on VCI %d", vc.vci)
+ return verror.New(stream.ErrBadState, nil, verror.New(errNoActiveListener, nil, vc.vci))
}
if _, exists := vc.flowMap[fid]; exists {
- return errDuplicateFlow
+ return verror.New(stream.ErrNetwork, nil, verror.New(errDuplicateFlow, nil))
}
priority := normalFlowPriority
// We use the same high priority for all reserved flows including handshake and
@@ -318,7 +353,7 @@
}
writer, err := vc.newWriter(fid, priority)
if err != nil {
- return fmt.Errorf("failed to create writer for new flow(%d): %v", fid, err)
+ return verror.New(stream.ErrNetwork, nil, verror.New(errFailedToCreateWriterForNewFlow, nil, fid, err))
}
f := &flow{
backingVC: vc,
@@ -327,7 +362,7 @@
}
if err = vc.listener.Enqueue(f); err != nil {
f.Shutdown()
- return fmt.Errorf("failed to enqueue flow at listener: %v", err)
+ return verror.New(stream.ErrNetwork, nil, verror.New(errFailedToEnqueueFlow, nil, err))
}
vc.flowMap[fid] = f
// New flow accepted, notify remote end that it can send over data.
@@ -375,7 +410,7 @@
// Close closes the VC and all flows on it, allowing any pending writes in the
// flow to drain.
-func (vc *VC) Close(reason string) error {
+func (vc *VC) Close(reason error) error {
vlog.VI(1).Infof("Closing VC %v. Reason:%q", vc, reason)
vc.mu.Lock()
if vc.closed {
@@ -401,12 +436,16 @@
return nil
}
-// err prefers vc.closeReason over err.
-func (vc *VC) err(err error) error {
+// appendCloseReason adds a closeReason, if any, as a sub error to err.
+func (vc *VC) appendCloseReason(err error) error {
vc.mu.Lock()
defer vc.mu.Unlock()
- if vc.closeReason != "" {
- return errors.New(vc.closeReason)
+ if vc.closeReason != nil {
+ return verror.AddSubErrs(err, nil, verror.SubErr{
+ Name: "remote=" + vc.RemoteEndpoint().String(),
+ Err: vc.closeReason,
+ Options: verror.Print,
+ })
}
return err
}
@@ -436,11 +475,16 @@
// Establish TLS
handshakeConn, err := vc.connectFID(HandshakeFlowID, systemFlowPriority)
if err != nil {
- return vc.err(fmt.Errorf("failed to create a Flow for setting up TLS: %v", err))
+ return vc.appendCloseReason(verror.New(stream.ErrSecurity, nil, verror.New(errFailedToCreateTLSFlow, nil, err)))
}
crypter, err := crypto.NewTLSClient(handshakeConn, handshakeConn.LocalEndpoint(), handshakeConn.RemoteEndpoint(), tlsSessionCache, vc.pool)
if err != nil {
- return vc.err(fmt.Errorf("failed to setup TLS: %v", err))
+ // Assume that we don't trust the server if the TLS handshake fails for any
+ // reason other than EOF.
+ if err == io.EOF {
+ return vc.appendCloseReason(verror.New(stream.ErrNetwork, nil, verror.New(errFailedToSetupTLS, nil, err)))
+ }
+ return vc.appendCloseReason(verror.New(stream.ErrNotTrusted, nil, verror.New(errFailedToSetupTLS, nil, err)))
}
// Authenticate (exchange identities)
@@ -454,7 +498,7 @@
// stream API.
authConn, err := vc.connectFID(AuthFlowID, systemFlowPriority)
if err != nil {
- return vc.err(fmt.Errorf("failed to create a Flow for authentication: %v", err))
+ return vc.appendCloseReason(verror.New(stream.ErrSecurity, nil, verror.New(errFailedToCreateFlowForAuth, nil, err)))
}
params := security.CallParams{
LocalPrincipal: principal,
@@ -465,7 +509,7 @@
if err != nil || len(rBlessings.ThirdPartyCaveats()) == 0 {
authConn.Close()
if err != nil {
- return vc.err(fmt.Errorf("authentication failed: %v", err))
+ return vc.appendCloseReason(err)
}
} else {
go vc.recvDischargesLoop(authConn)
@@ -483,7 +527,7 @@
// Open system flows.
if err = vc.connectSystemFlows(); err != nil {
- return vc.err(fmt.Errorf("failed to connect system flows: %v", err))
+ return vc.appendCloseReason(err)
}
vlog.VI(1).Infof("Client VC %v authenticated. RemoteBlessings:%v, LocalBlessings:%v", vc, rBlessings, lBlessings)
@@ -543,7 +587,7 @@
go func() {
sendErr := func(err error) {
ln.Close()
- result <- HandshakeResult{nil, vc.err(err)}
+ result <- HandshakeResult{nil, vc.appendCloseReason(err)}
}
// TODO(ashankar): There should be a timeout on this Accept
// call. Otherwise, a malicious (or incompetent) client can
@@ -552,7 +596,7 @@
// the identity exchange protocol.
handshakeConn, err := ln.Accept()
if err != nil {
- sendErr(fmt.Errorf("TLS handshake Flow not accepted: %v", err))
+ sendErr(verror.New(stream.ErrNetwork, nil, verror.New(errTLSFlowNotAccepted, nil, err)))
return
}
vc.mu.Lock()
@@ -563,14 +607,14 @@
// Establish TLS
crypter, err := crypto.NewTLSServer(handshakeConn, handshakeConn.LocalEndpoint(), handshakeConn.RemoteEndpoint(), vc.pool)
if err != nil {
- sendErr(fmt.Errorf("failed to setup TLS: %v", err))
+ sendErr(verror.New(stream.ErrSecurity, nil, verror.New(errFailedToSetupTLS, nil, err)))
return
}
// Authenticate (exchange identities)
authConn, err := ln.Accept()
if err != nil {
- sendErr(fmt.Errorf("Authentication Flow not accepted: %v", err))
+ sendErr(verror.New(stream.ErrNetwork, nil, verror.New(errAuthFlowNotAccepted, nil, err)))
return
}
vc.mu.Lock()
@@ -580,7 +624,7 @@
rBlessings, lDischarges, err := AuthenticateAsServer(authConn, principal, lBlessings, dischargeClient, crypter, vc.version)
if err != nil {
authConn.Close()
- sendErr(fmt.Errorf("authentication failed: %v", err))
+ sendErr(verror.New(stream.ErrSecurity, nil, verror.New(errAuthFailed, nil, err)))
return
}
@@ -602,7 +646,7 @@
// Accept system flows.
if err = vc.acceptSystemFlows(ln); err != nil {
- sendErr(fmt.Errorf("failed to accept system flows: %v", err))
+ sendErr(verror.New(stream.ErrNetwork, nil, verror.New(errFailedToAcceptSystemFlows, nil, err)))
}
vlog.VI(1).Infof("Server VC %v authenticated. RemoteBlessings:%v, LocalBlessings:%v", vc, rBlessings, lBlessings)
@@ -702,18 +746,18 @@
}
conn, err := vc.connectFID(TypeFlowID, systemFlowPriority)
if err != nil {
- return fmt.Errorf("fail to create a Flow for wire type: %v", err)
+ return verror.New(stream.ErrSecurity, nil, verror.New(errFailedToCreateFlowForWireType, nil, err))
}
typeEnc, err := vom.NewTypeEncoder(conn)
if err != nil {
conn.Close()
- return fmt.Errorf("failed to create type encoder: %v", err)
+ return verror.New(stream.ErrSecurity, nil, verror.New(errVomTypedEncoder, nil, err))
}
vc.dataCache.Insert(TypeEncoderKey{}, typeEnc)
typeDec, err := vom.NewTypeDecoder(conn)
if err != nil {
conn.Close()
- return fmt.Errorf("failed to create type decoder: %v", err)
+ return verror.New(stream.ErrSecurity, nil, verror.New(errVomTypedDecoder, nil, err))
}
vc.dataCache.Insert(TypeDecoderKey{}, typeDec)
return nil
@@ -725,18 +769,18 @@
}
conn, err := ln.Accept()
if err != nil {
- return fmt.Errorf("Flow for wire type not accepted: %v", err)
+ return verror.New(errFlowForWireTypeNotAccepted, nil, err)
}
typeDec, err := vom.NewTypeDecoder(conn)
if err != nil {
conn.Close()
- return fmt.Errorf("failed to create type decoder: %v", err)
+ return verror.New(errVomTypedDecoder, nil, err)
}
vc.dataCache.Insert(TypeDecoderKey{}, typeDec)
typeEnc, err := vom.NewTypeEncoder(conn)
if err != nil {
conn.Close()
- return fmt.Errorf("failed to create type encoder: %v", err)
+ return verror.New(errVomTypedEncoder, nil, err)
}
vc.dataCache.Insert(TypeEncoderKey{}, typeEnc)
return nil
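With VC.Close now taking an error instead of a string, appendCloseReason attaches the close reason to a dial or handshake error as a named sub-error via verror.AddSubErrs. A small standalone sketch of that pattern (IDs, names, and the address are illustrative):

package main

import (
	"fmt"

	"v.io/v23/verror"
)

var (
	errDial        = verror.Register("demo.errDial", verror.NoRetry, "{1:}{2:} dial failed{:3}")
	errCloseReason = verror.Register("demo.errCloseReason", verror.NoRetry, "remote end closed VC")
)

func main() {
	err := verror.New(errDial, nil, "tcp 127.0.0.1:4000")
	// Attach the VC's close reason as a named sub-error, mirroring
	// appendCloseReason; verror.Print forces it into the printed message.
	err = verror.AddSubErrs(err, nil, verror.SubErr{
		Name:    "remote=127.0.0.1:4000",
		Err:     verror.New(errCloseReason, nil),
		Options: verror.Print,
	})
	fmt.Println(err)
}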
diff --git a/profiles/internal/rpc/stream/vc/vc_test.go b/profiles/internal/rpc/stream/vc/vc_test.go
index 6bc422d..649d671 100644
--- a/profiles/internal/rpc/stream/vc/vc_test.go
+++ b/profiles/internal/rpc/stream/vc/vc_test.go
@@ -409,7 +409,7 @@
t.Fatal(err)
}
defer h.Close()
- h.VC.Close("reason")
+ h.VC.Close(fmt.Errorf("reason"))
if err := h.VC.AcceptFlow(id.Flow(10)); err == nil {
t.Fatalf("New flows should not be accepted once the VC is closed")
}
@@ -423,7 +423,7 @@
t.Fatal(err)
}
defer h.Close()
- h.VC.Close("myerr")
+ h.VC.Close(fmt.Errorf("myerr"))
if f, err := vc.Connect(); f != nil || err == nil || !strings.Contains(err.Error(), "myerr") {
t.Fatalf("Got (%v, %v), want (nil, %q)", f, err, "myerr")
}
@@ -594,7 +594,7 @@
}
func (h *helper) Close() {
- h.VC.Close("helper closed")
+ h.VC.Close(fmt.Errorf("helper closed"))
h.bq.Close()
h.mu.Lock()
otherEnd := h.otherEnd
diff --git a/profiles/internal/rpc/stream/vc/writer.go b/profiles/internal/rpc/stream/vc/writer.go
index 0e04985..ab4ccfc 100644
--- a/profiles/internal/rpc/stream/vc/writer.go
+++ b/profiles/internal/rpc/stream/vc/writer.go
@@ -5,18 +5,28 @@
package vc
import (
- "errors"
- "fmt"
"io"
"sync"
"sync/atomic"
+ "v.io/v23/verror"
+
"v.io/x/ref/profiles/internal/lib/bqueue"
"v.io/x/ref/profiles/internal/lib/iobuf"
vsync "v.io/x/ref/profiles/internal/lib/sync"
+ "v.io/x/ref/profiles/internal/rpc/stream"
)
-var errWriterClosed = errors.New("attempt to call Write on Flow that has been Closed")
+var (
+ // These errors are intended to be used as arguments to higher
+ // level errors and hence {1}{2} is omitted from their format
+ // strings to avoid repeating these n-times in the final error
+ // message visible to the user.
+ errWriterClosed = reg(".errWriterClosed", "attempt to call Write on Flow that has been Closed")
+ errBQueuePutFailed = reg(".errBqueuePutFailed", "bqueue.Writer.Put failed{:3}")
+ errFailedToGetQuota = reg(".errFailedToGetQuota", "failed to get quota from receive buffers shared by all new flows on a VC{:3}")
+ errCanceled = reg(".errCanceled", "underlying queues canceled")
+)
// writer implements the io.Writer and SetWriteDeadline interfaces for Flow.
type writer struct {
@@ -48,7 +58,7 @@
Alloc: alloc,
SharedCounters: counters,
closed: make(chan struct{}),
- closeError: errWriterClosed,
+ closeError: verror.New(errWriterClosed, nil),
}
}
@@ -114,7 +124,10 @@
w.mu.Lock()
defer w.mu.Unlock()
if w.isClosed {
- return 0, w.closeError
+ if w.closeError == io.EOF {
+ return 0, io.EOF
+ }
+ return 0, verror.New(stream.ErrBadState, nil, w.closeError)
}
for len(b) > 0 {
@@ -129,9 +142,9 @@
}
if err := w.SharedCounters.DecN(uint(n), w.deadline); err != nil {
if err == vsync.ErrCanceled {
- return 0, timeoutError{}
+ return 0, stream.NewNetError(verror.New(stream.ErrNetwork, nil, verror.New(errCanceled, nil)), true, false)
}
- return 0, fmt.Errorf("failed to get quota from receive buffers shared by all new flows on a VC: %v", err)
+ return 0, verror.New(stream.ErrNetwork, nil, verror.New(errFailedToGetQuota, nil, err))
}
w.muSharedCountersBorrowed.Lock()
w.sharedCountersBorrowed = n
@@ -144,11 +157,11 @@
atomic.AddUint32(&w.totalBytes, uint32(written))
switch err {
case bqueue.ErrCancelled, vsync.ErrCanceled:
- return written, timeoutError{}
+ return written, stream.NewNetError(verror.New(stream.ErrNetwork, nil, verror.New(errCanceled, nil)), true, false)
case bqueue.ErrWriterIsClosed:
- return written, w.closeError
+ return written, verror.New(stream.ErrBadState, nil, verror.New(errWriterClosed, nil))
default:
- return written, fmt.Errorf("bqueue.Writer.Put failed: %v", err)
+ return written, verror.New(stream.ErrNetwork, nil, verror.New(errBQueuePutFailed, nil, err))
}
}
written += n
diff --git a/profiles/internal/rpc/stream/vc/writer_test.go b/profiles/internal/rpc/stream/vc/writer_test.go
index 5b318a0..eb7018a 100644
--- a/profiles/internal/rpc/stream/vc/writer_test.go
+++ b/profiles/internal/rpc/stream/vc/writer_test.go
@@ -11,10 +11,13 @@
"reflect"
"testing"
+ "v.io/v23/verror"
+
"v.io/x/ref/profiles/internal/lib/bqueue"
"v.io/x/ref/profiles/internal/lib/bqueue/drrqueue"
"v.io/x/ref/profiles/internal/lib/iobuf"
"v.io/x/ref/profiles/internal/lib/sync"
+ "v.io/x/ref/profiles/internal/rpc/stream"
)
// TestWrite is a very basic, easy to follow, but not very thorough test of the
@@ -95,8 +98,8 @@
w := newTestWriter(bw, shared)
w.Close()
- if n, err := w.Write([]byte{1, 2}); n != 0 || err != errWriterClosed {
- t.Errorf("Got (%v, %v) want (0, %v)", n, err, errWriterClosed)
+ if n, err := w.Write([]byte{1, 2}); n != 0 || verror.ErrorID(err) != stream.ErrBadState.ID {
+ t.Errorf("Got (%v, %v) want (0, %v)", n, err, stream.ErrBadState)
}
}
@@ -204,8 +207,8 @@
go w.Close()
<-w.Closed()
- if n, err := w.Write([]byte{1, 2}); n != 0 || err != errWriterClosed {
- t.Errorf("Got (%v, %v) want (0, %v)", n, err, errWriterClosed)
+ if n, err := w.Write([]byte{1, 2}); n != 0 || verror.ErrorID(err) != stream.ErrBadState.ID {
+ t.Errorf("Got (%v, %v) want (0, %v)", n, err, stream.ErrBadState.ID)
}
}
diff --git a/profiles/internal/rpc/stream/vif/auth.go b/profiles/internal/rpc/stream/vif/auth.go
index ff45990..d1cb403 100644
--- a/profiles/internal/rpc/stream/vif/auth.go
+++ b/profiles/internal/rpc/stream/vif/auth.go
@@ -6,14 +6,14 @@
import (
"crypto/rand"
- "errors"
- "fmt"
"io"
"golang.org/x/crypto/nacl/box"
rpcversion "v.io/v23/rpc/version"
"v.io/v23/security"
+ "v.io/v23/verror"
+
"v.io/x/ref/profiles/internal/lib/iobuf"
"v.io/x/ref/profiles/internal/rpc/stream"
"v.io/x/ref/profiles/internal/rpc/stream/crypto"
@@ -23,9 +23,15 @@
)
var (
- errUnsupportedEncryptVersion = errors.New("unsupported encryption version")
- errVersionNegotiationFailed = errors.New("encryption version negotiation failed")
- nullCipher crypto.NullControlCipher
+ // These errors are intended to be used as arguments to higher
+ // level errors and hence {1}{2} is omitted from their format
+ // strings to avoid repeating these n-times in the final error
+ // message visible to the user.
+ errAuthFailed = reg(".errAuthFailed", "authentication failed{:3}")
+ errUnsupportedEncryptVersion = reg(".errUnsupportedEncryptVersion", "unsupported encryption version {4} < {5}")
+ errNaclBoxVersionNegotiationFailed = reg(".errNaclBoxVersionNegotiationFailed", "nacl box encryption version negotiation failed")
+ errVersionNegotiationFailed = reg(".errVersionNegotiationFailed", "encryption version negotiation failed")
+ nullCipher crypto.NullControlCipher
)
// privateData includes secret data we need for encryption.
@@ -71,7 +77,7 @@
var err error
versions, err = versions.Intersect(&version.Range{Min: 0, Max: rpcversion.RPCVersion5})
if err != nil {
- return nil, err
+ return nil, verror.New(stream.ErrNetwork, nil, err)
}
}
if versions.Max < rpcversion.RPCVersion6 {
@@ -81,26 +87,26 @@
// The client has not yet sent its public data. Construct it and send it.
pvt, pub, err := makeHopSetup(versions)
if err != nil {
- return nil, err
+ return nil, verror.New(stream.ErrSecurity, nil, err)
}
if err := message.WriteTo(writer, &pub, nullCipher); err != nil {
- return nil, err
+ return nil, verror.New(stream.ErrNetwork, nil, err)
}
// Read the server's public data.
pmsg, err := message.ReadFrom(reader, nullCipher)
if err != nil {
- return nil, err
+ return nil, verror.New(stream.ErrNetwork, nil, err)
}
ppub, ok := pmsg.(*message.HopSetup)
if !ok {
- return nil, errVersionNegotiationFailed
+ return nil, verror.New(stream.ErrSecurity, nil, verror.New(errVersionNegotiationFailed, nil))
}
// Choose the max version in the intersection.
vrange, err := pub.Versions.Intersect(&ppub.Versions)
if err != nil {
- return nil, err
+ return nil, verror.New(stream.ErrNetwork, nil, err)
}
v := vrange.Max
if v < rpcversion.RPCVersion6 {
@@ -114,18 +120,18 @@
func authenticateAsClient(writer io.Writer, reader *iobuf.Reader, params security.CallParams, auth *vc.ServerAuthorizer,
pvt *privateData, pub, ppub *message.HopSetup, version rpcversion.RPCVersion) (crypto.ControlCipher, error) {
if version < rpcversion.RPCVersion6 {
- return nil, errUnsupportedEncryptVersion
+ return nil, verror.New(errUnsupportedEncryptVersion, nil, version, rpcversion.RPCVersion6)
}
pbox := ppub.NaclBox()
if pbox == nil {
- return nil, errVersionNegotiationFailed
+ return nil, verror.New(errNaclBoxVersionNegotiationFailed, nil)
}
c := crypto.NewControlCipherRPC6(&pbox.PublicKey, &pvt.naclBoxPrivateKey, false)
sconn := newSetupConn(writer, reader, c)
// TODO(jyh): act upon the authentication results.
_, _, _, err := vc.AuthenticateAsClient(sconn, crypto.NewNullCrypter(), params, auth, version)
if err != nil {
- return nil, fmt.Errorf("authentication failed: %v", err)
+ return nil, err
}
return c, nil
}
@@ -174,18 +180,18 @@
func authenticateAsServerRPC6(writer io.Writer, reader *iobuf.Reader, principal security.Principal, lBlessings security.Blessings, dc vc.DischargeClient,
pvt *privateData, pub, ppub *message.HopSetup, version rpcversion.RPCVersion) (crypto.ControlCipher, error) {
if version < rpcversion.RPCVersion6 {
- return nil, errUnsupportedEncryptVersion
+ return nil, verror.New(errUnsupportedEncryptVersion, nil, version, rpcversion.RPCVersion6)
}
box := ppub.NaclBox()
if box == nil {
- return nil, errVersionNegotiationFailed
+ return nil, verror.New(errNaclBoxVersionNegotiationFailed, nil)
}
c := crypto.NewControlCipherRPC6(&box.PublicKey, &pvt.naclBoxPrivateKey, true)
sconn := newSetupConn(writer, reader, c)
// TODO(jyh): act upon authentication results.
_, _, err := vc.AuthenticateAsServer(sconn, principal, lBlessings, dc, crypto.NewNullCrypter(), version)
if err != nil {
- return nil, fmt.Errorf("authentication failed: %v", err)
+ return nil, verror.New(errAuthFailed, nil, err)
}
return c, nil
}
diff --git a/profiles/internal/rpc/stream/vif/idletimer.go b/profiles/internal/rpc/stream/vif/idletimer.go
index bed551e..84f9c23 100644
--- a/profiles/internal/rpc/stream/vif/idletimer.go
+++ b/profiles/internal/rpc/stream/vif/idletimer.go
@@ -56,6 +56,7 @@
t.stopped = true
}
}
+ m.stopped = true
}
// Insert starts the idle timer for the given VC. If there is no active flows
diff --git a/profiles/internal/rpc/stream/vif/idletimer_test.go b/profiles/internal/rpc/stream/vif/idletimer_test.go
index 404bd08..1d9ce57 100644
--- a/profiles/internal/rpc/stream/vif/idletimer_test.go
+++ b/profiles/internal/rpc/stream/vif/idletimer_test.go
@@ -121,6 +121,9 @@
// Stop the timer. Should not be notified.
m.Stop()
+ if m.Insert(vc1, idleTime) {
+ t.Fatal("timer has been stopped, but can insert a vc")
+ }
if err := WaitForNotifications(notify, waitTime); err != nil {
t.Error(err)
}
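The idletimer change above makes Stop mark the whole map as stopped, and the new test asserts that a subsequent Insert is refused. A minimal sketch of that stop-then-reject pattern (the real idleTimerMap tracks per-VC timers and flow counts; this shows only the guard logic):

package main

import (
	"fmt"
	"sync"
	"time"
)

type idleTimerMap struct {
	mu      sync.Mutex
	stopped bool
	timers  map[uint32]*time.Timer
}

// Insert starts an idle timer for vci and reports whether it was accepted;
// once Stop has been called, all further Inserts are refused.
func (m *idleTimerMap) Insert(vci uint32, d time.Duration, expire func()) bool {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.stopped {
		return false
	}
	m.timers[vci] = time.AfterFunc(d, expire)
	return true
}

// Stop cancels all pending timers and rejects future Inserts.
func (m *idleTimerMap) Stop() {
	m.mu.Lock()
	defer m.mu.Unlock()
	for _, t := range m.timers {
		t.Stop()
	}
	m.stopped = true
}

func main() {
	m := &idleTimerMap{timers: make(map[uint32]*time.Timer)}
	m.Insert(1, time.Millisecond, func() {})
	m.Stop()
	fmt.Println(m.Insert(2, time.Millisecond, func() {})) // false
}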
diff --git a/profiles/internal/rpc/stream/vif/setup_conn.go b/profiles/internal/rpc/stream/vif/setup_conn.go
index 9287799..06d71a6 100644
--- a/profiles/internal/rpc/stream/vif/setup_conn.go
+++ b/profiles/internal/rpc/stream/vif/setup_conn.go
@@ -7,7 +7,10 @@
import (
"io"
+ "v.io/v23/verror"
+
"v.io/x/ref/profiles/internal/lib/iobuf"
+ "v.io/x/ref/profiles/internal/rpc/stream"
"v.io/x/ref/profiles/internal/rpc/stream/crypto"
"v.io/x/ref/profiles/internal/rpc/stream/message"
)
@@ -37,7 +40,7 @@
}
emsg, ok := msg.(*message.HopSetupStream)
if !ok {
- return 0, errVersionNegotiationFailed
+ return 0, verror.New(stream.ErrSecurity, nil, verror.New(errVersionNegotiationFailed, nil))
}
s.rbuffer = emsg.Data
}
diff --git a/profiles/internal/rpc/stream/vif/vif.go b/profiles/internal/rpc/stream/vif/vif.go
index 670721d..dcd37b4 100644
--- a/profiles/internal/rpc/stream/vif/vif.go
+++ b/profiles/internal/rpc/stream/vif/vif.go
@@ -11,7 +11,6 @@
import (
"bytes"
- "errors"
"fmt"
"net"
"sort"
@@ -42,8 +41,31 @@
const pkgPath = "v.io/x/ref/profiles/internal/rpc/stream/vif"
+func reg(id, msg string) verror.IDAction {
+ return verror.Register(verror.ID(pkgPath+id), verror.NoRetry, msg)
+}
+
var (
- errShuttingDown = verror.Register(pkgPath+".errShuttingDown", verror.NoRetry, "{1:}{2:} underlying network connection({3}) shutting down{:_}")
+ // These errors are intended to be used as arguments to higher
+ // level errors and hence {1}{2} is omitted from their format
+ // strings to avoid repeating these n-times in the final error
+ // message visible to the user.
+ errShuttingDown = reg(".errShuttingDown", "underlying network connection({3}) shutting down")
+ errVCHandshakeFailed = reg(".errVCHandshakeFailed", "VC handshake failed{:3}")
+ errSendOnExpressQFailed = reg(".errSendOnExpressQFailed", "vif.sendOnExpressQ(OpenVC) failed{:3}")
+ errVIFIsBeingClosed = reg(".errVIFIsBeingClosed", "VIF is being closed")
+ errVIFAlreadyAcceptingFlows = reg(".errVIFAlreadyAcceptingFlows", "already accepting flows on VIF {3}")
+ errVCsNotAcceptedOnVIF = reg(".errVCsNotAcceptedOnVIF", "VCs not accepted on VIF {3}")
+ errAcceptFailed = reg(".errAcceptFailed", "Accept failed{:3}")
+ errRemoteEndClosedVC = reg(".errRemoteEndClosedVC", "remote end closed VC{:3}")
+ errFlowsNoLongerAccepted = reg(".errFlowsNowLongerAccepted", "Flows no longer being accepted")
+ errVCAcceptFailed = reg(".errVCAcceptFailed", "VC accept failed{:3}")
+ errIdleTimeout = reg(".errIdleTimeout", "idle timeout")
+ errVIFAlreadySetup = reg(".errVIFAlreadySetupt", "VIF is already setup")
+ errBqueueWriterForXpress = reg(".errBqueueWriterForXpress", "failed to create bqueue.Writer for express messages{:3}")
+ errBqueueWriterForControl = reg(".errBqueueWriterForControl", "failed to create bqueue.Writer for flow control counters{:3}")
+ errBqueueWriterForStopping = reg(".errBqueueWriterForStopping", "failed to create bqueue.Writer for stopping the write loop{:3}")
+ errWriteFailed = reg(".errWriteFailed", "write failed: got ({3}, {4}) for {5} byte message)")
)
// VIF implements a "virtual interface" over an underlying network connection
@@ -64,6 +86,9 @@
ctrlCipher crypto.ControlCipher
writeMu sync.Mutex
+ muStartTimer sync.Mutex
+ startTimer timer
+
vcMap *vcMap
idleTimerMap *idleTimerMap
wpending, rpending vsync.WaitGroup
@@ -131,10 +156,6 @@
sharedFlowID = vc.SharedFlowID
)
-var (
- errAlreadySetup = errors.New("VIF is already setup")
-)
-
// InternalNewDialedVIF creates a new virtual interface over the provided
// network connection, under the assumption that the conn object was created
// using net.Dial. If onClose is given, it is run in its own goroutine when
@@ -164,10 +185,18 @@
return nil, err
}
var blessings security.Blessings
+
if principal != nil {
blessings = principal.BlessingStore().Default()
}
- return internalNew(conn, pool, reader, rid, id.VC(vc.NumReservedVCs), versions, principal, blessings, onClose, nil, nil, c)
+ var startTimeout time.Duration
+ for _, o := range opts {
+ switch v := o.(type) {
+ case vc.StartTimeout:
+ startTimeout = v.Duration
+ }
+ }
+ return internalNew(conn, pool, reader, rid, id.VC(vc.NumReservedVCs), versions, principal, blessings, startTimeout, onClose, nil, nil, c)
}
// InternalNewAcceptedVIF creates a new virtual interface over the provided
@@ -184,10 +213,17 @@
func InternalNewAcceptedVIF(conn net.Conn, rid naming.RoutingID, principal security.Principal, blessings security.Blessings, versions *version.Range, onClose func(*VIF), lopts ...stream.ListenerOpt) (*VIF, error) {
pool := iobuf.NewPool(0)
reader := iobuf.NewReader(pool, conn)
- return internalNew(conn, pool, reader, rid, id.VC(vc.NumReservedVCs)+1, versions, principal, blessings, onClose, upcqueue.New(), lopts, &crypto.NullControlCipher{})
+ var startTimeout time.Duration
+ for _, o := range lopts {
+ switch v := o.(type) {
+ case vc.StartTimeout:
+ startTimeout = v.Duration
+ }
+ }
+ return internalNew(conn, pool, reader, rid, id.VC(vc.NumReservedVCs)+1, versions, principal, blessings, startTimeout, onClose, upcqueue.New(), lopts, &crypto.NullControlCipher{})
}
-func internalNew(conn net.Conn, pool *iobuf.Pool, reader *iobuf.Reader, rid naming.RoutingID, initialVCI id.VC, versions *version.Range, principal security.Principal, blessings security.Blessings, onClose func(*VIF), acceptor *upcqueue.T, listenerOpts []stream.ListenerOpt, c crypto.ControlCipher) (*VIF, error) {
+func internalNew(conn net.Conn, pool *iobuf.Pool, reader *iobuf.Reader, rid naming.RoutingID, initialVCI id.VC, versions *version.Range, principal security.Principal, blessings security.Blessings, startTimeout time.Duration, onClose func(*VIF), acceptor *upcqueue.T, listenerOpts []stream.ListenerOpt, c crypto.ControlCipher) (*VIF, error) {
var (
// Choose IDs that will not conflict with any other (VC, Flow)
// pairs. VCI 0 is never used by the application (it is
@@ -201,19 +237,19 @@
expressQ, err := outgoing.NewWriter(expressID, expressPriority, defaultBytesBufferedPerFlow)
if err != nil {
- return nil, fmt.Errorf("failed to create bqueue.Writer for express messages: %v", err)
+ return nil, verror.New(stream.ErrNetwork, nil, verror.New(errBqueueWriterForXpress, nil, err))
}
expressQ.Release(-1) // Disable flow control
flowQ, err := outgoing.NewWriter(flowID, controlPriority, flowToken.Size())
if err != nil {
- return nil, fmt.Errorf("failed to create bqueue.Writer for flow control counters: %v", err)
+ return nil, verror.New(stream.ErrNetwork, nil, verror.New(errBqueueWriterForControl, nil, err))
}
flowQ.Release(-1) // Disable flow control
stopQ, err := outgoing.NewWriter(stopID, stopPriority, 1)
if err != nil {
- return nil, fmt.Errorf("failed to create bqueue.Writer for stopping the write loop: %v", err)
+ return nil, verror.New(stream.ErrNetwork, nil, verror.New(errBqueueWriterForStopping, nil, err))
}
stopQ.Release(-1) // Disable flow control
@@ -238,10 +274,13 @@
msgCounters: make(map[string]int64),
blessings: blessings,
}
+ if startTimeout > 0 {
+ vif.startTimer = newTimer(startTimeout, vif.Close)
+ }
vif.idleTimerMap = newIdleTimerMap(func(vci id.VC) {
vc, _, _ := vif.vcMap.Find(vci)
if vc != nil {
- vif.closeVCAndSendMsg(vc, "idle timeout")
+ vif.closeVCAndSendMsg(vc, false, verror.New(errIdleTimeout, nil))
}
})
go vif.readLoop()
@@ -281,14 +320,13 @@
Counters: counters})
if err != nil {
vif.deleteVC(vc.VCI())
- err = fmt.Errorf("vif.sendOnExpressQ(OpenVC) failed: %v", err)
- vc.Close(err.Error())
+ err = verror.New(stream.ErrNetwork, nil, verror.New(errSendOnExpressQFailed, nil, err))
+ vc.Close(err)
return nil, err
}
if err := vc.HandshakeDialedVC(principal, opts...); err != nil {
vif.deleteVC(vc.VCI())
- err = fmt.Errorf("VC handshake failed: %v", err)
- vc.Close(err.Error())
+ vc.Close(err)
return nil, err
}
return vc, nil
@@ -343,7 +381,7 @@
// Stop the idle timers.
vif.idleTimerMap.Stop()
for _, vc := range vcs {
- vc.VC.Close("VIF is being closed")
+ vc.VC.Close(verror.New(stream.ErrNetwork, nil, verror.New(errVIFIsBeingClosed, nil)))
}
// Wait for the vcWriteLoops to exit (after draining queued up messages).
vif.stopQ.Close()
@@ -367,7 +405,7 @@
vif.muListen.Lock()
defer vif.muListen.Unlock()
if vif.acceptor != nil {
- return fmt.Errorf("already accepting Flows on VIF %v", vif)
+ return verror.New(stream.ErrNetwork, nil, verror.New(errVIFIsBeingClosed, nil, vif))
}
vif.acceptor = upcqueue.New()
vif.listenerOpts = opts
@@ -409,11 +447,11 @@
acceptor := vif.acceptor
vif.muListen.Unlock()
if acceptor == nil {
- return ConnectorAndFlow{}, fmt.Errorf("VCs not accepted on VIF %v", vif)
+ return ConnectorAndFlow{}, verror.New(stream.ErrNetwork, nil, verror.New(errVCsNotAcceptedOnVIF, nil, vif))
}
item, err := acceptor.Get(nil)
if err != nil {
- return ConnectorAndFlow{}, fmt.Errorf("Accept failed: %v", err)
+ return ConnectorAndFlow{}, verror.New(stream.ErrNetwork, nil, verror.New(errAcceptFailed, nil, err))
}
return item.(ConnectorAndFlow), nil
}
@@ -421,7 +459,7 @@
func (vif *VIF) String() string {
l := vif.conn.LocalAddr()
r := vif.conn.RemoteAddr()
- return fmt.Sprintf("(%s, %s) <-> (%s, %s)", r.Network(), r, l.Network(), l)
+ return fmt.Sprintf("(%s, %s) <-> (%s, %s)", l.Network(), l, r.Network(), r)
}
func (vif *VIF) readLoop() {
@@ -510,7 +548,7 @@
// to indicate a 'remote close' rather than a 'local one'. This helps
// with error reporting since we expect reads/writes to occur
// after a remote close, but not after a local close.
- vc.Close(fmt.Sprintf("remote end closed VC(%v)", m.Error))
+ vc.Close(verror.New(stream.ErrNetwork, nil, verror.New(errRemoteEndClosedVC, nil, m.Error)))
return nil
}
vlog.VI(2).Infof("Ignoring CloseVC(%+v) for unrecognized VCI on VIF %s", m, vif)
@@ -532,7 +570,7 @@
case *message.HopSetup:
// Configure the VIF. This takes over the conn during negotiation.
if vif.isSetup {
- return errAlreadySetup
+ return verror.New(stream.ErrNetwork, nil, verror.New(errVIFAlreadySetup, nil))
}
vif.muListen.Lock()
dischargeClient := getDischargeClient(vif.listenerOpts)
@@ -579,10 +617,16 @@
vif.rpending.Wait()
}
+func clientVCClosed(err error) bool {
+	// If we've encountered a networking error, then in all likelihood the
+	// connection to the client is closed.
+ return verror.ErrorID(err) == stream.ErrNetwork.ID
+}
+
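As a hedged aside, the classification that clientVCClosed relies on is verror's registered-ID mechanism; the standalone sketch below uses a made-up error ID to show the pattern.

package main

import (
	"fmt"

	"v.io/v23/verror"
)

// errSample is a hypothetical ID, registered the same way the stream
// package registers stream.ErrNetwork.
var errSample = verror.Register("v.io/x/example.errSample", verror.NoRetry, "{1:}{2:} sample failure{:_}")

func main() {
	err := verror.New(errSample, nil, "details")
	// ErrorID recovers the registered ID from the error, which is how
	// clientVCClosed tests for stream.ErrNetwork above.
	fmt.Println(verror.ErrorID(err) == errSample.ID) // true
}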
func (vif *VIF) acceptFlowsLoop(vc *vc.VC, c <-chan vc.HandshakeResult) {
hr := <-c
if hr.Error != nil {
- vif.closeVCAndSendMsg(vc, hr.Error.Error())
+ vif.closeVCAndSendMsg(vc, clientVCClosed(hr.Error), hr.Error)
return
}
@@ -590,13 +634,13 @@
acceptor := vif.acceptor
vif.muListen.Unlock()
if acceptor == nil {
- vif.closeVCAndSendMsg(vc, "Flows no longer being accepted")
+ vif.closeVCAndSendMsg(vc, false, verror.New(errFlowsNoLongerAccepted, nil))
return
}
// Notify any listeners that a new VC has been established
if err := acceptor.Put(ConnectorAndFlow{vc, nil}); err != nil {
- vif.closeVCAndSendMsg(vc, fmt.Sprintf("VC accept failed: %v", err))
+ vif.closeVCAndSendMsg(vc, clientVCClosed(err), verror.New(errVCAcceptFailed, nil, err))
return
}
@@ -766,7 +810,7 @@
return err
}
if n, err := vif.conn.Write(msg); err != nil {
- return fmt.Errorf("write failed: got (%d, %v) for %d byte message", n, err, len(msg))
+ return verror.New(stream.ErrNetwork, nil, verror.New(errWriteFailed, nil, n, err, len(msg)))
}
return nil
}
@@ -824,6 +868,12 @@
if err != nil {
return nil, err
}
+ vif.muStartTimer.Lock()
+ if vif.startTimer != nil {
+ vif.startTimer.Stop()
+ vif.startTimer = nil
+ }
+ vif.muStartTimer.Unlock()
// There may be a data race in accessing ctrlCipher when a new VC is created
// before authentication finishes in an accepted VIF. We lock it to avoid it.
//
@@ -842,6 +892,9 @@
Version: version,
})
added, rq, wq := vif.vcMap.Insert(vc)
+ if added {
+ vif.idleTimerMap.Insert(vc.VCI(), idleTimeout)
+ }
// Start vcWriteLoop
if added = added && vif.wpending.TryAdd(); added {
go vif.vcWriteLoop(vc, wq)
@@ -857,14 +910,10 @@
if wq != nil {
wq.Close()
}
- vif.vcMap.Delete(vci)
- vc.Close("underlying network connection shutting down")
- // We embed an error inside verror.ErrAborted because other layers
- // check for the "Aborted" error as a special case. Perhaps
- // eventually we'll get rid of the Aborted layer.
- return nil, verror.New(verror.ErrAborted, nil, verror.New(errShuttingDown, nil, vif))
+ vc.Close(verror.New(stream.ErrAborted, nil, verror.New(errShuttingDown, nil, vif)))
+ vif.deleteVC(vci)
+ return nil, verror.New(stream.ErrAborted, nil, verror.New(errShuttingDown, nil, vif))
}
- vif.idleTimerMap.Insert(vc.VCI(), idleTimeout)
return vc, nil
}
@@ -875,16 +924,18 @@
}
}
-func (vif *VIF) closeVCAndSendMsg(vc *vc.VC, msg string) {
- vlog.VI(2).Infof("Shutting down VCI %d on VIF %v due to: %v", vc.VCI(), vif, msg)
+func (vif *VIF) closeVCAndSendMsg(vc *vc.VC, clientVCClosed bool, errMsg error) {
+ vlog.VI(2).Infof("Shutting down VCI %d on VIF %v due to: %v", vc.VCI(), vif, errMsg)
vif.deleteVC(vc.VCI())
- vc.Close(msg)
- // HACK: Don't send CloseVC if it is a "failed new decoder" error because that means the
- // client already has closed its VC.
- // TODO(suharshs,ataly,ashankar): Find a better way to fix: https://github.com/veyron/release-issues/issues/1234.
- if strings.Contains(msg, "failed to create new decoder") {
+ vc.Close(errMsg)
+ if clientVCClosed {
+ // No point in sending to the client if the VC is closed, or otherwise broken.
return
}
+ msg := ""
+ if errMsg != nil {
+ msg = errMsg.Error()
+ }
if err := vif.sendOnExpressQ(&message.CloseVC{
VCI: vc.VCI(),
Error: msg,
@@ -910,7 +961,7 @@
for _, vc := range vcs {
if naming.Compare(vc.RemoteEndpoint().RoutingID(), remote.RoutingID()) {
vlog.VI(1).Infof("VCI %d on VIF %s being closed because of ShutdownVCs call", vc.VCI(), vif)
- vif.closeVCAndSendMsg(vc, "")
+ vif.closeVCAndSendMsg(vc, false, nil)
n++
}
}
diff --git a/profiles/internal/rpc/stream/vif/vif_test.go b/profiles/internal/rpc/stream/vif/vif_test.go
index a9d2bff..6e20202 100644
--- a/profiles/internal/rpc/stream/vif/vif_test.go
+++ b/profiles/internal/rpc/stream/vif/vif_test.go
@@ -269,7 +269,7 @@
notifyFuncS := func(vf *vif.VIF) { notifyS <- vf }
// Close the client VIF. Both client and server should be notified.
- client, server, err := New(nil, nil, notifyFuncC, notifyFuncS)
+ client, server, err := New(nil, nil, notifyFuncC, notifyFuncS, nil, nil)
if err != nil {
t.Fatal(err)
}
@@ -282,7 +282,7 @@
}
// Same as above, but close the server VIF at this time.
- client, server, err = New(nil, nil, notifyFuncC, notifyFuncS)
+ client, server, err = New(nil, nil, notifyFuncC, notifyFuncS, nil, nil)
if err != nil {
t.Fatal(err)
}
@@ -305,7 +305,7 @@
newVIF := func() (vf, remote *vif.VIF) {
var err error
- vf, remote, err = New(nil, nil, notifyFunc, notifyFunc)
+ vf, remote, err = New(nil, nil, notifyFunc, notifyFunc, nil, nil)
if err != nil {
t.Fatal(err)
}
@@ -376,7 +376,65 @@
func TestCloseWhenEmpty(t *testing.T) { testCloseWhenEmpty(t, false) }
func TestCloseWhenEmptyServer(t *testing.T) { testCloseWhenEmpty(t, true) }
-func testCloseIdleVC(t *testing.T, testServer bool) {
+func testStartTimeout(t *testing.T, testServer bool) {
+ const (
+ startTime = 5 * time.Millisecond
+		// We use a long wait time here since it can take a while for the other
+		// side's underlying network connection to be closed, especially in race testing.
+ waitTime = 150 * time.Millisecond
+ )
+
+ notify := make(chan interface{})
+ notifyFunc := func(vf *vif.VIF) { notify <- vf }
+
+ newVIF := func() (vf, remote *vif.VIF, triggerTimers func()) {
+ triggerTimers = vif.SetFakeTimers()
+ var vfStartTime, remoteStartTime time.Duration = startTime, 0
+ if testServer {
+ vfStartTime, remoteStartTime = remoteStartTime, vfStartTime
+ }
+ var err error
+ vf, remote, err = New(nil, nil, notifyFunc, notifyFunc, []stream.VCOpt{vc.StartTimeout{vfStartTime}}, []stream.ListenerOpt{vc.StartTimeout{remoteStartTime}})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err = vf.StartAccepting(); err != nil {
+ t.Fatal(err)
+ }
+ if testServer {
+ vf, remote = remote, vf
+ }
+ return
+ }
+
+ // No VC opened. Should be closed after the start timeout.
+ vf, remote, triggerTimers := newVIF()
+ triggerTimers()
+ if err := vif.WaitForNotifications(notify, waitTime, vf, remote); err != nil {
+ t.Error(err)
+ }
+
+ // Open one VC. Should not be closed.
+ vf, remote, triggerTimers = newVIF()
+ if _, _, err := createVC(vf, remote, makeEP(0x10)); err != nil {
+ t.Fatal(err)
+ }
+ triggerTimers()
+ if err := vif.WaitForNotifications(notify, waitTime); err != nil {
+ t.Error(err)
+ }
+
+ // Close the VC. Should be closed.
+ vf.ShutdownVCs(makeEP(0x10))
+ if err := vif.WaitForNotifications(notify, waitTime, vf, remote); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestStartTimeout(t *testing.T) { testStartTimeout(t, false) }
+func TestStartTimeoutServer(t *testing.T) { testStartTimeout(t, true) }
+
+func testIdleTimeout(t *testing.T, testServer bool) {
const (
idleTime = 10 * time.Millisecond
waitTime = idleTime * 2
@@ -387,7 +445,7 @@
newVIF := func() (vf, remote *vif.VIF) {
var err error
- if vf, remote, err = New(nil, nil, notifyFunc, notifyFunc); err != nil {
+ if vf, remote, err = New(nil, nil, notifyFunc, notifyFunc, nil, nil); err != nil {
t.Fatal(err)
}
if err = vf.StartAccepting(); err != nil {
@@ -488,8 +546,8 @@
}
}
-func TestCloseIdleVC(t *testing.T) { testCloseIdleVC(t, false) }
-func TestCloseIdleVCServer(t *testing.T) { testCloseIdleVC(t, true) }
+func TestIdleTimeout(t *testing.T) { testIdleTimeout(t, false) }
+func TestIdleTimeoutServer(t *testing.T) { testIdleTimeout(t, true) }
func TestShutdownVCs(t *testing.T) {
client, server := NewClientServer()
@@ -710,7 +768,7 @@
func NewClientServer() (client, server *vif.VIF) {
var err error
- client, server, err = New(nil, nil, nil, nil)
+ client, server, err = New(nil, nil, nil, nil, nil, nil)
if err != nil {
panic(err)
}
@@ -718,15 +776,15 @@
}
func NewVersionedClientServer(clientVersions, serverVersions *iversion.Range) (client, server *vif.VIF, verr error) {
- return New(clientVersions, serverVersions, nil, nil)
+ return New(clientVersions, serverVersions, nil, nil, nil, nil)
}
-func New(clientVersions, serverVersions *iversion.Range, clientOnClose, serverOnClose func(*vif.VIF)) (client, server *vif.VIF, verr error) {
+func New(clientVersions, serverVersions *iversion.Range, clientOnClose, serverOnClose func(*vif.VIF), opts []stream.VCOpt, lopts []stream.ListenerOpt) (client, server *vif.VIF, verr error) {
c1, c2 := pipe()
var cerr error
cl := make(chan *vif.VIF)
go func() {
- c, err := vif.InternalNewDialedVIF(c1, naming.FixedRoutingID(0xc), testutil.NewPrincipal("client"), clientVersions, clientOnClose)
+ c, err := vif.InternalNewDialedVIF(c1, naming.FixedRoutingID(0xc), testutil.NewPrincipal("client"), clientVersions, clientOnClose, opts...)
if err != nil {
cerr = err
close(cl)
@@ -736,7 +794,7 @@
}()
pserver := testutil.NewPrincipal("server")
bserver := pserver.BlessingStore().Default()
- s, err := vif.InternalNewAcceptedVIF(c2, naming.FixedRoutingID(0x5), pserver, bserver, serverVersions, serverOnClose)
+ s, err := vif.InternalNewAcceptedVIF(c2, naming.FixedRoutingID(0x5), pserver, bserver, serverVersions, serverOnClose, lopts...)
c, ok := <-cl
if err != nil {
verr = err
diff --git a/profiles/internal/rpc/test/client_test.go b/profiles/internal/rpc/test/client_test.go
index 6db0f38..31de6ea 100644
--- a/profiles/internal/rpc/test/client_test.go
+++ b/profiles/internal/rpc/test/client_test.go
@@ -7,6 +7,7 @@
import (
"fmt"
"io"
+ "net"
"os"
"path/filepath"
"runtime"
@@ -20,12 +21,15 @@
"v.io/v23/options"
"v.io/v23/rpc"
"v.io/v23/security"
+ "v.io/v23/vdlroot/signature"
"v.io/v23/verror"
"v.io/x/ref/envvar"
_ "v.io/x/ref/profiles"
inaming "v.io/x/ref/profiles/internal/naming"
irpc "v.io/x/ref/profiles/internal/rpc"
+ "v.io/x/ref/profiles/internal/rpc/stream/message"
+ "v.io/x/ref/profiles/internal/testing/mocks/mocknet"
"v.io/x/ref/services/mounttable/mounttablelib"
"v.io/x/ref/test"
"v.io/x/ref/test/expect"
@@ -36,15 +40,23 @@
//go:generate v23 test generate .
func rootMT(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
+ seclevel := options.SecurityConfidential
+ if len(args) == 1 && args[0] == "nosec" {
+ seclevel = options.SecurityNone
+ }
+ return runRootMT(stdin, stdout, stderr, seclevel, env, args...)
+}
+
+func runRootMT(stdin io.Reader, stdout, stderr io.Writer, seclevel options.SecurityLevel, env map[string]string, args ...string) error {
ctx, shutdown := v23.Init()
defer shutdown()
lspec := v23.GetListenSpec(ctx)
- server, err := v23.NewServer(ctx, options.ServesMountTable(true))
+ server, err := v23.NewServer(ctx, options.ServesMountTable(true), seclevel)
if err != nil {
return fmt.Errorf("root failed: %v", err)
}
- mt, err := mounttablelib.NewMountTableDispatcher("")
+ mt, err := mounttablelib.NewMountTableDispatcher("", "mounttable")
if err != nil {
return fmt.Errorf("mounttablelib.NewMountTableDispatcher failed: %s", err)
}
@@ -123,12 +135,8 @@
args = args[1:]
client := v23.GetClient(ctx)
for _, a := range args {
- h, err := client.StartCall(ctx, name, "Echo", []interface{}{a})
- if err != nil {
- return err
- }
var r string
- if err := h.Finish(&r); err != nil {
+ if err := client.Call(ctx, name, "Echo", []interface{}{a}, []interface{}{&r}); err != nil {
return err
}
fmt.Fprintf(stdout, r)
@@ -142,12 +150,12 @@
return ctx, shutdown
}
-func runMountTable(t *testing.T, ctx *context.T) (*modules.Shell, func()) {
+func runMountTable(t *testing.T, ctx *context.T, args ...string) (*modules.Shell, func()) {
sh, err := modules.NewShell(ctx, nil, testing.Verbose(), t)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
- root, err := sh.Start("rootMT", nil)
+ root, err := sh.Start("rootMT", nil, args...)
if err != nil {
t.Fatalf("unexpected error for root mt: %s", err)
}
@@ -254,14 +262,17 @@
}
}
-func logErrors(t *testing.T, logerr, logstack bool, err error) {
+func logErrors(t *testing.T, msg string, logerr, logstack, debugString bool, err error) {
_, file, line, _ := runtime.Caller(2)
loc := fmt.Sprintf("%s:%d", filepath.Base(file), line)
if logerr {
- t.Logf("%s: %v", loc, err)
+ t.Logf("%s: %s: %v", loc, msg, err)
}
if logstack {
- t.Logf("%s: %v", loc, verror.Stack(err).String())
+ t.Logf("%s: %s: %v", loc, msg, verror.Stack(err).String())
+ }
+ if debugString {
+ t.Logf("%s: %s: %v", loc, msg, verror.DebugString(err))
}
}
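For context, a verror carries more than its one-line message; a rough sketch of what the three log flags expose, using a made-up error ID:

package main

import (
	"fmt"

	"v.io/v23/verror"
)

var errExample = verror.Register("v.io/x/example.errExample", verror.NoRetry, "{1:}{2:} example failure{:_}")

func main() {
	err := verror.New(errExample, nil, "connection refused")
	fmt.Println(err.Error())                // short, user-visible message
	fmt.Println(verror.Stack(err).String()) // program counters captured at creation
	fmt.Println(verror.DebugString(err))    // message plus stack and wrapped detail
}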
@@ -273,52 +284,51 @@
ns := v23.GetNamespace(ctx)
v23.GetNamespace(ctx).CacheCtl(naming.DisableCache(true))
- logErr := func(err error) {
- logErrors(t, true, false, err)
+ logErr := func(msg string, err error) {
+ logErrors(t, msg, true, false, false, err)
}
emptyCtx := &context.T{}
_, err := client.StartCall(emptyCtx, "noname", "nomethod", nil)
- logErr(err)
if verror.ErrorID(err) != verror.ErrBadArg.ID {
t.Fatalf("wrong error: %s", err)
}
+ logErr("no context", err)
p1 := options.ServerPublicKey{testutil.NewPrincipal().PublicKey()}
p2 := options.ServerPublicKey{testutil.NewPrincipal().PublicKey()}
_, err = client.StartCall(ctx, "noname", "nomethod", nil, p1, p2)
- logErr(err)
if verror.ErrorID(err) != verror.ErrBadArg.ID {
t.Fatalf("wrong error: %s", err)
}
+ logErr("too many public keys", err)
// This will fail with NoServers, but because there is no mount table
// to communicate with. The error message should include a
// 'connection refused' string.
ns.SetRoots("/127.0.0.1:8101")
_, err = client.StartCall(ctx, "noname", "nomethod", nil, options.NoRetry{})
- logErr(err)
if verror.ErrorID(err) != verror.ErrNoServers.ID {
t.Fatalf("wrong error: %s", err)
}
- if want := "connection refused"; !strings.Contains(err.Error(), want) {
+ if want := "connection refused"; !strings.Contains(verror.DebugString(err), want) {
t.Fatalf("wrong error: %s - doesn't contain %q", err, want)
}
+ logErr("no mount table", err)
// This will fail with NoServers, but because there really is no
// name registered with the mount table.
_, shutdown = runMountTable(t, ctx)
defer shutdown()
_, err = client.StartCall(ctx, "noname", "nomethod", nil, options.NoRetry{})
- logErr(err)
if verror.ErrorID(err) != verror.ErrNoServers.ID {
t.Fatalf("wrong error: %s", err)
}
roots := ns.Roots()
-
if unwanted := "connection refused"; strings.Contains(err.Error(), unwanted) {
t.Fatalf("wrong error: %s - does contain %q", err, unwanted)
}
+ logErr("no name registered", err)
// The following tests will fail with NoServers, but because there are
// no protocols that the client and servers (mount table, and "name") share.
@@ -332,20 +342,18 @@
// This will fail in its attempt to call ResolveStep to the mount table
// because we are using both the new context and the new client.
_, err = nclient.StartCall(nctx, "name", "nomethod", nil, options.NoRetry{})
- logErr(err)
if verror.ErrorID(err) != verror.ErrNoServers.ID {
t.Fatalf("wrong error: %s", err)
}
if want := "ResolveStep"; !strings.Contains(err.Error(), want) {
t.Fatalf("wrong error: %s - doesn't contain %q", err, want)
}
+ logErr("mismatched protocols", err)
// This will fail in its attempt to invoke the actual RPC because
// we are using the old context (which supplies the context for the calls
// to ResolveStep) and the new client.
_, err = nclient.StartCall(ctx, "name", "nomethod", nil, options.NoRetry{})
- logErr(err)
-
if verror.ErrorID(err) != verror.ErrNoServers.ID {
t.Fatalf("wrong error: %s", err)
}
@@ -356,6 +364,7 @@
t.Fatalf("wrong error: %s - does contain %q", err, unwanted)
}
+ logErr("mismatched protocols", err)
// The following two tests will fail due to a timeout.
ns.SetRoots("/203.0.113.10:8101")
@@ -372,7 +381,7 @@
if call != nil {
t.Fatalf("expected call to be nil")
}
- logErr(err)
+ logErr("timeout to mount table", err)
// This, second test, will fail due a timeout contacting the server itself.
ns.SetRoots(roots...)
@@ -389,7 +398,115 @@
if call != nil {
t.Fatalf("expected call to be nil")
}
- logErr(err)
+ logErr("timeout to server", err)
+}
+
+func dropDataDialer(network, address string, timeout time.Duration) (net.Conn, error) {
+ matcher := func(read bool, msg message.T) bool {
+ switch msg.(type) {
+ case *message.Data:
+ return true
+ }
+ return false
+ }
+ opts := mocknet.Opts{
+ Mode: mocknet.V23CloseAtMessage,
+ V23MessageMatcher: matcher,
+ }
+ return mocknet.DialerWithOpts(opts, network, address, timeout)
+}
+
+func TestStartCallBadProtocol(t *testing.T) {
+ ctx, shutdown := newCtx()
+ defer shutdown()
+ client := v23.GetClient(ctx)
+
+ ns := v23.GetNamespace(ctx)
+ ns.CacheCtl(naming.DisableCache(true))
+
+ logErr := func(msg string, err error) {
+ logErrors(t, msg, true, false, false, err)
+ }
+
+ rpc.RegisterProtocol("dropData", dropDataDialer, net.Listen)
+
+ // The following test will fail due to a broken connection.
+	// We need to run the mount table and servers with no security to use
+ // the V23CloseAtMessage net.Conn mock.
+ _, shutdown = runMountTable(t, ctx, "nosec")
+ defer shutdown()
+
+ roots := ns.Roots()
+ brkRoot, err := mocknet.RewriteEndpointProtocol(roots[0], "dropData")
+ if err != nil {
+ t.Fatal(err)
+ }
+ ns.SetRoots(brkRoot.Name())
+
+ nctx, _ := context.WithTimeout(ctx, 100*time.Millisecond)
+ call, err := client.StartCall(nctx, "name", "noname", nil, options.NoRetry{}, options.SecurityNone)
+ if verror.ErrorID(err) != verror.ErrBadProtocol.ID {
+ t.Fatalf("wrong error: %s", err)
+ }
+ if call != nil {
+ t.Fatalf("expected call to be nil")
+ }
+ logErr("broken connection", err)
+
+	// The following test will fail because the client will set up
+ // a secure connection to a server that isn't expecting one.
+ name, fn := initServer(t, ctx, options.SecurityNone)
+ defer fn()
+
+ call, err = client.StartCall(nctx, name, "noname", nil, options.NoRetry{})
+ if verror.ErrorID(err) != verror.ErrNoServers.ID {
+ t.Fatalf("wrong error: %s", err)
+ }
+ if call != nil {
+ t.Fatalf("expected call to be nil")
+ }
+ logErr("insecure server", err)
+
+ // This is the inverse, secure server, insecure client
+ name, fn = initServer(t, ctx)
+ defer fn()
+
+ call, err = client.StartCall(nctx, name, "noname", nil, options.NoRetry{}, options.SecurityNone)
+ if verror.ErrorID(err) != verror.ErrBadProtocol.ID {
+ t.Fatalf("wrong error: %s", err)
+ }
+ if call != nil {
+ t.Fatalf("expected call to be nil")
+ }
+ logErr("insecure client", err)
+}
+
+func TestStartCallSecurity(t *testing.T) {
+ ctx, shutdown := newCtx()
+ defer shutdown()
+ client := v23.GetClient(ctx)
+
+ logErr := func(msg string, err error) {
+ logErrors(t, msg, true, false, false, err)
+ }
+
+ name, fn := initServer(t, ctx)
+ defer fn()
+
+ // Create a context with a new principal that doesn't match the server,
+ // so that the client will not trust the server.
+ ctx1, err := v23.SetPrincipal(ctx, testutil.NewPrincipal("test-blessing"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ call, err := client.StartCall(ctx1, name, "noname", nil, options.NoRetry{})
+ if verror.ErrorID(err) != verror.ErrNotTrusted.ID {
+ t.Fatalf("wrong error: %s", err)
+ }
+ if call != nil {
+ t.Fatalf("expected call to be nil")
+ }
+ logErr("client does not trust server", err)
}
func childPing(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
@@ -398,20 +515,16 @@
v23.GetNamespace(ctx).CacheCtl(naming.DisableCache(true))
name := args[0]
- call, err := v23.GetClient(ctx).StartCall(ctx, name, "Ping", nil)
- if err != nil {
- fmt.Errorf("unexpected error: %s", err)
- }
got := ""
- if err := call.Finish(&got); err != nil {
+ if err := v23.GetClient(ctx).Call(ctx, name, "Ping", nil, []interface{}{&got}); err != nil {
fmt.Errorf("unexpected error: %s", err)
}
fmt.Fprintf(stdout, "RESULT=%s\n", got)
return nil
}
-func initServer(t *testing.T, ctx *context.T) (string, func()) {
- server, err := v23.NewServer(ctx)
+func initServer(t *testing.T, ctx *context.T, opts ...rpc.ServerOpt) (string, func()) {
+ server, err := v23.NewServer(ctx, opts...)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
@@ -447,13 +560,10 @@
name, fn := initServer(t, ctx)
defer fn()
ctx, _ = context.WithTimeout(ctx, time.Millisecond)
- call, err := v23.GetClient(ctx).StartCall(ctx, name, "Sleep", nil)
- if err != nil {
+ if err := v23.GetClient(ctx).Call(ctx, name, "Sleep", nil, nil); err != nil {
testForVerror(t, err, verror.ErrTimeout)
return
}
- err = call.Finish()
- testForVerror(t, err, verror.ErrTimeout)
}
func TestArgsAndResponses(t *testing.T) {
@@ -663,8 +773,7 @@
_, fn := runMountTable(t, ctx)
defer fn()
name := "noservers"
- ctx, _ = context.WithTimeout(ctx, 1000*time.Millisecond)
- call, err := v23.GetClient(ctx).StartCall(ctx, name, "Sleep", nil)
+ call, err := v23.GetClient(ctx).StartCall(ctx, name, "Sleep", nil, options.NoRetry{})
if err != nil {
testForVerror(t, err, verror.ErrNoServers)
return
@@ -747,5 +856,138 @@
}
}
-// TODO(cnicolaou:) tests for:
-// -- Test for bad discharges error and correct invalidation, client.go:870..880
+func TestMethodErrors(t *testing.T) {
+ ctx, shutdown := newCtx()
+ defer shutdown()
+ clt := v23.GetClient(ctx)
+
+ name, fn := initServer(t, ctx)
+ defer fn()
+
+ logErr := func(msg string, err error) {
+ logErrors(t, msg, true, false, false, err)
+ }
+
+ // Unknown method
+ call, err := clt.StartCall(ctx, name, "NoMethod", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ verr := call.Finish()
+ if verror.ErrorID(verr) != verror.ErrUnknownMethod.ID {
+ t.Fatalf("wrong error: %s", verr)
+ }
+ logErr("unknown method", verr)
+
+ // Unknown suffix
+ call, err = clt.StartCall(ctx, name+"/NoSuffix", "Ping", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ verr = call.Finish()
+ if verror.ErrorID(verr) != verror.ErrUnknownSuffix.ID {
+ t.Fatalf("wrong error: %s", verr)
+ }
+ logErr("unknown suffix", verr)
+
+ // Too many args.
+ call, err = clt.StartCall(ctx, name, "Ping", []interface{}{1, 2})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ r1 := ""
+ verr = call.Finish(&r1)
+ if verror.ErrorID(verr) != verror.ErrBadProtocol.ID {
+ t.Fatalf("wrong error: %s", verr)
+ }
+ if got, want := verr.Error(), "wrong number of input arguments"; !strings.Contains(got, want) {
+ t.Fatalf("want %q to contain %q", got, want)
+ }
+ logErr("wrong # args", verr)
+
+ // Too many results.
+ call, err = clt.StartCall(ctx, name, "Ping", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ r2 := ""
+ verr = call.Finish(&r1, &r2)
+ if verror.ErrorID(verr) != verror.ErrBadProtocol.ID {
+ t.Fatalf("wrong error: %s", verr)
+ }
+ if got, want := verr.Error(), "results, but want"; !strings.Contains(got, want) {
+ t.Fatalf("want %q to contain %q", got, want)
+ }
+ logErr("wrong # results", verr)
+
+ // Mismatched arg types
+ call, err = clt.StartCall(ctx, name, "Echo", []interface{}{1})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ verr = call.Finish(&r2)
+ if verror.ErrorID(verr) != verror.ErrBadProtocol.ID {
+ t.Fatalf("wrong error: %s", verr)
+ }
+ if got, want := verr.Error(), "aren't compatible"; !strings.Contains(got, want) {
+ t.Fatalf("want %q to contain %q", got, want)
+ }
+ logErr("wrong arg types", verr)
+
+ // Mismatched result types
+ call, err = clt.StartCall(ctx, name, "Ping", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ r3 := 2
+ verr = call.Finish(&r3)
+ if verror.ErrorID(verr) != verror.ErrBadProtocol.ID {
+ t.Fatalf("wrong error: %s", verr)
+ }
+ if got, want := verr.Error(), "aren't compatible"; !strings.Contains(got, want) {
+ t.Fatalf("want %q to contain %q", got, want)
+ }
+ logErr("wrong result types", verr)
+}
+
+func TestReservedMethodErrors(t *testing.T) {
+ ctx, shutdown := newCtx()
+ defer shutdown()
+ clt := v23.GetClient(ctx)
+
+ name, fn := initServer(t, ctx)
+ defer fn()
+
+ logErr := func(msg string, err error) {
+ logErrors(t, msg, true, false, false, err)
+ }
+
+ // This call will fail because the __xx suffix is not supported by
+ // the dispatcher implementing Signature.
+ call, err := clt.StartCall(ctx, name+"/__xx", "__Signature", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ sig := []signature.Interface{}
+ verr := call.Finish(&sig)
+ if verror.ErrorID(verr) != verror.ErrUnknownSuffix.ID {
+ t.Fatalf("wrong error: %s", verr)
+ }
+ logErr("unknown suffix", verr)
+
+ // This call will fail for the same reason, but with a different error,
+ // saying that MethodSignature is an unknown method.
+ call, err = clt.StartCall(ctx, name+"/__xx", "__MethodSignature", []interface{}{"dummy"})
+ if err != nil {
+ t.Fatal(err)
+ }
+ verr = call.Finish(&sig)
+ if verror.ErrorID(verr) != verror.ErrUnknownMethod.ID {
+ t.Fatalf("wrong error: %s", verr)
+ }
+ logErr("unknown method", verr)
+}
diff --git a/profiles/internal/rpc/test/proxy_test.go b/profiles/internal/rpc/test/proxy_test.go
index 3fe5e72..ed47050 100644
--- a/profiles/internal/rpc/test/proxy_test.go
+++ b/profiles/internal/rpc/test/proxy_test.go
@@ -80,9 +80,7 @@
if expected == len(pubState) {
break
}
- fmt.Fprintf(stderr, "%s\n", pub.DebugString())
delay := time.Second
- fmt.Fprintf(stderr, "Sleeping: %s\n", delay)
time.Sleep(delay)
}
}
@@ -350,8 +348,15 @@
verifyMountMissing(t, ctx, ns, name)
status = server.Status()
- if len(status.Proxies) != 1 || status.Proxies[0].Proxy != spec.Proxy || verror.ErrorID(status.Proxies[0].Error) != verror.ErrNoServers.ID {
- t.Fatalf("proxy status is incorrect: %v", status.Proxies)
+ if got, want := len(status.Proxies), 1; got != want {
+ t.Logf("Proxies: %v", status.Proxies)
+ t.Fatalf("got %v, want %v", got, want)
+ }
+ if got, want := status.Proxies[0].Proxy, spec.Proxy; got != want {
+ t.Fatalf("got %v, want %v", got, want)
+ }
+ if got, want := verror.ErrorID(status.Proxies[0].Error), verror.ErrNoServers.ID; got != want {
+ t.Fatalf("got %v, want %v", got, want)
}
// Proxy restarts, calls should eventually start succeeding.
diff --git a/profiles/internal/rpc/test/simple_test.go b/profiles/internal/rpc/test/simple_test.go
index bc653f4..c83ec25 100644
--- a/profiles/internal/rpc/test/simple_test.go
+++ b/profiles/internal/rpc/test/simple_test.go
@@ -29,6 +29,10 @@
return "pong", nil
}
+func (s *simple) Echo(call rpc.ServerCall, arg string) (string, error) {
+ return arg, nil
+}
+
func (s *simple) Source(call rpc.StreamServerCall, start int) error {
i := start
backoff := 25 * time.Millisecond
diff --git a/profiles/internal/rpc/version/version.go b/profiles/internal/rpc/version/version.go
index 4ea452e..668784e 100644
--- a/profiles/internal/rpc/version/version.go
+++ b/profiles/internal/rpc/version/version.go
@@ -11,6 +11,7 @@
"v.io/v23/naming"
"v.io/v23/rpc/version"
+ "v.io/v23/verror"
)
// Range represents a range of RPC versions.
@@ -34,14 +35,26 @@
CheckCompatibility = SupportedRange.CheckCompatibility
)
+const pkgPath = "v.io/x/ref/profiles/internal/rpc/version"
+
+func reg(id, msg string) verror.IDAction {
+ return verror.Register(verror.ID(pkgPath+id), verror.NoRetry, msg)
+}
+
var (
- NoCompatibleVersionErr = fmt.Errorf("No compatible RPC version available")
- UnknownVersionErr = fmt.Errorf("There was not enough information to determine a version.")
+	// These errors are intended to be used as arguments to higher-level
+	// errors, and hence {1}{2} is omitted from their format strings to
+	// avoid repeating them n times in the final error message visible
+	// to the user.
+ ErrNoCompatibleVersion = reg(".errNoCompatibleVersionErr", "No compatible RPC version available{:3} not in range {4}..{5}")
+ ErrUnknownVersion = reg(".errUnknownVersionErr", "There was not enough information to determine a version")
+	errInternalTypeConversionError = reg(".errInternalTypeConversionError", "failed to convert {3} to v.io/x/ref/profiles/internal/naming.Endpoint")
)
// IsVersionError returns true if err is a versioning related error.
func IsVersionError(err error) bool {
- return err == NoCompatibleVersionErr || err == UnknownVersionErr
+ id := verror.ErrorID(err)
+ return id == ErrNoCompatibleVersion.ID || id == ErrUnknownVersion.ID
}
// Endpoint returns an endpoint with the Min/MaxRPCVersion properly filled in
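A hypothetical test in package version, illustrating that callers now classify these errors by registered ID rather than comparing against the removed sentinel values:

package version

import (
	"testing"

	"v.io/v23/verror"
)

func TestClassifyVersionErrorByID(t *testing.T) {
	// The extra arguments fill the {3}..{5} slots of the format string.
	err := verror.New(ErrNoCompatibleVersion, nil, 7, 2, 5)
	if !IsVersionError(err) {
		t.Errorf("expected a version error, got %v", err)
	}
	if got, want := verror.ErrorID(err), ErrNoCompatibleVersion.ID; got != want {
		t.Errorf("got ID %v, want %v", got, want)
	}
}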
@@ -77,9 +90,9 @@
}
if min == u || max == u {
- err = UnknownVersionErr
+ err = verror.New(ErrUnknownVersion, nil)
} else if min > max {
- err = NoCompatibleVersionErr
+ err = verror.New(ErrNoCompatibleVersion, nil, u, min, max)
}
return
}
@@ -102,7 +115,7 @@
func (r *Range) ProxiedEndpoint(rid naming.RoutingID, proxy naming.Endpoint) (*inaming.Endpoint, error) {
proxyEP, ok := proxy.(*inaming.Endpoint)
if !ok {
- return nil, fmt.Errorf("unrecognized naming.Endpoint type %T", proxy)
+ return nil, verror.New(errInternalTypeConversionError, nil, fmt.Sprintf("%T", proxy))
}
ep := &inaming.Endpoint{
@@ -129,11 +142,11 @@
func (r *Range) CommonVersion(a, b naming.Endpoint) (version.RPCVersion, error) {
aEP, ok := a.(*inaming.Endpoint)
if !ok {
- return 0, fmt.Errorf("Unrecognized naming.Endpoint type: %T", a)
+ return 0, verror.New(errInternalTypeConversionError, nil, fmt.Sprintf("%T", a))
}
bEP, ok := b.(*inaming.Endpoint)
if !ok {
- return 0, fmt.Errorf("Unrecognized naming.Endpoint type: %T", b)
+ return 0, verror.New(errInternalTypeConversionError, nil, fmt.Sprintf("%T", b))
}
_, max, err := intersectEndpoints(aEP, bEP)
@@ -144,7 +157,7 @@
// We want to use the maximum common version of the protocol. We just
// need to make sure that it is supported by this RPC implementation.
if max < r.Min || max > r.Max {
- return version.UnknownRPCVersion, NoCompatibleVersionErr
+ return version.UnknownRPCVersion, verror.New(ErrNoCompatibleVersion, nil, max, r.Min, r.Max)
}
return max, nil
}
@@ -154,7 +167,7 @@
func (r *Range) CheckCompatibility(remote naming.Endpoint) error {
remoteEP, ok := remote.(*inaming.Endpoint)
if !ok {
- return fmt.Errorf("Unrecognized naming.Endpoint type: %T", remote)
+ return verror.New(errInternalTypeConversionError, nil, fmt.Sprintf("%T", remote))
}
_, _, err := intersectRanges(r.Min, r.Max,
diff --git a/profiles/internal/rpc/version/version_test.go b/profiles/internal/rpc/version/version_test.go
index 7515a35..bc54048 100644
--- a/profiles/internal/rpc/version/version_test.go
+++ b/profiles/internal/rpc/version/version_test.go
@@ -11,6 +11,7 @@
"v.io/v23/naming"
"v.io/v23/rpc/version"
+ "v.io/v23/verror"
)
func TestCommonVersion(t *testing.T) {
@@ -20,17 +21,17 @@
localMin, localMax version.RPCVersion
remoteMin, remoteMax version.RPCVersion
expectedVer version.RPCVersion
- expectedErr error
+ expectedErr verror.IDAction
}
tests := []testCase{
- {0, 0, 0, 0, 0, UnknownVersionErr},
- {0, 1, 2, 3, 0, NoCompatibleVersionErr},
- {2, 3, 0, 1, 0, NoCompatibleVersionErr},
- {0, 5, 5, 6, 0, NoCompatibleVersionErr},
- {0, 2, 2, 4, 2, nil},
- {0, 2, 1, 3, 2, nil},
- {1, 3, 1, 3, 3, nil},
- {3, 3, 3, 3, 3, nil},
+ {0, 0, 0, 0, 0, ErrUnknownVersion},
+ {0, 1, 2, 3, 0, ErrNoCompatibleVersion},
+ {2, 3, 0, 1, 0, ErrNoCompatibleVersion},
+ {0, 5, 5, 6, 0, ErrNoCompatibleVersion},
+ {0, 2, 2, 4, 2, verror.ErrUnknown},
+ {0, 2, 1, 3, 2, verror.ErrUnknown},
+ {1, 3, 1, 3, 3, verror.ErrUnknown},
+ {3, 3, 3, 3, 3, verror.ErrUnknown},
}
for _, tc := range tests {
local := &inaming.Endpoint{
@@ -41,10 +42,14 @@
MinRPCVersion: tc.remoteMin,
MaxRPCVersion: tc.remoteMax,
}
- if ver, err := r.CommonVersion(local, remote); ver != tc.expectedVer || err != tc.expectedErr {
+ ver, err := r.CommonVersion(local, remote)
+ if ver != tc.expectedVer || (err != nil && verror.ErrorID(err) != tc.expectedErr.ID) {
t.Errorf("Unexpected result for local: %v, remote: %v. Got (%d, %v) wanted (%d, %v)",
local, remote, ver, err, tc.expectedVer, tc.expectedErr)
}
+ if err != nil {
+ t.Logf("%s", err)
+ }
}
}
@@ -93,15 +98,15 @@
type testCase struct {
supportMin, supportMax version.RPCVersion
remoteMin, remoteMax version.RPCVersion
- expectedError error
+ expectedError verror.IDAction
}
tests := []testCase{
- {0, 0, 0, 0, UnknownVersionErr},
- {5, 10, 1, 4, NoCompatibleVersionErr},
- {1, 4, 5, 10, NoCompatibleVersionErr},
- {1, 10, 2, 9, nil},
- {3, 8, 1, 4, nil},
- {3, 8, 7, 9, nil},
+ {0, 0, 0, 0, ErrUnknownVersion},
+ {5, 10, 1, 4, ErrNoCompatibleVersion},
+ {1, 4, 5, 10, ErrNoCompatibleVersion},
+ {1, 10, 2, 9, verror.ErrUnknown},
+ {3, 8, 1, 4, verror.ErrUnknown},
+ {3, 8, 7, 9, verror.ErrUnknown},
}
for _, tc := range tests {
@@ -110,9 +115,13 @@
MinRPCVersion: tc.remoteMin,
MaxRPCVersion: tc.remoteMax,
}
- if err := r.CheckCompatibility(remote); err != tc.expectedError {
+ err := r.CheckCompatibility(remote)
+ if err != nil && verror.ErrorID(err) != tc.expectedError.ID {
t.Errorf("Unexpected error for case %+v: got %v, wanted %v",
tc, err, tc.expectedError)
}
+ if err != nil {
+ t.Logf("%s", err)
+ }
}
}
diff --git a/profiles/internal/rt/mgmt.go b/profiles/internal/rt/mgmt.go
index 13412c1..8893016 100644
--- a/profiles/internal/rt/mgmt.go
+++ b/profiles/internal/rt/mgmt.go
@@ -98,9 +98,5 @@
func (rt *Runtime) callbackToParent(ctx *context.T, parentName, myName string) error {
ctx, _ = context.WithTimeout(ctx, time.Minute)
- call, err := rt.GetClient(ctx).StartCall(ctx, parentName, "Set", []interface{}{mgmt.AppCycleManagerConfigKey, myName})
- if err != nil {
- return err
- }
- return call.Finish()
+ return rt.GetClient(ctx).Call(ctx, parentName, "Set", []interface{}{mgmt.AppCycleManagerConfigKey, myName}, nil)
}
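The one-shot form used throughout this change is equivalent to the StartCall/Finish pair it replaces; a minimal sketch, with a placeholder object name and an Echo method assumed to exist on the server:

package main

import (
	"v.io/v23"
	"v.io/v23/context"
)

func echo(ctx *context.T, name, arg string) (string, error) {
	var reply string
	// New style: Call starts the RPC and finishes it in one shot.
	if err := v23.GetClient(ctx).Call(ctx, name, "Echo", []interface{}{arg}, []interface{}{&reply}); err != nil {
		return "", err
	}
	return reply, nil
}

func echoOld(ctx *context.T, name, arg string) (string, error) {
	// Old style: explicit StartCall followed by Finish.
	call, err := v23.GetClient(ctx).StartCall(ctx, name, "Echo", []interface{}{arg})
	if err != nil {
		return "", err
	}
	var reply string
	err = call.Finish(&reply)
	return reply, err
}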
diff --git a/profiles/internal/testing/mocks/mocknet/mocknet.go b/profiles/internal/testing/mocks/mocknet/mocknet.go
index 192def4..0b73e4b 100644
--- a/profiles/internal/testing/mocks/mocknet/mocknet.go
+++ b/profiles/internal/testing/mocks/mocknet/mocknet.go
@@ -331,7 +331,11 @@
}
func RewriteEndpointProtocol(ep string, protocol string) (naming.Endpoint, error) {
- n, err := v23.NewEndpoint(ep)
+ addr := ep
+ if naming.Rooted(ep) {
+ addr, _ = naming.SplitAddressName(ep)
+ }
+ n, err := v23.NewEndpoint(addr)
if err != nil {
return nil, err
}
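The fix above matters because mount table roots are rooted names; a small sketch of how naming.Rooted and naming.SplitAddressName behave on such a name (the address below is illustrative):

package main

import (
	"fmt"

	"v.io/v23/naming"
)

func main() {
	name := "/127.0.0.1:8101/suffix" // placeholder rooted name
	fmt.Println(naming.Rooted(name)) // true
	addr, rel := naming.SplitAddressName(name)
	fmt.Println(addr) // 127.0.0.1:8101 -- what RewriteEndpointProtocol now parses
	fmt.Println(rel)  // suffix
}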
diff --git a/profiles/internal/vtrace/vtrace_test.go b/profiles/internal/vtrace/vtrace_test.go
index a1a73f6..fc9873b 100644
--- a/profiles/internal/vtrace/vtrace_test.go
+++ b/profiles/internal/vtrace/vtrace_test.go
@@ -49,7 +49,7 @@
if err != nil {
t.Fatalf("Could not listen for mt %v", err)
}
- disp, err := mounttablelib.NewMountTableDispatcher("")
+ disp, err := mounttablelib.NewMountTableDispatcher("", "mounttable")
if err != nil {
t.Fatalf("Could not create mt dispatcher %v", err)
}
diff --git a/services/GO.PACKAGE b/services/GO.PACKAGE
index 34ef35b..d1f6537 100644
--- a/services/GO.PACKAGE
+++ b/services/GO.PACKAGE
@@ -1,6 +1,7 @@
{
"dependencies": {
"incoming": [
+ {"allow": "v.io/x/browser/runner/...", "comment":"temporarily allowing dependency from v.io/x/runner"},
{"allow": "v.io/x/ref/services/..."},
{"allow": "v.io/x/ref/test/modules", "comment":"temporarily allowing dependency from lib"},
{"allow": "v.io/x/ref/test/v23tests", "comment":"temporarily allowing dependency from lib"},
diff --git a/services/debug/debug/debug_v23_test.go b/services/debug/debug/debug_v23_test.go
index f644acc..5b0f047 100644
--- a/services/debug/debug/debug_v23_test.go
+++ b/services/debug/debug/debug_v23_test.go
@@ -100,8 +100,8 @@
file := createTestLogFile(i, testLogData)
logName := filepath.Base(file.Name())
runCount := 12
- for i := 0; i < runCount; i++ {
- binary.Start("logs", "read", "__debug/logs/"+logName).WaitOrDie(nil, nil)
+ for c := 0; c < runCount; c++ {
+ binary.Start("logs", "read", "__debug/logs/"+logName).WaitOrDie(os.Stderr, os.Stderr)
}
got := binary.Start("stats", "read", "__debug/stats/rpc/server/routing-id/*/methods/ReadLog/latency-ms").Output()
diff --git a/services/device/device/devicemanager_mock_test.go b/services/device/device/devicemanager_mock_test.go
index 6bed75f..9065755 100644
--- a/services/device/device/devicemanager_mock_test.go
+++ b/services/device/device/devicemanager_mock_test.go
@@ -11,6 +11,7 @@
"os"
"path/filepath"
"testing"
+ "time"
"v.io/v23"
"v.io/v23/context"
@@ -225,12 +226,12 @@
}
type StopStimulus struct {
- fun string
- timeDelta uint32
+ fun string
+ delta time.Duration
}
-func (mni *mockDeviceInvoker) Stop(_ rpc.ServerCall, timeDelta uint32) error {
- return mni.simpleCore(StopStimulus{"Stop", timeDelta}, "Stop")
+func (mni *mockDeviceInvoker) Stop(_ rpc.ServerCall, delta time.Duration) error {
+ return mni.simpleCore(StopStimulus{"Stop", delta}, "Stop")
}
func (mni *mockDeviceInvoker) Suspend(_ rpc.ServerCall) error {
diff --git a/services/device/device/instance_impl.go b/services/device/device/instance_impl.go
index e6c3289..058dff8 100644
--- a/services/device/device/instance_impl.go
+++ b/services/device/device/instance_impl.go
@@ -8,6 +8,7 @@
import (
"fmt"
+ "time"
"v.io/v23/services/device"
"v.io/x/lib/cmdline"
@@ -29,7 +30,7 @@
}
appName := args[0]
- if err := device.ApplicationClient(appName).Stop(gctx, 5); err != nil {
+ if err := device.ApplicationClient(appName).Stop(gctx, 5*time.Second); err != nil {
return fmt.Errorf("Stop failed: %v", err)
}
fmt.Fprintf(cmd.Stdout(), "Stop succeeded\n")
diff --git a/services/device/device/instance_impl_test.go b/services/device/device/instance_impl_test.go
index d06dcbf..46d1dba 100644
--- a/services/device/device/instance_impl_test.go
+++ b/services/device/device/instance_impl_test.go
@@ -9,6 +9,7 @@
"reflect"
"strings"
"testing"
+ "time"
"v.io/v23/naming"
"v.io/v23/verror"
@@ -66,7 +67,7 @@
t.Fatalf("Unexpected output from list. Got %q, expected %q", got, expected)
}
expected := []interface{}{
- StopStimulus{"Stop", 5},
+ StopStimulus{"Stop", 5 * time.Second},
}
if got := tape.Play(); !reflect.DeepEqual(expected, got) {
t.Errorf("invalid call sequence. Got %v, want %v", got, expected)
diff --git a/services/device/internal/impl/app_service.go b/services/device/internal/impl/app_service.go
index 31a10f0..d5c9a5a 100644
--- a/services/device/internal/impl/app_service.go
+++ b/services/device/internal/impl/app_service.go
@@ -668,6 +668,7 @@
// initializeSubAccessLists updates the provided acl for instance-specific ACLs
func (i *appService) initializeSubAccessLists(instanceDir string, blessings []string, acl access.Permissions) error {
for _, b := range blessings {
+ b = b + string(security.ChainSeparator) + string(security.NoExtension)
for _, tag := range access.AllTypicalTags() {
acl.Add(security.BlessingPattern(b), string(tag))
}
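The appended chain separator plus the no-extension marker turns each blessing into a pattern that matches only that exact blessing; a sketch of the intended semantics (pattern strings chosen for illustration):

package main

import (
	"fmt"

	"v.io/v23/security"
)

func main() {
	exact := security.BlessingPattern("root/self/$") // what the loop above now builds
	prefix := security.BlessingPattern("root/self")
	fmt.Println(exact.MatchedBy("root/self"))         // true
	fmt.Println(exact.MatchedBy("root/self/friend"))  // false: extensions excluded
	fmt.Println(prefix.MatchedBy("root/self/friend")) // true
}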
@@ -1029,7 +1030,7 @@
// TODO(caprita): implement deadline for Stop.
-func (i *appService) Stop(call rpc.ServerCall, deadline uint32) error {
+func (i *appService) Stop(call rpc.ServerCall, deadline time.Duration) error {
instanceDir, err := i.instanceDir()
if err != nil {
return err
diff --git a/services/device/internal/impl/device_service.go b/services/device/internal/impl/device_service.go
index 18eba41..efb5b2e 100644
--- a/services/device/internal/impl/device_service.go
+++ b/services/device/internal/impl/device_service.go
@@ -50,6 +50,7 @@
"strconv"
"strings"
"sync"
+ "time"
"v.io/v23"
"v.io/v23/context"
@@ -255,7 +256,7 @@
return len(result.Profiles) > 0, nil
}
-func (*deviceService) Reset(call rpc.ServerCall, deadline uint64) error {
+func (*deviceService) Reset(call rpc.ServerCall, deadline time.Duration) error {
// TODO(jsimsa): Implement.
return nil
}
@@ -586,7 +587,7 @@
return verror.New(ErrInvalidSuffix, call.Context())
}
-func (*deviceService) Stop(call rpc.ServerCall, _ uint32) error {
+func (*deviceService) Stop(call rpc.ServerCall, _ time.Duration) error {
v23.GetAppCycle(call.Context()).Stop()
return nil
}
diff --git a/services/device/internal/impl/impl_test.go b/services/device/internal/impl/impl_test.go
index fd22cce..326ab3e 100644
--- a/services/device/internal/impl/impl_test.go
+++ b/services/device/internal/impl/impl_test.go
@@ -400,6 +400,7 @@
dmh := servicetest.RunCommand(t, sh, dmPauseBeforeStopEnv, deviceManagerCmd, dmArgs...)
defer func() {
syscall.Kill(dmh.Pid(), syscall.SIGINT)
+ verifyNoRunningProcesses(t)
}()
servicetest.ReadPID(t, dmh)
@@ -854,6 +855,7 @@
startAppExpectError(t, ctx, appID, impl.ErrInvalidOperation.ID)
// Cleanly shut down the device manager.
+ defer verifyNoRunningProcesses(t)
syscall.Kill(dmh.Pid(), syscall.SIGINT)
dmh.Expect("dm terminated")
dmh.ExpectEOF()
@@ -1006,6 +1008,7 @@
dmh := servicetest.RunCommand(t, sh, nil, deviceManagerCmd, "dm", root, "unused_helper", "unused_app_repo_name", "unused_curr_link")
pid := servicetest.ReadPID(t, dmh)
defer syscall.Kill(pid, syscall.SIGINT)
+ defer verifyNoRunningProcesses(t)
// Create an envelope for an app.
*envelope = envelopeFromShell(sh, nil, appCmd, "google naps")
@@ -1172,6 +1175,7 @@
// Start an instance of the app.
instance1ID := startApp(t, ctx, appID)
+ defer stopApp(t, ctx, appID, instance1ID)
// Wait until the app pings us that it's ready.
select {
@@ -1287,6 +1291,7 @@
dmh := servicetest.RunCommand(t, sh, nil, deviceManagerCmd, "dm", root, helperPath, "unused_app_repo_name", "unused_curr_link")
pid := servicetest.ReadPID(t, dmh)
defer syscall.Kill(pid, syscall.SIGINT)
+ defer verifyNoRunningProcesses(t)
// Create the local server that the app uses to let us know it's ready.
pingCh, cleanup := setupPingServer(t, ctx)
@@ -1323,7 +1328,8 @@
appID := installApp(t, ctx, packages)
// Start an instance of the app.
- startApp(t, ctx, appID)
+ instance1ID := startApp(t, ctx, appID)
+ defer stopApp(t, ctx, appID, instance1ID)
// Wait until the app pings us that it's ready.
select {
@@ -1404,6 +1410,7 @@
dmh := servicetest.RunCommand(t, sh, nil, deviceManagerCmd, "dm", root, "unused_helper", "unused_app_repo_name", "unused_curr_link")
pid := servicetest.ReadPID(t, dmh)
defer syscall.Kill(pid, syscall.SIGINT)
+ defer verifyNoRunningProcesses(t)
deviceStub := device.DeviceClient("dm/device")
// Attempt to list associations on the device manager without having
@@ -1503,6 +1510,7 @@
dmh := servicetest.RunCommand(t, sh, nil, deviceManagerCmd, "-mocksetuid", "dm", root, helperPath, "unused_app_repo_name", "unused_curr_link")
pid := servicetest.ReadPID(t, dmh)
defer syscall.Kill(pid, syscall.SIGINT)
+ defer verifyNoRunningProcesses(t)
// Claim the devicemanager with selfCtx as root/self/alice
claimDevice(t, selfCtx, "dm", "alice", noPairingToken)
@@ -1519,6 +1527,19 @@
// Install and start the app as root/self.
appID := installApp(t, selfCtx)
+ vlog.VI(2).Infof("Validate that the created app has the right permission lists.")
+ acl, _, err := appStub(appID).GetPermissions(selfCtx)
+ if err != nil {
+ t.Fatalf("GetPermissions on appID: %v failed %v", appID, err)
+ }
+ expected := make(access.Permissions)
+ for _, tag := range access.AllTypicalTags() {
+ expected[string(tag)] = access.AccessList{In: []security.BlessingPattern{"root/self/$"}}
+ }
+ if got, want := acl.Normalize(), expected.Normalize(); !reflect.DeepEqual(got, want) {
+ t.Errorf("got %#v, expected %#v", got, want)
+ }
+
// Start an instance of the app but this time it should fail: we do not
// have an associated uname for the invoking identity.
startAppExpectError(t, selfCtx, appID, verror.ErrNoAccess.ID)
@@ -1570,6 +1591,21 @@
vlog.VI(2).Infof("other attempting to run an app with access. Should succeed.")
instance2ID := startApp(t, otherCtx, appID)
verifyPingArgs(t, pingCh, testUserName, "flag-val-envelope", "env-var") // Wait until the app pings us that it's ready.
+
+ vlog.VI(2).Infof("Validate that created instance has the right permissions.")
+ expected = make(access.Permissions)
+ for _, tag := range access.AllTypicalTags() {
+ expected[string(tag)] = access.AccessList{In: []security.BlessingPattern{"root/other/$"}}
+ }
+ acl, _, err = appStub(appID, instance2ID).GetPermissions(selfCtx)
+ if err != nil {
+ t.Fatalf("GetPermissions on instance %v/%v failed: %v", appID, instance2ID, err)
+ }
+ if got, want := acl.Normalize(), expected.Normalize(); !reflect.DeepEqual(got, want) {
+ t.Errorf("got %#v, expected %#v ", got, want)
+ }
+
+	// Shut down the app.
suspendApp(t, otherCtx, appID, instance2ID)
vlog.VI(2).Infof("Verify that Resume with the same systemName works.")
diff --git a/services/device/internal/impl/instance_reaping.go b/services/device/internal/impl/instance_reaping.go
index 2c7da8c..f7e9672 100644
--- a/services/device/internal/impl/instance_reaping.go
+++ b/services/device/internal/impl/instance_reaping.go
@@ -30,8 +30,14 @@
type reaper chan pidInstanceDirPair
+var stashedPidMap map[string]int
+
func newReaper(ctx *context.T, root string) (reaper, error) {
pidMap, err := findAllTheInstances(ctx, root)
+
+ // Used only by the testing code that verifies that all processes
+ // have been shutdown.
+	// have been shut down.
if err != nil {
return nil, err
}
diff --git a/services/device/internal/impl/instance_reaping_test.go b/services/device/internal/impl/instance_reaping_test.go
index bac5e15..d50d3cc 100644
--- a/services/device/internal/impl/instance_reaping_test.go
+++ b/services/device/internal/impl/instance_reaping_test.go
@@ -78,6 +78,7 @@
// TODO(rjkroege): Exercise the polling loop code.
// Cleanly shut down the device manager.
+ verifyNoRunningProcesses(t)
syscall.Kill(dmh.Pid(), syscall.SIGINT)
dmh.Expect("dm terminated")
dmh.ExpectEOF()
@@ -198,6 +199,7 @@
// TODO(rjkroege): Should be in a defer to ensure that the device
// manager is cleaned up even if the test fails in an exceptional way.
+ verifyNoRunningProcesses(t)
syscall.Kill(dmh.Pid(), syscall.SIGINT)
dmh.Expect("dm terminated")
dmh.ExpectEOF()
diff --git a/services/device/internal/impl/only_for_test.go b/services/device/internal/impl/only_for_test.go
index 23bcc37..583d545 100644
--- a/services/device/internal/impl/only_for_test.go
+++ b/services/device/internal/impl/only_for_test.go
@@ -68,3 +68,9 @@
func WrapBaseCleanupDir(path, helper string) {
baseCleanupDir(path, helper)
}
+
+// RunningChildrenProcesses uses the reaper to verify that a test has
+// successfully shut down all processes.
+func RunningChildrenProcesses() bool {
+ return len(stashedPidMap) > 0
+}
diff --git a/services/device/internal/impl/util_test.go b/services/device/internal/impl/util_test.go
index 3cc4314..3a99927 100644
--- a/services/device/internal/impl/util_test.go
+++ b/services/device/internal/impl/util_test.go
@@ -613,3 +613,9 @@
}
}
+
+func verifyNoRunningProcesses(t *testing.T) {
+ if impl.RunningChildrenProcesses() {
+ t.Errorf("device manager incorrectly terminating with child processes still running")
+ }
+}
diff --git a/services/device/internal/starter/starter.go b/services/device/internal/starter/starter.go
index d0e814e..62d5744 100644
--- a/services/device/internal/starter/starter.go
+++ b/services/device/internal/starter/starter.go
@@ -304,7 +304,7 @@
}
func startMounttable(ctx *context.T, n NamespaceArgs) (string, func(), error) {
- mtName, stopMT, err := mounttablelib.StartServers(ctx, n.ListenSpec, n.Name, n.Neighborhood, n.AccessListFile)
+ mtName, stopMT, err := mounttablelib.StartServers(ctx, n.ListenSpec, n.Name, n.Neighborhood, n.AccessListFile, "mounttable")
if err != nil {
vlog.Errorf("mounttablelib.StartServers(%#v) failed: %v", n, err)
} else {
diff --git a/services/internal/servicetest/modules.go b/services/internal/servicetest/modules.go
index 9883ebc..5d43db5 100644
--- a/services/internal/servicetest/modules.go
+++ b/services/internal/servicetest/modules.go
@@ -47,7 +47,7 @@
if err != nil {
return fmt.Errorf("root failed: %v", err)
}
- mt, err := mounttablelib.NewMountTableDispatcher("")
+ mt, err := mounttablelib.NewMountTableDispatcher("", "mounttable")
if err != nil {
return fmt.Errorf("mounttablelib.NewMountTableDispatcher failed: %s", err)
}
diff --git a/services/mounttable/mounttabled/mounttable.go b/services/mounttable/mounttabled/mounttable.go
index 7dc5759..d417726 100644
--- a/services/mounttable/mounttabled/mounttable.go
+++ b/services/mounttable/mounttabled/mounttable.go
@@ -28,7 +28,7 @@
ctx, shutdown := v23.Init()
defer shutdown()
- name, stop, err := mounttablelib.StartServers(ctx, v23.GetListenSpec(ctx), *mountName, *nhName, *aclFile)
+ name, stop, err := mounttablelib.StartServers(ctx, v23.GetListenSpec(ctx), *mountName, *nhName, *aclFile, "mounttable")
if err != nil {
vlog.Errorf("mounttablelib.StartServers failed: %v", err)
os.Exit(1)
diff --git a/services/mounttable/mounttablelib/mounttable.go b/services/mounttable/mounttablelib/mounttable.go
index 2d8a28a..8bd6e21 100644
--- a/services/mounttable/mounttablelib/mounttable.go
+++ b/services/mounttable/mounttablelib/mounttable.go
@@ -13,8 +13,6 @@
"sync"
"time"
- "v.io/x/ref/lib/glob"
-
"v.io/v23"
"v.io/v23/context"
"v.io/v23/naming"
@@ -23,7 +21,11 @@
"v.io/v23/security/access"
"v.io/v23/services/mounttable"
"v.io/v23/verror"
+
"v.io/x/lib/vlog"
+
+ "v.io/x/ref/lib/glob"
+ "v.io/x/ref/lib/stats"
)
const pkgPath = "v.io/x/ref/services/mounttable/mounttablelib"
@@ -51,8 +53,9 @@
// mountTable represents a namespace. One exists per server instance.
type mountTable struct {
- root *node
- superUsers access.AccessList
+ root *node
+ superUsers access.AccessList
+ nodeCounter *stats.Integer
}
var _ rpc.Dispatcher = (*mountTable)(nil)
@@ -92,17 +95,52 @@
// aclfile is a JSON-encoded mapping from paths in the mounttable to the
// access.Permissions for that path. The tags used in the map are the typical
// access tags (the Tag type defined in v.io/v23/security/access).
-func NewMountTableDispatcher(aclfile string) (rpc.Dispatcher, error) {
+//
+// statsPrefix is the prefix for exported statistics objects.
+func NewMountTableDispatcher(aclfile, statsPrefix string) (rpc.Dispatcher, error) {
mt := &mountTable{
- root: new(node),
+ root: new(node),
+ nodeCounter: stats.NewInteger(naming.Join(statsPrefix, "num-nodes")),
}
- mt.root.parent = new(node) // just for its lock
+ mt.root.parent = mt.newNode() // just for its lock
if err := mt.parseAccessLists(aclfile); err != nil && !os.IsNotExist(err) {
return nil, err
}
return mt, nil
}
+// newNode creates a new node, and updates the number of nodes.
+func (mt *mountTable) newNode() *node {
+ mt.nodeCounter.Incr(1)
+ return new(node)
+}
+
+// deleteNode deletes a node and all its children, and updates the number of
+// nodes.
+func (mt *mountTable) deleteNode(parent *node, child string) {
+ // Assumes that parent and parent[child] are locked.
+
+ // Walk the tree and count the number of nodes deleted.
+ n := parent.children[child]
+ if n == nil {
+ return
+ }
+ count := int64(0)
+ queue := []*node{n}
+ for len(queue) > 0 {
+ count++
+ n := queue[0]
+ queue = queue[1:]
+ for _, ch := range n.children {
+ ch.Lock() // Keep locked until it is deleted.
+ queue = append(queue, ch)
+ }
+ }
+
+ mt.nodeCounter.Incr(-count)
+ delete(parent.children, child)
+}
+
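A standalone sketch of the breadth-first count that deleteNode performs, with a stripped-down node type and no locking:

package main

import "fmt"

type node struct {
	children map[string]*node
}

// countSubtree returns the number of nodes rooted at n, mirroring the
// queue-based walk in deleteNode above.
func countSubtree(n *node) int64 {
	if n == nil {
		return 0
	}
	var count int64
	queue := []*node{n}
	for len(queue) > 0 {
		count++
		cur := queue[0]
		queue = queue[1:]
		for _, ch := range cur.children {
			queue = append(queue, ch)
		}
	}
	return count
}

func main() {
	leaf := &node{}
	mid := &node{children: map[string]*node{"leaf": leaf}}
	root := &node{children: map[string]*node{"mid": mid}}
	fmt.Println(countSubtree(root)) // 3
}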
func (mt *mountTable) parseAccessLists(path string) error {
vlog.VI(2).Infof("parseAccessLists(%s)", path)
if path == "" {
@@ -306,7 +344,7 @@
}
}
// At this point cur is still locked, OK to use and change it.
- next := new(node)
+ next := mt.newNode()
next.parent = cur
if cur.amTemplate != nil {
next.acls = createTAMGFromTemplate(cur.amTemplate, e)
@@ -368,7 +406,7 @@
return nil, nil, err
}
if !n.mount.isActive() {
- removed := n.removeUseless()
+ removed := n.removeUseless(mt)
n.parent.Unlock()
n.Unlock()
// If we removed the node, see if we can remove any of its
@@ -498,13 +536,13 @@
// removeUseless removes a node and all of its ascendants that are not useful.
//
// We assume both n and n.parent are locked.
-func (n *node) removeUseless() bool {
+func (n *node) removeUseless(mt *mountTable) bool {
if len(n.children) > 0 || n.mount.isActive() || n.explicitAccessLists {
return false
}
for k, c := range n.parent.children {
if c == n {
- delete(n.parent.children, k)
+ mt.deleteNode(n.parent, k)
break
}
}
@@ -523,7 +561,7 @@
n.Unlock()
break
}
- removed := n.removeUseless()
+ removed := n.removeUseless(mt)
n.parent.Unlock()
n.Unlock()
if !removed {
@@ -549,7 +587,7 @@
} else if n.mount != nil && n.mount.servers.remove(server) == 0 {
n.mount = nil
}
- removed := n.removeUseless()
+ removed := n.removeUseless(mt)
n.parent.Unlock()
n.Unlock()
if removed {
@@ -581,7 +619,7 @@
if !deleteSubTree && len(n.children) > 0 {
return verror.New(errNotEmpty, call.Context(), ms.name)
}
- delete(n.parent.children, ms.elems[len(ms.elems)-1])
+ mt.deleteNode(n.parent, ms.elems[len(ms.elems)-1])
return nil
}
@@ -597,7 +635,7 @@
// If this is a mount point, we're done.
if m := n.mount; m != nil {
- removed := n.removeUseless()
+ removed := n.removeUseless(mt)
if removed {
n.parent.Unlock()
n.Unlock()
@@ -623,32 +661,37 @@
}
if !pattern.Finished() {
- // Recurse through the children. OK if client has read access to the
- // directory or has traverse access to the directory and any access to the child.
- allAllowed := true
+	// We can only list children to which we have some access AND either
+ // - we have Read or Admin access to the directory or
+ // - we have Resolve or Create access to the directory and the
+ // next element in the pattern is a fixed string.
if err := n.satisfies(mt, call, globTags); err != nil {
- allAllowed = false
if err := n.satisfies(mt, call, traverseTags); err != nil {
- n.parent.Unlock()
- n.Unlock()
- return
+ goto out
+ }
+ fixed, _ := pattern.SplitFixedPrefix()
+ if len(fixed) == 0 {
+ goto out
}
}
+
+ // Since we will be unlocking the node,
+ // we need to grab the list of children before any unlocking.
children := make(map[string]*node, len(n.children))
for k, c := range n.children {
children[k] = c
}
n.parent.Unlock()
+
+ // Recurse through the children.
for k, c := range children {
// At this point, n lock is held.
if ok, _, suffix := pattern.MatchInitialSegment(k); ok {
c.Lock()
- if !allAllowed {
- // If child allows any access show it. Otherwise, skip.
- if err := c.satisfies(mt, call, allTags); err != nil {
- c.Unlock()
- continue
- }
+			// If the child allows any access, show it. Otherwise, skip.
+ if err := c.satisfies(mt, call, allTags); err != nil {
+ c.Unlock()
+ continue
}
mt.globStep(c, naming.Join(name, k), suffix, call, ch)
n.Lock()
@@ -661,8 +704,9 @@
n.Lock()
}
+out:
// Remove if no longer useful.
- if n.removeUseless() || pattern.Len() != 0 {
+ if n.removeUseless(mt) || pattern.Len() != 0 {
n.parent.Unlock()
n.Unlock()
return
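Note on the counter plumbing above: node creation and removal now both go through the mountTable so the exported num-nodes stat stays in step with the tree; removeUseless takes the *mountTable solely so its map deletion can be routed through mt.deleteNode. A minimal sketch of the constructor side, assuming nodeCounter exposes a stats-style Incr(delta int64) method (the deletion side, with its breadth-first subtree count, is the deleteNode tail at the top of this file's hunks):

func (mt *mountTable) newNode() *node {
	mt.nodeCounter.Incr(1) // one more entry reflected in num-nodes
	return new(node)
}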
diff --git a/services/mounttable/mounttablelib/mounttable_test.go b/services/mounttable/mounttablelib/mounttable_test.go
index 300e997..b3a9594 100644
--- a/services/mounttable/mounttablelib/mounttable_test.go
+++ b/services/mounttable/mounttablelib/mounttable_test.go
@@ -6,6 +6,7 @@
import (
"errors"
+ "fmt"
"io"
"reflect"
"runtime/debug"
@@ -20,9 +21,11 @@
"v.io/v23/rpc"
"v.io/v23/security"
"v.io/v23/security/access"
+ "v.io/v23/services/stats"
+ "v.io/v23/vdl"
"v.io/x/lib/vlog"
- _ "v.io/x/ref/profiles"
+ "v.io/x/ref/services/debug/debuglib"
"v.io/x/ref/test"
"v.io/x/ref/test/testutil"
)
@@ -39,14 +42,7 @@
func doMount(t *testing.T, ctx *context.T, ep, suffix, service string, shouldSucceed bool) {
name := naming.JoinAddressName(ep, suffix)
client := v23.GetClient(ctx)
- call, err := client.StartCall(ctx, name, "Mount", []interface{}{service, uint32(ttlSecs), 0}, options.NoResolve{})
- if err != nil {
- if !shouldSucceed {
- return
- }
- boom(t, "Failed to Mount %s onto %s: %s", service, name, err)
- }
- if err := call.Finish(); err != nil {
+ if err := client.Call(ctx, name, "Mount", []interface{}{service, uint32(ttlSecs), 0}, nil, options.NoResolve{}); err != nil {
if !shouldSucceed {
return
}
@@ -57,14 +53,7 @@
func doUnmount(t *testing.T, ctx *context.T, ep, suffix, service string, shouldSucceed bool) {
name := naming.JoinAddressName(ep, suffix)
client := v23.GetClient(ctx)
- call, err := client.StartCall(ctx, name, "Unmount", []interface{}{service}, options.NoResolve{})
- if err != nil {
- if !shouldSucceed {
- return
- }
- boom(t, "Failed to Mount %s onto %s: %s", service, name, err)
- }
- if err := call.Finish(); err != nil {
+ if err := client.Call(ctx, name, "Unmount", []interface{}{service}, nil, options.NoResolve{}); err != nil {
if !shouldSucceed {
return
}
@@ -75,14 +64,7 @@
func doGetPermissions(t *testing.T, ctx *context.T, ep, suffix string, shouldSucceed bool) (acl access.Permissions, version string) {
name := naming.JoinAddressName(ep, suffix)
client := v23.GetClient(ctx)
- call, err := client.StartCall(ctx, name, "GetPermissions", nil, options.NoResolve{})
- if err != nil {
- if !shouldSucceed {
- return
- }
- boom(t, "Failed to GetPermissions %s: %s", name, err)
- }
- if err := call.Finish(&acl, &version); err != nil {
+ if err := client.Call(ctx, name, "GetPermissions", nil, []interface{}{&acl, &version}, options.NoResolve{}); err != nil {
if !shouldSucceed {
return
}
@@ -94,14 +76,7 @@
func doSetPermissions(t *testing.T, ctx *context.T, ep, suffix string, acl access.Permissions, version string, shouldSucceed bool) {
name := naming.JoinAddressName(ep, suffix)
client := v23.GetClient(ctx)
- call, err := client.StartCall(ctx, name, "SetPermissions", []interface{}{acl, version}, options.NoResolve{})
- if err != nil {
- if !shouldSucceed {
- return
- }
- boom(t, "Failed to SetPermissions %s: %s", name, err)
- }
- if err := call.Finish(); err != nil {
+ if err := client.Call(ctx, name, "SetPermissions", []interface{}{acl, version}, nil, options.NoResolve{}); err != nil {
if !shouldSucceed {
return
}
@@ -112,14 +87,7 @@
func doDeleteNode(t *testing.T, ctx *context.T, ep, suffix string, shouldSucceed bool) {
name := naming.JoinAddressName(ep, suffix)
client := v23.GetClient(ctx)
- call, err := client.StartCall(ctx, name, "Delete", []interface{}{false}, options.NoResolve{})
- if err != nil {
- if !shouldSucceed {
- return
- }
- boom(t, "Failed to Delete node %s: %s", name, err)
- }
- if err := call.Finish(); err != nil {
+ if err := client.Call(ctx, name, "Delete", []interface{}{false}, nil, options.NoResolve{}); err != nil {
if !shouldSucceed {
return
}
@@ -130,14 +98,7 @@
func doDeleteSubtree(t *testing.T, ctx *context.T, ep, suffix string, shouldSucceed bool) {
name := naming.JoinAddressName(ep, suffix)
client := v23.GetClient(ctx)
- call, err := client.StartCall(ctx, name, "Delete", []interface{}{true}, options.NoResolve{})
- if err != nil {
- if !shouldSucceed {
- return
- }
- boom(t, "Failed to Delete subtree %s: %s", name, err)
- }
- if err := call.Finish(); err != nil {
+ if err := client.Call(ctx, name, "Delete", []interface{}{true}, nil, options.NoResolve{}); err != nil {
if !shouldSucceed {
return
}
@@ -159,13 +120,9 @@
func resolve(ctx *context.T, name string) (*naming.MountEntry, error) {
// Resolve the name one level.
- client := v23.GetClient(ctx)
- call, err := client.StartCall(ctx, name, "ResolveStep", nil, options.NoResolve{})
- if err != nil {
- return nil, err
- }
var entry naming.MountEntry
- if err := call.Finish(&entry); err != nil {
+ client := v23.GetClient(ctx)
+ if err := client.Call(ctx, name, "ResolveStep", nil, []interface{}{&entry}, options.NoResolve{}); err != nil {
return nil, err
}
if len(entry.Servers) < 1 {
@@ -182,12 +139,8 @@
}
// Export the value.
client := v23.GetClient(ctx)
- call, err := client.StartCall(ctx, mountentry2names(resolved)[0], "Export", []interface{}{contents, true}, options.NoResolve{})
- if err != nil {
- boom(t, "Failed to Export.StartCall %s to %s: %s", name, contents, err)
- }
- if err := call.Finish(); err != nil {
- boom(t, "Failed to Export.StartCall %s to %s: %s", name, contents, err)
+ if err := client.Call(ctx, mountentry2names(resolved)[0], "Export", []interface{}{contents, true}, nil, options.NoResolve{}); err != nil {
+ boom(t, "Failed to Export.Call %s to %s: %s", name, contents, err)
}
}
@@ -225,17 +178,19 @@
}
func newMT(t *testing.T, acl string, rootCtx *context.T) (rpc.Server, string) {
- server, err := v23.NewServer(rootCtx, options.ServesMountTable(true))
+ reservedDisp := debuglib.NewDispatcher(vlog.Log.LogDir, nil)
+ ctx := v23.SetReservedNameDispatcher(rootCtx, reservedDisp)
+ server, err := v23.NewServer(ctx, options.ServesMountTable(true))
if err != nil {
boom(t, "r.NewServer: %s", err)
}
// Add mount table service.
- mt, err := NewMountTableDispatcher(acl)
+ mt, err := NewMountTableDispatcher(acl, "mounttable")
if err != nil {
boom(t, "NewMountTableDispatcher: %v", err)
}
// Start serving on a loopback address.
- eps, err := server.Listen(v23.GetListenSpec(rootCtx))
+ eps, err := server.Listen(v23.GetListenSpec(ctx))
if err != nil {
boom(t, "Failed to Listen mount table: %s", err)
}
@@ -616,16 +571,74 @@
}
func TestBadAccessLists(t *testing.T) {
- _, err := NewMountTableDispatcher("testdata/invalid.acl")
+ _, err := NewMountTableDispatcher("testdata/invalid.acl", "mounttable")
if err == nil {
boom(t, "Expected json parse error in acl file")
}
- _, err = NewMountTableDispatcher("testdata/doesntexist.acl")
+ _, err = NewMountTableDispatcher("testdata/doesntexist.acl", "mounttable")
if err != nil {
boom(t, "Missing acl file should not cause an error")
}
}
+func nodeCount(t *testing.T, ctx *context.T, addr string) int64 {
+ st := stats.StatsClient(naming.JoinAddressName(addr, "__debug/stats/mounttable/num-nodes"))
+ v, err := st.Value(ctx)
+ if err != nil {
+ t.Fatalf("Failed to get mounttable/num-nodes: %v", err)
+ return -1
+ }
+ var value int64
+ if err := vdl.Convert(&value, v); err != nil {
+ t.Fatalf("Unexpected value type for mounttable/num-nodes: %v", err)
+ }
+ return value
+}
+
+func TestNodeCounter(t *testing.T) {
+ rootCtx, shutdown := test.InitForTest()
+ defer shutdown()
+
+ server, estr := newMT(t, "", rootCtx)
+ defer server.Stop()
+
+ // Test flat tree
+ for i := 1; i <= 10; i++ {
+ name := fmt.Sprintf("node%d", i)
+ addr := naming.JoinAddressName(estr, name)
+ doMount(t, rootCtx, estr, name, addr, true)
+ if expected, got := int64(i+1), nodeCount(t, rootCtx, estr); got != expected {
+ t.Errorf("Unexpected number of nodes. Got %d, expected %d", got, expected)
+ }
+ }
+ for i := 1; i <= 10; i++ {
+ name := fmt.Sprintf("node%d", i)
+ if i%2 == 0 {
+ doUnmount(t, rootCtx, estr, name, "", true)
+ } else {
+ doDeleteSubtree(t, rootCtx, estr, name, true)
+ }
+ if expected, got := int64(11-i), nodeCount(t, rootCtx, estr); got != expected {
+ t.Errorf("Unexpected number of nodes. Got %d, expected %d", got, expected)
+ }
+ }
+
+ // Test deep tree
+ doMount(t, rootCtx, estr, "1/2/3/4/5/6/7/8/9a/10", naming.JoinAddressName(estr, ""), true)
+ doMount(t, rootCtx, estr, "1/2/3/4/5/6/7/8/9b/11", naming.JoinAddressName(estr, ""), true)
+ if expected, got := int64(13), nodeCount(t, rootCtx, estr); got != expected {
+ t.Errorf("Unexpected number of nodes. Got %d, expected %d", got, expected)
+ }
+ doDeleteSubtree(t, rootCtx, estr, "1/2/3/4/5", true)
+ if expected, got := int64(5), nodeCount(t, rootCtx, estr); got != expected {
+ t.Errorf("Unexpected number of nodes. Got %d, expected %d", got, expected)
+ }
+ doDeleteSubtree(t, rootCtx, estr, "1", true)
+ if expected, got := int64(1), nodeCount(t, rootCtx, estr); got != expected {
+ t.Errorf("Unexpected number of nodes. Got %d, expected %d", got, expected)
+ }
+}
+
func initTest() (rootCtx *context.T, aliceCtx *context.T, bobCtx *context.T, shutdown v23.Shutdown) {
test.Init()
ctx, shutdown := test.InitForTest()
diff --git a/services/mounttable/mounttablelib/servers.go b/services/mounttable/mounttablelib/servers.go
index 77f358c..d63bfea 100644
--- a/services/mounttable/mounttablelib/servers.go
+++ b/services/mounttable/mounttablelib/servers.go
@@ -15,7 +15,7 @@
"v.io/x/lib/vlog"
)
-func StartServers(ctx *context.T, listenSpec rpc.ListenSpec, mountName, nhName, aclFile string) (string, func(), error) {
+func StartServers(ctx *context.T, listenSpec rpc.ListenSpec, mountName, nhName, aclFile, debugPrefix string) (string, func(), error) {
var stopFuncs []func() error
stop := func() {
for i := len(stopFuncs) - 1; i >= 0; i-- {
@@ -29,7 +29,7 @@
return "", nil, err
}
stopFuncs = append(stopFuncs, mtServer.Stop)
- mt, err := NewMountTableDispatcher(aclFile)
+ mt, err := NewMountTableDispatcher(aclFile, debugPrefix)
if err != nil {
vlog.Errorf("NewMountTable failed: %v", err)
stop()
diff --git a/services/role/roled/internal/config.vdl b/services/role/roled/internal/config.vdl
index 734ca95..e940903 100644
--- a/services/role/roled/internal/config.vdl
+++ b/services/role/roled/internal/config.vdl
@@ -28,4 +28,8 @@
// string representation of a time.Duration, e.g. "24h". An empty string
// indicates that the role blessing will not expire.
Expiry string
+ // The blessings issued for this role will only be valid for
+ // communicating with peers that match at least one of these patterns.
+ // If the list is empty, all peers are allowed.
+ Peers []security.BlessingPattern
}
diff --git a/services/role/roled/internal/config.vdl.go b/services/role/roled/internal/config.vdl.go
index 0928eb7..7e23661 100644
--- a/services/role/roled/internal/config.vdl.go
+++ b/services/role/roled/internal/config.vdl.go
@@ -37,6 +37,10 @@
// string representation of a time.Duration, e.g. "24h". An empty string
// indicates that the role blessing will not expire.
Expiry string
+ // The blessings issued for this role will only be valid for
+ // communicating with peers that match at least one of these patterns.
+ // If the list is empty, all peers are allowed.
+ Peers []security.BlessingPattern
}
func (Config) __VDLReflect(struct {
diff --git a/services/role/roled/internal/role.go b/services/role/roled/internal/role.go
index ee578d4..ad1ea41 100644
--- a/services/role/roled/internal/role.go
+++ b/services/role/roled/internal/role.go
@@ -85,18 +85,26 @@
}
func caveats(ctx *context.T, config *Config) ([]security.Caveat, error) {
- if config.Expiry == "" {
- return nil, nil
+ var caveats []security.Caveat
+ if config.Expiry != "" {
+ d, err := time.ParseDuration(config.Expiry)
+ if err != nil {
+ return nil, verror.Convert(verror.ErrInternal, ctx, err)
+ }
+ expiry, err := security.ExpiryCaveat(time.Now().Add(d))
+ if err != nil {
+ return nil, verror.Convert(verror.ErrInternal, ctx, err)
+ }
+ caveats = append(caveats, expiry)
}
- d, err := time.ParseDuration(config.Expiry)
- if err != nil {
- return nil, verror.Convert(verror.ErrInternal, ctx, err)
+ if len(config.Peers) != 0 {
+ peer, err := security.NewCaveat(security.PeerBlessingsCaveat, config.Peers)
+ if err != nil {
+ return nil, verror.Convert(verror.ErrInternal, ctx, err)
+ }
+ caveats = append(caveats, peer)
}
- expiry, err := security.ExpiryCaveat(time.Now().Add(d))
- if err != nil {
- return nil, verror.Convert(verror.ErrInternal, ctx, err)
- }
- return []security.Caveat{expiry}, nil
+ return caveats, nil
}
func createBlessings(ctx *context.T, config *Config, principal security.Principal, extensions []string, caveats []security.Caveat, dischargerLocation string) (security.Blessings, error) {
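The new Peers field composes with Expiry in caveats() above: a role blessing can carry both an expiry caveat and a peer-blessings caveat, in which case it only validates at peers whose blessings match one of the listed patterns (the test added below checks exactly this). A minimal sketch of the composition, using only the constructors already used in this file; the patterns and duration are illustrative:

func exampleCaveats() ([]security.Caveat, error) {
	peerCav, err := security.NewCaveat(security.PeerBlessingsCaveat,
		[]security.BlessingPattern{"root/peer1", "root/peer3"})
	if err != nil {
		return nil, err
	}
	expiryCav, err := security.ExpiryCaveat(time.Now().Add(24 * time.Hour))
	if err != nil {
		return nil, err
	}
	return []security.Caveat{expiryCav, peerCav}, nil
}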
diff --git a/services/role/roled/internal/role_test.go b/services/role/roled/internal/role_test.go
index 483279f..022549e 100644
--- a/services/role/roled/internal/role_test.go
+++ b/services/role/roled/internal/role_test.go
@@ -82,7 +82,7 @@
errID verror.ID
blessings []string
}{
- {user1, "", verror.ErrNoExist.ID, nil},
+ {user1, "", verror.ErrUnknownMethod.ID, nil},
{user1, "unknown", verror.ErrNoAccess.ID, nil},
{user2, "unknown", verror.ErrNoAccess.ID, nil},
{user3, "unknown", verror.ErrNoAccess.ID, nil},
@@ -109,16 +109,91 @@
if verror.ErrorID(err) != tc.errID {
t.Errorf("unexpected error ID for (%q, %q). Got %#v, expected %#v", user, tc.role, verror.ErrorID(err), tc.errID)
}
- if err == nil {
- previousBlessings, _ := v23.GetPrincipal(tc.ctx).BlessingStore().Set(blessings, security.AllPrincipals)
- blessingNames, rejected := callTest(t, tc.ctx, testAddr)
- if !reflect.DeepEqual(blessingNames, tc.blessings) {
- t.Errorf("unexpected blessings for (%q, %q). Got %q, expected %q", user, tc.role, blessingNames, tc.blessings)
- }
- if len(rejected) != 0 {
- t.Errorf("unexpected rejected blessings for (%q, %q): %q", user, tc.role, rejected)
- }
- v23.GetPrincipal(tc.ctx).BlessingStore().Set(previousBlessings, security.AllPrincipals)
+ if err != nil {
+ continue
+ }
+ previousBlessings, _ := v23.GetPrincipal(tc.ctx).BlessingStore().Set(blessings, security.AllPrincipals)
+ blessingNames, rejected := callTest(t, tc.ctx, testAddr)
+ if !reflect.DeepEqual(blessingNames, tc.blessings) {
+ t.Errorf("unexpected blessings for (%q, %q). Got %q, expected %q", user, tc.role, blessingNames, tc.blessings)
+ }
+ if len(rejected) != 0 {
+ t.Errorf("unexpected rejected blessings for (%q, %q): %q", user, tc.role, rejected)
+ }
+ v23.GetPrincipal(tc.ctx).BlessingStore().Set(previousBlessings, security.AllPrincipals)
+ }
+}
+
+func TestPeerBlessingCaveats(t *testing.T) {
+ ctx, shutdown := v23.Init()
+ defer shutdown()
+
+ workdir, err := ioutil.TempDir("", "test-role-server-")
+ if err != nil {
+ t.Fatal("ioutil.TempDir failed: %v", err)
+ }
+ defer os.RemoveAll(workdir)
+
+ roleConf := irole.Config{
+ Members: []security.BlessingPattern{"root/users/user/_role"},
+ Peers: []security.BlessingPattern{
+ security.BlessingPattern("root/peer1"),
+ security.BlessingPattern("root/peer3"),
+ },
+ }
+ irole.WriteConfig(t, roleConf, filepath.Join(workdir, "role.conf"))
+
+ var (
+ root = testutil.NewIDProvider("root")
+ user = newPrincipalContext(t, ctx, root, "users/user/_role")
+ peer1 = newPrincipalContext(t, ctx, root, "peer1")
+ peer2 = newPrincipalContext(t, ctx, root, "peer2")
+ peer3 = newPrincipalContext(t, ctx, root, "peer3")
+ )
+
+ roleAddr := newRoleServer(t, newPrincipalContext(t, ctx, root, "roles"), workdir)
+
+ tDisp := &testDispatcher{}
+ server1, testPeer1 := newServer(t, peer1)
+ if err := server1.ServeDispatcher("", tDisp); err != nil {
+ t.Fatalf("server.ServeDispatcher failed: %v", err)
+ }
+ server2, testPeer2 := newServer(t, peer2)
+ if err := server2.ServeDispatcher("", tDisp); err != nil {
+ t.Fatalf("server.ServeDispatcher failed: %v", err)
+ }
+ server3, testPeer3 := newServer(t, peer3)
+ if err := server3.ServeDispatcher("", tDisp); err != nil {
+ t.Fatalf("server.ServeDispatcher failed: %v", err)
+ }
+
+ c := role.RoleClient(naming.Join(roleAddr, "role"))
+ blessings, err := c.SeekBlessings(user)
+ if err != nil {
+ t.Errorf("unexpected erro:", err)
+ }
+ v23.GetPrincipal(user).BlessingStore().Set(blessings, security.AllPrincipals)
+
+ testcases := []struct {
+ peer string
+ blessingNames []string
+ rejectedNames []string
+ }{
+ {testPeer1, []string{"root/roles/role"}, nil},
+ {testPeer2, nil, []string{"root/roles/role"}},
+ {testPeer3, []string{"root/roles/role"}, nil},
+ }
+ for i, tc := range testcases {
+ blessingNames, rejected := callTest(t, user, tc.peer)
+ var rejectedNames []string
+ for _, r := range rejected {
+ rejectedNames = append(rejectedNames, r.Blessing)
+ }
+ if !reflect.DeepEqual(blessingNames, tc.blessingNames) {
+ t.Errorf("Unexpected blessing names for #%d. Got %q, expected %q", i, blessingNames, tc.blessingNames)
+ }
+ if !reflect.DeepEqual(rejectedNames, tc.rejectedNames) {
+ t.Errorf("Unexpected rejected names for #%d. Got %q, expected %q", i, rejectedNames, tc.rejectedNames)
}
}
}
diff --git a/services/wspr/internal/app/app.go b/services/wspr/internal/app/app.go
index 6c8b51c..2eb6db3 100644
--- a/services/wspr/internal/app/app.go
+++ b/services/wspr/internal/app/app.go
@@ -44,7 +44,7 @@
noResults = verror.Register(pkgPath+".noResults", verror.NoRetry, "{1} {2} no results from call {_}")
badCaveatType = verror.Register(pkgPath+".badCaveatType", verror.NoRetry, "{1} {2} bad caveat type {_}")
unknownBlessings = verror.Register(pkgPath+".unknownBlessings", verror.NoRetry, "{1} {2} unknown public id {_}")
- invalidBlessingsHandle = verror.Register(pkgPath+".invalidBlessingsHandle", verror.NoRetry, "{1} {2} invalid blessings handle {_}")
+ invalidBlessingsHandle = verror.Register(pkgPath+".invalidBlessingsHandle", verror.NoRetry, "{1} {2} invalid blessings handle {3} {_}")
)
type outstandingRequest struct {
@@ -78,7 +78,7 @@
outstandingRequests map[int32]*outstandingRequest
// Maps flowids to the server that owns them.
- flowMap map[int32]*server.Server
+ flowMap map[int32]interface{}
// A manager that Handles fetching and caching signature of remote services
signatureManager lib.SignatureManager
@@ -157,6 +157,9 @@
w.Error(err) // Send streaming error as is
return
}
+ if blessings, ok := item.(security.Blessings); ok {
+ item = principal.ConvertBlessingsToHandle(blessings, c.blessingsCache.GetOrAddHandle(blessings))
+ }
vomItem, err := lib.VomEncode(item)
if err != nil {
w.Error(verror.New(marshallingError, ctx, item, err))
@@ -171,6 +174,7 @@
}
}
results := make([]*vdl.Value, msg.NumOutArgs)
+ wireBlessingsType := vdl.TypeOf(security.WireBlessings{})
// This array will have pointers to the values in results.
resultptrs := make([]interface{}, msg.NumOutArgs)
for i := range results {
@@ -181,6 +185,16 @@
w.Error(err)
return
}
+ for i, val := range results {
+ if val.Type() == wireBlessingsType {
+ var blessings security.Blessings
+ if err := vdl.Convert(&blessings, val); err != nil {
+ w.Error(err)
+ return
+ }
+ results[i] = vdl.ValueOf(principal.ConvertBlessingsToHandle(blessings, c.blessingsCache.GetOrAddHandle(blessings)))
+ }
+ }
c.sendRPCResponse(ctx, w, span, results)
}
@@ -210,6 +224,8 @@
callOpts = append(callOpts, options.AllowedServersPolicy(v.Value))
case RpcCallOptionRetryTimeout:
callOpts = append(callOpts, options.RetryTimeout(v.Value))
+ case RpcCallOptionUseGranter:
+ callOpts = append(callOpts, &jsGranter{c, v.Value})
default:
return nil, fmt.Errorf("Unknown RpcCallOption type %T", v)
}
@@ -236,13 +252,13 @@
// CreateNewFlow creats a new server flow that will be used to write out
// streaming messages to Javascript.
-func (c *Controller) CreateNewFlow(s *server.Server, stream rpc.Stream) *server.Flow {
+func (c *Controller) CreateNewFlow(s interface{}, stream rpc.Stream) *server.Flow {
c.Lock()
defer c.Unlock()
id := c.lastGeneratedId
c.lastGeneratedId += 2
c.flowMap[id] = s
- os := newStream()
+ os := newStream(c.blessingsCache)
os.init(stream)
c.outstandingRequests[id] = &outstandingRequest{
stream: os,
@@ -250,7 +266,7 @@
return &server.Flow{ID: id, Writer: c.writerCreator(id)}
}
-// CleanupFlow removes the bookkeping for a previously created flow.
+// CleanupFlow removes the bookkeeping for a previously created flow.
func (c *Controller) CleanupFlow(id int32) {
c.Lock()
request := c.outstandingRequests[id]
@@ -268,12 +284,17 @@
return c.ctx
}
-// AddBlessings adds the Blessings to the local blessings store and returns
-// the handle to it. This function exists because JS only has
-// a handle to the blessings to avoid shipping the certificate forest
-// to JS and back.
-func (c *Controller) AddBlessings(blessings security.Blessings) principal.BlessingsHandle {
- return c.blessingsCache.Add(blessings)
+// GetOrAddBlessingsHandle adds the Blessings to the local blessings store if they
+// don't already exist and returns the handle to it. This function exists
+// because JS only has a handle to the blessings to avoid shipping the
+// certificate forest to JS and back.
+func (c *Controller) GetOrAddBlessingsHandle(blessings security.Blessings) principal.BlessingsHandle {
+ return c.blessingsCache.GetOrAddHandle(blessings)
+}
+
+// GetBlessings gets blessings for a given blessings handle.
+func (c *Controller) GetBlessings(handle principal.BlessingsHandle) security.Blessings {
+ return c.blessingsCache.GetBlessings(handle)
}
// Cleanup cleans up any outstanding rpcs.
@@ -308,7 +329,7 @@
func (c *Controller) setup() {
c.signatureManager = lib.NewSignatureManager()
c.outstandingRequests = make(map[int32]*outstandingRequest)
- c.flowMap = make(map[int32]*server.Server)
+ c.flowMap = make(map[int32]interface{})
c.servers = make(map[uint32]*server.Server)
}
@@ -346,6 +367,11 @@
return
}
+ for i, arg := range inArgs {
+ if jsBlessings, ok := arg.(principal.JsBlessings); ok {
+ inArgs[i] = c.blessingsCache.GetBlessings(jsBlessings.Handle)
+ }
+ }
// We have to make the start call synchronous so we can make sure that we populate
// the call map before we can Handle a recieve call.
call, err := c.startCall(ctx, w, msg, inArgs)
@@ -451,9 +477,9 @@
// requests.
func (c *Controller) HandleCaveatValidationResponse(id int32, data string) {
c.Lock()
- server := c.flowMap[id]
+ server, ok := c.flowMap[id].(*server.Server)
c.Unlock()
- if server == nil {
+ if !ok {
vlog.Errorf("unexpected result from JavaScript. No server found matching id %d.", id)
return // ignore unknown server
}
@@ -464,7 +490,7 @@
func (c *Controller) HandleVeyronRequest(ctx *context.T, id int32, data string, w lib.ClientWriter) {
binbytes, err := hex.DecodeString(data)
if err != nil {
- w.Error(verror.Convert(verror.ErrInternal, ctx, err))
+ w.Error(verror.Convert(verror.ErrInternal, ctx, fmt.Errorf("Error decoding hex string %q: %v", data, err)))
return
}
decoder, err := vom.NewDecoder(bytes.NewReader(binbytes))
@@ -519,7 +545,7 @@
// to put the outstanding stream in the map before we make the async call so that
// the future send know which queue to write to, even if the client call isn't
// actually ready yet.
- request.stream = newStream()
+ request.stream = newStream(c.blessingsCache)
}
c.Lock()
c.outstandingRequests[id] = request
@@ -566,9 +592,9 @@
// run by the Javascript server.
func (c *Controller) HandleLookupResponse(id int32, data string) {
c.Lock()
- server := c.flowMap[id]
+ server, ok := c.flowMap[id].(*server.Server)
c.Unlock()
- if server == nil {
+ if !ok {
vlog.Errorf("unexpected result from JavaScript. No channel "+
"for MessageId: %d exists. Ignoring the results.", id)
//Ignore unknown responses that don't belong to any channel
@@ -581,9 +607,9 @@
// run by the Javascript server.
func (c *Controller) HandleAuthResponse(id int32, data string) {
c.Lock()
- server := c.flowMap[id]
+ server, ok := c.flowMap[id].(*server.Server)
c.Unlock()
- if server == nil {
+ if !ok {
vlog.Errorf("unexpected result from JavaScript. No channel "+
"for MessageId: %d exists. Ignoring the results.", id)
//Ignore unknown responses that don't belong to any channel
@@ -610,8 +636,8 @@
// given javascript server.
func (c *Controller) Stop(_ rpc.ServerCall, serverId uint32) error {
c.Lock()
- server := c.servers[serverId]
- if server == nil {
+ server, ok := c.servers[serverId]
+ if !ok {
c.Unlock()
return nil
}
@@ -654,9 +680,9 @@
// by filling the corresponding channel with the result from JavaScript.
func (c *Controller) HandleServerResponse(id int32, data string) {
c.Lock()
- server := c.flowMap[id]
+ server, ok := c.flowMap[id].(*server.Server)
c.Unlock()
- if server == nil {
+ if !ok {
vlog.Errorf("unexpected result from JavaScript. No channel "+
"for MessageId: %d exists. Ignoring the results.", id)
//Ignore unknown responses that don't belong to any channel
@@ -687,8 +713,7 @@
// UnlinkBlessings removes the given blessings from the blessings store.
func (c *Controller) UnlinkBlessings(_ rpc.ServerCall, handle principal.BlessingsHandle) error {
- c.blessingsCache.Remove(handle)
- return nil
+ return c.blessingsCache.RemoveReference(handle)
}
// Bless binds extensions of blessings held by this principal to
@@ -699,8 +724,8 @@
extension string,
caveats []security.Caveat) (string, principal.BlessingsHandle, error) {
var inputBlessing security.Blessings
- if inputBlessing = c.blessingsCache.Get(blessingHandle); inputBlessing.IsZero() {
- return "", principal.ZeroHandle, verror.New(invalidBlessingsHandle, nil)
+ if inputBlessing = c.GetBlessings(blessingHandle); inputBlessing.IsZero() {
+ return "", principal.ZeroHandle, verror.New(invalidBlessingsHandle, nil, blessingHandle)
}
key, err := principal.DecodePublicKey(publicKey)
@@ -717,12 +742,12 @@
if err != nil {
return "", principal.ZeroHandle, err
}
- handle := c.blessingsCache.Add(blessings)
+ handle := c.blessingsCache.GetOrAddHandle(blessings)
return publicKey, handle, nil
}
// BlessSelf creates a blessing with the provided name for this principal.
-func (c *Controller) BlessSelf(call rpc.ServerCall,
+func (c *Controller) BlessSelf(_ rpc.ServerCall,
extension string, caveats []security.Caveat) (string, principal.BlessingsHandle, error) {
p := v23.GetPrincipal(c.ctx)
blessings, err := p.BlessSelf(extension)
@@ -730,12 +755,60 @@
return "", principal.ZeroHandle, verror.Convert(verror.ErrInternal, nil, err)
}
- handle := c.blessingsCache.Add(blessings)
+ handle := c.blessingsCache.GetOrAddHandle(blessings)
encKey, err := principal.EncodePublicKey(p.PublicKey())
return encKey, handle, err
}
+// PutToBlessingStore puts a blessing with the provided name to the blessing store
+// with the specified blessing pattern.
+func (c *Controller) PutToBlessingStore(_ rpc.ServerCall, handle principal.BlessingsHandle, pattern security.BlessingPattern) (*principal.JsBlessings, error) {
+ var inputBlessing security.Blessings
+ if inputBlessing = c.GetBlessings(handle); inputBlessing.IsZero() {
+ return nil, verror.New(invalidBlessingsHandle, nil, handle)
+ }
+
+ p := v23.GetPrincipal(c.ctx)
+ outBlessing, err := p.BlessingStore().Set(inputBlessing, security.BlessingPattern(pattern))
+ if err != nil {
+ return nil, err
+ }
+
+ if outBlessing.IsZero() {
+ return nil, nil
+ }
+
+ jsBlessing := principal.ConvertBlessingsToHandle(outBlessing, c.blessingsCache.GetOrAddHandle(outBlessing))
+ return jsBlessing, nil
+}
+
+func (c *Controller) GetDefaultBlessings(rpc.ServerCall) (*principal.JsBlessings, error) {
+ p := v23.GetPrincipal(c.ctx)
+ outBlessings := p.BlessingStore().Default()
+
+ if outBlessings.IsZero() {
+ return nil, nil
+ }
+
+ jsBlessing := principal.ConvertBlessingsToHandle(outBlessings, c.blessingsCache.GetOrAddHandle(outBlessings))
+ return jsBlessing, nil
+}
+
+// HandleGranterResponse handles the result of a Granter request.
+func (c *Controller) HandleGranterResponse(id int32, data string) {
+ c.Lock()
+ granterStr, ok := c.flowMap[id].(*granterStream)
+ c.Unlock()
+ if !ok {
+ vlog.Errorf("unexpected result from JavaScript. Flow was not a granter "+
+ "stream for MessageId: %d exists. Ignoring the results.", id)
+ //Ignore unknown responses that don't belong to any channel
+ return
+ }
+ granterStr.Send(data)
+}
+
func (c *Controller) RemoteBlessings(call rpc.ServerCall, name, method string) ([]string, error) {
vlog.VI(2).Infof("requesting remote blessings for %q", name)
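The recurring pattern in this file is handle translation at the JavaScript boundary: wherever a value crosses into or out of JS (streamed items, RPC out-args, server in-args, the blessing-store methods), security.Blessings is swapped for a small principal.JsBlessings handle and back. A minimal sketch of the two directions, using only calls that appear in the hunks above; the helper names toJS and fromJS are illustrative:

// toJS replaces outgoing Blessings with a handle so the certificate
// chains are never serialized to JavaScript.
func toJS(c *Controller, item interface{}) interface{} {
	if blessings, ok := item.(security.Blessings); ok {
		return principal.ConvertBlessingsToHandle(blessings, c.blessingsCache.GetOrAddHandle(blessings))
	}
	return item
}

// fromJS resolves an incoming handle back to the cached Blessings.
func fromJS(c *Controller, arg interface{}) interface{} {
	if jsBlessings, ok := arg.(principal.JsBlessings); ok {
		return c.blessingsCache.GetBlessings(jsBlessings.Handle)
	}
	return arg
}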
diff --git a/services/wspr/internal/app/app.vdl b/services/wspr/internal/app/app.vdl
index 8f5f122..46d7689 100644
--- a/services/wspr/internal/app/app.vdl
+++ b/services/wspr/internal/app/app.vdl
@@ -11,6 +11,8 @@
"v.io/v23/security"
"v.io/v23/vtrace"
+ "v.io/x/ref/services/wspr/internal/principal"
+ "v.io/x/ref/services/wspr/internal/rpc/server"
)
type RpcRequest struct {
@@ -39,9 +41,22 @@
type RpcCallOption union {
AllowedServersPolicy []security.BlessingPattern
RetryTimeout time.Duration
+ UseGranter GranterHandle
}
type RpcResponse struct {
OutArgs []any
TraceResponse vtrace.Response
}
+
+type GranterHandle int32
+
+type GranterRequest struct {
+ GranterHandle GranterHandle
+ Call server.SecurityCall
+}
+
+type GranterResponse struct {
+ Blessings principal.BlessingsHandle
+ Err error
+}
diff --git a/services/wspr/internal/app/app.vdl.go b/services/wspr/internal/app/app.vdl.go
index 60dfc1f..76358f7 100644
--- a/services/wspr/internal/app/app.vdl.go
+++ b/services/wspr/internal/app/app.vdl.go
@@ -18,6 +18,8 @@
"v.io/v23/security"
time_2 "v.io/v23/vdlroot/time"
"v.io/v23/vtrace"
+ "v.io/x/ref/services/wspr/internal/principal"
+ "v.io/x/ref/services/wspr/internal/rpc/server"
)
type RpcRequest struct {
@@ -52,6 +54,8 @@
RpcCallOptionAllowedServersPolicy struct{ Value []security.BlessingPattern }
// RpcCallOptionRetryTimeout represents field RetryTimeout of the RpcCallOption union type.
RpcCallOptionRetryTimeout struct{ Value time.Duration }
+ // RpcCallOptionUseGranter represents field UseGranter of the RpcCallOption union type.
+ RpcCallOptionUseGranter struct{ Value GranterHandle }
// __RpcCallOptionReflect describes the RpcCallOption union type.
__RpcCallOptionReflect struct {
Name string "v.io/x/ref/services/wspr/internal/app.RpcCallOption"
@@ -59,6 +63,7 @@
Union struct {
AllowedServersPolicy RpcCallOptionAllowedServersPolicy
RetryTimeout RpcCallOptionRetryTimeout
+ UseGranter RpcCallOptionUseGranter
}
}
)
@@ -73,6 +78,11 @@
func (x RpcCallOptionRetryTimeout) Name() string { return "RetryTimeout" }
func (x RpcCallOptionRetryTimeout) __VDLReflect(__RpcCallOptionReflect) {}
+func (x RpcCallOptionUseGranter) Index() int { return 2 }
+func (x RpcCallOptionUseGranter) Interface() interface{} { return x.Value }
+func (x RpcCallOptionUseGranter) Name() string { return "UseGranter" }
+func (x RpcCallOptionUseGranter) __VDLReflect(__RpcCallOptionReflect) {}
+
type RpcResponse struct {
OutArgs []*vdl.Value
TraceResponse vtrace.Response
@@ -83,8 +93,38 @@
}) {
}
+type GranterHandle int32
+
+func (GranterHandle) __VDLReflect(struct {
+ Name string "v.io/x/ref/services/wspr/internal/app.GranterHandle"
+}) {
+}
+
+type GranterRequest struct {
+ GranterHandle GranterHandle
+ Call server.SecurityCall
+}
+
+func (GranterRequest) __VDLReflect(struct {
+ Name string "v.io/x/ref/services/wspr/internal/app.GranterRequest"
+}) {
+}
+
+type GranterResponse struct {
+ Blessings principal.BlessingsHandle
+ Err error
+}
+
+func (GranterResponse) __VDLReflect(struct {
+ Name string "v.io/x/ref/services/wspr/internal/app.GranterResponse"
+}) {
+}
+
func init() {
vdl.Register((*RpcRequest)(nil))
vdl.Register((*RpcCallOption)(nil))
vdl.Register((*RpcResponse)(nil))
+ vdl.Register((*GranterHandle)(nil))
+ vdl.Register((*GranterRequest)(nil))
+ vdl.Register((*GranterResponse)(nil))
}
diff --git a/services/wspr/internal/app/app_test.go b/services/wspr/internal/app/app_test.go
index fbb96fc..d59f5ee 100644
--- a/services/wspr/internal/app/app_test.go
+++ b/services/wspr/internal/app/app_test.go
@@ -122,7 +122,7 @@
}
func startMountTableServer(ctx *context.T) (rpc.Server, naming.Endpoint, error) {
- mt, err := mounttablelib.NewMountTableDispatcher("")
+ mt, err := mounttablelib.NewMountTableDispatcher("", "mounttable")
if err != nil {
return nil, nil, err
}
@@ -196,7 +196,7 @@
writer := testwriter.Writer{}
var stream *outstandingStream
if len(testCase.streamingInputs) > 0 {
- stream = newStream()
+ stream = newStream(nil)
controller.outstandingRequests[0] = &outstandingRequest{
stream: stream,
}
diff --git a/services/wspr/internal/app/controller.vdl b/services/wspr/internal/app/controller.vdl
index 5e7e498..ff4f1c0 100644
--- a/services/wspr/internal/app/controller.vdl
+++ b/services/wspr/internal/app/controller.vdl
@@ -30,9 +30,14 @@
Bless(publicKey string, blessingHandle principal.BlessingsHandle, extension string, caveat []security.Caveat) (string, principal.BlessingsHandle | error)
// BlessSelf creates a blessing with the provided name for this principal.
BlessSelf(name string, caveats []security.Caveat) (string, principal.BlessingsHandle | error)
+ // PutToBlessingStore puts the specified blessing to the blessing store under the provided pattern.
+ PutToBlessingStore(blessingHandle principal.BlessingsHandle, pattern security.BlessingPattern) (?principal.JsBlessings | error)
// RemoteBlessings fetches the remote blessings for a given name and method.
RemoteBlessings(name, method string) ([]string | error)
// Signature fetches the signature for a given name.
Signature(name string) ([]signature.Interface | error)
+
+ // GetDefaultBlessings fetches the default blessings for the principal of the controller.
+ GetDefaultBlessings() (?principal.JsBlessings | error)
}
diff --git a/services/wspr/internal/app/controller.vdl.go b/services/wspr/internal/app/controller.vdl.go
index 4004b80..212ec74 100644
--- a/services/wspr/internal/app/controller.vdl.go
+++ b/services/wspr/internal/app/controller.vdl.go
@@ -39,10 +39,14 @@
Bless(ctx *context.T, publicKey string, blessingHandle principal.BlessingsHandle, extension string, caveat []security.Caveat, opts ...rpc.CallOpt) (string, principal.BlessingsHandle, error)
// BlessSelf creates a blessing with the provided name for this principal.
BlessSelf(ctx *context.T, name string, caveats []security.Caveat, opts ...rpc.CallOpt) (string, principal.BlessingsHandle, error)
+ // PutToBlessingStore puts the specified blessing to the blessing store under the provided pattern.
+ PutToBlessingStore(ctx *context.T, blessingHandle principal.BlessingsHandle, pattern security.BlessingPattern, opts ...rpc.CallOpt) (*principal.JsBlessings, error)
// RemoteBlessings fetches the remote blessings for a given name and method.
RemoteBlessings(ctx *context.T, name string, method string, opts ...rpc.CallOpt) ([]string, error)
// Signature fetches the signature for a given name.
Signature(ctx *context.T, name string, opts ...rpc.CallOpt) ([]signature.Interface, error)
+ // GetDefaultBlessings fetches the default blessings for the principal of the controller.
+ GetDefaultBlessings(*context.T, ...rpc.CallOpt) (*principal.JsBlessings, error)
}
// ControllerClientStub adds universal methods to ControllerClientMethods.
@@ -95,6 +99,11 @@
return
}
+func (c implControllerClientStub) PutToBlessingStore(ctx *context.T, i0 principal.BlessingsHandle, i1 security.BlessingPattern, opts ...rpc.CallOpt) (o0 *principal.JsBlessings, err error) {
+ err = v23.GetClient(ctx).Call(ctx, c.name, "PutToBlessingStore", []interface{}{i0, i1}, []interface{}{&o0}, opts...)
+ return
+}
+
func (c implControllerClientStub) RemoteBlessings(ctx *context.T, i0 string, i1 string, opts ...rpc.CallOpt) (o0 []string, err error) {
err = v23.GetClient(ctx).Call(ctx, c.name, "RemoteBlessings", []interface{}{i0, i1}, []interface{}{&o0}, opts...)
return
@@ -105,6 +114,11 @@
return
}
+func (c implControllerClientStub) GetDefaultBlessings(ctx *context.T, opts ...rpc.CallOpt) (o0 *principal.JsBlessings, err error) {
+ err = v23.GetClient(ctx).Call(ctx, c.name, "GetDefaultBlessings", nil, []interface{}{&o0}, opts...)
+ return
+}
+
// ControllerServerMethods is the interface a server writer
// implements for Controller.
type ControllerServerMethods interface {
@@ -125,10 +139,14 @@
Bless(call rpc.ServerCall, publicKey string, blessingHandle principal.BlessingsHandle, extension string, caveat []security.Caveat) (string, principal.BlessingsHandle, error)
// BlessSelf creates a blessing with the provided name for this principal.
BlessSelf(call rpc.ServerCall, name string, caveats []security.Caveat) (string, principal.BlessingsHandle, error)
+ // PutToBlessingStore puts the specified blessing to the blessing store under the provided pattern.
+ PutToBlessingStore(call rpc.ServerCall, blessingHandle principal.BlessingsHandle, pattern security.BlessingPattern) (*principal.JsBlessings, error)
// RemoteBlessings fetches the remote blessings for a given name and method.
RemoteBlessings(call rpc.ServerCall, name string, method string) ([]string, error)
// Signature fetches the signature for a given name.
Signature(call rpc.ServerCall, name string) ([]signature.Interface, error)
+ // GetDefaultBlessings fetches the default blessings for the principal of the controller.
+ GetDefaultBlessings(rpc.ServerCall) (*principal.JsBlessings, error)
}
// ControllerServerStubMethods is the server interface containing
@@ -194,6 +212,10 @@
return s.impl.BlessSelf(call, i0, i1)
}
+func (s implControllerServerStub) PutToBlessingStore(call rpc.ServerCall, i0 principal.BlessingsHandle, i1 security.BlessingPattern) (*principal.JsBlessings, error) {
+ return s.impl.PutToBlessingStore(call, i0, i1)
+}
+
func (s implControllerServerStub) RemoteBlessings(call rpc.ServerCall, i0 string, i1 string) ([]string, error) {
return s.impl.RemoteBlessings(call, i0, i1)
}
@@ -202,6 +224,10 @@
return s.impl.Signature(call, i0)
}
+func (s implControllerServerStub) GetDefaultBlessings(call rpc.ServerCall) (*principal.JsBlessings, error) {
+ return s.impl.GetDefaultBlessings(call)
+}
+
func (s implControllerServerStub) Globber() *rpc.GlobState {
return s.gs
}
@@ -283,6 +309,17 @@
},
},
{
+ Name: "PutToBlessingStore",
+ Doc: "// PutToBlessingStore puts the specified blessing to the blessing store under the provided pattern.",
+ InArgs: []rpc.ArgDesc{
+ {"blessingHandle", ``}, // principal.BlessingsHandle
+ {"pattern", ``}, // security.BlessingPattern
+ },
+ OutArgs: []rpc.ArgDesc{
+ {"", ``}, // *principal.JsBlessings
+ },
+ },
+ {
Name: "RemoteBlessings",
Doc: "// RemoteBlessings fetches the remote blessings for a given name and method.",
InArgs: []rpc.ArgDesc{
@@ -303,5 +340,12 @@
{"", ``}, // []signature.Interface
},
},
+ {
+ Name: "GetDefaultBlessings",
+ Doc: "// GetDefaultBlessings fetches the default blessings for the principal of the controller.",
+ OutArgs: []rpc.ArgDesc{
+ {"", ``}, // *principal.JsBlessings
+ },
+ },
},
}
diff --git a/services/wspr/internal/app/granter.go b/services/wspr/internal/app/granter.go
new file mode 100644
index 0000000..cf35a57
--- /dev/null
+++ b/services/wspr/internal/app/granter.go
@@ -0,0 +1,81 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package app
+
+import (
+ "fmt"
+ "time"
+
+ "v.io/v23/context"
+ "v.io/v23/security"
+ "v.io/x/ref/services/wspr/internal/lib"
+ "v.io/x/ref/services/wspr/internal/rpc/server"
+)
+
+// This is a Granter that redirects grant requests to javascript
+// and waits for the response.
+// Implements security.Granter
+type jsGranter struct {
+ c server.ServerHelper
+ granterHandle GranterHandle
+}
+
+func (g *jsGranter) Grant(ctx *context.T) (blessings security.Blessings, err error) {
+ stream := &granterStream{make(chan *GranterResponse, 1)}
+ flow := g.c.CreateNewFlow(stream, stream)
+ request := &GranterRequest{
+ GranterHandle: g.granterHandle,
+ Call: server.ConvertSecurityCall(g.c, ctx, true),
+ }
+ encoded, err := lib.VomEncode(request)
+ if err != nil {
+ return security.Blessings{}, err
+ }
+ if err := flow.Writer.Send(lib.ResponseGranterRequest, encoded); err != nil {
+ return security.Blessings{}, err
+ }
+ timeoutTime := time.Second * 5 // TODO: get a real timeout
+ select {
+ case <-time.After(timeoutTime):
+ return security.Blessings{}, fmt.Errorf("Timed out receiving response from javascript granter")
+ case response := <-stream.c:
+ if response.Err != nil {
+ return security.Blessings{}, response.Err
+ }
+ if inputBlessing := g.c.GetBlessings(response.Blessings); inputBlessing.IsZero() {
+ return security.Blessings{}, fmt.Errorf("Unknown blessing handle received from javascript")
+ } else {
+ return inputBlessing, nil
+ }
+ }
+}
+
+func (g *jsGranter) RPCCallOpt() {}
+
+// Granter stream exists because our incoming request handling mechanism
+// works on streams.
+// It simply decodes the response from js and sends it to the granter.
+type granterStream struct {
+ c chan *GranterResponse
+}
+
+func (g *granterStream) Send(item interface{}) error {
+ dataString := item.(string)
+ var gr *GranterResponse
+ if err := lib.VomDecode(dataString, &gr); err != nil {
+ return fmt.Errorf("error decoding granter response: %v", err)
+ }
+ g.c <- gr
+ return nil
+}
+
+func (g *granterStream) Recv(itemptr interface{}) error {
+ panic("Shouldn't be called")
+}
+
+func (g *granterStream) CloseSend() error {
+ close(g.c)
+ return nil
+}
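End to end, the granter is just another call option: app.go maps RpcCallOptionUseGranter to the *jsGranter above, the RPC machinery invokes Grant at call time, and the blessings JS grants come back as a handle that is resolved through the controller's cache. A hypothetical caller-side sketch; the object name, method, and handle value are illustrative, and c is the wspr Controller (which satisfies server.ServerHelper in the hunks above):

func callWithJSGranter(ctx *context.T, c *Controller, handle GranterHandle) error {
	// The granter rides along like any other option; errors from Grant
	// (timeout, unknown blessing handle) surface as ordinary RPC errors.
	return v23.GetClient(ctx).Call(ctx, "a/server/name", "SomeMethod", nil, nil, &jsGranter{c, handle})
}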
diff --git a/services/wspr/internal/app/messaging.go b/services/wspr/internal/app/messaging.go
index ff9c7c4..703d802 100644
--- a/services/wspr/internal/app/messaging.go
+++ b/services/wspr/internal/app/messaging.go
@@ -78,6 +78,9 @@
// A response to a caveat validation request.
CaveatValidationResponse = 21
+
+ // A response to a granter request.
+ GranterResponseMessage = 22
)
type Message struct {
@@ -118,6 +121,8 @@
go c.HandleAuthResponse(msg.Id, msg.Data)
case CaveatValidationResponse:
go c.HandleCaveatValidationResponse(msg.Id, msg.Data)
+ case GranterResponseMessage:
+ go c.HandleGranterResponse(msg.Id, msg.Data)
default:
w.Error(verror.New(errUnknownMessageType, ctx, msg.Type))
diff --git a/services/wspr/internal/app/mock_jsServer_test.go b/services/wspr/internal/app/mock_jsServer_test.go
index febba77..75e33ce 100644
--- a/services/wspr/internal/app/mock_jsServer_test.go
+++ b/services/wspr/internal/app/mock_jsServer_test.go
@@ -15,6 +15,7 @@
"v.io/v23/vdl"
"v.io/v23/vdlroot/signature"
"v.io/v23/vom"
+ "v.io/x/ref/internal/reflectutil"
"v.io/x/ref/services/wspr/internal/lib"
"v.io/x/ref/services/wspr/internal/principal"
"v.io/x/ref/services/wspr/internal/rpc/server"
@@ -215,7 +216,7 @@
return nil
}
- var msg server.ServerRPCRequest
+ var msg server.ServerRpcRequest
if err := lib.VomDecode(v.(string), &msg); err != nil {
m.controller.HandleServerResponse(m.flowCount, internalErrJSON(err))
return nil
@@ -231,7 +232,13 @@
return nil
}
- if field, got, want := "Args", msg.Args, m.inArgs; !reflect.DeepEqual(got, want) {
+ vals := make([]interface{}, len(msg.Args))
+ for i, vArg := range msg.Args {
+ if err := vdl.Convert(&vals[i], vArg); err != nil {
+ panic(err)
+ }
+ }
+ if field, got, want := "Args", vals, m.inArgs; !reflectutil.DeepEqual(got, want, &reflectutil.DeepEqualOpts{SliceEqNilEmpty: true}) {
m.controller.HandleServerResponse(m.flowCount, internalErrJSON(fmt.Sprintf("unexpected value for %s: got %v, want %v", field, got, want)))
return nil
}
diff --git a/services/wspr/internal/app/stream.go b/services/wspr/internal/app/stream.go
index a5c3637..1a8c653 100644
--- a/services/wspr/internal/app/stream.go
+++ b/services/wspr/internal/app/stream.go
@@ -9,6 +9,7 @@
"v.io/v23/rpc"
"v.io/x/ref/services/wspr/internal/lib"
+ "v.io/x/ref/services/wspr/internal/principal"
)
type initConfig struct {
@@ -39,15 +40,19 @@
done chan bool
// true if the stream has been closed.
closed bool
+
+ // Used to translate from JsBlessings to Blessings
+ blessingsCache *principal.JSBlessingsHandles
}
-func newStream() *outstandingStream {
+func newStream(cache *principal.JSBlessingsHandles) *outstandingStream {
os := &outstandingStream{
initChan: make(chan *initConfig, 1),
// We allow queueing up to 100 messages before init is called.
// TODO(bjornick): Deal with the case that the queue is full.
- messages: make(chan *message, 100),
- done: make(chan bool),
+ messages: make(chan *message, 100),
+ done: make(chan bool),
+ blessingsCache: cache,
}
go os.loop()
return os
@@ -80,6 +85,10 @@
msg.writer.Error(fmt.Errorf("failed to decode stream arg from %v: %v", msg.data, err))
break
}
+
+ if jsBlessings, ok := item.(principal.JsBlessings); ok {
+ item = os.blessingsCache.GetBlessings(jsBlessings.Handle)
+ }
if err := config.stream.Send(item); err != nil {
msg.writer.Error(fmt.Errorf("failed to send on stream: %v", err))
}
diff --git a/services/wspr/internal/browspr/browspr_test.go b/services/wspr/internal/browspr/browspr_test.go
index 81ab8d0..e6502a8 100644
--- a/services/wspr/internal/browspr/browspr_test.go
+++ b/services/wspr/internal/browspr/browspr_test.go
@@ -31,7 +31,7 @@
//go:generate v23 test generate
func startMounttable(ctx *context.T) (rpc.Server, naming.Endpoint, error) {
- mt, err := mounttablelib.NewMountTableDispatcher("")
+ mt, err := mounttablelib.NewMountTableDispatcher("", "mounttable")
if err != nil {
return nil, nil, err
}
diff --git a/services/wspr/internal/lib/vom.go b/services/wspr/internal/lib/vom.go
index d3f437a..2ddbcfb 100644
--- a/services/wspr/internal/lib/vom.go
+++ b/services/wspr/internal/lib/vom.go
@@ -7,6 +7,7 @@
import (
"bytes"
"encoding/hex"
+ "fmt"
"v.io/v23/vom"
)
@@ -34,7 +35,7 @@
func VomDecode(data string, v interface{}) error {
binbytes, err := hex.DecodeString(data)
if err != nil {
- return err
+ return fmt.Errorf("Error decoding hex string %q: %v", data, err)
}
decoder, err := vom.NewDecoder(bytes.NewReader(binbytes))
if err != nil {
diff --git a/services/wspr/internal/lib/writer.go b/services/wspr/internal/lib/writer.go
index a1ccecc..75866fd 100644
--- a/services/wspr/internal/lib/writer.go
+++ b/services/wspr/internal/lib/writer.go
@@ -17,6 +17,7 @@
ResponseCancel = 7
ResponseValidate = 8 // Request to validate caveats.
ResponseLog = 9 // Sends a message to be logged.
+ ResponseGranterRequest = 10
)
type Response struct {
diff --git a/services/wspr/internal/principal/js_blessings_store.go b/services/wspr/internal/principal/js_blessings_store.go
index f660c50..8007982 100644
--- a/services/wspr/internal/principal/js_blessings_store.go
+++ b/services/wspr/internal/principal/js_blessings_store.go
@@ -5,11 +5,18 @@
package principal
import (
+ "fmt"
+ "reflect"
"sync"
"v.io/v23/security"
)
+type refToBlessings struct {
+ blessings security.Blessings
+ refCount int
+}
+
// JSBlessingsHandles is a store for Blessings in use by JS code.
//
// We don't pass the full Blessings object to avoid serializing
@@ -19,37 +26,65 @@
type JSBlessingsHandles struct {
mu sync.Mutex
lastHandle BlessingsHandle
- store map[BlessingsHandle]security.Blessings
+ store map[BlessingsHandle]*refToBlessings
}
// NewJSBlessingsHandles returns a newly initialized JSBlessingsHandles
func NewJSBlessingsHandles() *JSBlessingsHandles {
return &JSBlessingsHandles{
- store: map[BlessingsHandle]security.Blessings{},
+ store: map[BlessingsHandle]*refToBlessings{},
}
}
-// Add adds a Blessings to the store and returns the handle to it.
-func (s *JSBlessingsHandles) Add(blessings security.Blessings) BlessingsHandle {
+// GetOrAddHandle looks for a corresponding blessing handle and adds one if not found.
+func (s *JSBlessingsHandles) GetOrAddHandle(blessings security.Blessings) BlessingsHandle {
s.mu.Lock()
defer s.mu.Unlock()
+ // Look for an existing blessing.
+ for handle, ref := range s.store {
+ if reflect.DeepEqual(blessings, ref.blessings) {
+ ref.refCount++
+ return handle
+ }
+ }
+
+ // Otherwise add it
s.lastHandle++
handle := s.lastHandle
- s.store[handle] = blessings
+ s.store[handle] = &refToBlessings{
+ blessings: blessings,
+ refCount: 1,
+ }
return handle
}
-// Remove removes the Blessings associated with the handle.
-func (s *JSBlessingsHandles) Remove(handle BlessingsHandle) {
+// RemoveReference indicates the removal of a reference to
+// the Blessings associated with the handle.
+func (s *JSBlessingsHandles) RemoveReference(handle BlessingsHandle) error {
s.mu.Lock()
defer s.mu.Unlock()
- delete(s.store, handle)
+ ref, ok := s.store[handle]
+ if !ok {
+ return fmt.Errorf("Could not find reference to handle being removed: %v", handle)
+ }
+ ref.refCount--
+ if ref.refCount == 0 {
+ delete(s.store, handle)
+ }
+ if ref.refCount < 0 {
+ return fmt.Errorf("Unexpected negative ref count")
+ }
+ return nil
}
-// Get returns the Blessings represented by the handle. Returns nil
+// GetBlessings returns the Blessings represented by the handle. Returns a zero-value Blessings
// if no Blessings exists for the handle.
-func (s *JSBlessingsHandles) Get(handle BlessingsHandle) security.Blessings {
+func (s *JSBlessingsHandles) GetBlessings(handle BlessingsHandle) security.Blessings {
s.mu.Lock()
defer s.mu.Unlock()
- return s.store[handle]
+ ref, ok := s.store[handle]
+ if !ok {
+ return security.Blessings{}
+ }
+ return ref.blessings
}
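The store is now reference-counted: passing the same Blessings to GetOrAddHandle twice returns the same handle with its count bumped, and the entry only disappears after a matching number of RemoveReference calls (the test below exercises this). A minimal usage sketch, with b assumed to be some caller-supplied Blessings:

func handleLifecycle(s *JSBlessingsHandles, b security.Blessings) {
	h := s.GetOrAddHandle(b) // refCount 1
	_ = s.GetOrAddHandle(b)  // same handle, refCount 2
	s.RemoveReference(h)     // refCount 1; handle still resolves
	s.RemoveReference(h)     // refCount 0; entry removed
	if s.GetBlessings(h).IsZero() {
		// the handle no longer resolves to anything
	}
}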
diff --git a/services/wspr/internal/principal/js_blessings_store_test.go b/services/wspr/internal/principal/js_blessings_store_test.go
index 0433504..1f79c6e 100644
--- a/services/wspr/internal/principal/js_blessings_store_test.go
+++ b/services/wspr/internal/principal/js_blessings_store_test.go
@@ -15,13 +15,41 @@
s := NewJSBlessingsHandles()
b := blessSelf(testutil.NewPrincipal(), "irrelevant")
- h := s.Add(b)
- if got := s.Get(h); !reflect.DeepEqual(got, b) {
+ h := s.GetOrAddHandle(b)
+ if got := s.GetBlessings(h); !reflect.DeepEqual(got, b) {
t.Fatalf("Get after adding: got: %v, want: %v", got, b)
}
- s.Remove(h)
- if got := s.Get(h); !got.IsZero() {
+ hGetOrAdd := s.GetOrAddHandle(b)
+ if h != hGetOrAdd {
+ t.Fatalf("Expected same handle from get or add. got: %v, want: %v", hGetOrAdd, h)
+ }
+ if len(s.store) != 1 {
+ t.Fatalf("Expected single entry to exist")
+ }
+
+ b2 := blessSelf(testutil.NewPrincipal(), "secondBlessing")
+ hNewFromGetOrAdd := s.GetOrAddHandle(b2)
+ if hNewFromGetOrAdd == h {
+ t.Fatalf("Expected to get new handle on new name. got: %v, want: %v", hGetOrAdd, h)
+ }
+
+ s.RemoveReference(h)
+ if got := s.GetBlessings(h); !reflect.DeepEqual(got, b) {
+ t.Fatalf("Expected to still be able to find after first remove: got: %v, want %v", got, b)
+ }
+
+ s.RemoveReference(h)
+ if got := s.GetBlessings(h); !got.IsZero() {
+ t.Fatalf("Get after removing: got: %v, want nil", got)
+ }
+
+ if got := s.GetBlessings(hNewFromGetOrAdd); !reflect.DeepEqual(got, b2) {
+ t.Fatalf("Expected to still be able to get second blessing: got: %v, want %v", got, b2)
+ }
+
+ s.RemoveReference(hNewFromGetOrAdd)
+ if got := s.GetBlessings(hNewFromGetOrAdd); !got.IsZero() {
t.Fatalf("Get after removing: got: %v, want nil", got)
}
}
diff --git a/services/wspr/internal/rpc/server/server.go b/services/wspr/internal/rpc/server/server.go
index c9bf07c..19299b6 100644
--- a/services/wspr/internal/rpc/server/server.go
+++ b/services/wspr/internal/rpc/server/server.go
@@ -32,30 +32,16 @@
Writer lib.ClientWriter
}
-// A request from the proxy to javascript to handle an RPC
-type ServerRPCRequest struct {
- ServerId uint32
- Handle int32
- Method string
- Args []interface{}
- Call ServerRPCRequestCall
-}
-
-type ServerRPCRequestCall struct {
- SecurityCall SecurityCall
- Deadline vdltime.Deadline
- TraceRequest vtrace.Request
-}
-
type FlowHandler interface {
- CreateNewFlow(server *Server, sender rpc.Stream) *Flow
+ CreateNewFlow(server interface{}, sender rpc.Stream) *Flow
CleanupFlow(id int32)
}
type HandleStore interface {
- // Adds blessings to the store and returns handle to the blessings
- AddBlessings(blessings security.Blessings) principal.BlessingsHandle
+ GetBlessings(handle principal.BlessingsHandle) security.Blessings
+ // Gets or adds blessings to the store and returns handle to the blessings
+ GetOrAddBlessingsHandle(blessings security.Blessings) principal.BlessingsHandle
}
type ServerHelper interface {
@@ -136,7 +122,7 @@
func (s *Server) createRemoteInvokerFunc(handle int32) remoteInvokeFunc {
return func(methodName string, args []interface{}, call rpc.StreamServerCall) <-chan *lib.ServerRpcReply {
- securityCall := s.convertSecurityCall(call.Context(), true)
+ securityCall := ConvertSecurityCall(s.helper, call.Context(), true)
flow := s.helper.CreateNewFlow(s, call)
replyChan := make(chan *lib.ServerRpcReply, 1)
@@ -158,18 +144,32 @@
return replyChan
}
- rpcCall := ServerRPCRequestCall{
- SecurityCall: securityCall,
- Deadline: timeout,
- TraceRequest: vtrace.GetRequest(call.Context()),
+ var grantedBlessings *principal.JsBlessings
+ if !call.GrantedBlessings().IsZero() {
+ grantedBlessings = convertBlessingsToHandle(s.helper, call.GrantedBlessings())
+ }
+
+ rpcCall := ServerRpcRequestCall{
+ SecurityCall: securityCall,
+ Deadline: timeout,
+ TraceRequest: vtrace.GetRequest(call.Context()),
+ GrantedBlessings: grantedBlessings,
+ }
+
+ vdlValArgs := make([]*vdl.Value, len(args))
+ for i, arg := range args {
+ if blessings, ok := arg.(security.Blessings); ok {
+ arg = principal.ConvertBlessingsToHandle(blessings, s.helper.GetOrAddBlessingsHandle(blessings))
+ }
+ vdlValArgs[i] = vdl.ValueOf(arg)
}
// Send a invocation request to JavaScript
- message := ServerRPCRequest{
+ message := ServerRpcRequest{
ServerId: s.id,
Handle: handle,
Method: lib.LowercaseFirstCharacter(methodName),
- Args: args,
+ Args: vdlValArgs,
Call: rpcCall,
}
vomMessage, err := lib.VomEncode(message)
@@ -198,7 +198,7 @@
ch <- &lib.ServerRpcReply{nil, &err, vtrace.Response{}}
}()
- go proxyStream(call, flow.Writer)
+ go proxyStream(call, flow.Writer, s.helper)
return replyChan
}
@@ -235,7 +235,7 @@
// Until the tests get fixed, we need to create a security context before creating the flow
// because creating the security context creates a flow and flow ids will be off.
// See https://github.com/veyron/release-issues/issues/1181
- securityCall := s.convertSecurityCall(call.Context(), true)
+ securityCall := ConvertSecurityCall(s.helper, call.Context(), true)
globChan := make(chan naming.GlobReply, 1)
flow := s.helper.CreateNewFlow(s, &globStream{
@@ -259,17 +259,23 @@
return nil, verror.Convert(verror.ErrInternal, call.Context(), err).(verror.E)
}
- rpcCall := ServerRPCRequestCall{
- SecurityCall: securityCall,
- Deadline: timeout,
+ var grantedBlessings *principal.JsBlessings
+ if !call.GrantedBlessings().IsZero() {
+ grantedBlessings = convertBlessingsToHandle(s.helper, call.GrantedBlessings())
+ }
+
+ rpcCall := ServerRpcRequestCall{
+ SecurityCall: securityCall,
+ Deadline: timeout,
+ GrantedBlessings: grantedBlessings,
}
// Send a invocation request to JavaScript
- message := ServerRPCRequest{
+ message := ServerRpcRequest{
ServerId: s.id,
Handle: handle,
Method: "Glob__",
- Args: []interface{}{pattern},
+ Args: []*vdl.Value{vdl.ValueOf(pattern)},
Call: rpcCall,
}
vomMessage, err := lib.VomEncode(message)
@@ -302,9 +308,13 @@
}
}
-func proxyStream(stream rpc.Stream, w lib.ClientWriter) {
+func proxyStream(stream rpc.Stream, w lib.ClientWriter, blessingsCache HandleStore) {
var item interface{}
for err := stream.Recv(&item); err == nil; err = stream.Recv(&item) {
+ if blessings, ok := item.(security.Blessings); ok {
+ item = principal.ConvertBlessingsToHandle(blessings, blessingsCache.GetOrAddBlessingsHandle(blessings))
+
+ }
vomItem, err := lib.VomEncode(item)
if err != nil {
w.Error(verror.Convert(verror.ErrInternal, nil, err))
@@ -321,8 +331,8 @@
}
}
-func (s *Server) convertBlessingsToHandle(blessings security.Blessings) principal.JsBlessings {
- return *principal.ConvertBlessingsToHandle(blessings, s.helper.AddBlessings(blessings))
+func convertBlessingsToHandle(helper ServerHelper, blessings security.Blessings) *principal.JsBlessings {
+ return principal.ConvertBlessingsToHandle(blessings, helper.GetOrAddBlessingsHandle(blessings))
}
func makeListOfErrors(numErrors int, err error) []error {
@@ -338,7 +348,7 @@
func (s *Server) validateCavsInJavascript(ctx *context.T, cavs [][]security.Caveat) []error {
flow := s.helper.CreateNewFlow(s, nil)
req := CaveatValidationRequest{
- Call: s.convertSecurityCall(ctx, false),
+ Call: ConvertSecurityCall(s.helper, ctx, false),
Cavs: cavs,
}
@@ -433,7 +443,7 @@
return outResults
}
-func (s *Server) convertSecurityCall(ctx *context.T, includeBlessingStrings bool) SecurityCall {
+func ConvertSecurityCall(helper ServerHelper, ctx *context.T, includeBlessingStrings bool) SecurityCall {
call := security.GetCall(ctx)
var localEndpoint string
if call.LocalEndpoint() != nil {
@@ -445,7 +455,7 @@
}
var localBlessings principal.JsBlessings
if !call.LocalBlessings().IsZero() {
- localBlessings = s.convertBlessingsToHandle(call.LocalBlessings())
+ localBlessings = *convertBlessingsToHandle(helper, call.LocalBlessings())
}
anymtags := make([]*vdl.Value, len(call.MethodTags()))
for i, mtag := range call.MethodTags() {
@@ -458,7 +468,7 @@
LocalEndpoint: localEndpoint,
RemoteEndpoint: remoteEndpoint,
LocalBlessings: localBlessings,
- RemoteBlessings: s.convertBlessingsToHandle(call.RemoteBlessings()),
+ RemoteBlessings: *convertBlessingsToHandle(helper, call.RemoteBlessings()),
}
if includeBlessingStrings {
secCall.LocalBlessingStrings = security.LocalBlessingNames(ctx)
@@ -473,7 +483,7 @@
return func(ctx *context.T) error {
// Until the tests get fixed, we need to create a security context before creating the flow
// because creating the security context creates a flow and flow ids will be off.
- securityCall := s.convertSecurityCall(ctx, true)
+ securityCall := ConvertSecurityCall(s.helper, ctx, true)
flow := s.helper.CreateNewFlow(s, nil)
replyChan := make(chan error, 1)
@@ -585,6 +595,22 @@
vlog.VI(0).Infof("response received from JavaScript server for "+
"MessageId %d with result %v", id, reply)
s.helper.CleanupFlow(id)
+ if reply.Err != nil {
+ ch <- &reply
+ return
+ }
+ jsBlessingsType := vdl.TypeOf(principal.JsBlessings{})
+ for i, val := range reply.Results {
+ if val.Type() == jsBlessingsType {
+ var jsBlessings principal.JsBlessings
+ if err := vdl.Convert(&jsBlessings, val); err != nil {
+ reply.Err = err
+ break
+ }
+ reply.Results[i] = vdl.ValueOf(
+ s.helper.GetBlessings(jsBlessings.Handle))
+ }
+ }
ch <- &reply
}
diff --git a/services/wspr/internal/rpc/server/server.vdl b/services/wspr/internal/rpc/server/server.vdl
index 00fad2b..06eda35 100644
--- a/services/wspr/internal/rpc/server/server.vdl
+++ b/services/wspr/internal/rpc/server/server.vdl
@@ -5,7 +5,10 @@
package server
import (
+ "time"
+
"v.io/v23/security"
+ "v.io/v23/vtrace"
"v.io/x/ref/services/wspr/internal/principal"
)
@@ -35,3 +38,19 @@
InvalidValidationResponseFromJavascript() {"en": "Invalid validation response from javascript"}
ServerStopped() {RetryBackoff, "en": "Server has been stopped"}
)
+
+type ServerRpcRequestCall struct {
+ SecurityCall SecurityCall
+ Deadline time.WireDeadline
+ TraceRequest vtrace.Request
+ GrantedBlessings ?principal.JsBlessings
+}
+
+// A request from the proxy to JavaScript to handle an RPC
+type ServerRpcRequest struct {
+ ServerId uint32
+ Handle int32
+ Method string
+ Args []any
+ Call ServerRpcRequestCall
+}
diff --git a/services/wspr/internal/rpc/server/server.vdl.go b/services/wspr/internal/rpc/server/server.vdl.go
index eb13c27..1f3aafe 100644
--- a/services/wspr/internal/rpc/server/server.vdl.go
+++ b/services/wspr/internal/rpc/server/server.vdl.go
@@ -16,6 +16,8 @@
// VDL user imports
"v.io/v23/security"
+ "v.io/v23/vdlroot/time"
+ "v.io/v23/vtrace"
"v.io/x/ref/services/wspr/internal/principal"
)
@@ -55,10 +57,38 @@
}) {
}
+type ServerRpcRequestCall struct {
+ SecurityCall SecurityCall
+ Deadline time.Deadline
+ TraceRequest vtrace.Request
+ GrantedBlessings *principal.JsBlessings
+}
+
+func (ServerRpcRequestCall) __VDLReflect(struct {
+ Name string "v.io/x/ref/services/wspr/internal/rpc/server.ServerRpcRequestCall"
+}) {
+}
+
+// A request from the proxy to JavaScript to handle an RPC
+type ServerRpcRequest struct {
+ ServerId uint32
+ Handle int32
+ Method string
+ Args []*vdl.Value
+ Call ServerRpcRequestCall
+}
+
+func (ServerRpcRequest) __VDLReflect(struct {
+ Name string "v.io/x/ref/services/wspr/internal/rpc/server.ServerRpcRequest"
+}) {
+}
+
func init() {
vdl.Register((*SecurityCall)(nil))
vdl.Register((*CaveatValidationRequest)(nil))
vdl.Register((*CaveatValidationResponse)(nil))
+ vdl.Register((*ServerRpcRequestCall)(nil))
+ vdl.Register((*ServerRpcRequest)(nil))
}
var (
diff --git a/test/modules/modules_test.go b/test/modules/modules_test.go
index 79ca03c..4e7f012 100644
--- a/test/modules/modules_test.go
+++ b/test/modules/modules_test.go
@@ -154,7 +154,7 @@
sh.Cleanup(&stdout, &stderr)
want := ""
if testing.Verbose() {
- want = "---- Shell Cleanup ----\n"
+ want = "---- Shell Cleanup ----\n---- Cleanup calling cancelCtx ----\n---- Shell Cleanup Complete ----\n"
}
if got := stdout.String(); got != "" && got != want {
t.Errorf("got %q, want %q", got, want)
diff --git a/test/modules/shell.go b/test/modules/shell.go
index 204336b..8ae6194 100644
--- a/test/modules/shell.go
+++ b/test/modules/shell.go
@@ -229,7 +229,7 @@
}
sh.ctx = ctx
- if sh.tempCredDir, err = ioutil.TempDir("", "shell_credentials"); err != nil {
+ if sh.tempCredDir, err = ioutil.TempDir("", "shell_credentials-"); err != nil {
return nil, err
}
if sh.agent, err = keymgr.NewLocalAgent(ctx, sh.tempCredDir, nil); err != nil {
@@ -665,9 +665,9 @@
}
}
- if verbose {
- writeMsg("---- Shell Cleanup ----\n")
- }
+ writeMsg("---- Shell Cleanup ----\n")
+ defer writeMsg("---- Shell Cleanup Complete ----\n")
+
sh.mu.Lock()
handles := make([]Handle, 0, len(sh.lifoHandles))
for _, h := range sh.lifoHandles {
@@ -707,6 +707,7 @@
}
if sh.cancelCtx != nil {
+ writeMsg("---- Cleanup calling cancelCtx ----\n")
// Note(ribrdb, caprita): This will shut down the agents. If there
// were errors shutting down it is possible there could be child
// processes still running, and stopping the agent may cause
diff --git a/test/testutil/dispatcher.go b/test/testutil/dispatcher.go
index 737a390..671f4d6 100644
--- a/test/testutil/dispatcher.go
+++ b/test/testutil/dispatcher.go
@@ -7,6 +7,7 @@
import (
"v.io/v23/rpc"
"v.io/v23/security"
+ "v.io/v23/verror"
)
// LeafDispatcher returns a dispatcher for a single object obj, using
@@ -23,7 +24,7 @@
func (d leafDispatcher) Lookup(suffix string) (interface{}, security.Authorizer, error) {
if suffix != "" {
- return nil, nil, rpc.NewErrUnknownSuffix(nil, suffix)
+ return nil, nil, verror.New(verror.ErrUnknownSuffix, nil, suffix)
}
return d.invoker, d.auth, nil
}
diff --git a/test/v23tests/binary.go b/test/v23tests/binary.go
index 023e64b..9cbee4f 100644
--- a/test/v23tests/binary.go
+++ b/test/v23tests/binary.go
@@ -8,7 +8,6 @@
"bytes"
"io"
"os"
- "path"
"strings"
"v.io/x/lib/vlog"
@@ -38,14 +37,6 @@
envVars []string
}
-func (b *Binary) cleanup() {
- binaryDir := path.Dir(b.path)
- vlog.Infof("cleaning up %s", binaryDir)
- if err := os.RemoveAll(binaryDir); err != nil {
- vlog.Infof("WARNING: RemoveAll(%s) failed (%v)", binaryDir, err)
- }
-}
-
// StartOpts returns the current the StartOpts
func (b *Binary) StartOpts() modules.StartOpts {
return b.opts
diff --git a/test/v23tests/v23tests.go b/test/v23tests/v23tests.go
index 87d024c..caf7fc9 100644
--- a/test/v23tests/v23tests.go
+++ b/test/v23tests/v23tests.go
@@ -268,6 +268,7 @@
for _, tempDir := range t.tempDirs {
vlog.VI(1).Infof("V23Test.Cleanup: cleaning up %s", tempDir)
+ vlog.Infof("V23Test.Cleanup: cleaning up %s", tempDir)
if err := os.RemoveAll(tempDir); err != nil {
vlog.Errorf("WARNING: RemoveAll(%q) failed: %v", tempDir, err)
}
@@ -430,7 +431,7 @@
func (t *T) buildPkg(pkg string) *Binary {
then := time.Now()
loc := Caller(1)
- cached, built_path, err := buildPkg(t.BinDir(), pkg)
+ cached, built_path, err := buildPkg(t, t.BinDir(), pkg)
if err != nil {
t.Fatalf("%s: buildPkg(%s) failed: %v", loc, pkg, err)
return nil
@@ -555,7 +556,7 @@
// build artifacts. Note that the clients of this function should not modify
// the contents of this directory directly and instead defer to the cleanup
// function.
-func buildPkg(binDir, pkg string) (bool, string, error) {
+func buildPkg(t *T, binDir, pkg string) (bool, string, error) {
binFile := filepath.Join(binDir, path.Base(pkg))
vlog.Infof("buildPkg: %v .. %v", binDir, pkg)
if _, err := os.Stat(binFile); err != nil {