Merge "Resolve vanadium/issues#776"
diff --git a/cmd/gclogs/doc.go b/cmd/gclogs/doc.go
index f9ede8b..95242e8 100644
--- a/cmd/gclogs/doc.go
+++ b/cmd/gclogs/doc.go
@@ -35,5 +35,7 @@
The global flags are:
-metadata=<just specify -metadata to activate>
Displays metadata for the program and exits.
+ -time=false
+ Dump timing information to stderr before exiting the program.
*/
package main
diff --git a/cmd/mounttable/doc.go b/cmd/mounttable/doc.go
index 1ccdb36..7543861 100644
--- a/cmd/mounttable/doc.go
+++ b/cmd/mounttable/doc.go
@@ -36,6 +36,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/cmd/namespace/doc.go b/cmd/namespace/doc.go
index fb8f24d..623f185 100644
--- a/cmd/namespace/doc.go
+++ b/cmd/namespace/doc.go
@@ -44,6 +44,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/cmd/principal/doc.go b/cmd/principal/doc.go
index fe730cb..3115fd7 100644
--- a/cmd/principal/doc.go
+++ b/cmd/principal/doc.go
@@ -46,6 +46,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/cmd/uniqueid/doc.go b/cmd/uniqueid/doc.go
index 03b5c4e..57f1c17 100644
--- a/cmd/uniqueid/doc.go
+++ b/cmd/uniqueid/doc.go
@@ -20,6 +20,8 @@
The global flags are:
-metadata=<just specify -metadata to activate>
Displays metadata for the program and exits.
+ -time=false
+ Dump timing information to stderr before exiting the program.
Uniqueid generate - Generates UniqueIds
diff --git a/cmd/vdl/doc.go b/cmd/vdl/doc.go
index afc068a..b17b57c 100644
--- a/cmd/vdl/doc.go
+++ b/cmd/vdl/doc.go
@@ -43,6 +43,8 @@
The global flags are:
-metadata=<just specify -metadata to activate>
Displays metadata for the program and exits.
+ -time=false
+ Dump timing information to stderr before exiting the program.
Vdl generate
diff --git a/cmd/vom/doc.go b/cmd/vom/doc.go
index 82a6a30..dc1a03f 100644
--- a/cmd/vom/doc.go
+++ b/cmd/vom/doc.go
@@ -19,6 +19,8 @@
The global flags are:
-metadata=<just specify -metadata to activate>
Displays metadata for the program and exits.
+ -time=false
+ Dump timing information to stderr before exiting the program.
Vom decode - Decode data encoded in the vom format
diff --git a/cmd/vomtestgen/doc.go b/cmd/vomtestgen/doc.go
index 7dd15f7..fdfb8d9 100644
--- a/cmd/vomtestgen/doc.go
+++ b/cmd/vomtestgen/doc.go
@@ -48,6 +48,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/cmd/vomtestgen/generate.go b/cmd/vomtestgen/generate.go
index d780d1c..2b7cf1c 100644
--- a/cmd/vomtestgen/generate.go
+++ b/cmd/vomtestgen/generate.go
@@ -28,10 +28,13 @@
const (
testpkg = "v.io/v23/vom/testdata"
+ typespkg = testpkg + "/types"
vomdataCanonical = testpkg + "/" + vomdataConfig
vomdataConfig = "vomdata.vdl.config"
)
+var versions = []vom.Version{vom.Version80, vom.Version81}
+
var cmdGenerate = &cmdline.Command{
Runner: cmdline.RunnerFunc(runGenerate),
Name: "vomtestgen",
@@ -97,25 +100,28 @@
if !strings.HasSuffix(inName, ".vdl.config") {
return env.UsageErrorf(`vomdata file doesn't end in ".vdl.config": %s`, inName)
}
- outName := inName[:len(inName)-len(".config")]
- // Remove the generated file, so that it doesn't interfere with compiling the
- // config. Ignore errors since it might not exist yet.
- if err := os.Remove(outName); err == nil {
- fmt.Fprintf(debug, "Removed output file %v\n", outName)
- }
config, err := compileConfig(debug, inName, compileEnv)
if err != nil {
return err
}
- data, err := generate(config)
- if err != nil {
- return err
+ for _, version := range versions {
+ baseName := filepath.Base(inName)
+ outName := fmt.Sprintf("%s/data%x/%s", filepath.Dir(inName), version, baseName[:len(baseName)-len(".config")])
+ // Remove the generated file, so that it doesn't interfere with compiling the
+ // config. Ignore errors since it might not exist yet.
+ if err := os.Remove(outName); err == nil {
+ fmt.Fprintf(debug, "Removed output file %v\n", outName)
+ }
+ data, err := generate(config, version)
+ if err != nil {
+ return err
+ }
+ if err := writeFile(data, outName); err != nil {
+ return err
+ }
+ debug.Reset() // Don't dump debugging information on success
+ fmt.Fprintf(env.Stdout, "Wrote output file %v\n", outName)
}
- if err := writeFile(data, outName); err != nil {
- return err
- }
- debug.Reset() // Don't dump debugging information on success
- fmt.Fprintf(env.Stdout, "Wrote output file %v\n", outName)
return nil
}
@@ -177,7 +183,7 @@
return config, err
}
-func generate(config *vdl.Value) ([]byte, error) {
+func generate(config *vdl.Value, version vom.Version) ([]byte, error) {
// This config needs to have a specific struct format. See @testdata/vomtype.vdl.
// TODO(alexfandrianto): Instead of this, we should have separate generator
// functions that switch off of the vomdata config filename. That way, we can
@@ -190,26 +196,17 @@
// This file was auto-generated via "vomtest generate".
// DO NOT UPDATE MANUALLY; read the comments in `+vomdataConfig+`.
-package testdata
-`)
+package data%x
+`, version)
imports := codegen.ImportsForValue(config, testpkg)
if len(imports) > 0 {
fmt.Fprintf(buf, "\n%s\n", vdlgen.Imports(imports))
}
+ typesPkgName := imports.LookupLocal(typespkg)
fmt.Fprintf(buf, `
-// TestCase represents an individual testcase for vom encoding and decoding.
-type TestCase struct {
- Name string // Name of the testcase
- Value any // Value to test
- TypeString string // The string representation of the Type
- Hex string // Hex pattern representing vom encoding
- HexVersion string // Hex pattern representing vom encoding of Version
- HexType string // Hex pattern representing vom encoding of Type
- HexValue string // Hex pattern representing vom encoding of Value
-}
// Tests contains the testcases to use to test vom encoding and decoding.
-const Tests = []TestCase {`)
+const Tests = []%s.TestCase {`, typesPkgName)
// The vom encode-decode test cases need to be of type []any.
encodeDecodeTests := config.StructField(0)
if got, want := encodeDecodeTests.Type(), vdl.ListType(vdl.AnyType); got != want {
@@ -224,7 +221,7 @@
value = value.Elem()
}
valstr := vdlgen.TypedConst(value, testpkg, imports)
- hexversion, hextype, hexvalue, vomdump, err := toVomHex(value)
+ hexversion, hextype, hexvalue, vomdump, err := toVomHex(version, value)
if err != nil {
return nil, err
}
@@ -277,7 +274,7 @@
// The values within a ConvertGroup can convert between themselves w/o error.
// However, values in higher-indexed ConvertGroups will error when converting up
// to the primary type of the lower-indexed ConvertGroups.
-const ConvertTests = map[string][]ConvertGroup{`)
+const ConvertTests = map[string][]%s.ConvertGroup{`, typesPkgName)
for _, testName := range vdl.SortValuesAsString(convertTests.Keys()) {
fmt.Fprintf(buf, `
%[1]q: {`, testName.RawString())
@@ -318,24 +315,27 @@
return buf.Bytes(), nil
}
-func toVomHex(value *vdl.Value) (string, string, string, string, error) {
+func toVomHex(version vom.Version, value *vdl.Value) (string, string, string, string, error) {
var buf, typebuf bytes.Buffer
- encoder := vom.NewEncoderWithTypeEncoder(&buf, vom.NewTypeEncoder(&typebuf))
+ encoder := vom.NewVersionedEncoderWithTypeEncoder(version, &buf, vom.NewVersionedTypeEncoder(version, &typebuf))
if err := encoder.Encode(value); err != nil {
return "", "", "", "", fmt.Errorf("vom.Encode(%v) failed: %v", value, err)
}
- version, _ := buf.ReadByte() // Read the version byte.
+ versionByte, _ := buf.ReadByte() // Read the version byte.
if typebuf.Len() > 0 {
typebuf.ReadByte() // Remove the version byte.
}
- vombytes := append(append([]byte{version}, typebuf.Bytes()...), buf.Bytes()...)
+ vombytes := append(append([]byte{versionByte}, typebuf.Bytes()...), buf.Bytes()...)
const pre = "\t// "
- vomdump := pre + strings.Replace(vom.Dump(vombytes), "\n", "\n"+pre, -1)
+ var vomdump string
+ if version == 0x80 {
+ vomdump = pre + strings.Replace(vom.Dump(vombytes), "\n", "\n"+pre, -1)
+ }
if strings.HasSuffix(vomdump, "\n"+pre) {
vomdump = vomdump[:len(vomdump)-len("\n"+pre)]
}
// TODO(toddw): Add hex pattern bracketing for map and set.
- return fmt.Sprintf("%x", version), fmt.Sprintf("%x", typebuf.Bytes()), fmt.Sprintf("%x", buf.Bytes()), vomdump, nil
+ return fmt.Sprintf("%x", versionByte), fmt.Sprintf("%x", typebuf.Bytes()), fmt.Sprintf("%x", buf.Bytes()), vomdump, nil
}
func writeFile(data []byte, outName string) error {
diff --git a/cmd/vrpc/doc.go b/cmd/vrpc/doc.go
index ff84dc9..25baf70 100644
--- a/cmd/vrpc/doc.go
+++ b/cmd/vrpc/doc.go
@@ -36,6 +36,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/cmd/vrun/doc.go b/cmd/vrun/doc.go
index b0ef6a5..6e5a996 100644
--- a/cmd/vrun/doc.go
+++ b/cmd/vrun/doc.go
@@ -36,6 +36,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/examples/rps/rpsbot/doc.go b/examples/rps/rpsbot/doc.go
index 1f94e3a..cec0fcd 100644
--- a/examples/rps/rpsbot/doc.go
+++ b/examples/rps/rpsbot/doc.go
@@ -36,6 +36,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/examples/rps/rpsplayer/doc.go b/examples/rps/rpsplayer/doc.go
index bfa7fea..a6713f9 100644
--- a/examples/rps/rpsplayer/doc.go
+++ b/examples/rps/rpsplayer/doc.go
@@ -33,6 +33,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/examples/rps/rpsscorekeeper/doc.go b/examples/rps/rpsscorekeeper/doc.go
index 93e07f7..e295e78 100644
--- a/examples/rps/rpsscorekeeper/doc.go
+++ b/examples/rps/rpsscorekeeper/doc.go
@@ -32,6 +32,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/examples/tunnel/tunneld/doc.go b/examples/tunnel/tunneld/doc.go
index 653eab7..71a8d8c 100644
--- a/examples/tunnel/tunneld/doc.go
+++ b/examples/tunnel/tunneld/doc.go
@@ -12,6 +12,10 @@
tunneld [flags]
The tunneld flags are:
+ -acl=
+ JSON-encoded Permissions. Takes precedence over --acl-file.
+ -acl-file=
+ File containing JSON-encoded Permissions.
-name=
Name to publish the server as.
@@ -30,6 +34,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/examples/tunnel/tunneld/main.go b/examples/tunnel/tunneld/main.go
index 167f86d..d6ecc79 100644
--- a/examples/tunnel/tunneld/main.go
+++ b/examples/tunnel/tunneld/main.go
@@ -8,25 +8,26 @@
package main
import (
+ "bytes"
"fmt"
- "v.io/x/lib/cmdline"
-
- "v.io/v23/context"
-
"v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/security"
+ "v.io/v23/security/access"
+ "v.io/x/lib/cmdline"
"v.io/x/ref/examples/tunnel"
- "v.io/x/ref/lib/security/securityflag"
"v.io/x/ref/lib/signals"
"v.io/x/ref/lib/v23cmd"
-
_ "v.io/x/ref/runtime/factories/roaming"
)
-var name string
+var name, aclLiteral, aclFile string
func main() {
cmdRoot.Flags.StringVar(&name, "name", "", "Name to publish the server as.")
+ cmdRoot.Flags.StringVar(&aclFile, "acl-file", "", "File containing JSON-encoded Permissions.")
+ cmdRoot.Flags.StringVar(&aclLiteral, "acl", "", "JSON-encoded Permissions. Takes precedence over --acl-file.")
cmdline.HideGlobalFlagsExcept()
cmdline.Main(cmdRoot)
}
@@ -41,7 +42,22 @@
}
func runTunnelD(ctx *context.T, env *cmdline.Env, args []string) error {
- auth := securityflag.NewAuthorizerOrDie()
+ var (
+ auth security.Authorizer
+ err error
+ )
+ switch {
+ case aclLiteral != "":
+ var perms access.Permissions
+ if perms, err = access.ReadPermissions(bytes.NewBufferString(aclLiteral)); err != nil {
+ return fmt.Errorf("ReadPermissions(%v) failed: %v", aclLiteral, err)
+ }
+ auth = access.TypicalTagTypePermissionsAuthorizer(perms)
+ case aclFile != "":
+ if auth, err = access.PermissionsAuthorizerFromFile(aclFile, access.TypicalTagType()); err != nil {
+ return fmt.Errorf("PermissionsAuthorizerFromFile(%v) failed: %v", aclFile, err)
+ }
+ }
ctx, server, err := v23.WithNewServer(ctx, name, tunnel.TunnelServer(&T{}), auth)
if err != nil {
return fmt.Errorf("NewServer failed: %v", err)
diff --git a/examples/tunnel/vsh/doc.go b/examples/tunnel/vsh/doc.go
index 3094470..b5eaac2 100644
--- a/examples/tunnel/vsh/doc.go
+++ b/examples/tunnel/vsh/doc.go
@@ -65,6 +65,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/lib/discovery/advertise.go b/lib/discovery/advertise.go
index f44ca89..c53d3c4 100644
--- a/lib/discovery/advertise.go
+++ b/lib/discovery/advertise.go
@@ -12,22 +12,23 @@
)
var (
- errNoInterfaceName = verror.Register(pkgPath+".errNoInterfaceName", verror.NoRetry, "{1:}{2:} interface name not provided")
- errNotPackableAttributes = verror.Register(pkgPath+".errNotPackableAttributes", verror.NoRetry, "{1:}{2:} attribute not packable")
- errNoAddresses = verror.Register(pkgPath+".errNoAddress", verror.NoRetry, "{1:}{2:} address not provided")
- errNotPackableAddresses = verror.Register(pkgPath+".errNotPackableAddresses", verror.NoRetry, "{1:}{2:} address not packable")
+ errAlreadyBeingAdvertised = verror.Register(pkgPath+".errAlreadyBeingAdvertised", verror.NoRetry, "{1:}{2:} already being advertised")
+ errNoInterfaceName = verror.Register(pkgPath+".errNoInterfaceName", verror.NoRetry, "{1:}{2:} interface name not provided")
+ errNotPackableAttributes = verror.Register(pkgPath+".errNotPackableAttributes", verror.NoRetry, "{1:}{2:} attribute not packable")
+ errNoAddresses = verror.Register(pkgPath+".errNoAddress", verror.NoRetry, "{1:}{2:} address not provided")
+ errNotPackableAddresses = verror.Register(pkgPath+".errNotPackableAddresses", verror.NoRetry, "{1:}{2:} address not packable")
)
// Advertise implements discovery.Advertiser.
-func (ds *ds) Advertise(ctx *context.T, service discovery.Service, visibility []security.BlessingPattern) error {
+func (ds *ds) Advertise(ctx *context.T, service discovery.Service, visibility []security.BlessingPattern) (<-chan struct{}, error) {
if len(service.InterfaceName) == 0 {
- return verror.New(errNoInterfaceName, ctx)
+ return nil, verror.New(errNoInterfaceName, ctx)
}
if len(service.Addrs) == 0 {
- return verror.New(errNoAddresses, ctx)
+ return nil, verror.New(errNoAddresses, ctx)
}
if err := validateAttributes(service.Attrs); err != nil {
- return err
+ return nil, err
}
if len(service.InstanceUuid) == 0 {
@@ -39,20 +40,49 @@
Service: service,
}
if err := encrypt(&ad, visibility); err != nil {
- return err
+ return nil, err
}
ctx, cancel, err := ds.addTask(ctx)
if err != nil {
- return err
+ return nil, err
}
- barrier := NewBarrier(func() { ds.removeTask(ctx) })
+ id := string(ad.Service.InstanceUuid)
+ if !ds.addAd(id) {
+ cancel()
+ ds.removeTask(ctx)
+ return nil, verror.New(errAlreadyBeingAdvertised, ctx)
+ }
+
+ done := make(chan struct{})
+ barrier := NewBarrier(func() {
+ ds.removeAd(id)
+ ds.removeTask(ctx)
+ close(done)
+ })
for _, plugin := range ds.plugins {
if err := plugin.Advertise(ctx, ad, barrier.Add()); err != nil {
cancel()
- return err
+ return nil, err
}
}
- return nil
+ return done, nil
+}
+
+func (ds *ds) addAd(id string) bool {
+ ds.mu.Lock()
+ if _, exist := ds.ads[id]; exist {
+ ds.mu.Unlock()
+ return false
+ }
+ ds.ads[id] = struct{}{}
+ ds.mu.Unlock()
+ return true
+}
+
+func (ds *ds) removeAd(id string) {
+ ds.mu.Lock()
+ delete(ds.ads, id)
+ ds.mu.Unlock()
}
diff --git a/lib/discovery/attributes.go b/lib/discovery/attributes.go
new file mode 100644
index 0000000..8b4ab69
--- /dev/null
+++ b/lib/discovery/attributes.go
@@ -0,0 +1,34 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package discovery
+
+import (
+ "errors"
+ "strings"
+
+ "v.io/v23/discovery"
+)
+
+// validateAttributes returns an error if the attributes are not suitable for advertising.
+func validateAttributes(attrs discovery.Attributes) error {
+ for k, _ := range attrs {
+ if len(k) == 0 {
+ return errors.New("empty key")
+ }
+ if strings.HasPrefix(k, "_") {
+ return errors.New("key starts with '_'")
+ }
+ for _, c := range k {
+ if c < 0x20 || c > 0x7e {
+ return errors.New("key is not printable US-ASCII")
+ }
+ if c == '=' {
+ return errors.New("key includes '='")
+ }
+ }
+ }
+ return nil
+}
+
diff --git a/lib/discovery/attributes_test.go b/lib/discovery/attributes_test.go
new file mode 100644
index 0000000..7387565
--- /dev/null
+++ b/lib/discovery/attributes_test.go
@@ -0,0 +1,36 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package discovery
+
+import (
+ "testing"
+
+ "v.io/v23/discovery"
+)
+
+func TestValidateAttributes(t *testing.T) {
+ valids := []discovery.Attributes{
+ discovery.Attributes{"key": "v"},
+ discovery.Attributes{"k_e.y": "v"},
+ discovery.Attributes{"k!": "v"},
+ }
+ for i, attrs := range valids {
+ if err := validateAttributes(attrs); err != nil {
+ t.Errorf("[%d]: valid attributes got error: %v", i, err)
+ }
+ }
+
+ invalids := []discovery.Attributes{
+ discovery.Attributes{"_key": "v"},
+ discovery.Attributes{"k=ey": "v"},
+ discovery.Attributes{"key\n": "v"},
+ }
+ for i, attrs := range invalids {
+ if err := validateAttributes(attrs); err == nil {
+ t.Errorf("[%d]: invalid attributes didn't get error", i)
+ }
+ }
+}
+
diff --git a/lib/discovery/cipher.go b/lib/discovery/cipher.go
index 5225d44..597891a 100644
--- a/lib/discovery/cipher.go
+++ b/lib/discovery/cipher.go
@@ -40,13 +40,13 @@
// We only encrypt addresses for now.
//
// TODO(jhahn): Revisit the scope of encryption.
- encrypted := make([]string, len(ad.Addrs))
- for i, addr := range ad.Addrs {
+ encrypted := make([]string, len(ad.Service.Addrs))
+ for i, addr := range ad.Service.Addrs {
var n [24]byte
binary.LittleEndian.PutUint64(n[:], uint64(i))
encrypted[i] = string(secretbox.Seal(nil, []byte(addr), &n, sharedKey))
}
- ad.Addrs = encrypted
+ ad.Service.Addrs = encrypted
return nil
}
@@ -77,8 +77,8 @@
// Note that we should not modify the slice element directly here since the
// underlying plugins may cache services and the next plugin.Scan() may return
// the already decrypted addresses.
- decrypted := make([]string, len(ad.Addrs))
- for i, encrypted := range ad.Addrs {
+ decrypted := make([]string, len(ad.Service.Addrs))
+ for i, encrypted := range ad.Service.Addrs {
var n [24]byte
binary.LittleEndian.PutUint64(n[:], uint64(i))
addr, ok := secretbox.Open(nil, []byte(encrypted), &n, sharedKey)
@@ -87,7 +87,7 @@
}
decrypted[i] = string(addr)
}
- ad.Addrs = decrypted
+ ad.Service.Addrs = decrypted
return nil
}
diff --git a/lib/discovery/discovery.go b/lib/discovery/discovery.go
index fd00e48..78597b7 100644
--- a/lib/discovery/discovery.go
+++ b/lib/discovery/discovery.go
@@ -7,8 +7,6 @@
import (
"sync"
- "github.com/pborman/uuid"
-
"v.io/v23/context"
"v.io/v23/discovery"
"v.io/v23/verror"
@@ -16,36 +14,9 @@
const pkgPath = "v.io/x/ref/runtime/internal/discovery"
-// Advertisement holds a set of service properties to advertise.
-type Advertisement struct {
- discovery.Service
-
- // The service UUID to advertise.
- ServiceUuid uuid.UUID
-
- // Type of encryption applied to the advertisement so that it can
- // only be decoded by authorized principals.
- EncryptionAlgorithm EncryptionAlgorithm
- // If the advertisement is encrypted, then the data required to
- // decrypt it. The format of this data is a function of the algorithm.
- EncryptionKeys []EncryptionKey
-
- // TODO(jhahn): Add proximity.
- // TODO(jhahn): Use proximity for Lost.
- Lost bool
-}
-
-type EncryptionAlgorithm int
-type EncryptionKey []byte
-
-const (
- NoEncryption EncryptionAlgorithm = 0
- TestEncryption EncryptionAlgorithm = 1
- IbeEncryption EncryptionAlgorithm = 2
-)
var (
- errClosed = verror.Register(pkgPath+".errClosed", verror.NoRetry, "{1:}{2:} closed")
+ errDiscoveryClosed = verror.Register(pkgPath+".errDiscoveryClosed", verror.NoRetry, "{1:}{2:} discovery closed")
)
// ds is an implementation of discovery.T.
@@ -55,8 +26,9 @@
mu sync.Mutex
closed bool // GUARDED_BY(mu)
tasks map[*context.T]func() // GUARDED_BY(mu)
+ wg sync.WaitGroup
- wg sync.WaitGroup
+ ads map[string]struct{} // GUARDED_BY(mu)
}
func (ds *ds) Close() {
@@ -77,7 +49,7 @@
ds.mu.Lock()
if ds.closed {
ds.mu.Unlock()
- return nil, nil, verror.New(errClosed, ctx)
+ return nil, nil, verror.New(errDiscoveryClosed, ctx)
}
ctx, cancel := context.WithCancel(ctx)
ds.tasks[ctx] = cancel
@@ -88,21 +60,24 @@
func (ds *ds) removeTask(ctx *context.T) {
ds.mu.Lock()
- _, exist := ds.tasks[ctx]
- delete(ds.tasks, ctx)
- ds.mu.Unlock()
- if exist {
+ if _, exist := ds.tasks[ctx]; exist {
+ delete(ds.tasks, ctx)
ds.wg.Done()
}
+ ds.mu.Unlock()
}
// New returns a new Discovery instance initialized with the given plugins.
//
// Mostly for internal use. Consider to use factory.New.
func NewWithPlugins(plugins []Plugin) discovery.T {
+ if len(plugins) == 0 {
+ panic("no plugins")
+ }
ds := &ds{
plugins: make([]Plugin, len(plugins)),
tasks: make(map[*context.T]func()),
+ ads: make(map[string]struct{}),
}
copy(ds.plugins, plugins)
return ds
diff --git a/lib/discovery/discovery_test.go b/lib/discovery/discovery_test.go
index d14cc73..28984cd 100644
--- a/lib/discovery/discovery_test.go
+++ b/lib/discovery/discovery_test.go
@@ -9,6 +9,7 @@
"fmt"
"reflect"
"runtime"
+ "sync"
"testing"
"time"
@@ -25,11 +26,21 @@
)
func advertise(ctx *context.T, ds discovery.Advertiser, perms []security.BlessingPattern, services ...discovery.Service) (func(), error) {
- ctx, stop := context.WithCancel(ctx)
+ var wg sync.WaitGroup
+ tr := idiscovery.NewTrigger()
+ ctx, cancel := context.WithCancel(ctx)
for _, service := range services {
- if err := ds.Advertise(ctx, service, perms); err != nil {
+ wg.Add(1)
+ done, err := ds.Advertise(ctx, service, perms)
+ if err != nil {
+ cancel()
return nil, fmt.Errorf("Advertise failed: %v", err)
}
+ tr.Add(wg.Done, done)
+ }
+ stop := func() {
+ cancel()
+ wg.Wait()
}
return stop, nil
}
@@ -237,11 +248,33 @@
}
}
+func TestDuplicates(t *testing.T) {
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
+
+ ds := idiscovery.NewWithPlugins([]idiscovery.Plugin{mock.New()})
+ defer ds.Close()
+
+ service := discovery.Service{
+ InstanceUuid: idiscovery.NewInstanceUUID(),
+ InterfaceName: "v.io/v23/a",
+ Addrs: []string{"/h1:123/x"},
+ }
+
+ if _, err := advertise(ctx, ds, nil, service); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := advertise(ctx, ds, nil, service); err == nil {
+ t.Error("expect an error; but got none")
+ }
+}
+
func TestClose(t *testing.T) {
ctx, shutdown := test.V23Init()
defer shutdown()
ds := idiscovery.NewWithPlugins([]idiscovery.Plugin{mock.New()})
+
service := discovery.Service{
InstanceUuid: idiscovery.NewInstanceUUID(),
InterfaceName: "v.io/v23/a",
diff --git a/lib/discovery/encoding.go b/lib/discovery/encoding.go
index d58b315..f6edd07 100644
--- a/lib/discovery/encoding.go
+++ b/lib/discovery/encoding.go
@@ -9,32 +9,8 @@
"encoding/binary"
"errors"
"io"
- "strings"
-
- "v.io/v23/discovery"
)
-// validateAttributes returns an error if the attributes are not suitable for advertising.
-func validateAttributes(attrs discovery.Attributes) error {
- for k, _ := range attrs {
- if len(k) == 0 {
- return errors.New("empty key")
- }
- if strings.HasPrefix(k, "_") {
- return errors.New("key starts with '_'")
- }
- for _, c := range k {
- if c < 0x20 || c > 0x7e {
- return errors.New("key is not printable US-ASCII")
- }
- if c == '=' {
- return errors.New("key includes '='")
- }
- }
- }
- return nil
-}
-
// PackAddresses packs addresses into a byte slice.
func PackAddresses(addrs []string) []byte {
var buf bytes.Buffer
diff --git a/lib/discovery/encoding_test.go b/lib/discovery/encoding_test.go
index 488bc77..ebaa103 100644
--- a/lib/discovery/encoding_test.go
+++ b/lib/discovery/encoding_test.go
@@ -2,78 +2,48 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package discovery
+package discovery_test
import (
"reflect"
"testing"
- "v.io/v23/discovery"
+ "v.io/x/ref/lib/discovery"
+ "v.io/x/ref/lib/discovery/testdata"
)
-func TestValidateAttributes(t *testing.T) {
- valids := []discovery.Attributes{
- discovery.Attributes{"key": "v"},
- discovery.Attributes{"k_e.y": "v"},
- discovery.Attributes{"k!": "v"},
- }
- for i, attrs := range valids {
- if err := validateAttributes(attrs); err != nil {
- t.Errorf("[%d]: valid attributes got error: %v", i, err)
- }
- }
-
- invalids := []discovery.Attributes{
- discovery.Attributes{"_key": "v"},
- discovery.Attributes{"k=ey": "v"},
- discovery.Attributes{"key\n": "v"},
- }
- for i, attrs := range invalids {
- if err := validateAttributes(attrs); err == nil {
- t.Errorf("[%d]: invalid attributes didn't get error", i)
- }
- }
-}
-
func TestPackAddresses(t *testing.T) {
- tests := [][]string{
- []string{"a12345"},
- []string{"a1234", "b5678", "c9012"},
- nil,
- }
-
- for _, test := range tests {
- pack := PackAddresses(test)
- unpack, err := UnpackAddresses(pack)
+ for _, test := range testdata.PackAddressTestData {
+ pack := discovery.PackAddresses(test.In)
+ if !reflect.DeepEqual(pack, test.Packed) {
+ t.Errorf("packed to: %v, but wanted: %v", pack, test.Packed)
+ }
+ unpack, err := discovery.UnpackAddresses(test.Packed)
if err != nil {
t.Errorf("unpacked error: %v", err)
continue
}
- if !reflect.DeepEqual(test, unpack) {
- t.Errorf("unpacked to %v, but want %v", unpack, test)
+ if !reflect.DeepEqual(test.In, unpack) {
+ t.Errorf("unpacked to %v, but want %v", unpack, test.In)
}
}
}
func TestPackEncryptionKeys(t *testing.T) {
- tests := []struct {
- algo EncryptionAlgorithm
- keys []EncryptionKey
- }{
- {TestEncryption, []EncryptionKey{EncryptionKey("0123456789")}},
- {IbeEncryption, []EncryptionKey{EncryptionKey("012345"), EncryptionKey("123456"), EncryptionKey("234567")}},
- {NoEncryption, nil},
- }
+ for _, test := range testdata.PackEncryptionKeysTestData {
+ pack := discovery.PackEncryptionKeys(test.Algo, test.Keys)
- for _, test := range tests {
- pack := PackEncryptionKeys(test.algo, test.keys)
- algo, keys, err := UnpackEncryptionKeys(pack)
+ if !reflect.DeepEqual(pack, test.Packed) {
+ t.Errorf("packed to: %v, but wanted: %v", pack, test.Packed)
+ }
+
+ algo, keys, err := discovery.UnpackEncryptionKeys(test.Packed)
if err != nil {
t.Errorf("unpacked error: %v", err)
continue
}
- if algo != test.algo || !reflect.DeepEqual(keys, test.keys) {
- t.Errorf("unpacked to (%d, %v), but want (%d, %v)", algo, keys, test.algo, test.keys)
+ if algo != test.Algo || !reflect.DeepEqual(keys, test.Keys) {
+ t.Errorf("unpacked to (%v, %v), but want (%v, %v)", algo, keys, test.Algo, test.Keys)
}
}
}
diff --git a/lib/discovery/factory/lazy.go b/lib/discovery/factory/lazy.go
index 05af440..5622562 100644
--- a/lib/discovery/factory/lazy.go
+++ b/lib/discovery/factory/lazy.go
@@ -27,10 +27,10 @@
derr error
}
-func (l *lazyFactory) Advertise(ctx *context.T, service discovery.Service, visibility []security.BlessingPattern) error {
+func (l *lazyFactory) Advertise(ctx *context.T, service discovery.Service, visibility []security.BlessingPattern) (<-chan struct{}, error) {
l.once.Do(l.init)
if l.derr != nil {
- return l.derr
+ return nil, l.derr
}
return l.d.Advertise(ctx, service, visibility)
}
diff --git a/lib/discovery/factory/lazy_test.go b/lib/discovery/factory/lazy_test.go
index f97cb9f..003ec45 100644
--- a/lib/discovery/factory/lazy_test.go
+++ b/lib/discovery/factory/lazy_test.go
@@ -27,9 +27,9 @@
return m, nil
}
-func (m *mock) Advertise(_ *context.T, _ discovery.Service, _ []security.BlessingPattern) error {
+func (m *mock) Advertise(_ *context.T, _ discovery.Service, _ []security.BlessingPattern) (<-chan struct{}, error) {
m.numAdvertises++
- return nil
+ return nil, nil
}
func (m *mock) Scan(_ *context.T, _ string) (<-chan discovery.Update, error) {
@@ -91,7 +91,7 @@
}
// Closed already; Shouldn't initialize it again.
- if err := d.Advertise(nil, discovery.Service{}, nil); err != errClosed {
+ if _, err := d.Advertise(nil, discovery.Service{}, nil); err != errClosed {
t.Errorf("expected an error %v, but got %v", errClosed, err)
}
if err := m.check(0, 0, 0, 0); err != nil {
@@ -111,7 +111,7 @@
m := mock{initErr: errInit}
d := newLazyFactory(m.init)
- if err := d.Advertise(nil, discovery.Service{}, nil); err != errInit {
+ if _, err := d.Advertise(nil, discovery.Service{}, nil); err != errInit {
t.Errorf("expected an error %v, but got %v", errInit, err)
}
if err := m.check(1, 0, 0, 0); err != nil {
diff --git a/lib/discovery/plugin.go b/lib/discovery/plugin.go
index daca158..092f510 100644
--- a/lib/discovery/plugin.go
+++ b/lib/discovery/plugin.go
@@ -5,8 +5,6 @@
package discovery
import (
- "github.com/pborman/uuid"
-
"v.io/v23/context"
)
@@ -24,5 +22,5 @@
// deadline. done should be called once when scanning is done or canceled.
//
// TODO(jhahn): Pass a filter on service attributes.
- Scan(ctx *context.T, serviceUuid uuid.UUID, ch chan<- Advertisement, done func()) error
+ Scan(ctx *context.T, serviceUuid Uuid, ch chan<- Advertisement, done func()) error
}
diff --git a/lib/discovery/plugins/ble/advertisement.go b/lib/discovery/plugins/ble/advertisement.go
index 7a8a3f9..50de7f9 100644
--- a/lib/discovery/plugins/ble/advertisement.go
+++ b/lib/discovery/plugins/ble/advertisement.go
@@ -21,38 +21,28 @@
attrs map[string][]byte
}
-const (
- // This uuids are v5 uuid generated out of band. These constants need
- // to be accessible in all the languages that have a ble implementation
- instanceUUID = "12db9a9c-1c7c-5560-bc6b-73a115c93413" // NewAttributeUUID("_instanceuuid")
- instanceNameUUID = "ffbdcff3-e56f-58f0-8c1a-e416c39aac0d" // NewAttributeUUID("_instancename")
- interfaceNameUUID = "b2cadfd4-d003-576c-acad-58b8e3a9cbc8" // NewAttributeUUID("_interfacename")
- addrsUUID = "ad2566b7-59d8-50ae-8885-222f43f65fdc" // NewAttributeUUID("_addrs")
- encryptionUUID = "6286d80a-adaa-519a-8a06-281a4645a607" // NewAttributeUUID("_encryption")
-)
-
func newAdvertisment(adv discovery.Advertisement) bleAdv {
attrs := map[string][]byte{
- instanceUUID: adv.InstanceUuid,
- interfaceNameUUID: []byte(adv.InterfaceName),
+ InstanceUUID: adv.Service.InstanceUuid,
+ InterfaceNameUUID: []byte(adv.Service.InterfaceName),
}
- if len(adv.InstanceName) > 0 {
- attrs[instanceNameUUID] = []byte(adv.InstanceName)
+ if len(adv.Service.InstanceName) > 0 {
+ attrs[InstanceNameUUID] = []byte(adv.Service.InstanceName)
}
- if len(adv.Addrs) > 0 {
- attrs[addrsUUID] = discovery.PackAddresses(adv.Addrs)
+ if len(adv.Service.Addrs) > 0 {
+ attrs[AddrsUUID] = discovery.PackAddresses(adv.Service.Addrs)
}
if adv.EncryptionAlgorithm != discovery.NoEncryption {
- attrs[encryptionUUID] = discovery.PackEncryptionKeys(adv.EncryptionAlgorithm, adv.EncryptionKeys)
+ attrs[EncryptionUUID] = discovery.PackEncryptionKeys(adv.EncryptionAlgorithm, adv.EncryptionKeys)
}
- for k, v := range adv.Attrs {
+ for k, v := range adv.Service.Attrs {
hexUUID := discovery.NewAttributeUUID(k).String()
attrs[hexUUID] = []byte(k + "=" + v)
}
return bleAdv{
- instanceID: adv.InstanceUuid,
- serviceUUID: adv.ServiceUuid,
+ instanceID: adv.Service.InstanceUuid,
+ serviceUUID: uuid.UUID(adv.ServiceUuid),
attrs: attrs,
}
}
@@ -63,23 +53,23 @@
InstanceUuid: a.instanceID,
Attrs: make(vdiscovery.Attributes),
},
- ServiceUuid: a.serviceUUID,
+ ServiceUuid: discovery.Uuid(a.serviceUUID),
}
var err error
for k, v := range a.attrs {
switch k {
- case instanceUUID:
- adv.InstanceUuid = v
- case instanceNameUUID:
- adv.InstanceName = string(v)
- case interfaceNameUUID:
- adv.InterfaceName = string(v)
- case addrsUUID:
- if adv.Addrs, err = discovery.UnpackAddresses(v); err != nil {
+ case InstanceUUID:
+ adv.Service.InstanceUuid = v
+ case InstanceNameUUID:
+ adv.Service.InstanceName = string(v)
+ case InterfaceNameUUID:
+ adv.Service.InterfaceName = string(v)
+ case AddrsUUID:
+ if adv.Service.Addrs, err = discovery.UnpackAddresses(v); err != nil {
return nil, err
}
- case encryptionUUID:
+ case EncryptionUUID:
if adv.EncryptionAlgorithm, adv.EncryptionKeys, err = discovery.UnpackEncryptionKeys(v); err != nil {
return nil, err
}
@@ -88,7 +78,7 @@
if len(parts) != 2 {
return nil, fmt.Errorf("incorrectly formatted value, %s", v)
}
- adv.Attrs[parts[0]] = parts[1]
+ adv.Service.Attrs[parts[0]] = parts[1]
}
}
return adv, nil
diff --git a/lib/discovery/plugins/ble/advertisement_test.go b/lib/discovery/plugins/ble/advertisement_test.go
index 7525be1..c24216d 100644
--- a/lib/discovery/plugins/ble/advertisement_test.go
+++ b/lib/discovery/plugins/ble/advertisement_test.go
@@ -8,36 +8,23 @@
"reflect"
"testing"
- "github.com/pborman/uuid"
-
- vdiscovery "v.io/v23/discovery"
-
- "v.io/x/ref/lib/discovery"
+ "v.io/x/ref/lib/discovery/plugins/ble/testdata"
)
func TestConvertingBackAndForth(t *testing.T) {
- v23Adv := discovery.Advertisement{
- Service: vdiscovery.Service{
- InstanceUuid: []byte(discovery.NewInstanceUUID()),
- InstanceName: "service",
- Attrs: vdiscovery.Attributes{
- "key1": "value1",
- "key2": "value2",
- },
- Addrs: []string{"localhost:1000", "example.com:540"},
- },
- ServiceUuid: uuid.NewUUID(),
- EncryptionAlgorithm: discovery.TestEncryption,
- EncryptionKeys: []discovery.EncryptionKey{discovery.EncryptionKey("k")},
- }
+ for _, test := range testdata.ConversionTestData {
+ v23Adv := test.VAdvertisement
+ adv := newAdvertisment(v23Adv)
+ if !reflect.DeepEqual(adv.attrs, test.BleAdvertisement) {
+ t.Errorf("wanted: %v, got %v", test.BleAdvertisement, adv.attrs)
+ }
+ out, err := adv.toDiscoveryAdvertisement()
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
- adv := newAdvertisment(v23Adv)
- out, err := adv.toDiscoveryAdvertisement()
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
-
- if !reflect.DeepEqual(&v23Adv, out) {
- t.Errorf("input does not equal output: %v, %v", v23Adv, out)
+ if !reflect.DeepEqual(&v23Adv, out) {
+ t.Errorf("input does not equal output: %v, %v", v23Adv, out)
+ }
}
}
diff --git a/lib/discovery/plugins/ble/const.vdl b/lib/discovery/plugins/ble/const.vdl
new file mode 100644
index 0000000..e408727
--- /dev/null
+++ b/lib/discovery/plugins/ble/const.vdl
@@ -0,0 +1,15 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ble
+
+const (
+	// These uuids are v5 uuids generated out of band. These constants need
+ // to be accessible in all the languages that have a ble implementation
+ InstanceUUID = "12db9a9c-1c7c-5560-bc6b-73a115c93413" // NewAttributeUUID("_instanceuuid")
+ InstanceNameUUID = "ffbdcff3-e56f-58f0-8c1a-e416c39aac0d" // NewAttributeUUID("_instancename")
+ InterfaceNameUUID = "b2cadfd4-d003-576c-acad-58b8e3a9cbc8" // NewAttributeUUID("_interfacename")
+ AddrsUUID = "ad2566b7-59d8-50ae-8885-222f43f65fdc" // NewAttributeUUID("_addrs")
+ EncryptionUUID = "6286d80a-adaa-519a-8a06-281a4645a607" // NewAttributeUUID("_encryption")
+)
diff --git a/lib/discovery/plugins/ble/const.vdl.go b/lib/discovery/plugins/ble/const.vdl.go
new file mode 100644
index 0000000..91f48be
--- /dev/null
+++ b/lib/discovery/plugins/ble/const.vdl.go
@@ -0,0 +1,20 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated by the vanadium vdl tool.
+// Source: const.vdl
+
+package ble
+
+// These uuids are v5 uuids generated out of band. These constants need
+// to be accessible in all the languages that have a ble implementation
+const InstanceUUID = "12db9a9c-1c7c-5560-bc6b-73a115c93413" // NewAttributeUUID("_instanceuuid")
+
+const InstanceNameUUID = "ffbdcff3-e56f-58f0-8c1a-e416c39aac0d" // NewAttributeUUID("_instancename")
+
+const InterfaceNameUUID = "b2cadfd4-d003-576c-acad-58b8e3a9cbc8" // NewAttributeUUID("_interfacename")
+
+const AddrsUUID = "ad2566b7-59d8-50ae-8885-222f43f65fdc" // NewAttributeUUID("_addrs")
+
+const EncryptionUUID = "6286d80a-adaa-519a-8a06-281a4645a607" // NewAttributeUUID("_encryption")
diff --git a/lib/discovery/plugins/ble/neighborhood.go b/lib/discovery/plugins/ble/neighborhood.go
index c909855..e6a1d37 100644
--- a/lib/discovery/plugins/ble/neighborhood.go
+++ b/lib/discovery/plugins/ble/neighborhood.go
@@ -160,17 +160,17 @@
b.device.SetServices(v)
}
-func (b *bleNeighborhood) addScanner(uuid uuid.UUID) (chan *discovery.Advertisement, int64) {
+func (b *bleNeighborhood) addScanner(uid discovery.Uuid) (chan *discovery.Advertisement, int64) {
ch := make(chan *discovery.Advertisement)
s := &scanner{
- uuid: uuid,
+ uuid: uuid.UUID(uid),
ch: ch,
}
b.mu.Lock()
id := b.nextScanId
b.nextScanId++
b.scannersById[id] = s
- key := uuid.String()
+ key := uuid.UUID(uid).String()
m, found := b.scannersByService[key]
if !found {
m = map[int64]*scanner{}
@@ -206,9 +206,15 @@
return
default:
}
+
b.device.Advertise(b.computeAdvertisement())
+ b.mu.Lock()
+ hasScanner := len(b.scannersById) > 0
+ b.mu.Unlock()
// TODO(bjornick): Don't scan unless there is a scanner running.
- b.device.Scan([]gatt.UUID{}, true)
+ if hasScanner {
+ b.device.Scan([]gatt.UUID{}, true)
+ }
}
// seenHash returns whether or not we have seen the hash <h> before.
@@ -333,7 +339,7 @@
services[uid.String()] = &bleAdv{
serviceUUID: uid,
attrs: charMap,
- instanceID: charMap[strings.Replace(instanceUUID, "-", "", -1)],
+ instanceID: charMap[strings.Replace(InstanceUUID, "-", "", -1)],
}
}
b.saveDevice(h, p.ID(), services)
@@ -466,6 +472,7 @@
w(k)
}
adv := &gatt.AdvPacket{}
+ adv.AppendFlags(0x06)
adv.AppendManufacturerData(manufacturerId, hasher.Sum(nil))
adv.AppendName(b.name)
return adv
diff --git a/lib/discovery/plugins/ble/plugin.go b/lib/discovery/plugins/ble/plugin.go
index fde8592..886e739 100644
--- a/lib/discovery/plugins/ble/plugin.go
+++ b/lib/discovery/plugins/ble/plugin.go
@@ -10,8 +10,6 @@
import (
"v.io/v23/context"
"v.io/x/ref/lib/discovery"
-
- "github.com/pborman/uuid"
)
type blePlugin struct {
@@ -22,14 +20,14 @@
func (b *blePlugin) Advertise(ctx *context.T, ad discovery.Advertisement, done func()) error {
b.b.addAdvertisement(newAdvertisment(ad))
stop := func() {
- b.b.removeService(ad.InstanceUuid)
+ b.b.removeService(ad.Service.InstanceUuid)
done()
}
b.trigger.Add(stop, ctx.Done())
return nil
}
-func (b *blePlugin) Scan(ctx *context.T, serviceUuid uuid.UUID, scan chan<- discovery.Advertisement, done func()) error {
+func (b *blePlugin) Scan(ctx *context.T, serviceUuid discovery.Uuid, scan chan<- discovery.Advertisement, done func()) error {
ch, id := b.b.addScanner(serviceUuid)
drain := func() {
for range ch {
diff --git a/lib/discovery/plugins/ble/testdata/advertisement.vdl b/lib/discovery/plugins/ble/testdata/advertisement.vdl
new file mode 100644
index 0000000..097d056
--- /dev/null
+++ b/lib/discovery/plugins/ble/testdata/advertisement.vdl
@@ -0,0 +1,49 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testdata
+
+import (
+ "v.io/v23/discovery"
+ idiscovery "v.io/x/ref/lib/discovery"
+)
+
+// AdvertisementConversionTestCase represents a test case for converting between the Vanadium Advertisement format
+// and the Ble Advertisement format.
+type AdvertisementConversionTestCase struct {
+ VAdvertisement idiscovery.Advertisement
+
+ // BleAdvertisement is a map from human readable uuid strings to the byte data.
+ BleAdvertisement map[string][]byte
+}
+
+// ConversionTestData contains test cases for conversions between the ble format and the v23 advertising format.
+const ConversionTestData = []AdvertisementConversionTestCase{
+ AdvertisementConversionTestCase{
+ VAdvertisement: idiscovery.Advertisement{
+ Service: discovery.Service{
+ InstanceUuid: []byte{5, 146, 235, 25, 108, 124, 65, 162, 165, 230, 1, 162, 179, 150, 87, 30},
+ InstanceName: "service",
+ InterfaceName: "v.io/x/ref",
+ Attrs: discovery.Attributes{
+ "key1": "value1",
+ "key2": "value2",
+ },
+ Addrs: []string{"localhost:1000", "example.com:540"},
+ },
+ ServiceUuid: idiscovery.Uuid("\xde\xed\xe9d\xa2\xe9T\x17\x83\x84\xdd\x0c\x86\xd2D\x0e"),
+ EncryptionAlgorithm: idiscovery.TestEncryption,
+ EncryptionKeys: []idiscovery.EncryptionKey{idiscovery.EncryptionKey("k")},
+ },
+ BleAdvertisement: map[string][]byte{
+ "6286d80a-adaa-519a-8a06-281a4645a607": []byte{1, 1, 107},
+ "4ce68e8b-173b-597e-9f93-ca453e7bb790": []byte{107, 101, 121, 49, 61, 118, 97, 108, 117, 101, 49},
+ "777f349c-d01f-5543-aa31-528e48bb53bd": []byte{107, 101, 121, 50, 61, 118, 97, 108, 117, 101, 50},
+ "12db9a9c-1c7c-5560-bc6b-73a115c93413": []byte{5, 146, 235, 25, 108, 124, 65, 162, 165, 230, 1, 162, 179, 150, 87, 30},
+ "b2cadfd4-d003-576c-acad-58b8e3a9cbc8": []byte{118, 46, 105, 111, 47, 120, 47, 114, 101, 102},
+ "ffbdcff3-e56f-58f0-8c1a-e416c39aac0d": []byte{115, 101, 114, 118, 105, 99, 101},
+ "ad2566b7-59d8-50ae-8885-222f43f65fdc": []byte{14, 108, 111, 99, 97, 108, 104, 111, 115, 116, 58, 49, 48, 48, 48, 15, 101, 120, 97, 109, 112, 108, 101, 46, 99, 111, 109, 58, 53, 52, 48},
+ },
+ },
+}
diff --git a/lib/discovery/plugins/ble/testdata/advertisement.vdl.go b/lib/discovery/plugins/ble/testdata/advertisement.vdl.go
new file mode 100644
index 0000000..224288b
--- /dev/null
+++ b/lib/discovery/plugins/ble/testdata/advertisement.vdl.go
@@ -0,0 +1,69 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated by the vanadium vdl tool.
+// Source: advertisement.vdl
+
+package testdata
+
+import (
+ // VDL system imports
+ "v.io/v23/vdl"
+
+ // VDL user imports
+ "v.io/v23/discovery"
+ discovery_2 "v.io/x/ref/lib/discovery"
+)
+
+// AdvertisementConversionTestCase represents a test case for converting between the Vanadium Advertisement format
+// and the Ble Advertisement format.
+type AdvertisementConversionTestCase struct {
+ VAdvertisement discovery_2.Advertisement
+ // BleAdvertisement is a map from human readable uuid strings to the byte data.
+ BleAdvertisement map[string][]byte
+}
+
+func (AdvertisementConversionTestCase) __VDLReflect(struct {
+ Name string `vdl:"v.io/x/ref/lib/discovery/plugins/ble/testdata.AdvertisementConversionTestCase"`
+}) {
+}
+
+func init() {
+ vdl.Register((*AdvertisementConversionTestCase)(nil))
+}
+
+// ConversionTestData contains test cases for conversions between the ble format and the v23 advertising format.
+var ConversionTestData = []AdvertisementConversionTestCase{
+ {
+ VAdvertisement: discovery_2.Advertisement{
+ Service: discovery.Service{
+ InstanceUuid: []byte("\x05\x92\xeb\x19l|A\xa2\xa5\xe6\x01\xa2\xb3\x96W\x1e"),
+ InstanceName: "service",
+ InterfaceName: "v.io/x/ref",
+ Attrs: discovery.Attributes{
+ "key1": "value1",
+ "key2": "value2",
+ },
+ Addrs: []string{
+ "localhost:1000",
+ "example.com:540",
+ },
+ },
+ ServiceUuid: discovery_2.Uuid("\xde\xed\xe9d\xa2\xe9T\x17\x83\x84\xdd\f\x86\xd2D\x0e"),
+ EncryptionAlgorithm: 1,
+ EncryptionKeys: []discovery_2.EncryptionKey{
+ discovery_2.EncryptionKey("k"),
+ },
+ },
+ BleAdvertisement: map[string][]byte{
+ "12db9a9c-1c7c-5560-bc6b-73a115c93413": []byte("\x05\x92\xeb\x19l|A\xa2\xa5\xe6\x01\xa2\xb3\x96W\x1e"),
+ "4ce68e8b-173b-597e-9f93-ca453e7bb790": []byte("key1=value1"),
+ "6286d80a-adaa-519a-8a06-281a4645a607": []byte("\x01\x01k"),
+ "777f349c-d01f-5543-aa31-528e48bb53bd": []byte("key2=value2"),
+ "ad2566b7-59d8-50ae-8885-222f43f65fdc": []byte("\x0elocalhost:1000\x0fexample.com:540"),
+ "b2cadfd4-d003-576c-acad-58b8e3a9cbc8": []byte("v.io/x/ref"),
+ "ffbdcff3-e56f-58f0-8c1a-e416c39aac0d": []byte("service"),
+ },
+ },
+}
diff --git a/lib/discovery/plugins/mdns/mdns.go b/lib/discovery/plugins/mdns/mdns.go
index 4cc5e6a..a006562 100644
--- a/lib/discovery/plugins/mdns/mdns.go
+++ b/lib/discovery/plugins/mdns/mdns.go
@@ -72,10 +72,10 @@
}
func (p *plugin) Advertise(ctx *context.T, ad idiscovery.Advertisement, done func()) (err error) {
- serviceName := ad.ServiceUuid.String() + serviceNameSuffix
+ serviceName := uuid.UUID(ad.ServiceUuid).String() + serviceNameSuffix
// We use the instance uuid as the host name so that we can get the instance uuid
// from the lost service instance, which has no txt records at all.
- hostName := encodeInstanceUuid(ad.InstanceUuid)
+ hostName := encodeInstanceUuid(ad.Service.InstanceUuid)
txt, err := createTxtRecords(&ad)
if err != nil {
done()
@@ -104,12 +104,12 @@
return nil
}
-func (p *plugin) Scan(ctx *context.T, serviceUuid uuid.UUID, ch chan<- idiscovery.Advertisement, done func()) error {
+func (p *plugin) Scan(ctx *context.T, serviceUuid idiscovery.Uuid, ch chan<- idiscovery.Advertisement, done func()) error {
var serviceName string
if len(serviceUuid) == 0 {
serviceName = v23ServiceName
} else {
- serviceName = serviceUuid.String() + serviceNameSuffix
+ serviceName = uuid.UUID(serviceUuid).String() + serviceNameSuffix
}
go func() {
@@ -189,19 +189,19 @@
func createTxtRecords(ad *idiscovery.Advertisement) ([]string, error) {
// Prepare a txt record with attributes and addresses to announce.
- txt := appendTxtRecord(nil, attrInterface, ad.InterfaceName)
- if len(ad.InstanceName) > 0 {
- txt = appendTxtRecord(txt, attrName, ad.InstanceName)
+ txt := appendTxtRecord(nil, attrInterface, ad.Service.InterfaceName)
+ if len(ad.Service.InstanceName) > 0 {
+ txt = appendTxtRecord(txt, attrName, ad.Service.InstanceName)
}
- if len(ad.Addrs) > 0 {
- addrs := idiscovery.PackAddresses(ad.Addrs)
+ if len(ad.Service.Addrs) > 0 {
+ addrs := idiscovery.PackAddresses(ad.Service.Addrs)
txt = appendTxtRecord(txt, attrAddrs, string(addrs))
}
if ad.EncryptionAlgorithm != idiscovery.NoEncryption {
enc := idiscovery.PackEncryptionKeys(ad.EncryptionAlgorithm, ad.EncryptionKeys)
txt = appendTxtRecord(txt, attrEncryption, string(enc))
}
- for k, v := range ad.Attrs {
+ for k, v := range ad.Service.Attrs {
txt = appendTxtRecord(txt, k, v)
}
txt, err := maybeSplitLargeTXT(txt)
@@ -245,7 +245,7 @@
return ad, nil
}
- ad.Attrs = make(discovery.Attributes)
+ ad.Service.Attrs = make(discovery.Attributes)
for _, rr := range service.TxtRRs {
txt, err := maybeJoinLargeTXT(rr.Txt)
if err != nil {
@@ -259,11 +259,11 @@
}
switch k, v := p[0], p[1]; k {
case attrName:
- ad.InstanceName = v
+ ad.Service.InstanceName = v
case attrInterface:
- ad.InterfaceName = v
+ ad.Service.InterfaceName = v
case attrAddrs:
- if ad.Addrs, err = idiscovery.UnpackAddresses([]byte(v)); err != nil {
+ if ad.Service.Addrs, err = idiscovery.UnpackAddresses([]byte(v)); err != nil {
return idiscovery.Advertisement{}, err
}
case attrEncryption:
@@ -271,7 +271,7 @@
return idiscovery.Advertisement{}, err
}
default:
- ad.Attrs[k] = v
+ ad.Service.Attrs[k] = v
}
}
}
diff --git a/lib/discovery/plugins/mdns/mdns_test.go b/lib/discovery/plugins/mdns/mdns_test.go
index 7617a5c..e6c2b35 100644
--- a/lib/discovery/plugins/mdns/mdns_test.go
+++ b/lib/discovery/plugins/mdns/mdns_test.go
@@ -70,7 +70,7 @@
func startScan(ctx *context.T, p idiscovery.Plugin, interfaceName string) (<-chan idiscovery.Advertisement, func(), error) {
ctx, cancel := context.WithCancel(ctx)
scan := make(chan idiscovery.Advertisement)
- var serviceUuid uuid.UUID
+ var serviceUuid idiscovery.Uuid
if len(interfaceName) > 0 {
serviceUuid = idiscovery.NewServiceUUID(interfaceName)
}
@@ -108,7 +108,7 @@
for _, want := range wants {
matched := false
for i, ad := range ads {
- if !uuid.Equal(ad.InstanceUuid, want.InstanceUuid) {
+ if !uuid.Equal(ad.Service.InstanceUuid, want.InstanceUuid) {
continue
}
if lost {
diff --git a/lib/discovery/plugins/mock/mock.go b/lib/discovery/plugins/mock/mock.go
index 9c61600..d12caef 100644
--- a/lib/discovery/plugins/mock/mock.go
+++ b/lib/discovery/plugins/mock/mock.go
@@ -39,7 +39,7 @@
p.mu.Lock()
ads := p.services[key]
for i, a := range ads {
- if uuid.Equal(a.InstanceUuid, ad.InstanceUuid) {
+ if uuid.Equal(uuid.UUID(a.Service.InstanceUuid), uuid.UUID(ad.Service.InstanceUuid)) {
ads = append(ads[:i], ads[i+1:]...)
break
}
@@ -56,7 +56,7 @@
return nil
}
-func (p *plugin) Scan(ctx *context.T, serviceUuid uuid.UUID, ch chan<- discovery.Advertisement, done func()) error {
+func (p *plugin) Scan(ctx *context.T, serviceUuid discovery.Uuid, ch chan<- discovery.Advertisement, done func()) error {
rescan := make(chan struct{})
go func() {
var updateSeqSeen int
@@ -87,7 +87,7 @@
continue
}
for _, ad := range ads {
- current[string(ad.InstanceUuid)] = ad
+ current[string(ad.Service.InstanceUuid)] = ad
}
}
p.mu.Unlock()
diff --git a/lib/discovery/scan.go b/lib/discovery/scan.go
index 85a380c..1f5953f 100644
--- a/lib/discovery/scan.go
+++ b/lib/discovery/scan.go
@@ -5,17 +5,16 @@
package discovery
import (
- "github.com/pborman/uuid"
-
"v.io/v23"
"v.io/v23/context"
"v.io/v23/discovery"
+ "v.io/v23/security"
)
// Scan implements discovery.Scanner.
func (ds *ds) Scan(ctx *context.T, query string) (<-chan discovery.Update, error) {
// TODO(jhann): Implement a simple query processor.
- var serviceUuid uuid.UUID
+ var serviceUuid Uuid
if len(query) > 0 {
serviceUuid = NewServiceUUID(query)
}
@@ -54,10 +53,7 @@
principal := v23.GetPrincipal(ctx)
var names []string
if principal != nil {
- blessings := principal.BlessingStore().Default()
- for n, _ := range principal.BlessingsInfo(blessings) {
- names = append(names, n)
- }
+ names = security.BlessingNames(principal, principal.BlessingStore().Default())
}
// A plugin may returns a Lost event with clearing all attributes including encryption
@@ -74,16 +70,22 @@
}
continue
}
- id := string(ad.InstanceUuid)
// TODO(jhahn): Merge scanData based on InstanceUuid.
+ var update discovery.Update
+ id := string(ad.Service.InstanceUuid)
if ad.Lost {
if _, ok := found[id]; ok {
delete(found, id)
- updateCh <- discovery.UpdateLost{discovery.Lost{InstanceUuid: ad.InstanceUuid}}
+ update = discovery.UpdateLost{discovery.Lost{InstanceUuid: ad.Service.InstanceUuid}}
}
} else {
found[id] = struct{}{}
- updateCh <- discovery.UpdateFound{discovery.Found{Service: ad.Service}}
+ update = discovery.UpdateFound{discovery.Found{Service: ad.Service}}
+ }
+ select {
+ case updateCh <- update:
+ case <-ctx.Done():
+ return
}
case <-ctx.Done():
return
diff --git a/lib/discovery/testdata/encoding.vdl b/lib/discovery/testdata/encoding.vdl
new file mode 100644
index 0000000..7df4f39
--- /dev/null
+++ b/lib/discovery/testdata/encoding.vdl
@@ -0,0 +1,66 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains testdata for v.io/x/ref/lib/discovery/encoding_test.go. The
+// testdata is in a vdl file so that we can make sure the encoding implementations in
+// all the languages produce the same byte output.
+
+package testdata
+
+import (
+ "v.io/x/ref/lib/discovery"
+)
+
+// PackAddressTest represents a test case for PackAddress.
+type PackAddressTest struct {
+ // In is the addresses to pack.
+ In []string
+ // Packed is the expected packed output.
+ Packed []byte
+}
+
+const PackAddressTestData = []PackAddressTest{
+ PackAddressTest{
+ In: []string{"a12345"},
+ Packed: []byte{6, 97, 49, 50, 51, 52, 53},
+ },
+ PackAddressTest{
+ In: []string{"a1234", "b5678", "c9012"},
+ Packed: []byte{5, 97, 49, 50, 51, 52, 5, 98, 53, 54, 55, 56, 5, 99, 57, 48, 49, 50},
+ },
+ // An empty input should create an empty output.
+ PackAddressTest{},
+}
+
+// PackEncryptionKeysTest represents a test case for PackEncryptionKeys
+type PackEncryptionKeysTest struct {
+ // Algo is the algorithm that's in use.
+ // but that isn't defined in vdl yet.
+ Algo discovery.EncryptionAlgorithm
+ // Keys are the encryption keys.
+ // but that isn't defined in vdl yet.
+ Keys []discovery.EncryptionKey
+ // Packed is the expected output bytes.
+ Packed []byte
+}
+
+const PackEncryptionKeysTestData = []PackEncryptionKeysTest{
+ PackEncryptionKeysTest{
+ Algo: 1,
+ Keys: []discovery.EncryptionKey{discovery.EncryptionKey("0123456789")},
+ Packed: []byte{1, 10, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57},
+ },
+ PackEncryptionKeysTest{
+ Algo: 2,
+ Keys: []discovery.EncryptionKey{
+ discovery.EncryptionKey("012345"),
+ discovery.EncryptionKey("123456"),
+ discovery.EncryptionKey("234567"),
+ },
+ Packed: []byte{2, 6, 48, 49, 50, 51, 52, 53, 6, 49, 50, 51, 52, 53, 54, 6, 50, 51, 52, 53, 54, 55},
+ },
+ PackEncryptionKeysTest{
+ Packed: []byte{0},
+ },
+}
diff --git a/lib/discovery/testdata/encoding.vdl.go b/lib/discovery/testdata/encoding.vdl.go
new file mode 100644
index 0000000..5bf5454
--- /dev/null
+++ b/lib/discovery/testdata/encoding.vdl.go
@@ -0,0 +1,91 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated by the vanadium vdl tool.
+// Source: encoding.vdl
+
+package testdata
+
+import (
+ // VDL system imports
+ "v.io/v23/vdl"
+
+ // VDL user imports
+ "v.io/x/ref/lib/discovery"
+)
+
+// PackAddressTest represents a test case for PackAddress.
+type PackAddressTest struct {
+ // In is the addresses to pack.
+ In []string
+ // Packed is the expected packed output.
+ Packed []byte
+}
+
+func (PackAddressTest) __VDLReflect(struct {
+ Name string `vdl:"v.io/x/ref/lib/discovery/testdata.PackAddressTest"`
+}) {
+}
+
+// PackEncryptionKeysTest represents a test case for PackEncryptionKeys
+type PackEncryptionKeysTest struct {
+	// Algo is the encryption algorithm that's in use. Ideally this would use a
+	// shared v23 type, but that isn't defined in vdl yet.
+ Algo discovery.EncryptionAlgorithm
+	// Keys are the encryption keys. Ideally this would use a
+	// shared v23 type, but that isn't defined in vdl yet.
+ Keys []discovery.EncryptionKey
+ // Packed is the expected output bytes.
+ Packed []byte
+}
+
+func (PackEncryptionKeysTest) __VDLReflect(struct {
+ Name string `vdl:"v.io/x/ref/lib/discovery/testdata.PackEncryptionKeysTest"`
+}) {
+}
+
+func init() {
+ vdl.Register((*PackAddressTest)(nil))
+ vdl.Register((*PackEncryptionKeysTest)(nil))
+}
+
+var PackAddressTestData = []PackAddressTest{
+ {
+ In: []string{
+ "a12345",
+ },
+ Packed: []byte("\x06a12345"),
+ },
+ {
+ In: []string{
+ "a1234",
+ "b5678",
+ "c9012",
+ },
+ Packed: []byte("\x05a1234\x05b5678\x05c9012"),
+ },
+ {},
+}
+
+var PackEncryptionKeysTestData = []PackEncryptionKeysTest{
+ {
+ Algo: 1,
+ Keys: []discovery.EncryptionKey{
+ discovery.EncryptionKey("0123456789"),
+ },
+ Packed: []byte("\x01\n0123456789"),
+ },
+ {
+ Algo: 2,
+ Keys: []discovery.EncryptionKey{
+ discovery.EncryptionKey("012345"),
+ discovery.EncryptionKey("123456"),
+ discovery.EncryptionKey("234567"),
+ },
+ Packed: []byte("\x02\x06012345\x06123456\x06234567"),
+ },
+ {
+ Packed: []byte("\x00"),
+ },
+}
diff --git a/lib/discovery/testdata/uuid.vdl b/lib/discovery/testdata/uuid.vdl
new file mode 100644
index 0000000..192aab7
--- /dev/null
+++ b/lib/discovery/testdata/uuid.vdl
@@ -0,0 +1,32 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains testdata for v.io/x/ref/lib/discovery/uuid_test.go. The
+// testdata is in a vdl file so that we can make sure the uuid implementations in
+// all the languages produce the same output.
+
+package testdata
+
+// UuidTestData represents the inputs and outputs for a uuid test.
+type UuidTestData struct {
+ // In is the input string.
+ In string
+ // Want is the expected uuid's human-readable string form.
+ Want string
+}
+
+const InterfaceNameTest = []UuidTestData{
+ UuidTestData{
+ In: "v.io",
+ Want: "2101363c-688d-548a-a600-34d506e1aad0",
+ },
+ UuidTestData{
+ In: "v.io/v23/abc",
+ Want: "6726c4e5-b6eb-5547-9228-b2913f4fad52",
+ },
+ UuidTestData{
+ In: "v.io/v23/abc/xyz",
+ Want: "be8a57d7-931d-5ee4-9243-0bebde0029a5",
+ },
+}
diff --git a/lib/discovery/testdata/uuid.vdl.go b/lib/discovery/testdata/uuid.vdl.go
new file mode 100644
index 0000000..ad67a8a
--- /dev/null
+++ b/lib/discovery/testdata/uuid.vdl.go
@@ -0,0 +1,45 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated by the vanadium vdl tool.
+// Source: uuid.vdl
+
+package testdata
+
+import (
+ // VDL system imports
+ "v.io/v23/vdl"
+)
+
+// UuidTestData represents the inputs and outputs for a uuid test.
+type UuidTestData struct {
+ // In is the input string.
+ In string
+ // Want is the expected uuid's human-readable string form.
+ Want string
+}
+
+func (UuidTestData) __VDLReflect(struct {
+ Name string `vdl:"v.io/x/ref/lib/discovery/testdata.UuidTestData"`
+}) {
+}
+
+func init() {
+ vdl.Register((*UuidTestData)(nil))
+}
+
+var InterfaceNameTest = []UuidTestData{
+ {
+ In: "v.io",
+ Want: "2101363c-688d-548a-a600-34d506e1aad0",
+ },
+ {
+ In: "v.io/v23/abc",
+ Want: "6726c4e5-b6eb-5547-9228-b2913f4fad52",
+ },
+ {
+ In: "v.io/v23/abc/xyz",
+ Want: "be8a57d7-931d-5ee4-9243-0bebde0029a5",
+ },
+}
diff --git a/lib/discovery/trigger_test.go b/lib/discovery/trigger_test.go
index da2db00..dc5a2b0 100644
--- a/lib/discovery/trigger_test.go
+++ b/lib/discovery/trigger_test.go
@@ -42,4 +42,11 @@
if got, want := <-done, 0; got != want {
t.Errorf("Trigger failed; got %v, but wanted %v", got, want)
}
+
+ // Make sure the callback is triggered even when it is added with a closed channel.
+ close(c0)
+ tr.Add(f0, c0)
+ if got, want := <-done, 0; got != want {
+ t.Errorf("Trigger failed; got %v, but wanted %v", got, want)
+ }
}
diff --git a/lib/discovery/types.vdl b/lib/discovery/types.vdl
new file mode 100644
index 0000000..066409d
--- /dev/null
+++ b/lib/discovery/types.vdl
@@ -0,0 +1,40 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package discovery
+
+import (
+ "v.io/v23/discovery"
+)
+
+type EncryptionAlgorithm int32
+type EncryptionKey []byte
+
+const (
+ NoEncryption = EncryptionAlgorithm(0)
+ TestEncryption = EncryptionAlgorithm(1)
+ IbeEncryption = EncryptionAlgorithm(2)
+)
+
+type Uuid []byte
+
+// Advertisement holds a set of service properties to advertise.
+type Advertisement struct {
+ Service discovery.Service
+
+ // The service UUID to advertise.
+ ServiceUuid Uuid
+
+ // Type of encryption applied to the advertisement so that it can
+ // only be decoded by authorized principals.
+ EncryptionAlgorithm EncryptionAlgorithm
+ // If the advertisement is encrypted, then the data required to
+ // decrypt it. The format of this data is a function of the algorithm.
+ EncryptionKeys []EncryptionKey
+
+ // TODO(jhahn): Add proximity.
+ // TODO(jhahn): Use proximity for Lost.
+ Lost bool
+}
+
diff --git a/lib/discovery/types.vdl.go b/lib/discovery/types.vdl.go
new file mode 100644
index 0000000..fb26f63
--- /dev/null
+++ b/lib/discovery/types.vdl.go
@@ -0,0 +1,71 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated by the vanadium vdl tool.
+// Source: types.vdl
+
+package discovery
+
+import (
+ // VDL system imports
+ "v.io/v23/vdl"
+
+ // VDL user imports
+ "v.io/v23/discovery"
+)
+
+type EncryptionAlgorithm int32
+
+func (EncryptionAlgorithm) __VDLReflect(struct {
+ Name string `vdl:"v.io/x/ref/lib/discovery.EncryptionAlgorithm"`
+}) {
+}
+
+type EncryptionKey []byte
+
+func (EncryptionKey) __VDLReflect(struct {
+ Name string `vdl:"v.io/x/ref/lib/discovery.EncryptionKey"`
+}) {
+}
+
+type Uuid []byte
+
+func (Uuid) __VDLReflect(struct {
+ Name string `vdl:"v.io/x/ref/lib/discovery.Uuid"`
+}) {
+}
+
+// Advertisement holds a set of service properties to advertise.
+type Advertisement struct {
+ Service discovery.Service
+ // The service UUID to advertise.
+ ServiceUuid Uuid
+ // Type of encryption applied to the advertisement so that it can
+ // only be decoded by authorized principals.
+ EncryptionAlgorithm EncryptionAlgorithm
+ // If the advertisement is encrypted, then the data required to
+ // decrypt it. The format of this data is a function of the algorithm.
+ EncryptionKeys []EncryptionKey
+ // TODO(jhahn): Add proximity.
+ // TODO(jhahn): Use proximity for Lost.
+ Lost bool
+}
+
+func (Advertisement) __VDLReflect(struct {
+ Name string `vdl:"v.io/x/ref/lib/discovery.Advertisement"`
+}) {
+}
+
+func init() {
+ vdl.Register((*EncryptionAlgorithm)(nil))
+ vdl.Register((*EncryptionKey)(nil))
+ vdl.Register((*Uuid)(nil))
+ vdl.Register((*Advertisement)(nil))
+}
+
+const NoEncryption = EncryptionAlgorithm(0)
+
+const TestEncryption = EncryptionAlgorithm(1)
+
+const IbeEncryption = EncryptionAlgorithm(2)
diff --git a/lib/discovery/uuid.go b/lib/discovery/uuid.go
index e18457b..2958663 100644
--- a/lib/discovery/uuid.go
+++ b/lib/discovery/uuid.go
@@ -18,14 +18,14 @@
)
// NewServiceUUID returns a version 5 UUID for the given interface name.
-func NewServiceUUID(interfaceName string) uuid.UUID {
- return uuid.NewSHA1(v23UUID, []byte(interfaceName))
+func NewServiceUUID(interfaceName string) Uuid {
+ return Uuid(uuid.NewSHA1(v23UUID, []byte(interfaceName)))
}
// NewInstanceUUID returns a version 4 (random) UUID. Mostly used for
// uniquely identifying the discovery service instance.
-func NewInstanceUUID() uuid.UUID {
- return uuid.NewRandom()
+func NewInstanceUUID() Uuid {
+ return Uuid(uuid.NewRandom())
}
// NewAttributeUUID returns a version 5 UUID for the given key.
diff --git a/lib/discovery/uuid_test.go b/lib/discovery/uuid_test.go
index 447de1d..bdac2bb 100644
--- a/lib/discovery/uuid_test.go
+++ b/lib/discovery/uuid_test.go
@@ -8,20 +8,14 @@
"testing"
"v.io/x/ref/lib/discovery"
+ "v.io/x/ref/lib/discovery/testdata"
+ "github.com/pborman/uuid"
)
func TestServiceUUID(t *testing.T) {
- tests := []struct {
- in, want string
- }{
- {"v.io", "2101363c-688d-548a-a600-34d506e1aad0"},
- {"v.io/v23/abc", "6726c4e5-b6eb-5547-9228-b2913f4fad52"},
- {"v.io/v23/abc/xyz", "be8a57d7-931d-5ee4-9243-0bebde0029a5"},
- }
-
- for _, test := range tests {
- if got := discovery.NewServiceUUID(test.in).String(); got != test.want {
- t.Errorf("ServiceUUID for %q mismatch; got %q, want %q", test.in, got, test.want)
+ for _, test := range testdata.InterfaceNameTest {
+ if got := uuid.UUID(discovery.NewServiceUUID(test.In)).String(); got != test.Want {
+ t.Errorf("ServiceUUID for %q mismatch; got %q, want %q", test.In, got, test.Want)
}
}
}
@@ -29,7 +23,7 @@
func TestInstanceUUID(t *testing.T) {
uuids := make(map[string]struct{})
for x := 0; x < 100; x++ {
- uuid := discovery.NewInstanceUUID().String()
+ uuid := uuid.UUID(discovery.NewInstanceUUID()).String()
if _, ok := uuids[uuid]; ok {
t.Errorf("InstanceUUID returned duplicated UUID %q", uuid)
}
diff --git a/lib/security/audit/principal.go b/lib/security/audit/principal.go
index 75df141..95dd8cc 100644
--- a/lib/security/audit/principal.go
+++ b/lib/security/audit/principal.go
@@ -71,23 +71,10 @@
return d, nil
}
-func (p *auditingPrincipal) BlessingsInfo(b security.Blessings) map[string][]security.Caveat {
- return p.principal.BlessingsInfo(b)
-}
-
func (p *auditingPrincipal) PublicKey() security.PublicKey { return p.principal.PublicKey() }
func (p *auditingPrincipal) Roots() security.BlessingRoots { return p.principal.Roots() }
func (p *auditingPrincipal) BlessingStore() security.BlessingStore { return p.principal.BlessingStore() }
-func (p *auditingPrincipal) Encrypter() security.BlessingsBasedEncrypter {
- return p.principal.Encrypter()
-}
-
-// TODO(ataly): Return an auditing decrypter instead.
-func (p *auditingPrincipal) Decrypter() security.BlessingsBasedDecrypter {
- return p.principal.Decrypter()
-}
-
func (p *auditingPrincipal) audit(err error, method string, args args, result interface{}) error {
if err != nil {
return err
diff --git a/lib/security/audit/principal_test.go b/lib/security/audit/principal_test.go
index faad182..75148b6 100644
--- a/lib/security/audit/principal_test.go
+++ b/lib/security/audit/principal_test.go
@@ -199,15 +199,9 @@
return d, p.NextError
}
-func (p *mockPrincipal) BlessingsInfo(b security.Blessings) map[string][]security.Caveat {
- return nil
-}
-
func (p *mockPrincipal) PublicKey() security.PublicKey { return p.NextResult.(security.PublicKey) }
func (p *mockPrincipal) Roots() security.BlessingRoots { return nil }
func (p *mockPrincipal) BlessingStore() security.BlessingStore { return nil }
-func (p *mockPrincipal) Encrypter() security.BlessingsBasedEncrypter { return nil }
-func (p *mockPrincipal) Decrypter() security.BlessingsBasedDecrypter { return nil }
type mockAuditor struct {
LastEntry audit.Entry
@@ -261,7 +255,7 @@
t.Fatal(err)
}
signer := security.NewInMemoryECDSASigner(key)
- p, err := security.CreatePrincipal(signer, nil, nil, nil, nil)
+ p, err := security.CreatePrincipal(signer, nil, nil)
if err != nil {
t.Fatal(err)
}
diff --git a/lib/security/bcrypter/crypter.go b/lib/security/bcrypter/crypter.go
new file mode 100644
index 0000000..5e93126
--- /dev/null
+++ b/lib/security/bcrypter/crypter.go
@@ -0,0 +1,296 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package bcrypter defines the mechanisms for blessings based
+// encryption and decryption.
+package bcrypter
+
+import (
+ "crypto/sha256"
+ "fmt"
+ "strings"
+ "sync"
+
+ "v.io/v23/context"
+ "v.io/v23/security"
+
+ "v.io/x/lib/ibe"
+)
+
+const hashTruncation = 16
+
+// Crypter provides operations for encrypting and decrypting messages for
+// principals with specific blessings.
+//
+// In particular, it offers a mechanism to encrypt a message for a specific
+// blessing pattern so that it can only be decrypted by crypters that possess
+// a private key for a blessing matched by that pattern. Such a private key
+// is generated by the identity provider that granted the blessing.
+type Crypter struct {
+ mu sync.RWMutex
+ // root blessing -> []ibe.Params
+ params map[string][]ibe.Params
+ // paramsId -> patternId -> ibe.PrivateKey
+ keys map[string]map[string]ibe.PrivateKey
+}
+
+// Ciphertext represents the ciphertext generated by a Crypter.
+type Ciphertext struct {
+ wire WireCiphertext
+}
+
+// Root represents an identity provider for the purposes of blessings based
+// encryption and decryption.
+//
+// It generates private keys for specific blessings which can be used
+// to decrypt any message encrypted for a pattern matched by the blessing (
+// assuming the encryption used this identity provider's parameters).
+type Root struct {
+ // master is the IBE Master that this root uses to extract IBE
+ // private keys.
+ master ibe.Master
+ // blessing is the blessing name of the identity provider. The identity
+ // provider can extract private keys for blessings that are extensions
+ // of this blessing name.
+ blessing string
+}
+
+// Params represents the public parameters of an identity provider (aka Root).
+type Params struct {
+ // blessing is the blessing name of the identity provider.
+ blessing string
+ // params are the public IBE params of the identity provider
+ params ibe.Params
+}
+
+// PrivateKey represents the private key corresponding to a blessing.
+//
+// The private key can be used for decrypting any message encrypted using
+// a pattern matched by the blessing (assuming the private key and encryption
+// used the same identity provider parameters).
+type PrivateKey struct {
+ // blessing is the blessing for which this private key was extracted.
+ blessing string
+ // params represents the public parameters of the identity provider
+ // that extracted this private key. The blessing must be an extension
+ // of params.blessing.
+ params Params
+ // keys contain private keys extracted for each blessing pattern that is
+ // matched by the blessing and is an extension of root.blessing.
+ //
+ // For example, if the blessing is "google/u/alice/phone" and root.blessing
+ // is "google/u" then the keys are extracted for patterns "google/u",
+ // "google/u/alice", "google/u/alice/phone", and "google/u/alice/phone/$".
+ //
+ // The private keys are listed in increasing order of the lengths of the
+ // corresponding patterns.
+ keys []ibe.PrivateKey
+}
+
+// Encrypt encrypts the provided fixed-length 'plaintext' so that it can
+// only be decrypted by a crypter possessing a private key for a blessing
+// matching the provided blessing pattern.
+//
+// Encryption makes use of the public parameters of the identity provider
+// that is authoritative on the set of blessings that match the provided
+// blessing pattern. These parameters must have been previously added to
+// this crypter via AddParams.
+func (c *Crypter) Encrypt(ctx *context.T, forPattern security.BlessingPattern, plaintext *[32]byte) (*Ciphertext, error) {
+ if !forPattern.IsValid() {
+ return nil, fmt.Errorf("provided blessing pattern %v is invalid", forPattern)
+ }
+ ciphertext := &Ciphertext{wire: WireCiphertext{PatternId: idPattern(forPattern), Bytes: make(map[string][]byte)}}
+ paramsFound := false
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ for name, ibeParamsList := range c.params {
+ if !isExtensionOf(forPattern, name) {
+ continue
+ }
+ for _, ibeParams := range ibeParamsList {
+ ctxt := make([]byte, ibe.CiphertextSize)
+ if err := ibeParams.Encrypt(string(forPattern), (*plaintext)[:], ctxt); err != nil {
+ return nil, NewErrInternal(ctx, err)
+ }
+ paramsId, err := idParams(ibeParams)
+ if err != nil {
+ return nil, NewErrInternal(ctx, err)
+ }
+ paramsFound = true
+ ciphertext.wire.Bytes[paramsId] = ctxt
+ }
+ }
+ if !paramsFound {
+ return nil, NewErrNoParams(ctx, forPattern)
+ }
+ return ciphertext, nil
+}
+
+// Decrypt decrypts the provided 'ciphertext' and returns the corresponding
+// plaintext.
+//
+// Decryption succeeds only if this crypter possesses a private key for a
+// blessing that matches the blessing pattern corresponding to the ciphertext.
+func (c *Crypter) Decrypt(ctx *context.T, ciphertext *Ciphertext) (*[32]byte, error) {
+ var (
+ plaintext [32]byte
+ keyFound bool
+ )
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ for paramsId, cbytes := range ciphertext.wire.Bytes {
+ if keys, found := c.keys[paramsId]; !found {
+ continue
+ } else if key, found := keys[ciphertext.wire.PatternId]; !found {
+ continue
+ } else if err := key.Decrypt(cbytes, plaintext[:]); err != nil {
+ return nil, NewErrInternal(ctx, err)
+ }
+ keyFound = true
+ break
+ }
+ if !keyFound {
+ return nil, NewErrPrivateKeyNotFound(ctx)
+ }
+ return &plaintext, nil
+}
+
+// AddKey adds the provided private key 'key' and the associated public
+// parameters (key.Params()) to this crypter.
+func (c *Crypter) AddKey(ctx *context.T, key *PrivateKey) error {
+ patterns := matchedBy(key.blessing, key.params.blessing)
+ if got, want := len(key.keys), len(patterns); got != want {
+ return NewErrInvalidPrivateKey(ctx, fmt.Errorf("got %d IBE private keys for blessing %v (and root blessing %v), expected %d", got, key.blessing, key.params.blessing, want))
+ }
+
+ paramsId, err := idParams(key.params.params)
+ if err != nil {
+ return NewErrInternal(ctx, err)
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.params[key.params.blessing] = append(c.params[key.params.blessing], key.params.params)
+ if _, found := c.keys[paramsId]; !found {
+ c.keys[paramsId] = make(map[string]ibe.PrivateKey)
+ }
+ for i, p := range patterns {
+ c.keys[paramsId][idPattern(p)] = key.keys[i]
+ }
+ return nil
+}
+
+// AddParams adds the provided identity provider parameters to this crypter.
+//
+// The added parameters would be used to encrypt plaintexts for blessing patterns
+// that the identity provider is authoritative on.
+func (c *Crypter) AddParams(ctx *context.T, params Params) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ // TODO(ataly, ashankar): Avoid adding duplicate params to the list.
+ c.params[params.blessing] = append(c.params[params.blessing], params.params)
+ return nil
+}
+
+// Blessing returns the blessing that this private key was extracted for.
+func (k *PrivateKey) Blessing() string {
+ return k.blessing
+}
+
+// Params returns the public parameters of the identity provider that
+// extracted this private key.
+func (k *PrivateKey) Params() Params {
+ return k.params
+}
+
+// Params returns the public parameters of the identity provider represented
+// by 'r'.
+func (r *Root) Params() Params {
+ return Params{blessing: r.blessing, params: r.master.Params()}
+}
+
+// Extract returns a private key for the provided blessing.
+//
+// The private key can be used for decrypting any message encrypted using a
+// pattern matched by the blessing (assuming the encryption made use of the
+// public parameters of this root).
+func (r *Root) Extract(ctx *context.T, blessing string) (*PrivateKey, error) {
+ patterns := matchedBy(blessing, r.blessing)
+ if len(patterns) == 0 {
+ return nil, fmt.Errorf("blessing %v does not match the blessing pattern this root is authoritative on: %v", blessing, r.blessing)
+ }
+ key := &PrivateKey{
+ blessing: blessing,
+ params: r.Params(),
+ keys: make([]ibe.PrivateKey, len(patterns)),
+ }
+ for i, p := range patterns {
+ ibeKey, err := r.master.Extract(string(p))
+ if err != nil {
+ return nil, NewErrInternal(ctx, err)
+ }
+ key.keys[i] = ibeKey
+ }
+ return key, nil
+}
+
+// Blessing returns the blessing name of the identity provider with
+// public parameters 'p'.
+func (p *Params) Blessing() string {
+ return p.blessing
+}
+
+// NewCrypter returns a new Crypter with an empty set of private keys
+// and identity provider parameters.
+func NewCrypter() *Crypter {
+ return &Crypter{params: make(map[string][]ibe.Params), keys: make(map[string]map[string]ibe.PrivateKey)}
+}
+
+// NewRoot returns a new root identity provider that has the provided
+// blessing name and uses the provided 'master' for setting up identity-based
+// encryption.
+func NewRoot(blessing string, master ibe.Master) *Root {
+ return &Root{blessing: blessing, master: master}
+}
+
+// matchedBy returns the set of blessing patterns (in increasing order
+// of length) that are matched by the provided 'blessing' and are equal
+// to or extensions of the blessing name 'root'.
+func matchedBy(blessing, root string) []security.BlessingPattern {
+ if !security.BlessingPattern(root).MatchedBy(blessing) {
+ return nil
+ }
+ patterns := make([]security.BlessingPattern, strings.Count(blessing, security.ChainSeparator)+2-strings.Count(string(root), security.ChainSeparator))
+ patterns[len(patterns)-1] = security.BlessingPattern(blessing).MakeNonExtendable()
+ patterns[len(patterns)-2] = security.BlessingPattern(blessing)
+ for idx := len(patterns) - 3; idx >= 0; idx-- {
+ blessing = blessing[0:strings.LastIndex(blessing, string(security.ChainSeparator))]
+ patterns[idx] = security.BlessingPattern(blessing)
+ }
+ return patterns
+}
+
+// idPattern returns a 128-bit truncated SHA-256 hash of a blessing pattern.
+func idPattern(pattern security.BlessingPattern) string {
+ h := sha256.Sum256([]byte(pattern))
+ truncated := h[:hashTruncation]
+ return string(truncated)
+}
+
+// idParams returns a 128-bit truncated SHA-256 hash of the marshaled IBE params.
+func idParams(params ibe.Params) (string, error) {
+ paramsBytes, err := ibe.MarshalParams(params)
+ if err != nil {
+ return "", err
+ }
+ h := sha256.Sum256(paramsBytes)
+ truncated := h[:hashTruncation]
+ return string(truncated), nil
+}
+
+// isExtensionOf returns true if all the blessings matching the provided
+// 'pattern' are an extension of the provided 'root' blessing.
+func isExtensionOf(pattern security.BlessingPattern, root string) bool {
+ return string(pattern) == root || strings.HasPrefix(string(pattern), root+security.ChainSeparator)
+}
diff --git a/lib/security/bcrypter/crypter_test.go b/lib/security/bcrypter/crypter_test.go
new file mode 100644
index 0000000..cea179c
--- /dev/null
+++ b/lib/security/bcrypter/crypter_test.go
@@ -0,0 +1,310 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bcrypter
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "testing"
+
+ "v.io/v23/context"
+ "v.io/v23/security"
+ "v.io/v23/verror"
+
+ "v.io/x/lib/ibe"
+)
+
+func newRoot(name string) *Root {
+ master, err := ibe.SetupBB1()
+ if err != nil {
+ panic(err)
+ }
+ return NewRoot(name, master)
+}
+
+func newPlaintext() [32]byte {
+ var m [32]byte
+ if n := copy(m[:], []byte("AThirtyTwoBytePieceOfTextThisIs!")); n != len(m) {
+ panic(fmt.Errorf("plaintext string must be %d bytes, not %d", len(m), n))
+ }
+ return m
+}
+
+func TestExtract(t *testing.T) {
+ ctx, shutdown := context.RootContext()
+ defer shutdown()
+
+ googleYoutube := newRoot("google/youtube")
+
+ // googleYoutube should not be able to extract a key for the blessing "google".
+ if _, err := googleYoutube.Extract(ctx, "google"); err == nil {
+ t.Fatal("extraction for google unexpectedly succeeded")
+ }
+
+ // googleYoutube should be able to extract keys for the following blessings.
+ blessings := []string{"google/youtube", "google/youtube/alice", "google/youtube/bob", "google/youtube/alice/phone"}
+ for _, b := range blessings {
+ key, err := googleYoutube.Extract(ctx, b)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got := key.Blessing(); got != b {
+ t.Fatalf("extracted key is for blessing %v, want key for blessing %v", got, b)
+ }
+ if got, want := key.Params(), googleYoutube.Params(); !reflect.DeepEqual(got, want) {
+ t.Fatalf("extract key is for params %v, want key for params %v", got, want)
+ }
+ }
+ // Every key should be unique.
+ key1, _ := googleYoutube.Extract(ctx, "google/youtube/alice")
+ key2, _ := googleYoutube.Extract(ctx, "google/youtube/alice")
+ if reflect.DeepEqual(key1, key2) {
+ t.Fatal("Two Extract operations yielded the same PrivateKey")
+ }
+}
+
+func TestEncrypt(t *testing.T) {
+ ctx, shutdown := context.RootContext()
+ defer shutdown()
+
+ var (
+ googleYoutube = newRoot("google/youtube")
+ google = newRoot("google")
+
+ encrypter = NewCrypter()
+ ptxt = newPlaintext()
+ )
+
+ // empty encrypter should not be able to encrypt for any pattern.
+ if _, err := encrypter.Encrypt(ctx, "google/youtube/alice", &ptxt); verror.ErrorID(err) != ErrNoParams.ID {
+ t.Fatalf("Got error %v, wanted error with ID %v", err, ErrNoParams.ID)
+ }
+
+ // add googleYoutube's params to the encrypter.
+ if err := encrypter.AddParams(ctx, googleYoutube.Params()); err != nil {
+ t.Fatal(err)
+ }
+ // encrypting for "google/youtube/alice" should now succeed.
+ if _, err := encrypter.Encrypt(ctx, "google/youtube/alice", &ptxt); err != nil {
+ t.Fatal(err)
+ }
+
+ // encrypting for pattern "google" should still fail as the encrypter
+ // does not have params that are authoritative on all blessings matching
+ // the pattern "google" (the googleYoutube params are authoritative on
+ // blessings matching "google/youtube").
+ if _, err := encrypter.Encrypt(ctx, "google", &ptxt); verror.ErrorID(err) != ErrNoParams.ID {
+ t.Fatalf("Got error %v, wanted error with ID %v", err, ErrNoParams.ID)
+ }
+ // add google's params to the encrypter.
+ if err := encrypter.AddParams(ctx, google.Params()); err != nil {
+ t.Fatal(err)
+ }
+ // encrypting for "google" should now succeed.
+ if _, err := encrypter.Encrypt(ctx, "google", &ptxt); err != nil {
+ t.Fatal(err)
+ }
+
+ // Encryption should succeed for all of the following patterns
+ patterns := []security.BlessingPattern{"google", "google/$", "google/alice", "google/bob", "google/bob/phone"}
+ for _, p := range patterns {
+ if _, err := encrypter.Encrypt(ctx, p, &ptxt); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ // Every ciphertext should be unique.
+ ctxt1, _ := encrypter.Encrypt(ctx, "google", &ptxt)
+ ctxt2, _ := encrypter.Encrypt(ctx, "google", &ptxt)
+ if reflect.DeepEqual(ctxt1, ctxt2) {
+ t.Fatal("Two Encrypt operations yielded the same Ciphertext")
+ }
+}
+
+func TestDecrypt(t *testing.T) {
+ ctx, shutdown := context.RootContext()
+ defer shutdown()
+
+ addParams := func(c *Crypter, params Params) {
+ if err := c.AddParams(ctx, params); err != nil {
+ t.Fatal(err)
+ }
+ }
+ extract := func(r *Root, b string) *PrivateKey {
+ key, err := r.Extract(ctx, b)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return key
+ }
+ var (
+ // Create two roots for the name "google".
+ google1 = newRoot("google")
+ google2 = newRoot("google")
+
+ encrypter = NewCrypter()
+ ptxt = newPlaintext()
+
+ googleAlice1 = extract(google1, "google/alice/phone")
+ googleAlice2 = extract(google2, "google/alice/tablet/app")
+ )
+
+ // Add roots google1 and google2 to the encrypter.
+ addParams(encrypter, google1.Params())
+ addParams(encrypter, google2.Params())
+ // encrypt for the pattern "google/alice"
+ ctxt, err := encrypter.Encrypt(ctx, "google/alice", &ptxt)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // A decrypter without any private key should not be able
+ // to decrypt this ciphertext.
+ decrypter := NewCrypter()
+ if _, err := decrypter.Decrypt(ctx, ctxt); verror.ErrorID(err) != ErrPrivateKeyNotFound.ID {
+ t.Fatalf("Got error %v, wanted error with ID %v", err, ErrPrivateKeyNotFound.ID)
+ }
+
+ // Add key googleAlice1 to the decrypter.
+ if err := decrypter.AddKey(ctx, googleAlice1); err != nil {
+ t.Fatal(err)
+ }
+ // Decryption should now succeed.
+ if got, err := decrypter.Decrypt(ctx, ctxt); err != nil {
+ t.Fatal(err)
+ } else if !bytes.Equal((*got)[:], ptxt[:]) {
+ t.Fatalf("Got plaintext %v, want %v", *got, ptxt)
+ }
+
+ // Decryption should have succeeded had the decrypter only contained
+ // googleAlice2.
+ decrypter = NewCrypter()
+ if err := decrypter.AddKey(ctx, googleAlice2); err != nil {
+ t.Fatal(err)
+ }
+ if got, err := decrypter.Decrypt(ctx, ctxt); err != nil {
+ t.Fatal(err)
+ } else if !bytes.Equal((*got)[:], ptxt[:]) {
+ t.Fatalf("Got plaintext %v, want %v", *got, ptxt)
+ }
+
+ // Decryption should fail for ciphertexts encrypted for the following
+ // patterns (At this point the decrypter only has a key for the blessing
+ // "google/alice/tablet/app" from the root google2).
+ patterns := []security.BlessingPattern{"google/alice/$", "google/bob", "google/alice/tablet/$", "google/bob/tablet"}
+ for _, p := range patterns {
+ ctxt, err := encrypter.Encrypt(ctx, p, &ptxt)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := decrypter.Decrypt(ctx, ctxt); verror.ErrorID(err) != ErrPrivateKeyNotFound.ID {
+ t.Fatalf("Got error %v, wanted error with ID %v", err, ErrPrivateKeyNotFound.ID)
+ }
+ }
+
+ // Adding the private key googleAlice2 should have also added
+ // google's public params. Thus encrypting for the following
+ // patterns should succeed.
+ patterns = []security.BlessingPattern{"google", "google/$", "google/alice", "google/bob", "google/bob/phone"}
+ for _, p := range patterns {
+ if _, err := decrypter.Encrypt(ctx, p, &ptxt); err != nil {
+ t.Fatal(err)
+ }
+ }
+ // But encrypting for the following patterns should fail.
+ patterns = []security.BlessingPattern{"youtube", "youtube/$", "youtube/alice"}
+ for _, p := range patterns {
+ if _, err := decrypter.Encrypt(ctx, p, &ptxt); verror.ErrorID(err) != ErrNoParams.ID {
+ t.Fatalf("Got error %v, wanted error with ID %v", err, ErrNoParams.ID)
+ }
+ }
+}
+
+func TestWireCiphertext(t *testing.T) {
+ ctx, shutdown := context.RootContext()
+ defer shutdown()
+ ptxt := newPlaintext()
+
+ encrypt := func(params Params, pattern security.BlessingPattern) *Ciphertext {
+ enc := NewCrypter()
+ if err := enc.AddParams(ctx, params); err != nil {
+ t.Fatal(err)
+ }
+ ctxt, err := enc.Encrypt(ctx, pattern, &ptxt)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return ctxt
+ }
+ decryptAndVerify := func(ctxt *Ciphertext, key *PrivateKey) error {
+ dec := NewCrypter()
+ if err := dec.AddKey(ctx, key); err != nil {
+ return err
+ }
+ if got, err := dec.Decrypt(ctx, ctxt); err != nil {
+ return err
+ } else if !bytes.Equal((*got)[:], ptxt[:]) {
+ return fmt.Errorf("got plaintext %v, want %v", *got, ptxt)
+ }
+ return nil
+ }
+
+ var (
+ google = newRoot("google")
+ params = google.Params()
+ key, _ = google.Extract(ctx, "google")
+ ctxt = encrypt(params, "google/$")
+ )
+ // Verify that the ciphertext 'ctxt' can be decrypted using
+ // private key 'key' into the desired plaintext.
+ if err := decryptAndVerify(ctxt, key); err != nil {
+ t.Fatal(err)
+ }
+
+ // Marshal and Unmarshal ciphertext 'ctxt' and verify that decryption
+ // with private key 'key' still succeeds.
+ var (
+ newCtxt Ciphertext
+ wireCtxt WireCiphertext
+ )
+ ctxt.ToWire(&wireCtxt)
+ newCtxt.FromWire(wireCtxt)
+ if err := decryptAndVerify(&newCtxt, key); err != nil {
+ t.Fatal(err)
+ }
+
+ // Marshal and Unmarshal the private key 'key' and verify that decryption
+ // of 'ctxt' still succeeds.
+ var (
+ newKey PrivateKey
+ wireKey WirePrivateKey
+ )
+ if err := key.ToWire(&wireKey); err != nil {
+ t.Fatal(err)
+ } else if err = newKey.FromWire(wireKey); err != nil {
+ t.Fatal(err)
+ }
+ if err := decryptAndVerify(ctxt, &newKey); err != nil {
+ t.Fatal(err)
+ }
+
+ // Marshal and Unmarshal the root params and verify that encryption
+ // still results in a ciphertext that can be decrypted with the private
+ // key 'key'.
+ var (
+ newParams Params
+ wireParams WireParams
+ )
+ if err := params.ToWire(&wireParams); err != nil {
+ t.Fatal(err)
+ } else if err = newParams.FromWire(wireParams); err != nil {
+ t.Fatal(err)
+ }
+ ctxt = encrypt(newParams, "google/$")
+ if err := decryptAndVerify(ctxt, key); err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/lib/security/bcrypter/errors.vdl b/lib/security/bcrypter/errors.vdl
index c69895d..ddb1968 100644
--- a/lib/security/bcrypter/errors.vdl
+++ b/lib/security/bcrypter/errors.vdl
@@ -4,17 +4,19 @@
package bcrypter
+import "v.io/v23/security"
+
error (
Internal(err error) {
"en": "internal error: {err}",
}
- InvalidArg(err error) {
- "en": "invalid argument: {err}",
- }
- InvalidScheme(gotScheme int32, supportedSchemes []int32) {
- "en": "invalid cryptographic scheme: {gotScheme}, supported schemes: {supportedSchemes}",
+ NoParams(pattern security.BlessingPattern) {
+ "en": "no public parameters available for encrypting for pattern: {pattern}",
}
PrivateKeyNotFound() {
"en": "no private key found for decrypting ciphertext",
}
+ InvalidPrivateKey(err error) {
+ "en": "private key is invalid: {err}",
+ }
)
diff --git a/lib/security/bcrypter/errors.vdl.go b/lib/security/bcrypter/errors.vdl.go
index b4f23dc..40d261d 100644
--- a/lib/security/bcrypter/errors.vdl.go
+++ b/lib/security/bcrypter/errors.vdl.go
@@ -12,20 +12,23 @@
"v.io/v23/context"
"v.io/v23/i18n"
"v.io/v23/verror"
+
+ // VDL user imports
+ "v.io/v23/security"
)
var (
ErrInternal = verror.Register("v.io/x/ref/lib/security/bcrypter.Internal", verror.NoRetry, "{1:}{2:} internal error: {3}")
- ErrInvalidArg = verror.Register("v.io/x/ref/lib/security/bcrypter.InvalidArg", verror.NoRetry, "{1:}{2:} invalid argument: {3}")
- ErrInvalidScheme = verror.Register("v.io/x/ref/lib/security/bcrypter.InvalidScheme", verror.NoRetry, "{1:}{2:} invalid cryptographic scheme: {3}, supported schemes: {4}")
+ ErrNoParams = verror.Register("v.io/x/ref/lib/security/bcrypter.NoParams", verror.NoRetry, "{1:}{2:} no public parameters available for encrypting for pattern: {3}")
ErrPrivateKeyNotFound = verror.Register("v.io/x/ref/lib/security/bcrypter.PrivateKeyNotFound", verror.NoRetry, "{1:}{2:} no private key found for decrypting ciphertext")
+ ErrInvalidPrivateKey = verror.Register("v.io/x/ref/lib/security/bcrypter.InvalidPrivateKey", verror.NoRetry, "{1:}{2:} private key is invalid: {3}")
)
func init() {
i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(ErrInternal.ID), "{1:}{2:} internal error: {3}")
- i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(ErrInvalidArg.ID), "{1:}{2:} invalid argument: {3}")
- i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(ErrInvalidScheme.ID), "{1:}{2:} invalid cryptographic scheme: {3}, supported schemes: {4}")
+ i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(ErrNoParams.ID), "{1:}{2:} no public parameters available for encrypting for pattern: {3}")
i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(ErrPrivateKeyNotFound.ID), "{1:}{2:} no private key found for decrypting ciphertext")
+ i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(ErrInvalidPrivateKey.ID), "{1:}{2:} private key is invalid: {3}")
}
// NewErrInternal returns an error with the ErrInternal ID.
@@ -33,17 +36,17 @@
return verror.New(ErrInternal, ctx, err)
}
-// NewErrInvalidArg returns an error with the ErrInvalidArg ID.
-func NewErrInvalidArg(ctx *context.T, err error) error {
- return verror.New(ErrInvalidArg, ctx, err)
-}
-
-// NewErrInvalidScheme returns an error with the ErrInvalidScheme ID.
-func NewErrInvalidScheme(ctx *context.T, gotScheme int32, supportedSchemes []int32) error {
- return verror.New(ErrInvalidScheme, ctx, gotScheme, supportedSchemes)
+// NewErrNoParams returns an error with the ErrNoParams ID.
+func NewErrNoParams(ctx *context.T, pattern security.BlessingPattern) error {
+ return verror.New(ErrNoParams, ctx, pattern)
}
// NewErrPrivateKeyNotFound returns an error with the ErrPrivateKeyNotFound ID.
func NewErrPrivateKeyNotFound(ctx *context.T) error {
return verror.New(ErrPrivateKeyNotFound, ctx)
}
+
+// NewErrInvalidPrivateKey returns an error with the ErrInvalidPrivateKey ID.
+func NewErrInvalidPrivateKey(ctx *context.T, err error) error {
+ return verror.New(ErrInvalidPrivateKey, ctx, err)
+}
diff --git a/lib/security/bcrypter/ibe.go b/lib/security/bcrypter/ibe.go
deleted file mode 100644
index d72aa87..0000000
--- a/lib/security/bcrypter/ibe.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2015 The Vanadium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package bcrypter
-
-import (
- "crypto/sha256"
- "errors"
- "fmt"
- "strings"
-
- "v.io/v23/context"
- "v.io/v23/security"
-
- "v.io/x/lib/ibe"
-)
-
-const hashTruncation = 16
-
-// ibeEncrypter implements an security.BlessingsBasedEncrypter that
-// uses the security.IBE cryptographic scheme.
-type ibeEncrypter struct {
- params ibe.Params
-}
-
-func (b *ibeEncrypter) Encrypt(ctx *context.T, forPatterns []security.BlessingPattern, plaintext *[32]byte) (*security.Ciphertext, error) {
- ciphertext := &security.Ciphertext{
- Scheme: security.IBE,
- Ciphertexts: make(map[string][]byte),
- }
- if len(forPatterns) == 0 {
- return ciphertext, nil
- }
- for _, p := range forPatterns {
- ctxt := make([]byte, ibe.CiphertextSize)
- if err := b.params.Encrypt(string(p), (*plaintext)[:], ctxt); err != nil {
- return nil, NewErrInternal(ctx, err)
- }
- h := hash(p)
- // Verify that the hash does not collide with the hashes of the patterns
- // seen so far in this loop.
- if _, ok := ciphertext.Ciphertexts[h]; ok {
- return nil, NewErrInternal(ctx, fmt.Errorf("cannot encrypt as the hash of the pattern %v collides with one of the other patterns", p))
- }
- ciphertext.Ciphertexts[h] = ctxt
- }
- return ciphertext, nil
-}
-
-// ibeDecrypter implements a security.BlessingsBasedDecrypter that
-// uses the security.IBE cryptographic scheme.
-type ibeDecrypter struct {
- keys map[string]ibe.PrivateKey
-}
-
-func (b *ibeDecrypter) Decrypt(ctx *context.T, ciphertext *security.Ciphertext) (*[32]byte, error) {
- if ciphertext.Scheme != security.IBE {
- return nil, NewErrInvalidScheme(ctx, int32(ciphertext.Scheme), []int32{int32(security.IBE)})
- }
- var (
- key ibe.PrivateKey
- keyFound bool
- err error
- plaintext [32]byte
- )
- for p, ctxt := range ciphertext.Ciphertexts {
- key, keyFound = b.keys[p]
- if !keyFound {
- continue
- }
- if err = key.Decrypt(ctxt, plaintext[:]); err == nil {
- break
- }
- }
- if !keyFound {
- return nil, NewErrPrivateKeyNotFound(ctx)
- }
- if err != nil {
- return nil, NewErrInternal(ctx, err)
- }
- return &plaintext, nil
-}
-
-// NewIBEEncrypter constucts a new encrypter using the provided ibe.Params
-// that uses the security.IBE cryptographic scheme.
-func NewIBEEncrypter(params ibe.Params) security.BlessingsBasedEncrypter {
- return &ibeEncrypter{params: params}
-}
-
-// NewIBEDecrypter constructs a new decrypter for the provided blessing using
-// provided slice of IBE private keys corresponding to the blessing. The
-// decrypter uses the security.IBE cryptographic scheme. See Also: ExtractPrivateKeys.
-func NewIBEDecrypter(blessing string, privateKeys []ibe.PrivateKey) (security.BlessingsBasedDecrypter, error) {
- if len(blessing) == 0 {
- return nil, errors.New("blessing cannot be empty")
- }
- decrypter := &ibeDecrypter{keys: make(map[string]ibe.PrivateKey)}
- patterns := matchedBy(blessing)
- if got, want := len(privateKeys), len(patterns); got != want {
- return nil, fmt.Errorf("got %d private keys for blessing %v, expected %d", got, blessing, want)
- }
- for i, p := range patterns {
- decrypter.keys[hash(p)] = privateKeys[i]
- }
- return decrypter, nil
-}
-
-// ExtractPrivateKeys returns a slice of IBE private keys for the provided
-// blessing, extracted using the provided IBE Master.
-//
-// The slice of private keys contains private keys extracted for each blessing
-// pattern matched by the blessing (i.e., the blessing pattern string is
-// the identity for which the private key is extracted). Furthermore, the private
-// keys are organized in increasing order of the lengths of the corresponding
-// patterns.
-func ExtractPrivateKeys(master ibe.Master, blessing string) ([]ibe.PrivateKey, error) {
- if len(blessing) == 0 {
- return nil, errors.New("blessing must be non-empty")
- }
- patterns := matchedBy(blessing)
- keys := make([]ibe.PrivateKey, len(patterns))
- for i, p := range patterns {
- ibeKey, err := master.Extract(string(p))
- if err != nil {
- return nil, err
- }
- keys[i] = ibeKey
- }
- return keys, nil
-}
-
-// matchedBy returns the set of blessing patterns (in increasing order
-// of length) matched by the provided blessing. The provided blessing
-// must be non-empty.
-func matchedBy(blessing string) []security.BlessingPattern {
- patterns := make([]security.BlessingPattern, strings.Count(blessing, security.ChainSeparator)+2)
- patterns[len(patterns)-1] = security.BlessingPattern(blessing) + security.ChainSeparator + security.NoExtension
- patterns[len(patterns)-2] = security.BlessingPattern(blessing)
- for idx := len(patterns) - 3; idx >= 0; idx-- {
- blessing = blessing[0:strings.LastIndex(blessing, string(security.ChainSeparator))]
- patterns[idx] = security.BlessingPattern(blessing)
- }
- return patterns
-}
-
-// hash returns a 128-bit truncated SHA-256 hash of a blessing pattern.
-func hash(pattern security.BlessingPattern) string {
- h := sha256.Sum256([]byte(pattern))
- truncated := h[:hashTruncation]
- return string(truncated)
-}
diff --git a/lib/security/bcrypter/ibe_test.go b/lib/security/bcrypter/ibe_test.go
deleted file mode 100644
index ea4091d..0000000
--- a/lib/security/bcrypter/ibe_test.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2015 The Vanadium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package bcrypter
-
-import (
- "bytes"
- "testing"
-
- "v.io/v23/context"
- "v.io/v23/security"
- "v.io/v23/verror"
-
- "v.io/x/lib/ibe"
-)
-
-func TestIBECrypter(t *testing.T) {
- blessing := "google/bob/tablet"
- newPlaintext := func() [32]byte {
- var m [32]byte
- if n := copy(m[:], []byte("AThirtyTwoBytePieceOfTextThisIs!")); n != len(m) {
- t.Fatalf("plaintext string must be %d bytes, not %d", len(m), n)
- }
- return m
- }
- master, err := ibe.SetupBB1()
- if err != nil {
- t.Fatal(err)
- }
- privateKeys, err := ExtractPrivateKeys(master, blessing)
- if err != nil {
- t.Fatal(err)
- }
-
- encrypter := NewIBEEncrypter(master.Params())
- decrypter, err := NewIBEDecrypter(blessing, privateKeys)
- if err != nil {
- t.Fatal(err)
- }
- msg := newPlaintext()
-
- ctx, shutdown := context.RootContext()
- defer shutdown()
-
- // Validate that bob's tablets can only decrypt messages encrypted
- // for patterns matched by its blessings.
- test := struct {
- valid, invalid [][]security.BlessingPattern
- }{
- valid: [][]security.BlessingPattern{
- []security.BlessingPattern{"google"},
- []security.BlessingPattern{"google/bob"},
- []security.BlessingPattern{"google/bob/tablet"},
- []security.BlessingPattern{"google/bob/tablet/$"},
- []security.BlessingPattern{"google/bob", "google/$"},
- },
- invalid: [][]security.BlessingPattern{
- nil,
- []security.BlessingPattern{"google/$"},
- []security.BlessingPattern{"google/bob/$", "samsung/tablet"},
- []security.BlessingPattern{"google/bob/tablet/youtube"},
- },
- }
- var (
- ciphertext *security.Ciphertext
- plaintext *[32]byte
- )
- for _, patterns := range test.valid {
- if ciphertext, err = encrypter.Encrypt(ctx, patterns, &msg); err != nil {
- t.Fatal(err)
- }
- if plaintext, err = decrypter.Decrypt(ctx, ciphertext); err != nil || !bytes.Equal((*plaintext)[:], msg[:]) {
- t.Fatalf("Ciphertext for patterns %v: decryption returned %v, want nil", patterns, err)
- }
- }
- for _, patterns := range test.invalid {
- if ciphertext, err = encrypter.Encrypt(ctx, patterns, &msg); err != nil {
- t.Fatal(err)
- }
- if plaintext, err = decrypter.Decrypt(ctx, ciphertext); verror.ErrorID(err) != ErrPrivateKeyNotFound.ID {
- t.Fatalf("Ciphertext for patterns %v, decryption succeeded, wanted it to fail", patterns)
- }
- }
-}
diff --git a/lib/security/bcrypter/marshal.go b/lib/security/bcrypter/marshal.go
new file mode 100644
index 0000000..4ebc12f
--- /dev/null
+++ b/lib/security/bcrypter/marshal.go
@@ -0,0 +1,75 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bcrypter
+
+import (
+ "v.io/x/lib/ibe"
+)
+
+// ToWire marshals the Ciphertext 'c' into the WireCiphertext 'wire'
+func (c *Ciphertext) ToWire(wire *WireCiphertext) {
+ *wire = c.wire
+}
+
+// FromWire unmarshals the provided WireCiphertext into the Ciphertext 'c'.
+func (c *Ciphertext) FromWire(wire WireCiphertext) {
+ c.wire = wire
+}
+
+// ToWire marshals the Params 'p' into the WireParams 'wire'.
+func (p *Params) ToWire(wire *WireParams) error {
+ ibeParamsBytes, err := ibe.MarshalParams(p.params)
+ if err != nil {
+ return err
+ }
+ wire.Blessing = p.blessing
+ wire.Params = ibeParamsBytes
+ return nil
+}
+
+// FromWire unmarshals the provided WireParams into the Params 'p'.
+func (p *Params) FromWire(wire WireParams) error {
+ ibeParams, err := ibe.UnmarshalParams(wire.Params)
+ if err != nil {
+ return err
+ }
+ p.params = ibeParams
+ p.blessing = wire.Blessing
+ return nil
+}
+
+// ToWire marshals the PrivateKey 'k' into the WirePrivateKey 'wire'.
+func (k *PrivateKey) ToWire(wire *WirePrivateKey) error {
+ if err := k.params.ToWire(&wire.Params); err != nil {
+ return err
+ }
+ wire.Blessing = k.blessing
+ wire.Keys = make([][]byte, len(k.keys))
+ var err error
+ for i, ibeKey := range k.keys {
+ if wire.Keys[i], err = ibe.MarshalPrivateKey(ibeKey); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// FromWire unmarshals the provided WirePrivateKey into the PrivateKey 'k'.
+func (k *PrivateKey) FromWire(wire WirePrivateKey) error {
+ var params Params
+ if err := params.FromWire(wire.Params); err != nil {
+ return err
+ }
+ k.blessing = wire.Blessing
+ k.params = params
+ k.keys = make([]ibe.PrivateKey, len(wire.Keys))
+ var err error
+ for i, ibeKeyBytes := range wire.Keys {
+ if k.keys[i], err = ibe.UnmarshalPrivateKey(k.params.params, ibeKeyBytes); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/lib/security/bcrypter/types.vdl b/lib/security/bcrypter/types.vdl
new file mode 100644
index 0000000..705a0ae
--- /dev/null
+++ b/lib/security/bcrypter/types.vdl
@@ -0,0 +1,57 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bcrypter
+
+// WireCiphertext represents the wire format of the ciphertext
+// generated by a Crypter.
+type WireCiphertext struct {
+ // PatternId is an identifier of the blessing pattern that this
+ // ciphertext is for. It is represented by a 16 byte truncated
+ // SHA256 hash of the pattern.
+ PatternId string
+ // Bytes is a map from an identifier of the public IBE params to
+ // the ciphertext bytes that were generated using those params.
+ //
+ // The params identifier is a 16 byte truncated SHA256 hash
+ // of the marshaled form of the IBE params.
+ Bytes map[string][]byte
+}
+
+// WireParams represents the wire format of the public parameters
+// of an identity provider (aka Root).
+type WireParams struct {
+ // Blessing is the blessing name of the identity provider. The identity
+ // provider can extract private keys for blessings that are extensions
+ // of this blessing name.
+ Blessing string
+ // Params is the marshaled form of the public IBE params of the
+ // identity provider.
+ Params []byte
+}
+
+// WirePrivateKey represents the wire format of the private key corresponding
+// to a blessing.
+type WirePrivateKey struct {
+ // Blessing is the blessing for which this private key was extracted.
+ Blessing string
+ // Params are the public parameters of the identity provider that extracted
+ // this private key.
+ Params WireParams
+ // Keys contain the extracted IBE private keys for each pattern that is
+ // matched by the blessing and is an extension of the identity provider's
+ // name. The keys are enumerated in increasing order of the lengths of the
+ // corresponding patterns.
+ //
+ // For example, if the blessing is "google/u/alice/phone" and the identity
+ // provider's name is "google/u" then the keys are extracted for the patterns
+ // - "google/u"
+ // - "google/u/alice"
+ // - "google/u/alice/phone"
+ // - "google/u/alice/phone/$"
+ //
+ // The private keys are listed in increasing order of the lengths of the
+ // corresponding patterns.
+ Keys [][]byte
+}
diff --git a/lib/security/bcrypter/types.vdl.go b/lib/security/bcrypter/types.vdl.go
new file mode 100644
index 0000000..62f604c
--- /dev/null
+++ b/lib/security/bcrypter/types.vdl.go
@@ -0,0 +1,86 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated by the vanadium vdl tool.
+// Source: types.vdl
+
+package bcrypter
+
+import (
+ // VDL system imports
+ "v.io/v23/vdl"
+)
+
+// WireCiphertext represents the wire format of the ciphertext
+// generated by a Crypter.
+type WireCiphertext struct {
+ // PatternId is an identifier of the blessing pattern that this
+ // ciphertext is for. It is represented by a 16 byte truncated
+ // SHA256 hash of the pattern.
+ PatternId string
+ // Bytes is a map from an identifier of the public IBE params to
+ // the ciphertext bytes that were generated using those params.
+ //
+ // The params identifier is a 16 byte truncated SHA256 hash
+ // of the marshaled form of the IBE params.
+ Bytes map[string][]byte
+}
+
+func (WireCiphertext) __VDLReflect(struct {
+ Name string `vdl:"v.io/x/ref/lib/security/bcrypter.WireCiphertext"`
+}) {
+}
+
+// WireParams represents the wire format of the public parameters
+// of an identity provider (aka Root).
+type WireParams struct {
+ // Blessing is the blessing name of the identity provider. The identity
+ // provider can extract private keys for blessings that are extensions
+ // of this blessing name.
+ Blessing string
+ // Params is the marshaled form of the public IBE params of the
+ // identity provider.
+ Params []byte
+}
+
+func (WireParams) __VDLReflect(struct {
+ Name string `vdl:"v.io/x/ref/lib/security/bcrypter.WireParams"`
+}) {
+}
+
+// WirePrivateKey represents the wire format of the private key corresponding
+// to a blessing.
+type WirePrivateKey struct {
+ // Blessing is the blessing for which this private key was extracted.
+ Blessing string
+ // Params are the public parameters of the identity provider that extracted
+ // this private key.
+ Params WireParams
+ // Keys contain the extracted IBE private keys for each pattern that is
+ // matched by the blessing and is an extension of the identity provider's
+ // name. The keys are enumerated in increasing order of the lengths of the
+ // corresponding patterns.
+ //
+ // For example, if the blessing is "google/u/alice/phone" and the identity
+ // provider's name is "google/u" then the keys are extracted for the patterns
+ // - "google/u"
+ // - "google/u/alice"
+ // - "google/u/alice/phone"
+ // - "google/u/alice/phone/$"
+ //
+ // The private keys are listed in increasing order of the lengths of the
+ // corresponding patterns.
+ Keys [][]byte
+}
+
+func (WirePrivateKey) __VDLReflect(struct {
+ Name string `vdl:"v.io/x/ref/lib/security/bcrypter.WirePrivateKey"`
+}) {
+}
+
+func init() {
+ vdl.Register((*WireCiphertext)(nil))
+ vdl.Register((*WireParams)(nil))
+ vdl.Register((*WirePrivateKey)(nil))
+}
diff --git a/lib/security/principal.go b/lib/security/principal.go
index 39d3dfb..9532970 100644
--- a/lib/security/principal.go
+++ b/lib/security/principal.go
@@ -44,7 +44,7 @@
if err != nil {
return nil, err
}
- return security.CreatePrincipal(security.NewInMemoryECDSASigner(priv), newInMemoryBlessingStore(pub), newInMemoryBlessingRoots(), nil, nil)
+ return security.CreatePrincipal(security.NewInMemoryECDSASigner(priv), newInMemoryBlessingStore(pub), newInMemoryBlessingRoots())
}
// PrincipalStateSerializer is used to persist BlessingRoots/BlessingStore state for
@@ -73,9 +73,9 @@
// same serializers. Otherwise, the state (ie: BlessingStore, BlessingRoots) is kept in memory.
func NewPrincipalFromSigner(signer security.Signer, state *PrincipalStateSerializer) (security.Principal, error) {
if state == nil {
- return security.CreatePrincipal(signer, newInMemoryBlessingStore(signer.PublicKey()), newInMemoryBlessingRoots(), nil, nil)
+ return security.CreatePrincipal(signer, newInMemoryBlessingStore(signer.PublicKey()), newInMemoryBlessingRoots())
}
- serializationSigner, err := security.CreatePrincipal(signer, nil, nil, nil, nil)
+ serializationSigner, err := security.CreatePrincipal(signer, nil, nil)
if err != nil {
return nil, verror.New(errCantCreateSigner, nil, err)
}
@@ -87,7 +87,7 @@
if err != nil {
return nil, verror.New(errCantLoadBlessingStore, nil, err)
}
- return security.CreatePrincipal(signer, blessingStore, blessingRoots, nil, nil)
+ return security.CreatePrincipal(signer, blessingStore, blessingRoots)
}
// LoadPersistentPrincipal reads state for a principal (private key, BlessingRoots, BlessingStore)
diff --git a/lib/security/serialization/serialization_test.go b/lib/security/serialization/serialization_test.go
index 241b3e2..3ed62a7 100644
--- a/lib/security/serialization/serialization_test.go
+++ b/lib/security/serialization/serialization_test.go
@@ -68,7 +68,7 @@
if err != nil {
panic(err)
}
- p, err := security.CreatePrincipal(security.NewInMemoryECDSASigner(key), nil, nil, nil, nil)
+ p, err := security.CreatePrincipal(security.NewInMemoryECDSASigner(key), nil, nil)
if err != nil {
panic(err)
}
diff --git a/lib/vdl/codegen/java/util_val.go b/lib/vdl/codegen/java/util_val.go
index 3853435..ff6ffe3 100644
--- a/lib/vdl/codegen/java/util_val.go
+++ b/lib/vdl/codegen/java/util_val.go
@@ -59,7 +59,7 @@
return "false"
}
case vdl.Byte:
- return "(byte)" + strconv.FormatUint(uint64(v.Byte()), 10)
+ return "(byte)0x" + strconv.FormatUint(uint64(v.Byte()), 16)
case vdl.Uint16:
return fmt.Sprintf("new %s((short) %s)", javaType(v.Type(), true, env), strconv.FormatUint(v.Uint(), 10))
case vdl.Int16:
diff --git a/lib/vdl/codegen/javascript/gen.go b/lib/vdl/codegen/javascript/gen.go
index bb86600..5547c0d 100644
--- a/lib/vdl/codegen/javascript/gen.go
+++ b/lib/vdl/codegen/javascript/gen.go
@@ -203,28 +203,22 @@
}
}
-func primitiveWithOptionalName(primitive, name string) string {
- if name == "" {
- return "types." + primitive
- }
- return "new vdl.Type({kind: vdl.kind." + primitive + ", name: '" + name + "'})"
-}
-
// typedConst returns a javascript string representing a const that is always
// wrapped with type information
func typedConst(names typeNames, v *vdl.Value) string {
- switch v.Kind() {
- case vdl.Any, vdl.TypeObject:
+ if v.Kind() == vdl.TypeObject || v.Kind() == vdl.Any {
return untypedConst(names, v)
- default:
- // We call canonicalize.reduce so that we convert to native types
- // The constructor would have done the reduction of the field values
- // but it doesn't convert to native types.
- return fmt.Sprintf("canonicalize.reduce(new %s(%s, true), %s)",
- names.LookupConstructor(v.Type()),
- untypedConst(names, v),
- names.LookupType(v.Type()))
}
+ if v.Kind() == vdl.Enum && names.IsDefinedInExternalPkg(v.Type()) {
+ return fmt.Sprintf("%s.%s", qualifiedName(v.Type()), vdlutil.ToConstCase(v.EnumLabel()))
+ }
+ // We call canonicalize.reduce so that we convert to native types
+ // The constructor would have done the reduction of the field values
+ // but it doesn't convert to native types.
+ return fmt.Sprintf("canonicalize.reduce(new %s(%s, true), %s)",
+ names.LookupConstructor(v.Type()),
+ untypedConst(names, v),
+ names.LookupType(v.Type()))
}
// Returns a Not Implemented stub for the method
diff --git a/lib/vdl/codegen/javascript/pkg_types.go b/lib/vdl/codegen/javascript/pkg_types.go
index b2dc8fd..993f277 100644
--- a/lib/vdl/codegen/javascript/pkg_types.go
+++ b/lib/vdl/codegen/javascript/pkg_types.go
@@ -35,16 +35,33 @@
return tn.constructorFromTypeName(name)
}
- pkgPath, name := vdl.SplitIdent(t.Name())
- pkgParts := strings.Split(pkgPath, "/")
- pkgName := pkgParts[len(pkgParts)-1]
- return fmt.Sprintf("%s.%s", pkgName, name)
+ return qualifiedName(t)
}
func (tn typeNames) constructorFromTypeName(name string) string {
return "(vdl.registry.lookupOrCreateConstructor(" + name + "))"
}
+// IsDefinedInExternalPkg reports whether the type is defined in a different package.
+func (tn typeNames) IsDefinedInExternalPkg(t *vdl.Type) bool {
+ if _, ok := builtinJSType(t); ok {
+ return false
+ }
+ if _, ok := tn[t]; ok {
+ return false
+ }
+ return true
+}
+
+// qualifiedName returns a name representing the type prefixed by
+// its package name (e.g. "a.X")
+func qualifiedName(t *vdl.Type) string {
+ pkgPath, name := vdl.SplitIdent(t.Name())
+ pkgParts := strings.Split(pkgPath, "/")
+ pkgName := pkgParts[len(pkgParts)-1]
+ return fmt.Sprintf("%s.%s", pkgName, name)
+}
+
// LookupType returns a string representing the type.
// - If it is a built in type, return the name.
// - Otherwise get type type from the constructor.
diff --git a/lib/vdl/codegen/vdlgen/import.go b/lib/vdl/codegen/vdlgen/import.go
index 248b936..348be58 100644
--- a/lib/vdl/codegen/vdlgen/import.go
+++ b/lib/vdl/codegen/vdlgen/import.go
@@ -8,6 +8,8 @@
// TODO(toddw): Add tests
import (
+ "strconv"
+
"v.io/x/ref/lib/vdl/codegen"
)
@@ -22,7 +24,7 @@
if imp.Name != "" {
s += imp.Name + " "
}
- s += imp.Path
+ s += strconv.Quote(imp.Path)
}
s += "\n)"
}
diff --git a/runtime/factories/fake/fake.go b/runtime/factories/fake/fake.go
index 309e282..0972209 100644
--- a/runtime/factories/fake/fake.go
+++ b/runtime/factories/fake/fake.go
@@ -24,6 +24,7 @@
"v.io/x/ref/runtime/internal/lib/xwebsocket"
// TODO(suharshs): Remove these once we switch to the flow protocols.
+ _ "v.io/x/ref/runtime/internal/flow/protocols/local"
_ "v.io/x/ref/runtime/internal/rpc/protocols/tcp"
_ "v.io/x/ref/runtime/internal/rpc/protocols/ws"
_ "v.io/x/ref/runtime/internal/rpc/protocols/wsh"
diff --git a/runtime/internal/flow/conn/auth.go b/runtime/internal/flow/conn/auth.go
index 7f95652..16ac858 100644
--- a/runtime/internal/flow/conn/auth.go
+++ b/runtime/internal/flow/conn/auth.go
@@ -16,12 +16,14 @@
"v.io/v23/context"
"v.io/v23/flow"
"v.io/v23/flow/message"
+ "v.io/v23/naming"
"v.io/v23/rpc/version"
"v.io/v23/security"
"v.io/v23/verror"
"v.io/v23/vom"
slib "v.io/x/ref/lib/security"
iflow "v.io/x/ref/runtime/internal/flow"
+ inaming "v.io/x/ref/runtime/internal/naming"
)
var (
@@ -30,12 +32,17 @@
)
func (c *Conn) dialHandshake(ctx *context.T, versions version.RPCVersionRange, auth flow.PeerAuthorizer) error {
- binding, err := c.setup(ctx, versions)
+ binding, remoteEndpoint, err := c.setup(ctx, versions)
if err != nil {
return err
}
-
- bflow := c.newFlowLocked(ctx, blessingsFlowID, 0, 0, true, true)
+ c.isProxy = c.remote.RoutingID() != naming.NullRoutingID && c.remote.RoutingID() != remoteEndpoint.RoutingID()
+ // We use the remote ends local endpoint as our remote endpoint when the routingID's
+ // of the endpoints differ. This is an indicator that we are talking to a proxy.
+ // This means that the manager will need to dial a subsequent conn on this conn
+ // to the end server.
+ c.remote.(*inaming.Endpoint).RID = remoteEndpoint.RoutingID()
+ bflow := c.newFlowLocked(ctx, blessingsFlowID, 0, 0, nil, true, true)
bflow.releaseLocked(DefaultBytesBufferedPerFlow)
c.blessingsFlow = newBlessingsFlow(ctx, &c.loopWG, bflow)
@@ -58,35 +65,39 @@
lAuth := &message.Auth{
ChannelBinding: signedBinding,
}
- // We only send our blessings if we are a server in addition to being a client,
+ // We only send our real blessings if we are a server in addition to being a client,
// and we are not talking through a proxy.
- // Otherwise, we only send our public key.
- if c.handler != nil && !c.isProxy {
- c.loopWG.Add(1)
- lAuth.BlessingsKey, _, err = c.blessingsFlow.send(ctx, c.lBlessings, nil)
- if err != nil {
- return err
- }
- // We send discharges asynchronously to prevent making a second RPC while
- // trying to build up the connection for another. If the two RPCs happen to
- // go to the same server a deadlock will result.
- // This commonly happens when we make a Resolve call. During the Resolve we
- // will try to fetch discharges to send to the mounttable, leading to a
- // Resolve of the discharge server name. The two resolve calls may be to
- // the same mounttable.
- defer func() { go c.refreshDischarges(ctx) }()
+ // Otherwise, we only send our public key through a nameless blessings object.
+ if c.lBlessings.IsZero() || c.handler == nil || c.isProxy {
+ c.lBlessings, _ = security.NamelessBlessing(v23.GetPrincipal(ctx).PublicKey())
}
- lAuth.PublicKey = v23.GetPrincipal(ctx).PublicKey()
- return c.mp.writeMsg(ctx, lAuth)
+ if lAuth.BlessingsKey, _, err = c.blessingsFlow.send(ctx, c.lBlessings, nil); err != nil {
+ return err
+ }
+ if err = c.mp.writeMsg(ctx, lAuth); err != nil {
+ return err
+ }
+ // We send discharges asynchronously to prevent making a second RPC while
+ // trying to build up the connection for another. If the two RPCs happen to
+ // go to the same server a deadlock will result.
+ // This commonly happens when we make a Resolve call. During the Resolve we
+ // will try to fetch discharges to send to the mounttable, leading to a
+ // Resolve of the discharge server name. The two resolve calls may be to
+ // the same mounttable.
+ c.loopWG.Add(1)
+ go c.refreshDischarges(ctx)
+ return nil
}
func (c *Conn) acceptHandshake(ctx *context.T, versions version.RPCVersionRange) error {
- binding, err := c.setup(ctx, versions)
+ binding, remoteEndpoint, err := c.setup(ctx, versions)
if err != nil {
return err
}
+ c.isProxy = false
+ c.remote = remoteEndpoint
c.blessingsFlow = newBlessingsFlow(ctx, &c.loopWG,
- c.newFlowLocked(ctx, blessingsFlowID, 0, 0, true, true))
+ c.newFlowLocked(ctx, blessingsFlowID, 0, 0, nil, true, true))
signedBinding, err := v23.GetPrincipal(ctx).Sign(append(authAcceptorTag, binding...))
if err != nil {
return err
@@ -105,10 +116,10 @@
return err
}
-func (c *Conn) setup(ctx *context.T, versions version.RPCVersionRange) ([]byte, error) {
+func (c *Conn) setup(ctx *context.T, versions version.RPCVersionRange) ([]byte, naming.Endpoint, error) {
pk, sk, err := box.GenerateKey(rand.Reader)
if err != nil {
- return nil, err
+ return nil, nil, err
}
lSetup := &message.Setup{
Versions: versions,
@@ -126,35 +137,26 @@
if err != nil {
<-ch
if verror.ErrorID(err) == message.ErrWrongProtocol.ID {
- return nil, err
+ return nil, nil, err
}
- return nil, NewErrRecv(ctx, "unknown", err)
+ return nil, nil, NewErrRecv(ctx, "unknown", err)
}
rSetup, valid := msg.(*message.Setup)
if !valid {
<-ch
- return nil, NewErrUnexpectedMsg(ctx, reflect.TypeOf(msg).String())
+ return nil, nil, NewErrUnexpectedMsg(ctx, reflect.TypeOf(msg).String())
}
if err := <-ch; err != nil {
- return nil, NewErrSend(ctx, "setup", c.remote.String(), err)
+ return nil, nil, NewErrSend(ctx, "setup", c.remote.String(), err)
}
if c.version, err = version.CommonVersion(ctx, lSetup.Versions, rSetup.Versions); err != nil {
- return nil, err
+ return nil, nil, err
}
if c.local == nil {
c.local = rSetup.PeerRemoteEndpoint
}
- // We use the remote ends local endpoint as our remote endpoint when the routingID's
- // of the endpoints differ. This as an indicator to the manager that we are talking to a proxy.
- // This means that the manager will need to dial a subsequent conn on this conn
- // to the end server.
- // TODO(suharshs): Determine how to authorize the proxy.
- c.isProxy = c.remote != nil && c.remote.RoutingID() != rSetup.PeerLocalEndpoint.RoutingID()
- if c.remote == nil || c.isProxy {
- c.remote = rSetup.PeerLocalEndpoint
- }
if rSetup.PeerNaClPublicKey == nil {
- return nil, NewErrMissingSetupOption(ctx, "peerNaClPublicKey")
+ return nil, nil, NewErrMissingSetupOption(ctx, "peerNaClPublicKey")
}
binding := c.mp.setupEncryption(ctx, pk, sk, rSetup.PeerNaClPublicKey)
// if we're encapsulated in another flow, tell that flow to stop
@@ -162,7 +164,7 @@
if f, ok := c.mp.rw.(*flw); ok {
f.disableEncryption()
}
- return binding, nil
+ return binding, rSetup.PeerLocalEndpoint, nil
}
func (c *Conn) readRemoteAuth(ctx *context.T, binding []byte, dialer bool) (security.Blessings, map[string]security.Discharge, error) {
@@ -183,15 +185,12 @@
return security.Blessings{}, nil, err
}
}
- c.mu.Lock()
- c.rPublicKey = rauth.PublicKey
- c.mu.Unlock()
c.rBKey = rauth.BlessingsKey
// Only read the blessings if we were the dialer. Any blessings from the dialer
// will be sent later.
var rBlessings security.Blessings
var rDischarges map[string]security.Discharge
- if rauth.BlessingsKey != 0 && dialer {
+ if rauth.BlessingsKey != 0 {
var err error
// TODO(mattr): Make sure we cancel out of this at some point.
rBlessings, rDischarges, err = c.blessingsFlow.getRemote(ctx, rauth.BlessingsKey, rauth.DischargeKey)
@@ -308,9 +307,6 @@
switch bd := bd.(type) {
case BlessingsFlowMessageBlessings:
bkey, blessings := bd.Value.BKey, bd.Value.Blessings
- if bkey == noExist {
- return nil
- }
// When accepting, make sure the blessings received are bound to the conn's
// remote public key.
b.f.conn.mu.Lock()
@@ -324,9 +320,6 @@
b.mu.Unlock()
case BlessingsFlowMessageDischarges:
bkey, dkey, discharges := bd.Value.BKey, bd.Value.DKey, bd.Value.Discharges
- if bkey == noExist {
- return nil
- }
b.mu.Lock()
b.incoming.discharges[dkey] = discharges
b.incoming.dkeys[bkey] = dkey
@@ -340,12 +333,9 @@
defer b.mu.Unlock()
b.mu.Lock()
for {
- if bkey == noExist {
- return security.Blessings{}, nil, nil
- }
blessings, hasB := b.incoming.blessings[bkey]
if hasB {
- if dkey == noExist {
+ if dkey == 0 {
return blessings, nil, nil
}
discharges, hasD := b.incoming.discharges[dkey]
@@ -367,9 +357,6 @@
defer b.mu.Unlock()
b.mu.Lock()
for {
- if bkey == noExist {
- return security.Blessings{}, nil, nil
- }
blessings, has := b.incoming.blessings[bkey]
if has {
dkey := b.incoming.dkeys[bkey]
diff --git a/runtime/internal/flow/conn/auth_test.go b/runtime/internal/flow/conn/auth_test.go
index e590f1e..5f8337e 100644
--- a/runtime/internal/flow/conn/auth_test.go
+++ b/runtime/internal/flow/conn/auth_test.go
@@ -42,7 +42,7 @@
}
func dialFlow(t *testing.T, ctx *context.T, dc *Conn, b security.Blessings) flow.Flow {
- df, err := dc.Dial(ctx, peerAuthorizer{b})
+ df, err := dc.Dial(ctx, peerAuthorizer{b}, nil)
if err != nil {
t.Fatal(err)
}
@@ -112,7 +112,7 @@
dctx := NewPrincipalWithTPCaveat(t, ctx, "dialer")
actx := NewPrincipalWithTPCaveat(t, ctx, "acceptor")
aflows := make(chan flow.Flow, 2)
- dc, ac, _ := setupConns(t, dctx, actx, nil, aflows, false)
+ dc, ac := setupConns(t, "local", "", dctx, actx, nil, aflows)
defer dc.Close(dctx, nil)
defer ac.Close(actx, nil)
@@ -130,7 +130,7 @@
// We should not be able to dial in the other direction, because that flow
// manager is not willing to accept flows.
- _, err := ac.Dial(actx, flowtest.AllowAllPeersAuthorizer{})
+ _, err := ac.Dial(actx, flowtest.AllowAllPeersAuthorizer{}, nil)
if verror.ErrorID(err) != ErrDialingNonServer.ID {
t.Errorf("got %v, wanted ErrDialingNonServer", err)
}
@@ -150,7 +150,7 @@
actx := NewPrincipalWithTPCaveat(t, ctx, "acceptor")
dflows := make(chan flow.Flow, 2)
aflows := make(chan flow.Flow, 2)
- dc, ac, _ := setupConns(t, dctx, actx, dflows, aflows, false)
+ dc, ac := setupConns(t, "local", "", dctx, actx, dflows, aflows)
defer dc.Close(dctx, nil)
defer ac.Close(actx, nil)
diff --git a/runtime/internal/flow/conn/close_test.go b/runtime/internal/flow/conn/close_test.go
index 2824bb1..e4ecece 100644
--- a/runtime/internal/flow/conn/close_test.go
+++ b/runtime/internal/flow/conn/close_test.go
@@ -8,6 +8,7 @@
"bytes"
"fmt"
"io"
+ "sync"
"testing"
"v.io/v23"
@@ -15,20 +16,66 @@
"v.io/v23/flow"
_ "v.io/x/ref/runtime/factories/fake"
"v.io/x/ref/runtime/internal/flow/flowtest"
+ "v.io/x/ref/runtime/internal/flow/protocols/debug"
"v.io/x/ref/test/goroutines"
)
+type conn struct {
+ flow.Conn
+ set *set
+}
+
+func (c *conn) Close() error {
+ c.set.remove(c.Conn)
+ return c.Conn.Close()
+}
+
+type set struct {
+ mu sync.Mutex
+ conns map[flow.Conn]bool
+}
+
+func (w *set) add(c flow.Conn) flow.Conn {
+ w.mu.Lock()
+ w.conns[c] = true
+ w.mu.Unlock()
+ return &conn{c, w}
+}
+
+func (s *set) remove(c flow.Conn) {
+ s.mu.Lock()
+ delete(s.conns, c)
+ s.mu.Unlock()
+}
+
+func (s *set) closeAll() {
+ s.mu.Lock()
+ for c := range s.conns {
+ c.Close()
+ }
+ s.mu.Unlock()
+}
+
+func (s *set) open() int {
+ s.mu.Lock()
+ o := len(s.conns)
+ s.mu.Unlock()
+ return o
+}
+
func TestRemoteDialerClose(t *testing.T) {
defer goroutines.NoLeaks(t, leakWaitTime)()
ctx, shutdown := v23.Init()
defer shutdown()
- d, a, w := setupConns(t, ctx, ctx, nil, nil, false)
+ s := set{conns: map[flow.Conn]bool{}}
+ ctx = debug.WithFilter(ctx, s.add)
+ d, a := setupConns(t, "debug", "local/", ctx, ctx, nil, nil)
d.Close(ctx, fmt.Errorf("Closing randomly."))
<-d.Closed()
<-a.Closed()
- if !w.IsClosed() {
- t.Errorf("The connection should be closed")
+ if s.open() != 0 {
+ t.Errorf("The connections should be closed")
}
}
@@ -37,12 +84,14 @@
ctx, shutdown := v23.Init()
defer shutdown()
- d, a, w := setupConns(t, ctx, ctx, nil, nil, false)
+ s := set{conns: map[flow.Conn]bool{}}
+ ctx = debug.WithFilter(ctx, s.add)
+ d, a := setupConns(t, "debug", "local/", ctx, ctx, nil, nil)
a.Close(ctx, fmt.Errorf("Closing randomly."))
<-a.Closed()
<-d.Closed()
- if !w.IsClosed() {
- t.Errorf("The connection should be closed")
+ if s.open() != 0 {
+ t.Errorf("The connections should be closed")
}
}
@@ -51,8 +100,10 @@
ctx, shutdown := v23.Init()
defer shutdown()
- d, a, w := setupConns(t, ctx, ctx, nil, nil, false)
- w.Close()
+ s := set{conns: map[flow.Conn]bool{}}
+ ctx = debug.WithFilter(ctx, s.add)
+ d, a := setupConns(t, "debug", "local/", ctx, ctx, nil, nil)
+ s.closeAll()
<-a.Closed()
<-d.Closed()
}
@@ -62,15 +113,15 @@
ctx, shutdown := v23.Init()
defer shutdown()
- d, a, _ := setupConns(t, ctx, ctx, nil, nil, false)
+ d, a := setupConns(t, "local", "", ctx, ctx, nil, nil)
d.Close(ctx, fmt.Errorf("Closing randomly."))
<-d.Closed()
<-a.Closed()
- if _, err := d.Dial(ctx, flowtest.AllowAllPeersAuthorizer{}); err == nil {
+ if _, err := d.Dial(ctx, flowtest.AllowAllPeersAuthorizer{}, nil); err == nil {
t.Errorf("Nil error dialing on dialer")
}
- if _, err := a.Dial(ctx, flowtest.AllowAllPeersAuthorizer{}); err == nil {
+ if _, err := a.Dial(ctx, flowtest.AllowAllPeersAuthorizer{}, nil); err == nil {
t.Errorf("Nil error dialing on acceptor")
}
}
@@ -81,7 +132,7 @@
ctx, shutdown := v23.Init()
defer shutdown()
for _, dialerDials := range []bool{true, false} {
- df, flows, cl := setupFlow(t, ctx, ctx, dialerDials)
+ df, flows, cl := setupFlow(t, "local", "", ctx, ctx, dialerDials)
if _, err := df.WriteMsg([]byte("hello")); err != nil {
t.Fatalf("write failed: %v", err)
}
@@ -117,13 +168,13 @@
ctx, shutdown := v23.Init()
defer shutdown()
accept := make(chan flow.Flow, 1)
- dc, ac, _ := setupConns(t, ctx, ctx, nil, accept, false)
+ dc, ac := setupConns(t, "local", "", ctx, ctx, nil, accept)
defer func() {
dc.Close(ctx, nil)
ac.Close(ctx, nil)
}()
dctx, cancel := context.WithCancel(ctx)
- df, err := dc.Dial(dctx, flowtest.AllowAllPeersAuthorizer{})
+ df, err := dc.Dial(dctx, flowtest.AllowAllPeersAuthorizer{}, nil)
if err != nil {
t.Fatal(err)
}
@@ -153,13 +204,13 @@
ctx, shutdown := v23.Init()
defer shutdown()
accept := make(chan flow.Flow, 1)
- dc, ac, _ := setupConns(t, ctx, ctx, nil, accept, false)
+ dc, ac := setupConns(t, "local", "", ctx, ctx, nil, accept)
defer func() {
dc.Close(ctx, nil)
ac.Close(ctx, nil)
}()
dctx, cancel := context.WithCancel(ctx)
- df, err := dc.Dial(dctx, flowtest.AllowAllPeersAuthorizer{})
+ df, err := dc.Dial(dctx, flowtest.AllowAllPeersAuthorizer{}, nil)
if err != nil {
t.Fatal(err)
}
diff --git a/runtime/internal/flow/conn/conn.go b/runtime/internal/flow/conn/conn.go
index dc761df..80d0034 100644
--- a/runtime/internal/flow/conn/conn.go
+++ b/runtime/internal/flow/conn/conn.go
@@ -17,6 +17,7 @@
"v.io/v23/security"
"v.io/v23/verror"
iflow "v.io/x/ref/runtime/internal/flow"
+ inaming "v.io/x/ref/runtime/internal/naming"
)
// flowID is a number assigned to identify a flow.
@@ -29,7 +30,6 @@
const mtu = 1 << 16
const DefaultBytesBufferedPerFlow = 1 << 20
-const noExist = 0
const (
expressPriority = iota
@@ -137,8 +137,8 @@
mp: newMessagePipe(conn),
handler: handler,
lBlessings: lBlessings,
- local: local,
- remote: remote,
+ local: endpointCopy(local),
+ remote: endpointCopy(remote),
closed: make(chan struct{}),
lameDucked: make(chan struct{}),
nextFid: reservedFlows,
@@ -181,7 +181,7 @@
mp: newMessagePipe(conn),
handler: handler,
lBlessings: lBlessings,
- local: local,
+ local: endpointCopy(local),
closed: make(chan struct{}),
lameDucked: make(chan struct{}),
nextFid: reservedFlows + 1,
@@ -225,8 +225,8 @@
}
// Dial dials a new flow on the Conn.
-func (c *Conn) Dial(ctx *context.T, auth flow.PeerAuthorizer) (flow.Flow, error) {
- if c.rBKey == noExist {
+func (c *Conn) Dial(ctx *context.T, auth flow.PeerAuthorizer, remote naming.Endpoint) (flow.Flow, error) {
+ if c.remote.RoutingID() == naming.NullRoutingID {
return nil, NewErrDialingNonServer(ctx)
}
rBlessings, rDischarges, err := c.blessingsFlow.getLatestRemote(ctx, c.rBKey)
@@ -234,20 +234,26 @@
return nil, err
}
var bkey, dkey uint64
+ var blessings security.Blessings
+ var discharges map[string]security.Discharge
if !c.isProxy {
// TODO(suharshs): On the first flow dial, find a way to not call this twice.
- rbnames, rejected, err := auth.AuthorizePeer(ctx, c.local, c.remote, rBlessings, rDischarges)
+ rbnames, rejected, err := auth.AuthorizePeer(ctx, c.local, remote, rBlessings, rDischarges)
if err != nil {
return nil, iflow.MaybeWrapError(verror.ErrNotTrusted, ctx, err)
}
- blessings, discharges, err := auth.BlessingsForPeer(ctx, rbnames)
+ blessings, discharges, err = auth.BlessingsForPeer(ctx, rbnames)
if err != nil {
return nil, NewErrNoBlessingsForPeer(ctx, rbnames, rejected, err)
}
- bkey, dkey, err = c.blessingsFlow.send(ctx, blessings, discharges)
- if err != nil {
- return nil, err
- }
+ }
+ if blessings.IsZero() {
+ // it's safe to ignore this error since c.lBlessings must be valid, so the
+ // encoding of the publicKey can never error out.
+ blessings, _ = security.NamelessBlessing(c.lBlessings.PublicKey())
+ }
+ if bkey, dkey, err = c.blessingsFlow.send(ctx, blessings, discharges); err != nil {
+ return nil, err
}
defer c.mu.Unlock()
c.mu.Lock()
@@ -256,7 +262,8 @@
}
id := c.nextFid
c.nextFid += 2
- return c.newFlowLocked(ctx, id, bkey, dkey, true, false), nil
+ // TODO(suharshs): endpoint fix below
+ return c.newFlowLocked(ctx, id, bkey, dkey, remote, true, false), nil
}
// LocalEndpoint returns the local vanadium Endpoint
@@ -444,7 +451,7 @@
return nil // Conn is already being closed.
}
handler := c.handler
- f := c.newFlowLocked(ctx, msg.ID, msg.BlessingsKey, msg.DischargeKey, false, true)
+ f := c.newFlowLocked(ctx, msg.ID, msg.BlessingsKey, msg.DischargeKey, nil, false, true)
f.releaseLocked(msg.InitialCounters)
c.toRelease[msg.ID] = DefaultBytesBufferedPerFlow
c.borrowing[msg.ID] = true
@@ -668,3 +675,8 @@
c.notifyNextWriterLocked(s)
return err
}
+
+func endpointCopy(ep naming.Endpoint) naming.Endpoint {
+ var cp inaming.Endpoint = *(ep.(*inaming.Endpoint))
+ return &cp
+}
diff --git a/runtime/internal/flow/conn/conn_test.go b/runtime/internal/flow/conn/conn_test.go
index c63bb6f..7501fb1 100644
--- a/runtime/internal/flow/conn/conn_test.go
+++ b/runtime/internal/flow/conn/conn_test.go
@@ -71,7 +71,7 @@
defer goroutines.NoLeaks(t, leakWaitTime)()
ctx, shutdown := v23.Init()
- df, flows, cl := setupFlow(t, ctx, ctx, true)
+ df, flows, cl := setupFlow(t, "local", "", ctx, ctx, true)
defer cl()
defer shutdown()
@@ -89,10 +89,9 @@
defer goroutines.NoLeaks(t, leakWaitTime)()
ctx, shutdown := v23.Init()
-
- dmrw, amrw, _ := flowtest.NewMRWPair(ctx)
+ dmrw, amrw := flowtest.Pipe(t, ctx, "local", "")
versions := version.RPCVersionRange{Min: 3, Max: 5}
- ep, err := v23.NewEndpoint("localhost:80")
+ ep, err := v23.NewEndpoint("@6@@batman.com:1234@@000000000000000000000000dabbad00@m@@@")
if err != nil {
t.Fatal(err)
}
@@ -116,7 +115,7 @@
}()
d, a := <-dch, <-ach
var f flow.Flow
- if f, err = d.Dial(ctx, flowtest.AllowAllPeersAuthorizer{}); err != nil {
+ if f, err = d.Dial(ctx, flowtest.AllowAllPeersAuthorizer{}, nil); err != nil {
t.Fatal(err)
}
// Write a byte to send the openFlow message.
@@ -127,7 +126,7 @@
<-q1
// After updating to fh2 the flow should be accepted in fh2.
a.UpdateFlowHandler(ctx, fh2)
- if f, err = d.Dial(ctx, flowtest.AllowAllPeersAuthorizer{}); err != nil {
+ if f, err = d.Dial(ctx, flowtest.AllowAllPeersAuthorizer{}, nil); err != nil {
t.Fatal(err)
}
// Write a byte to send the openFlow message.
diff --git a/runtime/internal/flow/conn/flow.go b/runtime/internal/flow/conn/flow.go
index a460245..e4127a3 100644
--- a/runtime/internal/flow/conn/flow.go
+++ b/runtime/internal/flow/conn/flow.go
@@ -11,6 +11,7 @@
"v.io/v23/context"
"v.io/v23/flow"
"v.io/v23/flow/message"
+ "v.io/v23/naming"
"v.io/v23/security"
"v.io/v23/verror"
)
@@ -24,6 +25,7 @@
bkey, dkey uint64
noEncrypt bool
writeCh chan struct{}
+ remote naming.Endpoint
// These variables can only be modified by SetDeadlineContext which cannot
// be called concurrently with other methods on the flow. Therefore they
@@ -56,7 +58,7 @@
// Ensure that *flw implements flow.Flow.
var _ flow.Flow = &flw{}
-func (c *Conn) newFlowLocked(ctx *context.T, id uint64, bkey, dkey uint64, dialed, preopen bool) *flw {
+func (c *Conn) newFlowLocked(ctx *context.T, id uint64, bkey, dkey uint64, remote naming.Endpoint, dialed, preopen bool) *flw {
f := &flw{
id: id,
dialed: dialed,
@@ -70,6 +72,7 @@
// flow will be notifying itself, so if there's no buffer a deadlock will
// occur.
writeCh: make(chan struct{}, 1),
+ remote: remote,
}
f.next, f.prev = f, f
f.ctx, f.cancel = context.WithCancel(ctx)
@@ -305,6 +308,19 @@
return f.ctx
}
+// LocalEndpoint returns the local vanadium endpoint.
+func (f *flw) LocalEndpoint() naming.Endpoint {
+ return f.conn.local
+}
+
+// RemoteEndpoint returns the remote vanadium endpoint.
+func (f *flw) RemoteEndpoint() naming.Endpoint {
+ if f.remote != nil {
+ return f.remote
+ }
+ return f.conn.remote
+}
+
// LocalBlessings returns the blessings presented by the local end of the flow
// during authentication.
func (f *flw) LocalBlessings() security.Blessings {
diff --git a/runtime/internal/flow/conn/flowcontrol_test.go b/runtime/internal/flow/conn/flowcontrol_test.go
index 71c72c6..15ee809 100644
--- a/runtime/internal/flow/conn/flowcontrol_test.go
+++ b/runtime/internal/flow/conn/flowcontrol_test.go
@@ -16,7 +16,7 @@
"v.io/v23/flow"
"v.io/v23/flow/message"
_ "v.io/x/ref/runtime/factories/fake"
- "v.io/x/ref/runtime/internal/flow/flowtest"
+ "v.io/x/ref/runtime/internal/flow/protocols/debug"
)
func block(c *Conn, p int) chan struct{} {
@@ -40,10 +40,6 @@
return unblock
}
-func forkForRead(conn *Conn) *flowtest.MRW {
- return conn.mp.rw.(*flowtest.MRW).ForkForRead()
-}
-
func waitFor(f func() bool) {
t := time.NewTicker(10 * time.Millisecond)
defer t.Stop()
@@ -71,6 +67,30 @@
})
}
+type readConn struct {
+ flow.Conn
+ ch chan message.Message
+ ctx *context.T
+}
+
+func (r *readConn) ReadMsg() ([]byte, error) {
+ b, err := r.Conn.ReadMsg()
+ if len(b) > 0 {
+ m, _ := message.Read(r.ctx, b)
+ switch msg := m.(type) {
+ case *message.OpenFlow:
+ if msg.ID > 1 { // Ignore the blessings flow.
+ r.ch <- m
+ }
+ case *message.Data:
+ if msg.ID > 1 { // Ignore the blessings flow.
+ r.ch <- m
+ }
+ }
+ }
+ return b, err
+}
+
func TestOrdering(t *testing.T) {
const nflows = 5
const nmessages = 5
@@ -78,10 +98,13 @@
ctx, shutdown := v23.Init()
defer shutdown()
- flows, accept, dc, ac := setupFlows(t, ctx, ctx, true, nflows, true)
+ ch := make(chan message.Message, 100)
+ fctx := debug.WithFilter(ctx, func(c flow.Conn) flow.Conn {
+ return &readConn{c, ch, ctx}
+ })
+ flows, accept, dc, ac := setupFlows(t, "debug", "local/", ctx, fctx, true, nflows)
unblock := block(dc, 0)
- fork := forkForRead(ac)
var wg sync.WaitGroup
wg.Add(2 * nflows)
defer wg.Wait()
@@ -116,37 +139,16 @@
for i := 0; i < nmessages; i++ {
found := map[uint64]bool{}
for j := 0; j < nflows; j++ {
- s, err := fork.ReadMsg()
- if err != nil {
- t.Fatal(err)
- }
- m, err := message.Read(ctx, s)
- if err != nil {
- t.Fatal(err)
- }
+ m := <-ch
switch msg := m.(type) {
case *message.OpenFlow:
found[msg.ID] = true
case *message.Data:
found[msg.ID] = true
- default:
- t.Fatalf("Unexpected message %#v", m)
}
}
if len(found) != nflows {
t.Fatalf("Did not recieve a message from each flow in round %d: %v", i, found)
}
}
- // expect the teardown message last
- s, err := fork.ReadMsg()
- if err != nil {
- t.Fatal(err)
- }
- m, err := message.Read(ctx, s)
- if err != nil {
- t.Fatal(err)
- }
- if _, ok := m.(*message.TearDown); !ok {
- t.Errorf("expected teardown got %#v", m)
- }
}
diff --git a/runtime/internal/flow/conn/lameduck_test.go b/runtime/internal/flow/conn/lameduck_test.go
index 8166e4a..583c5cb 100644
--- a/runtime/internal/flow/conn/lameduck_test.go
+++ b/runtime/internal/flow/conn/lameduck_test.go
@@ -23,7 +23,7 @@
defer shutdown()
dflows, aflows := make(chan flow.Flow, 3), make(chan flow.Flow, 3)
- dc, ac, _ := setupConns(t, ctx, ctx, dflows, aflows, false)
+ dc, ac := setupConns(t, "local", "", ctx, ctx, dflows, aflows)
go func() {
for {
@@ -41,7 +41,7 @@
}()
// Dial a flow and write it (which causes it to open).
- f1, err := dc.Dial(ctx, flowtest.AllowAllPeersAuthorizer{})
+ f1, err := dc.Dial(ctx, flowtest.AllowAllPeersAuthorizer{}, nil)
if err != nil {
t.Fatal(err)
}
@@ -49,11 +49,11 @@
t.Fatal(err)
}
// Dial more flows, but don't write to them yet.
- f2, err := dc.Dial(ctx, flowtest.AllowAllPeersAuthorizer{})
+ f2, err := dc.Dial(ctx, flowtest.AllowAllPeersAuthorizer{}, nil)
if err != nil {
t.Fatal(err)
}
- f3, err := dc.Dial(ctx, flowtest.AllowAllPeersAuthorizer{})
+ f3, err := dc.Dial(ctx, flowtest.AllowAllPeersAuthorizer{}, nil)
if err != nil {
t.Fatal(err)
}
@@ -64,7 +64,7 @@
waitFor(dc.RemoteLameDuck)
// Now we shouldn't be able to dial from dc because it's in lame duck mode.
- if _, err := dc.Dial(ctx, flowtest.AllowAllPeersAuthorizer{}); err == nil {
+ if _, err := dc.Dial(ctx, flowtest.AllowAllPeersAuthorizer{}, nil); err == nil {
t.Fatalf("expected an error, got nil")
}
diff --git a/runtime/internal/flow/conn/util_test.go b/runtime/internal/flow/conn/util_test.go
index 19f8554..890c705 100644
--- a/runtime/internal/flow/conn/util_test.go
+++ b/runtime/internal/flow/conn/util_test.go
@@ -29,17 +29,15 @@
}
func setupConns(t *testing.T,
+ network, address string,
dctx, actx *context.T,
- dflows, aflows chan<- flow.Flow,
- noencrypt bool) (dialed, accepted *Conn, _ *flowtest.Wire) {
- var dmrw, amrw *flowtest.MRW
- var w *flowtest.Wire
- if noencrypt {
- dmrw, amrw, w = flowtest.NewUnencryptedMRWPair(dctx)
- } else {
- dmrw, amrw, w = flowtest.NewMRWPair(dctx)
- }
+ dflows, aflows chan<- flow.Flow) (dialed, accepted *Conn) {
+ dmrw, amrw := flowtest.Pipe(t, actx, network, address)
versions := version.RPCVersionRange{Min: 3, Max: 5}
+ ridep, err := v23.NewEndpoint("@6@@batman.com:1234@@000000000000000000000000dabbad00@m@@@")
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
ep, err := v23.NewEndpoint("localhost:80")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
@@ -48,11 +46,13 @@
ach := make(chan *Conn)
go func() {
var handler FlowHandler
+ dep := ep
if dflows != nil {
handler = fh(dflows)
+ dep = ridep
}
dBlessings := v23.GetPrincipal(dctx).BlessingStore().Default()
- d, err := NewDialed(dctx, dBlessings, dmrw, ep, ep, versions, flowtest.AllowAllPeersAuthorizer{}, time.Minute, handler)
+ d, err := NewDialed(dctx, dBlessings, dmrw, dep, ep, versions, flowtest.AllowAllPeersAuthorizer{}, time.Minute, handler)
if err != nil {
panic(err)
}
@@ -64,24 +64,24 @@
handler = fh(aflows)
}
aBlessings := v23.GetPrincipal(actx).BlessingStore().Default()
- a, err := NewAccepted(actx, aBlessings, amrw, ep, versions, time.Minute, handler)
+ a, err := NewAccepted(actx, aBlessings, amrw, ridep, versions, time.Minute, handler)
if err != nil {
panic(err)
}
ach <- a
}()
- return <-dch, <-ach, w
+ return <-dch, <-ach
}
-func setupFlow(t *testing.T, dctx, actx *context.T, dialFromDialer bool) (dialed flow.Flow, accepted <-chan flow.Flow, close func()) {
- dfs, accepted, ac, dc := setupFlows(t, dctx, actx, dialFromDialer, 1, false)
+func setupFlow(t *testing.T, network, address string, dctx, actx *context.T, dialFromDialer bool) (dialed flow.Flow, accepted <-chan flow.Flow, close func()) {
+ dfs, accepted, ac, dc := setupFlows(t, network, address, dctx, actx, dialFromDialer, 1)
return dfs[0], accepted, func() { dc.Close(dctx, nil); ac.Close(dctx, nil) }
}
-func setupFlows(t *testing.T, dctx, actx *context.T, dialFromDialer bool, n int, noencrypt bool) (dialed []flow.Flow, accepted <-chan flow.Flow, dc, ac *Conn) {
+func setupFlows(t *testing.T, network, address string, dctx, actx *context.T, dialFromDialer bool, n int) (dialed []flow.Flow, accepted <-chan flow.Flow, dc, ac *Conn) {
dialed = make([]flow.Flow, n)
dflows, aflows := make(chan flow.Flow, n), make(chan flow.Flow, n)
- d, a, _ := setupConns(t, dctx, actx, dflows, aflows, noencrypt)
+ d, a := setupConns(t, network, address, dctx, actx, dflows, aflows)
if !dialFromDialer {
d, a = a, d
dctx, actx = actx, dctx
@@ -89,7 +89,7 @@
}
for i := 0; i < n; i++ {
var err error
- if dialed[i], err = d.Dial(dctx, flowtest.AllowAllPeersAuthorizer{}); err != nil {
+ if dialed[i], err = d.Dial(dctx, flowtest.AllowAllPeersAuthorizer{}, nil); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
diff --git a/runtime/internal/flow/flowtest/flowtest.go b/runtime/internal/flow/flowtest/flowtest.go
index 9c8db3e..5c3a996 100644
--- a/runtime/internal/flow/flowtest/flowtest.go
+++ b/runtime/internal/flow/flowtest/flowtest.go
@@ -5,147 +5,37 @@
package flowtest
import (
- "fmt"
- "io"
- "sync"
+ "testing"
+ "time"
"v.io/v23"
"v.io/v23/context"
+ "v.io/v23/flow"
"v.io/v23/naming"
"v.io/v23/security"
-
- "v.io/x/ref/internal/logger"
)
-type Wire struct {
- ctx *context.T
- mu sync.Mutex
- c *sync.Cond
- closed bool
-}
-
-func (w *Wire) Close() {
- w.mu.Lock()
- w.closed = true
- w.c.Broadcast()
- w.mu.Unlock()
-}
-
-func (w *Wire) IsClosed() bool {
- w.mu.Lock()
- c := w.closed
- w.mu.Unlock()
- return c
-}
-
-type MRW struct {
- wire *Wire
- in []byte
- peers []*MRW
- noencrypt bool
-}
-
-func NewMRWPair(ctx *context.T) (*MRW, *MRW, *Wire) {
- w := &Wire{ctx: ctx}
- w.c = sync.NewCond(&w.mu)
- a, b := &MRW{wire: w}, &MRW{wire: w}
- a.peers, b.peers = []*MRW{b}, []*MRW{a}
- return a, b, w
-}
-
-func NewUnencryptedMRWPair(ctx *context.T) (*MRW, *MRW, *Wire) {
- a, b, w := NewMRWPair(ctx)
- a.noencrypt = true
- b.noencrypt = true
- return a, b, w
-}
-
-func (f *MRW) UnsafeDisableEncryption() bool {
- return f.noencrypt
-}
-
-func (f *MRW) WriteMsg(data ...[]byte) (int, error) {
- buf := []byte{}
- for _, d := range data {
- buf = append(buf, d...)
+// Pipe returns a connection pair dialed against a listener using
+// the given network and address.
+func Pipe(t *testing.T, ctx *context.T, network, address string) (dialed, accepted flow.Conn) {
+ local, _ := flow.RegisteredProtocol(network)
+ if local == nil {
+ t.Fatalf("No registered protocol %s", network)
}
- if len(buf) == 0 {
- return 0, nil
+ l, err := local.Listen(ctx, network, address)
+ if err != nil {
+ t.Fatal(err)
}
- logbuf := buf
- if len(buf) > 128 {
- logbuf = buf[:128]
+ d, err := local.Dial(ctx, l.Addr().Network(), l.Addr().String(), time.Second)
+ if err != nil {
+ t.Fatal(err)
}
- logger.Global().VI(2).Infof("Writing %d bytes to the wire: %#v", len(buf), logbuf)
- defer f.wire.mu.Unlock()
- f.wire.mu.Lock()
- if f.peers == nil {
- f.wire.mu.Unlock()
- return 0, fmt.Errorf("Attempting to write to read-only fork.")
+ a, err := l.Accept(ctx)
+ if err != nil {
+ t.Fatal(err)
}
- for !f.wire.closed {
- ready := true
- for _, peer := range f.peers {
- if peer.in != nil {
- ready = false
- }
- }
- if ready {
- break
- }
- f.wire.c.Wait()
- }
- if f.wire.closed {
- return 0, io.EOF
- }
- for _, peer := range f.peers {
- peer.in = append([]byte{}, buf...)
- }
- f.wire.c.Broadcast()
- return len(buf), nil
-}
-
-func (f *MRW) ReadMsg() (buf []byte, err error) {
- defer f.wire.mu.Unlock()
- f.wire.mu.Lock()
- for f.in == nil && !f.wire.closed {
- f.wire.c.Wait()
- }
- buf, f.in = f.in, nil
- f.wire.c.Broadcast()
-
- logbuf := buf
- if len(buf) > 128 {
- logbuf = buf[:128]
- }
- logger.Global().VI(2).Infof("Reading %d bytes from the wire: %#v", len(buf), logbuf)
-
- if buf == nil {
- return nil, io.EOF
- }
- return buf, nil
-}
-
-func (f *MRW) Close() error {
- f.wire.Close()
- return nil
-}
-
-func (f *MRW) ForkForRead() *MRW {
- defer f.wire.mu.Unlock()
- f.wire.mu.Lock()
- fork := &MRW{
- wire: f.wire,
- peers: nil,
- noencrypt: f.noencrypt,
- }
- for _, fp := range f.peers {
- fp.peers = append(fp.peers, fork)
- }
- if f.in != nil {
- fork.in = append([]byte{}, f.in...)
- }
- return fork
+ l.Close()
+ return d, a
}
type AllowAllPeersAuthorizer struct{}
diff --git a/runtime/internal/flow/manager/conncache.go b/runtime/internal/flow/manager/conncache.go
index dd61c56..f5b5215 100644
--- a/runtime/internal/flow/manager/conncache.go
+++ b/runtime/internal/flow/manager/conncache.go
@@ -82,14 +82,14 @@
// Insert adds conn to the cache.
// An error will be returned iff the cache has been closed.
-func (c *ConnCache) Insert(conn *conn.Conn) error {
+func (c *ConnCache) Insert(conn *conn.Conn, protocol, address string) error {
defer c.mu.Unlock()
c.mu.Lock()
if c.addrCache == nil {
return NewErrCacheClosed(nil)
}
ep := conn.RemoteEndpoint()
- k := key(ep.Addr().Network(), ep.Addr().String(), ep.BlessingNames())
+ k := key(protocol, address, ep.BlessingNames())
entry := &connEntry{
conn: conn,
rid: ep.RoutingID(),
diff --git a/runtime/internal/flow/manager/conncache_test.go b/runtime/internal/flow/manager/conncache_test.go
index 8d48d41..ff1f83c 100644
--- a/runtime/internal/flow/manager/conncache_test.go
+++ b/runtime/internal/flow/manager/conncache_test.go
@@ -16,6 +16,7 @@
"v.io/v23/rpc/version"
connpackage "v.io/x/ref/runtime/internal/flow/conn"
"v.io/x/ref/runtime/internal/flow/flowtest"
+ _ "v.io/x/ref/runtime/internal/flow/protocols/local"
inaming "v.io/x/ref/runtime/internal/naming"
)
@@ -31,7 +32,7 @@
Blessings: []string{"A", "B", "C"},
}
conn := makeConnAndFlow(t, ctx, remote).c
- if err := c.Insert(conn); err != nil {
+ if err := c.Insert(conn, remote.Protocol, remote.Address); err != nil {
t.Fatal(err)
}
// We should be able to find the conn in the cache.
@@ -86,6 +87,7 @@
otherEP := &inaming.Endpoint{
Protocol: "other",
Address: "other",
+ RID: naming.FixedRoutingID(0x2222),
Blessings: []string{"other"},
}
otherConn := makeConnAndFlow(t, ctx, otherEP).c
@@ -105,7 +107,7 @@
}(ch)
// We insert the other conn into the cache.
- if err := c.Insert(otherConn); err != nil {
+ if err := c.Insert(otherConn, otherEP.Protocol, otherEP.Address); err != nil {
t.Fatal(err)
}
c.Unreserve(otherEP.Protocol, otherEP.Address, otherEP.Blessings)
@@ -116,7 +118,7 @@
// Insert a duplicate conn to ensure that replaced conns still get closed.
dupConn := makeConnAndFlow(t, ctx, remote).c
- if err := c.Insert(dupConn); err != nil {
+ if err := c.Insert(dupConn, remote.Protocol, remote.Address); err != nil {
t.Fatal(err)
}
@@ -146,7 +148,8 @@
c := NewConnCache()
conns := nConnAndFlows(t, ctx, 10)
for _, conn := range conns {
- if err := c.Insert(conn.c); err != nil {
+ addr := conn.c.RemoteEndpoint().Addr()
+ if err := c.Insert(conn.c, addr.Network(), addr.String()); err != nil {
t.Fatal(err)
}
}
@@ -177,7 +180,8 @@
c = NewConnCache()
conns = nConnAndFlows(t, ctx, 10)
for _, conn := range conns {
- if err := c.Insert(conn.c); err != nil {
+ addr := conn.c.RemoteEndpoint().Addr()
+ if err := c.Insert(conn.c, addr.Network(), addr.String()); err != nil {
t.Fatal(err)
}
}
@@ -211,7 +215,8 @@
c = NewConnCache()
conns = nConnAndFlows(t, ctx, 10)
for _, conn := range conns {
- if err := c.Insert(conn.c); err != nil {
+ addr := conn.c.RemoteEndpoint().Addr()
+ if err := c.Insert(conn.c, addr.Network(), addr.String()); err != nil {
t.Fatal(err)
}
}
@@ -284,14 +289,14 @@
for i := 0; i < n; i++ {
cfs[i] = makeConnAndFlow(t, ctx, &inaming.Endpoint{
Protocol: strconv.Itoa(i),
- RID: naming.FixedRoutingID(uint64(i)),
+ RID: naming.FixedRoutingID(uint64(i + 1)), // We need to have a nonzero rid for bidi.
})
}
return cfs
}
func makeConnAndFlow(t *testing.T, ctx *context.T, ep naming.Endpoint) connAndFlow {
- dmrw, amrw, _ := flowtest.NewMRWPair(ctx)
+ dmrw, amrw := flowtest.Pipe(t, ctx, "local", "")
dch := make(chan *connpackage.Conn)
ach := make(chan *connpackage.Conn)
lBlessings := v23.GetPrincipal(ctx).BlessingStore().Default()
@@ -314,7 +319,7 @@
}()
conn := <-dch
<-ach
- f, err := conn.Dial(ctx, flowtest.AllowAllPeersAuthorizer{})
+ f, err := conn.Dial(ctx, flowtest.AllowAllPeersAuthorizer{}, nil)
if err != nil {
t.Fatal(err)
}
diff --git a/runtime/internal/flow/manager/manager.go b/runtime/internal/flow/manager/manager.go
index 5a67e18..01ebcc8 100644
--- a/runtime/internal/flow/manager/manager.go
+++ b/runtime/internal/flow/manager/manager.go
@@ -45,21 +45,19 @@
func NewWithBlessings(ctx *context.T, serverBlessings security.Blessings, rid naming.RoutingID) flow.Manager {
m := &manager{
- rid: rid,
- closed: make(chan struct{}),
- cache: NewConnCache(),
- ctx: ctx,
- serverBlessings: serverBlessings,
+ rid: rid,
+ closed: make(chan struct{}),
+ cache: NewConnCache(),
+ ctx: ctx,
}
if rid != naming.NullRoutingID {
+ m.serverBlessings = serverBlessings
+ m.serverNames = security.BlessingNames(v23.GetPrincipal(ctx), serverBlessings)
m.ls = &listenState{
q: upcqueue.New(),
listeners: []flow.Listener{},
stopProxy: make(chan struct{}),
}
- for b, _ := range v23.GetPrincipal(ctx).BlessingsInfo(m.serverBlessings) {
- m.serverNames = append(m.serverNames, b)
- }
}
go func() {
ticker := time.NewTicker(reapCacheInterval)
@@ -343,7 +341,7 @@
h.ctx,
h.m.serverBlessings,
f,
- f.Conn().LocalEndpoint(),
+ f.LocalEndpoint(),
version.Supported,
handshakeTimeout,
fh)
@@ -472,20 +470,20 @@
flowConn.Close()
return nil, nil, iflow.MaybeWrapError(flow.ErrDialFailed, ctx, err)
}
- if err := m.cache.Insert(c); err != nil {
+ if err := m.cache.Insert(c, network, address); err != nil {
return nil, nil, flow.NewErrBadState(ctx, err)
}
// Now that c is in the cache we can explicitly unreserve.
m.cache.Unreserve(network, address, remote.BlessingNames())
}
- f, err := c.Dial(ctx, auth)
+ f, err := c.Dial(ctx, auth, remote)
if err != nil {
return nil, nil, iflow.MaybeWrapError(flow.ErrDialFailed, ctx, err)
}
// If we are dialing out to a Proxy, we need to dial a conn on this flow, and
// return a flow on that corresponding conn.
- if proxyConn := c; remote.RoutingID() != proxyConn.RemoteEndpoint().RoutingID() {
+ if proxyConn := c; remote.RoutingID() != naming.NullRoutingID && remote.RoutingID() != proxyConn.RemoteEndpoint().RoutingID() {
var fh conn.FlowHandler
if m.ls != nil {
m.ls.mu.Lock()
@@ -511,7 +509,7 @@
if err := m.cache.InsertWithRoutingID(c); err != nil {
return nil, nil, iflow.MaybeWrapError(flow.ErrBadState, ctx, err)
}
- f, err = c.Dial(ctx, auth)
+ f, err = c.Dial(ctx, auth, remote)
if err != nil {
proxyConn.Close(ctx, err)
return nil, nil, iflow.MaybeWrapError(flow.ErrDialFailed, ctx, err)
diff --git a/runtime/internal/flow/manager/manager_test.go b/runtime/internal/flow/manager/manager_test.go
index 7efc876..6a949f8 100644
--- a/runtime/internal/flow/manager/manager_test.go
+++ b/runtime/internal/flow/manager/manager_test.go
@@ -98,7 +98,7 @@
<-dm.Closed()
}
-func TestNullClientBlessings(t *testing.T) {
+func TestPublicKeyOnlyClientBlessings(t *testing.T) {
defer goroutines.NoLeaks(t, leakWaitTime)()
ctx, shutdown := v23.Init()
@@ -108,16 +108,17 @@
}
nulldm := New(ctx, naming.NullRoutingID)
_, af := testFlows(t, ctx, nulldm, am, flowtest.AllowAllPeersAuthorizer{})
- // Ensure that the remote blessings of the underlying conn of the accepted flow are zero.
- if rBlessings := af.Conn().(*conn.Conn).RemoteBlessings(); !rBlessings.IsZero() {
- t.Errorf("got %v, want zero-value blessings", rBlessings)
+ // Ensure that the remote blessings of the underlying conn of the accepted blessings
+ // only has the public key of the client and no certificates.
+ if rBlessings := af.Conn().(*conn.Conn).RemoteBlessings(); len(rBlessings.String()) > 0 || rBlessings.PublicKey() == nil {
+ t.Errorf("got %v, want no-cert blessings", rBlessings)
}
dm := New(ctx, naming.FixedRoutingID(0x1111))
_, af = testFlows(t, ctx, dm, am, flowtest.AllowAllPeersAuthorizer{})
// Ensure that the remote blessings of the underlying conn of the accepted flow are
// non-zero if we did specify a RoutingID.
- if rBlessings := af.Conn().(*conn.Conn).RemoteBlessings(); rBlessings.IsZero() {
- t.Errorf("got %v, want non-zero blessings", rBlessings)
+ if rBlessings := af.Conn().(*conn.Conn).RemoteBlessings(); len(rBlessings.String()) == 0 {
+ t.Errorf("got %v, want full blessings", rBlessings)
}
shutdown()
diff --git a/runtime/internal/flow/protocols/debug/debug.go b/runtime/internal/flow/protocols/debug/debug.go
new file mode 100644
index 0000000..351ed80
--- /dev/null
+++ b/runtime/internal/flow/protocols/debug/debug.go
@@ -0,0 +1,133 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package debug
+
+import (
+ "fmt"
+ "net"
+ "strings"
+ "time"
+
+ "v.io/v23/context"
+ "v.io/v23/flow"
+ "v.io/v23/naming"
+ inaming "v.io/x/ref/runtime/internal/naming"
+)
+
+func init() {
+ flow.RegisterProtocol("debug", &debug{})
+}
+
+// Filter gives you the chance to add your own logic to flow.Conns
+// that get created. This is useful in tests. For example you might
+// want to modify all packets, print them out, or perhaps cause
+// errors.
+type Filter func(flow.Conn) flow.Conn
+
+type filterKey struct{}
+
+// WithFilter attaches a filter to the context. Any connection
+// accepted or dialed with the debug protocol using this context will
+// be wrapped via a call to filter.
+func WithFilter(ctx *context.T, filter Filter) *context.T {
+ return context.WithValue(ctx, filterKey{}, filter)
+}
+func filter(ctx *context.T, c flow.Conn) flow.Conn {
+ if f, ok := ctx.Value(filterKey{}).(Filter); ok {
+ return f(c)
+ }
+ return c
+}
+
+func WrapName(name string) string {
+ addr, suffix := naming.SplitAddressName(name)
+ if addr == "" {
+ return name
+ }
+ iep, err := inaming.NewEndpoint(addr)
+ if err != nil {
+ return name
+ }
+ iep.Protocol, iep.Address = WrapAddress(iep.Protocol, iep.Address)
+ return naming.JoinAddressName(iep.String(), suffix)
+}
+
+func WrapAddress(protocol, address string) (string, string) {
+ return "debug", protocol + "/" + address
+}
+
+type addr string
+
+func (a addr) Network() string { return "debug" }
+func (a addr) String() string { return string(a) }
+
+type conn struct {
+ base flow.Conn
+ addr addr
+}
+
+func (c *conn) LocalAddr() net.Addr { return c.addr }
+func (c *conn) ReadMsg() ([]byte, error) { return c.base.ReadMsg() }
+func (c *conn) WriteMsg(data ...[]byte) (int, error) { return c.base.WriteMsg(data...) }
+func (c *conn) Close() error { return c.base.Close() }
+func (c *conn) UnsafeDisableEncryption() bool { return true }
+
+type listener struct {
+ base flow.Listener
+ addr addr
+ debug *debug
+}
+
+func (l *listener) Accept(ctx *context.T) (flow.Conn, error) {
+ c, err := l.base.Accept(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return &conn{base: filter(ctx, c), addr: l.addr}, nil
+}
+func (l *listener) Addr() net.Addr { return l.addr }
+func (l *listener) Close() error { return l.base.Close() }
+
+type debug struct{}
+
+func (d *debug) Dial(ctx *context.T, network, address string, timeout time.Duration) (flow.Conn, error) {
+ var base flow.Protocol
+ if network, address, base = baseProtocol(address); base == nil {
+ return nil, fmt.Errorf("could not find underlying protocol %q", network)
+ }
+ c, err := base.Dial(ctx, network, address, timeout)
+ if err != nil {
+ return nil, err
+ }
+ return &conn{base: filter(ctx, c), addr: addr(network + "/" + address)}, nil
+}
+func (d *debug) Listen(ctx *context.T, network, address string) (flow.Listener, error) {
+ var base flow.Protocol
+ if network, address, base = baseProtocol(address); base == nil {
+ return nil, fmt.Errorf("could not find underlying protocol %q", network)
+ }
+ l, err := base.Listen(ctx, network, address)
+ if err != nil {
+ return nil, err
+ }
+ return &listener{base: l, addr: addr(l.Addr().Network() + "/" + l.Addr().String())}, nil
+}
+func (d *debug) Resolve(ctx *context.T, network, address string) (string, string, error) {
+ return network, address, nil
+}
+
+func baseProtocol(in string) (network, address string, base flow.Protocol) {
+ parts := strings.SplitN(in, "/", 2)
+ if len(parts) == 2 {
+ network, address = parts[0], parts[1]
+ } else {
+ network, address = "", parts[0]
+ }
+ base, names := flow.RegisteredProtocol(network)
+ if network == "" && len(names) > 0 {
+ network = names[0]
+ }
+ return network, address, base
+}
diff --git a/runtime/internal/flow/protocols/local/init.go b/runtime/internal/flow/protocols/local/init.go
new file mode 100644
index 0000000..706c26f
--- /dev/null
+++ b/runtime/internal/flow/protocols/local/init.go
@@ -0,0 +1,168 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package local
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "strconv"
+ "sync"
+ "time"
+
+ "v.io/v23/context"
+ "v.io/v23/flow"
+)
+
+func init() {
+ flow.RegisterProtocol("local", &local{listeners: map[addr]*listener{}})
+}
+
+type addr string
+
+func (a addr) Network() string { return "local" }
+func (a addr) String() string { return string(a) }
+
+type conn struct {
+ addr addr
+ incoming chan []byte
+ peer *conn
+ *wire
+}
+
+func (c *conn) LocalAddr() net.Addr { return c.addr }
+func (c *conn) ReadMsg() ([]byte, error) {
+ select {
+ case msg := <-c.incoming:
+ return msg, nil
+ case <-c.wire.closech:
+ return nil, io.EOF
+ }
+}
+func (c *conn) WriteMsg(data ...[]byte) (int, error) {
+ l := 0
+ for _, b := range data {
+ l += len(b)
+ }
+ agg := make([]byte, 0, l)
+ for _, b := range data {
+ agg = append(agg, b...)
+ }
+ select {
+ case c.peer.incoming <- agg:
+ return l, nil
+ case <-c.wire.closech:
+ return 0, io.EOF
+ }
+}
+
+type wire struct {
+ mu sync.Mutex
+ closed bool
+ closech chan struct{}
+}
+
+func (w *wire) Close() error {
+ w.mu.Lock()
+ if !w.closed {
+ w.closed = true
+ close(w.closech)
+ }
+ w.mu.Unlock()
+ return nil
+}
+
+type listener struct {
+ addr addr
+ conns chan *conn
+ closech chan struct{}
+ local *local
+ closed bool // protected by local.mu
+}
+
+func (l *listener) Accept(ctx *context.T) (flow.Conn, error) {
+ select {
+ case c := <-l.conns:
+ return c, nil
+ case <-l.closech:
+ return nil, fmt.Errorf("listener closed")
+ }
+}
+func (l *listener) Addr() net.Addr {
+ return l.addr
+}
+
+func (l *listener) Close() error {
+ l.local.mu.Lock()
+ if !l.closed {
+ l.closed = true
+ close(l.closech)
+ }
+ l.local.mu.Unlock()
+ return nil
+}
+
+type local struct {
+ mu sync.Mutex
+ next int
+ listeners map[addr]*listener
+}
+
+func (l *local) nextAddrLocked() addr {
+ ret := strconv.FormatInt(int64(l.next), 10)
+ l.next++
+ return addr(ret)
+}
+
+func (l *local) Dial(ctx *context.T, network, address string, timeout time.Duration) (flow.Conn, error) {
+ l.mu.Lock()
+ listener := l.listeners[addr(address)]
+ daddr, aaddr := l.nextAddrLocked(), l.nextAddrLocked()
+ l.mu.Unlock()
+
+ if listener == nil {
+ return nil, fmt.Errorf("unreachable")
+ }
+
+ w := &wire{closech: make(chan struct{})}
+ d := &conn{addr: daddr, incoming: make(chan []byte), wire: w}
+ a := &conn{addr: aaddr, incoming: make(chan []byte), wire: w}
+ d.peer, a.peer = a, d
+
+ t := time.NewTimer(timeout)
+ defer t.Stop()
+ select {
+ case <-t.C:
+ return nil, fmt.Errorf("timeout")
+ case listener.conns <- a:
+ return d, nil
+ case <-listener.closech:
+ return nil, fmt.Errorf("unreachable")
+ }
+}
+
+func (l *local) Resolve(ctx *context.T, network, address string) (string, string, error) {
+ return network, address, nil
+}
+
+func (l *local) Listen(ctx *context.T, network, address string) (flow.Listener, error) {
+ defer l.mu.Unlock()
+ l.mu.Lock()
+ a := addr(address)
+ if a == "" {
+ a = l.nextAddrLocked()
+ }
+ if _, ok := l.listeners[a]; ok {
+ return nil, fmt.Errorf("address in use")
+ }
+ listener := &listener{
+ addr: a,
+ conns: make(chan *conn, 1),
+ closech: make(chan struct{}),
+ local: l,
+ }
+ l.listeners[a] = listener
+ return listener, nil
+}
diff --git a/runtime/internal/rpc/benchmark/benchmark/doc.go b/runtime/internal/rpc/benchmark/benchmark/doc.go
index 8016f5e..20ccde2 100644
--- a/runtime/internal/rpc/benchmark/benchmark/doc.go
+++ b/runtime/internal/rpc/benchmark/benchmark/doc.go
@@ -76,6 +76,8 @@
write an execution trace to the named file after execution
-test.v=false
verbose: print additional output
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v23.credentials=
directory to use for storing security credentials
-v23.i18n-catalogue=
diff --git a/runtime/internal/rpc/benchmark/benchmarkd/doc.go b/runtime/internal/rpc/benchmark/benchmarkd/doc.go
index a2bf3f8..11b555c 100644
--- a/runtime/internal/rpc/benchmark/benchmarkd/doc.go
+++ b/runtime/internal/rpc/benchmark/benchmarkd/doc.go
@@ -62,6 +62,8 @@
write an execution trace to the named file after execution
-test.v=false
verbose: print additional output
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/runtime/internal/rpc/client.go b/runtime/internal/rpc/client.go
index c2aa7a7..11a59f7 100644
--- a/runtime/internal/rpc/client.go
+++ b/runtime/internal/rpc/client.go
@@ -59,7 +59,6 @@
errResultDecoding = reg(".errResultDecoding", "failed to decode result #{3}{:4}")
errResponseDecoding = reg(".errResponseDecoding", "failed to decode response{:3}")
errRemainingStreamResults = reg(".errRemaingStreamResults", "stream closed with remaining stream results")
- errNoBlessingsForPeer = reg(".errNoBlessingsForPeer", "no blessings tagged for peer {3}{:4}")
errBlessingGrant = reg(".errBlessingGrant", "failed to grant blessing to server with blessings{:3}")
errBlessingAdd = reg(".errBlessingAdd", "failed to add blessing granted to server{:3}")
errPeerAuthorizeFailed = reg(".errPeerAuthorizedFailed", "failed to authorize flow with remote blessings{:3} {:4}")
@@ -106,7 +105,7 @@
return c
}
-func (c *client) createFlow(ctx *context.T, principal security.Principal, ep naming.Endpoint, vcOpts []stream.VCOpt) (stream.Flow, *verror.SubErr) {
+func (c *client) createFlow(ctx *context.T, principal security.Principal, ep naming.Endpoint, vcOpts []stream.VCOpt, flowOpts []stream.FlowOpt) (stream.Flow, *verror.SubErr) {
suberr := func(err error) *verror.SubErr {
return &verror.SubErr{Err: err, Options: verror.Print}
}
@@ -120,7 +119,7 @@
// We are serializing the creation of all flows per VC. This is okay
// because if one flow creation is to block, it is likely that all others
// for that VC would block as well.
- if flow, err := found.Connect(); err == nil {
+ if flow, err := found.Connect(flowOpts...); err == nil {
return flow, nil
}
// If the vc fails to establish a new flow, we assume it's
@@ -145,7 +144,7 @@
return nil, suberr(err)
}
- flow, err := v.Connect()
+ flow, err := v.Connect(flowOpts...)
if err != nil {
return nil, suberr(err)
}
@@ -336,7 +335,7 @@
// authorizer, both during creation of the VC underlying the flow and the
// flow itself.
// TODO(cnicolaou): implement real, configurable load balancing.
-func (c *client) tryCreateFlow(ctx *context.T, principal security.Principal, index int, name, server, method string, auth security.Authorizer, ch chan<- *serverStatus, vcOpts []stream.VCOpt) {
+func (c *client) tryCreateFlow(ctx *context.T, principal security.Principal, index int, name, server, method string, auth security.Authorizer, ch chan<- *serverStatus, vcOpts []stream.VCOpt, flowOpts []stream.FlowOpt) {
defer c.wg.Done()
status := &serverStatus{index: index, server: server}
var span vtrace.Span
@@ -366,7 +365,7 @@
status.serverErr = suberr(verror.New(errInvalidEndpoint, ctx))
return
}
- if status.flow, status.serverErr = c.createFlow(ctx, principal, ep, append(vcOpts, &vc.ServerAuthorizer{Suffix: status.suffix, Method: method, Policy: auth})); status.serverErr != nil {
+ if status.flow, status.serverErr = c.createFlow(ctx, principal, ep, append(vcOpts, &vc.ServerAuthorizer{Suffix: status.suffix, Method: method, Policy: auth}), flowOpts); status.serverErr != nil {
status.serverErr.Name = suberrName(server, name, method)
ctx.VI(2).Infof("rpc: Failed to create Flow with %v: %v", server, status.serverErr.Err)
return
@@ -471,7 +470,9 @@
responses := make([]*serverStatus, attempts)
ch := make(chan *serverStatus, attempts)
- vcOpts := append(translateVCOpts(opts), c.vcOpts...)
+ vcOpts, flowOpts := translateStreamOpts(opts)
+ vcOpts = append(vcOpts, c.vcOpts...)
+
authorizer := newServerAuthorizer(blessingPattern, opts...)
for i, server := range resolved.Names() {
// Create a copy of vcOpts for each call to tryCreateFlow
@@ -487,7 +488,7 @@
c.wg.Add(1)
c.mu.Unlock()
- go c.tryCreateFlow(ctx, principal, i, name, server, method, authorizer, ch, vcOptsCopy)
+ go c.tryCreateFlow(ctx, principal, i, name, server, method, authorizer, ch, vcOptsCopy, flowOpts)
}
var timeoutChan <-chan time.Time
diff --git a/runtime/internal/rpc/options.go b/runtime/internal/rpc/options.go
index 09da261..7ecf8e3 100644
--- a/runtime/internal/rpc/options.go
+++ b/runtime/internal/rpc/options.go
@@ -13,6 +13,7 @@
"v.io/x/ref/lib/apilog"
"v.io/x/ref/runtime/internal/rpc/stream"
+ "v.io/x/ref/runtime/internal/rpc/stream/vc"
)
// PreferredProtocols instructs the Runtime implementation to select
@@ -79,11 +80,13 @@
return false
}
-func translateVCOpts(opts []rpc.CallOpt) (vcOpts []stream.VCOpt) {
+func translateStreamOpts(opts []rpc.CallOpt) (vcOpts []stream.VCOpt, flowOpts []stream.FlowOpt) {
for _, o := range opts {
switch v := o.(type) {
case stream.VCOpt:
vcOpts = append(vcOpts, v)
+ case options.ChannelTimeout:
+ flowOpts = append(flowOpts, vc.ChannelTimeout(time.Duration(v)))
case options.SecurityLevel:
switch v {
case options.SecurityNone:
diff --git a/runtime/internal/rpc/server.go b/runtime/internal/rpc/server.go
index b58a3cd..80fa2bf 100644
--- a/runtime/internal/rpc/server.go
+++ b/runtime/internal/rpc/server.go
@@ -294,7 +294,9 @@
s.preferredProtocols = []string(opt)
case options.SecurityLevel:
securityLevel = opt
-
+ case options.ChannelTimeout:
+ s.listenerOpts = append(s.listenerOpts,
+ vc.ChannelTimeout(time.Duration(opt)))
}
}
diff --git a/runtime/internal/rpc/stream/manager/manager.go b/runtime/internal/rpc/stream/manager/manager.go
index eb9af6d..36ea02b 100644
--- a/runtime/internal/rpc/stream/manager/manager.go
+++ b/runtime/internal/rpc/stream/manager/manager.go
@@ -15,7 +15,6 @@
"v.io/v23/context"
"v.io/v23/naming"
"v.io/v23/rpc"
-
"v.io/v23/security"
"v.io/v23/verror"
@@ -370,10 +369,7 @@
if p == nil {
return nil, nil
}
- var ret []string
- for b, _ := range p.BlessingsInfo(b) {
- ret = append(ret, b)
- }
+ ret := security.BlessingNames(p, b)
if len(ret) == 0 {
return nil, verror.New(stream.ErrBadArg, nil, verror.New(errNoBlessingNames, nil))
}
diff --git a/runtime/internal/rpc/stream/message/control.go b/runtime/internal/rpc/stream/message/control.go
index 8ccc494..dbb474d 100644
--- a/runtime/internal/rpc/stream/message/control.go
+++ b/runtime/internal/rpc/stream/message/control.go
@@ -90,6 +90,17 @@
Data []byte
}
+// HealthCheckRequest is used to periodically check to see if the remote end
+// is still available.
+type HealthCheckRequest struct {
+ VCI id.VC
+}
+
+// HealthCheckResponse is sent in response to a health check request.
+type HealthCheckResponse struct {
+ VCI id.VC
+}
+
// Command enum.
type command uint8
@@ -100,6 +111,8 @@
setupCommand command = 4
setupStreamCommand command = 5
setupVCCommand command = 6
+ healthCheckReqCommand command = 7
+ healthCheckRespCommand command = 8
)
// SetupOption is the base interface for optional Setup options.
@@ -160,6 +173,10 @@
command = setupStreamCommand
case *SetupVC:
command = setupVCCommand
+ case *HealthCheckRequest:
+ command = healthCheckReqCommand
+ case *HealthCheckResponse:
+ command = healthCheckRespCommand
default:
return verror.New(errUnrecognizedVCControlMessageType, nil, fmt.Sprintf("%T", m))
}
@@ -195,6 +212,10 @@
m = new(SetupStream)
case setupVCCommand:
m = new(SetupVC)
+ case healthCheckReqCommand:
+ m = new(HealthCheckRequest)
+ case healthCheckRespCommand:
+ m = new(HealthCheckResponse)
default:
return nil, verror.New(errUnrecognizedVCControlMessageCommand, nil, command)
}
@@ -220,6 +241,22 @@
return
}
+func (m *HealthCheckRequest) writeTo(w io.Writer) (err error) {
+ return writeInt(w, m.VCI)
+}
+
+func (m *HealthCheckRequest) readFrom(r *bytes.Buffer) (err error) {
+ return readInt(r, &m.VCI)
+}
+
+func (m *HealthCheckResponse) writeTo(w io.Writer) (err error) {
+ return writeInt(w, m.VCI)
+}
+
+func (m *HealthCheckResponse) readFrom(r *bytes.Buffer) (err error) {
+ return readInt(r, &m.VCI)
+}
+
func (m *SetupVC) writeTo(w io.Writer) (err error) {
if err = writeInt(w, m.VCI); err != nil {
return
diff --git a/runtime/internal/rpc/stream/proxy/proxy.go b/runtime/internal/rpc/stream/proxy/proxy.go
index ee68f73..a010f18 100644
--- a/runtime/internal/rpc/stream/proxy/proxy.go
+++ b/runtime/internal/rpc/stream/proxy/proxy.go
@@ -448,9 +448,7 @@
RID: p.rid,
}
if prncpl := p.principal; prncpl != nil {
- for b, _ := range prncpl.BlessingsInfo(prncpl.BlessingStore().Default()) {
- ep.Blessings = append(ep.Blessings, b)
- }
+ ep.Blessings = security.BlessingNames(prncpl, prncpl.BlessingStore().Default())
}
return ep
}
@@ -600,6 +598,21 @@
p.RemoveRoute(srcVCI)
case *message.AddReceiveBuffers:
p.proxy.routeCounters(p, m.Counters)
+ case *message.HealthCheckRequest:
+ if svc := p.ServerVC(m.VCI); svc != nil {
+ // If the request is for the proxy, simply respond to it.
+ p.queue.Put(&message.HealthCheckResponse{VCI: m.VCI})
+ } else if dst := p.Route(m.VCI); dst != nil {
+ m.VCI = dst.VCI
+ dst.Process.queue.Put(m)
+ }
+ case *message.HealthCheckResponse:
+ // Note that the proxy never sends health check requests, so responses
+ // should always be forwarded.
+ if dst := p.Route(m.VCI); dst != nil {
+ m.VCI = dst.VCI
+ dst.Process.queue.Put(m)
+ }
case *message.SetupVC:
// First let's ensure that we can speak a common protocol verison.
intersection, err := iversion.SupportedRange.Intersect(&m.Setup.Versions)
@@ -805,6 +818,10 @@
}
}
+func (p *process) SendHealthCheck(vci id.VC) {
+ p.queue.Put(&message.HealthCheckRequest{VCI: vci})
+}
+
func (p *process) AddReceiveBuffers(vci id.VC, fid id.Flow, bytes uint) {
if bytes == 0 {
return
diff --git a/runtime/internal/rpc/stream/vc/flow.go b/runtime/internal/rpc/stream/vc/flow.go
index d5f2d2f..ae9cffa 100644
--- a/runtime/internal/rpc/stream/vc/flow.go
+++ b/runtime/internal/rpc/stream/vc/flow.go
@@ -5,6 +5,8 @@
package vc
import (
+ "time"
+
"v.io/v23/naming"
"v.io/v23/security"
@@ -15,6 +17,7 @@
backingVC
*reader
*writer
+ channelTimeout time.Duration
}
type backingVC interface {
diff --git a/runtime/internal/rpc/stream/vc/vc.go b/runtime/internal/rpc/stream/vc/vc.go
index 4a383aa..6687a5d 100644
--- a/runtime/internal/rpc/stream/vc/vc.go
+++ b/runtime/internal/rpc/stream/vc/vc.go
@@ -38,6 +38,8 @@
return verror.Register(verror.ID(pkgPath+id), verror.NoRetry, msg)
}
+const defaultChannelTimeout = 30 * time.Minute
+
var (
// These errors are intended to be used as arguments to higher
// level errors and hence {1}{2} is omitted from their format
@@ -64,6 +66,7 @@
errFailedToCreateWriterForNewFlow = reg(".errFailedToCreateWriterForNewFlow", "failed to create writer for new flow({3}){:4}")
errFailedToEnqueueFlow = reg(".errFailedToEnqueueFlow", "failed to enqueue flow at listener{:3}")
errFailedToAcceptSystemFlows = reg(".errFailedToAcceptSystemFlows", "failed to accept system flows{:3}")
+ errHealthCheckFailed = reg(".errHealthCheckFailed", "the health check deadline expired")
)
// DischargeExpiryBuffer specifies how much before discharge expiration we should
@@ -76,6 +79,11 @@
defer apilog.LogCall(nil)(nil) // gologcop: DO NOT EDIT, MUST BE FIRST STATEMENT
}
+type ChannelTimeout time.Duration
+
+func (ChannelTimeout) RPCStreamFlowOpt() {}
+func (ChannelTimeout) RPCStreamListenerOpt() {}
+
const DefaultServerDischargeExpiryBuffer = 20 * time.Second
// DataCache Keys for TypeEncoder/Decoder.
@@ -113,6 +121,10 @@
version version.RPCVersion
remotePubKeyChan chan *crypto.BoxKey // channel which will receive the remote public key during setup.
+ healthCheckNewFlow chan time.Duration
+ healthCheckResponse chan struct{}
+ defaultChannelTimeout time.Duration
+
helper Helper
dataCache *dataCache // dataCache contains information that can shared between Flows from this VC.
loopWG sync.WaitGroup
@@ -176,6 +188,8 @@
// NewWriter creates a buffer queue for Write operations on the
// stream.Flow implementation.
NewWriter(vci id.VC, fid id.Flow, priority bqueue.Priority) (bqueue.Writer, error)
+
+ SendHealthCheck(vci id.VC)
}
// Priorities of flows.
@@ -200,13 +214,14 @@
// Params encapsulates the set of parameters needed to create a new VC.
type Params struct {
- VCI id.VC // Identifier of the VC
- Dialed bool // True if the VC was initiated by the local process.
- LocalEP naming.Endpoint // Endpoint of the local end of the VC.
- RemoteEP naming.Endpoint // Endpoint of the remote end of the VC.
- Pool *iobuf.Pool // Byte pool used for read and write buffer allocations.
- ReserveBytes uint // Number of padding bytes to reserve for headers.
- Helper Helper
+ VCI id.VC // Identifier of the VC
+ Dialed bool // True if the VC was initiated by the local process.
+ LocalEP naming.Endpoint // Endpoint of the local end of the VC.
+ RemoteEP naming.Endpoint // Endpoint of the remote end of the VC.
+ Pool *iobuf.Pool // Byte pool used for read and write buffer allocations.
+ ReserveBytes uint // Number of padding bytes to reserve for headers.
+ ChannelTimeout time.Duration // How long to wait before closing an unresponsive channel.
+ Helper Helper
}
// InternalNew creates a new VC, which implements the stream.VC interface.
@@ -219,6 +234,10 @@
if p.Dialed {
fidOffset = 0
}
+ channelTimeout := defaultChannelTimeout
+ if p.ChannelTimeout != 0 {
+ channelTimeout = p.ChannelTimeout
+ }
return &VC{
ctx: ctx,
vci: p.VCI,
@@ -234,11 +253,14 @@
// id if the VC was initiated by the local process,
// and have an odd id if the VC was initiated by the
// remote process.
- nextConnectFID: id.Flow(NumReservedFlows + fidOffset),
- crypter: crypto.NewNullCrypter(),
- closeCh: make(chan struct{}),
- helper: p.Helper,
- dataCache: newDataCache(),
+ nextConnectFID: id.Flow(NumReservedFlows + fidOffset),
+ crypter: crypto.NewNullCrypter(),
+ closeCh: make(chan struct{}),
+ helper: p.Helper,
+ dataCache: newDataCache(),
+ healthCheckNewFlow: make(chan time.Duration, 1),
+ healthCheckResponse: make(chan struct{}, 1),
+ defaultChannelTimeout: channelTimeout,
}
}
@@ -263,6 +285,12 @@
reader: newReader(readHandlerImpl{vc, fid}),
writer: writer,
}
+ for _, opt := range opts {
+ switch o := opt.(type) {
+ case ChannelTimeout:
+ f.channelTimeout = time.Duration(o)
+ }
+ }
vc.mu.Lock()
if vc.flowMap == nil {
vc.mu.Unlock()
@@ -270,6 +298,9 @@
return nil, verror.New(stream.ErrNetwork, nil, verror.New(errConnectOnClosedVC, nil, vc.closeReason))
}
vc.flowMap[fid] = f
+ if f.channelTimeout != 0 && vc.version >= version.RPCVersion12 {
+ vc.healthCheckNewFlow <- f.channelTimeout
+ }
vc.mu.Unlock()
// New flow created, inform remote end that data can be received on it.
vc.helper.NotifyOfNewFlow(vc.vci, fid, DefaultBytesBufferedPerFlow)
@@ -434,6 +465,14 @@
}
vc.loopWG.Wait()
+
+ // TODO(bprosnitz) Stop the type decoder
+ // We can't stop the type decoder here, because the VC is closed
+ // before all of the types are read.
+ // The type decoder goroutine should still stop, however, because it will get
+ // an EOF at the end of the stream.
+ // typeDec.(*vom.TypeDecoder).Stop()
+
return nil
}
@@ -524,6 +563,14 @@
if err = vc.connectSystemFlows(); err != nil {
return vc.appendCloseReason(err)
}
+
+ vc.mu.Lock()
+ if !vc.closed {
+ vc.loopWG.Add(1)
+ go vc.healthCheckLoop()
+ }
+ vc.mu.Unlock()
+
vc.ctx.VI(1).Infof("Client VC %v authenticated. RemoteBlessings:%v, LocalBlessings:%v", vc, rBlessings, lBlessings)
return nil
}
@@ -569,6 +616,14 @@
if err := vc.connectSystemFlows(); err != nil {
return vc.appendCloseReason(err)
}
+
+ vc.mu.Lock()
+ if !vc.closed {
+ vc.loopWG.Add(1)
+ go vc.healthCheckLoop()
+ }
+ vc.mu.Unlock()
+
vc.ctx.VI(1).Infof("Client VC %v authenticated. RemoteBlessings:%v, LocalBlessings:%v", vc, params.RemoteBlessings, params.LocalBlessings)
return nil
}
@@ -604,6 +659,12 @@
return vc.appendCloseReason(err)
}
}
+ vc.mu.Lock()
+ if !vc.closed {
+ vc.loopWG.Add(1)
+ go vc.healthCheckLoop()
+ }
+ vc.mu.Unlock()
vc.ctx.VI(1).Infof("Client VC %v handshaked with no authentication.", vc)
return nil
}
@@ -678,6 +739,14 @@
return
}
vc.ctx.VI(1).Infof("Server VC %v authenticated. RemoteBlessings:%v, LocalBlessings:%v", vc, rBlessings, lBlessings)
+
+ vc.mu.Lock()
+ if !vc.closed {
+ vc.loopWG.Add(1)
+ go vc.healthCheckLoop()
+ }
+ vc.mu.Unlock()
+
result <- HandshakeResult{ln, nil}
}()
return result
@@ -720,6 +789,14 @@
return
}
vc.ctx.VI(1).Infof("Server VC %v authenticated. RemoteBlessings:%v, LocalBlessings:%v", vc, params.RemoteBlessings, params.LocalBlessings)
+
+ vc.mu.Lock()
+ if !vc.closed {
+ vc.loopWG.Add(1)
+ go vc.healthCheckLoop()
+ }
+ vc.mu.Unlock()
+
result <- HandshakeResult{ln, nil}
}()
return result
@@ -885,7 +962,9 @@
return verror.New(stream.ErrSecurity, nil, verror.New(errFailedToCreateFlowForWireType, nil, err))
}
vc.dataCache.Insert(TypeEncoderKey{}, vom.NewTypeEncoder(conn))
- vc.dataCache.Insert(TypeDecoderKey{}, vom.NewTypeDecoder(conn))
+ td := vom.NewTypeDecoder(conn)
+ td.Start()
+ vc.dataCache.Insert(TypeDecoderKey{}, td)
vc.mu.Lock()
rBlessings := vc.remoteBlessings
@@ -912,7 +991,9 @@
return verror.New(errFailedToCreateFlowForWireType, nil, err)
}
vc.dataCache.Insert(TypeEncoderKey{}, vom.NewTypeEncoder(conn))
- vc.dataCache.Insert(TypeDecoderKey{}, vom.NewTypeDecoder(conn))
+ td := vom.NewTypeDecoder(conn)
+ td.Start()
+ vc.dataCache.Insert(TypeDecoderKey{}, td)
vc.mu.Lock()
lBlessings := vc.localBlessings
@@ -953,6 +1034,23 @@
return ret
}
+// channelTimeout returns the minimum failure detection delay of all active flows on this VC.
+// A return value of zero means that we are not doing health checks.
+func (vc *VC) channelTimeout() time.Duration {
+ // This is not a great implementation, but it is simple, and in current programs
+ // the number of active flows on a VC is almost always very small.
+ // In the new RPC system we should consider a more efficient implementation.
+ vc.mu.Lock()
+ min := vc.defaultChannelTimeout
+ for _, f := range vc.flowMap {
+ if f.channelTimeout != 0 && f.channelTimeout < min {
+ min = f.channelTimeout
+ }
+ }
+ vc.mu.Unlock()
+ return min
+}
+
// findFlow finds the flow id for the provided flow.
// Returns 0 if there is none.
func (vc *VC) findFlow(flow interface{}) id.Flow {
@@ -1088,6 +1186,12 @@
return newWriter(MaxPayloadSizeBytes, bq, alloc, vc.sharedCounters), nil
}
+func (vc *VC) HandleHealthCheckResponse() {
+ if vc.Version() >= version.RPCVersion12 {
+ vc.healthCheckResponse <- struct{}{}
+ }
+}
+
// readHandlerImpl is an adapter for the readHandler interface required by
// the reader type.
type readHandlerImpl struct {
@@ -1132,3 +1236,52 @@
}
return dischargeClient, dischargeExpiryBuffer
}
+
+// healthCheckLoop runs a state machine that manages health checks for the VC.
+func (vc *VC) healthCheckLoop() {
+ defer vc.loopWG.Done()
+ if vc.Version() < version.RPCVersion12 {
+ return
+ }
+
+ // By default we health check the channel every 30 minutes.
+ channelTimeout, now := vc.channelTimeout(), time.Now()
+ sendTimer, closeTimer := time.NewTimer(channelTimeout/2), time.NewTimer(channelTimeout)
+ sendTime, closeTime := now.Add(channelTimeout/2), now.Add(channelTimeout)
+ outstandingRequest := false
+ defer sendTimer.Stop()
+ defer closeTimer.Stop()
+ for {
+ select {
+ case <-vc.closeCh:
+ // The VC is closing, no need for health checks.
+ return
+ case <-vc.healthCheckResponse:
+ outstandingRequest = false
+ channelTimeout, now = vc.channelTimeout(), time.Now()
+ sendTimer.Reset(channelTimeout / 2)
+ closeTimer.Reset(channelTimeout)
+ sendTime, closeTime = now.Add(channelTimeout/2), now.Add(channelTimeout)
+ case <-closeTimer.C:
+ vc.Close(verror.New(stream.ErrAborted, nil, verror.New(errHealthCheckFailed, nil)))
+ return
+ case <-sendTimer.C:
+ if !outstandingRequest {
+ vc.helper.SendHealthCheck(vc.vci)
+ outstandingRequest = true
+ }
+ case newChannelTimeout := <-vc.healthCheckNewFlow:
+ // New flows might have tighter requirements.
+ now = time.Now()
+ newSendTime, newCloseTime := now.Add(newChannelTimeout/2), now.Add(newChannelTimeout)
+ if newSendTime.Before(sendTime) {
+ sendTime = newSendTime
+ sendTimer.Reset(newChannelTimeout / 2)
+ }
+ if newCloseTime.Before(closeTime) {
+ closeTime = newCloseTime
+ closeTimer.Reset(newChannelTimeout)
+ }
+ }
+ }
+}
diff --git a/runtime/internal/rpc/stream/vc/vc_test.go b/runtime/internal/rpc/stream/vc/vc_test.go
index 2ec9d82..0f8a20c 100644
--- a/runtime/internal/rpc/stream/vc/vc_test.go
+++ b/runtime/internal/rpc/stream/vc/vc_test.go
@@ -682,6 +682,8 @@
}
}
+func (h *helper) SendHealthCheck(vci id.VC) {}
+
func (h *helper) AddReceiveBuffers(vci id.VC, fid id.Flow, bytes uint) {
h.mu.Lock()
defer h.mu.Unlock()
diff --git a/runtime/internal/rpc/stream/vif/vif.go b/runtime/internal/rpc/stream/vif/vif.go
index d8fd6d7..7cc1b0b 100644
--- a/runtime/internal/rpc/stream/vif/vif.go
+++ b/runtime/internal/rpc/stream/vif/vif.go
@@ -332,7 +332,7 @@
}
}
principal := stream.GetPrincipalVCOpts(ctx, opts...)
- vc, err := vif.newVC(ctx, vif.allocVCI(), vif.localEP, remoteEP, idleTimeout, true)
+ vc, err := vif.newVC(ctx, vif.allocVCI(), vif.localEP, remoteEP, idleTimeout, 0, true)
if err != nil {
return nil, err
}
@@ -436,14 +436,16 @@
ctx.VI(2).Infof("Ignoring SetupVC message %+v as VIF %s does not accept VCs", m, vif)
return errors.New("VCs not accepted")
}
- var idleTimeout time.Duration
+ var channelTimeout, idleTimeout time.Duration
for _, o := range lopts {
switch v := o.(type) {
case vc.IdleTimeout:
idleTimeout = v.Duration
+ case vc.ChannelTimeout:
+ channelTimeout = time.Duration(v)
}
}
- vcobj, err := vif.newVC(ctx, m.VCI, m.RemoteEndpoint, m.LocalEndpoint, idleTimeout, false)
+ vcobj, err := vif.newVC(ctx, m.VCI, m.RemoteEndpoint, m.LocalEndpoint, idleTimeout, channelTimeout, false)
if err != nil {
return err
}
@@ -737,6 +739,14 @@
}
vif.ctx.VI(2).Infof("Ignoring CloseVC(%+v) for unrecognized VCI on VIF %s", m, vif)
+ case *message.HealthCheckRequest:
+ vif.sendOnExpressQ(&message.HealthCheckResponse{VCI: m.VCI})
+
+ case *message.HealthCheckResponse:
+ if vc, _, _ := vif.vcMap.Find(m.VCI); vc != nil {
+ vc.HandleHealthCheckResponse()
+ }
+
case *message.Setup:
vif.ctx.Infof("Ignoring redundant Setup message %T on VIF %s", m, vif)
@@ -1024,7 +1034,7 @@
return ret
}
-func (vif *VIF) newVC(ctx *context.T, vci id.VC, localEP, remoteEP naming.Endpoint, idleTimeout time.Duration, side vifSide) (*vc.VC, error) {
+func (vif *VIF) newVC(ctx *context.T, vci id.VC, localEP, remoteEP naming.Endpoint, idleTimeout, channelTimeout time.Duration, side vifSide) (*vc.VC, error) {
vif.muStartTimer.Lock()
if vif.startTimer != nil {
vif.startTimer.Stop()
@@ -1032,13 +1042,14 @@
}
vif.muStartTimer.Unlock()
vc := vc.InternalNew(ctx, vc.Params{
- VCI: vci,
- Dialed: side == dialedVIF,
- LocalEP: localEP,
- RemoteEP: remoteEP,
- Pool: vif.pool,
- ReserveBytes: uint(message.HeaderSizeBytes + vif.ctrlCipher.MACSize()),
- Helper: vcHelper{vif},
+ VCI: vci,
+ Dialed: side == dialedVIF,
+ LocalEP: localEP,
+ RemoteEP: remoteEP,
+ Pool: vif.pool,
+ ReserveBytes: uint(message.HeaderSizeBytes + vif.ctrlCipher.MACSize()),
+ ChannelTimeout: channelTimeout,
+ Helper: vcHelper{vif},
})
added, rq, wq := vif.vcMap.Insert(vc)
if added {
@@ -1182,6 +1193,10 @@
h.vif.sendOnExpressQ(&message.OpenFlow{VCI: vci, Flow: fid, InitialCounters: uint32(bytes)})
}
+func (h vcHelper) SendHealthCheck(vci id.VC) {
+ h.vif.sendOnExpressQ(&message.HealthCheckRequest{VCI: vci})
+}
+
func (h vcHelper) AddReceiveBuffers(vci id.VC, fid id.Flow, bytes uint) {
if bytes == 0 {
return
diff --git a/runtime/internal/rpc/stress/mtstress/doc.go b/runtime/internal/rpc/stress/mtstress/doc.go
index 75a4ac4..e4fd432 100644
--- a/runtime/internal/rpc/stress/mtstress/doc.go
+++ b/runtime/internal/rpc/stress/mtstress/doc.go
@@ -37,6 +37,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/runtime/internal/rpc/stress/stress/doc.go b/runtime/internal/rpc/stress/stress/doc.go
index 04e1957..2fc6914 100644
--- a/runtime/internal/rpc/stress/stress/doc.go
+++ b/runtime/internal/rpc/stress/stress/doc.go
@@ -34,6 +34,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/runtime/internal/rpc/stress/stressd/doc.go b/runtime/internal/rpc/stress/stressd/doc.go
index ece1767..0ca5ed9 100644
--- a/runtime/internal/rpc/stress/stressd/doc.go
+++ b/runtime/internal/rpc/stress/stressd/doc.go
@@ -30,6 +30,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/runtime/internal/rpc/test/cancel_test.go b/runtime/internal/rpc/test/cancel_test.go
index cd5d9e9..031090c 100644
--- a/runtime/internal/rpc/test/cancel_test.go
+++ b/runtime/internal/rpc/test/cancel_test.go
@@ -6,15 +6,22 @@
import (
"io"
+ "net"
+ "sync"
"testing"
+ "time"
"v.io/v23"
"v.io/v23/context"
+ "v.io/v23/naming"
+ "v.io/v23/options"
"v.io/v23/rpc"
"v.io/v23/security"
"v.io/v23/verror"
"v.io/x/ref"
+ "v.io/x/ref/runtime/factories/generic"
"v.io/x/ref/runtime/internal/flow/conn"
+ inaming "v.io/x/ref/runtime/internal/naming"
"v.io/x/ref/runtime/internal/rpc/stream/vc"
"v.io/x/ref/test"
)
@@ -190,3 +197,204 @@
waitForCancel(t, ts, cancel)
<-done
}
+
+type channelTestServer struct {
+ waiting chan struct{}
+ canceled chan struct{}
+}
+
+func (s *channelTestServer) Run(ctx *context.T, call rpc.ServerCall, wait time.Duration) error {
+ time.Sleep(wait)
+ return nil
+}
+
+func (s *channelTestServer) WaitForCancel(ctx *context.T, call rpc.ServerCall) error {
+ close(s.waiting)
+ <-ctx.Done()
+ close(s.canceled)
+ return nil
+}
+
+type disConn struct {
+ net.Conn
+ mu sync.Mutex
+ stopread, stopwrite bool
+}
+
+func (p *disConn) stop(read, write bool) {
+ p.mu.Lock()
+ p.stopread = read
+ p.stopwrite = write
+ p.mu.Unlock()
+}
+func (p *disConn) Write(b []byte) (int, error) {
+ p.mu.Lock()
+ stopwrite := p.stopwrite
+ p.mu.Unlock()
+ if stopwrite {
+ return len(b), nil
+ }
+ return p.Conn.Write(b)
+}
+func (p *disConn) Read(b []byte) (int, error) {
+ for {
+ n, err := p.Conn.Read(b)
+ p.mu.Lock()
+ stopread := p.stopread
+ p.mu.Unlock()
+ if err != nil || !stopread {
+ return n, err
+ }
+ }
+}
+
+func registerDisProtocol(wrap string, conns chan *disConn) {
+ dial, resolve, listen, protonames := rpc.RegisteredProtocol(wrap)
+ rpc.RegisterProtocol("dis", func(ctx *context.T, p, a string, t time.Duration) (net.Conn, error) {
+ conn, err := dial(ctx, protonames[0], a, t)
+ if err == nil {
+ dc := &disConn{Conn: conn}
+ conns <- dc
+ conn = dc
+ }
+ return conn, err
+ }, func(ctx *context.T, protocol, address string) (string, string, error) {
+ _, a, err := resolve(ctx, protonames[0], address)
+ return "dis", a, err
+ }, func(ctx *context.T, protocol, address string) (net.Listener, error) {
+ return listen(ctx, protonames[0], address)
+ })
+}
+
+func findEndpoint(ctx *context.T, s rpc.Server) naming.Endpoint {
+ if status := s.Status(); len(status.Endpoints) > 0 {
+ return status.Endpoints[0]
+ } else {
+ timer := time.NewTicker(10 * time.Millisecond)
+ defer timer.Stop()
+ for range timer.C {
+ if status = s.Status(); len(status.Proxies) > 0 {
+ return status.Proxies[0].Endpoint
+ }
+ }
+ }
+ return nil // Unreachable
+}
+
+func testChannelTimeout(t *testing.T, ctx *context.T) {
+ _, s, err := v23.WithNewServer(ctx, "", &channelTestServer{}, security.AllowEveryone())
+ if err != nil {
+ t.Fatal(err)
+ }
+ ep := findEndpoint(ctx, s)
+ conns := make(chan *disConn, 1)
+ registerDisProtocol(ep.Addr().Network(), conns)
+
+ iep := ep.(*inaming.Endpoint)
+ iep.Protocol = "dis"
+
+ // Long calls don't cause the timeout, the control stream is still operating.
+ err = v23.GetClient(ctx).Call(ctx, iep.Name(), "Run", []interface{}{2 * time.Second},
+ nil, options.ChannelTimeout(500*time.Millisecond))
+ if err != nil {
+ t.Errorf("got %v want nil", err)
+ }
+ (<-conns).stop(true, true)
+ err = v23.GetClient(ctx).Call(ctx, iep.Name(), "Run", []interface{}{time.Duration(0)},
+ nil, options.ChannelTimeout(100*time.Millisecond))
+ if err == nil {
+ t.Errorf("got nil error, want non-nil")
+ }
+}
+
+func TestChannelTimeout(t *testing.T) {
+ if ref.RPCTransitionState() >= ref.XServers {
+ t.Skip("The new RPC system does not yet support channel timeouts")
+ }
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
+ testChannelTimeout(t, ctx)
+}
+
+func TestChannelTimeout_Proxy(t *testing.T) {
+ if ref.RPCTransitionState() >= ref.XServers {
+ t.Skip("The new RPC system does not yet support channel timeouts")
+ }
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
+
+ ls := v23.GetListenSpec(ctx)
+ pshutdown, pendpoint, err := generic.NewProxy(ctx, ls, security.AllowEveryone(), "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer pshutdown()
+ ls.Addrs = nil
+ ls.Proxy = pendpoint.Name()
+ testChannelTimeout(t, v23.WithListenSpec(ctx, ls))
+}
+
+func testChannelTimeOut_Server(t *testing.T, ctx *context.T) {
+ cts := &channelTestServer{
+ canceled: make(chan struct{}),
+ waiting: make(chan struct{}),
+ }
+ _, s, err := v23.WithNewServer(ctx, "", cts, security.AllowEveryone(),
+ options.ChannelTimeout(500*time.Millisecond))
+ if err != nil {
+ t.Fatal(err)
+ }
+ ep := findEndpoint(ctx, s)
+ conns := make(chan *disConn, 1)
+ registerDisProtocol(ep.Addr().Network(), conns)
+
+ iep := ep.(*inaming.Endpoint)
+ iep.Protocol = "dis"
+
+ // Long calls don't cause the timeout, the control stream is still operating.
+ err = v23.GetClient(ctx).Call(ctx, iep.Name(), "Run", []interface{}{2 * time.Second},
+ nil)
+ if err != nil {
+ t.Errorf("got %v want nil", err)
+ }
+ // When the server closes the VC in response to the channel timeout the server
+ // call will see a cancellation. We do a call and wait for that server-side
+ // cancellation. Then we cancel the client call just to clean up.
+ cctx, cancel := context.WithCancel(ctx)
+ done := make(chan struct{})
+ go func() {
+ v23.GetClient(cctx).Call(cctx, iep.Name(), "WaitForCancel", nil, nil)
+ close(done)
+ }()
+ <-cts.waiting
+ (<-conns).stop(true, true)
+ <-cts.canceled
+ cancel()
+ <-done
+}
+
+func TestChannelTimeout_Server(t *testing.T) {
+ if ref.RPCTransitionState() >= ref.XServers {
+ t.Skip("The new RPC system does not yet support channel timeouts")
+ }
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
+ testChannelTimeOut_Server(t, ctx)
+}
+
+func TestChannelTimeout_ServerProxy(t *testing.T) {
+ if ref.RPCTransitionState() >= ref.XServers {
+ t.Skip("The new RPC system does not yet support channel timeouts")
+ }
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
+ ls := v23.GetListenSpec(ctx)
+ pshutdown, pendpoint, err := generic.NewProxy(ctx, ls, security.AllowEveryone(), "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer pshutdown()
+ ls.Addrs = nil
+ ls.Proxy = pendpoint.Name()
+ testChannelTimeOut_Server(t, v23.WithListenSpec(ctx, ls))
+}
diff --git a/runtime/internal/rpc/test/client_test.go b/runtime/internal/rpc/test/client_test.go
index efec8ac..a42aca4 100644
--- a/runtime/internal/rpc/test/client_test.go
+++ b/runtime/internal/rpc/test/client_test.go
@@ -6,6 +6,7 @@
import (
"fmt"
+ "io"
"net"
"os"
"path/filepath"
@@ -17,6 +18,8 @@
"v.io/v23"
"v.io/v23/context"
+ "v.io/v23/flow"
+ fmessage "v.io/v23/flow/message"
"v.io/v23/naming"
"v.io/v23/options"
"v.io/v23/rpc"
@@ -28,6 +31,7 @@
"v.io/x/ref/internal/logger"
lsecurity "v.io/x/ref/lib/security"
_ "v.io/x/ref/runtime/factories/generic"
+ "v.io/x/ref/runtime/internal/flow/protocols/debug"
inaming "v.io/x/ref/runtime/internal/naming"
irpc "v.io/x/ref/runtime/internal/rpc"
"v.io/x/ref/runtime/internal/rpc/stream/message"
@@ -52,6 +56,14 @@
func runRootMT(seclevel options.SecurityLevel, env *modules.Env, args ...string) error {
ctx, shutdown := v23.Init()
defer shutdown()
+ if seclevel == options.SecurityNone && ref.RPCTransitionState() >= ref.XServers {
+ ls := v23.GetListenSpec(ctx)
+ for i := range ls.Addrs {
+ ls.Addrs[i].Protocol, ls.Addrs[i].Address = debug.WrapAddress(
+ ls.Addrs[i].Protocol, ls.Addrs[i].Address)
+ }
+ ctx = v23.WithListenSpec(ctx, ls)
+ }
mt, err := mounttablelib.NewMountTableDispatcher(ctx, "", "", "mounttable")
if err != nil {
return fmt.Errorf("mounttablelib.NewMountTableDispatcher failed: %s", err)
@@ -407,7 +419,7 @@
func TestStartCallBadProtocol(t *testing.T) {
if ref.RPCTransitionState() >= ref.XServers {
- t.Skip("This test needs to be fixed under the new protocol")
+ t.Skip("This version of the test only runs under the old RPC system.")
}
ctx, shutdown := test.V23Init()
defer shutdown()
@@ -472,6 +484,64 @@
logErr("insecure client", err)
}
+type closeConn struct {
+ ctx *context.T
+ flow.Conn
+ closed chan struct{}
+}
+
+func (c *closeConn) ReadMsg() ([]byte, error) {
+ buf, err := c.Conn.ReadMsg()
+ if err == nil {
+ if m, err := fmessage.Read(c.ctx, buf); err == nil {
+ if _, ok := m.(*fmessage.Data); ok {
+ close(c.closed)
+ c.Conn.Close()
+ return nil, io.EOF
+ }
+ }
+ }
+ return buf, err
+}
+
+func TestStartCallBadProtocol_NewRPC(t *testing.T) {
+ if ref.RPCTransitionState() < ref.XServers {
+ t.Skip("This version of the test only runs under the new RPC system")
+ }
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
+
+ client := v23.GetClient(ctx)
+
+ logErr := func(msg string, err error) {
+ logErrors(t, msg, true, false, false, err)
+ }
+
+ ns := v23.GetNamespace(ctx)
+ // The following test will fail due to a broken connection.
+ // We need to run mount table and servers with no security to use
+ // the V23CloseAtMessage net.Conn mock.
+ _, shutdown = runMountTable(t, ctx, "nosec")
+ defer shutdown()
+ ns.SetRoots(debug.WrapName(ns.Roots()[0]))
+ ch := make(chan struct{})
+ nctx := debug.WithFilter(ctx, func(c flow.Conn) flow.Conn {
+ return &closeConn{ctx, c, ch}
+ })
+ call, err := client.StartCall(nctx, "name", "noname", nil, options.NoRetry{})
+ if verror.ErrorID(err) != verror.ErrNoServers.ID {
+ t.Errorf("wrong error: %s", verror.DebugString(err))
+ }
+ if call != nil {
+ t.Errorf("expected call to be nil")
+ }
+ logErr("broken connection", err)
+
+ // Make sure we failed because we really did close the connection
+ // with our filter.
+ <-ch
+}
+
func TestStartCallSecurity(t *testing.T) {
ctx, shutdown := test.V23Init()
defer shutdown()
diff --git a/runtime/internal/rpc/test/full_test.go b/runtime/internal/rpc/test/full_test.go
index ad8bfdb..5af791a 100644
--- a/runtime/internal/rpc/test/full_test.go
+++ b/runtime/internal/rpc/test/full_test.go
@@ -932,8 +932,7 @@
(*crypto.BoxKey)(rpk))
// Read the auth message from the server.
- var rAuth *message.Auth
- for {
+ for auth := false; !auth; {
b, err = conn.ReadMsg()
if err != nil {
t.Fatal(err)
@@ -945,23 +944,26 @@
if err != nil {
t.Fatal(err)
}
- if rAuth, ok = m.(*message.Auth); ok {
- break
+ switch m.(type) {
+ case *message.Auth:
+ auth = true
+ case *message.Data:
+ default:
+ continue
}
- }
+ if b, err = message.Append(ctx, m, nil); err != nil {
+ t.Fatal(err)
+ }
+ tmp := make([]byte, len(b)+cipher.MACSize())
+ copy(tmp, b)
+ b = tmp
+ if err = cipher.Seal(b); err != nil {
+ t.Fatal(err)
+ }
+ if _, err = conn.WriteMsg(b); err != nil {
+ t.Fatal(err)
+ }
- // Send the auth message back to the server.
- if b, err = message.Append(ctx, rAuth, nil); err != nil {
- t.Fatal(err)
- }
- tmp := make([]byte, len(b)+cipher.MACSize())
- copy(tmp, b)
- b = tmp
- if err = cipher.Seal(b); err != nil {
- t.Fatal(err)
- }
- if _, err = conn.WriteMsg(b); err != nil {
- t.Fatal(err)
}
// The server should send a tearDown message complaining about the channel binding.
diff --git a/runtime/internal/rpc/typecache.go b/runtime/internal/rpc/typecache.go
index db96d0b..64c37ac 100644
--- a/runtime/internal/rpc/typecache.go
+++ b/runtime/internal/rpc/typecache.go
@@ -50,6 +50,7 @@
tce.cancel = c
tce.enc = vom.NewTypeEncoder(f)
tce.dec = vom.NewTypeDecoder(f)
+ tce.dec.Start() // Stopped in collect()
close(tce.ready)
}
}
@@ -104,6 +105,7 @@
if tce.cancel != nil {
tce.cancel()
}
+ tce.dec.Stop()
delete(tc.flows, conn)
}
}
diff --git a/runtime/internal/rpc/version/version.go b/runtime/internal/rpc/version/version.go
index 06cc3cd..67af478 100644
--- a/runtime/internal/rpc/version/version.go
+++ b/runtime/internal/rpc/version/version.go
@@ -25,7 +25,7 @@
//
// Min is incremented whenever we want to remove support for old protocol
// versions.
-var SupportedRange = &Range{Min: version.RPCVersion10, Max: version.RPCVersion11}
+var SupportedRange = &Range{Min: version.RPCVersion10, Max: version.RPCVersion12}
var Supported = version.RPCVersionRange{Min: version.RPCVersion10, Max: version.RPCVersion11}
func init() {
diff --git a/runtime/internal/rpc/xclient.go b/runtime/internal/rpc/xclient.go
index b9d19e2..0c78353 100644
--- a/runtime/internal/rpc/xclient.go
+++ b/runtime/internal/rpc/xclient.go
@@ -282,13 +282,6 @@
security.Blessings, map[string]security.Discharge, error) {
localPrincipal := v23.GetPrincipal(ctx)
clientB := localPrincipal.BlessingStore().ForPeer(peerNames...)
- if clientB.IsZero() {
- // TODO(ataly, ashankar): We need not error out here and instead can just
- // send the <nil> blessings to the server.
- // TODO(suharshs): Make this a different error when we are making all the vdl errors
- // in a errors.vdl file.
- return security.Blessings{}, nil, verror.New(errNoBlessingsForPeer, ctx, nil, nil)
- }
impetus, err := mkDischargeImpetus(peerNames, x.method, x.args)
if err != nil {
return security.Blessings{}, nil, err
@@ -554,7 +547,7 @@
// network connection being shutdown abruptly.
func (fc *flowXClient) close(err error) error {
subErr := verror.SubErr{Err: err, Options: verror.Print}
- subErr.Name = "remote=" + fc.flow.Conn().RemoteEndpoint().String()
+ subErr.Name = "remote=" + fc.flow.RemoteEndpoint().String()
// TODO(toddw): cancel context instead?
if _, cerr := fc.flow.WriteMsgAndClose(); cerr != nil && err == nil {
// TODO(mattr): The context is often already canceled here, in
@@ -625,8 +618,8 @@
LocalPrincipal: v23.GetPrincipal(ctx),
LocalBlessings: fc.flow.LocalBlessings(),
RemoteBlessings: fc.flow.RemoteBlessings(),
- LocalEndpoint: fc.flow.Conn().LocalEndpoint(),
- RemoteEndpoint: fc.flow.Conn().RemoteEndpoint(),
+ LocalEndpoint: fc.flow.LocalEndpoint(),
+ RemoteEndpoint: fc.flow.RemoteEndpoint(),
LocalDischarges: fc.flow.LocalDischarges(),
RemoteDischarges: fc.flow.RemoteDischarges(),
Method: method,
diff --git a/runtime/internal/rpc/xserver.go b/runtime/internal/rpc/xserver.go
index 4a51f49..5b8a798 100644
--- a/runtime/internal/rpc/xserver.go
+++ b/runtime/internal/rpc/xserver.go
@@ -861,9 +861,9 @@
}
func (fs *xflowServer) LocalEndpoint() naming.Endpoint {
//nologcall
- return fs.flow.Conn().LocalEndpoint()
+ return fs.flow.LocalEndpoint()
}
func (fs *xflowServer) RemoteEndpoint() naming.Endpoint {
//nologcall
- return fs.flow.Conn().RemoteEndpoint()
+ return fs.flow.RemoteEndpoint()
}
diff --git a/runtime/internal/rt/security.go b/runtime/internal/rt/security.go
index 1b8d6cf..35d9427 100644
--- a/runtime/internal/rt/security.go
+++ b/runtime/internal/rt/security.go
@@ -8,18 +8,14 @@
"fmt"
"os"
"os/user"
- "strconv"
- "syscall"
"v.io/v23/context"
- "v.io/v23/naming"
"v.io/v23/security"
"v.io/v23/verror"
"v.io/x/ref"
"v.io/x/ref/lib/exec"
"v.io/x/ref/lib/mgmt"
vsecurity "v.io/x/ref/lib/security"
- inaming "v.io/x/ref/runtime/internal/naming"
"v.io/x/ref/services/agent"
"v.io/x/ref/services/agent/agentlib"
)
@@ -30,9 +26,7 @@
}
if len(credentials) > 0 {
// Explicitly specified credentials, ignore the agent.
- if _, fd, _ := agentEP(); fd >= 0 {
- syscall.Close(fd)
- }
+
// TODO(ataly, ashankar): If multiple runtimes are getting
// initialized at the same time from the same
// ref.EnvCredentials we will need some kind of locking for the
@@ -54,40 +48,6 @@
} else if principal != nil {
return principal, nil, func() { principal.Close() }, nil
}
- if ep, _, err := agentEP(); err != nil {
- return nil, nil, nil, err
- } else if ep != nil {
- // Use a new stream manager and an "incomplete" client (the
- // principal is nil) to talk to the agent.
- //
- // The lack of a principal works out for the rpc.Client
- // only because the agent uses anonymous unix sockets and
- // the SecurityNone option.
- //
- // Using a distinct stream manager to manage agent-related
- // connections helps isolate these connections to the agent
- // from management of any other connections created in the
- // process (such as future RPCs to other services).
- if ctx, err = r.WithNewStreamManager(ctx); err != nil {
- return nil, nil, nil, err
- }
- client := r.GetClient(ctx)
-
- // We reparent the context we use to construct the agent.
- // We do this because the agent needs to be able to make RPCs
- // during runtime shutdown.
- ctx, shutdown = context.WithRootCancel(ctx)
-
- // TODO(cnicolaou): the agentlib can call back into runtime to get the principal,
- // which will be a problem if the runtime is not initialized, hence this code
- // path is fragile. We should ideally provide an option to work around this case.
- if principal, err = agentlib.NewAgentPrincipal(ctx, ep, client); err != nil {
- shutdown()
- client.Close()
- return nil, nil, nil, err
- }
- return principal, []interface{}{client}, shutdown, nil
- }
// No agent, no explicit credentials specified: - create a new principal and blessing in memory.
if principal, err = vsecurity.NewPrincipal(); err != nil {
return principal, nil, nil, err
@@ -95,15 +55,6 @@
return principal, nil, func() {}, vsecurity.InitDefaultBlessings(principal, defaultBlessingName())
}
-func parseAgentFD(ep naming.Endpoint) (int, error) {
- fd := ep.Addr().String()
- ifd, err := strconv.Atoi(fd)
- if err != nil {
- ifd = -1
- }
- return ifd, nil
-}
-
func ipcAgent() (agent.Principal, error) {
handle, err := exec.GetChildHandle()
if err != nil && verror.ErrorID(err) != exec.ErrNoVersion.ID {
@@ -122,40 +73,6 @@
return agentlib.NewAgentPrincipalX(path)
}
-// agentEP returns an Endpoint to be used to communicate with
-// the security agent if the current process has been configured to use the
-// agent.
-func agentEP() (naming.Endpoint, int, error) {
- handle, err := exec.GetChildHandle()
- if err != nil && verror.ErrorID(err) != exec.ErrNoVersion.ID {
- return nil, -1, err
- }
- var endpoint string
- if handle != nil {
- // We were started by a parent (presumably, device manager).
- endpoint, _ = handle.Config.Get(mgmt.SecurityAgentEndpointConfigKey)
- } else {
- endpoint = os.Getenv(ref.EnvAgentEndpoint)
- }
- if endpoint == "" {
- return nil, -1, nil
- }
- ep, err := inaming.NewEndpoint(endpoint)
- if err != nil {
- return nil, -1, err
- }
-
- // Don't let children accidentally inherit the agent connection.
- fd, err := parseAgentFD(ep)
- if err != nil {
- return nil, -1, err
- }
- if fd >= 0 {
- syscall.CloseOnExec(fd)
- }
- return ep, fd, nil
-}
-
func defaultBlessingName() string {
var name string
if user, _ := user.Current(); user != nil && len(user.Username) > 0 {
diff --git a/services/agent/agentd/doc.go b/services/agent/agentd/doc.go
index 22f3860..d040b7c 100644
--- a/services/agent/agentd/doc.go
+++ b/services/agent/agentd/doc.go
@@ -51,6 +51,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-vmodule=
diff --git a/services/agent/agentlib/client.go b/services/agent/agentlib/client.go
index 6312028..6f4438a 100644
--- a/services/agent/agentlib/client.go
+++ b/services/agent/agentlib/client.go
@@ -9,24 +9,14 @@
import (
"fmt"
"io"
- "net"
- "os"
- "strconv"
"sync"
- "syscall"
- "v.io/v23/context"
- "v.io/v23/naming"
- "v.io/v23/options"
- "v.io/v23/rpc"
"v.io/v23/security"
"v.io/v23/verror"
- "v.io/v23/vtrace"
"v.io/x/ref/internal/logger"
"v.io/x/ref/services/agent"
"v.io/x/ref/services/agent/internal/cache"
"v.io/x/ref/services/agent/internal/ipc"
- "v.io/x/ref/services/agent/internal/unixfd"
)
const pkgPath = "v.io/x/ref/services/agent/agentlib"
@@ -73,35 +63,6 @@
return nil
}
-type vrpcCaller struct {
- ctx *context.T
- client rpc.Client
- name string
- cancel func()
-}
-
-func (c *vrpcCaller) Close() error {
- c.cancel()
- return nil
-}
-
-func (c *vrpcCaller) call(name string, results []interface{}, args ...interface{}) error {
- call, err := c.startCall(name, args...)
- if err != nil {
- return err
- }
- if err := call.Finish(results...); err != nil {
- return err
- }
- return nil
-}
-
-func (c *vrpcCaller) startCall(name string, args ...interface{}) (rpc.ClientCall, error) {
- ctx, _ := vtrace.WithNewTrace(c.ctx)
- // SecurityNone is safe here since we're using anonymous unix sockets.
- return c.client.StartCall(ctx, c.name, name, args, options.SecurityNone, options.Preresolved{})
-}
-
func results(inputs ...interface{}) []interface{} {
return inputs
}
@@ -141,68 +102,6 @@
return cached, nil
}
-// NewAgentPrincipal returns a security.Pricipal using the PrivateKey held in a remote agent process.
-// 'endpoint' is the endpoint for connecting to the agent, typically obtained from
-// os.GetEnv(envvar.AgentEndpoint).
-// 'ctx' should not have a deadline, and should never be cancelled while the
-// principal is in use.
-func NewAgentPrincipal(ctx *context.T, endpoint naming.Endpoint, insecureClient rpc.Client) (security.Principal, error) {
- p, err := newUncachedPrincipal(ctx, endpoint, insecureClient)
- if err != nil {
- return p, err
- }
- caller := p.caller.(*vrpcCaller)
- call, callErr := caller.startCall("NotifyWhenChanged")
- if callErr != nil {
- return nil, callErr
- }
- return cache.NewCachedPrincipal(caller.ctx, p, call)
-}
-func newUncachedPrincipal(ctx *context.T, ep naming.Endpoint, insecureClient rpc.Client) (*client, error) {
- // This isn't a real vanadium endpoint. It contains the vanadium version
- // info, but the address is serving the agent protocol.
- if ep.Addr().Network() != "" {
- return nil, verror.New(errInvalidProtocol, ctx, ep.Addr().Network())
- }
- fd, err := strconv.Atoi(ep.Addr().String())
- if err != nil {
- return nil, err
- }
- syscall.ForkLock.Lock()
- fd, err = syscall.Dup(fd)
- if err == nil {
- syscall.CloseOnExec(fd)
- }
- syscall.ForkLock.Unlock()
- if err != nil {
- return nil, err
- }
- f := os.NewFile(uintptr(fd), "agent_client")
- defer f.Close()
- conn, err := net.FileConn(f)
- if err != nil {
- return nil, err
- }
- // This is just an arbitrary 1 byte string. The value is ignored.
- data := make([]byte, 1)
- addr, err := unixfd.SendConnection(conn.(*net.UnixConn), data)
- if err != nil {
- return nil, err
- }
- ctx, cancel := context.WithCancel(ctx)
- caller := &vrpcCaller{
- client: insecureClient,
- name: naming.JoinAddressName(agentEndpoint("unixfd", addr.String()), ""),
- ctx: ctx,
- cancel: cancel,
- }
- agent := &client{caller: caller}
- if err := agent.fetchPublicKey(); err != nil {
- return nil, err
- }
- return agent, nil
-}
-
func (c *client) Close() error {
return c.caller.Close()
}
@@ -249,16 +148,6 @@
return c.key
}
-func (c *client) BlessingsInfo(blessings security.Blessings) map[string][]security.Caveat {
- var bInfo map[string][]security.Caveat
- err := c.caller.call("BlessingsInfo", results(&bInfo), blessings)
- if err != nil {
- logger.Global().Infof("error calling BlessingsInfo: %v", err)
- return nil
- }
- return bInfo
-}
-
func (c *client) BlessingStore() security.BlessingStore {
return &blessingStore{caller: c.caller, key: c.key}
}
@@ -267,16 +156,6 @@
return &blessingRoots{c.caller}
}
-// TODO(ataly): Implement this method.
-func (c *client) Encrypter() security.BlessingsBasedEncrypter {
- return nil
-}
-
-// TODO(ataly): Implement this method.
-func (c *client) Decrypter() security.BlessingsBasedDecrypter {
- return nil
-}
-
type blessingStore struct {
caller caller
key security.PublicKey
@@ -396,13 +275,3 @@
return
}
-func agentEndpoint(proto, addr string) string {
- // TODO: use naming.FormatEndpoint when it supports version 6.
- return fmt.Sprintf("@6@%s@%s@@@s@@@", proto, addr)
-}
-
-func AgentEndpoint(fd int) string {
- // We use an empty protocol here because this isn't really speaking
- // veyron rpc.
- return agentEndpoint("", fmt.Sprintf("%d", fd))
-}
diff --git a/services/agent/agentlib/peer_test.go b/services/agent/agentlib/peer_test.go
index 3d5ceeb..cb9ed7c 100644
--- a/services/agent/agentlib/peer_test.go
+++ b/services/agent/agentlib/peer_test.go
@@ -5,16 +5,9 @@
package agentlib
import (
- "v.io/v23/context"
- "v.io/v23/naming"
- "v.io/v23/rpc"
"v.io/v23/security"
)
-func NewUncachedPrincipal(ctx *context.T, endpoint naming.Endpoint, insecureClient rpc.Client) (security.Principal, error) {
- return newUncachedPrincipal(ctx, endpoint, insecureClient)
-}
-
func NewUncachedPrincipalX(path string) (security.Principal, error) {
return newUncachedPrincipalX(path)
}
diff --git a/services/agent/internal/cache/cache.go b/services/agent/internal/cache/cache.go
index 307019f..ea2e8cb 100644
--- a/services/agent/internal/cache/cache.go
+++ b/services/agent/internal/cache/cache.go
@@ -345,10 +345,6 @@
/* impl */ agent.Principal
}
-func (p *cachedPrincipal) BlessingsInfo(blessings security.Blessings) map[string][]security.Caveat {
- return p.cache.BlessingsInfo(blessings)
-}
-
func (p *cachedPrincipal) BlessingStore() security.BlessingStore {
return p.cache.BlessingStore()
}
@@ -414,7 +410,7 @@
cachedRoots.flush()
cachedStore.flush()
}
- sp, err := security.CreatePrincipal(dummySigner{impl.PublicKey()}, cachedStore, cachedRoots, impl.Encrypter(), impl.Decrypter())
+ sp, err := security.CreatePrincipal(dummySigner{impl.PublicKey()}, cachedStore, cachedRoots)
if err != nil {
return
}
diff --git a/services/agent/internal/pingpong/doc.go b/services/agent/internal/pingpong/doc.go
index d223a6a..41245c8 100644
--- a/services/agent/internal/pingpong/doc.go
+++ b/services/agent/internal/pingpong/doc.go
@@ -30,6 +30,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/services/agent/internal/server/server.go b/services/agent/internal/server/server.go
index 20e7669..73fe276 100644
--- a/services/agent/internal/server/server.go
+++ b/services/agent/internal/server/server.go
@@ -231,11 +231,6 @@
return a.principal.PublicKey().MarshalBinary()
}
-func (a *agentd) BlessingsInfo(blessings security.Blessings) (map[string][]security.Caveat, error) {
- a.mu.RLock()
- return a.principal.BlessingsInfo(blessings), nil
-}
-
func (a *agentd) BlessingStoreSet(blessings security.Blessings, forPeers security.BlessingPattern) (security.Blessings, error) {
defer a.unlock()
a.mu.Lock()
diff --git a/services/agent/internal/test_principal/doc.go b/services/agent/internal/test_principal/doc.go
index 05b9476..689da03 100644
--- a/services/agent/internal/test_principal/doc.go
+++ b/services/agent/internal/test_principal/doc.go
@@ -26,6 +26,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/services/agent/internal/test_principal/main.go b/services/agent/internal/test_principal/main.go
index 7fc129e..eca2af3 100644
--- a/services/agent/internal/test_principal/main.go
+++ b/services/agent/internal/test_principal/main.go
@@ -62,10 +62,8 @@
errorf("%v environment variable is not set", ref.EnvAgentPath)
}
// A pristine agent has a single blessing "agent_principal" (from agentd/main.go).
- if blessings := p.BlessingsInfo(p.BlessingStore().Default()); len(blessings) != 1 {
- errorf("Got %d blessings, expected 1: %v", len(blessings), blessings)
- } else if _, ok := blessings["agent_principal"]; !ok {
- errorf("No agent_principal blessins, got %v", blessings)
+ if got, want := security.BlessingNames(p, p.BlessingStore().Default()), []string{"agent_principal"}; !reflect.DeepEqual(got, want) {
+ errorf("Got %v want %v", got, want)
}
// BlessSelf
diff --git a/services/agent/internal/unixfd/unixfd.go b/services/agent/internal/unixfd/unixfd.go
deleted file mode 100644
index 025ccaf..0000000
--- a/services/agent/internal/unixfd/unixfd.go
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright 2015 The Vanadium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package unixfd provides provides support for Dialing and Listening
-// on already connected file descriptors (like those returned by socketpair).
-package unixfd
-
-import (
- "fmt"
- "io"
- "net"
- "os"
- "strconv"
- "sync"
- "syscall"
- "time"
- "unsafe"
-
- "v.io/v23/context"
- "v.io/v23/rpc"
- "v.io/v23/verror"
-)
-
-const pkgPath = "v.io/x/ref/services/agent/internal/unixfd"
-
-var (
- errListenerClosed = verror.Register(pkgPath+".errListenerClosed", verror.NoRetry, "{1:}{2:} listener closed{:_}")
- errListenerAlreadyClosed = verror.Register(pkgPath+".errListenerAlreadyClosed", verror.NoRetry, "{1:}{2:} listener already closed{:_}")
- errCantSendSocketWithoutData = verror.Register(pkgPath+".errCantSendSocketWithoutData", verror.NoRetry, "{1:}{2:} cannot send a socket without data.{:_}")
- errWrongSentLength = verror.Register(pkgPath+".errWrongSentLength", verror.NoRetry, "{1:}{2:} expected to send {3}, {4} bytes, sent {5}, {6}{:_}")
- errTooBigOOB = verror.Register(pkgPath+".errTooBigOOB", verror.NoRetry, "{1:}{2:} received too large oob data ({3}, max {4}){:_}")
- errBadNetwork = verror.Register(pkgPath+".errBadNetwork", verror.NoRetry, "{1:}{2:} invalid network{:_}")
-)
-
-const Network string = "unixfd"
-
-func init() {
- rpc.RegisterProtocol(Network, unixFDConn, unixFDResolve, unixFDListen)
-}
-
-// singleConnListener implements net.Listener for an already-connected socket.
-// This is different from net.FileListener, which calls syscall.Listen
-// on an unconnected socket.
-type singleConnListener struct {
- c chan net.Conn
- addr net.Addr
- sync.Mutex
-}
-
-func (l *singleConnListener) getChan() chan net.Conn {
- l.Lock()
- defer l.Unlock()
- return l.c
-}
-
-func (l *singleConnListener) Accept() (net.Conn, error) {
- c := l.getChan()
- if c == nil {
- return nil, verror.New(errListenerClosed, nil)
- }
- if conn, ok := <-c; ok {
- return conn, nil
- }
- return nil, io.EOF
-}
-
-func (l *singleConnListener) Close() error {
- l.Lock()
- defer l.Unlock()
- lc := l.c
- if lc == nil {
- return verror.New(errListenerAlreadyClosed, nil)
- }
- close(l.c)
- l.c = nil
- // If the socket was never Accept'ed we need to close it.
- if c, ok := <-lc; ok {
- return c.Close()
- }
- return nil
-}
-
-func (l *singleConnListener) Addr() net.Addr {
- return l.addr
-}
-
-func unixFDConn(ctx *context.T, protocol, address string, timeout time.Duration) (net.Conn, error) {
- // TODO(cnicolaou): have this respect the timeout. Possibly have a helper
- // function that can be generally used for this, but in practice, I think
- // it'll be cleaner to use the underlying protocol's deadline support of it
- // has it.
- fd, err := strconv.ParseInt(address, 10, 32)
- if err != nil {
- return nil, err
- }
- file := os.NewFile(uintptr(fd), "tmp")
- conn, err := net.FileConn(file)
- // 'file' is not used after this point, but we keep it open
- // so that 'address' remains valid.
- if err != nil {
- file.Close()
- return nil, err
- }
- // We wrap 'conn' so we can customize the address, and also
- // to close 'file'.
- return &fdConn{addr: addr(address), sock: file, Conn: conn}, nil
-}
-
-type fdConn struct {
- addr net.Addr
- sock *os.File
- net.Conn
-
- mu sync.Mutex
- closed bool
-}
-
-func (c *fdConn) Close() (err error) {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.closed {
- return nil
- }
-
- c.closed = true
- defer c.sock.Close()
- return c.Conn.Close()
-}
-
-func (c *fdConn) LocalAddr() net.Addr {
- return c.addr
-}
-
-func (c *fdConn) RemoteAddr() net.Addr {
- return c.addr
-}
-
-func unixFDResolve(ctx *context.T, _, address string) (string, string, error) {
- return Network, address, nil
-}
-
-func unixFDListen(ctx *context.T, protocol, address string) (net.Listener, error) {
- conn, err := unixFDConn(ctx, protocol, address, 0)
- if err != nil {
- return nil, err
- }
- c := make(chan net.Conn, 1)
- c <- conn
- return &singleConnListener{c, conn.LocalAddr(), sync.Mutex{}}, nil
-}
-
-type addr string
-
-func (a addr) Network() string { return Network }
-func (a addr) String() string { return string(a) }
-
-// Addr returns a net.Addr for the unixfd network for the given file descriptor.
-func Addr(fd uintptr) net.Addr {
- return addr(fmt.Sprintf("%d", fd))
-}
-
-type fileDescriptor struct {
- fd chan int
- name string
-}
-
-func newFd(fd int, name string) *fileDescriptor {
- ch := make(chan int, 1)
- ch <- fd
- close(ch)
- d := &fileDescriptor{ch, name}
- return d
-}
-
-func (f *fileDescriptor) releaseAddr() net.Addr {
- if fd, ok := <-f.fd; ok {
- return Addr(uintptr(fd))
- }
- return nil
-}
-
-func (f *fileDescriptor) releaseFile() *os.File {
- if fd, ok := <-f.fd; ok {
- return os.NewFile(uintptr(fd), f.name)
- }
- return nil
-}
-
-// maybeClose closes the file descriptor, if it hasn't been released.
-func (f *fileDescriptor) maybeClose() {
- if file := f.releaseFile(); file != nil {
- file.Close()
- }
-}
-
-// Socketpair returns a pair of connected sockets for communicating with a child process.
-func Socketpair() (*net.UnixConn, *os.File, error) {
- lfd, rfd, err := socketpair()
- if err != nil {
- return nil, nil, err
- }
- defer rfd.maybeClose()
- file := lfd.releaseFile()
- // FileConn dups the fd, so we still want to close the original one.
- defer file.Close()
- conn, err := net.FileConn(file)
- if err != nil {
- return nil, nil, err
- }
- return conn.(*net.UnixConn), rfd.releaseFile(), nil
-}
-
-func socketpair() (local, remote *fileDescriptor, err error) {
- syscall.ForkLock.RLock()
- fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0)
- if err == nil {
- syscall.CloseOnExec(fds[0])
- syscall.CloseOnExec(fds[1])
- }
- syscall.ForkLock.RUnlock()
- if err != nil {
- return nil, nil, err
- }
- return newFd(fds[0], "local"), newFd(fds[1], "remote"), nil
-}
-
-// SendConnection creates a new connected socket and sends
-// one end over 'conn', along with 'data'. It returns the address for
-// the local end of the socketpair.
-// Note that the returned address is an open file descriptor,
-// which you must close if you do not Dial or Listen to the address.
-func SendConnection(conn *net.UnixConn, data []byte) (addr net.Addr, err error) {
- if len(data) < 1 {
- return nil, verror.New(errCantSendSocketWithoutData, nil)
- }
- remote, local, err := socketpair()
- if err != nil {
- return nil, err
- }
- defer local.maybeClose()
- rfile := remote.releaseFile()
-
- rights := syscall.UnixRights(int(rfile.Fd()))
- n, oobn, err := conn.WriteMsgUnix(data, rights, nil)
- if err != nil {
- rfile.Close()
- return nil, err
- } else if n != len(data) || oobn != len(rights) {
- rfile.Close()
- return nil, verror.New(errWrongSentLength, nil, len(data), len(rights), n, oobn)
- }
- // Wait for the other side to acknowledge.
- // This is to work around a race on OS X where it appears we can close
- // the file descriptor before it gets transfered over the socket.
- f := local.releaseFile()
- syscall.ForkLock.Lock()
- fd, err := syscall.Dup(int(f.Fd()))
- if err != nil {
- syscall.ForkLock.Unlock()
- f.Close()
- rfile.Close()
- return nil, err
- }
- syscall.CloseOnExec(fd)
- syscall.ForkLock.Unlock()
- newConn, err := net.FileConn(f)
- f.Close()
- if err != nil {
- rfile.Close()
- return nil, err
- }
- newConn.Read(make([]byte, 1))
- newConn.Close()
- rfile.Close()
-
- return Addr(uintptr(fd)), nil
-}
-
-const cmsgDataLength = int(unsafe.Sizeof(int(1)))
-
-// ReadConnection reads a connection and additional data sent on 'conn' via a call to SendConnection.
-// 'buf' must be large enough to hold the data.
-// The returned function must be called when you are ready for the other side
-// to start sending data, but before writing anything to the connection.
-// If there is an error you must still call the function before closing the connection.
-func ReadConnection(conn *net.UnixConn, buf []byte) (net.Addr, int, func(), error) {
- oob := make([]byte, syscall.CmsgLen(cmsgDataLength))
- n, oobn, _, _, err := conn.ReadMsgUnix(buf, oob)
- if err != nil {
- return nil, n, nil, err
- }
- if oobn > len(oob) {
- return nil, n, nil, verror.New(errTooBigOOB, nil, oobn, len(oob))
- }
- scms, err := syscall.ParseSocketControlMessage(oob[:oobn])
- if err != nil {
- return nil, n, nil, err
- }
- fd := -1
- // Loop through any file descriptors we are sent, and close
- // all extras.
- for _, scm := range scms {
- fds, err := syscall.ParseUnixRights(&scm)
- if err != nil {
- return nil, n, nil, err
- }
- for _, f := range fds {
- if fd == -1 {
- fd = f
- } else if f != -1 {
- syscall.Close(f)
- }
- }
- }
- if fd == -1 {
- return nil, n, nil, nil
- }
- result := Addr(uintptr(fd))
- syscall.ForkLock.Lock()
- fd, err = syscall.Dup(fd)
- if err != nil {
- syscall.ForkLock.Unlock()
- CloseUnixAddr(result)
- return nil, n, nil, err
- }
- syscall.CloseOnExec(fd)
- syscall.ForkLock.Unlock()
- file := os.NewFile(uintptr(fd), "newconn")
- newconn, err := net.FileConn(file)
- file.Close()
- if err != nil {
- CloseUnixAddr(result)
- return nil, n, nil, err
- }
- return result, n, func() {
- newconn.Write(make([]byte, 1))
- newconn.Close()
- }, nil
-}
-
-func CloseUnixAddr(addr net.Addr) error {
- if addr.Network() != Network {
- return verror.New(errBadNetwork, nil)
- }
- fd, err := strconv.ParseInt(addr.String(), 10, 32)
- if err != nil {
- return err
- }
- return syscall.Close(int(fd))
-}
diff --git a/services/agent/internal/unixfd/unixfd_test.go b/services/agent/internal/unixfd/unixfd_test.go
deleted file mode 100644
index 06f7010..0000000
--- a/services/agent/internal/unixfd/unixfd_test.go
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright 2015 The Vanadium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package unixfd
-
-import (
- "bytes"
- "io"
- "net"
- "reflect"
- "testing"
-
- "v.io/v23/context"
-)
-
-type nothing struct{}
-
-func dial(fd *fileDescriptor) (net.Conn, net.Addr, error) {
- addr := fd.releaseAddr()
- ctx, _ := context.RootContext()
- conn, err := unixFDConn(ctx, Network, addr.String(), 0)
- return conn, addr, err
-}
-
-func listen(fd *fileDescriptor) (net.Listener, net.Addr, error) {
- addr := fd.releaseAddr()
- ctx, _ := context.RootContext()
- l, err := unixFDListen(ctx, Network, addr.String())
- return l, addr, err
-}
-
-func testWrite(t *testing.T, c net.Conn, data string) {
- n, err := c.Write([]byte(data))
- if err != nil {
- t.Errorf("Write: %v", err)
- return
- }
- if n != len(data) {
- t.Errorf("Wrote %d bytes, expected %d", n, len(data))
- }
-}
-
-func testRead(t *testing.T, c net.Conn, expected string) {
- buf := make([]byte, len(expected)+2)
- n, err := c.Read(buf)
- if err != nil {
- t.Errorf("Read: %v", err)
- return
- }
- if n != len(expected) || !bytes.Equal(buf[0:n], []byte(expected)) {
- t.Errorf("got %q, expected %q", buf[0:n], expected)
- }
-}
-
-func TestDial(t *testing.T) {
- local, remote, err := socketpair()
- if err != nil {
- t.Fatalf("socketpair: %v", err)
- }
- a, a_addr, err := dial(local)
- if err != nil {
- t.Fatalf("dial: %v", err)
- }
- b, b_addr, err := dial(remote)
- if err != nil {
- t.Fatalf("dial: %v", err)
- }
-
- testWrite(t, a, "TEST1")
- testRead(t, b, "TEST1")
- testWrite(t, b, "TEST2")
- testRead(t, a, "TEST2")
-
- if !reflect.DeepEqual(a.LocalAddr(), a_addr) {
- t.Errorf("Invalid address %v, expected %v", a.LocalAddr(), a_addr)
- }
- if !reflect.DeepEqual(a.RemoteAddr(), a_addr) {
- t.Errorf("Invalid address %v, expected %v", a.RemoteAddr(), a_addr)
- }
- if !reflect.DeepEqual(b.LocalAddr(), b_addr) {
- t.Errorf("Invalid address %v, expected %v", a.LocalAddr(), b_addr)
- }
- if !reflect.DeepEqual(b.RemoteAddr(), b_addr) {
- t.Errorf("Invalid address %v, expected %v", a.RemoteAddr(), b_addr)
- }
-}
-
-func TestListen(t *testing.T) {
- local, remote, err := socketpair()
- if err != nil {
- t.Fatalf("socketpair: %v", err)
- }
- a, _, err := dial(local)
- if err != nil {
- t.Fatalf("dial: %v", err)
- }
- l, _, err := listen(remote)
- if err != nil {
- t.Fatalf("listen: %v", err)
- }
- b, err := l.Accept()
- if err != nil {
- t.Fatalf("accept: %v", err)
- }
- start := make(chan nothing, 0)
- done := make(chan nothing)
- go func() {
- defer close(done)
- <-start
- if _, err := l.Accept(); err != io.EOF {
- t.Fatalf("accept: expected EOF, got %v", err)
- }
- }()
-
- // block until the goroutine starts running
- start <- nothing{}
- testWrite(t, a, "LISTEN")
- testRead(t, b, "LISTEN")
-
- err = l.Close()
- if err != nil {
- t.Fatalf("close: %v", err)
- }
- <-done
-
- // After closed, accept should fail immediately
- _, err = l.Accept()
- if err == nil {
- t.Fatalf("Accept succeeded after close")
- }
- err = l.Close()
- if err == nil {
- t.Fatalf("Close succeeded twice")
- }
-}
-
-func TestSendConnection(t *testing.T) {
- server, client, err := Socketpair()
- if err != nil {
- t.Fatalf("Socketpair: %v", err)
- }
- uclient, err := net.FileConn(client)
- if err != nil {
- t.Fatalf("FileConn: %v", err)
- }
- var readErr error
- var n int
- var saddr net.Addr
- done := make(chan struct{})
- buf := make([]byte, 10)
- go func() {
- var ack func()
- saddr, n, ack, readErr = ReadConnection(server, buf)
- if ack != nil {
- ack()
- }
- close(done)
- }()
- caddr, err := SendConnection(uclient.(*net.UnixConn), []byte("hello"))
- if err != nil {
- t.Fatalf("SendConnection: %v", err)
- }
- <-done
- if readErr != nil {
- t.Fatalf("ReadConnection: %v", readErr)
- }
- if saddr == nil {
- t.Fatalf("ReadConnection returned nil, %d", n)
- }
- data := buf[0:n]
- if !bytes.Equal([]byte("hello"), data) {
- t.Fatalf("unexpected data %q", data)
- }
-
- ctx, _ := context.RootContext()
- a, err := unixFDConn(ctx, Network, caddr.String(), 0)
- if err != nil {
- t.Fatalf("dial %v: %v", caddr, err)
- }
- b, err := unixFDConn(ctx, Network, saddr.String(), 0)
- if err != nil {
- t.Fatalf("dial %v: %v", saddr, err)
- }
-
- testWrite(t, a, "TEST1")
- testRead(t, b, "TEST1")
- testWrite(t, b, "TEST2")
- testRead(t, a, "TEST2")
-}
diff --git a/services/agent/keymgr/client.go b/services/agent/keymgr/client.go
index 7515f58..e4e8213 100644
--- a/services/agent/keymgr/client.go
+++ b/services/agent/keymgr/client.go
@@ -7,17 +7,10 @@
package keymgr
import (
- "net"
- "os"
- "strconv"
- "sync"
-
- "v.io/v23/context"
"v.io/v23/verror"
"v.io/x/ref/services/agent"
"v.io/x/ref/services/agent/internal/ipc"
"v.io/x/ref/services/agent/internal/server"
- "v.io/x/ref/services/agent/internal/unixfd"
)
const pkgPath = "v.io/x/ref/services/agent/keymgr"
@@ -30,22 +23,10 @@
verror.NoRetry, "{1:}{2:} Invalid key handle")
)
-const defaultManagerSocket = 4
-
type keyManager struct {
conn *ipc.IPCConn
}
-type Agent struct {
- conn *net.UnixConn // Guarded by mu
- mu sync.Mutex
-}
-
-// NewAgent returns a client connected to the agent on the default file descriptors.
-func NewAgent() (*Agent, error) {
- return newAgent(defaultManagerSocket)
-}
-
// NewKeyManager returns a client connected to the specified KeyManager.
func NewKeyManager(path string) (agent.KeyManager, error) {
i := ipc.NewIPC()
@@ -61,46 +42,6 @@
return server.NewLocalKeyManager(path, passphrase)
}
-func newAgent(fd int) (a *Agent, err error) {
- file := os.NewFile(uintptr(fd), "fd")
- defer file.Close()
- conn, err := net.FileConn(file)
- if err != nil {
- return nil, err
- }
-
- return &Agent{conn: conn.(*net.UnixConn)}, nil
-}
-
-// TODO(caprita): Get rid of *context.T arg. Doesn't seem to be used.
-
-// NewPrincipal creates a new principal and returns the handle and a socket serving
-// the principal.
-// Typically the socket will be passed to a child process using cmd.ExtraFiles.
-func (a *Agent) NewPrincipal(ctx *context.T, inMemory bool) (handle []byte, conn *os.File, err error) {
- req := make([]byte, 1)
- if inMemory {
- req[0] = 1
- }
- a.mu.Lock()
- defer a.mu.Unlock()
- conn, err = a.connect(req)
- if err != nil {
- return nil, nil, err
- }
- buf := make([]byte, agent.PrincipalHandleByteSize)
- n, err := a.conn.Read(buf)
- if err != nil {
- conn.Close()
- return nil, nil, err
- }
- if n != agent.PrincipalHandleByteSize {
- conn.Close()
- return nil, nil, verror.New(errInvalidResponse, ctx, agent.PrincipalHandleByteSize, n)
- }
- return buf, conn, nil
-}
-
// NewPrincipal creates a new principal and returns a handle.
// The handle may be passed to ServePrincipal to start an agent serving the principal.
func (m *keyManager) NewPrincipal(inMemory bool) (handle [agent.PrincipalHandleByteSize]byte, err error) {
@@ -109,30 +50,6 @@
return
}
-func (a *Agent) connect(req []byte) (*os.File, error) {
- addr, err := unixfd.SendConnection(a.conn, req)
- if err != nil {
- return nil, err
- }
- fd, err := strconv.ParseInt(addr.String(), 10, 32)
- if err != nil {
- return nil, err
- }
- return os.NewFile(uintptr(fd), "client"), nil
-}
-
-// NewConnection creates a connection to an agent which exports a principal
-// previously created with NewPrincipal.
-// Typically this will be passed to a child process using cmd.ExtraFiles.
-func (a *Agent) NewConnection(handle []byte) (*os.File, error) {
- if len(handle) != agent.PrincipalHandleByteSize {
- return nil, verror.New(errInvalidKeyHandle, nil)
- }
- a.mu.Lock()
- defer a.mu.Unlock()
- return a.connect(handle)
-}
-
// ServePrincipal creates a socket at socketPath and serves a principal
// previously created with NewPrincipal.
func (m *keyManager) ServePrincipal(handle [agent.PrincipalHandleByteSize]byte, socketPath string) error {
diff --git a/services/agent/pod_agentd/doc.go b/services/agent/pod_agentd/doc.go
index 987873b..f49220b 100644
--- a/services/agent/pod_agentd/doc.go
+++ b/services/agent/pod_agentd/doc.go
@@ -38,6 +38,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/services/agent/vbecome/doc.go b/services/agent/vbecome/doc.go
index 7dcdc36..c0ce02b 100644
--- a/services/agent/vbecome/doc.go
+++ b/services/agent/vbecome/doc.go
@@ -36,6 +36,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/services/agent/wire.vdl b/services/agent/wire.vdl
index 59b1fdc..071108e 100644
--- a/services/agent/wire.vdl
+++ b/services/agent/wire.vdl
@@ -42,7 +42,6 @@
Sign(message []byte) (security.Signature | error)
MintDischarge(forCaveat, caveatOnDischarge security.Caveat, additionalCaveatsOnDischarge []security.Caveat) (security.WireDischarge | error)
PublicKey() ([]byte | error)
- BlessingsInfo(blessings security.WireBlessings) (map[string][]security.Caveat | error)
BlessingStoreSet(blessings security.WireBlessings, forPeers security.BlessingPattern) (security.WireBlessings | error)
BlessingStoreForPeer(peerBlessings []string) (security.WireBlessings | error)
diff --git a/services/agent/wire.vdl.go b/services/agent/wire.vdl.go
index 77b790c..d6eb15e 100644
--- a/services/agent/wire.vdl.go
+++ b/services/agent/wire.vdl.go
@@ -131,7 +131,6 @@
Sign(ctx *context.T, message []byte, opts ...rpc.CallOpt) (security.Signature, error)
MintDischarge(ctx *context.T, forCaveat security.Caveat, caveatOnDischarge security.Caveat, additionalCaveatsOnDischarge []security.Caveat, opts ...rpc.CallOpt) (security.Discharge, error)
PublicKey(*context.T, ...rpc.CallOpt) ([]byte, error)
- BlessingsInfo(ctx *context.T, blessings security.Blessings, opts ...rpc.CallOpt) (map[string][]security.Caveat, error)
BlessingStoreSet(ctx *context.T, blessings security.Blessings, forPeers security.BlessingPattern, opts ...rpc.CallOpt) (security.Blessings, error)
BlessingStoreForPeer(ctx *context.T, peerBlessings []string, opts ...rpc.CallOpt) (security.Blessings, error)
BlessingStoreSetDefault(ctx *context.T, blessings security.Blessings, opts ...rpc.CallOpt) error
@@ -192,11 +191,6 @@
return
}
-func (c implAgentClientStub) BlessingsInfo(ctx *context.T, i0 security.Blessings, opts ...rpc.CallOpt) (o0 map[string][]security.Caveat, err error) {
- err = v23.GetClient(ctx).Call(ctx, c.name, "BlessingsInfo", []interface{}{i0}, []interface{}{&o0}, opts...)
- return
-}
-
func (c implAgentClientStub) BlessingStoreSet(ctx *context.T, i0 security.Blessings, i1 security.BlessingPattern, opts ...rpc.CallOpt) (o0 security.Blessings, err error) {
err = v23.GetClient(ctx).Call(ctx, c.name, "BlessingStoreSet", []interface{}{i0, i1}, []interface{}{&o0}, opts...)
return
@@ -347,7 +341,6 @@
Sign(ctx *context.T, call rpc.ServerCall, message []byte) (security.Signature, error)
MintDischarge(ctx *context.T, call rpc.ServerCall, forCaveat security.Caveat, caveatOnDischarge security.Caveat, additionalCaveatsOnDischarge []security.Caveat) (security.Discharge, error)
PublicKey(*context.T, rpc.ServerCall) ([]byte, error)
- BlessingsInfo(ctx *context.T, call rpc.ServerCall, blessings security.Blessings) (map[string][]security.Caveat, error)
BlessingStoreSet(ctx *context.T, call rpc.ServerCall, blessings security.Blessings, forPeers security.BlessingPattern) (security.Blessings, error)
BlessingStoreForPeer(ctx *context.T, call rpc.ServerCall, peerBlessings []string) (security.Blessings, error)
BlessingStoreSetDefault(ctx *context.T, call rpc.ServerCall, blessings security.Blessings) error
@@ -378,7 +371,6 @@
Sign(ctx *context.T, call rpc.ServerCall, message []byte) (security.Signature, error)
MintDischarge(ctx *context.T, call rpc.ServerCall, forCaveat security.Caveat, caveatOnDischarge security.Caveat, additionalCaveatsOnDischarge []security.Caveat) (security.Discharge, error)
PublicKey(*context.T, rpc.ServerCall) ([]byte, error)
- BlessingsInfo(ctx *context.T, call rpc.ServerCall, blessings security.Blessings) (map[string][]security.Caveat, error)
BlessingStoreSet(ctx *context.T, call rpc.ServerCall, blessings security.Blessings, forPeers security.BlessingPattern) (security.Blessings, error)
BlessingStoreForPeer(ctx *context.T, call rpc.ServerCall, peerBlessings []string) (security.Blessings, error)
BlessingStoreSetDefault(ctx *context.T, call rpc.ServerCall, blessings security.Blessings) error
@@ -448,10 +440,6 @@
return s.impl.PublicKey(ctx, call)
}
-func (s implAgentServerStub) BlessingsInfo(ctx *context.T, call rpc.ServerCall, i0 security.Blessings) (map[string][]security.Caveat, error) {
- return s.impl.BlessingsInfo(ctx, call, i0)
-}
-
func (s implAgentServerStub) BlessingStoreSet(ctx *context.T, call rpc.ServerCall, i0 security.Blessings, i1 security.BlessingPattern) (security.Blessings, error) {
return s.impl.BlessingStoreSet(ctx, call, i0, i1)
}
@@ -574,15 +562,6 @@
},
},
{
- Name: "BlessingsInfo",
- InArgs: []rpc.ArgDesc{
- {"blessings", ``}, // security.Blessings
- },
- OutArgs: []rpc.ArgDesc{
- {"", ``}, // map[string][]security.Caveat
- },
- },
- {
Name: "BlessingStoreSet",
InArgs: []rpc.ArgDesc{
{"blessings", ``}, // security.Blessings
diff --git a/services/application/application/doc.go b/services/application/application/doc.go
index e90bb42..4f17c66 100644
--- a/services/application/application/doc.go
+++ b/services/application/application/doc.go
@@ -35,6 +35,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/services/application/applicationd/doc.go b/services/application/applicationd/doc.go
index 740e023..6fa63e3 100644
--- a/services/application/applicationd/doc.go
+++ b/services/application/applicationd/doc.go
@@ -33,6 +33,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/services/binary/binary/doc.go b/services/binary/binary/doc.go
index 3ad2675..763135b 100644
--- a/services/binary/binary/doc.go
+++ b/services/binary/binary/doc.go
@@ -33,6 +33,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/services/binary/binaryd/doc.go b/services/binary/binaryd/doc.go
index e3c8fa3..be2892c 100644
--- a/services/binary/binaryd/doc.go
+++ b/services/binary/binaryd/doc.go
@@ -35,6 +35,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/services/binary/tidy/doc.go b/services/binary/tidy/doc.go
index 7f67a37..01d07be 100644
--- a/services/binary/tidy/doc.go
+++ b/services/binary/tidy/doc.go
@@ -31,6 +31,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/services/build/build/doc.go b/services/build/build/doc.go
index 2699b49..6e621c8 100644
--- a/services/build/build/doc.go
+++ b/services/build/build/doc.go
@@ -30,6 +30,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/services/build/buildd/doc.go b/services/build/buildd/doc.go
index 2a5df3a..8cae0c7 100644
--- a/services/build/buildd/doc.go
+++ b/services/build/buildd/doc.go
@@ -36,6 +36,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/services/cluster/cluster_agent/doc.go b/services/cluster/cluster_agent/doc.go
index 0ab4629..5b20720 100644
--- a/services/cluster/cluster_agent/doc.go
+++ b/services/cluster/cluster_agent/doc.go
@@ -37,6 +37,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/services/cluster/cluster_agentd/cluster_agentd_v23_test.go b/services/cluster/cluster_agentd/cluster_agentd_v23_test.go
index 7602b47..db9376f 100644
--- a/services/cluster/cluster_agentd/cluster_agentd_v23_test.go
+++ b/services/cluster/cluster_agentd/cluster_agentd_v23_test.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:generate v23 test generate
+//go:generate jiri test generate
package main_test
diff --git a/services/cluster/cluster_agentd/doc.go b/services/cluster/cluster_agentd/doc.go
index bb9b0ba..fbc2877 100644
--- a/services/cluster/cluster_agentd/doc.go
+++ b/services/cluster/cluster_agentd/doc.go
@@ -34,6 +34,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/services/cluster/vkube/cluster-agent.go b/services/cluster/vkube/cluster-agent.go
new file mode 100644
index 0000000..20f267d
--- /dev/null
+++ b/services/cluster/vkube/cluster-agent.go
@@ -0,0 +1,220 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "strings"
+
+ "v.io/v23/context"
+ "v.io/v23/security"
+ "v.io/v23/services/device"
+)
+
+const (
+ clusterAgentServiceName = "cluster-agent"
+ clusterAgentServicePort = 8193
+ clusterAgentApplicationName = "cluster-agentd"
+)
+
+// createClusterAgent creates a ReplicationController and a Service to run the
+// cluster agent.
+func createClusterAgent(ctx *context.T, config *vkubeConfig) error {
+ if err := createNamespaceIfNotExist(config.ClusterAgent.Namespace); err != nil {
+ return err
+ }
+ version := "latest"
+ if p := strings.Split(config.ClusterAgent.Image, ":"); len(p) == 2 {
+ version = p[1]
+ }
+ ca := object{
+ "apiVersion": "v1",
+ "kind": "ReplicationController",
+ "metadata": object{
+ "name": clusterAgentApplicationName + "-" + version,
+ "labels": object{
+ "application": clusterAgentApplicationName,
+ },
+ "namespace": config.ClusterAgent.Namespace,
+ },
+ "spec": object{
+ "replicas": 1,
+ "template": object{
+ "metadata": object{
+ "labels": object{
+ "application": clusterAgentApplicationName,
+ "deployment": version,
+ },
+ },
+ "spec": object{
+ "containers": []object{
+ object{
+ "name": "cluster-agentd",
+ "image": config.ClusterAgent.Image,
+ "ports": []object{
+ object{
+ "containerPort": clusterAgentServicePort,
+ },
+ },
+ "resources": object{
+ "limits": object{
+ "cpu": config.ClusterAgent.CPU,
+ "memory": config.ClusterAgent.Memory,
+ },
+ },
+ "volumeMounts": []object{
+ object{
+ "name": "data",
+ "mountPath": "/data",
+ },
+ object{
+ "name": "logs",
+ "mountPath": "/logs",
+ },
+ },
+ "env": []object{
+ object{
+ "name": "ROOT_BLESSINGS",
+ "value": rootBlessings(ctx),
+ },
+ object{
+ "name": "CLAIMER",
+ "value": clusterAgentClaimer(config),
+ },
+ object{
+ "name": "ADMIN",
+ "value": config.ClusterAgent.Admin,
+ },
+ object{
+ "name": "DATADIR",
+ "value": "/data",
+ },
+ object{
+ "name": "LOGDIR",
+ "value": "/logs",
+ },
+ },
+ },
+ },
+ "volumes": []interface{}{
+ object{
+ "name": "logs",
+ "emptyDir": object{},
+ },
+ },
+ },
+ },
+ },
+ }
+ if config.ClusterAgent.PersistentDisk == "" {
+ ca.append("spec.template.spec.volumes", object{
+ "name": "data",
+ "emptyDir": object{},
+ })
+ } else {
+ ca.append("spec.template.spec.volumes", object{
+ "name": "data",
+ "gcePersistentDisk": object{
+ "pdName": config.ClusterAgent.PersistentDisk,
+ "fsType": "ext4",
+ },
+ })
+ }
+
+ if out, err := kubectlCreate(ca); err != nil {
+ return fmt.Errorf("failed to create replication controller: %v\n%s\n", err, string(out))
+ }
+
+ svc := object{
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": object{
+ "name": clusterAgentServiceName,
+ "namespace": config.ClusterAgent.Namespace,
+ },
+ "spec": object{
+ "ports": []object{
+ object{
+ "port": clusterAgentServicePort,
+ "targetPort": clusterAgentServicePort,
+ },
+ },
+ "selector": object{
+ "application": clusterAgentApplicationName,
+ },
+ "type": "LoadBalancer",
+ },
+ }
+ if config.ClusterAgent.ExternalIP != "" {
+ svc.set("spec.loadBalancerIP", config.ClusterAgent.ExternalIP)
+ }
+ if out, err := kubectlCreate(svc); err != nil {
+ return fmt.Errorf("failed to create service: %v\n%s\n", err, string(out))
+ }
+ return nil
+}
+
+// stopClusterAgent stops the cluster agent ReplicationController and deletes
+// its Service.
+func stopClusterAgent(config *vkubeConfig) error {
+ if out, err := kubectl("--namespace="+config.ClusterAgent.Namespace, "stop", "rc", "-l", "application="+clusterAgentApplicationName); err != nil {
+ return fmt.Errorf("failed to stop %s: %v: %s", clusterAgentApplicationName, err, out)
+ }
+ if out, err := kubectl("--namespace="+config.ClusterAgent.Namespace, "delete", "service", clusterAgentServiceName); err != nil {
+ return fmt.Errorf("failed to delete %s: %v: %s", clusterAgentServiceName, err, out)
+ }
+ return nil
+}
+
+// clusterAgentClaimer returns the blessing name of the claimer of the cluster
+// agent.
+func clusterAgentClaimer(config *vkubeConfig) string {
+ p := strings.Split(config.ClusterAgent.Blessing, security.ChainSeparator)
+ return strings.Join(p[:len(p)-1], security.ChainSeparator)
+}
+
+// findClusterAgent returns the external address of the cluster agent.
+func findClusterAgent(config *vkubeConfig, includeBlessings bool) (string, error) {
+ out, err := kubectl("--namespace="+config.ClusterAgent.Namespace, "get", "service", clusterAgentServiceName, "-o", "json")
+ if err != nil {
+ return "", fmt.Errorf("failed to get info of %s: %v: %s", clusterAgentServiceName, err, out)
+ }
+ var svc object
+ if err := svc.importJSON(out); err != nil {
+ return "", fmt.Errorf("failed to parse kubectl output: %v", err)
+ }
+ ports := svc.getObjectArray("spec.ports")
+ if len(ports) == 0 {
+ return "", fmt.Errorf("service %q has no ports", clusterAgentServiceName)
+ }
+ port := ports[0].getInt("port")
+ if port < 0 {
+ return "", fmt.Errorf("service %q has no valid port: %v", clusterAgentServiceName, port)
+ }
+ ingress := svc.getObjectArray("status.loadBalancer.ingress")
+ if len(ingress) == 0 {
+ return "", fmt.Errorf("service %q has no loadbalancer ingress", clusterAgentServiceName)
+ }
+ ip := ingress[0].getString("ip")
+ if ip == "" {
+ return "", fmt.Errorf("service %q loadbalancer has no valid ip", clusterAgentServiceName)
+ }
+ if includeBlessings {
+ return fmt.Sprintf("/(%s)@%s:%d", config.ClusterAgent.Blessing, ip, port), nil
+ }
+ return fmt.Sprintf("/%s:%d", ip, port), nil
+}
+
+// claimClusterAgent claims the cluster agent with the given blessing extension.
+func claimClusterAgent(ctx *context.T, config *vkubeConfig, extension string) error {
+ addr, err := findClusterAgent(config, false)
+ if err != nil {
+ return err
+ }
+ if err := device.ClaimableClient(addr).Claim(ctx, "", &granter{extension: extension}); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/services/cluster/vkube/config.go b/services/cluster/vkube/config.go
new file mode 100644
index 0000000..a7c2c55
--- /dev/null
+++ b/services/cluster/vkube/config.go
@@ -0,0 +1,72 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+)
+
+// The config file used by the vkube command.
+type vkubeConfig struct {
+ // The GCE project name.
+ Project string `json:"project"`
+ // The GCE zone.
+ Zone string `json:"zone"`
+ // The name of the Kubernetes cluster.
+ Cluster string `json:"cluster"`
+
+ ClusterAgent clusterAgentConfig `json:"clusterAgent"`
+ PodAgent podAgentConfig `json:"podAgent"`
+}
+
+type clusterAgentConfig struct {
+ // The Kubernetes namespace of the cluster agent. An empty
+ // value is equivalent to "default".
+ Namespace string `json:"namespace"`
+ // The name of the docker image for the cluster agent.
+ Image string `json:"image"`
+ // The amount of CPU to reserve for the cluster agent.
+ CPU string `json:"cpu"`
+ // The amount of memory to reserve for the cluster agent.
+ Memory string `json:"memory"`
+ // The blessing name of the cluster agent.
+ Blessing string `json:"blessing"`
+ // The blessing pattern of the cluster agent admin, i.e. who's
+ // allowed to create and delete secrets.
+ Admin string `json:"admin"`
+ // The external IP address of the cluster agent. An empty value
+ // means that an ephemeral address will be used.
+ // TODO(rthellend): This doesn't currently work.
+ // https://github.com/kubernetes/kubernetes/issues/10323
+ // https://github.com/kubernetes/kubernetes/pull/13005
+ ExternalIP string `json:"externalIP"`
+	// The name of the Persistent Disk of the cluster agent. An
+	// empty value means that the cluster agent won't use a persistent
+	// disk.
+ PersistentDisk string `json:"persistentDisk"`
+}
+
+type podAgentConfig struct {
+ // The name of the docker image for the pod agent.
+ Image string `json:"image"`
+}
+
+// readConfig reads a config file.
+func readConfig(fileName string) (*vkubeConfig, error) {
+ data, err := ioutil.ReadFile(fileName)
+ if err != nil {
+ return nil, err
+ }
+ var config vkubeConfig
+ if err := json.Unmarshal(data, &config); err != nil {
+ return nil, fmt.Errorf("json.Unmarshal: %v", err)
+ }
+ if config.ClusterAgent.Namespace == "" {
+ config.ClusterAgent.Namespace = "default"
+ }
+ return &config, nil
+}
diff --git a/services/cluster/vkube/doc.go b/services/cluster/vkube/doc.go
new file mode 100644
index 0000000..24bbfaf
--- /dev/null
+++ b/services/cluster/vkube/doc.go
@@ -0,0 +1,179 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated via go generate.
+// DO NOT UPDATE MANUALLY
+
+/*
+Manages Vanadium applications on kubernetes
+
+Usage:
+ vkube [flags] <command>
+
+The vkube commands are:
+ get-credentials Gets the kubernetes credentials from Google Cloud.
+ start Starts an application.
+ update Updates an application.
+ stop Stops an application.
+ start-cluster-agent Starts the cluster agent.
+ stop-cluster-agent Stops the cluster agent.
+ claim-cluster-agent Claims the cluster agent.
+ build-docker-images Builds the docker images for the cluster and pod agents.
+ help Display help for commands or topics
+
+The vkube flags are:
+ -config=vkube.cfg
+ The 'vkube.cfg' file to use.
+ -gcloud=gcloud
+ The 'gcloud' binary to use.
+ -kubectl=kubectl
+ The 'kubectl' binary to use.
+
+The global flags are:
+ -alsologtostderr=true
+ log to standard error as well as files
+ -log_backtrace_at=:0
+ when logging hits line file:N, emit a stack trace
+ -log_dir=
+ if non-empty, write log files to this directory
+ -logtostderr=false
+ log to standard error instead of files
+ -max_stack_buf_size=4292608
+ max size in bytes of the buffer to use for logging stack traces
+ -metadata=<just specify -metadata to activate>
+ Displays metadata for the program and exits.
+ -stderrthreshold=2
+ logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
+ -v=0
+ log level for V logs
+ -v23.credentials=
+ directory to use for storing security credentials
+ -v23.i18n-catalogue=
+   i18n catalogue files to load, comma separated
+ -v23.namespace.root=[/(dev.v.io/role/vprod/service/mounttabled)@ns.dev.v.io:8101]
+ local namespace root; can be repeated to provided multiple roots
+ -v23.proxy=
+ object name of proxy service to use to export services across network
+ boundaries
+ -v23.tcp.address=
+ address to listen on
+ -v23.tcp.protocol=wsh
+ protocol to listen with
+ -v23.vtrace.cache-size=1024
+ The number of vtrace traces to store in memory.
+ -v23.vtrace.collect-regexp=
+ Spans and annotations that match this regular expression will trigger trace
+ collection.
+ -v23.vtrace.dump-on-shutdown=true
+ If true, dump all stored traces on runtime shutdown.
+ -v23.vtrace.sample-rate=0
+ Rate (from 0.0 to 1.0) to sample vtrace traces.
+ -vmodule=
+ comma-separated list of pattern=N settings for filename-filtered logging
+ -vpath=
+ comma-separated list of pattern=N settings for file pathname-filtered logging
+
+Vkube get-credentials
+
+Gets the kubernetes credentials from Google Cloud.
+
+Usage:
+ vkube get-credentials
+
+Vkube start
+
+Starts an application.
+
+Usage:
+ vkube start [flags] <extension>
+
+<extension> The blessing name extension to give to the application.
+
+The vkube start flags are:
+ -f=
+ Filename to use to create the kubernetes resource.
+
+Vkube update
+
+Updates an application to a new version with a rolling update, preserving the
+existing blessings.
+
+Usage:
+ vkube update [flags]
+
+The vkube update flags are:
+ -f=
+ Filename to use to update the kubernetes resource.
+
+Vkube stop
+
+Stops an application.
+
+Usage:
+ vkube stop [flags]
+
+The vkube stop flags are:
+ -f=
+ Filename to use to stop the kubernetes resource.
+
+Vkube start-cluster-agent
+
+Starts the cluster agent.
+
+Usage:
+ vkube start-cluster-agent
+
+Vkube stop-cluster-agent
+
+Stops the cluster agent.
+
+Usage:
+ vkube stop-cluster-agent
+
+Vkube claim-cluster-agent
+
+Claims the cluster agent.
+
+Usage:
+ vkube claim-cluster-agent
+
+Vkube build-docker-images
+
+Builds the docker images for the cluster and pod agents.
+
+Usage:
+ vkube build-docker-images [flags]
+
+The vkube build-docker-images flags are:
+ -v=false
+ When true, the output is more verbose.
+
+Vkube help - Display help for commands or topics
+
+Help with no args displays the usage of the parent command.
+
+Help with args displays the usage of the specified sub-command or help topic.
+
+"help ..." recursively displays help for all commands and topics.
+
+Usage:
+ vkube help [flags] [command/topic ...]
+
+[command/topic ...] optionally identifies a specific sub-command or help topic.
+
+The vkube help flags are:
+ -style=compact
+ The formatting style for help output:
+ compact - Good for compact cmdline output.
+ full - Good for cmdline output, shows all global flags.
+ godoc - Good for godoc processing.
+ Override the default by setting the CMDLINE_STYLE environment variable.
+ -width=<terminal width>
+ Format output to this target width in runes, or unlimited if width < 0.
+ Defaults to the terminal width if available. Override the default by setting
+ the CMDLINE_WIDTH environment variable.
+*/
+package main
diff --git a/services/cluster/vkube/docker.go b/services/cluster/vkube/docker.go
new file mode 100644
index 0000000..4eb223b
--- /dev/null
+++ b/services/cluster/vkube/docker.go
@@ -0,0 +1,161 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
+const (
+ clusterAgentDockerfile = `
+FROM debian:stable
+
+# gcloud
+RUN apt-get update && apt-get install -y -qq --no-install-recommends wget unzip python php5-mysql php5-cli php5-cgi openjdk-7-jre-headless openssh-client python-openssl && apt-get clean
+RUN wget https://dl.google.com/dl/cloudsdk/release/google-cloud-sdk.zip && unzip google-cloud-sdk.zip && rm google-cloud-sdk.zip
+ENV CLOUDSDK_PYTHON_SITEPACKAGES 1
+ENV HOME /root
+RUN google-cloud-sdk/install.sh --usage-reporting=false --path-update=true --bash-completion=true --rc-path=/root/.bashrc --disable-installation-options && \
+ google-cloud-sdk/bin/gcloud --quiet components update preview alpha beta app kubectl && \
+ google-cloud-sdk/bin/gcloud --quiet config set component_manager/disable_update_check true
+ENV PATH /google-cloud-sdk/bin:$PATH
+
+# vanadium
+#RUN apt-get install --no-install-recommends -y -q libssl1.0.0
+ADD claimable cluster_agent cluster_agentd init.sh /usr/local/bin/
+RUN chmod 755 /usr/local/bin/*
+
+EXPOSE 8193
+CMD ["/usr/local/bin/init.sh"]
+`
+ clusterAgentInitSh = `#!/bin/sh
+if [ ! -e "${DATADIR}/perms" ]; then
+ # Not claimed
+ /usr/local/bin/claimable \
+ --v23.credentials="${DATADIR}/creds" \
+ --v23.tcp.address=:8193 \
+ --root-blessings="${ROOT_BLESSINGS}" \
+ --perms-dir="${DATADIR}/perms" \
+ --v23.permissions.literal="{\"Admin\":{\"In\":[\"${CLAIMER}\"]}}" \
+ --log_dir="${LOGDIR}" \
+ --alsologtostderr=false
+fi
+
+mkdir -p "${DATADIR}/blessings"
+
+exec /usr/local/bin/cluster_agentd \
+ --v23.credentials="${DATADIR}/creds" \
+ --v23.tcp.address=:8193 \
+ --v23.permissions.literal="{\"Admin\":{\"In\":[\"${ADMIN}\"]}}" \
+ --log_dir="${LOGDIR}" \
+ --root-dir="${DATADIR}/blessings" \
+ --alsologtostderr=false
+`
+
+ podAgentDockerfile = `
+FROM debian:stable
+RUN apt-get update && apt-get install --no-install-recommends -y -q libssl1.0.0
+ADD pod_agentd /usr/local/bin/
+RUN chmod 755 /usr/local/bin/pod_agentd
+`
+)
+
+type dockerFile struct {
+ name string
+ content []byte
+}
+
+type dockerCmd struct {
+ name string
+ args []string
+}
+
+func buildDockerImages(config *vkubeConfig, verbose bool, stdout io.Writer) error {
+ ts := time.Now().Format("20060102150405")
+ // Cluster agent image.
+ imageName := removeTag(config.ClusterAgent.Image)
+ imageNameTag := fmt.Sprintf("%s:%s", imageName, ts)
+
+ var out io.Writer
+ if verbose {
+ out = stdout
+ }
+
+ if err := buildDockerImage([]dockerFile{
+ {"Dockerfile", []byte(clusterAgentDockerfile)},
+ {"init.sh", []byte(clusterAgentInitSh)},
+ }, []dockerCmd{
+ {"jiri", []string{"go", "build", "-o", "claimable", "v.io/x/ref/services/device/claimable"}},
+ {"jiri", []string{"go", "build", "-o", "cluster_agent", "v.io/x/ref/services/cluster/cluster_agent"}},
+ {"jiri", []string{"go", "build", "-o", "cluster_agentd", "v.io/x/ref/services/cluster/cluster_agentd"}},
+ {"docker", []string{"build", "-t", imageName, "."}},
+ {"docker", []string{"tag", imageName, imageNameTag}},
+ {flagGcloudBin, []string{"--project=" + config.Project, "docker", "push", imageName}},
+ }, out); err != nil {
+ return err
+ }
+ fmt.Fprintf(stdout, "Pushed %s successfully.\n", imageNameTag)
+
+ // Pod agent image.
+ imageName = removeTag(config.PodAgent.Image)
+ imageNameTag = fmt.Sprintf("%s:%s", imageName, ts)
+
+ if err := buildDockerImage([]dockerFile{
+ {"Dockerfile", []byte(podAgentDockerfile)},
+ }, []dockerCmd{
+ {"jiri", []string{"go", "build", "-o", "pod_agentd", "v.io/x/ref/services/agent/pod_agentd"}},
+ {"docker", []string{"build", "-t", imageName, "."}},
+ {"docker", []string{"tag", imageName, imageNameTag}},
+ {flagGcloudBin, []string{"--project=" + config.Project, "docker", "push", imageName}},
+ }, out); err != nil {
+ return err
+ }
+ fmt.Fprintf(stdout, "Pushed %s successfully.\n", imageNameTag)
+ return nil
+}
+
+func removeTag(name string) string {
+ if p := strings.Split(name, ":"); len(p) > 0 {
+ return p[0]
+ }
+ return ""
+}
+
+func buildDockerImage(files []dockerFile, cmds []dockerCmd, stdout io.Writer) error {
+ workDir, err := ioutil.TempDir("", "docker-build-")
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(workDir)
+
+ for _, f := range files {
+ if stdout != nil {
+ fmt.Fprintf(stdout, "#### Writing %q\n", f.name)
+ }
+ if err := ioutil.WriteFile(filepath.Join(workDir, f.name), f.content, 0600); err != nil {
+ return fmt.Errorf("failed to write %q: %v", f.name, err)
+ }
+ }
+ for _, c := range cmds {
+ if stdout != nil {
+ fmt.Fprintf(stdout, "#### Running %s %s\n", c.name, strings.Join(c.args, " "))
+ }
+ cmd := exec.Command(c.name, c.args...)
+ cmd.Dir = workDir
+ cmd.Stdout = stdout
+ cmd.Stderr = stdout
+ if err := cmd.Run(); err != nil {
+ return fmt.Errorf("%v failed: %v", c, err)
+ }
+ }
+ return nil
+}
diff --git a/services/cluster/vkube/main.go b/services/cluster/vkube/main.go
new file mode 100644
index 0000000..4360be1
--- /dev/null
+++ b/services/cluster/vkube/main.go
@@ -0,0 +1,291 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The following enables go generate to generate the doc.go file.
+//go:generate go run $JIRI_ROOT/release/go/src/v.io/x/lib/cmdline/testdata/gendoc.go .
+
+package main
+
+import (
+ "fmt"
+ "strings"
+
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/security"
+ "v.io/v23/verror"
+ "v.io/x/lib/cmdline"
+ "v.io/x/ref/lib/v23cmd"
+ _ "v.io/x/ref/runtime/factories/generic"
+)
+
+var (
+ flagConfigFile string
+ flagKubectlBin string
+ flagGcloudBin string
+ flagResourceFile string
+ flagVerbose bool
+)
+
+func main() {
+ cmdline.HideGlobalFlagsExcept()
+
+ cmd := &cmdline.Command{
+ Name: "vkube",
+ Short: "Manages Vanadium applications on kubernetes",
+ Long: "Manages Vanadium applications on kubernetes",
+ Children: []*cmdline.Command{
+ cmdGetCredentials,
+ cmdStart,
+ cmdUpdate,
+ cmdStop,
+ cmdStartClusterAgent,
+ cmdStopClusterAgent,
+ cmdClaimClusterAgent,
+ cmdBuildDockerImages,
+ },
+ }
+ cmd.Flags.StringVar(&flagConfigFile, "config", "vkube.cfg", "The 'vkube.cfg' file to use.")
+ cmd.Flags.StringVar(&flagKubectlBin, "kubectl", "kubectl", "The 'kubectl' binary to use.")
+ cmd.Flags.StringVar(&flagGcloudBin, "gcloud", "gcloud", "The 'gcloud' binary to use.")
+
+ cmdStart.Flags.StringVar(&flagResourceFile, "f", "", "Filename to use to create the kubernetes resource.")
+
+ cmdUpdate.Flags.StringVar(&flagResourceFile, "f", "", "Filename to use to update the kubernetes resource.")
+
+ cmdStop.Flags.StringVar(&flagResourceFile, "f", "", "Filename to use to stop the kubernetes resource.")
+
+ cmdBuildDockerImages.Flags.BoolVar(&flagVerbose, "v", false, "When true, the output is more verbose.")
+
+ cmdline.Main(cmd)
+}
+
+var cmdGetCredentials = &cmdline.Command{
+ Runner: v23cmd.RunnerFunc(runCmdGetCredentials),
+ Name: "get-credentials",
+ Short: "Gets the kubernetes credentials from Google Cloud.",
+ Long: "Gets the kubernetes credentials from Google Cloud.",
+}
+
+func runCmdGetCredentials(ctx *context.T, env *cmdline.Env, args []string) error {
+ config, err := readConfig(flagConfigFile)
+ if err != nil {
+ return err
+ }
+ if config.Cluster == "" {
+ return fmt.Errorf("Cluster must be set.")
+ }
+ if config.Project == "" {
+ return fmt.Errorf("Project must be set.")
+ }
+ if config.Zone == "" {
+ return fmt.Errorf("Zone must be set.")
+ }
+ return getCredentials(config.Cluster, config.Project, config.Zone)
+}
+
+var cmdStart = &cmdline.Command{
+ Runner: v23cmd.RunnerFunc(runCmdStart),
+ Name: "start",
+ Short: "Starts an application.",
+ Long: "Starts an application.",
+ ArgsName: "<extension>",
+ ArgsLong: "<extension> The blessing name extension to give to the application.",
+}
+
+func runCmdStart(ctx *context.T, env *cmdline.Env, args []string) error {
+ if expected, got := 1, len(args); expected != got {
+ return env.UsageErrorf("start: incorrect number of arguments, expected %d, got %d", expected, got)
+ }
+ extension := args[0]
+
+ config, err := readConfig(flagConfigFile)
+ if err != nil {
+ return err
+ }
+ if flagResourceFile == "" {
+ return fmt.Errorf("-f must be specified.")
+ }
+ rc, err := readReplicationControllerConfig(flagResourceFile)
+ if err != nil {
+ return err
+ }
+ for _, v := range []string{"spec.template.metadata.labels.application", "spec.template.metadata.labels.deployment"} {
+ if rc.getString(v) == "" {
+ fmt.Fprintf(env.Stderr, "WARNING: %q is not set. Rolling updates will not work.\n", v)
+ }
+ }
+ agentAddr, err := findClusterAgent(config, true)
+ if err != nil {
+ return err
+ }
+ secretName, err := makeSecretName()
+ if err != nil {
+ return err
+ }
+ namespace := rc.getString("metadata.namespace")
+ appName := rc.getString("spec.template.metadata.labels.application")
+ if n, err := findReplicationControllerNameForApp(appName, namespace); err == nil {
+ return fmt.Errorf("replication controller for application=%q already running: %s", appName, n)
+ }
+ if err := createSecret(ctx, secretName, namespace, agentAddr, extension); err != nil {
+ return err
+ }
+ fmt.Fprintln(env.Stdout, "Created Secret successfully.")
+
+ if err := createReplicationController(ctx, config, rc, secretName); err != nil {
+ if err := deleteSecret(ctx, config, secretName, namespace); err != nil {
+ ctx.Error(err)
+ }
+ return err
+ }
+ fmt.Fprintln(env.Stdout, "Created replication controller successfully.")
+ return nil
+}
+
+var cmdUpdate = &cmdline.Command{
+ Runner: v23cmd.RunnerFunc(runCmdUpdate),
+ Name: "update",
+ Short: "Updates an application.",
+ Long: "Updates an application to a new version with a rolling update, preserving the existing blessings.",
+}
+
+func runCmdUpdate(ctx *context.T, env *cmdline.Env, args []string) error {
+ config, err := readConfig(flagConfigFile)
+ if err != nil {
+ return err
+ }
+ if flagResourceFile == "" {
+ return fmt.Errorf("-f must be specified.")
+ }
+ rc, err := readReplicationControllerConfig(flagResourceFile)
+ if err != nil {
+ return err
+ }
+ if err := updateReplicationController(ctx, config, rc); err != nil {
+ return err
+ }
+ fmt.Fprintln(env.Stdout, "Updated replication controller successfully.")
+ return nil
+}
+
+var cmdStop = &cmdline.Command{
+ Runner: v23cmd.RunnerFunc(runCmdStop),
+ Name: "stop",
+ Short: "Stops an application.",
+ Long: "Stops an application.",
+}
+
+func runCmdStop(ctx *context.T, env *cmdline.Env, args []string) error {
+ config, err := readConfig(flagConfigFile)
+ if err != nil {
+ return err
+ }
+ if flagResourceFile == "" {
+ return fmt.Errorf("-f must be specified.")
+ }
+ rc, err := readReplicationControllerConfig(flagResourceFile)
+ if err != nil {
+ return err
+ }
+ name := rc.getString("metadata.name")
+ if name == "" {
+ return fmt.Errorf("metadata.name must be set")
+ }
+ namespace := rc.getString("metadata.namespace")
+ secretName, err := findSecretName(name, namespace)
+ if err != nil {
+ return err
+ }
+ if out, err := kubectl("--namespace="+namespace, "stop", "rc", name); err != nil {
+ return fmt.Errorf("failed to stop replication controller: %v: %s", err, out)
+ }
+ fmt.Fprintf(env.Stdout, "Stopping replication controller.\n")
+ if err := deleteSecret(ctx, config, secretName, namespace); err != nil {
+ return fmt.Errorf("failed to delete Secret: %v", err)
+ }
+ fmt.Fprintf(env.Stdout, "Deleting Secret.\n")
+ return nil
+}
+
+var cmdStartClusterAgent = &cmdline.Command{
+ Runner: v23cmd.RunnerFunc(runCmdStartClusterAgent),
+ Name: "start-cluster-agent",
+ Short: "Starts the cluster agent.",
+ Long: "Starts the cluster agent.",
+}
+
+func runCmdStartClusterAgent(ctx *context.T, env *cmdline.Env, args []string) error {
+ config, err := readConfig(flagConfigFile)
+ if err != nil {
+ return err
+ }
+ if err := createClusterAgent(ctx, config); err != nil {
+ return err
+ }
+ fmt.Fprintf(env.Stdout, "Starting Cluster Agent.\n")
+ return nil
+}
+
+var cmdStopClusterAgent = &cmdline.Command{
+ Runner: v23cmd.RunnerFunc(runCmdStopClusterAgent),
+ Name: "stop-cluster-agent",
+ Short: "Stops the cluster agent.",
+ Long: "Stops the cluster agent.",
+}
+
+func runCmdStopClusterAgent(ctx *context.T, env *cmdline.Env, args []string) error {
+ config, err := readConfig(flagConfigFile)
+ if err != nil {
+ return err
+ }
+ if err := stopClusterAgent(config); err != nil {
+ return err
+ }
+ fmt.Fprintf(env.Stdout, "Stopping Cluster Agent.\n")
+ return nil
+}
+
+var cmdClaimClusterAgent = &cmdline.Command{
+ Runner: v23cmd.RunnerFunc(runCmdClaimClusterAgent),
+ Name: "claim-cluster-agent",
+ Short: "Claims the cluster agent.",
+ Long: "Claims the cluster agent.",
+}
+
+func runCmdClaimClusterAgent(ctx *context.T, env *cmdline.Env, args []string) error {
+ config, err := readConfig(flagConfigFile)
+ if err != nil {
+ return err
+ }
+ myBlessings := v23.GetPrincipal(ctx).BlessingStore().Default()
+ claimer := clusterAgentClaimer(config)
+ if !myBlessings.CouldHaveNames([]string{claimer}) {
+ return fmt.Errorf("principal isn't the expected claimer: got %q, expected %q", myBlessings, claimer)
+ }
+ extension := strings.TrimPrefix(config.ClusterAgent.Blessing, claimer+security.ChainSeparator)
+ if err := claimClusterAgent(ctx, config, extension); err != nil {
+ if verror.ErrorID(err) == verror.ErrUnknownMethod.ID {
+ return fmt.Errorf("already claimed")
+ }
+ return err
+ }
+ fmt.Fprintf(env.Stdout, "Claimed Cluster Agent successfully.\n")
+ return nil
+}
+
+var cmdBuildDockerImages = &cmdline.Command{
+ Runner: v23cmd.RunnerFunc(runCmdBuildDockerImages),
+ Name: "build-docker-images",
+ Short: "Builds the docker images for the cluster and pod agents.",
+ Long: "Builds the docker images for the cluster and pod agents.",
+}
+
+func runCmdBuildDockerImages(ctx *context.T, env *cmdline.Env, args []string) error {
+ config, err := readConfig(flagConfigFile)
+ if err != nil {
+ return err
+ }
+ return buildDockerImages(config, flagVerbose, env.Stdout)
+}
diff --git a/services/cluster/vkube/object.go b/services/cluster/vkube/object.go
new file mode 100644
index 0000000..4d517e0
--- /dev/null
+++ b/services/cluster/vkube/object.go
@@ -0,0 +1,149 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+// object simplifies the parsing and handling of json objects that are
+// unmarshaled into an empty interface.
+type object map[string]interface{}
+
+func (o *object) importJSON(data []byte) error {
+ var decode interface{}
+ if err := json.Unmarshal(data, &decode); err != nil {
+ return err
+ }
+ c := convertToObject(decode)
+ var ok bool
+ if *o, ok = c.(object); !ok {
+ return fmt.Errorf("object is %T", c)
+ }
+ return nil
+}
+
+// convertToObject converts all occurrences of map[string]interface{} to object.
+func convertToObject(i interface{}) interface{} {
+ switch obj := i.(type) {
+ case map[string]interface{}:
+ for k, v := range obj {
+ obj[k] = convertToObject(v)
+ }
+ return object(obj)
+ case []interface{}:
+ for x, y := range obj {
+ obj[x] = convertToObject(y)
+ }
+ return obj
+ default:
+ return obj
+ }
+}
+
+func (o object) json() ([]byte, error) {
+ return json.MarshalIndent(o, "", " ")
+}
+
+// get retrieves the value of an object inside this object, e.g.:
+// if o = { "a": { "b": "c" } }, o.get("a.b") == "c".
+func (o object) get(name string) interface{} {
+ parts := strings.Split(name, ".")
+ var obj interface{} = o
+ for _, p := range parts {
+ m, ok := obj.(object)
+ if !ok {
+ return nil
+ }
+ var exists bool
+ if obj, exists = m[p]; !exists {
+ return nil
+ }
+ }
+ return obj
+}
+
+// set sets the value of an object inside this object, e.g.:
+// if o = { "a": { "b": "c" } }, o.set("a.b", "X") changes "c" to "X".
+func (o object) set(name string, value interface{}) error {
+ parts := strings.Split(name, ".")
+ var obj interface{} = o
+ for {
+ m, ok := obj.(object)
+ if !ok {
+ return fmt.Errorf("%q not an object", name)
+ }
+
+ p := parts[0]
+ parts = parts[1:]
+
+ if len(parts) == 0 {
+ m[p] = value
+ break
+ }
+ if obj, ok = m[p]; !ok {
+ obj = make(object)
+ m[p] = obj
+ }
+ }
+ return nil
+}
+
+// getString retrieves a string object.
+func (c object) getString(name string) string {
+ switch s := c.get(name).(type) {
+ case string:
+ return s
+ case nil:
+ return ""
+ default:
+ return fmt.Sprintf("%v", s)
+ }
+}
+
+// getInt retrieves an integer object.
+func (c object) getInt(name string) int {
+ switch v := c.get(name).(type) {
+ case int:
+ return v
+ case float64:
+ return int(v)
+ default:
+ return -1
+ }
+}
+
+// getObjectArray retrieves an array of objects.
+func (c object) getObjectArray(name string) []object {
+ s, ok := c.get(name).([]interface{})
+ if !ok {
+ return nil
+ }
+ n := make([]object, len(s))
+ for i, o := range s {
+ if x, ok := o.(object); ok {
+ n[i] = x
+ continue
+ }
+ return nil
+ }
+ return n
+}
+
+// append adds objects to an array.
+func (c object) append(name string, values ...interface{}) error {
+ obj := c.get(name)
+ if obj == nil {
+ obj = []interface{}{}
+ }
+ switch array := obj.(type) {
+ case []interface{}:
+ return c.set(name, append(array, values...))
+ default:
+ return fmt.Errorf("%q is not an array", name)
+ }
+}
diff --git a/services/cluster/vkube/object_test.go b/services/cluster/vkube/object_test.go
new file mode 100644
index 0000000..62be6ee
--- /dev/null
+++ b/services/cluster/vkube/object_test.go
@@ -0,0 +1,112 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "testing"
+)
+
+func TestObject(t *testing.T) {
+ o := make(object)
+ o.set("foo", "bar")
+ o.set("slice", []interface{}{"a", "b", "c"})
+ o.set("obj", object{"name": "Bob"})
+ o.set("x.y.z", 5)
+ o.append("slice", "d")
+
+ out, err := o.json()
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ expected := `{
+ "foo": "bar",
+ "obj": {
+ "name": "Bob"
+ },
+ "slice": [
+ "a",
+ "b",
+ "c",
+ "d"
+ ],
+ "x": {
+ "y": {
+ "z": 5
+ }
+ }
+}`
+ if got := string(out); got != expected {
+ t.Errorf("Unexpected output. Got %q, expected %q", got, expected)
+ }
+}
+
+func TestObjectJSON(t *testing.T) {
+ json := `{
+ "foo": "bar",
+ "bar": 10,
+ "list": [ { "x":0 }, { "x":1 }, { "x":2 } ],
+ "x": { "y": [ 1, 2, 3 ] }
+ }`
+
+ o := make(object)
+ if err := o.importJSON([]byte(json)); err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ if got, expected := o.getString("foo"), "bar"; got != expected {
+ t.Errorf("Unexpected value. Got %#v, expected %#v", got, expected)
+ }
+ if got, expected := o.getInt("bar"), 10; got != expected {
+ t.Errorf("Unexpected value. Got %#v, expected %#v", got, expected)
+ }
+ if got, expected := o.getString("notthere"), ""; got != expected {
+ t.Errorf("Unexpected value. Got %#v, expected %#v", got, expected)
+ }
+ if got, expected := o.getString("x.y"), "[1 2 3]"; got != expected {
+ t.Errorf("Unexpected value. Got %#v, expected %#v", got, expected)
+ }
+ o.append("x.y", 4)
+ list := o.getObjectArray("list")
+ for i, item := range list {
+ if got, expected := item.get("x"), float64(i); got != expected {
+ t.Errorf("Unexpected value for x. Got %#v, expected %#v", got, expected)
+ }
+ }
+ list = append(list, object{"x": "y"})
+ o.set("list", list)
+
+ out, err := o.json()
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ expected := `{
+ "bar": 10,
+ "foo": "bar",
+ "list": [
+ {
+ "x": 0
+ },
+ {
+ "x": 1
+ },
+ {
+ "x": 2
+ },
+ {
+ "x": "y"
+ }
+ ],
+ "x": {
+ "y": [
+ 1,
+ 2,
+ 3,
+ 4
+ ]
+ }
+}`
+ if got := string(out); got != expected {
+ t.Errorf("Unexpected output. Got %q, expected %q", got, expected)
+ }
+}
diff --git a/services/cluster/vkube/util.go b/services/cluster/vkube/util.go
new file mode 100644
index 0000000..cd69360
--- /dev/null
+++ b/services/cluster/vkube/util.go
@@ -0,0 +1,334 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/base64"
+ "encoding/hex"
+ "fmt"
+ "io/ioutil"
+ "os/exec"
+ "strings"
+
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/rpc"
+ "v.io/v23/security"
+ "v.io/v23/vom"
+ "v.io/x/ref/services/cluster"
+)
+
+// getCredentials uses the gcloud command to get the credentials required to
+// access the kubernetes cluster.
+func getCredentials(cluster, project, zone string) error {
+ if out, err := exec.Command(flagGcloudBin, "config", "set", "container/cluster", cluster).CombinedOutput(); err != nil {
+ return fmt.Errorf("failed to set container/cluster: %v: %s", err, out)
+ }
+ if out, err := exec.Command(flagGcloudBin, "container", "clusters", "get-credentials", cluster, "--project", project, "--zone", zone).CombinedOutput(); err != nil {
+ return fmt.Errorf("failed to set get credentials for %q: %v: %s", cluster, err, out)
+ }
+ return nil
+}
+
+// localAgentAddress returns the address of the cluster agent to use from within
+// the cluster.
+func localAgentAddress(config *vkubeConfig) string {
+ return fmt.Sprintf("/(%s)@%s.%s:%d",
+ config.ClusterAgent.Blessing,
+ clusterAgentServiceName,
+ config.ClusterAgent.Namespace,
+ clusterAgentServicePort,
+ )
+}
+
+// readReplicationControllerConfig reads a ReplicationController config from a
+// file.
+func readReplicationControllerConfig(fileName string) (object, error) {
+ data, err := ioutil.ReadFile(fileName)
+ if err != nil {
+ return nil, err
+ }
+ var rc object
+ if err := rc.importJSON(data); err != nil {
+ return nil, err
+ }
+ if kind := rc.getString("kind"); kind != "ReplicationController" {
+ return nil, fmt.Errorf("expected kind=\"ReplicationController\", got %q", kind)
+ }
+ return rc, nil
+}
+
+// addPodAgent takes either a ReplicationController or Pod object and adds a
+// pod-agent container to it. The existing containers are updated to use the
+// pod agent.
+func addPodAgent(ctx *context.T, config *vkubeConfig, obj object, secretName string) error {
+ var base string
+ switch kind := obj.getString("kind"); kind {
+ case "ReplicationController":
+ base = "spec.template."
+ case "Pod":
+ base = ""
+ default:
+ return fmt.Errorf("expected kind=\"ReplicationController\" or \"Pod\", got %q", kind)
+ }
+
+ // Add the volumes used by the pod agent container.
+ if err := obj.append(base+"spec.volumes",
+ object{"name": "agent-logs", "emptyDir": object{}},
+ object{"name": "agent-secret", "secret": object{"secretName": secretName}},
+ object{"name": "agent-socket", "emptyDir": object{}},
+ ); err != nil {
+ return err
+ }
+
+ // Update the existing containers to talk to the pod agent.
+ containers := obj.getObjectArray(base + "spec.containers")
+ for _, c := range containers {
+ if err := c.append("env", object{"name": "V23_AGENT_PATH", "value": "/agent/socket/agent.sock"}); err != nil {
+ return err
+ }
+ if err := c.append("volumeMounts", object{"name": "agent-socket", "mountPath": "/agent/socket", "readOnly": true}); err != nil {
+ return err
+ }
+ }
+
+ // Add the pod agent container.
+ containers = append(containers, object{
+ "name": "pod-agent",
+ "image": config.PodAgent.Image,
+ "args": []string{
+ "pod_agentd",
+ "--agent=" + localAgentAddress(config),
+ "--root-blessings=" + rootBlessings(ctx),
+ "--secret-key-file=/agent/secret/secret",
+ "--socket-path=/agent/socket/agent.sock",
+ "--log_dir=/logs",
+ },
+ "volumeMounts": []object{
+ object{"name": "agent-logs", "mountPath": "/logs"},
+ object{"name": "agent-secret", "mountPath": "/agent/secret", "readOnly": true},
+ object{"name": "agent-socket", "mountPath": "/agent/socket"},
+ },
+ })
+ return obj.set(base+"spec.containers", containers)
+}
+
+// createSecret gets a new secret key from the cluster agent, and then creates a
+// Secret object on kubernetes with it.
+func createSecret(ctx *context.T, secretName, namespace, agentAddr, extension string) error {
+ secret, err := cluster.ClusterAgentAdminClient(agentAddr).NewSecret(ctx, &granter{extension: extension})
+ if err != nil {
+ return err
+ }
+ if out, err := kubectlCreate(object{
+ "apiVersion": "v1",
+ "kind": "Secret",
+ "metadata": object{
+ "name": secretName,
+ "namespace": namespace,
+ },
+ "type": "Opaque",
+ "data": object{
+ "secret": base64.StdEncoding.EncodeToString([]byte(secret)),
+ },
+ }); err != nil {
+ return fmt.Errorf("failed to create secret %q: %v\n%s\n", secretName, err, string(out))
+ }
+ return nil
+}
+
+type granter struct {
+ rpc.CallOpt
+ extension string
+}
+
+func (g *granter) Grant(ctx *context.T, call security.Call) (security.Blessings, error) {
+ p := call.LocalPrincipal()
+ return p.Bless(call.RemoteBlessings().PublicKey(), p.BlessingStore().Default(), g.extension, security.UnconstrainedUse())
+}
+
+// deleteSecret deletes a Secret object and its associated secret key and
+// blessings.
+// We know the name of the Secret object, but we don't know the secret key. The
+// only way to get it back from Kubernetes is to mount the Secret Object to a
+// Pod, and then use the mounted secret key to delete it and its blessings.
+func deleteSecret(ctx *context.T, config *vkubeConfig, name, namespace string) error {
+ podName := fmt.Sprintf("delete-secret-%s", name)
+ del := object{
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": object{
+ "name": podName,
+ "namespace": namespace,
+ },
+ "spec": object{
+ "containers": []interface{}{
+ object{
+ "name": "delete-secret",
+ "image": config.ClusterAgent.Image,
+ "args": []string{
+ "/bin/bash",
+ "-c",
+ "cluster_agent --agent='" + localAgentAddress(config) + "' forget $(cat /agent/secret/secret) && /google-cloud-sdk/bin/kubectl --namespace=" + namespace + " delete secret " + name + " && /google-cloud-sdk/bin/kubectl --namespace=" + namespace + " delete pod " + podName,
+ },
+ "volumeMounts": []interface{}{
+ object{"name": "agent-secret", "mountPath": "/agent/secret", "readOnly": true},
+ },
+ },
+ },
+ "restartPolicy": "OnFailure",
+ "activeDeadlineSeconds": 300,
+ },
+ }
+ if err := addPodAgent(ctx, config, del, name); err != nil {
+ return err
+ }
+ out, err := kubectlCreate(del)
+ if err != nil {
+ return fmt.Errorf("failed to create delete Pod: %v: %s", err, out)
+ }
+ return nil
+}
+
+// createReplicationController takes a ReplicationController object, adds a
+// pod-agent, and then creates it on kubernetes.
+func createReplicationController(ctx *context.T, config *vkubeConfig, rc object, secretName string) error {
+ if err := addPodAgent(ctx, config, rc, secretName); err != nil {
+ return err
+ }
+ if out, err := kubectlCreate(rc); err != nil {
+ return fmt.Errorf("failed to create replication controller: %v\n%s\n", err, string(out))
+ }
+ return nil
+}
+
+// updateReplicationController takes a ReplicationController object, adds a
+// pod-agent, and then performs a rolling update.
+func updateReplicationController(ctx *context.T, config *vkubeConfig, rc object) error {
+ oldName, err := findReplicationControllerNameForApp(rc.getString("spec.template.metadata.labels.application"), rc.getString("metadata.namespace"))
+ if err != nil {
+ return err
+ }
+ secretName, err := findSecretName(oldName, rc.getString("metadata.namespace"))
+ if err != nil {
+ return err
+ }
+ if err := addPodAgent(ctx, config, rc, secretName); err != nil {
+ return err
+ }
+ json, err := rc.json()
+ if err != nil {
+ return err
+ }
+ cmd := exec.Command(flagKubectlBin, "rolling-update", oldName, "-f", "-")
+ cmd.Stdin = bytes.NewBuffer(json)
+ if out, err := cmd.CombinedOutput(); err != nil {
+ return fmt.Errorf("failed to update replication controller %q: %v\n%s\n", oldName, err, string(out))
+ }
+ return nil
+}
+
+// createNamespaceIfNotExist creates a Namespace object if it doesn't already exist.
+func createNamespaceIfNotExist(name string) error {
+ if _, err := kubectl("get", "namespace", name); err == nil {
+ return nil
+ }
+ if out, err := kubectlCreate(object{
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": object{
+ "name": name,
+ },
+ }); err != nil {
+ return fmt.Errorf("failed to create Namespace %q: %v: %s", name, err, out)
+ }
+ return nil
+}
+
+// makeSecretName creates a random name for a Secret Object.
+func makeSecretName() (string, error) {
+ b := make([]byte, 16)
+ if _, err := rand.Read(b); err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("secret-%s", hex.EncodeToString(b)), nil
+}
+
+// findReplicationControllerNameForApp returns the name of the
+// ReplicationController that is currently used to run the given application.
+func findReplicationControllerNameForApp(app, namespace string) (string, error) {
+ data, err := kubectl("--namespace="+namespace, "get", "rc", "-l", "application="+app, "-o", "json")
+ if err != nil {
+ return "", fmt.Errorf("failed to get replication controller for application %q: %v\n%s\n", app, err, string(data))
+ }
+ var list object
+ if err := list.importJSON(data); err != nil {
+ return "", fmt.Errorf("failed to parse kubectl output: %v", err)
+ }
+ items := list.getObjectArray("items")
+ if c := len(items); c != 1 {
+ return "", fmt.Errorf("found %d replication controllers for application %q", c, app)
+ }
+ name := items[0].getString("metadata.name")
+ if name == "" {
+ return "", fmt.Errorf("missing metadata.name")
+ }
+ return name, nil
+}
+
+// findSecretName finds the name of the Secret Object associated with the given
+// Replication Controller.
+func findSecretName(rcName, namespace string) (string, error) {
+ data, err := kubectl("--namespace="+namespace, "get", "rc", rcName, "-o", "json")
+ if err != nil {
+ return "", fmt.Errorf("failed to get replication controller %q: %v\n%s\n", rcName, err, string(data))
+ }
+ var rc object
+ if err := rc.importJSON(data); err != nil {
+ return "", fmt.Errorf("failed to parse kubectl output: %v", err)
+ }
+ for _, v := range rc.getObjectArray("spec.template.spec.volumes") {
+ if v.getString("name") == "agent-secret" {
+ return v.getString("secret.secretName"), nil
+ }
+ }
+ return "", fmt.Errorf("failed to find secretName in replication controller %q", rcName)
+}
+
+// kubectlCreate runs 'kubectl create -f' on the given object and returns the
+// output.
+func kubectlCreate(o object) ([]byte, error) {
+ json, err := o.json()
+ if err != nil {
+ return nil, err
+ }
+ cmd := exec.Command(flagKubectlBin, "create", "-f", "-")
+ cmd.Stdin = bytes.NewBuffer(json)
+ return cmd.CombinedOutput()
+}
+
+// kubectl runs the 'kubectl' command with the given arguments and returns the
+// output.
+func kubectl(args ...string) ([]byte, error) {
+ return exec.Command(flagKubectlBin, args...).CombinedOutput()
+}
+
+// rootBlessings returns the root blessings for the current principal.
+func rootBlessings(ctx *context.T) string {
+ p := v23.GetPrincipal(ctx)
+ b64 := []string{}
+ for _, root := range security.RootBlessings(p.BlessingStore().Default()) {
+ data, err := vom.Encode(root)
+ if err != nil {
+ ctx.Fatalf("vom.Encode failed: %v", err)
+ }
+ // We use URLEncoding to be compatible with the principal
+ // command.
+ b64 = append(b64, base64.URLEncoding.EncodeToString(data))
+ }
+ return strings.Join(b64, ",")
+}
diff --git a/services/cluster/vkube/util_test.go b/services/cluster/vkube/util_test.go
new file mode 100644
index 0000000..f43cb54
--- /dev/null
+++ b/services/cluster/vkube/util_test.go
@@ -0,0 +1,203 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "v.io/x/ref/test"
+)
+
+func TestAddPodAgent(t *testing.T) {
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
+
+ const (
+ myAppJSON = `{
+ "apiVersion": "v1",
+ "kind": "ReplicationController",
+ "metadata": {
+ "name": "my-app",
+ "labels": {
+ "run": "my-app"
+ }
+ },
+ "spec": {
+ "replicas": 5,
+ "template": {
+ "metadata": {
+ "labels": {
+ "run": "my-app"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "my-app",
+ "image": "registry/me/my-app:latest",
+ "ports": [
+ { "containerPort": 8193, "hostPort": 8193 }
+ ],
+ "volumeMounts": [
+ { "name": "app-logs", "mountPath": "/logs" }
+ ]
+ }
+ ],
+ "volumes": [
+ { "name": "app-logs", "emptyDir": {} }
+ ]
+ }
+ }
+ }
+}`
+
+ expected = `{
+ "apiVersion": "v1",
+ "kind": "ReplicationController",
+ "metadata": {
+ "labels": {
+ "run": "my-app"
+ },
+ "name": "my-app"
+ },
+ "spec": {
+ "replicas": 5,
+ "template": {
+ "metadata": {
+ "labels": {
+ "run": "my-app"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "env": [
+ {
+ "name": "V23_AGENT_PATH",
+ "value": "/agent/socket/agent.sock"
+ }
+ ],
+ "image": "registry/me/my-app:latest",
+ "name": "my-app",
+ "ports": [
+ {
+ "containerPort": 8193,
+ "hostPort": 8193
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/logs",
+ "name": "app-logs"
+ },
+ {
+ "mountPath": "/agent/socket",
+ "name": "agent-socket",
+ "readOnly": true
+ }
+ ]
+ },
+ {
+ "args": [
+ "pod_agentd",
+ "--agent=/(root/cluster-agent)@cluster-agent.test:8193",
+ "--root-blessings=ROOT-BLESSINGS",
+ "--secret-key-file=/agent/secret/secret",
+ "--socket-path=/agent/socket/agent.sock",
+ "--log_dir=/logs"
+ ],
+ "image": "",
+ "name": "pod-agent",
+ "volumeMounts": [
+ {
+ "mountPath": "/logs",
+ "name": "agent-logs"
+ },
+ {
+ "mountPath": "/agent/secret",
+ "name": "agent-secret",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/agent/socket",
+ "name": "agent-socket"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "emptyDir": {},
+ "name": "app-logs"
+ },
+ {
+ "emptyDir": {},
+ "name": "agent-logs"
+ },
+ {
+ "name": "agent-secret",
+ "secret": {
+ "secretName": "myapp-secret"
+ }
+ },
+ {
+ "emptyDir": {},
+ "name": "agent-socket"
+ }
+ ]
+ }
+ }
+ }
+}`
+ )
+
+ var myAppObj object
+ if err := myAppObj.importJSON([]byte(myAppJSON)); err != nil {
+ t.Fatalf("importJSON failed: %v", err)
+ }
+
+ config := &vkubeConfig{
+ ClusterAgent: clusterAgentConfig{
+ Blessing: "root/cluster-agent",
+ Namespace: "test",
+ },
+ }
+ if err := addPodAgent(ctx, config, myAppObj, "myapp-secret"); err != nil {
+ t.Fatalf("addPodAgent failed: %v", err)
+ }
+ outBytes, err := myAppObj.json()
+ if err != nil {
+ t.Fatalf("json failed: %v", err)
+ }
+ got := strings.Replace(string(outBytes), rootBlessings(ctx), "ROOT-BLESSINGS", 1)
+
+ if got != expected {
+ t.Errorf("unexpected output. Got %s, expected %s", got, expected)
+ diff(t, expected, got)
+ }
+}
+
+func diff(t *testing.T, expected, got string) {
+ dir, err := ioutil.TempDir("", "diff-")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+ expectedFile := filepath.Join(dir, "expected")
+ if err := ioutil.WriteFile(expectedFile, []byte(expected), 0644); err != nil {
+ t.Fatal(err)
+ }
+ gotFile := filepath.Join(dir, "got")
+ if err := ioutil.WriteFile(gotFile, []byte(got), 0644); err != nil {
+ t.Fatal(err)
+ }
+ out, _ := exec.Command("diff", "-u", expectedFile, gotFile).CombinedOutput()
+ t.Log(string(out))
+}
diff --git a/services/debug/debug/doc.go b/services/debug/debug/doc.go
index 9d73f54..79b1167 100644
--- a/services/debug/debug/doc.go
+++ b/services/debug/debug/doc.go
@@ -34,6 +34,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
@@ -193,7 +195,7 @@
$ debug pprof run a/b/c heap --text $ debug pprof run a/b/c profile -gv
The debug pprof run flags are:
- -pprofcmd=v23 go tool pprof
+ -pprofcmd=jiri go tool pprof
The pprof command to use.
Debug pprof proxy
diff --git a/services/debug/debug/impl.go b/services/debug/debug/impl.go
index 846fbed..a163448 100644
--- a/services/debug/debug/impl.go
+++ b/services/debug/debug/impl.go
@@ -71,7 +71,7 @@
cmdStatsWatch.Flags.BoolVar(&showType, "type", false, "When true, the type of the values will be displayed.")
// pprof flags
- cmdPProfRun.Flags.StringVar(&pprofCmd, "pprofcmd", "v23 go tool pprof", "The pprof command to use.")
+ cmdPProfRun.Flags.StringVar(&pprofCmd, "pprofcmd", "jiri go tool pprof", "The pprof command to use.")
}
var cmdVtrace = &cmdline.Command{
diff --git a/services/device/claimable/doc.go b/services/device/claimable/doc.go
index f3e1183..f0d7a37 100644
--- a/services/device/claimable/doc.go
+++ b/services/device/claimable/doc.go
@@ -37,6 +37,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/services/device/device/doc.go b/services/device/device/doc.go
index f5e85af..a14531b 100644
--- a/services/device/device/doc.go
+++ b/services/device/device/doc.go
@@ -53,6 +53,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/services/device/deviced/doc.go b/services/device/deviced/doc.go
index efea027..b3b717f 100644
--- a/services/device/deviced/doc.go
+++ b/services/device/deviced/doc.go
@@ -74,6 +74,8 @@
Path to the application to exec.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-username=
The UNIX user name used for the other functions of this tool.
-v=0
diff --git a/services/device/deviced/internal/impl/app_service.go b/services/device/deviced/internal/impl/app_service.go
index e3ee797..ac637e4 100644
--- a/services/device/deviced/internal/impl/app_service.go
+++ b/services/device/deviced/internal/impl/app_service.go
@@ -154,7 +154,6 @@
vsecurity "v.io/x/ref/lib/security"
"v.io/x/ref/services/agent"
"v.io/x/ref/services/agent/agentlib"
- "v.io/x/ref/services/agent/keymgr"
"v.io/x/ref/services/device/internal/config"
"v.io/x/ref/services/device/internal/errors"
"v.io/x/ref/services/internal/packages"
@@ -211,9 +210,6 @@
type securityAgentState struct {
// Security agent key manager client.
keyMgr agent.KeyManager
- // Deprecated: security agent key manager client based on pipe
- // connections.
- keyMgrAgent *keymgr.Agent
}
// appRunner is the subset of the appService object needed to
@@ -540,35 +536,6 @@
return installationDir, nil
}
-// agentPrincipal creates a Principal backed by the given agent connection,
-// taking ownership of the connection. The returned cancel function is to be
-// called when the Principal is no longer in use.
-func agentPrincipal(ctx *context.T, conn *os.File) (security.Principal, func(), error) {
- agentctx, cancel := context.WithCancel(ctx)
- var err error
- if agentctx, err = v23.WithNewStreamManager(agentctx); err != nil {
- cancel()
- conn.Close()
- return nil, nil, err
- }
- // TODO: This should use the same network as the agent we're using,
- // not whatever this process was compiled with.
- ep, err := v23.NewEndpoint(agentlib.AgentEndpoint(int(conn.Fd())))
- if err != nil {
- cancel()
- conn.Close()
- return nil, nil, err
- }
- p, err := agentlib.NewAgentPrincipal(agentctx, ep, v23.GetClient(agentctx))
- if err != nil {
- cancel()
- conn.Close()
- return nil, nil, err
- }
- conn.Close()
- return p, cancel, nil
-}
-
// setupPrincipal sets up the instance's principal, with the right blessings.
func setupPrincipal(ctx *context.T, instanceDir string, call device.ApplicationInstantiateServerCall, securityAgent *securityAgentState, info *instanceInfo, rootDir string) error {
var p security.Principal
@@ -602,25 +569,6 @@
if p, err = agentlib.NewAgentPrincipalX(sockPath); err != nil {
return verror.New(errors.ErrOperationFailed, ctx, "NewAgentPrincipalX failed", err)
}
- case securityAgent != nil && securityAgent.keyMgrAgent != nil:
- // This code path is deprecated in favor of the socket agent
- // connection.
-
- // TODO(caprita): Part of the cleanup upon destroying an
- // instance, we should tell the agent to drop the principal.
- handle, conn, err := securityAgent.keyMgrAgent.NewPrincipal(ctx, false)
- if err != nil {
- return verror.New(errors.ErrOperationFailed, ctx, fmt.Sprintf("NewPrincipal() failed %v", err))
- }
- var cancel func()
- if p, cancel, err = agentPrincipal(ctx, conn); err != nil {
- return verror.New(errors.ErrOperationFailed, ctx, fmt.Sprintf("agentPrincipal failed: %v", err))
- }
- defer cancel()
- info.SecurityAgentHandle = handle
- // conn will be closed when the connection to the agent is shut
- // down, as a result of cancel() shutting down the stream
- // manager. No need to call conn.Close().
default:
credentialsDir := filepath.Join(instanceDir, "credentials")
// TODO(caprita): The app's system user id needs access to this dir.
@@ -676,8 +624,8 @@
// Put the names of the device manager's default blessings as patterns
// for the child, so that the child uses the right blessing when talking
// back to the device manager.
- for n, _ := range dmPrincipal.BlessingsInfo(dmPrincipal.BlessingStore().Default()) {
- if _, err := p.BlessingStore().Set(dmBlessings, security.BlessingPattern(n)); err != nil {
+ for _, pattern := range security.DefaultBlessingPatterns(dmPrincipal) {
+ if _, err := p.BlessingStore().Set(dmBlessings, pattern); err != nil {
return verror.New(errors.ErrOperationFailed, ctx, fmt.Sprintf("BlessingStore.Set() failed: %v", err))
}
}
@@ -993,26 +941,6 @@
ctx.Errorf("StopServing failed: %v", err)
}
}()
- case sa != nil && sa.keyMgrAgent != nil:
- // This code path is deprecated in favor of the socket agent
- // connection.
- file, err := sa.keyMgrAgent.NewConnection(info.SecurityAgentHandle)
- if err != nil {
- ctx.Errorf("NewConnection(%v) failed: %v", info.SecurityAgentHandle, err)
- return 0, err
- }
- agentCleaner = func() {
- file.Close()
- }
- // We need to account for the file descriptors corresponding to
- // std{err|out|in} as well as the implementation-specific pipes
- // that the vexec library adds to ExtraFiles during
- // handle.Start. vexec.FileOffset properly offsets fd
- // accordingly.
- fd := len(cmd.ExtraFiles) + vexec.FileOffset
- cmd.ExtraFiles = append(cmd.ExtraFiles, file)
- ep := agentlib.AgentEndpoint(fd)
- cfg.Set(mgmt.SecurityAgentEndpointConfigKey, ep)
default:
cmd.Env = append(cmd.Env, ref.EnvCredentials+"="+filepath.Join(instanceDir, "credentials"))
}
@@ -1785,18 +1713,6 @@
}()
}
debugInfo.PrincipalType = "Agent-based"
- case sa != nil && sa.keyMgrAgent != nil:
- file, err := sa.keyMgrAgent.NewConnection(debugInfo.Info.SecurityAgentHandle)
- if err != nil {
- ctx.Errorf("NewConnection(%v) failed: %v", debugInfo.Info.SecurityAgentHandle, err)
- return "", err
- }
- var cancel func()
- if debugInfo.Principal, cancel, err = agentPrincipal(ctx, file); err != nil {
- return "", err
- }
- defer cancel()
- debugInfo.PrincipalType = "Agent-based-deprecated"
default:
credentialsDir := filepath.Join(instanceDir, "credentials")
var err error
diff --git a/services/device/deviced/internal/impl/device_service.go b/services/device/deviced/internal/impl/device_service.go
index be3db00..3445163 100644
--- a/services/device/deviced/internal/impl/device_service.go
+++ b/services/device/deviced/internal/impl/device_service.go
@@ -277,7 +277,6 @@
cfg.Set(mgmt.AddressConfigKey, "127.0.0.1:0")
var p security.Principal
- var agentHandle []byte
switch sa := s.securityAgent; {
case sa != nil && sa.keyMgr != nil:
@@ -306,19 +305,6 @@
if p, err = agentlib.NewAgentPrincipalX(sockPath); err != nil {
return verror.New(errors.ErrOperationFailed, ctx, "NewAgentPrincipalX failed", err)
}
- case sa != nil && sa.keyMgrAgent != nil:
- // This code path is deprecated in favor of the socket agent
- // connection.
- handle, conn, err := sa.keyMgrAgent.NewPrincipal(ctx, true)
- if err != nil {
- return verror.New(errors.ErrOperationFailed, ctx, fmt.Sprintf("NewPrincipal() failed %v", err))
- }
- agentHandle = handle
- var cancel func()
- if p, cancel, err = agentPrincipal(ctx, conn); err != nil {
- return verror.New(errors.ErrOperationFailed, ctx, fmt.Sprintf("agentPrincipal failed: %v", err))
- }
- defer cancel()
default:
credentialsDir := filepath.Join(workspace, "credentials")
var err error
@@ -339,23 +325,6 @@
return verror.New(errors.ErrOperationFailed, ctx, fmt.Sprintf("AddToRoots() failed: %v", err))
}
- if s.securityAgent != nil && s.securityAgent.keyMgrAgent != nil {
- // This code path is deprecated in favor of the socket agent
- // connection.
- file, err := s.securityAgent.keyMgrAgent.NewConnection(agentHandle)
- if err != nil {
- return verror.New(errors.ErrOperationFailed, ctx, fmt.Sprintf("NewConnection(%v) failed: %v", agentHandle, err))
- }
- defer file.Close()
-
- fd := len(cmd.ExtraFiles) + vexec.FileOffset
- cmd.ExtraFiles = append(cmd.ExtraFiles, file)
- // TODO: This should use the same network as the agent we're using,
- // not whatever this process was compiled with.
- ep := agentlib.AgentEndpoint(fd)
- cfg.Set(mgmt.SecurityAgentEndpointConfigKey, ep)
- }
-
handle := vexec.NewParentHandle(cmd, vexec.ConfigOpt{Config: cfg})
// Start the child process.
if err := handle.Start(); err != nil {
diff --git a/services/device/deviced/internal/impl/dispatcher.go b/services/device/deviced/internal/impl/dispatcher.go
index b3dee78..5d12e66 100644
--- a/services/device/deviced/internal/impl/dispatcher.go
+++ b/services/device/deviced/internal/impl/dispatcher.go
@@ -136,16 +136,6 @@
keyMgr: km,
}
}
- } else if len(os.Getenv(ref.EnvAgentEndpoint)) > 0 {
- // This code path is deprecated in favor of socket agent
- // connection.
- if keyMgrAgent, err := keymgr.NewAgent(); err != nil {
- return nil, nil, verror.New(errNewAgentFailed, ctx, err)
- } else {
- d.internal.securityAgent = &securityAgentState{
- keyMgrAgent: keyMgrAgent,
- }
- }
}
runner := &appRunner{
callback: d.internal.callback,
diff --git a/services/discovery/service.go b/services/discovery/service.go
new file mode 100644
index 0000000..079e4e6
--- /dev/null
+++ b/services/discovery/service.go
@@ -0,0 +1,103 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package discovery
+
+import (
+ "sync"
+
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/discovery"
+ "v.io/v23/rpc"
+ "v.io/v23/security"
+ sdiscovery "v.io/v23/services/discovery"
+ "v.io/v23/verror"
+)
+
+const pkgPath = "v.io/x/ref/services/discovery"
+
+const (
+ maxActiveHandles = int(^uint16(0)) // 65535.
+)
+
+var (
+ errTooManyServices = verror.Register(pkgPath+".errTooManyServices", verror.NoRetry, "{1:}{2:} too many registered services")
+)
+
+type impl struct {
+ ctx *context.T
+ d discovery.T
+
+ mu sync.Mutex
+ handles map[sdiscovery.ServiceHandle]func() // GUARDED_BY(mu)
+ lastHandle sdiscovery.ServiceHandle // GUARDED_BY(mu)
+}
+
+func (s *impl) RegisterService(ctx *context.T, call rpc.ServerCall, service discovery.Service, visibility []security.BlessingPattern) (sdiscovery.ServiceHandle, error) {
+ ctx, cancel := context.WithCancel(s.ctx)
+ done, err := s.d.Advertise(ctx, service, visibility)
+ if err != nil {
+ cancel()
+ return 0, err
+ }
+
+ s.mu.Lock()
+ if len(s.handles) >= maxActiveHandles {
+ s.mu.Unlock()
+ cancel()
+ return 0, verror.New(errTooManyServices, ctx)
+ }
+ handle := s.lastHandle
+ for {
+ handle++
+ // Skip the zero handle (reserved) and any handle that is
+ // already in use.
+ if _, exist := s.handles[handle]; !exist && handle != 0 {
+ break
+ }
+ }
+ s.handles[handle] = func() {
+ cancel()
+ <-done
+ }
+ s.lastHandle = handle
+ s.mu.Unlock()
+ return handle, nil
+}
+
+func (s *impl) UnregisterService(ctx *context.T, call rpc.ServerCall, handle sdiscovery.ServiceHandle) error {
+ s.mu.Lock()
+ stop := s.handles[handle]
+ delete(s.handles, handle)
+ s.mu.Unlock()
+ if stop != nil {
+ stop()
+ }
+ return nil
+}
+
+func (s *impl) Scan(ctx *context.T, call sdiscovery.ScannerScanServerCall, query string) error {
+ updateCh, err := s.d.Scan(ctx, query)
+ if err != nil {
+ return err
+ }
+
+ stream := call.SendStream()
+ for update := range updateCh {
+ if err = stream.Send(update); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// NewDiscoveryService returns a new Discovery service implementation.
+func NewDiscoveryService(ctx *context.T) sdiscovery.DiscoveryServerMethods {
+ return &impl{
+ ctx: ctx,
+ d: v23.GetDiscovery(ctx),
+ handles: make(map[sdiscovery.ServiceHandle]func()),
+ }
+}
diff --git a/services/discovery/service_test.go b/services/discovery/service_test.go
new file mode 100644
index 0000000..449f6d1
--- /dev/null
+++ b/services/discovery/service_test.go
@@ -0,0 +1,110 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package discovery
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+ "time"
+
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/discovery"
+ sdiscovery "v.io/v23/services/discovery"
+
+ idiscovery "v.io/x/ref/lib/discovery"
+ fdiscovery "v.io/x/ref/lib/discovery/factory"
+ "v.io/x/ref/lib/discovery/plugins/mock"
+ _ "v.io/x/ref/runtime/factories/generic"
+ "v.io/x/ref/test"
+)
+
+func TestBasic(t *testing.T) {
+ fdiscovery.InjectDiscovery(idiscovery.NewWithPlugins([]idiscovery.Plugin{mock.New()}))
+ ctx, shutdown := test.V23Init()
+ defer shutdown()
+
+ ds := NewDiscoveryService(ctx)
+ ctx, server, err := v23.WithNewServer(ctx, "", sdiscovery.DiscoveryServer(ds), nil)
+ if err != nil {
+ t.Fatalf("NewServer() failed: %v", err)
+ }
+ defer server.Stop()
+ addr := server.Status().Endpoints[0].Name()
+
+ services := []discovery.Service{
+ {
+ InstanceUuid: idiscovery.NewInstanceUUID(),
+ InterfaceName: "v.io/v23/a",
+ Attrs: discovery.Attributes{"a1": "v1"},
+ Addrs: []string{"/h1:123/x"},
+ },
+ {
+ InstanceUuid: idiscovery.NewInstanceUUID(),
+ InterfaceName: "v.io/v23/b",
+ Attrs: discovery.Attributes{"b1": "v1"},
+ Addrs: []string{"/h1:123/y"},
+ },
+ }
+
+ var handles []sdiscovery.ServiceHandle
+ advertiser := sdiscovery.AdvertiserClient(addr)
+ for _, service := range services {
+ handle, err := advertiser.RegisterService(ctx, service, nil)
+ if err != nil {
+ t.Fatalf("RegisterService() failed: %v", err)
+ }
+ handles = append(handles, handle)
+ }
+
+ scanner := sdiscovery.ScannerClient(addr)
+ if err := scanAndMatch(ctx, scanner, "", services...); err != nil {
+ t.Error(err)
+ }
+
+ if err := advertiser.UnregisterService(ctx, handles[0]); err != nil {
+ t.Fatalf("UnregisterService() failed: %v", err)
+ }
+ if err := scanAndMatch(ctx, scanner, "", services[1]); err != nil {
+ t.Error(err)
+ }
+}
+
+func scanAndMatch(ctx *context.T, scanner sdiscovery.ScannerClientStub, query string, wants ...discovery.Service) error {
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ stream, err := scanner.Scan(ctx, query)
+ if err != nil {
+ return err
+ }
+
+ recv := stream.RecvStream()
+ for len(wants) > 0 {
+ if !recv.Advance() {
+ return recv.Err()
+ }
+ found := recv.Value().Interface().(discovery.Found)
+ matched := false
+ for i, want := range wants {
+ if reflect.DeepEqual(found.Service, want) {
+ wants = append(wants[:i], wants[i+1:]...)
+ matched = true
+ break
+ }
+ }
+ if !matched {
+ return fmt.Errorf("unexpected service found: %v", found.Service)
+ }
+ }
+
+ // Make sure there is no more update.
+ time.AfterFunc(5*time.Millisecond, cancel)
+ if recv.Advance() {
+ return fmt.Errorf("unexpected update: %v", recv.Value())
+ }
+ return nil
+}
diff --git a/services/groups/groups/doc.go b/services/groups/groups/doc.go
index a925394..afa74ed 100644
--- a/services/groups/groups/doc.go
+++ b/services/groups/groups/doc.go
@@ -35,6 +35,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/services/identity/identityd/doc.go b/services/identity/identityd/doc.go
index e2bfac9..be8a14e 100644
--- a/services/identity/identityd/doc.go
+++ b/services/identity/identityd/doc.go
@@ -79,6 +79,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/services/identity/internal/blesser/macaroon_test.go b/services/identity/internal/blesser/macaroon_test.go
index 1afc3fb..4ee9d7a 100644
--- a/services/identity/internal/blesser/macaroon_test.go
+++ b/services/identity/internal/blesser/macaroon_test.go
@@ -56,19 +56,33 @@
// When the user does not recognize the provider, it should not see any strings for
// the client's blessings.
- if got := user.BlessingsInfo(b); got != nil {
- t.Errorf("Got blessing with info %v, want nil", got)
+ if got := security.BlessingNames(user, b); len(got) != 0 {
+ t.Errorf("Got %v, want nil", got)
}
// But once it recognizes the provider, it should see exactly the name
// "provider/bugsbunny" for the caveat cOnlyMethodFoo.
security.AddToRoots(user, b)
- binfo := user.BlessingsInfo(b)
- if num := len(binfo); num != 1 {
- t.Errorf("Got blessings with %d names, want exactly one name", num)
+ if got, want := security.BlessingNames(user, b), []string{"provider/bugsbunny"}; !reflect.DeepEqual(got, want) {
+ t.Errorf("Got %v, want %v", got, want)
}
- wantName := "provider/bugsbunny"
- if got, want := binfo[wantName], []security.Caveat{cOnlyMethodFoo}; !reflect.DeepEqual(got, want) {
- t.Errorf("binfo[%q]: Got %v, want %v", wantName, got, want)
+ // RemoteBlessingNames should see "provider/bugsbunny" only when caveats are met.
+ for idx, test := range []struct {
+ params security.CallParams
+ names []string
+ }{
+ {
+ params: security.CallParams{LocalPrincipal: user, RemoteBlessings: b, Method: "Foo"},
+ names: []string{"provider/bugsbunny"},
+ },
+ {
+ params: security.CallParams{LocalPrincipal: user, RemoteBlessings: b, Method: "Bar"},
+ names: nil,
+ },
+ } {
+ got, _ := security.RemoteBlessingNames(ctx, security.NewCall(&test.params))
+ if !reflect.DeepEqual(got, test.names) {
+ t.Errorf("#%d) Got %v, want %v", idx, got, test.names)
+ }
}
}
diff --git a/services/identity/internal/blesser/oauth_test.go b/services/identity/internal/blesser/oauth_test.go
index 63b7b5b..f497649 100644
--- a/services/identity/internal/blesser/oauth_test.go
+++ b/services/identity/internal/blesser/oauth_test.go
@@ -6,7 +6,6 @@
import (
"reflect"
- "sort"
"strings"
"testing"
"time"
@@ -56,18 +55,14 @@
// When the user does not recognize the provider, it should not see any strings for
// the client's blessings.
- if got := user.BlessingsInfo(b); got != nil {
- t.Errorf("Got blessing with info %v, want nil", got)
+ if got := security.BlessingNames(user, b); len(got) != 0 {
+ t.Errorf("Got %v, want nil", got)
}
// But once it recognizes the provider, it should see exactly the name
// "provider/testemail@example.com/test-client".
security.AddToRoots(user, b)
- binfo := user.BlessingsInfo(b)
- if num := len(binfo); num != 1 {
- t.Errorf("Got blessings with %d names, want exactly one name", num)
- }
- if _, ok := binfo[join("provider", wantExtension)]; !ok {
- t.Errorf("BlessingsInfo %v does not have name %s", binfo, wantExtension)
+ if got, want := security.BlessingNames(user, b), []string{join("provider", wantExtension)}; !reflect.DeepEqual(got, want) {
+ t.Errorf("Got %v, want %v", got, want)
}
}
@@ -75,6 +70,8 @@
var (
provider, user = testutil.NewPrincipal(), testutil.NewPrincipal()
ctx, call = fakeContextAndCall(provider, user)
+ now = time.Now()
+ expires = now.Add(time.Minute)
)
mockEmail := "testemail@example.com"
mockClientID := "test-client-id"
@@ -90,7 +87,7 @@
},
})
- expiryCav, err := security.NewExpiryCaveat(time.Now().Add(time.Minute))
+ expiryCav, err := security.NewExpiryCaveat(expires)
if err != nil {
t.Fatal(err)
}
@@ -116,36 +113,37 @@
// When the user does not recognize the provider, it should not see any strings for
// the client's blessings.
- if got := user.BlessingsInfo(b); got != nil {
- t.Errorf("Got blessing with info %v, want nil", got)
+ if got := security.BlessingNames(user, b); len(got) != 0 {
+ t.Errorf("Got %v, want nil", got)
}
// But once it recognizes the provider, it should see exactly the name
// "provider/testemail@example.com/test-client".
security.AddToRoots(user, b)
- binfo := user.BlessingsInfo(b)
- if num := len(binfo); num != 1 {
- t.Errorf("Got blessings with %d names, want exactly one name", num)
+ allnames := []string{join("provider", wantExtension)}
+ if got, want := security.BlessingNames(user, b), allnames; !reflect.DeepEqual(got, want) {
+ t.Errorf("Got %v, want %v", got, want)
}
- cavs, ok := binfo[join("provider", wantExtension)]
- if !ok {
- t.Errorf("BlessingsInfo %v does not have name %s", binfo, wantExtension)
+ // The presence of caveats will be tested by RemoteBlessingNames
+ for _, test := range []struct {
+ Time time.Time
+ Method string
+ Names []string
+ }{
+ {now, "foo", allnames},
+ {now, "bar", allnames},
+ {now, "baz", nil},
+ {expires.Add(time.Nanosecond), "foo", nil},
+ {expires.Add(time.Nanosecond), "bar", nil},
+ {expires.Add(time.Nanosecond), "baz", nil},
+ } {
+ call := security.NewCall(&security.CallParams{
+ LocalPrincipal: user,
+ RemoteBlessings: b,
+ Timestamp: test.Time,
+ Method: test.Method,
+ })
+ if got, _ := security.RemoteBlessingNames(ctx, call); !reflect.DeepEqual(got, test.Names) {
+ t.Errorf("%#v: Got %v, want %v", test, got, test.Names)
+ }
}
- if !caveatsMatch(cavs, caveats) {
- t.Errorf("got %v, want %v", cavs, caveats)
- }
-}
-
-func caveatsMatch(got, want []security.Caveat) bool {
- if len(got) != len(want) {
- return false
- }
- gotStrings := make([]string, len(got))
- wantStrings := make([]string, len(want))
- for i := 0; i < len(got); i++ {
- gotStrings[i] = got[i].String()
- wantStrings[i] = want[i].String()
- }
- sort.Strings(gotStrings)
- sort.Strings(wantStrings)
- return reflect.DeepEqual(gotStrings, wantStrings)
}
diff --git a/services/identity/internal/handlers/handlers_test.go b/services/identity/internal/handlers/handlers_test.go
index 2b3282f..a278cc0 100644
--- a/services/identity/internal/handlers/handlers_test.go
+++ b/services/identity/internal/handlers/handlers_test.go
@@ -8,6 +8,7 @@
"bytes"
"encoding/base64"
"encoding/json"
+ "fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
@@ -221,16 +222,16 @@
}
// Verify the name and caveats on the blessings.
- binfo := blesseePrin.BlessingsInfo(blessings)
- if len(binfo) != 1 {
- t.Errorf("got blessings with %d names, want blessings with 1 name", len(binfo))
+ if got, want := security.BlessingNames(blesseePrin, blessings), []string{
+ "blesser" + security.ChainSeparator + testClientID + security.ChainSeparator + testEmail,
+ }; !reflect.DeepEqual(got, want) {
+ t.Errorf("Got %v, want %v", got, want)
}
- wantName := "blesser" + security.ChainSeparator + testClientID + security.ChainSeparator + testEmail
- caveats, ok := binfo[wantName]
- if !ok {
- t.Errorf("expected blessing with name %v, got none", wantName)
+ caveats, err := extractCaveats(blessings)
+ if err != nil {
+ t.Error(err)
+ continue
}
-
if len(testcase.caveats) > 0 {
// The blessing must have exactly those caveats that were provided in the request.
if !caveatsMatch(t, caveats, testcase.caveats) {
@@ -249,6 +250,28 @@
}
}
+func extractCaveats(b security.Blessings) ([]security.Caveat, error) {
+ // Extract the wire encoding of the blessings and fish them out.
+ bytes, err := vom.Encode(b)
+ if err != nil {
+ return nil, err
+ }
+ var wire security.WireBlessings
+ if err := vom.Decode(bytes, &wire); err != nil {
+ return nil, err
+ }
+ if got, want := len(wire.CertificateChains), 1; got != want {
+ return nil, fmt.Errorf("Got %d blessings, want %d", got, want)
+ }
+ var ret []security.Caveat
+ for _, chain := range wire.CertificateChains {
+ for _, cert := range chain {
+ ret = append(ret, cert.Caveats...)
+ }
+ }
+ return ret, nil
+}
+
type caveatsSorter struct {
caveats []security.Caveat
t *testing.T
diff --git a/services/identity/internal/identityd_test/doc.go b/services/identity/internal/identityd_test/doc.go
index 1ab2911..d7c698e 100644
--- a/services/identity/internal/identityd_test/doc.go
+++ b/services/identity/internal/identityd_test/doc.go
@@ -51,6 +51,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/services/identity/internal/server/identityd.go b/services/identity/internal/server/identityd.go
index a388aa4..97baff3 100644
--- a/services/identity/internal/server/identityd.go
+++ b/services/identity/internal/server/identityd.go
@@ -123,9 +123,11 @@
fmt.Printf("NAME=%s\n", s.rootedObjectAddrs[0].Name())
}
<-signals.ShutdownOnSignals(ctx)
+ ctx.Infof("Received shutdown request.")
if err := rpcServer.Stop(); err != nil {
ctx.Errorf("Failed to stop rpc server: %v", err)
}
+ ctx.Infof("Successfully stopped the rpc server.")
}
func (s *IdentityServer) Listen(ctx *context.T, externalHttpAddr, httpAddr, tlsConfig string) (rpc.Server, []string, string) {
diff --git a/services/internal/binarylib/impl_test.go b/services/internal/binarylib/impl_test.go
index 8fb0201..eb2e6de 100644
--- a/services/internal/binarylib/impl_test.go
+++ b/services/internal/binarylib/impl_test.go
@@ -57,7 +57,7 @@
t.Fatalf("NewDispatcher failed: %v", err)
}
dontPublishName := ""
- ctx, server, err := v23.WithNewDispatchingServer(ctx, dontPublishName, dispatcher)
+ _, server, err := v23.WithNewDispatchingServer(ctx, dontPublishName, dispatcher)
if err != nil {
t.Fatalf("NewServer(%q) failed: %v", dontPublishName, err)
}
diff --git a/services/mounttable/mounttabled/doc.go b/services/mounttable/mounttabled/doc.go
index 4deba0d..e7ee1fc 100644
--- a/services/mounttable/mounttabled/doc.go
+++ b/services/mounttable/mounttabled/doc.go
@@ -42,6 +42,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/services/profile/profile/doc.go b/services/profile/profile/doc.go
index ec5189b..c93d1d1 100644
--- a/services/profile/profile/doc.go
+++ b/services/profile/profile/doc.go
@@ -34,6 +34,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/services/profile/profiled/doc.go b/services/profile/profiled/doc.go
index 85b8107..a188316 100644
--- a/services/profile/profiled/doc.go
+++ b/services/profile/profiled/doc.go
@@ -33,6 +33,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/services/proxy/proxyd/doc.go b/services/proxy/proxyd/doc.go
index cd0d3ce..5c5622f 100644
--- a/services/proxy/proxyd/doc.go
+++ b/services/proxy/proxyd/doc.go
@@ -42,6 +42,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/services/role/roled/doc.go b/services/role/roled/doc.go
index 0da6feb..7366356 100644
--- a/services/role/roled/doc.go
+++ b/services/role/roled/doc.go
@@ -32,6 +32,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/services/syncbase/server/interfaces/sync.vdl b/services/syncbase/server/interfaces/sync.vdl
index d144928..823a2b5 100644
--- a/services/syncbase/server/interfaces/sync.vdl
+++ b/services/syncbase/server/interfaces/sync.vdl
@@ -78,5 +78,6 @@
error (
DupSyncgroupPublish(name string) {"en": "duplicate publish on syncgroup: {name}"}
+ ConnFail() {"en": "connection to peer failed{:_}"}
BrokenCrConnection() {"en": "CrConnection stream to application does not exist or is broken."}
)
diff --git a/services/syncbase/server/interfaces/sync.vdl.go b/services/syncbase/server/interfaces/sync.vdl.go
index a413d60..efe5af5 100644
--- a/services/syncbase/server/interfaces/sync.vdl.go
+++ b/services/syncbase/server/interfaces/sync.vdl.go
@@ -24,11 +24,13 @@
var (
ErrDupSyncgroupPublish = verror.Register("v.io/x/ref/services/syncbase/server/interfaces.DupSyncgroupPublish", verror.NoRetry, "{1:}{2:} duplicate publish on syncgroup: {3}")
+ ErrConnFail = verror.Register("v.io/x/ref/services/syncbase/server/interfaces.ConnFail", verror.NoRetry, "{1:}{2:} connection to peer failed{:_}")
ErrBrokenCrConnection = verror.Register("v.io/x/ref/services/syncbase/server/interfaces.BrokenCrConnection", verror.NoRetry, "{1:}{2:} CrConnection stream to application does not exist or is broken.")
)
func init() {
i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(ErrDupSyncgroupPublish.ID), "{1:}{2:} duplicate publish on syncgroup: {3}")
+ i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(ErrConnFail.ID), "{1:}{2:} connection to peer failed{:_}")
i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(ErrBrokenCrConnection.ID), "{1:}{2:} CrConnection stream to application does not exist or is broken.")
}
@@ -37,6 +39,11 @@
return verror.New(ErrDupSyncgroupPublish, ctx, name)
}
+// NewErrConnFail returns an error with the ErrConnFail ID.
+func NewErrConnFail(ctx *context.T) error {
+ return verror.New(ErrConnFail, ctx)
+}
+
// NewErrBrokenCrConnection returns an error with the ErrBrokenCrConnection ID.
func NewErrBrokenCrConnection(ctx *context.T) error {
return verror.New(ErrBrokenCrConnection, ctx)
diff --git a/services/syncbase/syncbased/mojo_main.go b/services/syncbase/syncbased/mojo_main.go
index 6a6ac41..9eeaa4b 100644
--- a/services/syncbase/syncbased/mojo_main.go
+++ b/services/syncbase/syncbased/mojo_main.go
@@ -7,8 +7,8 @@
package main
// To build:
-// cd $JIRI_ROOT/experimental/projects/ether
-// make gen/mojo/syncbased.mojo
+// cd $JIRI_ROOT/release/projects/mojo/syncbase
+// make build
import (
"log"
diff --git a/services/syncbase/testutil/layer.go b/services/syncbase/testutil/layer.go
index 7c4e6b0..9a24e88 100644
--- a/services/syncbase/testutil/layer.go
+++ b/services/syncbase/testutil/layer.go
@@ -317,10 +317,10 @@
t.Fatalf("SetPermissions failed: %v", err)
}
if _, _, err := ac.GetPermissions(ctx); verror.ErrorID(err) != verror.ErrNoAccess.ID {
- t.Fatal("GetPermissions should have failed with access error")
+ t.Fatalf("GetPermissions should have failed with access error. Instead got error: %v", err)
}
if err := ac.SetPermissions(ctx, myperms, ""); verror.ErrorID(err) != verror.ErrNoAccess.ID {
- t.Fatal("SetPermissions should have failed with access error")
+ t.Fatalf("SetPermissions should have failed with access error. Instead got error: %v", err)
}
}
diff --git a/services/syncbase/vsync/clock.go b/services/syncbase/vsync/clock.go
index 4076616..cddd285 100644
--- a/services/syncbase/vsync/clock.go
+++ b/services/syncbase/vsync/clock.go
@@ -72,31 +72,36 @@
// syncClock syncs the syncbase clock with peer's syncbase clock.
// TODO(jlodhia): Refactor the mount table entry search code based on the
// unified solution for looking up peer once it exists.
-func (s *syncService) syncClock(ctx *context.T, peer string) {
- vlog.VI(2).Infof("sync: syncClock: begin: contacting peer %s", peer)
- defer vlog.VI(2).Infof("sync: syncClock: end: contacting peer %s", peer)
+func (s *syncService) syncClock(ctx *context.T, peer connInfo) error {
+ vlog.VI(2).Infof("sync: syncClock: begin: contacting peer %v", peer)
+ defer vlog.VI(2).Infof("sync: syncClock: end: contacting peer %v", peer)
- info := s.copyMemberInfo(ctx, peer)
+ info := s.copyMemberInfo(ctx, peer.relName)
if info == nil {
- vlog.Fatalf("sync: syncClock: missing information in member view for %q", peer)
+ vlog.Fatalf("sync: syncClock: missing information in member view for %v", peer)
}
- // Preferred mount tables for this peer.
- if len(info.mtTables) < 1 {
- vlog.Errorf("sync: syncClock: no mount tables found to connect to peer %s", peer)
- return
+ if len(info.mtTables) < 1 && peer.addr == "" {
+ vlog.Errorf("sync: syncClock: no mount tables or endpoint found to connect to peer %v", peer)
+ return verror.New(verror.ErrInternal, ctx, peer.relName, peer.addr, "no mount tables or endpoint found")
}
+
+ if peer.addr != "" {
+ vlog.VI(4).Infof("sync: syncClock: trying neighborhood addr for peer %v", peer)
+
+ absName := naming.Join(peer.addr, util.SyncbaseSuffix)
+ return syncWithPeer(ctx, s.vclock, absName, s.name)
+ }
+
for mt, _ := range info.mtTables {
- absName := naming.Join(mt, peer, util.SyncbaseSuffix)
- if err := syncWithPeer(ctx, s.vclock, absName, s.name); err == nil {
- return
- } else if (verror.ErrorID(err) == verror.ErrNoExist.ID) || (verror.ErrorID(err) == verror.ErrInternal.ID) {
- vlog.Errorf("sync: syncClock: error returned by peer %s: %v", peer, err)
- return
+ absName := naming.Join(mt, peer.relName, util.SyncbaseSuffix)
+ if err := syncWithPeer(ctx, s.vclock, absName, s.name); verror.ErrorID(err) != interfaces.ErrConnFail.ID {
+ return err
}
}
- vlog.Errorf("sync: syncClock: couldn't connect to peer %s", peer)
- return
+
+ vlog.Errorf("sync: syncClock: couldn't connect to peer %v", peer)
+ return verror.New(interfaces.ErrConnFail, ctx, peer.relName, peer.addr, "all mount tables failed")
}
// syncWithPeer tries to sync local clock with peer syncbase clock.
@@ -156,8 +161,11 @@
if commitErr := tx.Commit(); commitErr != nil {
vlog.Errorf("sync: syncClock: error while commiting tx: %v", commitErr)
}
+ } else if (verror.ErrorID(reqErr) == verror.ErrNoExist.ID) || (verror.ErrorID(reqErr) == verror.ErrInternal.ID) {
+ vlog.Errorf("sync: syncClock: error returned by peer %s: %v", absPeerName, reqErr)
} else {
- vlog.Errorf("sync: syncClock: received error: %v", reqErr)
+ reqErr = verror.New(interfaces.ErrConnFail, ctx, myName)
+ vlog.Errorf("sync: syncClock: received network error: %v", reqErr)
}
// Return error received while making request if any to the caller.
return reqErr
diff --git a/services/syncbase/vsync/cr_closure_test.go b/services/syncbase/vsync/cr_closure_test.go
index baa1366..09b8e1b 100644
--- a/services/syncbase/vsync/cr_closure_test.go
+++ b/services/syncbase/vsync/cr_closure_test.go
@@ -33,7 +33,7 @@
var (
updObjects map[string]*objConflictState
- zVer = string(watchable.NewVersion())
+ zVer = string(watchable.NewVersion())
batchxzbId = rand64()
batchxzb = createBatch(true /*local*/, x, z, b)
@@ -99,7 +99,7 @@
func TestGroupFor(t *testing.T) {
service := createService(t)
defer destroyService(t, service)
-
+
updObjects = createUpdObjectsMap()
iSt := &initiationState{updObjects: updObjects, tx: service.St().NewTransaction()}
createAndSaveNodeAndBatchData(iSt)
@@ -190,7 +190,7 @@
verifyBatchesByOid(t, group, b, batchxzbId, batchbcId)
verifyBatchesByOid(t, group, y, batchxyId)
verifyBatchesByOid(t, group, c, batchbcId)
-
+
objSt := updObjects[z]
if (objSt == nil) || (objSt.oldHead != zVer) || objSt.isConflict || !objSt.isAddedByCr {
t.Errorf("Unexpected value of objConflictState for z: %#v", objSt)
diff --git a/services/syncbase/vsync/initiator.go b/services/syncbase/vsync/initiator.go
index 533adf9..9b607a2 100644
--- a/services/syncbase/vsync/initiator.go
+++ b/services/syncbase/vsync/initiator.go
@@ -16,6 +16,7 @@
"v.io/v23/context"
"v.io/v23/naming"
+ "v.io/v23/options"
"v.io/v23/services/syncbase/nosql"
"v.io/v23/vdl"
"v.io/v23/verror"
@@ -48,13 +49,13 @@
// initiation round), the work done by the initiator is idempotent.
//
// TODO(hpucha): Check the idempotence, esp in addNode in DAG.
-func (s *syncService) getDeltas(ctx *context.T, peer string) {
- vlog.VI(2).Infof("sync: getDeltas: begin: contacting peer %s", peer)
- defer vlog.VI(2).Infof("sync: getDeltas: end: contacting peer %s", peer)
+func (s *syncService) getDeltas(ctx *context.T, peer connInfo) error {
+ vlog.VI(2).Infof("sync: getDeltas: begin: contacting peer %v", peer)
+ defer vlog.VI(2).Infof("sync: getDeltas: end: contacting peer %v", peer)
- info := s.copyMemberInfo(ctx, peer)
+ info := s.copyMemberInfo(ctx, peer.relName)
if info == nil {
- vlog.Fatalf("sync: getDeltas: missing information in member view for %q", peer)
+ vlog.Fatalf("sync: getDeltas: missing information in member view for %v", peer)
}
// Preferred mount tables for this peer.
@@ -62,22 +63,24 @@
// Sync each Database that may have syncgroups common with this peer,
// one at a time.
+ var errFinal error // Any error encountered is returned to the caller.
for gdbName := range info.db2sg {
- vlog.VI(4).Infof("sync: getDeltas: started for peer %s db %s", peer, gdbName)
+ vlog.VI(4).Infof("sync: getDeltas: started for peer %v db %s", peer, gdbName)
if len(prfMtTbls) < 1 {
- vlog.Errorf("sync: getDeltas: no mount tables found to connect to peer %s", peer)
- return
+ vlog.Errorf("sync: getDeltas: no mount tables found to connect to peer %v", peer)
+ return verror.New(verror.ErrInternal, ctx, peer.relName, peer.addr, "all mount tables failed")
}
c, err := newInitiationConfig(ctx, s, peer, gdbName, info, prfMtTbls)
if err != nil {
- vlog.Errorf("sync: getDeltas: couldn't initialize initiator config for peer %s, gdb %s, err %v", peer, gdbName, err)
+ vlog.Errorf("sync: getDeltas: couldn't initialize initiator config for peer %v, gdb %s, err %v", peer, gdbName, err)
+ errFinal = err
continue
}
- if err := s.getDBDeltas(ctx, peer, c, true); err == nil {
- if err := s.getDBDeltas(ctx, peer, c, false); err != nil {
+ if err = s.getDBDeltas(ctx, c, true); err == nil {
+ if err = s.getDBDeltas(ctx, c, false); err != nil {
vlog.Errorf("sync: getDeltas: failed for data sync, err %v", err)
}
} else {
@@ -85,18 +88,25 @@
vlog.Errorf("sync: getDeltas: failed for syncgroup sync, err %v", err)
}
+ if verror.ErrorID(err) == interfaces.ErrConnFail.ID {
+ return err
+ } else if err != nil {
+ errFinal = err
+ }
+
// Cache the pruned mount table list for the next Database.
prfMtTbls = c.mtTables
- vlog.VI(4).Infof("sync: getDeltas: done for peer %s db %s", peer, gdbName)
+ vlog.VI(4).Infof("sync: getDeltas: done for peer %v db %s", peer, gdbName)
}
+ return errFinal
}
// getDBDeltas gets the deltas from the chosen peer. If sg flag is set to true,
// it will sync syncgroup metadata. If sg flag is false, it will sync data.
-func (s *syncService) getDBDeltas(ctxIn *context.T, peer string, c *initiationConfig, sg bool) error {
- vlog.VI(2).Infof("sync: getDBDeltas: begin: contacting peer sg %v %s", sg, peer)
- defer vlog.VI(2).Infof("sync: getDBDeltas: end: contacting peer sg %v %s", sg, peer)
+func (s *syncService) getDBDeltas(ctxIn *context.T, c *initiationConfig, sg bool) error {
+ vlog.VI(2).Infof("sync: getDBDeltas: begin: contacting peer sg %v %v", sg, c.peer)
+ defer vlog.VI(2).Infof("sync: getDBDeltas: end: contacting peer sg %v %v", sg, c.peer)
ctx, cancel := context.WithCancel(ctxIn)
// cancel() is idempotent.
@@ -109,7 +119,7 @@
if !sg {
iSt.peerSgInfo(ctx)
if len(iSt.config.sgPfxs) == 0 {
- return verror.New(verror.ErrInternal, ctx, "no syncgroup prefixes found", peer, iSt.config.appName, iSt.config.dbName)
+ return verror.New(verror.ErrInternal, ctx, "no syncgroup prefixes found", c.peer.relName, iSt.config.appName, iSt.config.dbName)
}
}
@@ -130,7 +140,7 @@
// Make contact with the peer.
if !iSt.connectToPeer(ctx) {
- return verror.New(verror.ErrInternal, ctx, "couldn't connect to peer", peer)
+ return verror.New(interfaces.ErrConnFail, ctx, "couldn't connect to peer", c.peer.relName, c.peer.addr)
}
// Obtain deltas from the peer over the network.
@@ -156,7 +166,7 @@
// initiationConfig is the configuration information for a Database in an
// initiation round.
type initiationConfig struct {
- peer string // relative name of the peer to sync with.
+ peer connInfo // connection info of the peer to sync with.
// Mount tables that this peer may have registered with. The first entry
// in this array is the mount table where the peer was successfully
@@ -206,13 +216,12 @@
oldHead string
ancestor string
res *conflictResolution
-
// TODO(jlodhia): Add perms object and version for the row keys for pickNew
}
// newInitiatonConfig creates new initiation config. This will be shared between
// the two sync rounds in the initiation round of a Database.
-func newInitiationConfig(ctx *context.T, s *syncService, peer string, name string, info *memberInfo, mtTables []string) (*initiationConfig, error) {
+func newInitiationConfig(ctx *context.T, s *syncService, peer connInfo, name string, info *memberInfo, mtTables []string) (*initiationConfig, error) {
c := &initiationConfig{}
c.peer = peer
c.mtTables = mtTables
@@ -221,7 +230,7 @@
c.sgIds[id] = struct{}{}
}
if len(c.sgIds) == 0 {
- return nil, verror.New(verror.ErrInternal, ctx, "no syncgroups found", peer, name)
+ return nil, verror.New(verror.ErrInternal, ctx, "no syncgroups found", peer.relName, name)
}
// Note: sgPfxs will be inited when needed by the data sync.
@@ -270,7 +279,7 @@
if err != nil {
continue
}
- if _, ok := sg.Joiners[iSt.config.peer]; !ok {
+ if _, ok := sg.Joiners[iSt.config.peer.relName]; !ok {
// Peer is no longer part of the syncgroup.
continue
}
@@ -413,45 +422,60 @@
return nil
}
-// connectToPeer attempts to connect to the remote peer using the mount tables
-// obtained from all the common syncgroups.
-func (iSt *initiationState) connectToPeer(ctxIn *context.T) bool {
+// connectToPeer attempts to connect to the remote peer using the neighborhood
+// address when specified or the mount tables obtained from all the common
+// syncgroups.
+func (iSt *initiationState) connectToPeer(ctx *context.T) bool {
vlog.VI(4).Infof("sync: connectToPeer: begin")
- if len(iSt.config.mtTables) < 1 {
- vlog.Errorf("sync: connectToPeer: no mount tables found to connect to peer %s, app %s db %s", iSt.config.peer, iSt.config.appName, iSt.config.dbName)
+ if len(iSt.config.mtTables) < 1 && iSt.config.peer.addr == "" {
+ vlog.Errorf("sync: connectToPeer: no mount tables or endpoint found to connect to peer %s, app %s db %s", iSt.config.peer, iSt.config.appName, iSt.config.dbName)
return false
}
+ if iSt.config.peer.addr != "" {
+ absName := naming.Join(iSt.config.peer.addr, util.SyncbaseSuffix)
+ return iSt.connectToPeerInternal(ctx, absName)
+ }
+
for i, mt := range iSt.config.mtTables {
- ctx, cancel := context.WithCancel(ctxIn)
-
- // We start a timer to bound the amount of time we wait to
- // initiate a connection.
- t := time.AfterFunc(connectionTimeOut, cancel)
-
- absName := naming.Join(mt, iSt.config.peer, util.SyncbaseSuffix)
- c := interfaces.SyncClient(absName)
-
- vlog.VI(4).Infof("sync: connectToPeer: trying %v", absName)
-
- var err error
- iSt.stream, err = c.GetDeltas(ctx, iSt.req, iSt.config.sync.name)
- t.Stop()
-
- if err == nil {
- vlog.VI(4).Infof("sync: connectToPeer: established on %s", absName)
-
- // Prune out the unsuccessful mount tables.
- iSt.config.mtTables = iSt.config.mtTables[i:]
+ absName := naming.Join(mt, iSt.config.peer.relName, util.SyncbaseSuffix)
+ if iSt.connectToPeerInternal(ctx, absName) {
return true
}
- // When the RPC is successful, cancelling the parent context
- // will take care of cancelling the child context.
- cancel()
+ // Prune out the unsuccessful mount tables.
+ iSt.config.mtTables = iSt.config.mtTables[i:]
}
iSt.config.mtTables = nil
- vlog.Errorf("sync: connectToPeer: couldn't connect to peer %s", iSt.config.peer)
+
+ vlog.Errorf("sync: connectToPeer: couldn't connect to peer %v", iSt.config.peer)
+ return false
+}
+
+func (iSt *initiationState) connectToPeerInternal(ctxIn *context.T, absName string) bool {
+ ctx, cancel := context.WithCancel(ctxIn)
+
+ // We start a timer to bound the amount of time we wait to
+ // initiate a connection.
+ t := time.AfterFunc(connectionTimeOut, cancel)
+
+ c := interfaces.SyncClient(absName)
+
+ vlog.VI(4).Infof("sync: connectToPeer: trying %v", absName)
+
+ var err error
+ iSt.stream, err = c.GetDeltas(ctx, iSt.req, iSt.config.sync.name,
+ options.ChannelTimeout(connectionTimeOut))
+ t.Stop()
+
+ if err == nil {
+ vlog.VI(4).Infof("sync: connectToPeer: established on %s", absName)
+ return true
+ }
+
+ // When the RPC is successful, cancelling the parent context
+ // will take care of cancelling the child context.
+ cancel()
return false
}
@@ -653,7 +677,7 @@
}
}
vlog.VI(4).Infof("sync: processBlobRefs: Found blobref %v peer %v, source %v, sgs %v", br, iSt.config.peer, srcPeer, sgIds)
- info := &blobLocInfo{peer: iSt.config.peer, source: srcPeer, sgIds: sgIds}
+ info := &blobLocInfo{peer: iSt.config.peer.relName, source: srcPeer, sgIds: sgIds}
if err := iSt.config.sync.addBlobLocInfo(ctx, br, info); err != nil {
return err
}
diff --git a/services/syncbase/vsync/initiator_test.go b/services/syncbase/vsync/initiator_test.go
index 07552f0..d77bbf5 100644
--- a/services/syncbase/vsync/initiator_test.go
+++ b/services/syncbase/vsync/initiator_test.go
@@ -424,7 +424,7 @@
return svc, nil, cleanup
}
- c, err := newInitiationConfig(nil, s, "b", gdb, info, set.String.ToSlice(info.mtTables))
+ c, err := newInitiationConfig(nil, s, connInfo{relName: "b"}, gdb, info, set.String.ToSlice(info.mtTables))
if err != nil {
t.Fatalf("newInitiationConfig failed with err %v", err)
}
diff --git a/services/syncbase/vsync/sync.go b/services/syncbase/vsync/sync.go
index f4c5187..e203fdd 100644
--- a/services/syncbase/vsync/sync.go
+++ b/services/syncbase/vsync/sync.go
@@ -19,7 +19,9 @@
"sync"
"time"
+ "v.io/v23"
"v.io/v23/context"
+ "v.io/v23/discovery"
"v.io/v23/naming"
"v.io/v23/rpc"
"v.io/v23/services/syncbase/nosql"
@@ -35,10 +37,23 @@
// syncService contains the metadata for the sync module.
type syncService struct {
- // TODO(hpucha): see if "v.io/v23/uniqueid" is a better fit. It is 128 bits.
- id uint64 // globally unique id for this instance of Syncbase.
- name string // name derived from the global id.
- sv interfaces.Service
+ // TODO(hpucha): see if "v.io/v23/uniqueid" is a better fit. It is 128
+ // bits. Another alternative is to derive this from the blessing of
+ // Syncbase. Syncbase can append a uuid to the blessing it is given upon
+ // launch and use its hash as id. Note we cannot use public key since we
+ // want to support key rollover.
+ id uint64 // globally unique id for this instance of Syncbase.
+ name string // name derived from the global id.
+ sv interfaces.Service
+
+ // Root context to be used to create a context for advertising over
+ // neighborhood.
+ ctx *context.T
+
+ // Cancel function for a context derived from the root context when
+ // advertising over neighborhood. This is needed to stop advertising.
+ advCancel context.CancelFunc
+
nameLock sync.Mutex // lock needed to serialize adding and removing of Syncbase names.
// High-level lock to serialize the watcher and the initiator. This lock is
@@ -73,6 +88,11 @@
allMembers *memberView
allMembersLock sync.RWMutex
+ // In-memory map of sync peers found in the neighborhood through the
+ // discovery service. The map key is the discovery service UUID.
+ discoveryPeers map[string]*discovery.Service
+ discoveryPeersLock sync.RWMutex
+
// In-memory sync state per Database. This state is populated at
// startup, and periodically persisted by the initiator.
syncState map[string]*dbSyncStateInMem
@@ -97,6 +117,9 @@
// Syncbase clock related variables.
vclock *clock.VClock
+
+ // Peer selector for picking a peer to sync with.
+ ps peerSelector
}
// syncDatabase contains the metadata for syncing a database. This struct is
@@ -140,6 +163,7 @@
batches: make(batchSet),
sgPublishQueue: list.New(),
vclock: vclock,
+ ctx: ctx,
}
data := &syncData{}
@@ -177,7 +201,7 @@
// Channel to propagate close event to all threads.
s.closed = make(chan struct{})
- s.pending.Add(2)
+ s.pending.Add(3)
// Start watcher thread to watch for updates to local store.
go s.watchStore(ctx)
@@ -185,42 +209,161 @@
// Start initiator thread to periodically get deltas from peers.
go s.syncer(ctx)
+ // Start the discovery service thread to listen to neighborhood updates.
+ go s.discoverPeers(ctx)
+
return s, nil
}
+// Closed returns true if the sync service channel is closed indicating that the
+// service is shutting down.
+func (s *syncService) Closed() bool {
+ select {
+ case <-s.closed:
+ return true
+ default:
+ return false
+ }
+}
+
+// discoverPeers listens to updates from the discovery service to learn about
+// sync peers as they enter and leave the neighborhood.
+func (s *syncService) discoverPeers(ctx *context.T) {
+ defer s.pending.Done()
+
+ scanner := v23.GetDiscovery(ctx)
+ if scanner == nil {
+ vlog.Fatal("sync: discoverPeers: discovery service not initialized")
+ }
+
+ // TODO(rdaoud): refactor this interface name query string.
+ query := interfaces.SyncDesc.PkgPath + "/" + interfaces.SyncDesc.Name
+ ch, err := scanner.Scan(ctx, query)
+ if err != nil {
+ vlog.Errorf("sync: discoverPeers: cannot start discovery service: %v", err)
+ return
+ }
+
+ for !s.Closed() {
+ select {
+ case update, ok := <-ch:
+ if s.Closed() {
+ break
+ }
+ if !ok {
+ vlog.VI(1).Info("sync: discoverPeers: scan cancelled, stop listening and exit")
+ return
+ }
+ switch u := update.(type) {
+ case discovery.UpdateFound:
+ svc := &u.Value.Service
+ s.updateDiscoveryPeer(string(svc.InstanceUuid), svc)
+ case discovery.UpdateLost:
+ s.updateDiscoveryPeer(string(u.Value.InstanceUuid), nil)
+ default:
+ vlog.Errorf("sync: discoverPeers: ignoring invalid update: %v", update)
+ }
+
+ case <-s.closed:
+ break
+ }
+ }
+
+ vlog.VI(1).Info("sync: discoverPeers: channel closed, stop listening and exit")
+}
+
+// updateDiscoveryPeer adds or removes information about a sync peer found in
+// the neighborhood through the discovery service. If the service entry is nil
+// the peer is removed from the discovery map.
+func (s *syncService) updateDiscoveryPeer(peerInstance string, service *discovery.Service) {
+ s.discoveryPeersLock.Lock()
+ defer s.discoveryPeersLock.Unlock()
+
+ if s.discoveryPeers == nil {
+ s.discoveryPeers = make(map[string]*discovery.Service)
+ }
+
+ if service != nil {
+ vlog.VI(3).Infof("sync: updateDiscoveryPeer: adding peer %s: %v", peerInstance, service)
+ s.discoveryPeers[peerInstance] = service
+ } else {
+ vlog.VI(3).Infof("sync: updateDiscoveryPeer: removing peer %s", peerInstance)
+ delete(s.discoveryPeers, peerInstance)
+ }
+}
+
// AddNames publishes all the names for this Syncbase instance gathered from all
// the syncgroups it is currently participating in. This is needed when
// syncbased is restarted so that it can republish itself at the names being
// used in the syncgroups.
func AddNames(ctx *context.T, ss interfaces.SyncServerMethods, svr rpc.Server) error {
- vlog.VI(2).Infof("sync: AddNames:: begin")
- defer vlog.VI(2).Infof("sync: AddNames:: end")
+ vlog.VI(2).Infof("sync: AddNames: begin")
+ defer vlog.VI(2).Infof("sync: AddNames: end")
s := ss.(*syncService)
s.nameLock.Lock()
defer s.nameLock.Unlock()
mInfo := s.copyMemberInfo(ctx, s.name)
- if mInfo == nil {
- vlog.VI(2).Infof("sync: GetNames:: end returning no names")
+ if mInfo == nil || len(mInfo.mtTables) == 0 {
+ vlog.VI(2).Infof("sync: AddNames: end returning no names")
return nil
}
for mt := range mInfo.mtTables {
name := naming.Join(mt, s.name)
if err := svr.AddName(name); err != nil {
+ vlog.VI(2).Infof("sync: AddNames: end returning err %v", err)
return err
}
}
- return nil
+ return s.publishInNeighborhood(svr)
+}
+
+// publishInNeighborhood checks if the Syncbase service is already being
+// advertised over the neighborhood. If not, it begins advertising. The caller
+// of the function is holding nameLock.
+func (s *syncService) publishInNeighborhood(svr rpc.Server) error {
+ // Syncbase is already being advertised.
+ if s.advCancel != nil {
+ return nil
+ }
+
+ ctx, stop := context.WithCancel(s.ctx)
+
+ advertiser := v23.GetDiscovery(ctx)
+ if advertiser == nil {
+ vlog.Fatal("sync: publishInNeighborhood: discovery not initialized.")
+ }
+
+ // TODO(hpucha): For now we grab the current address of the server. This
+ // will be replaced by library support that will take care of roaming.
+ var eps []string
+ for _, ep := range svr.Status().Endpoints {
+ eps = append(eps, ep.Name())
+ }
+
+ sbService := discovery.Service{
+ InstanceUuid: []byte(s.name),
+ InstanceName: s.name,
+ InterfaceName: interfaces.SyncDesc.PkgPath + "/" + interfaces.SyncDesc.Name,
+ Addrs: eps,
+ }
+
+ // Duplicate calls to advertise will return an error.
+ _, err := advertiser.Advertise(ctx, sbService, nil)
+ if err == nil {
+ s.advCancel = stop
+ }
+ return err
}
// Close waits for spawned sync threads (watcher and initiator) to shut down,
// and closes the local blob store handle.
func Close(ss interfaces.SyncServerMethods) {
- vlog.VI(2).Infof("sync: Close:: begin")
- defer vlog.VI(2).Infof("sync: Close:: end")
+ vlog.VI(2).Infof("sync: Close: begin")
+ defer vlog.VI(2).Infof("sync: Close: end")
s := ss.(*syncService)
close(s.closed)
diff --git a/services/syncbase/vsync/sync_state.go b/services/syncbase/vsync/sync_state.go
index 72421cb..53eb098 100644
--- a/services/syncbase/vsync/sync_state.go
+++ b/services/syncbase/vsync/sync_state.go
@@ -277,11 +277,14 @@
if gen == 0 {
continue
}
- lrec, err := getLogRec(ctx, st, pfx, id, gen)
+ // Since log records may be filtered, we search for the last
+ // available log record going backwards from the generation up
+ // to which a device is caught up.
+ lrec, err := getPrevLogRec(ctx, st, pfx, id, gen)
if err != nil {
return 0, 0, err
}
- if lrec.Pos > maxpos {
+ if lrec != nil && lrec.Pos > maxpos {
found = true
maxpos = lrec.Pos
}
@@ -294,6 +297,21 @@
return maxgen + 1, maxpos, nil
}
+// TODO(hpucha): This can be optimized using a backwards scan or a better
+// search.
+func getPrevLogRec(ctx *context.T, st store.Store, pfx string, dev, gen uint64) (*localLogRec, error) {
+ for i := gen; i > 0; i-- {
+ rec, err := getLogRec(ctx, st, pfx, dev, i)
+ if err == nil {
+ return rec, nil
+ }
+ if verror.ErrorID(err) != verror.ErrNoExist.ID {
+ return nil, err
+ }
+ }
+ return nil, nil
+}
+
// enqueuePublishSyncgroup appends the given syncgroup to the publish queue.
func (s *syncService) enqueuePublishSyncgroup(sgName, appName, dbName string, attempted bool) {
s.sgPublishQueueLock.Lock()
diff --git a/services/syncbase/vsync/syncer.go b/services/syncbase/vsync/syncer.go
index 3ca54fa..6f1c507 100644
--- a/services/syncbase/vsync/syncer.go
+++ b/services/syncbase/vsync/syncer.go
@@ -5,11 +5,13 @@
package vsync
import (
+ "sync"
"time"
"v.io/v23/context"
"v.io/v23/verror"
"v.io/x/lib/vlog"
+ "v.io/x/ref/services/syncbase/server/interfaces"
)
// Policies to pick a peer to sync with.
@@ -17,7 +19,11 @@
// Picks a peer at random from the available set.
selectRandom = iota
- // TODO(hpucha): implement other policies.
+ // Picks a peer based on network availability and available Syncbases
+ // over the neighborhood via discovery.
+ selectNeighborhoodAware
+
+ // TODO(hpucha): implement these policies.
// Picks a peer with most differing generations.
selectMostDiff
@@ -40,6 +46,34 @@
connectionTimeOut = 2 * time.Second
)
+// connInfo holds the information needed to connect to a peer.
+//
+// TODO(hpucha): Add hints to decide if both neighborhood and mount table must
+// be tried. Currently, if the addr is set, only the addr is tried.
+type connInfo struct {
+ // Name of the peer relative to the mount table chosen by the syncgroup
+ // creator.
+ relName string
+ // Network address of the peer if available. For example, this can be
+ // obtained from neighborhood discovery.
+ addr string
+}
+
+// peerSelector defines the interface that a peer selection algorithm must
+// provide.
+type peerSelector interface {
+ // pickPeer picks a Syncbase to sync with.
+ pickPeer(ctx *context.T) (connInfo, error)
+
+ // updatePeerFromSyncer updates information for a peer that the syncer
+ // attempts to connect to.
+ updatePeerFromSyncer(ctx *context.T, peer connInfo, attemptTs time.Time, failed bool) error
+
+ // updatePeerFromResponder updates information for a peer that the
+ // responder responds to.
+ updatePeerFromResponder(ctx *context.T, peer string, connTs time.Time, gv interfaces.GenVector) error
+}
+
// syncer wakes up every peerSyncInterval to do work: (1) Refresh memberView if
// needed and pick a peer from all the known remote peers to sync with. (2) Act
// as an initiator and sync syncgroup metadata for all common syncgroups with
@@ -54,67 +88,164 @@
func (s *syncService) syncer(ctx *context.T) {
defer s.pending.Done()
+ s.newPeerSelector(ctx)
ticker := time.NewTicker(peerSyncInterval)
defer ticker.Stop()
- for {
- // Give priority to close event if both ticker and closed are
- // simultaneously triggered.
+ for !s.Closed() {
select {
- case <-s.closed:
- vlog.VI(1).Info("sync: syncer: channel closed, stop work and exit")
- return
-
case <-ticker.C:
- }
- select {
+ if s.Closed() {
+ break
+ }
+ s.syncerWork(ctx)
+
case <-s.closed:
- vlog.VI(1).Info("sync: syncer: channel closed, stop work and exit")
- return
-
- default:
+ break
}
-
- // TODO(hpucha): Cut a gen for the responder even if there is no
- // one to initiate to?
-
- // Do work.
- peer, err := s.pickPeer(ctx)
- if err != nil {
- continue
- }
-
- s.syncClock(ctx, peer)
-
- // Sync syncgroup metadata and data.
- s.getDeltas(ctx, peer)
}
+ vlog.VI(1).Info("sync: syncer: channel closed, stop work and exit")
+}
+
+func (s *syncService) syncerWork(ctx *context.T) {
+ // TODO(hpucha): Cut a gen for the responder even if there is no
+ // one to initiate to?
+
+ // Do work.
+ attemptTs := time.Now()
+ peer, err := s.ps.pickPeer(ctx)
+ if err != nil {
+ return
+ }
+
+ err = s.syncClock(ctx, peer)
+ // Abort syncing if there is a connection error with peer.
+ if verror.ErrorID(err) != interfaces.ErrConnFail.ID {
+ err = s.getDeltas(ctx, peer)
+ }
+
+ s.ps.updatePeerFromSyncer(ctx, peer, attemptTs, verror.ErrorID(err) == interfaces.ErrConnFail.ID)
}
////////////////////////////////////////
// Peer selection policies.
-// pickPeer picks a Syncbase to sync with.
-func (s *syncService) pickPeer(ctx *context.T) (string, error) {
+func (s *syncService) newPeerSelector(ctx *context.T) error {
switch peerSelectionPolicy {
case selectRandom:
- members := s.getMembers(ctx)
- // Remove myself from the set.
- delete(members, s.name)
- if len(members) == 0 {
- return "", verror.New(verror.ErrInternal, ctx, "no useful peer")
- }
-
- // Pick a peer at random.
- ind := randIntn(len(members))
- for m := range members {
- if ind == 0 {
- return m, nil
- }
- ind--
- }
- return "", verror.New(verror.ErrInternal, ctx, "random selection didn't succeed")
+ s.ps = &randomPeerSelector{s: s}
+ return nil
+ case selectNeighborhoodAware:
+ s.ps = &neighborhoodAwarePeerSelector{s: s}
+ return nil
default:
- return "", verror.New(verror.ErrInternal, ctx, "unknown peer selection policy")
+ return verror.New(verror.ErrInternal, ctx, "unknown peer selection policy")
}
}
+
+////////////////////////////////////////
+// Random selector.
+
+type randomPeerSelector struct {
+ s *syncService
+}
+
+func (ps *randomPeerSelector) pickPeer(ctx *context.T) (connInfo, error) {
+ var peer connInfo
+
+ members := ps.s.getMembers(ctx)
+ // Remove myself from the set.
+ delete(members, ps.s.name)
+ if len(members) == 0 {
+ return peer, verror.New(verror.ErrInternal, ctx, "no useful peer")
+ }
+
+ // Pick a peer at random.
+ ind := randIntn(len(members))
+ for m := range members {
+ if ind == 0 {
+ peer.relName = m
+ return peer, nil
+ }
+ ind--
+ }
+ return peer, verror.New(verror.ErrInternal, ctx, "random selection didn't succeed")
+}
+
+func (ps *randomPeerSelector) updatePeerFromSyncer(ctx *context.T, peer connInfo, attemptTs time.Time, failed bool) error {
+ // Random selector does not care about this information.
+ return nil
+}
+
+func (ps *randomPeerSelector) updatePeerFromResponder(ctx *context.T, peer string, connTs time.Time, gv interfaces.GenVector) error {
+ // Random selector does not care about this information.
+ return nil
+}
+
+////////////////////////////////////////
+// NeighborhoodAware selector.
+
+// peerSyncInfo holds the running statistics collected per peer, i.e. a peer
+// that syncs with this node or with which this node syncs.
+type peerSyncInfo struct {
+ // Number of consecutive failures noticed when attempting to connect to
+ // this peer, either via its advertised mount table or via
+ // neighborhood. These counters are reset when the connection to the
+ // peer succeeds.
+ numFailuresMountTable uint64
+ numFailuresNeighborhood uint64
+
+ // The most recent timestamp when a connection to this peer was attempted.
+ attemptTs time.Time
+ // The most recent timestamp when a connection to this peer succeeded.
+ successTs time.Time
+ // The most recent timestamp when this peer synced with this node.
+ fromTs time.Time
+ // Map of database names and their corresponding generation vectors for
+ // data and syncgroups.
+ gvs map[string]interfaces.GenVector
+}
+
+type neighborhoodAwarePeerSelector struct {
+ s *syncService
+ // In-memory cache of information relevant to syncing with a peer. This
+ // information could potentially be used in peer selection.
+ peerTbl map[string]*peerSyncInfo
+ peerTblLock sync.RWMutex
+}
+
+func (ps *neighborhoodAwarePeerSelector) pickPeer(ctx *context.T) (connInfo, error) {
+ var peer connInfo
+ return peer, nil
+}
+
+func (ps *neighborhoodAwarePeerSelector) updatePeerFromSyncer(ctx *context.T, peer connInfo, attemptTs time.Time, failed bool) error {
+ ps.peerTblLock.Lock()
+ defer ps.peerTblLock.Unlock()
+
+ info, ok := ps.peerTbl[peer.relName]
+ if !ok {
+ info = &peerSyncInfo{}
+ ps.peerTbl[peer.relName] = info
+ }
+
+ info.attemptTs = attemptTs
+ if !failed {
+ info.numFailuresMountTable = 0
+ info.numFailuresNeighborhood = 0
+ info.successTs = time.Now()
+ return nil
+ }
+
+ if peer.addr != "" {
+ info.numFailuresNeighborhood++
+ } else {
+ info.numFailuresMountTable++
+ }
+
+ return nil
+}
+
+func (ps *neighborhoodAwarePeerSelector) updatePeerFromResponder(ctx *context.T, peer string, connTs time.Time, gv interfaces.GenVector) error {
+ return nil
+}
diff --git a/services/syncbase/vsync/syncgroup.go b/services/syncbase/vsync/syncgroup.go
index 5433869..3d1f5fc 100644
--- a/services/syncbase/vsync/syncgroup.go
+++ b/services/syncbase/vsync/syncgroup.go
@@ -1144,9 +1144,7 @@
}
}
- // TODO(hpucha): Do we have to publish in neighborhood explicitly?
-
- return nil
+ return ss.publishInNeighborhood(call.Server())
}
func (sd *syncDatabase) joinSyncgroupAtAdmin(ctx *context.T, call rpc.ServerCall, sgName, name string, myInfo wire.SyncgroupMemberInfo) (interfaces.Syncgroup, string, interfaces.PrefixGenVector, error) {
diff --git a/services/syncbase/vsync/util.go b/services/syncbase/vsync/util.go
index 12877dc..4e0c523 100644
--- a/services/syncbase/vsync/util.go
+++ b/services/syncbase/vsync/util.go
@@ -98,7 +98,6 @@
return util.JoinKeyParts(p.TableName, p.RowPrefix)
}
-
// TODO(jlodhia): extractAppKey() method is temporary for conflict resolution.
// Will be removed once SyncgroupPrefix is refactored into a generic
// TableRow struct.
diff --git a/services/syncbase/vsync/watcher.go b/services/syncbase/vsync/watcher.go
index 08a4313..0d5e5d6 100644
--- a/services/syncbase/vsync/watcher.go
+++ b/services/syncbase/vsync/watcher.go
@@ -64,24 +64,20 @@
ctx, cancel := context.WithCancel(ctx)
defer cancel()
- for {
+ for !s.Closed() {
select {
- case <-s.closed:
- vlog.VI(1).Info("sync: watchStore: channel closed, stop watching and exit")
- return
-
case <-ticker.C:
+ if s.Closed() {
+ break
+ }
+ s.processStoreUpdates(ctx)
- }
- select {
case <-s.closed:
- vlog.VI(1).Info("sync: watchStore: channel closed, stop watching and exit")
- return
-
- default:
+ break
}
- s.processStoreUpdates(ctx)
}
+
+ vlog.VI(1).Info("sync: watchStore: channel closed, stop watching and exit")
}
// processStoreUpdates fetches updates from all databases and processes them.
diff --git a/services/wspr/internal/app/app.go b/services/wspr/internal/app/app.go
index 28e2c9d..010a9a3 100644
--- a/services/wspr/internal/app/app.go
+++ b/services/wspr/internal/app/app.go
@@ -344,6 +344,8 @@
c.typeReader.Close()
c.cancel()
+
+ c.typeDecoder.Stop()
}
func (c *Controller) setup() {
@@ -353,6 +355,7 @@
c.servers = make(map[uint32]*server.Server)
c.typeReader = lib.NewTypeReader()
c.typeDecoder = vom.NewTypeDecoder(c.typeReader)
+ c.typeDecoder.Start()
c.typeEncoder = vom.NewTypeEncoder(lib.NewTypeWriter(c.writerCreator(typeFlow)))
c.lastGeneratedId += 2
}
diff --git a/services/wspr/internal/app/app_test.go b/services/wspr/internal/app/app_test.go
index 32bcfc3..a5d0fc1 100644
--- a/services/wspr/internal/app/app_test.go
+++ b/services/wspr/internal/app/app_test.go
@@ -431,6 +431,8 @@
ctx: ctx,
}
mock.typeDecoder = vom.NewTypeDecoder(mock.typeReader)
+ mock.typeDecoder.Start()
+ defer mock.typeDecoder.Stop()
rt, err := serveServer(ctx, mock, func(controller *Controller) {
mock.controller = controller
})
diff --git a/services/wspr/internal/browspr/browspr_test.go b/services/wspr/internal/browspr/browspr_test.go
index 32389ac..f5a00f0 100644
--- a/services/wspr/internal/browspr/browspr_test.go
+++ b/services/wspr/internal/browspr/browspr_test.go
@@ -251,8 +251,10 @@
if err != nil {
t.Errorf("Failed to hex decode from %v: %v", data, err)
}
-
- decoder := vom.NewDecoderWithTypeDecoder(bytes.NewBuffer(dataBytes), vom.NewTypeDecoder(typeReader))
+ td := vom.NewTypeDecoder(typeReader)
+ td.Start()
+ defer td.Stop()
+ decoder := vom.NewDecoderWithTypeDecoder(bytes.NewBuffer(dataBytes), td)
if err := decoder.Decode(&result); err != nil {
t.Errorf("Failed to vom decode args from %v: %v", data, err)
}
diff --git a/services/wspr/internal/principal/cache_test.go b/services/wspr/internal/principal/cache_test.go
index d2141d1..73be928 100644
--- a/services/wspr/internal/principal/cache_test.go
+++ b/services/wspr/internal/principal/cache_test.go
@@ -89,7 +89,7 @@
bc := NewBlessingsCache(notifier, onDemandGCPolicy)
// Blessings for the tests.
- p, err := security.CreatePrincipal(newSigner(), nil, nil, nil, nil)
+ p, err := security.CreatePrincipal(newSigner(), nil, nil)
if err != nil {
t.Fatal("Failed to create principal: ", err)
}
diff --git a/services/xproxyd/proxyd.go b/services/xproxyd/proxyd.go
index 06ad6b4..d14b2f2 100644
--- a/services/xproxyd/proxyd.go
+++ b/services/xproxyd/proxyd.go
@@ -142,7 +142,7 @@
}
func (p *proxy) replyToServer(ctx *context.T, f flow.Flow) error {
- rid := f.Conn().RemoteEndpoint().RoutingID()
+ rid := f.RemoteEndpoint().RoutingID()
eps, err := p.returnEndpoints(ctx, rid, "")
if err != nil {
return err
@@ -155,7 +155,7 @@
// returned endpoint doesn't matter because it will eventually be replaced
// by a server's rid by some later proxy.
// TODO(suharshs): Use a local route instead of this global routingID.
- rid := f.Conn().RemoteEndpoint().RoutingID()
+ rid := f.RemoteEndpoint().RoutingID()
eps, err := p.returnEndpoints(ctx, naming.NullRoutingID, rid.String())
if err != nil {
return err
diff --git a/test/goroutines/goroutines.go b/test/goroutines/goroutines.go
index c83011e..555d666 100644
--- a/test/goroutines/goroutines.go
+++ b/test/goroutines/goroutines.go
@@ -21,6 +21,7 @@
var ignoredGoroutines = []string{
"runtime.ensureSigM",
+ "sync.(*WaitGroup).Done",
}
type Goroutine struct {
@@ -44,11 +45,11 @@
bufsize *= 2
buf = make([]byte, bufsize)
}
- return Parse(buf)
+ return Parse(buf, true)
}
// Parse parses a stack trace into a structure representation.
-func Parse(buf []byte) ([]*Goroutine, error) {
+func Parse(buf []byte, ignore bool) ([]*Goroutine, error) {
scanner := bufio.NewScanner(bytes.NewReader(buf))
var out []*Goroutine
for scanner.Scan() {
@@ -59,7 +60,7 @@
if err != nil {
return out, fmt.Errorf("Error %v parsing trace:\n%s", err, string(buf))
}
- if !shouldIgnore(g) {
+ if !ignore || !shouldIgnore(g) {
out = append(out, g)
}
}
diff --git a/test/goroutines/goroutines_test.go b/test/goroutines/goroutines_test.go
index c8c236c..3b3f725 100644
--- a/test/goroutines/goroutines_test.go
+++ b/test/goroutines/goroutines_test.go
@@ -107,7 +107,7 @@
buf = buf[:runtime.Stack(buf, true)]
close(wait)
- gs, err := Parse(buf)
+ gs, err := Parse(buf, false)
if err != nil {
t.Fatal(err)
}
diff --git a/test/hello/helloclient/doc.go b/test/hello/helloclient/doc.go
index 6f7c049..35062a9 100644
--- a/test/hello/helloclient/doc.go
+++ b/test/hello/helloclient/doc.go
@@ -30,6 +30,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=
diff --git a/test/hello/helloserver/doc.go b/test/hello/helloserver/doc.go
index 4174c3c..91cba82 100644
--- a/test/hello/helloserver/doc.go
+++ b/test/hello/helloserver/doc.go
@@ -30,6 +30,8 @@
Displays metadata for the program and exits.
-stderrthreshold=2
logs at or above this threshold go to stderr
+ -time=false
+ Dump timing information to stderr before exiting the program.
-v=0
log level for V logs
-v23.credentials=