syncbase: switch internal key sep to \xfe, allow ":" in keys
As part of this change, we switch to allowing \x00 and reserving \xfc
through \xff.
MultiPart: 2/2
Change-Id: I60257b21ce1feb860d992f5d49ff58d78f70f2c3
diff --git a/services/syncbase/server/db_info_test.go b/services/syncbase/server/db_info_test.go
index 7bc0870..386cd4c 100644
--- a/services/syncbase/server/db_info_test.go
+++ b/services/syncbase/server/db_info_test.go
@@ -14,7 +14,7 @@
dbName string
stKey string
}{
- {"app1", "db1", "$dbInfo:app1:db1"},
+ {"app1", "db1", "$dbInfo\xfeapp1\xfedb1"},
}
for _, test := range tests {
got, want := dbInfoStKey(&app{name: test.appName}, test.dbName), test.stKey
diff --git a/services/syncbase/server/nosql/database.go b/services/syncbase/server/nosql/database.go
index 6412297..32740d7 100644
--- a/services/syncbase/server/nosql/database.go
+++ b/services/syncbase/server/nosql/database.go
@@ -351,13 +351,13 @@
return nil, err
}
it := sntx.Scan(util.ScanPrefixArgs(util.TablePrefix, ""))
- key := []byte{}
+ keyBytes := []byte{}
res := []string{}
for it.Advance() {
- key = it.Key(key)
- parts := util.SplitKeyParts(string(key))
+ keyBytes = it.Key(keyBytes)
+ parts := util.SplitNKeyParts(string(keyBytes), 2)
// For explanation of Escape(), see comment in server/nosql/dispatcher.go.
- res = append(res, pubutil.Escape(parts[len(parts)-1]))
+ res = append(res, pubutil.Escape(parts[1]))
}
if err := it.Err(); err != nil {
return nil, err
@@ -508,9 +508,9 @@
if s.it[s.curr].Advance() {
// key
keyBytes := s.it[s.curr].Key(nil)
- parts := util.SplitKeyParts(string(keyBytes))
+ parts := util.SplitNKeyParts(string(keyBytes), 3)
// TODO(rogulenko): Check access for the key.
- s.currKey = parts[len(parts)-1]
+ s.currKey = parts[2]
// value
valueBytes := s.it[s.curr].Value(nil)
var currValue *vdl.Value
diff --git a/services/syncbase/server/nosql/database_watch.go b/services/syncbase/server/nosql/database_watch.go
index e9e1921..b0f61a5 100644
--- a/services/syncbase/server/nosql/database_watch.go
+++ b/services/syncbase/server/nosql/database_watch.go
@@ -170,13 +170,12 @@
default:
continue
}
- parts := util.SplitKeyParts(opKey)
// TODO(rogulenko): Currently we only process rows, i.e. keys of the form
- // <rowPrefix>:xxx:yyy. Consider processing other keys.
- if len(parts) != 3 || parts[0] != util.RowPrefix {
+ // <RowPrefix>\xfexxx\xfeyyy. Consider processing other keys.
+ if !util.IsRowKey(opKey) {
continue
}
- table, row := parts[1], parts[2]
+ table, row := util.ParseTableAndRowOrDie(opKey)
// Filter out unnecessary rows and rows that we can't access.
if table != t.name || !strings.HasPrefix(row, prefix) {
continue
diff --git a/services/syncbase/server/nosql/table.go b/services/syncbase/server/nosql/table.go
index a43dfe3..5850e62 100644
--- a/services/syncbase/server/nosql/table.go
+++ b/services/syncbase/server/nosql/table.go
@@ -166,8 +166,9 @@
for it.Advance() {
key = it.Key(key)
// Check perms.
- parts := util.SplitKeyParts(string(key))
- externalKey := parts[len(parts)-1]
+ // See comment in util/constants.go for why we use SplitNKeyParts.
+ parts := util.SplitNKeyParts(string(key), 3)
+ externalKey := parts[2]
permsPrefix, err := t.checkAccess(ctx, call, tx, externalKey)
if err != nil {
// TODO(rogulenko): Revisit this behavior. Probably we should
@@ -211,8 +212,9 @@
for it.Advance() {
key, value = it.Key(key), it.Value(value)
// Check perms.
- parts := util.SplitKeyParts(string(key))
- externalKey := parts[len(parts)-1]
+ // See comment in util/constants.go for why we use SplitNKeyParts.
+ parts := util.SplitNKeyParts(string(key), 3)
+ externalKey := parts[2]
if _, err := t.checkAccess(ctx, call, sntx, externalKey); err != nil {
it.Cancel()
return err
@@ -534,8 +536,10 @@
return "", "", nil
}
defer it.Cancel()
- parts := util.SplitKeyParts(string(it.Key(nil)))
- prefix = strings.TrimSuffix(parts[len(parts)-1], util.PrefixRangeLimitSuffix)
+ // See comment in util/constants.go for why we use SplitNKeyParts.
+ parts := util.SplitNKeyParts(string(it.Key(nil)), 3)
+ externalKey := parts[2]
+ prefix = strings.TrimSuffix(externalKey, util.PrefixRangeLimitSuffix)
value := it.Value(nil)
if err = vom.Decode(value, &parent); err != nil {
return "", "", verror.New(verror.ErrInternal, ctx, err)
diff --git a/services/syncbase/server/util/constants.go b/services/syncbase/server/util/constants.go
index fa89fa8..272233e 100644
--- a/services/syncbase/server/util/constants.go
+++ b/services/syncbase/server/util/constants.go
@@ -9,6 +9,9 @@
)
// Constants related to storage engine keys.
+// Note, these are persisted and therefore must not be modified.
+// TODO(sadovsky): Use one-byte strings. Changing these prefixes breaks various
+// tests. Tests generally shouldn't depend on the values of these constants.
const (
AppPrefix = "$app"
ClockPrefix = "$clock"
@@ -23,28 +26,9 @@
TablePrefix = "$table"
VersionPrefix = "$version"
- // Note, these are persisted and therefore must not be modified.
- // Below, they are ordered lexicographically by value.
- // TODO(sadovsky): Changing these prefixes breaks various tests. Tests
- // generally shouldn't depend on the values of these constants.
- /*
- AppPrefix = "a"
- ClockPrefix = "c"
- DatabasePrefix = "d"
- DbInfoPrefix = "i"
- LogPrefix = "l"
- PermsPrefix = "p"
- RowPrefix = "r"
- ServicePrefix = "s"
- TablePrefix = "t"
- VersionPrefix = "v"
- SyncPrefix = "y"
- */
-
- // Separator for parts of storage engine keys.
- // TODO(sadovsky): Switch to \xff or \x00, both of which are disallowed in
- // client-specified names and keys.
- KeyPartSep = ":"
+ // KeyPartSep is a separator for parts of storage engine keys, e.g. separating
+ // table name from row key.
+ KeyPartSep = "\xfe"
// PrefixRangeLimitSuffix is a key suffix that indicates the end of a prefix
// range. Must be greater than any character allowed in client-specified keys.
diff --git a/services/syncbase/server/util/key_util.go b/services/syncbase/server/util/key_util.go
index e1b9272..c5736ec 100644
--- a/services/syncbase/server/util/key_util.go
+++ b/services/syncbase/server/util/key_util.go
@@ -5,6 +5,7 @@
package util
import (
+ "fmt"
"strconv"
"strings"
@@ -18,22 +19,65 @@
return strings.Join(parts, KeyPartSep)
}
-// SplitKeyParts is the inverse of JoinKeyParts.
+// SplitKeyParts is the inverse of JoinKeyParts. Clients are generally
+// encouraged to use SplitNKeyParts.
func SplitKeyParts(key string) []string {
return strings.Split(key, KeyPartSep)
}
-// StripFirstPartOrDie strips off the first part of the given key. Typically
+// SplitNKeyParts is to SplitKeyParts as strings.SplitN is to strings.Split.
+func SplitNKeyParts(key string, n int) []string {
+ return strings.SplitN(key, KeyPartSep, n)
+}
+
+// StripFirstKeyPartOrDie strips off the first part of the given key. Typically
// used to strip off the key prefixes defined in constants.go. Panics if the
// input string has fewer than two parts.
-func StripFirstPartOrDie(key string) string {
- parts := strings.SplitN(key, KeyPartSep, 2)
+func StripFirstKeyPartOrDie(key string) string {
+ parts := SplitNKeyParts(key, 2)
if len(parts) < 2 {
- vlog.Fatalf("StripFirstPartOrDie: invalid key: %q", key)
+ vlog.Fatalf("StripFirstKeyPartOrDie: invalid key %q", key)
}
return parts[1]
}
+// FirstKeyPart returns the first part of 'key', typically a key prefix defined
+// in constants.go.
+func FirstKeyPart(key string) string {
+ return SplitNKeyParts(key, 2)[0]
+}
+
+// IsRowKey returns true iff 'key' is a storage engine key for a row.
+func IsRowKey(key string) bool {
+ return FirstKeyPart(key) == RowPrefix
+}
+
+// IsPermsKey returns true iff 'key' is a storage engine key for perms.
+func IsPermsKey(key string) bool {
+ return FirstKeyPart(key) == PermsPrefix
+}
+
+// ParseTableAndRow extracts table and row parts from the given storage engine
+// key for a row or perms. Returns an error if the given key is not a storage
+// engine key for a row or perms.
+func ParseTableAndRow(key string) (table string, row string, err error) {
+ parts := SplitNKeyParts(key, 3)
+ pfx := parts[0]
+ if len(parts) < 3 || (pfx != RowPrefix && pfx != PermsPrefix) {
+ return "", "", fmt.Errorf("ParseTableAndRow: invalid key %q", key)
+ }
+ return parts[1], parts[2], nil
+}
+
+// ParseTableAndRowOrDie calls ParseTableAndRow and panics on error.
+func ParseTableAndRowOrDie(key string) (table string, row string) {
+ table, row, err := ParseTableAndRow(key)
+ if err != nil {
+ vlog.Fatal(err)
+ }
+ return table, row
+}
+
// ScanPrefixArgs returns args for sn.Scan() for the specified prefix.
func ScanPrefixArgs(stKeyPrefix, prefix string) ([]byte, []byte) {
return ScanRangeArgs(stKeyPrefix, util.PrefixRangeStart(prefix), util.PrefixRangeLimit(prefix))
diff --git a/services/syncbase/server/util/key_util_test.go b/services/syncbase/server/util/key_util_test.go
index 69db9fa..c8b6a43 100644
--- a/services/syncbase/server/util/key_util_test.go
+++ b/services/syncbase/server/util/key_util_test.go
@@ -11,15 +11,13 @@
"v.io/x/ref/services/syncbase/server/util"
)
-type kpt struct {
+var keyPartTests = []struct {
parts []string
key string
-}
-
-var keyPartTests []kpt = []kpt{
- {[]string{"a", "b"}, "a:b"},
- {[]string{"aa", "bb"}, "aa:bb"},
- {[]string{"a", "b", "c"}, "a:b:c"},
+}{
+ {[]string{"a", "b"}, "a\xfeb"},
+ {[]string{"aa", "bb"}, "aa\xfebb"},
+ {[]string{"a", "b", "c"}, "a\xfeb\xfec"},
}
func TestJoinKeyParts(t *testing.T) {
@@ -40,13 +38,157 @@
}
}
+func TestSplitNKeyParts(t *testing.T) {
+ for _, test := range keyPartTests {
+ got, want := util.SplitNKeyParts(test.key, 1), []string{test.key}
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("%q: got %v, want %v", test.key, got, want)
+ }
+ }
+ for _, test := range keyPartTests {
+ // Note, all test cases in keyPartTests have <= 3 parts.
+ got, want := util.SplitNKeyParts(test.key, 3), test.parts
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("%q: got %v, want %v", test.key, got, want)
+ }
+ }
+}
+
+func TestStripFirstKeyPartOrDie(t *testing.T) {
+ tests := []struct {
+ in string
+ out string
+ }{
+ {"a\xfe", ""},
+ {"a\xfeb", "b"},
+ {"a\xfe\xfe", "\xfe"},
+ {"a\xfeb\xfe", "b\xfe"},
+ {"a\xfeb\xfec", "b\xfec"},
+ }
+ for _, test := range tests {
+ got, want := util.StripFirstKeyPartOrDie(test.in), test.out
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("%q: got %v, want %v", test.in, got, want)
+ }
+ }
+}
+
+func TestFirstKeyPart(t *testing.T) {
+ tests := []struct {
+ in string
+ out string
+ }{
+ {"", ""},
+ {"a", "a"},
+ {"a\xfe", "a"},
+ {"a\xfeb", "a"},
+ {"\xfe", ""},
+ {"\xfeb", ""},
+ }
+ for _, test := range tests {
+ got, want := util.FirstKeyPart(test.in), test.out
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("%q: got %v, want %v", test.in, got, want)
+ }
+ }
+}
+
+func TestIsRowKey(t *testing.T) {
+ tests := []struct {
+ in string
+ out bool
+ }{
+ {"", false},
+ {"a", false},
+ {"a\xfe", false},
+ {"a\xfeb", false},
+ {util.RowPrefix, true},
+ {util.RowPrefix + "\xfe", true},
+ {util.RowPrefix + "\xfeb", true},
+ {util.PermsPrefix, false},
+ {util.PermsPrefix + "\xfe", false},
+ {util.PermsPrefix + "\xfeb", false},
+ }
+ for _, test := range tests {
+ got, want := util.IsRowKey(test.in), test.out
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("%q: got %v, want %v", test.in, got, want)
+ }
+ }
+}
+
+func TestIsPermsKey(t *testing.T) {
+ tests := []struct {
+ in string
+ out bool
+ }{
+ {"", false},
+ {"a", false},
+ {"a\xfe", false},
+ {"a\xfeb", false},
+ {util.RowPrefix, false},
+ {util.RowPrefix + "\xfe", false},
+ {util.RowPrefix + "\xfeb", false},
+ {util.PermsPrefix, true},
+ {util.PermsPrefix + "\xfe", true},
+ {util.PermsPrefix + "\xfeb", true},
+ }
+ for _, test := range tests {
+ got, want := util.IsPermsKey(test.in), test.out
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("%q: got %v, want %v", test.in, got, want)
+ }
+ }
+}
+
+func TestParseTableAndRow(t *testing.T) {
+ tests := []struct {
+ key string
+ table string
+ row string
+ err bool
+ }{
+ {util.RowPrefix + "\xfetb\xferow", "tb", "row", false},
+ {util.RowPrefix + "\xfetb\xfe", "tb", "", false},
+ {util.RowPrefix + "\xfe\xferow", "", "row", false},
+ {util.RowPrefix + "\xfe\xfe", "", "", false},
+ {util.PermsPrefix + "\xfetb\xferow", "tb", "row", false},
+ {util.PermsPrefix + "\xfetb\xfe", "tb", "", false},
+ {util.PermsPrefix + "\xfe\xferow", "", "row", false},
+ {util.PermsPrefix + "\xfe\xfe", "", "", false},
+ {"pfx\xfetb\xferow", "", "", true},
+ {"pfx\xfetb\xfe", "", "", true},
+ {"pfx\xfe\xferow", "", "", true},
+ {"pfx\xfe\xfe", "", "", true},
+ {"\xfetb\xferow", "", "", true},
+ {"\xfetb\xfe", "", "", true},
+ {"\xfe\xferow", "", "", true},
+ {"\xfe\xfe", "", "", true},
+ {util.RowPrefix, "", "", true},
+ {util.RowPrefix + "\xfetb", "", "", true},
+ {util.RowPrefix + "\xfe", "", "", true},
+ }
+ for _, test := range tests {
+ table, row, err := util.ParseTableAndRow(test.key)
+ if !reflect.DeepEqual(table, test.table) {
+ t.Errorf("%q: got %v, want %v", test.key, table, test.table)
+ }
+ if !reflect.DeepEqual(row, test.row) {
+ t.Errorf("%q: got %v, want %v", test.key, row, test.row)
+ }
+ if !reflect.DeepEqual(err != nil, test.err) {
+ t.Errorf("%q: got %v, want %v", test.key, err != nil, test.err)
+ }
+ }
+}
+
func TestScanPrefixArgs(t *testing.T) {
tests := []struct {
stKeyPrefix, prefix, wantStart, wantLimit string
}{
- {"x", "", "x:", "x;"},
- {"x", "a", "x:a", "x:b"},
- {"x", "a\xff", "x:a\xff", "x:b"},
+ {"x", "", "x\xfe", "x\xff"},
+ {"x", "a", "x\xfea", "x\xfeb"},
+ {"x", "a\xfe", "x\xfea\xfe", "x\xfea\xff"},
}
for _, test := range tests {
start, limit := util.ScanPrefixArgs(test.stKeyPrefix, test.prefix)
@@ -64,11 +206,11 @@
tests := []struct {
stKeyPrefix, start, limit, wantStart, wantLimit string
}{
- {"x", "", "", "x:", "x;"}, // limit "" means "no limit"
- {"x", "a", "", "x:a", "x;"}, // limit "" means "no limit"
- {"x", "a", "b", "x:a", "x:b"},
- {"x", "a", "a", "x:a", "x:a"}, // empty range
- {"x", "b", "a", "x:b", "x:a"}, // empty range
+ {"x", "", "", "x\xfe", "x\xff"}, // limit "" means "no limit"
+ {"x", "a", "", "x\xfea", "x\xff"}, // limit "" means "no limit"
+ {"x", "a", "b", "x\xfea", "x\xfeb"},
+ {"x", "a", "a", "x\xfea", "x\xfea"}, // empty range
+ {"x", "b", "a", "x\xfeb", "x\xfea"}, // empty range
}
for _, test := range tests {
start, limit := util.ScanRangeArgs(test.stKeyPrefix, test.start, test.limit)
diff --git a/services/syncbase/server/watchable/stream.go b/services/syncbase/server/watchable/stream.go
index 3448c02..b6ba752 100644
--- a/services/syncbase/server/watchable/stream.go
+++ b/services/syncbase/server/watchable/stream.go
@@ -7,6 +7,7 @@
import (
"sync"
+ "v.io/x/ref/services/syncbase/server/util"
"v.io/x/ref/services/syncbase/store"
)
@@ -44,7 +45,7 @@
return false
}
versionKey, version := s.iit.Key(nil), s.iit.Value(nil)
- s.key = []byte(join(split(string(versionKey))[1:]...)) // drop "$version" prefix
+ s.key = []byte(util.StripFirstKeyPartOrDie(string(versionKey))) // drop "$version" prefix
s.value, s.err = s.sntx.Get(makeAtVersionKey(s.key, version), nil)
if s.err != nil {
return false
diff --git a/services/syncbase/server/watchable/util.go b/services/syncbase/server/watchable/util.go
index b49530e..4661978 100644
--- a/services/syncbase/server/watchable/util.go
+++ b/services/syncbase/server/watchable/util.go
@@ -84,10 +84,6 @@
return util.JoinKeyParts(parts...)
}
-func split(key string) []string {
- return util.SplitKeyParts(key)
-}
-
func convertError(err error) error {
return verror.Convert(verror.IDAction{}, nil, err)
}
diff --git a/services/syncbase/server/watchable/watcher.go b/services/syncbase/server/watchable/watcher.go
index b78f86a..ab24d81 100644
--- a/services/syncbase/server/watchable/watcher.go
+++ b/services/syncbase/server/watchable/watcher.go
@@ -146,7 +146,7 @@
}
func parseResumeMarker(resumeMarker string) (uint64, error) {
- parts := split(resumeMarker)
+ parts := util.SplitNKeyParts(resumeMarker, 2)
if len(parts) != 2 {
return 0, verror.New(watch.ErrUnknownResumeMarker, nil, resumeMarker)
}
diff --git a/services/syncbase/testutil/constants.go b/services/syncbase/testutil/constants.go
index 3f65999..57be396 100644
--- a/services/syncbase/testutil/constants.go
+++ b/services/syncbase/testutil/constants.go
@@ -7,7 +7,13 @@
var invalidIdentifiers []string = []string{
"/",
"a/b",
+ ":",
+ "a:b",
"*",
+ "\x00",
+ "\x01",
+ "\xfa",
+ "\xfb",
"@@",
"dev.v.io/a/admin@myapp.com",
"안녕하세요",
@@ -27,11 +33,13 @@
var NotOkAppRowNames []string = []string{
"",
- ":",
- "\x00",
+ "\xfc",
+ "\xfd",
+ "\xfe",
"\xff",
- "a:b",
- "a\x00b",
+ "a\xfcb",
+ "a\xfdb",
+ "a\xfeb",
"a\xffb",
}
diff --git a/services/syncbase/vsync/cr_app_resolves.go b/services/syncbase/vsync/cr_app_resolves.go
index 89f6c0e..640dc00 100644
--- a/services/syncbase/vsync/cr_app_resolves.go
+++ b/services/syncbase/vsync/cr_app_resolves.go
@@ -13,6 +13,7 @@
"v.io/v23/verror"
"v.io/x/lib/vlog"
"v.io/x/ref/services/syncbase/server/interfaces"
+ "v.io/x/ref/services/syncbase/server/util"
"v.io/x/ref/services/syncbase/server/watchable"
"v.io/x/ref/services/syncbase/store"
)
@@ -295,7 +296,7 @@
func createRowConflictInfo(ctx *context.T, iSt *initiationState, oid string, batches []uint64, contd bool) *wire.ConflictInfo {
op := wire.RowOp{}
- op.Key = extractAppKey(oid)
+ op.Key = util.StripFirstKeyPartOrDie(oid)
objSt := iSt.updObjects[oid]
ancestorVer := objSt.ancestor
if ancestorVer != NoVersion {
diff --git a/services/syncbase/vsync/cr_app_resolves_test.go b/services/syncbase/vsync/cr_app_resolves_test.go
index 3b3c846..640c96a 100644
--- a/services/syncbase/vsync/cr_app_resolves_test.go
+++ b/services/syncbase/vsync/cr_app_resolves_test.go
@@ -11,6 +11,7 @@
wire "v.io/v23/services/syncbase/nosql"
"v.io/x/ref/services/syncbase/server/interfaces"
+ "v.io/x/ref/services/syncbase/server/util"
"v.io/x/ref/services/syncbase/server/watchable"
"v.io/x/ref/services/syncbase/store"
)
@@ -379,7 +380,7 @@
}
}
rInfo := wire.ResolutionInfo{
- Key: extractAppKey(oid),
+ Key: util.StripFirstKeyPartOrDie(oid),
Selection: sel,
Result: valRes,
Continued: cntd,
diff --git a/services/syncbase/vsync/cr_policy_filter.go b/services/syncbase/vsync/cr_policy_filter.go
index 139a5a4..f8a7a2f 100644
--- a/services/syncbase/vsync/cr_policy_filter.go
+++ b/services/syncbase/vsync/cr_policy_filter.go
@@ -10,6 +10,7 @@
"v.io/v23/context"
wire "v.io/v23/services/syncbase/nosql"
"v.io/x/lib/vlog"
+ "v.io/x/ref/services/syncbase/server/util"
)
// getDbSchema returns the SchemaMetadata for the db.
@@ -44,7 +45,7 @@
// 2. Else, if only one match specifies a type, take that one.
// 3. Else, the two matches are identical; take the last one in the Rules array.
func getResolutionType(oid string, schema *wire.SchemaMetadata) wire.ResolverType {
- if !isRowKey(oid) {
+ if !util.IsRowKey(oid) {
// This is a perms object key. Handle perms using LastWins policy till a
// better policy is available.
return wire.ResolverTypeLastWins
@@ -69,7 +70,7 @@
// applies to the given oid.
// TODO(jlodhia): Implement Type based matching.
func isRuleApplicable(oid string, rule *wire.CrRule) bool {
- tableName, rowKey := extractComponentsFromKey(oid)
+ tableName, rowKey := util.ParseTableAndRowOrDie(oid)
if rule.TableName != "" && tableName != rule.TableName {
return false
}
diff --git a/services/syncbase/vsync/dag_test.go b/services/syncbase/vsync/dag_test.go
index fedf762..8ec653b 100644
--- a/services/syncbase/vsync/dag_test.go
+++ b/services/syncbase/vsync/dag_test.go
@@ -108,7 +108,7 @@
st := svc.St()
s := svc.sync
- oid, version := "tb:foo1", "7"
+ oid, version := "tb\xfefoo1", "7"
tx := st.NewTransaction()
if err := s.addParent(nil, tx, oid, version, "haha", nil); err == nil {
@@ -236,7 +236,7 @@
st := svc.St()
s := svc.sync
- oid := "tb:foo1"
+ oid := "tb\xfefoo1"
if _, err := s.dagReplayCommands(nil, "local-init-00.log.sync"); err != nil {
t.Fatal(err)
@@ -258,21 +258,21 @@
tx := st.NewTransaction()
// Make sure a new node cannot have more than 2 parents.
- if err := s.addNode(nil, tx, oid, "4", "tb:foo", false, []string{"1", "2", "3"}, NoBatchId, nil); err == nil {
+ if err := s.addNode(nil, tx, oid, "4", "tb\xfefoo", false, []string{"1", "2", "3"}, NoBatchId, nil); err == nil {
t.Errorf("addNode() did not fail when given 3 parents")
}
// Make sure a new node cannot have an invalid parent.
- if err := s.addNode(nil, tx, oid, "4", "tb:foo", false, []string{"1", "555"}, NoBatchId, nil); err == nil {
+ if err := s.addNode(nil, tx, oid, "4", "tb\xfefoo", false, []string{"1", "555"}, NoBatchId, nil); err == nil {
t.Errorf("addNode() did not fail when using an invalid parent")
}
// Make sure a new root node (no parents) can be added once a root exists.
// For the parents array, check both the "nil" and the empty array as input.
- if err := s.addNode(nil, tx, oid, "6789", "tb:foo", false, nil, NoBatchId, nil); err != nil {
+ if err := s.addNode(nil, tx, oid, "6789", "tb\xfefoo", false, nil, NoBatchId, nil); err != nil {
t.Errorf("cannot add another root node (nil parents) for object %s: %v", oid, err)
}
- if err := s.addNode(nil, tx, oid, "9999", "tb:foo", false, []string{}, NoBatchId, nil); err != nil {
+ if err := s.addNode(nil, tx, oid, "9999", "tb\xfefoo", false, []string{}, NoBatchId, nil); err != nil {
t.Errorf("cannot add another root node (empty parents) for object %s: %v", oid, err)
}
@@ -289,7 +289,7 @@
st := svc.St()
s := svc.sync
- oid := "tb:foo1"
+ oid := "tb\xfefoo1"
graft, err := s.dagReplayCommands(nil, "remote-init-00.log.sync")
if err != nil {
@@ -331,7 +331,7 @@
oid, isConflict, newHead, oldHead, ancestor, errConflict)
}
- if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "$sync:log:data:11:3" {
+ if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe11\xfe3" {
t.Errorf("invalid logrec for newhead object %s:%s: %v", oid, newHead, logrec)
}
@@ -363,7 +363,7 @@
st := svc.St()
s := svc.sync
- oid := "tb:foo1"
+ oid := "tb\xfefoo1"
if _, err := s.dagReplayCommands(nil, "local-init-00.log.sync"); err != nil {
t.Fatal(err)
@@ -407,10 +407,10 @@
oid, isConflict, newHead, oldHead, ancestor, errConflict)
}
- if logrec, err := getLogRecKey(nil, st, oid, oldHead); err != nil || logrec != "$sync:log:data:10:3" {
+ if logrec, err := getLogRecKey(nil, st, oid, oldHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe10\xfe3" {
t.Errorf("invalid logrec for oldhead object %s:%s: %v", oid, oldHead, logrec)
}
- if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "$sync:log:data:11:3" {
+ if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe11\xfe3" {
t.Errorf("invalid logrec for newhead object %s:%s: %v", oid, newHead, logrec)
}
@@ -446,7 +446,7 @@
st := svc.St()
s := svc.sync
- oid := "tb:foo1"
+ oid := "tb\xfefoo1"
if _, err := s.dagReplayCommands(nil, "local-init-00.log.sync"); err != nil {
t.Fatal(err)
@@ -490,13 +490,13 @@
oid, isConflict, newHead, oldHead, ancestor, errConflict)
}
- if logrec, err := getLogRecKey(nil, st, oid, oldHead); err != nil || logrec != "$sync:log:data:10:3" {
+ if logrec, err := getLogRecKey(nil, st, oid, oldHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe10\xfe3" {
t.Errorf("invalid logrec for oldhead object %s:%s: %s", oid, oldHead, logrec)
}
- if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "$sync:log:data:11:3" {
+ if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe11\xfe3" {
t.Errorf("invalid logrec for newhead object %s:%s: %s", oid, newHead, logrec)
}
- if logrec, err := getLogRecKey(nil, st, oid, ancestor); err != nil || logrec != "$sync:log:data:10:2" {
+ if logrec, err := getLogRecKey(nil, st, oid, ancestor); err != nil || logrec != "$sync\xfelog\xfedata\xfe10\xfe2" {
t.Errorf("invalid logrec for ancestor object %s:%s: %s", oid, ancestor, logrec)
}
@@ -539,7 +539,7 @@
st := svc.St()
s := svc.sync
- oid := "tb:foo1"
+ oid := "tb\xfefoo1"
if _, err := s.dagReplayCommands(nil, "local-init-00.log.sync"); err != nil {
t.Fatal(err)
@@ -583,13 +583,13 @@
oid, isConflict, newHead, oldHead, ancestor, errConflict)
}
- if logrec, err := getLogRecKey(nil, st, oid, oldHead); err != nil || logrec != "$sync:log:data:10:3" {
+ if logrec, err := getLogRecKey(nil, st, oid, oldHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe10\xfe3" {
t.Errorf("invalid logrec for oldhead object %s:%s: %s", oid, oldHead, logrec)
}
- if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "$sync:log:data:11:2" {
+ if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe11\xfe2" {
t.Errorf("invalid logrec for newhead object %s:%s: %s", oid, newHead, logrec)
}
- if logrec, err := getLogRecKey(nil, st, oid, ancestor); err != nil || logrec != "$sync:log:data:10:2" {
+ if logrec, err := getLogRecKey(nil, st, oid, ancestor); err != nil || logrec != "$sync\xfelog\xfedata\xfe10\xfe2" {
t.Errorf("invalid logrec for ancestor object %s:%s: %s", oid, ancestor, logrec)
}
@@ -626,7 +626,7 @@
st := svc.St()
s := svc.sync
- oid := "tb:foo1"
+ oid := "tb\xfefoo1"
if _, err := s.dagReplayCommands(nil, "local-init-00.log.sync"); err != nil {
t.Fatal(err)
@@ -670,10 +670,10 @@
oid, isConflict, newHead, oldHead, ancestor, errConflict)
}
- if logrec, err := getLogRecKey(nil, st, oid, oldHead); err != nil || logrec != "$sync:log:data:10:3" {
+ if logrec, err := getLogRecKey(nil, st, oid, oldHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe10\xfe3" {
t.Errorf("invalid logrec for oldhead object %s:%s: %s", oid, oldHead, logrec)
}
- if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "$sync:log:data:11:3" {
+ if logrec, err := getLogRecKey(nil, st, oid, newHead); err != nil || logrec != "$sync\xfelog\xfedata\xfe11\xfe3" {
t.Errorf("invalid logrec for newhead object %s:%s: %s", oid, newHead, logrec)
}
@@ -931,7 +931,7 @@
st := svc.St()
s := svc.sync
- oid := "tb:foo1"
+ oid := "tb\xfefoo1"
if _, err := s.dagReplayCommands(nil, "local-init-00.log.sync"); err != nil {
t.Fatal(err)
@@ -1001,7 +1001,7 @@
st := svc.St()
s := svc.sync
- oid := "tb:foo1"
+ oid := "tb\xfefoo1"
if _, err := s.dagReplayCommands(nil, "local-init-00.log.sync"); err != nil {
t.Fatal(err)
@@ -1061,7 +1061,7 @@
st := svc.St()
s := svc.sync
- oid := "tb:foo1"
+ oid := "tb\xfefoo1"
if _, err := s.dagReplayCommands(nil, "local-init-00.log.sync"); err != nil {
t.Fatal(err)
@@ -1123,7 +1123,7 @@
st := svc.St()
s := svc.sync
- oid := "tb:foo1"
+ oid := "tb\xfefoo1"
if _, err := s.dagReplayCommands(nil, "local-init-00.log.sync"); err != nil {
t.Fatal(err)
diff --git a/services/syncbase/vsync/initiator.go b/services/syncbase/vsync/initiator.go
index 9b607a2..d39590a 100644
--- a/services/syncbase/vsync/initiator.go
+++ b/services/syncbase/vsync/initiator.go
@@ -670,7 +670,7 @@
// managed namespaces (e.g. "$row", "$perms"). Remove that prefix before
// comparing it with the syncgroup prefixes which are defined by the
// application.
- if strings.HasPrefix(util.StripFirstPartOrDie(objid), p) {
+ if strings.HasPrefix(util.StripFirstKeyPartOrDie(objid), p) {
for sg := range sgs {
sgIds[sg] = struct{}{}
}
@@ -903,17 +903,14 @@
}
// If this is a perms key, update the local store index.
- parts := util.SplitKeyParts(objid)
- if len(parts) < 3 {
- vlog.Fatalf("sync: updateDbAndSyncSt: bad key %s", objid)
- }
- if parts[0] == util.PermsPrefix {
- tb := iSt.config.db.Table(ctx, parts[1])
+ if util.IsPermsKey(objid) {
+ table, row := util.ParseTableAndRowOrDie(objid)
+ tb := iSt.config.db.Table(ctx, table)
var err error
if !newVersDeleted {
- err = tb.UpdatePrefixPermsIndexForSet(ctx, iSt.tx, parts[2])
+ err = tb.UpdatePrefixPermsIndexForSet(ctx, iSt.tx, row)
} else {
- err = tb.UpdatePrefixPermsIndexForDelete(ctx, iSt.tx, parts[2])
+ err = tb.UpdatePrefixPermsIndexForDelete(ctx, iSt.tx, row)
}
if err != nil {
return err
diff --git a/services/syncbase/vsync/initiator_test.go b/services/syncbase/vsync/initiator_test.go
index d77bbf5..fd37fb6 100644
--- a/services/syncbase/vsync/initiator_test.go
+++ b/services/syncbase/vsync/initiator_test.go
@@ -143,8 +143,8 @@
// Verify genvec state.
wantVec := interfaces.GenVector{
- "tb:foo1": interfaces.PrefixGenVector{11: 3},
- "tb:bar": interfaces.PrefixGenVector{11: 0},
+ "tb\xfefoo1": interfaces.PrefixGenVector{11: 3},
+ "tb\xfebar": interfaces.PrefixGenVector{11: 0},
}
if !reflect.DeepEqual(iSt.updLocal, wantVec) {
t.Fatalf("Final local gen vec mismatch got %v, want %v", iSt.updLocal, wantVec)
@@ -179,7 +179,7 @@
svc, iSt, cleanup := testInit(t, "local-init-00.log.sync", "remote-noconf-00.log.sync", false)
defer cleanup()
- objid := util.JoinKeyParts(util.RowPrefix, "tb:foo1")
+ objid := util.JoinKeyParts(util.RowPrefix, "tb\xfefoo1")
// Check all log records.
var version uint64 = 1
@@ -240,8 +240,8 @@
// Verify genvec state.
wantVec := interfaces.GenVector{
- "tb:foo1": interfaces.PrefixGenVector{11: 3},
- "tb:bar": interfaces.PrefixGenVector{11: 0},
+ "tb\xfefoo1": interfaces.PrefixGenVector{11: 3},
+ "tb\xfebar": interfaces.PrefixGenVector{11: 0},
}
if !reflect.DeepEqual(iSt.updLocal, wantVec) {
t.Fatalf("Final local gen vec failed got %v, want %v", iSt.updLocal, wantVec)
@@ -276,7 +276,7 @@
svc, iSt, cleanup := testInit(t, "local-init-00.log.sync", "remote-conf-00.log.sync", false)
defer cleanup()
- objid := util.JoinKeyParts(util.RowPrefix, "tb:foo1")
+ objid := util.JoinKeyParts(util.RowPrefix, "tb\xfefoo1")
// Verify conflict state.
if len(iSt.updObjects) != 1 {
@@ -323,7 +323,7 @@
svc, iSt, cleanup := testInit(t, "local-init-00.log.sync", "remote-conf-03.log.sync", false)
defer cleanup()
- objid := util.JoinKeyParts(util.RowPrefix, "tb:foo1")
+ objid := util.JoinKeyParts(util.RowPrefix, "tb\xfefoo1")
// Verify conflict state.
if len(iSt.updObjects) != 1 {
@@ -365,6 +365,9 @@
//////////////////////////////
// Helpers.
+// TODO(sadovsky): If any of the various t.Fatalf()'s below get triggered,
+// cleanup() is not run, and subsequent tests panic with "A runtime has already
+// been initialized".
func testInit(t *testing.T, lfile, rfile string, sg bool) (*mockService, *initiationState, func()) {
// Set a large value to prevent the initiator from running.
peerSyncInterval = 1 * time.Hour
@@ -434,7 +437,7 @@
if !sg {
iSt.peerSgInfo(nil)
// sg1.Spec.Prefixes
- testIfSgPfxsEqual(t, iSt.config.sgPfxs, []string{"foo:", "bar:"})
+ testIfSgPfxsEqual(t, iSt.config.sgPfxs, []string{"foo\xfe", "bar\xfe"})
}
sort.Strings(iSt.config.mtTables)
@@ -462,8 +465,8 @@
}
wantVec = interfaces.GenVector{
- "foo:": interfaces.PrefixGenVector{10: 0},
- "bar:": interfaces.PrefixGenVector{10: 0},
+ "foo\xfe": interfaces.PrefixGenVector{10: 0},
+ "bar\xfe": interfaces.PrefixGenVector{10: 0},
}
}
diff --git a/services/syncbase/vsync/replay_test.go b/services/syncbase/vsync/replay_test.go
index 55ae422..2547966 100644
--- a/services/syncbase/vsync/replay_test.go
+++ b/services/syncbase/vsync/replay_test.go
@@ -69,6 +69,16 @@
continue
}
+ // The current line encodes a command, i.e. it is not a comment line.
+ // Use strconv.Unquote to convert \xfe to the desired byte (for example).
+ // Note, we must wrap the original line in quotes before passing it to
+ // strconv.Unquote since strconv.Unquote expects the input string to look
+ // like a Go string literal (quoted).
+ qline := "\"" + line + "\""
+ if line, err = strconv.Unquote(qline); err != nil {
+ return nil, fmt.Errorf("%s: %s", err, qline)
+ }
+
args := strings.Split(line, "|")
nargs := len(args)
@@ -422,7 +432,15 @@
}
func TestSplitLogRecKey(t *testing.T) {
- invalid := []string{"$sync:100:bb", "log:100:bb", "$sync:log:data:100:xx", "$sync:log:data:aa:bb", "$sync:log:xx:100:bb", "$sync:log:data:aa:100:bb", "$sync:log:$sync:sgd:xx:100:bb"}
+ invalid := []string{
+ "$sync\xfe100\xfebb",
+ "log\xfe100\xfebb",
+ "$sync\xfelog\xfedata\xfe100\xfexx",
+ "$sync\xfelog\xfedata\xfeaa\xfebb",
+ "$sync\xfelog\xfexx\xfe100\xfebb",
+ "$sync\xfelog\xfedata\xfeaa\xfe100\xfebb",
+ "$sync\xfelog\xfe$sync\xfesgd\xfexx\xfe100\xfebb",
+ }
for _, k := range invalid {
if _, _, _, err := splitLogRecKey(nil, k); err == nil {
@@ -436,8 +454,8 @@
gen uint64
}{
{logDataPrefix, 10, 20},
- {"$sync:sgd:2500", 190, 540},
- {"$sync:sgd:4200", 9999, 999999},
+ {"$sync\xfesgd\xfe2500", 190, 540},
+ {"$sync\xfesgd\xfe4200", 9999, 999999},
}
for _, v := range valid {
diff --git a/services/syncbase/vsync/responder.go b/services/syncbase/vsync/responder.go
index b7cd392..16d9f92 100644
--- a/services/syncbase/vsync/responder.go
+++ b/services/syncbase/vsync/responder.go
@@ -72,7 +72,7 @@
for oid := range rSt.initVec {
gid, err := sgID(oid)
if err != nil {
- vlog.Fatalf("sync: newResponderState: invalid syncgroup key", oid)
+ vlog.Fatalf("sync: newResponderState: invalid syncgroup key %s", oid)
}
rSt.sgIds[interfaces.GroupId(gid)] = struct{}{}
}
@@ -482,7 +482,7 @@
// managed namespaces (e.g. "$row", "$perms"). Remove that prefix before
// comparing it with the syncgroup prefixes which are defined by the
// application.
- key := util.StripFirstPartOrDie(rec.Metadata.ObjId)
+ key := util.StripFirstKeyPartOrDie(rec.Metadata.ObjId)
filter := true
var maxGen uint64
diff --git a/services/syncbase/vsync/sync_state.go b/services/syncbase/vsync/sync_state.go
index 53eb098..fe389d1 100644
--- a/services/syncbase/vsync/sync_state.go
+++ b/services/syncbase/vsync/sync_state.go
@@ -507,7 +507,7 @@
// splitAppDbName is the inverse of appDbName and returns app and db name from a
// globally unique name for a Database.
func splitAppDbName(ctx *context.T, name string) (string, string, error) {
- parts := util.SplitKeyParts(name)
+ parts := util.SplitNKeyParts(name, 2)
if len(parts) != 2 {
return "", "", verror.New(verror.ErrInternal, ctx, "invalid appDbName", name)
}
diff --git a/services/syncbase/vsync/syncgroup.go b/services/syncbase/vsync/syncgroup.go
index 3d1f5fc..243fc01 100644
--- a/services/syncbase/vsync/syncgroup.go
+++ b/services/syncbase/vsync/syncgroup.go
@@ -526,7 +526,12 @@
return util.JoinKeyParts(sgDataKeyPrefix, fmt.Sprintf("%d", gid))
}
-// sgID is the inverse of sgOID and converts an oid string into a group id.
+// sgID is approximately the inverse of sgOID: it converts an oid string into a
+// group id, but assumes that oid is prefixed with util.SyncPrefix (whereas
+// sgOID does not prepend util.SyncPrefix).
// TODO(hpucha): Add unit tests that cover sgOID/sgID (and other such helpers).
+// In CL v.io/c/16919, an incorrect change to the implementation of sgID was
+// only caught by integration tests.
func sgID(oid string) (interfaces.GroupId, error) {
parts := util.SplitKeyParts(oid)
if len(parts) != 3 {
@@ -545,7 +550,8 @@
return util.JoinKeyParts(sgDataKeyPrefix, fmt.Sprintf("%d", gid), version)
}
-// sgDataKeyByOID returns the key used to access a version of the syncgroup data.
+// sgDataKeyByOID returns the key used to access a version of the syncgroup
+// data.
func sgDataKeyByOID(oid, version string) string {
return util.JoinKeyParts(oid, version)
}
@@ -1111,12 +1117,8 @@
stream := tx.Scan(start, limit)
for stream.Advance() {
k, v := stream.Key(nil), stream.Value(nil)
- parts := util.SplitKeyParts(string(k))
- if len(parts) < 2 {
- vlog.Fatalf("sync: bootstrapSyncgroup: invalid version key %s", string(k))
-
- }
- key := []byte(util.JoinKeyParts(parts[1:]...))
+ // Remove version prefix.
+ key := []byte(util.StripFirstKeyPartOrDie(string(k)))
if err := watchable.AddSyncSnapshotOp(ctx, tx, key, v); err != nil {
return err
}
diff --git a/services/syncbase/vsync/syncgroup_test.go b/services/syncbase/vsync/syncgroup_test.go
index 2a56f6e..6975eff 100644
--- a/services/syncbase/vsync/syncgroup_test.go
+++ b/services/syncbase/vsync/syncgroup_test.go
@@ -261,7 +261,7 @@
checkBadAddSyncgroup(t, st, sg, "SG with invalid (empty) table name")
sg = mkSg()
- sg.Spec.Prefixes = []wire.SyncgroupPrefix{{TableName: "a", RowPrefix: "\xff"}}
+ sg.Spec.Prefixes = []wire.SyncgroupPrefix{{TableName: "a", RowPrefix: "\xfe"}}
checkBadAddSyncgroup(t, st, sg, "SG with invalid row prefix")
}
@@ -448,7 +448,7 @@
expMemberInfo := map[string]*memberInfo{
"phone": &memberInfo{
db2sg: map[string]sgMemberInfo{
- "mockapp:mockdb": sgMemberInfo{
+ "mockapp\xfemockdb": sgMemberInfo{
sgId1: sg1.Joiners["phone"],
},
},
@@ -456,7 +456,7 @@
},
"tablet": &memberInfo{
db2sg: map[string]sgMemberInfo{
- "mockapp:mockdb": sgMemberInfo{
+ "mockapp\xfemockdb": sgMemberInfo{
sgId1: sg1.Joiners["tablet"],
sgId2: sg2.Joiners["tablet"],
},
@@ -469,7 +469,7 @@
},
"cloud": &memberInfo{
db2sg: map[string]sgMemberInfo{
- "mockapp:mockdb": sgMemberInfo{
+ "mockapp\xfemockdb": sgMemberInfo{
sgId1: sg1.Joiners["cloud"],
},
},
@@ -477,7 +477,7 @@
},
"door": &memberInfo{
db2sg: map[string]sgMemberInfo{
- "mockapp:mockdb": sgMemberInfo{
+ "mockapp\xfemockdb": sgMemberInfo{
sgId2: sg2.Joiners["door"],
},
},
@@ -485,7 +485,7 @@
},
"lamp": &memberInfo{
db2sg: map[string]sgMemberInfo{
- "mockapp:mockdb": sgMemberInfo{
+ "mockapp\xfemockdb": sgMemberInfo{
sgId2: sg2.Joiners["lamp"],
},
},
@@ -529,7 +529,7 @@
expMemberInfo = map[string]*memberInfo{
"tablet": &memberInfo{
db2sg: map[string]sgMemberInfo{
- "mockapp:mockdb": sgMemberInfo{
+ "mockapp\xfemockdb": sgMemberInfo{
sgId2: sg2.Joiners["tablet"],
},
},
@@ -537,7 +537,7 @@
},
"door": &memberInfo{
db2sg: map[string]sgMemberInfo{
- "mockapp:mockdb": sgMemberInfo{
+ "mockapp\xfemockdb": sgMemberInfo{
sgId2: sg2.Joiners["door"],
},
},
@@ -545,7 +545,7 @@
},
"lamp": &memberInfo{
db2sg: map[string]sgMemberInfo{
- "mockapp:mockdb": sgMemberInfo{
+ "mockapp\xfemockdb": sgMemberInfo{
sgId2: sg2.Joiners["lamp"],
},
},
diff --git a/services/syncbase/vsync/testdata/local-init-00.log.sync b/services/syncbase/vsync/testdata/local-init-00.log.sync
index c34695b..59fb856 100644
--- a/services/syncbase/vsync/testdata/local-init-00.log.sync
+++ b/services/syncbase/vsync/testdata/local-init-00.log.sync
@@ -1,6 +1,6 @@
# Create an object locally and update it twice (linked-list).
# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
-addl|tb:foo1|1|||$sync:log:data:10:1|0|1|false
-addl|tb:foo1|2|1||$sync:log:data:10:2|0|1|false
-addl|tb:foo1|3|2||$sync:log:data:10:3|0|1|false
+addl|tb\xfefoo1|1|||$sync\xfelog\xfedata\xfe10\xfe1|0|1|false
+addl|tb\xfefoo1|2|1||$sync\xfelog\xfedata\xfe10\xfe2|0|1|false
+addl|tb\xfefoo1|3|2||$sync\xfelog\xfedata\xfe10\xfe3|0|1|false
diff --git a/services/syncbase/vsync/testdata/local-resolve-00.sync b/services/syncbase/vsync/testdata/local-resolve-00.sync
index 383b38f..28e1e9d 100644
--- a/services/syncbase/vsync/testdata/local-resolve-00.sync
+++ b/services/syncbase/vsync/testdata/local-resolve-00.sync
@@ -1,4 +1,4 @@
# Create an object locally and update it twice (linked-list).
# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
-addl|tb:foo1|7|3|6|logrec-06|0|1|false
+addl|tb\xfefoo1|7|3|6|logrec-06|0|1|false
diff --git a/services/syncbase/vsync/testdata/remote-conf-00.log.sync b/services/syncbase/vsync/testdata/remote-conf-00.log.sync
index 1d90e2c..261f520 100644
--- a/services/syncbase/vsync/testdata/remote-conf-00.log.sync
+++ b/services/syncbase/vsync/testdata/remote-conf-00.log.sync
@@ -3,6 +3,6 @@
# it from the local sync at v2, then updated separately).
# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
-addr|tb:foo1|4|2||$sync:log:data:11:1|0|1|false
-addr|tb:foo1|5|4||$sync:log:data:11:2|0|1|false
-addr|tb:foo1|6|5||$sync:log:data:11:3|0|1|false
+addr|tb\xfefoo1|4|2||$sync\xfelog\xfedata\xfe11\xfe1|0|1|false
+addr|tb\xfefoo1|5|4||$sync\xfelog\xfedata\xfe11\xfe2|0|1|false
+addr|tb\xfefoo1|6|5||$sync\xfelog\xfedata\xfe11\xfe3|0|1|false
diff --git a/services/syncbase/vsync/testdata/remote-conf-01.log.sync b/services/syncbase/vsync/testdata/remote-conf-01.log.sync
index 4e0d2de..ee1b836 100644
--- a/services/syncbase/vsync/testdata/remote-conf-01.log.sync
+++ b/services/syncbase/vsync/testdata/remote-conf-01.log.sync
@@ -5,6 +5,6 @@
# sees 2 graft points: v1-v4 and v2-v5.
# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
-addr|tb:foo1|4|1||$sync:log:data:12:1|0|1|false
-addr|tb:foo1|5|2|4|$sync:log:data:11:1|0|1|false
-addr|tb:foo1|6|5||$sync:log:data:11:2|0|1|false
+addr|tb\xfefoo1|4|1||$sync\xfelog\xfedata\xfe12\xfe1|0|1|false
+addr|tb\xfefoo1|5|2|4|$sync\xfelog\xfedata\xfe11\xfe1|0|1|false
+addr|tb\xfefoo1|6|5||$sync\xfelog\xfedata\xfe11\xfe2|0|1|false
diff --git a/services/syncbase/vsync/testdata/remote-conf-03.log.sync b/services/syncbase/vsync/testdata/remote-conf-03.log.sync
index d96bf3f..8aed9dc 100644
--- a/services/syncbase/vsync/testdata/remote-conf-03.log.sync
+++ b/services/syncbase/vsync/testdata/remote-conf-03.log.sync
@@ -1,6 +1,6 @@
# Create the same object remotely from scratch and update it twice (linked-list).
# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
-addr|tb:foo1|4|||$sync:log:data:11:1|0|1|false
-addr|tb:foo1|5|4||$sync:log:data:11:2|0|1|false
-addr|tb:foo1|6|5||$sync:log:data:11:3|0|1|false
+addr|tb\xfefoo1|4|||$sync\xfelog\xfedata\xfe11\xfe1|0|1|false
+addr|tb\xfefoo1|5|4||$sync\xfelog\xfedata\xfe11\xfe2|0|1|false
+addr|tb\xfefoo1|6|5||$sync\xfelog\xfedata\xfe11\xfe3|0|1|false
diff --git a/services/syncbase/vsync/testdata/remote-conf-link.log.sync b/services/syncbase/vsync/testdata/remote-conf-link.log.sync
index c46d319..d3be9e4 100644
--- a/services/syncbase/vsync/testdata/remote-conf-link.log.sync
+++ b/services/syncbase/vsync/testdata/remote-conf-link.log.sync
@@ -1,5 +1,5 @@
# Update an object remotely, detect conflict, and bless the local version.
# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
-addr|tb:foo1|4|1||$sync:log:11:1|0|1|false
-linkr|tb:foo1|4|2||$sync:log:11:2
+addr|tb\xfefoo1|4|1||$sync\xfelog\xfe11\xfe1|0|1|false
+linkr|tb\xfefoo1|4|2||$sync\xfelog\xfe11\xfe2
diff --git a/services/syncbase/vsync/testdata/remote-init-00.log.sync b/services/syncbase/vsync/testdata/remote-init-00.log.sync
index 6189873..1683d08 100644
--- a/services/syncbase/vsync/testdata/remote-init-00.log.sync
+++ b/services/syncbase/vsync/testdata/remote-init-00.log.sync
@@ -1,7 +1,8 @@
# Create an object remotely and update it twice (linked-list).
# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
+# TODO(rdaoud): The above comment is incorrect for the 'genvec' line.
-addr|tb:foo1|1|||$sync:log:data:11:1|0|1|false
-addr|tb:foo1|2|1||$sync:log:data:11:2|0|1|false
-addr|tb:foo1|3|2||$sync:log:data:11:3|0|1|false
-genvec|tb:foo1|10:0,11:3|tb:bar|11:0
+addr|tb\xfefoo1|1|||$sync\xfelog\xfedata\xfe11\xfe1|0|1|false
+addr|tb\xfefoo1|2|1||$sync\xfelog\xfedata\xfe11\xfe2|0|1|false
+addr|tb\xfefoo1|3|2||$sync\xfelog\xfedata\xfe11\xfe3|0|1|false
+genvec|tb\xfefoo1|10:0,11:3|tb\xfebar|11:0
diff --git a/services/syncbase/vsync/testdata/remote-noconf-00.log.sync b/services/syncbase/vsync/testdata/remote-noconf-00.log.sync
index 9bdc88c..e54a5ec 100644
--- a/services/syncbase/vsync/testdata/remote-noconf-00.log.sync
+++ b/services/syncbase/vsync/testdata/remote-noconf-00.log.sync
@@ -2,7 +2,9 @@
# after it was created locally up to v3 (i.e. assume the remote sync
# received it from the local sync first, then updated it).
# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
-addr|tb:foo1|4|3||$sync:log:data:11:1|0|1|false
-addr|tb:foo1|5|4||$sync:log:data:11:2|0|1|false
-addr|tb:foo1|6|5||$sync:log:data:11:3|0|1|false
-genvec|tb:foo1|10:0,11:3|tb:bar|11:0
+# TODO(rdaoud): The above comment is incorrect for the 'genvec' line.
+
+addr|tb\xfefoo1|4|3||$sync\xfelog\xfedata\xfe11\xfe1|0|1|false
+addr|tb\xfefoo1|5|4||$sync\xfelog\xfedata\xfe11\xfe2|0|1|false
+addr|tb\xfefoo1|6|5||$sync\xfelog\xfedata\xfe11\xfe3|0|1|false
+genvec|tb\xfefoo1|10:0,11:3|tb\xfebar|11:0
diff --git a/services/syncbase/vsync/testdata/remote-noconf-link-00.log.sync b/services/syncbase/vsync/testdata/remote-noconf-link-00.log.sync
index 546dfb0..11e0df5 100644
--- a/services/syncbase/vsync/testdata/remote-noconf-link-00.log.sync
+++ b/services/syncbase/vsync/testdata/remote-noconf-link-00.log.sync
@@ -1,5 +1,5 @@
# Update an object remotely, detect conflict, and bless the remote version.
# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
-addr|tb:foo1|4|1||$sync:log:11:1|0|1|false
-linkr|tb:foo1|2|4||$sync:log:11:2
+addr|tb\xfefoo1|4|1||$sync\xfelog\xfe11\xfe1|0|1|false
+linkr|tb\xfefoo1|2|4||$sync\xfelog\xfe11\xfe2
diff --git a/services/syncbase/vsync/testdata/remote-noconf-link-01.log.sync b/services/syncbase/vsync/testdata/remote-noconf-link-01.log.sync
index 4e91506..840514c 100644
--- a/services/syncbase/vsync/testdata/remote-noconf-link-01.log.sync
+++ b/services/syncbase/vsync/testdata/remote-noconf-link-01.log.sync
@@ -1,5 +1,5 @@
# Update an object remotely, detect conflict, and bless the local version.
# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
-addr|tb:foo1|4|1||$sync:log:11:1|0|1|false
-linkr|tb:foo1|4|3||$sync:log:11:2
+addr|tb\xfefoo1|4|1||$sync\xfelog\xfe11\xfe1|0|1|false
+linkr|tb\xfefoo1|4|3||$sync\xfelog\xfe11\xfe2
diff --git a/services/syncbase/vsync/testdata/remote-noconf-link-02.log.sync b/services/syncbase/vsync/testdata/remote-noconf-link-02.log.sync
index 2b75980..cd42033 100644
--- a/services/syncbase/vsync/testdata/remote-noconf-link-02.log.sync
+++ b/services/syncbase/vsync/testdata/remote-noconf-link-02.log.sync
@@ -1,6 +1,6 @@
# Update an object remotely, detect conflict, and bless the remote version, and continue updating.
# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
-addr|tb:foo1|4|1||$sync:log:11:1|0|1|false
-linkr|tb:foo1|3|4||$sync:log:11:2
-addr|tb:foo1|5|3||$sync:log:11:3|0|1|false
+addr|tb\xfefoo1|4|1||$sync\xfelog\xfe11\xfe1|0|1|false
+linkr|tb\xfefoo1|3|4||$sync\xfelog\xfe11\xfe2
+addr|tb\xfefoo1|5|3||$sync\xfelog\xfe11\xfe3|0|1|false
diff --git a/services/syncbase/vsync/testdata/remote-noconf-link-repeat.log.sync b/services/syncbase/vsync/testdata/remote-noconf-link-repeat.log.sync
index eff36cd..cbcdd58 100644
--- a/services/syncbase/vsync/testdata/remote-noconf-link-repeat.log.sync
+++ b/services/syncbase/vsync/testdata/remote-noconf-link-repeat.log.sync
@@ -1,4 +1,4 @@
# Resolve the same conflict on two different devices.
# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<txid>|<txcount>|<deleted>
-linkr|tb:foo1|3|4||$sync:log:12:1
+linkr|tb\xfefoo1|3|4||$sync\xfelog\xfe12\xfe1
diff --git a/services/syncbase/vsync/util.go b/services/syncbase/vsync/util.go
index 4e0c523..4842a0c 100644
--- a/services/syncbase/vsync/util.go
+++ b/services/syncbase/vsync/util.go
@@ -7,7 +7,6 @@
// Sync utility functions
import (
- "strings"
"time"
"v.io/v23/context"
@@ -98,41 +97,10 @@
return util.JoinKeyParts(p.TableName, p.RowPrefix)
}
-// TODO(jlodhia): extractAppKey() method is temporary for conflict resolution.
-// Will be removed once SyncgroupPrefix is refactored into a generic
-// TableRow struct.
-// extractAppKey extracts the app key from the key sent over the wire between
-// two Syncbases. The on-wire key starts with one of the store's reserved
-// prefixes for managed namespaces (e.g. $row, $perms). This function removes
-// that prefix and returns the application component of the key. This is done
-// typically before comparing keys with the SyncGroup prefixes which are defined
-// by the application.
-func extractAppKey(key string) string {
- parts := splitKeyIntoParts(key, 2)
- return util.JoinKeyParts(parts[1:]...)
-}
-
-// isRowKey checks if the given key belongs to a data row.
-func isRowKey(key string) bool {
- return strings.HasPrefix(key, util.RowPrefix)
-}
-
-// makeRowKey takes an app key, whose structure is <table>:<row>, and converts
-// it into store's representation of row key with structure $row:<table>:<row>
-func toRowKey(appKey string) string {
- return util.JoinKeyParts(util.RowPrefix, appKey)
-}
-
-// Returns the table name and key within the table from the given row key.
-func extractComponentsFromKey(key string) (table string, row string) {
- parts := splitKeyIntoParts(key, 3)
- return parts[1], parts[2]
-}
-
-func splitKeyIntoParts(key string, minCount int) []string {
- parts := util.SplitKeyParts(key)
- if len(parts) < minCount {
- vlog.Fatalf("sync: extractKeyParts: invalid entry key %s (expected %d parts)", key, minCount)
- }
- return parts
+// toRowKey prepends RowPrefix to what is presumably a "<table>:<row>" string,
+// yielding a storage engine key for a row.
+// TODO(sadovsky): Only used by CR code. Should go away once CR stores table
+// name and row key as separate fields in a "TableAndRow" struct.
+func toRowKey(tableRow string) string {
+ return util.JoinKeyParts(util.RowPrefix, tableRow)
}
diff --git a/services/syncbase/vsync/watcher.go b/services/syncbase/vsync/watcher.go
index 0d5e5d6..c61ff55 100644
--- a/services/syncbase/vsync/watcher.go
+++ b/services/syncbase/vsync/watcher.go
@@ -422,13 +422,9 @@
}
// The key starts with one of the store's reserved prefixes for managed
- // namespaced (e.g. $row or $perm). Remove that prefix before comparing
- // it with the syncgroup prefixes which are defined by the application.
- parts := util.SplitKeyParts(key)
- if len(parts) < 2 {
- vlog.Fatalf("sync: syncable: %s: invalid entry key %s: %v", appdb, key, logEnt)
- }
- key = util.JoinKeyParts(parts[1:]...)
+ // namespaces (e.g. "$row", "$perms"). Remove that prefix before comparing it
+ // with the syncgroup prefixes which are defined by the application.
+ key = util.StripFirstKeyPartOrDie(key)
for prefix := range watchPrefixes[appdb] {
if strings.HasPrefix(key, prefix) {
diff --git a/services/syncbase/vsync/watcher_test.go b/services/syncbase/vsync/watcher_test.go
index b09a9e8..66a6c55 100644
--- a/services/syncbase/vsync/watcher_test.go
+++ b/services/syncbase/vsync/watcher_test.go
@@ -86,9 +86,9 @@
}
expPrefixes := map[string]sgPrefixes{
- "app1:db1": sgPrefixes{"foo": 2, "bar": 1},
- "app2:db1": sgPrefixes{"xyz": 1},
- "app3:db1": sgPrefixes{"haha": 1},
+ "app1\xfedb1": sgPrefixes{"foo": 2, "bar": 1},
+ "app2\xfedb1": sgPrefixes{"xyz": 1},
+ "app3\xfedb1": sgPrefixes{"haha": 1},
}
if !reflect.DeepEqual(watchPrefixes, expPrefixes) {
t.Errorf("invalid watch prefixes: got %v instead of %v", watchPrefixes, expPrefixes)
diff --git a/test/doc.go b/test/doc.go
index 206164e..8352f95 100644
--- a/test/doc.go
+++ b/test/doc.go
@@ -28,20 +28,20 @@
// --v23.tests.shell-on-fail - drop into a debug shell if the test fails.
//
// Typical usage is:
-// $ v23 go test . --v23.tests
+// $ jiri go test . --v23.tests
//
// Note that, like all flags not recognised by the go testing package, the
// v23.tests flags must follow the package spec.
//
// The sub-directories of this package provide either functionality that
// can be used within traditional go tests, or support for the v23 integration
-// test framework. The v23 command is able to generate boilerplate code
-// to support these tests. In summary, v23 test generate will generate
+// test framework. The jiri command is able to generate boilerplate code
+// to support these tests. In summary, 'jiri test generate' will generate
// go files to be checked in that include appropriate TestMain functions,
// registration calls for modules commands and wrapper functions for v23test
// tests. More detailed documentation is available via:
//
-// $ v23 test generate --help
+// $ jiri test generate --help
//
// Vanadium tests often need to run subprocesses to provide either common
// services that they depend (e.g. a mount table) and/or services that are