veyron2/storage: Move Version to veyron/services/store/raw.
The Version type is used only by sync, so it does not need to be part
of the public veyron2/storage API.
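
For context, the sync code in this change only relies on Version being an
integer-valued type together with a NoVersion sentinel and a NewVersion()
constructor. A minimal sketch of what the relocated type in
veyron/services/store/raw is assumed to look like (the real definition may
differ in detail):

    package raw

    import "math/rand"

    // Version identifies a specific mutation of a stored object. It is
    // assumed to remain a uint64, matching the old veyron2/storage type.
    type Version uint64

    // NoVersion is assumed to be the zero value, used as a "no version"
    // sentinel (e.g. the prior version of a newly created object or the
    // head of an unknown object).
    const NoVersion = Version(0)

    // NewVersion returns a fresh, non-zero version number. A random draw
    // is assumed here; the real generator may differ.
    func NewVersion() Version {
            for {
                    if v := Version(rand.Uint64()); v != NoVersion {
                            return v
                    }
            }
    }
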
Change-Id: Id645076b49a328ff5f1ceac3b3938bbb048306fc
diff --git a/runtimes/google/vsync/dag.go b/runtimes/google/vsync/dag.go
index 4ab9315..956ff28 100644
--- a/runtimes/google/vsync/dag.go
+++ b/runtimes/google/vsync/dag.go
@@ -110,6 +110,8 @@
"math/rand"
"time"
+ "veyron/services/store/raw"
+
"veyron2/storage"
"veyron2/vlog"
)
@@ -119,7 +121,7 @@
)
type TxID uint64
-type dagTxMap map[storage.ID]storage.Version
+type dagTxMap map[storage.ID]raw.Version
type dag struct {
fname string // file pathname
@@ -134,17 +136,17 @@
}
type dagNode struct {
- Level uint64 // node distance from root
- Parents []storage.Version // references to parent versions
- Logrec string // reference to log record change
- TxID TxID // ID of a transaction set
- Deleted bool // true if the change was a delete
+ Level uint64 // node distance from root
+ Parents []raw.Version // references to parent versions
+ Logrec string // reference to log record change
+ TxID TxID // ID of a transaction set
+ Deleted bool // true if the change was a delete
}
type graftInfo struct {
- newNodes map[storage.Version]struct{} // set of newly added nodes during a sync
- graftNodes map[storage.Version]uint64 // set of graft nodes and their level
- newHeads map[storage.Version]struct{} // set of candidate new head nodes
+ newNodes map[raw.Version]struct{} // set of newly added nodes during a sync
+ graftNodes map[raw.Version]uint64 // set of graft nodes and their level
+ newHeads map[raw.Version]struct{} // set of candidate new head nodes
}
// openDAG opens or creates a DAG for the given filename.
@@ -240,9 +242,9 @@
graft := d.graft[oid]
if graft == nil && create {
graft = &graftInfo{
- newNodes: make(map[storage.Version]struct{}),
- graftNodes: make(map[storage.Version]uint64),
- newHeads: make(map[storage.Version]struct{}),
+ newNodes: make(map[raw.Version]struct{}),
+ graftNodes: make(map[raw.Version]uint64),
+ newHeads: make(map[raw.Version]struct{}),
}
// If a current head node exists for this object, initialize
@@ -316,8 +318,8 @@
//
// If the transaction ID is set to NoTxID, this node is not part of a transaction.
// Otherwise, track its membership in the given transaction ID.
-func (d *dag) addNode(oid storage.ID, version storage.Version, remote, deleted bool,
- parents []storage.Version, logrec string, tid TxID) error {
+func (d *dag) addNode(oid storage.ID, version raw.Version, remote, deleted bool,
+ parents []raw.Version, logrec string, tid TxID) error {
if d.store == nil {
return errors.New("invalid DAG")
}
@@ -420,7 +422,7 @@
}
// hasNode returns true if the node (oid, version) exists in the DAG DB.
-func (d *dag) hasNode(oid storage.ID, version storage.Version) bool {
+func (d *dag) hasNode(oid storage.ID, version raw.Version) bool {
if d.store == nil {
return false
}
@@ -435,7 +437,7 @@
//
// TODO(rdaoud): recompute the levels of reachable child-nodes if the new
// parent's level is greater or equal to the node's current level.
-func (d *dag) addParent(oid storage.ID, version, parent storage.Version, remote bool) error {
+func (d *dag) addParent(oid storage.ID, version, parent raw.Version, remote bool) error {
if version == parent {
return fmt.Errorf("addParent: object %v: node %d cannot be its own parent", oid, version)
}
@@ -465,7 +467,7 @@
// Make sure that adding the link does not create a cycle in the DAG.
// This is done by verifying that the node is not an ancestor of the
// parent that it is being linked to.
- err = d.ancestorIter(oid, pnode.Parents, func(oid storage.ID, v storage.Version, nd *dagNode) error {
+ err = d.ancestorIter(oid, pnode.Parents, func(oid storage.ID, v raw.Version, nd *dagNode) error {
if v == version {
return fmt.Errorf("addParent: cycle on object %v: node %d is an ancestor of parent node %d",
oid, version, parent)
@@ -512,7 +514,7 @@
}
// moveHead moves the object head node in the DAG.
-func (d *dag) moveHead(oid storage.ID, head storage.Version) error {
+func (d *dag) moveHead(oid storage.ID, head raw.Version) error {
if d.store == nil {
return errors.New("invalid DAG")
}
@@ -533,10 +535,10 @@
// added object versions are not derived in part from this device's current
// knowledge. If there is a single new-head, the object changes were applied
// without triggering a conflict.
-func (d *dag) hasConflict(oid storage.ID) (isConflict bool, newHead, oldHead, ancestor storage.Version, err error) {
- oldHead = storage.NoVersion
- newHead = storage.NoVersion
- ancestor = storage.NoVersion
+func (d *dag) hasConflict(oid storage.ID) (isConflict bool, newHead, oldHead, ancestor raw.Version, err error) {
+ oldHead = raw.NoVersion
+ newHead = raw.NoVersion
+ ancestor = raw.NoVersion
if d.store == nil {
err = errors.New("invalid DAG")
return
@@ -604,9 +606,9 @@
// breadth-first traversal starting from given version node(s). In its
// traversal it invokes the callback function once for each node, passing
// the object ID, version number and a pointer to the dagNode.
-func (d *dag) ancestorIter(oid storage.ID, startVersions []storage.Version,
- cb func(storage.ID, storage.Version, *dagNode) error) error {
- visited := make(map[storage.Version]bool)
+func (d *dag) ancestorIter(oid storage.ID, startVersions []raw.Version,
+ cb func(storage.ID, raw.Version, *dagNode) error) error {
+ visited := make(map[raw.Version]bool)
queue := list.New()
for _, version := range startVersions {
queue.PushBack(version)
@@ -614,7 +616,7 @@
}
for queue.Len() > 0 {
- version := queue.Remove(queue.Front()).(storage.Version)
+ version := queue.Remove(queue.Front()).(raw.Version)
node, err := d.getNode(oid, version)
if err != nil {
// Ignore it, the parent was previously pruned.
@@ -638,7 +640,7 @@
// DAG DB and one of its descendants is a deleted node (i.e. has its "Deleted"
// flag set true). This means that at some object mutation after this version,
// the object was deleted.
-func (d *dag) hasDeletedDescendant(oid storage.ID, version storage.Version) bool {
+func (d *dag) hasDeletedDescendant(oid storage.ID, version raw.Version) bool {
if d.store == nil {
return false
}
@@ -660,7 +662,7 @@
}
type nodeStep struct {
- node storage.Version
+ node raw.Version
deleted bool
}
@@ -706,7 +708,7 @@
// Also track any transaction sets affected by deleting DAG objects that
// have transaction IDs. This is later used to do garbage collection
// on transaction sets when pruneDone() is called.
-func (d *dag) prune(oid storage.ID, version storage.Version, delLogRec func(logrec string) error) error {
+func (d *dag) prune(oid storage.ID, version raw.Version, delLogRec func(logrec string) error) error {
if d.store == nil {
return errors.New("invalid DAG")
}
@@ -734,7 +736,7 @@
// Keep track of objects deleted from transaction in order
// to cleanup transaction sets when pruneDone() is called.
numNodeErrs, numLogErrs := 0, 0
- err = d.ancestorIter(oid, iterVersions, func(oid storage.ID, v storage.Version, node *dagNode) error {
+ err = d.ancestorIter(oid, iterVersions, func(oid storage.ID, v raw.Version, node *dagNode) error {
if tid := node.TxID; tid != NoTxID {
if d.txGC[tid] == nil {
d.txGC[tid] = make(dagTxMap)
@@ -794,7 +796,7 @@
}
// getLogrec returns the log record information for a given object version.
-func (d *dag) getLogrec(oid storage.ID, version storage.Version) (string, error) {
+func (d *dag) getLogrec(oid storage.ID, version raw.Version) (string, error) {
node, err := d.getNode(oid, version)
if err != nil {
return "", err
@@ -804,13 +806,13 @@
// objNodeKey returns the key used to access the object node (oid, version)
// in the DAG DB.
-func objNodeKey(oid storage.ID, version storage.Version) string {
+func objNodeKey(oid storage.ID, version raw.Version) string {
return fmt.Sprintf("%s:%d", oid.String(), version)
}
// setNode stores the dagNode structure for the object node (oid, version)
// in the DAG DB.
-func (d *dag) setNode(oid storage.ID, version storage.Version, node *dagNode) error {
+func (d *dag) setNode(oid storage.ID, version raw.Version, node *dagNode) error {
if d.store == nil {
return errors.New("invalid DAG")
}
@@ -820,7 +822,7 @@
// getNode retrieves the dagNode structure for the object node (oid, version)
// from the DAG DB.
-func (d *dag) getNode(oid storage.ID, version storage.Version) (*dagNode, error) {
+func (d *dag) getNode(oid storage.ID, version raw.Version) (*dagNode, error) {
if d.store == nil {
return nil, errors.New("invalid DAG")
}
@@ -833,7 +835,7 @@
}
// delNode deletes the object node (oid, version) from the DAG DB.
-func (d *dag) delNode(oid storage.ID, version storage.Version) error {
+func (d *dag) delNode(oid storage.ID, version raw.Version) error {
if d.store == nil {
return errors.New("invalid DAG")
}
@@ -847,7 +849,7 @@
}
// setHead stores version as the object head in the DAG DB.
-func (d *dag) setHead(oid storage.ID, version storage.Version) error {
+func (d *dag) setHead(oid storage.ID, version raw.Version) error {
if d.store == nil {
return errors.New("invalid DAG")
}
@@ -856,15 +858,15 @@
}
// getHead retrieves the object head from the DAG DB.
-func (d *dag) getHead(oid storage.ID) (storage.Version, error) {
- var version storage.Version
+func (d *dag) getHead(oid storage.ID) (raw.Version, error) {
+ var version raw.Version
if d.store == nil {
return version, errors.New("invalid DAG")
}
key := objHeadKey(oid)
err := d.heads.get(key, &version)
if err != nil {
- version = storage.NoVersion
+ version = raw.NoVersion
}
return version, err
}
@@ -917,9 +919,9 @@
// getParentMap is a testing and debug helper function that returns for
// an object a map of all the object version in the DAG and their parents.
// The map represents the graph of the object version history.
-func (d *dag) getParentMap(oid storage.ID) map[storage.Version][]storage.Version {
- parentMap := make(map[storage.Version][]storage.Version)
- var iterVersions []storage.Version
+func (d *dag) getParentMap(oid storage.ID) map[raw.Version][]raw.Version {
+ parentMap := make(map[raw.Version][]raw.Version)
+ var iterVersions []raw.Version
if head, err := d.getHead(oid); err == nil {
iterVersions = append(iterVersions, head)
@@ -931,7 +933,7 @@
}
// Breadth-first traversal starting from the object head.
- d.ancestorIter(oid, iterVersions, func(oid storage.ID, v storage.Version, node *dagNode) error {
+ d.ancestorIter(oid, iterVersions, func(oid storage.ID, v raw.Version, node *dagNode) error {
parentMap[v] = node.Parents
return nil
})
@@ -945,7 +947,7 @@
// reported by the other device during a sync operation. The graftNodes map
// identifies the set of old nodes where the new DAG fragments were attached
// and their depth level in the DAG.
-func (d *dag) getGraftNodes(oid storage.ID) (map[storage.Version]struct{}, map[storage.Version]uint64) {
+func (d *dag) getGraftNodes(oid storage.ID) (map[raw.Version]struct{}, map[raw.Version]uint64) {
if d.store != nil {
if ginfo := d.graft[oid]; ginfo != nil {
return ginfo.newHeads, ginfo.graftNodes
diff --git a/runtimes/google/vsync/dag_test.go b/runtimes/google/vsync/dag_test.go
index a84f13b..ff2ebab 100644
--- a/runtimes/google/vsync/dag_test.go
+++ b/runtimes/google/vsync/dag_test.go
@@ -11,6 +11,7 @@
"time"
"veyron/lib/testutil"
+ "veyron/services/store/raw"
"veyron2/storage"
)
@@ -99,7 +100,7 @@
t.Error(err)
}
- err = dag.addNode(oid, 4, false, false, []storage.Version{2, 3}, "foobar", NoTxID)
+ err = dag.addNode(oid, 4, false, false, []raw.Version{2, 3}, "foobar", NoTxID)
if err == nil || err.Error() != "invalid DAG" {
t.Errorf("addNode() did not fail on a closed DAG: %v", err)
}
@@ -131,7 +132,7 @@
t.Errorf("pruneDone() did not fail on a closed DAG: %v", err)
}
- node := &dagNode{Level: 15, Parents: []storage.Version{444, 555}, Logrec: "logrec-23"}
+ node := &dagNode{Level: 15, Parents: []raw.Version{444, 555}, Logrec: "logrec-23"}
err = dag.setNode(oid, 4, node)
if err == nil || err.Error() != "invalid DAG" {
t.Errorf("setNode() did not fail on a closed DAG: %v", err)
@@ -220,7 +221,7 @@
t.Fatalf("Cannot open new DAG file %s", dagfile)
}
- version := storage.Version(0)
+ version := raw.Version(0)
oid, err := strToObjID("111")
if err != nil {
t.Fatal(err)
@@ -239,7 +240,7 @@
t.Errorf("Non-existent object %d:%d has a logrec in DAG file %s: %v", oid, version, dagfile, logrec)
}
- node = &dagNode{Level: 15, Parents: []storage.Version{444, 555}, Logrec: "logrec-23"}
+ node = &dagNode{Level: 15, Parents: []raw.Version{444, 555}, Logrec: "logrec-23"}
if err = dag.setNode(oid, version, node); err != nil {
t.Fatalf("Cannot set object %d:%d (%v) in DAG file %s", oid, version, node, dagfile)
}
@@ -287,13 +288,13 @@
t.Fatalf("Cannot open new DAG file %s", dagfile)
}
- version := storage.Version(1)
+ version := raw.Version(1)
oid, err := strToObjID("222")
if err != nil {
t.Fatal(err)
}
- node := &dagNode{Level: 123, Parents: []storage.Version{333}, Logrec: "logrec-789"}
+ node := &dagNode{Level: 123, Parents: []raw.Version{333}, Logrec: "logrec-789"}
if err = dag.setNode(oid, version, node); err != nil {
t.Fatalf("Cannot set object %d:%d (%v) in DAG file %s", oid, version, node, dagfile)
}
@@ -343,7 +344,7 @@
t.Fatalf("Cannot open new DAG file %s", dagfile)
}
- version := storage.Version(7)
+ version := raw.Version(7)
oid, err := strToObjID("12345")
if err != nil {
t.Fatal(err)
@@ -366,13 +367,13 @@
t.Errorf("addParent() did not fail on a self-parent for object %d:%d in DAG file %s", oid, version, dagfile)
}
- for _, parent := range []storage.Version{4, 5, 6} {
+ for _, parent := range []raw.Version{4, 5, 6} {
if err = dag.addParent(oid, version, parent, true); err == nil {
t.Errorf("addParent() did not reject invalid parent %d for object %d:%d in DAG file %s",
parent, oid, version, dagfile)
}
- pnode := &dagNode{Level: 11, Logrec: fmt.Sprint("logrec-%d", parent), Parents: []storage.Version{3}}
+ pnode := &dagNode{Level: 11, Logrec: fmt.Sprint("logrec-%d", parent), Parents: []raw.Version{3}}
if err = dag.setNode(oid, parent, pnode); err != nil {
t.Fatalf("Cannot set parent object %d:%d (%v) in DAG file %s", oid, parent, pnode, dagfile)
}
@@ -391,14 +392,14 @@
t.Errorf("Cannot find stored object %d:%d in DAG file %s", oid, version, dagfile)
}
- expParents := []storage.Version{4, 5, 6}
+ expParents := []raw.Version{4, 5, 6}
if !reflect.DeepEqual(node2.Parents, expParents) {
t.Errorf("invalid parents for object %d:%d in DAG file %s: %v instead of %v",
oid, version, dagfile, node2.Parents, expParents)
}
// Creating cycles should fail.
- for v := storage.Version(1); v < version; v++ {
+ for v := raw.Version(1); v < version; v++ {
if err = dag.addParent(oid, v, version, false); err == nil {
t.Errorf("addParent() failed to reject a cycle for object %d: from ancestor %d to node %d in DAG file %s",
oid, v, version, dagfile)
@@ -518,24 +519,24 @@
pmap := dag.getParentMap(oid)
- exp := map[storage.Version][]storage.Version{0: nil, 1: {0}, 2: {1}}
+ exp := map[raw.Version][]raw.Version{0: nil, 1: {0}, 2: {1}}
if !reflect.DeepEqual(pmap, exp) {
t.Errorf("Invalid object %d parent map in DAG file %s: (%v) instead of (%v)", oid, dagfile, pmap, exp)
}
// Make sure an existing node cannot be added again.
- if err = dag.addNode(oid, 1, false, false, []storage.Version{0, 2}, "foobar", NoTxID); err == nil {
+ if err = dag.addNode(oid, 1, false, false, []raw.Version{0, 2}, "foobar", NoTxID); err == nil {
t.Errorf("addNode() did not fail when given an existing node")
}
// Make sure a new node cannot have more than 2 parents.
- if err = dag.addNode(oid, 3, false, false, []storage.Version{0, 1, 2}, "foobar", NoTxID); err == nil {
+ if err = dag.addNode(oid, 3, false, false, []raw.Version{0, 1, 2}, "foobar", NoTxID); err == nil {
t.Errorf("addNode() did not fail when given 3 parents")
}
// Make sure a new node cannot have an invalid parent.
- if err = dag.addNode(oid, 3, false, false, []storage.Version{0, 555}, "foobar", NoTxID); err == nil {
+ if err = dag.addNode(oid, 3, false, false, []raw.Version{0, 555}, "foobar", NoTxID); err == nil {
t.Errorf("addNode() did not fail when using an invalid parent")
}
@@ -544,7 +545,7 @@
if err = dag.addNode(oid, 6789, false, false, nil, "foobar", NoTxID); err == nil {
t.Errorf("Adding a 2nd root node (nil parents) for object %d in DAG file %s did not fail", oid, dagfile)
}
- if err = dag.addNode(oid, 6789, false, false, []storage.Version{}, "foobar", NoTxID); err == nil {
+ if err = dag.addNode(oid, 6789, false, false, []raw.Version{}, "foobar", NoTxID); err == nil {
t.Errorf("Adding a 2nd root node (empty parents) for object %d in DAG file %s did not fail", oid, dagfile)
}
@@ -583,7 +584,7 @@
pmap := dag.getParentMap(oid)
- exp := map[storage.Version][]storage.Version{0: nil, 1: {0}, 2: {1}}
+ exp := map[raw.Version][]raw.Version{0: nil, 1: {0}, 2: {1}}
if !reflect.DeepEqual(pmap, exp) {
t.Errorf("Invalid object %d parent map in DAG file %s: (%v) instead of (%v)", oid, dagfile, pmap, exp)
@@ -592,12 +593,12 @@
// Verify the grafting of remote nodes.
newHeads, grafts := dag.getGraftNodes(oid)
- expNewHeads := map[storage.Version]struct{}{2: struct{}{}}
+ expNewHeads := map[raw.Version]struct{}{2: struct{}{}}
if !reflect.DeepEqual(newHeads, expNewHeads) {
t.Errorf("Object %d has invalid newHeads in DAG file %s: (%v) instead of (%v)", oid, dagfile, newHeads, expNewHeads)
}
- expgrafts := map[storage.Version]uint64{}
+ expgrafts := map[raw.Version]uint64{}
if !reflect.DeepEqual(grafts, expgrafts) {
t.Errorf("Invalid object %d graft in DAG file %s: (%v) instead of (%v)", oid, dagfile, grafts, expgrafts)
}
@@ -664,7 +665,7 @@
pmap := dag.getParentMap(oid)
- exp := map[storage.Version][]storage.Version{0: nil, 1: {0}, 2: {1}, 3: {2}, 4: {3}, 5: {4}}
+ exp := map[raw.Version][]raw.Version{0: nil, 1: {0}, 2: {1}, 3: {2}, 4: {3}, 5: {4}}
if !reflect.DeepEqual(pmap, exp) {
t.Errorf("Invalid object %d parent map in DAG file %s: (%v) instead of (%v)", oid, dagfile, pmap, exp)
@@ -673,12 +674,12 @@
// Verify the grafting of remote nodes.
newHeads, grafts := dag.getGraftNodes(oid)
- expNewHeads := map[storage.Version]struct{}{5: struct{}{}}
+ expNewHeads := map[raw.Version]struct{}{5: struct{}{}}
if !reflect.DeepEqual(newHeads, expNewHeads) {
t.Errorf("Object %d has invalid newHeads in DAG file %s: (%v) instead of (%v)", oid, dagfile, newHeads, expNewHeads)
}
- expgrafts := map[storage.Version]uint64{2: 2}
+ expgrafts := map[raw.Version]uint64{2: 2}
if !reflect.DeepEqual(grafts, expgrafts) {
t.Errorf("Invalid object %d graft in DAG file %s: (%v) instead of (%v)", oid, dagfile, grafts, expgrafts)
}
@@ -755,7 +756,7 @@
pmap := dag.getParentMap(oid)
- exp := map[storage.Version][]storage.Version{0: nil, 1: {0}, 2: {1}, 3: {1}, 4: {3}, 5: {4}}
+ exp := map[raw.Version][]raw.Version{0: nil, 1: {0}, 2: {1}, 3: {1}, 4: {3}, 5: {4}}
if !reflect.DeepEqual(pmap, exp) {
t.Errorf("Invalid object %d parent map in DAG file %s: (%v) instead of (%v)", oid, dagfile, pmap, exp)
@@ -764,12 +765,12 @@
// Verify the grafting of remote nodes.
newHeads, grafts := dag.getGraftNodes(oid)
- expNewHeads := map[storage.Version]struct{}{2: struct{}{}, 5: struct{}{}}
+ expNewHeads := map[raw.Version]struct{}{2: struct{}{}, 5: struct{}{}}
if !reflect.DeepEqual(newHeads, expNewHeads) {
t.Errorf("Object %d has invalid newHeads in DAG file %s: (%v) instead of (%v)", oid, dagfile, newHeads, expNewHeads)
}
- expgrafts := map[storage.Version]uint64{1: 1}
+ expgrafts := map[raw.Version]uint64{1: 1}
if !reflect.DeepEqual(grafts, expgrafts) {
t.Errorf("Invalid object %d graft in DAG file %s: (%v) instead of (%v)", oid, dagfile, grafts, expgrafts)
}
@@ -801,7 +802,7 @@
t.Errorf("Object %d has wrong head after conflict resolution in DAG file %s: %d", oid, dagfile, head)
}
- exp[6] = []storage.Version{2, 5}
+ exp[6] = []raw.Version{2, 5}
pmap = dag.getParentMap(oid)
if !reflect.DeepEqual(pmap, exp) {
t.Errorf("Invalid object %d parent map after conflict resolution in DAG file %s: (%v) instead of (%v)",
@@ -857,7 +858,7 @@
pmap := dag.getParentMap(oid)
- exp := map[storage.Version][]storage.Version{0: nil, 1: {0}, 2: {1}, 3: {0}, 4: {1, 3}, 5: {4}}
+ exp := map[raw.Version][]raw.Version{0: nil, 1: {0}, 2: {1}, 3: {0}, 4: {1, 3}, 5: {4}}
if !reflect.DeepEqual(pmap, exp) {
t.Errorf("Invalid object %d parent map in DAG file %s: (%v) instead of (%v)", oid, dagfile, pmap, exp)
@@ -866,12 +867,12 @@
// Verify the grafting of remote nodes.
newHeads, grafts := dag.getGraftNodes(oid)
- expNewHeads := map[storage.Version]struct{}{2: struct{}{}, 5: struct{}{}}
+ expNewHeads := map[raw.Version]struct{}{2: struct{}{}, 5: struct{}{}}
if !reflect.DeepEqual(newHeads, expNewHeads) {
t.Errorf("Object %d has invalid newHeads in DAG file %s: (%v) instead of (%v)", oid, dagfile, newHeads, expNewHeads)
}
- expgrafts := map[storage.Version]uint64{0: 0, 1: 1}
+ expgrafts := map[raw.Version]uint64{0: 0, 1: 1}
if !reflect.DeepEqual(grafts, expgrafts) {
t.Errorf("Invalid object %d graft in DAG file %s: (%v) instead of (%v)", oid, dagfile, grafts, expgrafts)
}
@@ -903,7 +904,7 @@
t.Errorf("Object %d has wrong head after conflict resolution in DAG file %s: %d", oid, dagfile, head)
}
- exp[6] = []storage.Version{2, 5}
+ exp[6] = []raw.Version{2, 5}
pmap = dag.getParentMap(oid)
if !reflect.DeepEqual(pmap, exp) {
t.Errorf("Invalid object %d parent map after conflict resolution in DAG file %s: (%v) instead of (%v)",
@@ -944,16 +945,16 @@
}
// Loop checking the iteration behavior for different starting nodes.
- for _, start := range []storage.Version{0, 2, 5, 8} {
- visitCount := make(map[storage.Version]int)
- err = dag.ancestorIter(oid, []storage.Version{start},
- func(oid storage.ID, v storage.Version, node *dagNode) error {
+ for _, start := range []raw.Version{0, 2, 5, 8} {
+ visitCount := make(map[raw.Version]int)
+ err = dag.ancestorIter(oid, []raw.Version{start},
+ func(oid storage.ID, v raw.Version, node *dagNode) error {
visitCount[v]++
return nil
})
// Check that all prior nodes are visited only once.
- for i := storage.Version(0); i < (start + 1); i++ {
+ for i := raw.Version(0); i < (start + 1); i++ {
if visitCount[i] != 1 {
t.Errorf("wrong visit count for iter on object %d node %d starting from node %d: %d instead of 1",
oid, i, start, visitCount[i])
@@ -963,7 +964,7 @@
// Make sure an error in the callback is returned through the iterator.
cbErr := errors.New("callback error")
- err = dag.ancestorIter(oid, []storage.Version{8}, func(oid storage.ID, v storage.Version, node *dagNode) error {
+ err = dag.ancestorIter(oid, []raw.Version{8}, func(oid storage.ID, v raw.Version, node *dagNode) error {
if v == 0 {
return cbErr
}
@@ -1008,10 +1009,10 @@
t.Fatal(err)
}
- exp := map[storage.Version][]storage.Version{0: nil, 1: {0}, 2: {1}, 3: {1}, 4: {2, 3}, 5: {4}, 6: {1}, 7: {5, 6}, 8: {7}}
+ exp := map[raw.Version][]raw.Version{0: nil, 1: {0}, 2: {1}, 3: {1}, 4: {2, 3}, 5: {4}, 6: {1}, 7: {5, 6}, 8: {7}}
// Loop pruning at an invalid version (333) then at v0, v5, v8 and again at v8.
- testVersions := []storage.Version{333, 0, 1, 5, 7, 8, 8}
+ testVersions := []raw.Version{333, 0, 1, 5, 7, 8, 8}
delCounts := []int{0, 0, 1, 4, 2, 1, 0}
for i, version := range testVersions {
@@ -1043,7 +1044,7 @@
// Remove pruned nodes from the expected parent map used to validate
// and set the parents of the pruned node to nil.
if version < 10 {
- for j := storage.Version(0); j < version; j++ {
+ for j := raw.Version(0); j < version; j++ {
delete(exp, j)
}
exp[version] = nil
@@ -1087,11 +1088,11 @@
t.Fatal(err)
}
- exp := map[storage.Version][]storage.Version{8: nil}
+ exp := map[raw.Version][]raw.Version{8: nil}
// Prune at v8 with a callback function that fails for v3.
del, expDel := 0, 8
- version := storage.Version(8)
+ version := raw.Version(8)
err = dag.prune(oid, version, func(lr string) error {
del++
if lr == "logrec-03" {
@@ -1137,7 +1138,7 @@
}
// Put some data in "heads" table.
- headMap := make(map[storage.ID]storage.Version)
+ headMap := make(map[storage.ID]raw.Version)
for i := 0; i < 10; i++ {
// Generate a random object id in [0, 1000).
oid, err := strToObjID(fmt.Sprintf("%d", testutil.Rand.Intn(1000)))
@@ -1145,7 +1146,7 @@
t.Fatal(err)
}
// Generate a random version number for this object.
- vers := storage.Version(testutil.Rand.Intn(5000))
+ vers := raw.Version(testutil.Rand.Intn(5000))
// Cache this <oid,version> pair to verify with getHead().
headMap[oid] = vers
@@ -1161,16 +1162,16 @@
// Put some data in "nodes" table.
type nodeKey struct {
oid storage.ID
- vers storage.Version
+ vers raw.Version
}
nodeMap := make(map[nodeKey]*dagNode)
for oid, vers := range headMap {
// Generate a random dag node for this <oid, vers>.
l := uint64(testutil.Rand.Intn(20))
- p1 := storage.Version(testutil.Rand.Intn(5000))
- p2 := storage.Version(testutil.Rand.Intn(5000))
+ p1 := raw.Version(testutil.Rand.Intn(5000))
+ p2 := raw.Version(testutil.Rand.Intn(5000))
log := fmt.Sprintf("%d", testutil.Rand.Intn(1000))
- node := &dagNode{Level: l, Parents: []storage.Version{p1, p2}, Logrec: log}
+ node := &dagNode{Level: l, Parents: []raw.Version{p1, p2}, Logrec: log}
// Cache this <oid,version, dagNode> to verify with getNode().
key := nodeKey{oid: oid, vers: vers}
@@ -1264,7 +1265,7 @@
pmap := dag.getParentMap(oid)
- exp := map[storage.Version][]storage.Version{1: nil, 2: {1, 4}, 3: {2}, 4: {1}}
+ exp := map[raw.Version][]raw.Version{1: nil, 2: {1, 4}, 3: {2}, 4: {1}}
if !reflect.DeepEqual(pmap, exp) {
t.Errorf("Invalid object %d parent map in DAG file %s: (%v) instead of (%v)", oid, dagfile, pmap, exp)
@@ -1273,19 +1274,19 @@
// Verify the grafting of remote nodes.
newHeads, grafts := dag.getGraftNodes(oid)
- expNewHeads := map[storage.Version]struct{}{3: struct{}{}}
+ expNewHeads := map[raw.Version]struct{}{3: struct{}{}}
if !reflect.DeepEqual(newHeads, expNewHeads) {
t.Errorf("Object %d has invalid newHeads in DAG file %s: (%v) instead of (%v)", oid, dagfile, newHeads, expNewHeads)
}
- expgrafts := map[storage.Version]uint64{1: 0, 4: 1}
+ expgrafts := map[raw.Version]uint64{1: 0, 4: 1}
if !reflect.DeepEqual(grafts, expgrafts) {
t.Errorf("Invalid object %d graft in DAG file %s: (%v) instead of (%v)", oid, dagfile, grafts, expgrafts)
}
// There should be no conflict.
isConflict, newHead, oldHead, ancestor, errConflict := dag.hasConflict(oid)
- if !(!isConflict && newHead == 3 && oldHead == 3 && ancestor == storage.NoVersion && errConflict == nil) {
+ if !(!isConflict && newHead == 3 && oldHead == 3 && ancestor == raw.NoVersion && errConflict == nil) {
t.Errorf("Object %d wrong conflict info: flag %t, newHead %d, oldHead %d, ancestor %d, err %v",
oid, isConflict, newHead, oldHead, ancestor, errConflict)
}
@@ -1339,7 +1340,7 @@
pmap := dag.getParentMap(oid)
- exp := map[storage.Version][]storage.Version{1: nil, 2: {1}, 3: {2}, 4: {1, 2}}
+ exp := map[raw.Version][]raw.Version{1: nil, 2: {1}, 3: {2}, 4: {1, 2}}
if !reflect.DeepEqual(pmap, exp) {
t.Errorf("Invalid object %d parent map in DAG file %s: (%v) instead of (%v)", oid, dagfile, pmap, exp)
@@ -1348,12 +1349,12 @@
// Verify the grafting of remote nodes.
newHeads, grafts := dag.getGraftNodes(oid)
- expNewHeads := map[storage.Version]struct{}{3: struct{}{}, 4: struct{}{}}
+ expNewHeads := map[raw.Version]struct{}{3: struct{}{}, 4: struct{}{}}
if !reflect.DeepEqual(newHeads, expNewHeads) {
t.Errorf("Object %d has invalid newHeads in DAG file %s: (%v) instead of (%v)", oid, dagfile, newHeads, expNewHeads)
}
- expgrafts := map[storage.Version]uint64{1: 0, 2: 1}
+ expgrafts := map[raw.Version]uint64{1: 0, 2: 1}
if !reflect.DeepEqual(grafts, expgrafts) {
t.Errorf("Invalid object %d graft in DAG file %s: (%v) instead of (%v)", oid, dagfile, grafts, expgrafts)
}
@@ -1415,7 +1416,7 @@
pmap := dag.getParentMap(oid)
- exp := map[storage.Version][]storage.Version{1: nil, 2: {1}, 3: {2}, 4: {1, 3}}
+ exp := map[raw.Version][]raw.Version{1: nil, 2: {1}, 3: {2}, 4: {1, 3}}
if !reflect.DeepEqual(pmap, exp) {
t.Errorf("Invalid object %d parent map in DAG file %s: (%v) instead of (%v)", oid, dagfile, pmap, exp)
@@ -1424,19 +1425,19 @@
// Verify the grafting of remote nodes.
newHeads, grafts := dag.getGraftNodes(oid)
- expNewHeads := map[storage.Version]struct{}{4: struct{}{}}
+ expNewHeads := map[raw.Version]struct{}{4: struct{}{}}
if !reflect.DeepEqual(newHeads, expNewHeads) {
t.Errorf("Object %d has invalid newHeads in DAG file %s: (%v) instead of (%v)", oid, dagfile, newHeads, expNewHeads)
}
- expgrafts := map[storage.Version]uint64{1: 0, 3: 2}
+ expgrafts := map[raw.Version]uint64{1: 0, 3: 2}
if !reflect.DeepEqual(grafts, expgrafts) {
t.Errorf("Invalid object %d graft in DAG file %s: (%v) instead of (%v)", oid, dagfile, grafts, expgrafts)
}
// There should be no conflict.
isConflict, newHead, oldHead, ancestor, errConflict := dag.hasConflict(oid)
- if !(!isConflict && newHead == 4 && oldHead == 3 && ancestor == storage.NoVersion && errConflict == nil) {
+ if !(!isConflict && newHead == 4 && oldHead == 3 && ancestor == raw.NoVersion && errConflict == nil) {
t.Errorf("Object %d wrong conflict info: flag %t, newHead %d, oldHead %d, ancestor %d, err %v",
oid, isConflict, newHead, oldHead, ancestor, errConflict)
}
@@ -1493,7 +1494,7 @@
pmap := dag.getParentMap(oid)
- exp := map[storage.Version][]storage.Version{1: nil, 2: {1}, 3: {2, 4}, 4: {1}, 5: {3}}
+ exp := map[raw.Version][]raw.Version{1: nil, 2: {1}, 3: {2, 4}, 4: {1}, 5: {3}}
if !reflect.DeepEqual(pmap, exp) {
t.Errorf("Invalid object %d parent map in DAG file %s: (%v) instead of (%v)", oid, dagfile, pmap, exp)
@@ -1502,19 +1503,19 @@
// Verify the grafting of remote nodes.
newHeads, grafts := dag.getGraftNodes(oid)
- expNewHeads := map[storage.Version]struct{}{5: struct{}{}}
+ expNewHeads := map[raw.Version]struct{}{5: struct{}{}}
if !reflect.DeepEqual(newHeads, expNewHeads) {
t.Errorf("Object %d has invalid newHeads in DAG file %s: (%v) instead of (%v)", oid, dagfile, newHeads, expNewHeads)
}
- expgrafts := map[storage.Version]uint64{1: 0, 3: 2, 4: 1}
+ expgrafts := map[raw.Version]uint64{1: 0, 3: 2, 4: 1}
if !reflect.DeepEqual(grafts, expgrafts) {
t.Errorf("Invalid object %d graft in DAG file %s: (%v) instead of (%v)", oid, dagfile, grafts, expgrafts)
}
// There should be no conflict.
isConflict, newHead, oldHead, ancestor, errConflict := dag.hasConflict(oid)
- if !(!isConflict && newHead == 5 && oldHead == 3 && ancestor == storage.NoVersion && errConflict == nil) {
+ if !(!isConflict && newHead == 5 && oldHead == 3 && ancestor == raw.NoVersion && errConflict == nil) {
t.Errorf("Object %d wrong conflict info: flag %t, newHead %d, oldHead %d, ancestor %d, err %v",
oid, isConflict, newHead, oldHead, ancestor, errConflict)
}
@@ -1547,13 +1548,13 @@
t.Errorf("Object %d has invalid newHeads in DAG file %s: (%v) instead of (%v)", oid, dagfile, newHeads, expNewHeads)
}
- expgrafts = map[storage.Version]uint64{}
+ expgrafts = map[raw.Version]uint64{}
if !reflect.DeepEqual(grafts, expgrafts) {
t.Errorf("Invalid object %d graft in DAG file %s: (%v) instead of (%v)", oid, dagfile, grafts, expgrafts)
}
isConflict, newHead, oldHead, ancestor, errConflict = dag.hasConflict(oid)
- if !(!isConflict && newHead == 5 && oldHead == 5 && ancestor == storage.NoVersion && errConflict == nil) {
+ if !(!isConflict && newHead == 5 && oldHead == 5 && ancestor == raw.NoVersion && errConflict == nil) {
t.Errorf("Object %d wrong conflict info: flag %t, newHead %d, oldHead %d, ancestor %d, err %v",
oid, isConflict, newHead, oldHead, ancestor, errConflict)
}
@@ -1618,10 +1619,10 @@
t.Errorf("Transactions map for Tx ID %v has length %d instead of 0 in DAG file %s", tid_1, n, dagfile)
}
- if err := dag.addNode(oid_a, 3, false, false, []storage.Version{2}, "logrec-a-03", tid_1); err != nil {
+ if err := dag.addNode(oid_a, 3, false, false, []raw.Version{2}, "logrec-a-03", tid_1); err != nil {
t.Errorf("Cannot addNode() on object %d and Tx ID %v in DAG file %s: %v", oid_a, tid_1, dagfile, err)
}
- if err := dag.addNode(oid_b, 3, false, false, []storage.Version{2}, "logrec-b-03", tid_1); err != nil {
+ if err := dag.addNode(oid_b, 3, false, false, []raw.Version{2}, "logrec-b-03", tid_1); err != nil {
t.Errorf("Cannot addNode() on object %d and Tx ID %v in DAG file %s: %v", oid_b, tid_1, dagfile, err)
}
@@ -1639,7 +1640,7 @@
t.Errorf("Transactions map for Tx ID %v has length %d instead of 0 in DAG file %s", tid_2, n, dagfile)
}
- if err := dag.addNode(oid_c, 2, false, false, []storage.Version{1}, "logrec-c-02", tid_2); err != nil {
+ if err := dag.addNode(oid_c, 2, false, false, []raw.Version{1}, "logrec-c-02", tid_2); err != nil {
t.Errorf("Cannot addNode() on object %d and Tx ID %v in DAG file %s: %v", oid_c, tid_2, dagfile, err)
}
@@ -1670,7 +1671,7 @@
bad_tid++
}
- if err := dag.addNode(oid_c, 3, false, false, []storage.Version{2}, "logrec-c-03", bad_tid); err == nil {
+ if err := dag.addNode(oid_c, 3, false, false, []raw.Version{2}, "logrec-c-03", bad_tid); err == nil {
t.Errorf("addNode() did not fail on object %d for a bad Tx ID %v in DAG file %s", oid_c, bad_tid, dagfile)
}
if err := dag.addNodeTxEnd(bad_tid); err == nil {
@@ -1802,10 +1803,10 @@
if tid_1 == NoTxID {
t.Fatal("Cannot start 1st DAG addNode() transaction")
}
- if err := dag.addNode(oid_a, 3, false, false, []storage.Version{2}, "logrec-a-03", tid_1); err != nil {
+ if err := dag.addNode(oid_a, 3, false, false, []raw.Version{2}, "logrec-a-03", tid_1); err != nil {
t.Errorf("Cannot addNode() on object %d and Tx ID %v in DAG file %s: %v", oid_a, tid_1, dagfile, err)
}
- if err := dag.addNode(oid_b, 3, false, false, []storage.Version{2}, "logrec-b-03", tid_1); err != nil {
+ if err := dag.addNode(oid_b, 3, false, false, []raw.Version{2}, "logrec-b-03", tid_1); err != nil {
t.Errorf("Cannot addNode() on object %d and Tx ID %v in DAG file %s: %v", oid_b, tid_1, dagfile, err)
}
if err := dag.addNodeTxEnd(tid_1); err != nil {
@@ -1816,20 +1817,20 @@
if tid_2 == NoTxID {
t.Fatal("Cannot start 2nd DAG addNode() transaction")
}
- if err := dag.addNode(oid_b, 4, false, false, []storage.Version{3}, "logrec-b-04", tid_2); err != nil {
+ if err := dag.addNode(oid_b, 4, false, false, []raw.Version{3}, "logrec-b-04", tid_2); err != nil {
t.Errorf("Cannot addNode() on object %d and Tx ID %v in DAG file %s: %v", oid_b, tid_2, dagfile, err)
}
- if err := dag.addNode(oid_c, 2, false, false, []storage.Version{1}, "logrec-c-02", tid_2); err != nil {
+ if err := dag.addNode(oid_c, 2, false, false, []raw.Version{1}, "logrec-c-02", tid_2); err != nil {
t.Errorf("Cannot addNode() on object %d and Tx ID %v in DAG file %s: %v", oid_c, tid_2, dagfile, err)
}
if err := dag.addNodeTxEnd(tid_2); err != nil {
t.Errorf("Cannot addNodeTxEnd() for Tx ID %v in DAG file %s: %v", tid_2, dagfile, err)
}
- if err := dag.addNode(oid_a, 4, false, false, []storage.Version{3}, "logrec-a-04", NoTxID); err != nil {
+ if err := dag.addNode(oid_a, 4, false, false, []raw.Version{3}, "logrec-a-04", NoTxID); err != nil {
t.Errorf("Cannot addNode() on object %d and Tx ID %v in DAG file %s: %v", oid_a, tid_1, dagfile, err)
}
- if err := dag.addNode(oid_b, 5, false, false, []storage.Version{4}, "logrec-b-05", NoTxID); err != nil {
+ if err := dag.addNode(oid_b, 5, false, false, []raw.Version{4}, "logrec-b-05", NoTxID); err != nil {
t.Errorf("Cannot addNode() on object %d and Tx ID %v in DAG file %s: %v", oid_b, tid_2, dagfile, err)
}
@@ -1904,7 +1905,7 @@
}
// Add c3 as a new head and prune at that point. This should GC Tx-2.
- if err := dag.addNode(oid_c, 3, false, false, []storage.Version{2}, "logrec-c-03", NoTxID); err != nil {
+ if err := dag.addNode(oid_c, 3, false, false, []raw.Version{2}, "logrec-c-03", NoTxID); err != nil {
t.Errorf("Cannot addNode() on object %d in DAG file %s: %v", oid_c, dagfile, err)
}
if err = dag.moveHead(oid_c, 3); err != nil {
@@ -1961,11 +1962,11 @@
}
type hasDelDescTest struct {
- node storage.Version
+ node raw.Version
result bool
}
tests := []hasDelDescTest{
- {storage.NoVersion, false},
+ {raw.NoVersion, false},
{999, false},
{1, true},
{2, true},
diff --git a/runtimes/google/vsync/gc.go b/runtimes/google/vsync/gc.go
index 4efdd6a..dedd575 100644
--- a/runtimes/google/vsync/gc.go
+++ b/runtimes/google/vsync/gc.go
@@ -84,6 +84,8 @@
"fmt"
"time"
+ "veyron/services/store/raw"
+
"veyron2/storage"
"veyron2/vlog"
)
@@ -130,14 +132,14 @@
// generation. "pos" is the position of that generation in the local
// log.
type objGCState struct {
- version storage.Version
+ version raw.Version
pos uint32
}
// objVersHist tracks all the versions of the object that need to be
// gc'ed when strictCheck is enabled.
type objVersHist struct {
- versions map[storage.Version]struct{}
+ versions map[raw.Version]struct{}
}
// syncGC contains the metadata and state for the Sync GC thread.
@@ -338,7 +340,7 @@
objHist, ok := g.verifyPruneMap[rec.ObjID]
if !ok {
objHist = &objVersHist{
- versions: make(map[storage.Version]struct{}),
+ versions: make(map[raw.Version]struct{}),
}
g.verifyPruneMap[rec.ObjID] = objHist
}
diff --git a/runtimes/google/vsync/gc_test.go b/runtimes/google/vsync/gc_test.go
index e0fed2d..a0d2643 100644
--- a/runtimes/google/vsync/gc_test.go
+++ b/runtimes/google/vsync/gc_test.go
@@ -7,6 +7,8 @@
"testing"
_ "veyron/lib/testutil"
+ "veyron/services/store/raw"
+
"veyron2/storage"
)
@@ -113,9 +115,9 @@
}
expMap1 := make(map[storage.ID]*objVersHist)
if strictCheck {
- expMap1[objid] = &objVersHist{versions: make(map[storage.Version]struct{})}
+ expMap1[objid] = &objVersHist{versions: make(map[raw.Version]struct{})}
for i := 0; i < 5; i++ {
- expMap1[objid].versions[storage.Version(i)] = struct{}{}
+ expMap1[objid].versions[raw.Version(i)] = struct{}{}
}
if !reflect.DeepEqual(expMap1, s.hdlGC.verifyPruneMap) {
t.Errorf("Data mismatch for verifyPruneMap: %v instead of %v", s.hdlGC.verifyPruneMap, expMap1)
@@ -183,8 +185,8 @@
expMap1 := make(map[storage.ID]*objVersHist)
if strictCheck {
- expMap1[objid] = &objVersHist{versions: make(map[storage.Version]struct{})}
- expMap1[objid].versions[storage.Version(3)] = struct{}{}
+ expMap1[objid] = &objVersHist{versions: make(map[raw.Version]struct{})}
+ expMap1[objid].versions[raw.Version(3)] = struct{}{}
if !reflect.DeepEqual(expMap1, s.hdlGC.verifyPruneMap) {
t.Errorf("Data mismatch for verifyPruneMap: %v instead of %v", s.hdlGC.verifyPruneMap, expMap1)
}
@@ -199,7 +201,7 @@
}
if strictCheck {
- expMap1[objid].versions[storage.Version(2)] = struct{}{}
+ expMap1[objid].versions[raw.Version(2)] = struct{}{}
if !reflect.DeepEqual(expMap1, s.hdlGC.verifyPruneMap) {
t.Errorf("Data mismatch for verifyPruneMap: %v instead of %v", s.hdlGC.verifyPruneMap, expMap1)
}
@@ -216,7 +218,7 @@
}
if strictCheck {
- expMap1[objid].versions[storage.Version(6)] = struct{}{}
+ expMap1[objid].versions[raw.Version(6)] = struct{}{}
if !reflect.DeepEqual(expMap1, s.hdlGC.verifyPruneMap) {
t.Errorf("Data mismatch for verifyPruneMap: %v instead of %v",
s.hdlGC.verifyPruneMap[objid], expMap1[objid])
@@ -267,10 +269,10 @@
}
expMap1 := make(map[storage.ID]*objVersHist)
- expMap1[objid] = &objVersHist{versions: make(map[storage.Version]struct{})}
+ expMap1[objid] = &objVersHist{versions: make(map[raw.Version]struct{})}
if strictCheck {
for i := 0; i < 5; i++ {
- expMap1[objid].versions[storage.Version(i)] = struct{}{}
+ expMap1[objid].versions[raw.Version(i)] = struct{}{}
}
if !reflect.DeepEqual(expMap1, s.hdlGC.verifyPruneMap) {
t.Errorf("Data mismatch for verifyPruneMap: %v instead of %v",
@@ -296,8 +298,8 @@
t.Errorf("Data mismatch for pruneObjects map: %v instead of %v", s.hdlGC.pruneObjects[objid], expMap[objid])
}
if strictCheck {
- expMap1[objid].versions[storage.Version(5)] = struct{}{}
- expMap1[objid].versions[storage.Version(6)] = struct{}{}
+ expMap1[objid].versions[raw.Version(5)] = struct{}{}
+ expMap1[objid].versions[raw.Version(6)] = struct{}{}
if !reflect.DeepEqual(expMap1, s.hdlGC.verifyPruneMap) {
t.Errorf("Data mismatch for verifyPruneMap: %v instead of %v",
s.hdlGC.verifyPruneMap[objid], expMap[objid])
@@ -337,9 +339,9 @@
t.Errorf("Could not create objid %v", err)
}
expMap[obj1] = &objGCState{pos: 8, version: 6}
- expMap1[obj1] = &objVersHist{versions: make(map[storage.Version]struct{})}
+ expMap1[obj1] = &objVersHist{versions: make(map[raw.Version]struct{})}
for i := 1; i < 7; i++ {
- expMap1[obj1].versions[storage.Version(i)] = struct{}{}
+ expMap1[obj1].versions[raw.Version(i)] = struct{}{}
}
obj2, err := strToObjID("456")
@@ -347,20 +349,20 @@
t.Errorf("Could not create objid %v", err)
}
expMap[obj2] = &objGCState{pos: 10, version: 7}
- expMap1[obj2] = &objVersHist{versions: make(map[storage.Version]struct{})}
+ expMap1[obj2] = &objVersHist{versions: make(map[raw.Version]struct{})}
for i := 1; i < 6; i++ {
- expMap1[obj2].versions[storage.Version(i)] = struct{}{}
+ expMap1[obj2].versions[raw.Version(i)] = struct{}{}
}
- expMap1[obj2].versions[storage.Version(7)] = struct{}{}
+ expMap1[obj2].versions[raw.Version(7)] = struct{}{}
obj3, err := strToObjID("789")
if err != nil {
t.Errorf("Could not create objid %v", err)
}
expMap[obj3] = &objGCState{pos: 8, version: 4}
- expMap1[obj3] = &objVersHist{versions: make(map[storage.Version]struct{})}
+ expMap1[obj3] = &objVersHist{versions: make(map[raw.Version]struct{})}
for i := 1; i < 5; i++ {
- expMap1[obj3].versions[storage.Version(i)] = struct{}{}
+ expMap1[obj3].versions[raw.Version(i)] = struct{}{}
}
if !reflect.DeepEqual(expMap, s.hdlGC.pruneObjects) {
@@ -393,15 +395,15 @@
expMap[obj1] = &objGCState{pos: 12, version: 9}
for i := 7; i < 10; i++ {
- expMap1[obj1].versions[storage.Version(i)] = struct{}{}
+ expMap1[obj1].versions[raw.Version(i)] = struct{}{}
}
expMap[obj2] = &objGCState{pos: 12, version: 8}
for i := 6; i < 9; i++ {
- expMap1[obj2].versions[storage.Version(i)] = struct{}{}
+ expMap1[obj2].versions[raw.Version(i)] = struct{}{}
}
expMap[obj3] = &objGCState{pos: 12, version: 6}
for i := 5; i < 7; i++ {
- expMap1[obj3].versions[storage.Version(i)] = struct{}{}
+ expMap1[obj3].versions[raw.Version(i)] = struct{}{}
}
if !reflect.DeepEqual(expMap, s.hdlGC.pruneObjects) {
@@ -459,7 +461,7 @@
t.Errorf("Could not create objid %v", err)
}
s.hdlGC.verifyPruneMap[objid] = &objVersHist{
- versions: make(map[storage.Version]struct{}),
+ versions: make(map[raw.Version]struct{}),
}
}
if err := s.hdlGC.dagPruneCallBack("A:1:0"); err != nil {
@@ -510,9 +512,9 @@
t.Errorf("Could not create objid %v", err)
}
s.hdlGC.verifyPruneMap[objid] = &objVersHist{
- versions: make(map[storage.Version]struct{}),
+ versions: make(map[raw.Version]struct{}),
}
- s.hdlGC.verifyPruneMap[objid].versions[storage.Version(2)] = struct{}{}
+ s.hdlGC.verifyPruneMap[objid].versions[raw.Version(2)] = struct{}{}
if err := s.hdlGC.dagPruneCallBack("A:1:0"); err != nil {
t.Errorf("dagPruneCallBack failed for test %s, err %v\n", testFile, err)
}
@@ -558,9 +560,9 @@
t.Errorf("Could not create objid %v", err)
}
s.hdlGC.verifyPruneMap[objid] = &objVersHist{
- versions: make(map[storage.Version]struct{}),
+ versions: make(map[raw.Version]struct{}),
}
- s.hdlGC.verifyPruneMap[objid].versions[storage.Version(4)] = struct{}{}
+ s.hdlGC.verifyPruneMap[objid].versions[raw.Version(4)] = struct{}{}
}
// Before pruning.
@@ -600,9 +602,9 @@
t.Errorf("Could not create objid %v", err)
}
s.hdlGC.verifyPruneMap[objid] = &objVersHist{
- versions: make(map[storage.Version]struct{}),
+ versions: make(map[raw.Version]struct{}),
}
- s.hdlGC.verifyPruneMap[objid].versions[storage.Version(6)] = struct{}{}
+ s.hdlGC.verifyPruneMap[objid].versions[raw.Version(6)] = struct{}{}
}
if err := s.hdlGC.dagPruneCallBack("A:3:0"); err != nil {
t.Errorf("dagPruneCallBack failed for test %s, err %v\n", testFile, err)
@@ -841,7 +843,7 @@
}
// Verify DAG state.
objArr := []string{"123", "456", "789"}
- heads := []storage.Version{10, 8, 6}
+ heads := []raw.Version{10, 8, 6}
for pos, o := range objArr {
objid, err := strToObjID(o)
if err != nil {
@@ -941,7 +943,7 @@
}
// Verify DAG state.
objArr := []string{"123", "456", "789"}
- heads := []storage.Version{10, 8, 6}
+ heads := []raw.Version{10, 8, 6}
for pos, o := range objArr {
objid, err := strToObjID(o)
if err != nil {
diff --git a/runtimes/google/vsync/ilog.go b/runtimes/google/vsync/ilog.go
index ae539a7..764b6e5 100644
--- a/runtimes/google/vsync/ilog.go
+++ b/runtimes/google/vsync/ilog.go
@@ -42,6 +42,8 @@
"strconv"
"strings"
+ "veyron/services/store/raw"
+
"veyron2/storage"
"veyron2/vlog"
)
@@ -300,7 +302,7 @@
}
// createLocalLogRec creates a new local log record of type NodeRec.
-func (l *iLog) createLocalLogRec(obj storage.ID, vers storage.Version, par []storage.Version, val *LogValue) (*LogRec, error) {
+func (l *iLog) createLocalLogRec(obj storage.ID, vers raw.Version, par []raw.Version, val *LogValue) (*LogRec, error) {
rec := &LogRec{
DevID: l.s.id,
GNum: l.head.Curgen,
@@ -320,7 +322,7 @@
}
// createLocalLinkLogRec creates a new local log record of type LinkRec.
-func (l *iLog) createLocalLinkLogRec(obj storage.ID, vers, par storage.Version) (*LogRec, error) {
+func (l *iLog) createLocalLinkLogRec(obj storage.ID, vers, par raw.Version) (*LogRec, error) {
rec := &LogRec{
DevID: l.s.id,
GNum: l.head.Curgen,
@@ -329,7 +331,7 @@
ObjID: obj,
CurVers: vers,
- Parents: []storage.Version{par},
+ Parents: []raw.Version{par},
}
// Increment the LSN for the local log.
@@ -386,14 +388,14 @@
}
// processWatchRecord processes new object versions obtained from the local store.
-func (l *iLog) processWatchRecord(objID storage.ID, vers, parent storage.Version, val *LogValue, txID TxID) error {
+func (l *iLog) processWatchRecord(objID storage.ID, vers, parent raw.Version, val *LogValue, txID TxID) error {
if l.db == nil {
return errInvalidLog
}
vlog.VI(2).Infof("processWatchRecord:: adding object %v %v", objID, vers)
- if vers != storage.NoVersion {
+ if vers != raw.NoVersion {
// Check if the object's vers already exists in the DAG.
if l.s.dag.hasNode(objID, vers) {
return nil
@@ -406,17 +408,17 @@
}
}
- var pars []storage.Version
- if parent != storage.NoVersion {
- pars = []storage.Version{parent}
+ var pars []raw.Version
+ if parent != raw.NoVersion {
+ pars = []raw.Version{parent}
}
// If the current version is a deletion, generate a new version number.
if val.Delete {
- if vers != storage.NoVersion {
+ if vers != raw.NoVersion {
return fmt.Errorf("deleted vers is %v", vers)
}
- vers = storage.NewVersion()
+ vers = raw.NewVersion()
val.Mutation.Version = vers
}
diff --git a/runtimes/google/vsync/ilog_test.go b/runtimes/google/vsync/ilog_test.go
index f027c90..ea1ba6e 100644
--- a/runtimes/google/vsync/ilog_test.go
+++ b/runtimes/google/vsync/ilog_test.go
@@ -6,6 +6,8 @@
"reflect"
"testing"
+ "veyron/services/store/raw"
+
"veyron2/storage"
)
@@ -133,7 +135,7 @@
t.Errorf("CreateLocalGeneration did not fail on a closed log: %v", err)
}
- err = log.processWatchRecord(storage.NewID(), 2, storage.Version(999), &LogValue{}, NoTxID)
+ err = log.processWatchRecord(storage.NewID(), 2, raw.Version(999), &LogValue{}, NoTxID)
if err == nil || err != errInvalidLog {
t.Errorf("ProcessWatchRecord did not fail on a closed log: %v", err)
}
@@ -304,7 +306,7 @@
LSN: lsn,
ObjID: objID,
CurVers: 2,
- Parents: []storage.Version{0, 1},
+ Parents: []raw.Version{0, 1},
Value: LogValue{},
}
@@ -366,7 +368,7 @@
LSN: lsn,
ObjID: objID,
CurVers: 2,
- Parents: []storage.Version{0, 1},
+ Parents: []raw.Version{0, 1},
Value: LogValue{},
}
@@ -596,11 +598,11 @@
// fillFakeLogRecords fills fake log records for testing purposes.
func (l *iLog) fillFakeLogRecords() {
const num = 10
- var parvers []storage.Version
+ var parvers []raw.Version
id := storage.NewID()
for i := int(0); i < num; i++ {
// Create a local log record.
- curvers := storage.Version(i)
+ curvers := raw.Version(i)
rec, err := l.createLocalLogRec(id, curvers, parvers, &LogValue{})
if err != nil {
return
@@ -610,7 +612,7 @@
if err != nil {
return
}
- parvers = []storage.Version{curvers}
+ parvers = []raw.Version{curvers}
}
}
@@ -632,11 +634,11 @@
}
const num = 10
- var parvers []storage.Version
+ var parvers []raw.Version
id := storage.NewID()
for i := int(0); i < num; i++ {
// Create a local log record.
- curvers := storage.Version(i)
+ curvers := raw.Version(i)
rec, err := log.createLocalLogRec(id, curvers, parvers, &LogValue{})
if err != nil {
t.Fatalf("Cannot create local log rec ObjID: %v Current: %s Parents: %v Error: %v",
@@ -663,7 +665,7 @@
t.Errorf("Cannot put log record:: failed with err %v", err)
}
- parvers = []storage.Version{curvers}
+ parvers = []raw.Version{curvers}
}
if err = log.close(); err != nil {
@@ -895,7 +897,7 @@
t.Fatalf("GetLogRec() can not find object %s:1:%d in log file %s, err %v",
log.s.id, i, logfile, err)
}
- if curRec.CurVers != storage.Version(i) {
+ if curRec.CurVers != raw.Version(i) {
t.Errorf("Data mismatch for logrec %s:1:%d in log file %s: %v",
log.s.id, i, logfile, curRec)
}
diff --git a/runtimes/google/vsync/initiator.go b/runtimes/google/vsync/initiator.go
index f951f7c..b1d39e0 100644
--- a/runtimes/google/vsync/initiator.go
+++ b/runtimes/google/vsync/initiator.go
@@ -72,9 +72,9 @@
// updated during an initiator run.
type objConflictState struct {
isConflict bool
- newHead storage.Version
- oldHead storage.Version
- ancestor storage.Version
+ newHead raw.Version
+ oldHead raw.Version
+ ancestor raw.Version
resolvVal *LogValue
}
@@ -471,7 +471,7 @@
continue
}
- versions := make([]storage.Version, 3)
+ versions := make([]raw.Version, 3)
versions[0] = st.oldHead
versions[1] = st.newHead
versions[2] = st.ancestor
@@ -503,7 +503,7 @@
}
// getLogRecsBatch gets the log records for an array of versions.
-func (i *syncInitiator) getLogRecsBatch(obj storage.ID, versions []storage.Version) ([]*LogRec, error) {
+func (i *syncInitiator) getLogRecsBatch(obj storage.ID, versions []raw.Version) ([]*LogRec, error) {
// TODO(hpucha): Eliminate reaching into syncd's lock.
i.syncd.lock.RLock()
defer i.syncd.lock.RUnlock()
@@ -557,8 +557,8 @@
st.resolvVal.Mutation, stMutation, obj, st.newHead, st.oldHead)
// Append to mutations, skipping a delete following a delete mutation.
- if stMutation.Version != storage.NoVersion ||
- stMutation.PriorVersion != storage.NoVersion {
+ if stMutation.Version != raw.NoVersion ||
+ stMutation.PriorVersion != raw.NoVersion {
vlog.VI(2).Infof("updateStoreAndSync:: appending mutation %v for obj %v",
stMutation, obj)
m = append(m, stMutation)
@@ -607,11 +607,11 @@
// storeMutation converts a resolved mutation generated by syncd to
// one that can be sent to the store. To send to the store, it
// converts the version numbers corresponding to object deletions to
-// storage.NoVersion when required.
+// raw.NoVersion when required.
func (i *syncInitiator) storeMutation(obj storage.ID, resolvVal *LogValue) (raw.Mutation, error) {
curDelete := resolvVal.Delete
priorDelete := false
- if resolvVal.Mutation.PriorVersion != storage.NoVersion {
+ if resolvVal.Mutation.PriorVersion != raw.NoVersion {
oldRec, err := i.getLogRec(obj, resolvVal.Mutation.PriorVersion)
if err != nil {
return raw.Mutation{}, err
@@ -629,18 +629,18 @@
stMutation := resolvVal.Mutation
// Adjust the current version if this a deletion.
if curDelete {
- stMutation.Version = storage.NoVersion
+ stMutation.Version = raw.NoVersion
}
// Adjust the prior version if it is a deletion.
if priorDelete {
- stMutation.PriorVersion = storage.NoVersion
+ stMutation.PriorVersion = raw.NoVersion
}
return stMutation, nil
}
// getLogRec returns the log record corresponding to a given object and its version.
-func (i *syncInitiator) getLogRec(obj storage.ID, vers storage.Version) (*LogRec, error) {
+func (i *syncInitiator) getLogRec(obj storage.ID, vers raw.Version) (*LogRec, error) {
logKey, err := i.syncd.dag.getLogrec(obj, vers)
if err != nil {
return nil, err
@@ -674,7 +674,7 @@
rec, err = i.syncd.log.createLocalLinkLogRec(obj, st.newHead, st.oldHead)
default:
// New version was created to resolve the conflict.
- parents := []storage.Version{st.newHead, st.oldHead}
+ parents := []raw.Version{st.newHead, st.oldHead}
rec, err = i.syncd.log.createLocalLogRec(obj, st.resolvVal.Mutation.Version, parents, st.resolvVal)
}
diff --git a/runtimes/google/vsync/initiator_test.go b/runtimes/google/vsync/initiator_test.go
index d9db4bb..d2668e5 100644
--- a/runtimes/google/vsync/initiator_test.go
+++ b/runtimes/google/vsync/initiator_test.go
@@ -84,7 +84,7 @@
newHead: 20,
ancestor: 10,
}
- versions := []storage.Version{10, 40, 20}
+ versions := []raw.Version{10, 40, 20}
for _, v := range versions {
expRec := &LogRec{
DevID: "VeyronTab",
@@ -170,7 +170,7 @@
t.Errorf("Data mismatch in log record %v", curRec)
}
// Verify DAG state.
- if _, err := s.dag.getNode(objid, storage.Version(i+1)); err != nil {
+ if _, err := s.dag.getNode(objid, raw.Version(i+1)); err != nil {
t.Errorf("GetNode() can not find object %d %d in DAG, err %v", objid, i, err)
}
}
@@ -184,7 +184,7 @@
if st.isConflict {
t.Errorf("Detected a conflict %v", st)
}
- if st.newHead != 3 || st.oldHead != storage.NoVersion {
+ if st.newHead != 3 || st.oldHead != raw.NoVersion {
t.Errorf("Conflict detection didn't succeed %v", st)
}
if err := s.hdlInitiator.resolveConflicts(); err != nil {
@@ -193,7 +193,7 @@
if err := s.hdlInitiator.updateStoreAndSync(nil, GenVector{}, minGens, GenVector{}, "VeyronPhone"); err != nil {
t.Fatalf("updateStoreAndSync failed with err %v", err)
}
- if st.resolvVal.Mutation.PriorVersion != storage.NoVersion || st.resolvVal.Mutation.Version != 3 {
+ if st.resolvVal.Mutation.PriorVersion != raw.NoVersion || st.resolvVal.Mutation.Version != 3 {
t.Errorf("Mutation generation is not accurate %v", st)
}
if s.log.head.Curgen != 1 || s.log.head.Curlsn != 0 || s.log.head.Curorder != 1 {
@@ -266,7 +266,7 @@
t.Errorf("Data mismatch in log record %v", curRec)
}
// Verify DAG state.
- if _, err := s.dag.getNode(objid, storage.Version(i+1)); err != nil {
+ if _, err := s.dag.getNode(objid, raw.Version(i+1)); err != nil {
t.Errorf("GetNode() can not find object %d %d in DAG, err %v", objid, i, err)
}
}
@@ -281,7 +281,7 @@
if st.isConflict {
t.Errorf("Detected a conflict %v", st)
}
- if st.newHead != 3 || st.oldHead != storage.NoVersion {
+ if st.newHead != 3 || st.oldHead != raw.NoVersion {
t.Errorf("Conflict detection didn't succeed %v", st)
}
if err := s.hdlInitiator.resolveConflicts(); err != nil {
@@ -290,7 +290,7 @@
if err := s.hdlInitiator.updateStoreAndSync(nil, GenVector{}, minGens, GenVector{}, "VeyronPhone"); err != nil {
t.Fatalf("updateStoreAndSync failed with err %v", err)
}
- if st.resolvVal.Mutation.PriorVersion != storage.NoVersion || st.resolvVal.Mutation.Version != 3 {
+ if st.resolvVal.Mutation.PriorVersion != raw.NoVersion || st.resolvVal.Mutation.Version != 3 {
t.Errorf("Mutation generation is not accurate %v", st)
}
if s.log.head.Curgen != 1 || s.log.head.Curlsn != 0 || s.log.head.Curorder != 1 {
@@ -338,7 +338,7 @@
// Verify transaction state.
objs := []string{"123", "456", "789"}
objids := make([]storage.ID, 3)
- maxVers := []storage.Version{3, 2, 4}
+ maxVers := []raw.Version{3, 2, 4}
txVers := map[string]struct{}{
"123-2": struct{}{},
"123-3": struct{}{},
@@ -352,7 +352,7 @@
if err != nil {
t.Errorf("Could not create objid %v", err)
}
- for i := storage.Version(1); i <= storage.Version(maxVers[pos]); i++ {
+ for i := raw.Version(1); i <= raw.Version(maxVers[pos]); i++ {
node, err := s.dag.getNode(objids[pos], i)
if err != nil {
t.Errorf("cannot find dag node for object %d %v: %s", objids[pos], i, err)
@@ -369,7 +369,7 @@
}
// Verify transaction state for the first transaction.
- node, err := s.dag.getNode(objids[0], storage.Version(2))
+ node, err := s.dag.getNode(objids[0], raw.Version(2))
if err != nil {
t.Errorf("cannot find dag node for object %d v1: %s", objids[0], err)
}
@@ -381,9 +381,9 @@
t.Errorf("cannot find transaction for id %v: %s", node.TxID, err)
}
expTxMap := dagTxMap{
- objids[0]: storage.Version(2),
- objids[1]: storage.Version(1),
- objids[2]: storage.Version(1),
+ objids[0]: raw.Version(2),
+ objids[1]: raw.Version(1),
+ objids[2]: raw.Version(1),
}
if !reflect.DeepEqual(txMap, expTxMap) {
t.Errorf("Data mismatch for txid %v txmap %v instead of %v",
@@ -391,7 +391,7 @@
}
// Verify transaction state for the second transaction.
- node, err = s.dag.getNode(objids[0], storage.Version(3))
+ node, err = s.dag.getNode(objids[0], raw.Version(3))
if err != nil {
t.Errorf("cannot find dag node for object %d v1: %s", objids[0], err)
}
@@ -403,8 +403,8 @@
t.Errorf("cannot find transaction for id %v: %s", node.TxID, err)
}
expTxMap = dagTxMap{
- objids[0]: storage.Version(3),
- objids[1]: storage.Version(2),
+ objids[0]: raw.Version(3),
+ objids[1]: raw.Version(2),
}
if !reflect.DeepEqual(txMap, expTxMap) {
t.Errorf("Data mismatch for txid %v txmap %v instead of %v",
@@ -484,7 +484,7 @@
t.Errorf("Data mismatch in log record %v", curRec)
}
// Verify DAG state.
- if _, err := s.dag.getNode(objid, storage.Version(i+1)); err != nil {
+ if _, err := s.dag.getNode(objid, raw.Version(i+1)); err != nil {
t.Errorf("GetNode() can not find object %d %d in DAG, err %v", objid, i, err)
}
}
@@ -498,7 +498,7 @@
if st.isConflict {
t.Errorf("Detected a conflict %v", st)
}
- if st.newHead != 3 || st.oldHead != storage.NoVersion {
+ if st.newHead != 3 || st.oldHead != raw.NoVersion {
t.Errorf("Conflict detection didn't succeed %v", st)
}
if err := s.hdlInitiator.resolveConflicts(); err != nil {
@@ -507,14 +507,14 @@
if err := s.hdlInitiator.updateStoreAndSync(nil, GenVector{}, minGens, GenVector{}, "VeyronPhone"); err != nil {
t.Fatalf("updateStoreAndSync failed with err %v", err)
}
- if st.resolvVal.Mutation.PriorVersion != storage.NoVersion || st.resolvVal.Mutation.Version != 3 {
+ if st.resolvVal.Mutation.PriorVersion != raw.NoVersion || st.resolvVal.Mutation.Version != 3 {
t.Errorf("Mutation generation is not accurate %v", st)
}
m, err := s.hdlInitiator.storeMutation(objid, st.resolvVal)
if err != nil {
t.Errorf("Could not translate mutation %v", err)
}
- if m.Version != storage.NoVersion || m.PriorVersion != storage.NoVersion {
+ if m.Version != raw.NoVersion || m.PriorVersion != raw.NoVersion {
t.Errorf("Data mismatch in mutation translation %v", m)
}
if s.log.head.Curgen != 1 || s.log.head.Curlsn != 0 || s.log.head.Curorder != 1 {
@@ -525,17 +525,17 @@
if head, err := s.dag.getHead(objid); err != nil || head != 3 {
t.Errorf("Invalid object %d head in DAG %s, err %v", objid, head, err)
}
- node, err := s.dag.getNode(objid, storage.Version(3))
+ node, err := s.dag.getNode(objid, raw.Version(3))
if err != nil {
t.Errorf("cannot find dag node for object %d v3: %s", objid, err)
}
if !node.Deleted {
t.Errorf("deleted node not found for object %d v3", objid)
}
- if !s.dag.hasDeletedDescendant(objid, storage.Version(2)) {
+ if !s.dag.hasDeletedDescendant(objid, raw.Version(2)) {
t.Errorf("link to deleted node not found for object %d from v2", objid)
}
- if !s.dag.hasDeletedDescendant(objid, storage.Version(1)) {
+ if !s.dag.hasDeletedDescendant(objid, raw.Version(1)) {
t.Errorf("link to deleted node not found for object %d from v1", objid)
}
}
@@ -603,7 +603,7 @@
}
objs := []string{"123", "456"}
- newHeads := []storage.Version{6, 2}
+ newHeads := []raw.Version{6, 2}
conflicts := []bool{false, true}
for pos, o := range objs {
objid, err := strToObjID(o)
@@ -634,24 +634,24 @@
if st.oldHead != 3 {
t.Errorf("Conflict detection didn't succeed for obj123 %v", st)
}
- if m.Version != storage.NoVersion || m.PriorVersion != 3 {
+ if m.Version != raw.NoVersion || m.PriorVersion != 3 {
t.Errorf("Data mismatch in mutation translation for obj123 %v", m)
}
// Test echo back from watch for these mutations.
- if err := s.log.processWatchRecord(objid, 0, storage.Version(3), &LogValue{}, NoTxID); err != nil {
+ if err := s.log.processWatchRecord(objid, 0, raw.Version(3), &LogValue{}, NoTxID); err != nil {
t.Errorf("Echo processing from watch failed %v", err)
}
}
if pos == 1 {
- if st.oldHead == storage.NoVersion {
+ if st.oldHead == raw.NoVersion {
t.Errorf("Conflict detection didn't succeed for obj456 %v", st)
}
- if m.Version != 2 || m.PriorVersion != storage.NoVersion {
+ if m.Version != 2 || m.PriorVersion != raw.NoVersion {
t.Errorf("Data mismatch in mutation translation for obj456 %v", m)
}
// Test echo back from watch for these mutations.
- if err := s.log.processWatchRecord(objid, storage.Version(2), 0, &LogValue{}, NoTxID); err != nil {
+ if err := s.log.processWatchRecord(objid, raw.Version(2), 0, &LogValue{}, NoTxID); err != nil {
t.Errorf("Echo processing from watch failed %v", err)
}
}
@@ -718,7 +718,7 @@
}
// Check all log records.
for _, devid := range []DeviceID{"VeyronPhone", "VeyronTab"} {
- v := storage.Version(1)
+ v := raw.Version(1)
for i := LSN(0); i < 3; i++ {
curRec, err := s.log.getLogRec(devid, GenID(1), i)
if err != nil || curRec == nil {
@@ -832,7 +832,7 @@
lcount := []LSN{3, 4}
// Check all log records.
for index, devid := range []DeviceID{"VeyronPhone", "VeyronTab"} {
- v := storage.Version(1)
+ v := raw.Version(1)
for i := LSN(0); i < lcount[index]; i++ {
curRec, err := s.log.getLogRec(devid, GenID(1), i)
if err != nil || curRec == nil {
@@ -954,7 +954,7 @@
// Check all log records.
lcount := []LSN{2, 4, 1}
for index, devid := range []DeviceID{"VeyronPhone", "VeyronTab", "VeyronLaptop"} {
- v := storage.Version(1)
+ v := raw.Version(1)
for i := LSN(0); i < lcount[index]; i++ {
curRec, err := s.log.getLogRec(devid, GenID(1), i)
if err != nil || curRec == nil {
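The initiator and log tests above now key their expected DAG and transaction state by raw.Version. Below is a minimal, self-contained sketch of the expTxMap-style comparison pattern they rely on; the ID and Version types are local stand-ins for storage.ID and raw.Version rather than imports of the real packages.

```go
package main

import (
	"fmt"
	"reflect"
)

// Local stand-ins mirroring veyron2/storage.ID and veyron/services/store/raw.Version.
type ID [16]byte
type Version uint64

// txMap mirrors the dagTxMap shape used by the sync tests: object ID -> version
// recorded as part of a transaction.
type txMap map[ID]Version

func main() {
	var objA, objB ID
	objA[0], objB[0] = 0x01, 0x02

	got := txMap{objA: Version(2), objB: Version(1)}
	want := txMap{objA: Version(2), objB: Version(1)}

	// reflect.DeepEqual compares map contents, as the expTxMap checks above do.
	if !reflect.DeepEqual(got, want) {
		fmt.Printf("Data mismatch: txmap %v instead of %v\n", got, want)
		return
	}
	fmt.Println("transaction maps match")
}
```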
diff --git a/runtimes/google/vsync/replay_test.go b/runtimes/google/vsync/replay_test.go
index 26df5fa..1904f31 100644
--- a/runtimes/google/vsync/replay_test.go
+++ b/runtimes/google/vsync/replay_test.go
@@ -15,6 +15,8 @@
"strconv"
"strings"
+ "veyron/services/store/raw"
+
"veyron2/storage"
)
@@ -29,8 +31,8 @@
type syncCommand struct {
cmd int
objID storage.ID
- version storage.Version
- parents []storage.Version
+ version raw.Version
+ parents []raw.Version
logrec string
devID DeviceID
genVec GenVector
@@ -54,12 +56,12 @@
return objID, nil
}
-func strToVersion(verStr string) (storage.Version, error) {
+func strToVersion(verStr string) (raw.Version, error) {
ver, err := strconv.ParseUint(verStr, 10, 64)
if err != nil {
return 0, err
}
- return storage.Version(ver), nil
+ return raw.Version(ver), nil
}
func strToGenID(genIDStr string) (GenID, error) {
@@ -100,7 +102,7 @@
if err != nil {
return nil, fmt.Errorf("%s:%d: invalid version: %s", file, lineno, args[2])
}
- var parents []storage.Version
+ var parents []raw.Version
for i := 3; i <= 4; i++ {
if args[i] != "" {
pver, err := strToVersion(args[i])
@@ -173,7 +175,7 @@
return nil, fmt.Errorf("%s:%d: invalid parent (to-node) version: %s", file, lineno, args[3])
}
- cmd := syncCommand{version: version, parents: []storage.Version{parent}, logrec: args[5]}
+ cmd := syncCommand{version: version, parents: []raw.Version{parent}, logrec: args[5]}
if args[0] == "linkl" {
cmd.cmd = linkLocal
} else {
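strToVersion in replay_test.go is a thin wrapper over strconv.ParseUint that now returns raw.Version. A self-contained sketch of the same conversion, with Version as a local stand-in for raw.Version:

```go
package main

import (
	"fmt"
	"strconv"
)

// Version is a local stand-in for veyron/services/store/raw.Version (a uint64).
type Version uint64

// strToVersion mirrors the replay_test.go helper: parse a base-10 string
// into a Version, propagating any parse error.
func strToVersion(verStr string) (Version, error) {
	ver, err := strconv.ParseUint(verStr, 10, 64)
	if err != nil {
		return 0, err
	}
	return Version(ver), nil
}

func main() {
	v, err := strToVersion("42")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println("parsed version:", v) // parsed version: 42
}
```

Parsing into uint64 first keeps the conversion lossless, since raw.Version is itself a uint64.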
diff --git a/runtimes/google/vsync/util_test.go b/runtimes/google/vsync/util_test.go
index d0bd35d..b669ffe 100644
--- a/runtimes/google/vsync/util_test.go
+++ b/runtimes/google/vsync/util_test.go
@@ -8,8 +8,6 @@
"time"
"veyron/services/store/raw"
-
- "veyron2/storage"
)
// getFileName generates a filename for a temporary (per unit test) kvdb file.
@@ -94,7 +92,7 @@
for _, cmd := range cmds {
switch cmd.cmd {
case addLocal:
- parent := storage.NoVersion
+ parent := raw.NoVersion
if cmd.parents != nil {
parent = cmd.parents[0]
}
diff --git a/runtimes/google/vsync/vsync.vdl b/runtimes/google/vsync/vsync.vdl
index f6d8b9b..e5ac7e9 100644
--- a/runtimes/google/vsync/vsync.vdl
+++ b/runtimes/google/vsync/vsync.vdl
@@ -45,8 +45,8 @@
RecType byte
// Object related information.
ObjID storage.ID
- CurVers storage.Version
- Parents []storage.Version
+ CurVers raw.Version
+ Parents []raw.Version
Value LogValue
}
diff --git a/runtimes/google/vsync/vsync.vdl.go b/runtimes/google/vsync/vsync.vdl.go
index 9e33c8a..21502ba 100644
--- a/runtimes/google/vsync/vsync.vdl.go
+++ b/runtimes/google/vsync/vsync.vdl.go
@@ -53,8 +53,8 @@
RecType byte
// Object related information.
ObjID storage.ID
- CurVers storage.Version
- Parents []storage.Version
+ CurVers raw.Version
+ Parents []raw.Version
Value LogValue
}
@@ -350,7 +350,7 @@
}
result.TypeDefs = []_gen_vdlutil.Any{
- _gen_wiretype.NamedPrimitiveType{Type: 0x3, Name: "veyron/runtimes/google/vsync.DeviceID", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x35, Name: "veyron/runtimes/google/vsync.GenID", Tags: []string(nil)}, _gen_wiretype.MapType{Key: 0x41, Elem: 0x42, Name: "veyron/runtimes/google/vsync.GenVector", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x1, Name: "error", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x35, Name: "veyron/runtimes/google/vsync.LSN", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x32, Name: "byte", Tags: []string(nil)}, _gen_wiretype.ArrayType{Elem: 0x46, Len: 0x10, Name: "veyron2/storage.ID", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x35, Name: "veyron2/storage.Version", Tags: []string(nil)}, _gen_wiretype.SliceType{Elem: 0x48, Name: "", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x1, Name: "anydata", Tags: []string(nil)}, _gen_wiretype.StructType{
+ _gen_wiretype.NamedPrimitiveType{Type: 0x3, Name: "veyron/runtimes/google/vsync.DeviceID", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x35, Name: "veyron/runtimes/google/vsync.GenID", Tags: []string(nil)}, _gen_wiretype.MapType{Key: 0x41, Elem: 0x42, Name: "veyron/runtimes/google/vsync.GenVector", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x1, Name: "error", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x35, Name: "veyron/runtimes/google/vsync.LSN", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x32, Name: "byte", Tags: []string(nil)}, _gen_wiretype.ArrayType{Elem: 0x46, Len: 0x10, Name: "veyron2/storage.ID", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x35, Name: "veyron/services/store/raw.Version", Tags: []string(nil)}, _gen_wiretype.SliceType{Elem: 0x48, Name: "", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x1, Name: "anydata", Tags: []string(nil)}, _gen_wiretype.StructType{
[]_gen_wiretype.FieldType{
_gen_wiretype.FieldType{Type: 0x3, Name: "Name"},
_gen_wiretype.FieldType{Type: 0x47, Name: "ID"},
diff --git a/runtimes/google/vsync/watcher_test.go b/runtimes/google/vsync/watcher_test.go
index f6848d7..835b3eb 100644
--- a/runtimes/google/vsync/watcher_test.go
+++ b/runtimes/google/vsync/watcher_test.go
@@ -392,7 +392,7 @@
oidC := storage.ID{0x70, 0xff, 0x65, 0xec, 0xf, 0x82, 0x5f, 0x44, 0xb6, 0x9f, 0x89, 0x5e, 0xea, 0x75, 0x9d, 0x71}
oids := []storage.ID{oidRoot, oidA, oidC}
- heads := []storage.Version{0x4d65822107fcfd52, 0xa858149c6e2d1000, 0x380704bb7b4d7c03}
+ heads := []raw.Version{0x4d65822107fcfd52, 0xa858149c6e2d1000, 0x380704bb7b4d7c03}
for i, oid := range oids {
expHead := heads[i]
@@ -409,7 +409,7 @@
if err != nil {
t.Errorf("cannot find head node for object %d: %s", oidB, err)
}
- if headB == storage.NoVersion || headB == storage.Version(0x55104dc76695721d) {
+ if headB == raw.NoVersion || headB == raw.Version(0x55104dc76695721d) {
t.Errorf("wrong head for object B %d: %d ", oidB, headB)
}
@@ -427,8 +427,8 @@
}
expTxMap := dagTxMap{
oidRoot: heads[0],
- oidA: storage.Version(0x57e9d1860d1d68d8),
- oidB: storage.Version(0x55104dc76695721d),
+ oidA: raw.Version(0x57e9d1860d1d68d8),
+ oidB: raw.Version(0x55104dc76695721d),
}
if !reflect.DeepEqual(txMap, expTxMap) {
t.Errorf("Data mismatch for txid %v txmap %v instead of %v",
@@ -448,7 +448,7 @@
t.Errorf("cannot find transaction for id %v: %s", node.TxID, err)
}
expTxMap = dagTxMap{
- oidA: storage.Version(0x365a858149c6e2d1),
+ oidA: raw.Version(0x365a858149c6e2d1),
oidC: heads[2],
}
if !reflect.DeepEqual(txMap, expTxMap) {
@@ -485,7 +485,7 @@
if !node.Deleted {
t.Errorf("deleted node not found for object %d %v: %s", oidB, headB, err)
}
- if !s.dag.hasDeletedDescendant(oidB, storage.Version(0x55104dc76695721d)) {
+ if !s.dag.hasDeletedDescendant(oidB, raw.Version(0x55104dc76695721d)) {
t.Errorf("link to deleted node not found for object %d %v: %s", oidB, headB, err)
}
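The watcher tests above lean on hasDeletedDescendant to confirm that earlier versions of object B link forward to its deletion. The DAG's real traversal isn't shown in this diff, so the following is only an illustrative sketch of such a check over an inverted (child-edge) map, with Version as a local stand-in for raw.Version.

```go
package main

import "fmt"

// Version is a local stand-in for raw.Version.
type Version uint64

// node is a pared-down DAG node: child edges plus a deleted flag. The real
// DAG stores parent edges; this inverted form only illustrates the kind of
// deleted-descendant check the watcher tests exercise.
type node struct {
	children []Version
	deleted  bool
}

// hasDeletedDescendant reports whether any node reachable from v via child
// edges is marked deleted.
func hasDeletedDescendant(nodes map[Version]*node, v Version) bool {
	n, ok := nodes[v]
	if !ok {
		return false
	}
	for _, c := range n.children {
		if child, ok := nodes[c]; ok && (child.deleted || hasDeletedDescendant(nodes, c)) {
			return true
		}
	}
	return false
}

func main() {
	// v1 -> v2 -> v3, where v3 records a delete.
	nodes := map[Version]*node{
		1: {children: []Version{2}},
		2: {children: []Version{3}},
		3: {deleted: true},
	}
	fmt.Println(hasDeletedDescendant(nodes, 1)) // true
	fmt.Println(hasDeletedDescendant(nodes, 3)) // false
}
```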
diff --git a/services/store/memstore/state/cell.go b/services/store/memstore/state/cell.go
index f0f9c32..0422713 100644
--- a/services/store/memstore/state/cell.go
+++ b/services/store/memstore/state/cell.go
@@ -4,6 +4,7 @@
"bytes"
"veyron/services/store/memstore/refs"
+ "veyron/services/store/raw"
// TODO(cnicolaou): mv lib/functional into veyron somewhere.
"veyron/runtimes/google/lib/functional"
@@ -39,7 +40,7 @@
buffered bool
// version is the version number.
- Version storage.Version
+ Version raw.Version
// TODO(jyh): Add stat info and attributes.
}
diff --git a/services/store/memstore/state/log.go b/services/store/memstore/state/log.go
index 68aea9c..79996b5 100644
--- a/services/store/memstore/state/log.go
+++ b/services/store/memstore/state/log.go
@@ -2,6 +2,7 @@
import (
"veyron/services/store/memstore/refs"
+ "veyron/services/store/raw"
"veyron2/storage"
"veyron2/verror"
@@ -39,7 +40,7 @@
ID storage.ID
Value interface{}
Dir []*storage.DEntry
- Version storage.Version
+ Version raw.Version
}
func (st *State) Write(enc *vom.Encoder) error {
diff --git a/services/store/memstore/state/mutable_snapshot.go b/services/store/memstore/state/mutable_snapshot.go
index 731d6ca..85b403c 100644
--- a/services/store/memstore/state/mutable_snapshot.go
+++ b/services/store/memstore/state/mutable_snapshot.go
@@ -48,7 +48,7 @@
// deletions is the current set of deletions. The version is at
// the point of deletion.
- deletions map[storage.ID]storage.Version
+ deletions map[storage.ID]raw.Version
}
// Mutations represents a set of mutations to the state. This is used to
@@ -66,7 +66,7 @@
SetRootID bool
// Preconditions is the set of expected versions.
- Preconditions map[storage.ID]storage.Version
+ Preconditions map[storage.ID]raw.Version
// Delta is the set of changes.
Delta map[storage.ID]*Mutation
@@ -76,13 +76,13 @@
// precondition, where *if* the value exists, it should have the specified
// version. The target snapshot is allowed to perform garbage collection
// too, so the deleted value is not required to exist.
- Deletions map[storage.ID]storage.Version
+ Deletions map[storage.ID]raw.Version
}
// mutation is an update to a single value in the state.
type Mutation struct {
// Postcondition is the version after the mutation.
- Postcondition storage.Version
+ Postcondition raw.Version
// Value is the new value.
Value interface{}
@@ -116,9 +116,9 @@
// reset resets the Mutations state.
func (m *Mutations) reset() {
- m.Preconditions = make(map[storage.ID]storage.Version)
+ m.Preconditions = make(map[storage.ID]raw.Version)
m.Delta = make(map[storage.ID]*Mutation)
- m.Deletions = make(map[storage.ID]storage.Version)
+ m.Deletions = make(map[storage.ID]raw.Version)
}
// addPrecondition adds a precondition if it does not already exist.
@@ -146,7 +146,7 @@
snapshot: newSnapshot(admin),
gcRoots: make(map[storage.ID]struct{}),
mutations: newMutations(),
- deletions: make(map[storage.ID]storage.Version),
+ deletions: make(map[storage.ID]raw.Version),
}
}
@@ -210,7 +210,7 @@
} else {
mu.Preconditions[c.ID] = c.Version
m = &Mutation{
- Postcondition: storage.NewVersion(),
+ Postcondition: raw.NewVersion(),
Value: c.Value,
Dir: d,
refs: c.refs,
@@ -234,7 +234,7 @@
Value: v,
Dir: refs.EmptyDir,
inRefs: refs.Empty,
- Version: storage.NoVersion,
+ Version: raw.NoVersion,
}
c.setRefs()
if !sn.refsExist(c.refs) {
@@ -458,7 +458,7 @@
return errDuplicatePutMutation
}
// If the object has no version, it was deleted.
- if extmu.Version == storage.NoVersion {
+ if extmu.Version == raw.NoVersion {
mus.Deletions[id] = extmu.PriorVersion
if extmu.IsRoot {
mus.SetRootID = true
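The Mutations maps in mutable_snapshot.go keep their existing shape, now keyed by raw.Version: a mutation whose post-version is NoVersion is recorded as a deletion under its prior version, otherwise it becomes a precondition on the prior version. A small self-contained sketch of that branch, with local stand-ins for storage.ID and raw.Version:

```go
package main

import "fmt"

// Stand-ins for veyron2/storage.ID and veyron/services/store/raw.Version.
type ID [16]byte
type Version uint64

const NoVersion = Version(0)

// mutation carries just the fields the deletion check needs.
type mutation struct {
	ID           ID
	PriorVersion Version
	Version      Version
}

// mutations mirrors the Preconditions/Deletions maps in memstore's Mutations.
type mutations struct {
	Preconditions map[ID]Version
	Deletions     map[ID]Version
}

// apply records a deletion when the post-version is NoVersion; otherwise it
// treats the mutation as an update with a precondition on the prior version.
func (m *mutations) apply(mu mutation) {
	if mu.Version == NoVersion {
		m.Deletions[mu.ID] = mu.PriorVersion
		return
	}
	m.Preconditions[mu.ID] = mu.PriorVersion
}

func main() {
	m := &mutations{
		Preconditions: make(map[ID]Version),
		Deletions:     make(map[ID]Version),
	}
	var id ID
	id[0] = 0x7a

	m.apply(mutation{ID: id, PriorVersion: 5, Version: NoVersion}) // a delete
	fmt.Println("deletions recorded:", len(m.Deletions))           // 1
}
```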
diff --git a/services/store/memstore/state/state.go b/services/store/memstore/state/state.go
index c398b86..e6212a8 100644
--- a/services/store/memstore/state/state.go
+++ b/services/store/memstore/state/state.go
@@ -4,6 +4,7 @@
"time"
"veyron/services/store/memstore/refs"
+ "veyron/services/store/raw"
"veyron2/security"
"veyron2/storage"
@@ -70,7 +71,7 @@
mu.Timestamp = ts
mu.Deletions = st.snapshot.deletions
st.timestamp = ts
- st.snapshot.deletions = make(map[storage.ID]storage.Version)
+ st.snapshot.deletions = make(map[storage.ID]raw.Version)
return &mu
}
diff --git a/services/store/memstore/store_test.go b/services/store/memstore/store_test.go
index 0800ff5..bd1da25 100644
--- a/services/store/memstore/store_test.go
+++ b/services/store/memstore/store_test.go
@@ -141,8 +141,8 @@
// Add /, /a, /a/b
id1, id2, id3 := storage.NewID(), storage.NewID(), storage.NewID()
- pre1, pre2, pre3 := storage.NoVersion, storage.NoVersion, storage.NoVersion
- post1, post2, post3 := storage.NewVersion(), storage.NewVersion(), storage.NewVersion()
+ pre1, pre2, pre3 := raw.NoVersion, raw.NoVersion, raw.NoVersion
+ post1, post2, post3 := raw.NewVersion(), raw.NewVersion(), raw.NewVersion()
v1, v2, v3 := "v1", "v2", "v3"
storetesting.PutMutationsBatch(t, rootPublicID, st.PutMutations, []raw.Mutation{
@@ -178,7 +178,7 @@
// Remove /a/b
pre1, pre2, pre3 = post1, post2, post3
- post2 = storage.NewVersion()
+ post2 = raw.NewVersion()
storetesting.PutMutationsBatch(t, rootPublicID, st.PutMutations, []raw.Mutation{
raw.Mutation{
@@ -194,7 +194,7 @@
expectNotExists(t, st, nil, "a/b")
// Garbage-collect /a/b
- post3 = storage.NoVersion
+ post3 = raw.NoVersion
storetesting.PutMutationsBatch(t, rootPublicID, st.PutMutations, []raw.Mutation{
raw.Mutation{
@@ -210,7 +210,7 @@
// Remove /
pre1, pre2, pre3 = post1, post2, post3
- post1 = storage.NoVersion
+ post1 = raw.NoVersion
storetesting.PutMutationsBatch(t, rootPublicID, st.PutMutations, []raw.Mutation{
raw.Mutation{
@@ -225,7 +225,7 @@
expectNotExists(t, st, nil, "a/b")
// Garbage-collect /a
- post2 = storage.NoVersion
+ post2 = raw.NoVersion
storetesting.PutMutationsBatch(t, rootPublicID, st.PutMutations, []raw.Mutation{
raw.Mutation{
@@ -255,8 +255,8 @@
// Add /, /a
id1, id2 := storage.NewID(), storage.NewID()
- pre1, pre2 := storage.NoVersion, storage.NoVersion
- post1, post2 := storage.NewVersion(), storage.NewVersion()
+ pre1, pre2 := raw.NoVersion, raw.NoVersion
+ post1, post2 := raw.NewVersion(), raw.NewVersion()
v1, v2 := "v1", "v2"
storetesting.PutMutationsBatch(t, rootPublicID, st.PutMutations, []raw.Mutation{
@@ -282,8 +282,8 @@
expectValue(t, st, nil, "/a", v2)
// Attempt to update /a with a bad precondition
- pre2 = storage.NewVersion()
- post2 = storage.NewVersion()
+ pre2 = raw.NewVersion()
+ post2 = raw.NewVersion()
v2 = "v4"
s := storetesting.PutMutations(rootPublicID, st.PutMutations)
@@ -318,16 +318,16 @@
s := storetesting.PutMutations(rootPublicID, st.PutMutations)
s.SendStream().Send(raw.Mutation{
ID: id,
- PriorVersion: storage.NoVersion,
- Version: storage.NewVersion(),
+ PriorVersion: raw.NoVersion,
+ Version: raw.NewVersion(),
IsRoot: true,
Value: "v1",
Dir: empty,
})
s.SendStream().Send(raw.Mutation{
ID: id,
- PriorVersion: storage.NoVersion,
- Version: storage.NewVersion(),
+ PriorVersion: raw.NoVersion,
+ Version: raw.NewVersion(),
IsRoot: true,
Value: "v2",
Dir: empty,
@@ -353,8 +353,8 @@
s := storetesting.PutMutations(rootPublicID, st.PutMutations)
s.SendStream().Send(raw.Mutation{
ID: storage.NewID(),
- PriorVersion: storage.NoVersion,
- Version: storage.NewVersion(),
+ PriorVersion: raw.NoVersion,
+ Version: raw.NewVersion(),
IsRoot: true,
Value: "v1",
Dir: empty,
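The store tests above rotate prior/post versions through PutMutations and expect a stale prior version to be rejected. Here is a self-contained sketch of that precondition rule, using local stand-ins for the store, storage.ID, and raw.Version; the real memstore logic is more involved, so this only illustrates the version-matching behavior the tests exercise.

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-ins for storage.ID and raw.Version / raw.NoVersion.
type ID [16]byte
type Version uint64

const NoVersion = Version(0)

var errBadPrecondition = errors.New("precondition failed: version mismatch")

// store maps each object to its current version; absence means NoVersion.
type store struct {
	versions map[ID]Version
}

// put applies a mutation only if the caller's prior version matches the
// store's current version, mirroring the "bad precondition" case above.
func (s *store) put(id ID, prior, post Version) error {
	cur, ok := s.versions[id]
	if !ok {
		cur = NoVersion
	}
	if cur != prior {
		return errBadPrecondition
	}
	if post == NoVersion {
		delete(s.versions, id) // a post-version of NoVersion is a delete
		return nil
	}
	s.versions[id] = post
	return nil
}

func main() {
	s := &store{versions: make(map[ID]Version)}
	var id ID
	id[0] = 0x01

	fmt.Println(s.put(id, NoVersion, 7)) // <nil>: new entry, prior is NoVersion
	fmt.Println(s.put(id, 3, 9))         // error: stale prior version
	fmt.Println(s.put(id, 7, NoVersion)) // <nil>: delete with matching prior
}
```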
diff --git a/services/store/memstore/testing/util.go b/services/store/memstore/testing/util.go
index 293bc75..a030884 100644
--- a/services/store/memstore/testing/util.go
+++ b/services/store/memstore/testing/util.go
@@ -352,7 +352,7 @@
}}
}
-func ExpectMutationExists(t *testing.T, changes []types.Change, id storage.ID, pre, post storage.Version, isRoot bool, value string, dir []storage.DEntry) {
+func ExpectMutationExists(t *testing.T, changes []types.Change, id storage.ID, pre, post raw.Version, isRoot bool, value string, dir []storage.DEntry) {
change := findMutation(t, changes, id)
if change.State != types.Exists {
t.Fatalf("Expected id to exist: %v", id)
@@ -373,7 +373,7 @@
expectDirEquals(t, cv.Dir, dir)
}
-func ExpectMutationDoesNotExist(t *testing.T, changes []types.Change, id storage.ID, pre storage.Version, isRoot bool) {
+func ExpectMutationDoesNotExist(t *testing.T, changes []types.Change, id storage.ID, pre raw.Version, isRoot bool) {
change := findMutation(t, changes, id)
if change.State != types.DoesNotExist {
t.Fatalf("Expected id to not exist: %v", id)
@@ -382,7 +382,7 @@
if cv.PriorVersion != pre {
t.Fatalf("Expected PriorVersion to be %v, but was: %v", pre, cv.PriorVersion)
}
- if cv.Version != storage.NoVersion {
+ if cv.Version != raw.NoVersion {
t.Fatalf("Expected Version to be NoVersion, but was: %v", cv.Version)
}
if cv.IsRoot != isRoot {
diff --git a/services/store/memstore/watch/raw_processor.go b/services/store/memstore/watch/raw_processor.go
index 3de157d..50af5ac 100644
--- a/services/store/memstore/watch/raw_processor.go
+++ b/services/store/memstore/watch/raw_processor.go
@@ -25,7 +25,7 @@
// rootID is the id of the root object after processing a change.
rootID storage.ID
// rootVersion is the version of the store root after processing a change.
- rootVersion storage.Version
+ rootVersion raw.Version
// preparedDeletions is the set of ids for which deletion changes have been
// sent by watch, but deleted entries have not been processed from the log.
// This set consists of deleted store roots, because
@@ -79,7 +79,7 @@
}
value := &raw.Mutation{
ID: id,
- PriorVersion: storage.NoVersion,
+ PriorVersion: raw.NoVersion,
Version: cell.Version,
IsRoot: isRoot,
Value: cell.Value,
@@ -116,7 +116,7 @@
value := &raw.Mutation{
ID: p.rootID,
PriorVersion: p.rootVersion,
- Version: storage.NoVersion,
+ Version: raw.NoVersion,
IsRoot: true,
}
// TODO(tilaks): don't clone value.
@@ -128,7 +128,7 @@
p.preparedDeletions[p.rootID] = true
p.rootID = nullID
- p.rootVersion = storage.NoVersion
+ p.rootVersion = raw.NoVersion
}
}
@@ -168,7 +168,7 @@
value := &raw.Mutation{
ID: id,
PriorVersion: precondition,
- Version: storage.NoVersion,
+ Version: raw.NoVersion,
IsRoot: false,
}
// TODO(tilaks): don't clone value.
diff --git a/services/store/memstore/watch/raw_processor_test.go b/services/store/memstore/watch/raw_processor_test.go
index 895baae..f1a94bd 100644
--- a/services/store/memstore/watch/raw_processor_test.go
+++ b/services/store/memstore/watch/raw_processor_test.go
@@ -5,8 +5,7 @@
"veyron/services/store/memstore"
watchtesting "veyron/services/store/memstore/testing"
-
- "veyron2/storage"
+ "veyron/services/store/raw"
)
func TestRawProcessState(t *testing.T) {
@@ -47,8 +46,8 @@
// 1) Contains / with value val1 and implicit directory entry /a
// 2) Contains /a with value val2
changes := expectState(t, log, processor, 2)
- watchtesting.ExpectMutationExists(t, changes, id1, storage.NoVersion, post1, true, "val1", watchtesting.DirOf("a", id2))
- watchtesting.ExpectMutationExists(t, changes, id2, storage.NoVersion, post2, false, "val2", watchtesting.EmptyDir)
+ watchtesting.ExpectMutationExists(t, changes, id1, raw.NoVersion, post1, true, "val1", watchtesting.DirOf("a", id2))
+ watchtesting.ExpectMutationExists(t, changes, id2, raw.NoVersion, post2, false, "val2", watchtesting.EmptyDir)
}
func TestRawProcessTransactionAddRemove(t *testing.T) {
@@ -77,9 +76,9 @@
// 2) Adds /a with value val2 and implicit directory entry /a/b
// 3) Adds /a/b with value val3
changes := expectTransaction(t, log, processor, 3)
- watchtesting.ExpectMutationExists(t, changes, id1, storage.NoVersion, post1, true, "val1", watchtesting.DirOf("a", id2))
- watchtesting.ExpectMutationExists(t, changes, id2, storage.NoVersion, post2, false, "val2", watchtesting.DirOf("b", id3))
- watchtesting.ExpectMutationExists(t, changes, id3, storage.NoVersion, post3, false, "val3", watchtesting.EmptyDir)
+ watchtesting.ExpectMutationExists(t, changes, id1, raw.NoVersion, post1, true, "val1", watchtesting.DirOf("a", id2))
+ watchtesting.ExpectMutationExists(t, changes, id2, raw.NoVersion, post2, false, "val2", watchtesting.DirOf("b", id3))
+ watchtesting.ExpectMutationExists(t, changes, id3, raw.NoVersion, post3, false, "val3", watchtesting.EmptyDir)
// Next transaction, remove /a/b
tr = memstore.NewTransaction()
diff --git a/services/store/memstore/watch/watcher_test.go b/services/store/memstore/watch/watcher_test.go
index 6e5172c..6861033 100644
--- a/services/store/memstore/watch/watcher_test.go
+++ b/services/store/memstore/watch/watcher_test.go
@@ -50,7 +50,7 @@
if change.Continued {
t.Error("Expected change to be the last in this transaction")
}
- watchtesting.ExpectMutationExists(t, changes, id1, storage.NoVersion, post1, true, "val1", watchtesting.EmptyDir)
+ watchtesting.ExpectMutationExists(t, changes, id1, raw.NoVersion, post1, true, "val1", watchtesting.EmptyDir)
// Put /a
tr = memstore.NewTransaction()
@@ -77,7 +77,7 @@
t.Error("Expected change to be the last in this transaction")
}
watchtesting.ExpectMutationExists(t, changes, id1, pre1, post1, true, "val1", watchtesting.DirOf("a", id2))
- watchtesting.ExpectMutationExists(t, changes, id2, storage.NoVersion, post2, false, "val2", watchtesting.EmptyDir)
+ watchtesting.ExpectMutationExists(t, changes, id2, raw.NoVersion, post2, false, "val2", watchtesting.EmptyDir)
}
func TestWatchGlob(t *testing.T) {
@@ -289,7 +289,7 @@
if change.Continued {
t.Error("Expected change to be the last in this transaction")
}
- watchtesting.ExpectMutationExists(t, changes, id1, storage.NoVersion, post11, true, "val1", watchtesting.EmptyDir)
+ watchtesting.ExpectMutationExists(t, changes, id1, raw.NoVersion, post11, true, "val1", watchtesting.EmptyDir)
// Check that watch detects the changes in the state and the transaction.
if !rStream.Advance() {
@@ -307,7 +307,7 @@
t.Error("Expected change to be the last in this transaction")
}
watchtesting.ExpectMutationExists(t, changes, id1, pre21, post21, true, "val1", watchtesting.DirOf("a", id2))
- watchtesting.ExpectMutationExists(t, changes, id2, storage.NoVersion, post22, false, "val2", watchtesting.EmptyDir)
+ watchtesting.ExpectMutationExists(t, changes, id2, raw.NoVersion, post22, false, "val2", watchtesting.EmptyDir)
}
func TestTransactionResumeMarker(t *testing.T) {
@@ -372,7 +372,7 @@
if change.Continued {
t.Error("Expected change to be the last in this transaction")
}
- watchtesting.ExpectMutationExists(t, changes, id1, storage.NoVersion, post11, true, "val1", watchtesting.EmptyDir)
+ watchtesting.ExpectMutationExists(t, changes, id1, raw.NoVersion, post11, true, "val1", watchtesting.EmptyDir)
if !rStream.Advance() {
t.Error("Advance() failed: %v", rStream.Err())
@@ -390,7 +390,7 @@
}
resumeMarker2 := change.ResumeMarker
watchtesting.ExpectMutationExists(t, changes, id1, pre21, post21, true, "val1", watchtesting.DirOf("a", id2))
- watchtesting.ExpectMutationExists(t, changes, id2, storage.NoVersion, post22, false, "val2", watchtesting.EmptyDir)
+ watchtesting.ExpectMutationExists(t, changes, id2, raw.NoVersion, post22, false, "val2", watchtesting.EmptyDir)
// Cancel the watch request.
ws.Cancel()
@@ -417,7 +417,7 @@
t.Error("Expected change to be the last in this transaction")
}
watchtesting.ExpectMutationExists(t, changes, id1, pre21, post21, true, "val1", watchtesting.DirOf("a", id2))
- watchtesting.ExpectMutationExists(t, changes, id2, storage.NoVersion, post22, false, "val2", watchtesting.EmptyDir)
+ watchtesting.ExpectMutationExists(t, changes, id2, raw.NoVersion, post22, false, "val2", watchtesting.EmptyDir)
}
func TestNowResumeMarker(t *testing.T) {
@@ -486,7 +486,7 @@
t.Error("Expected change to be the last in this transaction")
}
watchtesting.ExpectMutationExists(t, changes, id2, pre32, post32, false, "val2", watchtesting.DirOf("b", id3))
- watchtesting.ExpectMutationExists(t, changes, id3, storage.NoVersion, post33, false, "val3", watchtesting.EmptyDir)
+ watchtesting.ExpectMutationExists(t, changes, id3, raw.NoVersion, post33, false, "val3", watchtesting.EmptyDir)
}
func TestUnknownResumeMarkers(t *testing.T) {
diff --git a/services/store/raw/service.vdl b/services/store/raw/service.vdl
index 20f0cad..4af33bb 100644
--- a/services/store/raw/service.vdl
+++ b/services/store/raw/service.vdl
@@ -14,8 +14,18 @@
// The raw Store has Object name "<mount>/.store.raw", where <mount> is the
// Object name of the mount point.
RawStoreSuffix = ".store.raw"
+
+ // NoVersion means the entry is not present in the store.
+ NoVersion = Version(0)
)
+// Version identifies the value in the store for a key at some point in time.
+// The version is a numeric identifier that is globally unique within the space
+// of a single ID, meaning that if two stores contain an entry with the same ID
+// and version, then the entries represent the same thing, at the same point in
+// time (as agreed upon by the two stores).
+type Version uint64
+
// Mutation represents an update to an entry in the store, and contains enough
// information for a privileged service to replicate the update elsewhere.
type Mutation struct {
@@ -24,11 +34,11 @@
// The version of the entry immediately before the update. For new entries,
// the PriorVersion is NoVersion.
- PriorVersion storage.Version
+ PriorVersion Version
// The version of the entry immediately after the update. For deleted entries,
// the Version is NoVersion.
- Version storage.Version
+ Version Version
// IsRoot is true if
// 1) The entry was the store root immediately before being deleted, or
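The VDL comments pin down the NoVersion conventions: a freshly created entry carries PriorVersion == NoVersion, and a deleted entry carries Version == NoVersion. A small sketch of helpers that classify a mutation accordingly; the Mutation and Version types below are local stand-ins for the generated raw types.

```go
package main

import "fmt"

// Local stand-ins for the generated raw.Version and raw.Mutation types.
type Version uint64

const NoVersion = Version(0)

type Mutation struct {
	PriorVersion Version // NoVersion for newly created entries
	Version      Version // NoVersion for deleted entries
}

// isCreate reports whether the mutation introduces a new entry.
func isCreate(m Mutation) bool { return m.PriorVersion == NoVersion && m.Version != NoVersion }

// isDelete reports whether the mutation removes an existing entry.
func isDelete(m Mutation) bool { return m.PriorVersion != NoVersion && m.Version == NoVersion }

func main() {
	fmt.Println(isCreate(Mutation{PriorVersion: NoVersion, Version: 42})) // true
	fmt.Println(isDelete(Mutation{PriorVersion: 42, Version: NoVersion})) // true
}
```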
diff --git a/services/store/raw/service.vdl.go b/services/store/raw/service.vdl.go
index 9e9c6fb..0a277dd 100644
--- a/services/store/raw/service.vdl.go
+++ b/services/store/raw/service.vdl.go
@@ -18,6 +18,13 @@
_gen_wiretype "veyron2/wiretype"
)
+// Version identifies the value in the store for a key at some point in time.
+// The version is a numeric identifier that is globally unique within the space
+// of a single ID, meaning that if two stores contain an entry with the same ID
+// and version, then the entries represent the same thing, at the same point in
+// time (as agreed upon by the two stores).
+type Version uint64
+
// Mutation represents an update to an entry in the store, and contains enough
// information for a privileged service to replicate the update elsewhere.
type Mutation struct {
@@ -25,10 +32,10 @@
ID storage.ID
// The version of the entry immediately before the update. For new entries,
// the PriorVersion is NoVersion.
- PriorVersion storage.Version
+ PriorVersion Version
// The version of the entry immediately after the update. For deleted entries,
// the Version is NoVersion.
- Version storage.Version
+ Version Version
// IsRoot is true if
// 1) The entry was the store root immediately before being deleted, or
// 2) The entry is the store root immediately after the update.
@@ -51,6 +58,9 @@
// Object name of the mount point.
const RawStoreSuffix = ".store.raw"
+// NoVersion means the entry is not present in the store.
+const NoVersion = Version(0)
+
// TODO(bprosnitz) Remove this line once signatures are updated to use typevals.
// It corrects a bug where _gen_wiretype is unused in VDL packages where only bootstrap types are used on interfaces.
const _ = _gen_wiretype.TypeIDInvalid
@@ -514,7 +524,7 @@
_gen_wiretype.FieldType{Type: 0x47, Name: "Changes"},
},
"veyron2/services/watch/types.ChangeBatch", []string(nil)},
- _gen_wiretype.ArrayType{Elem: 0x41, Len: 0x10, Name: "veyron2/storage.ID", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x35, Name: "veyron2/storage.Version", Tags: []string(nil)}, _gen_wiretype.StructType{
+ _gen_wiretype.ArrayType{Elem: 0x41, Len: 0x10, Name: "veyron2/storage.ID", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x35, Name: "veyron/services/store/raw.Version", Tags: []string(nil)}, _gen_wiretype.StructType{
[]_gen_wiretype.FieldType{
_gen_wiretype.FieldType{Type: 0x3, Name: "Name"},
_gen_wiretype.FieldType{Type: 0x49, Name: "ID"},
diff --git a/services/store/raw/version.go b/services/store/raw/version.go
new file mode 100644
index 0000000..aa5d385
--- /dev/null
+++ b/services/store/raw/version.go
@@ -0,0 +1,23 @@
+package raw
+
+import (
+ "math/rand"
+ "time"
+)
+
+var rng *rand.Rand
+
+func init() {
+ rng = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
+}
+
+// NewVersion returns a new version number.
+//
+// TODO(jyh): Choose a better version generator.
+func NewVersion() Version {
+ for {
+ if v := Version(rng.Int63()); v != 0 {
+ return v
+ }
+ }
+}
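NewVersion retries until the random value is non-zero, so a generated version can never collide with the NoVersion sentinel. A quick self-contained usage sketch that re-implements the same loop locally (the raw package itself is not imported here):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// Version mirrors raw.Version; NoVersion is the "entry is absent" sentinel.
type Version uint64

const NoVersion = Version(0)

var rng = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))

// newVersion mirrors raw.NewVersion: retry until the random value is non-zero,
// so a freshly generated version is never mistaken for NoVersion.
func newVersion() Version {
	for {
		if v := Version(rng.Int63()); v != NoVersion {
			return v
		}
	}
}

func main() {
	prior := NoVersion   // new entry: no prior version
	post := newVersion() // assign a fresh, non-zero version
	fmt.Printf("prior=%d post=%d\n", prior, post)
}
```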