Merge "veyron2/ipc: add RoamingListen support."
diff --git a/examples/bank/pbankd/main.go b/examples/bank/pbankd/main.go
index d8a7e3a..1806daa 100644
--- a/examples/bank/pbankd/main.go
+++ b/examples/bank/pbankd/main.go
@@ -39,8 +39,8 @@
// "veyron2/naming"
// "veyron2/rt"
// "veyron2/security"
-// "veyron2/storage"
-// "veyron2/storage/vstore"
+// "veyron.io/store/veyron2/storage"
+// "veyron.io/store/veyron2/storage/vstore"
// "veyron2/vlog"
// )
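The same rename applies to every import of the store packages in this commit; a client's import block ends up looking like this (sketch):

    import (
        "veyron.io/store/veyron2/storage"
        "veyron.io/store/veyron2/storage/vstore"
    )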
diff --git a/examples/boxes/android/src/boxesp2p/main.go b/examples/boxes/android/src/boxesp2p/main.go
index 24a82e6..9654184 100644
--- a/examples/boxes/android/src/boxesp2p/main.go
+++ b/examples/boxes/android/src/boxesp2p/main.go
@@ -73,7 +73,7 @@
inaming "veyron/runtimes/google/naming"
vsync "veyron/runtimes/google/vsync"
vsecurity "veyron/security"
- sstore "veyron/services/store/server"
+ sstore "veyron.io/store/veyron/services/store/server"
"veyron2"
"veyron2/context"
@@ -82,8 +82,8 @@
"veyron2/rt"
"veyron2/security"
"veyron2/services/watch/types"
- "veyron2/storage"
- "veyron2/storage/vstore"
+ "veyron.io/store/veyron2/storage"
+ "veyron.io/store/veyron2/storage/vstore"
"veyron2/vom"
)
diff --git a/examples/mdb/Makefile b/examples/mdb/Makefile
index c215080..49f36aa 100644
--- a/examples/mdb/Makefile
+++ b/examples/mdb/Makefile
@@ -1,5 +1,5 @@
build:
- ${VEYRON_ROOT}/veyron/scripts/build/go install veyron/examples/mdb/... veyron/services/mounttable/mounttabled veyron/services/store/stored veyron/tools/findunusedport veyron/tools/identity
+ ${VEYRON_ROOT}/veyron/scripts/build/go install veyron/examples/mdb/... veyron/services/mounttable/mounttabled veyron.io/store/veyron/services/store/stored veyron/tools/findunusedport veyron/tools/identity
run: build
./run.sh
diff --git a/examples/mdb/mdb_init/main.go b/examples/mdb/mdb_init/main.go
index 45e9b85..dc671eb 100644
--- a/examples/mdb/mdb_init/main.go
+++ b/examples/mdb/mdb_init/main.go
@@ -28,8 +28,8 @@
"veyron/examples/mdb/schema"
"veyron2/context"
"veyron2/rt"
- "veyron2/storage"
- "veyron2/storage/vstore"
+ "veyron.io/store/veyron2/storage"
+ "veyron.io/store/veyron2/storage/vstore"
"veyron2/vlog"
)
diff --git a/examples/mdb/schema/schema.vdl b/examples/mdb/schema/schema.vdl
index 01e96dc..d90ed86 100644
--- a/examples/mdb/schema/schema.vdl
+++ b/examples/mdb/schema/schema.vdl
@@ -1,7 +1,7 @@
package schema
import (
- "veyron2/storage"
+ "veyron.io/store/veyron2/storage"
)
// Dir is used to represent directories.
diff --git a/examples/mdb/schema/schema.vdl.go b/examples/mdb/schema/schema.vdl.go
index f270d44..6867e83 100644
--- a/examples/mdb/schema/schema.vdl.go
+++ b/examples/mdb/schema/schema.vdl.go
@@ -4,7 +4,7 @@
package schema
import (
- "veyron2/storage"
+ "veyron.io/store/veyron2/storage"
)
// Dir is used to represent directories.
diff --git a/examples/stfortune/schema/schema.vdl b/examples/stfortune/schema/schema.vdl
index c87ae0d..f23b0fc 100644
--- a/examples/stfortune/schema/schema.vdl
+++ b/examples/stfortune/schema/schema.vdl
@@ -1,7 +1,7 @@
package schema
import (
- "veyron2/storage"
+ "veyron.io/store/veyron2/storage"
)
// User contains the information corresponding to a particular UserName in the store.
diff --git a/examples/stfortune/schema/schema.vdl.go b/examples/stfortune/schema/schema.vdl.go
index 597c940..09cef86 100644
--- a/examples/stfortune/schema/schema.vdl.go
+++ b/examples/stfortune/schema/schema.vdl.go
@@ -4,7 +4,7 @@
package schema
import (
- "veyron2/storage"
+ "veyron.io/store/veyron2/storage"
)
// User contains the information corresponding to a particular UserName in the store.
diff --git a/examples/stfortune/stfortune/main.go b/examples/stfortune/stfortune/main.go
index 12ba537..18e193c 100644
--- a/examples/stfortune/stfortune/main.go
+++ b/examples/stfortune/stfortune/main.go
@@ -16,7 +16,7 @@
"os"
"strings"
"time"
- "veyron2/query"
+ "veyron.io/store/veyron2/query"
"veyron/examples/stfortune/schema"
@@ -24,8 +24,8 @@
"veyron2/naming"
"veyron2/rt"
"veyron2/services/watch/types"
- "veyron2/storage"
- "veyron2/storage/vstore"
+ "veyron.io/store/veyron2/storage"
+ "veyron.io/store/veyron2/storage/vstore"
"veyron2/vom"
)
diff --git a/examples/todos/Makefile b/examples/todos/Makefile
index cece7f9..3ddf791 100644
--- a/examples/todos/Makefile
+++ b/examples/todos/Makefile
@@ -11,7 +11,7 @@
(cd todos_appd && npm install)
buildgo:
- ${VEYRON_ROOT}/veyron/scripts/build/go install veyron/examples/todos/... veyron/services/mounttable/mounttabled veyron/services/store/stored veyron/tools/findunusedport veyron/tools/identity
+ ${VEYRON_ROOT}/veyron/scripts/build/go install veyron/examples/todos/... veyron/services/mounttable/mounttabled veyron.io/store/veyron/services/store/stored veyron/tools/findunusedport veyron/tools/identity
buildapp: node_modules
browserify -d todos_appd/browser/*.js -p [minifyify --map bundle.js.map --output ${BUNDLE_JS}.map] -o ${BUNDLE_JS}
diff --git a/examples/todos/todos_init/main.go b/examples/todos/todos_init/main.go
index ea80441..e2f9c99 100644
--- a/examples/todos/todos_init/main.go
+++ b/examples/todos/todos_init/main.go
@@ -16,8 +16,8 @@
"veyron/examples/todos/schema"
"veyron2/context"
"veyron2/rt"
- "veyron2/storage"
- "veyron2/storage/vstore"
+ "veyron.io/store/veyron2/storage"
+ "veyron.io/store/veyron2/storage/vstore"
"veyron2/vlog"
)
diff --git a/runtimes/google/vsync/dag.go b/runtimes/google/vsync/dag.go
deleted file mode 100644
index 956ff28..0000000
--- a/runtimes/google/vsync/dag.go
+++ /dev/null
@@ -1,957 +0,0 @@
-package vsync
-
-// Veyron Sync DAG (directed acyclic graph) utility functions.
-// The DAG is used to track the version history of objects in order to
-// detect and resolve conflicts (concurrent changes on different devices).
-//
-// Terminology:
-// * An object is a unique value in the Veyron Store represented by its UID.
-// * As an object mutates, its version number is updated by the Store.
-// * Each (object, version) tuple is represented by a node in the Sync DAG.
-// * The previous version of an object is its parent in the DAG, i.e. the
-// new version is derived from that parent.
-// * When there are no conflicts, the node has a single reference back to
-// a parent node.
-// * When a conflict between two concurrent object versions is resolved,
-// the new version has references back to each of the two parents to
-// indicate that it is derived from both nodes.
-// * During a sync operation from a source device to a target device, the
-// target receives a DAG fragment from the source. That fragment has to
-// be incorporated (grafted) into the target device's DAG. It may be a
-// continuation of the DAG of an object, with the attachment (graft) point
-// being the current head of the DAG, in which case there are no conflicts.
-// Or the graft point(s) may be older nodes, which means the new fragment
-// is a divergence in the graph causing a conflict that must be resolved
-// in order to re-converge the two DAG fragments.
-//
-// In the diagrams below:
-// (h) represents the head node in the local device.
-// (nh) represents the new head node received from the remote device.
-// (g) represents a graft node, where new nodes attach to the existing DAG.
-// <- represents a derived-from mutation, i.e. a child-to-parent pointer
-//
-// a- No-conflict example: the new nodes (v3, v4) attach to the head node (v2).
-// In this case the new head node (v4) becomes the object's head, the new
-// DAG fragment being a continuation of the existing DAG.
-//
-// Before:
-// v0 <- v1 <- v2(h)
-//
-// Sync updates applied, no conflict detected:
-// v0 <- v1 <- v2(h,g) <- v3 <- v4 (nh)
-//
-// After:
-// v0 <- v1 <- v2 <- v3 <- v4 (h)
-//
-// b- Conflict example: the new nodes (v3, v4) attach to an old node (v1).
-// The current head node (v2) and the new head node (v4) are divergent
-// (concurrent) mutations that need to be resolved. The conflict
-// resolution function is passed the old head (v2), new head (v4), and
-// the common ancestor (v1) and resolves the conflict with (v5) which
-// is represented in the DAG as derived from both v2 and v4 (2 parents).
-//
-// Before:
-// v0 <- v1 <- v2(h)
-//
-// Sync updates applied, conflict detected (v2 not a graft node):
-// v0 <- v1(g) <- v2(h)
-// <- v3 <- v4 (nh)
-//
-// After, conflict resolver creates v5 having 2 parents (v2, v4):
-// v0 <- v1(g) <- v2 <------- v5(h)
-// <- v3 <- v4 <-
-//
-// Note: the DAG does not grow indefinitely. During a sync operation each
-// device learns what the other device already knows -- where it's at in
-// the version history for the objects. When a device determines that all
-// devices that sync an object (as per the definitions of replication groups
-// in the Veyron Store) have moved past some version for that object, the
-// DAG for that object can be pruned, deleting all prior (ancestor) nodes.
-//
-// The DAG DB contains three tables persisted to disk (nodes, heads, trans)
-// and three in-memory (ephemeral) maps (graft, txSet, txGC):
-// * nodes: one entry per (object, version) with references to the
-// parent node(s) it is derived from, a reference to the
-// log record identifying that change, a reference to its
-// transaction set (or NoTxID if none), and a boolean to
-// indicate whether this change was a deletion of the object.
-// * heads: one entry per object pointing to its most recent version
-// in the nodes table
-// * trans: one entry per transaction ID containing the set of objects
-// that form the transaction and their versions.
-// * graft: during a sync operation, it tracks the nodes where the new
-// DAG fragments are attached to the existing graph for each
-// mutated object. This map is used to determine whether a
-// conflict happened for an object and, if yes, select the most
-// recent common ancestor from these graft points to use when
-// resolving the conflict. At the end of a sync operation the
-// graft map is destroyed.
-// * txSet: used to incrementally construct the transaction sets that
-// are stored in the "trans" table once all the nodes of a
-// transaction have been added. Multiple transaction sets
-// can be constructed to support the concurrency between the
-// Sync Initiator and Watcher threads.
-// * txGC: used to track the transactions impacted by objects being
-// pruned. At the end of the pruning operation the records
-// of the "trans" table are updated from the txGC information.
-//
-// Note: for regular (no-conflict) changes, a node has a reference to
-// one parent from which it was derived. When a conflict is resolved,
-// the new node has references to the two concurrent parents that triggered
-// the conflict. The states of the parents[] array are:
-// * [] The earliest/first version of an object
-// * [XYZ] Regular non-conflict version derived from XYZ
-// * [XYZ, ABC] Resolution version caused by XYZ-vs-ABC conflict
-
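To make the conflict example above concrete, here is a minimal, self-contained sketch (hypothetical version numbers, plain maps instead of the kvdb-backed tables used below) of grafting a remote fragment at v1 and resolving the resulting two-head conflict with v5:

    package main

    import "fmt"

    type Version uint64

    func main() {
        // Local history: v0 <- v1 <- v2(h)
        parents := map[Version][]Version{0: nil, 1: {0}, 2: {1}}
        head := Version(2)

        // Remote fragment grafted at v1: v1(g) <- v3 <- v4(nh)
        parents[3] = []Version{1}
        parents[4] = []Version{3}

        // Candidate new heads after the graft: the local head (v2) and the
        // remote head (v4). Two candidates mean a conflict; the graft node
        // (v1) is the common ancestor handed to the resolver.
        newHeads := map[Version]struct{}{2: {}, 4: {}}
        if len(newHeads) == 2 {
            // The resolver creates v5 derived from both parents (v2, v4).
            parents[5] = []Version{2, 4}
            head = 5
        }

        fmt.Println("head:", head, "parents:", parents[head]) // head: 5 parents: [2 4]
    }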
-import (
- "container/list"
- "errors"
- "fmt"
- "math/rand"
- "time"
-
- "veyron/services/store/raw"
-
- "veyron2/storage"
- "veyron2/vlog"
-)
-
-const (
- NoTxID = TxID(0)
-)
-
-type TxID uint64
-type dagTxMap map[storage.ID]raw.Version
-
-type dag struct {
- fname string // file pathname
- store *kvdb // underlying K/V store
- heads *kvtable // pointer to "heads" table in the store
- nodes *kvtable // pointer to "nodes" table in the store
- trans *kvtable // pointer to "trans" table in the store
- graft map[storage.ID]*graftInfo // in-memory state of DAG object grafting
- txSet map[TxID]dagTxMap // in-memory construction of transaction sets
- txGC map[TxID]dagTxMap // in-memory tracking of transaction sets to cleanup
- txGen *rand.Rand // transaction ID random number generator
-}
-
-type dagNode struct {
- Level uint64 // node distance from root
- Parents []raw.Version // references to parent versions
- Logrec string // reference to log record change
- TxID TxID // ID of a transaction set
- Deleted bool // true if the change was a delete
-}
-
-type graftInfo struct {
- newNodes map[raw.Version]struct{} // set of newly added nodes during a sync
- graftNodes map[raw.Version]uint64 // set of graft nodes and their level
- newHeads map[raw.Version]struct{} // set of candidate new head nodes
-}
-
-// openDAG opens or creates a DAG for the given filename.
-func openDAG(filename string) (*dag, error) {
- // Open the file and create it if it does not exist.
- // Also initialize the store and its tables.
- db, tbls, err := kvdbOpen(filename, []string{"heads", "nodes", "trans"})
- if err != nil {
- return nil, err
- }
-
- d := &dag{
- fname: filename,
- store: db,
- heads: tbls[0],
- nodes: tbls[1],
- trans: tbls[2],
- txGen: rand.New(rand.NewSource(time.Now().UTC().UnixNano())),
- txSet: make(map[TxID]dagTxMap),
- }
-
- d.clearGraft()
- d.clearTxGC()
-
- return d, nil
-}
-
-// close closes the DAG and invalidates its structure.
-func (d *dag) close() {
- if d.store != nil {
- d.store.close() // this also closes the tables
- }
- *d = dag{} // zero out the DAG struct
-}
-
-// flush flushes the DAG store to disk.
-func (d *dag) flush() {
- if d.store != nil {
- d.store.flush()
- }
-}
-
-// compact compacts dag's kvdb file.
-func (d *dag) compact() error {
- if d.store == nil {
- return errors.New("invalid DAG")
- }
- db, tbls, err := d.store.compact(d.fname, []string{"heads", "nodes", "trans"})
- if err != nil {
- return err
- }
- d.store = db
- d.heads = tbls[0]
- d.nodes = tbls[1]
- d.trans = tbls[2]
- return nil
-}
-
-// clearGraft clears the temporary in-memory grafting maps.
-func (d *dag) clearGraft() {
- if d.store != nil {
- d.graft = make(map[storage.ID]*graftInfo)
- }
-}
-
-// clearTxGC clears the temporary in-memory transaction garbage collection maps.
-func (d *dag) clearTxGC() {
- if d.store != nil {
- d.txGC = make(map[TxID]dagTxMap)
- }
-}
-
-// getObjectGraft returns the graft structure for an object ID.
-// The graftInfo struct for an object is ephemeral (in-memory) and it
-// tracks the following information:
-// - newNodes: the set of newly added nodes used to detect the type of
-// edges between nodes (new-node to old-node or vice versa).
-// - newHeads: the set of new candidate head nodes used to detect conflicts.
-// - graftNodes: the set of nodes used to find common ancestors between
-// conflicting nodes.
-//
-// After the received Sync logs are applied, if there are two new heads in
-// the newHeads set, there is a conflict to be resolved for this object.
-// Otherwise if there is only one head, no conflict was triggered and the
-// new head becomes the current version for the object.
-//
-// In case of conflict, the graftNodes set is used to select the common
-// ancestor to pass to the conflict resolver.
-//
-// Note: if an object's graft structure does not exist, it is created only
-// when the "create" parameter is set to true.
-func (d *dag) getObjectGraft(oid storage.ID, create bool) *graftInfo {
- graft := d.graft[oid]
- if graft == nil && create {
- graft = &graftInfo{
- newNodes: make(map[raw.Version]struct{}),
- graftNodes: make(map[raw.Version]uint64),
- newHeads: make(map[raw.Version]struct{}),
- }
-
- // If a current head node exists for this object, initialize
- // the set of candidate new heads to include it.
- head, err := d.getHead(oid)
- if err == nil {
- graft.newHeads[head] = struct{}{}
- }
-
- d.graft[oid] = graft
- }
- return graft
-}
-
-// addNodeTxStart generates a transaction ID and returns it to the caller.
-// The transaction ID is purely internal to the DAG. It is used to track
-// DAG nodes that are part of the same transaction.
-func (d *dag) addNodeTxStart() TxID {
- if d.store == nil {
- return NoTxID
- }
-
- // Generate a random 64-bit transaction ID different than NoTxID.
- // Also make sure the ID is not already being used.
- tid := NoTxID
- for (tid == NoTxID) || (d.txSet[tid] != nil) {
- // Generate an unsigned 64-bit random value by combining a
- // random 63-bit value and a random 1-bit value.
- tid = (TxID(d.txGen.Int63()) << 1) | TxID(d.txGen.Int63n(2))
- }
-
- // Initialize the in-memory object/version map for that transaction ID.
- d.txSet[tid] = make(dagTxMap)
-
- return tid
-}
-
-// addNodeTxEnd marks the end of a given transaction.
-// The DAG completes its internal tracking of the transaction information.
-func (d *dag) addNodeTxEnd(tid TxID) error {
- if d.store == nil {
- return errors.New("invalid DAG")
- }
- if tid == NoTxID {
- return fmt.Errorf("invalid TxID: %v", tid)
- }
-
- txMap, ok := d.txSet[tid]
- if !ok {
- return fmt.Errorf("unknown transaction ID: %v", tid)
- }
-
- if err := d.setTransaction(tid, txMap); err != nil {
- return err
- }
-
- delete(d.txSet, tid)
- return nil
-}
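A brief usage sketch of the transaction API above, as the Watcher might use it when two mutations belong to one store transaction (oidA, oidB, the version numbers, and the log record names are hypothetical):

    // addTxNodes records two object mutations as members of the same
    // transaction set; addNodeTxEnd persists the set to the "trans" table.
    func addTxNodes(d *dag, oidA, oidB storage.ID) error {
        tid := d.addNodeTxStart()
        if tid == NoTxID {
            return errors.New("cannot start a DAG transaction")
        }
        if err := d.addNode(oidA, 3, false, false, []raw.Version{2}, "logrec-a", tid); err != nil {
            return err
        }
        if err := d.addNode(oidB, 7, false, false, []raw.Version{6}, "logrec-b", tid); err != nil {
            return err
        }
        return d.addNodeTxEnd(tid)
    }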
-
-// addNode adds a new node for an object in the DAG, linking it to its parent nodes.
-// It verifies that this node does not exist and that its parent nodes are valid.
-// It also determines the DAG level of the node from its parent nodes (max() + 1).
-//
-// If the node is due to a local change (from the Watcher API), no need to
-// update the grafting structure. Otherwise the node is due to a remote change
-// (from the Sync protocol) being grafted on the DAG:
-// - If a parent node is not new, mark it as a DAG graft point.
-// - Mark this version as a new node.
-// - Update the new head node pointer of the grafted DAG.
-//
-// If the transaction ID is set to NoTxID, this node is not part of a transaction.
-// Otherwise, track its membership in the given transaction ID.
-func (d *dag) addNode(oid storage.ID, version raw.Version, remote, deleted bool,
- parents []raw.Version, logrec string, tid TxID) error {
- if d.store == nil {
- return errors.New("invalid DAG")
- }
-
- if parents != nil {
- if len(parents) > 2 {
- return fmt.Errorf("cannot have more than 2 parents, not %d", len(parents))
- }
- if len(parents) == 0 {
- // Replace an empty array with a nil.
- parents = nil
- }
- }
-
- // The new node must not exist.
- if d.hasNode(oid, version) {
- return fmt.Errorf("node %d:%d already exists in the DAG", oid, version)
- }
-
- // A new root node (no parents) is allowed only for new objects.
- if parents == nil {
- _, err := d.getHead(oid)
- if err == nil {
- return fmt.Errorf("cannot add another root node %d:%d for this object in the DAG", oid, version)
- }
- }
-
- // For a remote change, make sure the object has a graft info entry.
- // During a sync operation, each mutated object gets new nodes added
- // in its DAG. These new nodes are either derived from nodes that
- // were previously known on this device (i.e. their parent nodes are
- // pre-existing), or they are derived from other new DAG nodes being
- // discovered during this sync (i.e. their parent nodes were also
- // just added to the DAG).
- //
- // To detect a conflict and find the most recent common ancestor to
- // pass to the conflict resolver callback, the DAG keeps track of the
- // new nodes that have old parent nodes. These old-to-new edges are
- // the points where new DAG fragments are attached (grafted) onto the
- // existing DAG. The old nodes are the "graft nodes" and they form
- // the set of possible common ancestors to use in case of conflict:
- // 1- A conflict happens when the current "head node" for an object
- // is not in the set of graft nodes. It means the object mutations
-// were not derived from what the device knows, but were divergent
- // changes from a prior point (from one of the graft nodes).
- // 2- The most recent common ancestor to use in resolving the conflict
- // is the object graft node with the deepest level (furthest from
- // the origin root node), representing the most up-to-date common
- // knowledge between this device and the divergent changes.
- //
- // Note: at the end of a sync operation between 2 devices, the whole
- // graft info is cleared (Syncd calls clearGraft()) to prepare it for
- // the new pairwise sync operation.
- graft := d.getObjectGraft(oid, remote)
-
- // Verify the parents and determine the node level.
- // Update the graft info in the DAG for this object.
- var level uint64
- for _, parent := range parents {
- node, err := d.getNode(oid, parent)
- if err != nil {
- return err
- }
- if level <= node.Level {
- level = node.Level + 1
- }
- if remote {
- // If this parent is an old node, it's a graft point in the DAG
- // and may be a common ancestor used during conflict resolution.
- if _, ok := graft.newNodes[parent]; !ok {
- graft.graftNodes[parent] = node.Level
- }
-
- // The parent nodes can no longer be candidates for new head versions.
- if _, ok := graft.newHeads[parent]; ok {
- delete(graft.newHeads, parent)
- }
- }
- }
-
- if remote {
- // This new node is a candidate for new head version.
- graft.newNodes[version] = struct{}{}
- graft.newHeads[version] = struct{}{}
- }
-
- // If this node is part of a transaction, add it to that set.
- if tid != NoTxID {
- txMap, ok := d.txSet[tid]
- if !ok {
- return fmt.Errorf("unknown transaction ID: %v", tid)
- }
-
- txMap[oid] = version
- }
-
- // Insert the new node in the kvdb.
- node := &dagNode{Level: level, Parents: parents, Logrec: logrec, TxID: tid, Deleted: deleted}
- return d.setNode(oid, version, node)
-}
-
-// hasNode returns true if the node (oid, version) exists in the DAG DB.
-func (d *dag) hasNode(oid storage.ID, version raw.Version) bool {
- if d.store == nil {
- return false
- }
- key := objNodeKey(oid, version)
- return d.nodes.hasKey(key)
-}
-
-// addParent adds a linkage from the DAG node (oid, version) to the given parent node.
-// If the parent linkage is due to a local change (from conflict resolution
-// by blessing an existing version), no need to update the grafting structure.
-// Otherwise a remote change (from the Sync protocol) updates the graft.
-//
-// TODO(rdaoud): recompute the levels of reachable child-nodes if the new
-// parent's level is greater or equal to the node's current level.
-func (d *dag) addParent(oid storage.ID, version, parent raw.Version, remote bool) error {
- if version == parent {
- return fmt.Errorf("addParent: object %v: node %d cannot be its own parent", oid, version)
- }
-
- node, err := d.getNode(oid, version)
- if err != nil {
- return err
- }
-
- pnode, err := d.getNode(oid, parent)
- if err != nil {
- vlog.VI(1).Infof("addParent: object %v, node %d, parent %d: parent node not found", oid, version, parent)
- return err
- }
-
- // Check if the parent is already linked to this node.
- found := false
- for i := range node.Parents {
- if node.Parents[i] == parent {
- found = true
- break
- }
- }
-
- // If the parent is not yet linked (local or remote) add it.
- if !found {
- // Make sure that adding the link does not create a cycle in the DAG.
- // This is done by verifying that the node is not an ancestor of the
- // parent that it is being linked to.
- err = d.ancestorIter(oid, pnode.Parents, func(oid storage.ID, v raw.Version, nd *dagNode) error {
- if v == version {
- return fmt.Errorf("addParent: cycle on object %v: node %d is an ancestor of parent node %d",
- oid, version, parent)
- }
- return nil
- })
- if err != nil {
- return err
- }
- node.Parents = append(node.Parents, parent)
- err = d.setNode(oid, version, node)
- if err != nil {
- return err
- }
- }
-
- // For local changes we are done, the grafting structure is not updated.
- if !remote {
- return nil
- }
-
- // If the node and its parent are new/old or old/new then add
- // the parent as a graft point (a potential common ancestor).
- graft := d.getObjectGraft(oid, true)
-
- _, nodeNew := graft.newNodes[version]
- _, parentNew := graft.newNodes[parent]
- if (nodeNew && !parentNew) || (!nodeNew && parentNew) {
- graft.graftNodes[parent] = pnode.Level
- }
-
- // The parent node can no longer be a candidate for a new head version.
- // The addParent() function only removes candidates from newHeads that
- // have become parents. It does not add the child nodes to newHeads
- // because they are not necessarily new-head candidates. If they are
- // new nodes, the addNode() function handles adding them to newHeads.
- // For old nodes, only the current head could be a candidate and it is
- // added to newHeads when the graft struct is initialized.
- if _, ok := graft.newHeads[parent]; ok {
- delete(graft.newHeads, parent)
- }
-
- return nil
-}
-
-// moveHead moves the object head node in the DAG.
-func (d *dag) moveHead(oid storage.ID, head raw.Version) error {
- if d.store == nil {
- return errors.New("invalid DAG")
- }
-
- // Verify that the node exists.
- if !d.hasNode(oid, head) {
- return fmt.Errorf("node %d:%d does not exist in the DAG", oid, head)
- }
-
- return d.setHead(oid, head)
-}
-
-// hasConflict determines if there is a conflict for this object between its
-// new and old head nodes.
-// - Yes: return (true, newHead, oldHead, ancestor)
-// - No: return (false, newHead, oldHead, NoVersion)
-// A conflict exists when there are two new-head nodes. It means the newly
-// added object versions are not derived in part from this device's current
-// knowledge. If there is a single new-head, the object changes were applied
-// without triggering a conflict.
-func (d *dag) hasConflict(oid storage.ID) (isConflict bool, newHead, oldHead, ancestor raw.Version, err error) {
- oldHead = raw.NoVersion
- newHead = raw.NoVersion
- ancestor = raw.NoVersion
- if d.store == nil {
- err = errors.New("invalid DAG")
- return
- }
-
- graft := d.graft[oid]
- if graft == nil {
- err = fmt.Errorf("node %d has no DAG graft information", oid)
- return
- }
-
- numHeads := len(graft.newHeads)
- if numHeads < 1 || numHeads > 2 {
- err = fmt.Errorf("node %d has invalid number of new head candidates %d: %v", oid, numHeads, graft.newHeads)
- return
- }
-
- // Fetch the current head for this object if it exists. The error from getHead()
- // is ignored because a newly received object is not yet known on this device and
- // will not trigger a conflict.
- oldHead, _ = d.getHead(oid)
-
- // If there is only one new head node there is no conflict.
- // The new head is that single one, even if it might also be the same old node.
- if numHeads == 1 {
- for k := range graft.newHeads {
- newHead = k
- }
- return
- }
-
- // With two candidate head nodes, the new one is the node that is
- // not the current (old) head node.
- for k := range graft.newHeads {
- if k != oldHead {
- newHead = k
- break
- }
- }
-
- // There is a conflict: the best choice ancestor is the graft point
- // node with the largest level (farthest from the root). It is
- // possible in some corner cases to have multiple graft nodes at
- // the same level. This would still be a single conflict, but the
- // multiple same-level graft points representing equivalent conflict
- // resolutions on different devices that are now merging their
- // resolutions. In such a case it does not matter which node is
- // chosen as the ancestor because the conflict resolver function
- // is assumed to be convergent. However it's nicer to make that
- // selection deterministic so all devices see the same choice.
- // For this the version number is used as a tie-breaker.
- isConflict = true
- var maxLevel uint64
- for node, level := range graft.graftNodes {
- if maxLevel < level ||
- (maxLevel == level && ancestor < node) {
- maxLevel = level
- ancestor = node
- }
- }
- return
-}
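A sketch of how the Sync Initiator side might drive hasConflict() after replaying remote log records for an object; the resolve callback and the "logrec-res" record are hypothetical stand-ins for the real conflict-resolution plumbing. Syncd would call clearGraft() once all objects in the sync pass have been processed:

    // resolveObject detects a conflict for oid, asks the resolver for a new
    // version derived from both heads, and moves the object head to it.
    func resolveObject(d *dag, oid storage.ID, resolve func(newHead, oldHead, ancestor raw.Version) raw.Version) error {
        isConflict, newHead, oldHead, ancestor, err := d.hasConflict(oid)
        if err != nil {
            return err
        }
        head := newHead
        if isConflict {
            head = resolve(newHead, oldHead, ancestor)
            // The resolution node has both conflicting heads as parents.
            if err := d.addNode(oid, head, false, false, []raw.Version{oldHead, newHead}, "logrec-res", NoTxID); err != nil {
                return err
            }
        }
        return d.moveHead(oid, head)
    }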
-
-// ancestorIter iterates over the DAG ancestor nodes for an object in a
-// breadth-first traversal starting from given version node(s). In its
-// traversal it invokes the callback function once for each node, passing
-// the object ID, version number and a pointer to the dagNode.
-func (d *dag) ancestorIter(oid storage.ID, startVersions []raw.Version,
- cb func(storage.ID, raw.Version, *dagNode) error) error {
- visited := make(map[raw.Version]bool)
- queue := list.New()
- for _, version := range startVersions {
- queue.PushBack(version)
- visited[version] = true
- }
-
- for queue.Len() > 0 {
- version := queue.Remove(queue.Front()).(raw.Version)
- node, err := d.getNode(oid, version)
- if err != nil {
- // Ignore it, the parent was previously pruned.
- continue
- }
- for _, parent := range node.Parents {
- if !visited[parent] {
- queue.PushBack(parent)
- visited[parent] = true
- }
- }
- if err = cb(oid, version, node); err != nil {
- return err
- }
- }
-
- return nil
-}
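For example, a caller could use ancestorIter() to collect the log records of all ancestors of a set of versions (a sketch; collectLogrecs is not part of the original code):

    func collectLogrecs(d *dag, oid storage.ID, startVersions []raw.Version) ([]string, error) {
        var logrecs []string
        err := d.ancestorIter(oid, startVersions, func(_ storage.ID, _ raw.Version, node *dagNode) error {
            logrecs = append(logrecs, node.Logrec)
            return nil
        })
        return logrecs, err
    }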
-
-// hasDeletedDescendant returns true if the node (oid, version) exists in the
-// DAG DB and one of its descendants is a deleted node (i.e. has its "Deleted"
-// flag set true). This means that at some object mutation after this version,
-// the object was deleted.
-func (d *dag) hasDeletedDescendant(oid storage.ID, version raw.Version) bool {
- if d.store == nil {
- return false
- }
- if !d.hasNode(oid, version) {
- return false
- }
-
- // Do a breadth-first traversal from the object's head node back to
- // the given version. Along the way, track whether a deleted node is
- // traversed. Return true only if a traversal reaches the given version
- // and had seen a deleted node along the way.
-
- // nodeStep tracks a step along a traversal. It stores the node to visit
- // when taking that step and a boolean tracking whether a deleted node
- // was seen so far along that trajectory.
- head, err := d.getHead(oid)
- if err != nil {
- return false
- }
-
- type nodeStep struct {
- node raw.Version
- deleted bool
- }
-
- visited := make(map[nodeStep]struct{})
- queue := list.New()
-
- step := nodeStep{node: head, deleted: false}
- queue.PushBack(&step)
- visited[step] = struct{}{}
-
- for queue.Len() > 0 {
- step := queue.Remove(queue.Front()).(*nodeStep)
- if step.node == version {
- if step.deleted {
- return true
- }
- continue
- }
- node, err := d.getNode(oid, step.node)
- if err != nil {
- // Ignore it, the parent was previously pruned.
- continue
- }
- nextDel := step.deleted || node.Deleted
-
- for _, parent := range node.Parents {
- nextStep := nodeStep{node: parent, deleted: nextDel}
- if _, ok := visited[nextStep]; !ok {
- queue.PushBack(&nextStep)
- visited[nextStep] = struct{}{}
- }
- }
- }
-
- return false
-}
-
-// prune trims the DAG of an object at a given version (node) by deleting
-// all its ancestor nodes, making it the new root node. For each deleted
-// node it calls the given callback function to delete its log record.
-// This function should only be called when Sync determines that all devices
-// that know about the object have gotten past this version.
-// Also track any transaction sets affected by deleting DAG objects that
-// have transaction IDs. This is later used to do garbage collection
-// on transaction sets when pruneDone() is called.
-func (d *dag) prune(oid storage.ID, version raw.Version, delLogRec func(logrec string) error) error {
- if d.store == nil {
- return errors.New("invalid DAG")
- }
-
- // Get the node at the pruning point and set its parents to nil.
- // It will become the oldest DAG node (root) for the object.
- node, err := d.getNode(oid, version)
- if err != nil {
- return err
- }
- if node.Parents == nil {
- // Nothing to do, this node is already the root.
- return nil
- }
-
- iterVersions := node.Parents
-
- node.Parents = nil
- if err = d.setNode(oid, version, node); err != nil {
- return err
- }
-
- // Delete all ancestor nodes and their log records.
- // Delete as many as possible and track the error counts.
- // Keep track of objects deleted from transaction in order
- // to cleanup transaction sets when pruneDone() is called.
- numNodeErrs, numLogErrs := 0, 0
- err = d.ancestorIter(oid, iterVersions, func(oid storage.ID, v raw.Version, node *dagNode) error {
- if tid := node.TxID; tid != NoTxID {
- if d.txGC[tid] == nil {
- d.txGC[tid] = make(dagTxMap)
- }
- d.txGC[tid][oid] = v
- }
-
- if err := delLogRec(node.Logrec); err != nil {
- numLogErrs++
- }
- if err := d.delNode(oid, v); err != nil {
- numNodeErrs++
- }
- return nil
- })
- if err != nil {
- return err
- }
- if numNodeErrs != 0 || numLogErrs != 0 {
- return fmt.Errorf("prune failed to delete %d nodes and %d log records", numNodeErrs, numLogErrs)
- }
- return nil
-}
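A sketch of one garbage-collection pass combining prune() and pruneDone(); the delLogRec callback is supplied by the caller, as in the real Sync GC:

    // gcObject trims everything older than version, lets pruneDone() update
    // the affected transaction sets, then persists the trimmed DAG.
    func gcObject(d *dag, oid storage.ID, version raw.Version, delLogRec func(string) error) error {
        if err := d.prune(oid, version, delLogRec); err != nil {
            return err
        }
        if err := d.pruneDone(); err != nil {
            return err
        }
        d.flush()
        return nil
    }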
-
-// pruneDone is called when object pruning is finished within a single pass
-// of the Sync garbage collector. It updates the transaction sets affected
-// by the objects deleted by the prune() calls.
-func (d *dag) pruneDone() error {
- if d.store == nil {
- return errors.New("invalid DAG")
- }
-
- // Update transaction sets by removing from them the objects that
- // were pruned. If the resulting set is empty, delete it.
- for tid, txMapGC := range d.txGC {
- txMap, err := d.getTransaction(tid)
- if err != nil {
- return err
- }
-
- for oid := range txMapGC {
- delete(txMap, oid)
- }
-
- if len(txMap) > 0 {
- err = d.setTransaction(tid, txMap)
- } else {
- err = d.delTransaction(tid)
- }
- if err != nil {
- return err
- }
- }
-
- d.clearTxGC()
- return nil
-}
-
-// getLogrec returns the log record information for a given object version.
-func (d *dag) getLogrec(oid storage.ID, version raw.Version) (string, error) {
- node, err := d.getNode(oid, version)
- if err != nil {
- return "", err
- }
- return node.Logrec, nil
-}
-
-// objNodeKey returns the key used to access the object node (oid, version)
-// in the DAG DB.
-func objNodeKey(oid storage.ID, version raw.Version) string {
- return fmt.Sprintf("%s:%d", oid.String(), version)
-}
-
-// setNode stores the dagNode structure for the object node (oid, version)
-// in the DAG DB.
-func (d *dag) setNode(oid storage.ID, version raw.Version, node *dagNode) error {
- if d.store == nil {
- return errors.New("invalid DAG")
- }
- key := objNodeKey(oid, version)
- return d.nodes.set(key, node)
-}
-
-// getNode retrieves the dagNode structure for the object node (oid, version)
-// from the DAG DB.
-func (d *dag) getNode(oid storage.ID, version raw.Version) (*dagNode, error) {
- if d.store == nil {
- return nil, errors.New("invalid DAG")
- }
- var node dagNode
- key := objNodeKey(oid, version)
- if err := d.nodes.get(key, &node); err != nil {
- return nil, err
- }
- return &node, nil
-}
-
-// delNode deletes the object node (oid, version) from the DAG DB.
-func (d *dag) delNode(oid storage.ID, version raw.Version) error {
- if d.store == nil {
- return errors.New("invalid DAG")
- }
- key := objNodeKey(oid, version)
- return d.nodes.del(key)
-}
-
-// objHeadKey returns the key used to access the object head in the DAG DB.
-func objHeadKey(oid storage.ID) string {
- return oid.String()
-}
-
-// setHead stores version as the object head in the DAG DB.
-func (d *dag) setHead(oid storage.ID, version raw.Version) error {
- if d.store == nil {
- return errors.New("invalid DAG")
- }
- key := objHeadKey(oid)
- return d.heads.set(key, version)
-}
-
-// getHead retrieves the object head from the DAG DB.
-func (d *dag) getHead(oid storage.ID) (raw.Version, error) {
- var version raw.Version
- if d.store == nil {
- return version, errors.New("invalid DAG")
- }
- key := objHeadKey(oid)
- err := d.heads.get(key, &version)
- if err != nil {
- version = raw.NoVersion
- }
- return version, err
-}
-
-// dagTransactionKey returns the key used to access the transaction in the DAG DB.
-func dagTransactionKey(tid TxID) string {
- return fmt.Sprintf("%v", tid)
-}
-
-// setTransaction stores the transaction object/version map in the DAG DB.
-func (d *dag) setTransaction(tid TxID, txMap dagTxMap) error {
- if d.store == nil {
- return errors.New("invalid DAG")
- }
- if tid == NoTxID {
- return fmt.Errorf("invalid TxID: %v", tid)
- }
- key := dagTransactionKey(tid)
- return d.trans.set(key, txMap)
-}
-
-// getTransaction retrieves the transaction object/version map from the DAG DB.
-func (d *dag) getTransaction(tid TxID) (dagTxMap, error) {
- if d.store == nil {
- return nil, errors.New("invalid DAG")
- }
- if tid == NoTxID {
- return nil, fmt.Errorf("invalid TxID: %v", tid)
- }
- var txMap dagTxMap
- key := dagTransactionKey(tid)
- if err := d.trans.get(key, &txMap); err != nil {
- return nil, err
- }
- return txMap, nil
-}
-
-// delTransaction deletes the transaction object/version map from the DAG DB.
-func (d *dag) delTransaction(tid TxID) error {
- if d.store == nil {
- return errors.New("invalid DAG")
- }
- if tid == NoTxID {
- return fmt.Errorf("invalid TxID: %v", tid)
- }
- key := dagTransactionKey(tid)
- return d.trans.del(key)
-}
-
-// getParentMap is a testing and debug helper function that returns for
-// an object a map of all the object versions in the DAG and their parents.
-// The map represents the graph of the object version history.
-func (d *dag) getParentMap(oid storage.ID) map[raw.Version][]raw.Version {
- parentMap := make(map[raw.Version][]raw.Version)
- var iterVersions []raw.Version
-
- if head, err := d.getHead(oid); err == nil {
- iterVersions = append(iterVersions, head)
- }
- if graft := d.graft[oid]; graft != nil {
- for k := range graft.newHeads {
- iterVersions = append(iterVersions, k)
- }
- }
-
- // Breadth-first traversal starting from the object head.
- d.ancestorIter(oid, iterVersions, func(oid storage.ID, v raw.Version, node *dagNode) error {
- parentMap[v] = node.Parents
- return nil
- })
-
- return parentMap
-}
-
-// getGraftNodes is a testing and debug helper function that returns for
-// an object the graft information built and used during a sync operation.
-// The newHeads map identifies the candidate head nodes based on the data
-// reported by the other device during a sync operation. The graftNodes map
-// identifies the set of old nodes where the new DAG fragments were attached
-// and their depth level in the DAG.
-func (d *dag) getGraftNodes(oid storage.ID) (map[raw.Version]struct{}, map[raw.Version]uint64) {
- if d.store != nil {
- if ginfo := d.graft[oid]; ginfo != nil {
- return ginfo.newHeads, ginfo.graftNodes
- }
- }
- return nil, nil
-}
diff --git a/runtimes/google/vsync/dag_test.go b/runtimes/google/vsync/dag_test.go
deleted file mode 100644
index ff2ebab..0000000
--- a/runtimes/google/vsync/dag_test.go
+++ /dev/null
@@ -1,1990 +0,0 @@
-package vsync
-
-// Tests for the Veyron Sync DAG component.
-
-import (
- "errors"
- "fmt"
- "os"
- "reflect"
- "testing"
- "time"
-
- "veyron/lib/testutil"
- "veyron/services/store/raw"
-
- "veyron2/storage"
-)
-
-// dagFilename generates a filename for a temporary (per unit test) DAG file.
-// Do not replace this function with TempFile because TempFile creates the new
-// file and the tests must verify that the DAG can create a non-existing file.
-func dagFilename() string {
- return fmt.Sprintf("%s/sync_dag_test_%d_%d", os.TempDir(), os.Getpid(), time.Now().UnixNano())
-}
-
-// fileSize returns the size of a file.
-func fileSize(fname string) int64 {
- finfo, err := os.Stat(fname)
- if err != nil {
- return -1
- }
- return finfo.Size()
-}
-
-// TestDAGOpen tests the creation of a DAG, closing and re-opening it. It also
-// verifies that its backing file is created and that a 2nd close is safe.
-func TestDAGOpen(t *testing.T) {
- dagfile := dagFilename()
- defer os.Remove(dagfile)
-
- dag, err := openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot open new DAG file %s", dagfile)
- }
-
- fsize := fileSize(dagfile)
- if fsize < 0 {
- t.Fatalf("DAG file %s not created", dagfile)
- }
-
- dag.flush()
- oldfsize := fsize
- fsize = fileSize(dagfile)
- if fsize <= oldfsize {
- t.Fatalf("DAG file %s not flushed", dagfile)
- }
-
- dag.close()
-
- dag, err = openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot re-open existing DAG file %s", dagfile)
- }
-
- oldfsize = fsize
- fsize = fileSize(dagfile)
- if fsize != oldfsize {
- t.Fatalf("DAG file %s size changed across re-open", dagfile)
- }
-
- dag.close()
- dag.close() // multiple closes should be a safe NOP
-
- fsize = fileSize(dagfile)
- if fsize != oldfsize {
- t.Fatalf("DAG file %s size changed across close", dagfile)
- }
-
- // Fail opening a DAG in a non-existent directory.
- _, err = openDAG("/not/really/there/junk.dag")
- if err == nil {
- t.Fatalf("openDAG() did not fail when using a bad pathname")
- }
-}
-
-// TestInvalidDAG tests using DAG methods on an invalid (closed) DAG.
-func TestInvalidDAG(t *testing.T) {
- dagfile := dagFilename()
- defer os.Remove(dagfile)
-
- dag, err := openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot open new DAG file %s", dagfile)
- }
-
- dag.close()
-
- oid, err := strToObjID("6789")
- if err != nil {
- t.Error(err)
- }
-
- err = dag.addNode(oid, 4, false, false, []raw.Version{2, 3}, "foobar", NoTxID)
- if err == nil || err.Error() != "invalid DAG" {
- t.Errorf("addNode() did not fail on a closed DAG: %v", err)
- }
-
- err = dag.moveHead(oid, 4)
- if err == nil || err.Error() != "invalid DAG" {
- t.Errorf("moveHead() did not fail on a closed DAG: %v", err)
- }
-
- _, _, _, _, err = dag.hasConflict(oid)
- if err == nil || err.Error() != "invalid DAG" {
- t.Errorf("hasConflict() did not fail on a closed DAG: %v", err)
- }
-
- _, err = dag.getLogrec(oid, 4)
- if err == nil || err.Error() != "invalid DAG" {
- t.Errorf("getLogrec() did not fail on a closed DAG: %v", err)
- }
-
- err = dag.prune(oid, 4, func(lr string) error {
- return nil
- })
- if err == nil || err.Error() != "invalid DAG" {
- t.Errorf("prune() did not fail on a closed DAG: %v", err)
- }
-
- err = dag.pruneDone()
- if err == nil || err.Error() != "invalid DAG" {
- t.Errorf("pruneDone() did not fail on a closed DAG: %v", err)
- }
-
- node := &dagNode{Level: 15, Parents: []raw.Version{444, 555}, Logrec: "logrec-23"}
- err = dag.setNode(oid, 4, node)
- if err == nil || err.Error() != "invalid DAG" {
- t.Errorf("setNode() did not fail on a closed DAG: %v", err)
- }
-
- _, err = dag.getNode(oid, 4)
- if err == nil || err.Error() != "invalid DAG" {
- t.Errorf("getNode() did not fail on a closed DAG: %v", err)
- }
-
- err = dag.delNode(oid, 4)
- if err == nil || err.Error() != "invalid DAG" {
- t.Errorf("delNode() did not fail on a closed DAG: %v", err)
- }
-
- err = dag.addParent(oid, 4, 2, true)
- if err == nil || err.Error() != "invalid DAG" {
- t.Errorf("addParent() did not fail on a closed DAG: %v", err)
- }
-
- err = dag.setHead(oid, 4)
- if err == nil || err.Error() != "invalid DAG" {
- t.Errorf("setHead() did not fail on a closed DAG: %v", err)
- }
-
- _, err = dag.getHead(oid)
- if err == nil || err.Error() != "invalid DAG" {
- t.Errorf("getHead() did not fail on a closed DAG: %v", err)
- }
-
- err = dag.compact()
- if err == nil || err.Error() != "invalid DAG" {
- t.Errorf("compact() did not fail on a closed DAG: %v", err)
- }
-
- if tid := dag.addNodeTxStart(); tid != NoTxID {
- t.Errorf("addNodeTxStart() did not fail on a closed DAG: TxID %v", tid)
- }
-
- err = dag.addNodeTxEnd(1)
- if err == nil || err.Error() != "invalid DAG" {
- t.Errorf("addNodeTxEnd() did not fail on a closed DAG: %v", err)
- }
-
- err = dag.setTransaction(1, nil)
- if err == nil || err.Error() != "invalid DAG" {
- t.Errorf("setTransaction() did not fail on a closed DAG: %v", err)
- }
-
- _, err = dag.getTransaction(1)
- if err == nil || err.Error() != "invalid DAG" {
- t.Errorf("getTransaction() did not fail on a closed DAG: %v", err)
- }
-
- err = dag.delTransaction(1)
- if err == nil || err.Error() != "invalid DAG" {
- t.Errorf("delTransaction() did not fail on a closed DAG: %v", err)
- }
-
- // These calls should be harmless NOPs.
- dag.clearGraft()
- dag.clearTxGC()
- dag.flush()
- dag.close()
- if dag.hasNode(oid, 4) {
- t.Errorf("hasNode() found an object on a closed DAG")
- }
- if dag.hasDeletedDescendant(oid, 3) {
- t.Errorf("hasDeletedDescendant() returned true on a closed DAG")
- }
- if pmap := dag.getParentMap(oid); len(pmap) != 0 {
- t.Errorf("getParentMap() found data on a closed DAG: %v", pmap)
- }
- if hmap, gmap := dag.getGraftNodes(oid); hmap != nil || gmap != nil {
- t.Errorf("getGraftNodes() found data on a closed DAG: head map: %v, graft map: %v", hmap, gmap)
- }
-}
-
-// TestSetNode tests setting and getting a DAG node across DAG open/close/reopen.
-func TestSetNode(t *testing.T) {
- dagfile := dagFilename()
- defer os.Remove(dagfile)
-
- dag, err := openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot open new DAG file %s", dagfile)
- }
-
- version := raw.Version(0)
- oid, err := strToObjID("111")
- if err != nil {
- t.Fatal(err)
- }
-
- node, err := dag.getNode(oid, version)
- if err == nil || node != nil {
- t.Errorf("Found non-existent object %d:%d in DAG file %s: %v", oid, version, dagfile, node)
- }
-
- if dag.hasNode(oid, version) {
- t.Errorf("hasNode() found non-existent object %d:%d in DAG file %s", oid, version, dagfile)
- }
-
- if logrec, err := dag.getLogrec(oid, version); err == nil || logrec != "" {
- t.Errorf("Non-existent object %d:%d has a logrec in DAG file %s: %v", oid, version, dagfile, logrec)
- }
-
- node = &dagNode{Level: 15, Parents: []raw.Version{444, 555}, Logrec: "logrec-23"}
- if err = dag.setNode(oid, version, node); err != nil {
- t.Fatalf("Cannot set object %d:%d (%v) in DAG file %s", oid, version, node, dagfile)
- }
-
- for i := 0; i < 2; i++ {
- node2, err := dag.getNode(oid, version)
- if err != nil || node2 == nil {
- t.Errorf("Cannot find stored object %d:%d (i=%d) in DAG file %s", oid, version, i, dagfile)
- }
-
- if !dag.hasNode(oid, version) {
- t.Errorf("hasNode() did not find object %d:%d (i=%d) in DAG file %s", oid, version, i, dagfile)
- }
-
- if !reflect.DeepEqual(node, node2) {
- t.Errorf("Object %d:%d has wrong data (i=%d) in DAG file %s: %v instead of %v",
- oid, version, i, dagfile, node2, node)
- }
-
- if logrec, err := dag.getLogrec(oid, version); err != nil || logrec != "logrec-23" {
- t.Errorf("Object %d:%d has wrong logrec (i=%d) in DAG file %s: %v",
- oid, version, i, dagfile, logrec)
- }
-
- if i == 0 {
- dag.flush()
- dag.close()
- dag, err = openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot re-open DAG file %s", dagfile)
- }
- }
- }
-
- dag.close()
-}
-
-// TestDelNode tests deleting a DAG node across DAG open/close/reopen.
-func TestDelNode(t *testing.T) {
- dagfile := dagFilename()
- defer os.Remove(dagfile)
-
- dag, err := openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot open new DAG file %s", dagfile)
- }
-
- version := raw.Version(1)
- oid, err := strToObjID("222")
- if err != nil {
- t.Fatal(err)
- }
-
- node := &dagNode{Level: 123, Parents: []raw.Version{333}, Logrec: "logrec-789"}
- if err = dag.setNode(oid, version, node); err != nil {
- t.Fatalf("Cannot set object %d:%d (%v) in DAG file %s", oid, version, node, dagfile)
- }
-
- dag.flush()
-
- err = dag.delNode(oid, version)
- if err != nil {
- t.Fatalf("Cannot delete object %d:%d in DAG file %s", oid, version, dagfile)
- }
-
- dag.flush()
-
- for i := 0; i < 2; i++ {
- node2, err := dag.getNode(oid, version)
- if err == nil || node2 != nil {
- t.Errorf("Found deleted object %d:%d (%v) (i=%d) in DAG file %s", oid, version, node2, i, dagfile)
- }
-
- if dag.hasNode(oid, version) {
- t.Errorf("hasNode() found deleted object %d:%d (i=%d) in DAG file %s", oid, version, i, dagfile)
- }
-
- if logrec, err := dag.getLogrec(oid, version); err == nil || logrec != "" {
- t.Errorf("Deleted object %d:%d (i=%d) has logrec in DAG file %s: %v", oid, version, i, dagfile, logrec)
- }
-
- if i == 0 {
- dag.close()
- dag, err = openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot re-open DAG file %s", dagfile)
- }
- }
- }
-
- dag.close()
-}
-
-// TestAddParent tests adding parents to a DAG node.
-func TestAddParent(t *testing.T) {
- dagfile := dagFilename()
- defer os.Remove(dagfile)
-
- dag, err := openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot open new DAG file %s", dagfile)
- }
-
- version := raw.Version(7)
- oid, err := strToObjID("12345")
- if err != nil {
- t.Fatal(err)
- }
-
- if err = dag.addParent(oid, version, 1, true); err == nil {
- t.Errorf("addParent() did not fail for an unknown object %d:%d in DAG file %s", oid, version, dagfile)
- }
-
- if err = dagReplayCommands(dag, "local-init-00.log.sync"); err != nil {
- t.Fatal(err)
- }
-
- node := &dagNode{Level: 15, Logrec: "logrec-22"}
- if err = dag.setNode(oid, version, node); err != nil {
- t.Fatalf("Cannot set object %d:%d (%v) in DAG file %s", oid, version, node, dagfile)
- }
-
- if err = dag.addParent(oid, version, version, true); err == nil {
- t.Errorf("addParent() did not fail on a self-parent for object %d:%d in DAG file %s", oid, version, dagfile)
- }
-
- for _, parent := range []raw.Version{4, 5, 6} {
- if err = dag.addParent(oid, version, parent, true); err == nil {
- t.Errorf("addParent() did not reject invalid parent %d for object %d:%d in DAG file %s",
- parent, oid, version, dagfile)
- }
-
- pnode := &dagNode{Level: 11, Logrec: fmt.Sprintf("logrec-%d", parent), Parents: []raw.Version{3}}
- if err = dag.setNode(oid, parent, pnode); err != nil {
- t.Fatalf("Cannot set parent object %d:%d (%v) in DAG file %s", oid, parent, pnode, dagfile)
- }
-
- remote := parent%2 == 0
- for i := 0; i < 2; i++ {
- if err = dag.addParent(oid, version, parent, remote); err != nil {
- t.Errorf("addParent() failed on parent %d, remote %d (i=%d) for object %d:%d in DAG file %s: %v",
- parent, remote, i, oid, version, dagfile, err)
- }
- }
- }
-
- node2, err := dag.getNode(oid, version)
- if err != nil || node2 == nil {
- t.Errorf("Cannot find stored object %d:%d in DAG file %s", oid, version, dagfile)
- }
-
- expParents := []raw.Version{4, 5, 6}
- if !reflect.DeepEqual(node2.Parents, expParents) {
- t.Errorf("invalid parents for object %d:%d in DAG file %s: %v instead of %v",
- oid, version, dagfile, node2.Parents, expParents)
- }
-
- // Creating cycles should fail.
- for v := raw.Version(1); v < version; v++ {
- if err = dag.addParent(oid, v, version, false); err == nil {
- t.Errorf("addParent() failed to reject a cycle for object %d: from ancestor %d to node %d in DAG file %s",
- oid, v, version, dagfile)
- }
- }
-
- dag.close()
-}
-
-// TestSetHead tests setting and getting a DAG head node across DAG open/close/reopen.
-func TestSetHead(t *testing.T) {
- dagfile := dagFilename()
- defer os.Remove(dagfile)
-
- dag, err := openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot open new DAG file %s", dagfile)
- }
-
- oid, err := strToObjID("333")
- if err != nil {
- t.Fatal(err)
- }
-
- version, err := dag.getHead(oid)
- if err == nil {
- t.Errorf("Found non-existent object head %d in DAG file %s: %d", oid, dagfile, version)
- }
-
- version = 555
- if err = dag.setHead(oid, version); err != nil {
- t.Fatalf("Cannot set object head %d (%d) in DAG file %s", oid, version, dagfile)
- }
-
- dag.flush()
-
- for i := 0; i < 3; i++ {
- version2, err := dag.getHead(oid)
- if err != nil {
- t.Errorf("Cannot find stored object head %d (i=%d) in DAG file %s", oid, i, dagfile)
- }
- if version != version2 {
- t.Errorf("Object %d has wrong head data (i=%d) in DAG file %s: %d instead of %d",
- oid, i, dagfile, version2, version)
- }
-
- if i == 0 {
- dag.close()
- dag, err = openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot re-open DAG file %s", dagfile)
- }
- } else if i == 1 {
- version = 888
- if err = dag.setHead(oid, version); err != nil {
- t.Fatalf("Cannot set new object head %d (%d) in DAG file %s", oid, version, dagfile)
- }
- dag.flush()
- }
- }
-
- dag.close()
-}
-
-// checkEndOfSync simulates and checks the end-of-sync operations: clear the
-// node grafting metadata and verify that it is empty and that hasConflict()
-// detects this case and fails, then close the DAG.
-func checkEndOfSync(d *dag, oid storage.ID) error {
- // Clear grafting info; this happens at the end of a sync log replay.
- d.clearGraft()
-
- // There should be no grafting or transaction info, and hasConflict() should fail.
- newHeads, grafts := d.getGraftNodes(oid)
- if newHeads != nil || grafts != nil {
- return fmt.Errorf("Object %d: graft info not cleared: newHeads (%v), grafts (%v)", oid, newHeads, grafts)
- }
-
- if n := len(d.txSet); n != 0 {
- return fmt.Errorf("transaction set not empty: %d entries found", n)
- }
-
- isConflict, newHead, oldHead, ancestor, errConflict := d.hasConflict(oid)
- if errConflict == nil {
- return fmt.Errorf("Object %d: conflict did not fail: flag %t, newHead %d, oldHead %d, ancestor %d, err %v",
- oid, isConflict, newHead, oldHead, ancestor, errConflict)
- }
-
- d.close()
- return nil
-}
-
-// TestLocalUpdates tests the sync handling of initial local updates: an object
-// is created (v0) and updated twice (v1, v2) on this device. The DAG should
-// show: v0 -> v1 -> v2 and the head should point to v2.
-func TestLocalUpdates(t *testing.T) {
- dagfile := dagFilename()
- defer os.Remove(dagfile)
-
- dag, err := openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot open new DAG file %s", dagfile)
- }
-
- if err = dagReplayCommands(dag, "local-init-00.sync"); err != nil {
- t.Fatal(err)
- }
-
- // The head must have moved to "v2" and the parent map shows the updated DAG.
- oid, err := strToObjID("12345")
- if err != nil {
- t.Fatal(err)
- }
-
- if head, e := dag.getHead(oid); e != nil || head != 2 {
- t.Errorf("Invalid object %d head in DAG file %s: %d", oid, dagfile, head)
- }
-
- pmap := dag.getParentMap(oid)
-
- exp := map[raw.Version][]raw.Version{0: nil, 1: {0}, 2: {1}}
-
- if !reflect.DeepEqual(pmap, exp) {
- t.Errorf("Invalid object %d parent map in DAG file %s: (%v) instead of (%v)", oid, dagfile, pmap, exp)
- }
-
- // Make sure an existing node cannot be added again.
- if err = dag.addNode(oid, 1, false, false, []raw.Version{0, 2}, "foobar", NoTxID); err == nil {
- t.Errorf("addNode() did not fail when given an existing node")
- }
-
- // Make sure a new node cannot have more than 2 parents.
- if err = dag.addNode(oid, 3, false, false, []raw.Version{0, 1, 2}, "foobar", NoTxID); err == nil {
- t.Errorf("addNode() did not fail when given 3 parents")
- }
-
- // Make sure a new node cannot have an invalid parent.
- if err = dag.addNode(oid, 3, false, false, []raw.Version{0, 555}, "foobar", NoTxID); err == nil {
- t.Errorf("addNode() did not fail when using an invalid parent")
- }
-
- // Make sure a new root node (no parents) cannot be added once a root exists.
- // For the parents array, check both the "nil" and the empty array as input.
- if err = dag.addNode(oid, 6789, false, false, nil, "foobar", NoTxID); err == nil {
- t.Errorf("Adding a 2nd root node (nil parents) for object %d in DAG file %s did not fail", oid, dagfile)
- }
- if err = dag.addNode(oid, 6789, false, false, []raw.Version{}, "foobar", NoTxID); err == nil {
- t.Errorf("Adding a 2nd root node (empty parents) for object %d in DAG file %s did not fail", oid, dagfile)
- }
-
- if err := checkEndOfSync(dag, oid); err != nil {
- t.Fatal(err)
- }
-}
-
-// TestRemoteUpdates tests the sync handling of initial remote updates:
-// an object is created (v0) and updated twice (v1, v2) on another device and
-// we learn about it during sync. The updated DAG should show: v0 -> v1 -> v2
-// and report no conflicts with the new head pointing at v2.
-func TestRemoteUpdates(t *testing.T) {
- dagfile := dagFilename()
- defer os.Remove(dagfile)
-
- dag, err := openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot open new DAG file %s", dagfile)
- }
-
- if err = dagReplayCommands(dag, "remote-init-00.sync"); err != nil {
- t.Fatal(err)
- }
-
- // The head must not have moved (i.e. still undefined) and the parent
- // map shows the newly grafted DAG fragment.
- oid, err := strToObjID("12345")
- if err != nil {
- t.Fatal(err)
- }
-
- if head, e := dag.getHead(oid); e == nil {
- t.Errorf("Object %d head found in DAG file %s: %d", oid, dagfile, head)
- }
-
- pmap := dag.getParentMap(oid)
-
- exp := map[raw.Version][]raw.Version{0: nil, 1: {0}, 2: {1}}
-
- if !reflect.DeepEqual(pmap, exp) {
- t.Errorf("Invalid object %d parent map in DAG file %s: (%v) instead of (%v)", oid, dagfile, pmap, exp)
- }
-
- // Verify the grafting of remote nodes.
- newHeads, grafts := dag.getGraftNodes(oid)
-
- expNewHeads := map[raw.Version]struct{}{2: struct{}{}}
- if !reflect.DeepEqual(newHeads, expNewHeads) {
- t.Errorf("Object %d has invalid newHeads in DAG file %s: (%v) instead of (%v)", oid, dagfile, newHeads, expNewHeads)
- }
-
- expgrafts := map[raw.Version]uint64{}
- if !reflect.DeepEqual(grafts, expgrafts) {
- t.Errorf("Invalid object %d graft in DAG file %s: (%v) instead of (%v)", oid, dagfile, grafts, expgrafts)
- }
-
- // There should be no conflict.
- isConflict, newHead, oldHead, ancestor, errConflict := dag.hasConflict(oid)
- if !(!isConflict && newHead == 2 && oldHead == 0 && ancestor == 0 && errConflict == nil) {
- t.Errorf("Object %d wrong conflict info: flag %t, newHead %d, oldHead %d, ancestor %d, err %v",
- oid, isConflict, newHead, oldHead, ancestor, errConflict)
- }
-
- if logrec, e := dag.getLogrec(oid, newHead); e != nil || logrec != "logrec-02" {
- t.Errorf("Invalid logrec for newhead object %d:%d in DAG file %s: %v", oid, newHead, dagfile, logrec)
- }
-
- // Make sure an unknown node cannot become the new head.
- if err = dag.moveHead(oid, 55); err == nil {
- t.Errorf("moveHead() did not fail on an invalid node")
- }
-
- // Then we can move the head and clear the grafting data.
- if err = dag.moveHead(oid, newHead); err != nil {
- t.Errorf("Object %d cannot move head to %d in DAG file %s: %v", oid, newHead, dagfile, err)
- }
-
- if err := checkEndOfSync(dag, oid); err != nil {
- t.Fatal(err)
- }
-}
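-
-// The no-conflict outcome above follows from the graft data: grafting
-// left exactly one new head and it descends from the current head, so
-// the head can simply fast-forward. A minimal sketch of that decision
-// (illustrative only; hasConflict() is the real implementation and
-// also reports the common ancestor of conflicting heads):
-func isFastForward(newHeads map[raw.Version]struct{}, oldHead raw.Version, descends func(v, ancestor raw.Version) bool) bool {
-	if len(newHeads) != 1 {
-		return false // concurrent heads imply a conflict to resolve
-	}
-	for v := range newHeads {
-		if v == oldHead || descends(v, oldHead) {
-			return true // the single new head extends the old head
-		}
-	}
-	return false
-}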
-
-// TestRemoteNoConflict tests sync of remote updates on top of a local initial
-// state without conflict. An object is created locally and updated twice
-// (v0 -> v1 -> v2). Another device, having gotten this info, makes 3 updates
-// on top of that (v2 -> v3 -> v4 -> v5) and sends this info in a later sync.
-// The updated DAG should show (v0 -> v1 -> v2 -> v3 -> v4 -> v5) and report
-// no conflicts with the new head pointing at v5. It should also report v2 as
-// the graft point on which the new fragment (v3 -> v4 -> v5) gets attached.
-func TestRemoteNoConflict(t *testing.T) {
- dagfile := dagFilename()
- defer os.Remove(dagfile)
-
- dag, err := openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot open new DAG file %s", dagfile)
- }
-
- if err = dagReplayCommands(dag, "local-init-00.sync"); err != nil {
- t.Fatal(err)
- }
- if err = dagReplayCommands(dag, "remote-noconf-00.sync"); err != nil {
- t.Fatal(err)
- }
-
- // The head must not have moved (i.e. still at v2) and the parent map
- // shows the newly grafted DAG fragment on top of the prior DAG.
- oid, err := strToObjID("12345")
- if err != nil {
- t.Fatal(err)
- }
-
- if head, e := dag.getHead(oid); e != nil || head != 2 {
- t.Errorf("Object %d has wrong head in DAG file %s: %d", oid, dagfile, head)
- }
-
- pmap := dag.getParentMap(oid)
-
- exp := map[raw.Version][]raw.Version{0: nil, 1: {0}, 2: {1}, 3: {2}, 4: {3}, 5: {4}}
-
- if !reflect.DeepEqual(pmap, exp) {
- t.Errorf("Invalid object %d parent map in DAG file %s: (%v) instead of (%v)", oid, dagfile, pmap, exp)
- }
-
- // Verify the grafting of remote nodes.
- newHeads, grafts := dag.getGraftNodes(oid)
-
- expNewHeads := map[raw.Version]struct{}{5: struct{}{}}
- if !reflect.DeepEqual(newHeads, expNewHeads) {
- t.Errorf("Object %d has invalid newHeads in DAG file %s: (%v) instead of (%v)", oid, dagfile, newHeads, expNewHeads)
- }
-
- expgrafts := map[raw.Version]uint64{2: 2}
- if !reflect.DeepEqual(grafts, expgrafts) {
- t.Errorf("Invalid object %d graft in DAG file %s: (%v) instead of (%v)", oid, dagfile, grafts, expgrafts)
- }
-
- // There should be no conflict.
- isConflict, newHead, oldHead, ancestor, errConflict := dag.hasConflict(oid)
- if !(!isConflict && newHead == 5 && oldHead == 2 && ancestor == 0 && errConflict == nil) {
- t.Errorf("Object %d wrong conflict info: flag %t, newHead %d, oldHead %d, ancestor %d, err %v",
- oid, isConflict, newHead, oldHead, ancestor, errConflict)
- }
-
- if logrec, e := dag.getLogrec(oid, oldHead); e != nil || logrec != "logrec-02" {
- t.Errorf("Invalid logrec for oldhead object %d:%d in DAG file %s: %v", oid, oldHead, dagfile, logrec)
- }
- if logrec, e := dag.getLogrec(oid, newHead); e != nil || logrec != "logrec-05" {
- t.Errorf("Invalid logrec for newhead object %d:%d in DAG file %s: %v", oid, newHead, dagfile, logrec)
- }
-
- // Then we can move the head and clear the grafting data.
- if err = dag.moveHead(oid, newHead); err != nil {
- t.Errorf("Object %d cannot move head to %d in DAG file %s: %v", oid, newHead, dagfile, err)
- }
-
- // Clear the grafting data and verify that hasConflict() fails without it.
- dag.clearGraft()
- isConflict, newHead, oldHead, ancestor, errConflict = dag.hasConflict(oid)
- if errConflict == nil {
-		t.Errorf("hasConflict() on object %d did not fail w/o graft info: flag %t, newHead %d, oldHead %d, ancestor %d, err %v",
-			oid, isConflict, newHead, oldHead, ancestor, errConflict)
- }
-
- if err := checkEndOfSync(dag, oid); err != nil {
- t.Fatal(err)
- }
-}
-
-// TestRemoteConflict tests sync handling remote updates that build on the
-// local initial state and trigger a conflict. An object is created locally
-// and updated twice (v0 -> v1 -> v2). Another device, having only gotten
-// the v0 -> v1 history, makes 3 updates on top of v1 (v1 -> v3 -> v4 -> v5)
-// and sends this info during a later sync. Separately, the local device
-// makes a conflicting (concurrent) update v1 -> v2. The updated DAG should
-// show the branches: (v0 -> v1 -> v2) and (v0 -> v1 -> v3 -> v4 -> v5) and
-// report the conflict between v2 and v5 (current and new heads). It should
-// also report v1 as the graft point and the common ancestor in the conflict.
-// The conflict is resolved locally by creating v6 that is derived from both
-// v2 and v5 and it becomes the new head.
-func TestRemoteConflict(t *testing.T) {
- dagfile := dagFilename()
- defer os.Remove(dagfile)
-
- dag, err := openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot open new DAG file %s", dagfile)
- }
-
- if err = dagReplayCommands(dag, "local-init-00.sync"); err != nil {
- t.Fatal(err)
- }
- if err = dagReplayCommands(dag, "remote-conf-00.sync"); err != nil {
- t.Fatal(err)
- }
-
- // The head must not have moved (i.e. still at v2) and the parent map
- // shows the newly grafted DAG fragment on top of the prior DAG.
- oid, err := strToObjID("12345")
- if err != nil {
- t.Fatal(err)
- }
-
- if head, e := dag.getHead(oid); e != nil || head != 2 {
- t.Errorf("Object %d has wrong head in DAG file %s: %d", oid, dagfile, head)
- }
-
- pmap := dag.getParentMap(oid)
-
- exp := map[raw.Version][]raw.Version{0: nil, 1: {0}, 2: {1}, 3: {1}, 4: {3}, 5: {4}}
-
- if !reflect.DeepEqual(pmap, exp) {
- t.Errorf("Invalid object %d parent map in DAG file %s: (%v) instead of (%v)", oid, dagfile, pmap, exp)
- }
-
- // Verify the grafting of remote nodes.
- newHeads, grafts := dag.getGraftNodes(oid)
-
- expNewHeads := map[raw.Version]struct{}{2: struct{}{}, 5: struct{}{}}
- if !reflect.DeepEqual(newHeads, expNewHeads) {
- t.Errorf("Object %d has invalid newHeads in DAG file %s: (%v) instead of (%v)", oid, dagfile, newHeads, expNewHeads)
- }
-
- expgrafts := map[raw.Version]uint64{1: 1}
- if !reflect.DeepEqual(grafts, expgrafts) {
- t.Errorf("Invalid object %d graft in DAG file %s: (%v) instead of (%v)", oid, dagfile, grafts, expgrafts)
- }
-
- // There should be a conflict between v2 and v5 with v1 as ancestor.
- isConflict, newHead, oldHead, ancestor, errConflict := dag.hasConflict(oid)
- if !(isConflict && newHead == 5 && oldHead == 2 && ancestor == 1 && errConflict == nil) {
- t.Errorf("Object %d wrong conflict info: flag %t, newHead %d, oldHead %d, ancestor %d, err %v",
- oid, isConflict, newHead, oldHead, ancestor, errConflict)
- }
-
- if logrec, e := dag.getLogrec(oid, oldHead); e != nil || logrec != "logrec-02" {
- t.Errorf("Invalid logrec for oldhead object %d:%d in DAG file %s: %v", oid, oldHead, dagfile, logrec)
- }
- if logrec, e := dag.getLogrec(oid, newHead); e != nil || logrec != "logrec-05" {
- t.Errorf("Invalid logrec for newhead object %d:%d in DAG file %s: %v", oid, newHead, dagfile, logrec)
- }
- if logrec, e := dag.getLogrec(oid, ancestor); e != nil || logrec != "logrec-01" {
- t.Errorf("Invalid logrec for ancestor object %d:%d in DAG file %s: %v", oid, ancestor, dagfile, logrec)
- }
-
- // Resolve the conflict by adding a new local v6 derived from v2 and v5 (this replay moves the head).
- if err = dagReplayCommands(dag, "local-resolve-00.sync"); err != nil {
- t.Fatal(err)
- }
-
- // Verify that the head moved to v6 and the parent map shows the resolution.
- if head, e := dag.getHead(oid); e != nil || head != 6 {
- t.Errorf("Object %d has wrong head after conflict resolution in DAG file %s: %d", oid, dagfile, head)
- }
-
- exp[6] = []raw.Version{2, 5}
- pmap = dag.getParentMap(oid)
- if !reflect.DeepEqual(pmap, exp) {
- t.Errorf("Invalid object %d parent map after conflict resolution in DAG file %s: (%v) instead of (%v)",
- oid, dagfile, pmap, exp)
- }
-
- if err := checkEndOfSync(dag, oid); err != nil {
- t.Fatal(err)
- }
-}
-
-// TestRemoteConflictTwoGrafts tests sync handling remote updates that build
-// on the local initial state and trigger a conflict with 2 graft points.
-// An object is created locally and updated twice (v0 -> v1 -> v2). Another
-// device first learns about v0 and makes its own conflicting update v0 -> v3.
-// That remote device later learns about v1 and resolves the v1/v3 conflict by
-// creating v4. Then it makes a last v4 -> v5 update -- which will conflict
-// with v2, but it doesn't know that yet.
-// Now the sync order is reversed and the local device learns all of what
-// happened on the remote device. The local DAG should be augmented by
-// a subtree with 2 graft points: v0 and v1. It receives this new branch:
-// v0 -> v3 -> v4 -> v5. Note that v4 is also derived from v1 as a remote
-// conflict resolution. This should report a conflict between v2 and v5
-// (current and new heads), with v0 and v1 as graft points, and v1 as the
-// most-recent common ancestor for that conflict. The conflict is resolved
-// locally by creating v6, derived from both v2 and v5, becoming the new head.
-func TestRemoteConflictTwoGrafts(t *testing.T) {
- dagfile := dagFilename()
- defer os.Remove(dagfile)
-
- dag, err := openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot open new DAG file %s", dagfile)
- }
-
- if err = dagReplayCommands(dag, "local-init-00.sync"); err != nil {
- t.Fatal(err)
- }
- if err = dagReplayCommands(dag, "remote-conf-01.sync"); err != nil {
- t.Fatal(err)
- }
-
- // The head must not have moved (i.e. still at v2) and the parent map
- // shows the newly grafted DAG fragment on top of the prior DAG.
- oid, err := strToObjID("12345")
- if err != nil {
- t.Fatal(err)
- }
-
- if head, e := dag.getHead(oid); e != nil || head != 2 {
- t.Errorf("Object %d has wrong head in DAG file %s: %d", oid, dagfile, head)
- }
-
- pmap := dag.getParentMap(oid)
-
- exp := map[raw.Version][]raw.Version{0: nil, 1: {0}, 2: {1}, 3: {0}, 4: {1, 3}, 5: {4}}
-
- if !reflect.DeepEqual(pmap, exp) {
- t.Errorf("Invalid object %d parent map in DAG file %s: (%v) instead of (%v)", oid, dagfile, pmap, exp)
- }
-
- // Verify the grafting of remote nodes.
- newHeads, grafts := dag.getGraftNodes(oid)
-
- expNewHeads := map[raw.Version]struct{}{2: struct{}{}, 5: struct{}{}}
- if !reflect.DeepEqual(newHeads, expNewHeads) {
- t.Errorf("Object %d has invalid newHeads in DAG file %s: (%v) instead of (%v)", oid, dagfile, newHeads, expNewHeads)
- }
-
- expgrafts := map[raw.Version]uint64{0: 0, 1: 1}
- if !reflect.DeepEqual(grafts, expgrafts) {
- t.Errorf("Invalid object %d graft in DAG file %s: (%v) instead of (%v)", oid, dagfile, grafts, expgrafts)
- }
-
- // There should be a conflict between v2 and v5 with v1 as ancestor.
- isConflict, newHead, oldHead, ancestor, errConflict := dag.hasConflict(oid)
- if !(isConflict && newHead == 5 && oldHead == 2 && ancestor == 1 && errConflict == nil) {
- t.Errorf("Object %d wrong conflict info: flag %t, newHead %d, oldHead %d, ancestor %d, err %v",
- oid, isConflict, newHead, oldHead, ancestor, errConflict)
- }
-
- if logrec, e := dag.getLogrec(oid, oldHead); e != nil || logrec != "logrec-02" {
- t.Errorf("Invalid logrec for oldhead object %d:%d in DAG file %s: %v", oid, oldHead, dagfile, logrec)
- }
- if logrec, e := dag.getLogrec(oid, newHead); e != nil || logrec != "logrec-05" {
- t.Errorf("Invalid logrec for newhead object %d:%d in DAG file %s: %v", oid, newHead, dagfile, logrec)
- }
- if logrec, e := dag.getLogrec(oid, ancestor); e != nil || logrec != "logrec-01" {
- t.Errorf("Invalid logrec for ancestor object %d:%d in DAG file %s: %v", oid, ancestor, dagfile, logrec)
- }
-
- // Resolve the conflict by adding a new local v6 derived from v2 and v5 (this replay moves the head).
- if err = dagReplayCommands(dag, "local-resolve-00.sync"); err != nil {
- t.Fatal(err)
- }
-
- // Verify that the head moved to v6 and the parent map shows the resolution.
- if head, e := dag.getHead(oid); e != nil || head != 6 {
- t.Errorf("Object %d has wrong head after conflict resolution in DAG file %s: %d", oid, dagfile, head)
- }
-
- exp[6] = []raw.Version{2, 5}
- pmap = dag.getParentMap(oid)
- if !reflect.DeepEqual(pmap, exp) {
- t.Errorf("Invalid object %d parent map after conflict resolution in DAG file %s: (%v) instead of (%v)",
- oid, dagfile, pmap, exp)
- }
-
- if err := checkEndOfSync(dag, oid); err != nil {
- t.Fatal(err)
- }
-}
-
-// TestAncestorIterator checks that the iterator goes over the correct set
-// of ancestor nodes for an object given a starting node. It should traverse
-// reconvergent DAG branches, visiting each ancestor only once:
-// v0 -> v1 -> v2 -> v4 -> v5 -> v7 -> v8
-//        |--> v3 ---|            |
-//        +--> v6 ----------------+
-// - Starting at v0 it should only cover v0.
-// - Starting at v2 it should only cover v0-v2.
-// - Starting at v5 it should only cover v0-v5.
-// - Starting at v8 it should cover all nodes (v0-v8).
-func TestAncestorIterator(t *testing.T) {
- dagfile := dagFilename()
- defer os.Remove(dagfile)
-
- dag, err := openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot open new DAG file %s", dagfile)
- }
-
- if err = dagReplayCommands(dag, "local-init-01.sync"); err != nil {
- t.Fatal(err)
- }
-
- oid, err := strToObjID("12345")
- if err != nil {
- t.Fatal(err)
- }
-
- // Loop checking the iteration behavior for different starting nodes.
- for _, start := range []raw.Version{0, 2, 5, 8} {
- visitCount := make(map[raw.Version]int)
- err = dag.ancestorIter(oid, []raw.Version{start},
- func(oid storage.ID, v raw.Version, node *dagNode) error {
- visitCount[v]++
- return nil
- })
-
- // Check that all prior nodes are visited only once.
- for i := raw.Version(0); i < (start + 1); i++ {
- if visitCount[i] != 1 {
- t.Errorf("wrong visit count for iter on object %d node %d starting from node %d: %d instead of 1",
- oid, i, start, visitCount[i])
- }
- }
- }
-
- // Make sure an error in the callback is returned through the iterator.
- cbErr := errors.New("callback error")
- err = dag.ancestorIter(oid, []raw.Version{8}, func(oid storage.ID, v raw.Version, node *dagNode) error {
- if v == 0 {
- return cbErr
- }
- return nil
- })
- if err != cbErr {
- t.Errorf("wrong error returned from callback: %v instead of %v", err, cbErr)
- }
-
- if err = checkEndOfSync(dag, oid); err != nil {
- t.Fatal(err)
- }
-}
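-
-// For reference, the "visit each ancestor once" property checked above
-// falls out of tracking visited versions during the walk. A minimal
-// sketch over a parent map (illustrative only; ancestorIter() is the
-// real implementation and reads nodes from the DAG store):
-func ancestorWalk(pmap map[raw.Version][]raw.Version, start []raw.Version, visit func(raw.Version)) {
-	visited := make(map[raw.Version]bool)
-	stack := append([]raw.Version(nil), start...)
-	for len(stack) > 0 {
-		v := stack[len(stack)-1]
-		stack = stack[:len(stack)-1]
-		if visited[v] {
-			continue // reconvergent branches reach a node more than once
-		}
-		visited[v] = true
-		visit(v)
-		stack = append(stack, pmap[v]...)
-	}
-}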
-
-// TestPruning tests sync pruning of the DAG for an object with 3 concurrent
-// updates (i.e. 2 conflict resolution convergent points). The pruning must
-// get rid of the DAG branches across the reconvergence points:
-// v0 -> v1 -> v2 -> v4 -> v5 -> v7 -> v8
-//        |--> v3 ---|            |
-//        +--> v6 ----------------+
-// By pruning at v0, nothing is deleted.
-// Then by pruning at v1, only v0 is deleted.
-// Then by pruning at v5, v1-v4 are deleted leaving v5 and "v6 -> v7 -> v8".
-// Then by pruning at v7, v5-v6 are deleted leaving "v7 -> v8".
-// Then by pruning at v8, v7 is deleted leaving v8 as the head.
-// Then by pruning again at v8 nothing changes.
-func TestPruning(t *testing.T) {
- dagfile := dagFilename()
- defer os.Remove(dagfile)
-
- dag, err := openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot open new DAG file %s", dagfile)
- }
-
- if err = dagReplayCommands(dag, "local-init-01.sync"); err != nil {
- t.Fatal(err)
- }
-
- oid, err := strToObjID("12345")
- if err != nil {
- t.Fatal(err)
- }
-
- exp := map[raw.Version][]raw.Version{0: nil, 1: {0}, 2: {1}, 3: {1}, 4: {2, 3}, 5: {4}, 6: {1}, 7: {5, 6}, 8: {7}}
-
-	// Loop pruning at an invalid version (333) then at v0, v1, v5, v7, v8 and again at v8.
- testVersions := []raw.Version{333, 0, 1, 5, 7, 8, 8}
- delCounts := []int{0, 0, 1, 4, 2, 1, 0}
-
- for i, version := range testVersions {
- del := 0
- err = dag.prune(oid, version, func(lr string) error {
- del++
- return nil
- })
-
- if i == 0 && err == nil {
-			t.Errorf("pruning object %d at non-existent version %d did not fail in DAG file %s", oid, version, dagfile)
- } else if i > 0 && err != nil {
- t.Errorf("pruning object %d:%d failed in DAG file %s: %v", oid, version, dagfile, err)
- }
-
- if del != delCounts[i] {
- t.Errorf("pruning object %d:%d deleted %d log records instead of %d", oid, version, del, delCounts[i])
- }
-
- if head, err := dag.getHead(oid); err != nil || head != 8 {
- t.Errorf("Object %d has wrong head in DAG file %s: %d", oid, dagfile, head)
- }
-
- err = dag.pruneDone()
- if err != nil {
- t.Errorf("pruneDone() failed in DAG file %s: %v", dagfile, err)
- }
-
-		// Remove pruned nodes from the expected parent map used for
-		// validation, and set the parents of the prune point to nil.
- if version < 10 {
- for j := raw.Version(0); j < version; j++ {
- delete(exp, j)
- }
- exp[version] = nil
- }
-
- pmap := dag.getParentMap(oid)
- if !reflect.DeepEqual(pmap, exp) {
- t.Errorf("Invalid object %d parent map in DAG file %s: (%v) instead of (%v)", oid, dagfile, pmap, exp)
- }
- }
-
- if err = checkEndOfSync(dag, oid); err != nil {
- t.Fatal(err)
- }
-}
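-
-// The pruning semantics verified above boil down to: delete every
-// strict ancestor of the prune point, then detach the prune point's
-// parent links so it becomes the new root. A minimal sketch over a
-// parent map (illustrative only; prune() also hands each deleted
-// node's log record to the caller and defers work to pruneDone()):
-func pruneAt(pmap map[raw.Version][]raw.Version, version raw.Version) {
-	stack := append([]raw.Version(nil), pmap[version]...)
-	for len(stack) > 0 {
-		v := stack[len(stack)-1]
-		stack = stack[:len(stack)-1]
-		if _, ok := pmap[v]; !ok {
-			continue // already deleted via another branch
-		}
-		stack = append(stack, pmap[v]...)
-		delete(pmap, v)
-	}
-	pmap[version] = nil // the prune point becomes the new root
-}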
-
-// TestPruningCallbackError tests sync pruning of the DAG when the callback
-// function returns an error. The pruning must try to delete as many nodes
-// and log records as possible and properly adjust the parent pointers of
-// the prune point. The object DAG is:
-// v0 -> v1 -> v2 -> v4 -> v5 -> v7 -> v8
-//        |--> v3 ---|            |
-//        +--> v6 ----------------+
-// By pruning at v8 and having the callback function fail for v3, all other
-// nodes must be deleted and only v8 remains as the head.
-func TestPruningCallbackError(t *testing.T) {
- dagfile := dagFilename()
- defer os.Remove(dagfile)
-
- dag, err := openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot open new DAG file %s", dagfile)
- }
-
- if err = dagReplayCommands(dag, "local-init-01.sync"); err != nil {
- t.Fatal(err)
- }
-
- oid, err := strToObjID("12345")
- if err != nil {
- t.Fatal(err)
- }
-
- exp := map[raw.Version][]raw.Version{8: nil}
-
- // Prune at v8 with a callback function that fails for v3.
- del, expDel := 0, 8
- version := raw.Version(8)
- err = dag.prune(oid, version, func(lr string) error {
- del++
- if lr == "logrec-03" {
- return fmt.Errorf("refuse to delete %s", lr)
- }
- return nil
- })
-
- if err == nil {
- t.Errorf("pruning object %d:%d did not fail in DAG file %s", oid, version, dagfile)
- }
- if del != expDel {
- t.Errorf("pruning object %d:%d deleted %d log records instead of %d", oid, version, del, expDel)
- }
-
- err = dag.pruneDone()
- if err != nil {
- t.Errorf("pruneDone() failed in DAG file %s: %v", dagfile, err)
- }
-
- if head, err := dag.getHead(oid); err != nil || head != 8 {
- t.Errorf("Object %d has wrong head in DAG file %s: %d", oid, dagfile, head)
- }
-
- pmap := dag.getParentMap(oid)
- if !reflect.DeepEqual(pmap, exp) {
- t.Errorf("Invalid object %d parent map in DAG file %s: (%v) instead of (%v)", oid, dagfile, pmap, exp)
- }
-
- if err = checkEndOfSync(dag, oid); err != nil {
- t.Fatal(err)
- }
-}
-
-// TestDAGCompact tests compacting of dag's kvdb file.
-func TestDAGCompact(t *testing.T) {
- dagfile := dagFilename()
- defer os.Remove(dagfile)
-
- dag, err := openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot open new DAG file %s", dagfile)
- }
-
- // Put some data in "heads" table.
- headMap := make(map[storage.ID]raw.Version)
- for i := 0; i < 10; i++ {
- // Generate a random object id in [0, 1000).
- oid, err := strToObjID(fmt.Sprintf("%d", testutil.Rand.Intn(1000)))
- if err != nil {
- t.Fatal(err)
- }
- // Generate a random version number for this object.
- vers := raw.Version(testutil.Rand.Intn(5000))
-
- // Cache this <oid,version> pair to verify with getHead().
- headMap[oid] = vers
-
- if err = dag.setHead(oid, vers); err != nil {
- t.Fatalf("Cannot set object head %d (%d) in DAG file %s", oid, vers, dagfile)
- }
-
- // Flush immediately to let the kvdb file grow.
- dag.flush()
- }
-
- // Put some data in "nodes" table.
- type nodeKey struct {
- oid storage.ID
- vers raw.Version
- }
- nodeMap := make(map[nodeKey]*dagNode)
- for oid, vers := range headMap {
- // Generate a random dag node for this <oid, vers>.
- l := uint64(testutil.Rand.Intn(20))
- p1 := raw.Version(testutil.Rand.Intn(5000))
- p2 := raw.Version(testutil.Rand.Intn(5000))
- log := fmt.Sprintf("%d", testutil.Rand.Intn(1000))
- node := &dagNode{Level: l, Parents: []raw.Version{p1, p2}, Logrec: log}
-
- // Cache this <oid,version, dagNode> to verify with getNode().
- key := nodeKey{oid: oid, vers: vers}
- nodeMap[key] = node
-
- if err = dag.setNode(oid, vers, node); err != nil {
- t.Fatalf("Cannot set object %d:%d (%v) in DAG file %s", oid, vers, node, dagfile)
- }
-
- // Flush immediately to let the kvdb file grow.
- dag.flush()
- }
-
- // Get size before compaction.
- oldSize := fileSize(dagfile)
- if oldSize < 0 {
- t.Fatalf("DAG file %s not created", dagfile)
- }
-
- if err = dag.compact(); err != nil {
- t.Fatalf("Cannot compact DAG file %s", dagfile)
- }
-
- // Verify size of kvdb file is reduced.
- size := fileSize(dagfile)
- if size < 0 {
- t.Fatalf("DAG file %s not created", dagfile)
- }
- if size > oldSize {
- t.Fatalf("DAG file %s not compacted", dagfile)
- }
-
- // Check data exists after compaction.
- for oid, vers := range headMap {
- vers2, err := dag.getHead(oid)
- if err != nil {
- t.Errorf("Cannot find stored object head %d in DAG file %s", oid, dagfile)
- }
- if vers != vers2 {
- t.Errorf("Object %d has wrong head data in DAG file %s: %d instead of %d",
- oid, dagfile, vers2, vers)
- }
- }
- for key, node := range nodeMap {
- node2, err := dag.getNode(key.oid, key.vers)
- if err != nil || node2 == nil {
- t.Errorf("Cannot find stored object %d:%d in DAG file %s", key.oid, key.vers, dagfile)
- }
- if !reflect.DeepEqual(node, node2) {
- t.Errorf("Object %d:%d has wrong data in DAG file %s: %v instead of %v",
- key.oid, key.vers, dagfile, node2, node)
- }
- }
- dag.close()
-}
-
-// TestRemoteLinkedNoConflictSameHead tests sync of remote updates that contain
-// linked nodes (conflict resolution by selecting an existing version) on top of
-// a local initial state without conflict. An object is created locally and
-// updated twice (v1 -> v2 -> v3). Another device has learned about v1, created
-// (v1 -> v4), then learned about (v1 -> v2) and resolved that conflict by selecting
-// v2 over v4. Now it sends that new info (v4 and the v2/v4 link) back to the
-// original (local) device. Instead of a v3/v4 conflict, the device sees that
-// v2 was chosen over v4 and resolves it as a no-conflict case.
-func TestRemoteLinkedNoConflictSameHead(t *testing.T) {
- dagfile := dagFilename()
- defer os.Remove(dagfile)
-
- dag, err := openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot open new DAG file %s", dagfile)
- }
-
- if err = dagReplayCommands(dag, "local-init-00.log.sync"); err != nil {
- t.Fatal(err)
- }
- if err = dagReplayCommands(dag, "remote-noconf-link-00.log.sync"); err != nil {
- t.Fatal(err)
- }
-
- // The head must not have moved (i.e. still at v3) and the parent map
- // shows the newly grafted DAG fragment on top of the prior DAG.
- oid, err := strToObjID("12345")
- if err != nil {
- t.Fatal(err)
- }
-
- if head, e := dag.getHead(oid); e != nil || head != 3 {
- t.Errorf("Object %d has wrong head in DAG file %s: %d", oid, dagfile, head)
- }
-
- pmap := dag.getParentMap(oid)
-
- exp := map[raw.Version][]raw.Version{1: nil, 2: {1, 4}, 3: {2}, 4: {1}}
-
- if !reflect.DeepEqual(pmap, exp) {
- t.Errorf("Invalid object %d parent map in DAG file %s: (%v) instead of (%v)", oid, dagfile, pmap, exp)
- }
-
- // Verify the grafting of remote nodes.
- newHeads, grafts := dag.getGraftNodes(oid)
-
- expNewHeads := map[raw.Version]struct{}{3: struct{}{}}
- if !reflect.DeepEqual(newHeads, expNewHeads) {
- t.Errorf("Object %d has invalid newHeads in DAG file %s: (%v) instead of (%v)", oid, dagfile, newHeads, expNewHeads)
- }
-
- expgrafts := map[raw.Version]uint64{1: 0, 4: 1}
- if !reflect.DeepEqual(grafts, expgrafts) {
- t.Errorf("Invalid object %d graft in DAG file %s: (%v) instead of (%v)", oid, dagfile, grafts, expgrafts)
- }
-
- // There should be no conflict.
- isConflict, newHead, oldHead, ancestor, errConflict := dag.hasConflict(oid)
- if !(!isConflict && newHead == 3 && oldHead == 3 && ancestor == raw.NoVersion && errConflict == nil) {
- t.Errorf("Object %d wrong conflict info: flag %t, newHead %d, oldHead %d, ancestor %d, err %v",
- oid, isConflict, newHead, oldHead, ancestor, errConflict)
- }
-
- // Clear the grafting data and verify that hasConflict() fails without it.
- dag.clearGraft()
- isConflict, newHead, oldHead, ancestor, errConflict = dag.hasConflict(oid)
- if errConflict == nil {
-		t.Errorf("hasConflict() on object %d did not fail w/o graft info: flag %t, newHead %d, oldHead %d, ancestor %d, err %v",
-			oid, isConflict, newHead, oldHead, ancestor, errConflict)
- }
-
- if err := checkEndOfSync(dag, oid); err != nil {
- t.Fatal(err)
- }
-}
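-
-// Note how "resolution by selecting an existing version" is encoded
-// above: the chosen version (v2) simply gains the losing version (v4)
-// as an extra parent (2 -> {1, 4}), so no new version is minted and
-// the selection can be detected from the link alone.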
-
-// TestRemoteLinkedConflict tests sync of remote updates that contain linked
-// nodes (conflict resolution by selecting an existing version) on top of a local
-// initial state triggering a local conflict. An object is created locally and
-// updated twice (v1 -> v2 -> v3). Another device has, along the way, learned about v1,
-// created (v1 -> v4), then learned about (v1 -> v2) and resolved that conflict by
-// selecting v4 over v2. Now it sends that new info (v4 and the v4/v2 link) back
-// to the original (local) device. The device sees a v3/v4 conflict.
-func TestRemoteLinkedConflict(t *testing.T) {
- dagfile := dagFilename()
- defer os.Remove(dagfile)
-
- dag, err := openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot open new DAG file %s", dagfile)
- }
-
- if err = dagReplayCommands(dag, "local-init-00.log.sync"); err != nil {
- t.Fatal(err)
- }
- if err = dagReplayCommands(dag, "remote-conf-link.log.sync"); err != nil {
- t.Fatal(err)
- }
-
-	// The head must not have moved (i.e. still at v3) and the parent map
- // shows the newly grafted DAG fragment on top of the prior DAG.
- oid, err := strToObjID("12345")
- if err != nil {
- t.Fatal(err)
- }
-
- if head, e := dag.getHead(oid); e != nil || head != 3 {
- t.Errorf("Object %d has wrong head in DAG file %s: %d", oid, dagfile, head)
- }
-
- pmap := dag.getParentMap(oid)
-
- exp := map[raw.Version][]raw.Version{1: nil, 2: {1}, 3: {2}, 4: {1, 2}}
-
- if !reflect.DeepEqual(pmap, exp) {
- t.Errorf("Invalid object %d parent map in DAG file %s: (%v) instead of (%v)", oid, dagfile, pmap, exp)
- }
-
- // Verify the grafting of remote nodes.
- newHeads, grafts := dag.getGraftNodes(oid)
-
- expNewHeads := map[raw.Version]struct{}{3: struct{}{}, 4: struct{}{}}
- if !reflect.DeepEqual(newHeads, expNewHeads) {
- t.Errorf("Object %d has invalid newHeads in DAG file %s: (%v) instead of (%v)", oid, dagfile, newHeads, expNewHeads)
- }
-
- expgrafts := map[raw.Version]uint64{1: 0, 2: 1}
- if !reflect.DeepEqual(grafts, expgrafts) {
- t.Errorf("Invalid object %d graft in DAG file %s: (%v) instead of (%v)", oid, dagfile, grafts, expgrafts)
- }
-
- // There should be a conflict.
- isConflict, newHead, oldHead, ancestor, errConflict := dag.hasConflict(oid)
- if !(isConflict && newHead == 4 && oldHead == 3 && ancestor == 2 && errConflict == nil) {
- t.Errorf("Object %d wrong conflict info: flag %t, newHead %d, oldHead %d, ancestor %d, err %v",
- oid, isConflict, newHead, oldHead, ancestor, errConflict)
- }
-
- // Clear the grafting data and verify that hasConflict() fails without it.
- dag.clearGraft()
- isConflict, newHead, oldHead, ancestor, errConflict = dag.hasConflict(oid)
- if errConflict == nil {
-		t.Errorf("hasConflict() on object %d did not fail w/o graft info: flag %t, newHead %d, oldHead %d, ancestor %d, err %v",
-			oid, isConflict, newHead, oldHead, ancestor, errConflict)
- }
-
- if err := checkEndOfSync(dag, oid); err != nil {
- t.Fatal(err)
- }
-}
-
-// TestRemoteLinkedNoConflictNewHead tests sync of remote updates that contain
-// linked nodes (conflict resolution by selecting an existing version) on top of
-// a local initial state without conflict, but moves the head node to a new one.
-// An object is created locally and updated twice (v1 -> v2 -> v3). Another device
-// has, along the way, learned about v1, created (v1 -> v4), then learned about
-// (v1 -> v2 -> v3) and resolved that conflict by selecting v4 over v3. Now it
-// sends that new info (v4 and the v4/v3 link) back to the original (local) device.
-// The device sees that the new head v4 is "derived" from v3, thus no conflict.
-func TestRemoteLinkedNoConflictNewHead(t *testing.T) {
- dagfile := dagFilename()
- defer os.Remove(dagfile)
-
- dag, err := openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot open new DAG file %s", dagfile)
- }
-
- if err = dagReplayCommands(dag, "local-init-00.log.sync"); err != nil {
- t.Fatal(err)
- }
- if err = dagReplayCommands(dag, "remote-noconf-link-01.log.sync"); err != nil {
- t.Fatal(err)
- }
-
-	// The head must not have moved (i.e. still at v3) and the parent map
- // shows the newly grafted DAG fragment on top of the prior DAG.
- oid, err := strToObjID("12345")
- if err != nil {
- t.Fatal(err)
- }
-
- if head, e := dag.getHead(oid); e != nil || head != 3 {
- t.Errorf("Object %d has wrong head in DAG file %s: %d", oid, dagfile, head)
- }
-
- pmap := dag.getParentMap(oid)
-
- exp := map[raw.Version][]raw.Version{1: nil, 2: {1}, 3: {2}, 4: {1, 3}}
-
- if !reflect.DeepEqual(pmap, exp) {
- t.Errorf("Invalid object %d parent map in DAG file %s: (%v) instead of (%v)", oid, dagfile, pmap, exp)
- }
-
- // Verify the grafting of remote nodes.
- newHeads, grafts := dag.getGraftNodes(oid)
-
- expNewHeads := map[raw.Version]struct{}{4: struct{}{}}
- if !reflect.DeepEqual(newHeads, expNewHeads) {
- t.Errorf("Object %d has invalid newHeads in DAG file %s: (%v) instead of (%v)", oid, dagfile, newHeads, expNewHeads)
- }
-
- expgrafts := map[raw.Version]uint64{1: 0, 3: 2}
- if !reflect.DeepEqual(grafts, expgrafts) {
- t.Errorf("Invalid object %d graft in DAG file %s: (%v) instead of (%v)", oid, dagfile, grafts, expgrafts)
- }
-
- // There should be no conflict.
- isConflict, newHead, oldHead, ancestor, errConflict := dag.hasConflict(oid)
- if !(!isConflict && newHead == 4 && oldHead == 3 && ancestor == raw.NoVersion && errConflict == nil) {
- t.Errorf("Object %d wrong conflict info: flag %t, newHead %d, oldHead %d, ancestor %d, err %v",
- oid, isConflict, newHead, oldHead, ancestor, errConflict)
- }
-
- // Clear the grafting data and verify that hasConflict() fails without it.
- dag.clearGraft()
- isConflict, newHead, oldHead, ancestor, errConflict = dag.hasConflict(oid)
- if errConflict == nil {
-		t.Errorf("hasConflict() on object %d did not fail w/o graft info: flag %t, newHead %d, oldHead %d, ancestor %d, err %v",
-			oid, isConflict, newHead, oldHead, ancestor, errConflict)
- }
-
- if err := checkEndOfSync(dag, oid); err != nil {
- t.Fatal(err)
- }
-}
-
-// TestRemoteLinkedNoConflictNewHeadOvertake tests sync of remote updates that
-// contain linked nodes (conflict resolution by selecting an existing version)
-// on top of a local initial state without conflict, but moves the head node
-// to a new one that overtook the linked node.
-// An object is created locally and updated twice (v1 -> v2 -> v3). Another
-// device has, along the way, learned about v1, created (v1 -> v4), then learned
-// about (v1 -> v2 -> v3) and resolved that conflict by selecting v3 over v4.
-// Then it creates a new update v5 from v3 (v3 -> v5). Now it sends that new
-// info (v4, the v3/v4 link, and v5) back to the original (local) device.
-// The device sees that the new head v5 is "derived" from v3, thus no conflict.
-func TestRemoteLinkedNoConflictNewHeadOvertake(t *testing.T) {
- dagfile := dagFilename()
- defer os.Remove(dagfile)
-
- dag, err := openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot open new DAG file %s", dagfile)
- }
-
- if err = dagReplayCommands(dag, "local-init-00.log.sync"); err != nil {
- t.Fatal(err)
- }
- if err = dagReplayCommands(dag, "remote-noconf-link-02.log.sync"); err != nil {
- t.Fatal(err)
- }
-
-	// The head must not have moved (i.e. still at v3) and the parent map
- // shows the newly grafted DAG fragment on top of the prior DAG.
- oid, err := strToObjID("12345")
- if err != nil {
- t.Fatal(err)
- }
-
- if head, e := dag.getHead(oid); e != nil || head != 3 {
- t.Errorf("Object %d has wrong head in DAG file %s: %d", oid, dagfile, head)
- }
-
- pmap := dag.getParentMap(oid)
-
- exp := map[raw.Version][]raw.Version{1: nil, 2: {1}, 3: {2, 4}, 4: {1}, 5: {3}}
-
- if !reflect.DeepEqual(pmap, exp) {
- t.Errorf("Invalid object %d parent map in DAG file %s: (%v) instead of (%v)", oid, dagfile, pmap, exp)
- }
-
- // Verify the grafting of remote nodes.
- newHeads, grafts := dag.getGraftNodes(oid)
-
- expNewHeads := map[raw.Version]struct{}{5: struct{}{}}
- if !reflect.DeepEqual(newHeads, expNewHeads) {
- t.Errorf("Object %d has invalid newHeads in DAG file %s: (%v) instead of (%v)", oid, dagfile, newHeads, expNewHeads)
- }
-
- expgrafts := map[raw.Version]uint64{1: 0, 3: 2, 4: 1}
- if !reflect.DeepEqual(grafts, expgrafts) {
- t.Errorf("Invalid object %d graft in DAG file %s: (%v) instead of (%v)", oid, dagfile, grafts, expgrafts)
- }
-
- // There should be no conflict.
- isConflict, newHead, oldHead, ancestor, errConflict := dag.hasConflict(oid)
- if !(!isConflict && newHead == 5 && oldHead == 3 && ancestor == raw.NoVersion && errConflict == nil) {
- t.Errorf("Object %d wrong conflict info: flag %t, newHead %d, oldHead %d, ancestor %d, err %v",
- oid, isConflict, newHead, oldHead, ancestor, errConflict)
- }
-
- // Then we can move the head and clear the grafting data.
- if err = dag.moveHead(oid, newHead); err != nil {
- t.Errorf("Object %d cannot move head to %d in DAG file %s: %v", oid, newHead, dagfile, err)
- }
-
- // Clear the grafting data and verify that hasConflict() fails without it.
- dag.clearGraft()
- isConflict, newHead, oldHead, ancestor, errConflict = dag.hasConflict(oid)
- if errConflict == nil {
-		t.Errorf("hasConflict() on object %d did not fail w/o graft info: flag %t, newHead %d, oldHead %d, ancestor %d, err %v",
-			oid, isConflict, newHead, oldHead, ancestor, errConflict)
- }
-
-	// Now new info comes from another device repeating the v3/v4 link.
- // Verify that it is a NOP (no changes).
- if err = dagReplayCommands(dag, "remote-noconf-link-repeat.log.sync"); err != nil {
- t.Fatal(err)
- }
-
- if head, e := dag.getHead(oid); e != nil || head != 5 {
- t.Errorf("Object %d has wrong head in DAG file %s: %d", oid, dagfile, head)
- }
-
- newHeads, grafts = dag.getGraftNodes(oid)
- if !reflect.DeepEqual(newHeads, expNewHeads) {
- t.Errorf("Object %d has invalid newHeads in DAG file %s: (%v) instead of (%v)", oid, dagfile, newHeads, expNewHeads)
- }
-
- expgrafts = map[raw.Version]uint64{}
- if !reflect.DeepEqual(grafts, expgrafts) {
- t.Errorf("Invalid object %d graft in DAG file %s: (%v) instead of (%v)", oid, dagfile, grafts, expgrafts)
- }
-
- isConflict, newHead, oldHead, ancestor, errConflict = dag.hasConflict(oid)
- if !(!isConflict && newHead == 5 && oldHead == 5 && ancestor == raw.NoVersion && errConflict == nil) {
- t.Errorf("Object %d wrong conflict info: flag %t, newHead %d, oldHead %d, ancestor %d, err %v",
- oid, isConflict, newHead, oldHead, ancestor, errConflict)
- }
-
- if err := checkEndOfSync(dag, oid); err != nil {
- t.Fatal(err)
- }
-}
-
-// TestAddNodeTransactional tests adding multiple DAG nodes grouped within a transaction.
-func TestAddNodeTransactional(t *testing.T) {
- dagfile := dagFilename()
- defer os.Remove(dagfile)
-
- dag, err := openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot open new DAG file %s", dagfile)
- }
-
- if err = dagReplayCommands(dag, "local-init-02.sync"); err != nil {
- t.Fatal(err)
- }
-
- oid_a, err := strToObjID("12345")
- if err != nil {
- t.Fatal(err)
- }
- oid_b, err := strToObjID("67890")
- if err != nil {
- t.Fatal(err)
- }
- oid_c, err := strToObjID("222")
- if err != nil {
- t.Fatal(err)
- }
-
- // Verify NoTxID is reported as an error.
- if err := dag.addNodeTxEnd(NoTxID); err == nil {
- t.Errorf("addNodeTxEnd() did not fail for invalid 'NoTxID' value")
- }
- if _, err := dag.getTransaction(NoTxID); err == nil {
- t.Errorf("getTransaction() did not fail for invalid 'NoTxID' value")
- }
- if err := dag.setTransaction(NoTxID, nil); err == nil {
- t.Errorf("setTransaction() did not fail for invalid 'NoTxID' value")
- }
- if err := dag.delTransaction(NoTxID); err == nil {
- t.Errorf("delTransaction() did not fail for invalid 'NoTxID' value")
- }
-
- // Mutate 2 objects within a transaction.
- tid_1 := dag.addNodeTxStart()
- if tid_1 == NoTxID {
- t.Fatal("Cannot start 1st DAG addNode() transaction")
- }
-
- txMap, ok := dag.txSet[tid_1]
- if !ok {
- t.Errorf("Transactions map for Tx ID %v not found in DAG file %s", tid_1, dagfile)
- }
- if n := len(txMap); n != 0 {
- t.Errorf("Transactions map for Tx ID %v has length %d instead of 0 in DAG file %s", tid_1, n, dagfile)
- }
-
- if err := dag.addNode(oid_a, 3, false, false, []raw.Version{2}, "logrec-a-03", tid_1); err != nil {
- t.Errorf("Cannot addNode() on object %d and Tx ID %v in DAG file %s: %v", oid_a, tid_1, dagfile, err)
- }
- if err := dag.addNode(oid_b, 3, false, false, []raw.Version{2}, "logrec-b-03", tid_1); err != nil {
- t.Errorf("Cannot addNode() on object %d and Tx ID %v in DAG file %s: %v", oid_b, tid_1, dagfile, err)
- }
-
- // At the same time mutate the 3rd object in another transaction.
- tid_2 := dag.addNodeTxStart()
- if tid_2 == NoTxID {
- t.Fatal("Cannot start 2nd DAG addNode() transaction")
- }
-
- txMap, ok = dag.txSet[tid_2]
- if !ok {
- t.Errorf("Transactions map for Tx ID %v not found in DAG file %s", tid_2, dagfile)
- }
- if n := len(txMap); n != 0 {
- t.Errorf("Transactions map for Tx ID %v has length %d instead of 0 in DAG file %s", tid_2, n, dagfile)
- }
-
- if err := dag.addNode(oid_c, 2, false, false, []raw.Version{1}, "logrec-c-02", tid_2); err != nil {
- t.Errorf("Cannot addNode() on object %d and Tx ID %v in DAG file %s: %v", oid_c, tid_2, dagfile, err)
- }
-
- // Verify the in-memory transaction sets constructed.
- txMap, ok = dag.txSet[tid_1]
- if !ok {
- t.Errorf("Transactions map for Tx ID %v not found in DAG file %s", tid_1, dagfile)
- }
-
- expTxMap := dagTxMap{oid_a: 3, oid_b: 3}
- if !reflect.DeepEqual(txMap, expTxMap) {
- t.Errorf("Invalid transaction map for Tx ID %v in DAG file %s: %v instead of %v", tid_1, dagfile, txMap, expTxMap)
- }
-
- txMap, ok = dag.txSet[tid_2]
- if !ok {
- t.Errorf("Transactions map for Tx ID %v not found in DAG file %s", tid_2, dagfile)
- }
-
- expTxMap = dagTxMap{oid_c: 2}
- if !reflect.DeepEqual(txMap, expTxMap) {
- t.Errorf("Invalid transaction map for Tx ID %v in DAG file %s: %v instead of %v", tid_2, dagfile, txMap, expTxMap)
- }
-
-	// Verify that using a Tx ID not returned by addNodeTxStart() fails.
- bad_tid := tid_1 + 1
- for bad_tid == NoTxID || bad_tid == tid_2 {
- bad_tid++
- }
-
- if err := dag.addNode(oid_c, 3, false, false, []raw.Version{2}, "logrec-c-03", bad_tid); err == nil {
- t.Errorf("addNode() did not fail on object %d for a bad Tx ID %v in DAG file %s", oid_c, bad_tid, dagfile)
- }
- if err := dag.addNodeTxEnd(bad_tid); err == nil {
- t.Errorf("addNodeTxEnd() did not fail for a bad Tx ID %v in DAG file %s", bad_tid, dagfile)
- }
-
- // End the 1st transaction and verify the in-memory and in-DAG data.
- if err := dag.addNodeTxEnd(tid_1); err != nil {
- t.Errorf("Cannot addNodeTxEnd() for Tx ID %v in DAG file %s: %v", tid_1, dagfile, err)
- }
-
- if _, ok = dag.txSet[tid_1]; ok {
- t.Errorf("Transactions map for Tx ID %v still exists in DAG file %s", tid_1, dagfile)
- }
-
- txMap, err = dag.getTransaction(tid_1)
- if err != nil {
- t.Errorf("Cannot getTransaction() for Tx ID %v in DAG file %s: %v", tid_1, dagfile, err)
- }
-
- expTxMap = dagTxMap{oid_a: 3, oid_b: 3}
- if !reflect.DeepEqual(txMap, expTxMap) {
- t.Errorf("Invalid transaction map from DAG storage for Tx ID %v in DAG file %s: %v instead of %v",
- tid_1, dagfile, txMap, expTxMap)
- }
-
- txMap, ok = dag.txSet[tid_2]
- if !ok {
- t.Errorf("Transactions map for Tx ID %v not found in DAG file %s", tid_2, dagfile)
- }
-
- expTxMap = dagTxMap{oid_c: 2}
- if !reflect.DeepEqual(txMap, expTxMap) {
- t.Errorf("Invalid transaction map for Tx ID %v in DAG file %s: %v instead of %v", tid_2, dagfile, txMap, expTxMap)
- }
-
- // End the 2nd transaction and re-verify the in-memory and in-DAG data.
- if err := dag.addNodeTxEnd(tid_2); err != nil {
- t.Errorf("Cannot addNodeTxEnd() for Tx ID %v in DAG file %s: %v", tid_2, dagfile, err)
- }
-
- if _, ok = dag.txSet[tid_2]; ok {
- t.Errorf("Transactions map for Tx ID %v still exists in DAG file %s", tid_2, dagfile)
- }
-
- txMap, err = dag.getTransaction(tid_2)
- if err != nil {
- t.Errorf("Cannot getTransaction() for Tx ID %v in DAG file %s: %v", tid_2, dagfile, err)
- }
-
- expTxMap = dagTxMap{oid_c: 2}
- if !reflect.DeepEqual(txMap, expTxMap) {
- t.Errorf("Invalid transaction map for Tx ID %v in DAG file %s: %v instead of %v", tid_2, dagfile, txMap, expTxMap)
- }
-
- if n := len(dag.txSet); n != 0 {
- t.Errorf("Transaction sets in-memory: %d entries found, should be empty in DAG file %s", n, dagfile)
- }
-
- // Get the 3 new nodes from the DAG and verify their Tx IDs.
- node, err := dag.getNode(oid_a, 3)
- if err != nil {
- t.Errorf("Cannot find object %d:3 in DAG file %s: %v", oid_a, dagfile, err)
- }
- if node.TxID != tid_1 {
- t.Errorf("Invalid TxID for object %d:3 in DAG file %s: %v instead of %v", oid_a, dagfile, node.TxID, tid_1)
- }
- node, err = dag.getNode(oid_b, 3)
- if err != nil {
- t.Errorf("Cannot find object %d:3 in DAG file %s: %v", oid_b, dagfile, err)
- }
- if node.TxID != tid_1 {
- t.Errorf("Invalid TxID for object %d:3 in DAG file %s: %v instead of %v", oid_b, dagfile, node.TxID, tid_1)
- }
- node, err = dag.getNode(oid_c, 2)
- if err != nil {
- t.Errorf("Cannot find object %d:2 in DAG file %s: %v", oid_c, dagfile, err)
- }
- if node.TxID != tid_2 {
- t.Errorf("Invalid TxID for object %d:2 in DAG file %s: %v instead of %v", oid_c, dagfile, node.TxID, tid_2)
- }
-
- for _, oid := range []storage.ID{oid_a, oid_b, oid_c} {
- if err := checkEndOfSync(dag, oid); err != nil {
- t.Fatal(err)
- }
- }
-}
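-
-// The transaction bookkeeping exercised above has a simple lifecycle:
-// addNodeTxStart() allocates a Tx ID with an empty in-memory map, each
-// addNode() under that Tx ID records an (object -> version) entry, and
-// addNodeTxEnd() persists the map and drops it from dag.txSet. A
-// minimal sketch over plain maps (hypothetical helper, not the package
-// API; uint64 stands in for the real Tx ID and object ID types):
-func txLifecycleSketch(persist func(tid uint64, tx map[uint64]uint64)) {
-	txSet := make(map[uint64]map[uint64]uint64) // Tx ID -> object -> version
-	const tid = 1
-	txSet[tid] = map[uint64]uint64{} // addNodeTxStart()
-	txSet[tid][12345] = 3            // addNode(oid_a, 3, ..., tid)
-	txSet[tid][67890] = 3            // addNode(oid_b, 3, ..., tid)
-	persist(tid, txSet[tid])         // addNodeTxEnd(): persist the set...
-	delete(txSet, tid)               // ...then drop the in-memory entry
-}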
-
-// TestPruningTransactions tests pruning DAG nodes grouped within transactions.
-func TestPruningTransactions(t *testing.T) {
- dagfile := dagFilename()
- defer os.Remove(dagfile)
-
- dag, err := openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot open new DAG file %s", dagfile)
- }
-
- if err = dagReplayCommands(dag, "local-init-02.sync"); err != nil {
- t.Fatal(err)
- }
-
- oid_a, err := strToObjID("12345")
- if err != nil {
- t.Fatal(err)
- }
- oid_b, err := strToObjID("67890")
- if err != nil {
- t.Fatal(err)
- }
- oid_c, err := strToObjID("222")
- if err != nil {
- t.Fatal(err)
- }
-
- // Mutate objects in 2 transactions then add non-transactional mutations
- // to act as the pruning points. Before pruning the DAG is:
- // a1 -- a2 -- (a3) --- a4
- // b1 -- b2 -- (b3) -- (b4) -- b5
- // c1 ---------------- (c2)
- // Now by pruning at (a4, b5, c2), the new DAG should be:
- // a4
- // b5
- // (c2)
- // Transaction 1 (a3, b3) gets deleted, but transaction 2 (b4, c2) still
-	// has (c2) dangling, waiting for a future pruning.
- tid_1 := dag.addNodeTxStart()
- if tid_1 == NoTxID {
- t.Fatal("Cannot start 1st DAG addNode() transaction")
- }
- if err := dag.addNode(oid_a, 3, false, false, []raw.Version{2}, "logrec-a-03", tid_1); err != nil {
- t.Errorf("Cannot addNode() on object %d and Tx ID %v in DAG file %s: %v", oid_a, tid_1, dagfile, err)
- }
- if err := dag.addNode(oid_b, 3, false, false, []raw.Version{2}, "logrec-b-03", tid_1); err != nil {
- t.Errorf("Cannot addNode() on object %d and Tx ID %v in DAG file %s: %v", oid_b, tid_1, dagfile, err)
- }
- if err := dag.addNodeTxEnd(tid_1); err != nil {
- t.Errorf("Cannot addNodeTxEnd() for Tx ID %v in DAG file %s: %v", tid_1, dagfile, err)
- }
-
- tid_2 := dag.addNodeTxStart()
- if tid_2 == NoTxID {
- t.Fatal("Cannot start 2nd DAG addNode() transaction")
- }
- if err := dag.addNode(oid_b, 4, false, false, []raw.Version{3}, "logrec-b-04", tid_2); err != nil {
- t.Errorf("Cannot addNode() on object %d and Tx ID %v in DAG file %s: %v", oid_b, tid_2, dagfile, err)
- }
- if err := dag.addNode(oid_c, 2, false, false, []raw.Version{1}, "logrec-c-02", tid_2); err != nil {
- t.Errorf("Cannot addNode() on object %d and Tx ID %v in DAG file %s: %v", oid_c, tid_2, dagfile, err)
- }
- if err := dag.addNodeTxEnd(tid_2); err != nil {
- t.Errorf("Cannot addNodeTxEnd() for Tx ID %v in DAG file %s: %v", tid_2, dagfile, err)
- }
-
-	if err := dag.addNode(oid_a, 4, false, false, []raw.Version{3}, "logrec-a-04", NoTxID); err != nil {
-		t.Errorf("Cannot addNode() on object %d in DAG file %s: %v", oid_a, dagfile, err)
-	}
-	if err := dag.addNode(oid_b, 5, false, false, []raw.Version{4}, "logrec-b-05", NoTxID); err != nil {
-		t.Errorf("Cannot addNode() on object %d in DAG file %s: %v", oid_b, dagfile, err)
-	}
-
- if err = dag.moveHead(oid_a, 4); err != nil {
- t.Errorf("Object %d cannot move head in DAG file %s: %v", oid_a, dagfile, err)
- }
- if err = dag.moveHead(oid_b, 5); err != nil {
- t.Errorf("Object %d cannot move head in DAG file %s: %v", oid_b, dagfile, err)
- }
- if err = dag.moveHead(oid_c, 2); err != nil {
- t.Errorf("Object %d cannot move head in DAG file %s: %v", oid_c, dagfile, err)
- }
-
- // Verify the transaction sets.
- txMap, err := dag.getTransaction(tid_1)
- if err != nil {
- t.Errorf("Cannot getTransaction() for Tx ID %v in DAG file %s: %v", tid_1, dagfile, err)
- }
-
- expTxMap := dagTxMap{oid_a: 3, oid_b: 3}
- if !reflect.DeepEqual(txMap, expTxMap) {
- t.Errorf("Invalid transaction map from DAG storage for Tx ID %v in DAG file %s: %v instead of %v",
- tid_1, dagfile, txMap, expTxMap)
- }
-
- txMap, err = dag.getTransaction(tid_2)
- if err != nil {
- t.Errorf("Cannot getTransaction() for Tx ID %v in DAG file %s: %v", tid_2, dagfile, err)
- }
-
- expTxMap = dagTxMap{oid_b: 4, oid_c: 2}
- if !reflect.DeepEqual(txMap, expTxMap) {
- t.Errorf("Invalid transaction map for Tx ID %v in DAG file %s: %v instead of %v", tid_2, dagfile, txMap, expTxMap)
- }
-
- // Prune the 3 objects at their head nodes.
- for _, oid := range []storage.ID{oid_a, oid_b, oid_c} {
- head, err := dag.getHead(oid)
- if err != nil {
- t.Errorf("Cannot getHead() on object %d in DAG file %s: %v", oid, dagfile, err)
- }
- err = dag.prune(oid, head, func(lr string) error {
- return nil
- })
- if err != nil {
- t.Errorf("Cannot prune() on object %d in DAG file %s: %v", oid, dagfile, err)
- }
- }
-
- if err = dag.pruneDone(); err != nil {
- t.Errorf("pruneDone() failed in DAG file %s: %v", dagfile, err)
- }
-
- if n := len(dag.txGC); n != 0 {
- t.Errorf("Transaction GC map not empty after pruneDone() in DAG file %s: %d", dagfile, n)
- }
-
- // Verify that Tx-1 was deleted and Tx-2 still has c2 in it.
- txMap, err = dag.getTransaction(tid_1)
- if err == nil {
- t.Errorf("getTransaction() did not fail for Tx ID %v in DAG file %s: %v", tid_1, dagfile, txMap)
- }
-
- txMap, err = dag.getTransaction(tid_2)
- if err != nil {
- t.Errorf("Cannot getTransaction() for Tx ID %v in DAG file %s: %v", tid_2, dagfile, err)
- }
-
- expTxMap = dagTxMap{oid_c: 2}
- if !reflect.DeepEqual(txMap, expTxMap) {
- t.Errorf("Invalid transaction map for Tx ID %v in DAG file %s: %v instead of %v", tid_2, dagfile, txMap, expTxMap)
- }
-
- // Add c3 as a new head and prune at that point. This should GC Tx-2.
- if err := dag.addNode(oid_c, 3, false, false, []raw.Version{2}, "logrec-c-03", NoTxID); err != nil {
- t.Errorf("Cannot addNode() on object %d in DAG file %s: %v", oid_c, dagfile, err)
- }
- if err = dag.moveHead(oid_c, 3); err != nil {
- t.Errorf("Object %d cannot move head in DAG file %s: %v", oid_c, dagfile, err)
- }
-
- err = dag.prune(oid_c, 3, func(lr string) error {
- return nil
- })
- if err != nil {
- t.Errorf("Cannot prune() on object %d in DAG file %s: %v", oid_c, dagfile, err)
- }
- if err = dag.pruneDone(); err != nil {
- t.Errorf("pruneDone() #2 failed in DAG file %s: %v", dagfile, err)
- }
- if n := len(dag.txGC); n != 0 {
- t.Errorf("Transaction GC map not empty after pruneDone() in DAG file %s: %d", dagfile, n)
- }
-
- txMap, err = dag.getTransaction(tid_2)
- if err == nil {
- t.Errorf("getTransaction() did not fail for Tx ID %v in DAG file %s: %v", tid_2, dagfile, txMap)
- }
-
- for _, oid := range []storage.ID{oid_a, oid_b, oid_c} {
- if err := checkEndOfSync(dag, oid); err != nil {
- t.Fatal(err)
- }
- }
-}
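-
-// The transaction GC rule verified above: a transaction record is only
-// deleted once every (object, version) it names has been pruned away;
-// until then the surviving entries (c2 above) stay behind. A minimal
-// sketch (illustrative only; uint64 stands in for the real Tx ID type):
-func gcTransactions(txs map[uint64]dagTxMap, pruned func(oid storage.ID, v raw.Version) bool) {
-	for tid, txMap := range txs {
-		for oid, v := range txMap {
-			if pruned(oid, v) {
-				delete(txMap, oid) // this node is gone from the DAG
-			}
-		}
-		if len(txMap) == 0 {
-			delete(txs, tid) // no members left: drop the whole record
-		}
-	}
-}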
-
-// TestHasDeletedDescendant tests the lookup of deleted DAG nodes descending from a given node.
-func TestHasDeletedDescendant(t *testing.T) {
- dagfile := dagFilename()
- defer os.Remove(dagfile)
-
- dag, err := openDAG(dagfile)
- if err != nil {
- t.Fatalf("Cannot open new DAG file %s", dagfile)
- }
-
- if err = dagReplayCommands(dag, "local-init-03.sync"); err != nil {
- t.Fatal(err)
- }
-
- oid, err := strToObjID("12345")
- if err != nil {
- t.Fatal(err)
- }
-
- // Delete node v3 to create a dangling parent link from v7 (increase code coverage).
- if err = dag.delNode(oid, 3); err != nil {
- t.Errorf("cannot delete node %d:3 in DAG file %s: %v", oid, dagfile, err)
- }
-
- type hasDelDescTest struct {
- node raw.Version
- result bool
- }
- tests := []hasDelDescTest{
- {raw.NoVersion, false},
- {999, false},
- {1, true},
- {2, true},
- {3, false},
- {4, false},
- {5, false},
- {6, false},
- {7, false},
- {8, false},
- }
-
- for _, test := range tests {
- result := dag.hasDeletedDescendant(oid, test.node)
- if result != test.result {
- t.Errorf("hasDeletedDescendant() for node %d in DAG file %s: %v instead of %v",
- test.node, dagfile, result, test.result)
- }
- }
-
- dag.close()
-}
diff --git a/runtimes/google/vsync/devtable.go b/runtimes/google/vsync/devtable.go
deleted file mode 100644
index 4a2fe71..0000000
--- a/runtimes/google/vsync/devtable.go
+++ /dev/null
@@ -1,483 +0,0 @@
-package vsync
-
-// Package vsync provides veyron sync DevTable utility functions.
-// DevTable is indexed by the device ID and stores device-level
-// information needed by sync. The main component of a device's info is
-// its generation vector. The generation vector is the version vector for
-// a device's store, representing all the different generations (from
-// different devices) seen by a given device. A generation represents
-// a collection of updates that originated on a device during an
-// interval of time. It serves as a checkpoint when communicating with
-// other devices. Generations do not overlap and all updates belong to
-// a generation.
-//
-// Synchronization between two devices A and B uses generation vectors
-// as follows:
-//    A                                          B
-//                              <== B's generation vector
-//    diff(A's generation vector, B's generation vector)
-//    log records of missing generations ==>
-//    cache B's generation vector (for space reclamation)
-//
-// Implementation notes: DevTable is stored in a persistent K/V
-// database in the current implementation. Generation vector is
-// implemented as a map of (Device ID -> Generation ID), one entry for
-// every known device. If the generation vector contains an entry
-// (Device ID -> Generation ID), it implies that the device has
-// learned of all the generations until and including Generation
-// ID. Generation IDs start from 1. A generation ID of 0 is a
-// reserved bootstrap value and indicates the device has no updates.
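-//
-// For illustration (hypothetical values, assuming a string-based
-// device ID): GenVector{"A": 10, "B": 5} means this device has seen
-// generations 1 through 10 from device A and 1 through 5 from device B.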
-import (
- "errors"
- "sort"
- "time"
-
- "veyron2/vlog"
-)
-
-var (
- errInvalidDTab = errors.New("invalid devtable db")
-)
-
-// devInfo is the information stored per device.
-type devInfo struct {
- Vector GenVector // device generation vector.
- Ts time.Time // last communication time stamp.
-}
-
-// devTableHeader contains the header metadata.
-type devTableHeader struct {
- Resmark []byte // resume marker for watch.
- // Generation vector for space reclamation. All generations
- // less than this generation vector are deleted from storage.
- ReclaimVec GenVector
-}
-
-// devTable contains the metadata for the device table db.
-type devTable struct {
- fname string // file pathname.
- db *kvdb // underlying K/V DB.
- devices *kvtable // pointer to the "devices" table in the kvdb. Contains device info.
-
- // Key:"Head" Value:devTableHeader
- header *kvtable // pointer to the "header" table in the kvdb. Contains device table header.
- head *devTableHeader // devTable head cached in memory.
-
- s *syncd // pointer to the sync daemon object.
-}
-
-// genOrder represents a generation along with its position in the log.
-type genOrder struct {
- devID DeviceID
- genID GenID
- order uint32
-}
-
-// byOrder is used to sort the genOrder array.
-type byOrder []*genOrder
-
-func (a byOrder) Len() int {
- return len(a)
-}
-
-func (a byOrder) Swap(i, j int) {
- a[i], a[j] = a[j], a[i]
-}
-
-func (a byOrder) Less(i, j int) bool {
- return a[i].order < a[j].order
-}
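-
-// A sketch of the intended usage (assuming the standard sort package,
-// which is imported above): the generations collected by
-// diffGenVectors() are put in log order with:
-//	sort.Sort(byOrder(gens))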
-
-// openDevTable opens or creates a devTable for the given filename.
-func openDevTable(filename string, sin *syncd) (*devTable, error) {
- dtab := &devTable{
- fname: filename,
- s: sin,
- }
- // Open the file and create it if it does not exist.
- // Also initialize the kvdb and its collection.
- db, tbls, err := kvdbOpen(filename, []string{"devices", "header"})
- if err != nil {
- return nil, err
- }
-
- dtab.db = db
- dtab.devices = tbls[0]
- dtab.header = tbls[1]
-
- // Initialize local gen vector with own device id and
- // generation id of 0 if local vector doesn't exist.
- if !dtab.hasDevInfo(dtab.s.id) {
- vector := GenVector{
- dtab.s.id: 0,
- }
- if err := dtab.putGenVec(dtab.s.id, vector); err != nil {
- dtab.db.close() // this also closes the tables.
- return nil, err
- }
- }
-
- // Initialize the devTable header.
- dtab.head = &devTableHeader{
- ReclaimVec: GenVector{
- dtab.s.id: 0,
- },
- }
- // If header already exists in db, read it back from db.
- if dtab.hasHead() {
- if err := dtab.getHead(); err != nil {
- dtab.db.close() // this also closes the tables.
- return nil, err
- }
- }
-
- return dtab, nil
-}
-
-// close closes the devTable and invalidates its struct.
-func (dt *devTable) close() error {
- if dt.db == nil {
- return errInvalidDTab
- }
- // Flush the dirty data.
- if err := dt.flush(); err != nil {
- return err
- }
- dt.db.close() // this also closes the tables.
-
- *dt = devTable{} // zero out the devTable struct.
- return nil
-}
-
-// flush flushes the devTable db to storage.
-func (dt *devTable) flush() error {
- if dt.db == nil {
- return errInvalidDTab
- }
- // Set the head from memory before flushing.
- if err := dt.putHead(); err != nil {
- return err
- }
- dt.db.flush()
- return nil
-}
-
-// compact compacts the file associated with kvdb.
-func (dt *devTable) compact() error {
- if dt.db == nil {
- return errInvalidDTab
- }
- db, tbls, err := dt.db.compact(dt.fname, []string{"devices", "header"})
- if err != nil {
- return err
- }
- dt.db = db
- dt.devices = tbls[0]
- dt.header = tbls[1]
- return nil
-}
-
-// putHead puts the devTable head into the devTable db.
-func (dt *devTable) putHead() error {
- return dt.header.set("Head", dt.head)
-}
-
-// getHead gets the devTable head from the devTable db.
-func (dt *devTable) getHead() error {
- if dt.head == nil {
- return errors.New("nil devTable header")
- }
- return dt.header.get("Head", dt.head)
-}
-
-// hasHead returns true if the devTable db has a devTable head.
-func (dt *devTable) hasHead() bool {
- return dt.header.hasKey("Head")
-}
-
-// putDevInfo puts a devInfo struct in the devTable db.
-func (dt *devTable) putDevInfo(devid DeviceID, info *devInfo) error {
- if dt.db == nil {
- return errInvalidDTab
- }
- return dt.devices.set(string(devid), info)
-}
-
-// getDevInfo gets a devInfo struct from the devTable db.
-func (dt *devTable) getDevInfo(devid DeviceID) (*devInfo, error) {
- if dt.db == nil {
- return nil, errInvalidDTab
- }
- var info devInfo
- if err := dt.devices.get(string(devid), &info); err != nil {
- return nil, err
- }
- if info.Vector == nil {
- return nil, errors.New("nil genvector")
- }
- return &info, nil
-}
-
-// hasDevInfo returns true if the device (devid) has any devInfo in the devTable db.
-func (dt *devTable) hasDevInfo(devid DeviceID) bool {
- if dt.db == nil {
- return false
- }
- return dt.devices.hasKey(string(devid))
-}
-
-// putGenVec puts a generation vector in the devTable db.
-func (dt *devTable) putGenVec(devid DeviceID, v GenVector) error {
- if dt.db == nil {
- return errInvalidDTab
- }
- var info *devInfo
- if dt.hasDevInfo(devid) {
- var err error
- if info, err = dt.getDevInfo(devid); err != nil {
- return err
- }
- info.Vector = v
- } else {
- info = &devInfo{
- Vector: v,
- Ts: time.Now().UTC(),
- }
- }
- return dt.putDevInfo(devid, info)
-}
-
-// getGenVec gets a generation vector from the devTable db.
-func (dt *devTable) getGenVec(devid DeviceID) (GenVector, error) {
- if dt.db == nil {
- return nil, errInvalidDTab
- }
- info, err := dt.getDevInfo(devid)
- if err != nil {
- return nil, err
- }
- return info.Vector, nil
-}
-
-// populateGenOrderEntry populates a genOrder entry.
-func (dt *devTable) populateGenOrderEntry(e *genOrder, id DeviceID, gnum GenID) error {
- e.devID = id
- e.genID = gnum
-
- o, err := dt.s.log.getGenMetadata(id, gnum)
- if err != nil {
- return err
- }
- e.order = o.Pos
- return nil
-}
-
-// updateGeneration updates a single generation (upID, upGen) in a device's generation vector.
-func (dt *devTable) updateGeneration(key, upID DeviceID, upGen GenID) error {
- if dt.db == nil {
- return errInvalidDTab
- }
- info, err := dt.getDevInfo(key)
- if err != nil {
- return err
- }
-
- info.Vector[upID] = upGen
-
- return dt.putDevInfo(key, info)
-}
-
-// updateLocalGenVector updates local generation vector based on the remote generation vector.
-func (dt *devTable) updateLocalGenVector(local, remote GenVector) error {
- if dt.db == nil {
- return errInvalidDTab
- }
- if local == nil || remote == nil {
- return errors.New("invalid input args to function")
- }
- for rid, rgen := range remote {
- lgen, ok := local[rid]
- if !ok || lgen < rgen {
- local[rid] = rgen
- }
- }
- return nil
-}
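-
-// For illustration, the merge above is an element-wise maximum over the
-// union of device IDs. Assuming the hypothetical vectors
-//
-//	local  := GenVector{"A": 3, "B": 1}
-//	remote := GenVector{"B": 4, "C": 2}
-//
-// updateLocalGenVector(local, remote) leaves local as {"A": 3, "B": 4, "C": 2}.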
-
-// diffGenVectors diffs generation vectors belonging to src and dest
-// and returns the generations known to src and not known to dest. In
-// addition, sync needs to maintain the order in which device
-// generations are created/received. Hence, when two generation
-// vectors are diffed, the differing generations are returned in a
-// sorted order based on their position in the src's log. genOrder
-// array consists of every generation that is missing between src and
-// dest sorted using its position in the src's log.
-// Example: Generation vector for device A (src) AVec = {A:10, B:5, C:1}
-// Generation vector for device B (dest) BVec = {A:5, B:10, D:2}
-// Missing generations in unsorted order: {A:6, A:7, A:8, A:9, A:10, C:1}.
-//
-// TODO(hpucha): Revisit for the case of a lot of generations to
-// send back (say during bootstrap).
-func (dt *devTable) diffGenVectors(srcVec, destVec GenVector) ([]*genOrder, error) {
- if dt.db == nil {
- return nil, errInvalidDTab
- }
-
- // Create an array for the generations that need to be returned.
- var gens []*genOrder
-
- // Compute missing generations for devices that are in destination and source vector.
- for devid, genid := range destVec {
- srcGenID, ok := srcVec[devid]
- // Skip since src doesn't know of this device.
- if !ok {
- continue
- }
- // Need to include all generations in the interval [genid+1, srcGenID],
- // genid+1 and srcGenID inclusive.
- // Check against reclaimVec to see if required generations are already GCed.
- // Starting gen is then max(oldGen, genid+1)
- startGen := genid + 1
- oldGen := dt.getOldestGen(devid) + 1
- if startGen < oldGen {
- vlog.VI(1).Infof("diffGenVectors:: Adjusting starting generations from %d to %d",
- startGen, oldGen)
- startGen = oldGen
- }
- for i := startGen; i <= srcGenID; i++ {
- // Populate the genorder entry.
- var entry genOrder
- if err := dt.populateGenOrderEntry(&entry, devid, i); err != nil {
- return nil, err
- }
- gens = append(gens, &entry)
- }
- }
- // Compute missing generations for devices not in destination vector but in source vector.
- for devid, genid := range srcVec {
- // Add devices destination does not know about.
- if _, ok := destVec[devid]; !ok {
- // Bootstrap generation to oldest available.
- destGenID := dt.getOldestGen(devid) + 1
- // Need to include all generations in the interval [destGenID, genid],
- // destGenID and genid inclusive.
- for i := destGenID; i <= genid; i++ {
- // Populate the genorder entry.
- var entry genOrder
- if err := dt.populateGenOrderEntry(&entry, devid, i); err != nil {
- return nil, err
- }
- gens = append(gens, &entry)
- }
- }
- }
-
- // Sort generations in log order.
- sort.Sort(byOrder(gens))
- return gens, nil
-}
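-
-// diffGenVectorsSketch is a minimal, hypothetical sketch (not part of the
-// original API) of the diff semantics above: it enumerates the generations
-// known to src but not to dest, ignoring log ordering and reclamation.
-func diffGenVectorsSketch(srcVec, destVec GenVector) map[DeviceID][]GenID {
-	missing := make(map[DeviceID][]GenID)
-	for devid, srcGen := range srcVec {
-		// destVec[devid] is 0 if dest has never heard of devid, so the
-		// interval starts at the bootstrap generation 1 in that case.
-		for i := destVec[devid] + 1; i <= srcGen; i++ {
-			missing[devid] = append(missing[devid], i)
-		}
-	}
-	return missing
-}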
-
-// getOldestGen returns the most recent gc'ed generation for the device "dev".
-func (dt *devTable) getOldestGen(dev DeviceID) GenID {
- return dt.head.ReclaimVec[dev]
-}
-
-// computeReclaimVector computes a generation vector such that the
-// generations less than or equal to those in the vector can be
-// garbage collected. Caller holds a lock on s.lock.
-//
-// Approach: For each device in the system, we compute its maximum
-// generation known to all the other devices in the system. This is a
-// O(N^2) algorithm where N is the number of devices in the system. N
-// is assumed to be small, of the order of hundreds of devices.
-func (dt *devTable) computeReclaimVector() (GenVector, error) {
- // Get local generation vector to create the set of devices in
- // the system. Local generation vector is a good bootstrap
- // device set since it contains all the devices whose log
- // records were ever stored locally.
- devSet, err := dt.getGenVec(dt.s.id)
- if err != nil {
- return nil, err
- }
-
- newReclaimVec := GenVector{}
- for devid := range devSet {
- if !dt.hasDevInfo(devid) {
- // This node knows of devid, but hasn't yet
- // contacted the device. Do not garbage
- // collect any further. For instance, when
- // node A learns of node C's generations from
- // node B, node A may not have an entry for
- // node C yet, but node C will be part of its
- // devSet.
- for dev := range devSet {
- newReclaimVec[dev] = dt.getOldestGen(dev)
- }
- return newReclaimVec, nil
- }
-
- vec, err := dt.getGenVec(devid)
- if err != nil {
- return nil, err
- }
- for dev := range devSet {
- gen1, ok := vec[dev]
- // Device "devid" does not know about device "dev".
- if !ok {
- newReclaimVec[dev] = dt.getOldestGen(dev)
- continue
- }
- gen2, ok := newReclaimVec[dev]
- if !ok || (gen1 < gen2) {
- newReclaimVec[dev] = gen1
- }
- }
- }
- return newReclaimVec, nil
-}
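-
-// reclaimVectorSketch is a hypothetical sketch (not part of the original
-// API) of the core computation above: an element-wise minimum across the
-// generation vectors of all devices, so a generation is reclaimed only
-// once every device is known to have it.
-func reclaimVectorSketch(vecs []GenVector, devSet GenVector) GenVector {
-	reclaim := GenVector{}
-	for dev := range devSet {
-		for i, vec := range vecs {
-			gen := vec[dev] // 0 if this vector has never heard of dev
-			if i == 0 || gen < reclaim[dev] {
-				reclaim[dev] = gen
-			}
-		}
-	}
-	return reclaim
-}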
-
-// addDevice adds a newly learned device to the devTable state.
-func (dt *devTable) addDevice(newDev DeviceID) error {
- // Create an entry in the device table for the new device.
- vector := GenVector{
- newDev: 0,
- }
- if err := dt.putGenVec(newDev, vector); err != nil {
- return err
- }
-
- // Update local generation vector with the new device.
- local, err := dt.getDevInfo(dt.s.id)
- if err != nil {
- return err
- }
- if err := dt.updateLocalGenVector(local.Vector, vector); err != nil {
- return err
- }
- if err := dt.putDevInfo(dt.s.id, local); err != nil {
- return err
- }
- return nil
-}
-
-// updateReclaimVec updates the reclaim vector to track gc'ed generations.
-func (dt *devTable) updateReclaimVec(minGens GenVector) error {
- for dev, min := range minGens {
- gen, ok := dt.head.ReclaimVec[dev]
- if !ok {
- if min < 1 {
- vlog.Errorf("updateReclaimVec:: Received bad generation %s %d",
- dev, min)
- dt.head.ReclaimVec[dev] = 0
- } else {
- dt.head.ReclaimVec[dev] = min - 1
- }
- continue
- }
-
- // We obtained a generation that is already reclaimed.
- if min <= gen {
- return errors.New("requested gen smaller than GC'ed gen")
- }
- }
- return nil
-}
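-
-// For illustration (hypothetical values): with ReclaimVec = {A: 0} and
-// minGens = {B: 3}, device B's oldest available generation is 3, so
-// generations 1 and 2 are known to be GC'ed remotely and ReclaimVec
-// becomes {A: 0, B: 2}.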
diff --git a/runtimes/google/vsync/devtable_test.go b/runtimes/google/vsync/devtable_test.go
deleted file mode 100644
index 99220d7..0000000
--- a/runtimes/google/vsync/devtable_test.go
+++ /dev/null
@@ -1,1250 +0,0 @@
-package vsync
-
-// Tests for the Veyron Sync devTable component.
-import (
- "os"
- "reflect"
- "testing"
- "time"
-)
-
-// TestDevTabStore tests creating a backing file for devTable.
-func TestDevTabStore(t *testing.T) {
- devfile := getFileName()
- defer os.Remove(devfile)
-
- s := &syncd{id: "VeyronPhone"}
- dtab, err := openDevTable(devfile, s)
- if err != nil {
- t.Fatalf("Cannot open new devTable file %s, err %v", devfile, err)
- }
-
- fsize := getFileSize(devfile)
- if fsize < 0 {
- t.Errorf("DevTable file %s not created", devfile)
- }
-
- if err := dtab.flush(); err != nil {
- t.Errorf("Cannot flush devTable file %s, err %v", devfile, err)
- }
-
- oldfsize := fsize
- fsize = getFileSize(devfile)
- if fsize <= oldfsize {
- t.Errorf("DevTable file %s not flushed", devfile)
- }
-
- if err := dtab.close(); err != nil {
- t.Errorf("Cannot close devTable file %s, err %v", devfile, err)
- }
-
- oldfsize = getFileSize(devfile)
-
- dtab, err = openDevTable(devfile, s)
- if err != nil {
- t.Fatalf("Cannot re-open existing devTable file %s, err %v", devfile, err)
- }
-
- fsize = getFileSize(devfile)
- if fsize != oldfsize {
- t.Errorf("DevTable file %s size changed across re-open (%d %d)", devfile, fsize, oldfsize)
- }
-
- if err := dtab.flush(); err != nil {
- t.Errorf("Cannot flush devTable file %s, err %v", devfile, err)
- }
-
- if err := dtab.close(); err != nil {
- t.Errorf("Cannot close devTable file %s, err %v", devfile, err)
- }
-}
-
-// TestInvalidDTab tests devTable methods on an invalid (closed) devTable ptr.
-func TestInvalidDTab(t *testing.T) {
- devfile := getFileName()
- defer os.Remove(devfile)
-
- s := &syncd{id: "VeyronPhone"}
- dtab, err := openDevTable(devfile, s)
- if err != nil {
- t.Fatalf("Cannot open new devTable file %s, err %v", devfile, err)
- }
-
- if err := dtab.close(); err != nil {
- t.Errorf("Cannot close devTable file %s, err %v", devfile, err)
- }
-
- err = dtab.close()
- if err == nil || err != errInvalidDTab {
- t.Errorf("Close did not fail on a closed : %v", err)
- }
-
- err = dtab.flush()
- if err == nil || err != errInvalidDTab {
- t.Errorf("Flush did not fail on a closed devTable: %v", err)
- }
-
- err = dtab.compact()
- if err == nil || err != errInvalidDTab {
- t.Errorf("Compact did not fail on a closed devTable: %v", err)
- }
-
- var devid DeviceID = "VeyronPhone"
-
- err = dtab.putDevInfo(devid, &devInfo{})
- if err == nil || err != errInvalidDTab {
- t.Errorf("PutDevInfo did not fail on a closed devTable: %v", err)
- }
-
- _, err = dtab.getDevInfo(devid)
- if err == nil || err != errInvalidDTab {
- t.Errorf("GetDevInfo did not fail on a closed devTable: %v", err)
- }
-
-	if dtab.hasDevInfo(devid) {
-		t.Errorf("HasDevInfo did not fail on a closed devTable")
-	}
-
- err = dtab.putGenVec(devid, GenVector{})
- if err == nil || err != errInvalidDTab {
- t.Errorf("PutGenVec did not fail on a closed devTable: %v", err)
- }
-
- _, err = dtab.getGenVec(devid)
- if err == nil || err != errInvalidDTab {
- t.Errorf("GetGenVec did not fail on a closed devTable: %v", err)
- }
-
- err = dtab.updateGeneration(devid, devid, 0)
- if err == nil || err != errInvalidDTab {
- t.Errorf("UpdateGeneration did not fail on a closed devTable: %v", err)
- }
-
- err = dtab.updateLocalGenVector(GenVector{}, GenVector{})
- if err == nil || err != errInvalidDTab {
- t.Errorf("UpdateLocalGenVector did not fail on a closed devTable: %v", err)
- }
-
- _, err = dtab.diffGenVectors(GenVector{}, GenVector{})
- if err == nil || err != errInvalidDTab {
- t.Errorf("DiffGenVectors did not fail on a closed devTable: %v", err)
- }
-}
-
-// TestPutGetDevTableHeader tests setting and getting devTable header across devTable open/close/reopen.
-func TestPutGetDevTableHeader(t *testing.T) {
- devfile := getFileName()
- defer os.Remove(devfile)
-
- s := &syncd{id: "VeyronPhone"}
- dtab, err := openDevTable(devfile, s)
- if err != nil {
- t.Fatalf("Cannot open new devTable file %s, err %v", devfile, err)
- }
-
- // In memory head should be initialized.
- if dtab.head.Resmark != nil {
- t.Errorf("First time log create should reset header: %v", dtab.head.Resmark)
- }
- expVec := GenVector{dtab.s.id: 0}
- if !reflect.DeepEqual(dtab.head.ReclaimVec, expVec) {
- t.Errorf("Data mismatch for reclaimVec in devTable file %s: %v instead of %v",
- devfile, dtab.head.ReclaimVec, expVec)
- }
-
- // No head should be there in db.
- if err = dtab.getHead(); err == nil {
- t.Errorf("getHead() found non-existent head in devTable file %s, err %v", devfile, err)
- }
-
- if dtab.hasHead() {
- t.Errorf("hasHead() found non-existent head in devTable file %s", devfile)
- }
-
- expMark := []byte{1, 2, 3}
- expVec = GenVector{
- "VeyronTab": 30,
- "VeyronPhone": 10,
- }
- dtab.head = &devTableHeader{
- Resmark: expMark,
- ReclaimVec: expVec,
- }
-
- if err := dtab.putHead(); err != nil {
- t.Errorf("Cannot put head %v in devTable file %s, err %v", dtab.head, devfile, err)
- }
-
- // Reset values.
- dtab.head.Resmark = nil
- dtab.head.ReclaimVec = GenVector{}
-
- for i := 0; i < 2; i++ {
- if err := dtab.getHead(); err != nil {
- t.Fatalf("getHead() can not find head (i=%d) in devTable file %s, err %v", i, devfile, err)
- }
-
- if !dtab.hasHead() {
- t.Errorf("hasHead() can not find head (i=%d) in devTable file %s", i, devfile)
- }
-
- if !reflect.DeepEqual(dtab.head.Resmark, expMark) {
- t.Errorf("Data mismatch for resmark (i=%d) in devTable file %s: %v instead of %v",
- i, devfile, dtab.head.Resmark, expMark)
- }
- if !reflect.DeepEqual(dtab.head.ReclaimVec, expVec) {
- t.Errorf("Data mismatch for reclaimVec (i=%d) in devTable file %s: %v instead of %v",
- i, devfile, dtab.head.ReclaimVec, expVec)
- }
-
- if i == 0 {
- if err := dtab.close(); err != nil {
- t.Errorf("Cannot close devTable file %s, err %v", devfile, err)
- }
- dtab, err = openDevTable(devfile, s)
- if err != nil {
- t.Fatalf("Cannot re-open devTable file %s, err %v", devfile, err)
- }
- }
- }
-
- if err := dtab.close(); err != nil {
- t.Errorf("Cannot close devTable file %s, err %v", devfile, err)
- }
-}
-
-// TestPersistDevTableHeader tests that devTable header is
-// automatically persisted across devTable open/close/reopen.
-func TestPersistDevTableHeader(t *testing.T) {
- devfile := getFileName()
- defer os.Remove(devfile)
-
- s := &syncd{id: "VeyronPhone"}
- dtab, err := openDevTable(devfile, s)
- if err != nil {
- t.Fatalf("Cannot open new devTable file %s, err %v", devfile, err)
- }
-
- // In memory head should be initialized.
- if dtab.head.Resmark != nil {
- t.Errorf("First time log create should reset header: %v", dtab.head.Resmark)
- }
- expVec := GenVector{dtab.s.id: 0}
- if !reflect.DeepEqual(dtab.head.ReclaimVec, expVec) {
- t.Errorf("Data mismatch for reclaimVec in devTable file %s: %v instead of %v",
- devfile, dtab.head.ReclaimVec, expVec)
- }
-
- expMark := []byte{0, 2, 255}
- expVec = GenVector{
- "VeyronTab": 100,
- "VeyronPhone": 10000,
- }
- dtab.head = &devTableHeader{
- Resmark: expMark,
- ReclaimVec: expVec,
- }
-
- if err := dtab.close(); err != nil {
- t.Errorf("Cannot close devTable file %s, err %v", devfile, err)
- }
-
- dtab, err = openDevTable(devfile, s)
- if err != nil {
- t.Fatalf("Cannot open new devTable file %s, err %v", devfile, err)
- }
-
- // In memory head should be initialized from db.
- if !reflect.DeepEqual(dtab.head.Resmark, expMark) {
- t.Errorf("Data mismatch for resmark in devTable file %s: %v instead of %v",
- devfile, dtab.head.Resmark, expMark)
- }
- if !reflect.DeepEqual(dtab.head.ReclaimVec, expVec) {
- t.Errorf("Data mismatch for reclaimVec in devTable file %s: %v instead of %v",
- devfile, dtab.head.ReclaimVec, expVec)
- }
-
- expMark = []byte{60, 180, 7}
- expVec = GenVector{
- "VeyronTab": 1,
- "VeyronPhone": 1987,
- }
- dtab.head = &devTableHeader{
- Resmark: expMark,
- ReclaimVec: expVec,
- }
-
- if err := dtab.flush(); err != nil {
- t.Errorf("Cannot flush devTable file %s, err %v", devfile, err)
- }
-
- // Reset values.
- dtab.head.Resmark = nil
- dtab.head.ReclaimVec = GenVector{}
-
- if err := dtab.getHead(); err != nil {
- t.Fatalf("getHead() can not find head in devTable file %s, err %v", devfile, err)
- }
-
- // In memory head should be initialized from db.
- if !reflect.DeepEqual(dtab.head.Resmark, expMark) {
- t.Errorf("Data mismatch for resmark in devTable file %s: %v instead of %v",
- devfile, dtab.head.Resmark, expMark)
- }
- if !reflect.DeepEqual(dtab.head.ReclaimVec, expVec) {
- t.Errorf("Data mismatch for reclaimVec in devTable file %s: %v instead of %v",
- devfile, dtab.head.ReclaimVec, expVec)
- }
-
- if err := dtab.close(); err != nil {
- t.Errorf("Cannot close devTable file %s, err %v", devfile, err)
- }
-}
-
-// TestPutGetDevInfo tests setting and getting devInfo across devTable open/close/reopen.
-func TestPutGetDevInfo(t *testing.T) {
- devfile := getFileName()
- defer os.Remove(devfile)
-
- s := &syncd{id: "VeyronPhone"}
- dtab, err := openDevTable(devfile, s)
- if err != nil {
- t.Fatalf("Cannot open new devTable file %s, err %v", devfile, err)
- }
-
- var devid DeviceID = "VeyronTab"
-
- info, err := dtab.getDevInfo(devid)
- if err == nil || info != nil {
- t.Errorf("GetDevInfo() found non-existent object %s in devTable file %s: %v, err %v",
- devid, devfile, info, err)
- }
-
- if dtab.hasDevInfo(devid) {
- t.Errorf("HasDevInfo() found non-existent object %s in devTable file %s",
- devid, devfile)
- }
-
- v := GenVector{
- "VeyronTab": 0,
- "VeyronPhone": 10,
- }
-
- info = &devInfo{
- Vector: v,
- Ts: time.Now().UTC(),
- }
-
- if err := dtab.putDevInfo(devid, info); err != nil {
- t.Errorf("Cannot put object %s (%v) in devTable file %s, err %v", devid, info, devfile, err)
- }
-
- for i := 0; i < 2; i++ {
- curInfo, err := dtab.getDevInfo(devid)
- if err != nil || curInfo == nil {
- t.Fatalf("GetDevInfo() can not find object %s (i=%d) in devTable file %s: %v, err: %v",
- devid, i, devfile, curInfo, err)
- }
-
- if !dtab.hasDevInfo(devid) {
- t.Errorf("HasDevInfo() can not find object %s (i=%d) in devTable file %s",
- devid, i, devfile)
- }
-
- if !reflect.DeepEqual(info, curInfo) {
- t.Errorf("Data mismatch for object %s (i=%d) in devTable file %s: %v instead of %v",
- devid, i, devfile, curInfo, info)
- }
-
- if i == 0 {
- if err := dtab.close(); err != nil {
- t.Errorf("Cannot close devTable file %s, err %v", devfile, err)
- }
- dtab, err = openDevTable(devfile, s)
- if err != nil {
- t.Fatalf("Cannot re-open devTable file %s, err %v", devfile, err)
- }
- }
- }
-
- if err := dtab.close(); err != nil {
- t.Errorf("Cannot close devTable file %s, err %v", devfile, err)
- }
-}
-
-// TestPutGetGenVec tests setting and getting generation vector across dtab open/close/reopen.
-func TestPutGetGenVec(t *testing.T) {
- devfile := getFileName()
- defer os.Remove(devfile)
-
- s := &syncd{id: "VeyronPhone"}
- dtab, err := openDevTable(devfile, s)
- if err != nil {
- t.Fatalf("Cannot open new devTable file %s, err %v", devfile, err)
- }
-
- local := GenVector{
- "VeyronPhone": 0,
- }
-
- var devid DeviceID = "VeyronTab"
- vec, err := dtab.getGenVec(devid)
- if err == nil || vec != nil {
- t.Errorf("GetGenVec() found non-existent object %s in devTable file %s: %v, err %v",
- devid, devfile, vec, err)
- }
-
- v := GenVector{
- "VeyronTab": 0,
- "VeyronPhone": 10,
- "VeyronDesktop": 20,
- "VeyronLaptop": 2,
- }
-
- if err := dtab.putGenVec(devid, v); err != nil {
- t.Errorf("Cannot put object %s (%v) in devTable file %s, err %v", devid, v, devfile, err)
- }
-
- for i := 0; i < 2; i++ {
- // Check for devid.
- curVec, err := dtab.getGenVec(devid)
- if err != nil || curVec == nil {
- t.Fatalf("GetGenVec() can not find object %s (i=%d) in devTable file %s, err %v",
- devid, i, devfile, err)
- }
-
- if !reflect.DeepEqual(v, curVec) {
- t.Errorf("Data mismatch for object %s (i=%d) in devTable file %s: %v instead of %v",
- devid, i, devfile, curVec, v)
- }
-
- // Check for s.id.
- curLocal, err := dtab.getGenVec(s.id)
- if err != nil || curLocal == nil {
- t.Fatalf("GetGenVec() can not find object %s (i=%d) in devTable file %s: %v, err: %v",
- s.id, i, devfile, curLocal, err)
- }
-
- if !reflect.DeepEqual(local, curLocal) {
- t.Errorf("Data mismatch for object %s (i=%d) in devTable file %s: %v instead of %v",
- s.id, i, devfile, curLocal, local)
- }
-
- if i == 0 {
- if err := dtab.close(); err != nil {
- t.Errorf("Cannot close devTable file %s, err %v", devfile, err)
- }
- dtab, err = openDevTable(devfile, s)
- if err != nil {
- t.Fatalf("Cannot re-open devTable file %s, err %v", devfile, err)
- }
- }
- }
-
- if err := dtab.close(); err != nil {
- t.Errorf("Cannot close devTable file %s, err %v", devfile, err)
- }
-}
-
-// TestUpdateGeneration tests updating a generation.
-func TestUpdateGeneration(t *testing.T) {
- devfile := getFileName()
- defer os.Remove(devfile)
-
- s := &syncd{id: "VeyronPhone"}
- dtab, err := openDevTable(devfile, s)
- if err != nil {
- t.Fatalf("Cannot open new devTable file %s, err %v", devfile, err)
- }
-
- var devid DeviceID = "VeyronTab"
- err = dtab.updateGeneration(devid, devid, 10)
- if err == nil {
- t.Errorf("UpdateGeneration() found non-existent object %s in devTable file %s, err %v",
- devid, devfile, err)
- }
- v := GenVector{
- "VeyronTab": 0,
- "VeyronPhone": 10,
- "VeyronDesktop": 20,
- "VeyronLaptop": 2,
- }
-
- if err := dtab.putGenVec(devid, v); err != nil {
- t.Errorf("Cannot put object %s (%v) in devTable file %s, err %v", devid, v, devfile, err)
- }
- err = dtab.updateGeneration(devid, devid, 10)
- if err != nil {
- t.Errorf("UpdateGeneration() failed for %s in devTable file %s with error %v",
- devid, devfile, err)
- }
- err = dtab.updateGeneration(devid, "VeyronLaptop", 18)
- if err != nil {
- t.Errorf("UpdateGeneration() failed for %s in devTable file %s with error %v",
- devid, devfile, err)
- }
- curVec, err := dtab.getGenVec(devid)
- if err != nil || curVec == nil {
- t.Fatalf("GetGenVec() can not find object %s in devTable file %s, err %v",
- devid, devfile, err)
- }
- vExp := GenVector{
- "VeyronTab": 10,
- "VeyronPhone": 10,
- "VeyronDesktop": 20,
- "VeyronLaptop": 18,
- }
-
- if !reflect.DeepEqual(curVec, vExp) {
- t.Errorf("Data mismatch for object %s in devTable file %s: %v instead of %v",
-			devid, devfile, curVec, vExp)
- }
-
- if err := dtab.close(); err != nil {
- t.Errorf("Cannot close devTable file %s, err %v", devfile, err)
- }
-}
-
-// TestUpdateLocalGenVector tests updating a gen vector.
-func TestUpdateLocalGenVector(t *testing.T) {
- devfile := getFileName()
- defer os.Remove(devfile)
-
- s := &syncd{id: "VeyronPhone"}
- dtab, err := openDevTable(devfile, s)
- if err != nil {
- t.Fatalf("Cannot open new devTable file %s, err %v", devfile, err)
- }
-
- // Test nil args.
- if err := dtab.updateLocalGenVector(nil, nil); err == nil {
- t.Errorf("UpdateLocalGenVector() failed in devTable file %s with error %v",
- devfile, err)
- }
-
- // Nothing to update.
- local := GenVector{
- "VeyronTab": 0,
- "VeyronPhone": 1,
- }
- remote := GenVector{
- "VeyronTab": 0,
- "VeyronPhone": 1,
- }
- if err := dtab.updateLocalGenVector(local, remote); err != nil {
- t.Errorf("UpdateLocalGenVector() failed in devTable file %s with error %v",
- devfile, err)
- }
-
- if !reflect.DeepEqual(local, remote) {
- t.Errorf("Data mismatch for object %v instead of %v",
- local, remote)
- }
-
- // local is missing a generation.
- local = GenVector{
- "VeyronPhone": 1,
- }
- if err := dtab.updateLocalGenVector(local, remote); err != nil {
- t.Errorf("UpdateLocalGenVector() failed in devTable file %s with error %v",
- devfile, err)
- }
- if !reflect.DeepEqual(local, remote) {
- t.Errorf("Data mismatch for object %v instead of %v",
- local, remote)
- }
-
- // local is stale compared to remote.
- local = GenVector{
- "VeyronTab": 0,
- "VeyronPhone": 0,
- }
- remote = GenVector{
- "VeyronTab": 1,
- "VeyronPhone": 0,
- "VeyronLaptop": 2,
- }
- if err := dtab.updateLocalGenVector(local, remote); err != nil {
- t.Errorf("UpdateLocalGenVector() failed in devTable file %s with error %v",
- devfile, err)
- }
- if !reflect.DeepEqual(local, remote) {
- t.Errorf("Data mismatch for object %v instead of %v",
- local, remote)
- }
-
- // local is partially stale.
- local = GenVector{
- "VeyronTab": 0,
- "VeyronPhone": 0,
- "VeyronDesktop": 20,
- }
- remote = GenVector{
- "VeyronTab": 1,
- "VeyronPhone": 10,
- "VeyronLaptop": 2,
- }
- localExp := GenVector{
- "VeyronTab": 1,
- "VeyronPhone": 10,
- "VeyronDesktop": 20,
- "VeyronLaptop": 2,
- }
- if err := dtab.updateLocalGenVector(local, remote); err != nil {
- t.Errorf("UpdateLocalGenVector() failed in devTable file %s with error %v",
- devfile, err)
- }
- if !reflect.DeepEqual(local, localExp) {
- t.Errorf("Data mismatch for object %v instead of %v",
- local, localExp)
- }
-
- if err := dtab.close(); err != nil {
- t.Errorf("Cannot close devTable file %s, err %v", devfile, err)
- }
-}
-
-// TestDiffGenVectors tests diffing gen vectors.
-func TestDiffGenVectors(t *testing.T) {
- logOrder := []DeviceID{"VeyronTab", "VeyronPhone", "VeyronDesktop", "VeyronLaptop"}
- var expGens []*genOrder
-
- // set reclaimVec such that it doesn't affect diffs.
- reclaimVec := GenVector{
- "VeyronTab": 0,
- "VeyronPhone": 0,
- "VeyronDesktop": 0,
- "VeyronLaptop": 0,
- }
-
- // src and dest are identical vectors.
- vec := GenVector{
- "VeyronTab": 1,
- "VeyronPhone": 10,
- "VeyronDesktop": 20,
- "VeyronLaptop": 2,
- }
- setupAndTestDiff(t, vec, vec, reclaimVec, logOrder, expGens)
-
- // src has no updates.
- srcVec := GenVector{
- "VeyronTab": 0,
- }
- remoteVec := GenVector{
- "VeyronTab": 5,
- "VeyronPhone": 10,
- "VeyronDesktop": 20,
- "VeyronLaptop": 8,
- }
- setupAndTestDiff(t, srcVec, remoteVec, reclaimVec, []DeviceID{}, expGens)
-
- // src and remote have no updates.
- srcVec = GenVector{
- "VeyronTab": 0,
- }
- remoteVec = GenVector{
- "VeyronTab": 0,
- }
- setupAndTestDiff(t, srcVec, remoteVec, reclaimVec, []DeviceID{}, expGens)
-
- // set reclaimVec such that it doesn't affect diffs.
- reclaimVec = GenVector{
- "VeyronTab": 0,
- }
-
- // src is staler than remote.
- srcVec = GenVector{
- "VeyronTab": 1,
- "VeyronPhone": 10,
- "VeyronDesktop": 20,
- "VeyronLaptop": 2,
- }
- remoteVec = GenVector{
- "VeyronTab": 5,
- "VeyronPhone": 10,
- "VeyronDesktop": 20,
- "VeyronLaptop": 8,
- }
- setupAndTestDiff(t, srcVec, remoteVec, reclaimVec, logOrder, expGens)
-
- // src is fresher than remote.
- srcVec = GenVector{
- "VeyronTab": 5,
- "VeyronPhone": 10,
- "VeyronDesktop": 20,
- "VeyronLaptop": 2,
- }
- remoteVec = GenVector{
- "VeyronTab": 1,
- "VeyronPhone": 10,
- "VeyronDesktop": 20,
- "VeyronLaptop": 2,
- }
- expGens = make([]*genOrder, 4)
- for i := 0; i < 4; i++ {
- expGens[i] = &genOrder{
- devID: "VeyronTab",
- genID: GenID(i + 2),
- order: uint32(i + 1),
- }
- }
- setupAndTestDiff(t, srcVec, remoteVec, reclaimVec, logOrder, expGens)
-
- // src is fresher than remote in all but one device.
- srcVec = GenVector{
- "VeyronTab": 5,
- "VeyronPhone": 10,
- "VeyronDesktop": 22,
- "VeyronLaptop": 2,
- }
- remoteVec = GenVector{
- "VeyronTab": 1,
- "VeyronPhone": 10,
- "VeyronDesktop": 20,
- "VeyronLaptop": 2,
- "VeyronCloud": 40,
- }
- expGens = make([]*genOrder, 6)
- for i := 0; i < 6; i++ {
- switch {
- case i < 4:
- expGens[i] = &genOrder{
- devID: "VeyronTab",
- genID: GenID(i + 2),
- order: uint32(i + 1),
- }
- default:
- expGens[i] = &genOrder{
- devID: "VeyronDesktop",
- genID: GenID(i - 4 + 21),
- order: uint32(i - 4 + 35),
- }
- }
- }
- setupAndTestDiff(t, srcVec, remoteVec, reclaimVec, logOrder, expGens)
-
- // src is fresher than dest, scramble log order.
- o := []DeviceID{"VeyronTab", "VeyronLaptop", "VeyronPhone", "VeyronDesktop"}
- srcVec = GenVector{
- "VeyronTab": 1,
- "VeyronPhone": 2,
- "VeyronDesktop": 3,
- "VeyronLaptop": 4,
- }
- remoteVec = GenVector{
- "VeyronTab": 0,
- "VeyronPhone": 2,
- "VeyronDesktop": 0,
- }
- expGens = make([]*genOrder, 8)
- for i := 0; i < 8; i++ {
- switch {
- case i < 1:
- expGens[i] = &genOrder{
- devID: "VeyronTab",
- genID: GenID(i + 1),
- order: uint32(i),
- }
- case i >= 1 && i < 5:
- expGens[i] = &genOrder{
- devID: "VeyronLaptop",
- genID: GenID(i),
- order: uint32(i),
- }
- default:
- expGens[i] = &genOrder{
- devID: "VeyronDesktop",
- genID: GenID(i - 4),
- order: uint32(i - 5 + 7),
- }
- }
- }
- setupAndTestDiff(t, srcVec, remoteVec, reclaimVec, o, expGens)
-
- // remote has no updates.
- srcVec = GenVector{
- "VeyronTab": 1,
- "VeyronPhone": 2,
- "VeyronDesktop": 3,
- "VeyronLaptop": 4,
- }
- remoteVec = GenVector{
- "VeyronPhone": 0,
- }
- expGens = make([]*genOrder, 10)
- for i := 0; i < 10; i++ {
- switch {
- case i < 1:
- expGens[i] = &genOrder{
- devID: "VeyronTab",
- genID: GenID(i + 1),
- order: uint32(i),
- }
- case i >= 1 && i < 3:
- expGens[i] = &genOrder{
- devID: "VeyronPhone",
- genID: GenID(i),
- order: uint32(i),
- }
- case i >= 3 && i < 6:
- expGens[i] = &genOrder{
- devID: "VeyronDesktop",
- genID: GenID(i - 2),
- order: uint32(i),
- }
- default:
- expGens[i] = &genOrder{
- devID: "VeyronLaptop",
- genID: GenID(i - 5),
- order: uint32(i),
- }
- }
- }
- setupAndTestDiff(t, srcVec, remoteVec, reclaimVec, logOrder, expGens)
-
- // Test with reclaimVec fast-fwded.
- reclaimVec = GenVector{
- "VeyronPhone": 1,
- "VeyronLaptop": 2,
- }
- srcVec = GenVector{
- "VeyronTab": 1,
- "VeyronPhone": 2,
- "VeyronDesktop": 3,
- "VeyronLaptop": 4,
- }
- remoteVec = GenVector{
- "VeyronPhone": 0,
- }
- expGens = make([]*genOrder, 7)
- for i := 0; i < 7; i++ {
- switch {
- case i < 1:
- expGens[i] = &genOrder{
- devID: "VeyronTab",
- genID: GenID(i + 1),
- order: uint32(i),
- }
- case i == 1:
- expGens[i] = &genOrder{
- devID: "VeyronPhone",
- genID: GenID(i + 1),
- order: uint32(i + 1),
- }
- case i >= 2 && i < 5:
- expGens[i] = &genOrder{
- devID: "VeyronDesktop",
- genID: GenID(i - 1),
- order: uint32(i + 1),
- }
- default:
- expGens[i] = &genOrder{
- devID: "VeyronLaptop",
- genID: GenID(i - 2),
- order: uint32(i + 3),
- }
- }
- }
- setupAndTestDiff(t, srcVec, remoteVec, reclaimVec, logOrder, expGens)
-}
-
-// setupAndTestDiff is a utility function to test diffing generation vectors.
-func setupAndTestDiff(t *testing.T, srcVec, remoteVec, reclaimVec GenVector, logOrder []DeviceID, expGens []*genOrder) {
- devfile := getFileName()
- defer os.Remove(devfile)
-
- logfile := getFileName()
- defer os.Remove(logfile)
-
- var srcid DeviceID = "VeyronTab"
- var destid DeviceID = "VeyronPhone"
-
- var err error
- s := &syncd{id: srcid}
- s.log, err = openILog(logfile, s)
- if err != nil {
- t.Fatalf("Cannot open new log file %s, err %v", logfile, err)
- }
- dtab, err := openDevTable(devfile, s)
- if err != nil {
- t.Fatalf("Cannot open new devTable file %s, err %v", devfile, err)
- }
- dtab.head.ReclaimVec = reclaimVec
-
- // Populate generations in log order.
- var order uint32
- for _, k := range logOrder {
-		v, ok := srcVec[k]
- if !ok {
- t.Errorf("Cannot find key %s in srcVec %v", k, srcVec)
- }
- for i := GenID(1); i <= v; i++ {
- val := &genMetadata{Pos: order}
- if err := dtab.s.log.putGenMetadata(k, i, val); err != nil {
- t.Errorf("Cannot put object %s:%d in log file %s, err %v", k, v, logfile, err)
- }
- order++
- }
- }
- gens, err := dtab.diffGenVectors(srcVec, remoteVec)
- if err != nil {
- t.Fatalf("DiffGenVectors() failed src: %s %v dest: %s %v in devTable file %s, err %v",
- srcid, srcVec, destid, remoteVec, devfile, err)
- }
-
- if !reflect.DeepEqual(gens, expGens) {
- t.Fatalf("Data mismatch for genorder %v instead of %v, src %v dest %v reclaim %v",
- gens, expGens, srcVec, remoteVec, reclaimVec)
- }
-
- if err := dtab.close(); err != nil {
- t.Errorf("Cannot close devTable file %s, err %v", devfile, err)
- }
-}
-
-// TestGetOldestGen tests obtaining generations from reclaimVec.
-func TestGetOldestGen(t *testing.T) {
- devfile := getFileName()
- defer os.Remove(devfile)
-
- var srcid DeviceID = "VeyronTab"
- s := &syncd{id: srcid}
- var err error
- s.devtab, err = openDevTable(devfile, s)
- if err != nil {
- t.Fatalf("Cannot open new devTable file %s, err %v", devfile, err)
- }
-
- if s.devtab.getOldestGen(srcid) != 0 {
- t.Errorf("Cannot get generation for device %s in devTable file %s",
- srcid, devfile)
- }
-
- var destid DeviceID = "VeyronPhone"
- if s.devtab.getOldestGen(destid) != 0 {
- t.Errorf("Cannot get generation for device %s in devTable file %s",
- destid, devfile)
- }
-
- s.devtab.head.ReclaimVec[srcid] = 10
- if s.devtab.getOldestGen(srcid) != 10 {
- t.Errorf("Cannot get generation for device %s in devTable file %s",
- srcid, devfile)
- }
- if s.devtab.getOldestGen(destid) != 0 {
- t.Errorf("Cannot get generation for device %s in devTable file %s",
- destid, devfile)
- }
-
- if err := s.devtab.close(); err != nil {
- t.Errorf("Cannot close devTable file %s, err %v", devfile, err)
- }
-}
-
-// TestComputeReclaimVector tests reclaim vector computation.
-func TestComputeReclaimVector(t *testing.T) {
- devArr := []DeviceID{"VeyronTab", "VeyronPhone", "VeyronDesktop", "VeyronLaptop"}
- genVecArr := make([]GenVector, 4)
-
- // All devices are up-to-date.
- genVecArr[0] = GenVector{"VeyronTab": 1, "VeyronPhone": 2, "VeyronDesktop": 3, "VeyronLaptop": 4}
- genVecArr[1] = GenVector{"VeyronTab": 1, "VeyronPhone": 2, "VeyronDesktop": 3, "VeyronLaptop": 4}
- genVecArr[2] = GenVector{"VeyronTab": 1, "VeyronPhone": 2, "VeyronDesktop": 3, "VeyronLaptop": 4}
- genVecArr[3] = GenVector{"VeyronTab": 1, "VeyronPhone": 2, "VeyronDesktop": 3, "VeyronLaptop": 4}
- setupAndTestReclaimVector(t, devArr, genVecArr, nil, genVecArr[0])
-
- // Every device is missing at least one other device. Not possible to gc.
- genVecArr[0] = GenVector{"VeyronTab": 1, "VeyronPhone": 2, "VeyronDesktop": 3, "VeyronLaptop": 4}
- genVecArr[1] = GenVector{"VeyronTab": 1, "VeyronPhone": 2, "VeyronLaptop": 4}
- genVecArr[2] = GenVector{"VeyronTab": 1, "VeyronPhone": 2, "VeyronDesktop": 3}
- genVecArr[3] = GenVector{"VeyronDesktop": 3, "VeyronLaptop": 4}
- expReclaimVec := GenVector{"VeyronTab": 0, "VeyronPhone": 0, "VeyronDesktop": 0, "VeyronLaptop": 0}
- setupAndTestReclaimVector(t, devArr, genVecArr, nil, expReclaimVec)
-
- // All devices know at least one generation from other devices.
- genVecArr[0] = GenVector{"VeyronTab": 1, "VeyronPhone": 2, "VeyronDesktop": 3, "VeyronLaptop": 4}
- genVecArr[1] = GenVector{"VeyronTab": 1, "VeyronPhone": 2, "VeyronDesktop": 2, "VeyronLaptop": 2}
- genVecArr[2] = GenVector{"VeyronTab": 1, "VeyronPhone": 1, "VeyronDesktop": 3, "VeyronLaptop": 1}
- genVecArr[3] = GenVector{"VeyronTab": 1, "VeyronPhone": 2, "VeyronDesktop": 1, "VeyronLaptop": 4}
- expReclaimVec = GenVector{"VeyronTab": 1, "VeyronPhone": 1, "VeyronDesktop": 1, "VeyronLaptop": 1}
- setupAndTestReclaimVector(t, devArr, genVecArr, nil, expReclaimVec)
-
- // One device is missing from one other device.
- genVecArr[0] = GenVector{"VeyronTab": 1, "VeyronPhone": 2, "VeyronDesktop": 3, "VeyronLaptop": 4}
- genVecArr[1] = GenVector{"VeyronTab": 1, "VeyronPhone": 2, "VeyronDesktop": 2}
- genVecArr[2] = GenVector{"VeyronTab": 1, "VeyronPhone": 1, "VeyronDesktop": 3, "VeyronLaptop": 1}
- genVecArr[3] = GenVector{"VeyronTab": 1, "VeyronPhone": 2, "VeyronDesktop": 1, "VeyronLaptop": 4}
- expReclaimVec = GenVector{"VeyronTab": 1, "VeyronPhone": 1, "VeyronDesktop": 1, "VeyronLaptop": 0}
- setupAndTestReclaimVector(t, devArr, genVecArr, nil, expReclaimVec)
-
- // All devices know at least "n" generations from other devices.
- var n GenID = 10
- genVecArr[0] = GenVector{"VeyronTab": n + 10, "VeyronPhone": n,
- "VeyronDesktop": n + 8, "VeyronLaptop": n + 4}
- genVecArr[1] = GenVector{"VeyronTab": n + 6, "VeyronPhone": n + 10,
- "VeyronDesktop": n, "VeyronLaptop": n + 3}
- genVecArr[2] = GenVector{"VeyronTab": n, "VeyronPhone": n + 2,
- "VeyronDesktop": n + 10, "VeyronLaptop": n}
- genVecArr[3] = GenVector{"VeyronTab": n + 7, "VeyronPhone": n + 1,
- "VeyronDesktop": n + 5, "VeyronLaptop": n + 10}
- expReclaimVec = GenVector{"VeyronTab": n, "VeyronPhone": n, "VeyronDesktop": n, "VeyronLaptop": n}
- setupAndTestReclaimVector(t, devArr, genVecArr, nil, expReclaimVec)
-
- // Never contacted a device.
- devArr = []DeviceID{"VeyronTab", "VeyronDesktop", "VeyronLaptop"}
- genVecArr[0] = GenVector{"VeyronTab": 1, "VeyronPhone": 2, "VeyronDesktop": 3, "VeyronLaptop": 4}
- genVecArr[1] = GenVector{"VeyronTab": 1, "VeyronPhone": 2, "VeyronDesktop": 3, "VeyronLaptop": 4}
- genVecArr[2] = GenVector{"VeyronTab": 1, "VeyronPhone": 2, "VeyronDesktop": 3, "VeyronLaptop": 4}
- expReclaimVec = GenVector{"VeyronTab": 0, "VeyronPhone": 0, "VeyronDesktop": 0, "VeyronLaptop": 0}
- setupAndTestReclaimVector(t, devArr, genVecArr, nil, expReclaimVec)
-
- // Start from existing reclaim vector.
- devArr = []DeviceID{"VeyronTab", "VeyronPhone", "VeyronDesktop", "VeyronLaptop"}
- reclaimVec := GenVector{"VeyronTab": 1, "VeyronPhone": 2, "VeyronDesktop": 3, "VeyronLaptop": 4}
- genVecArr[0] = GenVector{"VeyronTab": 6, "VeyronPhone": 6, "VeyronDesktop": 6, "VeyronLaptop": 6}
- genVecArr[1] = GenVector{"VeyronTab": 6, "VeyronPhone": 6, "VeyronDesktop": 3, "VeyronLaptop": 6}
- genVecArr[2] = GenVector{"VeyronTab": 6, "VeyronPhone": 6, "VeyronDesktop": 6, "VeyronLaptop": 4}
- genVecArr[3] = GenVector{"VeyronTab": 1, "VeyronPhone": 2, "VeyronDesktop": 6, "VeyronLaptop": 6}
-
- setupAndTestReclaimVector(t, devArr, genVecArr, reclaimVec, reclaimVec)
-}
-
-// setupAndTestReclaimVector is a utility function to test reclaim vector computation.
-func setupAndTestReclaimVector(t *testing.T, devArr []DeviceID, genVecArr []GenVector, reclaimStart, expReclaimVec GenVector) {
- devfile := getFileName()
- defer os.Remove(devfile)
-
- s := &syncd{id: "VeyronTab"}
- dtab, err := openDevTable(devfile, s)
- if err != nil {
- t.Fatalf("Cannot open new devTable file %s, err %v", devfile, err)
- }
- if reclaimStart != nil {
- dtab.head.ReclaimVec = reclaimStart
- }
-
- for i := range devArr {
- if err := dtab.putGenVec(devArr[i], genVecArr[i]); err != nil {
- t.Errorf("Cannot put object %s (%v) in devTable file %s, err %v",
- devArr[i], genVecArr[i], devfile, err)
- }
- }
-
- reclaimVec, err := dtab.computeReclaimVector()
- if err != nil {
- t.Fatalf("computeReclaimVector() failed devices: %v, vectors: %v in devTable file %s, err %v",
- devArr, genVecArr, devfile, err)
- }
-
- if !reflect.DeepEqual(reclaimVec, expReclaimVec) {
- t.Fatalf("Data mismatch for reclaimVec %v instead of %v",
- reclaimVec, expReclaimVec)
- }
-
- if err := dtab.close(); err != nil {
- t.Errorf("Cannot close devTable file %s, err %v", devfile, err)
- }
-}
-
-// TestAddDevice tests adding a device to the devTable.
-func TestAddDevice(t *testing.T) {
- devfile := getFileName()
- defer os.Remove(devfile)
-
- s := &syncd{id: "VeyronPhone"}
- dtab, err := openDevTable(devfile, s)
- if err != nil {
- t.Fatalf("Cannot open new devTable file %s, err %v", devfile, err)
- }
-
- var dev DeviceID = "VeyronLaptop"
- if err := dtab.addDevice(dev); err != nil {
- t.Fatalf("Cannot add new device in devTable file %s, err %v", devfile, err)
- }
-
- vec, err := dtab.getGenVec(dev)
- if err != nil || vec == nil {
- t.Fatalf("GetGenVec() can not find object %s in devTable file %s, err %v",
- dev, devfile, err)
- }
- expVec := GenVector{dev: 0}
- if !reflect.DeepEqual(vec, expVec) {
- t.Errorf("Data mismatch for object %s in devTable file %s: %v instead of %v",
- dev, devfile, vec, expVec)
- }
-
- vec, err = dtab.getGenVec(dtab.s.id)
- if err != nil || vec == nil {
- t.Fatalf("GetGenVec() can not find object %s in devTable file %s, err %v",
- dtab.s.id, devfile, err)
- }
- expVec = GenVector{dtab.s.id: 0, dev: 0}
- if !reflect.DeepEqual(vec, expVec) {
- t.Errorf("Data mismatch for object %s in devTable file %s: %v instead of %v",
- dtab.s.id, devfile, vec, expVec)
- }
-
- expVec = GenVector{dtab.s.id: 10, "VeyronDesktop": 40, dev: 80}
- if err := dtab.putGenVec(dtab.s.id, expVec); err != nil {
- t.Fatalf("PutGenVec() can not put object %s in devTable file %s, err %v",
- dtab.s.id, devfile, err)
- }
- dev = "VeyronTab"
- if err := dtab.addDevice(dev); err != nil {
- t.Fatalf("Cannot add new device in devTable file %s, err %v", devfile, err)
- }
- expVec[dev] = 0
-
- vec, err = dtab.getGenVec(dtab.s.id)
- if err != nil || vec == nil {
- t.Fatalf("GetGenVec() can not find object %s in devTable file %s, err %v",
- dtab.s.id, devfile, err)
- }
- if !reflect.DeepEqual(vec, expVec) {
- t.Errorf("Data mismatch for object %s in devTable file %s: %v instead of %v",
- dtab.s.id, devfile, vec, expVec)
- }
-
- if err := dtab.close(); err != nil {
- t.Errorf("Cannot close devTable file %s, err %v", devfile, err)
- }
-}
-
-// TestUpdateReclaimVec tests updating the reclaim vector.
-func TestUpdateReclaimVec(t *testing.T) {
- devfile := getFileName()
- defer os.Remove(devfile)
-
- s := &syncd{id: "VeyronPhone"}
- dtab, err := openDevTable(devfile, s)
- if err != nil {
- t.Fatalf("Cannot open new devTable file %s, err %v", devfile, err)
- }
-
- minGens := GenVector{"VeyronTab": 1, "VeyronDesktop": 3, "VeyronLaptop": 4}
- if err := dtab.updateReclaimVec(minGens); err != nil {
- t.Fatalf("Cannot update reclaimvec in devTable file %s, err %v", devfile, err)
- }
- expVec := GenVector{dtab.s.id: 0, "VeyronTab": 0, "VeyronDesktop": 2, "VeyronLaptop": 3}
- if !reflect.DeepEqual(dtab.head.ReclaimVec, expVec) {
- t.Errorf("Data mismatch for reclaimVec in devTable file %s: %v instead of %v",
- devfile, dtab.head.ReclaimVec, expVec)
- }
-
- dtab.head.ReclaimVec[DeviceID("VeyronTab")] = 4
- minGens = GenVector{"VeyronTab": 1}
- if err := dtab.updateReclaimVec(minGens); err == nil {
- t.Fatalf("Update reclaimvec didn't fail in devTable file %s", devfile)
- }
-
- minGens = GenVector{"VeyronTab": 5}
- if err := dtab.updateReclaimVec(minGens); err != nil {
- t.Fatalf("Cannot update reclaimvec in devTable file %s, err %v", devfile, err)
- }
- expVec = GenVector{dtab.s.id: 0, "VeyronTab": 4, "VeyronDesktop": 2, "VeyronLaptop": 3}
- if !reflect.DeepEqual(dtab.head.ReclaimVec, expVec) {
- t.Errorf("Data mismatch for reclaimVec in devTable file %s: %v instead of %v",
- devfile, dtab.head.ReclaimVec, expVec)
- }
-
- if err := dtab.close(); err != nil {
- t.Errorf("Cannot close devTable file %s, err %v", devfile, err)
- }
-}
-
-// TestDevTableCompact tests compacting of devtable's kvdb file.
-func TestDevTableCompact(t *testing.T) {
- devfile := getFileName()
- defer os.Remove(devfile)
-
- s := &syncd{id: "VeyronPhone"}
- dtab, err := openDevTable(devfile, s)
- if err != nil {
- t.Fatalf("Cannot open new devTable file %s, err %v", devfile, err)
- }
-
- // Put some data in "devices" table.
- devices := []DeviceID{"Phone", "Tab", "Laptop", "Desktop"}
- for i, dev := range devices {
- v := GenVector{}
- for j, d := range devices {
- v[d] = GenID(i*100 + j)
- }
- if err := dtab.putGenVec(dev, v); err != nil {
- t.Errorf("Cannot put object %s (%v) in devTable file %s, err %v", dev, v, devfile, err)
- }
- if err := dtab.flush(); err != nil {
- t.Errorf("Cannot flush devTable file %s, err %v", devfile, err)
- }
- }
-
- // Put some data in "header" table.
- expMark := []byte{1, 2, 3}
- expVec := GenVector{
- "VeyronTab": 30,
- "VeyronPhone": 10,
- }
- dtab.head = &devTableHeader{
- Resmark: expMark,
- ReclaimVec: expVec,
- }
- if err := dtab.flush(); err != nil {
- t.Errorf("Cannot flush devTable file %s, err %v", devfile, err)
- }
-
- // Get size before compaction.
- oldSize := getFileSize(devfile)
- if oldSize < 0 {
- t.Fatalf("DevTable file %s not created", devfile)
- }
-
- if err := dtab.compact(); err != nil {
- t.Errorf("Cannot compact devTable file %s, err %v", devfile, err)
- }
-
- // Verify size of kvdb file is reduced.
- size := getFileSize(devfile)
- if size < 0 {
- t.Fatalf("DevTable file %s not created", devfile)
- }
- if size > oldSize {
- t.Fatalf("DevTable file %s not compacted", devfile)
- }
-
- // Check data exists after compaction.
- for i, dev := range devices {
- exp := GenVector{}
- for j, d := range devices {
- exp[d] = GenID(i*100 + j)
- }
- curVec, err := dtab.getGenVec(dev)
- if err != nil || curVec == nil {
- t.Fatalf("GetGenVec() can not find object %s in devTable file %s, err %v",
- dev, devfile, err)
- }
- if !reflect.DeepEqual(exp, curVec) {
- t.Errorf("Data mismatch for object %s in devTable file %s: %v instead of %v",
- dev, devfile, curVec, exp)
- }
- }
-
- dtab.head.Resmark = nil
- dtab.head.ReclaimVec = GenVector{}
- if err := dtab.getHead(); err != nil {
- t.Fatalf("getHead() can not find head in devTable file %s, err %v", devfile, err)
- }
- if !reflect.DeepEqual(dtab.head.Resmark, expMark) {
- t.Errorf("Data mismatch for resmark in devTable file %s: %v instead of %v",
- devfile, dtab.head.Resmark, expMark)
- }
- if !reflect.DeepEqual(dtab.head.ReclaimVec, expVec) {
- t.Errorf("Data mismatch for reclaimVec in devTable file %s: %v instead of %v",
- devfile, dtab.head.ReclaimVec, expVec)
- }
-
- if err := dtab.close(); err != nil {
- t.Errorf("Cannot close devTable file %s, err %v", devfile, err)
- }
-}
diff --git a/runtimes/google/vsync/doc.go b/runtimes/google/vsync/doc.go
deleted file mode 100644
index 4fd8a17..0000000
--- a/runtimes/google/vsync/doc.go
+++ /dev/null
@@ -1,4 +0,0 @@
-// Package vsync provides a cross-device replication and synchronization
-// service for data in the Veyron Store. See document below:
-// https://docs.google.com/a/google.com/document/d/16ioEAI65GV9-pL8M9bnVgR21xQgQ3ioVQrWXKWqqQMk/
-package vsync
diff --git a/runtimes/google/vsync/gc.go b/runtimes/google/vsync/gc.go
deleted file mode 100644
index dedd575..0000000
--- a/runtimes/google/vsync/gc.go
+++ /dev/null
@@ -1,516 +0,0 @@
-package vsync
-
-// Garbage collection (GC) in sync reclaims space occupied by sync's
-// data structures when possible. For its operation, sync keeps every
-// version of every object produced locally and remotely in its dag
-// and log data structures. Keeping these versions indefinitely is not
-// feasible given space constraints on any device. Thus to reclaim
-// space, a GC thread periodically checks to see if any state can be
-// deleted. GC looks for generations that every device in the system
-// already knows about and deletes state belonging to those
-// generations. Since every device in the system already knows about
-// these generations, it is safe to delete them. Newly added devices
-// will only get state starting from the generations not yet
-// reclaimed. Policies are needed to handle devices that were part of
-// the system, but are no longer available. Such devices will prevent
-// GC from moving forward since they will not request new generations.
-//
-// GC in sync happens in 3 stages:
-// ** reclamation phase
-// ** object pruning phase
-// ** online consistency check phase
-//
-// Reclamation phase: GC learns of the state of the other devices when
-// it talks to those devices to obtain missing updates
-// (initiator). The generation vectors of these devices are stored in
-// the device table. In the reclamation phase, we go through the
-// generation vectors of all the devices and compute the maximum
-// generation of each device known to every other device. This maximum
-// generation for each device is stored in the reclaim generation
-// vector. We then iterate through each generation between the old
-// reclaim vector to the new reclaim vector, and create for each
-// object belonging to those generations, the history of versions that
-// will be reclaimed and the most recent version that can be
-// reclaimed.
-//
-// Object pruning phase: In this phase, for an object marked for
-// reclamation, we prune its dag starting from the most recent version
-// that is being reclaimed and delete all the versions that are
-// older. As we prune the dag, we also delete the corresponding log
-// records and update the generation metadata. Note that since the
-// actual deletes proceed object by object, the generations will start
-// to have missing log records, and we use the generation metadata to
-// ensure that the generation deletes are tracked accurately. Thus,
-// the decision of what to prune happens top down using generation
-// information, while the actual pruning happens bottom up from the
-// dag. Pruning bottom up ensures that the object dags are consistent.
-//
-// Online consistency check phase: GC stages need write access to the
-// sync data structures since they perform delete operations. Hence,
-// GC is executed under a write lock and excludes other goroutines in
-// syncd. In order to control the impact of GC on foreground
-// operations, GC is designed to be incremental in its execution. Once
-// objects are marked for deletion, only a small batch of objects are
-// pruned and persisted and the lock is released. Thus objects are
-// incrementally deleted, a small batch every
-// garbageCollectInterval. To persist the changes from a round of GC,
-// we immediately persist the new reclaim vector. For the batch of
-// objects gc'ed in a round, we also persist their deletions. However,
-// if the system restarts or crashes when all the dirty objects from a
-// round of GC are not processed, there will be state from generations
-// older than the reclaim vector still persisted in kvdb. Since the
-// reclaim vector has already been advanced, this state cannot be
-// detected, resulting in leakage of space. To prevent this, we could
-// have persisted the GC state to support restartability. However, to
-// keep GC lightweight, we went with the approach of not persisting
-// the transient GC state but lazily performing a consistency check on
-// kvdb to detect dangling records. Online consistency check phase
-// performs this checking. It checks every generation older than the
-// reclaim vector snapshotted at bootstrap to see if it has any state
-// left over in kvdb. If it finds dangling state, it marks the
-// corresponding objects as dirty for pruning. This consistency check
-// happens only once upon reboot. Once all generations lower than the
-// reclaim vector snapshot are verified, this phase is a noop. Once
-// again, to limit the overhead of this phase, it processes only a
-// small batch of generations in each round of GC invocation.
-//
-// Finally, the underlying kvdb store persists state by writing to a
-// log file upon flush. Thus, as we continue to flush to kvdb, the log
-// file will keep growing. In addition, upon restart, this log file
-// must be processed to reconstruct the kvdb state. To keep this log
-// file from becoming large, we need to periodically compact kvdb.
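-//
-// As a worked example of the reclamation phase (hypothetical values):
-// with devices A and B whose known generation vectors are
-// A = {A:10, B:5} and B = {A:7, B:9}, the maximum generation of A known
-// to every device is min(10, 7) = 7 and of B is min(5, 9) = 5, so the
-// new reclaim vector is {A:7, B:5} and state belonging to A's
-// generations 1..7 and B's generations 1..5 can be deleted.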
-import (
- "errors"
- "fmt"
- "time"
-
- "veyron/services/store/raw"
-
- "veyron2/storage"
- "veyron2/vlog"
-)
-
-var (
- // garbage collection (space reclamation) is invoked every
- // garbageCollectInterval.
- garbageCollectInterval = 3600 * time.Second
-
- // strictCheck when enabled performs strict checking of every
-	// log record being deleted to confirm that it should in
-	// fact be deleted.
- // TODO(hpucha): Support strictCheck in the presence
- // of Link log records.
- strictCheck = false
-
- // Every compactCount iterations of garbage collection, kvdb
- // is compacted. This value has performance implications as
- // kvdb compaction is expensive.
- compactCount = 100
-
- // Batch size for the number of objects that are garbage
- // collected every gc iteration. This value impacts the
- // amount of time gc is running. GC holds a write lock on the
- // data structures, blocking out all other operations in the
- // system while it is running.
- objBatchSize = 20
-
- // Batch size for the number of generations that are verified
- // every gc iteration.
- genBatchSize = 100
-
- // Errors.
- errBadMetadata = errors.New("bad metadata")
-)
-
-// objGCState tracks the per-object GC state.
-// "version" is the most recent version of the object that can be
-// pruned (and hence all older versions can be pruned as well).
-//
-// "pos" is used to compute the most recent version of the object
-// among all the versions that can be pruned (version with the highest
-// pos is the most recent). "version" of the object belongs to a
-// generation. "pos" is the position of that generation in the local
-// log.
-type objGCState struct {
- version raw.Version
- pos uint32
-}
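-
-// For illustration (hypothetical values): if an object has version 8 in a
-// generation at log position 4 and version 12 in a generation at log
-// position 9, its objGCState ends up as {version: 12, pos: 9}, and the
-// dag is later pruned at version 12, reclaiming all older versions.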
-
-// objVersHist tracks all the versions of the object that need to be
-// gc'ed when strictCheck is enabled.
-type objVersHist struct {
- versions map[raw.Version]struct{}
-}
-
-// syncGC contains the metadata and state for the Sync GC thread.
-type syncGC struct {
- // syncd is a pointer to the Syncd instance owning this GC.
- syncd *syncd
-
- // checkConsistency controls whether online consistency checks are run.
- checkConsistency bool
-
- // reclaimSnap is the snapshot of the reclaim vector at startup.
- reclaimSnap GenVector
-
- // pruneObjects holds the per-object state for garbage collection.
- pruneObjects map[storage.ID]*objGCState
-
- // verifyPruneMap holds the per-object version history to verify GC operations
- // on an object. It is used when strictCheck is enabled.
- verifyPruneMap map[storage.ID]*objVersHist
-}
-
-// newGC creates a new syncGC instance attached to the given Syncd instance.
-func newGC(syncd *syncd) *syncGC {
- g := &syncGC{
- syncd: syncd,
- checkConsistency: true,
- reclaimSnap: GenVector{},
- pruneObjects: make(map[storage.ID]*objGCState),
- }
-
- if strictCheck {
- g.verifyPruneMap = make(map[storage.ID]*objVersHist)
- }
-
- // Take a snapshot (copy) of the reclaim vector at startup.
- for dev, gnum := range syncd.devtab.head.ReclaimVec {
- g.reclaimSnap[dev] = gnum
- }
-
- return g
-}
-
-// garbageCollect wakes up every garbageCollectInterval to check if it
-// can reclaim space.
-func (g *syncGC) garbageCollect() {
- gcIters := 0
- ticker := time.NewTicker(garbageCollectInterval)
- for {
- select {
- case <-g.syncd.closed:
- ticker.Stop()
- g.syncd.pending.Done()
- return
- case <-ticker.C:
- gcIters++
- if gcIters == compactCount {
- gcIters = 0
- g.doGC(true)
- } else {
- g.doGC(false)
- }
- }
- }
-}
-
-// doGC performs the actual garbage collection steps.
-// If "compact" is true, also compact the Sync DBs.
-func (g *syncGC) doGC(compact bool) {
- vlog.VI(1).Infof("doGC:: Started at %v", time.Now().UTC())
-
- g.syncd.lock.Lock()
- defer g.syncd.lock.Unlock()
-
- if err := g.onlineConsistencyCheck(); err != nil {
- vlog.Fatalf("onlineConsistencyCheck:: failed with err %v", err)
- }
-
- if err := g.reclaimSpace(); err != nil {
- vlog.Fatalf("reclaimSpace:: failed with err %v", err)
- }
- // TODO(hpucha): flush devtable state.
-
- if err := g.pruneObjectBatch(); err != nil {
- vlog.Fatalf("pruneObjectBatch:: failed with err %v", err)
- }
- // TODO(hpucha): flush log and dag state.
-
- if compact {
- if err := g.compactDB(); err != nil {
- vlog.Fatalf("compactDB:: failed with err %v", err)
- }
- }
-}
-
-// onlineConsistencyCheck checks if generations lower than the
-// ReclaimVec (snapshotted at startup) are deleted from the log and
-// dag data structures. It is needed to prevent space leaks when the
-// system crashes while pruning a batch of objects. GC state is not
-// aggressively persisted to make it efficient. Instead, upon reboot,
-// onlineConsistencyCheck executes incrementally checking all the
-// generations lower than the ReclaimVec snap to ensure that they are
-// deleted. Each iteration of onlineConsistencyCheck checks a small
-// batch of generations. Once all generations below the ReclaimVec
-// snap are verified once, onlineConsistencyCheck is a noop.
-func (g *syncGC) onlineConsistencyCheck() error {
- vlog.VI(1).Infof("onlineConsistencyCheck:: called with %v", g.checkConsistency)
- if !g.checkConsistency {
- return nil
- }
-
- vlog.VI(2).Infof("onlineConsistencyCheck:: reclaimSnap is %v", g.reclaimSnap)
- genCount := 0
- for dev, gen := range g.reclaimSnap {
- if gen == 0 {
- continue
- }
- for i := gen; i > 0; i-- {
- if genCount == genBatchSize {
- g.reclaimSnap[dev] = i
- return nil
- }
- if g.syncd.log.hasGenMetadata(dev, i) {
- if err := g.garbageCollectGeneration(dev, i); err != nil {
- return err
- }
- }
-
- genCount++
- }
- g.reclaimSnap[dev] = 0
- }
-
- // Done with all the generations of all devices. Consistency
- // check is no longer needed.
- g.checkConsistency = false
- vlog.VI(1).Infof("onlineConsistencyCheck:: exited with %v", g.checkConsistency)
- return nil
-}
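-
-// For illustration (hypothetical values): with reclaimSnap = {A: 250} and
-// genBatchSize = 100, the first GC round verifies A's generations 250
-// down to 151 and leaves reclaimSnap[A] = 150; subsequent rounds continue
-// from there until reclaimSnap[A] reaches 0 and the check becomes a noop.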
-
-// garbageCollectGeneration garbage collects any existing log records
-// for a particular generation.
-func (g *syncGC) garbageCollectGeneration(devid DeviceID, gnum GenID) error {
- vlog.VI(2).Infof("garbageCollectGeneration:: processing generation %s:%d", devid, gnum)
- // Bootstrap generation for a device. Nothing to GC.
- if gnum == 0 {
- return nil
- }
- gen, err := g.syncd.log.getGenMetadata(devid, gnum)
- if err != nil {
- return err
- }
- if gen.Count <= 0 {
- return errBadMetadata
- }
-
- var count uint64
- // Check for log records for this generation.
- for l := LSN(0); l <= gen.MaxLSN; l++ {
- if !g.syncd.log.hasLogRec(devid, gnum, l) {
- continue
- }
-
- count++
- rec, err := g.syncd.log.getLogRec(devid, gnum, l)
- if err != nil {
- return err
- }
-
- if rec.RecType == LinkRec {
- // For a link log record, gc it right away.
- g.dagPruneCallBack(logRecKey(devid, gnum, l))
- continue
- }
-
- // Insert the object in this log record to the prune
- // map if needed.
- // If this object does not exist, create it.
- // If the object exists, update the object version to
- // prune at if the current gen is greater than the gen
- // in the prune map or if the gen is the same but the
- // current lsn is greater than the previous lsn.
- gcState, ok := g.pruneObjects[rec.ObjID]
- if !ok || gcState.pos <= gen.Pos {
- if !ok {
- gcState = &objGCState{}
- g.pruneObjects[rec.ObjID] = gcState
- }
- gcState.pos = gen.Pos
- gcState.version = rec.CurVers
- vlog.VI(2).Infof("Replacing for obj %v pos %d version %d",
- rec.ObjID, gcState.pos, gcState.version)
- }
-
- // When strictCheck is enabled, track object's version
- // history so that we can check against the versions
- // being deleted.
- if strictCheck {
- objHist, ok := g.verifyPruneMap[rec.ObjID]
- if !ok {
- objHist = &objVersHist{
- versions: make(map[raw.Version]struct{}),
- }
- g.verifyPruneMap[rec.ObjID] = objHist
- }
- // Add this version to the versions that need to be pruned.
- objHist.versions[rec.CurVers] = struct{}{}
- }
- }
-
- if count != gen.Count {
- return errors.New("incorrect number of log records")
- }
-
- return nil
-}
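
The prune-map update above follows a "latest generation position wins" rule: the record with the highest Pos (and, within a generation, the highest lsn, since records are scanned in increasing lsn order) decides the version an object is pruned at. A self-contained restatement with simplified types (illustrative, not the package's API):

    type gcEntry struct {
        pos     uint32
        version uint64
    }

    // updatePrune mirrors the objGCState update: replace the entry when the
    // incoming generation position is at least as high as the recorded one.
    func updatePrune(m map[string]*gcEntry, obj string, pos uint32, version uint64) {
        e, ok := m[obj]
        if !ok || e.pos <= pos {
            if !ok {
                e = &gcEntry{}
                m[obj] = e
            }
            e.pos, e.version = pos, version
        }
    }

    // updatePrune(m, "o", 3, 5); updatePrune(m, "o", 6, 8); updatePrune(m, "o", 4, 7)
    // leaves m["o"] == &gcEntry{pos: 6, version: 8}.
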
-
-// reclaimSpace performs periodic space reclamation by deleting
-// generations known to all devices.
-//
-// Approach: For each device in the system, we compute its maximum
-// generation known to all the other devices in the system. We then
-// delete all log and dag records below this generation. This is a
-// O(N^2) algorithm where N is the number of devices in the system.
-func (g *syncGC) reclaimSpace() error {
- newReclaimVec, err := g.syncd.devtab.computeReclaimVector()
- if err != nil {
- return err
- }
-
- vlog.VI(1).Infof("reclaimSpace:: reclaimVectors: new %v old %v",
- newReclaimVec, g.syncd.devtab.head.ReclaimVec)
- // Clean up generations from reclaimVec+1 to newReclaimVec.
- for dev, high := range newReclaimVec {
- low := g.syncd.devtab.getOldestGen(dev)
-
- // Garbage collect from low+1 to high.
- for i := GenID(low + 1); i <= high; i++ {
- if err := g.garbageCollectGeneration(dev, i); err != nil {
- return err
- }
- }
- }
-
- // Update reclaimVec.
- g.syncd.devtab.head.ReclaimVec = newReclaimVec
- return nil
-}
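
computeReclaimVector itself belongs to the device table (outside this file); the semantics relied on above amount to an element-wise minimum over the generation vectors reported by every device: a generation is reclaimable once every device knows it. A hedged sketch, assuming each vector carries an entry for every device:

    func computeReclaimVectorSketch(vecs []GenVector) GenVector {
        if len(vecs) == 0 {
            return GenVector{}
        }
        reclaim := GenVector{}
        for dev, gen := range vecs[0] {
            reclaim[dev] = gen
        }
        // Keep, per device, the smallest generation any peer reports.
        for _, vec := range vecs[1:] {
            for dev, gen := range vec {
                if gen < reclaim[dev] {
                    reclaim[dev] = gen
                }
            }
        }
        return reclaim
    }

With N devices each reporting an N-entry vector, this is the O(N^2) cost the comment refers to.
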
-
-// pruneObjectBatch processes a batch of objects to be pruned from log
-// and dag.
-func (g *syncGC) pruneObjectBatch() error {
- vlog.VI(1).Infof("pruneObjectBatch:: Called at %v", time.Now().UTC())
- count := 0
- for obj, gcState := range g.pruneObjects {
- if count == objBatchSize {
- return nil
- }
- vlog.VI(1).Infof("pruneObjectBatch:: Pruning obj %v at version %v", obj, gcState.version)
- // Call dag prune on this object.
- if err := g.syncd.dag.prune(obj, gcState.version, g.dagPruneCallBack); err != nil {
- return err
- }
-
- if strictCheck {
- // Ensure that all but one version in the object version history are gc'ed.
- objHist, ok := g.verifyPruneMap[obj]
- if !ok {
- return fmt.Errorf("missing object in verification map %v", obj)
- }
- if len(objHist.versions) != 1 {
- return fmt.Errorf("leftover/no versions %d", len(objHist.versions))
- }
- for v := range objHist.versions {
- if v != gcState.version {
- return fmt.Errorf("leftover version %d %v", v, obj)
- }
- }
- }
-
- delete(g.pruneObjects, obj)
- count++
- }
-
- return g.syncd.dag.pruneDone()
-}
-
-// dagPruneCallBack deletes the log record associated with the dag
-// node being pruned and updates the generation metadata for the
-// generation that this log record belongs to.
-func (g *syncGC) dagPruneCallBack(logKey string) error {
- dev, gnum, lsn, err := splitLogRecKey(logKey)
- if err != nil {
- return err
- }
- vlog.VI(2).Infof("dagPruneCallBack:: called for key %s (%s %d %d)", logKey, dev, gnum, lsn)
-
- // Check if the log record being deleted is correct as per GC state.
- oldestGen := g.syncd.devtab.getOldestGen(dev)
- if gnum > oldestGen {
- vlog.VI(2).Infof("gnum is %d oldest is %d", gnum, oldestGen)
- return errors.New("deleting incorrect log record")
- }
-
- if !g.syncd.log.hasLogRec(dev, gnum, lsn) {
- return errors.New("missing log record")
- }
-
- if strictCheck {
- rec, err := g.syncd.log.getLogRec(dev, gnum, lsn)
- if err != nil {
- return err
- }
- if rec.RecType == NodeRec {
- objHist, ok := g.verifyPruneMap[rec.ObjID]
- if !ok {
- return errors.New("obj not found in verifyMap")
- }
- _, found := objHist.versions[rec.CurVers]
- // If online consistency check is in progress, we
- // cannot strictly verify all the versions to be
- // deleted, and we ignore the failure to find a
- // version.
- if found {
- delete(objHist.versions, rec.CurVers)
- } else if !g.checkConsistency {
- return errors.New("verification failed")
- }
- }
- }
-
- if err := g.syncd.log.delLogRec(dev, gnum, lsn); err != nil {
- return err
- }
-
- // Update generation metadata.
- gen, err := g.syncd.log.getGenMetadata(dev, gnum)
- if err != nil {
- return err
- }
- if gen.Count <= 0 {
- return errBadMetadata
- }
- gen.Count--
- if gen.Count == 0 {
- if err := g.syncd.log.delGenMetadata(dev, gnum); err != nil {
- return err
- }
- } else {
- if err := g.syncd.log.putGenMetadata(dev, gnum, gen); err != nil {
- return err
- }
- }
- return nil
-}
-
-// compactDB compacts the underlying kvdb store for all data structures.
-func (g *syncGC) compactDB() error {
- vlog.VI(1).Infof("compactDB:: Compacting DBs")
- if err := g.syncd.log.compact(); err != nil {
- return err
- }
- if err := g.syncd.devtab.compact(); err != nil {
- return err
- }
- if err := g.syncd.dag.compact(); err != nil {
- return err
- }
- return nil
-}
-
-// deleteDevice takes care of permanently deleting a device from its sync peers.
-// TODO(hpucha): to be implemented.
-func (g *syncGC) deleteDevice() {
-}
diff --git a/runtimes/google/vsync/gc_test.go b/runtimes/google/vsync/gc_test.go
deleted file mode 100644
index a0d2643..0000000
--- a/runtimes/google/vsync/gc_test.go
+++ /dev/null
@@ -1,1064 +0,0 @@
-package vsync
-
-// Tests for sync garbage collection.
-import (
- "os"
- "reflect"
- "testing"
-
- _ "veyron/lib/testutil"
- "veyron/services/store/raw"
-
- "veyron2/storage"
-)
-
-// TestGCOnlineConsistencyCheck tests the online consistency check in GC.
-func TestGCOnlineConsistencyCheck(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- s := NewSyncd("", "", "A", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- testFile := "test-1obj.gc.sync"
- if err := vsyncInitState(s, testFile); err != nil {
- t.Fatal(err)
- }
-
- if err := s.hdlGC.onlineConsistencyCheck(); err != nil {
- t.Fatalf("onlineConsistencyCheck failed for test %s, err %v", testFile, err)
- }
- // No objects should be marked for GC.
- if s.hdlGC.checkConsistency {
- t.Errorf("onlineConsistencyCheck didn't finish in test %s", testFile)
- }
- if len(s.hdlGC.pruneObjects) > 0 {
- t.Errorf("onlineConsistencyCheck created unexpected objects in test %s, map: %v",
- testFile, s.hdlGC.pruneObjects)
- }
- if strictCheck && len(s.hdlGC.verifyPruneMap) > 0 {
- t.Errorf("onlineConsistencyCheck created unexpected objects in test %s, map: %v",
- testFile, s.hdlGC.verifyPruneMap)
- }
-
- // Fast-forward the reclaimSnap.
- s.hdlGC.reclaimSnap = GenVector{"A": 2, "B": 1, "C": 2}
-
- if err := s.hdlGC.onlineConsistencyCheck(); err != nil {
- t.Fatalf("onlineConsistencyCheck failed for test %s, err %v", testFile, err)
- }
- // Nothing should change since checkConsistency ("ock") is false.
- if len(s.hdlGC.pruneObjects) > 0 {
- t.Errorf("onlineConsistencyCheck created unexpected objects in test %s, map: %v",
- testFile, s.hdlGC.pruneObjects)
- }
- if strictCheck && len(s.hdlGC.verifyPruneMap) > 0 {
- t.Errorf("onlineConsistencyCheck created unexpected objects in test %s, map: %v",
- testFile, s.hdlGC.verifyPruneMap)
- }
- expVec := GenVector{"A": 2, "B": 1, "C": 2}
- // Ensuring reclaimSnap didn't get modified in onlineConsistencyCheck().
- if !reflect.DeepEqual(expVec, s.hdlGC.reclaimSnap) {
- t.Errorf("Data mismatch for reclaimSnap: %v instead of %v", s.hdlGC.reclaimSnap, expVec)
- }
-
- s.hdlGC.checkConsistency = true
- genBatchSize = 0
- if err := s.hdlGC.onlineConsistencyCheck(); err != nil {
- t.Fatalf("onlineConsistencyCheck failed for test %s, err %v", testFile, err)
- }
- // Nothing should change since genBatchSize is 0.
- if len(s.hdlGC.pruneObjects) > 0 {
- t.Errorf("onlineConsistencyCheck created unexpected objects in test %s, map: %v",
- testFile, s.hdlGC.pruneObjects)
- }
- if strictCheck && len(s.hdlGC.verifyPruneMap) > 0 {
- t.Errorf("onlineConsistencyCheck created unexpected objects in test %s, map: %v",
- testFile, s.hdlGC.verifyPruneMap)
- }
- if !reflect.DeepEqual(expVec, s.hdlGC.reclaimSnap) {
- t.Errorf("Data mismatch for reclaimSnap: %v instead of %v", s.hdlGC.reclaimSnap, expVec)
- }
-
- // Test batching.
- genBatchSize = 1
- if err := s.hdlGC.onlineConsistencyCheck(); err != nil {
- t.Fatalf("onlineConsistencyCheck failed for test %s, err %v", testFile, err)
- }
- if !s.hdlGC.checkConsistency {
- t.Errorf("onlineConsistencyCheck finished in test %s", testFile)
- }
- total := (expVec[DeviceID("A")] - s.hdlGC.reclaimSnap[DeviceID("A")]) +
- (expVec[DeviceID("B")] - s.hdlGC.reclaimSnap[DeviceID("B")]) +
- (expVec[DeviceID("C")] - s.hdlGC.reclaimSnap[DeviceID("C")])
- if total != 1 {
- t.Errorf("onlineConsistencyCheck failed in test %s, %v", testFile, s.hdlGC.reclaimSnap)
- }
-
- genBatchSize = 4
- if err := s.hdlGC.onlineConsistencyCheck(); err != nil {
- t.Fatalf("onlineConsistencyCheck failed for test %s, err %v", testFile, err)
- }
-
- objid, err := strToObjID("12345")
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
- expMap := make(map[storage.ID]*objGCState)
- expMap[objid] = &objGCState{pos: 4, version: 4}
- if !reflect.DeepEqual(expMap, s.hdlGC.pruneObjects) {
- t.Errorf("Data mismatch for pruneObjects map in vsyncd: %v instead of %v",
- s.hdlGC.pruneObjects[objid], expMap[objid])
- }
- expMap1 := make(map[storage.ID]*objVersHist)
- if strictCheck {
- expMap1[objid] = &objVersHist{versions: make(map[raw.Version]struct{})}
- for i := 0; i < 5; i++ {
- expMap1[objid].versions[raw.Version(i)] = struct{}{}
- }
- if !reflect.DeepEqual(expMap1, s.hdlGC.verifyPruneMap) {
- t.Errorf("Data mismatch for verifyPruneMap: %v instead of %v", s.hdlGC.verifyPruneMap, expMap1)
- }
- }
- expVec = GenVector{"A": 0, "B": 0, "C": 0}
- if !reflect.DeepEqual(expVec, s.hdlGC.reclaimSnap) {
- t.Errorf("Data mismatch for reclaimSnap: %v instead of %v", s.hdlGC.reclaimSnap, expVec)
- }
- if s.hdlGC.checkConsistency {
- t.Errorf("onlineConsistencyCheck didn't finish in test %s", testFile)
- }
-}
-
-// TestGCGeneration tests the garbage collection of a generation.
-func TestGCGeneration(t *testing.T) {
- // Run the test for both values of strictCheck flag.
- flags := []bool{true, false}
- for _, val := range flags {
- strictCheck = val
- setupGarbageCollectGeneration(t)
- }
-}
-
-// setupGarbageCollectGeneration performs the setup to test garbage collection of a generation.
-func setupGarbageCollectGeneration(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- s := NewSyncd("", "", "A", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- testFile := "test-1obj.gc.sync"
- if err := vsyncInitState(s, testFile); err != nil {
- t.Fatal(err)
- }
-
- // Test GenID of 0.
- if err := s.hdlGC.garbageCollectGeneration(DeviceID("A"), 0); err != nil {
- t.Errorf("garbageCollectGeneration failed for test %s, dev A gnum 0, err %v\n", testFile, err)
- }
-
- // Test a non-existent generation.
- if err := s.hdlGC.garbageCollectGeneration(DeviceID("A"), 10); err == nil {
- t.Errorf("garbageCollectGeneration failed for test %s, dev A gnum 10\n", testFile)
- }
-
- if err := s.hdlGC.garbageCollectGeneration(DeviceID("A"), 2); err != nil {
- t.Errorf("garbageCollectGeneration failed for test %s, dev A gnum 2, err %v\n", testFile, err)
- }
-
- objid, err := strToObjID("12345")
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
-
- expMap := make(map[storage.ID]*objGCState)
- expMap[objid] = &objGCState{pos: 3, version: 3}
- if !reflect.DeepEqual(expMap, s.hdlGC.pruneObjects) {
- t.Errorf("Data mismatch for pruneObjects map: %v instead of %v", s.hdlGC.pruneObjects, expMap)
- }
-
- expMap1 := make(map[storage.ID]*objVersHist)
- if strictCheck {
- expMap1[objid] = &objVersHist{versions: make(map[raw.Version]struct{})}
- expMap1[objid].versions[raw.Version(3)] = struct{}{}
- if !reflect.DeepEqual(expMap1, s.hdlGC.verifyPruneMap) {
- t.Errorf("Data mismatch for verifyPruneMap: %v instead of %v", s.hdlGC.verifyPruneMap, expMap1)
- }
- }
-
- // Test GC'ing a generation lower than A:2.
- if err := s.hdlGC.garbageCollectGeneration(DeviceID("A"), 1); err != nil {
- t.Errorf("garbageCollectGeneration failed for test %s, dev A gnum 1, err %v\n", testFile, err)
- }
- if !reflect.DeepEqual(expMap, s.hdlGC.pruneObjects) {
- t.Errorf("Data mismatch for pruneObjects map: %v instead of %v", s.hdlGC.pruneObjects, expMap)
- }
-
- if strictCheck {
- expMap1[objid].versions[raw.Version(2)] = struct{}{}
- if !reflect.DeepEqual(expMap1, s.hdlGC.verifyPruneMap) {
- t.Errorf("Data mismatch for verifyPruneMap: %v instead of %v", s.hdlGC.verifyPruneMap, expMap1)
- }
- }
-
- // Test GC'ing a generation higher than A:2.
- if err := s.hdlGC.garbageCollectGeneration(DeviceID("B"), 3); err != nil {
- t.Errorf("garbageCollectGeneration failed for test %s, dev B gnum 3, err %v\n", testFile, err)
- }
- expMap[objid].pos = 6
- expMap[objid].version = 6
- if !reflect.DeepEqual(expMap, s.hdlGC.pruneObjects) {
- t.Errorf("Data mismatch for pruneObjects map: %v instead of %v", s.hdlGC.pruneObjects, expMap)
- }
-
- if strictCheck {
- expMap1[objid].versions[raw.Version(6)] = struct{}{}
- if !reflect.DeepEqual(expMap1, s.hdlGC.verifyPruneMap) {
- t.Errorf("Data mismatch for verifyPruneMap: %v instead of %v",
- s.hdlGC.verifyPruneMap[objid], expMap1[objid])
- }
- }
-}
-
-// TestGCReclaimSpace tests the space reclamation algorithm in GC.
-func TestGCReclaimSpace(t *testing.T) {
- // Run the tests for both values of strictCheck flag.
- flags := []bool{true, false}
- for _, val := range flags {
- strictCheck = val
- setupGCReclaimSpace1Obj(t)
- setupGCReclaimSpace3Objs(t)
- }
-}
-
-// setupGCReclaimSpace1Obj performs the setup to test space reclamation for a scenario with 1 object.
-func setupGCReclaimSpace1Obj(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- s := NewSyncd("", "", "A", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- testFile := "test-1obj.gc.sync"
- if err := vsyncInitState(s, testFile); err != nil {
- t.Fatal(err)
- }
-
- if err := s.hdlGC.reclaimSpace(); err != nil {
- t.Errorf("reclaimSpace failed for test %s, err %v\n", testFile, err)
- }
-
- objid, err := strToObjID("12345")
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
-
- expMap := make(map[storage.ID]*objGCState)
- expMap[objid] = &objGCState{pos: 4, version: 4}
- if !reflect.DeepEqual(expMap, s.hdlGC.pruneObjects) {
- t.Errorf("Data mismatch for pruneObjects map: %v instead of %v", s.hdlGC.pruneObjects[objid], expMap[objid])
- }
-
- expMap1 := make(map[storage.ID]*objVersHist)
- expMap1[objid] = &objVersHist{versions: make(map[raw.Version]struct{})}
- if strictCheck {
- for i := 0; i < 5; i++ {
- expMap1[objid].versions[raw.Version(i)] = struct{}{}
- }
- if !reflect.DeepEqual(expMap1, s.hdlGC.verifyPruneMap) {
- t.Errorf("Data mismatch for verifyPruneMap: %v instead of %v",
- s.hdlGC.verifyPruneMap[objid], expMap1[objid])
- }
- }
- expVec := GenVector{"A": 2, "B": 1, "C": 2}
- if !reflect.DeepEqual(expVec, s.devtab.head.ReclaimVec) {
- t.Errorf("Data mismatch for reclaimVec: %v instead of %v",
- s.devtab.head.ReclaimVec, expVec)
- }
-
- // Allow for GCing B:3 incrementally.
- cVec := GenVector{"A": 2, "B": 3, "C": 2}
- if err := s.devtab.putGenVec(DeviceID("C"), cVec); err != nil {
- t.Errorf("putGenVec failed for test %s, err %v\n", testFile, err)
- }
- if err := s.hdlGC.reclaimSpace(); err != nil {
- t.Errorf("reclaimSpace failed for test %s, err %v\n", testFile, err)
- }
- expMap[objid] = &objGCState{pos: 6, version: 6}
- if !reflect.DeepEqual(expMap, s.hdlGC.pruneObjects) {
- t.Errorf("Data mismatch for pruneObjects map: %v instead of %v", s.hdlGC.pruneObjects[objid], expMap[objid])
- }
- if strictCheck {
- expMap1[objid].versions[raw.Version(5)] = struct{}{}
- expMap1[objid].versions[raw.Version(6)] = struct{}{}
- if !reflect.DeepEqual(expMap1, s.hdlGC.verifyPruneMap) {
- t.Errorf("Data mismatch for verifyPruneMap: %v instead of %v",
- s.hdlGC.verifyPruneMap[objid], expMap1[objid])
- }
- }
- if !reflect.DeepEqual(cVec, s.devtab.head.ReclaimVec) {
- t.Errorf("Data mismatch for reclaimVec: %v instead of %v",
- s.devtab.head.ReclaimVec, cVec)
- }
-}
-
-// setupGCReclaimSpace3Objs performs the setup to test space reclamation for a scenario with 3 objects.
-func setupGCReclaimSpace3Objs(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- s := NewSyncd("", "", "A", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- testFile := "test-3obj.gc.sync"
- if err := vsyncInitState(s, testFile); err != nil {
- t.Fatal(err)
- }
-
- if err := s.hdlGC.reclaimSpace(); err != nil {
- t.Errorf("reclaimSpace failed for test %s, err %v\n", testFile, err)
- }
-
- expMap := make(map[storage.ID]*objGCState)
- expMap1 := make(map[storage.ID]*objVersHist)
-
- obj1, err := strToObjID("123")
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
- expMap[obj1] = &objGCState{pos: 8, version: 6}
- expMap1[obj1] = &objVersHist{versions: make(map[raw.Version]struct{})}
- for i := 1; i < 7; i++ {
- expMap1[obj1].versions[raw.Version(i)] = struct{}{}
- }
-
- obj2, err := strToObjID("456")
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
- expMap[obj2] = &objGCState{pos: 10, version: 7}
- expMap1[obj2] = &objVersHist{versions: make(map[raw.Version]struct{})}
- for i := 1; i < 6; i++ {
- expMap1[obj2].versions[raw.Version(i)] = struct{}{}
- }
- expMap1[obj2].versions[raw.Version(7)] = struct{}{}
-
- obj3, err := strToObjID("789")
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
- expMap[obj3] = &objGCState{pos: 8, version: 4}
- expMap1[obj3] = &objVersHist{versions: make(map[raw.Version]struct{})}
- for i := 1; i < 5; i++ {
- expMap1[obj3].versions[raw.Version(i)] = struct{}{}
- }
-
- if !reflect.DeepEqual(expMap, s.hdlGC.pruneObjects) {
- t.Errorf("Data mismatch for pruneObjects map: %v instead of %v", s.hdlGC.pruneObjects, expMap)
- }
- if strictCheck {
- if !reflect.DeepEqual(expMap1, s.hdlGC.verifyPruneMap) {
- t.Errorf("Data mismatch for verifyPruneMap: %v instead of %v", s.hdlGC.verifyPruneMap, expMap1)
- }
- }
- expVec := GenVector{"A": 4, "B": 3, "C": 4}
- if !reflect.DeepEqual(expVec, s.devtab.head.ReclaimVec) {
- t.Errorf("Data mismatch for reclaimVec: %v instead of %v",
- s.devtab.head.ReclaimVec, expVec)
- }
-
- // Advance GC by one more generation.
- expVec[DeviceID("A")] = 5
- expVec[DeviceID("C")] = 5
- if err := s.devtab.putGenVec(DeviceID("C"), expVec); err != nil {
- t.Errorf("putGenVec failed for test %s, err %v\n", testFile, err)
- }
- if err := s.devtab.putGenVec(DeviceID("B"), expVec); err != nil {
- t.Errorf("putGenVec failed for test %s, err %v\n", testFile, err)
- }
-
- if err := s.hdlGC.reclaimSpace(); err != nil {
- t.Errorf("reclaimSpace failed for test %s, err %v\n", testFile, err)
- }
-
- expMap[obj1] = &objGCState{pos: 12, version: 9}
- for i := 7; i < 10; i++ {
- expMap1[obj1].versions[raw.Version(i)] = struct{}{}
- }
- expMap[obj2] = &objGCState{pos: 12, version: 8}
- for i := 6; i < 9; i++ {
- expMap1[obj2].versions[raw.Version(i)] = struct{}{}
- }
- expMap[obj3] = &objGCState{pos: 12, version: 6}
- for i := 5; i < 7; i++ {
- expMap1[obj3].versions[raw.Version(i)] = struct{}{}
- }
-
- if !reflect.DeepEqual(expMap, s.hdlGC.pruneObjects) {
- t.Errorf("Data mismatch for pruneObjects map: %v instead of %v", s.hdlGC.pruneObjects, expMap)
- }
- if strictCheck {
- if !reflect.DeepEqual(expMap1, s.hdlGC.verifyPruneMap) {
- t.Errorf("Data mismatch for verifyPruneMap: %v instead of %v", s.hdlGC.verifyPruneMap, expMap1)
- }
- }
- expVec = GenVector{"A": 5, "B": 3, "C": 5}
- if !reflect.DeepEqual(expVec, s.devtab.head.ReclaimVec) {
- t.Errorf("Data mismatch for reclaimVec: %v instead of %v",
- s.devtab.head.ReclaimVec, expVec)
- }
-}
-
-// TestGCDAGPruneCallBack tests the callback function called by dag prune.
-func TestGCDAGPruneCallBack(t *testing.T) {
- // Run the tests for both values of strictCheck flag.
- flags := []bool{true, false}
- for _, val := range flags {
- strictCheck = val
- setupGCDAGPruneCallBack(t)
- setupGCDAGPruneCallBackStrict(t)
- setupGCDAGPruneCBPartGen(t)
- }
-}
-
-// setupGCDAGPruneCallBack performs the setup to test the callback function given to dag prune.
-func setupGCDAGPruneCallBack(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- s := NewSyncd("", "", "A", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- if err := s.hdlGC.dagPruneCallBack("A:1:0"); err == nil {
- t.Errorf("dagPruneCallBack error check failed\n")
- }
-
- testFile := "test-1obj.gc.sync"
- if err := vsyncInitState(s, testFile); err != nil {
- t.Fatal(err)
- }
- s.devtab.head.ReclaimVec = GenVector{"A": 2, "B": 1, "C": 2}
-
- // Call should succeed irrespective of strictCheck since checkConsistency ("ock") is still true.
- if strictCheck {
- objid, err := strToObjID("12345")
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
- s.hdlGC.verifyPruneMap[objid] = &objVersHist{
- versions: make(map[raw.Version]struct{}),
- }
- }
- if err := s.hdlGC.dagPruneCallBack("A:1:0"); err != nil {
- t.Errorf("dagPruneCallBack failed for test %s, err %v\n", testFile, err)
- }
-
- // Calling the same key after success should fail.
- if err := s.hdlGC.dagPruneCallBack("A:1:0"); err == nil {
- t.Errorf("dagPruneCallBack failed for test %s, err %v\n", testFile, err)
- }
-
- if s.log.hasLogRec(DeviceID("A"), GenID(1), LSN(0)) {
- t.Errorf("Log record still exists for test %s\n", testFile)
- }
- if s.log.hasGenMetadata(DeviceID("A"), GenID(1)) {
- t.Errorf("Gen metadata still exists for test %s\n", testFile)
- }
-}
-
-// setupGCDAGPruneCallBackStrict performs the setup to test the
-// callback function called by dag prune when strictCheck is true.
-func setupGCDAGPruneCallBackStrict(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- s := NewSyncd("", "", "A", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- testFile := "test-1obj.gc.sync"
- if err := vsyncInitState(s, testFile); err != nil {
- t.Fatal(err)
- }
- s.devtab.head.ReclaimVec = GenVector{"A": 2, "B": 1, "C": 2}
- if !strictCheck {
- return
- }
-
- s.hdlGC.checkConsistency = false
- if err := s.hdlGC.dagPruneCallBack("A:1:0"); err == nil {
- t.Errorf("dagPruneCallBack should have failed for test %s\n", testFile)
- }
-
- objid, err := strToObjID("12345")
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
- s.hdlGC.verifyPruneMap[objid] = &objVersHist{
- versions: make(map[raw.Version]struct{}),
- }
- s.hdlGC.verifyPruneMap[objid].versions[raw.Version(2)] = struct{}{}
- if err := s.hdlGC.dagPruneCallBack("A:1:0"); err != nil {
- t.Errorf("dagPruneCallBack failed for test %s, err %v\n", testFile, err)
- }
-
- // Calling the same key after success should fail.
- if err := s.hdlGC.dagPruneCallBack("A:1:0"); err == nil {
- t.Errorf("dagPruneCallBack should have failed for test %s, err %v\n", testFile, err)
- }
- if s.log.hasLogRec(DeviceID("A"), GenID(1), LSN(0)) {
- t.Errorf("Log record still exists for test %s\n", testFile)
- }
- if s.log.hasGenMetadata(DeviceID("A"), GenID(1)) {
- t.Errorf("Gen metadata still exists for test %s\n", testFile)
- }
- if len(s.hdlGC.verifyPruneMap[objid].versions) > 0 {
- t.Errorf("Unexpected object version in test %s, map: %v",
- testFile, s.hdlGC.verifyPruneMap[objid])
- }
-}
-
-// setupGCDAGPruneCBPartGen performs the setup to test the callback
-// function called by dag prune when only one entry from a generation
-// (partial gen) is pruned.
-func setupGCDAGPruneCBPartGen(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- s := NewSyncd("", "", "A", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- testFile := "test-3obj.gc.sync"
- if err := vsyncInitState(s, testFile); err != nil {
- t.Fatal(err)
- }
- s.devtab.head.ReclaimVec = GenVector{"A": 4, "B": 3, "C": 4}
- s.hdlGC.checkConsistency = false
- if strictCheck {
- objid, err := strToObjID("789")
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
- s.hdlGC.verifyPruneMap[objid] = &objVersHist{
- versions: make(map[raw.Version]struct{}),
- }
- s.hdlGC.verifyPruneMap[objid].versions[raw.Version(4)] = struct{}{}
- }
-
- // Before pruning.
- expGen := &genMetadata{Pos: 8, Count: 3, MaxLSN: 2}
- gen, err := s.log.getGenMetadata(DeviceID("A"), GenID(3))
- if err != nil {
- t.Errorf("getGenMetadata failed for test %s, err %v\n", testFile, err)
- }
- if !reflect.DeepEqual(expGen, gen) {
- t.Errorf("Data mismatch for genMetadata: %v instead of %v",
- gen, expGen)
- }
-
- if err := s.hdlGC.dagPruneCallBack("A:3:2"); err != nil {
- t.Errorf("dagPruneCallBack failed for test %s, err %v\n", testFile, err)
- }
-
- if s.log.hasLogRec(DeviceID("A"), GenID(3), LSN(2)) {
- t.Errorf("Log record still exists for test %s\n", testFile)
- }
- if !s.log.hasGenMetadata(DeviceID("A"), GenID(3)) {
- t.Errorf("Gen metadata still exists for test %s\n", testFile)
- }
- expGen = &genMetadata{Pos: 8, Count: 2, MaxLSN: 2}
- gen, err = s.log.getGenMetadata(DeviceID("A"), GenID(3))
- if err != nil {
- t.Errorf("getGenMetadata failed for test %s, err %v\n", testFile, err)
- }
- if !reflect.DeepEqual(expGen, gen) {
- t.Errorf("Data mismatch for genMetadata: %v instead of %v",
- gen, expGen)
- }
-
- if strictCheck {
- objid, err := strToObjID("123")
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
- s.hdlGC.verifyPruneMap[objid] = &objVersHist{
- versions: make(map[raw.Version]struct{}),
- }
- s.hdlGC.verifyPruneMap[objid].versions[raw.Version(6)] = struct{}{}
- }
- if err := s.hdlGC.dagPruneCallBack("A:3:0"); err != nil {
- t.Errorf("dagPruneCallBack failed for test %s, err %v\n", testFile, err)
- }
- expGen = &genMetadata{Pos: 8, Count: 1, MaxLSN: 2}
- gen, err = s.log.getGenMetadata(DeviceID("A"), GenID(3))
- if err != nil {
- t.Errorf("getGenMetadata failed for test %s, err %v\n", testFile, err)
- }
- if !reflect.DeepEqual(expGen, gen) {
- t.Errorf("Data mismatch for genMetadata: %v instead of %v",
- gen, expGen)
- }
-}
-
-// TestGCPruning tests the object pruning in GC.
-func TestGCPruning(t *testing.T) {
- // Run the tests for both values of strictCheck flag.
- flags := []bool{true, false}
- for _, val := range flags {
- strictCheck = val
- setupGCPruneObject(t)
- setupGCPruneObjectBatching(t)
- setupGCPrune3Objects(t)
- }
-}
-
-// setupGCPruneObject performs the setup to test pruning an object.
-func setupGCPruneObject(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- s := NewSyncd("", "", "A", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- testFile := "test-1obj.gc.sync"
- if err := vsyncInitState(s, testFile); err != nil {
- t.Fatal(err)
- }
- if err := s.hdlGC.reclaimSpace(); err != nil {
- t.Errorf("reclaimSpace failed for test %s, err %v\n", testFile, err)
- }
-
- s.hdlGC.checkConsistency = false
- objBatchSize = 0
- if err := s.hdlGC.pruneObjectBatch(); err != nil {
- t.Errorf("pruneObjectBatch failed for test %s, err %v\n", testFile, err)
- }
- if len(s.hdlGC.pruneObjects) != 1 {
- t.Errorf("pruneObjectBatch deleted object in test %s, map: %v", testFile, s.hdlGC.pruneObjects)
- }
-
- objBatchSize = 1
- if err := s.hdlGC.pruneObjectBatch(); err != nil {
- t.Errorf("pruneObjectBatch failed for test %s, err %v\n", testFile, err)
- }
-
- if len(s.hdlGC.pruneObjects) > 0 {
- t.Errorf("pruneObjectBatch left unexpected objects in test %s, map: %v", testFile, s.hdlGC.pruneObjects)
- }
-
- // Generations that should have been deleted.
- expVec := GenVector{"A": 2, "B": 1, "C": 1}
- for dev, gen := range expVec {
- for i := GenID(1); i <= gen; i++ {
- if s.log.hasGenMetadata(dev, i) {
- t.Errorf("pruneObjectBatch left unexpected generation in test %s, %s %d",
- testFile, dev, i)
- }
- if s.log.hasLogRec(dev, i, 0) {
- t.Errorf("pruneObjectBatch left unexpected logrec in test %s, %s %d 0",
- testFile, dev, i)
- }
- }
- }
-
- // Generations that should remain.
- devArr := []DeviceID{"B", "B", "C"}
- genArr := []GenID{2, 3, 2}
- for pos, dev := range devArr {
- if _, err := s.log.getGenMetadata(dev, genArr[pos]); err != nil {
- t.Errorf("pruneObjectBatch didn't find expected generation in test %s, %s %d",
- testFile, dev, genArr[pos])
- }
- if _, err := s.log.getLogRec(dev, genArr[pos], 0); err != nil {
- t.Errorf("pruneObjectBatch didn't find expected logrec in test %s, %s %d 0",
- testFile, dev, genArr[pos])
- }
- }
-
- // Verify DAG state.
- objid, err := strToObjID("12345")
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
- if head, err := s.dag.getHead(objid); err != nil || head != 6 {
- t.Errorf("Invalid object %d head in DAG %s, err %v", objid, head, err)
- }
-}
-
-// setupGCPruneObjectBatching performs the setup to test batching while pruning objects.
-func setupGCPruneObjectBatching(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- s := NewSyncd("", "", "A", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- testFile := "test-3obj.gc.sync"
- if err := vsyncInitState(s, testFile); err != nil {
- t.Fatal(err)
- }
- if err := s.hdlGC.reclaimSpace(); err != nil {
- t.Errorf("reclaimSpace failed for test %s, err %v\n", testFile, err)
- }
-
- s.hdlGC.checkConsistency = false
- objBatchSize = 1
- if err := s.hdlGC.pruneObjectBatch(); err != nil {
- t.Errorf("pruneObjectBatch failed for test %s, err %v\n", testFile, err)
- }
- if len(s.hdlGC.pruneObjects) != 2 {
- t.Errorf("pruneObjectBatch didn't remove expected objects in test %s, map: %v", testFile, s.hdlGC.pruneObjects)
- }
-
- // Add a spurious version to the version history and verify error under strictCheck.
- if strictCheck {
- for _, obj := range s.hdlGC.verifyPruneMap {
- obj.versions[80] = struct{}{}
- }
- if err := s.hdlGC.pruneObjectBatch(); err == nil {
- t.Errorf("pruneObjectBatch didn't fail for test %s, err %v\n", testFile, err)
- }
- }
-}
-
-// TestGCPruneObjCheckError checks the error path in pruneObjectBatch under strictCheck.
-func TestGCPruneObjCheckError(t *testing.T) {
- if !strictCheck {
- return
- }
-
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- s := NewSyncd("", "", "A", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- testFile := "test-3obj.gc.sync"
- if err := vsyncInitState(s, testFile); err != nil {
- t.Fatal(err)
- }
- if err := s.hdlGC.reclaimSpace(); err != nil {
- t.Errorf("reclaimSpace failed for test %s, err %v\n", testFile, err)
- }
-
- s.hdlGC.checkConsistency = false
- objBatchSize = 1
-
- // Remove the prune point, add a spurious version.
- for id, obj := range s.hdlGC.verifyPruneMap {
- v := s.hdlGC.pruneObjects[id].version
- obj.versions[80] = struct{}{}
- delete(obj.versions, v)
- }
-
- if err = s.hdlGC.pruneObjectBatch(); err == nil {
- t.Errorf("pruneObjectBatch didn't fail for test %s, err %v\n", testFile, err)
- }
-}
-
-// setupGCPrune3Objects performs the setup to test pruning in a 3 object scenario.
-func setupGCPrune3Objects(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- s := NewSyncd("", "", "A", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- testFile := "test-3obj.gc.sync"
- if err := vsyncInitState(s, testFile); err != nil {
- t.Fatal(err)
- }
- if err := s.hdlGC.reclaimSpace(); err != nil {
- t.Errorf("reclaimSpace failed for test %s, err %v\n", testFile, err)
- }
-
- objBatchSize = 5
- s.hdlGC.checkConsistency = false
- gen, err := s.log.getGenMetadata("A", 3)
- if err != nil {
- t.Errorf("getGenMetadata failed for test %s, err %v\n", testFile, err)
- }
- if gen.Count != 3 {
- t.Errorf("GenMetadata has incorrect value for test %s\n", testFile)
- }
- if err := s.hdlGC.pruneObjectBatch(); err != nil {
- t.Errorf("pruneObjectBatch failed for test %s, err %v\n", testFile, err)
- }
- if len(s.hdlGC.pruneObjects) > 0 {
- t.Errorf("pruneObjectBatch didn't remove expected objects in test %s, map: %v", testFile, s.hdlGC.pruneObjects)
- }
- // Generations that should have been deleted.
- expVec := GenVector{"A": 2, "B": 3, "C": 4}
- for dev, gnum := range expVec {
- for i := GenID(1); i <= gnum; i++ {
- if s.log.hasGenMetadata(dev, i) {
- t.Errorf("pruneObjectBatch left unexpected generation in test %s, %s %d",
- testFile, dev, i)
- }
- // Check the first log record.
- if s.log.hasLogRec(dev, i, 0) {
- t.Errorf("pruneObjectBatch left unexpected logrec in test %s, %s %d 0",
- testFile, dev, i)
- }
- }
- }
- // Check the partial generation.
- if gen, err = s.log.getGenMetadata("A", 3); err != nil {
- t.Errorf("getGenMetadata failed for test %s, err %v\n", testFile, err)
- }
- if gen.Count != 2 {
- t.Errorf("GenMetadata has incorrect value for test %s\n", testFile)
- }
- // Verify DAG state.
- objArr := []string{"123", "456", "789"}
- heads := []raw.Version{10, 8, 6}
- for pos, o := range objArr {
- objid, err := strToObjID(o)
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
- if head, err := s.dag.getHead(objid); err != nil || head != heads[pos] {
- t.Errorf("Invalid object %d head in DAG %s, err %v", objid, head, err)
- }
- }
-}
-
-// TestGCStages tests the interactions across the different stages in GC.
-func TestGCStages(t *testing.T) {
- // Run the tests for both values of strictCheck flag.
- flags := []bool{true, false}
- for _, val := range flags {
- strictCheck = val
- setupGCReclaimAndOnlineCk(t)
- setupGCReclaimAndOnlineCkIncr(t)
- setupGCOnlineCkAndPrune(t)
- }
-}
-
-// setupGCReclaimAndOnlineCk performs the setup to test interaction between reclaimSpace and consistency check.
-func setupGCReclaimAndOnlineCk(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- s := NewSyncd("", "", "A", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- testFile := "test-3obj.gc.sync"
- if err := vsyncInitState(s, testFile); err != nil {
- t.Fatal(err)
- }
- if err := s.hdlGC.reclaimSpace(); err != nil {
- t.Errorf("reclaimSpace failed for test %s, err %v\n", testFile, err)
- }
- objBatchSize = 1
- if err := s.hdlGC.pruneObjectBatch(); err != nil {
- t.Errorf("pruneObjectBatch failed for test %s, err %v\n", testFile, err)
- }
-
- // Clean up state to simulate a reboot. Given that 1 object
- // is already GC'ed, there are now partial generations left in
- // the state.
- for obj := range s.hdlGC.pruneObjects {
- delete(s.hdlGC.pruneObjects, obj)
- delete(s.hdlGC.verifyPruneMap, obj)
- }
- s.hdlGC.reclaimSnap = GenVector{"A": 4, "B": 3, "C": 4}
- genBatchSize = 1
- for s.hdlGC.checkConsistency == true {
- if err := s.hdlGC.onlineConsistencyCheck(); err != nil {
- t.Fatalf("onlineConsistencyCheck failed for test %s, err %v", testFile, err)
- }
- }
-
- // Since dag prunes everything older than a version, all 3
- // objects show up once again in the pruneObjects map.
- if len(s.hdlGC.pruneObjects) != 3 {
- t.Errorf("onlineConsistencyCheck didn't add objects in test %s, map: %v", testFile, s.hdlGC.pruneObjects)
- }
-
- objBatchSize = 3
- if err := s.hdlGC.pruneObjectBatch(); err != nil {
- t.Errorf("pruneObjectBatch failed for test %s, err %v\n", testFile, err)
- }
- if len(s.hdlGC.pruneObjects) > 0 {
- t.Errorf("pruneObjectBatch didn't remove expected objects in test %s, map: %v", testFile, s.hdlGC.pruneObjects)
- }
- // Generations that should have been deleted.
- expVec := GenVector{"A": 2, "B": 3, "C": 4}
- for dev, gnum := range expVec {
- for i := GenID(1); i <= gnum; i++ {
- if s.log.hasGenMetadata(dev, i) {
- t.Errorf("pruneObjectBatch left unexpected generation in test %s, %s %d",
- testFile, dev, i)
- }
- // Check the first log record.
- if s.log.hasLogRec(dev, i, 0) {
- t.Errorf("pruneObjectBatch left unexpected logrec in test %s, %s %d 0",
- testFile, dev, i)
- }
- }
- }
- // Check the partial generation.
- gen, err := s.log.getGenMetadata("A", 3)
- if err != nil {
- t.Errorf("getGenMetadata failed for test %s, err %v\n", testFile, err)
- }
- if gen.Count != 2 {
- t.Errorf("GenMetadata has incorrect value for test %s\n", testFile)
- }
- // Verify DAG state.
- objArr := []string{"123", "456", "789"}
- heads := []raw.Version{10, 8, 6}
- for pos, o := range objArr {
- objid, err := strToObjID(o)
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
- if head, err := s.dag.getHead(objid); err != nil || head != heads[pos] {
- t.Errorf("Invalid object %d head in DAG %s, err %v", objid, head, err)
- }
- }
-}
-
-// setupGCReclaimAndOnlineCkIncr tests interaction between reclaimSpace
-// and consistency check when both are running one after the other
-// incrementally.
-func setupGCReclaimAndOnlineCkIncr(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- s := NewSyncd("", "", "A", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- testFile := "test-1obj.gc.sync"
- if err := vsyncInitState(s, testFile); err != nil {
- t.Fatal(err)
- }
- // Fast-forward the reclaimSnap and ReclaimVec.
- s.hdlGC.reclaimSnap = GenVector{"A": 2, "B": 1, "C": 2}
- s.devtab.head.ReclaimVec = GenVector{"A": 2, "B": 1, "C": 2}
-
- genBatchSize = 1
- if err := s.hdlGC.onlineConsistencyCheck(); err != nil {
- t.Fatalf("onlineConsistencyCheck failed for test %s, err %v", testFile, err)
- }
- objBatchSize = 1
- if err := s.hdlGC.pruneObjectBatch(); err != nil {
- t.Errorf("pruneObjectBatch failed for test %s, err %v\n", testFile, err)
- }
-
- cVec := GenVector{"A": 2, "B": 3, "C": 2}
- if err := s.devtab.putGenVec(DeviceID("C"), cVec); err != nil {
- t.Errorf("putGenVec failed for test %s, err %v\n", testFile, err)
- }
- if err := s.hdlGC.reclaimSpace(); err != nil {
- t.Errorf("reclaimSpace failed for test %s, err %v\n", testFile, err)
- }
- objid, err := strToObjID("12345")
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
- expMap := make(map[storage.ID]*objGCState)
- expMap[objid] = &objGCState{pos: 6, version: 6}
- if !reflect.DeepEqual(expMap, s.hdlGC.pruneObjects) {
- t.Errorf("Data mismatch for pruneObjects map: %v instead of %v", s.hdlGC.pruneObjects[objid], expMap[objid])
- }
- objBatchSize = 1
- if err := s.hdlGC.pruneObjectBatch(); err != nil {
- t.Errorf("pruneObjectBatch failed for test %s, err %v\n", testFile, err)
- }
- for s.hdlGC.checkConsistency == true {
- if err := s.hdlGC.onlineConsistencyCheck(); err != nil {
- t.Fatalf("onlineConsistencyCheck failed for test %s, err %v", testFile, err)
- }
- }
- if len(s.hdlGC.pruneObjects) > 0 {
- t.Errorf("onlineConsistencyCheck created an object in test %s, map: %v", testFile, s.hdlGC.pruneObjects)
- }
-}
-
-// setupGCOnlineCkAndPrune performs the setup to test interaction
-// between consistency check and object pruning (when both are
-// incremental).
-func setupGCOnlineCkAndPrune(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- s := NewSyncd("", "", "A", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- testFile := "test-3obj.gc.sync"
- if err := vsyncInitState(s, testFile); err != nil {
- t.Fatal(err)
- }
- if err := s.hdlGC.reclaimSpace(); err != nil {
- t.Errorf("reclaimSpace failed for test %s, err %v\n", testFile, err)
- }
- objBatchSize = 1
- if err := s.hdlGC.pruneObjectBatch(); err != nil {
- t.Errorf("pruneObjectBatch failed for test %s, err %v\n", testFile, err)
- }
-
- // Clean up state to simulate a reboot. Given that 1 object
- // is already GC'ed, there are now partial generations left in
- // the state.
- for obj := range s.hdlGC.pruneObjects {
- delete(s.hdlGC.pruneObjects, obj)
- delete(s.hdlGC.verifyPruneMap, obj)
- }
- s.hdlGC.reclaimSnap = GenVector{"A": 4, "B": 3, "C": 4}
- genBatchSize = 3
- objBatchSize = 3
- for s.hdlGC.checkConsistency == true {
- if err := s.hdlGC.onlineConsistencyCheck(); err != nil {
- t.Fatalf("onlineConsistencyCheck failed for test %s, err %v", testFile, err)
- }
- if err := s.hdlGC.pruneObjectBatch(); err != nil {
- t.Errorf("pruneObjectBatch failed for test %s, err %v\n", testFile, err)
- }
- }
- if len(s.hdlGC.pruneObjects) > 0 {
- t.Errorf("pruneObjectBatch didn't remove expected objects in test %s, map: %v", testFile, s.hdlGC.pruneObjects)
- }
-}
diff --git a/runtimes/google/vsync/ilog.go b/runtimes/google/vsync/ilog.go
deleted file mode 100644
index 764b6e5..0000000
--- a/runtimes/google/vsync/ilog.go
+++ /dev/null
@@ -1,466 +0,0 @@
-package vsync
-
-// Package vsync provides veyron sync ILog utility functions. ILog
-// (Indexed Log) provides log functionality with indexing support.
-// ILog stores log records that are locally generated or obtained over
-// the network. Indexing is needed since sync needs to selectively
-// retrieve log records that belong to a particular device and
-// generation during synchronization.
-//
-// When a device receives a request to send log records, it first
-// computes the missing generations between itself and the incoming
-// request. It then sends all the log records belonging to each
-// missing generation. A device that receives log records over the
-// network replays all the records received from another device in a
-// single batch. Each replayed log record adds a new version to the
-// dag of the object contained in the log record. At the end of
-// replaying all the log records, conflict detection and resolution is
-// carried out for all the objects learned during this
-// iteration. Conflict detection and resolution is carried out after a
-// batch of log records are replayed, instead of incrementally after
-// each record is replayed, to avoid repeating conflict resolution
-// already performed by other devices.
-//
-// New log records are created when objects in the local store are
-// created/updated. Local log records are also replayed to keep the
-// per-object dags consistent with the local store state.
-//
-// Implementation notes: ILog records are stored in a persistent K/V
-// database in the current implementation. ILog db consists of 3
-// tables:
-// ** records: table consists of all the log records indexed
-// by deviceid:genid:lsn referring to the device that creates the log
-// record, the generation on the device the log record is part of, and
-// its sequence number in that generation. Note that lsn in each
-// generation starts from 0 and genid starts from 1.
-// ** gens: table consists of the generation metadata for each
-// generation, and is indexed by deviceid:genid.
-// ** head: table consists of the log header.
-import (
- "errors"
- "fmt"
- "strconv"
- "strings"
-
- "veyron/services/store/raw"
-
- "veyron2/storage"
- "veyron2/vlog"
-)
-
-var (
- errNoUpdates = errors.New("no new local updates")
- errInvalidLog = errors.New("invalid log db")
-)
-
-// iLogHeader contains the log header metadata.
-type iLogHeader struct {
- Curgen GenID // generation id for a device's current generation.
- Curlsn LSN // log sequence number for a device's current generation.
- Curorder uint32 // position in log for the next generation.
-}
-
-// genMetadata contains the metadata for a generation.
-type genMetadata struct {
- // All generations stored in the log are ordered wrt each
- // other and this order needs to be preserved.
- // Position of this generation in the log.
- Pos uint32
-
- // Number of log records in this generation still stored in the log.
- // This count is used during garbage collection.
- Count uint64
-
- // Maximum LSN that was part of this generation.
- // This is useful during garbage collection to track any unclaimed log records.
- MaxLSN LSN
-}
-
-// iLog contains the metadata for the ILog db.
-type iLog struct {
- fname string // file pathname.
- db *kvdb // underlying k/v db.
-
- // Key:deviceid-genid-lsn Value:LogRecord
- records *kvtable // pointer to the "records" table in the kvdb. Contains log records.
-
- // Key:deviceid-genid Value:genMetadata
- gens *kvtable // pointer to the "gens" table in the kvdb. Contains generation metadata for each generation.
-
- // Key:"Head" Value:iLogHeader
- header *kvtable // pointer to the "header" table in the kvdb. Contains logheader.
-
- head *iLogHeader // log head cached in memory.
-
- s *syncd // pointer to the sync daemon object.
-}
-
-// openILog opens or creates an ILog for the given filename.
-func openILog(filename string, sin *syncd) (*iLog, error) {
- ilog := &iLog{
- fname: filename,
- head: nil,
- s: sin,
- }
- // Open the file and create it if it does not exist.
- // Also initialize the kvdb and its three collections.
- db, tbls, err := kvdbOpen(filename, []string{"records", "gens", "header"})
- if err != nil {
- return nil, err
- }
-
- ilog.db = db
- ilog.records = tbls[0]
- ilog.gens = tbls[1]
- ilog.header = tbls[2]
-
- // Initialize the log header.
- // First generation to be created is generation 1. A generation of 0
- // represents no updates on the device.
- ilog.head = &iLogHeader{
- Curgen: 1,
- }
- // If header already exists in db, read it back from db.
- if ilog.hasHead() {
- if err := ilog.getHead(); err != nil {
- ilog.db.close() // this also closes the tables.
- return nil, err
- }
- }
- return ilog, nil
-}
-
-// close closes the ILog and invalidates its struct.
-func (l *iLog) close() error {
- if l.db == nil {
- return errInvalidLog
- }
- // Flush the dirty data.
- if err := l.flush(); err != nil {
- return err
- }
-
- l.db.close() // this also closes the tables.
-
- *l = iLog{} // zero out the ILog struct.
- return nil
-}
-
-// flush flushes the ILog db to disk.
-func (l *iLog) flush() error {
- if l.db == nil {
- return errInvalidLog
- }
- // Set the head from memory before flushing.
- if err := l.putHead(); err != nil {
- return err
- }
-
- l.db.flush()
-
- return nil
-}
-
-// compact compacts the file associated with kvdb.
-func (l *iLog) compact() error {
- if l.db == nil {
- return errInvalidLog
- }
- db, tbls, err := l.db.compact(l.fname, []string{"records", "gens", "header"})
- if err != nil {
- return err
- }
- l.db = db
- l.records = tbls[0]
- l.gens = tbls[1]
- l.header = tbls[2]
- return nil
-}
-
-// putHead puts the log head into the ILog db.
-func (l *iLog) putHead() error {
- return l.header.set("Head", l.head)
-}
-
-// getHead gets the log head from the ILog db.
-func (l *iLog) getHead() error {
- if l.head == nil {
- return errors.New("nil log header")
- }
- err := l.header.get("Head", l.head)
- return err
-}
-
-// hasHead returns true if the ILog db has a log head.
-func (l *iLog) hasHead() bool {
- return l.header.hasKey("Head")
-}
-
-// logRecKey creates a key for a log record.
-func logRecKey(devid DeviceID, gnum GenID, lsn LSN) string {
- return fmt.Sprintf("%s:%d:%d", devid, uint64(gnum), uint64(lsn))
-}
-
-// splitLogRecKey splits a : separated logrec key into its components.
-func splitLogRecKey(key string) (DeviceID, GenID, LSN, error) {
- args := strings.Split(key, ":")
- if len(args) != 3 {
- return "", 0, 0, fmt.Errorf("bad logrec key %s", key)
- }
- gnum, _ := strconv.ParseUint(args[1], 10, 64)
- lsn, _ := strconv.ParseUint(args[2], 10, 64)
- return DeviceID(args[0]), GenID(gnum), LSN(lsn), nil
-}
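
The two helpers round-trip, as the following illustrative snippet assumes:

    key := logRecKey(DeviceID("phone"), GenID(3), LSN(7)) // "phone:3:7"
    dev, gnum, lsn, err := splitLogRecKey(key)
    // dev == "phone", gnum == 3, lsn == 7, err == nil

Note that the ParseUint errors are discarded, so a malformed numeric field such as "phone:x:7" parses silently as gnum == 0 rather than returning an error.
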
-
-// putLogRec puts the log record into the ILog db.
-func (l *iLog) putLogRec(rec *LogRec) (string, error) {
- if l.db == nil {
- return "", errInvalidLog
- }
- key := logRecKey(rec.DevID, rec.GNum, rec.LSN)
- return key, l.records.set(key, rec)
-}
-
-// getLogRec gets the log record from the ILog db.
-func (l *iLog) getLogRec(devid DeviceID, gnum GenID, lsn LSN) (*LogRec, error) {
- if l.db == nil {
- return nil, errInvalidLog
- }
- key := logRecKey(devid, gnum, lsn)
- var rec LogRec
- if err := l.records.get(key, &rec); err != nil {
- return nil, err
- }
- return &rec, nil
-}
-
-// hasLogRec returns true if the ILog db has a log record matching (devid, gnum, lsn).
-func (l *iLog) hasLogRec(devid DeviceID, gnum GenID, lsn LSN) bool {
- if l.db == nil {
- return false
- }
- key := logRecKey(devid, gnum, lsn)
- return l.records.hasKey(key)
-}
-
-// delLogRec deletes the log record matching (devid, gnum, lsn) from the ILog db.
-func (l *iLog) delLogRec(devid DeviceID, gnum GenID, lsn LSN) error {
- if l.db == nil {
- return errInvalidLog
- }
- key := logRecKey(devid, gnum, lsn)
- return l.records.del(key)
-}
-
-// generationKey creates a key for a generation.
-func generationKey(devid DeviceID, gnum GenID) string {
- return fmt.Sprintf("%s:%d", devid, gnum)
-}
-
-// splitGenerationKey splits a : separated generation key into its components.
-func splitGenerationKey(key string) (DeviceID, GenID, error) {
- args := strings.Split(key, ":")
- if len(args) != 2 {
- return "", 0, fmt.Errorf("bad generation key %s", key)
- }
- gnum, _ := strconv.ParseUint(args[1], 10, 64)
- return DeviceID(args[0]), GenID(gnum), nil
-}
-
-// putGenMetadata puts the metadata of the generation (devid, gnum) into the ILog db.
-func (l *iLog) putGenMetadata(devid DeviceID, gnum GenID, val *genMetadata) error {
- key := generationKey(devid, gnum)
- return l.gens.set(key, val)
-}
-
-// getGenMetadata gets the metadata of the generation (devid, gnum) from the ILog db.
-func (l *iLog) getGenMetadata(devid DeviceID, gnum GenID) (*genMetadata, error) {
- if l.db == nil {
- return nil, errInvalidLog
- }
- key := generationKey(devid, gnum)
- var val genMetadata
- if err := l.gens.get(key, &val); err != nil {
- return nil, err
- }
- return &val, nil
-}
-
-// hasGenMetadata returns true if the ILog db has the generation (devid, gnum).
-func (l *iLog) hasGenMetadata(devid DeviceID, gnum GenID) bool {
- key := generationKey(devid, gnum)
- return l.gens.hasKey(key)
-}
-
-// delGenMetadata deletes the generation (devid, gnum) metadata from the ILog db.
-func (l *iLog) delGenMetadata(devid DeviceID, gnum GenID) error {
- if l.db == nil {
- return errInvalidLog
- }
- key := generationKey(devid, gnum)
- return l.gens.del(key)
-}
-
-// createLocalLogRec creates a new local log record of type NodeRec.
-func (l *iLog) createLocalLogRec(obj storage.ID, vers raw.Version, par []raw.Version, val *LogValue) (*LogRec, error) {
- rec := &LogRec{
- DevID: l.s.id,
- GNum: l.head.Curgen,
- LSN: l.head.Curlsn,
- RecType: NodeRec,
-
- ObjID: obj,
- CurVers: vers,
- Parents: par,
- Value: *val,
- }
-
- // Increment the LSN for the local log.
- l.head.Curlsn++
-
- return rec, nil
-}
-
-// createLocalLinkLogRec creates a new local log record of type LinkRec.
-func (l *iLog) createLocalLinkLogRec(obj storage.ID, vers, par raw.Version) (*LogRec, error) {
- rec := &LogRec{
- DevID: l.s.id,
- GNum: l.head.Curgen,
- LSN: l.head.Curlsn,
- RecType: LinkRec,
-
- ObjID: obj,
- CurVers: vers,
- Parents: []raw.Version{par},
- }
-
- // Increment the LSN for the local log.
- l.head.Curlsn++
-
- return rec, nil
-}
-
-// createRemoteGeneration adds a new remote generation.
-func (l *iLog) createRemoteGeneration(dev DeviceID, gnum GenID, gen *genMetadata) error {
- if l.db == nil {
- return errInvalidLog
- }
-
- if gen.Count != uint64(gen.MaxLSN+1) {
- return errors.New("mismatch in count and lsn")
- }
-
- gen.Pos = l.head.Curorder
- l.head.Curorder++
-
- return l.putGenMetadata(dev, gnum, gen)
-}
-
-// createLocalGeneration creates a new local generation.
-// createLocalGeneration is currently called when there is an incoming GetDeltas request.
-func (l *iLog) createLocalGeneration() (GenID, error) {
- if l.db == nil {
- return 0, errInvalidLog
- }
-
- g := l.head.Curgen
-
- // If there are no updates, there will be no new generation.
- if l.head.Curlsn == 0 {
- return g - 1, errNoUpdates
- }
-
- // Add the current generation to the db.
- val := &genMetadata{
- Pos: l.head.Curorder,
- Count: uint64(l.head.Curlsn),
- MaxLSN: l.head.Curlsn - 1,
- }
- err := l.putGenMetadata(l.s.id, g, val)
-
- vlog.VI(2).Infof("createLocalGeneration:: created gen %d %v", g, val)
- // Move to the next generation irrespective of err.
- l.head.Curorder++
- l.head.Curgen++
- l.head.Curlsn = 0
-
- return g, err
-}
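
A worked trace of the head transition (values hypothetical): with Curgen == 5 and three local updates logged since the last cut (Curlsn == 3),

    g, err := l.createLocalGeneration()

returns g == 5 and err == nil, stores genMetadata{Pos: <old Curorder>, Count: 3, MaxLSN: 2} under the key "device:5", and advances the head to Curgen == 6, Curlsn == 0. With Curlsn == 0 the call instead returns (Curgen-1, errNoUpdates) and stores nothing.
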
-
-// processWatchRecord processes new object versions obtained from the local store.
-func (l *iLog) processWatchRecord(objID storage.ID, vers, parent raw.Version, val *LogValue, txID TxID) error {
- if l.db == nil {
- return errInvalidLog
- }
-
- vlog.VI(2).Infof("processWatchRecord:: adding object %v %v", objID, vers)
-
- if vers != raw.NoVersion {
- // Check if the object's vers already exists in the DAG.
- if l.s.dag.hasNode(objID, vers) {
- return nil
- }
- } else {
- // Check if the parent version has a deleted
- // descendant already in the DAG.
- if l.s.dag.hasDeletedDescendant(objID, parent) {
- return nil
- }
- }
-
- var pars []raw.Version
- if parent != raw.NoVersion {
- pars = []raw.Version{parent}
- }
-
- // If the current version is a deletion, generate a new version number.
- if val.Delete {
- if vers != raw.NoVersion {
- return fmt.Errorf("deleted vers is %v", vers)
- }
- vers = raw.NewVersion()
- val.Mutation.Version = vers
- }
-
- // Create a log record from Watch's Change Record.
- rec, err := l.createLocalLogRec(objID, vers, pars, val)
- if err != nil {
- return err
- }
-
- // Insert the new log record into the log.
- logKey, err := l.putLogRec(rec)
- if err != nil {
- return err
- }
-
- // Insert the new log record into dag.
- if err = l.s.dag.addNode(rec.ObjID, rec.CurVers, false, val.Delete, rec.Parents, logKey, txID); err != nil {
- return err
- }
-
- // Move the head.
- if err := l.s.dag.moveHead(rec.ObjID, rec.CurVers); err != nil {
- return err
- }
-
- return nil
-}
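
A sketch of the two paths above (illustrative only; the arguments are hypothetical):

    // An ordinary mutation carries its own version; the parent links the
    // new DAG node to the previous head.
    err := l.processWatchRecord(objID, raw.Version(2), raw.Version(1), &LogValue{}, NoTxID)

    // A deletion arrives with raw.NoVersion and Delete set; a fresh version
    // is minted for the deletion node, whose parent is version 2.
    err = l.processWatchRecord(objID, raw.NoVersion, raw.Version(2), &LogValue{Delete: true}, NoTxID)
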
-
-// dumpILog dumps the ILog data structure.
-func (l *iLog) dumpILog() {
- fmt.Println("In-memory Header")
- fmt.Println("Current generation: ", l.head.Curgen, l.head.Curlsn, " Current order: ",
- l.head.Curorder)
-
- fmt.Println("================================================")
-
- fmt.Println("In-DB Header")
- head := &iLogHeader{}
- if err := l.header.get("Head", head); err != nil {
- fmt.Println("Couldn't access in DB header")
- } else {
- fmt.Println("Current generation: ", head.Curgen, head.Curlsn, " Current order: ", head.Curorder)
- }
- fmt.Println("================================================")
-}
diff --git a/runtimes/google/vsync/ilog_test.go b/runtimes/google/vsync/ilog_test.go
deleted file mode 100644
index ea1ba6e..0000000
--- a/runtimes/google/vsync/ilog_test.go
+++ /dev/null
@@ -1,933 +0,0 @@
-package vsync
-
-// Tests for the Veyron Sync ILog component.
-import (
- "os"
- "reflect"
- "testing"
-
- "veyron/services/store/raw"
-
- "veyron2/storage"
-)
-
-// TestILogStore tests creating a backing file for ILog.
-func TestILogStore(t *testing.T) {
- logfile := getFileName()
- defer os.Remove(logfile)
-
- log, err := openILog(logfile, nil)
- if err != nil {
- t.Fatalf("Cannot open new ILog file %s, err %v", logfile, err)
- }
-
- fsize := getFileSize(logfile)
- if fsize < 0 {
- t.Errorf("Log file %s not created", logfile)
- }
-
- if err := log.flush(); err != nil {
- t.Errorf("Cannot flush ILog file %s, err %v", logfile, err)
- }
-
- oldfsize := fsize
- fsize = getFileSize(logfile)
- if fsize <= oldfsize {
- t.Errorf("Log file %s not flushed", logfile)
- }
-
- if err := log.close(); err != nil {
- t.Errorf("Cannot close ILog file %s, err %v", logfile)
- }
-
- oldfsize = getFileSize(logfile)
-
- log, err = openILog(logfile, nil)
- if err != nil {
- t.Fatalf("Cannot re-open existing log file %s, err %v", logfile)
- }
-
- fsize = getFileSize(logfile)
- if fsize != oldfsize {
- t.Errorf("Log file %s size changed across re-open (%d %d)", logfile, fsize, oldfsize)
- }
-
- if err := log.flush(); err != nil {
- t.Errorf("Cannot flush ILog file %s, err %v", logfile)
- }
-
- if err := log.close(); err != nil {
- t.Errorf("Cannot close ILog file %s, err %v", logfile)
- }
-}
-
-// TestInvalidLog tests log methods on an invalid (closed) log ptr.
-func TestInvalidLog(t *testing.T) {
- logfile := getFileName()
- defer os.Remove(logfile)
-
- log, err := openILog(logfile, nil)
- if err != nil {
- t.Fatalf("Cannot open new ILog file %s, err %v", logfile, err)
- }
-
- if err := log.close(); err != nil {
- t.Errorf("Cannot close ILog file %s, err %v", logfile, err)
- }
-
- err = log.close()
- if err == nil || err != errInvalidLog {
- t.Errorf("Close did not fail on a closed log: %v", err)
- }
-
- err = log.flush()
- if err == nil || err != errInvalidLog {
- t.Errorf("Flush did not fail on a closed log: %v", err)
- }
-
- err = log.compact()
- if err == nil || err != errInvalidLog {
- t.Errorf("Compact did not fail on a closed log: %v", err)
- }
-
- _, err = log.putLogRec(&LogRec{})
- if err == nil || err != errInvalidLog {
- t.Errorf("PutLogRec did not fail on a closed log: %v", err)
- }
-
- var devid DeviceID = "VeyronPhone"
- var gnum GenID = 1
- var lsn LSN
-
- _, err = log.getLogRec(devid, gnum, lsn)
- if err == nil || err != errInvalidLog {
- t.Errorf("GetLogRec did not fail on a closed log: %v", err)
- }
-
-	if log.hasLogRec(devid, gnum, lsn) {
-		t.Errorf("HasLogRec did not fail on a closed log")
-	}
-
- err = log.delLogRec(devid, gnum, lsn)
- if err == nil || err != errInvalidLog {
- t.Errorf("DelLogRec did not fail on a closed log: %v", err)
- }
-
- _, err = log.getGenMetadata(devid, gnum)
- if err == nil || err != errInvalidLog {
- t.Errorf("GetGenMetadata did not fail on a closed log: %v", err)
- }
-
- err = log.delGenMetadata(devid, gnum)
- if err == nil || err != errInvalidLog {
- t.Errorf("DelGenMetadata did not fail on a closed log: %v", err)
- }
-
- err = log.createRemoteGeneration(devid, gnum, &genMetadata{})
- if err == nil || err != errInvalidLog {
- t.Errorf("CreateRemoteGeneration did not fail on a closed log: %v", err)
- }
-
- _, err = log.createLocalGeneration()
- if err == nil || err != errInvalidLog {
- t.Errorf("CreateLocalGeneration did not fail on a closed log: %v", err)
- }
-
- err = log.processWatchRecord(storage.NewID(), 2, raw.Version(999), &LogValue{}, NoTxID)
- if err == nil || err != errInvalidLog {
- t.Errorf("ProcessWatchRecord did not fail on a closed log: %v", err)
- }
-}
-
-// TestPutGetLogHeader tests setting and getting log header across log open/close/reopen.
-func TestPutGetLogHeader(t *testing.T) {
- logfile := getFileName()
- defer os.Remove(logfile)
-
- log, err := openILog(logfile, nil)
- if err != nil {
- t.Fatalf("Cannot open new log file %s, err %v", logfile, err)
- }
-
- // In memory head should be initialized.
- if log.head.Curgen != 1 || log.head.Curlsn != 0 || log.head.Curorder != 0 {
- t.Errorf("First time log create should reset header")
- }
-
- // No head should be there in db.
- if err = log.getHead(); err == nil {
- t.Errorf("getHead() found non-existent head in log file %s, err %v", logfile, err)
- }
-
- if log.hasHead() {
- t.Errorf("hasHead() found non-existent head in log file %s", logfile)
- }
-
- log.head = &iLogHeader{
- Curgen: 10,
- Curlsn: 100,
- Curorder: 1000,
- }
-
- if err := log.putHead(); err != nil {
- t.Errorf("Cannot put head %v in log file %s, err %v", log.head, logfile, err)
- }
-
- // Reset values.
- log.head.Curgen = 0
- log.head.Curlsn = 0
- log.head.Curorder = 0
-
- for i := 0; i < 2; i++ {
- if err := log.getHead(); err != nil {
- t.Fatalf("getHead() can not find head (i=%d) in log file %s, err %v", i, logfile, err)
- }
-
- if !log.hasHead() {
- t.Errorf("hasHead() can not find head (i=%d) in log file %s", i, logfile)
- }
-
-		if log.head.Curgen != 10 || log.head.Curlsn != 100 || log.head.Curorder != 1000 {
- t.Errorf("Data mismatch for head (i=%d) in log file %s: %v",
- i, logfile, log.head)
- }
-
- if i == 0 {
- if err := log.close(); err != nil {
- t.Errorf("Cannot close log file %s, err %v", logfile, err)
- }
- log, err = openILog(logfile, nil)
- if err != nil {
- t.Fatalf("Cannot re-open log file %s, err %v", logfile, err)
- }
- }
- }
-
- if err := log.close(); err != nil {
- t.Errorf("Cannot close log file %s, err %v", logfile, err)
- }
-}
-
-// TestPersistLogHeader tests that log header is automatically persisted across log open/close/reopen.
-func TestPersistLogHeader(t *testing.T) {
- logfile := getFileName()
- defer os.Remove(logfile)
-
- log, err := openILog(logfile, nil)
- if err != nil {
- t.Fatalf("Cannot open new log file %s, err %v", logfile, err)
- }
-
- // In memory head should be initialized.
- if log.head.Curgen != 1 || log.head.Curlsn != 0 || log.head.Curorder != 0 {
- t.Errorf("First time log create should reset header")
- }
-
- log.head = &iLogHeader{
- Curgen: 10,
- Curlsn: 100,
- Curorder: 1000,
- }
-
- if err = log.close(); err != nil {
- t.Errorf("Cannot close log file %s, err %v", logfile, err)
- }
-
- log, err = openILog(logfile, nil)
- if err != nil {
- t.Fatalf("Cannot open new log file %s, err %v", logfile, err)
- }
-
- // In memory head should be initialized from db.
-	if log.head.Curgen != 10 || log.head.Curlsn != 100 || log.head.Curorder != 1000 {
- t.Errorf("Data mismatch for head in log file %s: %v", logfile, log.head)
- }
-
- log.head = &iLogHeader{
- Curgen: 1000,
- Curlsn: 10,
- Curorder: 100,
- }
-
- if err := log.flush(); err != nil {
- t.Errorf("Cannot flush ILog file %s, err %v", logfile, err)
- }
-
- // Reset values.
- log.head.Curgen = 0
- log.head.Curlsn = 0
- log.head.Curorder = 0
-
- if err := log.getHead(); err != nil {
- t.Fatalf("getHead() can not find head in log file %s, err %v", logfile, err)
- }
-
- // In memory head should be initialized from db.
-	if log.head.Curgen != 1000 || log.head.Curlsn != 10 || log.head.Curorder != 100 {
- t.Errorf("Data mismatch for head in log file %s: %v", logfile, log.head)
- }
-
- if err = log.close(); err != nil {
- t.Errorf("Cannot close log file %s, err %v", logfile, err)
- }
-}
-
-// TestPutGetLogRec tests setting and getting a log record across log open/close/reopen.
-func TestPutGetLogRec(t *testing.T) {
- logfile := getFileName()
- defer os.Remove(logfile)
-
- log, err := openILog(logfile, nil)
- if err != nil {
- t.Fatalf("Cannot open new log file %s, err %v", logfile, err)
- }
-
- var devid DeviceID = "VeyronTab"
- var gnum GenID = 100
- var lsn LSN = 1000
-
- rec, err := log.getLogRec(devid, gnum, lsn)
- if err == nil || rec != nil {
- t.Errorf("GetLogRec() found non-existent object %s:%d:%d in log file %s: %v, err %v",
- devid, gnum, lsn, logfile, rec, err)
- }
-
- if log.hasLogRec(devid, gnum, lsn) {
- t.Errorf("HasLogRec() found non-existent object %s:%d:%d in log file %s",
- devid, gnum, lsn, logfile)
- }
-
- objID := storage.NewID()
- rec = &LogRec{
- DevID: devid,
- GNum: gnum,
- LSN: lsn,
- ObjID: objID,
- CurVers: 2,
- Parents: []raw.Version{0, 1},
- Value: LogValue{},
- }
-
- if _, err := log.putLogRec(rec); err != nil {
- t.Errorf("Cannot put object %s:%d:%d (%v) in log file %s, err %v", devid, gnum, lsn, rec, logfile, err)
- }
-
- for i := 0; i < 2; i++ {
- curRec, err := log.getLogRec(devid, gnum, lsn)
- if err != nil || curRec == nil {
- t.Fatalf("GetLogRec() can not find object %s:%d:%d (i=%d) in log file %s, err %v",
- devid, gnum, lsn, i, logfile, err)
- }
-
- if !log.hasLogRec(devid, gnum, lsn) {
- t.Errorf("HasLogRec() can not find object %s:%d:%d (i=%d) in log file %s",
- devid, gnum, lsn, i, logfile)
- }
-
- if !reflect.DeepEqual(rec, curRec) {
- t.Errorf("Data mismatch for object %s:%d:%d (i=%d) in log file %s: %v instead of %v",
- devid, gnum, lsn, i, logfile, curRec, rec)
- }
-
- if i == 0 {
- if err := log.close(); err != nil {
- t.Errorf("Cannot close log file %s, err %v", logfile, err)
- }
- log, err = openILog(logfile, nil)
- if err != nil {
- t.Fatalf("Cannot re-open log file %s, err %v", logfile, err)
- }
- }
- }
-
- if err := log.close(); err != nil {
- t.Errorf("Cannot close log file %s, err %v", logfile, err)
- }
-}
-
-// TestDelLogRec tests deleting a log record across log open/close/reopen.
-func TestDelLogRec(t *testing.T) {
- logfile := getFileName()
- defer os.Remove(logfile)
-
- log, err := openILog(logfile, nil)
- if err != nil {
- t.Fatalf("Cannot open new log file %s, err %v", logfile, err)
- }
-
- var devid DeviceID = "VeyronTab"
- var gnum GenID = 100
- var lsn LSN = 1000
-
- objID := storage.NewID()
- rec := &LogRec{
- DevID: devid,
- GNum: gnum,
- LSN: lsn,
- ObjID: objID,
- CurVers: 2,
- Parents: []raw.Version{0, 1},
- Value: LogValue{},
- }
-
- if _, err := log.putLogRec(rec); err != nil {
- t.Errorf("Cannot put object %s:%d:%d (%v) in log file %s, err %v", devid, gnum, lsn, rec, logfile, err)
- }
-
- curRec, err := log.getLogRec(devid, gnum, lsn)
- if err != nil || curRec == nil {
- t.Fatalf("GetLogRec() can not find object %s:%d:%d in log file %s, err %v",
- devid, gnum, lsn, logfile, err)
- }
-
- if err := log.delLogRec(devid, gnum, lsn); err != nil {
- t.Fatalf("DelLogRec() can not delete object %s:%d:%d in log file %s, err %v",
- devid, gnum, lsn, logfile, err)
- }
-
- for i := 0; i < 2; i++ {
- curRec, err = log.getLogRec(devid, gnum, lsn)
- if err == nil || curRec != nil {
- t.Fatalf("GetLogRec() finds deleted object %s:%d:%d (i=%d) in log file %s, err %v",
- devid, gnum, lsn, i, logfile, err)
- }
-
- if log.hasLogRec(devid, gnum, lsn) {
- t.Errorf("HasLogRec() finds deleted object %s:%d:%d (i=%d) in log file %s",
- devid, gnum, lsn, i, logfile)
- }
-
- if i == 0 {
- if err := log.close(); err != nil {
- t.Errorf("Cannot close log file %s, err %v", logfile, err)
- }
- log, err = openILog(logfile, nil)
- if err != nil {
- t.Fatalf("Cannot re-open log file %s, err %v", logfile, err)
- }
- }
- }
-
- if err := log.close(); err != nil {
- t.Errorf("Cannot close log file %s, err %v", logfile, err)
- }
-
-}
-
-// TestPutGetGenMetadata tests setting and getting generation metadata across log open/close/reopen.
-func TestPutGetGenMetadata(t *testing.T) {
- logfile := getFileName()
- defer os.Remove(logfile)
-
- log, err := openILog(logfile, nil)
- if err != nil {
- t.Fatalf("Cannot open new log file %s, err %v", logfile, err)
- }
-
- var devid DeviceID = "VeyronTab"
- var gnum GenID = 100
-
- val, err := log.getGenMetadata(devid, gnum)
- if err == nil || val != nil {
- t.Errorf("GetGenMetadata() found non-existent object %s:%d in log file %s: %v, err %v",
- devid, gnum, logfile, val, err)
- }
-
- if log.hasGenMetadata(devid, gnum) {
- t.Errorf("hasGenMetadata() found non-existent object %s:%d in log file %s",
- devid, gnum, logfile)
- }
-
- val = &genMetadata{Pos: 40, Count: 100, MaxLSN: 99}
- if err := log.putGenMetadata(devid, gnum, val); err != nil {
- t.Errorf("Cannot put object %s:%d in log file %s, err %v", devid, gnum, logfile, err)
- }
-
- for i := 0; i < 2; i++ {
- curVal, err := log.getGenMetadata(devid, gnum)
- if err != nil || curVal == nil {
- t.Fatalf("GetGenMetadata() can not find object %s:%d (i=%d) in log file %s, err %v",
- devid, gnum, i, logfile, err)
- }
-
- if !log.hasGenMetadata(devid, gnum) {
- t.Errorf("hasGenMetadata() can not find object %s:%d (i=%d) in log file %s",
- devid, gnum, i, logfile)
- }
-
- if !reflect.DeepEqual(val, curVal) {
- t.Errorf("Data mismatch for object %s:%d (i=%d) in log file %s: %v instead of %v",
- devid, gnum, i, logfile, curVal, val)
- }
-
- if i == 0 {
- if err := log.close(); err != nil {
- t.Errorf("Cannot close log file %s, err %v", logfile, err)
- }
- log, err = openILog(logfile, nil)
- if err != nil {
- t.Fatalf("Cannot re-open log file %s, err %v", logfile, err)
- }
- }
- }
-
- if err := log.close(); err != nil {
- t.Errorf("Cannot close log file %s, err %v", logfile, err)
- }
-}
-
-// TestDelGenMetadata tests deleting generation metadata across log open/close/reopen.
-func TestDelGenMetadata(t *testing.T) {
- logfile := getFileName()
- defer os.Remove(logfile)
-
- log, err := openILog(logfile, nil)
- if err != nil {
- t.Fatalf("Cannot open new log file %s, err %v", logfile, err)
- }
-
- var devid DeviceID = "VeyronTab"
- var gnum GenID = 100
-
- val := &genMetadata{Pos: 40, Count: 100, MaxLSN: 99}
- if err := log.putGenMetadata(devid, gnum, val); err != nil {
- t.Errorf("Cannot put object %s:%d in log file %s, err %v", devid, gnum, logfile, err)
- }
-
- curVal, err := log.getGenMetadata(devid, gnum)
- if err != nil || curVal == nil {
- t.Fatalf("GetGenMetadata() can not find object %s:%d in log file %s, err %v",
- devid, gnum, logfile, err)
- }
-
- if err := log.delGenMetadata(devid, gnum); err != nil {
- t.Fatalf("DelGenMetadata() can not delete object %s:%d in log file %s, err %v",
- devid, gnum, logfile, err)
- }
-
- for i := 0; i < 2; i++ {
- curVal, err := log.getGenMetadata(devid, gnum)
- if err == nil || curVal != nil {
- t.Fatalf("GetGenMetadata() finds deleted object %s:%d (i=%d) in log file %s, err %v",
- devid, gnum, i, logfile, err)
- }
-
- if log.hasGenMetadata(devid, gnum) {
- t.Errorf("hasGenMetadata() finds deleted object %s:%d (i=%d) in log file %s",
- devid, gnum, i, logfile)
- }
-
- if i == 0 {
- if err := log.close(); err != nil {
- t.Errorf("Cannot close log file %s, err %v", logfile, err)
- }
- log, err = openILog(logfile, nil)
- if err != nil {
- t.Fatalf("Cannot re-open log file %s, err %v", logfile, err)
- }
- }
- }
-
- if err := log.close(); err != nil {
- t.Errorf("Cannot close log file %s, err %v", logfile, err)
- }
-}
-
-// TestPersistLogState tests that generation metadata and record state
-// are persisted across log open/close/reopen.
-func TestPersistLogState(t *testing.T) {
- logfile := getFileName()
- defer os.Remove(logfile)
-
- log, err := openILog(logfile, nil)
- if err != nil {
- t.Fatalf("Cannot open new log file %s, err %v", logfile, err)
- }
-
- var devid DeviceID = "VeyronTab"
-
- // Add several generations.
- for i := uint32(0); i < 10; i++ {
- val := &genMetadata{Pos: i}
- if err := log.putGenMetadata(devid, GenID(i+10), val); err != nil {
- t.Errorf("Cannot put object %s:%d in log file %s, err %v", devid, i, logfile, err)
- }
- }
- if err := log.close(); err != nil {
- t.Errorf("Cannot close log file %s, err %v", logfile, err)
- }
- log, err = openILog(logfile, nil)
- if err != nil {
- t.Fatalf("Cannot re-open log file %s, err %v", logfile, err)
- }
- for i := uint32(0); i < 10; i++ {
- curVal, err := log.getGenMetadata(devid, GenID(i+10))
- if err != nil || curVal == nil {
- t.Fatalf("GetGenMetadata() can not find object %s:%d in log file %s, err %v",
- devid, i, logfile, err)
- }
- if curVal.Pos != i {
- t.Errorf("Data mismatch for object %s:%d in log file %s: %v",
- devid, i, logfile, curVal)
- }
- // Should safely overwrite the same keys.
- curVal.Pos = i + 10
- if err := log.putGenMetadata(devid, GenID(i+10), curVal); err != nil {
- t.Errorf("Cannot put object %s:%d in log file %s, err %v", devid, i, logfile, err)
- }
- }
- for i := uint32(0); i < 10; i++ {
- curVal, err := log.getGenMetadata(devid, GenID(i+10))
- if err != nil || curVal == nil {
- t.Fatalf("GetGenMetadata() can not find object %s:%d in log file %s, err %v",
- devid, i, logfile, err)
- }
- if curVal.Pos != (i + 10) {
- t.Errorf("Data mismatch for object %s:%d in log file %s: %v, err %v",
- devid, i, logfile, curVal, err)
- }
- }
-
- if err := log.close(); err != nil {
- t.Errorf("Cannot close log file %s, err %v", logfile, err)
- }
-}
-
-// fillFakeLogRecords fills fake log records for testing purposes.
-func (l *iLog) fillFakeLogRecords() {
- const num = 10
- var parvers []raw.Version
- id := storage.NewID()
- for i := int(0); i < num; i++ {
- // Create a local log record.
- curvers := raw.Version(i)
- rec, err := l.createLocalLogRec(id, curvers, parvers, &LogValue{})
- if err != nil {
- return
- }
- // Insert the new log record into the log.
- _, err = l.putLogRec(rec)
- if err != nil {
- return
- }
- parvers = []raw.Version{curvers}
- }
-}
-
-// TestCreateGeneration tests that local log records and local
-// generations are created uniquely and remote generations are
-// correctly inserted in log order.
-func TestCreateGeneration(t *testing.T) {
- logfile := getFileName()
- defer os.Remove(logfile)
-
- s := &syncd{id: "VeyronTab"}
- log, err := openILog(logfile, s)
- if err != nil {
- t.Fatalf("Cannot open new log file %s, err %v", logfile, err)
- }
-
- if g, err := log.createLocalGeneration(); err != errNoUpdates {
- t.Errorf("Should not find local updates gen %d with error %v", g, err)
- }
-
- const num = 10
- var parvers []raw.Version
- id := storage.NewID()
- for i := int(0); i < num; i++ {
- // Create a local log record.
- curvers := raw.Version(i)
- rec, err := log.createLocalLogRec(id, curvers, parvers, &LogValue{})
- if err != nil {
-			t.Fatalf("Cannot create local log rec ObjID: %v Current: %v Parents: %v Error: %v",
-				id, curvers, parvers, err)
- }
-
- temprec := &LogRec{
- DevID: log.s.id,
- GNum: GenID(1),
- LSN: LSN(i),
- ObjID: id,
- CurVers: curvers,
- Parents: parvers,
- Value: LogValue{},
- }
- // Verify that the log record has the right values.
- if !reflect.DeepEqual(rec, temprec) {
-			t.Errorf("Data mismatch in log record %v instead of %v", rec, temprec)
- }
-
- // Insert the new log record into the log.
- _, err = log.putLogRec(rec)
- if err != nil {
- t.Errorf("Cannot put log record:: failed with err %v", err)
- }
-
- parvers = []raw.Version{curvers}
- }
-
- if err = log.close(); err != nil {
- t.Errorf("Cannot close log file %s, err %v", logfile, err)
- }
-
- log, err = openILog(logfile, s)
- if err != nil {
- t.Fatalf("Cannot open new log file %s, err %v", logfile, err)
- }
-
- if log.head.Curgen != 1 || log.head.Curlsn != num {
- t.Errorf("Data mismatch in log header %v", log.head)
- }
-
- g, err := log.createLocalGeneration()
- if g != 1 || err != nil {
- t.Errorf("Could not create local generation gen %d with error %v", g, err)
- }
- curVal, err := log.getGenMetadata(log.s.id, g)
- if err != nil || curVal == nil {
- t.Fatalf("GetGenMetadata() can not find object %s:%d in log file %s, err %v",
- log.s.id, g, logfile, err)
- }
- expVal := &genMetadata{Pos: 0, Count: 10, MaxLSN: 9}
- if !reflect.DeepEqual(expVal, curVal) {
- t.Errorf("Data mismatch for object %s:%d in log file %s: %v instead of %v",
- log.s.id, g, logfile, curVal, expVal)
- }
- if log.head.Curgen != 2 || log.head.Curlsn != 0 || log.head.Curorder != 1 {
- t.Errorf("Data mismatch in log header %v", log.head)
- }
-
- if g, err := log.createLocalGeneration(); err != errNoUpdates {
- t.Errorf("Should not find local updates gen %d with error %v", g, err)
- }
-
- // Populate one more generation.
- log.fillFakeLogRecords()
-
- g, err = log.createLocalGeneration()
- if g != 2 || err != nil {
- t.Errorf("Could not create local generation gen %d with error %v", g, err)
- }
-
- // Create a remote generation.
- expGen := &genMetadata{Count: 1, MaxLSN: 50}
- if err = log.createRemoteGeneration("VeyronPhone", 1, expGen); err == nil {
-		t.Errorf("Remote generation create should have failed for gen %d, err %v", g, err)
- }
- expGen.MaxLSN = 0
- if err = log.createRemoteGeneration("VeyronPhone", 1, expGen); err != nil {
- t.Errorf("createRemoteGeneration failed with err %v", err)
- }
- if expGen.Pos != 2 {
- t.Errorf("createRemoteGeneration created incorrect log order %d", expGen.Pos)
- }
-
- if err = log.close(); err != nil {
- t.Errorf("Cannot close log file %s, err %v", logfile, err)
- }
-
- // Reopen the log and ensure that all log records for generations exist.
- // Also ensure that generation metadata is accurate.
- log, err = openILog(logfile, s)
- if err != nil {
- t.Fatalf("Cannot open new log file %s, err %v", logfile, err)
- }
-
- for g := GenID(1); g < 3; g++ {
- // Check all log records.
- for i := LSN(0); i < 10; i++ {
- curRec, err := log.getLogRec(log.s.id, g, i)
- if err != nil || curRec == nil {
- t.Fatalf("GetLogRec() can not find object %s:%d:%d in log file %s, err %v",
- log.s.id, g, i, logfile, err)
- }
- }
- // Check generation metadata.
- curVal, err := log.getGenMetadata(log.s.id, g)
- if err != nil || curVal == nil {
- t.Fatalf("GetGenMetadata() can not find object %s:%d in log file %s, err %v",
- log.s.id, g, logfile, err)
- }
- expVal.Pos = uint32(g - 1)
- if !reflect.DeepEqual(expVal, curVal) {
- t.Errorf("Data mismatch for object %s:%d in log file %s: %v instead of %v",
- log.s.id, g, logfile, curVal, expVal)
- }
- }
-
- // Check remote generation metadata.
- curVal, err = log.getGenMetadata("VeyronPhone", 1)
- if err != nil || curVal == nil {
- t.Fatalf("GetGenMetadata() can not find object in log file %s, err %v",
- logfile, err)
- }
- if !reflect.DeepEqual(expGen, curVal) {
- t.Errorf("Data mismatch for object in log file %s: %v instead of %v",
- logfile, curVal, expGen)
- }
- if log.head.Curgen != 3 || log.head.Curlsn != 0 || log.head.Curorder != 3 {
- t.Errorf("Data mismatch in log header %v", log.head)
- }
- if err = log.close(); err != nil {
- t.Errorf("Cannot close log file %s, err %v", logfile, err)
- }
-}
-
-// TestProcessWatchRecord tests that local updates are correctly handled.
-// Commands are in file testdata/local-init-00.log.sync.
-func TestProcessWatchRecord(t *testing.T) {
- logfile := getFileName()
- defer os.Remove(logfile)
-
- dagfile := getFileName()
- defer os.Remove(dagfile)
-
- var err error
- s := &syncd{id: "VeyronTab"}
- if s.dag, err = openDAG(dagfile); err != nil {
-		t.Fatalf("Cannot open new dag file %s, err %v", dagfile, err)
- }
- log, err := openILog(logfile, s)
- if err != nil {
- t.Fatalf("Cannot open new log file %s, err %v", logfile, err)
- }
-
- if _, err = logReplayCommands(log, "local-init-00.log.sync"); err != nil {
- t.Error(err)
- }
-
- if log.head.Curgen != 1 || log.head.Curlsn != 3 || log.head.Curorder != 0 {
- t.Errorf("Data mismatch in log header %v", log.head)
- }
-
- objid, err := strToObjID("12345")
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
-
- // Check all log records.
- for i := LSN(0); i < 3; i++ {
- curRec, err := log.getLogRec(log.s.id, GenID(1), i)
- if err != nil || curRec == nil {
- t.Fatalf("GetLogRec() can not find object %s:%d:%d in log file %s, err %v",
- log.s.id, 0, i, logfile, err)
- }
-
- if curRec.ObjID != objid {
- t.Errorf("Data mismatch in log record %v", curRec)
- }
- }
-
- // Verify DAG state.
- if head, err := log.s.dag.getHead(objid); err != nil || head != 3 {
-		t.Errorf("Invalid object %v head in DAG %d, err %v", objid, head, err)
- }
-
- s.dag.flush()
- s.dag.close()
- if err = log.close(); err != nil {
- t.Errorf("Cannot close log file %s, err %v", logfile, err)
- }
-}
-
-// TestILogCompact tests compacting of ilog's kvdb file.
-func TestILogCompact(t *testing.T) {
- logfile := getFileName()
- defer os.Remove(logfile)
-
- s := &syncd{id: "VeyronTab"}
- log, err := openILog(logfile, s)
- if err != nil {
- t.Fatalf("Cannot open new log file %s, err %v", logfile, err)
- }
-
- // Put some data in "records" table.
- log.fillFakeLogRecords()
- if err := log.flush(); err != nil {
- t.Errorf("Cannot flush ILog file %s, err %v", logfile, err)
- }
-
- // Put some data in "gens" table.
- for i := uint32(0); i < 10; i++ {
- val := &genMetadata{Pos: i, Count: uint64(i + 100)}
- if err := log.putGenMetadata(s.id, GenID(i+10), val); err != nil {
- t.Errorf("Cannot put object %s:%d in log file %s, err %v", s.id, i, logfile, err)
- }
- // Flush immediately to let the kvdb file grow.
- if err := log.flush(); err != nil {
- t.Errorf("Cannot flush ILog file %s, err %v", logfile, err)
- }
- }
-
- // Put some data in "header" table.
- log.head = &iLogHeader{
- Curgen: 1000,
- Curlsn: 10,
- Curorder: 100,
- }
- if err := log.flush(); err != nil {
- t.Errorf("Cannot flush ILog file %s, err %v", logfile, err)
- }
-
- // Get size before compaction.
- oldSize := getFileSize(logfile)
- if oldSize < 0 {
- t.Fatalf("Log file %s not created", logfile)
- }
-
- if err := log.compact(); err != nil {
- t.Errorf("Cannot compact ILog file %s, err %v", logfile, err)
- }
-
- // Verify size of kvdb file is reduced.
- size := getFileSize(logfile)
- if size < 0 {
- t.Fatalf("ILog file %s not created", logfile)
- }
- if size > oldSize {
- t.Fatalf("ILog file %s not compacted", logfile)
- }
-
- // Check data exists after compaction.
- for i := LSN(0); i < 10; i++ {
- curRec, err := log.getLogRec(log.s.id, GenID(1), i)
- if err != nil || curRec == nil {
- t.Fatalf("GetLogRec() can not find object %s:1:%d in log file %s, err %v",
- log.s.id, i, logfile, err)
- }
- if curRec.CurVers != raw.Version(i) {
- t.Errorf("Data mismatch for logrec %s:1:%d in log file %s: %v",
- log.s.id, i, logfile, curRec)
- }
- }
-
- for i := uint32(0); i < 10; i++ {
- curVal, err := log.getGenMetadata(log.s.id, GenID(i+10))
- if err != nil || curVal == nil {
- t.Fatalf("GetGenMetadata() can not find object %s:%d in log file %s, err %v",
- log.s.id, i, logfile, err)
- }
- if curVal.Pos != i || curVal.Count != uint64(i+100) {
- t.Errorf("Data mismatch for object %s:%d in log file %s: %v",
- log.s.id, i, logfile, curVal)
- }
- }
-
- log.head.Curgen = 0
- log.head.Curlsn = 0
- log.head.Curorder = 0
-
- if err := log.getHead(); err != nil {
- t.Fatalf("getHead() can not find head in log file %s, err %v", logfile, err)
- }
-
-	if log.head.Curgen != 1000 || log.head.Curlsn != 10 || log.head.Curorder != 100 {
- t.Errorf("Data mismatch for head in log file %s: %v", logfile, log.head)
- }
-
- if err := log.close(); err != nil {
- t.Errorf("Cannot close log file %s, err %v", logfile, err)
- }
-}
diff --git a/runtimes/google/vsync/initiator.go b/runtimes/google/vsync/initiator.go
deleted file mode 100644
index b1d39e0..0000000
--- a/runtimes/google/vsync/initiator.go
+++ /dev/null
@@ -1,736 +0,0 @@
-package vsync
-
-import (
- "errors"
- "fmt"
- "math/rand"
- "strings"
- "time"
-
- "veyron/services/store/raw"
-
- "veyron2/context"
- "veyron2/naming"
- "veyron2/rt"
- "veyron2/storage"
- "veyron2/vlog"
-)
-
-// Policies to pick a peer to sync with.
-const (
- // Picks a peer at random from the available set.
- selectRandom = iota
-
- // TODO(hpucha): implement other policies.
- // Picks a peer with most differing generations.
- selectMostDiff
-
- // Picks a peer that was synced with the furthest in the past.
- selectOldest
-)
-
-// Policies for conflict resolution.
-const (
- // Resolves conflicts by picking the mutation with the most recent timestamp.
- useTime = iota
-
- // TODO(hpucha): implement other policies.
- // Resolves conflicts by using the app conflict resolver callbacks via store.
- useCallback
-)
-
-var (
- // peerSyncInterval is the duration between two consecutive
- // sync events. In every sync event, the initiator contacts
- // one of its peers to obtain any pending updates.
- peerSyncInterval = 50 * time.Millisecond
-
- // peerSelectionPolicy is the policy used to select a peer when
- // the initiator gets a chance to sync.
- peerSelectionPolicy = selectRandom
-
- // conflictResolutionPolicy is the policy used to resolve conflicts.
- conflictResolutionPolicy = useTime
-
- errNoUsefulPeer = errors.New("no useful peer to contact")
-)
-
-// syncInitiator contains the metadata and state for the initiator thread.
-type syncInitiator struct {
- syncd *syncd
-
- // State to contact peers periodically and get deltas.
-	// TODO(hpucha): This is an initial version with command-line arguments.
-	// Next steps are to tie this into the mount table and auto-discover neighbors.
- neighbors []string
- neighborIDs []string
-
- updObjects map[storage.ID]*objConflictState
-}
-
-// objConflictState contains the conflict state for objects that are
-// updated during an initiator run.
-type objConflictState struct {
- isConflict bool
- newHead raw.Version
- oldHead raw.Version
- ancestor raw.Version
- resolvVal *LogValue
-}
-
-// newInitiator creates a new initiator instance attached to the given syncd instance.
-func newInitiator(syncd *syncd, peerEndpoints, peerDeviceIDs string, syncTick time.Duration) *syncInitiator {
- i := &syncInitiator{syncd: syncd,
- updObjects: make(map[storage.ID]*objConflictState),
- }
-
- // Bootstrap my peer list.
- if peerEndpoints != "" || peerDeviceIDs != "" {
- i.neighbors = strings.Split(peerEndpoints, ",")
- i.neighborIDs = strings.Split(peerDeviceIDs, ",")
- if len(i.neighbors) != len(i.neighborIDs) {
- vlog.Fatalf("newInitiator: Mismatch between number of endpoints and IDs")
- }
-
- // Neighbor IDs must be distinct and different from my ID.
- neighborIDs := make(map[string]struct{})
- for _, nID := range i.neighborIDs {
- if DeviceID(nID) == i.syncd.id {
-				vlog.Fatalf("newInitiator: neighbor ID %v cannot be the same as my ID %v", nID, i.syncd.id)
- }
- if _, ok := neighborIDs[nID]; ok {
-				vlog.Fatalf("newInitiator: neighbor ID %v is duplicated", nID)
- }
- neighborIDs[nID] = struct{}{}
- }
- }
-
- // Override the default peerSyncInterval value if syncTick is specified.
- if syncTick > 0 {
- peerSyncInterval = syncTick
- }
-
- vlog.VI(1).Infof("newInitiator: My device ID: %s", i.syncd.id)
- vlog.VI(1).Infof("newInitiator: Peer endpoints: %v", i.neighbors)
- vlog.VI(1).Infof("newInitiator: Peer IDs: %v", i.neighborIDs)
- vlog.VI(1).Infof("newInitiator: Sync interval: %v", peerSyncInterval)
-
- return i
-}
-
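-// Usage sketch (hypothetical arguments, not from the original code):
-// peerEndpoints and peerDeviceIDs are parallel comma-separated lists, so
-//
-//	newInitiator(s, "ep1,ep2", "VeyronPhone,VeyronLaptop", 0)
-//
-// pairs "ep1" with "VeyronPhone" and "ep2" with "VeyronLaptop". A zero
-// syncTick keeps the default peerSyncInterval.
-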
-// contactPeers wakes up every peerSyncInterval to contact peers and get deltas from them.
-func (i *syncInitiator) contactPeers() {
- ticker := time.NewTicker(peerSyncInterval)
- for {
- select {
- case <-i.syncd.closed:
- ticker.Stop()
- i.syncd.pending.Done()
- return
- case <-ticker.C:
- }
-
- id, ep, err := i.pickPeer()
- if err != nil {
- continue
- }
-
- // Freeze the most recent batch of local changes
- // before fetching remote changes from a peer.
- //
- // We only allow an initiator to create new local
- // generations (not responders/watcher) in order to
- // maintain a static baseline for the duration of a
- // sync. This addresses the following race condition:
- // If we allow responders to create new local
- // generations while the initiator is in progress,
- // they may beat the initiator and send these new
- // generations to remote devices. These remote
- // devices in turn can send these generations back to
- // the initiator in progress which was started with
- // older generation information.
- local, err := i.updateLocalGeneration()
- if err != nil {
- vlog.Fatalf("contactPeers:: error updating local generation: err %v", err)
- }
-
- i.getDeltasFromPeer(id, ep, local)
- }
-}
-
-// pickPeer picks a sync endpoint in the neighborhood to sync with.
-func (i *syncInitiator) pickPeer() (string, string, error) {
- switch peerSelectionPolicy {
- case selectRandom:
- // Pick a neighbor at random.
- if i.neighbors == nil {
- return "", "", errNoUsefulPeer
- }
- ind := rand.Intn(len(i.neighbors))
- return i.neighborIDs[ind], i.neighbors[ind], nil
- default:
- return "", "", fmt.Errorf("unknown peer selection policy")
- }
-}
-
-// updateLocalGeneration creates a new local generation if needed and
-// returns the newest local generation vector.
-func (i *syncInitiator) updateLocalGeneration() (GenVector, error) {
- // TODO(hpucha): Eliminate reaching into syncd's lock.
- i.syncd.lock.Lock()
- defer i.syncd.lock.Unlock()
-
- // Create a new local generation if there are any local updates.
- gen, err := i.syncd.log.createLocalGeneration()
- if err == errNoUpdates {
- vlog.VI(1).Infof("createLocalGeneration:: No new updates. Local at %d", gen)
- return i.syncd.devtab.getGenVec(i.syncd.id)
- }
- if err != nil {
- return GenVector{}, err
- }
-
- vlog.VI(2).Infof("updateLocalGeneration:: created gen %d", gen)
- // Update local generation vector in devTable.
- if err = i.syncd.devtab.updateGeneration(i.syncd.id, i.syncd.id, gen); err != nil {
- return GenVector{}, err
- }
- return i.syncd.devtab.getGenVec(i.syncd.id)
-}
-
-// getDeltasFromPeer contacts the specified endpoint to obtain deltas wrt its current generation vector.
-func (i *syncInitiator) getDeltasFromPeer(dID, ep string, local GenVector) {
- ctx, cancel := rt.R().NewContext().WithTimeout(time.Minute)
- defer cancel()
-
- vlog.VI(1).Infof("GetDeltasFromPeer:: From server %s with DeviceID %s at %v", ep, dID, time.Now().UTC())
-
- // Construct a new stub that binds to peer endpoint.
- c, err := BindSync(naming.JoinAddressName(ep, "sync"))
- if err != nil {
- vlog.Errorf("GetDeltasFromPeer:: error binding to server: err %v", err)
- return
- }
-
- vlog.VI(1).Infof("GetDeltasFromPeer:: Sending local information: %v", local)
-
- // Issue a GetDeltas() rpc.
- stream, err := c.GetDeltas(ctx, local, i.syncd.id)
- if err != nil {
- vlog.Errorf("GetDeltasFromPeer:: error getting deltas: err %v", err)
- return
- }
-
- minGens, err := i.processLogStream(stream)
- if err != nil {
- vlog.Fatalf("GetDeltasFromPeer:: error processing logs: err %v", err)
- }
-
- remote, err := stream.Finish()
- if err != nil {
- vlog.Fatalf("GetDeltasFromPeer:: finish failed with err %v", err)
- }
-
- if err := i.processUpdatedObjects(ctx, local, minGens, remote, DeviceID(dID)); err != nil {
- vlog.Fatalf("GetDeltasFromPeer:: error processing objects: err %v", err)
- }
-
- vlog.VI(1).Infof("GetDeltasFromPeer:: Local vector %v", local)
- vlog.VI(1).Infof("GetDeltasFromPeer:: Remote vector %v", remote)
-}
-
-// processLogStream replays an entire log stream spanning multiple
-// generations from different devices received from a single GetDeltas
-// call. It does not perform any conflict resolution during replay.
-// This avoids resolving conflicts that have already been resolved by
-// other devices.
-func (i *syncInitiator) processLogStream(stream SyncGetDeltasCall) (GenVector, error) {
- // Map to track new generations received in the Call reply.
- // TODO(hpucha): If needed, this can be optimized under the
- // assumption that an entire generation is received
- // sequentially. We can then parse a generation at a time.
- newGens := make(map[string]*genMetadata)
- // Array to track order of arrival for the generations.
- // We need to preserve this order.
- var orderGens []string
- // Compute the minimum generation for every device in this set.
- minGens := GenVector{}
-
- curTx := NoTxID
- rStream := stream.RecvStream()
- for rStream.Advance() {
- rec := rStream.Value()
-
- // Begin a new transaction if needed.
- if curTx == NoTxID && rec.Value.Continued {
- curTx = i.syncd.dag.addNodeTxStart()
- vlog.VI(2).Infof("processLogStream:: Begin Tx %v", curTx)
- }
-
- if err := i.insertRecInLogAndDag(&rec, curTx); err != nil {
- return GenVector{}, err
- }
-
- // End the previous transaction if any.
- if curTx != NoTxID && !rec.Value.Continued {
- if err := i.syncd.dag.addNodeTxEnd(curTx); err != nil {
- return GenVector{}, err
- }
- vlog.VI(2).Infof("processLogStream:: End Tx %v", curTx)
- curTx = NoTxID
- }
-
- // Mark object dirty.
- i.updObjects[rec.ObjID] = &objConflictState{}
-
- // Populate the generation metadata.
- genKey := generationKey(rec.DevID, rec.GNum)
- if gen, ok := newGens[genKey]; !ok {
- // New generation in the stream.
- orderGens = append(orderGens, genKey)
- newGens[genKey] = &genMetadata{
- Count: 1,
- MaxLSN: rec.LSN,
- }
- g, ok := minGens[rec.DevID]
- if !ok || g > rec.GNum {
- minGens[rec.DevID] = rec.GNum
- }
- } else {
- gen.Count++
- if rec.LSN > gen.MaxLSN {
- gen.MaxLSN = rec.LSN
- }
- }
- }
-
- if err := rStream.Err(); err != nil {
- return GenVector{}, err
- }
- if curTx != NoTxID {
- return GenVector{}, fmt.Errorf("incomplete transaction in a generation")
- }
- if err := i.createGenMetadataBatch(newGens, orderGens); err != nil {
- return GenVector{}, err
- }
-
- return minGens, nil
-}
-
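-// Illustration (hypothetical stream contents): if records arrive tagged
-// (VeyronPhone, gen 3), (VeyronPhone, gen 2), (VeyronTab, gen 7), the
-// loop above yields minGens = GenVector{"VeyronPhone": 2, "VeyronTab": 7},
-// i.e. the smallest generation seen per device, which later feeds
-// updateReclaimVec during log-space reclamation.
-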
-// insertRecInLogAndDag adds a new log record to the log and DAG data structures.
-func (i *syncInitiator) insertRecInLogAndDag(rec *LogRec, txID TxID) error {
- // TODO(hpucha): Eliminate reaching into syncd's lock.
- i.syncd.lock.Lock()
- defer i.syncd.lock.Unlock()
-
- logKey, err := i.syncd.log.putLogRec(rec)
- if err != nil {
- return err
- }
-
- vlog.VI(2).Infof("insertRecInLogAndDag:: Adding log record %v, Tx %v", rec, txID)
- switch rec.RecType {
- case NodeRec:
- return i.syncd.dag.addNode(rec.ObjID, rec.CurVers, true, rec.Value.Delete, rec.Parents, logKey, txID)
- case LinkRec:
- return i.syncd.dag.addParent(rec.ObjID, rec.CurVers, rec.Parents[0], true)
- default:
- return fmt.Errorf("unknown log record type")
- }
-}
-
-// createGenMetadataBatch inserts a batch of generations into the log.
-func (i *syncInitiator) createGenMetadataBatch(newGens map[string]*genMetadata, orderGens []string) error {
- // TODO(hpucha): Eliminate reaching into syncd's lock.
- i.syncd.lock.Lock()
- defer i.syncd.lock.Unlock()
-
- for _, key := range orderGens {
- gen := newGens[key]
- // Insert the generation metadata.
- dev, gnum, err := splitGenerationKey(key)
- if err != nil {
- return err
- }
- if err := i.syncd.log.createRemoteGeneration(dev, gnum, gen); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// processUpdatedObjects processes all the updates received by the
-// initiator, one object at a time. For each updated object, we first
-// check if the object has any conflicts, resulting in three
-// possibilities:
-//
-// * There is no conflict, and no updates are needed to the store
-// (isConflict=false, newHead == oldHead). All changes received convey
-// information that still keeps the local head as the most recent
-// version. This occurs when conflicts are resolved by blessings.
-//
-// * There is no conflict, but a remote version is discovered that
-// builds on the local head (isConflict=false, newHead != oldHead). In
-// this case, we generate a store mutation to simply update the store
-// to the latest value.
-//
-// * There is a conflict and we call into the app or the system to
-// resolve the conflict, resulting in three possibilities: (a) conflict
-// was resolved by blessing the local version. In this case, store
-// need not be updated, but a link is added to record the
-// blessing. (b) conflict was resolved by blessing the remote
-// version. In this case, store is updated with the remote version and
-// a link is added as well. (c) conflict was resolved by generating a
-// new store mutation. In this case, store is updated with the new
-// version.
-//
-// We then put all these mutations in the store. If the put succeeds,
-// we update the log and dag state suitably (move the head ptr of the
-// object in the dag to the latest version, and create a new log
-// record reflecting conflict resolution if any). Puts to store can
-// fail since preconditions on the objects may have been violated. In
-// this case, we wait to get the latest versions of objects from the
-// store, and recheck if the object has any conflicts and repeat the
-// above steps, until put to store succeeds.
-func (i *syncInitiator) processUpdatedObjects(ctx context.T, local, minGens, remote GenVector, dID DeviceID) error {
- for {
- if err := i.detectConflicts(); err != nil {
- return err
- }
-
- if err := i.resolveConflicts(); err != nil {
- return err
- }
-
- err := i.updateStoreAndSync(ctx, local, minGens, remote, dID)
- if err == nil {
- break
- }
-
- vlog.Errorf("PutMutations failed %v. Will retry", err)
- // TODO(hpucha): Sleeping and retrying is a temporary
- // solution. Next iteration will have coordination
- // with watch thread to intelligently retry. Hence
- // this value is not a config param.
- time.Sleep(1 * time.Second)
- }
-
- // Remove any pending state.
- i.updObjects = make(map[storage.ID]*objConflictState)
- i.syncd.dag.clearGraft()
- return nil
-}
-
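-// A minimal sketch (not part of the original code) mapping the three
-// outcomes described above onto objConflictState; describeOutcome is a
-// hypothetical helper for illustration only.
-func describeOutcome(st *objConflictState) string {
-	switch {
-	case !st.isConflict && st.newHead == st.oldHead:
-		// All received changes still leave the local head as newest.
-		return "no store update needed"
-	case !st.isConflict:
-		// A remote version builds on the local head.
-		return "update store to newHead"
-	default:
-		// Conflicting heads: resolve, then bless or create a new version.
-		return "resolve conflict before updating store"
-	}
-}
-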
-// detectConflicts iterates through all the updated objects to detect
-// conflicts.
-func (i *syncInitiator) detectConflicts() error {
- // TODO(hpucha): Eliminate reaching into syncd's lock.
- i.syncd.lock.RLock()
- defer i.syncd.lock.RUnlock()
-
- for obj, st := range i.updObjects {
- // Check if object has a conflict.
- var err error
- st.isConflict, st.newHead, st.oldHead, st.ancestor, err = i.syncd.dag.hasConflict(obj)
- vlog.VI(2).Infof("detectConflicts:: object %v state %v err %v",
- obj, st, err)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// resolveConflicts resolves conflicts for updated objects. Conflicts
-// may be resolved by adding new versions or blessing either the local
-// or the remote version.
-func (i *syncInitiator) resolveConflicts() error {
- switch conflictResolutionPolicy {
- case useTime:
- if err := i.resolveConflictsByTime(); err != nil {
- return err
- }
- default:
- return fmt.Errorf("unknown conflict resolution policy")
- }
- return nil
-}
-
-// resolveConflictsByTime resolves conflicts using the timestamps
-// of the conflicting mutations. It picks a mutation with the larger
-// timestamp, i.e. the most recent update. If the timestamps are equal,
-// it uses the mutation version numbers as a tie-breaker, picking the
-// mutation with the larger version.
-//
-// TODO(hpucha): Once a few more policies are in place, reconsider the
-// nesting order of the conflict-resolution loop and the switch on policy.
-func (i *syncInitiator) resolveConflictsByTime() error {
- for obj, st := range i.updObjects {
- if !st.isConflict {
- continue
- }
-
- versions := make([]raw.Version, 3)
- versions[0] = st.oldHead
- versions[1] = st.newHead
- versions[2] = st.ancestor
-
- lrecs, err := i.getLogRecsBatch(obj, versions)
- if err != nil {
- return err
- }
-
- res := 0
- switch {
- case lrecs[0].Value.SyncTime > lrecs[1].Value.SyncTime:
- res = 0
- case lrecs[0].Value.SyncTime < lrecs[1].Value.SyncTime:
- res = 1
- case lrecs[0].Value.Mutation.Version > lrecs[1].Value.Mutation.Version:
- res = 0
- case lrecs[0].Value.Mutation.Version < lrecs[1].Value.Mutation.Version:
- res = 1
- }
-
- // Instead of creating a new version that resolves the
- // conflict, we are blessing an existing version as
- // the conflict resolution.
- st.resolvVal = &lrecs[res].Value
- }
-
- return nil
-}
-
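-// A minimal sketch (illustrative only) of the ordering implemented above:
-// the later SyncTime wins, and equal SyncTimes fall back to the larger
-// mutation version. pickRecent is a hypothetical helper, not an API of
-// this package.
-func pickRecent(a, b *LogRec) *LogRec {
-	if a.Value.SyncTime != b.Value.SyncTime {
-		if a.Value.SyncTime > b.Value.SyncTime {
-			return a
-		}
-		return b
-	}
-	if a.Value.Mutation.Version >= b.Value.Mutation.Version {
-		return a
-	}
-	return b
-}
-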
-// getLogRecsBatch gets the log records for an array of versions.
-func (i *syncInitiator) getLogRecsBatch(obj storage.ID, versions []raw.Version) ([]*LogRec, error) {
- // TODO(hpucha): Eliminate reaching into syncd's lock.
- i.syncd.lock.RLock()
- defer i.syncd.lock.RUnlock()
-
- lrecs := make([]*LogRec, len(versions))
- var err error
- for p, v := range versions {
- lrecs[p], err = i.getLogRec(obj, v)
- if err != nil {
- return nil, err
- }
- }
- return lrecs, nil
-}
-
-// updateStoreAndSync updates the store, and if that is successful,
-// updates log and dag data structures.
-func (i *syncInitiator) updateStoreAndSync(ctx context.T, local, minGens, remote GenVector, dID DeviceID) error {
- // TODO(hpucha): Eliminate reaching into syncd's lock.
- i.syncd.lock.Lock()
- defer i.syncd.lock.Unlock()
-
- var m []raw.Mutation
- for obj, st := range i.updObjects {
- if !st.isConflict {
- rec, err := i.getLogRec(obj, st.newHead)
- if err != nil {
- return err
- }
- st.resolvVal = &rec.Value
- // Sanity check.
- if st.resolvVal.Mutation.Version != st.newHead {
- return fmt.Errorf("bad mutation %d %d",
- st.resolvVal.Mutation.Version, st.newHead)
- }
- }
-
- // If the local version is picked, no further updates
- // to the store are needed. If the remote version is
- // picked, we put it in the store.
- if st.resolvVal.Mutation.Version != st.oldHead {
- st.resolvVal.Mutation.PriorVersion = st.oldHead
-
- // Convert resolvVal.Mutation into a mutation for the Store.
- stMutation, err := i.storeMutation(obj, st.resolvVal)
- if err != nil {
- return err
- }
-
- vlog.VI(2).Infof("updateStoreAndSync:: Try to append mutation %v (%v) for obj %v (nh %v, oh %v)",
- st.resolvVal.Mutation, stMutation, obj, st.newHead, st.oldHead)
-
- // Append to mutations, skipping a delete following a delete mutation.
- if stMutation.Version != raw.NoVersion ||
- stMutation.PriorVersion != raw.NoVersion {
- vlog.VI(2).Infof("updateStoreAndSync:: appending mutation %v for obj %v",
- stMutation, obj)
- m = append(m, stMutation)
- }
- }
- }
-
- // TODO(hpucha): We will hold the lock across PutMutations rpc
- // to prevent a race with watcher. The next iteration will
- // clean up this coordination.
- if store := i.syncd.store; store != nil && len(m) > 0 {
- stream, err := store.PutMutations(ctx)
- if err != nil {
- vlog.Errorf("updateStoreAndSync:: putmutations err %v", err)
- return err
- }
- sender := stream.SendStream()
- for i := range m {
- if err := sender.Send(m[i]); err != nil {
- vlog.Errorf("updateStoreAndSync:: send err %v", err)
- return err
- }
- }
- if err := sender.Close(); err != nil {
- vlog.Errorf("updateStoreAndSync:: closesend err %v", err)
- return err
- }
- if err := stream.Finish(); err != nil {
- vlog.Errorf("updateStoreAndSync:: finish err %v", err)
- return err
- }
- }
-
- vlog.VI(2).Infof("updateStoreAndSync:: putmutations succeeded")
- if err := i.updateLogAndDag(); err != nil {
- return err
- }
-
-	if err := i.updateGenVecs(local, minGens, remote, dID); err != nil {
- return err
- }
-
- return nil
-}
-
-// storeMutation converts a resolved mutation generated by syncd to
-// one that can be sent to the store. To send to the store, it
-// converts the version numbers corresponding to object deletions to
-// raw.NoVersion when required.
-func (i *syncInitiator) storeMutation(obj storage.ID, resolvVal *LogValue) (raw.Mutation, error) {
- curDelete := resolvVal.Delete
- priorDelete := false
- if resolvVal.Mutation.PriorVersion != raw.NoVersion {
- oldRec, err := i.getLogRec(obj, resolvVal.Mutation.PriorVersion)
- if err != nil {
- return raw.Mutation{}, err
- }
- priorDelete = oldRec.Value.Delete
- }
-
- // Current version and prior versions are not deletes.
- if !curDelete && !priorDelete {
- return resolvVal.Mutation, nil
- }
-
-	// Create a new copy of the mutation to adjust version
-	// numbers when handling deletions.
- stMutation := resolvVal.Mutation
-	// Adjust the current version if this is a deletion.
- if curDelete {
- stMutation.Version = raw.NoVersion
- }
- // Adjust the prior version if it is a deletion.
- if priorDelete {
- stMutation.PriorVersion = raw.NoVersion
- }
-
- return stMutation, nil
-}
-
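-// Example (hypothetical versions): deleting an object at version 7 whose
-// prior version 6 was a normal write is sent to the store as
-// {Version: raw.NoVersion, PriorVersion: 6}; re-creating an object at
-// version 8 on top of a deletion is sent as
-// {Version: 8, PriorVersion: raw.NoVersion}.
-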
-// getLogRec returns the log record corresponding to a given object and its version.
-func (i *syncInitiator) getLogRec(obj storage.ID, vers raw.Version) (*LogRec, error) {
- logKey, err := i.syncd.dag.getLogrec(obj, vers)
- if err != nil {
- return nil, err
- }
- dev, gen, lsn, err := splitLogRecKey(logKey)
- if err != nil {
- return nil, err
- }
- rec, err := i.syncd.log.getLogRec(dev, gen, lsn)
- if err != nil {
- return nil, err
- }
- return rec, nil
-}
-
-// updateLogAndDag updates the log and dag data structures on a successful store put.
-func (i *syncInitiator) updateLogAndDag() error {
- for obj, st := range i.updObjects {
- if st.isConflict {
- // Object had a conflict, which was resolved successfully.
- // Put is successful, create a log record.
- var err error
- var rec *LogRec
-
- switch {
- case st.resolvVal.Mutation.Version == st.oldHead:
- // Local version was blessed as the conflict resolution.
- rec, err = i.syncd.log.createLocalLinkLogRec(obj, st.oldHead, st.newHead)
- case st.resolvVal.Mutation.Version == st.newHead:
- // Remote version was blessed as the conflict resolution.
- rec, err = i.syncd.log.createLocalLinkLogRec(obj, st.newHead, st.oldHead)
- default:
- // New version was created to resolve the conflict.
- parents := []raw.Version{st.newHead, st.oldHead}
- rec, err = i.syncd.log.createLocalLogRec(obj, st.resolvVal.Mutation.Version, parents, st.resolvVal)
- }
- if err != nil {
- return err
- }
- logKey, err := i.syncd.log.putLogRec(rec)
- if err != nil {
- return err
- }
- // Add a new DAG node.
- switch rec.RecType {
- case NodeRec:
- // TODO(hpucha): addNode operations arising out of conflict resolution
- // may need to be part of a transaction when app-driven resolution
- // is introduced.
- err = i.syncd.dag.addNode(obj, rec.CurVers, false, rec.Value.Delete, rec.Parents, logKey, NoTxID)
- case LinkRec:
- err = i.syncd.dag.addParent(obj, rec.CurVers, rec.Parents[0], false)
- default:
- return fmt.Errorf("unknown log record type")
- }
- if err != nil {
- return err
- }
- }
-
- // Move the head. This should be idempotent. We may
- // move head to the local head in some cases.
- if err := i.syncd.dag.moveHead(obj, st.resolvVal.Mutation.Version); err != nil {
- return err
- }
- }
- return nil
-}
-
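-// Illustration (hypothetical versions, oldHead=5, newHead=6): blessing the
-// local version writes a LinkRec that makes 6 a parent of 5; blessing the
-// remote version writes a LinkRec that makes 5 a parent of 6; a newly
-// created resolution 7 is a NodeRec with parents {6, 5}. In every case
-// moveHead then points the object's DAG head at the resolved version.
-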
-// updateGenVecs updates local, reclaim and remote vectors at the end of an initiator cycle.
-func (i *syncInitiator) updateGenVecs(local, minGens, remote GenVector, dID DeviceID) error {
- // Update the local gen vector and put it in kvdb only if we have new updates.
- if len(i.updObjects) > 0 {
- if err := i.syncd.devtab.updateLocalGenVector(local, remote); err != nil {
- return err
- }
-
- if err := i.syncd.devtab.putGenVec(i.syncd.id, local); err != nil {
- return err
- }
-
- if err := i.syncd.devtab.updateReclaimVec(minGens); err != nil {
- return err
- }
- }
-
- // Cache the remote generation vector for space reclamation.
- if err := i.syncd.devtab.putGenVec(dID, remote); err != nil {
- return err
- }
- return nil
-}
diff --git a/runtimes/google/vsync/initiator_test.go b/runtimes/google/vsync/initiator_test.go
deleted file mode 100644
index d2668e5..0000000
--- a/runtimes/google/vsync/initiator_test.go
+++ /dev/null
@@ -1,1330 +0,0 @@
-package vsync
-
-// Tests for sync initiator.
-import (
- "fmt"
- "os"
- "reflect"
- "testing"
- "time"
-
- "veyron/services/store/raw"
-
- "veyron2/storage"
-)
-
-// TestGetLogRec tests getting a log record from kvdb based on object id and version.
-func TestGetLogRec(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- // Set a large value to prevent the threads from firing.
- peerSyncInterval = 1 * time.Hour
- garbageCollectInterval = 1 * time.Hour
- s := NewSyncd("", "", "VeyronTab", dir, "", 0)
-
- s.lock.Lock()
- defer s.lock.Unlock()
- defer s.Close()
- defer os.RemoveAll(dir)
-
- // Create some data.
- objID := storage.NewID()
- expRec := &LogRec{
- DevID: "VeyronTab",
- GNum: 50,
- LSN: 100,
- ObjID: objID,
- CurVers: 20,
- Value: LogValue{Mutation: raw.Mutation{Version: 20}},
- }
- if _, err := s.hdlInitiator.getLogRec(objID, expRec.CurVers); err == nil {
- t.Errorf("GetLogRec didn't fail")
- }
- logKey, err := s.log.putLogRec(expRec)
- if err != nil {
- t.Errorf("PutLogRec failed with err %v", err)
- }
- if _, err := s.hdlInitiator.getLogRec(objID, expRec.CurVers); err == nil {
- t.Errorf("GetLogRec didn't fail")
- }
- if err = s.dag.addNode(objID, expRec.CurVers, false, false, expRec.Parents, logKey, NoTxID); err != nil {
- t.Errorf("AddNode failed with err %v", err)
- }
- curRec, err := s.hdlInitiator.getLogRec(objID, expRec.CurVers)
- if err != nil {
- t.Errorf("GetLogRec failed with err %v", err)
- }
- if !reflect.DeepEqual(curRec, expRec) {
- t.Errorf("Data mismatch for %v instead of %v",
- curRec, expRec)
- }
-}
-
-// TestResolveConflictByTime tests the timestamp-based conflict resolution policy.
-func TestResolveConflictByTime(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- // Set a large value to prevent the threads from firing.
- // Test is not thread safe.
- peerSyncInterval = 1 * time.Hour
- garbageCollectInterval = 1 * time.Hour
- s := NewSyncd("", "", "VeyronTab", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- objID := storage.NewID()
- s.hdlInitiator.updObjects[objID] = &objConflictState{
- isConflict: true,
- oldHead: 40,
- newHead: 20,
- ancestor: 10,
- }
- versions := []raw.Version{10, 40, 20}
- for _, v := range versions {
- expRec := &LogRec{
- DevID: "VeyronTab",
- GNum: GenID(50 + v),
- LSN: LSN(100 + v),
- ObjID: objID,
- CurVers: v,
- Value: LogValue{Mutation: raw.Mutation{Version: v, PriorVersion: 500 + v}, SyncTime: int64(v)},
- }
- logKey, err := s.log.putLogRec(expRec)
- if err != nil {
- t.Errorf("PutLogRec failed with err %v", err)
- }
- if err = s.dag.addNode(objID, expRec.CurVers, false, false, expRec.Parents, logKey, NoTxID); err != nil {
- t.Errorf("AddNode failed with err %v", err)
- }
- }
-
- if err := s.hdlInitiator.resolveConflictsByTime(); err != nil {
- t.Errorf("ResolveConflictsByTime failed with err %v", err)
- }
- if s.hdlInitiator.updObjects[objID].resolvVal.Mutation.PriorVersion != 540 {
- t.Errorf("Data mismatch for resolution %v", s.hdlInitiator.updObjects[objID].resolvVal)
- }
-}
-
-// TODO(hpucha): Add more tests around retrying failed puts in the next pass (processUpdatedObjects).
-
-// TestLogStreamRemoteOnly tests processing of a remote log stream. Commands are in file
-// testdata/remote-init-00.log.sync.
-func TestLogStreamRemoteOnly(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- // Set a large value to prevent the threads from firing.
- // Test is not thread safe.
- peerSyncInterval = 1 * time.Hour
- garbageCollectInterval = 1 * time.Hour
- s := NewSyncd("", "", "VeyronTab", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- stream, err := createReplayStream("remote-init-00.log.sync")
- if err != nil {
- t.Fatalf("createReplayStream failed with err %v", err)
- }
- var minGens GenVector
- if minGens, err = s.hdlInitiator.processLogStream(stream); err != nil {
- t.Fatalf("processLogStream failed with err %v", err)
- }
-
- // Check minGens.
- expVec := GenVector{"VeyronPhone": 1}
- if !reflect.DeepEqual(expVec, minGens) {
- t.Errorf("Data mismatch for minGens: %v instead of %v",
- minGens, expVec)
- }
-
- // Check generation metadata.
- curVal, err := s.log.getGenMetadata("VeyronPhone", 1)
- if err != nil || curVal == nil {
- t.Fatalf("GetGenMetadata() can not find object in log file err %v", err)
- }
- expVal := &genMetadata{Pos: 0, Count: 3, MaxLSN: 2}
- if !reflect.DeepEqual(expVal, curVal) {
- t.Errorf("Data mismatch for generation metadata: %v instead of %v",
- curVal, expVal)
- }
-
- objid, err := strToObjID("12345")
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
- // Check all log records.
- for i := LSN(0); i < 3; i++ {
- curRec, err := s.log.getLogRec("VeyronPhone", GenID(1), i)
- if err != nil || curRec == nil {
- t.Fatalf("GetLogRec() can not find object %d in log file err %v",
- i, err)
- }
- if curRec.ObjID != objid {
- t.Errorf("Data mismatch in log record %v", curRec)
- }
- // Verify DAG state.
- if _, err := s.dag.getNode(objid, raw.Version(i+1)); err != nil {
- t.Errorf("GetNode() can not find object %d %d in DAG, err %v", objid, i, err)
- }
- }
- if err := s.hdlInitiator.detectConflicts(); err != nil {
- t.Fatalf("detectConflicts failed with err %v", err)
- }
- if len(s.hdlInitiator.updObjects) != 1 {
- t.Errorf("Unexpected number of updated objects %d", len(s.hdlInitiator.updObjects))
- }
- st := s.hdlInitiator.updObjects[objid]
- if st.isConflict {
- t.Errorf("Detected a conflict %v", st)
- }
- if st.newHead != 3 || st.oldHead != raw.NoVersion {
- t.Errorf("Conflict detection didn't succeed %v", st)
- }
- if err := s.hdlInitiator.resolveConflicts(); err != nil {
- t.Fatalf("resolveConflicts failed with err %v", err)
- }
- if err := s.hdlInitiator.updateStoreAndSync(nil, GenVector{}, minGens, GenVector{}, "VeyronPhone"); err != nil {
- t.Fatalf("updateStoreAndSync failed with err %v", err)
- }
- if st.resolvVal.Mutation.PriorVersion != raw.NoVersion || st.resolvVal.Mutation.Version != 3 {
- t.Errorf("Mutation generation is not accurate %v", st)
- }
- if s.log.head.Curgen != 1 || s.log.head.Curlsn != 0 || s.log.head.Curorder != 1 {
- t.Errorf("Data mismatch in log header %v", s.log.head)
- }
- // Verify DAG state.
- if head, err := s.dag.getHead(objid); err != nil || head != 3 {
-		t.Errorf("Invalid object %v head in DAG %d, err %v", objid, head, err)
- }
-}
-
-// TestLogStreamGCedRemote tests that a remote log stream can be
-// correctly applied when its generations don't start at 1 and have
-// been GC'ed already. Commands are in file
-// testdata/remote-init-01.log.sync.
-func TestLogStreamGCedRemote(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- // Set a large value to prevent the threads from firing.
- // Test is not thread safe.
- peerSyncInterval = 1 * time.Hour
- garbageCollectInterval = 1 * time.Hour
- s := NewSyncd("", "", "VeyronTab", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- stream, err := createReplayStream("remote-init-01.log.sync")
- if err != nil {
- t.Fatalf("createReplayStream failed with err %v", err)
- }
-
- var minGens GenVector
- if minGens, err = s.hdlInitiator.processLogStream(stream); err != nil {
- t.Fatalf("processLogStream failed with err %v", err)
- }
-
- // Check minGens.
- expVec := GenVector{"VeyronPhone": 5}
- if !reflect.DeepEqual(expVec, minGens) {
- t.Errorf("Data mismatch for minGens: %v instead of %v",
- minGens, expVec)
- }
-
- // Check generation metadata.
- curVal, err := s.log.getGenMetadata("VeyronPhone", 5)
- if err != nil || curVal == nil {
- t.Fatalf("GetGenMetadata() can not find object in log file err %v", err)
- }
- expVal := &genMetadata{Pos: 0, Count: 3, MaxLSN: 2}
- if !reflect.DeepEqual(expVal, curVal) {
- t.Errorf("Data mismatch for generation metadata: %v instead of %v",
- curVal, expVal)
- }
-
- objid, err := strToObjID("12345")
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
- // Check all log records.
- for i := LSN(0); i < 3; i++ {
- curRec, err := s.log.getLogRec("VeyronPhone", GenID(5), i)
- if err != nil || curRec == nil {
- t.Fatalf("GetLogRec() can not find object %d in log file err %v",
- i, err)
- }
- if curRec.ObjID != objid {
- t.Errorf("Data mismatch in log record %v", curRec)
- }
- // Verify DAG state.
- if _, err := s.dag.getNode(objid, raw.Version(i+1)); err != nil {
- t.Errorf("GetNode() can not find object %d %d in DAG, err %v", objid, i, err)
- }
- }
-
- if err := s.hdlInitiator.detectConflicts(); err != nil {
- t.Fatalf("detectConflicts failed with err %v", err)
- }
- if len(s.hdlInitiator.updObjects) != 1 {
- t.Errorf("Unexpected number of updated objects %d", len(s.hdlInitiator.updObjects))
- }
- st := s.hdlInitiator.updObjects[objid]
- if st.isConflict {
- t.Errorf("Detected a conflict %v", st)
- }
- if st.newHead != 3 || st.oldHead != raw.NoVersion {
- t.Errorf("Conflict detection didn't succeed %v", st)
- }
- if err := s.hdlInitiator.resolveConflicts(); err != nil {
- t.Fatalf("resolveConflicts failed with err %v", err)
- }
- if err := s.hdlInitiator.updateStoreAndSync(nil, GenVector{}, minGens, GenVector{}, "VeyronPhone"); err != nil {
- t.Fatalf("updateStoreAndSync failed with err %v", err)
- }
- if st.resolvVal.Mutation.PriorVersion != raw.NoVersion || st.resolvVal.Mutation.Version != 3 {
- t.Errorf("Mutation generation is not accurate %v", st)
- }
- if s.log.head.Curgen != 1 || s.log.head.Curlsn != 0 || s.log.head.Curorder != 1 {
- t.Errorf("Data mismatch in log header %v", s.log.head)
- }
- // Verify DAG state.
- if head, err := s.dag.getHead(objid); err != nil || head != 3 {
-		t.Errorf("Invalid object %d head in DAG %d, err %v", objid, head, err)
- }
-}
-
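
The initiator tests in this file all drive the same four-step pipeline. As a minimal sketch of that flow, using only calls that appear in the tests themselves (error handling elided; illustrative, not production code):

    // Sketch: the initiator pipeline exercised by these tests.
    stream, _ := createReplayStream("remote-init-01.log.sync") // canned remote device log
    minGens, _ := s.hdlInitiator.processLogStream(stream)      // ingest records into log and DAG
    _ = s.hdlInitiator.detectConflicts()                       // compare remote heads to local heads
    _ = s.hdlInitiator.resolveConflicts()                      // pick winning versions per policy
    _ = s.hdlInitiator.updateStoreAndSync(nil, GenVector{}, minGens, GenVector{}, "VeyronPhone")
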
-// TestLogStreamRemoteWithTx tests processing of a remote log stream
-// that contains transactions. Commands are in file
-// testdata/remote-init-02.log.sync.
-func TestLogStreamRemoteWithTx(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- // Set a large value to prevent the threads from firing.
- // Test is not thread safe.
- peerSyncInterval = 1 * time.Hour
- garbageCollectInterval = 1 * time.Hour
- s := NewSyncd("", "", "VeyronTab", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- stream, err := createReplayStream("remote-init-02.log.sync")
- if err != nil {
- t.Fatalf("createReplayStream failed with err %v", err)
- }
- var minGens GenVector
- if minGens, err = s.hdlInitiator.processLogStream(stream); err != nil {
- t.Fatalf("processLogStream failed with err %v", err)
- }
-
- // Check minGens.
- expVec := GenVector{"VeyronPhone": 1, "VeyronTab": 1}
- if !reflect.DeepEqual(expVec, minGens) {
- t.Errorf("Data mismatch for minGens: %v instead of %v",
- minGens, expVec)
- }
-
- // Verify transaction state.
- objs := []string{"123", "456", "789"}
- objids := make([]storage.ID, 3)
- maxVers := []raw.Version{3, 2, 4}
- txVers := map[string]struct{}{
- "123-2": struct{}{},
- "123-3": struct{}{},
- "456-1": struct{}{},
- "456-2": struct{}{},
- "789-1": struct{}{},
- }
- for pos, o := range objs {
- var err error
- objids[pos], err = strToObjID(o)
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
- for i := raw.Version(1); i <= raw.Version(maxVers[pos]); i++ {
- node, err := s.dag.getNode(objids[pos], i)
- if err != nil {
- t.Errorf("cannot find dag node for object %d %v: %s", objids[pos], i, err)
- }
- key := fmt.Sprintf("%s-%d", objs[pos], i)
- _, ok := txVers[key]
- if !ok && node.TxID != NoTxID {
- t.Errorf("expecting NoTxID, found txid %v for object %d:%v", node.TxID, objids[pos], i)
- }
- if ok && node.TxID == NoTxID {
- t.Errorf("expecting non nil txid for object %d:%v", objids[pos], i)
- }
- }
- }
-
- // Verify transaction state for the first transaction.
- node, err := s.dag.getNode(objids[0], raw.Version(2))
- if err != nil {
- t.Errorf("cannot find dag node for object %d v1: %s", objids[0], err)
- }
- if node.TxID == NoTxID {
- t.Errorf("expecting non nil txid for object %d:v1", objids[0])
- }
- txMap, err := s.dag.getTransaction(node.TxID)
- if err != nil {
- t.Errorf("cannot find transaction for id %v: %s", node.TxID, err)
- }
- expTxMap := dagTxMap{
- objids[0]: raw.Version(2),
- objids[1]: raw.Version(1),
- objids[2]: raw.Version(1),
- }
- if !reflect.DeepEqual(txMap, expTxMap) {
- t.Errorf("Data mismatch for txid %v txmap %v instead of %v",
- node.TxID, txMap, expTxMap)
- }
-
- // Verify transaction state for the second transaction.
- node, err = s.dag.getNode(objids[0], raw.Version(3))
- if err != nil {
- t.Errorf("cannot find dag node for object %d v1: %s", objids[0], err)
- }
- if node.TxID == NoTxID {
- t.Errorf("expecting non nil txid for object %d:v1", objids[0])
- }
- txMap, err = s.dag.getTransaction(node.TxID)
- if err != nil {
- t.Errorf("cannot find transaction for id %v: %s", node.TxID, err)
- }
- expTxMap = dagTxMap{
- objids[0]: raw.Version(3),
- objids[1]: raw.Version(2),
- }
- if !reflect.DeepEqual(txMap, expTxMap) {
- t.Errorf("Data mismatch for txid %v txmap %v instead of %v",
- node.TxID, txMap, expTxMap)
- }
-
- if err := s.hdlInitiator.detectConflicts(); err != nil {
- t.Fatalf("detectConflicts failed with err %v", err)
- }
- if len(s.hdlInitiator.updObjects) != 3 {
- t.Errorf("Unexpected number of updated objects %d", len(s.hdlInitiator.updObjects))
- }
- if err := s.hdlInitiator.resolveConflicts(); err != nil {
- t.Fatalf("resolveConflicts failed with err %v", err)
- }
- if err := s.hdlInitiator.updateStoreAndSync(nil, GenVector{}, minGens, GenVector{}, "VeyronPhone"); err != nil {
- t.Fatalf("updateStoreAndSync failed with err %v", err)
- }
-}
-
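
For reference, the transaction bookkeeping verified above hangs off the DAG nodes: every node written by one transaction shares a TxID, all other nodes carry NoTxID, and membership is recoverable from any member node. A short sketch using the same calls as the test (objid and ver are placeholders):

    // Sketch: recover a transaction's membership from one of its nodes.
    node, _ := s.dag.getNode(objid, ver)
    if node.TxID != NoTxID {
        // dagTxMap maps storage.ID -> raw.Version for each object in the tx.
        txMap, _ := s.dag.getTransaction(node.TxID)
        _ = txMap
    }
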
-// TestLogStreamRemoteWithDel tests processing of a remote log stream
-// that contains object deletion. Commands are in file
-// testdata/remote-init-03.log.sync.
-func TestLogStreamRemoteWithDel(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- // Set a large value to prevent the threads from firing.
- // Test is not thread safe.
- peerSyncInterval = 1 * time.Hour
- garbageCollectInterval = 1 * time.Hour
- s := NewSyncd("", "", "VeyronTab", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- stream, err := createReplayStream("remote-init-03.log.sync")
- if err != nil {
- t.Fatalf("createReplayStream failed with err %v", err)
- }
- var minGens GenVector
- if minGens, err = s.hdlInitiator.processLogStream(stream); err != nil {
- t.Fatalf("processLogStream failed with err %v", err)
- }
-
- // Check minGens.
- expVec := GenVector{"VeyronPhone": 1}
- if !reflect.DeepEqual(expVec, minGens) {
- t.Errorf("Data mismatch for minGens: %v instead of %v",
- minGens, expVec)
- }
-
- // Check generation metadata.
- curVal, err := s.log.getGenMetadata("VeyronPhone", 1)
- if err != nil || curVal == nil {
-		t.Fatalf("GetGenMetadata() cannot find generation metadata in log file, err %v", err)
- }
- expVal := &genMetadata{Pos: 0, Count: 3, MaxLSN: 2}
- if !reflect.DeepEqual(expVal, curVal) {
- t.Errorf("Data mismatch for generation metadata: %v instead of %v",
- curVal, expVal)
- }
-
- objid, err := strToObjID("12345")
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
- // Check all log records.
- for i := LSN(0); i < 3; i++ {
- curRec, err := s.log.getLogRec("VeyronPhone", GenID(1), i)
- if err != nil || curRec == nil {
-			t.Fatalf("GetLogRec() cannot find record %d in log file, err %v",
-				i, err)
- }
- if curRec.ObjID != objid {
- t.Errorf("Data mismatch in log record %v", curRec)
- }
- // Verify DAG state.
- if _, err := s.dag.getNode(objid, raw.Version(i+1)); err != nil {
-			t.Errorf("GetNode() cannot find object %d version %d in DAG, err %v", objid, i+1, err)
- }
- }
- if err := s.hdlInitiator.detectConflicts(); err != nil {
- t.Fatalf("detectConflicts failed with err %v", err)
- }
- if len(s.hdlInitiator.updObjects) != 1 {
- t.Errorf("Unexpected number of updated objects %d", len(s.hdlInitiator.updObjects))
- }
- st := s.hdlInitiator.updObjects[objid]
- if st.isConflict {
- t.Errorf("Detected a conflict %v", st)
- }
- if st.newHead != 3 || st.oldHead != raw.NoVersion {
- t.Errorf("Conflict detection didn't succeed %v", st)
- }
- if err := s.hdlInitiator.resolveConflicts(); err != nil {
- t.Fatalf("resolveConflicts failed with err %v", err)
- }
- if err := s.hdlInitiator.updateStoreAndSync(nil, GenVector{}, minGens, GenVector{}, "VeyronPhone"); err != nil {
- t.Fatalf("updateStoreAndSync failed with err %v", err)
- }
- if st.resolvVal.Mutation.PriorVersion != raw.NoVersion || st.resolvVal.Mutation.Version != 3 {
- t.Errorf("Mutation generation is not accurate %v", st)
- }
- m, err := s.hdlInitiator.storeMutation(objid, st.resolvVal)
- if err != nil {
- t.Errorf("Could not translate mutation %v", err)
- }
- if m.Version != raw.NoVersion || m.PriorVersion != raw.NoVersion {
- t.Errorf("Data mismatch in mutation translation %v", m)
- }
- if s.log.head.Curgen != 1 || s.log.head.Curlsn != 0 || s.log.head.Curorder != 1 {
- t.Errorf("Data mismatch in log header %v", s.log.head)
- }
-
- // Verify DAG state.
-		t.Errorf("Invalid object %d head in DAG %d, err %v", objid, head, err)
- t.Errorf("Invalid object %d head in DAG %s, err %v", objid, head, err)
- }
- node, err := s.dag.getNode(objid, raw.Version(3))
- if err != nil {
- t.Errorf("cannot find dag node for object %d v3: %s", objid, err)
- }
- if !node.Deleted {
- t.Errorf("deleted node not found for object %d v3", objid)
- }
- if !s.dag.hasDeletedDescendant(objid, raw.Version(2)) {
- t.Errorf("link to deleted node not found for object %d from v2", objid)
- }
- if !s.dag.hasDeletedDescendant(objid, raw.Version(1)) {
- t.Errorf("link to deleted node not found for object %d from v1", objid)
- }
-}
-
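
Worth noting from the assertions above: a deletion is an ordinary DAG node with Deleted set, and storeMutation() translates the resolved value into a mutation whose Version and PriorVersion are both raw.NoVersion. A minimal sketch of that check, mirroring the test:

    // Sketch: a deleted head translates to a NoVersion mutation for the store.
    if node, err := s.dag.getNode(objid, raw.Version(3)); err == nil && node.Deleted {
        m, _ := s.hdlInitiator.storeMutation(objid, st.resolvVal)
        isDelete := m.Version == raw.NoVersion && m.PriorVersion == raw.NoVersion
        _ = isDelete
    }
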
-// TestLogStreamDel2Objs tests that a local and a remote log stream
-// can be correctly applied when there are a local and a remote delete on
-// 2 different objects. Commands are in files
-// testdata/<local-init-01.log.sync,remote-2obj-del.log.sync>.
-func TestLogStreamDel2Objs(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- // Set a large value to prevent the threads from firing.
- // Test is not thread safe.
- peerSyncInterval = 1 * time.Hour
- garbageCollectInterval = 1 * time.Hour
- s := NewSyncd("", "", "VeyronTab", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- if _, err = logReplayCommands(s.log, "local-init-01.log.sync"); err != nil {
- t.Error(err)
- }
-
- stream, err := createReplayStream("remote-2obj-del.log.sync")
- if err != nil {
- t.Fatalf("createReplayStream failed with err %v", err)
- }
-
- var minGens GenVector
- if minGens, err = s.hdlInitiator.processLogStream(stream); err != nil {
- t.Fatalf("processLogStream failed with err %v", err)
- }
-
- // Check minGens.
- expVec := GenVector{"VeyronPhone": 1}
- if !reflect.DeepEqual(expVec, minGens) {
- t.Errorf("Data mismatch for minGens: %v instead of %v",
- minGens, expVec)
- }
- // Check generation metadata.
- curVal, err := s.log.getGenMetadata("VeyronPhone", 1)
- if err != nil || curVal == nil {
-		t.Fatalf("GetGenMetadata() cannot find generation metadata in log file for VeyronPhone, err %v", err)
- }
- expVal := &genMetadata{Pos: 0, Count: 4, MaxLSN: 3}
- if !reflect.DeepEqual(expVal, curVal) {
- t.Errorf("Data mismatch for generation metadata: %v instead of %v",
- curVal, expVal)
- }
-
- if err := s.hdlInitiator.detectConflicts(); err != nil {
- t.Fatalf("detectConflicts failed with err %v", err)
- }
- if len(s.hdlInitiator.updObjects) != 2 {
- t.Errorf("Unexpected number of updated objects %d", len(s.hdlInitiator.updObjects))
- }
- if err := s.hdlInitiator.resolveConflicts(); err != nil {
- t.Fatalf("resolveConflicts failed with err %v", err)
- }
- if err := s.hdlInitiator.updateStoreAndSync(nil, GenVector{}, minGens, GenVector{}, "VeyronPhone"); err != nil {
- t.Fatalf("updateStoreAndSync failed with err %v", err)
- }
-
- objs := []string{"123", "456"}
- newHeads := []raw.Version{6, 2}
- conflicts := []bool{false, true}
- for pos, o := range objs {
- objid, err := strToObjID(o)
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
-
- st := s.hdlInitiator.updObjects[objid]
-
- if st.isConflict != conflicts[pos] {
- t.Errorf("Detected a wrong conflict %v", st)
- }
- if st.newHead != newHeads[pos] {
- t.Errorf("Conflict detection didn't succeed %v", st)
- }
- if pos == 1 {
- // Force a blessing to remote version for testing.
- st.resolvVal.Mutation.Version = st.newHead
- st.resolvVal.Mutation.PriorVersion = st.oldHead
- st.resolvVal.Delete = false
- }
- m, err := s.hdlInitiator.storeMutation(objid, st.resolvVal)
- if err != nil {
- t.Errorf("Could not translate mutation %v", err)
- }
-
- if pos == 0 {
- if st.oldHead != 3 {
- t.Errorf("Conflict detection didn't succeed for obj123 %v", st)
- }
- if m.Version != raw.NoVersion || m.PriorVersion != 3 {
- t.Errorf("Data mismatch in mutation translation for obj123 %v", m)
- }
- // Test echo back from watch for these mutations.
- if err := s.log.processWatchRecord(objid, 0, raw.Version(3), &LogValue{}, NoTxID); err != nil {
- t.Errorf("Echo processing from watch failed %v", err)
- }
- }
-
- if pos == 1 {
- if st.oldHead == raw.NoVersion {
- t.Errorf("Conflict detection didn't succeed for obj456 %v", st)
- }
- if m.Version != 2 || m.PriorVersion != raw.NoVersion {
- t.Errorf("Data mismatch in mutation translation for obj456 %v", m)
- }
- // Test echo back from watch for these mutations.
- if err := s.log.processWatchRecord(objid, raw.Version(2), 0, &LogValue{}, NoTxID); err != nil {
- t.Errorf("Echo processing from watch failed %v", err)
- }
- }
- }
-
- if s.log.head.Curgen != 1 || s.log.head.Curlsn != 6 || s.log.head.Curorder != 1 {
- t.Errorf("Data mismatch in log header %v", s.log.head)
- }
-}
-
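
The processWatchRecord() calls above simulate the store's watch stream echoing an applied mutation back to syncd. From the two call sites, the argument order appears to be (objid, version, priorVersion, value, txID), with 0 standing in for raw.NoVersion; a hedged sketch for the delete case:

    // Sketch (assumed semantics): watch replays the applied delete of prior
    // version 3, and the log should treat it as an echo of its own write.
    if err := s.log.processWatchRecord(objid, 0, raw.Version(3), &LogValue{}, NoTxID); err != nil {
        t.Errorf("Echo processing from watch failed %v", err)
    }
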
-// TestLogStreamNoConflict tests that a local and a remote log stream
-// can be correctly applied (when there are no conflicts). Commands
-// are in files
-// testdata/<local-init-00.log.sync,remote-noconf-00.log.sync>.
-func TestLogStreamNoConflict(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- // Set a large value to prevent the threads from firing.
- // Test is not thread safe.
- peerSyncInterval = 1 * time.Hour
- garbageCollectInterval = 1 * time.Hour
- s := NewSyncd("", "", "VeyronTab", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- if _, err = logReplayCommands(s.log, "local-init-00.log.sync"); err != nil {
- t.Error(err)
- }
-
- stream, err := createReplayStream("remote-noconf-00.log.sync")
- if err != nil {
- t.Fatalf("createReplayStream failed with err %v", err)
- }
-
- var minGens GenVector
- if minGens, err = s.hdlInitiator.processLogStream(stream); err != nil {
- t.Fatalf("processLogStream failed with err %v", err)
- }
-
- // Check minGens.
- expVec := GenVector{"VeyronPhone": 1}
- if !reflect.DeepEqual(expVec, minGens) {
- t.Errorf("Data mismatch for minGens: %v instead of %v",
- minGens, expVec)
- }
-
- // Check generation metadata.
- curVal, err := s.log.getGenMetadata("VeyronPhone", 1)
- if err != nil || curVal == nil {
-		t.Fatalf("GetGenMetadata() cannot find generation metadata in log file for VeyronPhone, err %v", err)
- }
- expVal := &genMetadata{Pos: 0, Count: 3, MaxLSN: 2}
- if !reflect.DeepEqual(expVal, curVal) {
- t.Errorf("Data mismatch for generation metadata: %v instead of %v",
- curVal, expVal)
- }
-
- objid, err := strToObjID("12345")
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
- // Check all log records.
- for _, devid := range []DeviceID{"VeyronPhone", "VeyronTab"} {
- v := raw.Version(1)
- for i := LSN(0); i < 3; i++ {
- curRec, err := s.log.getLogRec(devid, GenID(1), i)
- if err != nil || curRec == nil {
-				t.Fatalf("GetLogRec() cannot find record %s:%d in log file, err %v",
-					devid, i, err)
- }
- if curRec.ObjID != objid {
- t.Errorf("Data mismatch in log record %v", curRec)
- }
- // Verify DAG state.
- if _, err := s.dag.getNode(objid, v); err != nil {
-				t.Errorf("GetNode() cannot find object %d version %d in DAG, err %v", objid, v, err)
- }
- v = v + 1
- }
- }
- if err := s.hdlInitiator.detectConflicts(); err != nil {
- t.Fatalf("detectConflicts failed with err %v", err)
- }
- if len(s.hdlInitiator.updObjects) != 1 {
- t.Errorf("Unexpected number of updated objects %d", len(s.hdlInitiator.updObjects))
- }
- st := s.hdlInitiator.updObjects[objid]
- if st.isConflict {
- t.Errorf("Detected a conflict %v", st)
- }
- if st.newHead != 6 || st.oldHead != 3 {
- t.Errorf("Conflict detection didn't succeed %v", st)
- }
- if err := s.hdlInitiator.resolveConflicts(); err != nil {
- t.Fatalf("resolveConflicts failed with err %v", err)
- }
- if err := s.hdlInitiator.updateStoreAndSync(nil, GenVector{}, minGens, GenVector{}, "VeyronPhone"); err != nil {
- t.Fatalf("updateStoreAndSync failed with err %v", err)
- }
- if st.resolvVal.Mutation.PriorVersion != 3 || st.resolvVal.Mutation.Version != 6 {
- t.Errorf("Mutation generation is not accurate %v", st)
- }
- if s.log.head.Curgen != 1 || s.log.head.Curlsn != 3 || s.log.head.Curorder != 1 {
- t.Errorf("Data mismatch in log header %v", s.log.head)
- }
- // Verify DAG state.
- if head, err := s.dag.getHead(objid); err != nil || head != 6 {
-		t.Errorf("Invalid object %d head in DAG %d, err %v", objid, head, err)
- }
-}
-
-// TestLogStreamConflict tests that a local and a remote log stream
-// can be correctly applied (when there are conflicts). Commands are
-// in files testdata/<local-init-00.log.sync,remote-conf-00.log.sync>.
-func TestLogStreamConflict(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- // Set a large value to prevent the threads from firing.
- // Test is not thread safe.
- peerSyncInterval = 1 * time.Hour
- garbageCollectInterval = 1 * time.Hour
- conflictResolutionPolicy = useTime
- s := NewSyncd("", "", "VeyronTab", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- if _, err = logReplayCommands(s.log, "local-init-00.log.sync"); err != nil {
- t.Error(err)
- }
-
- stream, err := createReplayStream("remote-conf-00.log.sync")
- if err != nil {
- t.Fatalf("createReplayStream failed with err %v", err)
- }
-
- var minGens GenVector
- if minGens, err = s.hdlInitiator.processLogStream(stream); err != nil {
- t.Fatalf("processLogStream failed with err %v", err)
- }
-
- // Check minGens.
- expVec := GenVector{"VeyronPhone": 1}
- if !reflect.DeepEqual(expVec, minGens) {
- t.Errorf("Data mismatch for minGens: %v instead of %v",
- minGens, expVec)
- }
-
- // Check generation metadata.
- curVal, err := s.log.getGenMetadata("VeyronPhone", 1)
- if err != nil || curVal == nil {
-		t.Fatalf("GetGenMetadata() cannot find generation metadata in log file for VeyronPhone, err %v", err)
- }
- expVal := &genMetadata{Pos: 0, Count: 3, MaxLSN: 2}
- if !reflect.DeepEqual(expVal, curVal) {
- t.Errorf("Data mismatch for generation metadata: %v instead of %v",
- curVal, expVal)
- }
- if err := s.hdlInitiator.detectConflicts(); err != nil {
- t.Fatalf("detectConflicts failed with err %v", err)
- }
- if err := s.hdlInitiator.resolveConflicts(); err != nil {
- t.Fatalf("resolveConflicts failed with err %v", err)
- }
- if err := s.hdlInitiator.updateStoreAndSync(nil, GenVector{}, minGens, GenVector{}, "VeyronPhone"); err != nil {
- t.Fatalf("updateStoreAndSync failed with err %v", err)
- }
-
- objid, err := strToObjID("12345")
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
- lcount := []LSN{3, 4}
- // Check all log records.
- for index, devid := range []DeviceID{"VeyronPhone", "VeyronTab"} {
- v := raw.Version(1)
- for i := LSN(0); i < lcount[index]; i++ {
- curRec, err := s.log.getLogRec(devid, GenID(1), i)
- if err != nil || curRec == nil {
-				t.Fatalf("GetLogRec() cannot find record %s:%d in log file, err %v",
-					devid, i, err)
- }
- if curRec.ObjID != objid {
- t.Errorf("Data mismatch in log record %v", curRec)
- }
-			if devid == "VeyronTab" && i == 3 && curRec.RecType != LinkRec {
- t.Errorf("Data mismatch in log record %v", curRec)
- }
- // Verify DAG state.
- if _, err := s.dag.getNode(objid, v); err != nil {
-				t.Errorf("GetNode() cannot find object %d version %d in DAG, err %v", objid, v, err)
- }
- v = v + 1
- }
- }
- if len(s.hdlInitiator.updObjects) != 1 {
- t.Errorf("Unexpected number of updated objects %d", len(s.hdlInitiator.updObjects))
- }
- st := s.hdlInitiator.updObjects[objid]
- if !st.isConflict {
- t.Errorf("Didn't detect a conflict %v", st)
- }
- if st.newHead != 6 || st.oldHead != 3 {
- t.Errorf("Conflict detection didn't succeed %v", st)
- }
- if st.resolvVal.Mutation.PriorVersion != 3 || st.resolvVal.Mutation.Version != 6 {
- t.Errorf("Mutation generation is not accurate %v", st)
- }
- // Curlsn == 4 for the log record that resolves conflict.
- if s.log.head.Curgen != 1 || s.log.head.Curlsn != 4 || s.log.head.Curorder != 1 {
- t.Errorf("Data mismatch in log header %v", s.log.head)
- }
- // Verify DAG state.
- if head, err := s.dag.getHead(objid); err != nil || head != 6 {
-		t.Errorf("Invalid object %d head in DAG %d, err %v", objid, head, err)
- }
-}
-
-// TestMultipleLogStream tests that a local and 2 remote log streams
-// can be correctly applied (when there are conflicts). Commands are
-// in files testdata/<local-init-00.log.sync,remote-conf-01.log.sync>.
-func TestMultipleLogStream(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- // Set a large value to prevent the threads from firing.
- // Test is not thread safe.
- peerSyncInterval = 1 * time.Hour
- garbageCollectInterval = 1 * time.Hour
- conflictResolutionPolicy = useTime
- s := NewSyncd("", "", "VeyronTab", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- if _, err = logReplayCommands(s.log, "local-init-00.log.sync"); err != nil {
- t.Error(err)
- }
-
- stream, err := createReplayStream("remote-conf-01.log.sync")
- if err != nil {
- t.Fatalf("createReplayStream failed with err %v", err)
- }
-
- var minGens GenVector
- if minGens, err = s.hdlInitiator.processLogStream(stream); err != nil {
- t.Fatalf("processLogStream failed with err %v", err)
- }
-
- // Check minGens.
- expVec := GenVector{"VeyronPhone": 1, "VeyronLaptop": 1}
- if !reflect.DeepEqual(expVec, minGens) {
- t.Errorf("Data mismatch for minGens: %v instead of %v",
- minGens, expVec)
- }
-
- // Check generation metadata.
- curVal, err := s.log.getGenMetadata("VeyronLaptop", 1)
- if err != nil || curVal == nil {
-		t.Fatalf("GetGenMetadata() cannot find generation metadata in log file for VeyronLaptop, err %v", err)
- }
- expVal := &genMetadata{Pos: 0, Count: 1, MaxLSN: 0}
- if !reflect.DeepEqual(expVal, curVal) {
- t.Errorf("Data mismatch for generation metadata: %v instead of %v",
- curVal, expVal)
- }
-
- curVal, err = s.log.getGenMetadata("VeyronPhone", 1)
- if err != nil || curVal == nil {
-		t.Fatalf("GetGenMetadata() cannot find generation metadata in log file for VeyronPhone, err %v", err)
- }
- expVal.Pos = 1
- expVal.Count = 2
- expVal.MaxLSN = 1
- if !reflect.DeepEqual(expVal, curVal) {
- t.Errorf("Data mismatch for generation metadata: %v instead of %v",
- curVal, expVal)
- }
-
- if err := s.hdlInitiator.detectConflicts(); err != nil {
- t.Fatalf("detectConflicts failed with err %v", err)
- }
- if err := s.hdlInitiator.resolveConflicts(); err != nil {
- t.Fatalf("resolveConflicts failed with err %v", err)
- }
- if err := s.hdlInitiator.updateStoreAndSync(nil, GenVector{}, minGens, GenVector{}, "VeyronPhone"); err != nil {
- t.Fatalf("updateStoreAndSync failed with err %v", err)
- }
-
- objid, err := strToObjID("12345")
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
- // Check all log records.
- lcount := []LSN{2, 4, 1}
- for index, devid := range []DeviceID{"VeyronPhone", "VeyronTab", "VeyronLaptop"} {
- v := raw.Version(1)
- for i := LSN(0); i < lcount[index]; i++ {
- curRec, err := s.log.getLogRec(devid, GenID(1), i)
- if err != nil || curRec == nil {
-				t.Fatalf("GetLogRec() cannot find record %s:%d in log file, err %v",
-					devid, i, err)
- }
- if curRec.ObjID != objid {
- t.Errorf("Data mismatch in log record %v", curRec)
- }
-			if devid == "VeyronTab" && i == 3 && curRec.RecType != LinkRec {
- t.Errorf("Data mismatch in log record %v", curRec)
- }
- // Verify DAG state.
- if _, err := s.dag.getNode(objid, v); err != nil {
-				t.Errorf("GetNode() cannot find object %d version %d in DAG, err %v", objid, v, err)
- }
- v = v + 1
- }
- }
- if len(s.hdlInitiator.updObjects) != 1 {
- t.Errorf("Unexpected number of updated objects %d", len(s.hdlInitiator.updObjects))
- }
- st := s.hdlInitiator.updObjects[objid]
- if !st.isConflict {
- t.Errorf("Didn't detect a conflict %v", st)
- }
- if st.newHead != 6 || st.oldHead != 3 {
- t.Errorf("Conflict detection didn't succeed %v", st)
- }
- if st.resolvVal.Mutation.PriorVersion != 3 || st.resolvVal.Mutation.Version != 6 {
- t.Errorf("Mutation generation is not accurate %v", st)
- }
- // Curlsn == 4 for the log record that resolves conflict.
- if s.log.head.Curgen != 1 || s.log.head.Curlsn != 4 || s.log.head.Curorder != 2 {
- t.Errorf("Data mismatch in log header %v", s.log.head)
- }
- // Verify DAG state.
- if head, err := s.dag.getHead(objid); err != nil || head != 6 {
-		t.Errorf("Invalid object %d head in DAG %d, err %v", objid, head, err)
- }
-}
-
-// TestInitiatorBlessNoConf0 tests that a local and a remote log
-// record stream can be correctly applied, when the conflict is
-// resolved by a blessing. In this test, the local head of the object is
-// unchanged at the end of replay. Commands are in files
-// testdata/<local-init-00.log.sync,remote-noconf-link-00.log.sync>.
-func TestInitiatorBlessNoConf0(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- // Set a large value to prevent the threads from firing.
- // Test is not thread safe.
- peerSyncInterval = 1 * time.Hour
- garbageCollectInterval = 1 * time.Hour
- s := NewSyncd("", "", "VeyronTab", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- if _, err = logReplayCommands(s.log, "local-init-00.log.sync"); err != nil {
- t.Error(err)
- }
- stream, err := createReplayStream("remote-noconf-link-00.log.sync")
- if err != nil {
- t.Fatalf("createReplayStream failed with err %v", err)
- }
-
- var minGens GenVector
- if minGens, err = s.hdlInitiator.processLogStream(stream); err != nil {
- t.Fatalf("processLogStream failed with err %v", err)
- }
- if err := s.hdlInitiator.detectConflicts(); err != nil {
- t.Fatalf("detectConflicts failed with err %v", err)
- }
- // Check that there are no conflicts.
- if len(s.hdlInitiator.updObjects) != 1 {
- t.Errorf("Too many objects %v", len(s.hdlInitiator.updObjects))
- }
- objid, err := strToObjID("12345")
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
- st := s.hdlInitiator.updObjects[objid]
- if st.isConflict {
- t.Errorf("Detected a conflict %v", st)
- }
- if st.newHead != 3 || st.oldHead != 3 {
- t.Errorf("Conflict detection didn't succeed %v", st)
- }
-
- if err := s.hdlInitiator.resolveConflicts(); err != nil {
- t.Fatalf("resolveConflicts failed with err %v", err)
- }
- if err := s.hdlInitiator.updateStoreAndSync(nil, GenVector{}, minGens, GenVector{}, "VeyronPhone"); err != nil {
- t.Fatalf("updateStoreAndSync failed with err %v", err)
- }
- if st.resolvVal.Mutation.Version != 3 {
- t.Errorf("Mutation generation is not accurate %v", st)
- }
- // No new log records should be added.
- if s.log.head.Curgen != 1 || s.log.head.Curlsn != 3 || s.log.head.Curorder != 1 {
- t.Errorf("Data mismatch in log header %v", s.log.head)
- }
- // Verify DAG state.
- if head, err := s.dag.getHead(objid); err != nil || head != 3 {
-		t.Errorf("Invalid object %d head in DAG %d, err %v", objid, head, err)
- }
-}
-
-// TestInitiatorBlessNoConf1 tests that a local and a remote log
-// record stream can be correctly applied, when the conflict is
-// resolved by a blessing. In this test, the local head of the object is
-// updated at the end of the replay. Commands are in files
-// testdata/<local-init-00.log.sync,remote-noconf-link-01.log.sync>.
-func TestInitiatorBlessNoConf1(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- // Set a large value to prevent the threads from firing.
- // Test is not thread safe.
- peerSyncInterval = 1 * time.Hour
- garbageCollectInterval = 1 * time.Hour
- s := NewSyncd("", "", "VeyronTab", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- if _, err = logReplayCommands(s.log, "local-init-00.log.sync"); err != nil {
- t.Error(err)
- }
- stream, err := createReplayStream("remote-noconf-link-01.log.sync")
- if err != nil {
- t.Fatalf("createReplayStream failed with err %v", err)
- }
-
- var minGens GenVector
- if minGens, err = s.hdlInitiator.processLogStream(stream); err != nil {
- t.Fatalf("processLogStream failed with err %v", err)
- }
- if err := s.hdlInitiator.detectConflicts(); err != nil {
- t.Fatalf("detectConflicts failed with err %v", err)
- }
- // Check that there are no conflicts.
- if len(s.hdlInitiator.updObjects) != 1 {
- t.Errorf("Too many objects %v", len(s.hdlInitiator.updObjects))
- }
- objid, err := strToObjID("12345")
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
- st := s.hdlInitiator.updObjects[objid]
- if st.isConflict {
- t.Errorf("Detected a conflict %v", st)
- }
- if st.newHead != 4 || st.oldHead != 3 {
- t.Errorf("Conflict detection didn't succeed %v", st)
- }
-
- if err := s.hdlInitiator.resolveConflicts(); err != nil {
- t.Fatalf("resolveConflicts failed with err %v", err)
- }
- if err := s.hdlInitiator.updateStoreAndSync(nil, GenVector{}, minGens, GenVector{}, "VeyronPhone"); err != nil {
- t.Fatalf("updateStoreAndSync failed with err %v", err)
- }
- if st.resolvVal.Mutation.Version != 4 || st.resolvVal.Mutation.PriorVersion != 3 {
- t.Errorf("Mutation generation is not accurate %v", st)
- }
- // No new log records should be added.
- if s.log.head.Curgen != 1 || s.log.head.Curlsn != 3 || s.log.head.Curorder != 1 {
- t.Errorf("Data mismatch in log header %v", s.log.head)
- }
- // Verify DAG state.
- if head, err := s.dag.getHead(objid); err != nil || head != 4 {
-		t.Errorf("Invalid object %d head in DAG %d, err %v", objid, head, err)
- }
-}
-
-// TestInitiatorBlessNoConf2 tests that a local and a remote log
-// record stream can be correctly applied, when the conflict is
-// resolved by a blessing. In this test, the local head of the object is
-// updated at the end of the first replay. In the second replay, a
-// conflict resolved locally is rediscovered since it was also
-// resolved remotely. Commands are in files
-// testdata/<local-init-00.log.sync,remote-noconf-link-02.log.sync,
-// remote-noconf-link-repeat.log.sync>.
-func TestInitiatorBlessNoConf2(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- // Set a large value to prevent the threads from firing.
- // Test is not thread safe.
- peerSyncInterval = 1 * time.Hour
- garbageCollectInterval = 1 * time.Hour
- s := NewSyncd("", "", "VeyronTab", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- if _, err = logReplayCommands(s.log, "local-init-00.log.sync"); err != nil {
- t.Error(err)
- }
- stream, err := createReplayStream("remote-noconf-link-02.log.sync")
- if err != nil {
- t.Fatalf("createReplayStream failed with err %v", err)
- }
-
- var minGens GenVector
- if minGens, err = s.hdlInitiator.processLogStream(stream); err != nil {
- t.Fatalf("processLogStream failed with err %v", err)
- }
- if err := s.hdlInitiator.detectConflicts(); err != nil {
- t.Fatalf("detectConflicts failed with err %v", err)
- }
- // Check that there are no conflicts.
- if len(s.hdlInitiator.updObjects) != 1 {
- t.Errorf("Too many objects %v", len(s.hdlInitiator.updObjects))
- }
- objid, err := strToObjID("12345")
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
- st := s.hdlInitiator.updObjects[objid]
- if st.isConflict {
- t.Errorf("Detected a conflict %v", st)
- }
- if st.newHead != 5 || st.oldHead != 3 {
- t.Errorf("Conflict detection didn't succeed %v", st)
- }
-
- if err := s.hdlInitiator.resolveConflicts(); err != nil {
- t.Fatalf("resolveConflicts failed with err %v", err)
- }
- if err := s.hdlInitiator.updateStoreAndSync(nil, GenVector{"VeyronTab": 0}, minGens, GenVector{}, "VeyronPhone"); err != nil {
- t.Fatalf("updateStoreAndSync failed with err %v", err)
- }
- if st.resolvVal.Mutation.Version != 5 || st.resolvVal.Mutation.PriorVersion != 3 {
- t.Errorf("Mutation generation is not accurate %v", st)
- }
- // No new log records should be added.
- if s.log.head.Curgen != 1 || s.log.head.Curlsn != 3 || s.log.head.Curorder != 2 {
- t.Errorf("Data mismatch in log header %v", s.log.head)
- }
- // Verify DAG state.
- if head, err := s.dag.getHead(objid); err != nil || head != 5 {
-		t.Errorf("Invalid object %d head in DAG %d, err %v", objid, head, err)
- }
-
- // Test simultaneous conflict resolution.
- stream, err = createReplayStream("remote-noconf-link-repeat.log.sync")
- if err != nil {
- t.Fatalf("createReplayStream failed with err %v", err)
- }
-
- if minGens, err = s.hdlInitiator.processLogStream(stream); err != nil {
- t.Fatalf("processLogStream failed with err %v", err)
- }
- if err := s.hdlInitiator.detectConflicts(); err != nil {
- t.Fatalf("detectConflicts failed with err %v", err)
- }
- // Check that there are no conflicts.
- if len(s.hdlInitiator.updObjects) != 1 {
- t.Errorf("Too many objects %v", len(s.hdlInitiator.updObjects))
- }
- st = s.hdlInitiator.updObjects[objid]
- if st.isConflict {
- t.Errorf("Detected a conflict %v", st)
- }
- if st.newHead != 5 || st.oldHead != 5 {
- t.Errorf("Conflict detection didn't succeed %v", st)
- }
-
- if err := s.hdlInitiator.resolveConflicts(); err != nil {
- t.Fatalf("resolveConflicts failed with err %v", err)
- }
- if err := s.hdlInitiator.updateStoreAndSync(nil, GenVector{}, minGens, GenVector{}, "VeyronLaptop"); err != nil {
- t.Fatalf("updateStoreAndSync failed with err %v", err)
- }
- if st.resolvVal.Mutation.Version != 5 {
- t.Errorf("Mutation generation is not accurate %v", st)
- }
- // No new log records should be added.
- if s.log.head.Curgen != 1 || s.log.head.Curlsn != 3 || s.log.head.Curorder != 3 {
- t.Errorf("Data mismatch in log header %v", s.log.head)
- }
- // Verify DAG state.
- if head, err := s.dag.getHead(objid); err != nil || head != 5 {
-		t.Errorf("Invalid object %d head in DAG %d, err %v", objid, head, err)
- }
-}
-
-// TestInitiatorBlessConf tests that a local and a remote log record
-// stream can be correctly applied, when the conflict is resolved by a
-// blessing. Commands are in files
-// testdata/<local-init-00.log.sync,remote-conf-link.log.sync>.
-func TestInitiatorBlessConf(t *testing.T) {
- dir, err := createTempDir()
- if err != nil {
- t.Errorf("Could not create tempdir %v", err)
- }
- // Set a large value to prevent the threads from firing.
- // Test is not thread safe.
- peerSyncInterval = 1 * time.Hour
- garbageCollectInterval = 1 * time.Hour
- s := NewSyncd("", "", "VeyronTab", dir, "", 0)
-
- defer s.Close()
- defer os.RemoveAll(dir)
-
- if _, err = logReplayCommands(s.log, "local-init-00.log.sync"); err != nil {
- t.Error(err)
- }
- stream, err := createReplayStream("remote-conf-link.log.sync")
- if err != nil {
- t.Fatalf("createReplayStream failed with err %v", err)
- }
-
- var minGens GenVector
- if minGens, err = s.hdlInitiator.processLogStream(stream); err != nil {
- t.Fatalf("processLogStream failed with err %v", err)
- }
- if err := s.hdlInitiator.detectConflicts(); err != nil {
- t.Fatalf("detectConflicts failed with err %v", err)
- }
-	// Check the number of updated objects; a conflict is expected below.
- if len(s.hdlInitiator.updObjects) != 1 {
- t.Errorf("Too many objects %v", len(s.hdlInitiator.updObjects))
- }
- objid, err := strToObjID("12345")
- if err != nil {
- t.Errorf("Could not create objid %v", err)
- }
- st := s.hdlInitiator.updObjects[objid]
- if !st.isConflict {
- t.Errorf("Didn't detect a conflict %v", st)
- }
- if st.newHead != 4 || st.oldHead != 3 || st.ancestor != 2 {
- t.Errorf("Conflict detection didn't succeed %v", st)
- }
-
- if err := s.hdlInitiator.resolveConflicts(); err != nil {
- t.Fatalf("resolveConflicts failed with err %v", err)
- }
- if st.resolvVal.Mutation.Version != 4 {
- t.Errorf("Mutation generation is not accurate %v", st)
- }
-
- if err := s.hdlInitiator.updateStoreAndSync(nil, GenVector{}, minGens, GenVector{}, "VeyronPhone"); err != nil {
- t.Fatalf("updateStoreAndSync failed with err %v", err)
- }
- if st.resolvVal.Mutation.Version != 4 || st.resolvVal.Mutation.PriorVersion != 3 {
- t.Errorf("Mutation generation is not accurate %v", st)
- }
- // New log records should be added.
- if s.log.head.Curgen != 1 || s.log.head.Curlsn != 4 || s.log.head.Curorder != 1 {
- t.Errorf("Data mismatch in log header %v", s.log.head)
- }
- curRec, err := s.log.getLogRec(s.id, GenID(1), LSN(3))
- if err != nil || curRec == nil {
-		t.Fatalf("GetLogRec() cannot find record %s:1:3 in log file, err %v",
-			s.id, err)
- }
- if curRec.ObjID != objid || curRec.RecType != LinkRec {
- t.Errorf("Data mismatch in log record %v", curRec)
- }
- // Verify DAG state.
- if head, err := s.dag.getHead(objid); err != nil || head != 4 {
-		t.Errorf("Invalid object %d head in DAG %d, err %v", objid, head, err)
- }
-}
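
Across the blessing tests above, the observable artifact of a blessing is a log record of type LinkRec rather than a data mutation; only when the blessing resolves a genuine conflict (as here) is a new LinkRec appended locally. A short sketch of that check, mirroring the code above:

    // Sketch: a conflict resolved by blessing appends a local LinkRec.
    if rec, err := s.log.getLogRec(s.id, GenID(1), LSN(3)); err == nil {
        blessed := rec.RecType == LinkRec // links the old head to the blessed version
        _ = blessed
    }
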
diff --git a/runtimes/google/vsync/kvdb.go b/runtimes/google/vsync/kvdb.go
deleted file mode 100644
index 7efefde..0000000
--- a/runtimes/google/vsync/kvdb.go
+++ /dev/null
@@ -1,176 +0,0 @@
-package vsync
-
-// Helpful wrappers to a persistent key/value (K/V) DB used by Veyron Sync.
-// The current underlying DB is gkvlite.
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "os"
- "path"
-
- "github.com/steveyen/gkvlite"
-
- "veyron2/vom"
-)
-
-type kvdb struct {
- store *gkvlite.Store
- fdesc *os.File
-}
-
-type kvtable struct {
- coll *gkvlite.Collection
-}
-
-// kvdbOpen opens or creates a K/V DB for the given filename and table names
-// within the DB. It returns the DB handler and handlers for each table.
-func kvdbOpen(filename string, tables []string) (*kvdb, []*kvtable, error) {
- // Open the file and create it if it does not exist.
- fdesc, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600)
- if err != nil {
- return nil, nil, err
- }
-
- // Initialize the DB (store) and its tables (collections).
- // The store takes ownership of fdesc on success.
- store, err := gkvlite.NewStore(fdesc)
- if err != nil {
- fdesc.Close()
- return nil, nil, err
- }
-
- flush := false
- tbls := make([]*kvtable, len(tables))
-
- for i, table := range tables {
- coll := store.GetCollection(table)
- if coll == nil {
- if coll = store.SetCollection(table, nil); coll == nil {
- store.Close()
- return nil, nil, fmt.Errorf("cannot create K/V DB table %s in file %s", table, filename)
- }
- flush = true
- }
- tbls[i] = &kvtable{coll: coll}
- }
-
- if flush {
- store.Flush() // Flush newly created collections.
- }
-
- db := &kvdb{store: store, fdesc: fdesc}
- return db, tbls, nil
-}
-
-// close closes the given K/V DB.
-func (db *kvdb) close() {
- db.store.Close()
- db.fdesc.Close()
-}
-
-// flush flushes the given K/V DB to disk.
-func (db *kvdb) flush() {
- db.store.Flush()
- db.fdesc.Sync()
-}
-
-// set stores (or overwrites) the given key/value pair in the DB table.
-func (t *kvtable) set(key string, value interface{}) error {
- val := new(bytes.Buffer)
- if err := vom.NewEncoder(val).Encode(value); err != nil {
- return err
- }
- return t.coll.Set([]byte(key), val.Bytes())
-}
-
-// create stores the given key/value pair in the DB table only if
-// the key does not already exist. Otherwise it returns an error.
-func (t *kvtable) create(key string, value interface{}) error {
- if t.hasKey(key) {
- return fmt.Errorf("key %s exists", key)
- }
- return t.set(key, value)
-}
-
-// update stores the given key/value pair in the DB table only if
-// the key already exists. Otherwise it returns an error.
-func (t *kvtable) update(key string, value interface{}) error {
- if !t.hasKey(key) {
- return fmt.Errorf("key %s does not exist", key)
- }
- return t.set(key, value)
-}
-
-// get retrieves the value of a key from the DB table.
-func (t *kvtable) get(key string, value interface{}) error {
- val, err := t.coll.Get([]byte(key))
- if err != nil {
- return err
- }
- if val == nil {
- return fmt.Errorf("entry %s not found in the K/V DB table", key)
- }
- return vom.NewDecoder(bytes.NewBuffer(val)).Decode(value)
-}
-
-// del deletes the entry in the DB table given its key.
-func (t *kvtable) del(key string) error {
- _, err := t.coll.Delete([]byte(key))
- return err
-}
-
-// hasKey returns true if the given key exists in the DB table.
-func (t *kvtable) hasKey(key string) bool {
- item, err := t.coll.GetItem([]byte(key), false)
- return err == nil && item != nil
-}
-
-// keyIter iterates over all keys in a DB table invoking the given callback
-// function for each one. The key iterator callback is passed the item key.
-func (t *kvtable) keyIter(keyIterCB func(key string)) error {
- return t.coll.VisitItemsAscend(nil, false, func(item *gkvlite.Item) bool {
- keyIterCB(string(item.Key))
- return true
- })
-}
-
-// compact compacts the K/V DB file on disk. It flushes the DB file, creates
-// a compact copy of it in a temporary file, closes the DB, moves the new
-// file into the place of the old one, and re-opens the new DB file.
-func (db *kvdb) compact(filename string, tables []string) (*kvdb, []*kvtable, error) {
- db.store.Flush()
-
- // Create a unique temporary filename to copy the compact store into.
- prefix := path.Base(filename)
- if prefix == "." || prefix == "/" {
- return nil, nil, fmt.Errorf("invalid DB filename %s", filename)
- }
-
- fdesc, err := ioutil.TempFile("", prefix)
- if err != nil {
- return nil, nil, err
- }
- tmpfile := fdesc.Name()
- defer os.Remove(tmpfile)
- defer fdesc.Close()
-
-	// Make a compact copy of the store.
-	store, err := db.store.CopyTo(fdesc, 0)
-	if err == nil {
-		err = store.Flush()
-		store.Close()
-	}
-	if err != nil {
-		return nil, nil, err
-	}
-
- // Swap the files and re-open the new store.
- if err = os.Rename(tmpfile, filename); err != nil {
- return nil, nil, err
- }
-
- db.close() // close it, after the rename there is no turning back
- return kvdbOpen(filename, tables)
-}
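
Taken together, these wrappers give a small typed CRUD surface over gkvlite with vom-encoded values. A minimal usage sketch (the filename, table name, and record type are illustrative):

    // Sketch: open a DB with one table, write, read back, and flush.
    db, tbls, err := kvdbOpen("/tmp/sync-example.db", []string{"devices"})
    if err != nil {
        panic(err)
    }
    defer db.close()

    type device struct{ Name string }
    devices := tbls[0]
    if err := devices.set("tab", &device{Name: "VeyronTab"}); err != nil {
        panic(err)
    }
    var d device
    if err := devices.get("tab", &d); err != nil {
        panic(err)
    }
    db.flush() // persist; compact() can later shrink the file in place
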
diff --git a/runtimes/google/vsync/kvdb_test.go b/runtimes/google/vsync/kvdb_test.go
deleted file mode 100644
index 5b0b6e1..0000000
--- a/runtimes/google/vsync/kvdb_test.go
+++ /dev/null
@@ -1,454 +0,0 @@
-package vsync
-
-// Tests for the Veyron Sync K/V DB component.
-
-import (
- "fmt"
- "os"
- "reflect"
- "testing"
- "time"
-)
-
-// A user structure stores info in the "users" table.
-type user struct {
- Username string
- Drinks []string
-}
-
-// A drink structure stores info in the "drinks" table.
-type drink struct {
- Name string
- Alcohol bool
-}
-
-var (
- users = []user{
- {Username: "lancelot", Drinks: []string{"beer", "coffee"}},
- {Username: "arthur", Drinks: []string{"coke", "beer", "coffee"}},
- {Username: "robin", Drinks: []string{"pepsi"}},
- {Username: "galahad"},
- }
- drinks = []drink{
- {Name: "coke", Alcohol: false},
- {Name: "pepsi", Alcohol: false},
- {Name: "beer", Alcohol: true},
- {Name: "coffee", Alcohol: false},
- }
-)
-
-// createTestDB creates a K/V DB with 2 tables.
-func createTestDB(t *testing.T) (fname string, db *kvdb, usersTbl, drinksTbl *kvtable) {
- fname = fmt.Sprintf("%s/sync_kvdb_test_%d_%d", os.TempDir(), os.Getpid(), time.Now().UnixNano())
- db, tbls, err := kvdbOpen(fname, []string{"users", "drinks"})
- if err != nil {
- os.Remove(fname)
- t.Fatalf("cannot create new K/V DB file %s: %v", fname, err)
- }
-
- if _, err = os.Stat(fname); err != nil {
- os.Remove(fname)
- t.Fatalf("newly created K/V DB file %s not found: %v", fname, err)
- }
-
- usersTbl, drinksTbl = tbls[0], tbls[1]
- return
-}
-
-// initTestTables initializes the K/V tables used by the tests.
-func initTestTables(t *testing.T, usersTbl, drinksTbl *kvtable, useCreate bool) {
- userPut, drinkPut, funcName := usersTbl.set, drinksTbl.set, "set()"
- if useCreate {
- userPut, drinkPut, funcName = usersTbl.create, drinksTbl.create, "create()"
- }
-
- for _, uu := range users {
- if err := userPut(uu.Username, &uu); err != nil {
- t.Fatalf("%s failed for user %s", funcName, uu.Username)
- }
- }
-
- for _, dd := range drinks {
- if err := drinkPut(dd.Name, &dd); err != nil {
- t.Fatalf("%s failed for drink %s", funcName, dd.Name)
- }
- }
-}
-
-// checkTestTables verifies the contents of the K/V tables.
-func checkTestTables(t *testing.T, usersTbl, drinksTbl *kvtable) {
- for _, uu := range users {
- var u2 user
- if err := usersTbl.get(uu.Username, &u2); err != nil {
- t.Fatalf("get() failed for user %s", uu.Username)
- }
- if !reflect.DeepEqual(u2, uu) {
- t.Fatalf("got wrong data for user %s: %#v instead of %#v", uu.Username, u2, uu)
- }
- if !usersTbl.hasKey(uu.Username) {
- t.Fatalf("hasKey() did not find user %s", uu.Username)
- }
- }
- for _, dd := range drinks {
- var d2 drink
- if err := drinksTbl.get(dd.Name, &d2); err != nil {
- t.Fatalf("get() failed for drink %s", dd.Name)
- }
- if !reflect.DeepEqual(d2, dd) {
- t.Fatalf("got wrong data for drink %s: %#v instead of %#v", dd.Name, d2, dd)
- }
- if !drinksTbl.hasKey(dd.Name) {
- t.Fatalf("hasKey() did not find drink %s", dd.Name)
- }
- }
-}
-
-func TestKVDBSet(t *testing.T) {
- kvdbfile, db, usersTbl, drinksTbl := createTestDB(t)
- defer os.Remove(kvdbfile)
- defer db.close()
-
- initTestTables(t, usersTbl, drinksTbl, false)
-
- db.flush()
-}
-
-func TestKVDBCreate(t *testing.T) {
- kvdbfile, db, usersTbl, drinksTbl := createTestDB(t)
- defer os.Remove(kvdbfile)
- defer db.close()
-
- initTestTables(t, usersTbl, drinksTbl, true)
-
- db.flush()
-}
-
-func TestKVDBBadGet(t *testing.T) {
- kvdbfile, db, usersTbl, drinksTbl := createTestDB(t)
- defer os.Remove(kvdbfile)
- defer db.close()
-
-	// The DB is empty; all gets must fail.
- for _, uu := range users {
- var u2 user
- if err := usersTbl.get(uu.Username, &u2); err == nil {
- t.Fatalf("get() found non-existent user %s in file %s: %v", uu.Username, kvdbfile, u2)
- }
- }
- for _, dd := range drinks {
- var d2 drink
- if err := drinksTbl.get(dd.Name, &d2); err == nil {
- t.Fatalf("get() found non-existent drink %s in file %s: %v", dd.Name, kvdbfile, d2)
- }
- }
-}
-
-func TestKVDBBadUpdate(t *testing.T) {
- kvdbfile, db, usersTbl, drinksTbl := createTestDB(t)
- defer os.Remove(kvdbfile)
- defer db.close()
-
-	// The DB is empty; all updates must fail.
- for _, uu := range users {
- u2 := user{Username: uu.Username}
- if err := usersTbl.update(uu.Username, &u2); err == nil {
- t.Fatalf("update() worked for a non-existent user %s in file %s", uu.Username, kvdbfile)
- }
- }
- for _, dd := range drinks {
- d2 := drink{Name: dd.Name}
- if err := drinksTbl.update(dd.Name, &d2); err == nil {
- t.Fatalf("update() worked for a non-existent drink %s in file %s", dd.Name, kvdbfile)
- }
- }
-}
-
-func TestKVDBBadHasKey(t *testing.T) {
- kvdbfile, db, usersTbl, drinksTbl := createTestDB(t)
- defer os.Remove(kvdbfile)
- defer db.close()
-
-	// The DB is empty; all key-checks must fail.
- for _, uu := range users {
- if usersTbl.hasKey(uu.Username) {
- t.Fatalf("hasKey() found non-existent user %s in file %s", uu.Username, kvdbfile)
- }
- }
- for _, dd := range drinks {
- if drinksTbl.hasKey(dd.Name) {
- t.Fatalf("hasKey() found non-existent drink %s in file %s", dd.Name, kvdbfile)
- }
- }
-}
-
-func TestKVDBGet(t *testing.T) {
- kvdbfile, db, usersTbl, drinksTbl := createTestDB(t)
- defer os.Remove(kvdbfile)
- defer db.close()
-
- initTestTables(t, usersTbl, drinksTbl, false)
- checkTestTables(t, usersTbl, drinksTbl)
-
- db.flush()
- checkTestTables(t, usersTbl, drinksTbl)
-}
-
-func TestKVDBBadCreate(t *testing.T) {
- kvdbfile, db, usersTbl, drinksTbl := createTestDB(t)
- defer os.Remove(kvdbfile)
- defer db.close()
-
- initTestTables(t, usersTbl, drinksTbl, false)
-
- // Must not be able to re-create the same entries.
- for _, uu := range users {
- u2 := user{Username: uu.Username}
- if err := usersTbl.create(uu.Username, &u2); err == nil {
- t.Fatalf("create() worked for an existing user %s in file %s", uu.Username, kvdbfile)
- }
- }
- for _, dd := range drinks {
- d2 := drink{Name: dd.Name}
- if err := drinksTbl.create(dd.Name, &d2); err == nil {
- t.Fatalf("create() worked for an existing drink %s in file %s", dd.Name, kvdbfile)
- }
- }
-
- db.flush()
-}
-
-func TestKVDBReopen(t *testing.T) {
- kvdbfile, db, usersTbl, drinksTbl := createTestDB(t)
- defer os.Remove(kvdbfile)
-
- initTestTables(t, usersTbl, drinksTbl, true)
-
-	// Close then re-open the file.
- db.flush()
- db.close()
-
- db, tbls, err := kvdbOpen(kvdbfile, []string{"users", "drinks"})
- if err != nil {
- t.Fatalf("Cannot re-open existing K/V DB file %s", kvdbfile)
- }
- defer db.close()
-
- usersTbl, drinksTbl = tbls[0], tbls[1]
- checkTestTables(t, usersTbl, drinksTbl)
-}
-
-func TestKVDBKeyIter(t *testing.T) {
- kvdbfile, db, usersTbl, drinksTbl := createTestDB(t)
- defer os.Remove(kvdbfile)
- defer db.close()
-
- initTestTables(t, usersTbl, drinksTbl, false)
-
- // Get the list of all entry keys in each table.
- keylist := ""
- err := usersTbl.keyIter(func(key string) {
- keylist += key + ","
- })
- if err != nil || keylist != "arthur,galahad,lancelot,robin," {
- t.Fatalf("keyIter() failed in file %s: err %v, user names: %v", kvdbfile, err, keylist)
- }
- keylist = ""
- err = drinksTbl.keyIter(func(key string) {
- keylist += key + ","
- })
- if err != nil || keylist != "beer,coffee,coke,pepsi," {
- t.Fatalf("keyIter() failed in file %s: err %v, drink names: %v", kvdbfile, err, keylist)
- }
-
- db.flush()
-}
-
-func TestKVDBUpdate(t *testing.T) {
- kvdbfile, db, usersTbl, drinksTbl := createTestDB(t)
- defer os.Remove(kvdbfile)
-
- initTestTables(t, usersTbl, drinksTbl, false)
- db.flush()
- db.close()
-
- db, tbls, err := kvdbOpen(kvdbfile, []string{"users", "drinks"})
- if err != nil {
- t.Fatalf("Cannot re-open existing K/V DB file %s", kvdbfile)
- }
- defer db.close()
-
- usersTbl, drinksTbl = tbls[0], tbls[1]
-
- for _, uu := range users {
- key := uu.Username
- u2 := uu
- u2.Username += "-New"
-
- if err = usersTbl.update(key, &u2); err != nil {
- t.Fatalf("update() failed for user %s in file %s", key, kvdbfile)
- }
-
- var u3 user
- if err = usersTbl.get(key, &u3); err != nil {
- t.Fatalf("get() failed for user %s in file %s", key, kvdbfile)
- }
- if !reflect.DeepEqual(u3, u2) {
- t.Fatalf("got wrong new data for user %s in file %s: %#v instead of %#v", key, kvdbfile, u3, u2)
- }
- }
-
- for _, dd := range drinks {
- key := dd.Name
- d2 := dd
- d2.Alcohol = !d2.Alcohol
-
- if err = drinksTbl.update(key, &d2); err != nil {
- t.Fatalf("update() failed for drink %s in file %s", key, kvdbfile)
- }
-
- var d3 drink
- if err = drinksTbl.get(key, &d3); err != nil {
- t.Fatalf("get() failed for drink %s in file %s", key, kvdbfile)
- }
- if !reflect.DeepEqual(d3, d2) {
- t.Fatalf("got wrong new data for drink %s in file %s: %#v instead of %#v", key, kvdbfile, d3, d2)
- }
- }
-
- db.flush()
-}
-
-func TestKVDBSetAgain(t *testing.T) {
- kvdbfile, db, usersTbl, drinksTbl := createTestDB(t)
- defer os.Remove(kvdbfile)
- defer db.close()
-
- initTestTables(t, usersTbl, drinksTbl, false)
-
- for _, uu := range users {
- key := uu.Username
- u2 := uu
- u2.Username += "-New"
-
- if err := usersTbl.set(key, &u2); err != nil {
- t.Fatalf("set() again failed for user %s in file %s", key, kvdbfile)
- }
-
- var u3 user
- if err := usersTbl.get(key, &u3); err != nil {
- t.Fatalf("get() failed for user %s in file %s", key, kvdbfile)
- }
- if !reflect.DeepEqual(u3, u2) {
- t.Fatalf("got wrong new data for user %s in file %s: %#v instead of %#v", key, kvdbfile, u3, u2)
- }
- }
-
- for _, dd := range drinks {
- key := dd.Name
- d2 := dd
- d2.Alcohol = !d2.Alcohol
-
-		if err := drinksTbl.set(key, &d2); err != nil {
- t.Fatalf("set() again failed for drink %s in file %s", key, kvdbfile)
- }
-
- var d3 drink
- if err := drinksTbl.get(key, &d3); err != nil {
- t.Fatalf("get() failed for drink %s in file %s", key, kvdbfile)
- }
- if !reflect.DeepEqual(d3, d2) {
- t.Fatalf("got wrong new data for drink %s in file %s: %#v instead of %#v", key, kvdbfile, d3, d2)
- }
- }
-
- db.flush()
-}
-
-func TestKVDBCompact(t *testing.T) {
- kvdbfile, db, usersTbl, drinksTbl := createTestDB(t)
- defer os.Remove(kvdbfile)
- defer db.close()
-
- initTestTables(t, usersTbl, drinksTbl, false)
-
- db.flush()
-
- // Make some NOP changes and flush the DB after each change to grow the file.
- for _, uu := range users {
- key := uu.Username
- if err := usersTbl.set(key, &uu); err != nil {
- t.Fatalf("set() NOP failed for user %s in file %s", key, kvdbfile)
- }
- db.flush()
- }
-
- for _, dd := range drinks {
- key := dd.Name
-		if err := drinksTbl.set(key, &dd); err != nil {
- t.Fatalf("set() NOP failed for drink %s in file %s", key, kvdbfile)
- }
- db.flush()
- }
-
- // Compact the DB file and verify that it contains the same data and has a smaller size.
- oldSize := fileSize(kvdbfile)
-
- db, tbls, err := db.compact(kvdbfile, []string{"users", "drinks"})
- if err != nil {
- t.Fatalf("compact() failed to create a smaller version of K/V DB file %s: %v", kvdbfile, err)
- }
- defer db.close()
-
- usersTbl, drinksTbl = tbls[0], tbls[1]
-
- newSize := fileSize(kvdbfile)
- if newSize >= oldSize {
- t.Errorf("compact() failed to shrink file %s: old size %d, new size %d", kvdbfile, oldSize, newSize)
- }
-
- checkTestTables(t, usersTbl, drinksTbl)
-
- // Verify that compact() rejects a filename without a basename.
- for _, badfile := range []string{"/", ".", ".."} {
- _, _, err := db.compact(badfile, []string{"users", "drinks"})
- if err == nil {
- t.Errorf("compact() did not fail when given the invalid filename: %s", badfile)
- }
- }
-}
-
-func TestKVDBDelete(t *testing.T) {
- kvdbfile, db, usersTbl, drinksTbl := createTestDB(t)
- defer os.Remove(kvdbfile)
- defer db.close()
-
- initTestTables(t, usersTbl, drinksTbl, false)
-
- db.flush()
-
- // Delete entries and verify that they no longer exist.
-
- for _, uu := range users {
- key := uu.Username
- if err := usersTbl.del(key); err != nil {
- t.Errorf("del() failed for user %s in file %s", key, kvdbfile)
- }
- if usersTbl.hasKey(key) {
- t.Errorf("hasKey() still finds deleted user %s in file %s", key, kvdbfile)
- }
- }
-
- for _, dd := range drinks {
- key := dd.Name
- if err := drinksTbl.del(key); err != nil {
- t.Errorf("del() failed for drink %s in file %s", key, kvdbfile)
- }
- if drinksTbl.hasKey(key) {
- t.Errorf("hasKey() still finds deleted drink %s in file %s", key, kvdbfile)
- }
- }
-
- db.flush()
-}
diff --git a/runtimes/google/vsync/replay_test.go b/runtimes/google/vsync/replay_test.go
deleted file mode 100644
index 1904f31..0000000
--- a/runtimes/google/vsync/replay_test.go
+++ /dev/null
@@ -1,239 +0,0 @@
-package vsync
-
-// Used to ease the setup of Veyron Sync test scenarios.
-// Parses a sync command file and returns a vector of commands to execute.
-//
-// Used by different test replay engines:
-// - dagReplayCommands() executes the parsed commands at the DAG API level.
-// - logReplayCommands() executes the parsed commands at the Log API level.
-
-import (
- "bufio"
- "encoding/binary"
- "fmt"
- "os"
- "strconv"
- "strings"
-
- "veyron/services/store/raw"
-
- "veyron2/storage"
-)
-
-const (
- addLocal = iota
- addRemote
- setDevTable
- linkLocal
- linkRemote
-)
-
-type syncCommand struct {
- cmd int
- objID storage.ID
- version raw.Version
- parents []raw.Version
- logrec string
- devID DeviceID
- genVec GenVector
- continued bool
- deleted bool
-}
-
-func strToObjID(objStr string) (storage.ID, error) {
- var objID storage.ID
- id, err := strconv.ParseUint(objStr, 10, 64)
- if err != nil {
- return objID, err
- }
- idbuf := make([]byte, len(objID))
- if binary.PutUvarint(idbuf, id) == 0 {
- return objID, fmt.Errorf("cannot store ID %d into a binary buffer", id)
- }
- for i := 0; i < len(objID); i++ {
- objID[i] = idbuf[i]
- }
- return objID, nil
-}
-
-func strToVersion(verStr string) (raw.Version, error) {
- ver, err := strconv.ParseUint(verStr, 10, 64)
- if err != nil {
- return 0, err
- }
- return raw.Version(ver), nil
-}
-
-func strToGenID(genIDStr string) (GenID, error) {
- id, err := strconv.ParseUint(genIDStr, 10, 64)
- if err != nil {
- return 0, err
- }
- return GenID(id), nil
-}
-
-func parseSyncCommands(file string) ([]syncCommand, error) {
- cmds := []syncCommand{}
- sf, err := os.Open("testdata/" + file)
- if err != nil {
- return nil, err
- }
- defer sf.Close()
-
- scanner := bufio.NewScanner(sf)
- lineno := 0
- for scanner.Scan() {
- lineno++
- line := strings.TrimSpace(scanner.Text())
- if line == "" || line[0] == '#' {
- continue
- }
-
- args := strings.Split(line, "|")
- nargs := len(args)
-
- switch args[0] {
- case "addl", "addr":
- expNargs := 8
- if nargs != expNargs {
- return nil, fmt.Errorf("%s:%d: need %d args instead of %d", file, lineno, expNargs, nargs)
- }
- version, err := strToVersion(args[2])
- if err != nil {
- return nil, fmt.Errorf("%s:%d: invalid version: %s", file, lineno, args[2])
- }
- var parents []raw.Version
- for i := 3; i <= 4; i++ {
- if args[i] != "" {
- pver, err := strToVersion(args[i])
- if err != nil {
- return nil, fmt.Errorf("%s:%d: invalid parent: %s", file, lineno, args[i])
- }
- parents = append(parents, pver)
- }
- }
-
- continued, err := strconv.ParseBool(args[6])
- if err != nil {
- return nil, fmt.Errorf("%s:%d: invalid continued bit: %s", file, lineno, args[6])
- }
- del, err := strconv.ParseBool(args[7])
- if err != nil {
- return nil, fmt.Errorf("%s:%d: invalid deleted bit: %s", file, lineno, args[7])
- }
- cmd := syncCommand{version: version, parents: parents, logrec: args[5], continued: continued, deleted: del}
- if args[0] == "addl" {
- cmd.cmd = addLocal
- } else {
- cmd.cmd = addRemote
- }
- if cmd.objID, err = strToObjID(args[1]); err != nil {
- return nil, fmt.Errorf("%s:%d: invalid object ID: %s", file, lineno, args[1])
- }
- cmds = append(cmds, cmd)
-
- case "setdev":
- expNargs := 3
- if nargs != expNargs {
- return nil, fmt.Errorf("%s:%d: need %d args instead of %d", file, lineno, expNargs, nargs)
- }
-
- genVec := make(GenVector)
- for _, elem := range strings.Split(args[2], ",") {
- kv := strings.Split(elem, ":")
- if len(kv) != 2 {
- return nil, fmt.Errorf("%s:%d: invalid gen vector key/val: %s", file, lineno, elem)
- }
- genID, err := strToGenID(kv[1])
- if err != nil {
- return nil, fmt.Errorf("%s:%d: invalid gen ID: %s", file, lineno, kv[1])
- }
- genVec[DeviceID(kv[0])] = genID
- }
-
- cmd := syncCommand{cmd: setDevTable, devID: DeviceID(args[1]), genVec: genVec}
- cmds = append(cmds, cmd)
-
- case "linkl", "linkr":
- expNargs := 6
- if nargs != expNargs {
- return nil, fmt.Errorf("%s:%d: need %d args instead of %d", file, lineno, expNargs, nargs)
- }
-
- version, err := strToVersion(args[2])
- if err != nil {
- return nil, fmt.Errorf("%s:%d: invalid version: %s", file, lineno, args[2])
- }
- if args[3] == "" {
- return nil, fmt.Errorf("%s:%d: parent (to-node) version not specified", file, lineno)
- }
- if args[4] != "" {
- return nil, fmt.Errorf("%s:%d: cannot specify a 2nd parent (to-node): %s", file, lineno, args[4])
- }
- parent, err := strToVersion(args[3])
- if err != nil {
- return nil, fmt.Errorf("%s:%d: invalid parent (to-node) version: %s", file, lineno, args[3])
- }
-
- cmd := syncCommand{version: version, parents: []raw.Version{parent}, logrec: args[5]}
- if args[0] == "linkl" {
- cmd.cmd = linkLocal
- } else {
- cmd.cmd = linkRemote
- }
- if cmd.objID, err = strToObjID(args[1]); err != nil {
- return nil, fmt.Errorf("%s:%d: invalid object ID: %s", file, lineno, args[1])
- }
- cmds = append(cmds, cmd)
-
- default:
- return nil, fmt.Errorf("%s:%d: invalid operation: %s", file, lineno, args[0])
- }
- }
-
- err = scanner.Err()
- return cmds, err
-}
-
-func dagReplayCommands(dag *dag, syncfile string) error {
- cmds, err := parseSyncCommands(syncfile)
- if err != nil {
- return err
- }
-
- for _, cmd := range cmds {
- switch cmd.cmd {
- case addLocal:
- err = dag.addNode(cmd.objID, cmd.version, false, cmd.deleted, cmd.parents, cmd.logrec, NoTxID)
- if err != nil {
- return fmt.Errorf("cannot add local node %d:%d to DAG: %v", cmd.objID, cmd.version, err)
- }
- if err := dag.moveHead(cmd.objID, cmd.version); err != nil {
- return fmt.Errorf("cannot move head to %d:%d in DAG: %v", cmd.objID, cmd.version, err)
- }
- dag.flush()
-
- case addRemote:
- err = dag.addNode(cmd.objID, cmd.version, true, cmd.deleted, cmd.parents, cmd.logrec, NoTxID)
- if err != nil {
- return fmt.Errorf("cannot add remote node %d:%d to DAG: %v", cmd.objID, cmd.version, err)
- }
- dag.flush()
-
- case linkLocal:
- if err = dag.addParent(cmd.objID, cmd.version, cmd.parents[0], false); err != nil {
- return fmt.Errorf("cannot add local parent %d to DAG node %d:%d: %v",
- cmd.parents[0], cmd.objID, cmd.version, err)
- }
- dag.flush()
-
- case linkRemote:
- if err = dag.addParent(cmd.objID, cmd.version, cmd.parents[0], true); err != nil {
- return fmt.Errorf("cannot add remote parent %d to DAG node %d:%d: %v",
- cmd.parents[0], cmd.objID, cmd.version, err)
- }
- dag.flush()
- }
- }
- return nil
-}
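
Taken together, the deleted file reads a command file, turns each line into a syncCommand, and replays it against a DAG. A minimal sketch of the flow, assuming a hypothetical testdata/example.sync (the file name and contents are illustrative, not part of this change):

	// testdata/example.sync (hypothetical):
	//   # <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
	//   addl|12345|1|||logrec-00|false|false
	//   addl|12345|2|1||logrec-01|false|false
	func replayExample(d *dag) error {
		// parseSyncCommands (called internally) yields one syncCommand per
		// data line; dagReplayCommands then applies each command at the
		// DAG API level (addNode, moveHead, addParent).
		return dagReplayCommands(d, "example.sync")
	}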
diff --git a/runtimes/google/vsync/sgtable.go b/runtimes/google/vsync/sgtable.go
deleted file mode 100644
index aa15b92..0000000
--- a/runtimes/google/vsync/sgtable.go
+++ /dev/null
@@ -1,495 +0,0 @@
-package vsync
-
-// The SyncGroup Table stores the group information in a K/V DB. It also
-// maintains an index to provide access by SyncGroup ID or name.
-//
-// The SyncGroup info is fetched from the SyncGroup server by the create or
-// join operations, and is regularly updated after that.
-//
-// The DB contains two tables persisted to disk (data, names) and two
-// in-memory (ephemeral) maps (members, peerSGs):
-// * data: one entry per SyncGroup ID containing the SyncGroup data
-// * names: one entry per SyncGroup name pointing to its SyncGroup ID
-// * members: an inverted index of SyncGroup members to SyncGroup IDs
-// built from the list of SyncGroup joiners
-// * peerSGs: an inverted index of SyncGroup RootOIDs to sets of peer
-// SyncGroup IDs, i.e. SyncGroups defined on the same root
-// path in the Store (RootOID)
-
-import (
- "errors"
- "fmt"
-
- "veyron/services/syncgroup"
-
- "veyron2/storage"
-)
-
-var (
- errBadSGTable = errors.New("invalid SyncGroup Table")
-)
-
-type syncGroupTable struct {
- fname string // file pathname
- store *kvdb // underlying K/V store
- sgData *kvtable // pointer to "data" table in the kvdb
- sgNames *kvtable // pointer to "names" table in the kvdb
- members map[string]*memberInfo // in-memory tracking of SyncGroup member info
- peerSGs map[storage.ID]sgSet // in-memory tracking of peer SyncGroups per RootOID
-}
-
-type syncGroupData struct {
- SrvInfo syncgroup.SyncGroupInfo // SyncGroup info from SyncGroupServer
- LocalPath string // local path of the SyncGroup in the Store
-}
-
-type memberInfo struct {
- gids map[syncgroup.ID]*memberMetaData // map of SyncGroup IDs joined and their metadata
-}
-
-type memberMetaData struct {
- metaData syncgroup.JoinerMetaData // joiner metadata at the SyncGroup server
- identity string // joiner security identity
-}
-
-type sgSet map[syncgroup.ID]struct{} // a set of SyncGroups
-
-// openSyncGroupTable opens or creates a syncGroupTable for the given filename.
-func openSyncGroupTable(filename string) (*syncGroupTable, error) {
- // Open the file and create it if it does not exist.
- // Also initialize the store and its tables.
- db, tbls, err := kvdbOpen(filename, []string{"data", "names"})
- if err != nil {
- return nil, err
- }
-
- s := &syncGroupTable{
- fname: filename,
- store: db,
- sgData: tbls[0],
- sgNames: tbls[1],
- members: make(map[string]*memberInfo),
- peerSGs: make(map[storage.ID]sgSet),
- }
-
- // Reconstruct the in-memory tracking maps by iterating over the SyncGroups.
- // This is needed when an existing SyncGroup Table file is re-opened.
- s.sgData.keyIter(func(gidStr string) {
- // Get the SyncGroup data given the group ID in string format (as the data table key).
- gid, err := syncgroup.ParseID(gidStr)
- if err != nil {
- return
- }
-
- data, err := s.getSyncGroupByID(gid)
- if err != nil {
- return
- }
-
- // Add all SyncGroup members to the members inverted index.
- s.addAllMembers(data)
-
- // Add the SyncGroup ID to its peer SyncGroup set based on the RootOID.
- s.addPeerSyncGroup(gid, data.SrvInfo.RootOID)
- })
-
- return s, nil
-}
-
-// close closes the syncGroupTable and invalidates its structure.
-func (s *syncGroupTable) close() {
- if s.store != nil {
- s.store.close() // this also closes the tables
- }
- *s = syncGroupTable{} // zero out the structure
-}
-
-// flush flushes the syncGroupTable store to disk.
-func (s *syncGroupTable) flush() {
- if s.store != nil {
- s.store.flush()
- }
-}
-
-// compact compacts the kvdb file of the syncGroupTable.
-func (s *syncGroupTable) compact() error {
- if s.store == nil {
- return errBadSGTable
- }
- db, tbls, err := s.store.compact(s.fname, []string{"data", "names"})
- if err != nil {
- return err
- }
- s.store = db
- s.sgData = tbls[0]
- s.sgNames = tbls[1]
- return nil
-}
-
-// addSyncGroup adds a new SyncGroup given its information.
-func (s *syncGroupTable) addSyncGroup(sgData *syncGroupData) error {
- if s.store == nil {
- return errBadSGTable
- }
- if sgData == nil {
- return errors.New("group information not specified")
- }
- gid, name := sgData.SrvInfo.SGOID, sgData.SrvInfo.Name
- if name == "" {
- return errors.New("group name not specified")
- }
- if len(sgData.SrvInfo.Joiners) == 0 {
- return errors.New("group has no joiners")
- }
-
- if s.hasSGDataEntry(gid) {
- return fmt.Errorf("group %d already exists", gid)
- }
- if s.hasSGNameEntry(name) {
- return fmt.Errorf("group name %s already exists", name)
- }
-
- // Add the group name and data entries.
- if err := s.setSGNameEntry(name, gid); err != nil {
- return err
- }
-
- if err := s.setSGDataEntry(gid, sgData); err != nil {
- s.delSGNameEntry(name)
- return err
- }
-
- s.addAllMembers(sgData)
- s.addPeerSyncGroup(gid, sgData.SrvInfo.RootOID)
- return nil
-}
-
-// getSyncGroupID retrieves the SyncGroup ID given its name.
-func (s *syncGroupTable) getSyncGroupID(name string) (syncgroup.ID, error) {
- return s.getSGNameEntry(name)
-}
-
-// getSyncGroupName retrieves the SyncGroup name given its ID.
-func (s *syncGroupTable) getSyncGroupName(gid syncgroup.ID) (string, error) {
- data, err := s.getSyncGroupByID(gid)
- if err != nil {
- return "", err
- }
-
- return data.SrvInfo.Name, nil
-}
-
-// getSyncGroupByID retrieves the SyncGroup given its ID.
-func (s *syncGroupTable) getSyncGroupByID(gid syncgroup.ID) (*syncGroupData, error) {
- return s.getSGDataEntry(gid)
-}
-
-// getSyncGroupByName retrieves the SyncGroup given its name.
-func (s *syncGroupTable) getSyncGroupByName(name string) (*syncGroupData, error) {
- gid, err := s.getSyncGroupID(name)
- if err != nil {
- return nil, err
- }
- return s.getSyncGroupByID(gid)
-}
-
-// updateSyncGroup updates the SyncGroup data.
-func (s *syncGroupTable) updateSyncGroup(data *syncGroupData) error {
- if s.store == nil {
- return errBadSGTable
- }
- if data == nil {
- return errors.New("SyncGroup data not specified")
- }
- if data.SrvInfo.Name == "" {
- return errors.New("group name not specified")
- }
- if len(data.SrvInfo.Joiners) == 0 {
- return errors.New("group has no joiners")
- }
-
- oldData, err := s.getSyncGroupByName(data.SrvInfo.Name)
- if err != nil {
- return err
- }
-
- if data.SrvInfo.SGOID != oldData.SrvInfo.SGOID {
- return fmt.Errorf("cannot change ID of SyncGroup name %s", data.SrvInfo.Name)
- }
- if data.SrvInfo.RootOID != oldData.SrvInfo.RootOID {
- return fmt.Errorf("cannot change root ID of SyncGroup name %s", data.SrvInfo.Name)
- }
-
- // Get the old set of SyncGroup joiners and diff it with the new set.
- // Add all the current members because this inserts the new members and
- // updates the metadata of the existing ones (addMember() is like a "put").
- // Delete the members that are no longer part of the SyncGroup.
- gid := oldData.SrvInfo.SGOID
- newJoiners, oldJoiners := data.SrvInfo.Joiners, oldData.SrvInfo.Joiners
-
- for member, memberData := range newJoiners {
- s.addMember(member.Name, gid, member.Identity, memberData)
- }
-
- for member := range oldJoiners {
- if _, ok := newJoiners[member]; !ok {
- s.delMember(member.Name, gid)
- }
- }
-
- return s.setSGDataEntry(gid, data)
-}
-
-// delSyncGroupByID deletes the SyncGroup given its ID.
-func (s *syncGroupTable) delSyncGroupByID(gid syncgroup.ID) error {
- data, err := s.getSyncGroupByID(gid)
- if err != nil {
- return err
- }
- if err = s.delSGNameEntry(data.SrvInfo.Name); err != nil {
- return err
- }
-
- s.delAllMembers(data)
- s.delPeerSyncGroup(gid, data.SrvInfo.RootOID)
- return s.delSGDataEntry(gid)
-}
-
-// delSyncGroupByName deletes the SyncGroup given its name.
-func (s *syncGroupTable) delSyncGroupByName(name string) error {
- gid, err := s.getSyncGroupID(name)
- if err != nil {
- return err
- }
-
- return s.delSyncGroupByID(gid)
-}
-
-// getMembers returns all SyncGroup members and the count of SyncGroups each one joined.
-func (s *syncGroupTable) getMembers() (map[string]uint32, error) {
- if s.store == nil {
- return nil, errBadSGTable
- }
-
- members := make(map[string]uint32)
- for member, info := range s.members {
- members[member] = uint32(len(info.gids))
- }
-
- return members, nil
-}
-
-// getMemberInfo returns SyncGroup information for a given member.
-func (s *syncGroupTable) getMemberInfo(member string) (*memberInfo, error) {
- if s.store == nil {
- return nil, errBadSGTable
- }
-
- info, ok := s.members[member]
- if !ok {
- return nil, fmt.Errorf("unknown member: %s", member)
- }
-
- return info, nil
-}
-
-// addMember inserts or updates a (member, group ID) entry in the in-memory
-// structure that indexes SyncGroup memberships based on member names and stores
-// in it the member's joiner metadata.
-func (s *syncGroupTable) addMember(member string, gid syncgroup.ID, identity string, metadata syncgroup.JoinerMetaData) {
- if s.store == nil {
- return
- }
-
- info, ok := s.members[member]
- if !ok {
- info = &memberInfo{gids: make(map[syncgroup.ID]*memberMetaData)}
- s.members[member] = info
- }
-
- info.gids[gid] = &memberMetaData{metaData: metadata, identity: identity}
-}
-
-// delMember removes a (member, group ID) entry from the in-memory structure
-// that indexes SyncGroup memberships based on member names.
-func (s *syncGroupTable) delMember(member string, gid syncgroup.ID) {
- if s.store == nil {
- return
- }
-
- info, ok := s.members[member]
- if !ok {
- return
- }
-
- delete(info.gids, gid)
- if len(info.gids) == 0 {
- delete(s.members, member)
- }
-}
-
-// addAllMembers inserts all members of a SyncGroup in the in-memory structure
-// that indexes SyncGroup memberships based on member names.
-func (s *syncGroupTable) addAllMembers(data *syncGroupData) {
- if s.store == nil || data == nil {
- return
- }
-
- gid := data.SrvInfo.SGOID
- for member, memberData := range data.SrvInfo.Joiners {
- s.addMember(member.Name, gid, member.Identity, memberData)
- }
-}
-
-// delAllMembers removes all members of a SyncGroup from the in-memory structure
-// that indexes SyncGroup memberships based on member names.
-func (s *syncGroupTable) delAllMembers(data *syncGroupData) {
- if s.store == nil || data == nil {
- return
- }
-
- gid := data.SrvInfo.SGOID
- for member := range data.SrvInfo.Joiners {
- s.delMember(member.Name, gid)
- }
-}
-
-// addPeerSyncGroup inserts the group ID into the in-memory set of peer SyncGroups
-// that use the same RootOID in the Store.
-func (s *syncGroupTable) addPeerSyncGroup(gid syncgroup.ID, rootOID storage.ID) {
- if s.store == nil {
- return
- }
-
- peers, ok := s.peerSGs[rootOID]
- if !ok {
- peers = make(sgSet)
- s.peerSGs[rootOID] = peers
- }
-
- peers[gid] = struct{}{}
-}
-
-// delPeerSyncGroup removes the group ID from the in-memory set of peer SyncGroups
-// that use the same RootOID in the Store.
-func (s *syncGroupTable) delPeerSyncGroup(gid syncgroup.ID, rootOID storage.ID) {
- if s.store == nil {
- return
- }
-
- peers, ok := s.peerSGs[rootOID]
- if !ok {
- return
- }
-
- delete(peers, gid)
- if len(peers) == 0 {
- delete(s.peerSGs, rootOID)
- }
-}
-
-// getPeerSyncGroups returns the set of peer SyncGroups for a given SyncGroup.
-// The given SyncGroup ID is included in that set.
-func (s *syncGroupTable) getPeerSyncGroups(gid syncgroup.ID) (sgSet, error) {
- if s.store == nil {
- return sgSet{}, errBadSGTable
- }
-
- data, err := s.getSyncGroupByID(gid)
- if err != nil {
- return sgSet{}, err
- }
-
- return s.peerSGs[data.SrvInfo.RootOID], nil
-}
-
-// Low-level functions to access the tables in the K/V DB.
-// They directly access the table entries without tracking their relationships.
-
-// sgDataKey returns the key used to access the SyncGroup data in the DB.
-func sgDataKey(gid syncgroup.ID) string {
- return gid.String()
-}
-
-// hasSGDataEntry returns true if the SyncGroup data entry exists in the DB.
-func (s *syncGroupTable) hasSGDataEntry(gid syncgroup.ID) bool {
- if s.store == nil {
- return false
- }
- key := sgDataKey(gid)
- return s.sgData.hasKey(key)
-}
-
-// setSGDataEntry stores the SyncGroup data in the DB.
-func (s *syncGroupTable) setSGDataEntry(gid syncgroup.ID, data *syncGroupData) error {
- if s.store == nil {
- return errBadSGTable
- }
- key := sgDataKey(gid)
- return s.sgData.set(key, data)
-}
-
-// getSGDataEntry retrieves from the DB the SyncGroup data for a given group ID.
-func (s *syncGroupTable) getSGDataEntry(gid syncgroup.ID) (*syncGroupData, error) {
- if s.store == nil {
- return nil, errBadSGTable
- }
- var data syncGroupData
- key := sgDataKey(gid)
- if err := s.sgData.get(key, &data); err != nil {
- return nil, err
- }
- return &data, nil
-}
-
-// delSGDataEntry deletes the SyncGroup data from the DB.
-func (s *syncGroupTable) delSGDataEntry(gid syncgroup.ID) error {
- if s.store == nil {
- return errBadSGTable
- }
- key := sgDataKey(gid)
- return s.sgData.del(key)
-}
-
-// sgNameKey returns the key used to access the SyncGroup name in the DB.
-func sgNameKey(name string) string {
- return name
-}
-
-// hasSGNameEntry returns true if the SyncGroup name entry exists in the DB.
-func (s *syncGroupTable) hasSGNameEntry(name string) bool {
- if s.store == nil {
- return false
- }
- key := sgNameKey(name)
- return s.sgNames.hasKey(key)
-}
-
-// setSGNameEntry stores the SyncGroup name to ID mapping in the DB.
-func (s *syncGroupTable) setSGNameEntry(name string, gid syncgroup.ID) error {
- if s.store == nil {
- return errBadSGTable
- }
- key := sgNameKey(name)
- return s.sgNames.set(key, gid)
-}
-
-// getSGNameEntry retrieves the SyncGroup name to ID mapping from the DB.
-func (s *syncGroupTable) getSGNameEntry(name string) (syncgroup.ID, error) {
- var gid syncgroup.ID
- if s.store == nil {
- return gid, errBadSGTable
- }
- key := sgNameKey(name)
- err := s.sgNames.get(key, &gid)
- return gid, err
-}
-
-// delSGNameEntry deletes the SyncGroup name to ID mapping from the DB.
-func (s *syncGroupTable) delSGNameEntry(name string) error {
- if s.store == nil {
- return errBadSGTable
- }
- key := sgNameKey(name)
- return s.sgNames.del(key)
-}
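
A minimal sketch of the table's lifecycle as implemented above, assuming it runs inside package vsync with fmt imported and reusing the strToObjID helper from replay_test.go (the path, IDs, and joiner are illustrative):

	func sgTableExample() error {
		sg, err := openSyncGroupTable("/tmp/example.sg")
		if err != nil {
			return err
		}
		defer sg.close()

		gid, err := syncgroup.ParseID("1234")
		if err != nil {
			return err
		}
		rootid, err := strToObjID("5678")
		if err != nil {
			return err
		}

		data := &syncGroupData{
			SrvInfo: syncgroup.SyncGroupInfo{
				SGOID:   gid,
				RootOID: rootid,
				Name:    "example-group",
				Joiners: map[syncgroup.NameIdentity]syncgroup.JoinerMetaData{
					syncgroup.NameIdentity{Name: "phone", Identity: "A"}: syncgroup.JoinerMetaData{SyncPriority: 10},
				},
			},
		}
		if err := sg.addSyncGroup(data); err != nil {
			return err
		}

		// Name and ID lookups resolve to the same record, and the members
		// inverted index now maps "phone" to one joined group.
		if _, err := sg.getSyncGroupByName("example-group"); err != nil {
			return err
		}
		members, err := sg.getMembers()
		if err != nil {
			return err
		}
		if members["phone"] != 1 {
			return fmt.Errorf("unexpected members index: %v", members)
		}

		sg.flush() // persist the data and names tables to disk
		return nil
	}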
diff --git a/runtimes/google/vsync/sgtable_test.go b/runtimes/google/vsync/sgtable_test.go
deleted file mode 100644
index 1f914b4..0000000
--- a/runtimes/google/vsync/sgtable_test.go
+++ /dev/null
@@ -1,894 +0,0 @@
-package vsync
-
-// Tests for the Veyron SyncGroup Table.
-
-import (
- "os"
- "reflect"
- "testing"
-
- "veyron/services/syncgroup"
-)
-
-// TestSyncGroupTableOpen tests the creation of a SyncGroup Table, closing and re-opening it.
-// It also verifies that its backing file is created and that a 2nd close is safe.
-func TestSyncGroupTableOpen(t *testing.T) {
- sgfile := getFileName()
- defer os.Remove(sgfile)
-
- sg, err := openSyncGroupTable(sgfile)
- if err != nil {
- t.Fatalf("cannot open new SyncGroup Table file %s", sgfile)
- }
-
- fsize := getFileSize(sgfile)
- if fsize < 0 {
- t.Fatalf("SyncGroup Table file %s not created", sgfile)
- }
-
- sg.flush()
- oldfsize := fsize
- fsize = getFileSize(sgfile)
- if fsize <= oldfsize {
- t.Fatalf("SyncGroup Table file %s not flushed", sgfile)
- }
-
- sg.close()
-
- sg, err = openSyncGroupTable(sgfile)
- if err != nil {
- t.Fatalf("cannot re-open existing SyncGroup Table file %s", sgfile)
- }
-
- oldfsize = fsize
- fsize = getFileSize(sgfile)
- if fsize != oldfsize {
- t.Fatalf("SyncGroup Table file %s size changed across re-open", sgfile)
- }
-
- sg.close()
- sg.close() // multiple closes should be a safe NOP
-
- fsize = getFileSize(sgfile)
- if fsize != oldfsize {
- t.Fatalf("SyncGroup Table file %s size changed across close", sgfile)
- }
-
- // Fail opening a SyncGroup Table in a non-existent directory.
- _, err = openSyncGroupTable("/not/really/there/junk.sg")
- if err == nil {
- t.Fatalf("openSyncGroupTable() did not fail when using a bad pathname")
- }
-}
-
-// TestInvalidSyncGroupTable tests using methods on an invalid (closed) SyncGroup Table.
-func TestInvalidSyncGroupTable(t *testing.T) {
- sgfile := getFileName()
- defer os.Remove(sgfile)
-
- sg, err := openSyncGroupTable(sgfile)
- if err != nil {
- t.Fatalf("cannot open new SyncGroup Table file %s", sgfile)
- }
-
- sg.close()
-
- sgid, err := syncgroup.ParseID("1234")
- if err != nil {
- t.Error(err)
- }
- rootid, err := strToObjID("5678")
- if err != nil {
- t.Fatal(err)
- }
-
- validateError := func(t *testing.T, err error, funcName string) {
- if err == nil || err.Error() != "invalid SyncGroup Table" {
- t.Errorf("%s() did not fail on a closed SyncGroup Table: %v", funcName, err)
- }
- }
-
- err = sg.compact()
- validateError(t, err, "compact")
-
- err = sg.addSyncGroup(&syncGroupData{})
- validateError(t, err, "addSyncGroup")
-
- _, err = sg.getSyncGroupID("foobar")
- validateError(t, err, "getSyncGroupID")
-
- _, err = sg.getSyncGroupName(sgid)
- validateError(t, err, "getSyncGroupName")
-
- _, err = sg.getSyncGroupByID(sgid)
- validateError(t, err, "getSyncGroupByID")
-
- _, err = sg.getSyncGroupByName("foobar")
- validateError(t, err, "getSyncGroupByName")
-
- err = sg.updateSyncGroup(&syncGroupData{})
- validateError(t, err, "updateSyncGroup")
-
- err = sg.delSyncGroupByID(sgid)
- validateError(t, err, "delSyncGroupByID")
-
- err = sg.delSyncGroupByName("foobar")
- validateError(t, err, "delSyncGroupByName")
-
- _, err = sg.getMembers()
- validateError(t, err, "getMembers")
-
- _, err = sg.getMemberInfo("foobar")
- validateError(t, err, "getMemberInfo")
-
- _, err = sg.getPeerSyncGroups(sgid)
- validateError(t, err, "getPeerSyncGroups")
-
- err = sg.setSGDataEntry(sgid, &syncGroupData{})
- validateError(t, err, "setSGDataEntry")
-
- _, err = sg.getSGDataEntry(sgid)
- validateError(t, err, "getSGDataEntry")
-
- err = sg.delSGDataEntry(sgid)
- validateError(t, err, "delSGDataEntry")
-
- err = sg.setSGNameEntry("foobar", sgid)
- validateError(t, err, "setSGNameEntry")
-
- _, err = sg.getSGNameEntry("foobar")
- validateError(t, err, "getSGNameEntry")
-
- err = sg.delSGNameEntry("foobar")
- validateError(t, err, "delSGNameEntry")
-
- // These calls should be harmless NOPs.
- sg.flush()
- sg.close()
- sg.addMember("foobar", sgid, "foobar identity", syncgroup.JoinerMetaData{})
- sg.delMember("foobar", sgid)
- sg.addAllMembers(&syncGroupData{})
- sg.delAllMembers(&syncGroupData{})
- sg.addPeerSyncGroup(sgid, rootid)
- sg.delPeerSyncGroup(sgid, rootid)
-
- if sg.hasSGDataEntry(sgid) {
- t.Errorf("hasSGDataEntry() found an entry on a closed SyncGroup Table")
- }
- if sg.hasSGNameEntry("foobar") {
- t.Errorf("hasSGNameEntry() found an entry on a closed SyncGroup Table")
- }
-}
-
-// TestAddSyncGroup tests adding SyncGroups.
-func TestAddSyncGroup(t *testing.T) {
- sgfile := getFileName()
- defer os.Remove(sgfile)
-
- sg, err := openSyncGroupTable(sgfile)
- if err != nil {
- t.Fatalf("cannot open new SyncGroup Table file %s", sgfile)
- }
-
- sgname := "foobar"
- sgid, err := syncgroup.ParseID("1234")
- if err != nil {
- t.Fatal(err)
- }
- rootid, err := strToObjID("5678")
- if err != nil {
- t.Fatal(err)
- }
-
- sgData := &syncGroupData{
- SrvInfo: syncgroup.SyncGroupInfo{
- SGOID: sgid,
- RootOID: rootid,
- Name: sgname,
- Joiners: map[syncgroup.NameIdentity]syncgroup.JoinerMetaData{
- syncgroup.NameIdentity{Name: "phone", Identity: "A"}: syncgroup.JoinerMetaData{SyncPriority: 10},
- syncgroup.NameIdentity{Name: "tablet", Identity: "B"}: syncgroup.JoinerMetaData{SyncPriority: 25},
- syncgroup.NameIdentity{Name: "cloud", Identity: "C"}: syncgroup.JoinerMetaData{SyncPriority: 1},
- },
- },
- }
-
- err = sg.addSyncGroup(sgData)
- if err != nil {
- t.Errorf("adding SyncGroup ID %d failed in SyncGroup Table file %s: %v", sgid, sgfile, err)
- }
-
- // Verify SyncGroup ID, name, and data.
- if id, err := sg.getSyncGroupID(sgname); err != nil || id != sgid {
- t.Errorf("cannot get back ID of SyncGroup %s: got ID %d instead of %d; err: %v", sgname, id, sgid, err)
- }
- if name, err := sg.getSyncGroupName(sgid); err != nil || name != sgname {
- t.Errorf("cannot get back name of SyncGroup ID %d: got %s instead of %s; err: %v", sgid, name, sgname, err)
- }
-
- data, err := sg.getSyncGroupByID(sgid)
- if err != nil {
- t.Errorf("cannot get SyncGroup by ID %d: %v", sgid, err)
- }
- if !reflect.DeepEqual(data, sgData) {
- t.Errorf("invalid SyncGroup data for group ID %d: got %v instead of %v", sgid, data, sgData)
- }
-
- data, err = sg.getSyncGroupByName(sgname)
- if err != nil {
- t.Errorf("cannot get SyncGroup by Name %s: %v", sgname, err)
- }
- if !reflect.DeepEqual(data, sgData) {
- t.Errorf("invalid SyncGroup data for group name %s: got %v instead of %v", sgname, data, sgData)
- }
-
- // Verify membership data.
- members, err := sg.getMembers()
- if err != nil {
- t.Errorf("cannot get all SyncGroup members: %v", err)
- }
- expMembers := map[string]uint32{"phone": 1, "tablet": 1, "cloud": 1}
- if !reflect.DeepEqual(members, expMembers) {
- t.Errorf("invalid SyncGroup members: got %v instead of %v", members, expMembers)
- }
-
- expMetaData := map[string]*memberMetaData{
- "phone": &memberMetaData{metaData: syncgroup.JoinerMetaData{SyncPriority: 10}, identity: "A"},
- "tablet": &memberMetaData{metaData: syncgroup.JoinerMetaData{SyncPriority: 25}, identity: "B"},
- "cloud": &memberMetaData{metaData: syncgroup.JoinerMetaData{SyncPriority: 1}, identity: "C"},
- }
- for mm := range members {
- info, err := sg.getMemberInfo(mm)
- if err != nil || info == nil {
- t.Errorf("cannot get info for SyncGroup member %s: info: %v, err: %v", mm, info, err)
- }
- if len(info.gids) != 1 {
- t.Errorf("invalid info for SyncGroup member %s: %v", mm, info)
- }
- expJoinerMetaData := expMetaData[mm]
- joinerMetaData := info.gids[sgid]
- if !reflect.DeepEqual(joinerMetaData, expJoinerMetaData) {
-			t.Errorf("invalid joiner metadata for SyncGroup member %s under group ID %d: got %v instead of %v",
- mm, sgid, joinerMetaData, expJoinerMetaData)
- }
- }
-
- // Use a non-existent member.
- if info, err := sg.getMemberInfo("should-not-be-there"); err == nil {
- t.Errorf("found info for invalid SyncGroup member: %v", info)
- }
-
- // Verify peer SyncGroup info.
- peers, err := sg.getPeerSyncGroups(sgid)
- if err != nil {
- t.Errorf("cannot get peer SyncGroups for group ID %d: %v", sgid, err)
- }
- expPeers := sgSet{sgid: struct{}{}}
- if !reflect.DeepEqual(peers, expPeers) {
- t.Errorf("invalid peer SyncGroups: got %v instead of %v", peers, expPeers)
- }
-
- // Use a non-existent group ID.
- xid, err := syncgroup.ParseID("7788998877")
- if err != nil {
- t.Fatal(err)
- }
- if peers, err := sg.getPeerSyncGroups(xid); err == nil {
- t.Errorf("found peer SyncGroup for invalid SyncGroup: %v", peers)
- }
-
- // Adding a SyncGroup for a pre-existing group ID or name should fail.
- err = sg.addSyncGroup(sgData)
- if err == nil {
- t.Errorf("re-adding SyncGroup %d did not fail", sgid)
- }
-
- sgData.SrvInfo.SGOID, err = syncgroup.ParseID("5555")
- if err != nil {
- t.Fatal(err)
- }
- err = sg.addSyncGroup(sgData)
- if err == nil {
- t.Errorf("adding SyncGroup %s with a different ID did not fail", sgname)
- }
-
- sg.close()
-}
-
-// TestInvalidAddSyncGroup tests error cases when adding SyncGroups.
-func TestInvalidAddSyncGroup(t *testing.T) {
- sgfile := getFileName()
- defer os.Remove(sgfile)
-
- sg, err := openSyncGroupTable(sgfile)
- if err != nil {
- t.Fatalf("cannot open new SyncGroup Table file %s", sgfile)
- }
-
- sgname := "foobar"
- sgid, err := syncgroup.ParseID("1234")
- if err != nil {
- t.Fatal(err)
- }
-
- err = sg.addSyncGroup(nil)
- if err == nil {
- t.Errorf("adding a nil SyncGroup did not fail in SyncGroup Table file %s", sgfile)
- }
-
- sgData := &syncGroupData{}
- sgData.SrvInfo.SGOID = sgid
- sgData.SrvInfo.RootOID, err = strToObjID("5678")
- if err != nil {
- t.Fatal(err)
- }
-
- err = sg.addSyncGroup(sgData)
- if err == nil {
- t.Errorf("adding a SyncGroup with an empty name did not fail in SyncGroup Table file %s", sgfile)
- }
-
- sgData.SrvInfo.Name = sgname
-
- err = sg.addSyncGroup(sgData)
- if err == nil {
- t.Errorf("adding a SyncGroup with no joiners did not fail in SyncGroup Table file %s", sgfile)
- }
-
- sg.close()
-}
-
-// TestUpdateSyncGroup tests updating a SyncGroup.
-func TestUpdateSyncGroup(t *testing.T) {
- sgfile := getFileName()
- defer os.Remove(sgfile)
-
- sg, err := openSyncGroupTable(sgfile)
- if err != nil {
- t.Fatalf("cannot open new SyncGroup Table file %s", sgfile)
- }
-
- err = sg.updateSyncGroup(nil)
- if err == nil {
- t.Errorf("updating a nil SyncGroup did not fail in SyncGroup Table file %s", sgfile)
- }
-
- sgData := &syncGroupData{}
- err = sg.updateSyncGroup(sgData)
- if err == nil {
- t.Errorf("updating a SyncGroup with an empty name did not fail in SyncGroup Table file %s", sgfile)
- }
-
- sgData.SrvInfo.Name = "blabla"
- err = sg.updateSyncGroup(sgData)
- if err == nil {
- t.Errorf("updating a SyncGroup with no joiners did not fail in SyncGroup Table file %s", sgfile)
- }
-
- sgData.SrvInfo.Joiners = map[syncgroup.NameIdentity]syncgroup.JoinerMetaData{
- syncgroup.NameIdentity{Name: "phone", Identity: "X"}: syncgroup.JoinerMetaData{SyncPriority: 10},
- }
- err = sg.updateSyncGroup(sgData)
- if err == nil {
- t.Errorf("updating a SyncGroup with a non-existing name did not fail in SyncGroup Table file %s", sgfile)
- }
-
- // Create the SyncGroup to update later.
- sgname := "foobar"
- sgid, err := syncgroup.ParseID("1234")
- if err != nil {
- t.Fatal(err)
- }
- rootid, err := strToObjID("5678")
- if err != nil {
- t.Fatal(err)
- }
-
- sgData = &syncGroupData{
- SrvInfo: syncgroup.SyncGroupInfo{
- SGOID: sgid,
- RootOID: rootid,
- Name: sgname,
- Joiners: map[syncgroup.NameIdentity]syncgroup.JoinerMetaData{
- syncgroup.NameIdentity{Name: "phone", Identity: "A"}: syncgroup.JoinerMetaData{SyncPriority: 10},
- syncgroup.NameIdentity{Name: "tablet", Identity: "B"}: syncgroup.JoinerMetaData{SyncPriority: 25},
- syncgroup.NameIdentity{Name: "cloud", Identity: "C"}: syncgroup.JoinerMetaData{SyncPriority: 1},
- },
- },
- }
-
- err = sg.addSyncGroup(sgData)
- if err != nil {
- t.Errorf("creating SyncGroup ID %d failed in SyncGroup Table file %s: %v", sgid, sgfile, err)
- }
-
- // Update it using different group or root IDs, which is not allowed.
- xid, err := syncgroup.ParseID("9999")
- if err != nil {
- t.Fatal(err)
- }
-
- sgData.SrvInfo.SGOID = xid
-
- err = sg.updateSyncGroup(sgData)
- if err == nil {
- t.Errorf("updating a SyncGroup with an ID mismatch did not fail in SyncGroup Table file %s", sgfile)
- }
-
- sgData.SrvInfo.SGOID = sgid
- sgData.SrvInfo.RootOID, err = strToObjID("9999")
- if err != nil {
- t.Fatal(err)
- }
-
- err = sg.updateSyncGroup(sgData)
- if err == nil {
- t.Errorf("updating a SyncGroup with a root ID mismatch did not fail in SyncGroup Table file %s", sgfile)
- }
-
- // Update it using a modified set of joiners.
- sgData.SrvInfo.RootOID = rootid
- sgData.SrvInfo.Joiners[syncgroup.NameIdentity{Name: "universe", Identity: "Y"}] = syncgroup.JoinerMetaData{SyncPriority: 0}
- delete(sgData.SrvInfo.Joiners, syncgroup.NameIdentity{Name: "cloud", Identity: "C"})
-
- err = sg.updateSyncGroup(sgData)
- if err != nil {
- t.Errorf("updating SyncGroup ID %d failed in SyncGroup Table file %s: %v", sgid, sgfile, err)
- }
-
- // Do some NOP member deletions (bad member, bad group ID).
- // SyncGroup verification (below) should see the expected info asserting these were NOPs.
- sg.delMember("blablablablabla", sgid)
- sg.delMember("phone", xid)
-
- // Verify updated SyncGroup.
- if id, err := sg.getSyncGroupID(sgname); err != nil || id != sgid {
- t.Errorf("cannot get back ID of updated SyncGroup %s: got ID %d instead of %d; err: %v", sgname, id, sgid, err)
- }
- if name, err := sg.getSyncGroupName(sgid); err != nil || name != sgname {
- t.Errorf("cannot get back name of updated SyncGroup ID %d: got %s instead of %s; err: %v", sgid, name, sgname, err)
- }
-
- expData := &syncGroupData{
- SrvInfo: syncgroup.SyncGroupInfo{
- SGOID: sgid,
- RootOID: rootid,
- Name: sgname,
- Joiners: map[syncgroup.NameIdentity]syncgroup.JoinerMetaData{
- syncgroup.NameIdentity{Name: "phone", Identity: "A"}: syncgroup.JoinerMetaData{SyncPriority: 10},
- syncgroup.NameIdentity{Name: "tablet", Identity: "B"}: syncgroup.JoinerMetaData{SyncPriority: 25},
- syncgroup.NameIdentity{Name: "universe", Identity: "Y"}: syncgroup.JoinerMetaData{SyncPriority: 0},
- },
- },
- }
-
- data, err := sg.getSyncGroupByID(sgid)
- if err != nil {
- t.Errorf("cannot get updated SyncGroup by ID %d: %v", sgid, err)
- }
- if !reflect.DeepEqual(data, expData) {
- t.Errorf("invalid SyncGroup data for updated group ID %d: got %v instead of %v", sgid, data, expData)
- }
-
- data, err = sg.getSyncGroupByName(sgname)
- if err != nil {
- t.Errorf("cannot get updated SyncGroup by Name %s: %v", sgname, err)
- }
- if !reflect.DeepEqual(data, expData) {
- t.Errorf("invalid SyncGroup data for updated group name %s: got %v instead of %v", sgname, data, expData)
- }
-
- // Verify membership data.
- members, err := sg.getMembers()
- if err != nil {
- t.Errorf("cannot get all SyncGroup members after update: %v", err)
- }
- expMembers := map[string]uint32{"phone": 1, "tablet": 1, "universe": 1}
- if !reflect.DeepEqual(members, expMembers) {
- t.Errorf("invalid SyncGroup members after update: got %v instead of %v", members, expMembers)
- }
-
- expMetaData := map[string]*memberMetaData{
- "phone": &memberMetaData{metaData: syncgroup.JoinerMetaData{SyncPriority: 10}, identity: "A"},
- "tablet": &memberMetaData{metaData: syncgroup.JoinerMetaData{SyncPriority: 25}, identity: "B"},
- "universe": &memberMetaData{metaData: syncgroup.JoinerMetaData{SyncPriority: 0}, identity: "Y"},
- }
- for mm := range members {
- info, err := sg.getMemberInfo(mm)
- if err != nil || info == nil {
- t.Errorf("cannot get info for SyncGroup member %s: info: %v, err: %v", mm, info, err)
- }
- if len(info.gids) != 1 {
- t.Errorf("invalid info for SyncGroup member %s: %v", mm, info)
- }
- expJoinerMetaData := expMetaData[mm]
- joinerMetaData := info.gids[sgid]
- if !reflect.DeepEqual(joinerMetaData, expJoinerMetaData) {
-			t.Errorf("invalid joiner metadata for SyncGroup member %s under group ID %d: got %v instead of %v",
- mm, sgid, joinerMetaData, expJoinerMetaData)
- }
- }
-
- // Verify peer SyncGroup info.
- peers, err := sg.getPeerSyncGroups(sgid)
- if err != nil {
- t.Errorf("cannot get peer SyncGroups for group ID %d after an update: %v", sgid, err)
- }
- expPeers := sgSet{sgid: struct{}{}}
- if !reflect.DeepEqual(peers, expPeers) {
- t.Errorf("invalid peer SyncGroups after an update: got %v instead of %v", peers, expPeers)
- }
-
- sg.close()
-}
-
-// TestDeleteSyncGroup tests deleting a SyncGroup.
-func TestDeleteSyncGroup(t *testing.T) {
- sgfile := getFileName()
- defer os.Remove(sgfile)
-
- sg, err := openSyncGroupTable(sgfile)
- if err != nil {
- t.Fatalf("cannot open new SyncGroup Table file %s", sgfile)
- }
-
- sgname := "foobar"
- sgid, err := syncgroup.ParseID("1234")
- if err != nil {
- t.Fatal(err)
- }
-
- // Delete non-existing SyncGroups.
- err = sg.delSyncGroupByID(sgid)
- if err == nil {
- t.Errorf("deleting a non-existing SyncGroup ID did not fail in SyncGroup Table file %s", sgfile)
- }
-
- err = sg.delSyncGroupByName(sgname)
- if err == nil {
- t.Errorf("deleting a non-existing SyncGroup name did not fail in SyncGroup Table file %s", sgfile)
- }
-
- // Create the SyncGroup to delete later.
- rootid, err := strToObjID("5678")
- if err != nil {
- t.Fatal(err)
- }
-
- sgData := &syncGroupData{
- SrvInfo: syncgroup.SyncGroupInfo{
- SGOID: sgid,
- RootOID: rootid,
- Name: sgname,
- Joiners: map[syncgroup.NameIdentity]syncgroup.JoinerMetaData{
- syncgroup.NameIdentity{Name: "phone", Identity: "A"}: syncgroup.JoinerMetaData{SyncPriority: 10},
- syncgroup.NameIdentity{Name: "tablet", Identity: "B"}: syncgroup.JoinerMetaData{SyncPriority: 25},
- syncgroup.NameIdentity{Name: "cloud", Identity: "C"}: syncgroup.JoinerMetaData{SyncPriority: 1},
- },
- },
- }
-
- err = sg.addSyncGroup(sgData)
- if err != nil {
- t.Errorf("creating SyncGroup ID %d failed in SyncGroup Table file %s: %v", sgid, sgfile, err)
- }
-
- // Delete it by ID.
- err = sg.delSyncGroupByID(sgid)
- if err != nil {
- t.Errorf("deleting SyncGroup ID %d failed in SyncGroup Table file %s: %v", sgid, sgfile, err)
- }
-
- // Create it again then delete it by name.
- err = sg.addSyncGroup(sgData)
- if err != nil {
- t.Errorf("creating SyncGroup ID %d failed in SyncGroup Table file %s: %v", sgid, sgfile, err)
- }
-
- err = sg.delSyncGroupByName(sgname)
- if err != nil {
- t.Errorf("deleting SyncGroup name %s failed in SyncGroup Table file %s: %v", sgname, sgfile, err)
- }
-
- // Verify peer SyncGroup info.
- if peers, err := sg.getPeerSyncGroups(sgid); err == nil {
-		t.Errorf("got peer SyncGroups for deleted group ID %d: %v", sgid, peers)
- }
-
- sg.close()
-}
-
-// TestSyncGroupTableCompact tests compacting the SyncGroup Table K/V DB file.
-func TestSyncGroupTableCompact(t *testing.T) {
- sgfile := getFileName()
- defer os.Remove(sgfile)
-
- sg, err := openSyncGroupTable(sgfile)
- if err != nil {
- t.Fatalf("cannot open new SyncGroup Table file %s", sgfile)
- }
-
- sgname := "foobar"
- sgid, err := syncgroup.ParseID("1234")
- if err != nil {
- t.Fatal(err)
- }
- rootid, err := strToObjID("5678")
- if err != nil {
- t.Fatal(err)
- }
-
- // Add a SyncGroup and use flushes to increase the K/V DB file size.
- sgData := &syncGroupData{
- SrvInfo: syncgroup.SyncGroupInfo{
- SGOID: sgid,
- RootOID: rootid,
- Name: sgname,
- Joiners: map[syncgroup.NameIdentity]syncgroup.JoinerMetaData{
- syncgroup.NameIdentity{Name: "phone", Identity: "A"}: syncgroup.JoinerMetaData{SyncPriority: 10},
- syncgroup.NameIdentity{Name: "tablet", Identity: "B"}: syncgroup.JoinerMetaData{SyncPriority: 25},
- syncgroup.NameIdentity{Name: "cloud", Identity: "C"}: syncgroup.JoinerMetaData{SyncPriority: 1},
- },
- },
- }
-
- sg.flush()
-
- err = sg.addSyncGroup(sgData)
- if err != nil {
- t.Errorf("creating SyncGroup ID %d failed in SyncGroup Table file %s: %v", sgid, sgfile, err)
- }
-
- sg.flush()
-
- // Verify SyncGroup and membership info after 2 steps: close/reopen, and compact.
-
- for i := 0; i < 2; i++ {
- switch i {
- case 0:
- sg.close()
- sg, err = openSyncGroupTable(sgfile)
- if err != nil {
- t.Fatalf("cannot re-open SyncGroup Table file %s", sgfile)
- }
-
- case 1:
- if err = sg.compact(); err != nil {
- t.Fatalf("cannot compact SyncGroup Table file %s", sgfile)
- }
- }
-
- // Verify SyncGroup data.
- data, err := sg.getSyncGroupByID(sgid)
- if err != nil {
- t.Errorf("cannot get SyncGroup ID %d (iter %d) in SyncGroup Table file %s: %v", sgid, i, sgfile, err)
- }
- if !reflect.DeepEqual(data, sgData) {
- t.Errorf("invalid SyncGroup data for ID %d (iter %d): got %v instead of %v", sgid, i, data, sgData)
- }
-
- data, err = sg.getSyncGroupByName(sgname)
- if err != nil {
- t.Errorf("cannot get SyncGroup name %s (iter %d) in SyncGroup Table file %s: %v", sgname, i, sgfile, err)
- }
- if !reflect.DeepEqual(data, sgData) {
- t.Errorf("invalid SyncGroup data for name %s (iter %d): got %v instead of %v", sgname, i, data, sgData)
- }
-
- // Verify membership data.
- members, err := sg.getMembers()
- if err != nil {
- t.Errorf("cannot get all SyncGroup members (iter %d): %v", i, err)
- }
- expMembers := map[string]uint32{"phone": 1, "tablet": 1, "cloud": 1}
- if !reflect.DeepEqual(members, expMembers) {
- t.Errorf("invalid SyncGroup members (iter %d): got %v instead of %v", i, members, expMembers)
- }
-
- expMetaData := map[string]*memberMetaData{
- "phone": &memberMetaData{metaData: syncgroup.JoinerMetaData{SyncPriority: 10}, identity: "A"},
- "tablet": &memberMetaData{metaData: syncgroup.JoinerMetaData{SyncPriority: 25}, identity: "B"},
- "cloud": &memberMetaData{metaData: syncgroup.JoinerMetaData{SyncPriority: 1}, identity: "C"},
- }
- for mm := range members {
- info, err := sg.getMemberInfo(mm)
- if err != nil || info == nil {
- t.Errorf("cannot get info for SyncGroup member %s (iter %d): info: %v, err: %v", mm, i, info, err)
- }
- if len(info.gids) != 1 {
- t.Errorf("invalid info for SyncGroup member %s (iter %d): %v", mm, i, info)
- }
- expJoinerMetaData := expMetaData[mm]
- joinerMetaData := info.gids[sgid]
- if !reflect.DeepEqual(joinerMetaData, expJoinerMetaData) {
-				t.Errorf("invalid joiner metadata for SyncGroup member %s (iter %d) in group ID %d: got %v instead of %v",
- mm, i, sgid, joinerMetaData, expJoinerMetaData)
- }
- }
- }
-
- sg.close()
-}
-
-// TestPeerSyncGroups tests creating peer SyncGroups on the same root OID.
-func TestPeerSyncGroups(t *testing.T) {
- sgfile := getFileName()
- defer os.Remove(sgfile)
-
- sg, err := openSyncGroupTable(sgfile)
- if err != nil {
- t.Fatalf("cannot open new SyncGroup Table file %s", sgfile)
- }
-
- sgname1, sgname2 := "foo", "bar"
- sgid1, err := syncgroup.ParseID("1234")
- if err != nil {
- t.Fatal(err)
- }
- sgid2, err := syncgroup.ParseID("8888")
- if err != nil {
- t.Fatal(err)
- }
- rootid, err := strToObjID("5678")
- if err != nil {
- t.Fatal(err)
- }
-
- // Add both SyncGroups using the same root OID.
- sgData1 := &syncGroupData{
- SrvInfo: syncgroup.SyncGroupInfo{
- SGOID: sgid1,
- RootOID: rootid,
- Name: sgname1,
- Joiners: map[syncgroup.NameIdentity]syncgroup.JoinerMetaData{
- syncgroup.NameIdentity{Name: "phone", Identity: "A"}: syncgroup.JoinerMetaData{SyncPriority: 10},
- syncgroup.NameIdentity{Name: "tablet", Identity: "B"}: syncgroup.JoinerMetaData{SyncPriority: 25},
- syncgroup.NameIdentity{Name: "cloud", Identity: "C"}: syncgroup.JoinerMetaData{SyncPriority: 1},
- },
- },
- }
-
- sgData2 := &syncGroupData{
- SrvInfo: syncgroup.SyncGroupInfo{
- SGOID: sgid2,
- RootOID: rootid,
- Name: sgname2,
- Joiners: map[syncgroup.NameIdentity]syncgroup.JoinerMetaData{
- syncgroup.NameIdentity{Name: "tablet", Identity: "B"}: syncgroup.JoinerMetaData{SyncPriority: 111},
- syncgroup.NameIdentity{Name: "door", Identity: "X"}: syncgroup.JoinerMetaData{SyncPriority: 33},
- syncgroup.NameIdentity{Name: "lamp", Identity: "Z"}: syncgroup.JoinerMetaData{SyncPriority: 9},
- },
- },
- }
-
- err = sg.addSyncGroup(sgData1)
- if err != nil {
- t.Errorf("creating SyncGroup ID %d failed in SyncGroup Table file %s: %v", sgid1, sgfile, err)
- }
- err = sg.addSyncGroup(sgData2)
- if err != nil {
- t.Errorf("creating SyncGroup ID %d failed in SyncGroup Table file %s: %v", sgid2, sgfile, err)
- }
-
- // Verify peer SyncGroup info.
- expPeers := sgSet{sgid1: struct{}{}, sgid2: struct{}{}}
-
- for _, sgid := range []syncgroup.ID{sgid1, sgid2} {
- peers, err := sg.getPeerSyncGroups(sgid)
- if err != nil {
- t.Errorf("cannot get peer SyncGroups for group ID %d: %v", sgid, err)
- }
- if !reflect.DeepEqual(peers, expPeers) {
-			t.Errorf("invalid peer SyncGroups for group ID %d: got %v instead of %v", sgid, peers, expPeers)
- }
- }
-
- // Verify SyncGroup membership data.
- members, err := sg.getMembers()
- if err != nil {
- t.Errorf("cannot get all SyncGroup members: %v", err)
- }
-
- expMembers := map[string]uint32{"phone": 1, "tablet": 2, "cloud": 1, "door": 1, "lamp": 1}
- if !reflect.DeepEqual(members, expMembers) {
- t.Errorf("invalid SyncGroup members: got %v instead of %v", members, expMembers)
- }
-
- expMemberInfo := map[string]*memberInfo{
- "phone": &memberInfo{
- gids: map[syncgroup.ID]*memberMetaData{
- sgid1: &memberMetaData{metaData: syncgroup.JoinerMetaData{SyncPriority: 10}, identity: "A"},
- },
- },
- "tablet": &memberInfo{
- gids: map[syncgroup.ID]*memberMetaData{
- sgid1: &memberMetaData{metaData: syncgroup.JoinerMetaData{SyncPriority: 25}, identity: "B"},
- sgid2: &memberMetaData{metaData: syncgroup.JoinerMetaData{SyncPriority: 111}, identity: "B"},
- },
- },
- "cloud": &memberInfo{
- gids: map[syncgroup.ID]*memberMetaData{
- sgid1: &memberMetaData{metaData: syncgroup.JoinerMetaData{SyncPriority: 1}, identity: "C"},
- },
- },
- "door": &memberInfo{
- gids: map[syncgroup.ID]*memberMetaData{
- sgid2: &memberMetaData{metaData: syncgroup.JoinerMetaData{SyncPriority: 33}, identity: "X"},
- },
- },
- "lamp": &memberInfo{
- gids: map[syncgroup.ID]*memberMetaData{
- sgid2: &memberMetaData{metaData: syncgroup.JoinerMetaData{SyncPriority: 9}, identity: "Z"},
- },
- },
- }
-
- for mm := range members {
- info, err := sg.getMemberInfo(mm)
- if err != nil || info == nil {
- t.Errorf("cannot get info for SyncGroup member %s: info: %v, err: %v", mm, info, err)
- }
- expInfo := expMemberInfo[mm]
- if !reflect.DeepEqual(info, expInfo) {
- t.Errorf("invalid info for SyncGroup member %s: got %v instead of %v", mm, info, expInfo)
- }
- }
-
- // Delete the 1st SyncGroup.
- err = sg.delSyncGroupByID(sgid1)
- if err != nil {
- t.Errorf("deleting SyncGroup ID %d failed in SyncGroup Table file %s: %v", sgid1, sgfile, err)
- }
-
- // Verify peer SyncGroup info.
- expPeers = sgSet{sgid2: struct{}{}}
- peers, err := sg.getPeerSyncGroups(sgid2)
- if err != nil {
- t.Errorf("cannot get peer SyncGroups for group ID %d: %v", sgid2, err)
- }
- if !reflect.DeepEqual(peers, expPeers) {
-		t.Errorf("invalid peer SyncGroups for group ID %d: got %v instead of %v", sgid2, peers, expPeers)
- }
-
- // Verify SyncGroup membership data.
- members, err = sg.getMembers()
- if err != nil {
- t.Errorf("cannot get all SyncGroup members: %v", err)
- }
-
- expMembers = map[string]uint32{"tablet": 1, "door": 1, "lamp": 1}
- if !reflect.DeepEqual(members, expMembers) {
- t.Errorf("invalid SyncGroup members: got %v instead of %v", members, expMembers)
- }
-
- expMemberInfo = map[string]*memberInfo{
- "tablet": &memberInfo{
- gids: map[syncgroup.ID]*memberMetaData{
- sgid2: &memberMetaData{metaData: syncgroup.JoinerMetaData{SyncPriority: 111}, identity: "B"},
- },
- },
- "door": &memberInfo{
- gids: map[syncgroup.ID]*memberMetaData{
- sgid2: &memberMetaData{metaData: syncgroup.JoinerMetaData{SyncPriority: 33}, identity: "X"},
- },
- },
- "lamp": &memberInfo{
- gids: map[syncgroup.ID]*memberMetaData{
- sgid2: &memberMetaData{metaData: syncgroup.JoinerMetaData{SyncPriority: 9}, identity: "Z"},
- },
- },
- }
-
- for mm := range members {
- info, err := sg.getMemberInfo(mm)
- if err != nil || info == nil {
- t.Errorf("cannot get info for SyncGroup member %s: info: %v, err: %v", mm, info, err)
- }
- expInfo := expMemberInfo[mm]
- if !reflect.DeepEqual(info, expInfo) {
- t.Errorf("invalid info for SyncGroup member %s: got %v instead of %v", mm, info, expInfo)
- }
- }
-
- sg.close()
-}
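
The peer-group bookkeeping that the last test verifies through the public API reduces to a short sketch over the in-memory peerSGs index (IDs illustrative, same package assumptions as above):

	func peerExample(s *syncGroupTable, root storage.ID) {
		gid1, _ := syncgroup.ParseID("1111")
		gid2, _ := syncgroup.ParseID("2222")
		s.addPeerSyncGroup(gid1, root) // peerSGs[root] == {gid1}
		s.addPeerSyncGroup(gid2, root) // peerSGs[root] == {gid1, gid2}
		s.delPeerSyncGroup(gid1, root) // peerSGs[root] == {gid2}
		s.delPeerSyncGroup(gid2, root) // set now empty: root entry removed
	}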
diff --git a/runtimes/google/vsync/testdata/local-init-00.log.sync b/runtimes/google/vsync/testdata/local-init-00.log.sync
deleted file mode 100644
index 3eb51de..0000000
--- a/runtimes/google/vsync/testdata/local-init-00.log.sync
+++ /dev/null
@@ -1,6 +0,0 @@
-# Create an object locally and update it twice (linked-list).
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-
-addl|12345|1|||logrec-00|false|false
-addl|12345|2|1||logrec-01|false|false
-addl|12345|3|2||logrec-02|false|false
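
Read against the parser in replay_test.go above, the first data line of this file decodes into a syncCommand roughly as follows (a sketch; the error from strToObjID is dropped for brevity):

	// "addl|12345|1|||logrec-00|false|false" parses to:
	objID, _ := strToObjID("12345") // decimal string uvarint-encoded into a storage.ID
	cmd := syncCommand{
		cmd:       addLocal, // "addl"; an "addr" line would map to addRemote
		objID:     objID,
		version:   raw.Version(1),
		parents:   nil, // both parent fields are empty
		logrec:    "logrec-00",
		continued: false,
		deleted:   false,
	}
	_ = cmd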
diff --git a/runtimes/google/vsync/testdata/local-init-00.sync b/runtimes/google/vsync/testdata/local-init-00.sync
deleted file mode 100644
index d1dace2..0000000
--- a/runtimes/google/vsync/testdata/local-init-00.sync
+++ /dev/null
@@ -1,6 +0,0 @@
-# Create an object locally and update it twice (linked-list).
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-
-addl|12345|0|||logrec-00|false|false
-addl|12345|1|0||logrec-01|false|false
-addl|12345|2|1||logrec-02|false|false
diff --git a/runtimes/google/vsync/testdata/local-init-01.log.sync b/runtimes/google/vsync/testdata/local-init-01.log.sync
deleted file mode 100644
index 578f795..0000000
--- a/runtimes/google/vsync/testdata/local-init-01.log.sync
+++ /dev/null
@@ -1,9 +0,0 @@
-# Create two objects locally; update one and delete the other.
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-
-addl|123|1|||logrec-00|false|false
-addl|123|2|1||logrec-01|false|false
-addl|123|3|2||logrec-02|false|false
-
-addl|456|1|||logrec-00|false|false
-addl|456|0|1||logrec-00|false|true
\ No newline at end of file
diff --git a/runtimes/google/vsync/testdata/local-init-01.sync b/runtimes/google/vsync/testdata/local-init-01.sync
deleted file mode 100644
index 525dd09..0000000
--- a/runtimes/google/vsync/testdata/local-init-01.sync
+++ /dev/null
@@ -1,12 +0,0 @@
-# Create an object DAG locally with branches and resolved conflicts.
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-
-addl|12345|0|||logrec-00|false|false
-addl|12345|1|0||logrec-01|false|false
-addl|12345|2|1||logrec-02|false|false
-addl|12345|3|1||logrec-03|false|false
-addl|12345|4|2|3|logrec-04|false|false
-addl|12345|5|4||logrec-05|false|false
-addl|12345|6|1||logrec-06|false|false
-addl|12345|7|5|6|logrec-07|false|false
-addl|12345|8|7||logrec-08|false|false
diff --git a/runtimes/google/vsync/testdata/local-init-02.sync b/runtimes/google/vsync/testdata/local-init-02.sync
deleted file mode 100644
index 70b1319..0000000
--- a/runtimes/google/vsync/testdata/local-init-02.sync
+++ /dev/null
@@ -1,10 +0,0 @@
-# Create DAGs for 3 objects locally.
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-
-addl|12345|1|||logrec-a-01|false|false
-addl|12345|2|1||logrec-a-02|false|false
-
-addl|67890|1|||logrec-b-01|false|false
-addl|67890|2|1||logrec-b-02|false|false
-
-addl|222|1|||logrec-c-01|false|false
diff --git a/runtimes/google/vsync/testdata/local-init-03.sync b/runtimes/google/vsync/testdata/local-init-03.sync
deleted file mode 100644
index 3b3fba3..0000000
--- a/runtimes/google/vsync/testdata/local-init-03.sync
+++ /dev/null
@@ -1,10 +0,0 @@
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-
-addl|12345|1|||logrec-01|false|false
-addl|12345|2|1||logrec-02|false|false
-addl|12345|3|1||logrec-03|false|false
-addl|12345|4|2||logrec-04|false|false
-addl|12345|5|2||logrec-05|false|true
-addl|12345|6|4|5|logrec-06|false|false
-addl|12345|7|3|5|logrec-07|false|false
-addl|12345|8|6|7|logrec-08|false|false
diff --git a/runtimes/google/vsync/testdata/local-resolve-00.sync b/runtimes/google/vsync/testdata/local-resolve-00.sync
deleted file mode 100644
index 02e1c88..0000000
--- a/runtimes/google/vsync/testdata/local-resolve-00.sync
+++ /dev/null
@@ -1,4 +0,0 @@
-# Add a local node that resolves a conflict (merge of parent versions 2 and 5).
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-
-addl|12345|6|2|5|logrec-06|false|false
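
Both parent fields of this line are set, so it replays as a merge: version 6 is added with parents 2 and 5, closing the branch that the remote-conf files open. A sketch of the DAG calls it turns into (argument order follows dagReplayCommands above):

	func resolveExample(d *dag, objID storage.ID) error {
		// addl|12345|6|2|5|logrec-06|false|false replays roughly as:
		if err := d.addNode(objID, raw.Version(6), false /* local, not remote */, false /* not deleted */,
			[]raw.Version{2, 5}, "logrec-06", NoTxID); err != nil {
			return err
		}
		return d.moveHead(objID, raw.Version(6)) // local adds also move the head
	}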
diff --git a/runtimes/google/vsync/testdata/remote-2obj-del.log.sync b/runtimes/google/vsync/testdata/remote-2obj-del.log.sync
deleted file mode 100644
index 403da95..0000000
--- a/runtimes/google/vsync/testdata/remote-2obj-del.log.sync
+++ /dev/null
@@ -1,7 +0,0 @@
-# Update one object and delete another object remotely.
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-
-addr|123|4|3||VeyronPhone:1:0|false|false
-addr|123|5|4||VeyronPhone:1:1|false|false
-addr|123|6|5||VeyronPhone:1:2|false|true
-addr|456|2|1||VeyronPhone:1:3|false|false
\ No newline at end of file
diff --git a/runtimes/google/vsync/testdata/remote-conf-00.log.sync b/runtimes/google/vsync/testdata/remote-conf-00.log.sync
deleted file mode 100644
index a391c14..0000000
--- a/runtimes/google/vsync/testdata/remote-conf-00.log.sync
+++ /dev/null
@@ -1,8 +0,0 @@
-# Update an object remotely three times triggering one conflict after
-# it was created locally up to v3 (i.e. assume the remote sync received
-# it from the local sync at v2, then updated separately).
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-
-addr|12345|4|2||VeyronPhone:1:0|false|false
-addr|12345|5|4||VeyronPhone:1:1|false|false
-addr|12345|6|5||VeyronPhone:1:2|false|false
diff --git a/runtimes/google/vsync/testdata/remote-conf-00.sync b/runtimes/google/vsync/testdata/remote-conf-00.sync
deleted file mode 100644
index 7e555a6..0000000
--- a/runtimes/google/vsync/testdata/remote-conf-00.sync
+++ /dev/null
@@ -1,8 +0,0 @@
-# Update an object remotely three times triggering one conflict after
-# it was created locally up to v2 (i.e. assume the remote sync received
-# it from the local sync at v1, then updated separately).
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-
-addr|12345|3|1||logrec-03|false|false
-addr|12345|4|3||logrec-04|false|false
-addr|12345|5|4||logrec-05|false|false
diff --git a/runtimes/google/vsync/testdata/remote-conf-01.log.sync b/runtimes/google/vsync/testdata/remote-conf-01.log.sync
deleted file mode 100644
index 63117bf..0000000
--- a/runtimes/google/vsync/testdata/remote-conf-01.log.sync
+++ /dev/null
@@ -1,11 +0,0 @@
-# Update an object remotely three times triggering a conflict with
-# 2 graft points: v1 and v4. This assumes that the remote sync got
-# v1, made its own conflicting v4 that it resolved into v5 (against v2)
-# then made a v6 change. When the local sync gets back this info it
-# sees 2 graft points: v1-v4 and v2-v5.
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-
-addr|12345|4|1||VeyronLaptop:1:0|false|false
-addr|12345|5|2|4|VeyronPhone:1:0|false|false
-addr|12345|6|5||VeyronPhone:1:1|false|false
-
diff --git a/runtimes/google/vsync/testdata/remote-conf-01.sync b/runtimes/google/vsync/testdata/remote-conf-01.sync
deleted file mode 100644
index 4655a1e..0000000
--- a/runtimes/google/vsync/testdata/remote-conf-01.sync
+++ /dev/null
@@ -1,10 +0,0 @@
-# Update an object remotely three times triggering a conflict with
-# 2 graft points: v0 and v2. This assumes that the remote sync got
-# v0, made its own conflicting v3 that it resolved into v4 (against v1)
-# then made a v5 change. When the local sync gets back this info it
-# sees 2 graft points: v0-v3 and v1-v4.
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-
-addr|12345|3|0||logrec-03|false|false
-addr|12345|4|1|3|logrec-04|false|false
-addr|12345|5|4||logrec-05|false|false
diff --git a/runtimes/google/vsync/testdata/remote-conf-link.log.sync b/runtimes/google/vsync/testdata/remote-conf-link.log.sync
deleted file mode 100644
index e6093a2..0000000
--- a/runtimes/google/vsync/testdata/remote-conf-link.log.sync
+++ /dev/null
@@ -1,5 +0,0 @@
-# Update an object remotely, detect conflict, and bless the local version.
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-
-addr|12345|4|1||VeyronPhone:1:0|false|false
-linkr|12345|4|2||VeyronPhone:1:1
diff --git a/runtimes/google/vsync/testdata/remote-init-00.log.sync b/runtimes/google/vsync/testdata/remote-init-00.log.sync
deleted file mode 100644
index 5e12809..0000000
--- a/runtimes/google/vsync/testdata/remote-init-00.log.sync
+++ /dev/null
@@ -1,6 +0,0 @@
-# Create an object remotely and update it twice (linked-list).
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-
-addr|12345|1|||VeyronPhone:1:0|false|false
-addr|12345|2|1||VeyronPhone:1:1|false|false
-addr|12345|3|2||VeyronPhone:1:2|false|false
diff --git a/runtimes/google/vsync/testdata/remote-init-00.sync b/runtimes/google/vsync/testdata/remote-init-00.sync
deleted file mode 100644
index 7f2189a..0000000
--- a/runtimes/google/vsync/testdata/remote-init-00.sync
+++ /dev/null
@@ -1,6 +0,0 @@
-# Create an object remotely and update it twice (linked-list).
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-
-addr|12345|0|||logrec-00|false|false
-addr|12345|1|0||logrec-01|false|false
-addr|12345|2|1||logrec-02|false|false
diff --git a/runtimes/google/vsync/testdata/remote-init-01.log.sync b/runtimes/google/vsync/testdata/remote-init-01.log.sync
deleted file mode 100644
index ad022b4..0000000
--- a/runtimes/google/vsync/testdata/remote-init-01.log.sync
+++ /dev/null
@@ -1,6 +0,0 @@
-# Create an object remotely and update it twice (linked-list).
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-
-addr|12345|1|||VeyronPhone:5:0|false|false
-addr|12345|2|1||VeyronPhone:5:1|false|false
-addr|12345|3|2||VeyronPhone:5:2|false|false
diff --git a/runtimes/google/vsync/testdata/remote-init-02.log.sync b/runtimes/google/vsync/testdata/remote-init-02.log.sync
deleted file mode 100644
index 8885949..0000000
--- a/runtimes/google/vsync/testdata/remote-init-02.log.sync
+++ /dev/null
@@ -1,17 +0,0 @@
-# Create objects and transactions remotely.
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-
-addr|123|1|||VeyronPhone:1:0|false|false
-
-addr|123|2|1||VeyronPhone:1:1|true|false
-addr|456|1|||VeyronPhone:1:2|true|false
-addr|789|1|||VeyronPhone:1:3|false|false
-
-addr|789|2|1||VeyronPhone:1:4|false|false
-
-addr|789|3|1||VeyronTab:1:0|false|false
-
-addr|789|4|2|3|VeyronPhone:2:0|false|false
-
-addr|123|3|2||VeyronPhone:2:1|true|false
-addr|456|2|1||VeyronPhone:2:2|false|false
diff --git a/runtimes/google/vsync/testdata/remote-init-03.log.sync b/runtimes/google/vsync/testdata/remote-init-03.log.sync
deleted file mode 100644
index 85083c7..0000000
--- a/runtimes/google/vsync/testdata/remote-init-03.log.sync
+++ /dev/null
@@ -1,6 +0,0 @@
-# Create an object remotely and delete it.
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-
-addr|12345|1|||VeyronPhone:1:0|false|false
-addr|12345|2|1||VeyronPhone:1:1|false|false
-addr|12345|3|2||VeyronPhone:1:2|false|true
diff --git a/runtimes/google/vsync/testdata/remote-noconf-00.log.sync b/runtimes/google/vsync/testdata/remote-noconf-00.log.sync
deleted file mode 100644
index c56208b..0000000
--- a/runtimes/google/vsync/testdata/remote-noconf-00.log.sync
+++ /dev/null
@@ -1,8 +0,0 @@
-# Update an object remotely three times without triggering a conflict
-# after it was created locally up to v3 (i.e. assume the remote sync
-# received it from the local sync first, then updated it).
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-
-addr|12345|4|3||VeyronPhone:1:0|false|false
-addr|12345|5|4||VeyronPhone:1:1|false|false
-addr|12345|6|5||VeyronPhone:1:2|false|false
diff --git a/runtimes/google/vsync/testdata/remote-noconf-00.sync b/runtimes/google/vsync/testdata/remote-noconf-00.sync
deleted file mode 100644
index 9b95c86..0000000
--- a/runtimes/google/vsync/testdata/remote-noconf-00.sync
+++ /dev/null
@@ -1,8 +0,0 @@
-# Update an object remotely three times without triggering a conflict
-# after it was created locally up to v2 (i.e. assume the remote sync
-# received it from the local sync first, then updated it).
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-
-addr|12345|3|2||logrec-03|false|false
-addr|12345|4|3||logrec-04|false|false
-addr|12345|5|4||logrec-05|false|false
diff --git a/runtimes/google/vsync/testdata/remote-noconf-link-00.log.sync b/runtimes/google/vsync/testdata/remote-noconf-link-00.log.sync
deleted file mode 100644
index 1da37e3..0000000
--- a/runtimes/google/vsync/testdata/remote-noconf-link-00.log.sync
+++ /dev/null
@@ -1,6 +0,0 @@
-# Update an object remotely, detect conflict, and bless the remote version.
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-
-addr|12345|4|1||VeyronPhone:1:0|false|false
-linkr|12345|2|4||VeyronPhone:1:1
-
diff --git a/runtimes/google/vsync/testdata/remote-noconf-link-01.log.sync b/runtimes/google/vsync/testdata/remote-noconf-link-01.log.sync
deleted file mode 100644
index 5530231..0000000
--- a/runtimes/google/vsync/testdata/remote-noconf-link-01.log.sync
+++ /dev/null
@@ -1,5 +0,0 @@
-# Update an object remotely, detect conflict, and bless the local version.
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-
-addr|12345|4|1||VeyronPhone:1:0|false|false
-linkr|12345|4|3||VeyronPhone:1:1
diff --git a/runtimes/google/vsync/testdata/remote-noconf-link-02.log.sync b/runtimes/google/vsync/testdata/remote-noconf-link-02.log.sync
deleted file mode 100644
index c628985..0000000
--- a/runtimes/google/vsync/testdata/remote-noconf-link-02.log.sync
+++ /dev/null
@@ -1,7 +0,0 @@
-# Update an object remotely, detect conflict, and bless the remote version, and continue updating.
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-
-addr|12345|4|1||VeyronPhone:1:0|false|false
-linkr|12345|3|4||VeyronPhone:1:1
-addr|12345|5|3||VeyronPhone:2:0|false|false
-
diff --git a/runtimes/google/vsync/testdata/remote-noconf-link-repeat.log.sync b/runtimes/google/vsync/testdata/remote-noconf-link-repeat.log.sync
deleted file mode 100644
index 1ab977d..0000000
--- a/runtimes/google/vsync/testdata/remote-noconf-link-repeat.log.sync
+++ /dev/null
@@ -1,5 +0,0 @@
-# Resolve the same conflict on two different devices.
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-
-linkr|12345|3|4||VeyronLaptop:1:0
-
diff --git a/runtimes/google/vsync/testdata/test-1obj.gc.sync b/runtimes/google/vsync/testdata/test-1obj.gc.sync
deleted file mode 100644
index 93eca9e..0000000
--- a/runtimes/google/vsync/testdata/test-1obj.gc.sync
+++ /dev/null
@@ -1,13 +0,0 @@
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-# Local node is A. Remote nodes are B and C.
-addr|12345|0|||C:1:0|false|false
-addr|12345|1|0||B:1:0|false|false
-addl|12345|2|0||A:1:0|false|false
-addl|12345|3|1|2|A:2:0|false|false
-addr|12345|4|3||C:2:0|false|false
-addr|12345|5|3||B:2:0|false|false
-addr|12345|6|4|5|B:3:0|false|false
-# Devtable state
-setdev|A|A:2,B:3,C:2
-setdev|B|A:2,B:3,C:2
-setdev|C|A:2,B:1,C:2
diff --git a/runtimes/google/vsync/testdata/test-3obj.gc.sync b/runtimes/google/vsync/testdata/test-3obj.gc.sync
deleted file mode 100644
index d9a3899..0000000
--- a/runtimes/google/vsync/testdata/test-3obj.gc.sync
+++ /dev/null
@@ -1,44 +0,0 @@
-# The format is: <cmd>|<objid>|<version>|<parent1>|<parent2>|<logrec>|<continued>|<deleted>
-# Local node is A. Remote nodes are B and C.
-addl|123|1|||A:1:0|false|false
-
-addr|456|1|||B:1:0|false|false
-
-addr|456|2|1||B:2:0|false|false
-addr|123|2|1||B:2:1|false|false
-
-addl|456|3|2||A:2:0|false|false
-addl|123|4|2||A:2:1|false|false
-
-addr|789|1|||C:1:0|false|false
-
-addr|789|2|1||C:2:0|false|false
-
-addr|123|3|1||C:3:0|false|false
-addr|789|3|2||C:3:1|false|false
-
-addr|123|5|3|2|C:4:0|false|false
-
-addl|123|6|4|5|A:3:0|false|false
-addl|456|4|3||A:3:1|false|false
-addl|789|4|3||A:3:2|false|false
-
-addr|456|5|2||B:3:0|false|false
-
-addl|456|7|4|5|A:4:0|false|false
-
-addr|456|6|2||C:5:0|false|false
-addr|123|7|5||C:5:1|false|false
-addr|123|8|7||C:5:2|false|false
-addr|789|5|3||C:5:3|false|false
-
-addl|123|9|6|8|A:5:0|false|false
-addl|456|8|6|7|A:5:1|false|false
-addl|789|6|4|5|A:5:2|false|false
-
-addl|123|10|9||A:6:0|false|false
-
-# Devtable state
-setdev|A|A:6,B:3,C:5
-setdev|B|A:4,B:3,C:4
-setdev|C|A:4,B:3,C:4
diff --git a/runtimes/google/vsync/util_test.go b/runtimes/google/vsync/util_test.go
deleted file mode 100644
index b669ffe..0000000
--- a/runtimes/google/vsync/util_test.go
+++ /dev/null
@@ -1,259 +0,0 @@
-package vsync
-
-// Utilities for testing.
-import (
- "container/list"
- "fmt"
- "os"
- "time"
-
- "veyron/services/store/raw"
-)
-
-// getFileName generates a filename for a temporary (per unit test) kvdb file.
-func getFileName() string {
- return fmt.Sprintf("%s/sync_test_%d_%d", os.TempDir(), os.Getpid(), time.Now().UnixNano())
-}
-
-// createTempDir creates a unique temporary directory to store kvdb files.
-func createTempDir() (string, error) {
- dir := fmt.Sprintf("%s/sync_test_%d_%d/", os.TempDir(), os.Getpid(), time.Now().UnixNano())
- if err := os.MkdirAll(dir, 0700); err != nil {
- return "", err
- }
- return dir, nil
-}
-
-// getFileSize returns the size of a file.
-func getFileSize(fname string) int64 {
- finfo, err := os.Stat(fname)
- if err != nil {
- return -1
- }
- return finfo.Size()
-}
-
-// dummyStream struct emulates stream of log records received from RPC.
-type dummyStream struct {
- l *list.List
- value LogRec
-}
-
-func newStream() *dummyStream {
- ds := &dummyStream{
- l: list.New(),
- }
- return ds
-}
-
-func (ds *dummyStream) Advance() bool {
- if ds.l.Len() > 0 {
- ds.value = ds.l.Remove(ds.l.Front()).(LogRec)
- return true
- }
- return false
-}
-
-func (ds *dummyStream) Value() LogRec {
- return ds.value
-}
-
-func (ds *dummyStream) RecvStream() interface {
- Advance() bool
- Value() LogRec
- Err() error
-} {
- return ds
-}
-
-func (*dummyStream) Err() error { return nil }
-
-func (ds *dummyStream) Finish() (GenVector, error) {
- return GenVector{}, nil
-}
-
-func (ds *dummyStream) Cancel() {
-}
-
-func (ds *dummyStream) add(rec LogRec) {
- ds.l.PushBack(rec)
-}
-
-// logReplayCommands replays log records parsed from the input file.
-func logReplayCommands(log *iLog, syncfile string) (GenVector, error) {
- cmds, err := parseSyncCommands(syncfile)
- if err != nil {
- return nil, err
- }
-
- var minGens GenVector
- remote := false
- var stream *dummyStream
- for _, cmd := range cmds {
- switch cmd.cmd {
- case addLocal:
- parent := raw.NoVersion
- if cmd.parents != nil {
- parent = cmd.parents[0]
- }
- err = log.processWatchRecord(cmd.objID, cmd.version, parent, &LogValue{Mutation: raw.Mutation{Version: cmd.version}, Delete: cmd.deleted}, NoTxID)
- if err != nil {
-				return nil, fmt.Errorf("cannot replay local log records %v:%v err %v",
- cmd.objID, cmd.version, err)
- }
-
- case addRemote:
- // TODO(hpucha): This code is no longer
- // used. Will be deleted when
- // processWatchRecord is moved to watcher.go
- if !remote {
- stream = newStream()
- }
- remote = true
- id, gnum, lsn, err := splitLogRecKey(cmd.logrec)
- if err != nil {
- return nil, err
- }
- rec := LogRec{
- DevID: id,
- GNum: gnum,
- LSN: lsn,
- ObjID: cmd.objID,
- CurVers: cmd.version,
- Parents: cmd.parents,
- Value: LogValue{},
- }
- stream.add(rec)
- }
- }
-
- return minGens, nil
-}
-
-// createReplayStream creates a dummy stream of log records parsed from the input file.
-func createReplayStream(syncfile string) (*dummyStream, error) {
- cmds, err := parseSyncCommands(syncfile)
- if err != nil {
- return nil, err
- }
-
- stream := newStream()
- for _, cmd := range cmds {
- id, gnum, lsn, err := splitLogRecKey(cmd.logrec)
- if err != nil {
- return nil, err
- }
- rec := LogRec{
- DevID: id,
- GNum: gnum,
- LSN: lsn,
- ObjID: cmd.objID,
- CurVers: cmd.version,
- Parents: cmd.parents,
- Value: LogValue{
- Mutation: raw.Mutation{Version: cmd.version},
- Continued: cmd.continued,
- Delete: cmd.deleted,
- },
- }
-
- switch cmd.cmd {
- case addRemote:
- rec.RecType = NodeRec
- case linkRemote:
- rec.RecType = LinkRec
- default:
- return nil, err
- }
- stream.add(rec)
- }
-
- return stream, nil
-}
-
-// populateLogAndDAG populates the log and DAG state as part of state initialization.
-func populateLogAndDAG(s *syncd, rec *LogRec) error {
- logKey, err := s.log.putLogRec(rec)
- if err != nil {
- return err
- }
-
- if err := s.dag.addNode(rec.ObjID, rec.CurVers, false, rec.Value.Delete, rec.Parents, logKey, NoTxID); err != nil {
- return err
- }
- if err := s.dag.moveHead(rec.ObjID, rec.CurVers); err != nil {
- return err
- }
- return nil
-}
-
-// vsyncInitState initializes log, dag and devtable state obtained from an input trace-like file.
-func vsyncInitState(s *syncd, syncfile string) error {
- cmds, err := parseSyncCommands(syncfile)
- if err != nil {
- return err
- }
-
- var curGen GenID
- genMap := make(map[string]*genMetadata)
-
- for _, cmd := range cmds {
- switch cmd.cmd {
- case addLocal, addRemote:
- id, gnum, lsn, err := splitLogRecKey(cmd.logrec)
- if err != nil {
- return err
- }
- rec := &LogRec{
- DevID: id,
- GNum: gnum,
- LSN: lsn,
- ObjID: cmd.objID,
- CurVers: cmd.version,
- Parents: cmd.parents,
- Value: LogValue{Continued: cmd.continued, Delete: cmd.deleted},
- }
- if err := populateLogAndDAG(s, rec); err != nil {
- return err
- }
- key := generationKey(id, gnum)
- if m, ok := genMap[key]; !ok {
- genMap[key] = &genMetadata{
- Pos: s.log.head.Curorder,
- Count: 1,
- MaxLSN: rec.LSN,
- }
- s.log.head.Curorder++
- } else {
- m.Count++
- if rec.LSN > m.MaxLSN {
- m.MaxLSN = rec.LSN
- }
- }
- if cmd.cmd == addLocal {
- curGen = gnum
- }
-
- case setDevTable:
- if err := s.devtab.putGenVec(cmd.devID, cmd.genVec); err != nil {
- return err
- }
- }
- }
-
- // Initializing genMetadata.
- for key, gen := range genMap {
- dev, gnum, err := splitGenerationKey(key)
- if err != nil {
- return err
- }
- if err := s.log.putGenMetadata(dev, gnum, gen); err != nil {
- return err
- }
- }
-
- // Initializing generation in log header.
- s.log.head.Curgen = curGen + 1
-
- return nil
-}
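[Reviewer note] For anyone porting these helpers to the new repository, a hypothetical test fragment showing how they combine; TestReplayExample is illustrative, not an existing test:

package vsync

import "testing"

func TestReplayExample(t *testing.T) {
	// Build a dummy stream from one of the deleted trace files and walk
	// it the way the initiator walks a real GetDeltas reply stream.
	stream, err := createReplayStream("testdata/remote-init-00.log.sync")
	if err != nil {
		t.Fatal(err)
	}
	rs := stream.RecvStream()
	for rs.Advance() {
		rec := rs.Value()
		_ = rec // hand each LogRec to the DAG/log code under test
	}
	if err := rs.Err(); err != nil {
		t.Fatal(err)
	}
}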
diff --git a/runtimes/google/vsync/vsync.vdl b/runtimes/google/vsync/vsync.vdl
deleted file mode 100644
index e5ac7e9..0000000
--- a/runtimes/google/vsync/vsync.vdl
+++ /dev/null
@@ -1,73 +0,0 @@
-package vsync
-
-import (
- "veyron/services/store/raw"
-
- "veyron2/storage"
-)
-
-// DeviceID is the globally unique ID of a device.
-type DeviceID string
-// GenID is the unique ID per generation per device.
-type GenID uint64
-// LSN is the log sequence number.
-type LSN uint64
-// GenVector is the generation vector.
-type GenVector map[DeviceID]GenID
-
-const (
- // NodeRec type log record adds a new node in the dag.
- NodeRec = byte(0)
- // LinkRec type log record adds a new link in the dag.
- LinkRec = byte(1)
-)
-
-// LogRec represents a single log record that is exchanged between two
-// peers.
-//
-// It contains log related metadata: DevID is the id of the
-// device that created the log record, GNum is the ID of the
-// generation that the log record is part of, LSN is the log
-// sequence number of the log record in the generation GNum,
-// and RecType is the type of log record.
-//
-// It also contains information relevant to the updates to an object
-// in the store: ObjID is the id of the object that was
-// updated. CurVers is the current version number of the
-// object. Parents can contain 0, 1 or 2 parent versions that the
-// current version is derived from, and Value is the actual value of
-// the object mutation.
-type LogRec struct {
- // Log related information.
- DevID DeviceID
- GNum GenID
- LSN LSN
- RecType byte
- // Object related information.
- ObjID storage.ID
- CurVers raw.Version
- Parents []raw.Version
- Value LogValue
-}
-
-// LogValue represents an object mutation within a transaction.
-type LogValue struct {
- // Mutation is the store mutation representing the change in the object.
- Mutation raw.Mutation
- // SyncTime is the timestamp of the mutation when it arrives at the Sync server.
- SyncTime int64
- // Delete indicates whether the mutation resulted in the object being
- // deleted from the store.
- Delete bool
- // Continued tracks the transaction boundaries in a range of mutations.
- // It is set to true in all transaction mutations except the last one
- // in which it is set to false to mark the end of the transaction.
- Continued bool
-}
-
-// Sync allows a device to GetDeltas from another device.
-type Sync interface {
- // GetDeltas returns a device's current generation vector and all the missing log records
- // when compared to the incoming generation vector.
- GetDeltas(In GenVector, ClientID DeviceID) stream<_, LogRec> (Out GenVector, Err error)
-}
diff --git a/runtimes/google/vsync/vsync.vdl.go b/runtimes/google/vsync/vsync.vdl.go
deleted file mode 100644
index 057838e..0000000
--- a/runtimes/google/vsync/vsync.vdl.go
+++ /dev/null
@@ -1,416 +0,0 @@
-// This file was auto-generated by the veyron vdl tool.
-// Source: vsync.vdl
-
-package vsync
-
-import (
- "veyron/services/store/raw"
-
- "veyron2/storage"
-
- // The non-user imports are prefixed with "_gen_" to prevent collisions.
- _gen_io "io"
- _gen_veyron2 "veyron2"
- _gen_context "veyron2/context"
- _gen_ipc "veyron2/ipc"
- _gen_naming "veyron2/naming"
- _gen_vdlutil "veyron2/vdl/vdlutil"
- _gen_wiretype "veyron2/wiretype"
-)
-
-// DeviceID is the globally unique ID of a device.
-type DeviceID string
-
-// GenID is the unique ID per generation per device.
-type GenID uint64
-
-// LSN is the log sequence number.
-type LSN uint64
-
-// GenVector is the generation vector.
-type GenVector map[DeviceID]GenID
-
-// LogRec represents a single log record that is exchanged between two
-// peers.
-//
-// It contains log related metadata: DevID is the id of the
-// device that created the log record, GNum is the ID of the
-// generation that the log record is part of, LSN is the log
-// sequence number of the log record in the generation GNum,
-// and RecType is the type of log record.
-//
-// It also contains information relevant to the updates to an object
-// in the store: ObjID is the id of the object that was
-// updated. CurVers is the current version number of the
-// object. Parents can contain 0, 1 or 2 parent versions that the
-// current version is derived from, and Value is the actual value of
-// the object mutation.
-type LogRec struct {
- // Log related information.
- DevID DeviceID
- GNum GenID
- LSN LSN
- RecType byte
- // Object related information.
- ObjID storage.ID
- CurVers raw.Version
- Parents []raw.Version
- Value LogValue
-}
-
-// LogValue represents an object mutation within a transaction.
-type LogValue struct {
- // Mutation is the store mutation representing the change in the object.
- Mutation raw.Mutation
- // SyncTime is the timestamp of the mutation when it arrives at the Sync server.
- SyncTime int64
- // Delete indicates whether the mutation resulted in the object being
- // deleted from the store.
- Delete bool
- // Continued tracks the transaction boundaries in a range of mutations.
- // It is set to true in all transaction mutations except the last one
- // in which it is set to false to mark the end of the transaction.
- Continued bool
-}
-
-// NodeRec type log record adds a new node in the dag.
-const NodeRec = byte(0)
-
-// LinkRec type log record adds a new link in the dag.
-const LinkRec = byte(1)
-
-// TODO(bprosnitz) Remove this line once signatures are updated to use typevals.
-// It corrects a bug where _gen_wiretype is unused in VDL packages where only bootstrap types are used on interfaces.
-const _ = _gen_wiretype.TypeIDInvalid
-
-// Sync allows a device to GetDeltas from another device.
-// Sync is the interface the client binds and uses.
-// Sync_ExcludingUniversal is the interface without internal framework-added methods
-// to enable embedding without method collisions. Not to be used directly by clients.
-type Sync_ExcludingUniversal interface {
- // GetDeltas returns a device's current generation vector and all the missing log records
- // when compared to the incoming generation vector.
- GetDeltas(ctx _gen_context.T, In GenVector, ClientID DeviceID, opts ..._gen_ipc.CallOpt) (reply SyncGetDeltasCall, err error)
-}
-type Sync interface {
- _gen_ipc.UniversalServiceMethods
- Sync_ExcludingUniversal
-}
-
-// SyncService is the interface the server implements.
-type SyncService interface {
-
- // GetDeltas returns a device's current generation vector and all the missing log records
- // when compared to the incoming generation vector.
- GetDeltas(context _gen_ipc.ServerContext, In GenVector, ClientID DeviceID, stream SyncServiceGetDeltasStream) (reply GenVector, err error)
-}
-
-// SyncGetDeltasCall is the interface for call object of the method
-// GetDeltas in the service interface Sync.
-type SyncGetDeltasCall interface {
- // RecvStream returns the recv portion of the stream
- RecvStream() interface {
- // Advance stages an element so the client can retrieve it
- // with Value. Advance returns true iff there is an
- // element to retrieve. The client must call Advance before
- // calling Value. Advance may block if an element is not
- // immediately available.
- Advance() bool
-
- // Value returns the element that was staged by Advance.
- // Value may panic if Advance returned false or was not
- // called at all. Value does not block.
- Value() LogRec
-
- // Err returns a non-nil error iff the stream encountered
- // any errors. Err does not block.
- Err() error
- }
-
- // Finish blocks until the server is done and returns the positional
- // return values for call.
- //
- // If Cancel has been called, Finish will return immediately; the output of
- // Finish could either be an error signalling cancelation, or the correct
- // positional return values from the server depending on the timing of the
- // call.
- //
- // Calling Finish is mandatory for releasing stream resources, unless Cancel
- // has been called or any of the other methods return an error.
- // Finish should be called at most once.
- Finish() (reply GenVector, err error)
-
- // Cancel cancels the RPC, notifying the server to stop processing. It
- // is safe to call Cancel concurrently with any of the other stream methods.
- // Calling Cancel after Finish has returned is a no-op.
- Cancel()
-}
-
-type implSyncGetDeltasStreamIterator struct {
- clientCall _gen_ipc.Call
- val LogRec
- err error
-}
-
-func (c *implSyncGetDeltasStreamIterator) Advance() bool {
- c.val = LogRec{}
- c.err = c.clientCall.Recv(&c.val)
- return c.err == nil
-}
-
-func (c *implSyncGetDeltasStreamIterator) Value() LogRec {
- return c.val
-}
-
-func (c *implSyncGetDeltasStreamIterator) Err() error {
- if c.err == _gen_io.EOF {
- return nil
- }
- return c.err
-}
-
-// Implementation of the SyncGetDeltasCall interface that is not exported.
-type implSyncGetDeltasCall struct {
- clientCall _gen_ipc.Call
- readStream implSyncGetDeltasStreamIterator
-}
-
-func (c *implSyncGetDeltasCall) RecvStream() interface {
- Advance() bool
- Value() LogRec
- Err() error
-} {
- return &c.readStream
-}
-
-func (c *implSyncGetDeltasCall) Finish() (reply GenVector, err error) {
- if ierr := c.clientCall.Finish(&reply, &err); ierr != nil {
- err = ierr
- }
- return
-}
-
-func (c *implSyncGetDeltasCall) Cancel() {
- c.clientCall.Cancel()
-}
-
-type implSyncServiceGetDeltasStreamSender struct {
- serverCall _gen_ipc.ServerCall
-}
-
-func (s *implSyncServiceGetDeltasStreamSender) Send(item LogRec) error {
- return s.serverCall.Send(item)
-}
-
-// SyncServiceGetDeltasStream is the interface for streaming responses of the method
-// GetDeltas in the service interface Sync.
-type SyncServiceGetDeltasStream interface {
- // SendStream returns the send portion of the stream.
- SendStream() interface {
- // Send places the item onto the output stream, blocking if there is no buffer
- // space available. If the client has canceled, an error is returned.
- Send(item LogRec) error
- }
-}
-
-// Implementation of the SyncServiceGetDeltasStream interface that is not exported.
-type implSyncServiceGetDeltasStream struct {
- writer implSyncServiceGetDeltasStreamSender
-}
-
-func (s *implSyncServiceGetDeltasStream) SendStream() interface {
- // Send places the item onto the output stream, blocking if there is no buffer
- // space available. If the client has canceled, an error is returned.
- Send(item LogRec) error
-} {
- return &s.writer
-}
-
-// BindSync returns the client stub implementing the Sync
-// interface.
-//
-// If no _gen_ipc.Client is specified, the default _gen_ipc.Client in the
-// global Runtime is used.
-func BindSync(name string, opts ..._gen_ipc.BindOpt) (Sync, error) {
- var client _gen_ipc.Client
- switch len(opts) {
- case 0:
- // Do nothing.
- case 1:
- if clientOpt, ok := opts[0].(_gen_ipc.Client); opts[0] == nil || ok {
- client = clientOpt
- } else {
- return nil, _gen_vdlutil.ErrUnrecognizedOption
- }
- default:
- return nil, _gen_vdlutil.ErrTooManyOptionsToBind
- }
- stub := &clientStubSync{defaultClient: client, name: name}
-
- return stub, nil
-}
-
-// NewServerSync creates a new server stub.
-//
-// It takes a regular server implementing the SyncService
-// interface, and returns a new server stub.
-func NewServerSync(server SyncService) interface{} {
- return &ServerStubSync{
- service: server,
- }
-}
-
-// clientStubSync implements Sync.
-type clientStubSync struct {
- defaultClient _gen_ipc.Client
- name string
-}
-
-func (__gen_c *clientStubSync) client(ctx _gen_context.T) _gen_ipc.Client {
- if __gen_c.defaultClient != nil {
- return __gen_c.defaultClient
- }
- return _gen_veyron2.RuntimeFromContext(ctx).Client()
-}
-
-func (__gen_c *clientStubSync) GetDeltas(ctx _gen_context.T, In GenVector, ClientID DeviceID, opts ..._gen_ipc.CallOpt) (reply SyncGetDeltasCall, err error) {
- var call _gen_ipc.Call
- if call, err = __gen_c.client(ctx).StartCall(ctx, __gen_c.name, "GetDeltas", []interface{}{In, ClientID}, opts...); err != nil {
- return
- }
- reply = &implSyncGetDeltasCall{clientCall: call, readStream: implSyncGetDeltasStreamIterator{clientCall: call}}
- return
-}
-
-func (__gen_c *clientStubSync) UnresolveStep(ctx _gen_context.T, opts ..._gen_ipc.CallOpt) (reply []string, err error) {
- var call _gen_ipc.Call
- if call, err = __gen_c.client(ctx).StartCall(ctx, __gen_c.name, "UnresolveStep", nil, opts...); err != nil {
- return
- }
- if ierr := call.Finish(&reply, &err); ierr != nil {
- err = ierr
- }
- return
-}
-
-func (__gen_c *clientStubSync) Signature(ctx _gen_context.T, opts ..._gen_ipc.CallOpt) (reply _gen_ipc.ServiceSignature, err error) {
- var call _gen_ipc.Call
- if call, err = __gen_c.client(ctx).StartCall(ctx, __gen_c.name, "Signature", nil, opts...); err != nil {
- return
- }
- if ierr := call.Finish(&reply, &err); ierr != nil {
- err = ierr
- }
- return
-}
-
-func (__gen_c *clientStubSync) GetMethodTags(ctx _gen_context.T, method string, opts ..._gen_ipc.CallOpt) (reply []interface{}, err error) {
- var call _gen_ipc.Call
- if call, err = __gen_c.client(ctx).StartCall(ctx, __gen_c.name, "GetMethodTags", []interface{}{method}, opts...); err != nil {
- return
- }
- if ierr := call.Finish(&reply, &err); ierr != nil {
- err = ierr
- }
- return
-}
-
-// ServerStubSync wraps a server that implements
-// SyncService and provides an object that satisfies
-// the requirements of veyron2/ipc.ReflectInvoker.
-type ServerStubSync struct {
- service SyncService
-}
-
-func (__gen_s *ServerStubSync) GetMethodTags(call _gen_ipc.ServerCall, method string) ([]interface{}, error) {
-// TODO(bprosnitz) GetMethodTags() will be replaced with Signature().
- // Note: This exhibits some weird behavior like returning a nil error if the method isn't found.
- // This will change when it is replaced with Signature().
- switch method {
- case "GetDeltas":
- return []interface{}{}, nil
- default:
- return nil, nil
- }
-}
-
-func (__gen_s *ServerStubSync) Signature(call _gen_ipc.ServerCall) (_gen_ipc.ServiceSignature, error) {
- result := _gen_ipc.ServiceSignature{Methods: make(map[string]_gen_ipc.MethodSignature)}
- result.Methods["GetDeltas"] = _gen_ipc.MethodSignature{
- InArgs: []_gen_ipc.MethodArgument{
- {Name: "In", Type: 67},
- {Name: "ClientID", Type: 65},
- },
- OutArgs: []_gen_ipc.MethodArgument{
- {Name: "Out", Type: 67},
- {Name: "Err", Type: 68},
- },
-
- OutStream: 79,
- }
-
- result.TypeDefs = []_gen_vdlutil.Any{
- _gen_wiretype.NamedPrimitiveType{Type: 0x3, Name: "veyron/runtimes/google/vsync.DeviceID", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x35, Name: "veyron/runtimes/google/vsync.GenID", Tags: []string(nil)}, _gen_wiretype.MapType{Key: 0x41, Elem: 0x42, Name: "veyron/runtimes/google/vsync.GenVector", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x1, Name: "error", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x35, Name: "veyron/runtimes/google/vsync.LSN", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x32, Name: "byte", Tags: []string(nil)}, _gen_wiretype.ArrayType{Elem: 0x46, Len: 0x10, Name: "veyron2/storage.ID", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x35, Name: "veyron/services/store/raw.Version", Tags: []string(nil)}, _gen_wiretype.SliceType{Elem: 0x48, Name: "", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x1, Name: "anydata", Tags: []string(nil)}, _gen_wiretype.StructType{
- []_gen_wiretype.FieldType{
- _gen_wiretype.FieldType{Type: 0x3, Name: "Name"},
- _gen_wiretype.FieldType{Type: 0x47, Name: "ID"},
- },
- "veyron/services/store/raw.DEntry", []string(nil)},
- _gen_wiretype.SliceType{Elem: 0x4b, Name: "", Tags: []string(nil)}, _gen_wiretype.StructType{
- []_gen_wiretype.FieldType{
- _gen_wiretype.FieldType{Type: 0x47, Name: "ID"},
- _gen_wiretype.FieldType{Type: 0x48, Name: "PriorVersion"},
- _gen_wiretype.FieldType{Type: 0x48, Name: "Version"},
- _gen_wiretype.FieldType{Type: 0x2, Name: "IsRoot"},
- _gen_wiretype.FieldType{Type: 0x4a, Name: "Value"},
- _gen_wiretype.FieldType{Type: 0x4c, Name: "Dir"},
- },
- "veyron/services/store/raw.Mutation", []string(nil)},
- _gen_wiretype.StructType{
- []_gen_wiretype.FieldType{
- _gen_wiretype.FieldType{Type: 0x4d, Name: "Mutation"},
- _gen_wiretype.FieldType{Type: 0x25, Name: "SyncTime"},
- _gen_wiretype.FieldType{Type: 0x2, Name: "Delete"},
- _gen_wiretype.FieldType{Type: 0x2, Name: "Continued"},
- },
- "veyron/runtimes/google/vsync.LogValue", []string(nil)},
- _gen_wiretype.StructType{
- []_gen_wiretype.FieldType{
- _gen_wiretype.FieldType{Type: 0x41, Name: "DevID"},
- _gen_wiretype.FieldType{Type: 0x42, Name: "GNum"},
- _gen_wiretype.FieldType{Type: 0x45, Name: "LSN"},
- _gen_wiretype.FieldType{Type: 0x46, Name: "RecType"},
- _gen_wiretype.FieldType{Type: 0x47, Name: "ObjID"},
- _gen_wiretype.FieldType{Type: 0x48, Name: "CurVers"},
- _gen_wiretype.FieldType{Type: 0x49, Name: "Parents"},
- _gen_wiretype.FieldType{Type: 0x4e, Name: "Value"},
- },
- "veyron/runtimes/google/vsync.LogRec", []string(nil)},
- }
-
- return result, nil
-}
-
-func (__gen_s *ServerStubSync) UnresolveStep(call _gen_ipc.ServerCall) (reply []string, err error) {
- if unresolver, ok := __gen_s.service.(_gen_ipc.Unresolver); ok {
- return unresolver.UnresolveStep(call)
- }
- if call.Server() == nil {
- return
- }
- var published []string
- if published, err = call.Server().Published(); err != nil || published == nil {
- return
- }
- reply = make([]string, len(published))
- for i, p := range published {
- reply[i] = _gen_naming.Join(p, call.Name())
- }
- return
-}
-
-func (__gen_s *ServerStubSync) GetDeltas(call _gen_ipc.ServerCall, In GenVector, ClientID DeviceID) (reply GenVector, err error) {
- stream := &implSyncServiceGetDeltasStream{writer: implSyncServiceGetDeltasStreamSender{serverCall: call}}
- reply, err = __gen_s.service.GetDeltas(call, In, ClientID, stream)
- return
-}
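[Reviewer note] A hedged usage sketch of the generated stubs above, should the pattern be needed in this code's new home; pullDeltas and its name argument are hypothetical:

package vsync

import "veyron2/context"

// pullDeltas binds to a peer's sync object, drains its GetDeltas stream,
// and returns the peer's generation vector from Finish.
func pullDeltas(ctx context.T, name string, local GenVector, me DeviceID) (GenVector, error) {
	s, err := BindSync(name)
	if err != nil {
		return GenVector{}, err
	}
	call, err := s.GetDeltas(ctx, local, me)
	if err != nil {
		return GenVector{}, err
	}
	rs := call.RecvStream()
	for rs.Advance() {
		rec := rs.Value()
		_ = rec // replay rec into the local log and DAG here
	}
	if err := rs.Err(); err != nil {
		return GenVector{}, err
	}
	return call.Finish()
}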
diff --git a/runtimes/google/vsync/vsyncd.go b/runtimes/google/vsync/vsyncd.go
deleted file mode 100644
index ceb0785..0000000
--- a/runtimes/google/vsync/vsyncd.go
+++ /dev/null
@@ -1,285 +0,0 @@
-package vsync
-
-// Package vsync provides veyron sync daemon utility functions. Sync
-// daemon serves incoming GetDeltas requests and contacts other peers
-// to get deltas from them. When it receives a GetDeltas request, the
-// incoming generation vector is diffed with the local generation
-// vector, and missing generations are sent back. When it receives
-// log records in response to a GetDeltas request, it replays those
-// log records to get in sync with the sender.
-import (
- "fmt"
- "strings"
- "sync"
- "time"
-
- "veyron/services/store/raw"
-
- "veyron2/ipc"
- "veyron2/naming"
- "veyron2/security"
- "veyron2/vlog"
- "veyron2/vom"
-
- _ "veyron/services/store/typeregistryhack"
-)
-
-// syncd contains the metadata for the sync daemon.
-type syncd struct {
- // Pointers to metadata structures.
- log *iLog
- devtab *devTable
- dag *dag
-
- // Local device id.
- id DeviceID
-
- // RWlock to concurrently access log and device table data structures.
- lock sync.RWMutex
- // State to coordinate shutting down all spawned goroutines.
- pending sync.WaitGroup
- closed chan struct{}
-
- // Local Veyron store.
- storeEndpoint string
- store raw.Store
-
- // Handlers for goroutine procedures.
- hdlGC *syncGC
- hdlWatcher *syncWatcher
- hdlInitiator *syncInitiator
-}
-
-type syncDispatcher struct {
- server ipc.Invoker
- auth security.Authorizer
-}
-
-// NewSyncDispatcher returns an object dispatcher.
-func NewSyncDispatcher(s interface{}, auth security.Authorizer) ipc.Dispatcher {
- return &syncDispatcher{ipc.ReflectInvoker(s), auth}
-}
-
-func (d *syncDispatcher) Lookup(suffix, method string) (ipc.Invoker, security.Authorizer, error) {
- if strings.HasSuffix(suffix, "sync") {
- return d.server, d.auth, nil
- }
- return nil, nil, fmt.Errorf("Lookup:: failed on suffix: %s", suffix)
-}
-
-// NewSyncd creates a new syncd instance.
-//
-// Syncd concurrency: syncd initializes three goroutines at
-// startup. The "watcher" thread is responsible for watching the store
-// for changes to its objects. The "initiator" thread is responsible
-// for periodically checking the neighborhood and contacting a peer to
-// obtain changes from that peer. The "gc" thread is responsible for
-// periodically checking if any log records and dag state can be
-// pruned. All three of these threads perform write operations to the data
-// structures, and synchronize by acquiring a write lock on s.lock. In
-// addition, when syncd receives an incoming RPC, it responds to the
-// request by acquiring a read lock on s.lock. Thus, at any instant in
-// time, either one of the watcher, initiator or gc threads is active,
-// or any number of responders can be active, serving incoming
-// requests. Fairness between these threads follows from
-// sync.RWMutex. The spec says that the writers cannot be starved by
-// the readers but it does not guarantee FIFO. We may have to revisit
-// this in the future.
-func NewSyncd(peerEndpoints, peerDeviceIDs, devid, storePath, storeEndpoint string, syncTick time.Duration) *syncd {
- // Connect to the local Veyron store.
- // At present this is optional to allow testing (from the command-line) w/o Veyron store running.
- // TODO: connecting to Veyron store should be mandatory.
- var store raw.Store
- if storeEndpoint != "" {
- var err error
- store, err = raw.BindStore(naming.JoinAddressName(storeEndpoint, raw.RawStoreSuffix))
- if err != nil {
- vlog.Fatalf("NewSyncd: cannot connect to Veyron store endpoint (%s): %s", storeEndpoint, err)
- }
- }
-
- return newSyncdCore(peerEndpoints, peerDeviceIDs, devid, storePath, storeEndpoint, store, syncTick)
-}
-
-// newSyncdCore is the internal function that creates the Syncd
-// structure and initilizes its thread (goroutines). It takes a
-// Veyron Store parameter to separate the core of Syncd setup from the
-// external dependency on Veyron Store.
-func newSyncdCore(peerEndpoints, peerDeviceIDs, devid, storePath, storeEndpoint string,
- store raw.Store, syncTick time.Duration) *syncd {
- s := &syncd{}
-
- // Bootstrap my own DeviceID.
- s.id = DeviceID(devid)
-
- var err error
- // Log init.
- if s.log, err = openILog(storePath+"/ilog", s); err != nil {
- vlog.Fatalf("newSyncd: ILogInit failed: err %v", err)
- }
-
- // DevTable init.
- if s.devtab, err = openDevTable(storePath+"/dtab", s); err != nil {
- vlog.Fatalf("newSyncd: DevTableInit failed: err %v", err)
- }
-
- // Dag Init.
- if s.dag, err = openDAG(storePath + "/dag"); err != nil {
- vlog.Fatalf("newSyncd: OpenDag failed: err %v", err)
- }
-
- // Veyron Store.
- s.storeEndpoint = storeEndpoint
- s.store = store
- vlog.VI(1).Infof("newSyncd: Local Veyron store: %s", s.storeEndpoint)
-
- // Register these Watch data types with VOM.
- // TODO(tilaks): why aren't they auto-retrieved from the IDL?
- vom.Register(&raw.Mutation{})
- vom.Register(&raw.DEntry{})
-
- // Channel to propagate close event to all threads.
- s.closed = make(chan struct{})
-
- s.pending.Add(3)
-
- // Get deltas every peerSyncInterval.
- s.hdlInitiator = newInitiator(s, peerEndpoints, peerDeviceIDs, syncTick)
- go s.hdlInitiator.contactPeers()
-
- // Garbage collect every garbageCollectInterval.
- s.hdlGC = newGC(s)
- go s.hdlGC.garbageCollect()
-
- // Start a watcher thread that will get updates from local store.
- s.hdlWatcher = newWatcher(s)
- go s.hdlWatcher.watchStore()
-
- return s
-}
-
-// Close cleans up syncd state.
-func (s *syncd) Close() {
- close(s.closed)
- s.pending.Wait()
-
- // TODO(hpucha): close without flushing.
-}
-
-// isSyncClosing returns true if Close() was called i.e. the "closed" channel is closed.
-func (s *syncd) isSyncClosing() bool {
- select {
- case <-s.closed:
- return true
- default:
- return false
- }
-}
-
-// GetDeltas responds to the incoming request from a client by sending missing generations to the client.
-func (s *syncd) GetDeltas(_ ipc.ServerContext, In GenVector, ClientID DeviceID, Stream SyncServiceGetDeltasStream) (GenVector, error) {
- vlog.VI(1).Infof("GetDeltas:: Received vector %v from client %s", In, ClientID)
-
- // Handle misconfiguration: the client cannot have the same ID as me.
- if ClientID == s.id {
- vlog.VI(1).Infof("GetDeltas:: impostor alert: client ID %s is the same as mine %s", ClientID, s.id)
- return GenVector{}, fmt.Errorf("impostor: you cannot be %s, for I am %s", ClientID, s.id)
- }
-
- if err := s.updateDeviceInfo(ClientID, In); err != nil {
- vlog.Fatalf("GetDeltas:: updateDeviceInfo failed with err %v", err)
- }
-
- out, gens, gensInfo, err := s.prepareGensToReply(In)
- if err != nil {
- vlog.Fatalf("GetDeltas:: prepareGensToReply failed with err %v", err)
- }
-
- for pos, v := range gens {
- gen := gensInfo[pos]
- var count uint64
- for i := LSN(0); i <= gen.MaxLSN; i++ {
- count++
- rec, err := s.getLogRec(v.devID, v.genID, i)
- if err != nil {
- vlog.Fatalf("GetDeltas:: Couldn't get log record %s %d %d, err %v",
- v.devID, v.genID, i, err)
- }
- vlog.VI(1).Infof("Sending log record %v", rec)
- if err := Stream.SendStream().Send(*rec); err != nil {
- vlog.Errorf("GetDeltas:: Couldn't send stream err: %v", err)
- return GenVector{}, err
- }
- }
- if count != gen.Count {
- vlog.Fatalf("GetDeltas:: GenMetadata has incorrect log records for generation %s %d %v",
- v.devID, v.genID, gen)
- }
- }
- return out, nil
-}
-
-// updateDeviceInfo updates the remote device's information based on
-// the incoming GetDeltas request.
-func (s *syncd) updateDeviceInfo(ClientID DeviceID, In GenVector) error {
- s.lock.Lock()
- defer s.lock.Unlock()
-
- // Note that the incoming client generation vector cannot be
- // used for garbage collection. We can only garbage collect
- // based on the generations we receive from other
- // devices. Receiving a set of generations assures that all
- // updates branching from those generations are also received
- // and hence generations present on all devices can be
- // GC'ed. This function sends generations to other devices and
- // hence does not use the generation vector for GC.
- //
- // TODO(hpucha): Cache the client's incoming generation vector
- // to assist in tracking missing generations and hence next
- // peer to contact.
- if !s.devtab.hasDevInfo(ClientID) {
- if err := s.devtab.addDevice(ClientID); err != nil {
- return err
- }
- }
- return nil
-}
-
-// prepareGensToReply processes the incoming generation vector and
-// returns the metadata of all the missing generations between the
-// incoming and the local generation vector.
-func (s *syncd) prepareGensToReply(In GenVector) (GenVector, []*genOrder, []*genMetadata, error) {
- s.lock.RLock()
- defer s.lock.RUnlock()
-
- // Get local generation vector.
- out, err := s.devtab.getGenVec(s.id)
- if err != nil {
- return GenVector{}, nil, nil, err
- }
-
- // Diff the two generation vectors.
- gens, err := s.devtab.diffGenVectors(out, In)
- if err != nil {
- return GenVector{}, nil, nil, err
- }
-
- // Get the metadata for all the generations in the reply.
- gensInfo := make([]*genMetadata, len(gens))
- for pos, v := range gens {
- gen, err := s.log.getGenMetadata(v.devID, v.genID)
- if err != nil || gen.Count <= 0 {
- return GenVector{}, nil, nil, err
- }
- gensInfo[pos] = gen
- }
-
- return out, gens, gensInfo, nil
-}
-
-// getLogRec gets the log record for a given generation and lsn.
-func (s *syncd) getLogRec(dev DeviceID, gen GenID, lsn LSN) (*LogRec, error) {
- s.lock.RLock()
- defer s.lock.RUnlock()
- return s.log.getLogRec(dev, gen, lsn)
-}
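[Reviewer note] The NewSyncd comment above spells out the locking discipline; a minimal sketch of that pattern under the same assumptions (the helper names are hypothetical):

package vsync

// Writers (watcher, initiator, gc) serialize behind the write lock; any
// number of RPC responders may hold the read lock concurrently.
func (s *syncd) asWriter(f func() error) error {
	s.lock.Lock()
	defer s.lock.Unlock()
	return f()
}

func (s *syncd) asReader(f func() error) error {
	s.lock.RLock()
	defer s.lock.RUnlock()
	return f()
}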
diff --git a/runtimes/google/vsync/vsyncd/main.go b/runtimes/google/vsync/vsyncd/main.go
deleted file mode 100644
index a915550..0000000
--- a/runtimes/google/vsync/vsyncd/main.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Binary vsyncd is the sync daemon.
-package main
-
-import (
- "flag"
- "os"
-
- "veyron/runtimes/google/vsync"
- vflag "veyron/security/flag"
-
- "veyron2/rt"
- "veyron2/vlog"
-)
-
-var (
- // TODO(rthellend): Remove the protocol and address flags when the config
- // manager is working.
- protocol = flag.String("protocol", "tcp", "protocol to listen on")
- address = flag.String("address", ":0", "address to listen on")
-
- peerEndpoints = flag.String("peers", "",
- "comma separated list of endpoints of the vsync peer")
- peerDeviceIDs = flag.String("peerids", "",
- "comma separated list of deviceids of the vsync peer")
- devid = flag.String("devid", "", "Device ID")
- storePath = flag.String("store", os.TempDir(), "path to store files")
- vstoreEndpoint = flag.String("vstore", "", "endpoint of the local Veyron store")
- syncTick = flag.Duration("synctick", 0, "clock tick duration for sync with a peer (e.g. 10s)")
-)
-
-func main() {
- flag.Parse()
- if *devid == "" {
- vlog.Fatalf("syncd:: --devid needs to be specified")
- }
-
- // Create the runtime.
- r := rt.Init()
- defer r.Cleanup()
-
- // Create a new server instance.
- s, err := r.NewServer()
- if err != nil {
- vlog.Fatalf("syncd:: failure creating server: err %v", err)
- }
-
- // Create a new SyncService.
- syncd := vsync.NewSyncd(*peerEndpoints, *peerDeviceIDs, *devid, *storePath, *vstoreEndpoint, *syncTick)
- syncService := vsync.NewServerSync(syncd)
-
- // Create the authorizer.
- auth := vflag.NewAuthorizerOrDie()
-
- // Register the service.
- syncDisp := vsync.NewSyncDispatcher(syncService, auth)
-
- // Create an endpoint and begin listening.
- if endpoint, err := s.Listen(*protocol, *address); err == nil {
- vlog.VI(0).Infof("syncd:: Listening now at %v", endpoint)
- } else {
- vlog.Fatalf("syncd:: error listening to service: err %v", err)
- }
-
- // Publish the vsync service.
- name := "global/vsync/" + *devid
- if err := s.Serve(name, syncDisp); err != nil {
- vlog.Fatalf("syncd: error publishing service: err %v", err)
- }
-
- // Wait forever.
- done := make(chan struct{})
- <-done
-}
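[Reviewer note] For the record, a hypothetical invocation of this daemon, with illustrative device IDs and endpoints:

	vsyncd --devid=phone1 --store=/tmp/syncd --peers=<peer-endpoint> --peerids=laptop1 --synctick=10s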
diff --git a/runtimes/google/vsync/watcher.go b/runtimes/google/vsync/watcher.go
deleted file mode 100644
index 8d3f461..0000000
--- a/runtimes/google/vsync/watcher.go
+++ /dev/null
@@ -1,202 +0,0 @@
-package vsync
-
-// Veyron Sync hook to the Store Watch API. When applications update objects
-// in the local Veyron Store, Sync learns about these via the Watch API stream
-// of object mutations. In turn, this Sync watcher thread updates the DAG and
-// ILog records to track the object change histories.
-
-import (
- "fmt"
- "io"
- "time"
-
- "veyron/services/store/raw"
-
- "veyron2/context"
- "veyron2/rt"
- "veyron2/services/watch"
- "veyron2/services/watch/types"
- "veyron2/vlog"
-)
-
-var (
- // watchRetryDelay is how long the watcher waits before calling the Watch() RPC again
- // after the previous call fails.
- watchRetryDelay = 2 * time.Second
- // streamRetryDelay is how long the watcher waits before creating a new Watch stream
- // after the previous stream ends.
- streamRetryDelay = 1 * time.Second
-)
-
-// syncWatcher contains the metadata for the Sync Watcher thread.
-type syncWatcher struct {
- // syncd is a pointer to the Syncd instance owning this Watcher.
- syncd *syncd
-
- // curTx is the transaction ID of the latest transaction being processed by the Watcher.
- curTx TxID
-
- // curTxSyncTime is the timestamp of the latest transaction being processed by the Watcher.
- curTxSyncTime int64
-}
-
-// newWatcher creates a new Sync Watcher instance attached to the given Syncd instance.
-func newWatcher(syncd *syncd) *syncWatcher {
- return &syncWatcher{syncd: syncd, curTx: NoTxID}
-}
-
-// watchStreamCanceler is a helper goroutine that cancels the watcher RPC when Syncd notifies
-// its goroutines to exit by closing its internal channel. This in turn unblocks the watcher
-// enabling it to exit. If the RPC fails, the watcher notifies the canceler to exit by
-// closing a private "done" channel between them.
-func (w *syncWatcher) watchStreamCanceler(stream watch.GlobWatcherWatchGlobCall, done chan struct{}) {
- select {
- case <-w.syncd.closed:
- vlog.VI(1).Info("watchStreamCanceler: Syncd channel closed, cancel stream and exit")
- stream.Cancel()
- case <-done:
- vlog.VI(1).Info("watchStreamCanceler: done, exit")
- }
-}
-
-// watchStore consumes change records obtained by watching the store
-// for updates and applies them to the sync DBs.
-func (w *syncWatcher) watchStore() {
- defer w.syncd.pending.Done()
-
- // If no Veyron store is configured, there is nothing to watch.
- if w.syncd.store == nil {
- vlog.VI(1).Info("watchStore: Veyron store not configured; skipping the watcher")
- return
- }
-
- // Get a Watch stream, process it, repeat till end-of-life.
- ctx := rt.R().NewContext()
- for {
- stream := w.getWatchStream(ctx)
- if stream == nil {
- return // Syncd is exiting.
- }
-
- // Spawn a goroutine to detect the Syncd "closed" channel and cancel the RPC stream
- // to unblock the watcher. The "done" channel lets the watcher terminate the goroutine.
- done := make(chan struct{})
- go w.watchStreamCanceler(stream, done)
-
- // Process the stream of Watch updates until it closes (similar to "tail -f").
- w.processWatchStream(stream)
-
- if w.syncd.isSyncClosing() {
- return // Syncd is exiting.
- }
-
- stream.Finish()
- close(done)
-
- // TODO(rdaoud): we need a rate-limiter here in case the stream closes too quickly.
- // If the stream stays up long enough, no need to sleep before getting a new one.
- time.Sleep(streamRetryDelay)
- }
-}
-
-// getWatchStream() returns a Watch API stream and handles retries if the Watch() call fails.
-// If the stream is nil, it means Syncd is exiting cleanly and the caller should terminate.
-func (w *syncWatcher) getWatchStream(ctx context.T) watch.GlobWatcherWatchGlobCall {
- for {
- req := raw.Request{}
- if resmark := w.syncd.devtab.head.Resmark; resmark != nil {
- req.ResumeMarker = resmark
- }
-
- stream, err := w.syncd.store.Watch(ctx, req)
- if err == nil {
- return stream
- }
-
- if w.syncd.isSyncClosing() {
- vlog.VI(1).Info("getWatchStream: exiting, Syncd closed its channel: ", err)
- return nil
- }
-
- vlog.VI(1).Info("getWatchStream: call to Watch() failed, retrying a bit later: ", err)
- time.Sleep(watchRetryDelay)
- }
-}
-
-// processWatchStream reads the stream of Watch updates and applies the object mutations.
-// Ideally this call does not return as the stream should be un-ending (like "tail -f").
-// If the stream is closed, distinguish between the cases of end-of-stream vs Syncd canceling
-// the stream to trigger a clean exit.
-func (w *syncWatcher) processWatchStream(call watch.GlobWatcherWatchGlobCall) {
- w.curTx = NoTxID
- stream := call.RecvStream()
- for stream.Advance() {
- change := stream.Value()
-
- // Timestamp of the change arriving at the Sync server.
- syncTime := time.Now().UnixNano()
-
- if err := w.processChange(change, syncTime); err != nil {
- // TODO(rdaoud): don't crash, instead add retry policies to attempt some degree of
- // self-healing from a data corruption where feasible, otherwise quarantine this device
- // from the cluster and stop Syncd to avoid propagating data corruptions.
- vlog.Fatal("processWatchStream:", err)
- }
- }
-
- err := stream.Err()
- if err == nil {
- err = io.EOF
- }
- if w.syncd.isSyncClosing() {
- vlog.VI(1).Info("processWatchStream: exiting, Syncd closed its channel: ", err)
- } else {
- vlog.VI(1).Info("processWatchStream: RPC stream error, re-issue Watch(): ", err)
- }
-}
-
-// processChange applies a change (object mutation) received from the Watch API.
-// The function grabs the write-lock to access the Log and DAG DBs.
-func (w *syncWatcher) processChange(ch types.Change, syncTime int64) error {
- w.syncd.lock.Lock()
- defer w.syncd.lock.Unlock()
-
- vlog.VI(1).Infof("processChange:: ready to process change")
-
- mu, ok := ch.Value.(*raw.Mutation)
- if !ok {
- return fmt.Errorf("invalid change value, not a mutation: %#v", ch)
- }
-
- // Begin a new transaction if needed.
- if w.curTx == NoTxID && ch.Continued {
- w.curTx = w.syncd.dag.addNodeTxStart()
- w.curTxSyncTime = syncTime
- }
-
- time := syncTime
- if w.curTx != NoTxID {
- // All LogValues belonging to the same transaction get the same timestamp.
- time = w.curTxSyncTime
- }
- val := &LogValue{Mutation: *mu, SyncTime: time, Delete: ch.State == types.DoesNotExist, Continued: ch.Continued}
-	vlog.VI(2).Infof("processChange:: processing record %v, Tx %v", val, w.curTx)
- if err := w.syncd.log.processWatchRecord(mu.ID, mu.Version, mu.PriorVersion, val, w.curTx); err != nil {
- return fmt.Errorf("cannot process mutation: %#v: %s", ch, err)
- }
-
- // End the previous transaction if any.
- if w.curTx != NoTxID && !ch.Continued {
- if err := w.syncd.dag.addNodeTxEnd(w.curTx); err != nil {
- return err
- }
- w.curTx = NoTxID
- }
-
- // If the resume marker changed, update the device table.
- if !ch.Continued {
- w.syncd.devtab.head.Resmark = ch.ResumeMarker
- }
-
- return nil
-}
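[Reviewer note] A toy, standalone illustration of the transaction bracketing that processChange performs with the Continued flag; the flag sequence below matches the three-transaction batch built by getChanges() in the test file that follows:

package main

import "fmt"

func main() {
	// Continued=true means more mutations follow in the same transaction;
	// false closes it.
	continued := []bool{true, true, false, true, false, true, false}
	tx, open := 0, false
	for i, c := range continued {
		if !open {
			tx, open = tx+1, true
		}
		fmt.Printf("change %d -> tx %d\n", i, tx)
		if !c {
			open = false
		}
	}
	// Prints three transactions: {0, 1, 2}, {3, 4}, {5, 6}.
}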
diff --git a/runtimes/google/vsync/watcher_test.go b/runtimes/google/vsync/watcher_test.go
deleted file mode 100644
index 899d58c..0000000
--- a/runtimes/google/vsync/watcher_test.go
+++ /dev/null
@@ -1,505 +0,0 @@
-package vsync
-
-// Tests for the Veyron Sync watcher.
-
-import (
- "bytes"
- "fmt"
- "os"
- "reflect"
- "testing"
- "time"
-
- "veyron/services/store/raw"
-
- "veyron2/context"
- "veyron2/ipc"
- "veyron2/rt"
- "veyron2/services/watch/types"
- "veyron2/storage"
-)
-
-var (
- info testInfo
- recvBlocked chan struct{}
-)
-
-// testInfo controls the flow through the fake store and fake reply stream used
-// to simulate the Watch API.
-type testInfo struct {
- failWatch bool
- failWatchCount int
- failRecv bool
- failRecvCount int
- eofRecv bool
- blockRecv bool
- watchResmark []byte
-}
-
-// fakeVStore is used to simulate the Watch() API of the store and stubs the other store APIs.
-type fakeVStore struct {
-}
-
-func (*fakeVStore) GetMethodTags(_ context.T, _ string, _ ...ipc.CallOpt) ([]interface{}, error) {
- panic("not implemented")
-}
-
-func (*fakeVStore) UnresolveStep(_ context.T, _ ...ipc.CallOpt) ([]string, error) {
- panic("not implemented")
-}
-
-func (*fakeVStore) Signature(_ context.T, _ ...ipc.CallOpt) (ipc.ServiceSignature, error) {
- panic("not implemented")
-}
-
-func (v *fakeVStore) Watch(_ context.T, req raw.Request, _ ...ipc.CallOpt) (raw.StoreWatchCall, error) {
- // If "failWatch" is set, simulate a failed RPC call.
- if info.failWatch {
- info.failWatchCount++
- return nil, fmt.Errorf("fakeWatch forced error: %d", info.failWatchCount)
- }
-
- // Save the resmark from the Watch request.
- info.watchResmark = req.ResumeMarker
-
- // Return a fake stream to access the batch of changes (store mutations).
- return newFakeStream(), nil
-}
-
-func (*fakeVStore) PutMutations(_ context.T, _ ...ipc.CallOpt) (raw.StorePutMutationsCall, error) {
- panic("not implemented")
-}
-
-// fakeStream is used to simulate the reply stream of the Watch() API.
-type fakeStream struct {
- changes []types.Change
- i int
- resmark byte
- canceled chan struct{}
- err error
-}
-
-func newFakeStream() *fakeStream {
- s := &fakeStream{}
- if info.watchResmark != nil {
- s.resmark = info.watchResmark[0]
- }
- s.changes = getChanges()
- s.i = -1
- s.canceled = make(chan struct{})
- return s
-}
-
-func (s *fakeStream) RecvStream() interface {
- Advance() bool
- Value() types.Change
- Err() error
-} {
- return s
-}
-
-func (s *fakeStream) Advance() bool {
- // If "failRecv" is set, simulate a failed call.
- if info.failRecv {
- info.failRecvCount++
- s.err = fmt.Errorf("fake recv error on fake stream: %d", info.failRecvCount)
- return false
- }
-
- if s.i+1 == len(s.changes) {
- // Make sure the next Recv() call returns EOF on the stream.
- info.eofRecv = true
- }
-
- // If "eofRecv" is set, simulate a closed stream and make sure the next Recv() call blocks.
- if info.eofRecv {
- info.eofRecv, info.blockRecv = false, true
- s.err = nil
- return false
- }
-
- // If "blockRecv" is set, simulate blocking the call until the stream is canceled.
- if info.blockRecv {
- close(recvBlocked)
- <-s.canceled
- s.err = nil
- return false
- }
-
- // Otherwise return a change.
- // Adjust the resume marker of the change records to follow the one given to the Watch request.
- s.i++
- return true
-}
-
-func (s *fakeStream) Value() types.Change {
- ch := s.changes[s.i]
-
- if !ch.Continued {
- s.resmark++
- resmark := []byte{s.resmark, 0, 0, 0, 0, 0, 0, 0}
- ch.ResumeMarker = resmark
- }
-
- return ch
-}
-
-func (s *fakeStream) Err() error {
- return s.err
-}
-func (s *fakeStream) Finish() error {
- return nil
-}
-
-func (s *fakeStream) Cancel() {
- close(s.canceled)
-}
-
-// getChanges returns a batch of store mutations used to simulate the Watch API.
-// The batch contains two transactions to verify both new-object creation and the
-// mutation of an existing object.
-func getChanges() []types.Change {
- return []types.Change{
- // 1st transaction: create "/" and "/a" and "/a/b" as 3 new objects (prior versions are 0).
- types.Change{
- Name: "",
- State: 0,
- Value: &raw.Mutation{
- ID: storage.ID{0x4c, 0x6d, 0xb5, 0x1a, 0xa7, 0x40, 0xd8, 0xc6,
- 0x2b, 0x90, 0xdf, 0x87, 0x45, 0x3, 0xe2, 0x85},
- PriorVersion: 0x0,
- Version: 0x4d65822107fcfd52,
- Value: "value-root",
- Dir: []raw.DEntry{
- raw.DEntry{
- Name: "a",
- ID: storage.ID{0x8, 0x2b, 0xc4, 0x2e, 0x15, 0xaf, 0x4f, 0xcf,
- 0x61, 0x1d, 0x7f, 0x19, 0xa8, 0xd7, 0x83, 0x1f},
- },
- },
- },
- ResumeMarker: nil,
- Continued: true,
- },
- types.Change{
- Name: "",
- State: 0,
- Value: &raw.Mutation{
- ID: storage.ID{0x8, 0x2b, 0xc4, 0x2e, 0x15, 0xaf, 0x4f, 0xcf,
- 0x61, 0x1d, 0x7f, 0x19, 0xa8, 0xd7, 0x83, 0x1f},
- PriorVersion: 0x0,
- Version: 0x57e9d1860d1d68d8,
- Value: "value-a",
- Dir: []raw.DEntry{
- raw.DEntry{
- Name: "b",
- ID: storage.ID{0x6e, 0x4a, 0x32, 0x7c, 0x29, 0x7d, 0x76, 0xfb,
- 0x51, 0x42, 0xb1, 0xb1, 0xd9, 0x5b, 0x2d, 0x7},
- },
- },
- },
- ResumeMarker: nil,
- Continued: true,
- },
- types.Change{
- Name: "",
- State: 0,
- Value: &raw.Mutation{
- ID: storage.ID{0x6e, 0x4a, 0x32, 0x7c, 0x29, 0x7d, 0x76, 0xfb,
- 0x51, 0x42, 0xb1, 0xb1, 0xd9, 0x5b, 0x2d, 0x7},
- PriorVersion: 0x0,
- Version: 0x55104dc76695721d,
- Value: "value-b",
- Dir: nil,
- },
- ResumeMarker: nil,
- Continued: false,
- },
-
- // 2nd transaction: create "/a/c" as a new object, which also updates "a" (its "Dir" field).
- types.Change{
- Name: "",
- State: 0,
- Value: &raw.Mutation{
- ID: storage.ID{0x8, 0x2b, 0xc4, 0x2e, 0x15, 0xaf, 0x4f, 0xcf,
- 0x61, 0x1d, 0x7f, 0x19, 0xa8, 0xd7, 0x83, 0x1f},
- PriorVersion: 0x57e9d1860d1d68d8,
- Version: 0x365a858149c6e2d1,
- Value: "value-a",
- Dir: []raw.DEntry{
- raw.DEntry{
- Name: "b",
- ID: storage.ID{0x6e, 0x4a, 0x32, 0x7c, 0x29, 0x7d, 0x76, 0xfb,
- 0x51, 0x42, 0xb1, 0xb1, 0xd9, 0x5b, 0x2d, 0x7},
- },
- raw.DEntry{
- Name: "c",
- ID: storage.ID{0x70, 0xff, 0x65, 0xec, 0xf, 0x82, 0x5f, 0x44,
- 0xb6, 0x9f, 0x89, 0x5e, 0xea, 0x75, 0x9d, 0x71},
- },
- },
- },
- ResumeMarker: nil,
- Continued: true,
- },
- types.Change{
- Name: "",
- State: 0,
- Value: &raw.Mutation{
- ID: storage.ID{0x70, 0xff, 0x65, 0xec, 0xf, 0x82, 0x5f, 0x44,
- 0xb6, 0x9f, 0x89, 0x5e, 0xea, 0x75, 0x9d, 0x71},
- PriorVersion: 0x0,
- Version: 0x380704bb7b4d7c03,
- Value: "value-c",
- Dir: nil,
- },
- ResumeMarker: nil,
- Continued: false,
- },
-
- // 3rd transaction: remove "/a/b" which updates "a" (its "Dir" field) and deletes "b".
- types.Change{
- Name: "",
- State: 0,
- Value: &raw.Mutation{
- ID: storage.ID{0x8, 0x2b, 0xc4, 0x2e, 0x15, 0xaf, 0x4f, 0xcf,
- 0x61, 0x1d, 0x7f, 0x19, 0xa8, 0xd7, 0x83, 0x1f},
- PriorVersion: 0x365a858149c6e2d1,
- Version: 0xa858149c6e2d1000,
- Value: "value-a",
- Dir: []raw.DEntry{
- raw.DEntry{
- Name: "c",
- ID: storage.ID{0x70, 0xff, 0x65, 0xec, 0xf, 0x82, 0x5f, 0x44,
- 0xb6, 0x9f, 0x89, 0x5e, 0xea, 0x75, 0x9d, 0x71},
- },
- },
- },
- ResumeMarker: nil,
- Continued: true,
- },
- types.Change{
- Name: "",
- State: types.DoesNotExist,
- Value: &raw.Mutation{
- ID: storage.ID{0x6e, 0x4a, 0x32, 0x7c, 0x29, 0x7d, 0x76, 0xfb,
- 0x51, 0x42, 0xb1, 0xb1, 0xd9, 0x5b, 0x2d, 0x7},
- PriorVersion: 0x55104dc76695721d,
- Version: 0x0,
- Value: "",
- Dir: nil,
- },
- ResumeMarker: nil,
- Continued: false,
- },
- }
-}
-
-// initTestDir creates a per-test directory to store the Sync DB files and returns it.
-// It also initializes (resets) the test control metadata.
-func initTestDir(t *testing.T) string {
- info = testInfo{}
- recvBlocked = make(chan struct{})
- watchRetryDelay = 10 * time.Millisecond
- streamRetryDelay = 5 * time.Millisecond
-
- path := fmt.Sprintf("%s/sync_test_%d_%d/", os.TempDir(), os.Getpid(), time.Now().UnixNano())
- if err := os.Mkdir(path, 0775); err != nil {
-		t.Fatalf("initTestDir: cannot create directory %s: %s", path, err)
- }
- return path
-}
-
-// fakeSyncd creates a Syncd server structure with enough metadata to be used
-// in watcher unit tests. If "withStore" is true, it creates a fake store
-// entry; otherwise it simulates a no-store Sync server.
-func fakeSyncd(t *testing.T, storeDir string, withStore bool) *syncd {
- var s *syncd
- if withStore {
- s = newSyncdCore("", "", "fake-dev", storeDir, "", &fakeVStore{}, 0)
- } else {
- s = newSyncdCore("", "", "fake-dev", storeDir, "", nil, 0)
- }
- if s == nil {
- t.Fatal("cannot create a Sync server")
- }
- return s
-}
-
-// TestWatcherNoStore tests the watcher without a connection to a local store.
-// It verifies that the watcher exits without side-effects.
-func TestWatcherNoStore(t *testing.T) {
- dir := initTestDir(t)
- defer os.RemoveAll(dir)
-
- s := fakeSyncd(t, dir, false)
- s.Close()
-}
-
-// TestWatcherRPCError tests the watcher reacting to an error from the Watch() RPC.
-// It verifies that the watcher retries the RPC after a delay.
-func TestWatcherRPCError(t *testing.T) {
- rt.Init()
- dir := initTestDir(t)
- defer os.RemoveAll(dir)
-
- info.failWatch = true
- s := fakeSyncd(t, dir, true)
-
- n := 4
- time.Sleep(time.Duration(n) * watchRetryDelay)
-
- s.Close()
-
- if info.failWatchCount == 0 {
- t.Fatal("Watch() RPC retry count is zero")
- }
-}
-
-// TestWatcherRecvError tests the watcher reacting to an error from the stream receive.
-// It verifies that the watcher retries the Watch() RPC after a delay.
-func TestWatcherRecvError(t *testing.T) {
- rt.Init()
- dir := initTestDir(t)
- defer os.RemoveAll(dir)
-
- info.failRecv = true
- s := fakeSyncd(t, dir, true)
-
- n := 2
- time.Sleep(time.Duration(n) * streamRetryDelay)
-
- s.Close()
-
- if info.failRecvCount == 0 {
- t.Fatal("Recv() retry count is zero")
- }
-}
-
-// TestWatcherChanges tests the watcher applying changes received from store.
-func TestWatcherChanges(t *testing.T) {
- rt.Init()
- dir := initTestDir(t)
- defer os.RemoveAll(dir)
-
- s := fakeSyncd(t, dir, true)
-
- // Wait for the watcher to block on the Recv(), i.e. it finished processing the updates.
- <-recvBlocked
-
- // Verify the state of the Sync DAG and Device Table before terminating it.
- oidRoot := storage.ID{0x4c, 0x6d, 0xb5, 0x1a, 0xa7, 0x40, 0xd8, 0xc6, 0x2b, 0x90, 0xdf, 0x87, 0x45, 0x3, 0xe2, 0x85}
- oidA := storage.ID{0x8, 0x2b, 0xc4, 0x2e, 0x15, 0xaf, 0x4f, 0xcf, 0x61, 0x1d, 0x7f, 0x19, 0xa8, 0xd7, 0x83, 0x1f}
- oidB := storage.ID{0x6e, 0x4a, 0x32, 0x7c, 0x29, 0x7d, 0x76, 0xfb, 0x51, 0x42, 0xb1, 0xb1, 0xd9, 0x5b, 0x2d, 0x7}
- oidC := storage.ID{0x70, 0xff, 0x65, 0xec, 0xf, 0x82, 0x5f, 0x44, 0xb6, 0x9f, 0x89, 0x5e, 0xea, 0x75, 0x9d, 0x71}
-
- oids := []storage.ID{oidRoot, oidA, oidC}
- heads := []raw.Version{0x4d65822107fcfd52, 0xa858149c6e2d1000, 0x380704bb7b4d7c03}
-
- for i, oid := range oids {
- expHead := heads[i]
- head, err := s.dag.getHead(oid)
- if err != nil {
- t.Errorf("cannot find head node for object %d: %s", oid, err)
- } else if head != expHead {
- t.Errorf("wrong head for object %d: %d instead of %d", oid, head, expHead)
- }
- }
-
- // Verify oidB.
- headB, err := s.dag.getHead(oidB)
- if err != nil {
- t.Errorf("cannot find head node for object %d: %s", oidB, err)
- }
- if headB == raw.NoVersion || headB == raw.Version(0x55104dc76695721d) {
- t.Errorf("wrong head for object B %d: %d ", oidB, headB)
- }
-
- // Verify transaction state for the first transaction.
- node, err := s.dag.getNode(oidRoot, heads[0])
- if err != nil {
- t.Errorf("cannot find dag node for object %d %v: %s", oidRoot, heads[0], err)
- }
- if node.TxID == NoTxID {
- t.Errorf("expecting non nil txid for object %d:%v", oidRoot, heads[0])
- }
- txMap, err := s.dag.getTransaction(node.TxID)
- if err != nil {
- t.Errorf("cannot find transaction for id %v: %s", node.TxID, err)
- }
- expTxMap := dagTxMap{
- oidRoot: heads[0],
- oidA: raw.Version(0x57e9d1860d1d68d8),
- oidB: raw.Version(0x55104dc76695721d),
- }
- if !reflect.DeepEqual(txMap, expTxMap) {
- t.Errorf("Data mismatch for txid %v txmap %v instead of %v",
- node.TxID, txMap, expTxMap)
- }
-
- // Verify transaction state for the second transaction.
- node, err = s.dag.getNode(oidC, heads[2])
- if err != nil {
- t.Errorf("cannot find dag node for object %d %v: %s", oidC, heads[2], err)
- }
- if node.TxID == NoTxID {
- t.Errorf("expecting non nil txid for object %d:%v", oidC, heads[2])
- }
- txMap, err = s.dag.getTransaction(node.TxID)
- if err != nil {
- t.Errorf("cannot find transaction for id %v: %s", node.TxID, err)
- }
- expTxMap = dagTxMap{
- oidA: raw.Version(0x365a858149c6e2d1),
- oidC: heads[2],
- }
- if !reflect.DeepEqual(txMap, expTxMap) {
- t.Errorf("Data mismatch for txid %v txmap %v instead of %v",
- node.TxID, txMap, expTxMap)
- }
-
- // Verify transaction state for the third transaction.
- node, err = s.dag.getNode(oidA, heads[1])
- if err != nil {
- t.Errorf("cannot find dag node for object %d %v: %s", oidA, heads[1], err)
- }
- if node.TxID == NoTxID {
- t.Errorf("expecting non nil txid for object %d:%v", oidA, heads[1])
- }
- txMap, err = s.dag.getTransaction(node.TxID)
- if err != nil {
- t.Errorf("cannot find transaction for id %v: %s", node.TxID, err)
- }
- expTxMap = dagTxMap{
- oidA: heads[1],
- oidB: headB,
- }
- if !reflect.DeepEqual(txMap, expTxMap) {
- t.Errorf("Data mismatch for txid %v txmap %v instead of %v",
- node.TxID, txMap, expTxMap)
- }
-
- // Verify deletion tracking.
- node, err = s.dag.getNode(oidB, headB)
- if err != nil {
- t.Errorf("cannot find dag node for object %d %v: %s", oidB, headB, err)
- }
- if !node.Deleted {
- t.Errorf("deleted node not found for object %d %v: %s", oidB, headB, err)
- }
- if !s.dag.hasDeletedDescendant(oidB, raw.Version(0x55104dc76695721d)) {
- t.Errorf("link to deleted node not found for object %d %v: %s", oidB, headB, err)
- }
-
- expResmark := []byte{3, 0, 0, 0, 0, 0, 0, 0}
-
- if bytes.Compare(s.devtab.head.Resmark, expResmark) != 0 {
- t.Errorf("error in watch device table resume marker: %v instead of %v", s.devtab.head.Resmark, expResmark)
- }
-
- if bytes.Compare(info.watchResmark, expResmark) != 0 {
- t.Errorf("error in watch call final resume marker: %v instead of %v", info.watchResmark, expResmark)
- }
-
- s.Close()
-}
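The two retry tests above depend on the watcher re-issuing its Watch() RPC after watchRetryDelay (or streamRetryDelay) whenever the RPC or the stream receive fails. A minimal sketch of that retry shape, with a hypothetical watchOnce standing in for the Watch() RPC plus its receive loop (this is illustrative, not the actual syncd code):

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
	"time"
)

var errStream = errors.New("stream receive failed")

// watchLoop re-issues the watch until stop is closed, sleeping retryDelay
// between failed attempts. watchOnce is a hypothetical stand-in for the
// Watch() RPC plus its receive loop.
func watchLoop(stop <-chan struct{}, retryDelay time.Duration, watchOnce func() error) {
	for {
		select {
		case <-stop:
			return
		default:
		}
		if err := watchOnce(); err != nil {
			time.Sleep(retryDelay)
		}
	}
}

func main() {
	stop := make(chan struct{})
	var attempts int64
	go watchLoop(stop, 10*time.Millisecond, func() error {
		atomic.AddInt64(&attempts, 1)
		return errStream // always fail, forcing retries
	})
	time.Sleep(100 * time.Millisecond)
	close(stop)
	fmt.Println("retried at least once:", atomic.LoadInt64(&attempts) > 1)
}
```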
diff --git a/services/identity/blesser/oauth.go b/services/identity/blesser/oauth.go
index e865b30..fb35bb0 100644
--- a/services/identity/blesser/oauth.go
+++ b/services/identity/blesser/oauth.go
@@ -21,7 +21,7 @@
type googleOAuth struct {
rt veyron2.Runtime
authcodeClient struct{ ID, Secret string }
- accessTokenClient struct{ ID string }
+ accessTokenClients []struct{ ID string }
duration time.Duration
domain string
dischargerLocation string
@@ -36,8 +36,8 @@
AuthorizationCodeClient struct {
ID, Secret string
}
- // The OAuth client ID for the chrome-extension that will make BlessUsingAccessToken RPCs.
- AccessTokenClient struct {
+ // The OAuth client IDs for the clients of the BlessUsingAccessToken RPCs.
+ AccessTokenClients []struct {
ID string
}
// The duration for which blessings will be valid.
@@ -68,7 +68,7 @@
}
b.authcodeClient.ID = p.AuthorizationCodeClient.ID
b.authcodeClient.Secret = p.AuthorizationCodeClient.Secret
- b.accessTokenClient.ID = p.AccessTokenClient.ID
+ b.accessTokenClients = p.AccessTokenClients
return identity.NewServerOAuthBlesser(b)
}
@@ -85,7 +85,7 @@
}
func (b *googleOAuth) BlessUsingAccessToken(ctx ipc.ServerContext, accesstoken string) (vdlutil.Any, error) {
- if len(b.accessTokenClient.ID) == 0 {
+ if len(b.accessTokenClients) == 0 {
return nil, fmt.Errorf("server not configured for blessing based on access tokens")
}
// URL from: https://developers.google.com/accounts/docs/OAuth2UserAgent#validatetoken
@@ -110,8 +110,15 @@
if err := json.NewDecoder(tokeninfo.Body).Decode(&token); err != nil {
return "", fmt.Errorf("invalid JSON response from Google's tokeninfo API: %v", err)
}
- if token.Audience != b.accessTokenClient.ID {
- vlog.Infof("Got access token [%+v], wanted client id %v", token, b.accessTokenClient.ID)
+ audienceMatch := false
+ for _, c := range b.accessTokenClients {
+ if token.Audience == c.ID {
+ audienceMatch = true
+ break
+ }
+ }
+ if !audienceMatch {
+ vlog.Infof("Got access token [%+v], wanted one of client ids %v", token, b.accessTokenClients)
return "", fmt.Errorf("token not meant for this purpose, confused deputy? https://developers.google.com/accounts/docs/OAuth2UserAgent#validatetoken")
}
if !token.VerifiedEmail {
@@ -122,7 +129,7 @@
func (b *googleOAuth) bless(ctx ipc.ServerContext, name string) (vdlutil.Any, error) {
if len(b.domain) > 0 && !strings.HasSuffix(name, "@"+b.domain) {
- return nil, fmt.Errorf("blessings for %q are not allowed", name)
+ return nil, fmt.Errorf("blessings for name %q are not allowed due to domain restriction", name)
}
self := b.rt.Identity()
var err error
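The audience check above now accepts a token whose audience matches any of the configured access-token clients, rather than a single chrome-extension client ID. The core of the check as a standalone sketch (the client IDs are made up):

```go
package main

import "fmt"

// audienceAllowed reports whether a tokeninfo audience matches one of the
// configured access-token client IDs, mirroring the loop added above.
func audienceAllowed(audience string, clients []struct{ ID string }) bool {
	for _, c := range clients {
		if audience == c.ID {
			return true
		}
	}
	return false
}

func main() {
	// Hypothetical client IDs; the real ones come from the server config.
	clients := []struct{ ID string }{{"chrome-client-id"}, {"android-client-id"}}
	fmt.Println(audienceAllowed("android-client-id", clients)) // true
	fmt.Println(audienceAllowed("other-client-id", clients))   // false
}
```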
diff --git a/services/identity/googleoauth/utils.go b/services/identity/googleoauth/utils.go
index dbd5026..6f5230a 100644
--- a/services/identity/googleoauth/utils.go
+++ b/services/identity/googleoauth/utils.go
@@ -6,32 +6,60 @@
"io"
)
+// ClientIDFromJSON parses JSON-encoded API access information in 'r' and returns
+// the extracted ClientID.
+// This JSON-encoded data is typically available as a download from the Google
+// API Access console for your application
+// (https://code.google.com/apis/console).
+func ClientIDFromJSON(r io.Reader) (id string, err error) {
+ var data map[string]interface{}
+ var typ string
+ if data, typ, err = decodeAccessMapFromJSON(r); err != nil {
+ return
+ }
+ var ok bool
+ if id, ok = data["client_id"].(string); !ok {
+ err = fmt.Errorf("%s.client_id not found", typ)
+ return
+ }
+ return
+}
+
// ClientIDAndSecretFromJSON parses JSON-encoded API access information in 'r'
// and returns the extracted ClientID and ClientSecret.
// This JSON-encoded data is typically available as a download from the Google
// API Access console for your application
// (https://code.google.com/apis/console).
func ClientIDAndSecretFromJSON(r io.Reader) (id, secret string, err error) {
- var full, x map[string]interface{}
+ var data map[string]interface{}
+ var typ string
+ if data, typ, err = decodeAccessMapFromJSON(r); err != nil {
+ return
+ }
+ var ok bool
+ if id, ok = data["client_id"].(string); !ok {
+ err = fmt.Errorf("%s.client_id not found", typ)
+ return
+ }
+ if secret, ok = data["client_secret"].(string); !ok {
+ err = fmt.Errorf("%s.client_secret not found", typ)
+ return
+ }
+ return
+}
+
+func decodeAccessMapFromJSON(r io.Reader) (data map[string]interface{}, typ string, err error) {
+ var full map[string]interface{}
if err = json.NewDecoder(r).Decode(&full); err != nil {
return
}
var ok bool
- typ := "web"
- if x, ok = full[typ].(map[string]interface{}); !ok {
+ typ = "web"
+ if data, ok = full[typ].(map[string]interface{}); !ok {
typ = "installed"
- if x, ok = full[typ].(map[string]interface{}); !ok {
+ if data, ok = full[typ].(map[string]interface{}); !ok {
err = fmt.Errorf("web or installed configuration not found")
- return
}
}
- if id, ok = x["client_id"].(string); !ok {
- err = fmt.Errorf("%s.client_id not found", typ)
- return
- }
- if secret, ok = x["client_secret"].(string); !ok {
- err = fmt.Errorf("%s.client_secret not found", typ)
- return
- }
return
}
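decodeAccessMapFromJSON factors out the two-level decode shared by ClientIDFromJSON and ClientIDAndSecretFromJSON: the client-secrets download is a JSON object keyed by either "web" or "installed". A self-contained sketch of the same decode, using only the standard library and a made-up sample config:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// A made-up config in the client-secrets shape; real files come from the
// Google API console download.
const sample = `{"installed": {"client_id": "xyz.apps.googleusercontent.com", "client_secret": "shhh"}}`

func main() {
	var full map[string]map[string]interface{}
	if err := json.NewDecoder(strings.NewReader(sample)).Decode(&full); err != nil {
		panic(err)
	}
	for _, typ := range []string{"web", "installed"} {
		if data, ok := full[typ]; ok {
			fmt.Println(typ, data["client_id"], data["client_secret"])
			return
		}
	}
	fmt.Println("web or installed configuration not found")
}
```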
diff --git a/services/identity/identityd/main.go b/services/identity/identityd/main.go
index 20c40af..84b752b 100644
--- a/services/identity/identityd/main.go
+++ b/services/identity/identityd/main.go
@@ -47,6 +47,7 @@
googleConfigWeb = flag.String("google_config_web", "", "Path to JSON-encoded OAuth client configuration for the web application that renders the audit log for blessings provided by this provider.")
googleConfigInstalled = flag.String("google_config_installed", "", "Path to the JSON-encoded OAuth client configuration for installed client applications that obtain blessings (via the OAuthBlesser.BlessUsingAuthorizationCode RPC) from this server (like the 'identity' command-line tool and its 'seekblessing' command).")
googleConfigChrome = flag.String("google_config_chrome", "", "Path to the JSON-encoded OAuth client configuration for Chrome browser applications that obtain blessings (via the OAuthBlesser.BlessUsingAccessToken RPC) from this server.")
+ googleConfigAndroid = flag.String("google_config_android", "", "Path to the JSON-encoded OAuth client configuration for Android applications that obtain blessings (via the OAuthBlesser.BlessUsingAccessToken RPC) from this server.")
googleDomain = flag.String("google_domain", "", "An optional domain name. When set, only email addresses from this domain are allowed to authenticate via Google OAuth")
// Revoker/Discharger configuration
@@ -84,7 +85,7 @@
if ipcServer != nil {
defer ipcServer.Stop()
}
- if enabled, clientID, clientSecret := enableGoogleOAuth(*googleConfigWeb); enabled && len(*auditprefix) > 0 {
+ if clientID, clientSecret, ok := getOAuthClientIDAndSecret(*googleConfigWeb); ok && len(*auditprefix) > 0 {
n := "/google/"
http.Handle(n, googleoauth.NewHandler(googleoauth.HandlerArgs{
Addr: fmt.Sprintf("%s%s", httpaddress(), n),
@@ -167,14 +168,18 @@
DomainRestriction: *googleDomain,
RevocationManager: revocationManager,
}
- if authcode, clientID, clientSecret := enableGoogleOAuth(*googleConfigInstalled); authcode {
+ if clientID, clientSecret, ok := getOAuthClientIDAndSecret(*googleConfigInstalled); ok {
enable = true
params.AuthorizationCodeClient.ID = clientID
params.AuthorizationCodeClient.Secret = clientSecret
}
- if accesstoken, clientID, _ := enableGoogleOAuth(*googleConfigChrome); accesstoken {
+ if clientID, ok := getOAuthClientID(*googleConfigChrome); ok {
enable = true
- params.AccessTokenClient.ID = clientID
+ params.AccessTokenClients = append(params.AccessTokenClients, struct{ ID string }{clientID})
+ }
+ if clientID, ok := getOAuthClientID(*googleConfigAndroid); ok {
+ enable = true
+ params.AccessTokenClients = append(params.AccessTokenClients, struct{ ID string }{clientID})
}
if !enable {
return nil, nil, nil
@@ -199,12 +204,28 @@
func enableTLS() bool { return len(*tlsconfig) > 0 }
func enableRandomHandler() bool {
- return len(*googleConfigInstalled)+len(*googleConfigWeb)+len(*googleConfigChrome) == 0
+ return len(*googleConfigInstalled)+len(*googleConfigWeb)+len(*googleConfigChrome)+len(*googleConfigAndroid) == 0
}
-func enableGoogleOAuth(config string) (enabled bool, clientID, clientSecret string) {
+func getOAuthClientID(config string) (clientID string, ok bool) {
fname := config
if len(fname) == 0 {
- return false, "", ""
+ return "", false
+ }
+ f, err := os.Open(fname)
+ if err != nil {
+ vlog.Fatalf("Failed to open %q: %v", fname, err)
+ }
+ defer f.Close()
+ clientID, err = googleoauth.ClientIDFromJSON(f)
+ if err != nil {
+ vlog.Fatalf("Failed to decode JSON in %q: %v", fname, err)
+ }
+ return clientID, true
+}
+func getOAuthClientIDAndSecret(config string) (clientID, clientSecret string, ok bool) {
+ fname := config
+ if len(fname) == 0 {
+ return "", "", false
}
f, err := os.Open(fname)
if err != nil {
@@ -215,9 +236,8 @@
if err != nil {
vlog.Fatalf("Failed to decode JSON in %q: %v", fname, err)
}
- return true, clientID, clientSecret
+ return clientID, clientSecret, true
}
-
func runHTTPServer(addr string) {
if !enableTLS() {
if err := http.ListenAndServe(addr, nil); err != nil {
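getOAuthClientID and getOAuthClientIDAndSecret replace the old (enabled, id, secret) triple with idiomatic (value, ok) returns: an empty flag means "not configured", while a present but unparsable config is fatal. The same contract in a minimal standalone helper (the flat client_id field and the GOOGLE_CONFIG env var are simplifications for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// loadClientID returns ("", false) when the flag was left empty, and fails
// hard on a config that exists but cannot be parsed, mirroring the contract
// of getOAuthClientID above.
func loadClientID(path string) (string, bool) {
	if path == "" {
		return "", false
	}
	f, err := os.Open(path)
	if err != nil {
		panic(fmt.Sprintf("Failed to open %q: %v", path, err))
	}
	defer f.Close()
	var cfg struct {
		ClientID string `json:"client_id"`
	}
	if err := json.NewDecoder(f).Decode(&cfg); err != nil {
		panic(fmt.Sprintf("Failed to decode JSON in %q: %v", path, err))
	}
	return cfg.ClientID, true
}

func main() {
	if id, ok := loadClientID(os.Getenv("GOOGLE_CONFIG")); ok {
		fmt.Println("client id:", id)
	}
}
```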
diff --git a/services/mgmt/application/applicationd/test.sh b/services/mgmt/application/applicationd/test.sh
old mode 100755
new mode 100644
index af10d2a..e73fcd6
--- a/services/mgmt/application/applicationd/test.sh
+++ b/services/mgmt/application/applicationd/test.sh
@@ -11,7 +11,7 @@
build() {
local -r GO="${REPO_ROOT}/scripts/build/go"
"${GO}" build veyron/services/mgmt/application/applicationd || shell_test::fail "line ${LINENO}: failed to build 'applicationd'"
- "${GO}" build veyron/services/store/stored || shell_test::fail "line ${LINENO}: failed to build 'stored'"
+ "${GO}" build veyron.io/store/veyron/services/store/stored || shell_test::fail "line ${LINENO}: failed to build 'stored'"
"${GO}" build veyron/tools/application || shell_test::fail "line ${LINENO}: failed to build 'application'"
}
diff --git a/services/mgmt/application/impl/impl_test.go b/services/mgmt/application/impl/impl_test.go
index de8d13b..eb228eb 100644
--- a/services/mgmt/application/impl/impl_test.go
+++ b/services/mgmt/application/impl/impl_test.go
@@ -4,8 +4,8 @@
"reflect"
"testing"
+ "veyron.io/store/veyron/services/store/testutil"
"veyron/services/mgmt/repository"
- "veyron/services/store/testutil"
"veyron2/naming"
"veyron2/rt"
diff --git a/services/mgmt/application/impl/invoker.go b/services/mgmt/application/impl/invoker.go
index a0f3d2f..b79b136 100644
--- a/services/mgmt/application/impl/invoker.go
+++ b/services/mgmt/application/impl/invoker.go
@@ -4,8 +4,8 @@
"errors"
"strings"
+ _ "veyron.io/store/veyron/services/store/typeregistryhack"
"veyron/services/mgmt/lib/fs"
- _ "veyron/services/store/typeregistryhack"
"veyron2/ipc"
"veyron2/naming"
"veyron2/services/mgmt/application"
diff --git a/services/mgmt/profile/impl/impl_test.go b/services/mgmt/profile/impl/impl_test.go
index 8a2fb7f..44e17e4 100644
--- a/services/mgmt/profile/impl/impl_test.go
+++ b/services/mgmt/profile/impl/impl_test.go
@@ -4,9 +4,9 @@
"reflect"
"testing"
+ "veyron.io/store/veyron/services/store/testutil"
"veyron/services/mgmt/profile"
"veyron/services/mgmt/repository"
- "veyron/services/store/testutil"
"veyron2/naming"
"veyron2/rt"
diff --git a/services/mgmt/profile/impl/invoker.go b/services/mgmt/profile/impl/invoker.go
index 3e51159..cf79341 100644
--- a/services/mgmt/profile/impl/invoker.go
+++ b/services/mgmt/profile/impl/invoker.go
@@ -5,7 +5,7 @@
"veyron/services/mgmt/lib/fs"
"veyron/services/mgmt/profile"
- // _ "veyron/services/store/typeregistryhack"
+ // _ "veyron.io/store/veyron/services/store/typeregistryhack"
"veyron2/ipc"
"veyron2/naming"
diff --git a/services/mgmt/profile/profiled/test.sh b/services/mgmt/profile/profiled/test.sh
old mode 100755
new mode 100644
index 72bccce..31fd7e5
--- a/services/mgmt/profile/profiled/test.sh
+++ b/services/mgmt/profile/profiled/test.sh
@@ -12,7 +12,7 @@
build() {
local -r GO="${REPO_ROOT}/scripts/build/go"
"${GO}" build veyron/services/mgmt/profile/profiled || shell_test::fail "line ${LINENO}: failed to build 'profiled'"
- "${GO}" build veyron/services/store/stored || shell_test::fail "line ${LINENO}: failed to build 'stored'"
+ "${GO}" build veyron.io/store/veyron/services/store/stored || shell_test::fail "line ${LINENO}: failed to build 'stored'"
"${GO}" build veyron/tools/profile || shell_test::fail "line ${LINENO}: failed to build 'profile'"
}
diff --git a/services/store/memstore/blackbox/graph_test.go b/services/store/memstore/blackbox/graph_test.go
deleted file mode 100644
index 6afbb4b..0000000
--- a/services/store/memstore/blackbox/graph_test.go
+++ /dev/null
@@ -1,157 +0,0 @@
-package blackbox
-
-import (
- "testing"
-
- _ "veyron/lib/testutil"
- "veyron/services/store/memstore"
-
- "veyron2/storage"
-)
-
-// A Node has a Label and some Children.
-type Node struct {
- Label string
- Children map[string]storage.ID
-}
-
-// Create a linear graph and truncate it.
-func TestLinear(t *testing.T) {
- const linearNodeCount = 10
-
- st, err := memstore.New(rootPublicID, "")
- if err != nil {
- t.Fatalf("memstore.New() failed: %v", err)
- }
- if v, err := st.Bind("/").Get(rootPublicID, nil); v != nil || err == nil {
- t.Errorf("Unexpected root")
- }
-
- if _, err := st.Bind("/").Put(rootPublicID, nil, &Node{}); err != nil {
- t.Errorf("Unexpected error: %s", err)
- }
-
- // Create a linked list.
- path := ""
- var nodes [linearNodeCount]*Node
- var ids [linearNodeCount]storage.ID
- tr := memstore.NewTransaction()
- for i := 0; i != linearNodeCount; i++ {
- path = path + "/Children/a"
- node := &Node{Label: path}
- nodes[i] = node
- stat, err := st.Bind(path).Put(rootPublicID, tr, node)
- if err != nil {
- t.Errorf("Unexpected error: %s", err)
- }
- ids[i] = stat.ID
- if _, err := st.Bind(path).Get(rootPublicID, tr); err != nil {
- t.Errorf("Unexpected error: %s", err)
- }
- }
- tr.Commit()
-
- // Verify that all the nodes still exist.
- st.GC()
- for i, node := range nodes {
- path := node.Label
- id := ids[i]
- ExpectExists(t, st, path, id)
- }
-
- // Truncate the graph to length 3.
- {
- node := nodes[2]
- node.Children = nil
- if _, err := st.Bind("/Children/a/Children/a/Children/a").Put(rootPublicID, nil, node); err != nil {
- t.Errorf("Unexpected error: %s", err)
- }
- }
-
- st.GC()
- for i, node := range nodes {
- path := node.Label
- id := ids[i]
- if i < 3 {
- ExpectExists(t, st, path, id)
- } else {
- ExpectNotExists(t, st, path, id)
- }
- }
-}
-
-// Create a lollipop graph and remove part of the cycle.
-func TestLollipop(t *testing.T) {
- const linearNodeCount = 10
- const loopNodeIndex = 5
- const cutNodeIndex = 7
-
- st, err := memstore.New(rootPublicID, "")
- if err != nil {
- t.Fatalf("memstore.New() failed: %v", err)
- }
- if v, err := st.Bind("/").Get(rootPublicID, nil); v != nil || err == nil {
- t.Errorf("Unexpected root")
- }
-
- stat, err := st.Bind("/").Put(rootPublicID, nil, &Node{})
- if err != nil || stat == nil {
- t.Fatalf("Unexpected error: %s", err)
- }
- id := stat.ID
-
- // Create a linked list.
- path := ""
- var nodes [linearNodeCount]*Node
- var ids [linearNodeCount]storage.ID
- tr := memstore.NewTransaction()
- for i := 0; i != linearNodeCount; i++ {
- path = path + "/Children/a"
- node := &Node{Label: path}
- nodes[i] = node
- stat, err := st.Bind(path).Put(rootPublicID, tr, node)
- if err != nil || stat == nil {
- t.Fatalf("Unexpected error: %s: %s", path, err)
- }
- id = stat.ID
- ids[i] = id
- }
-
- // Add a back-loop.
- {
- node := nodes[linearNodeCount-1]
- node.Children = map[string]storage.ID{"a": ids[loopNodeIndex]}
- if _, err := st.Bind(node.Label).Put(rootPublicID, tr, node); err != nil {
- t.Errorf("Unexpected error: %s", err)
- }
- }
- tr.Commit()
-
- // Verify that all the nodes still exist.
- st.GC()
- for i, node := range nodes {
- path := node.Label
- id := ids[i]
- ExpectExists(t, st, path, id)
- }
-
- // Truncate part of the loop.
- {
- node := nodes[cutNodeIndex]
- node.Children = nil
- if _, err := st.Bind(node.Label).Put(rootPublicID, nil, node); err != nil {
- t.Errorf("Unexpected error: %s", err)
- }
- }
-
- st.GC()
- for i, node := range nodes {
- path := node.Label
- id := ids[i]
- if i <= cutNodeIndex {
- ExpectExists(t, st, path, id)
- } else {
- ExpectNotExists(t, st, path, id)
- }
- }
-}
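Both deleted tests assert that GC keeps exactly the nodes still reachable from the root: truncating the list, or the lollipop's cycle, must let everything past the cut be collected. The reachability computation they rely on, sketched over a simplified node map rather than the memstore types, handles the cyclic case with a visited set:

```go
package main

import "fmt"

// A node points at children by ID; GC keeps everything reachable from root.
type node struct{ children []int }

func reachable(nodes map[int]node, root int) map[int]bool {
	seen := map[int]bool{}
	stack := []int{root}
	for len(stack) > 0 {
		id := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		if seen[id] { // also terminates cycles, as in TestLollipop
			continue
		}
		seen[id] = true
		stack = append(stack, nodes[id].children...)
	}
	return seen
}

func main() {
	// 0 -> 1 -> 2 -> 3; cutting 2's link makes 3 unreachable.
	nodes := map[int]node{
		0: {children: []int{1}},
		1: {children: []int{2}},
		2: {}, // link to 3 removed
		3: {},
	}
	seen := reachable(nodes, 0)
	fmt.Println(seen[2], seen[3]) // true false
}
```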
diff --git a/services/store/memstore/blackbox/many_to_many/doc.go b/services/store/memstore/blackbox/many_to_many/doc.go
deleted file mode 100644
index b1a48ba..0000000
--- a/services/store/memstore/blackbox/many_to_many/doc.go
+++ /dev/null
@@ -1,4 +0,0 @@
-// Package blackbox is placed under its own directory many_to_many to separate
-// it from memstore/blackbox, since they'd otherwise clash on the names of the
-// types and methods they each define for their specific tests.
-package blackbox
diff --git a/services/store/memstore/blackbox/many_to_many/many_to_many_test.go b/services/store/memstore/blackbox/many_to_many/many_to_many_test.go
deleted file mode 100644
index a925681..0000000
--- a/services/store/memstore/blackbox/many_to_many/many_to_many_test.go
+++ /dev/null
@@ -1,295 +0,0 @@
-package blackbox
-
-import (
- "runtime"
- "testing"
-
- "veyron/services/store/memstore"
- "veyron/services/store/memstore/blackbox"
-
- "veyron2/security"
- "veyron2/storage"
- "veyron2/vom"
-)
-
-var (
- rootPublicID security.PublicID = security.FakePublicID("root")
-)
-
-func init() {
- vom.Register(&Person{})
- vom.Register(&Player{})
- vom.Register(&Team{})
- vom.Register(&Role{})
- vom.Register(&DirectPlayer{})
- vom.Register(&DirectTeam{})
-}
-
-// This schema uses a Role relation to indicate who plays for what team.
-// There are also indexes; each Player and each Team has a list of Roles.
-//
-// Person : a person.
-// Player : belongs to many teams.
-// Team : contains many players.
-// Role : (Player, Team) pair.
-//
-// / : Dir
-// /People : Dir
-// /People/John : Person
-// /Players : Dir
-// /Players/John : Player
-// /Teams : Dir
-// /Teams/Rockets : Team
-
-// Dir represents a directory.
-type Dir struct{}
-
-// Person is a person.
-type Person struct {
- FullName string
- SSN int
-}
-
-// Player is a person who has a Role.
-type Player struct {
- Person storage.ID
- Roles []storage.ID // Role
-}
-
-// Team has a set of Roles.
-type Team struct {
- Roles []storage.ID // Role
-}
-
-// Role associates a Player with a Team.
-type Role struct {
- Position string
- Player storage.ID // Player
- Team storage.ID
-}
-
-func newDir() *Dir {
- return &Dir{}
-}
-
-func newPerson(name string, ssn int) *Person {
- return &Person{FullName: name, SSN: ssn}
-}
-
-func newPlayer(personID storage.ID) *Player {
- return &Player{Person: personID}
-}
-
-func newTeam() *Team {
- return &Team{}
-}
-
-func newRole(pos string, playerID, teamID storage.ID) *Role {
- return &Role{Position: pos, Player: playerID, Team: teamID}
-}
-
-func getPerson(t *testing.T, st *memstore.Store, tr *memstore.Transaction, path string) (storage.ID, *Person) {
- _, file, line, _ := runtime.Caller(1)
- e := blackbox.Get(t, st, tr, path)
- v := e.Value
- p, ok := v.(*Person)
- if !ok {
- t.Fatalf("%s(%d): %s: not a Person: %v", file, line, path, v)
- }
- return e.Stat.ID, p
-}
-
-func getPlayer(t *testing.T, st *memstore.Store, tr *memstore.Transaction, path string) (storage.ID, *Player) {
- _, file, line, _ := runtime.Caller(1)
- e := blackbox.Get(t, st, tr, path)
- v := e.Value
- p, ok := v.(*Player)
- if !ok {
- t.Fatalf("%s(%d): %s: not a Player: %v", file, line, path, v)
- }
- return e.Stat.ID, p
-}
-
-func getTeam(t *testing.T, st *memstore.Store, tr *memstore.Transaction, path string) (storage.ID, *Team) {
- _, file, line, _ := runtime.Caller(1)
- e := blackbox.Get(t, st, tr, path)
- v := e.Value
- p, ok := v.(*Team)
- if !ok {
- t.Fatalf("%s(%d): %s: not a Team: %v", file, line, path, v)
- }
- return e.Stat.ID, p
-}
-
-func getRole(t *testing.T, st *memstore.Store, tr *memstore.Transaction, path string) (storage.ID, *Role) {
- _, file, line, _ := runtime.Caller(1)
- e := blackbox.Get(t, st, tr, path)
- v := e.Value
- p, ok := v.(*Role)
- if !ok {
- t.Fatalf("%s(%d): %s: not a Role: %v", file, line, path, v)
- }
- return e.Stat.ID, p
-}
-
-func TestManyToManyWithRole(t *testing.T) {
- st, err := memstore.New(rootPublicID, "")
- if err != nil {
- t.Fatalf("memstore.New() failed: %v", err)
- }
-
- // Create a player John who plays for the Rockets team.
- {
- tr := memstore.NewTransaction()
- blackbox.Put(t, st, tr, "/", newDir())
- blackbox.Put(t, st, tr, "/People", newDir())
- blackbox.Put(t, st, tr, "/Players", newDir())
- blackbox.Put(t, st, tr, "/Teams", newDir())
-
- person := newPerson("John", 1234567809)
- personID := blackbox.Put(t, st, tr, "/People/John", person)
- player := newPlayer(personID)
- playerID := blackbox.Put(t, st, tr, "/Players/John", player)
- team := newTeam()
- teamID := blackbox.Put(t, st, tr, "/Teams/Rockets", team)
-
- // XXX(jyh): we have to update the team/player to add the cyclic
- // links. Consider whether individual operations in a transaction
- // should be exempt from the dangling-reference check.
- //
- // Note: the @ means to append the role to the Roles array.
- role := newRole("center", playerID, teamID)
- roleID := blackbox.Put(t, st, tr, "/Players/John/Roles/@", role)
- blackbox.Put(t, st, tr, "/Teams/Rockets/Roles/@", roleID)
-
- blackbox.Commit(t, tr)
- }
-
- // Verify the state.
- {
- tr := memstore.NewTransaction()
- pID, p := getPerson(t, st, tr, "/People/John")
- if p.FullName != "John" {
- t.Errorf("Expected %q, got %q", "John", p.FullName)
- }
-
- plID, pl := getPlayer(t, st, tr, "/Players/John")
- if pl.Person != pID {
- t.Errorf("Expected %s, got %s", pID, pl.Person)
- }
-
- teamID, team := getTeam(t, st, tr, "/Teams/Rockets")
- if len(team.Roles) != 1 || len(pl.Roles) != 1 || team.Roles[0] != pl.Roles[0] {
- t.Errorf("Expected one role: %v, %v", team, pl)
- }
-
- role1ID, role1 := getRole(t, st, tr, "/Players/John/Roles/0")
- role2ID, _ := getRole(t, st, tr, "/Teams/Rockets/Roles/0")
- if role1ID != role2ID {
- t.Errorf("Expected %s, got %s", role1ID, role2ID)
- }
- if role1.Player != plID {
- t.Errorf("Expected %s, got %s", plID, role1.Player)
- }
- if role1.Team != teamID {
- t.Errorf("Expected %s, got %s", teamID, role1.Team)
- }
- }
-}
-
-////////////////////////////////////////////////////////////////////////
-// This schema removes the separate Role object. Instead the Player refers
-// directly to the Teams, and vice versa.
-
-// DirectPlayer is a person who plays on a team.
-type DirectPlayer struct {
- Person storage.ID
- Teams []storage.ID
-}
-
-// DirectTeam has a set of players.
-type DirectTeam struct {
- Players []storage.ID
-}
-
-func newDirectPlayer(personID storage.ID) *DirectPlayer {
- return &DirectPlayer{Person: personID}
-}
-
-func newDirectTeam() *DirectTeam {
- return &DirectTeam{}
-}
-
-func getDirectPlayer(t *testing.T, st *memstore.Store, tr *memstore.Transaction, path string) (storage.ID, *DirectPlayer) {
- _, file, line, _ := runtime.Caller(1)
- e := blackbox.Get(t, st, tr, path)
- v := e.Value
- p, ok := v.(*DirectPlayer)
- if !ok {
- t.Fatalf("%s(%d): %s: not a DirectPlayer: %v", file, line, path, v)
- }
- return e.Stat.ID, p
-}
-
-func getDirectTeam(t *testing.T, st *memstore.Store, tr *memstore.Transaction, path string) (storage.ID, *DirectTeam) {
- _, file, line, _ := runtime.Caller(1)
- e := blackbox.Get(t, st, tr, path)
- v := e.Value
- p, ok := v.(*DirectTeam)
- if !ok {
- t.Fatalf("%s(%d): %s: not a DirectTeam: %v", file, line, path, v)
- }
- return e.Stat.ID, p
-}
-
-func TestManyToManyDirect(t *testing.T) {
- st, err := memstore.New(rootPublicID, "")
- if err != nil {
- t.Fatalf("memstore.New() failed: %v", err)
- }
-
- // Create a player John who plays for the Rockets team.
- {
- tr := memstore.NewTransaction()
- blackbox.Put(t, st, tr, "/", newDir())
- blackbox.Put(t, st, tr, "/People", newDir())
- blackbox.Put(t, st, tr, "/Players", newDir())
- blackbox.Put(t, st, tr, "/Teams", newDir())
-
- person := newPerson("John", 1234567809)
- personID := blackbox.Put(t, st, tr, "/People/John", person)
- player := newDirectPlayer(personID)
- playerID := blackbox.Put(t, st, tr, "/Players/John", player)
- team := newDirectTeam()
- teamID := blackbox.Put(t, st, tr, "/Teams/Rockets", team)
-
- // XXX(jyh): we have to update the team/player to add the cyclic
- // links. Consider whether individual operations in a transaction
- // should be exempt from the dangling-reference check.
- blackbox.Put(t, st, tr, "/Players/John/Teams/@", teamID)
- blackbox.Put(t, st, tr, "/Teams/Rockets/Players/@", playerID)
-
- blackbox.Commit(t, tr)
- }
-
- // Verify the state.
- {
- tr := memstore.NewTransaction()
- pID, p := getPerson(t, st, tr, "/People/John")
- plID, pl := getDirectPlayer(t, st, tr, "/Players/John")
- teamID, team := getDirectTeam(t, st, tr, "/Teams/Rockets")
-
- if p.FullName != "John" {
- t.Errorf("Expected %q, got %q", "John", p.FullName)
- }
- if pl.Person != pID {
- t.Errorf("Expected %s, got %s", pID, pl.Person)
- }
- if len(pl.Teams) != 1 || pl.Teams[0] != teamID {
- t.Errorf("Expected one team: %v, %v", pl.Teams, team.Players)
- }
- if len(team.Players) != 1 || team.Players[0] != plID {
- t.Errorf("Expected one player: %v, %v", team, pl)
- }
- }
-}
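The two deleted tests contrast a join-record schema (Role) with direct cross-references between players and teams. The trade-off in plain Go terms, with int stand-ins for storage.ID:

```go
package main

import "fmt"

// Role is the join record: per-membership data (the position) has a home.
type Role struct {
	Position string
	Player   int // would be storage.ID in the real schema
	Team     int
}

// The direct schema drops the join object; there is one less indirection,
// but nowhere to hang per-membership attributes like Position.
type DirectPlayer struct{ Teams []int }
type DirectTeam struct{ Players []int }

func main() {
	r := Role{Position: "center", Player: 1, Team: 7}
	fmt.Printf("join record: %+v\n", r)
	fmt.Printf("direct: %+v %+v\n", DirectPlayer{Teams: []int{7}}, DirectTeam{Players: []int{1}})
}
```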
diff --git a/services/store/memstore/blackbox/photoalbum_test.go b/services/store/memstore/blackbox/photoalbum_test.go
deleted file mode 100644
index b1deb61..0000000
--- a/services/store/memstore/blackbox/photoalbum_test.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package blackbox
-
-import (
- "runtime"
- "testing"
-
- "veyron/services/store/memstore"
-
- "veyron2/storage"
- "veyron2/vom"
-)
-
-func init() {
- vom.Register(&Dir{})
- vom.Register(&User{})
- vom.Register(&Photo{})
- vom.Register(&Edit{})
- vom.Register(&Album{})
-}
-
-// Dir is a "directory" containg a dictionaries of entries.
-type Dir struct{}
-
-// User represents a "user", with a username and a "home" directory.
-// The name of the user is part of the path to the object.
-type User struct {
- Dir
- SSN int
-}
-
-// Photo represents an image. It contains the Object name for the data,
-// stored elsewhere on some content server.
-type Photo struct {
- Dir
- Comment string
- Content string // Object name
- Edits []Edit
-}
-
-// Edit is an edit to a Photo.
-type Edit struct {
- // ...
-}
-
-// Album is a photoalbum.
-type Album struct {
- Title string
- Photos map[string]storage.ID
-}
-
-func newDir() *Dir {
- return &Dir{}
-}
-
-func newUser(ssn int) *User {
- return &User{SSN: ssn}
-}
-
-func newAlbum(title string) *Album {
- return &Album{Title: title}
-}
-
-func newPhoto(content, comment string, edits ...Edit) *Photo {
- return &Photo{Content: content, Comment: comment}
-}
-
-func getPhoto(t *testing.T, st *memstore.Store, tr *memstore.Transaction, path string) *Photo {
- _, file, line, _ := runtime.Caller(1)
- v := Get(t, st, tr, path).Value
- p, ok := v.(*Photo)
- if !ok {
- t.Fatalf("%s(%d): %s: not a Photo: %#v", file, line, path, v)
- }
- return p
-}
-
-func TestPhotoAlbum(t *testing.T) {
- st, err := memstore.New(rootPublicID, "")
- if err != nil {
- t.Fatalf("memstore.New() failed: %v", err)
- }
-
- // Create directories.
- {
- tr := memstore.NewTransaction()
- Put(t, st, tr, "/", newDir())
- Put(t, st, tr, "/Users", newDir())
- Put(t, st, tr, "/Users/jyh", newUser(1234567890))
- Put(t, st, tr, "/Users/jyh/ByDate", newDir())
- Put(t, st, tr, "/Users/jyh/ByDate/2014_01_01", newDir())
- Put(t, st, tr, "/Users/jyh/Albums", newDir())
- Commit(t, tr)
- }
-
- // Add some photos by date.
- {
- p1 := newPhoto("/global/contentd/DSC1000.jpg", "Half Dome")
- p2 := newPhoto("/global/contentd/DSC1001.jpg", "I don't want to hike")
- p3 := newPhoto("/global/contentd/DSC1002.jpg", "Crying kids")
- p4 := newPhoto("/global/contentd/DSC1003.jpg", "Ice cream")
- p5 := newPhoto("/global/contentd/DSC1004.jpg", "Let's go home")
-
- tr := memstore.NewTransaction()
- Put(t, st, tr, "/Users/jyh/ByDate/2014_01_01/09:00", p1)
- Put(t, st, tr, "/Users/jyh/ByDate/2014_01_01/09:15", p2)
- Put(t, st, tr, "/Users/jyh/ByDate/2014_01_01/09:16", p3)
- Put(t, st, tr, "/Users/jyh/ByDate/2014_01_01/10:00", p4)
- Put(t, st, tr, "/Users/jyh/ByDate/2014_01_01/10:05", p5)
- Commit(t, tr)
- }
-
- // Add an Album with some of the photos.
- {
- tr := memstore.NewTransaction()
- Put(t, st, tr, "/Users/jyh/Albums/Yosemite", newAlbum("Yosemite selected photos"))
- e5 := Get(t, st, tr, "/Users/jyh/ByDate/2014_01_01/10:05")
- Put(t, st, tr, "/Users/jyh/Albums/Yosemite/Photos/1", e5.Stat.ID)
- e3 := Get(t, st, tr, "/Users/jyh/ByDate/2014_01_01/09:16")
- Put(t, st, tr, "/Users/jyh/Albums/Yosemite/Photos/2", e3.Stat.ID)
- Commit(t, tr)
- }
-
- // Verify some of the photos.
- {
- p1 := getPhoto(t, st, nil, "/Users/jyh/ByDate/2014_01_01/09:00")
- if p1.Comment != "Half Dome" {
- t.Errorf("Expected %q, got %q", "Half Dome", p1.Comment)
- }
- }
-
- {
- p3 := getPhoto(t, st, nil, "/Users/jyh/Albums/Yosemite/Photos/2")
- if p3.Comment != "Crying kids" {
- t.Errorf("Expected %q, got %q", "Crying kids", p3.Comment)
- }
- }
-
- // Update p3.Comment = "Happy"
- {
- tr := memstore.NewTransaction()
- p3 := getPhoto(t, st, tr, "/Users/jyh/ByDate/2014_01_01/09:16")
- p3.Comment = "Happy"
- Put(t, st, tr, "/Users/jyh/ByDate/2014_01_01/09:16", p3)
- Commit(t, tr)
- }
-
- // Verify that the photo in the album has also changed.
- {
- p3 := getPhoto(t, st, nil, "/Users/jyh/Albums/Yosemite/Photos/2")
- if p3.Comment != "Happy" {
- t.Errorf("Expected %q, got %q", "Happy", p3.Comment)
- }
- }
-}
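The final assertion in TestPhotoAlbum holds because the album entry stores the photo's ID rather than a copy, so both paths resolve to one object. The aliasing behavior in miniature, with a hypothetical int-keyed store:

```go
package main

import "fmt"

type Photo struct{ Comment string }

func main() {
	store := map[int]*Photo{42: {Comment: "Crying kids"}}
	byDate := 42  // stands in for /Users/jyh/ByDate/2014_01_01/09:16
	inAlbum := 42 // stands in for /Users/jyh/Albums/Yosemite/Photos/2

	store[byDate].Comment = "Happy"     // update via one path...
	fmt.Println(store[inAlbum].Comment) // ...visible via the other: "Happy"
}
```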
diff --git a/services/store/memstore/blackbox/sync_integration_test.go b/services/store/memstore/blackbox/sync_integration_test.go
deleted file mode 100644
index 98b23e9..0000000
--- a/services/store/memstore/blackbox/sync_integration_test.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package blackbox
-
-import (
- "testing"
-
- "veyron/services/store/memstore"
- watchtesting "veyron/services/store/memstore/testing"
- "veyron/services/store/raw"
-
- "veyron2/rt"
- "veyron2/services/watch/types"
-)
-
-func recv(t *testing.T, call raw.StoreWatchCall, n int) []types.Change {
- rStream := call.RecvStream()
- changes := make([]types.Change, n)
- for i := 0; i < n; i++ {
- if !rStream.Advance() {
- t.Error("Advance() failed: %v", rStream.Err())
- }
- changes[i] = rStream.Value()
- }
- return changes
-}
-
-func TestSyncState(t *testing.T) {
- rt.Init()
-
- // Create a new store.
- dbName, st, cleanup := CreateStore(t, "vstore_source")
- defer cleanup()
-
- // Put /, /a, /a/b
- tr := memstore.NewTransaction()
- id1 := Put(t, st, tr, "/", "val1")
- id2 := Put(t, st, tr, "/a", "val2")
- id3 := Put(t, st, tr, "/a/b", "val3")
- Commit(t, tr)
-
- // Remove /a/b
- tr = memstore.NewTransaction()
- Remove(t, st, tr, "/a/b")
- Commit(t, tr)
- GC(t, st)
-
- if err := st.Close(); err != nil {
- t.Fatalf("Close() failed: %v", err)
- }
-
- // Create a target store for integration testing.
- _, target, cleanup := CreateStore(t, "vstore_target")
- defer cleanup()
-
- // Re-create a new store. This should compress the log, creating an initial
- // state containing / and /a.
- st, cleanup = OpenStore(t, dbName)
- defer cleanup()
-
- // Create the watcher
- w, cleanup := OpenWatch(t, dbName)
- defer cleanup()
- // Create a sync request
- stream := watchtesting.WatchRaw(rootPublicID, w.WatchRaw, raw.Request{})
-
- // Update target
- changes := recv(t, stream, 2)
- PutMutations(t, target, Mutations(changes))
- GC(t, target)
-
- // Expect that the target contains id1 and id2 but not id3
- ExpectExists(t, target, "/", id1)
- ExpectExists(t, target, "/a", id2)
- ExpectNotExists(t, target, "/a/b", id3)
-}
-
-func TestSyncTransaction(t *testing.T) {
- rt.Init()
-
- _, target, cleanup := CreateStore(t, "vstore_target")
- defer cleanup()
-
- dbName, st, cleanup := CreateStore(t, "vstore_source")
- defer cleanup()
-
- // Create the watcher
- w, cleanup := OpenWatch(t, dbName)
- defer cleanup()
- // Create a sync request
- stream := watchtesting.WatchRaw(rootPublicID, w.WatchRaw, raw.Request{})
-
- // First transaction, put /, /a, /a/b
- tr := memstore.NewTransaction()
- id1 := Put(t, st, tr, "/", "val1")
- id2 := Put(t, st, tr, "/a", "val2")
- id3 := Put(t, st, tr, "/a/b", "val3")
- Commit(t, tr)
-
- // Update target
- changes := recv(t, stream, 3)
- PutMutations(t, target, Mutations(changes))
- GC(t, target)
-
- // Expect that the target contains id1, id2, id3
- ExpectExists(t, target, "/", id1)
- ExpectExists(t, target, "/a", id2)
- ExpectExists(t, target, "/a/b", id3)
-
- // Next transaction, remove /a/b
- tr = memstore.NewTransaction()
- Remove(t, st, tr, "/a/b")
- Commit(t, tr)
-
- // Update target
- changes = recv(t, stream, 1)
- PutMutations(t, target, Mutations(changes))
- GC(t, target)
-
- // Expect that the target contains id1, id2, but not id3
- ExpectExists(t, target, "/", id1)
- ExpectExists(t, target, "/a", id2)
- ExpectNotExists(t, target, "/a/b", id3)
-}
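Both deleted sync tests follow one pattern: read N changes off the watch stream, convert them to mutations, and apply them to the target in order. The essential replay loop, sketched without the veyron stream types:

```go
package main

import "fmt"

type change struct {
	id      int
	value   string
	deleted bool
}

// apply replays a batch of changes onto the target in order; the last
// writer for an id wins, and deletions remove the entry.
func apply(target map[int]string, batch []change) {
	for _, c := range batch {
		if c.deleted {
			delete(target, c.id)
			continue
		}
		target[c.id] = c.value
	}
}

func main() {
	target := map[int]string{}
	apply(target, []change{{1, "val1", false}, {2, "val2", false}, {3, "val3", false}})
	apply(target, []change{{3, "", true}}) // remove /a/b
	fmt.Println(target)                    // map[1:val1 2:val2]
}
```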
diff --git a/services/store/memstore/blackbox/team_player_test.go b/services/store/memstore/blackbox/team_player_test.go
deleted file mode 100644
index a086129..0000000
--- a/services/store/memstore/blackbox/team_player_test.go
+++ /dev/null
@@ -1,225 +0,0 @@
-package blackbox
-
-import (
- "runtime"
- "testing"
-
- "veyron/services/store/memstore"
- "veyron/services/store/memstore/state"
-
- "veyron2/storage"
- "veyron2/vom"
-)
-
-func init() {
- vom.Register(&Player{})
- vom.Register(&Team{})
-}
-
-// This schema uses the Team/Player schema in the query doc.
-//
-// Player : belongs to many teams.
-// Team : contains many players.
-
-// Player is a person who has a Role.
-type Player struct {
- FullName string
-}
-
-// Team has a set of Roles.
-type Team struct {
- FullName string
- Players []storage.ID
-}
-
-func newPlayer(name string) *Player {
- return &Player{FullName: name}
-}
-
-func newTeam(name string) *Team {
- return &Team{FullName: name}
-}
-
-func getPlayer(t *testing.T, st *memstore.Store, tr *memstore.Transaction, path string) (storage.ID, *Player) {
- _, file, line, _ := runtime.Caller(1)
- e := Get(t, st, tr, path)
- p, ok := e.Value.(*Player)
- if !ok {
- t.Fatalf("%s(%d): %s: not a Player: %v", file, line, path, e.Value)
- }
- return e.Stat.ID, p
-}
-
-func getTeam(t *testing.T, st *memstore.Store, tr *memstore.Transaction, path string) (storage.ID, *Team) {
- _, file, line, _ := runtime.Caller(1)
- e := Get(t, st, tr, path)
- p, ok := e.Value.(*Team)
- if !ok {
- t.Fatalf("%s(%d): %s: not a Team: %v", file, line, path, e.Value)
- }
- return e.Stat.ID, p
-}
-
-func TestManyToManyWithRole(t *testing.T) {
- st, err := memstore.New(rootPublicID, "")
- if err != nil {
- t.Fatalf("memstore.New() failed: %v", err)
- }
-
- john := newPlayer("John")
- jane := newPlayer("Jane")
- joan := newPlayer("Joan")
-
- rockets := newTeam("Rockets")
- hornets := newTeam("Hornets")
-
- // Create the state.
- {
- tr := memstore.NewTransaction()
- Put(t, st, tr, "/", newDir())
- Put(t, st, tr, "/teamsapp", newDir())
- Put(t, st, tr, "/teamsapp/teams", newDir())
- Put(t, st, tr, "/teamsapp/teams/rockets", rockets)
- Put(t, st, tr, "/teamsapp/teams/hornets", hornets)
- Put(t, st, tr, "/teamsapp/players", newDir())
-
- johnID := Put(t, st, tr, "/teamsapp/teams/rockets/Players/@", john)
- janeID := Put(t, st, tr, "/teamsapp/teams/rockets/Players/@", jane)
- Put(t, st, tr, "/teamsapp/teams/hornets/Players/@", janeID)
- joanID := Put(t, st, tr, "/teamsapp/teams/hornets/Players/@", joan)
-
- Put(t, st, tr, "/teamsapp/players/John", johnID)
- Put(t, st, tr, "/teamsapp/players/Jane", janeID)
- Put(t, st, tr, "/teamsapp/players/Joan", joanID)
-
- Commit(t, tr)
- }
-
- rocketsID := Get(t, st, nil, "/teamsapp/teams/rockets").Stat.ID
- hornetsID := Get(t, st, nil, "/teamsapp/teams/hornets").Stat.ID
- johnID := Get(t, st, nil, "/teamsapp/players/John").Stat.ID
- janeID := Get(t, st, nil, "/teamsapp/players/Jane").Stat.ID
- joanID := Get(t, st, nil, "/teamsapp/players/Joan").Stat.ID
-
- // Verify some of the state.
- {
- tr := memstore.NewTransaction()
- _, john := getPlayer(t, st, tr, "/teamsapp/players/John")
- _, rockets := getTeam(t, st, tr, "/teamsapp/teams/rockets")
-
- if john.FullName != "John" {
- t.Errorf("Expected %q, got %q", "John", john.FullName)
- }
- if len(rockets.Players) != 2 {
- t.Fatalf("Expected two players: got %v", rockets.Players)
- }
- if rockets.Players[0] != johnID {
- t.Errorf("Expected %s, got %s", johnID, rockets.Players[0])
- }
- }
-
- // Iterate over the rockets.
- players := make(map[storage.ID]*Player)
- name := storage.ParsePath("/teamsapp/players")
- for it := st.Snapshot().NewIterator(rootPublicID, name,
- state.ListPaths, nil); it.IsValid(); it.Next() {
-
- e := it.Get()
- if p, ok := e.Value.(*Player); ok {
- if _, ok := players[e.Stat.ID]; ok {
- t.Errorf("Player already exists: %v", p)
- continue
- }
- players[e.Stat.ID] = p
- }
- }
- if len(players) != 3 {
- t.Errorf("Should have 3 players: have %v", players)
- }
- if p, ok := players[johnID]; !ok || p.FullName != "John" {
- t.Errorf("Should have John, have %v", p)
- }
- if p, ok := players[janeID]; !ok || p.FullName != "Jane" {
- t.Errorf("Should have Jane, have %v", p)
- }
- if p, ok := players[joanID]; !ok || p.FullName != "Joan" {
- t.Errorf("Should have Joan, have %v", p)
- }
-
- // Iterate over all teams, nonrecursively.
- teams := make(map[storage.ID]*Team)
- name = storage.ParsePath("/teamsapp/teams")
- for it := st.Snapshot().NewIterator(rootPublicID, name,
- state.ListPaths, state.ImmediateFilter); it.IsValid(); it.Next() {
-
- e := it.Get()
- v := e.Value
- if _, ok := v.(*Player); ok {
- t.Errorf("Nonrecursive iteration should not show players")
- }
- if team, ok := v.(*Team); ok {
- if _, ok := teams[e.Stat.ID]; ok {
- t.Errorf("Team already exists: %v", team)
- continue
- }
- teams[e.Stat.ID] = team
- }
- }
- if len(teams) != 2 {
- t.Errorf("Should have 2 teams: have %v", teams)
- }
- if team, ok := teams[rocketsID]; !ok || team.FullName != "Rockets" {
- t.Errorf("Should have Rockets, have %v", team)
- }
- if team, ok := teams[hornetsID]; !ok || team.FullName != "Hornets" {
- t.Errorf("Should have Hornets, have %v", team)
- }
-
- // Iterate over all teams, recursively.
- contractCount := 0
- teamCount := 0
- players = make(map[storage.ID]*Player)
- teams = make(map[storage.ID]*Team)
- name = storage.ParsePath("/teamsapp/teams")
- for it := st.Snapshot().NewIterator(rootPublicID, name,
- state.ListPaths, nil); it.IsValid(); it.Next() {
-
- e := it.Get()
- v := e.Value
- if p, ok := v.(*Player); ok {
- players[e.Stat.ID] = p
- contractCount++
- }
- if team, ok := v.(*Team); ok {
- teams[e.Stat.ID] = team
- teamCount++
- }
- }
- if teamCount != 2 {
- t.Errorf("Should have 2 teams: have %d", teamCount)
- }
- if len(teams) != 2 {
- t.Errorf("Should have 2 teams: have %v", teams)
- }
- if team, ok := teams[rocketsID]; !ok || team.FullName != "Rockets" {
- t.Errorf("Should have Rockets, have %v", team)
- }
- if team, ok := teams[hornetsID]; !ok || team.FullName != "Hornets" {
- t.Errorf("Should have Hornets, have %v", team)
- }
- if contractCount != 4 {
- t.Errorf("Should have 4 contracts: have %d", contractCount)
- }
- if len(players) != 3 {
- t.Errorf("Should have 3 players: have %v", players)
- }
- if p, ok := players[johnID]; !ok || p.FullName != "John" {
- t.Errorf("Should have John, have %v", p)
- }
- if p, ok := players[janeID]; !ok || p.FullName != "Jane" {
- t.Errorf("Should have Jane, have %v", p)
- }
- if p, ok := players[joanID]; !ok || p.FullName != "Joan" {
- t.Errorf("Should have Joan, have %v", p)
- }
-}
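The deleted test distinguishes nonrecursive iteration (state.ImmediateFilter, which must not surface players nested under teams) from full recursive iteration. The same distinction over plain path strings, as an illustrative sketch:

```go
package main

import (
	"fmt"
	"strings"
)

// immediate reports whether child is exactly one path component below base,
// the property the nonrecursive filter enforces.
func immediate(base, child string) bool {
	rest := strings.TrimPrefix(child, base+"/")
	return rest != child && !strings.Contains(rest, "/")
}

func main() {
	paths := []string{
		"/teamsapp/teams/rockets",
		"/teamsapp/teams/rockets/Players/0",
		"/teamsapp/teams/hornets",
	}
	for _, p := range paths {
		fmt.Println(p, "immediate:", immediate("/teamsapp/teams", p))
	}
}
```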
diff --git a/services/store/memstore/blackbox/util.go b/services/store/memstore/blackbox/util.go
deleted file mode 100644
index c494310..0000000
--- a/services/store/memstore/blackbox/util.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package blackbox
-
-import (
- "io/ioutil"
- "os"
- "runtime"
- "testing"
-
- "veyron/services/store/memstore"
- mtest "veyron/services/store/memstore/testing"
- memwatch "veyron/services/store/memstore/watch"
- "veyron/services/store/raw"
-
- "veyron2/security"
- "veyron2/services/watch/types"
- "veyron2/storage"
-)
-
-var (
- rootPublicID security.PublicID = security.FakePublicID("root")
- nullMutation = raw.Mutation{}
-)
-
-func Get(t *testing.T, st *memstore.Store, tr *memstore.Transaction, path string) *storage.Entry {
- _, file, line, _ := runtime.Caller(1)
- e, err := st.Bind(path).Get(rootPublicID, tr)
- if err != nil {
- t.Fatalf("%s(%d): can't get %s: %s", file, line, path, err)
- }
- return e
-}
-
-func Put(t *testing.T, st *memstore.Store, tr *memstore.Transaction, path string, v interface{}) storage.ID {
- _, file, line, _ := runtime.Caller(1)
- stat, err := st.Bind(path).Put(rootPublicID, tr, v)
- if err != nil {
- t.Fatalf("%s(%d): can't put %s: %s", file, line, path, err)
- }
- if stat != nil {
- return stat.ID
- }
- if id, ok := v.(storage.ID); ok {
- return id
- }
- t.Errorf("%s(%d): can't find id", file, line)
- return storage.ID{}
-}
-
-func Remove(t *testing.T, st *memstore.Store, tr *memstore.Transaction, path string) {
- _, file, line, _ := runtime.Caller(1)
- if err := st.Bind(path).Remove(rootPublicID, tr); err != nil {
- t.Fatalf("%s(%d): can't remove %s: %s", file, line, path, err)
- }
-}
-
-func Commit(t *testing.T, tr *memstore.Transaction) {
- if err := tr.Commit(); err != nil {
- t.Fatalf("Transaction aborted: %s", err)
- }
-}
-
-func ExpectExists(t *testing.T, st *memstore.Store, path string, id storage.ID) {
- _, file, line, _ := runtime.Caller(1)
- e, err := st.Bind(path).Get(rootPublicID, nil)
- if err != nil {
- t.Errorf("%s(%d): Expected value for ID: %x", file, line, id)
- return
- }
- if e.Stat.ID != id {
- t.Errorf("%s(%d): expected id to be %v, but was %v", file, line, id, e.Stat.ID)
- }
-}
-
-func ExpectNotExists(t *testing.T, st *memstore.Store, path string, id storage.ID) {
- _, file, line, _ := runtime.Caller(1)
- e, err := st.Bind(path).Get(rootPublicID, nil)
- if err == nil {
- t.Errorf("%s(%d): Unexpected value: %v", file, line, e)
- }
-}
-
-func GC(t *testing.T, st *memstore.Store) {
- if err := st.GC(); err != nil {
- _, file, line, _ := runtime.Caller(1)
- t.Fatalf("%s(%d): can't gc: %s", file, line, err)
- }
-}
-
-func CreateStore(t *testing.T, dbSuffix string) (string, *memstore.Store, func()) {
- dbName, err := ioutil.TempDir(os.TempDir(), dbSuffix)
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed: %v", err)
- }
- cleanup := func() {
- os.RemoveAll(dbName)
- }
-
- st, err := memstore.New(rootPublicID, dbName)
- if err != nil {
- cleanup()
- t.Fatalf("New() failed: %v", err)
- }
-
- return dbName, st, cleanup
-}
-
-func OpenStore(t *testing.T, dbName string) (*memstore.Store, func()) {
- st, err := memstore.New(rootPublicID, dbName)
- if err != nil {
- t.Fatalf("New() failed: %v", err)
- }
-
- return st, func() {
- os.RemoveAll(dbName)
- }
-}
-
-func OpenWatch(t *testing.T, dbName string) (*memwatch.Watcher, func()) {
- w, err := memwatch.New(rootPublicID, dbName)
- if err != nil {
- t.Fatalf("New() failed: %v", err)
- }
-
- return w, func() {
- w.Close()
- }
-}
-
-// putMutationsStream implements raw.StoreServicePutMutationsStream.
-type putMutationsStream struct {
- mus []raw.Mutation
- index int
-}
-
-func newPutMutationsStream(mus []raw.Mutation) raw.StoreServicePutMutationsStream {
- return &putMutationsStream{
- mus: mus,
- index: -1,
- }
-}
-
-func (s *putMutationsStream) RecvStream() interface {
- Advance() bool
- Value() raw.Mutation
- Err() error
-} {
- return s
-}
-
-func (s *putMutationsStream) Advance() bool {
- s.index++
- return s.index < len(s.mus)
-}
-
-func (s *putMutationsStream) Value() raw.Mutation {
- return s.mus[s.index]
-}
-
-func (*putMutationsStream) Err() error {
- return nil
-}
-
-func PutMutations(t *testing.T, st *memstore.Store, mus []raw.Mutation) {
- stream := newPutMutationsStream(mus)
- rootCtx := mtest.NewFakeServerContext(rootPublicID)
- if err := st.PutMutations(rootCtx, stream); err != nil {
- _, file, line, _ := runtime.Caller(1)
- t.Errorf("%s(%d): can't put mutations %s: %s", file, line, mus, err)
- }
-}
-
-func Mutations(changes []types.Change) []raw.Mutation {
- mutations := make([]raw.Mutation, len(changes))
- for i, change := range changes {
- mutations[i] = *(change.Value.(*raw.Mutation))
- }
- return mutations
-}
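The deleted helpers all call runtime.Caller(1) so that failures report the calling test's file and line rather than the helper's; testing.T.Helper() now does this automatically, but the manual version looks like this (a standalone sketch, printing instead of failing a test):

```go
package main

import (
	"fmt"
	"runtime"
)

// failf prefixes a message with the caller's file and line, the way the
// blackbox helpers did before testing.T.Helper existed.
func failf(format string, args ...interface{}) {
	_, file, line, _ := runtime.Caller(1)
	fmt.Printf("%s(%d): %s\n", file, line, fmt.Sprintf(format, args...))
}

func main() {
	failf("can't put %s: %s", "/a/b", "dangling reference")
}
```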
diff --git a/services/store/memstore/doc.go b/services/store/memstore/doc.go
deleted file mode 100644
index fd50ad8..0000000
--- a/services/store/memstore/doc.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// Package memstore implements an in-memory version of the veyron2/storage API.
-// The store is logged to a file, and it is persistent, but the entire state is
-// also kept in memory at all times.
-//
-// The purpose of this fake is to explore the model. It is for testing; it
-// isn't intended to be deployed.
-package memstore
diff --git a/services/store/memstore/field/reflect.go b/services/store/memstore/field/reflect.go
deleted file mode 100644
index 891cb31..0000000
--- a/services/store/memstore/field/reflect.go
+++ /dev/null
@@ -1,271 +0,0 @@
-package field
-
-// reflect.go uses reflection to access the fields in a value.
-//
-// Get(v, path)
-// Set(v, path, x)
-// Remove(v, path)
-//
-// The path is JSON-style, using field names. For example, consider the
-// following value.
-//
-// type MyValue struct {
-// A int
-// B []int
-// C map[string]int
-// D map[string]struct{E int}
-// }
-//
-// Here are some possible paths:
-//
-// var x MyValue = ...
-// Get(x, storage.PathName{"A"}) == x.A
-// Get(x, storage.PathName{"B", "7"}) == x.B[7]
-// Get(x, storage.PathName{"C", "a"}) == x.C["a"]
-// Get(x, storage.PathName{"D", "a", "E"}) == x.D["a"].E
-//
-// Set(x, storage.PathName{"A"}, 17)
-// Get(x, storage.PathName{"A"}) == 17
-//
-// Set(x, storage.PathName{"D", "a"}, struct{E: 12})
-// Get(x, storage.PathName{"D", "a", "E"}) == 12
-//
-// Remove(x, storage.PathName{"D", "a"}
-// Get(x, storage.PathName{"D", "a", "E"}) fails
-
-import (
- "reflect"
- "strconv"
-
- "veyron2/storage"
-)
-
-type SetResult uint32
-
-const (
- SetFailedNotFound SetResult = iota
- SetFailedWrongType
- SetAsValue
- SetAsID
-)
-
-const (
- SliceAppendSuffix = "@"
-)
-
-var (
- nullID storage.ID
- nullValue reflect.Value
-
- tyID = reflect.TypeOf(nullID)
-)
-
-// Get returns the value associated with a path, stopping at values that
-// can't be resolved. It returns the value at the maximum prefix of the path
-// that can be resolved, and any suffix that remains.
-func Get(val interface{}, path storage.PathName) (reflect.Value, storage.PathName) {
- v, suffix := findField(reflect.ValueOf(val), path)
- return v, path[suffix:]
-}
-
-// findField returns the field specified by the path, using reflection to
-// traverse the value. Returns the field, and how many components of the path
-// were resolved.
-func findField(v reflect.Value, path storage.PathName) (reflect.Value, int) {
- for i, field := range path {
- v1 := followPointers(v)
- if !v1.IsValid() {
- return v, i
- }
- v2 := findNextField(v1, field)
- if !v2.IsValid() {
- return v1, i
- }
- v = v2
- }
- return v, len(path)
-}
-
-func followPointers(v reflect.Value) reflect.Value {
- if !v.IsValid() {
- return v
- }
- kind := v.Type().Kind()
- for kind == reflect.Ptr || kind == reflect.Interface {
- v = v.Elem()
- if !v.IsValid() {
- return v
- }
- kind = v.Type().Kind()
- }
- return v
-}
-
-func findNextField(v reflect.Value, field string) reflect.Value {
- switch v.Type().Kind() {
- case reflect.Array, reflect.Slice:
- return findSliceField(v, field)
- case reflect.Map:
- return findMapField(v, field)
- case reflect.Struct:
- return v.FieldByName(field)
- default:
- return reflect.Value{}
- }
-}
-
-func findSliceField(v reflect.Value, field string) reflect.Value {
- l := v.Len()
- i, err := strconv.Atoi(field)
- if err != nil || i < 0 || i >= l {
- return reflect.Value{}
- }
- return v.Index(i)
-}
-
-func findMapField(v reflect.Value, field string) reflect.Value {
- tyKey := v.Type().Key()
- if v.IsNil() || tyKey.Kind() != reflect.String {
- return reflect.Value{}
- }
- return v.MapIndex(reflect.ValueOf(field).Convert(tyKey))
-}
-
-// Set assigns the value associated with a subfield of an object.
-// If the field has type storage.ID, then id is stored instead of xval.
-//
-// Here are the possible cases:
-//
-// 1. SetFailedNotFound if the operation failed because the path doesn't exist.
-//
-// 2. SetFailedWrongType if the operation failed because the value has the wrong type.
-//
-// 3. SetAsValue if the operation was successful, and the value xval was
-// stored. The returned storage.ID is null.
-//
-// 4. SetAsID if the operation was successful, but the type of the field is
-// storage.ID and xval does not have type storage.ID. In this case, the value
-// xval is not stored; the storage.ID is returned instead. If the field does
-// not already exist, a new storage.ID is created (and returned).
-//
-// The SetAsID case means that the value xval is to be stored as a separate
-// value in the store, not as a subfield of the current value.
-//
-// As a special case, if the field type is storage.ID and xval already has
-// type storage.ID, then it is case #3, SetAsValue, and that storage.ID is
-// returned.
-func Set(v reflect.Value, name string, xval interface{}) (SetResult, storage.ID) {
- v = followPointers(v)
- if !v.IsValid() {
- return SetFailedNotFound, nullID
- }
- switch v.Type().Kind() {
- case reflect.Map:
- return setMapField(v, name, xval)
- case reflect.Array, reflect.Slice:
- return setSliceField(v, name, xval)
- case reflect.Struct:
- return setStructField(v, name, xval)
- default:
- return SetFailedNotFound, nullID
- }
-}
-
-func setMapField(v reflect.Value, name string, xval interface{}) (SetResult, storage.ID) {
- tyV := v.Type()
- tyKey := tyV.Key()
- if tyKey.Kind() != reflect.String {
- return SetFailedNotFound, nullID
- }
- key := reflect.ValueOf(name).Convert(tyKey)
- r, x, id := coerceValue(tyV.Elem(), v.MapIndex(key), xval)
- if r == SetFailedWrongType {
- return SetFailedWrongType, nullID
- }
- v.SetMapIndex(key, x)
- return r, id
-}
-
-func setSliceField(v reflect.Value, field string, xval interface{}) (SetResult, storage.ID) {
- if field == SliceAppendSuffix {
- r, x, id := coerceValue(v.Type().Elem(), nullValue, xval)
- if r == SetFailedWrongType {
- return SetFailedWrongType, nullID
- }
- // This can panic if v is not settable. It is a requirement that users of this method
- // ensure that it is settable.
- v.Set(reflect.Append(v, x))
- return r, id
- }
- l := v.Len()
- i, err := strconv.Atoi(field)
- if err != nil || i < 0 || i >= l {
- return SetFailedNotFound, nullID
- }
- r, x, id := coerceValue(v.Type().Elem(), v.Index(i), xval)
- if r == SetFailedWrongType {
- return SetFailedWrongType, nullID
- }
- v.Index(i).Set(x)
- return r, id
-}
-
-func setStructField(v reflect.Value, name string, xval interface{}) (SetResult, storage.ID) {
- field, found := v.Type().FieldByName(name)
- if !found {
- return SetFailedNotFound, nullID
- }
- fieldVal := v.FieldByName(name)
- r, x, id := coerceValue(field.Type, fieldVal, xval)
- if r == SetFailedWrongType {
- return SetFailedWrongType, nullID
- }
- fieldVal.Set(x)
- return r, id
-}
-
-func coerceValue(ty reflect.Type, prev reflect.Value, xval interface{}) (SetResult, reflect.Value, storage.ID) {
- x := reflect.ValueOf(xval)
- switch {
- case ty == tyID:
- if x.Type() == tyID {
- return SetAsValue, x, xval.(storage.ID)
- }
- var id storage.ID
- if prev.IsValid() {
- var ok bool
- if id, ok = prev.Interface().(storage.ID); !ok {
- return SetFailedWrongType, nullValue, nullID
- }
- } else {
- id = storage.NewID()
- }
- return SetAsID, reflect.ValueOf(id), id
- case x.Type().AssignableTo(ty):
- return SetAsValue, x, nullID
- case x.Type().ConvertibleTo(ty):
- return SetAsValue, x.Convert(ty), nullID
- default:
- return SetFailedWrongType, nullValue, nullID
- }
-}
-
-// Remove removes the named field from a map value.
-// Returns true iff the removal succeeded; only map fields are supported.
-func Remove(v reflect.Value, name string) bool {
- v = followPointers(v)
- if !v.IsValid() || v.Type().Kind() != reflect.Map || v.IsNil() {
- return false
- }
- return removeMapField(v, name)
-}
-
-func removeMapField(v reflect.Value, name string) bool {
-	// TODO(jyh): Also handle cases where the map key is a primitive scalar type
-	// like int or bool.
- tyKey := v.Type().Key()
- if tyKey.Kind() != reflect.String {
- return false
- }
- v.SetMapIndex(reflect.ValueOf(name).Convert(tyKey), reflect.Value{})
- return true
-}
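For reference, a minimal sketch of how the Get/Set/Remove helpers deleted above were driven through reflection. The Person type and its fields are hypothetical, the import paths are the ones removed by this change, and error handling is elided:

	package main

	import (
		"fmt"
		"reflect"

		"veyron/services/store/memstore/field"

		"veyron2/storage"
	)

	// Person is a hypothetical value with struct, slice, and map fields.
	type Person struct {
		Name    string
		Tags    []string
		Friends map[string]string
	}

	func main() {
		p := &Person{Name: "alice", Tags: []string{"x"}, Friends: map[string]string{"b": "bob"}}
		v := reflect.ValueOf(p)

		// Overwrite a struct field; SetAsValue means the value was stored inline.
		if r, _ := field.Set(v, "Name", "carol"); r != field.SetAsValue {
			fmt.Println("set failed:", r)
		}

		// The "@" suffix appends to a slice field.
		tags, _ := field.Get(p, storage.PathName{"Tags"})
		field.Set(tags, "@", "y")

		// Map entries are created on demand; Remove only succeeds on maps.
		friends, _ := field.Get(p, storage.PathName{"Friends"})
		field.Set(friends, "d", "dave")
		fmt.Println(field.Remove(friends, "b")) // true
		fmt.Println(field.Remove(v, "Name"))    // false: not a map field
	}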
diff --git a/services/store/memstore/field/reflect_test.go b/services/store/memstore/field/reflect_test.go
deleted file mode 100644
index 5c85253..0000000
--- a/services/store/memstore/field/reflect_test.go
+++ /dev/null
@@ -1,382 +0,0 @@
-package field_test
-
-import (
- "reflect"
- "testing"
-
- _ "veyron/lib/testutil"
- "veyron/services/store/memstore/field"
-
- "veyron2/storage"
-)
-
-type A struct {
- B int
- C []int
- D map[string]int
-
- E storage.ID
- F []storage.ID
- G map[string]storage.ID
-}
-
-type V struct {
- UID storage.ID
-}
-
-func pathEq(p1, p2 storage.PathName) bool {
- if len(p1) != len(p2) {
- return false
- }
- for i, x := range p1 {
- if p2[i] != x {
- return false
- }
- }
- return true
-}
-
-func TestGetField(t *testing.T) {
- v := &A{
- B: 5,
- C: []int{6, 7},
- D: map[string]int{"a": 8, "b": 9},
- E: storage.NewID(),
- F: []storage.ID{storage.NewID()},
- G: map[string]storage.ID{"a": storage.NewID()},
- }
-
- // Identity.
- x, s := field.Get(v, storage.PathName{})
- if x.Interface() != v || !pathEq(storage.PathName{}, s) {
- t.Errorf("Identity failed: %v", s)
- }
-
- // B field.
- x, s = field.Get(v, storage.PathName{"B"})
- if x.Interface() != 5 || !pathEq(storage.PathName{}, s) {
- t.Errorf("Expected 5, got %v, suffix=%v", x.Interface(), s)
- }
-
- // C field.
- x, s = field.Get(v, storage.PathName{"C"})
- if !pathEq(storage.PathName{}, s) {
-		t.Errorf("Failed to get C: %v", s)
- }
- {
- y, ok := x.Interface().([]int)
- if !ok || len(y) != 2 || y[0] != 6 || y[1] != 7 {
- t.Errorf("C has the wrong value: %v", x)
- }
- }
- x, s = field.Get(v, storage.PathName{"C", "0"})
- if x.Interface() != 6 || !pathEq(storage.PathName{}, s) {
- t.Errorf("Expected 6, got %v, %v", x, s)
- }
- x, s = field.Get(v, storage.PathName{"C", "1"})
- if x.Interface() != 7 || !pathEq(storage.PathName{}, s) {
- t.Errorf("Expected 7, got %v, %v", x, s)
- }
- x, s = field.Get(v, storage.PathName{"C", "2"})
- if !pathEq(storage.PathName{"2"}, s) {
- t.Errorf("Expected %v, got %v", storage.PathName{"2"}, s)
- }
- {
- y, ok := x.Interface().([]int)
- if !ok || len(y) != 2 || y[0] != 6 || y[1] != 7 {
- t.Errorf("C has the wrong value: %v", x)
- }
- }
-
- // D field.
- x, s = field.Get(v, storage.PathName{"D"})
- if !pathEq(storage.PathName{}, s) {
- t.Errorf("Failed to get D")
- }
- {
- y, ok := x.Interface().(map[string]int)
- if !ok || len(y) != 2 || y["a"] != 8 || y["b"] != 9 {
- t.Errorf("Bad value: %v", y)
- }
- }
- x, s = field.Get(v, storage.PathName{"D", "a"})
- if x.Interface() != 8 || !pathEq(storage.PathName{}, s) {
- t.Errorf("Expected 8, got %v", x)
- }
- x, s = field.Get(v, storage.PathName{"D", "b"})
- if x.Interface() != 9 || !pathEq(storage.PathName{}, s) {
- t.Errorf("Expected 9, got %v", x)
- }
- x, s = field.Get(v, storage.PathName{"D", "c"})
- if !pathEq(storage.PathName{"c"}, s) {
- t.Errorf("Expected %v, got %v", storage.PathName{"c"}, s)
- }
- {
- y, ok := x.Interface().(map[string]int)
- if !ok || len(y) != 2 || y["a"] != 8 || y["b"] != 9 {
- t.Errorf("Bad value: %v", y)
- }
- }
-
- // E field.
- x, s = field.Get(v, storage.PathName{"E"})
- if x.Interface() != v.E || !pathEq(storage.PathName{}, s) {
- t.Errorf("Failed to get E: %v", x.Interface())
- }
- x, s = field.Get(v, storage.PathName{"E", "a", "b", "c"})
- if x.Interface() != v.E || !pathEq(storage.PathName{"a", "b", "c"}, s) {
- t.Errorf("Failed to get E: %v, %v", x.Interface(), s)
- }
-}
-
-func TestSetSliceField(t *testing.T) {
- v := &[]string{"a", "b", "c"}
- rv := reflect.ValueOf(v)
-
- // Test simple get and set.
- b, _ := field.Get(v, storage.PathName{"1"})
- if "b" != b.Interface() {
- t.Errorf(`Expected "b", got %v`, b.Interface())
- }
- if ok, _ := field.Set(rv, "1", "other"); ok != field.SetAsValue {
- t.Errorf("field.Set failed on slice: %v", ok)
- }
- b, _ = field.Get(v, storage.PathName{"1"})
- if "other" != b.Interface() {
-		t.Errorf(`Expected "other", got %v`, b.Interface())
- }
-
-	// Test get on a non-existent field.
- ne, _ := field.Get(v, storage.PathName{"4"})
- if ne.Kind() != reflect.Slice {
- t.Errorf("Expected to get a top level slice, got: %v", ne.Interface())
- }
- ne, _ = field.Get(v, storage.PathName{"-1"})
- if ne.Kind() != reflect.Slice {
- t.Errorf("Expected to get a top level slice, got: %v", ne.Interface())
- }
- nepath := storage.PathName{"X"}
- ne, s := field.Get(v, nepath)
- if ne.Kind() != reflect.Slice {
- t.Errorf("Expected to get a top level slice, got: %v", ne.Interface())
- }
- if !reflect.DeepEqual(s, nepath) {
- t.Errorf("Expected path %v, got %v.", nepath, s)
- }
-
- // Test adding a value.
- if ok, _ := field.Set(rv, "@", "AppendedVal"); ok != field.SetAsValue {
- t.Errorf("Expected to succeed in appending value: %v", ok)
- }
- appended, _ := field.Get(v, storage.PathName{"3"})
- if "AppendedVal" != appended.Interface() {
- t.Errorf(`Expected "AppendedVal", got %v`, appended.Interface())
- }
-
- // Test set of an incompatible value fails.
- if ok, _ := field.Set(rv, "1", true); ok == field.SetAsValue {
- t.Errorf("Expected field.Set to fail when an incompatible value is being set.")
- }
-}
-
-func TestSetEmptySliceField(t *testing.T) {
- v := &[]string{}
- rv := reflect.ValueOf(v)
-
- ne, _ := field.Get(v, storage.PathName{"0"})
- if ne.Kind() != reflect.Slice {
- t.Errorf("Expected to get a top level slice, got: %v", ne.Interface())
- }
- if ok, _ := field.Set(rv, "0", "a"); ok == field.SetAsValue {
- t.Errorf("Expected field.Set to fail")
- }
-}
-
-func TestSetMapField(t *testing.T) {
- v := &map[string]string{
- "A": "a",
- "B": "b",
- }
- rv := reflect.ValueOf(v)
-
- // Test simple get and set.
- a, _ := field.Get(v, storage.PathName{"A"})
- if "a" != a.Interface() {
- t.Errorf(`Expected "a", got %v`, a.Interface())
- }
- if ok, _ := field.Set(rv, "A", "other"); ok != field.SetAsValue {
- t.Errorf("field.Set failed on map: %v", ok)
- }
- a, _ = field.Get(v, storage.PathName{"A"})
- if "other" != a.Interface() {
-		t.Errorf(`Expected "other", got %v`, a.Interface())
- }
-
-	// Test get on a non-existent field.
-	nepath := storage.PathName{"NonExistent"}
- ne, s := field.Get(v, nepath)
- if !reflect.DeepEqual(s, nepath) {
- t.Errorf("Expected path %v, got %v.", nepath, s)
- }
- if ne.Kind() != reflect.Map {
- t.Errorf("Expected to get a top level map, got: %v", ne.Interface())
- }
-
- // Test that set on a non-existant field adds the field.
- if ok, _ := field.Set(rv, "C", "c"); ok != field.SetAsValue {
- t.Errorf("Expected field.Set to succeed: %v", ok)
- }
- c, _ := field.Get(v, storage.PathName{"C"})
- if "c" != c.Interface() {
- t.Errorf(`Expected "c", got %v`, c.Interface())
- }
-
- // Test set of an incompatible value fails.
- if ok, _ := field.Set(rv, "A", true); ok == field.SetAsValue {
- t.Errorf("Expected field.Set to fail when an incompatible value is being set.")
- }
-}
-
-func TestSetEmptyMapField(t *testing.T) {
- v := &map[string]interface{}{}
- rv := reflect.ValueOf(v)
-
- ne, _ := field.Get(v, storage.PathName{"A"})
- if ne.Kind() != reflect.Map {
- t.Errorf("Expected to get a top level map, got: %v", ne.Interface())
- }
- if ok, _ := field.Set(rv, "A", "a"); ok != field.SetAsValue {
- t.Errorf("Expected field.Set to succeed: %v", ok)
- }
- a, _ := field.Get(v, storage.PathName{"A"})
- if "a" != a.Interface() {
- t.Errorf(`Expected "a", got %v`, a.Interface())
- }
-}
-
-func TestSetStructField(t *testing.T) {
- a := &A{
- B: 5,
- C: []int{6, 7},
- D: map[string]int{"a": 8, "b": 9},
- E: storage.NewID(),
- F: []storage.ID{storage.NewID()},
- G: map[string]storage.ID{"a": storage.NewID()},
- }
- v := reflect.ValueOf(a)
-
- // B field.
- x, _ := field.Get(a, storage.PathName{"B"})
- if x.Interface() != 5 {
- t.Errorf("Expected 5, got %v", x)
- }
- if ok, _ := field.Set(v, "B", 15); ok != field.SetAsValue {
- t.Errorf("field.Set failed: %v", ok)
- }
- x, _ = field.Get(a, storage.PathName{"B"})
- if x.Interface() != 15 {
- t.Errorf("Expected 15, got %v", x)
- }
-
- // C field.
- if ok, _ := field.Set(v, "C", []int{7}); ok != field.SetAsValue {
- t.Errorf("Failed to set C: %v", ok)
- }
- x, _ = field.Get(a, storage.PathName{"C", "0"})
- if x.Interface() != 7 {
-		t.Errorf("Expected 7, got %v", x)
- }
-
- p, _ := field.Get(a, storage.PathName{"C"})
- if ok, _ := field.Set(p, "0", 8); ok != field.SetAsValue {
- t.Errorf("Failed to set C: %v", ok)
- }
- x, _ = field.Get(a, storage.PathName{"C", "0"})
- if x.Interface() != 8 {
- t.Errorf("Expected 8, got %v", x)
- }
-
- p, _ = field.Get(a, storage.PathName{"C"})
- if ok, _ := field.Set(p, "@", 9); ok != field.SetAsValue {
- t.Errorf("Failed to set C")
- }
- x, _ = field.Get(a, storage.PathName{"C", "1"})
- if x.Interface() != 9 {
- t.Errorf("Expected 9, got %v", x)
- }
-
- // D field.
- if ok, _ := field.Set(v, "D", map[string]int{"a": 1}); ok != field.SetAsValue {
- t.Errorf("Failed to set D")
- }
- x, _ = field.Get(a, storage.PathName{"D", "a"})
- if x.Interface() != 1 {
- t.Errorf("Expected 1, got %v", x)
- }
-
- p, _ = field.Get(a, storage.PathName{"D"})
- if ok, _ := field.Set(p, "a", 2); ok != field.SetAsValue {
- t.Errorf("Failed to set D")
- }
- x, _ = field.Get(a, storage.PathName{"D", "a"})
- if x.Interface() != 2 {
- t.Errorf("Expected 2, got %v", x)
- }
-
- // E field.
- id := storage.NewID()
- ok, id2 := field.Set(v, "E", id)
- if ok != field.SetAsValue || id2 != id {
-		t.Errorf("Failed to set E: %v, %s/%s", ok, id, id2)
- }
-
- // F field.
- p, _ = field.Get(a, storage.PathName{"F"})
- ok, fid := field.Set(p, "0", "fail")
- if ok != field.SetAsID {
-		t.Errorf("Failed to set F: %v", ok)
- }
- x, _ = field.Get(a, storage.PathName{"F", "0"})
- if x.Interface() != fid {
-		t.Errorf("Expected %v, got %v", fid, x.Interface())
- }
-
- // G field.
- p, _ = field.Get(a, storage.PathName{"G"})
- ok, fid = field.Set(p, "key", "fail")
- if ok != field.SetAsID {
- t.Errorf("Failed to set G")
- }
- x, _ = field.Get(a, storage.PathName{"G", "key"})
- if x.Interface() != fid {
-		t.Errorf("Expected %v, got %v", fid, x)
- }
-}
-
-func TestRemoveField(t *testing.T) {
- a := &A{
- B: 5,
- C: []int{6, 7},
- D: map[string]int{"a": 8, "b": 9},
- E: storage.NewID(),
- F: []storage.ID{storage.NewID()},
- G: map[string]storage.ID{"a": storage.NewID()},
- }
- v := reflect.ValueOf(a)
-
- if field.Remove(v, "B") {
- t.Errorf("Unexpected success")
- }
- p, _ := field.Get(a, storage.PathName{"C"})
- if field.Remove(p, "0") {
- t.Errorf("Unexpected success")
- }
- p, _ = field.Get(a, storage.PathName{"D"})
- if !field.Remove(p, "a") {
- t.Errorf("Unexpected failure")
- }
- x, s := field.Get(a, storage.PathName{"D", "a"})
- if !pathEq(storage.PathName{"a"}, s) {
-		t.Errorf("Expected remaining path %v, got %v (value %v)", storage.PathName{"a"}, s, x)
- }
-}
diff --git a/services/store/memstore/log.go b/services/store/memstore/log.go
deleted file mode 100644
index 3b69326..0000000
--- a/services/store/memstore/log.go
+++ /dev/null
@@ -1,225 +0,0 @@
-package memstore
-
-// A log consists of:
-// 1. A header.
-// 2. A state snapshot.
-// 3. A sequence of transactions.
-//
-// The header includes information about the log version, the size of the state
-// snapshot, the root storage.ID, etc. The snapshot is a sequence of entries for
-// each of the values in the state. A transaction is a *state.Mutations object.
-//
-// There are separate interfaces for reading and writing: *wlog is used for
-// writing, and *RLog is used for reading.
-import (
- "io"
- "os"
- "path"
- "sync"
- "time"
-
- "veyron/runtimes/google/lib/follow"
- "veyron/services/store/memstore/state"
-
- "veyron2/security"
- "veyron2/verror"
- "veyron2/vom"
-)
-
-const (
- logfileName = "storage.log"
- // TODO(tilaks): determine correct permissions for the logs.
- dirPerm = os.FileMode(0700)
- filePerm = os.FileMode(0600)
-)
-
-var (
- errLogIsClosed = verror.Abortedf("log is closed")
-)
-
-// wlog is the type of log writers.
-type wlog struct {
- file *os.File
- enc *vom.Encoder
-}
-
-// RLog is the type of log readers.
-type RLog struct {
- mu sync.Mutex
- closed bool // GUARDED_BY(mu)
- reader io.ReadCloser
- dec *vom.Decoder
-}
-
-// createLog creates a log writer. dbName is the path of the database directory,
-// to which transaction logs are written.
-func createLog(dbName string) (*wlog, error) {
- // Create the log file at the default path in the database directory.
- filePath := path.Join(dbName, logfileName)
- if err := os.MkdirAll(dbName, dirPerm); err != nil {
- return nil, err
- }
- file, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, filePerm)
- if err != nil {
- return nil, err
- }
- return &wlog{
- file: file,
- enc: vom.NewEncoder(file),
- }, nil
-}
-
-// close closes the log file.
-func (l *wlog) close() {
- l.file.Close()
- l.file = nil
- l.enc = nil
-}
-
-// writeState writes the initial state.
-func (l *wlog) writeState(st *Store) error {
- // If writeState returns a nil error, the caller should assume that
- // future reads from the log file will discover the new state.
- // Therefore we commit the log file's new content to stable storage.
- // Note: l.enc does not buffer output, and doesn't need to be flushed.
- if l.file == nil {
- return errLogIsClosed
- }
- return st.State.Write(l.enc)
-}
-
-// appendTransaction adds a transaction to the end of the log.
-func (l *wlog) appendTransaction(m *state.Mutations) error {
- // If appendTransaction returns a nil error, the caller should assume that
- // future reads from the log file will discover the new transaction.
- // Therefore we commit the log file's new content to stable storage.
- // Note: l.enc does not buffer output, and doesn't need to be flushed.
- if l.file == nil {
- return errLogIsClosed
- }
- return l.enc.Encode(m)
-}
-
-// OpenLog opens a log for reading. dbName is the path of the database directory.
-// If followLog is true, reads block until records can be read. Otherwise,
-// reads return EOF when no record can be read.
-func OpenLog(dbName string, followLog bool) (*RLog, error) {
- // Open the log file at the default path in the database directory.
- filePath := path.Join(dbName, logfileName)
- var reader io.ReadCloser
- var err error
- if followLog {
- reader, err = follow.NewReader(filePath)
- } else {
- reader, err = os.Open(filePath)
- }
- if err != nil {
- return nil, err
- }
- return &RLog{
- reader: reader,
- dec: vom.NewDecoder(reader),
- }, nil
-}
-
-// Close closes the log. If Close is called concurrently with ReadState or
-// ReadTransaction, ongoing reads will terminate. Close is idempotent.
-func (l *RLog) Close() {
- l.mu.Lock()
- defer l.mu.Unlock()
- if l.closed {
- return
- }
- l.closed = true
- l.reader.Close()
-}
-
-func (l *RLog) isClosed() bool {
- l.mu.Lock()
- defer l.mu.Unlock()
- return l.closed
-}
-
-// ReadState reads the initial state. ReadState returns an error if the
-// log is closed before or during the read. ReadState should not be invoked
-// concurrently with other reads.
-func (l *RLog) ReadState(adminID security.PublicID) (*Store, error) {
- if l.isClosed() {
- return nil, errLogIsClosed
- }
-
- // Create the state struct.
- st, err := New(adminID, "")
- if err != nil {
- return nil, err
- }
-
-	// Read the state; refcounts are not reconstructed here.
- if err := st.State.Read(l.dec); err != nil {
- return nil, err
- }
-
- return st, nil
-}
-
-// ReadTransaction reads a transaction entry from the log. ReadTransaction
-// returns an error if the log is closed before or during the read.
-// ReadTransaction should not be invoked concurrently with other reads.
-func (l *RLog) ReadTransaction() (*state.Mutations, error) {
- if l.isClosed() {
- return nil, errLogIsClosed
- }
-
- var ms state.Mutations
- if err := l.dec.Decode(&ms); err != nil {
- return nil, err
- }
- for _, m := range ms.Delta {
- m.UpdateRefs()
- }
- return &ms, nil
-}
-
-// backupLog backs up the log file by renaming it with a timestamp suffix.
-func backupLog(dbName string) error {
- srcPath := path.Join(dbName, logfileName)
- dstPath := srcPath + "." + time.Now().Format(time.RFC3339)
- return os.Rename(srcPath, dstPath)
-}
-
-// openDB opens the log file if it exists. dbName is the path of the database
-// directory. If followLog is true, reads block until records can be read.
-// Otherwise, reads return EOF when no record can be read.
-func openDB(dbName string, followLog bool) (*RLog, error) {
- if dbName == "" {
- return nil, nil
- }
- rlog, err := OpenLog(dbName, followLog)
- if err != nil && os.IsNotExist(err) {
- // It is not an error for the log not to exist.
- err = nil
- }
- return rlog, err
-}
-
-// readAndCloseDB reads the state from the log file.
-func readAndCloseDB(admin security.PublicID, rlog *RLog) (*Store, error) {
- defer rlog.Close()
- st, err := rlog.ReadState(admin)
- if err != nil {
- return nil, err
- }
- for {
- mu, err := rlog.ReadTransaction()
- if err == io.EOF {
- break
- }
- if err != nil {
- return nil, err
- }
- if err := st.ApplyMutations(mu); err != nil {
- return nil, err
- }
- }
- return st, nil
-}
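A minimal sketch of the write/read cycle the code above implements, assuming st is a *Store, mu is a *state.Mutations, and rootPublicID is a valid security.PublicID; imports and error handling are elided:

	// Writer side: snapshot first, then transactions, in order.
	w, _ := createLog("/tmp/db") // creates /tmp/db/storage.log
	defer w.close()
	w.writeState(st)
	w.appendTransaction(mu)

	// Reader side: replay the snapshot, then each transaction until EOF.
	r, _ := OpenLog("/tmp/db", false) // false: reads return EOF instead of blocking
	defer r.Close()
	st2, _ := r.ReadState(rootPublicID)
	for {
		m, err := r.ReadTransaction()
		if err == io.EOF {
			break
		}
		st2.ApplyMutations(m)
	}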
diff --git a/services/store/memstore/log_test.go b/services/store/memstore/log_test.go
deleted file mode 100644
index 923e27a..0000000
--- a/services/store/memstore/log_test.go
+++ /dev/null
@@ -1,291 +0,0 @@
-package memstore
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "runtime"
- "testing"
-
- "veyron/services/store/memstore/refs"
- "veyron/services/store/memstore/state"
-
- "veyron2/security"
- "veyron2/storage"
- "veyron2/vom"
-)
-
-var (
- rootPublicID security.PublicID = security.FakePublicID("root")
-)
-
-type Dir struct {
- Entries map[string]storage.ID
-}
-
-type Data struct {
- Comment string
-}
-
-func init() {
- vom.Register(&Data{})
-}
-
-func newData(c string) *Data {
- return &Data{Comment: c}
-}
-
-func expectEqDir(t *testing.T, file string, line int, d1, d2 *Dir) {
- for name, id1 := range d1.Entries {
- id2, ok := d2.Entries[name]
- if !ok {
- t.Errorf("%s(%d): does not exist: %s", file, line, name)
- } else if id2 != id1 {
- t.Errorf("%s(%d): expected ID %s, got %s", file, line, id1, id2)
- }
- }
-	for name := range d2.Entries {
- _, ok := d1.Entries[name]
- if !ok {
- t.Errorf("%s(%d): should not exist: %s", file, line, name)
- }
- }
-}
-
-func expectEqData(t *testing.T, file string, line int, d1, d2 *Data) {
- if d1.Comment != d2.Comment {
-		t.Errorf("%s(%d): expected %q, got %q", file, line, d1.Comment, d2.Comment)
- }
-}
-
-// expectEqValues compares two items. They are equal only if they have the same
-// type, and their contents are equal.
-func expectEqValues(t *testing.T, file string, line int, v1, v2 interface{}) {
- switch x1 := v1.(type) {
- case *Dir:
- x2, ok := v2.(*Dir)
- if !ok {
- t.Errorf("%s(%d): not a Dir: %v", file, line, v2)
- } else {
- expectEqDir(t, file, line, x1, x2)
- }
- case *Data:
- x2, ok := v2.(*Data)
- if !ok {
- t.Errorf("%s(%d): not a Data: %v", file, line, v2)
- } else {
- expectEqData(t, file, line, x1, x2)
- }
- default:
- t.Errorf("Unknown type: %T, %v", v1, v1)
- }
-}
-
-// expectEqImplicitDir compares two directories.
-func expectEqImplicitDir(t *testing.T, file string, line int, d1, d2 refs.Dir) {
- l1 := refs.FlattenDir(d1)
- l2 := refs.FlattenDir(d2)
- i1 := 0
- i2 := 0
- for i1 < len(l1) && i2 < len(l2) {
- e1 := l1[i1]
- e2 := l2[i2]
- if e1.Name == e2.Name {
- if e1.ID != e2.ID {
- t.Errorf("%s(%d): expected id %s, got %s", file, line, e1.ID, e2.ID)
- }
- i1++
- i2++
- } else if e1.Name < e2.Name {
- t.Errorf("%s(%d): missing directory %s", file, line, e1.Name)
- i1++
- } else {
- t.Errorf("%s(%d): unexpected directory %s", file, line, e2.Name)
- i2++
- }
- }
- for _, e1 := range l1[i1:] {
- t.Errorf("%s(%d): missing directory %s", file, line, e1.Name)
- }
- for _, e2 := range l2[i2:] {
- t.Errorf("%s(%d): unexpected directory %s", file, line, e2.Name)
- }
-}
-
-func readTransaction(r *RLog) (*state.Mutations, error) {
- type result struct {
- mu *state.Mutations
- err error
- }
- results := make(chan result)
- go func() {
- defer close(results)
- mu, err := r.ReadTransaction()
- results <- result{mu: mu, err: err}
- }()
- res := <-results
- return res.mu, res.err
-}
-
-// readTransactions reads and applies the transactions. Returns the last transaction read.
-func readTransactions(t *testing.T, r *RLog, st *Store, n int) *state.Mutations {
- _, file, line, _ := runtime.Caller(1)
- var m *state.Mutations
- for i := 0; i < n; i++ {
- var err error
- m, err = readTransaction(r)
- if err != nil {
- t.Errorf("%s(%d): error in readTransaction(): %s", file, line, err)
- return nil
- }
- if err := st.ApplyMutations(m); err != nil {
- t.Errorf("%s(%d): error in ApplyMutations(): %s", file, line, err)
- return nil
- }
- }
- return m
-}
-
-func TestTransaction(t *testing.T) {
- dbName, err := ioutil.TempDir(os.TempDir(), "vstore")
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed: %v", err)
- }
- defer os.RemoveAll(dbName)
-
- w, err := createLog(dbName)
- if err != nil {
- t.Fatalf("Unexpected error: %s", err)
- }
-
- // Create an initial state.
- st1, err := New(rootPublicID, "")
- if err != nil {
- t.Fatalf("newState() failed: %v", err)
- }
- tr := &Transaction{}
- mkdir(t, st1, tr, "/")
- mkdir(t, st1, tr, "/a")
- mkdir(t, st1, tr, "/a/b")
- mkdir(t, st1, tr, "/a/b/c")
- commit(t, tr)
-
- // Write the initial state.
- defer w.close()
- w.writeState(st1)
-
- // Write some Transactions.
- var data [5]*Data
- var paths [5]string
- for i := 0; i != 5; i++ {
- name := fmt.Sprintf("data%d", i)
- tr := &Transaction{}
- data[i] = newData(name)
- paths[i] = "/a/b/c/" + name
- put(t, st1, tr, paths[i], data[i])
- commit(t, tr)
- w.appendTransaction(tr.snapshot.Mutations())
- }
-
- r, err := OpenLog(dbName, true)
- if err != nil {
- t.Fatalf("Unexpected error: %s", err)
- }
- defer r.Close()
- st2, err := r.ReadState(rootPublicID)
- if err != nil {
- t.Fatalf("Can't read state: %s", err)
- }
- readTransactions(t, r, st2, 5)
-
- // Remove data3.
- {
- tr := &Transaction{}
- remove(t, st1, tr, "/a/b/c/data3")
- commit(t, tr)
- w.appendTransaction(tr.snapshot.Mutations())
- }
- readTransactions(t, r, st2, 1)
- {
- tr := &Transaction{}
- expectExists(t, st1, tr, paths[0])
- expectNotExists(t, st1, tr, paths[3])
- }
-
- // Remove all entries.
- {
- tr := &Transaction{}
- remove(t, st1, tr, "/a/b/c")
- commit(t, tr)
- w.appendTransaction(tr.snapshot.Mutations())
- }
- readTransactions(t, r, st2, 1)
- {
- tr := &Transaction{}
- for i := 0; i != 5; i++ {
- expectNotExists(t, st1, tr, paths[i])
- }
- }
-}
-
-func TestDeletions(t *testing.T) {
- dbName, err := ioutil.TempDir(os.TempDir(), "vstore")
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed: %v", err)
- }
- defer os.RemoveAll(dbName)
-
- // Create an initial state.
- st1, err := New(rootPublicID, dbName)
- if err != nil {
- t.Fatalf("newState() failed: %v", err)
- }
- tr := &Transaction{}
- mkdir(t, st1, tr, "/")
- mkdir(t, st1, tr, "/a")
- ids := make(map[string]storage.ID)
- ids["/a/b"], _ = mkdir(t, st1, tr, "/a/b")
- ids["/a/b/c"], _ = mkdir(t, st1, tr, "/a/b/c")
- ids["/a/b/d"], _ = mkdir(t, st1, tr, "/a/b/d")
- commit(t, tr)
-
- // Reconstruct the state from the log.
- r, err := OpenLog(dbName, true)
- if err != nil {
- t.Fatalf("Unexpected error: %s", err)
- }
- defer r.Close()
- st2, err := r.ReadState(rootPublicID)
- if err != nil {
- t.Fatalf("Can't read state: %s", err)
- }
- readTransactions(t, r, st2, 1)
-
- // Remove b.
- {
- tr := &Transaction{}
- remove(t, st1, tr, "/a/b")
- commit(t, tr)
- }
- readTransactions(t, r, st2, 1)
- {
- tr := &Transaction{}
- expectExists(t, st1, tr, "/a")
- expectNotExists(t, st1, tr, "/a/b")
- }
-
- // Perform a GC.
- if err := st1.GC(); err != nil {
- t.Errorf("Unexpected error: %s", err)
- }
- // The transaction should include deletions.
- mu := readTransactions(t, r, st2, 1)
-	for name, id := range ids {
-		if _, ok := mu.Deletions[id]; !ok {
-			t.Errorf("Expected deletion for path %s", name)
-		}
-	}
-	if len(mu.Deletions) != len(ids) {
-		t.Errorf("Unexpected deletions: %v", mu.Deletions)
-	}
-}
diff --git a/services/store/memstore/object.go b/services/store/memstore/object.go
deleted file mode 100644
index 9fb0540..0000000
--- a/services/store/memstore/object.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package memstore
-
-import (
- iquery "veyron/services/store/memstore/query"
-
- "veyron2/query"
- "veyron2/security"
- "veyron2/storage"
- "veyron2/verror"
-)
-
-// Object is a binding to a store value. It is currently represented as a
-// path, so in different transactions the object may refer to different
-// values with different storage.IDs.
-type Object struct {
- path storage.PathName
- store *Store
-}
-
-// Exists returns true iff the object has a value in the current transaction.
-func (o *Object) Exists(pid security.PublicID, trans *Transaction) (bool, error) {
- tr, _, err := o.store.getTransaction(trans)
- if err != nil {
- return false, err
- }
- v, err := tr.snapshot.Get(pid, o.path)
- ok := v != nil && err == nil
- return ok, nil
-}
-
-// Get returns the value for an object.
-func (o *Object) Get(pid security.PublicID, trans *Transaction) (*storage.Entry, error) {
- tr, _, err := o.store.getTransaction(trans)
- if err != nil {
- return nil, err
- }
- return tr.snapshot.Get(pid, o.path)
-}
-
-// Put updates the value for an object.
-func (o *Object) Put(pid security.PublicID, trans *Transaction, v interface{}) (*storage.Stat, error) {
- tr, commit, err := o.store.getTransaction(trans)
- if err != nil {
- return nil, err
- }
- st, err := tr.snapshot.Put(pid, o.path, v)
- if err != nil {
- return nil, err
- }
- if commit {
- err = tr.Commit()
- }
- return st, err
-}
-
-// Remove removes the value for an object.
-func (o *Object) Remove(pid security.PublicID, trans *Transaction) error {
- tr, commit, err := o.store.getTransaction(trans)
- if err != nil {
- return err
- }
- if err := tr.snapshot.Remove(pid, o.path); err != nil {
- return err
- }
- if commit {
- return tr.Commit()
- }
- return nil
-}
-
-// Stat returns entry info.
-func (o *Object) Stat(pid security.PublicID, trans *Transaction) (*storage.Stat, error) {
- return nil, verror.Internalf("Stat not yet implemented")
-}
-
-// Query returns entries matching the given query.
-func (o *Object) Query(pid security.PublicID, trans *Transaction, q query.Query) (iquery.QueryStream, error) {
- tr, _, err := o.store.getTransaction(trans)
- if err != nil {
- return nil, err
- }
- stream := iquery.Eval(tr.snapshot.GetSnapshot(), pid, o.path, q)
- return stream, nil
-}
-
-// Glob returns names that match the given pattern.
-func (o *Object) Glob(pid security.PublicID, trans *Transaction, pattern string) (iquery.GlobStream, error) {
- tr, _, err := o.store.getTransaction(trans)
- if err != nil {
- return nil, err
- }
- return iquery.Glob(tr.snapshot.GetSnapshot(), pid, o.path, pattern)
-}
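A compact sketch of this Object API, assuming st is a *Store created with New, pid is a caller-supplied security.PublicID, and Dir is the sample value type used by the package tests:

	o := st.Bind("/Entries/a") // binds a path, not a value
	tr := &Transaction{}
	if _, err := o.Put(pid, tr, &Dir{}); err != nil {
		// handle error
	}
	if ok, _ := o.Exists(pid, tr); ok {
		e, _ := o.Get(pid, tr)
		_ = e.Stat.ID // the storage.ID assigned by Put
	}
	// Nothing is visible to other transactions until Commit; a conflicting
	// concurrent commit makes Commit fail with "precondition failed".
	if err := tr.Commit(); err != nil {
		// handle conflict
	}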
diff --git a/services/store/memstore/object_test.go b/services/store/memstore/object_test.go
deleted file mode 100644
index c5232b9..0000000
--- a/services/store/memstore/object_test.go
+++ /dev/null
@@ -1,346 +0,0 @@
-package memstore
-
-import (
- "runtime"
- "testing"
-
- "veyron2/query"
-)
-
-func newValue() interface{} {
- return &Dir{}
-}
-
-func TestPutGetRemoveRoot(t *testing.T) {
- st, err := New(rootPublicID, "")
- if err != nil {
- t.Fatalf("New() failed: %v", err)
- }
- o := st.Bind("/")
- testGetPutRemove(t, st, o)
-}
-
-func TestPutGetRemoveChild(t *testing.T) {
- st, err := New(rootPublicID, "")
- if err != nil {
- t.Fatalf("New() failed: %v", err)
- }
- tr := NewTransaction()
- mkdir(t, st, tr, "/")
- commit(t, tr)
- o := st.Bind("/Entries/a")
- testGetPutRemove(t, st, o)
-}
-
-func testGetPutRemove(t *testing.T, st *Store, o *Object) {
- value := newValue()
-
- {
- // Check that the root object does not exist.
- tr := &Transaction{}
- if ok, err := o.Exists(rootPublicID, tr); ok && err == nil {
- t.Errorf("Should not exist")
- }
- if v, err := o.Get(rootPublicID, tr); v != nil && err == nil {
- t.Errorf("Should not exist: %v, %s", v, err)
- }
- }
-
- {
- // Add the root object.
- tr1 := &Transaction{}
- s, err := o.Put(rootPublicID, tr1, value)
- if err != nil {
- t.Errorf("Unexpected error: %s", err)
- }
- if ok, err := o.Exists(rootPublicID, tr1); !ok || err != nil {
- t.Errorf("Should exist: %s", err)
- }
- e, err := o.Get(rootPublicID, tr1)
- if err != nil {
- t.Errorf("Object should exist: %s", err)
- }
- if e.Stat.ID != s.ID {
- t.Errorf("Expected %s, got %s", s.ID, e.Stat.ID)
- }
-
- // Transactions are isolated.
- tr2 := &Transaction{}
-		if ok, err := o.Exists(rootPublicID, tr2); ok && err == nil {
- t.Errorf("Should not exist")
- }
- if v, err := o.Get(rootPublicID, tr2); v != nil && err == nil {
- t.Errorf("Should not exist: %v, %s", v, err)
- }
-
- // Apply tr1.
- if err := tr1.Commit(); err != nil {
- t.Errorf("Unexpected error")
- }
- if ok, err := o.Exists(rootPublicID, tr1); !ok || err != nil {
- t.Errorf("Should exist: %s", err)
- }
- if _, err := o.Get(rootPublicID, tr1); err != nil {
- t.Errorf("Object should exist: %s", err)
- }
-
- // tr2 is still isolated.
- if ok, err := o.Exists(rootPublicID, tr2); ok && err == nil {
- t.Errorf("Should not exist")
- }
- if v, err := o.Get(rootPublicID, tr2); v != nil && err == nil {
- t.Errorf("Should not exist: %v, %s", v, err)
- }
-
- // tr3 observes the commit.
- tr3 := &Transaction{}
- if ok, err := o.Exists(rootPublicID, tr3); !ok || err != nil {
- t.Errorf("Should exist")
- }
- if _, err := o.Get(rootPublicID, tr3); err != nil {
- t.Errorf("Object should exist: %s", err)
- }
- }
-
- {
- // Remove the root object.
- tr1 := &Transaction{}
- if err := o.Remove(rootPublicID, tr1); err != nil {
- t.Errorf("Unexpected error: %s", err)
- }
-		if ok, err := o.Exists(rootPublicID, tr1); ok && err == nil {
- t.Errorf("Should not exist")
- }
- if v, err := o.Get(rootPublicID, tr1); v != nil || err == nil {
-			t.Errorf("Object should not exist: %v", v)
- }
-
- // The removal is isolated.
- tr2 := &Transaction{}
- if ok, err := o.Exists(rootPublicID, tr2); !ok || err != nil {
- t.Errorf("Should exist")
- }
- if _, err := o.Get(rootPublicID, tr2); err != nil {
- t.Errorf("Object should exist: %s", err)
- }
-
- // Apply tr1.
- if err := tr1.Commit(); err != nil {
- t.Errorf("Unexpected error: %s", err)
- }
- if ok, err := o.Exists(rootPublicID, tr1); ok && err == nil {
- t.Errorf("Should not exist")
- }
- if v, err := o.Get(rootPublicID, tr1); v != nil || err == nil {
-			t.Errorf("Object should not exist: %v", v)
- }
-
-		// tr2 is still isolated from the removal.
- if ok, err := o.Exists(rootPublicID, tr2); !ok || err != nil {
- t.Errorf("Should exist: %s", err)
- }
- if _, err := o.Get(rootPublicID, tr2); err != nil {
- t.Errorf("Object should exist: %s", err)
- }
- }
-
- {
- // Check that the root object does not exist.
- tr1 := &Transaction{}
- if ok, err := o.Exists(rootPublicID, tr1); ok && err == nil {
- t.Errorf("Should not exist")
- }
- if v, err := o.Get(rootPublicID, tr1); v != nil && err == nil {
- t.Errorf("Should not exist: %v, %s", v, err)
- }
- }
-}
-
-func TestConcurrentOK(t *testing.T) {
- st, err := New(rootPublicID, "")
- if err != nil {
- t.Fatalf("New() failed: %v", err)
- }
- {
- tr := NewTransaction()
- mkdir(t, st, tr, "/")
- mkdir(t, st, tr, "/Entries/a")
- mkdir(t, st, tr, "/Entries/b")
- commit(t, tr)
- }
-
- o1 := st.Bind("/Entries/a/Entries/c")
- tr1 := &Transaction{}
- v1 := newValue()
- s1, err := o1.Put(rootPublicID, tr1, v1)
- if err != nil {
- t.Errorf("Unexpected error: %s", err)
- }
-
- o2 := st.Bind("/Entries/b/Entries/d")
- tr2 := &Transaction{}
- v2 := newValue()
- s2, err := o2.Put(rootPublicID, tr2, v2)
- if err != nil {
- t.Errorf("Unexpected error: %s", err)
- }
-
- if err := tr1.Commit(); err != nil {
- t.Errorf("Unexpected error: %s", err)
- }
- if err := tr2.Commit(); err != nil {
- t.Errorf("Unexpected error: %s", err)
- }
-
- tr3 := &Transaction{}
- if x, err := o1.Get(rootPublicID, tr3); err != nil || x.Stat.ID != s1.ID {
- t.Errorf("Value should exist: %v, %s", x, err)
- }
- if x, err := o2.Get(rootPublicID, tr3); err != nil || x.Stat.ID != s2.ID {
- t.Errorf("Value should exist: %v, %s", x, err)
- }
-}
-
-func TestQuery(t *testing.T) {
- st, err := New(rootPublicID, "")
- if err != nil {
- t.Fatalf("New() failed: %v", err)
- }
- {
- tr := NewTransaction()
- mkdir(t, st, tr, "/")
- mkdir(t, st, tr, "/Entries/a")
- mkdir(t, st, tr, "/Entries/b")
- commit(t, tr)
- }
-
- o1 := st.Bind("/Entries")
- tr1 := &Transaction{}
-
-	stream, err := o1.Query(rootPublicID, tr1, query.Query{"*"})
-	if err != nil {
-		t.Fatalf("Unexpected error: %s", err)
-	}
- results := map[string]bool{}
- expected := []string{"", "a", "b"}
- for stream.Next() {
- results[stream.Get().Name] = true
- }
- if nresults, nexpected := len(results), len(expected); nresults != nexpected {
- t.Errorf("Unexpected number of query results. Want %d, got %d. %v",
- nexpected, nresults, results)
- }
- for _, expect := range expected {
- if !results[expect] {
- t.Errorf("Missing query result %s in %v.", expect, results)
- }
- }
-}
-
-func TestConcurrentConflict(t *testing.T) {
- st, err := New(rootPublicID, "")
- if err != nil {
- t.Fatalf("New() failed: %v", err)
- }
- {
- tr := NewTransaction()
- mkdir(t, st, tr, "/")
- mkdir(t, st, tr, "/Entries/a")
- commit(t, tr)
- }
-
- o := st.Bind("/Entries/a/Entries/c")
- tr1 := &Transaction{}
- v1 := newValue()
- s1, err := o.Put(rootPublicID, tr1, v1)
- if err != nil {
- t.Errorf("Unexpected error: %s", err)
- }
-
- tr2 := &Transaction{}
- v2 := newValue()
- if _, err = o.Put(rootPublicID, tr2, v2); err != nil {
- t.Errorf("Unexpected error: %s", err)
- }
-
- if err := tr1.Commit(); err != nil {
- t.Errorf("Unexpected error: %s", err)
- }
- if err := tr2.Commit(); err == nil || err.Error() != "precondition failed" {
- t.Errorf("Expected precondition failed, got %q", err)
- }
-
- tr3 := &Transaction{}
- if x, err := o.Get(rootPublicID, tr3); err != nil || x.Stat.ID != s1.ID {
- t.Errorf("Value should exist: %v, %s", x, err)
- }
-}
-
-type Foo struct{}
-
-func newFoo() *Foo {
- return &Foo{}
-}
-
-func getFoo(t *testing.T, st *Store, tr *Transaction, path string) *Foo {
- _, file, line, _ := runtime.Caller(1)
- v := get(t, st, tr, path)
- res, ok := v.(*Foo)
- if !ok {
- t.Fatalf("%s(%d): %s: not a Foo: %v", file, line, path, v)
- }
- return res
-}
-
-func TestSimpleMove(t *testing.T) {
- st, err := New(rootPublicID, "")
- if err != nil {
- t.Fatalf("New() failed: %v", err)
- }
-
- // Create / and /x.
- {
- tr := &Transaction{}
- put(t, st, tr, "/", newFoo())
- put(t, st, tr, "/x", newFoo())
- commit(t, tr)
- }
-
- // Move /x to /y.
- {
- tr := &Transaction{}
- x := getFoo(t, st, tr, "/x")
- remove(t, st, tr, "/x")
- put(t, st, tr, "/y", x)
- commit(t, tr)
- }
-}
-
-// Test a path conflict where some directory along the path to a value has been
-// mutated concurrently.
-func TestPathConflict(t *testing.T) {
- st, err := New(rootPublicID, "")
- if err != nil {
- t.Fatalf("New() failed: %v", err)
- }
-
- {
- tr := &Transaction{}
- put(t, st, tr, "/", newFoo())
- put(t, st, tr, "/a", newFoo())
- put(t, st, tr, "/a/b", newFoo())
- put(t, st, tr, "/a/b/c", newFoo())
- commit(t, tr)
- }
-
- // Add a new value.
- tr1 := &Transaction{}
- put(t, st, tr1, "/a/b/c/d", newFoo())
-
- // Change a directory along the path.
- tr2 := &Transaction{}
- put(t, st, tr2, "/a/b", newFoo())
- commit(t, tr2)
-
- // First Transaction should abort.
- if err := tr1.Commit(); err == nil {
- t.Errorf("Expected transaction to abort")
- }
-}
diff --git a/services/store/memstore/pathregex/doc.go b/services/store/memstore/pathregex/doc.go
deleted file mode 100644
index 3098f38..0000000
--- a/services/store/memstore/pathregex/doc.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Package pathregex implements path regular expressions. These are like glob
-// patterns, with the following kinds of expressions.
-//
-// Each component of a path is specified using a glob expression, where:
-// * matches any sequence of characters.
-// ? matches a single character.
-// [...] matches any single character in a range, where the ranges include:
-// c: matches the single character c
-// c1-c2: matches all characters in the range { c1,...,c2 }
-// ]: can be specified as the first character in the range
-// ^: a leading ^ inverts the range
-// {r1,r2,...,rN} matches r1 OR r2 OR ... OR rN.
-//
-// A path regular expression composes component expressions into full paths.
-//
-// R ::= r // a single component glob expression
-// | ... // matches any path
-// | R1 / R2 // sequential composition
-// | { R1, R2, ..., RN } // alternation, R1 OR R2 OR ... OR RN
-//
-// Examples:
-//
-// x.jpg - matches a path with one component x.jpg.
-// a/b - matches a two component path.
-// .../a/b/... - matches a path containing an a/b somewhere within it.
-// .../[abc]/... - matches a path containing an "a" or "b" or "c" component.
-// .../{a,b,c}/... - same as above.
-// {.../a/...,.../b/...,.../c/...} - same as above.
-//
-// The usage is to compile a path expression to a finite automaton, then execute
-// the automaton.
-//
-// ss, err := pathregex.Compile(".../{a,b/c,d/e/f}/...")
-// ss = ss.Step("x") // Match against x/d/e/f/y.
-// ss = ss.Step("d")
-// ss = ss.Step("e")
-// ss = ss.Step("f")
-// ss = ss.Step("y")
-// if ss.IsFinal() { log.Printf("Matched") }
-package pathregex
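The doc comment's snippet, expanded into a runnable shape; pathMatch is a hypothetical helper (the package tests use the same pattern):

	package main

	import (
		"fmt"

		"veyron/services/store/memstore/pathregex"
	)

	// pathMatch feeds each path component to the automaton and reports
	// whether a final state was reached.
	func pathMatch(pattern string, components ...string) bool {
		ss, err := pathregex.Compile(pattern)
		if err != nil {
			return false
		}
		for _, c := range components {
			ss = ss.Step(c)
		}
		return ss.IsFinal()
	}

	func main() {
		fmt.Println(pathMatch(".../{a,b/c,d/e/f}/...", "x", "d", "e", "f", "y")) // true
		fmt.Println(pathMatch("abc/def", "abc"))                                // false
	}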
diff --git a/services/store/memstore/pathregex/nfa.go b/services/store/memstore/pathregex/nfa.go
deleted file mode 100644
index 80b0f3d..0000000
--- a/services/store/memstore/pathregex/nfa.go
+++ /dev/null
@@ -1,226 +0,0 @@
-package pathregex
-
-import (
- "regexp"
- "sort"
- "unsafe"
-)
-
-// state is a state of the NFA.
-type state struct {
- // isFinal is true iff the state is a final state.
- isFinal bool
-
- // isClosed is true iff the epsilon transitions are transitively closed.
- isClosed bool
-
-	// trans is a non-epsilon transition, or nil.
- trans *transition
-
-	// epsilon are epsilon transitions, meaning the automaton can take a
-	// state transition without reading any input.
- epsilon hashStateSet
-}
-
-// transition represents a non-epsilon transition. If the current input symbol
-// matches the regular expression, the NFA can take a transition to the next
-// states.
-type transition struct {
- re *regexp.Regexp // nil means accept anything.
- next hashStateSet
-}
-
-// hashStateSet represents a set of states using a map.
-type hashStateSet map[*state]struct{}
-
-// StateSet represents a set of states of the NFA as a sorted slice of state pointers.
-type StateSet []*state
-
-// byPointer is used to sort the pointers in StateSet.
-type byPointer StateSet
-
-// tt accepts anything.
-func (r *tt) compile(final *state) *state {
- t := &transition{next: hashStateSet{final: struct{}{}}}
- return &state{trans: t, epsilon: make(hashStateSet)}
-}
-
-// ff has no transitions.
-func (r *ff) compile(final *state) *state {
- return &state{epsilon: make(hashStateSet)}
-}
-
-// epsilon doesn't require a transition.
-func (r *epsilon) compile(final *state) *state {
- return final
-}
-
-// single has a single transition from initial to final state.
-func (r *single) compile(final *state) *state {
- t := &transition{re: r.r, next: hashStateSet{final: struct{}{}}}
- return &state{trans: t, epsilon: make(hashStateSet)}
-}
-
-// sequence composes the automata.
-func (r *sequence) compile(final *state) *state {
- return r.r1.compile(r.r2.compile(final))
-}
-
-// alt constructs the separate automata, then defines a new start state that
-// includes epsilon transitions to the two separate automata.
-func (r *alt) compile(final *state) *state {
- s1 := r.r1.compile(final)
- s2 := r.r2.compile(final)
- return &state{epsilon: hashStateSet{s1: struct{}{}, s2: struct{}{}}}
-}
-
-// star contains a loop that accepts 0-or-more occurrences of r.re. There is an
-// epsilon transition from the start state s1 to the final state (for 0
-// occurrences), and a back epsilon-transition for 1-or-more occurrences.
-func (r *star) compile(final *state) *state {
- s2 := &state{epsilon: make(hashStateSet)}
- s1 := r.re.compile(s2)
- s2.epsilon = hashStateSet{s1: struct{}{}, final: struct{}{}}
- s1.epsilon[s2] = struct{}{}
- return s1
-}
-
-// close takes the transitive closure of the epsilon transitions.
-func (s *state) close() {
- if !s.isClosed {
- s.isClosed = true
- s.epsilon[s] = struct{}{}
- s.epsilon.closeEpsilon()
- if s.trans != nil {
- s.trans.next.closeEpsilon()
- }
- }
-}
-
-// isUseless returns true iff the state has only epsilon transitions and it is
-// not a final state.
-func (s *state) isUseless() bool {
- return s.trans == nil && !s.isFinal
-}
-
-// addStates folds the src states into the dst.
-func (dst hashStateSet) addStates(src hashStateSet) {
-	for s := range src {
- dst[s] = struct{}{}
- }
-}
-
-// stateSet converts the hashStateSet to a StateSet.
-func (set hashStateSet) stateSet() StateSet {
- states := StateSet{}
-	for s := range set {
- if !s.isUseless() {
- states = append(states, s)
- }
- }
- sort.Sort(byPointer(states))
- return states
-}
-
-// closeEpsilon closes the state set under epsilon transitions.
-func (states hashStateSet) closeEpsilon() {
- for changed := true; changed; {
- size := len(states)
-		for s := range states {
- s.close()
- states.addStates(s.epsilon)
- }
- changed = len(states) != size
- }
-
- // Remove useless states.
-	for s := range states {
- if s.isUseless() {
- delete(states, s)
- }
- }
-}
-
-// Step takes a transition for input name.
-func (ss StateSet) Step(name string) StateSet {
- states := make(hashStateSet)
- for _, s := range ss {
- if s.trans != nil && (s.trans.re == nil || s.trans.re.MatchString(name)) {
- s.close()
- states.addStates(s.trans.next)
- }
- }
- return states.stateSet()
-}
-
-// IsFinal returns true iff the StateSet contains a final state.
-func (ss StateSet) IsFinal() bool {
- for _, s := range ss {
- if s.isFinal {
- return true
- }
- }
- return false
-}
-
-// IsReject returns true iff the StateSet is empty.
-func (ss StateSet) IsReject() bool {
- return len(ss) == 0
-}
-
-// Union combines the state sets, returning a new StateSet.
-func (s1 StateSet) Union(s2 StateSet) StateSet {
-	// As a space optimization, detect the case where the two state sets are
- // equal. If so, return s1 unchanged.
- if s1.Equals(s2) {
- return s1
- }
-
- i1 := 0
- i2 := 0
- var result StateSet
- for i1 < len(s1) && i2 < len(s2) {
- p1 := uintptr(unsafe.Pointer(s1[i1]))
- p2 := uintptr(unsafe.Pointer(s2[i2]))
- switch {
- case p1 == p2:
- i2++
- fallthrough
- case p1 < p2:
- result = append(result, s1[i1])
- i1++
- case p2 < p1:
- result = append(result, s2[i2])
- i2++
- }
- }
- result = append(result, s1[i1:]...)
- result = append(result, s2[i2:]...)
- return result
-}
-
-// Equals returns true iff the state sets are equal.
-func (s1 StateSet) Equals(s2 StateSet) bool {
- if len(s1) != len(s2) {
- return false
- }
- for i, s := range s1 {
- if s2[i] != s {
- return false
- }
- }
- return true
-}
-
-// sorting methods.
-func (a byPointer) Len() int { return len(a) }
-func (a byPointer) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a byPointer) Less(i, j int) bool {
- return uintptr(unsafe.Pointer(a[i])) < uintptr(unsafe.Pointer(a[j]))
-}
-
-// compileNFA compiles the regex to an NFA and returns its set of start states.
-func compileNFA(r regex) StateSet {
- s := r.compile(&state{isFinal: true, epsilon: make(hashStateSet)})
- s.close()
- return s.epsilon.stateSet()
-}
diff --git a/services/store/memstore/pathregex/parser.go b/services/store/memstore/pathregex/parser.go
deleted file mode 100644
index 101cce1..0000000
--- a/services/store/memstore/pathregex/parser.go
+++ /dev/null
@@ -1,278 +0,0 @@
-package pathregex
-
-import (
- "bytes"
- "io"
- "regexp"
- "strings"
-
- "veyron2/verror"
-)
-
-// parser is a recursive-descent parser for path regular expressions.
-type parser struct {
- reader *strings.Reader
-
- // commas are treated specially within braces, but are otherwise normal
- // characters.
- commaIsSpecial bool
-
- // isErrored is true iff an error has been encountered.
- isErrored bool
-}
-
-// Compile compiles a string to a finite automaton. Returns a StateSet of the
-// initial states of the automaton.
-func Compile(s string) (StateSet, error) {
- re, err := compileRegex(s)
- if err != nil {
- return nil, err
- }
- return compileNFA(re), nil
-}
-
-// CompileReverse compiles a path regular expression to a finite automaton,
-// reversing the regular expression. Returns a StateSet of the initial states
-// of the automaton.
-func CompileReverse(s string) (StateSet, error) {
- re, err := compileRegex(s)
- if err != nil {
- return nil, err
- }
- re.reverse()
- return compileNFA(re), nil
-}
-
-func compileRegex(s string) (regex, error) {
- p := &parser{reader: strings.NewReader(s)}
- re := p.parsePath()
- if p.isErrored || p.reader.Len() != 0 {
- pos, _ := p.reader.Seek(0, 1)
- err := verror.BadArgf("Syntax error at char %d: %q", pos, s)
- return nil, err
- }
- return re, nil
-}
-
-// parsePath reads a path regular expression. Reads as much of the input as
-// possible, stopping at special characters, like '}' or ',' (if
-// commaIsSpecial).
-func (p *parser) parsePath() regex {
- var path []regex
- for !p.isErrored {
- // If the next rune is '{', parse as an alternation; otherwise, parse
- // the next component.
- c, ok := p.readRune()
- if !ok {
- break
- }
- var re regex
- if c == '{' {
- re = p.parsePathAlt()
- } else {
- p.unreadRune()
- re = p.parseComponent()
- }
- if re != nil {
- path = append(path, re)
- }
-
- c, ok = p.readRune()
- if !ok {
- break
- }
- if c != '/' {
- p.unreadRune()
- break
- }
- }
- return newSequence(path)
-}
-
-// parsePathAlt reads an alternation {p1,p2,...,pn}. Assumes the opening brace
-// has already been read; consumes the closing brace.
-func (p *parser) parsePathAlt() regex {
- s := p.commaIsSpecial
- defer func() { p.commaIsSpecial = s }()
- p.commaIsSpecial = true
-
- var choices []regex
-parseLoop:
- for !p.isErrored {
- if re := p.parsePath(); re != nil {
- choices = append(choices, re)
- }
- c, ok := p.readRune()
- if !ok {
- break parseLoop
- }
- switch c {
- case ',':
- // skip
- case '}':
- return newAlt(choices)
- default:
- break parseLoop
- }
- }
- p.setError()
- return nil
-}
-
-// parseComponent parses a single component of a path. This is a glob
-// expression.
-func (p *parser) parseComponent() regex {
- // p.reader.Seek(0, 1) just returns the current position.
- startPos, _ := p.reader.Seek(0, 1)
- var buf bytes.Buffer
- p.parseComponentPiece(&buf)
- if buf.Len() == 0 {
- return nil
- }
- endPos, _ := p.reader.Seek(0, 1)
-
- // The ... component name is special.
- if endPos-startPos == 3 {
- var literal [3]byte
- p.reader.ReadAt(literal[:], startPos)
- if string(literal[:]) == "..." {
- return dotDotDotRegex
- }
- }
-
- // Everything else is a regular expression.
- re, err := regexp.Compile("^" + buf.String() + "$")
- if err != nil {
- p.setError()
- return nil
- }
- return newSingle(re)
-}
-
-// parseComponentPiece reads a glob expression and converts it to a regular
-// expression.
-func (p *parser) parseComponentPiece(buf *bytes.Buffer) {
- for !p.isErrored {
- c, ok := p.readRune()
- if !ok {
- return
- }
- switch c {
- case '*':
- buf.WriteString(".*")
- case '?':
- buf.WriteString(".")
- case '.', '(', ')':
- buf.WriteRune('\\')
- buf.WriteRune(c)
- case '\\':
- p.parseEscapedRune(buf)
- case '[':
- buf.WriteRune('[')
- p.parseCharRange(buf)
- buf.WriteRune(']')
- case '{':
- buf.WriteRune('(')
- p.parseComponentAlt(buf)
- buf.WriteRune(')')
- case '}', ']', '/':
- p.unreadRune()
- return
- case ',':
- if p.commaIsSpecial {
- p.unreadRune()
- return
- } else {
- buf.WriteRune(c)
- }
- default:
- buf.WriteRune(c)
- }
- }
-}
-
-// parseCharRange copies the input range literally.
-//
-// TODO(jyh): Translate the glob range.
-func (p *parser) parseCharRange(buf *bytes.Buffer) {
- // Initial ] does not close the range.
- c, ok := p.readRune()
- if !ok {
- p.setError()
- return
- }
- buf.WriteRune(c)
- for {
- c, ok := p.readRune()
- if !ok {
- p.setError()
- return
- }
- if c == ']' {
- break
- }
- buf.WriteRune(c)
- }
-}
-
-// parseComponentAlt parses an alternation.
-func (p *parser) parseComponentAlt(buf *bytes.Buffer) {
- s := p.commaIsSpecial
- p.commaIsSpecial = true
- defer func() { p.commaIsSpecial = s }()
- for {
- p.parseComponentPiece(buf)
- c, ok := p.readRune()
- if !ok {
- p.setError()
- return
- }
- switch c {
- case ',':
- buf.WriteRune('|')
- case '}':
- return
- default:
- p.setError()
- return
- }
- }
-}
-
-// parseEscapedRune parses a rune immediately after a backslash.
-func (p *parser) parseEscapedRune(buf *bytes.Buffer) {
- c, ok := p.readRune()
- if !ok {
- return
- }
- // TODO(jyh): Are there any special escape sequences?
- buf.WriteRune('\\')
- buf.WriteRune(c)
-}
-
-// readRune reads the next rune from the input reader. If there is an error,
-// returns false and sets the isErrored flag if the error is not io.EOF.
-func (p *parser) readRune() (rune, bool) {
- if p.isErrored {
- return 0, false
- }
- c, _, err := p.reader.ReadRune()
- switch {
- case err == io.EOF:
- return c, false
- case err != nil:
- p.setError()
- return c, false
- }
- return c, true
-}
-
-// unreadRune pushes back the last rune read.
-func (p *parser) unreadRune() {
- p.reader.UnreadRune()
-}
-
-// setError sets the error flag.
-func (p *parser) setError() {
- p.isErrored = true
-}
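The component parser maps glob syntax onto Go's regexp package; the translations below are the ones the parser tests that follow expect, shown against a stock regexp for comparison:

	// Glob component                -> compiled regexp
	// "*"                           -> "^.*$"
	// "?"                           -> "^.$"
	// "abc{foo,bar}def"             -> "^abc(foo|bar)def$"
	// "DSC[0-9][0-9][0-9][0-9].jpg" -> "^DSC[0-9][0-9][0-9][0-9]\.jpg$"
	re := regexp.MustCompile(`^DSC[0-9][0-9][0-9][0-9]\.jpg$`)
	fmt.Println(re.MatchString("DSC0001.jpg")) // true
	fmt.Println(re.MatchString("IMG0001.jpg")) // false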
diff --git a/services/store/memstore/pathregex/parser_test.go b/services/store/memstore/pathregex/parser_test.go
deleted file mode 100644
index 616f195..0000000
--- a/services/store/memstore/pathregex/parser_test.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package pathregex
-
-import (
- "runtime"
- "strings"
- "testing"
-)
-
-type testCase struct {
- // path expression.
- path string
-
-	// expected regular expression after parsing.
- expected string
-
- // expected reversed regular expression.
- reversed string
-}
-
-type errorCase struct {
- // path expression.
- path string
-
- // expected error position.
- pos int64
-}
-
-var (
- testCases = []testCase{
- testCase{"a", "^a$", "^a$"},
- testCase{"*", "^.*$", "^.*$"},
- testCase{"?", "^.$", "^.$"},
- testCase{".", "^\\.$", "^\\.$"},
- testCase{"[a]", "^[a]$", "^[a]$"},
- testCase{"[]]", "^[]]$", "^[]]$"},
- testCase{"[a-z]", "^[a-z]$", "^[a-z]$"},
- testCase{"[^a-z]", "^[^a-z]$", "^[^a-z]$"},
- testCase{
- "DSC[0-9][0-9][0-9][0-9].jpg",
- "^DSC[0-9][0-9][0-9][0-9]\\.jpg$",
- "^DSC[0-9][0-9][0-9][0-9]\\.jpg$",
- },
- testCase{"abc/def", "^abc$/^def$", "^def$/^abc$"},
- testCase{"abc{foo,bar}def", "^abc(foo|bar)def$", "^abc(foo|bar)def$"},
- testCase{"{abc,def}", "(^abc$|^def$)", "(^abc$|^def$)"},
- testCase{
- "abc/{bbb,ccc,ddd}/def",
- "^abc$/(^bbb$|(^ccc$|^ddd$))/^def$",
- "^def$/(^bbb$|(^ccc$|^ddd$))/^abc$",
- },
- testCase{
- ".../abc/def/...",
- "(<true>)*/^abc$/^def$/(<true>)*",
- "(<true>)*/^def$/^abc$/(<true>)*",
- },
- testCase{
- "abc/{bbb,ccc}/ddd,eee",
- "^abc$/(^bbb$|^ccc$)/^ddd,eee$",
- "^ddd,eee$/(^bbb$|^ccc$)/^abc$",
- },
- testCase{
- "aaa/bbb{ccc,ddd}eee,fff/ggg",
- "^aaa$/^bbb(ccc|ddd)eee,fff$/^ggg$",
- "^ggg$/^bbb(ccc|ddd)eee,fff$/^aaa$",
- },
- testCase{
- "aaa/{bbb/ccc,ddd{eee,fff}ggg,ggg/{hhh,iii}/jjj}/kkk",
- "^aaa$/(^bbb$/^ccc$|(^ddd(eee|fff)ggg$|^ggg$/(^hhh$|^iii$)/^jjj$))/^kkk$",
- "^kkk$/(^ccc$/^bbb$|(^ddd(eee|fff)ggg$|^jjj$/(^hhh$|^iii$)/^ggg$))/^aaa$",
- },
- }
-
- errorCases = []errorCase{
- errorCase{"[", 1},
- errorCase{"[]", 2},
- errorCase{"{", 1},
- errorCase{"{,", 2},
- errorCase{"aaa{bbb/ccc}ddd", 8},
- errorCase{"aaa/bbb/ccc}", 11},
- }
-)
-
-func parsePath(t *testing.T, c *testCase) {
- p := &parser{reader: strings.NewReader(c.path)}
- re := p.parsePath()
- pos, _ := p.reader.Seek(0, 1)
- if p.isErrored || p.reader.Len() != 0 {
- _, file, line, _ := runtime.Caller(1)
- t.Fatalf("%s(%d): syntax error at char %d: %q", file, line, pos, c.path)
- }
- s := re.String()
- if s != c.expected {
- t.Errorf("Expected %q, got %q", c.expected, s)
- }
-
- re.reverse()
- s = re.String()
- if s != c.reversed {
- t.Errorf("Expected %q, got %q", c.reversed, s)
- }
-}
-
-func parsePathError(t *testing.T, e *errorCase) {
- _, file, line, _ := runtime.Caller(1)
- p := &parser{reader: strings.NewReader(e.path)}
- p.parsePath()
- if p.reader.Len() != 0 {
- p.setError()
- }
- if !p.isErrored {
- t.Errorf("%s(%d): expected error: %q", file, line, e.path)
- }
- if pos, _ := p.reader.Seek(0, 1); pos != e.pos {
- t.Errorf("%s(%d): %q: expected error at pos %d, got %d", file, line, e.path, e.pos, pos)
- }
-}
-
-func TestParser(t *testing.T) {
- for _, c := range testCases {
- parsePath(t, &c)
- }
-}
-
-func TestParserErrors(t *testing.T) {
- for _, e := range errorCases {
- parsePathError(t, &e)
- }
-}
diff --git a/services/store/memstore/pathregex/pathregex_test.go b/services/store/memstore/pathregex/pathregex_test.go
deleted file mode 100644
index c43a0ef..0000000
--- a/services/store/memstore/pathregex/pathregex_test.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package pathregex_test
-
-import (
- "runtime"
- "strings"
- "testing"
-
- "veyron/services/store/memstore/pathregex"
-)
-
-func pathMatch(ss pathregex.StateSet, s string) bool {
- var path []string
- if s != "" {
- path = strings.Split(s, "/")
- }
- for _, s := range path {
- ss = ss.Step(s)
- }
- return ss.IsFinal()
-}
-
-func expectMatch(t *testing.T, ss pathregex.StateSet, s string) {
- if !pathMatch(ss, s) {
- _, file, line, _ := runtime.Caller(1)
- t.Errorf("%s(%d): expected match: %q", file, line, s)
- }
-}
-
-func expectNoMatch(t *testing.T, ss pathregex.StateSet, s string) {
- if pathMatch(ss, s) {
- _, file, line, _ := runtime.Caller(1)
- t.Errorf("%s(%d): unexpected match: %q", file, line, s)
- }
-}
-
-func TestSingle(t *testing.T) {
- ss, err := pathregex.Compile("a*b*c")
- if err != nil {
- t.Fatalf("Bad regex: %s", err)
- }
- expectNoMatch(t, ss, "")
- expectNoMatch(t, ss, "b")
- expectMatch(t, ss, "aabccc")
- expectNoMatch(t, ss, "aabccc/b")
-}
-
-func TestSequence(t *testing.T) {
- ss, err := pathregex.Compile("abc/def")
- if err != nil {
- t.Fatalf("Bad regex: %s", err)
- }
- expectNoMatch(t, ss, "")
- expectNoMatch(t, ss, "abcdef")
- expectMatch(t, ss, "abc/def")
- expectNoMatch(t, ss, "abc/def/ghi")
-}
-
-func TestAlt(t *testing.T) {
- ss, err := pathregex.Compile("{abc,def}")
- if err != nil {
- t.Fatalf("Bad regex: %s", err)
- }
- expectNoMatch(t, ss, "")
- expectNoMatch(t, ss, "abcdef")
- expectMatch(t, ss, "abc")
- expectMatch(t, ss, "def")
- expectNoMatch(t, ss, "abc/def")
-}
-
-func TestAltPath(t *testing.T) {
- ss, err := pathregex.Compile("{abc/def,def/abc}")
- if err != nil {
- t.Fatalf("Bad regex: %s", err)
- }
- expectNoMatch(t, ss, "")
- expectNoMatch(t, ss, "abcdef")
- expectNoMatch(t, ss, "abc")
- expectNoMatch(t, ss, "def")
- expectMatch(t, ss, "abc/def")
- expectMatch(t, ss, "def/abc")
-}
-
-func TestUnanchored(t *testing.T) {
- ss, err := pathregex.Compile(".../abc/...")
- if err != nil {
- t.Fatalf("Bad regex: %s", err)
- }
- expectNoMatch(t, ss, "")
- expectMatch(t, ss, "abc")
- expectNoMatch(t, ss, "def")
- expectMatch(t, ss, "x/x/abc/x/x")
-}
-
-func TestStar(t *testing.T) {
- ss, err := pathregex.Compile("{,a,*/aa,*/*/aaa,*/*/*/aaaa}")
- if err != nil {
- t.Fatalf("Bad regex: %s", err)
- }
- expectMatch(t, ss, "")
- expectMatch(t, ss, "a")
- expectNoMatch(t, ss, "b/a")
- expectMatch(t, ss, "b/aa")
- expectMatch(t, ss, "c/b/aaa")
- expectMatch(t, ss, "d/c/b/aaaa")
- expectNoMatch(t, ss, "d/c/b/aaa")
-}
-
-func TestSequenceReverse(t *testing.T) {
- ss, err := pathregex.CompileReverse("abc/def")
- if err != nil {
- t.Fatalf("Bad regex: %s", err)
- }
- expectNoMatch(t, ss, "")
- expectNoMatch(t, ss, "abcdef")
- expectMatch(t, ss, "def/abc")
- expectNoMatch(t, ss, "def/abc/ghi")
-}
-
-func TestAltReverse(t *testing.T) {
- ss, err := pathregex.CompileReverse(".../123/{abc/def,g/*/i}/456/...")
- if err != nil {
- t.Fatalf("Bad regex: %s", err)
- }
- expectNoMatch(t, ss, "")
- expectNoMatch(t, ss, "123/abc/def/456")
- expectMatch(t, ss, "456/def/abc/123")
- expectMatch(t, ss, "x/y/456/i/am/g/123/z/w")
-}
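The deleted tests above exercise the public pathregex API: Compile (or CompileReverse) yields a StateSet NFA that is advanced one path component at a time. A minimal caller sketch against that same API:

```go
package main

import (
	"fmt"
	"strings"

	"veyron/services/store/memstore/pathregex"
)

func main() {
	// ".../abc/..." is unanchored: it matches any path containing an "abc" component.
	ss, err := pathregex.Compile(".../abc/...")
	if err != nil {
		panic(err)
	}
	for _, component := range strings.Split("x/x/abc/x/x", "/") {
		ss = ss.Step(component)
	}
	fmt.Println(ss.IsFinal()) // true
}
```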
diff --git a/services/store/memstore/pathregex/regex.go b/services/store/memstore/pathregex/regex.go
deleted file mode 100644
index 3a50219..0000000
--- a/services/store/memstore/pathregex/regex.go
+++ /dev/null
@@ -1,148 +0,0 @@
-package pathregex
-
-import (
- "regexp"
-)
-
-// regex is the type of regular expressions.
-type regex interface {
- // compile constructs an NFA. The argument is the final state. Returns the
- // initial state.
- compile(final *state) *state
-
- // reverse reverses the regular expression in place.
- reverse()
-
- // String returns a string representation of the regular expression.
- String() string
-}
-
-// regexCommon is a common base type of regular expressions.
-type regexCommon struct{}
-
-// tt accepts everything.
-type tt struct {
- regexCommon
-}
-
-// ff rejects everything.
-type ff struct {
- regexCommon
-}
-
-// epsilon matches the empty path.
-type epsilon struct {
- regexCommon
-}
-
-// single matches a single component of a path, using a regexp.Regexp for the
-// match.
-type single struct {
- regexCommon
- r *regexp.Regexp
-}
-
-// sequence matches a sequence of components.
-type sequence struct {
- regexCommon
- r1, r2 regex
-}
-
-// alt matches one of two regular expressions.
-type alt struct {
- regexCommon
- r1, r2 regex
-}
-
-// star is the Kleene closure.
-type star struct {
- regexCommon
- re regex
-}
-
-var (
- trueRegex = &tt{}
- falseRegex = &ff{}
- epsilonRegex = &epsilon{}
- dotDotDotRegex = &star{re: trueRegex}
-)
-
-// newSingle returns a Regex that matches one component of a path.
-func newSingle(r *regexp.Regexp) regex {
- return &single{r: r}
-}
-
-// newSequence returns a Regex that matches a sequence of path components.
-func newSequence(rl []regex) regex {
- if len(rl) == 0 {
- return epsilonRegex
- }
- r := rl[len(rl)-1]
- for i := len(rl) - 2; i >= 0; i-- {
- r = &sequence{r1: rl[i], r2: r}
- }
- return r
-}
-
-// newAlt returns a Regex that matches one of a set of regular expressions.
-func newAlt(rl []regex) regex {
- if len(rl) == 0 {
- return falseRegex
- }
- r := rl[len(rl)-1]
- for i := len(rl) - 2; i >= 0; i-- {
- r = &alt{r1: rl[i], r2: r}
- }
- return r
-}
-
-// newStar returns the Kleene closure of a regular expression.
-func newStar(re regex) regex {
- return &star{re: re}
-}
-
-func (re *tt) String() string {
- return "<true>"
-}
-
-func (re *ff) String() string {
- return "<false>"
-}
-
-func (re *epsilon) String() string {
- return "<epsilon>"
-}
-
-func (re *single) String() string {
- return re.r.String()
-}
-
-func (re *sequence) String() string {
- return re.r1.String() + "/" + re.r2.String()
-}
-
-func (re *alt) String() string {
- return "(" + re.r1.String() + "|" + re.r2.String() + ")"
-}
-
-func (re *star) String() string {
- return "(" + re.re.String() + ")*"
-}
-
-func (re *regexCommon) reverse() {
-}
-
-func (re *sequence) reverse() {
- re.r1, re.r2 = re.r2, re.r1
- re.r1.reverse()
- re.r2.reverse()
-}
-
-func (re *alt) reverse() {
- re.r1.reverse()
- re.r2.reverse()
-}
-
-func (re *star) reverse() {
- re.re.reverse()
-}
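Note that reverse() works in place: swapping r1 and r2 recursively turns a matcher for "abc/def" into one for "def/abc", which is what CompileReverse relies on. A small package-internal sketch using the constructors above:

```go
package pathregex

import (
	"fmt"
	"regexp"
)

// exampleReverse shows in-place reversal of a two-component sequence.
func exampleReverse() {
	re := newSequence([]regex{
		newSingle(regexp.MustCompile("abc")),
		newSingle(regexp.MustCompile("def")),
	})
	fmt.Println(re) // abc/def
	re.reverse()
	fmt.Println(re) // def/abc
}
```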
diff --git a/services/store/memstore/pathregex/regex_test.go b/services/store/memstore/pathregex/regex_test.go
deleted file mode 100644
index d96e5bf..0000000
--- a/services/store/memstore/pathregex/regex_test.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package pathregex
-
-import (
- "regexp"
- "runtime"
- "strings"
- "testing"
-)
-
-func pathMatch(re regex, s string) bool {
- var path []string
- if s != "" {
- path = strings.Split(s, "/")
- }
- ss := compileNFA(re)
- for _, s := range path {
- ss = ss.Step(s)
- }
- return ss.IsFinal()
-}
-
-func expectMatch(t *testing.T, re regex, s string) {
- if !pathMatch(re, s) {
- _, file, line, _ := runtime.Caller(1)
- t.Errorf("%s(%d): expected match: %s, %q", file, line, re, s)
- }
-}
-
-func expectNoMatch(t *testing.T, re regex, s string) {
- if pathMatch(re, s) {
- _, file, line, _ := runtime.Caller(1)
- t.Errorf("%s(%d): unexpected match: %s, %q", file, line, re, s)
- }
-}
-
-func TestSingle(t *testing.T) {
- p, err := regexp.Compile("a*bc*")
- if err != nil {
- t.Fatalf("Bad regular expression: %s", err)
- }
- re := newSingle(p)
- expectNoMatch(t, re, "")
- expectMatch(t, re, "b")
- expectMatch(t, re, "aabccc")
- expectNoMatch(t, re, "aabccc/b")
-}
-
-func TestSequence(t *testing.T) {
- re := newSequence([]regex{
- newSingle(regexp.MustCompile("abc")),
- newSingle(regexp.MustCompile("def")),
- })
- expectNoMatch(t, re, "")
- expectNoMatch(t, re, "abcdef")
- expectMatch(t, re, "abc/def")
- expectNoMatch(t, re, "abc/def/ghi")
-}
-
-func TestAlt(t *testing.T) {
- re := newAlt([]regex{
- newSingle(regexp.MustCompile("abc")),
- newSingle(regexp.MustCompile("def")),
- })
- expectNoMatch(t, re, "")
- expectMatch(t, re, "abc")
- expectMatch(t, re, "def")
- expectNoMatch(t, re, "x/abc")
-}
-
-func TestStar(t *testing.T) {
- re := newStar(newSingle(regexp.MustCompile("abc")))
- expectMatch(t, re, "")
- expectMatch(t, re, "abc")
- expectMatch(t, re, "abc/abc")
- expectNoMatch(t, re, "abc/abc/a/abc")
-}
-
-func TestComplex(t *testing.T) {
- // Case-insensitive match on (j/a/s/o/n)*.
- var rl [5]regex
- for i, s := range []string{"j", "a", "s", "o", "n"} {
- rl[i] = newAlt([]regex{
- newSingle(regexp.MustCompile(s)),
- newSingle(regexp.MustCompile(strings.ToUpper(s))),
- })
- }
- re := newStar(newSequence(rl[:]))
- expectMatch(t, re, "")
- expectMatch(t, re, "j/A/s/O/n")
- expectNoMatch(t, re, "j/A/O/s/n")
- expectMatch(t, re, "j/A/s/O/n/j/a/S/o/N")
-}
diff --git a/services/store/memstore/query/eval.go b/services/store/memstore/query/eval.go
deleted file mode 100644
index e059390..0000000
--- a/services/store/memstore/query/eval.go
+++ /dev/null
@@ -1,1223 +0,0 @@
-package query
-
-import (
- "crypto/rand"
- "fmt"
- "math/big"
- "reflect"
- "runtime"
- "sort"
- "strings"
- "sync"
-
- vsync "veyron/runtimes/google/lib/sync"
- "veyron/services/store/memstore/state"
-
- "veyron2/naming"
- "veyron2/query"
- "veyron2/query/parse"
- "veyron2/security"
- "veyron2/services/store"
- "veyron2/storage"
- "veyron2/vdl/vdlutil"
- "veyron2/vlog"
-)
-
-// maxChannelSize is the maximum size of the channels used for concurrent
-// query evaluation.
-const maxChannelSize = 100
-
-// QueryStream yields the results of a query. Usage:
-// qs, _ := obj.Query
-// for qs.Next() {
-// result := qs.Get()
-// ...
-// if enough_results {
-// qs.Abort()
-// }
-// }
-// if err := qs.Err(); err != nil {
-// ...
-// }
-// QueryStream is thread-safe.
-type QueryStream interface {
- // Next advances the iterator. It must be called before calling Get.
- // Returns true if there is a value to be retrieved with Get. Returns
- // false when iteration is finished.
- Next() bool
-
- // Get retrieves a query result. It is idempotent.
- Get() *store.QueryResult
-
- // Err returns the first error encountered during query evaluation. It is
- // idempotent.
- Err() error
-
- // Abort stops query evaluation early. The client must call Abort unless
- // iteration goes to completion (i.e. Next returns false). It is
- // idempotent and can be called from any thread.
- Abort()
-}
-
-// evalIterator implements QueryStream.
-type evalIterator struct {
- // mu guards 'result', 'err', and the closing of 'abort'.
- mu sync.Mutex
- // result is what Get will return. It will be nil if there are no more
- // query results. Guarded by mu.
- result *store.QueryResult
- // err is the first error encountered during query evaluation.
- // Guarded by mu.
- err error
- // abort is used as the signal to query evaluation to terminate early.
- // evaluator implementations will test for abort closing. The close()
- // call is guarded by mu.
- abort chan struct{}
-
- // results represents a stack of result channels. evalIterator is
- // essentially an iterator of iterators. Each subquery adds a nestedChannel
- // to this stack. The top of the stack is the end of the slice.
- results []nestedChannel
- // cleanup is used for testing to ensure that no goroutines are leaked.
- cleanup vsync.WaitGroup
- // maxNesting is the largest value used for nestedChannel.nesting. It
- // is the maximum nesting over the duration of the query while len(results)
- // is just the instantaneous nesting.
- maxNesting int
-}
-
-// nestedChannel contains the results of a subquery.
-type nestedChannel struct {
- // nesting is the value to use for the NestedResult field of all
- // objects that come out of the results channel.
- nesting int
- // results is the stream of results for this subquery.
- results <-chan *store.QueryResult
-}
-
-// hiddenResult wraps a value so evalIterator can elide it from
-// storage.QueryResult.Fields that are sent to the client.
-type hiddenResult struct {
- value vdlutil.Any
-}
-
-// Next implements the QueryStream method.
-func (it *evalIterator) Next() bool {
- it.mu.Lock()
- if it.err != nil {
- it.mu.Unlock()
- return false
- }
- it.mu.Unlock()
-
- depth := len(it.results) - 1
- select {
- case result, ok := <-it.results[depth].results:
- if !ok {
- it.results = it.results[:depth]
- if len(it.results) > 0 {
- return it.Next()
- }
- return false
- }
-
- // Set the correct value for NestedResult.
- result.NestedResult = store.NestedResult(it.results[depth].nesting)
-
- it.enqueueNestedChannels(result)
-
- it.mu.Lock()
- defer it.mu.Unlock()
- it.result = result
- return true
- case <-it.abort:
- return false
- }
-}
-
-// enqueueNestedChannels looks through result.Fields for nested result
-// channels. If there are any, they are appended to it.results in reverse
-// alphabetical order of their Fields keys. Reverse order is used because
-// it.results is a stack; popping then processes the channels in
-// alphabetical order.
-//
-// enqueueNestedChannels also removes any result.Fields that are of the
-// type hiddenResult.
-func (it *evalIterator) enqueueNestedChannels(result *store.QueryResult) {
- if result.Fields == nil {
- return
- }
- var nestingKeys []string
- for key, val := range result.Fields {
- switch val.(type) {
- case chan *store.QueryResult:
- nestingKeys = append(nestingKeys, key)
- case hiddenResult:
- // If a field is "hidden", the value will be wrapped in the type
- // hiddenResult to make it possible for evalIterator to elide it
- // from the results sent to the client.
- delete(result.Fields, key)
- }
- }
- // Figure out the store.NestedResult values based on alphabetical order of
- // the keys.
-	sort.Strings(nestingKeys)
- var nested []nestedChannel
- for _, key := range nestingKeys {
- it.maxNesting++
- nested = append(nested,
- nestedChannel{it.maxNesting, result.Fields[key].(chan *store.QueryResult)})
- result.Fields[key] = store.NestedResult(it.maxNesting)
- }
- // Add the nested result channels in reverse alphabetical order.
- for i := len(nested) - 1; i >= 0; i-- {
- it.results = append(it.results, nested[i])
- }
-}
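A hypothetical walk-through of this numbering, borrowing the field names from the selection tests later in this change:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// Two subquery fields on one result, as in the "drinkers"/"nondrinkers" test.
	keys := []string{"nondrinkers", "drinkers"}
	sort.Strings(keys) // ["drinkers", "nondrinkers"]

	// Keys are numbered in increasing alphabetical order...
	nesting := map[string]int{}
	for i, k := range keys {
		nesting[k] = i + 1 // drinkers -> 1, nondrinkers -> 2
	}

	// ...but pushed in reverse, so the stack pops them alphabetically.
	var stack []string
	for i := len(keys) - 1; i >= 0; i-- {
		stack = append(stack, keys[i])
	}
	fmt.Println(nesting, stack[len(stack)-1]) // map[drinkers:1 nondrinkers:2] drinkers
}
```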
-
-// Get implements the QueryStream method.
-func (it *evalIterator) Get() *store.QueryResult {
- it.mu.Lock()
- defer it.mu.Unlock()
- return it.result
-}
-
-// Abort implements the QueryStream method.
-func (it *evalIterator) Abort() {
- it.mu.Lock()
- defer it.mu.Unlock()
- select {
- case <-it.abort:
- // Already closed.
- default:
- close(it.abort)
- }
- it.result = nil
-}
-
-// Err implements the QueryStream method.
-func (it *evalIterator) Err() error {
- it.mu.Lock()
- defer it.mu.Unlock()
- return it.err
-}
-
-func (it *evalIterator) setErrorf(format string, args ...interface{}) {
- it.setError(fmt.Errorf(format, args...))
-}
-
-func (it *evalIterator) setError(err error) {
- it.mu.Lock()
- if it.err == nil {
- it.err = err
- }
- it.mu.Unlock()
- it.Abort()
-}
-
-// wait blocks until all children goroutines are finished. This is useful in
-// tests to ensure that an abort cleans up correctly.
-func (it *evalIterator) wait() {
- it.cleanup.Wait()
-}
-
-func newErrorIterator(err error) QueryStream {
- return &evalIterator{
- err: err,
- abort: make(chan struct{}),
- }
-}
-
-// Eval evaluates a query and returns a QueryStream for the results. If there is
-// an error parsing the query, it will show up as an error in the QueryStream.
-// Query evaluation is concurrent, so it is important to call QueryStream.Abort
-// if the client does not consume all of the results.
-func Eval(sn state.Snapshot, clientID security.PublicID, name storage.PathName, q query.Query) QueryStream {
- ast, err := parse.Parse(q)
- if err != nil {
- return newErrorIterator(err)
- }
- evaluator, err := convert(ast)
- if err != nil {
- return newErrorIterator(err)
- }
-
- // Seed the input with the root entity.
- in := make(chan *store.QueryResult, 1)
- in <- &store.QueryResult{Name: ""}
- close(in)
-
- out := make(chan *store.QueryResult, maxChannelSize)
- it := &evalIterator{
- results: []nestedChannel{nestedChannel{0, out}},
- abort: make(chan struct{}),
- }
- if !it.cleanup.TryAdd() {
- // The query has been aborted by a call to Cancel.
- return it
- }
- go evaluator.eval(&context{
- sn: sn,
- suffix: name.String(),
- clientID: clientID,
- in: in,
- out: out,
- evalIt: it,
- abort: it.abort,
- cleanup: &it.cleanup,
- })
-
- return it
-}
-
-// context is a wrapper of all the variables that need to be passed around
-// during evaluation.
-type context struct {
- // sn is the snapshot of the store's state to use to find query results.
- sn state.Snapshot
- // suffix is the suffix we're evaluating relative to.
- suffix string
- // clientID is the identity of the client that issued the query.
- clientID security.PublicID
- // in produces the intermediate results from the previous stage of the
- // query. It will be closed when the evaluator should stop processing
- // results. It is not necessary to select on 'in' and 'errc'.
- in <-chan *store.QueryResult
- // out is where the evaluator should write the intermediate results.
- // evaluators should use context.emit instead of writing directly
- // to out.
- out chan<- *store.QueryResult
- // evalIt is the iterator that interfaces with the client. It is here
- // to allow the evaluation code to propagate errors via setError().
- evalIt *evalIterator
- // abort will be closed if query evaluation should terminate early.
- // evaluator implementations should regularly test if it is still open.
- abort chan struct{}
- // cleanup is used for testing to ensure that no goroutines are leaked.
- // evaluator.eval implementations should call Done when finished processing.
- cleanup *vsync.WaitGroup
-}
-
-// emit sends result on c.out. It is careful to watch for aborts. result can be
-// nil. Returns true if the caller should continue iterating, returns
-// false if it is time to abort.
-func (c *context) emit(result *store.QueryResult) bool {
- vlog.VI(2).Info("emit", result)
- if result == nil {
- // Check for an abort before continuing iteration.
- select {
- case <-c.abort:
- return false
- default:
- return true
- }
- } else {
- // If c.out is full, we don't want to block on it forever and ignore
- // aborts.
- select {
- case <-c.abort:
- return false
- case c.out <- result:
- return true
- }
- }
-}
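The shape of emit generalizes: a producer must never block on a full output channel without also selecting on the abort signal, or an abandoned consumer strands the producer goroutine forever. A standalone sketch of the same pattern:

```go
package emitter

// trySend mirrors the emit pattern above: it blocks on out only while
// also watching abort, so an aborted consumer cannot strand the sender.
func trySend(out chan<- int, abort <-chan struct{}, v int) bool {
	select {
	case <-abort:
		return false // aborted; the caller should stop producing
	case out <- v:
		return true
	}
}
```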
-
-// evaluator is a node in the query evaluation flow. It takes intermediate
-// results produced by the previous node and produces a new set of results.
-type evaluator interface {
-	// eval does the work of processing intermediate results to produce a new
- // set of results. It is expected that the client run eval in its own
- // goroutine (i.e. "go eval(ctxt)").
- eval(c *context)
-
- // singleResult returns true if this evaluator returns a single result
- // (e.g. an aggregate or a specific field). This is useful in selection
- // because we want to unbox those results. For example,
- // "teams/* | { players/* | count as numplayers }" should return
- // { Name: "teams/cardinals", Fields: {"numplayers": 5}}
- // and not
- // { Name: "teams/cardinals", Fields: {"numplayers": [{Name: "numplayers", Value: 5}]}}
- singleResult() bool
-
- // name returns a relative Object name that is appropriate for the query
- // results produced by this evaluator.
- name() string
-}
-
-// convert transforms the AST produced by parse.Parse into an AST that supports
-// evaluation specific to memstore. This transformation should not produce
-// any errors since we know all of the types that parse.Parse can produce.
-// Just in case one was overlooked, we use the panic/recover idiom to handle
-// unexpected errors. The conversion functions in the remainder of this file
-// do not return errors. Instead, they are allowed to panic, and this function
-// will recover.
-func convert(q parse.Pipeline) (ev evaluator, err error) {
- defer func() {
- if r := recover(); r != nil {
- if _, ok := r.(runtime.Error); ok {
- panic(r)
- }
- ev = nil
- err = r.(error)
- }
- }()
- return convertPipeline(q), nil
-}
-
-// convertPipeline transforms a parse.Pipeline into an evaluator.
-func convertPipeline(q parse.Pipeline) evaluator {
- switch q := q.(type) {
- case *parse.PipelineName:
- return &nameEvaluator{q.WildcardName, q.Pos}
- case *parse.PipelineType:
- return &typeEvaluator{convertPipeline(q.Src), q.Type, q.Pos}
- case *parse.PipelineFilter:
- return &filterEvaluator{convertPipeline(q.Src), convertPredicate(q.Pred), q.Pos}
- case *parse.PipelineSelection:
- return convertSelection(q)
- case *parse.PipelineFunc:
- return convertPipelineFunc(q)
- default:
- panic(fmt.Errorf("unexpected type %T", q))
- }
-}
-
-// nameEvaluator is the evaluator version of parse.PipelineName.
-type nameEvaluator struct {
- wildcardName *parse.WildcardName
-
- // pos specifies where in the query string this component started.
- pos parse.Pos
-}
-
-// eval implements the evaluator method.
-func (e *nameEvaluator) eval(c *context) {
- defer c.cleanup.Done()
- defer close(c.out)
-
- for result := range c.in {
- basepath := naming.Join(result.Name, e.wildcardName.VName)
- path := storage.ParsePath(naming.Join(c.suffix, basepath))
- vlog.VI(2).Infof("nameEvaluator suffix: %s, result.Name: %s, VName: %s",
- c.suffix, result.Name, e.wildcardName.VName)
- for it := c.sn.NewIterator(c.clientID, path,
- state.ListObjects, state.ImmediateFilter); it.IsValid(); it.Next() {
-
- entry := it.Get()
- result := &store.QueryResult{
- Name: naming.Join(basepath, it.Name()),
- Value: entry.Value,
- }
- if !c.emit(result) {
- return
- }
- if e.singleResult() {
- return
- }
- }
- }
-}
-
-// singleResult implements the evaluator method.
-func (e *nameEvaluator) singleResult() bool {
- return e.wildcardName.Exp == parse.Self
-}
-
-// name implements the evaluator method.
-func (e *nameEvaluator) name() string {
- return e.wildcardName.VName
-}
-
-// startSource creates a goroutine for src.eval(). It returns the
-// output channel for src.
-func startSource(c *context, src evaluator) chan *store.QueryResult {
- srcOut := make(chan *store.QueryResult, maxChannelSize)
- srcContext := context{
- sn: c.sn,
- suffix: c.suffix,
- clientID: c.clientID,
- in: c.in,
- out: srcOut,
- evalIt: c.evalIt,
- abort: c.abort,
- cleanup: c.cleanup,
- }
- if !c.cleanup.TryAdd() {
- // The query has been aborted by a call to Cancel.
- close(srcOut)
- return srcOut
- }
- go src.eval(&srcContext)
- return srcOut
-}
-
-// typeEvaluator is the evaluator version of parse.PipelineType.
-type typeEvaluator struct {
- // src produces the results to be filtered by type.
- src evaluator
- // ty restricts the results to a specific type of object.
- ty string
- // Pos specifies where in the query string this component started.
- pos parse.Pos
-}
-
-// eval implements the evaluator method.
-func (e *typeEvaluator) eval(c *context) {
- defer c.cleanup.Done()
- defer close(c.out)
-
- for result := range startSource(c, e.src) {
- vlog.VI(2).Info("typeEvaluator", result)
- if val := reflect.ValueOf(result.Value); e.ty != val.Type().Name() {
- continue
- }
- if !c.emit(result) {
- return
- }
- }
-}
-
-// singleResult implements the evaluator method.
-func (e *typeEvaluator) singleResult() bool {
- return false
-}
-
-// name implements the evaluator method.
-func (e *typeEvaluator) name() string {
- return e.src.name()
-}
-
-// filterEvaluator is the evaluator version of parse.PipelineFilter.
-type filterEvaluator struct {
- // src produces intermediate results that will be filtered by pred.
- src evaluator
- // pred determines whether an intermediate result produced by src should be
- // filtered out.
- pred predicate
- // pos specifies where in the query string this component started.
- pos parse.Pos
-}
-
-// eval implements the evaluator method.
-func (e *filterEvaluator) eval(c *context) {
- defer c.cleanup.Done()
- defer close(c.out)
-
- for result := range startSource(c, e.src) {
- vlog.VI(2).Info("filterEvaluator", result)
- if e.pred.match(c, result) {
- if !c.emit(result) {
- return
- }
- }
- }
-}
-
-// singleResult implements the evaluator method.
-func (e *filterEvaluator) singleResult() bool {
- return false
-}
-
-// name implements the evaluator method.
-func (e *filterEvaluator) name() string {
- return e.src.name()
-}
-
-// convertSelection transforms a parse.PipelineSelection into a
-// selectionEvaluator.
-func convertSelection(p *parse.PipelineSelection) evaluator {
- e := &selectionEvaluator{
- src: convertPipeline(p.Src),
-		subpipelines: make([]alias, len(p.SubPipelines)),
- pos: p.Pos,
- }
- for i, a := range p.SubPipelines {
- e.subpipelines[i] = alias{convertPipeline(a.Pipeline), a.Alias, a.Hidden}
- }
- return e
-}
-
-// alias is the evaluator version of parse.Alias. It represents a pipeline
-// that has an alternate name inside of a selection using the 'as' keyword.
-type alias struct {
- // evaluator is the evaluator to be aliased.
- evaluator evaluator
- // alias is the new name for the output of evaluator.
- alias string
- // hidden is true if this field in the selection should not be included
- // in the results sent to the client.
- hidden bool
-}
-
-// selectionEvaluator is the evaluator version of parse.PipelineSelection.
-type selectionEvaluator struct {
- // src produces intermediate results on which to select.
- src evaluator
- // subpipelines is the list of pipelines to run for each result produced
- // by src.
- subpipelines []alias
- // pos specifies where in the query string this component started.
- pos parse.Pos
-}
-
-// eval implements the evaluator method.
-func (e *selectionEvaluator) eval(c *context) {
- defer c.cleanup.Done()
- defer close(c.out)
-
- for result := range startSource(c, e.src) {
- if !e.processSubpipelines(c, result) {
- return
- }
- }
-}
-
-func (e *selectionEvaluator) processSubpipelines(c *context, result *store.QueryResult) bool {
- vlog.VI(2).Info("selection: ", result.Name)
- sel := &store.QueryResult{
- Name: result.Name,
- Fields: make(map[string]vdlutil.Any),
- }
- for _, a := range e.subpipelines {
- // We create a new channel for each intermediate result, so there's no need to
- // create a large buffer.
- in := make(chan *store.QueryResult, 1)
- in <- result
- close(in)
- out := make(chan *store.QueryResult, maxChannelSize)
- ctxt := &context{
- sn: c.sn,
- suffix: c.suffix,
- clientID: c.clientID,
- in: in,
- out: out,
- evalIt: c.evalIt,
- abort: c.abort,
- cleanup: c.cleanup,
- }
- if !c.cleanup.TryAdd() {
- // The query has been aborted by a call to Cancel.
- return false
- }
- go a.evaluator.eval(ctxt)
-
- // If the subpipeline would produce a single result, use that single result
- // as the field value. Otherwise, put the channel as the field value and let
- // evalIterator do the right thing with the sub-results.
- var value interface{}
- if a.evaluator.singleResult() {
- select {
- case <-c.abort:
- return false
- case sub, ok := <-out:
- if ok {
- value = sub.Value
- }
- }
- } else {
- value = out
- }
-
- if a.alias != "" {
- if a.hidden {
- // If a field is "hidden", the value will be wrapped in the type
- // hiddenResult to make it possible for evalIterator to elide it
- // from the results sent to the client.
- value = hiddenResult{value}
- }
- sel.Fields[a.alias] = value
- } else {
- sel.Fields[a.evaluator.name()] = value
- }
- }
- return c.emit(sel)
-}
-
-// singleResult implements the evaluator method.
-func (e *selectionEvaluator) singleResult() bool {
- return false
-}
-
-// name implements the evaluator method.
-func (e *selectionEvaluator) name() string {
- return e.src.name()
-}
-
-// convertPipelineFunc transforms a parse.PipelineFunc into a funcEvaluator.
-func convertPipelineFunc(p *parse.PipelineFunc) evaluator {
-	args := make([]expr, len(p.Args))
- for i, a := range p.Args {
- args[i] = convertExpr(a)
- }
- src := convertPipeline(p.Src)
- switch p.FuncName {
- case "sort":
- if src.singleResult() {
- panic(fmt.Errorf("%v: sort expects multiple inputs not a single input", p.Pos))
- }
- return &funcSortEvaluator{
-			src:  src, // reuse the evaluator already converted above
- args: args,
- pos: p.Pos,
- }
- case "sample":
- return convertSampleFunc(src, args, p.Pos)
- default:
- panic(fmt.Errorf("unknown function %s at Pos %v", p.FuncName, p.Pos))
- }
-}
-
-type funcSortEvaluator struct {
- // src produces intermediate results that will be sorted.
- src evaluator
- // args is the list of arguments passed to sort().
- args []expr
- // pos specifies where in the query string this component started.
- pos parse.Pos
-}
-
-func (e *funcSortEvaluator) eval(c *context) {
- defer c.cleanup.Done()
- defer close(c.out)
- srcOut := startSource(c, e.src)
-
- sorter := argsSorter{e, c, nil}
- for result := range srcOut {
- sorter.results = append(sorter.results, result)
- }
- sort.Sort(sorter)
- for _, result := range sorter.results {
- if !c.emit(result) {
- return
- }
- }
-}
-
-// singleResult implements the evaluator method.
-func (e *funcSortEvaluator) singleResult() bool {
- // During construction, we tested that e.src is not singleResult.
- return false
-}
-
-// name implements the evaluator method.
-func (e *funcSortEvaluator) name() string {
- // A sorted resultset is still the same as the original resultset, so it
- // should have the same name.
- return e.src.name()
-}
-
-// argsSorter implements sort.Interface to sort results by e.args.
-type argsSorter struct {
- e *funcSortEvaluator
- c *context
- results []*store.QueryResult
-}
-
-func (a argsSorter) Len() int { return len(a.results) }
-func (a argsSorter) Swap(i, j int) { a.results[i], a.results[j] = a.results[j], a.results[i] }
-func (a argsSorter) Less(i, j int) bool {
- for n, arg := range a.e.args {
- // Normally, exprUnary only supports numeric operands. As part of a sort
- // expression however, it is possible to negate a string operand to
- // cause a descending sort.
- ascending := true
- unaryArg, ok := arg.(*exprUnary)
- if ok {
- // Remove the +/- operator.
- arg = unaryArg.operand
- ascending = unaryArg.op == parse.OpPos
- }
- ival := arg.value(a.c, a.results[i])
- jval := arg.value(a.c, a.results[j])
- res, err := compare(a.c, ival, jval)
- if err != nil {
- a.c.evalIt.setErrorf("error while sorting (Pos %v Arg: %d) left: %s, right: %s; %v",
- a.e.pos, n, a.results[i].Name, a.results[j].Name, err)
- return false
- }
- if res == 0 {
- continue
- }
- if ascending {
- return res < 0
- } else {
- return res > 0
- }
- }
- // Break ties based on name to get a deterministic order.
- return a.results[i].Name < a.results[j].Name
-}
-
-// funcSampleEvaluator is an evaluator that uses reservoir sampling to
-// reduce the results to a desired sample size.
-type funcSampleEvaluator struct {
- // src produces intermediate results that will be transformed by func.
- src evaluator
- // numSamples is the number of samples to send to the output.
- numSamples int64
- // pos specifies where in the query string this component started.
- pos parse.Pos
-}
-
-func convertSampleFunc(src evaluator, args []expr, pos parse.Pos) evaluator {
- if src.singleResult() {
- panic(fmt.Errorf("%v: sample expects multiple inputs not a single input", pos))
- }
- if len(args) != 1 {
- panic(fmt.Errorf("%v: sample expects exactly one integer argument specifying the number of results to include in the sample", pos))
- }
- n, ok := args[0].(*exprInt)
- if !ok {
- panic(fmt.Errorf("%v: sample expects exactly one integer argument specifying the number of results to include in the sample", pos))
- }
- return &funcSampleEvaluator{src, n.i.Int64(), pos}
-}
-
-// eval implements the evaluator method.
-func (e *funcSampleEvaluator) eval(c *context) {
- defer c.cleanup.Done()
- defer close(c.out)
- srcOut := startSource(c, e.src)
-
- reservoir := make([]*store.QueryResult, e.numSamples)
- i := int64(0)
- for result := range srcOut {
- if i < e.numSamples {
- // Fill the reservoir.
- reservoir[i] = result
- } else {
- // Sample with decreasing probability.
- bigJ, err := rand.Int(rand.Reader, big.NewInt(i+1))
- if err != nil {
- c.evalIt.setErrorf("error while sampling: %v", err)
- return
- }
- j := bigJ.Int64()
- if j < e.numSamples {
- reservoir[j] = result
- }
- }
- i++
- }
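-	// If fewer results arrived than numSamples, the trailing reservoir
-	// entries are nil; emit treats a nil result as a no-op rather than
-	// sending it, so those slots are skipped.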
- for _, result := range reservoir {
- if !c.emit(result) {
- return
- }
- }
-}
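This is standard reservoir sampling (Algorithm R): the first numSamples results fill the reservoir, and result i (0-based) thereafter replaces a random slot with probability numSamples/(i+1), giving every result an equal chance of surviving. A self-contained sketch of the same algorithm, using math/rand for brevity where the code above uses crypto/rand:

```go
package sampling

import "math/rand"

// sample keeps a uniform random subset of at most k items from a stream.
func sample(in <-chan int, k int) []int {
	reservoir := make([]int, 0, k)
	i := 0
	for v := range in {
		if i < k {
			reservoir = append(reservoir, v) // fill phase
		} else if j := rand.Intn(i + 1); j < k {
			reservoir[j] = v // replace with probability k/(i+1)
		}
		i++
	}
	return reservoir
}
```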
-
-// singleResult implements the evaluator method.
-func (e *funcSampleEvaluator) singleResult() bool {
- // During construction, we tested that e.src is not singleResult.
- return false
-}
-
-// name implements the evaluator method.
-func (e *funcSampleEvaluator) name() string {
- // A sampled resultset is still the same as the original resultset, so it
- // should have the same name.
- return e.src.name()
-}
-
-// predicate determines whether an intermediate query result should be
-// filtered out.
-type predicate interface {
- match(c *context, e *store.QueryResult) bool
-}
-
-// convertPredicate transforms a parse.Predicate into a predicate.
-func convertPredicate(p parse.Predicate) predicate {
- switch p := p.(type) {
- case *parse.PredicateBool:
- return &predicateBool{p.Bool, p.Pos}
- case *parse.PredicateCompare:
- switch p.Comp {
- case parse.CompEQ, parse.CompNE, parse.CompLT, parse.CompGT, parse.CompLE, parse.CompGE:
- default:
- panic(fmt.Errorf("unknown comparator %d at %v", p.Comp, p.Pos))
- }
- return &predicateCompare{convertExpr(p.LHS), convertExpr(p.RHS), p.Comp, p.Pos}
- case *parse.PredicateAnd:
- return &predicateAnd{convertPredicate(p.LHS), convertPredicate(p.RHS), p.Pos}
- case *parse.PredicateOr:
- return &predicateOr{convertPredicate(p.LHS), convertPredicate(p.RHS), p.Pos}
- case *parse.PredicateNot:
- return &predicateNot{convertPredicate(p.Pred), p.Pos}
- // TODO(kash): Support parse.PredicateFunc.
- default:
- panic(fmt.Errorf("unexpected type %T", p))
- }
-}
-
-// predicateBool represents a boolean literal.
-type predicateBool struct {
- b bool
- pos parse.Pos
-}
-
-// match implements the predicate method.
-func (p *predicateBool) match(c *context, e *store.QueryResult) bool {
- return p.b
-}
-
-// predicateCompare handles the comparison on two expressions.
-type predicateCompare struct {
- // lhs is the left-hand-side of the comparison.
- lhs expr
- // rhs is the right-hand-side of the comparison.
- rhs expr
- // comp specifies the operator to use in the comparison.
- comp parse.Comparator
- // pos specifies where in the query string this component started.
- pos parse.Pos
-}
-
-// match implements the predicate method.
-func (p *predicateCompare) match(c *context, result *store.QueryResult) bool {
- lval := p.lhs.value(c, result)
- rval := p.rhs.value(c, result)
-
- res, err := compare(c, lval, rval)
- if err != nil {
- c.evalIt.setErrorf("error while evaluating predicate (Pos %v) for name '%s': %v",
- p.pos, result.Name, err)
- return false
- }
- switch p.comp {
- case parse.CompEQ:
- return res == 0
- case parse.CompNE:
- return res != 0
- case parse.CompLT:
- return res < 0
- case parse.CompGT:
- return res > 0
- case parse.CompLE:
- return res <= 0
- case parse.CompGE:
- return res >= 0
- default:
- c.evalIt.setErrorf("unknown comparator %d at Pos %v", p.comp, p.pos)
- return false
- }
-}
-
-// compare returns a negative number if lval is less than rval, 0 if they are
-// equal, and a positive number if lval is greater than rval.
-func compare(c *context, lval, rval interface{}) (int, error) {
- switch lval := lval.(type) {
- case string:
- rval, ok := rval.(string)
- if !ok {
- return 0, fmt.Errorf("type mismatch; left: %T, right: %T", lval, rval)
- }
- if lval < rval {
- return -1, nil
- } else if lval > rval {
- return 1, nil
- } else {
- return 0, nil
- }
- case *big.Rat:
- switch rval := rval.(type) {
- case *big.Rat:
- return lval.Cmp(rval), nil
- case *big.Int:
- return lval.Cmp(new(big.Rat).SetInt(rval)), nil
- case int, int8, int16, int32, int64:
- return lval.Cmp(new(big.Rat).SetInt64(toInt64(rval))), nil
- case uint, uint8, uint16, uint32, uint64:
- // It is not possible to convert to a big.Rat from an unsigned. Need to
- // go through big.Int first.
- return lval.Cmp(new(big.Rat).SetInt(new(big.Int).SetUint64(toUint64(rval)))), nil
- }
- case *big.Int:
- switch rval := rval.(type) {
- case *big.Rat:
- return new(big.Rat).SetInt(lval).Cmp(rval), nil
- case *big.Int:
- return lval.Cmp(rval), nil
- case int, int8, int16, int32, int64:
- return lval.Cmp(big.NewInt(toInt64(rval))), nil
- case uint, uint8, uint16, uint32, uint64:
- return lval.Cmp(new(big.Int).SetUint64(toUint64(rval))), nil
- }
- case int, int8, int16, int32, int64:
- switch rval := rval.(type) {
- case *big.Rat:
- return new(big.Rat).SetInt64(toInt64(lval)).Cmp(rval), nil
- case *big.Int:
- return new(big.Int).SetInt64(toInt64(lval)).Cmp(rval), nil
- case int, int8, int16, int32, int64:
- lint, rint := toInt64(lval), toInt64(rval)
- if lint < rint {
- return -1, nil
- } else if lint > rint {
- return 1, nil
- } else {
- return 0, nil
- }
- case uint, uint8, uint16, uint32, uint64:
- lint, rint := toUint64(lval), toUint64(rval)
- if lint < rint {
- return -1, nil
- } else if lint > rint {
- return 1, nil
- } else {
- return 0, nil
- }
- }
- }
- return 0, fmt.Errorf("unexpected type %T", lval)
-}
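A small package-internal usage sketch; compare ignores its context argument, so nil is safe to pass in isolation:

```go
package query

import "math/big"

// exampleCompare shows mixed-type widening: the int literal is promoted
// to a big.Rat before comparison against the rational value.
func exampleCompare() bool {
	res, err := compare(nil, big.NewRat(23, 10), 1) // compares 2.3 with 1
	return err == nil && res > 0                    // true: 2.3 > 1
}
```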
-
-func toInt64(i interface{}) int64 {
- switch i := i.(type) {
- case int:
- return int64(i)
- case int8:
- return int64(i)
- case int16:
- return int64(i)
- case int32:
- return int64(i)
- case int64:
-		return i
- default:
- panic(fmt.Errorf("unexpected type %T", i))
- }
-}
-
-func toUint64(i interface{}) uint64 {
- switch i := i.(type) {
- case uint:
- return uint64(i)
- case uint8:
- return uint64(i)
- case uint16:
- return uint64(i)
- case uint32:
- return uint64(i)
- case uint64:
-		return i
- default:
- panic(fmt.Errorf("unexpected type %T", i))
- }
-}
-
-// predicateAnd is a predicate that is the logical conjunction of two
-// predicates.
-type predicateAnd struct {
- // lhs is the left-hand-side of the conjunction.
- lhs predicate
- // rhs is the right-hand-side of the conjunction.
- rhs predicate
- // pos specifies where in the query string this component started.
- pos parse.Pos
-}
-
-// match implements the predicate method.
-func (p *predicateAnd) match(c *context, result *store.QueryResult) bool {
- // Short circuit to avoid extra processing.
- if !p.lhs.match(c, result) {
- return false
- }
- return p.rhs.match(c, result)
-}
-
-// predicateOr is a predicate that is the logical disjunction of two
-// predicates.
-type predicateOr struct {
- // lhs is the left-hand-side of the disjunction.
- lhs predicate
- // rhs is the right-hand-side of the disjunction.
- rhs predicate
- // pos specifies where in the query string this component started.
- pos parse.Pos
-}
-
-// match implements the predicate method.
-func (p *predicateOr) match(c *context, result *store.QueryResult) bool {
- // Short circuit to avoid extra processing.
- if p.lhs.match(c, result) {
- return true
- }
- return p.rhs.match(c, result)
-}
-
-// predicateNot is a predicate that is the logical negation of another
-// predicate.
-type predicateNot struct {
- // pred is the predicate to be negated.
- pred predicate
- // pos specifies where in the query string this component started.
- pos parse.Pos
-}
-
-// match implements the predicate method.
-func (p *predicateNot) match(c *context, result *store.QueryResult) bool {
- return !p.pred.match(c, result)
-}
-
-// expr produces a value in the context of a store.QueryResult.
-type expr interface {
- // value returns a value in the context of result.
- value(c *context, result *store.QueryResult) interface{}
-}
-
-// convertExpr transforms a parse.Expr into an expr.
-func convertExpr(e parse.Expr) expr {
- switch e := e.(type) {
- case *parse.ExprString:
- return &exprString{e.Str, e.Pos}
- case *parse.ExprRat:
- return &exprRat{e.Rat, e.Pos}
- case *parse.ExprInt:
- return &exprInt{e.Int, e.Pos}
- case *parse.ExprName:
- return &exprName{e.Name, e.Pos}
- case *parse.ExprUnary:
- return &exprUnary{convertExpr(e.Operand), e.Op, e.Pos}
- // TODO(kash): Support the other types of expressions.
- default:
- panic(fmt.Errorf("unexpected type %T", e))
- }
-}
-
-// exprString is an expr that represents a string constant.
-type exprString struct {
- // str is the string constant specified in the query.
- str string
- // pos specifies where in the query string this component started.
- pos parse.Pos
-}
-
-// value implements the expr method.
-func (e *exprString) value(c *context, result *store.QueryResult) interface{} {
- return e.str
-}
-
-// exprRat is an expr that represents a rational number constant.
-type exprRat struct {
- rat *big.Rat
- // pos specifies where in the query string this component started.
- pos parse.Pos
-}
-
-// value implements the expr method.
-func (e *exprRat) value(c *context, result *store.QueryResult) interface{} {
- return e.rat
-}
-
-// exprInt is an expr that represents an integer constant.
-type exprInt struct {
- i *big.Int
- // pos specifies where in the query string this component started.
- pos parse.Pos
-}
-
-// value implements the expr method.
-func (e *exprInt) value(c *context, result *store.QueryResult) interface{} {
- return e.i
-}
-
-// exprName is an expr for an Object name literal.
-type exprName struct {
- // name is the Object name used in the query.
- name string
- // pos specifies where in the query string this component started.
- pos parse.Pos
-}
-
-// value implements the expr method.
-func (e *exprName) value(c *context, result *store.QueryResult) interface{} {
- if result.Fields != nil {
- // TODO(kash): Handle multipart names. This currently only works if
- // e.name has no slashes.
- val, ok := result.Fields[e.name]
- if !ok {
- c.evalIt.setErrorf("name '%s' was not selected from '%s', found: [%s]",
- e.name, result.Name, mapKeys(result.Fields))
- return nil
- }
- // If a field is "hidden", the value will be wrapped in the type
- // hiddenResult to make it possible for evalIterator to elide it
- // from the results sent to the client.
- if v, ok := val.(hiddenResult); ok {
- return v.value
- }
- return val
- }
- fullpath := naming.Join(result.Name, e.name)
- entry, err := c.sn.Get(c.clientID, storage.ParsePath(fullpath))
- if err != nil {
- c.evalIt.setErrorf("could not look up name '%s' relative to '%s': %v",
- e.name, result.Name, err)
- return nil
- }
- return entry.Value
-}
-
-func mapKeys(m map[string]vdlutil.Any) string {
- s := make([]string, 0, len(m))
-	for key := range m {
- s = append(s, key)
- }
- sort.Strings(s)
- return strings.Join(s, ", ")
-}
-
-// exprUnary is an expr preceded by a '+' or '-'.
-type exprUnary struct {
- // operand is the expression to be modified by Op.
- operand expr
- // op is the operator that modifies operand.
- op parse.Operator
- // pos specifies where in the query string this component started.
- pos parse.Pos
-}
-
-// value implements the expr method.
-func (e *exprUnary) value(c *context, result *store.QueryResult) interface{} {
- v := e.operand.value(c, result)
- switch e.op {
- case parse.OpNeg:
- switch v := v.(type) {
- case *big.Int:
-			// Neg sets its receiver (not its argument), so allocating a
-			// fresh big.Int avoids mutating v.
-			return new(big.Int).Neg(v)
-		case *big.Rat:
-			// Likewise, allocate a fresh big.Rat rather than mutating v.
-			return new(big.Rat).Neg(v)
- case int:
- return -v
- case int8:
- return -v
- case int16:
- return -v
- case int32:
- return -v
- case int64:
- return -v
- case uint:
- return -v
- case uint8:
- return -v
- case uint16:
- return -v
- case uint32:
- return -v
- case uint64:
- return -v
- default:
- c.evalIt.setErrorf("cannot negate value of type %T for %s", v, result.Name)
- return nil
- }
- case parse.OpPos:
- return v
- default:
- c.evalIt.setErrorf("unknown operator %d at Pos %v", e.op, e.pos)
- return nil
- }
-}
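Tying the pieces together, a caller drives Eval through the QueryStream contract: Next/Get to iterate, Err to check for evaluation failures, and Abort whenever iteration stops early. A sketch in which sn, clientID, and enough are hypothetical placeholders:

```go
it := Eval(sn, clientID, storage.ParsePath("teams"), query.Query{"teams/* | type team"})
for it.Next() {
	result := it.Get()
	if enough(result) { // hypothetical early-exit condition
		it.Abort()
		break
	}
}
if err := it.Err(); err != nil {
	// handle the evaluation error
}
```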
diff --git a/services/store/memstore/query/eval_test.go b/services/store/memstore/query/eval_test.go
deleted file mode 100644
index 62fee97..0000000
--- a/services/store/memstore/query/eval_test.go
+++ /dev/null
@@ -1,686 +0,0 @@
-package query
-
-import (
- "fmt"
- "reflect"
- "testing"
- "time"
-
- _ "veyron/lib/testutil"
- "veyron/services/store/memstore/state"
-
- "veyron2/query"
- "veyron2/security"
- "veyron2/services/store"
- "veyron2/storage"
- "veyron2/vdl/vdlutil"
- "veyron2/vlog"
-)
-
-type team struct {
- Name string
- Location string
-}
-
-type player struct {
- Name string
- Age int
-}
-
-func populate(t *testing.T) *state.State {
- st := state.New(rootPublicID)
- sn := st.MutableSnapshot()
-
- // Add some objects.
- put(t, sn, "/", "")
-
- put(t, sn, "/players", "")
- alfredID := put(t, sn, "/players/alfred", player{"alfred", 17})
- aliceID := put(t, sn, "/players/alice", player{"alice", 16})
- bettyID := put(t, sn, "/players/betty", player{"betty", 23})
- bobID := put(t, sn, "/players/bob", player{"bob", 21})
-
- put(t, sn, "/players/betty/bio", "")
- put(t, sn, "/players/betty/bio/hometown", "Tampa")
-
- put(t, sn, "/teams", "")
- put(t, sn, "/teams/cardinals", team{"cardinals", "CA"})
- put(t, sn, "/teams/sharks", team{"sharks", "NY"})
- put(t, sn, "/teams/bears", team{"bears", "CO"})
-
- put(t, sn, "/teams/cardinals/players", "")
- put(t, sn, "/teams/sharks/players", "")
- put(t, sn, "/teams/bears/players", "")
-
- put(t, sn, "/teams/cardinals/players/alfred", alfredID)
- put(t, sn, "/teams/sharks/players/alice", aliceID)
- put(t, sn, "/teams/sharks/players/betty", bettyID)
- // Call him something different to make sure we are handling
- // paths correctly in subqueries. We don't want the subquery
- // "teams/sharks | type team | { players/*}" to work with
- // "/players/bob".
- put(t, sn, "/teams/sharks/players/robert", bobID)
-
- commit(t, st, sn)
- return st
-}
-
-func TestEval(t *testing.T) {
- st := populate(t)
-
- type testCase struct {
- suffix string
- query string
- expectedNames []string
- }
-
- tests := []testCase{
- // nameEvaluator:
- {"", "teams", []string{"teams"}},
- {"", "teams/.", []string{"teams"}},
- {"", "teams/*", []string{"teams", "teams/cardinals", "teams/sharks", "teams/bears"}},
-
- // With a non empty prefix:
- {"teams", ".", []string{""}},
- {"teams", "*", []string{"", "cardinals", "sharks", "bears"}},
-
- // typeEvaluator:
- {"", "teams | type team", []string{}},
- {"", "teams/. | type team", []string{}},
- {"", "teams/* | type team", []string{"teams/cardinals", "teams/sharks", "teams/bears"}},
-
- // filterEvaluator/predicateBool:
- {"", "teams | ?true", []string{"teams"}},
- {"", "teams | ?false", []string{}},
-
- // predicateCompare:
- // String constants:
- {"", "teams | ?'foo' > 'bar'", []string{"teams"}},
- {"", "teams | ?'foo' < 'bar'", []string{}},
- {"", "teams | ?'foo' == 'bar'", []string{}},
- {"", "teams | ?'foo' != 'bar'", []string{"teams"}},
- {"", "teams | ?'foo' <= 'bar'", []string{}},
- {"", "teams | ?'foo' >= 'bar'", []string{"teams"}},
- // Rational number constants:
- {"", "teams | ?2.3 > 1.0", []string{"teams"}},
- {"", "teams | ?2.3 < 1.0", []string{}},
- {"", "teams | ?2.3 == 1.0", []string{}},
- {"", "teams | ?2.3 != 1.0", []string{"teams"}},
- {"", "teams | ?2.3 <= 1.0", []string{}},
- {"", "teams | ?2.3 >= 1.0", []string{"teams"}},
- {"", "teams | ?-2.3 >= 1.0", []string{}},
- {"", "teams | ?2.3 <= -1.0", []string{}},
- // Integer constants:
- {"", "teams | ?2 > 1", []string{"teams"}},
- {"", "teams | ?2 < 1", []string{}},
- {"", "teams | ?2 == 1", []string{}},
- {"", "teams | ?2 != 1", []string{"teams"}},
- {"", "teams | ?2 <= 1", []string{}},
- {"", "teams | ?2 >= 1", []string{"teams"}},
- // Compare an integer with a rational number:
- {"", "teams | ?2 > 1.7", []string{"teams"}},
- {"", "teams | ?2.3 > 1", []string{"teams"}},
- {"", "teams | ?-2 > 1.7", []string{}},
- // Object names:
- {"", "teams/* | type team | ?Name > 'bar'", []string{"teams/cardinals", "teams/sharks", "teams/bears"}},
- {"", "teams/* | type team | ?Name > 'foo'", []string{"teams/sharks"}},
- {"", "teams/* | type team | ?Name != 'bears'", []string{"teams/cardinals", "teams/sharks"}},
- {"", "players/* | type player | ?Age > 20", []string{"players/betty", "players/bob"}},
- {"", "players/* | type player | ?-Age < -20", []string{"players/betty", "players/bob"}},
-
- // predicateAnd:
- {"", "teams | ?true && true", []string{"teams"}},
- {"", "teams | ?true && false", []string{}},
-
- // predicateOr:
- {"", "teams | ?true || true", []string{"teams"}},
- {"", "teams | ?true || false", []string{"teams"}},
- {"", "teams | ?false || false", []string{}},
-
- // predicateNot:
- {"", "teams | ?!true", []string{}},
- {"", "teams | ?!false", []string{"teams"}},
- {"", "teams | ?!(false && false)", []string{"teams"}},
- {"", "teams | ?!(true || false)", []string{}},
- }
- for _, test := range tests {
- it := Eval(st.Snapshot(), rootPublicID, storage.ParsePath(test.suffix), query.Query{test.query})
- names := map[string]bool{}
- for it.Next() {
- result := it.Get()
- if _, ok := names[result.Name]; ok {
- t.Errorf("query: %s, duplicate results for %s", test.query, result.Name)
- }
- names[result.Name] = true
- }
- if it.Err() != nil {
- t.Errorf("query: %s, Error during eval: %v", test.query, it.Err())
- continue
- }
- if len(names) != len(test.expectedNames) {
- t.Errorf("query: %s, Wrong number of names. got %v, wanted %v", test.query, names, test.expectedNames)
- continue
- }
- for _, name := range test.expectedNames {
- if !names[name] {
- t.Errorf("Did not find '%s' in %v", name, names)
- }
- }
- // Ensure that all the goroutines are cleaned up.
- it.(*evalIterator).wait()
- }
-}
-
-func TestSample(t *testing.T) {
- st := populate(t)
-
- type testCase struct {
- query string
- expectedNumNames int
- }
-
- tests := []testCase{
- {"teams/* | type team | sample(1)", 1},
- {"teams/* | type team | sample(2)", 2},
- {"teams/* | type team | sample(3)", 3},
- {"teams/* | type team | sample(4)", 3}, // Can't sample more values than exist.
- }
-
- for _, test := range tests {
- it := Eval(st.Snapshot(), rootPublicID, storage.ParsePath(""), query.Query{test.query})
- names := make(map[string]struct{})
- for it.Next() {
- result := it.Get()
- if _, ok := names[result.Name]; ok {
- t.Errorf("query: %s, duplicate results for %s", test.query, result.Name)
- }
- names[result.Name] = struct{}{}
- }
- if it.Err() != nil {
- t.Errorf("query: %s, Error during eval: %v", test.query, it.Err())
- continue
- }
- if len(names) != test.expectedNumNames {
- t.Errorf("query: %s, Wrong number of names. got %v, wanted %v", test.query, names, test.expectedNumNames)
- continue
- }
- possibleNames := map[string]struct{}{
- "teams/cardinals": struct{}{},
- "teams/sharks": struct{}{},
- "teams/bears": struct{}{},
- }
-		for name := range names {
- if _, ok := possibleNames[name]; !ok {
- t.Errorf("Did not find '%s' in %v", name, possibleNames)
- }
- }
- // Ensure that all the goroutines are cleaned up.
- it.(*evalIterator).wait()
- }
-}
-
-func TestSorting(t *testing.T) {
- st := populate(t)
- sn := st.MutableSnapshot()
- put(t, sn, "/teams/beavers", team{"beavers", "CO"})
- commit(t, st, sn)
-
- type testCase struct {
- query string
- expectedResults []*store.QueryResult
- }
-
- tests := []testCase{
- {
- "'teams/*' | type team | sort()",
- []*store.QueryResult{
- &store.QueryResult{0, "teams/bears", nil, team{"bears", "CO"}},
- &store.QueryResult{0, "teams/beavers", nil, team{"beavers", "CO"}},
- &store.QueryResult{0, "teams/cardinals", nil, team{"cardinals", "CA"}},
- &store.QueryResult{0, "teams/sharks", nil, team{"sharks", "NY"}},
- },
- },
- {
- "'teams/*' | type team | sort(Name)",
- []*store.QueryResult{
- &store.QueryResult{0, "teams/bears", nil, team{"bears", "CO"}},
- &store.QueryResult{0, "teams/beavers", nil, team{"beavers", "CO"}},
- &store.QueryResult{0, "teams/cardinals", nil, team{"cardinals", "CA"}},
- &store.QueryResult{0, "teams/sharks", nil, team{"sharks", "NY"}},
- },
- },
- {
- "'teams/*' | type team | sort(Location, Name)",
- []*store.QueryResult{
- &store.QueryResult{0, "teams/cardinals", nil, team{"cardinals", "CA"}},
- &store.QueryResult{0, "teams/bears", nil, team{"bears", "CO"}},
- &store.QueryResult{0, "teams/beavers", nil, team{"beavers", "CO"}},
- &store.QueryResult{0, "teams/sharks", nil, team{"sharks", "NY"}},
- },
- },
- {
- "'teams/*' | type team | sort(Location)",
- []*store.QueryResult{
- &store.QueryResult{0, "teams/cardinals", nil, team{"cardinals", "CA"}},
- &store.QueryResult{0, "teams/bears", nil, team{"bears", "CO"}},
- &store.QueryResult{0, "teams/beavers", nil, team{"beavers", "CO"}},
- &store.QueryResult{0, "teams/sharks", nil, team{"sharks", "NY"}},
- },
- },
- {
- "'teams/*' | type team | sort(+Location)",
- []*store.QueryResult{
- &store.QueryResult{0, "teams/cardinals", nil, team{"cardinals", "CA"}},
- &store.QueryResult{0, "teams/bears", nil, team{"bears", "CO"}},
- &store.QueryResult{0, "teams/beavers", nil, team{"beavers", "CO"}},
- &store.QueryResult{0, "teams/sharks", nil, team{"sharks", "NY"}},
- },
- },
- {
- "'teams/*' | type team | sort(-Location)",
- []*store.QueryResult{
- &store.QueryResult{0, "teams/sharks", nil, team{"sharks", "NY"}},
- &store.QueryResult{0, "teams/bears", nil, team{"bears", "CO"}},
- &store.QueryResult{0, "teams/beavers", nil, team{"beavers", "CO"}},
- &store.QueryResult{0, "teams/cardinals", nil, team{"cardinals", "CA"}},
- },
- },
- {
- "'teams/*' | type team | sort(-Location, Name)",
- []*store.QueryResult{
- &store.QueryResult{0, "teams/sharks", nil, team{"sharks", "NY"}},
- &store.QueryResult{0, "teams/bears", nil, team{"bears", "CO"}},
- &store.QueryResult{0, "teams/beavers", nil, team{"beavers", "CO"}},
- &store.QueryResult{0, "teams/cardinals", nil, team{"cardinals", "CA"}},
- },
- },
- {
- "'teams/*' | type team | sort(-Location, -Name)",
- []*store.QueryResult{
- &store.QueryResult{0, "teams/sharks", nil, team{"sharks", "NY"}},
- &store.QueryResult{0, "teams/beavers", nil, team{"beavers", "CO"}},
- &store.QueryResult{0, "teams/bears", nil, team{"bears", "CO"}},
- &store.QueryResult{0, "teams/cardinals", nil, team{"cardinals", "CA"}},
- },
- },
- {
- "'players/*' | type player | sort(Age)",
- []*store.QueryResult{
- &store.QueryResult{0, "players/alice", nil, player{"alice", 16}},
- &store.QueryResult{0, "players/alfred", nil, player{"alfred", 17}},
- &store.QueryResult{0, "players/bob", nil, player{"bob", 21}},
- &store.QueryResult{0, "players/betty", nil, player{"betty", 23}},
- },
- },
- {
- "'players/*' | type player | sort(-Age)",
- []*store.QueryResult{
- &store.QueryResult{0, "players/betty", nil, player{"betty", 23}},
- &store.QueryResult{0, "players/bob", nil, player{"bob", 21}},
- &store.QueryResult{0, "players/alfred", nil, player{"alfred", 17}},
- &store.QueryResult{0, "players/alice", nil, player{"alice", 16}},
- },
- },
- }
- for _, test := range tests {
- it := Eval(st.Snapshot(), rootPublicID, storage.ParsePath(""), query.Query{test.query})
- i := 0
- for it.Next() {
- result := it.Get()
- if i >= len(test.expectedResults) {
-				t.Errorf("query: %s; not enough expected results (%d); found %v", test.query, len(test.expectedResults), result)
-				it.Abort()
-				break
- }
- if got, want := result, test.expectedResults[i]; !reflect.DeepEqual(got, want) {
- t.Errorf("query: %s;\nGOT %v\nWANT %v", test.query, got, want)
- }
- i++
- }
- if it.Err() != nil {
- t.Errorf("query: %s, Error during eval: %v", test.query, it.Err())
- continue
- }
- if i != len(test.expectedResults) {
- t.Errorf("query: %s, Got %d results, expected %d", test.query, i, len(test.expectedResults))
- continue
- }
- // Ensure that all the goroutines are cleaned up.
- it.(*evalIterator).wait()
- }
-}
-
-func TestSelection(t *testing.T) {
- st := populate(t)
-
- type testCase struct {
- suffix string
- query string
- expectedResults []*store.QueryResult
- }
-
- tests := []testCase{
- {
- "", "'teams/cardinals' | {Name: Name}",
- []*store.QueryResult{
- &store.QueryResult{0, "teams/cardinals", map[string]vdlutil.Any{"Name": "cardinals"}, nil},
- },
- },
- {
- "teams", "'cardinals' | {Name: Name}",
- []*store.QueryResult{
- &store.QueryResult{0, "cardinals", map[string]vdlutil.Any{"Name": "cardinals"}, nil},
- },
- },
- {
- "teams/cardinals", ". | {Name: Name}",
- []*store.QueryResult{
- &store.QueryResult{0, "", map[string]vdlutil.Any{"Name": "cardinals"}, nil},
- },
- },
- {
- "", "'teams/cardinals' | {Name: Name}",
- []*store.QueryResult{
- &store.QueryResult{0, "teams/cardinals", map[string]vdlutil.Any{"Name": "cardinals"}, nil},
- },
- },
- {
- "", "'teams/cardinals' | {myname: Name, myloc: Location}",
- []*store.QueryResult{
- &store.QueryResult{
- 0,
- "teams/cardinals",
- map[string]vdlutil.Any{
- "myname": "cardinals",
- "myloc": "CA",
- },
- nil,
- },
- },
- },
- {
- "", "'teams/cardinals' | {myname: Name, myloc: Location} | ? myname == 'cardinals'",
- []*store.QueryResult{
- &store.QueryResult{
- 0,
- "teams/cardinals",
- map[string]vdlutil.Any{
- "myname": "cardinals",
- "myloc": "CA",
- },
- nil,
- },
- },
- },
- {
- "", "'teams/cardinals' | {myname hidden: Name, myloc: Location} | ? myname == 'cardinals'",
- []*store.QueryResult{
- &store.QueryResult{
- 0,
- "teams/cardinals",
- map[string]vdlutil.Any{
- "myloc": "CA",
- },
- nil,
- },
- },
- },
- {
- "", "'teams/cardinals' | {self: ., myname: Name} | ? myname == 'cardinals'",
- []*store.QueryResult{
- &store.QueryResult{
- 0,
- "teams/cardinals",
- map[string]vdlutil.Any{
- "self": team{"cardinals", "CA"},
- "myname": "cardinals",
- },
- nil,
- },
- },
- },
- {
- "",
- "'teams/*' | type team | {" +
- " myname: Name," +
- " drinkers: players/* | type player | ?Age >=21 | sort()," +
- " nondrinkers: players/* | type player | ?Age < 21 | sort()" +
- "} | sort(myname)",
- []*store.QueryResult{
- &store.QueryResult{
- 0,
- "teams/bears",
- map[string]vdlutil.Any{
- "myname": "bears",
- "drinkers": store.NestedResult(1),
- "nondrinkers": store.NestedResult(2),
- },
- nil,
- },
- &store.QueryResult{
- 0,
- "teams/cardinals",
- map[string]vdlutil.Any{
- "myname": "cardinals",
- "drinkers": store.NestedResult(3),
- "nondrinkers": store.NestedResult(4),
- },
- nil,
- },
- &store.QueryResult{
- 4,
- "teams/cardinals/players/alfred",
- nil,
- player{"alfred", 17},
- },
- &store.QueryResult{
- 0,
- "teams/sharks",
- map[string]vdlutil.Any{
- "myname": "sharks",
- "drinkers": store.NestedResult(5),
- "nondrinkers": store.NestedResult(6),
- },
- nil,
- },
- &store.QueryResult{
- 5,
- "teams/sharks/players/betty",
- nil,
- player{"betty", 23},
- },
- &store.QueryResult{
- 5,
- "teams/sharks/players/robert",
- nil,
- player{"bob", 21},
- },
- &store.QueryResult{
- 6,
- "teams/sharks/players/alice",
- nil,
- player{"alice", 16},
- },
- },
- },
- // Test for selection of a nested name ('bio/hometown'). Only betty has this
- // nested name, so other players should get a nil value.
- {
- "", "'players/*' | type player | {Name: Name, hometown: 'bio/hometown'} | ? Name == 'alfred' || Name == 'betty' | sort()",
- []*store.QueryResult{
- &store.QueryResult{
- 0,
- "players/alfred",
- map[string]vdlutil.Any{
- "Name": "alfred",
- "hometown": nil,
- },
- nil,
- },
- &store.QueryResult{
- 0,
- "players/betty",
- map[string]vdlutil.Any{
- "Name": "betty",
- "hometown": "Tampa",
- },
- nil,
- },
- },
- },
- }
- for _, test := range tests {
- vlog.VI(1).Infof("Testing %s\n", test.query)
- it := Eval(st.Snapshot(), rootPublicID, storage.ParsePath(test.suffix), query.Query{test.query})
- i := 0
- for it.Next() {
- result := it.Get()
- if i >= len(test.expectedResults) {
-				t.Errorf("query: %s, not enough expected results, need at least %d", test.query, i+1)
- it.Abort()
- break
- }
- if got, want := result, test.expectedResults[i]; !reflect.DeepEqual(got, want) {
- t.Errorf("query: %s;\nGOT %v\nWANT %v", test.query, got, want)
- }
- i++
- }
- if it.Err() != nil {
- t.Errorf("query: %s, Error during eval: %v", test.query, it.Err())
- continue
- }
- if i != len(test.expectedResults) {
- t.Errorf("query: %s, Got %d results, expected %d", test.query, i, len(test.expectedResults))
- continue
- }
- // Ensure that all the goroutines are cleaned up.
- it.(*evalIterator).wait()
- }
-}
-
-func TestError(t *testing.T) {
- st := populate(t)
-
- type testCase struct {
- query string
- expectedError string
- }
-
- tests := []testCase{
- {"teams!foo", "1:6: syntax error at token '!'"},
- {"teams | ?Name > 'foo'", "could not look up name 'Name' relative to 'teams': not found"},
- // This query results in an error because not all of the intermediate
- // results produced by "teams/*" are of type 'team'.
- // TODO(kash): We probably want an error message that says that you must
- // use a type filter.
- {"teams/* | ?Name > 'foo'", "could not look up name 'Name' relative to 'teams': not found"},
- {"'teams/cardinals' | {myname: Name, myloc: Location} | ? Name == 'foo'", "name 'Name' was not selected from 'teams/cardinals', found: [myloc, myname]"},
- {"teams/* | type team | sort(Name) | ?-Name > 'foo'", "cannot negate value of type string for teams/bears"},
- {"teams/* | type team | sample(2, 3)", "1:21: sample expects exactly one integer argument specifying the number of results to include in the sample"},
- {"teams/* | type team | sample(2.0)", "1:21: sample expects exactly one integer argument specifying the number of results to include in the sample"},
- {"teams/* | type team | sample(-1)", "1:21: sample expects exactly one integer argument specifying the number of results to include in the sample"},
-
- // TODO(kash): Selection with conflicting names.
- // TODO(kash): Trying to sort an aggregate. "... | avg | sort()"
- }
- for _, test := range tests {
- it := Eval(st.Snapshot(), rootPublicID, storage.PathName{}, query.Query{test.query})
- for it.Next() {
- }
- if it.Err() == nil {
- t.Errorf("query %s, No error, expected %s", test.query, test.expectedError)
- continue
- }
- if it.Err().Error() != test.expectedError {
- t.Errorf("query %s, got error \"%s\", expected \"%s\"", test.query, it.Err(), test.expectedError)
- continue
- }
- }
-}
-
-type mockSnapshot struct {
- it state.Iterator
-}
-
-func (m *mockSnapshot) NewIterator(pid security.PublicID, path storage.PathName, pathFilter state.PathFilter, filter state.IterFilter) state.Iterator {
- return m.it
-}
-
-func (m *mockSnapshot) Find(id storage.ID) *state.Cell {
- return nil
-}
-
-func (m *mockSnapshot) Get(pid security.PublicID, path storage.PathName) (*storage.Entry, error) {
- return nil, nil
-}
-
-type repeatForeverIterator struct {
- entry *storage.Entry
- snapshot state.Snapshot
-}
-
-func (it *repeatForeverIterator) IsValid() bool {
- return true
-}
-
-func (it *repeatForeverIterator) Get() *storage.Entry {
- return it.entry
-}
-
-func (it *repeatForeverIterator) Name() string {
- return fmt.Sprintf("teams/%v", it.entry.Stat.MTimeNS)
-}
-
-func (it *repeatForeverIterator) Next() {
- it.entry = &storage.Entry{
- storage.Stat{storage.ObjectKind, storage.NewID(), time.Now().UnixNano(), nil},
- it.entry.Value,
- }
-}
-
-func (it *repeatForeverIterator) Snapshot() state.Snapshot {
- return it.snapshot
-}
-
-func TestEvalAbort(t *testing.T) {
- type testCase struct {
- query string
- }
-
- tests := []testCase{
- testCase{"teams/*"},
- testCase{"teams/* | type team"},
- testCase{"teams/* | ?true"},
- }
-
- dummyTeam := team{"cardinals", "CA"}
- sn := &mockSnapshot{
- &repeatForeverIterator{
- entry: &storage.Entry{
- storage.Stat{storage.ObjectKind, storage.NewID(), time.Now().UnixNano(), nil},
- dummyTeam,
- },
- },
- }
- sn.it.(*repeatForeverIterator).snapshot = sn
-
- for _, test := range tests {
- // Test calling Abort immediately vs waiting until the channels are full.
- for i := 0; i < 2; i++ {
- it := Eval(sn, rootPublicID, storage.PathName{}, query.Query{test.query})
- if i == 0 {
- // Give the evaluators time to fill up the channels. Ensure that they
- // don't block forever on a full channel.
- for len(it.(*evalIterator).results[0].results) < maxChannelSize {
- time.Sleep(time.Millisecond)
- }
- }
- it.Abort()
- if it.Err() != nil {
- t.Errorf("query:%q Got non-nil error: %v", test.query, it.Err())
- }
- it.(*evalIterator).wait()
- }
- }
-}
-
-// TODO(kash): Add a test for access control.
diff --git a/services/store/memstore/query/glob.go b/services/store/memstore/query/glob.go
deleted file mode 100644
index 363580a..0000000
--- a/services/store/memstore/query/glob.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package query
-
-import (
- "veyron/lib/glob"
- "veyron/services/store/memstore/refs"
- "veyron/services/store/memstore/state"
-
- "veyron2/security"
- "veyron2/storage"
-)
-
-type globIterator struct {
- state.Iterator
- pathLen int
- glob *glob.Glob
-}
-
-// GlobStream represents a sequence of results from a glob call.
-type GlobStream interface {
- // IsValid returns true iff the iterator refers to an element.
- IsValid() bool
-
-	// Name returns one possible name for this entry.
- Name() string
-
- // Next advances to the next element.
- Next()
-}
-
-// Glob returns an iterator that emits all values that match the given pattern.
-func Glob(sn state.Snapshot, clientID security.PublicID, path storage.PathName, pattern string) (GlobStream, error) {
- return GlobIterator(sn, clientID, path, pattern)
-}
-
-// GlobIterator returns an iterator that emits all values that match the given pattern.
-func GlobIterator(sn state.Snapshot, clientID security.PublicID, path storage.PathName, pattern string) (state.Iterator, error) {
- parsed, err := glob.Parse(pattern)
- if err != nil {
- return nil, err
- }
-
- g := &globIterator{
- pathLen: len(path),
- glob: parsed,
- }
- g.Iterator = sn.NewIterator(clientID, path, state.ListPaths, state.IterFilter(g.filter))
-
- return g, nil
-}
-
-func (g *globIterator) filter(parent *refs.FullPath, path *refs.Path) (bool, bool) {
- // We never get to a path unless we've first approved its parent.
- // We can therefore only check a suffix of the glob pattern.
- prefixLen := parent.Len() - g.pathLen
- matched, suffix := g.glob.PartialMatch(prefixLen, []string(path.Name()))
- return matched && suffix.Len() == 0, matched
-}
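
For reference, a minimal sketch of how this Glob API is driven; st, rootPublicID, and t are assumed to come from a test setup like the one in glob_test.go below:

	// Enumerate the store entries under "mvps" that match "Links/*".
	it, err := Glob(st.Snapshot(), rootPublicID, storage.ParsePath("mvps"), "Links/*")
	if err != nil {
		// Patterns starting with '/' yield filepath.ErrBadPattern.
		t.Fatalf("Glob failed: %v", err)
	}
	for ; it.IsValid(); it.Next() {
		t.Logf("matched %q", it.Name()) // "Links/0" and "Links/1" for the test data
	}
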
diff --git a/services/store/memstore/query/glob_test.go b/services/store/memstore/query/glob_test.go
deleted file mode 100644
index 6690d98..0000000
--- a/services/store/memstore/query/glob_test.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package query
-
-import (
- "path/filepath"
- "testing"
-
- "veyron/services/store/memstore/state"
-
- "veyron2/storage"
-)
-
-type globTest struct {
- path string
- pattern string
- expected []string
-}
-
-var globTests = []globTest{
- {"", "...", []string{
- "",
- "mvps",
- "mvps/Links/0",
- "mvps/Links/1",
- "players",
- "players/alfred",
- "players/alice",
- "players/betty",
- "players/bob",
- "teams",
- "teams/bears",
- "teams/cardinals",
- "teams/sharks",
- }},
- {"", "mvps/...", []string{
- "mvps",
- "mvps/Links/0",
- "mvps/Links/1",
- }},
- {"", "players/...", []string{
- "players",
- "players/alfred",
- "players/alice",
- "players/betty",
- "players/bob",
- }},
- // Note(mattr): This test case shows that Glob does not return
- // subfield nodes.
- {"", "mvps/*", []string{}},
- {"", "mvps/Links/*", []string{
- "mvps/Links/0",
- "mvps/Links/1",
- }},
- {"", "players/alfred", []string{
- "players/alfred",
- }},
- {"", "mvps/Links/0", []string{
- "mvps/Links/0",
- }},
- // An empty pattern returns the element referred to by the path.
- {"/mvps/Links/0", "", []string{
- "",
- }},
- {"mvps", "Links/*", []string{
- "Links/0",
- "Links/1",
- }},
- {"mvps/Links", "*", []string{
- "0",
- "1",
- }},
-}
-
-// Test that an iterator doesn't get stuck in cycles.
-func TestGlob(t *testing.T) {
- st := state.New(rootPublicID)
- sn := st.MutableSnapshot()
-
- type dir struct {
- Links []storage.ID
- }
-
- // Add some objects
- put(t, sn, "/", "")
- put(t, sn, "/teams", "")
- put(t, sn, "/teams/cardinals", "")
- put(t, sn, "/teams/sharks", "")
- put(t, sn, "/teams/bears", "")
- put(t, sn, "/players", "")
- alfredID := put(t, sn, "/players/alfred", "")
- put(t, sn, "/players/alice", "")
- bettyID := put(t, sn, "/players/betty", "")
- put(t, sn, "/players/bob", "")
-
- put(t, sn, "/mvps", &dir{[]storage.ID{alfredID, bettyID}})
-
- commit(t, st, sn)
-
- // Test that patterns starting with '/' are errors.
- _, err := Glob(st.Snapshot(), rootPublicID, storage.PathName{}, "/*")
- if err != filepath.ErrBadPattern {
- t.Errorf("Expected bad pattern error, got %v", err)
- }
-
- for _, gt := range globTests {
- path := storage.ParsePath(gt.path)
- it, err := Glob(st.Snapshot(), rootPublicID, path, gt.pattern)
- if err != nil {
- t.Errorf("Unexpected error on Glob: %s", err)
- }
- names := map[string]bool{}
- for ; it.IsValid(); it.Next() {
- names[it.Name()] = true
- }
- if len(names) != len(gt.expected) {
- t.Errorf("Wrong number of names for %s. got %v, wanted %v",
- gt.pattern, names, gt.expected)
- }
- for _, name := range gt.expected {
- if !names[name] {
- t.Errorf("Expected to find %v in %v", name, names)
- }
- }
- }
-}
diff --git a/services/store/memstore/query/util_test.go b/services/store/memstore/query/util_test.go
deleted file mode 100644
index e11b4db..0000000
--- a/services/store/memstore/query/util_test.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package query
-
-import (
- "runtime"
- "testing"
-
- "veyron/services/store/memstore/state"
-
- "veyron2/security"
- "veyron2/storage"
-)
-
-var (
- rootPublicID security.PublicID = security.FakePublicID("root")
-)
-
-type Dir struct{}
-
-func mkdir(t *testing.T, sn *state.MutableSnapshot, name string) (storage.ID, interface{}) {
- _, file, line, _ := runtime.Caller(1)
- dir := &Dir{}
- path := storage.ParsePath(name)
- stat, err := sn.Put(rootPublicID, path, dir)
- if err != nil || stat == nil {
- t.Errorf("%s(%d): mkdir %s: %s", file, line, name, err)
- }
- return stat.ID, dir
-}
-
-func get(t *testing.T, sn *state.MutableSnapshot, name string) interface{} {
- _, file, line, _ := runtime.Caller(1)
- path := storage.ParsePath(name)
- e, err := sn.Get(rootPublicID, path)
- if err != nil {
- t.Fatalf("%s(%d): can't get %s: %s", file, line, name, err)
- }
- return e.Value
-}
-
-func put(t *testing.T, sn *state.MutableSnapshot, name string, v interface{}) storage.ID {
- _, file, line, _ := runtime.Caller(1)
- path := storage.ParsePath(name)
- stat, err := sn.Put(rootPublicID, path, v)
- if err != nil {
- t.Errorf("%s(%d): can't put %s: %s", file, line, name, err)
- }
- if _, err := sn.Get(rootPublicID, path); err != nil {
- t.Errorf("%s(%d): can't get %s: %s", file, line, name, err)
- }
- if stat != nil {
- return stat.ID
- }
- return storage.ID{}
-}
-
-func remove(t *testing.T, sn *state.MutableSnapshot, name string) {
- path := storage.ParsePath(name)
- if err := sn.Remove(rootPublicID, path); err != nil {
- _, file, line, _ := runtime.Caller(1)
- t.Errorf("%s(%d): can't remove %s: %s", file, line, name, err)
- }
-}
-
-func commit(t *testing.T, st *state.State, sn *state.MutableSnapshot) {
- if err := st.ApplyMutations(sn.Mutations()); err != nil {
- _, file, line, _ := runtime.Caller(1)
- t.Fatalf("%s(%d): commit failed: %s", file, line, err)
- }
-}
-
-func expectExists(t *testing.T, sn *state.MutableSnapshot, name string) {
- _, file, line, _ := runtime.Caller(1)
- path := storage.ParsePath(name)
- if _, err := sn.Get(rootPublicID, path); err != nil {
- t.Errorf("%s(%d): does not exist: %s", file, line, path)
- }
-}
-
-func expectNotExists(t *testing.T, sn *state.MutableSnapshot, name string) {
- path := storage.ParsePath(name)
- if _, err := sn.Get(rootPublicID, path); err == nil {
- _, file, line, _ := runtime.Caller(1)
- t.Errorf("%s(%d): should not exist: %s", file, line, name)
- }
-}
diff --git a/services/store/memstore/refs/builder.go b/services/store/memstore/refs/builder.go
deleted file mode 100644
index b7d591f..0000000
--- a/services/store/memstore/refs/builder.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package refs
-
-import (
- "reflect"
- "strconv"
-
- "veyron/runtimes/google/lib/functional"
- "veyron/services/store/raw"
-
- "veyron2/storage"
-)
-
-// Builder is used to collect all the references in a value, using
-// reflection for traversal.
-type Builder struct {
- refs Set
-}
-
-var (
- tyID = reflect.TypeOf(storage.ID{})
- tyString = reflect.TypeOf("")
-)
-
-// NewBuilder constructs a new refs builder.
-func NewBuilder() *Builder {
- return &Builder{refs: Empty}
-}
-
-// Get returns the references.
-func (b *Builder) Get() Set {
- return b.refs
-}
-
-// AddDir adds the references contained in the directory.
-func (b *Builder) AddDir(d functional.Set) {
- if d == nil {
- return
- }
- d.Iter(func(it interface{}) bool {
- b.refs = b.refs.Put(it.(*Ref))
- return true
- })
-}
-
-// AddDEntries adds the references contained in the DEntry list.
-func (b *Builder) AddDEntries(d []*raw.DEntry) {
- for _, de := range d {
- b.refs = b.refs.Put(&Ref{ID: de.ID, Path: NewSingletonPath(de.Name)})
- }
-}
-
-// AddValue adds the references contained in the value, using reflection
-// to traverse it.
-func (b *Builder) AddValue(v interface{}) {
- if v == nil {
- return
- }
- b.addRefs(nil, reflect.ValueOf(v))
-}
-
-func (b *Builder) addRefs(path *Path, v reflect.Value) {
- if !v.IsValid() {
- return
- }
- ty := v.Type()
- if ty == tyID {
- b.refs = b.refs.Put(&Ref{ID: v.Interface().(storage.ID), Path: path})
- return
- }
- switch ty.Kind() {
- case reflect.Map:
- b.addMapRefs(path, v)
- case reflect.Array, reflect.Slice:
- b.addSliceRefs(path, v)
- case reflect.Interface, reflect.Ptr:
- b.addRefs(path, v.Elem())
- case reflect.Struct:
- b.addStructRefs(path, v)
- }
-}
-
-func (b *Builder) addMapRefs(path *Path, v reflect.Value) {
- for _, key := range v.MapKeys() {
- if key.Type().Kind() == reflect.String {
- name := key.Convert(tyString).Interface().(string)
- b.addRefs(path.Append(name), v.MapIndex(key))
- }
- }
-}
-
-func (b *Builder) addSliceRefs(path *Path, v reflect.Value) {
- l := v.Len()
- for i := 0; i < l; i++ {
- b.addRefs(path.Append(strconv.Itoa(i)), v.Index(i))
- }
-}
-
-func (b *Builder) addStructRefs(path *Path, v reflect.Value) {
- l := v.NumField()
- ty := v.Type()
- for i := 0; i < l; i++ {
- name := ty.Field(i).Name
- b.addRefs(path.Append(name), v.Field(i))
- }
-}
diff --git a/services/store/memstore/refs/builder_test.go b/services/store/memstore/refs/builder_test.go
deleted file mode 100644
index 465ba93..0000000
--- a/services/store/memstore/refs/builder_test.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package refs_test
-
-import (
- "strconv"
- "testing"
-
- "veyron/services/store/memstore/refs"
-
- "veyron2/storage"
-)
-
-type A struct {
- B int
- C []int
- D map[string]int
-
- E storage.ID
- F []storage.ID
- G map[string]storage.ID
-}
-
-func TestID(t *testing.T) {
- id := storage.NewID()
-
- b := refs.NewBuilder()
- b.AddValue(id)
- r := b.Get()
- if r.Len() != 1 {
- t.Errorf("Expected 1 element, got %d", r.Len())
- }
- r.Iter(func(it interface{}) bool {
- r := it.(*refs.Ref)
- if r.ID != id {
- t.Errorf("Expected %s, got %s", id, r.ID)
- }
- if r.Path != nil {
- t.Errorf("Expected nil, got %v", r.Path)
- }
- return true
- })
-}
-
-func TestArray(t *testing.T) {
- a := []storage.ID{storage.NewID(), storage.NewID()}
-
- b := refs.NewBuilder()
- b.AddValue(a)
- r := b.Get()
- if r.Len() != 2 {
- t.Errorf("Expected 2 elements, got %d", r.Len())
- }
- r.Iter(func(it interface{}) bool {
- r := it.(*refs.Ref)
- found := false
- for i, id := range a {
- if r.ID == id {
- p := refs.NewSingletonPath(strconv.Itoa(i))
- if r.Path != p {
- t.Errorf("Expected %s, got %s", p, r.Path)
- }
- found = true
- break
- }
- }
- if !found {
- t.Errorf("Unexpected reference: %v", r)
- }
- return true
- })
-}
-
-func TestStruct(t *testing.T) {
- v := &A{
- B: 5,
- C: []int{6, 7},
- D: map[string]int{"a": 8, "b": 9},
- E: storage.NewID(),
- F: []storage.ID{storage.NewID()},
- G: map[string]storage.ID{"a": storage.NewID()},
- }
- paths := make(map[storage.ID]*refs.Path)
- paths[v.E] = refs.NewSingletonPath("E")
- paths[v.F[0]] = refs.NewSingletonPath("F").Append("0")
- paths[v.G["a"]] = refs.NewSingletonPath("G").Append("a")
-
- b := refs.NewBuilder()
- b.AddValue(v)
- r := b.Get()
- if r.Len() != 3 {
- t.Errorf("Expected 3 elements, got %d", r.Len())
- }
- r.Iter(func(it interface{}) bool {
- r := it.(*refs.Ref)
- p, ok := paths[r.ID]
- if !ok {
- t.Errorf("Unexpected id %s", r.ID)
- }
- if r.Path != p {
- t.Errorf("Expected %s, got %s", p, r.Path)
- }
- return true
- })
-}
diff --git a/services/store/memstore/refs/path.go b/services/store/memstore/refs/path.go
deleted file mode 100644
index 4a151ee..0000000
--- a/services/store/memstore/refs/path.go
+++ /dev/null
@@ -1,167 +0,0 @@
-package refs
-
-import (
- "sync"
- "unsafe"
-
- "veyron2/storage"
-)
-
-// Path represents a path name, interconvertible with storage.PathName, but
-// hash-consed. To maximize sharing, the path is represented in reverse order.
-// Because of the hash-consing, this is most appropriate for common path fragments.
-// For full names, consider FullPath.
-type Path struct {
- hd string // head.
- tl *Path // tail.
-}
-
-type pathHashConsTable struct {
- sync.Mutex
- table map[Path]*Path
-}
-
-var (
- pathTable = &pathHashConsTable{table: make(map[Path]*Path)}
- nilPath *Path
-)
-
-// ComparePaths defines a total order over *Path values, based on pointer
-// comparison. Since paths are hash-consed, this is stable (but arbitrary)
-// within a process, but it is not persistent across processes.
-func ComparePaths(p1, p2 *Path) int {
- i1 := uintptr(unsafe.Pointer(p1))
- i2 := uintptr(unsafe.Pointer(p2))
- switch {
- case i1 < i2:
- return -1
- case i1 > i2:
- return 1
- default:
- return 0
- }
-}
-
-// NewSingletonPath creates a path that has just one component.
-func NewSingletonPath(name string) *Path {
- return nilPath.Append(name)
-}
-
-// String returns a printable representation of a path.
-func (p *Path) String() string {
- if p == nil {
- return ""
- }
- s := p.hd
- for p = p.tl; p != nil; p = p.tl {
- s = p.hd + "/" + s
- }
- return s
-}
-
-// Suffix returns the name corresponding to the last n elements
-// of the path.
-func (p *Path) Suffix(n int) string {
- if p == nil || n == 0 {
- return ""
- }
- s := p.hd
- for i, p := 1, p.tl; i < n && p != nil; i, p = i+1, p.tl {
- s = p.hd + "/" + s
- }
- return s
-}
-
-// Append adds a new string component to the end of a path.
-func (p *Path) Append(name string) *Path {
- pathTable.Lock()
- defer pathTable.Unlock()
- p1 := Path{hd: name, tl: p}
- if p2, ok := pathTable.table[p1]; ok {
- return p2
- }
- pathTable.table[p1] = &p1
- return &p1
-}
-
-// Name returns a storage.PathName corresponding to the path.
-func (p *Path) Name() storage.PathName {
- i := p.Len()
- pl := make(storage.PathName, i)
- for ; p != nil; p = p.tl {
- i--
- pl[i] = p.hd
- }
- return pl
-}
-
-// Len returns the number of components in the path.
-func (p *Path) Len() int {
- i := 0
- for ; p != nil; p = p.tl {
- i++
- }
- return i
-}
-
-// Split splits a path into a directory and file part.
-func (p *Path) Split() (*Path, string) {
- return p.tl, p.hd
-}
-
-// FullPath represents a path name, interconvertible with storage.PathName.
-// To maximize sharing, the path is represented in reverse order.
-type FullPath Path
-
-// NewSingletonFullPath creates a path that has just one component.
-func NewSingletonFullPath(name string) *FullPath {
- return &FullPath{hd: name}
-}
-
-// NewFullPathFromName creates a FullPath that represents the same name as path.
-func NewFullPathFromName(path storage.PathName) *FullPath {
- var fp *FullPath
- for _, el := range path {
- fp = fp.Append(el)
- }
- return fp
-}
-
-// String returns a printable representation of a path.
-func (fp *FullPath) String() string {
- return (*Path)(fp).String()
-}
-
-// Suffix returns the name corresponding to the last n elements
-// of the full path.
-func (fp *FullPath) Suffix(n int) string {
- return (*Path)(fp).Suffix(n)
-}
-
-// Append adds a new string component to the end of a path.
-func (fp *FullPath) Append(name string) *FullPath {
- return &FullPath{hd: name, tl: (*Path)(fp)}
-}
-
-// Name returns a storage.PathName corresponding to the path.
-func (fp *FullPath) Name() storage.PathName {
- return (*Path)(fp).Name()
-}
-
-// Len returns the number of components in the path.
-func (fp *FullPath) Len() int {
- return (*Path)(fp).Len()
-}
-
-// Split splits a path into a directory and file part.
-func (fp *FullPath) Split() (*FullPath, string) {
- return (*FullPath)(fp.tl), fp.hd
-}
-
-// AppendPath returns a FullPath that represents the concatenation of fp and p.
-func (fp *FullPath) AppendPath(p *Path) *FullPath {
- if p == nil {
- return fp
- }
- return fp.AppendPath(p.tl).Append(p.hd)
-}
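
Because Append hash-conses every (head, tail) pair, structurally equal paths come back as the same pointer, which is what makes the pointer comparison in ComparePaths sound. A small illustrative sketch (the component names are arbitrary):

	p1 := NewSingletonPath("teams").Append("cardinals")
	p2 := NewSingletonPath("teams").Append("cardinals")
	// Hash-consing guarantees p1 == p2, so equality and ordering are O(1)
	// pointer operations instead of component-by-component comparisons.
	fmt.Println(p1 == p2)                  // true
	fmt.Println(ComparePaths(p1, p2) == 0) // true
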
diff --git a/services/store/memstore/refs/path_test.go b/services/store/memstore/refs/path_test.go
deleted file mode 100644
index 2a50c7f..0000000
--- a/services/store/memstore/refs/path_test.go
+++ /dev/null
@@ -1,152 +0,0 @@
-package refs
-
-import (
- "testing"
-
- "veyron2/storage"
-)
-
-func storePathEqual(p1, p2 storage.PathName) bool {
- if len(p1) != len(p2) {
- return false
- }
- for i, s := range p1 {
- if p2[i] != s {
- return false
- }
- }
- return true
-}
-
-func TestPath(t *testing.T) {
- p1 := NewSingletonPath("a")
- p2 := NewSingletonPath("a")
- if p1 != p2 {
- t.Errorf("Paths should be identical")
- }
- if !storePathEqual(p2.Name(), storage.PathName{"a"}) {
- t.Errorf("Got %s, expected 'a'", p2.Name())
- }
- if p1.Len() != 1 {
- t.Errorf("Got length %d, expected 1", p1.Len())
- }
-
- p1 = p1.Append("b")
- p2 = p2.Append("b")
- if p1 != p2 {
- t.Errorf("Paths should be identical")
- }
- if !storePathEqual(p2.Name(), storage.PathName{"a", "b"}) {
- t.Errorf("Got %s, expected 'a/b'", p2.Name())
- }
-
- p1 = p1.Append("c")
- p2 = p2.Append("c")
- if p1 != p2 {
- t.Errorf("Paths should be identical")
- }
- if !storePathEqual(p2.Name(), storage.PathName{"a", "b", "c"}) {
- t.Errorf("Got %s, expected 'a/b/c'", p2.Name())
- }
-
- p1 = p1.Append("d")
- p2 = p2.Append("e")
- if p1 == p2 {
- t.Errorf("Paths should not be identical, %s, %s", p1, p2)
- }
- if !storePathEqual(p1.Name(), storage.PathName{"a", "b", "c", "d"}) {
-		t.Errorf("Got %s, expected 'a/b/c/d'", p1.Name())
- }
- if !storePathEqual(p2.Name(), storage.PathName{"a", "b", "c", "e"}) {
- t.Errorf("Got %s, expected 'a/b/c/e'", p2.Name())
- }
-}
-
-func TestPathSuffix(t *testing.T) {
- p1 := NewSingletonPath("a")
- if s := p1.Suffix(0); s != "" {
- t.Errorf("Got '%s' expected ''", s)
- }
- if s := p1.Suffix(1); s != "a" {
- t.Errorf("Got '%s' expected 'a'", s)
- }
- if s := p1.Suffix(100); s != "a" {
- t.Errorf("Got '%s' expected 'a'", s)
- }
-
- p2 := p1.Append("b").Append("c").Append("d")
- if s := p2.Suffix(0); s != "" {
- t.Errorf("Got '%s' expected ''", s)
- }
- if s := p2.Suffix(1); s != "d" {
- t.Errorf("Got '%s' expected 'd'", s)
- }
- if s := p2.Suffix(2); s != "c/d" {
- t.Errorf("Got '%s' expected 'c/d'", s)
- }
- if s := p2.Suffix(3); s != "b/c/d" {
- t.Errorf("Got '%s' expected 'b/c/d'", s)
- }
- if s := p2.Suffix(4); s != "a/b/c/d" {
- t.Errorf("Got '%s' expected 'a/b/c/d'", s)
- }
- if s := p2.Suffix(100); s != "a/b/c/d" {
- t.Errorf("Got '%s' expected 'a/b/c/d'", s)
- }
-}
-
-func TestFullPath(t *testing.T) {
- p := NewSingletonFullPath("a")
- if s := p.String(); s != "a" {
- t.Errorf("Got %s expected 'a'", s)
- }
-
- p = p.Append("b").Append("c")
- if s := p.String(); s != "a/b/c" {
- t.Errorf("Got %s expected 'a/b/c'", s)
- }
-
- suffix := NewSingletonPath("d").Append("e")
- p = p.AppendPath(suffix)
- if s := p.String(); s != "a/b/c/d/e" {
- t.Errorf("Got %s expected 'a/b/c/d/e'", s)
- }
-
- p = NewFullPathFromName(storage.PathName{"a", "b", "c"})
- if s := p.String(); s != "a/b/c" {
- t.Errorf("Got %s expected 'a/b/c'", s)
- }
-}
-
-func TestFullPathSuffix(t *testing.T) {
- p1 := NewSingletonFullPath("a")
- if s := p1.Suffix(0); s != "" {
- t.Errorf("Got '%s' expected ''", s)
- }
- if s := p1.Suffix(1); s != "a" {
- t.Errorf("Got '%s' expected 'a'", s)
- }
- if s := p1.Suffix(100); s != "a" {
- t.Errorf("Got '%s' expected 'a'", s)
- }
-
- p2 := p1.Append("b").Append("c").Append("d")
- if s := p2.Suffix(0); s != "" {
- t.Errorf("Got '%s' expected ''", s)
- }
- if s := p2.Suffix(1); s != "d" {
- t.Errorf("Got '%s' expected 'd'", s)
- }
- if s := p2.Suffix(2); s != "c/d" {
- t.Errorf("Got '%s' expected 'c/d'", s)
- }
- if s := p2.Suffix(3); s != "b/c/d" {
- t.Errorf("Got '%s' expected 'b/c/d'", s)
- }
- if s := p2.Suffix(4); s != "a/b/c/d" {
- t.Errorf("Got '%s' expected 'a/b/c/d'", s)
- }
- if s := p2.Suffix(100); s != "a/b/c/d" {
- t.Errorf("Got '%s' expected 'a/b/c/d'", s)
- }
-}
diff --git a/services/store/memstore/refs/refs.go b/services/store/memstore/refs/refs.go
deleted file mode 100644
index 9823379..0000000
--- a/services/store/memstore/refs/refs.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Package refs represents references from one value to another.
-package refs
-
-import (
- "veyron/runtimes/google/lib/functional"
- "veyron/runtimes/google/lib/functional/rb"
- "veyron/services/store/raw"
-
- "veyron2/storage"
-)
-
-// Set is a set of *Ref values.
-type Set functional.Set // *Ref
-
-// Ref represents a single reference in a store value. It includes the
-// storage.ID, and the path to the reference.
-type Ref struct {
- ID storage.ID
- Path *Path
-}
-
-// Dir represents a directory, which is a set of *Ref, sorted by path.
-type Dir functional.Set // *Ref
-
-var (
- Empty Set = rb.NewSet(compareRefs)
- EmptyDir Dir = rb.NewSet(compareRefsByPath)
-)
-
-// *Ref values are sorted lexicographically by (ID, Path).
-func compareRefs(it1, it2 interface{}) bool {
- r1 := it1.(*Ref)
- r2 := it2.(*Ref)
- cmp := storage.CompareIDs(r1.ID, r2.ID)
- return cmp < 0 || (cmp == 0 && ComparePaths(r1.Path, r2.Path) < 0)
-}
-
-// compareRefsByPath compares refs using their Path.
-func compareRefsByPath(a, b interface{}) bool {
- return ComparePaths(a.(*Ref).Path, b.(*Ref).Path) < 0
-}
-
-// FlattenDir flattens the directory map into an association list.
-func FlattenDir(d Dir) []*raw.DEntry {
- l := make([]*raw.DEntry, d.Len())
- i := 0
- d.Iter(func(v interface{}) bool {
- r := v.(*Ref)
- l[i] = &raw.DEntry{ID: r.ID, Name: r.Path.hd}
- i++
- return true
- })
- return l
-}
-
-// BuildDir builds a Dir from the association list.
-func BuildDir(l []*raw.DEntry) Dir {
- d := EmptyDir
- for _, de := range l {
- d = d.Put(&Ref{ID: de.ID, Path: NewSingletonPath(de.Name)})
- }
- return d
-}
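
FlattenDir and BuildDir are inverses of each other; a hedged round-trip sketch, where id0 and id1 stand in for real storage.ID values:

	entries := []*raw.DEntry{
		{ID: id0, Name: "Links"},
		{ID: id1, Name: "mvp"},
	}
	d := BuildDir(entries) // one *Ref per entry, keyed by the singleton path of its Name
	back := FlattenDir(d)  // the association list again, one raw.DEntry per Ref
	// len(back) == 2, and each entry carries the same (ID, Name) pair as the input.
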
diff --git a/services/store/memstore/state/cell.go b/services/store/memstore/state/cell.go
deleted file mode 100644
index 0422713..0000000
--- a/services/store/memstore/state/cell.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package state
-
-import (
- "bytes"
-
- "veyron/services/store/memstore/refs"
- "veyron/services/store/raw"
-
- // TODO(cnicolaou): mv lib/functional into veyron somewhere.
- "veyron/runtimes/google/lib/functional"
- "veyron/runtimes/google/lib/functional/rb"
- "veyron2/storage"
-)
-
-// Cell represents an entry in the store. It is reference-counted, and it
-// contains the actual value.
-type Cell struct {
- // Unique ID for the cell.
- ID storage.ID
-
- // Value
- Value interface{}
-
- // Implicit directory.
- Dir refs.Set
-
- // refs includes the references in the value and the dir.
- //
- // TODO(jyh): This is simple, but it would be more space efficient to
- // include only the refs in the value, or drop this field entirely.
- refs refs.Set
-
- // inRefs contains all the incoming references -- that is, references
- // from other values to this one.
- inRefs refs.Set
-
- // TODO(jyh): The following fields can be packed into a single word.
- refcount uint
- color color
- buffered bool
-
- // version is the version number.
- Version raw.Version
-
- // TODO(jyh): Add stat info and attributes.
-}
-
-// cellSet is a functional map from storage.ID to *Cell.
-type cellSet functional.Set
-
-var (
- emptyIDTable cellSet = rb.NewSet(compareCellsByID)
-)
-
-// compareCellsByID compares the two cells' IDs.
-func compareCellsByID(a, b interface{}) bool {
- return bytes.Compare(a.(*Cell).ID[:], b.(*Cell).ID[:]) < 0
-}
-
-// newSubfieldEntry returns a storage.Entry value, ignoring the Stat info.
-func newSubfieldEntry(v interface{}) *storage.Entry {
- return &storage.Entry{Value: deepcopy(v)}
-}
-
-// setRefs sets the cell's refs field.
-func (c *Cell) setRefs() {
- r := refs.NewBuilder()
- r.AddValue(c.Value)
- r.AddDir(c.Dir)
- c.refs = r.Get()
-}
-
-// GetEntry returns the *storage.Entry for a cell.
-func (c *Cell) GetEntry() *storage.Entry {
- entry := newSubfieldEntry(c.Value)
- c.fillStat(&entry.Stat)
- return entry
-}
-
-// getStat returns the *storage.Stat for a cell.
-func (c *Cell) getStat() *storage.Stat {
- var stat storage.Stat
- c.fillStat(&stat)
- return &stat
-}
-
-// fillStat fills the storage.Stat struct with the cell's metadata. Assumes
-// stat is not nil.
-func (c *Cell) fillStat(stat *storage.Stat) {
- stat.ID = c.ID
- // TODO(jyh): Fill in the missing fields
-}
diff --git a/services/store/memstore/state/doc.go b/services/store/memstore/state/doc.go
deleted file mode 100644
index 85d89bf..0000000
--- a/services/store/memstore/state/doc.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Package state implements an in-memory version of the store state.
-// There are three main types here.
-//
-// Snapshot represents an isolated read-only copy of the state.
-// It supports only iteration.
-//
-// NewIterator() : returns an iterator to traverse the state.
-//
-// MutableSnapshot is an isolated read-write copy of the state. It supports the
-// standard dictionary operations.
-//
-// Read(path) : fetches a value from the snapshot.
-// Put(path, value) : stores a value in the snapshot.
-// Remove(path) : removes a value from the snapshot.
-// Mutations() : returns the set of mutations to the snapshot.
-//
-// State is the actual shared state, with the following methods.
-//
-// Snapshot() : returns a read-only snapshot of the state.
-// MutableSnapshot() : returns a read-write snapshot of the state.
-// ApplyMutations(mutations) : applies the mutations to the state.
-//
-// ApplyMutations can fail due to concurrency: the state may have been modified
-// (through another call to ApplyMutations) between the time that a
-// MutableSnapshot was created and the time that it is applied.
-//
-// The sequence for performing an atomic change is to copy the snapshot,
-// make the changes on the copy, then apply the changes to the original state
-// atomically.
-//
-// // Swap /a/b/c and /d/e atomically.
-// st := New(...)
-// ...
-// sn := st.MutableSnapshot()
-// x, err := sn.Get("/a/b/c")
-// y, err := sn.Get("/d/e")
-// err = sn.Put("/d/e", x)
-// err = sn.Put("/a/b/c", y)
-// err = st.ApplyMutations(sn.Mutations())
-package state
diff --git a/services/store/memstore/state/gc.go b/services/store/memstore/state/gc.go
deleted file mode 100644
index 2642431..0000000
--- a/services/store/memstore/state/gc.go
+++ /dev/null
@@ -1,241 +0,0 @@
-package state
-
-// The store values include references based on pub.ID. We use a garbage
-// collection mechanism to remove values when they are no longer referenced.
-// There are two general techniques for garbage collection: 1) infrequent
-// tracing collection (like mark/sweep, etc.), and 2) reference counting.
-// Reference counting is more responsive but it is harder to deal with cycles.
-//
-// We provide "instant GC" semantics, meaning that once a value has become
-// garbage, it is no longer returned as the result of any subsequent query.
-//
-// To satisfy the responsiveness requirement, we use a garbage collection scheme
-// based on reference counting. Cells are collected as soon as they become
-// garbage. This happens whenever the reference count becomes zero, but it can
-// also happen when the refcount is nonzero, but the cell is part of a garbage
-// cycle.
-//
-// The scheme here is based on the following.
-//
-// Concurrent Cycle Collection in Reference Counted Systems
-// David F. Bacon and V.T. Rajan
-// IBM T.J. Watson Research Center
-// Proceedings of the European Conference on Object-Oriented Programming
-// June 2001
-// http://researcher.watson.ibm.com/researcher/files/us-bacon/Bacon01Concurrent.pdf
-//
-// This implementation is based on the "synchronous" algorithm, not the
-// concurrent one.
-//
-// The central idea is that garbage collection is invoked when a refcount is
-// decremented. If the refcount becomes zero, then the cell is certainly
-// garbage, and it can be reclaimed. If the refcount is nonzero, then the cell
-// might still be garbage. The cell is added to a "gcRoots" table for later
-// analysis.
-//
-// When GC is forced (due to a new query, for example), the graph reachable from
-// the gcRoots is traversed to see if any of the cells are involved in garbage
-// cycles. The traversal decrements refcounts based on internal references
-// (references reachable through gcRoots). If any the the refcounts now become
-// zero, that means all references are internal and the cell is actually
-// garbage.
-//
-// That means the worst case GC cost is still linear in the number of elements
-// in the store. However, it is observed that in a large number of cases in
-// practice, refcounts are either 0 or 1, so the gcRoots mechanism is not
-// invoked.
-//
-// In our case, some directories will have multiple links, coming from
-// replication group directories for example. Whenever one of the replication
-// group links is removed, we'll have to invoke the garbage collector to
-// traverse the subgraph that was replicated. This is similar to the cost that
-// we would pay if the reachable subgraph became garbage, where we have to
-// delete all of the cells in the subgraph.
-//
-// To improve performance, we delay the traversal, using the gcRoots set, until
-// the GC is forced.
-import (
- "veyron/services/store/memstore/refs"
-
- "veyron2/storage"
-)
-
-// color is the garbage collection color of a cell.
-type color uint
-
-const (
- black color = iota // In use or free.
- gray // Possible member of cycle.
- white // Member of a garbage cycle.
- purple // Possible root of cycle.
-)
-
-// ref increments the reference count. The cell can't be garbage, so color it
-// black.
-func (sn *MutableSnapshot) ref(id storage.ID) {
- c := sn.deref(id)
- c.color = black
- c.refcount++
-}
-
-// unref decrements the reference count. If the count reaches 0, the cell is
-// garbage. Otherwise the cell might be part of a cycle, so add it to the
-// gcRoots.
-func (sn *MutableSnapshot) unref(id storage.ID) {
- c := sn.deref(id)
- if c.refcount == 0 {
- panic("unref(): refcount is already zero")
- }
- c.refcount--
- if c.refcount == 0 {
- sn.release(c)
- } else {
- sn.possibleRoot(c)
- }
-}
-
-// release frees a cell once its reference count has reached zero, releasing
-// the cell's outgoing references in the process.
-func (sn *MutableSnapshot) release(c *Cell) {
- c.color = black
- c.refs.Iter(func(it interface{}) bool {
- sn.unref(it.(*refs.Ref).ID)
- return true
- })
- if !c.buffered {
- sn.delete(c)
- }
-}
-
-// possibleRoot marks the cell as the possible root of a cycle. The cell is
-// colored purple and added to the gcRoots.
-func (sn *MutableSnapshot) possibleRoot(c *Cell) {
- if c.color != purple {
- c.color = purple
- if !c.buffered {
- c.buffered = true
- sn.gcRoots[c.ID] = struct{}{}
- }
- }
-}
-
-// gc performs a garbage collection.
-//
-// - markRoots traverses all of the cells reachable from the roots,
-// decrementing reference counts contributed by internal references.
-//
-// - scanRoots traverses the cells reachable from the roots, coloring white if
-// the refcount is zero, or coloring black otherwise. Note that a white
-// cell might be restored to black later if it is reachable through some other
-// live path.
-//
-// - collectRoots removes all remaining white nodes, which are cyclic garbage.
-func (sn *MutableSnapshot) gc() {
- sn.markRoots()
- sn.scanRoots()
- sn.collectRoots()
-}
-
-// markRoots traverses the cells reachable from the roots, marking them gray.
-// If another root is encountered during a traversal, it is removed from the
-// gcRoots.
-func (sn *MutableSnapshot) markRoots() {
- for id, _ := range sn.gcRoots {
- c := sn.deref(id)
- if c.color == purple {
- sn.markGray(c)
- } else {
- c.buffered = false
- delete(sn.gcRoots, id)
- if c.color == black && c.refcount == 0 {
- sn.delete(c)
- }
- }
- }
-}
-
-// markGray colors a cell gray, then decrements the refcounts of the children,
-// and marks them gray recursively. The result is that counts for "internal"
-// references (references reachable from the roots) are removed. Then, if the
-// reference count of a root reaches zero, it must be reachable only from
-// internal references, so it is part of a garbage cycle, and it can be deleted.
-func (sn *MutableSnapshot) markGray(c *Cell) {
- if c.color == gray {
- return
- }
- c.color = gray
- c.refs.Iter(func(it interface{}) bool {
- id := it.(*refs.Ref).ID
- nc := sn.deref(id)
- nc.refcount--
- sn.markGray(nc)
- return true
- })
-}
-
-// scanRoots traverses the graph from gcRoots. If a cell has a non-zero
-// refcount, that means it is reachable from an external reference, so it is
-// live. In that case, the cell and reachable children are live. Otherwise,
-// the refcount is zero, and the cell is colored white to indicate that it may
-// be garbage.
-func (sn *MutableSnapshot) scanRoots() {
- for id, _ := range sn.gcRoots {
- sn.scan(sn.deref(id))
- }
-}
-
-// scan gray nodes, coloring them black if the refcount is nonzero, or white
-// otherwise.
-func (sn *MutableSnapshot) scan(c *Cell) {
- if c.color != gray {
- return
- }
- if c.refcount > 0 {
- sn.scanBlack(c)
- } else {
- c.color = white
- c.refs.Iter(func(it interface{}) bool {
- id := it.(*refs.Ref).ID
- sn.scan(sn.deref(id))
- return true
- })
- }
-}
-
-// scanBlack colors a cell and its subgraph black, restoring refcounts.
-func (sn *MutableSnapshot) scanBlack(c *Cell) {
- c.color = black
- c.refs.Iter(func(it interface{}) bool {
- id := it.(*refs.Ref).ID
- nc := sn.deref(id)
- nc.refcount++
- if nc.color != black {
- sn.scanBlack(nc)
- }
- return true
- })
-}
-
-// collectRoots frees all the cells that are colored white.
-func (sn *MutableSnapshot) collectRoots() {
- for id, _ := range sn.gcRoots {
- c := sn.deref(id)
- c.buffered = false
- sn.collectWhite(c)
- }
- sn.gcRoots = make(map[storage.ID]struct{})
-}
-
-// collectWhite frees cells that are colored white.
-func (sn *MutableSnapshot) collectWhite(c *Cell) {
- if c.color != white || c.buffered {
- return
- }
- c.color = black
- c.refs.Iter(func(it interface{}) bool {
- id := it.(*refs.Ref).ID
- sn.collectWhite(sn.deref(id))
- return true
- })
- sn.delete(c)
-}
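
To make the trial-deletion idea concrete, here is a self-contained toy version of the synchronous algorithm running on a two-node cycle; it is illustrative only and deliberately independent of the memstore types:

	package main

	import "fmt"

	type node struct {
		refcount int
		color    string // "black", "gray", or "white"
		refs     []*node
	}

	// markGray performs the trial deletion: it removes the counts
	// contributed by internal (cycle-local) references.
	func markGray(n *node) {
		if n.color == "gray" {
			return
		}
		n.color = "gray"
		for _, c := range n.refs {
			c.refcount--
			markGray(c)
		}
	}

	// scan re-blackens anything still externally referenced and restores its
	// counts; whatever remains at refcount zero is colored white (garbage).
	func scan(n *node) {
		if n.color != "gray" {
			return
		}
		if n.refcount > 0 {
			scanBlack(n)
			return
		}
		n.color = "white"
		for _, c := range n.refs {
			scan(c)
		}
	}

	func scanBlack(n *node) {
		n.color = "black"
		for _, c := range n.refs {
			c.refcount++
			if c.color != "black" {
				scanBlack(c)
			}
		}
	}

	func main() {
		// a and b reference each other; the last external (pathname) link to a
		// was just removed, so each node's refcount of 1 is purely internal.
		a := &node{refcount: 1, color: "black"}
		b := &node{refcount: 1, color: "black", refs: []*node{a}}
		a.refs = []*node{b}

		markGray(a)
		scan(a)
		fmt.Println(a.color, b.color) // "white white": a cyclic garbage pair
	}
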
diff --git a/services/store/memstore/state/iterator.go b/services/store/memstore/state/iterator.go
deleted file mode 100644
index e7c5275..0000000
--- a/services/store/memstore/state/iterator.go
+++ /dev/null
@@ -1,270 +0,0 @@
-package state
-
-import (
- "fmt"
-
- "veyron/services/store/memstore/refs"
-
- "veyron2/security"
- "veyron2/storage"
-)
-
-// Iterator is used to iterate through the descendants of a value. The order of
-// iteration is depth-first. Each descendant is visited at most once.
-type Iterator interface {
- // IsValid returns true iff the iterator refers to an element.
- IsValid() bool
-
- // Get returns the current value.
- Get() *storage.Entry
-
-	// Name returns one possible name for this entry.
- Name() string
-
- // Next advances to the next element.
- Next()
-
- // Snapshot returns the iterator's snapshot.
- Snapshot() Snapshot
-}
-
-// iterator implements the Iterator interface.
-//
-// TODO(jyh): This is a simple, naive implementation. We need to implement
-// security, perform type-directed iteration, and we may need pruning (similar
-// to the -prune option to the "find" command).
-type iterator struct {
- snapshot Snapshot
-
- // Set of IDs already visited on this path.
- visited map[storage.ID]struct{}
-
- // Stack of actions to consider next. Actions are one of:
- // - visit a node accessible from the current path (the node may already
- // have been visited on the current path).
- // - unvisit a node (backtrack the current path).
- next []next
-
- // Depth of starting path.
- initialDepth int
-
- // Current value.
- entry *storage.Entry
- path *refs.FullPath
-
- pathFilter PathFilter
-
- filter IterFilter
-}
-
-type next struct {
- parent *refs.FullPath
- path *refs.Path
- id storage.ID
- action action
-}
-
-type action int
-
-const (
- visit = action(iota)
- unvisit
-)
-
-var (
- _ Iterator = (*iterator)(nil)
- _ Iterator = (*errorIterator)(nil)
-)
-
-// A PathFilter automatically limits the traversal of certain paths.
-type PathFilter int
-
-const (
- // ListPaths permits any path that does not visit the same object twice.
- ListPaths = PathFilter(iota)
- // ListObjects permits any path that does not revisit any object on a
- // previously traversed path 'Q', even if Q did not satisfy it.filter.
- ListObjects
-)
-
-// An IterFilter examines entries as they are considered by the
-// iterator and returns two booleans that control the traversal:
-// ret: True if the iterator should return this value in its iteration.
-// expand: True if the iterator should consider children of this value.
-type IterFilter func(*refs.FullPath, *refs.Path) (ret bool, expand bool)
-
-// RecursiveFilter is an IterFilter that causes all descendants to be
-// returned during iteration. This is the default behavior.
-func RecursiveFilter(*refs.FullPath, *refs.Path) (bool, bool) {
- return true, true
-}
-
-// ImmediateFilter is a filter that causes only the specified path and
-// immediate descendants to be returned from the iterator.
-func ImmediateFilter(_ *refs.FullPath, path *refs.Path) (bool, bool) {
- return true, path == nil
-}
-
-// NewIterator returns an Iterator that starts with the value at <path>.
-// pathFilter is used to automatically limit traversal of certain paths.
-// If filter is given, it is used to limit traversal beneath certain paths and
-// limit the results of the iteration. If filter is nil, all descendants of the
-// specified path are returned.
-func (sn *snapshot) NewIterator(pid security.PublicID, path storage.PathName,
- pathFilter PathFilter, filter IterFilter) Iterator {
-
- cell, suffix, v := sn.resolveCell(path, nil)
- if cell == nil {
- return &errorIterator{snapshot: sn}
- }
-
- if filter == nil {
- filter = RecursiveFilter
- }
-
- it := &iterator{
- snapshot: sn,
- visited: make(map[storage.ID]struct{}),
- initialDepth: len(path),
- path: refs.NewFullPathFromName(path),
- pathFilter: pathFilter,
- filter: filter,
- }
-
- ret, expand := it.filter(it.path, nil)
-
- var set refs.Set
- if len(suffix) != 0 {
-		// We started from a field within the cell. Calculate the refs reachable
-		// from the value. Allow self-references to cell.ID.
- it.entry = newSubfieldEntry(v)
- r := refs.NewBuilder()
- r.AddValue(v)
- set = r.Get()
- } else {
- it.entry = cell.GetEntry()
- it.visited[cell.ID] = struct{}{}
- it.pushUnvisit(nil, cell.ID)
- set = cell.refs
- }
-
- if expand {
- it.pushVisitAll(it.path, set)
- }
- if !ret {
- it.Next()
- }
-
- return it
-}
-
-func (it *iterator) pushUnvisit(path *refs.Path, id storage.ID) {
- switch it.pathFilter {
- case ListPaths:
- it.next = append(it.next, next{nil, path, id, unvisit})
- case ListObjects:
- // Do not unvisit the object, as it is on a path already seen by
- // it.filter.
- default:
- panic("unknown PathFilter")
- }
-}
-
-func (it *iterator) pushVisitAll(parentPath *refs.FullPath, set refs.Set) {
-
- set.Iter(func(x interface{}) bool {
- ref := x.(*refs.Ref)
- it.next = append(it.next, next{parentPath, ref.Path, ref.ID, visit})
- return true
- })
-}
-
-// IsValid returns true iff the iterator refers to an element.
-func (it *iterator) IsValid() bool {
- return it.entry != nil
-}
-
-// Name returns a name for the current value relative to the initial name.
-func (it *iterator) Name() string {
- return it.path.Suffix(it.path.Len() - it.initialDepth)
-}
-
-// Get returns the current value.
-func (it *iterator) Get() *storage.Entry {
- return it.entry
-}
-
-// Next advances to the next element.
-func (it *iterator) Next() {
- var n next
- var fullPath *refs.FullPath
- var c *Cell
- for {
- topIndex := len(it.next) - 1
- if topIndex < 0 {
- it.entry, it.path = nil, nil
- return
- }
- n, it.next = it.next[topIndex], it.next[:topIndex]
-
- if n.action == unvisit {
- delete(it.visited, n.id)
- continue
- }
-
- if _, ok := it.visited[n.id]; ok {
- continue
- }
-
- // Mark as visited.
- it.visited[n.id] = struct{}{}
- it.pushUnvisit(n.path, n.id)
-
- // Fetch the cell.
- c = it.snapshot.Find(n.id)
- if c == nil {
- // The table is inconsistent.
- panic(fmt.Sprintf("Dangling reference: %s", n.id))
- }
-
- // Check the filter
- ret, expand := it.filter(n.parent, n.path)
- fullPath = n.parent.AppendPath(n.path)
- if expand {
- it.pushVisitAll(fullPath, c.refs)
- }
- if ret {
- // Found a value.
- break
- }
- }
-
- it.entry, it.path = c.GetEntry(), fullPath
-}
-
-func (it *iterator) Snapshot() Snapshot {
- return it.snapshot
-}
-
-// errorIterator is the iterator that does nothing, has no values.
-type errorIterator struct {
- snapshot Snapshot
-}
-
-func (it *errorIterator) IsValid() bool {
- return false
-}
-
-func (it *errorIterator) Get() *storage.Entry {
- return nil
-}
-
-func (it *errorIterator) Name() string {
- return ""
-}
-
-func (it *errorIterator) Next() {}
-
-func (it *errorIterator) Snapshot() Snapshot {
- return it.snapshot
-}
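
As a usage sketch, passing ImmediateFilter turns the iterator into a one-level listing; sn and clientID are assumed to come from a setup like the tests below:

	// Visit the entry at "teams" and its immediate children, nothing deeper.
	it := sn.NewIterator(clientID, storage.ParsePath("teams"), state.ListPaths, state.ImmediateFilter)
	for ; it.IsValid(); it.Next() {
		fmt.Println(it.Name())
	}
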
diff --git a/services/store/memstore/state/iterator_test.go b/services/store/memstore/state/iterator_test.go
deleted file mode 100644
index ffc4bfc..0000000
--- a/services/store/memstore/state/iterator_test.go
+++ /dev/null
@@ -1,165 +0,0 @@
-package state_test
-
-import (
- "runtime"
- "testing"
-
- "veyron/services/store/memstore/refs"
- "veyron/services/store/memstore/state"
- "veyron2/security"
- "veyron2/storage"
-)
-
-// check that the iterator produces a set of names.
-func checkAcyclicIterator(t *testing.T, sn *state.MutableSnapshot, id security.PublicID, filter state.IterFilter, names []string) {
- _, file, line, _ := runtime.Caller(1)
-
- // Construct an index of names.
- index := map[string]bool{}
- for _, name := range names {
- index[name] = false
- }
-
- // Compute the found names.
- for it := sn.NewIterator(id, storage.ParsePath("/"), state.ListPaths, filter); it.IsValid(); it.Next() {
- name := it.Name()
- if found, ok := index[name]; ok {
- if found {
- t.Errorf("%s(%d): duplicate name %q", file, line, name)
- }
- index[name] = true
- } else {
- t.Errorf("%s(%d): unexpected name %q", file, line, name)
- }
- }
-
-	// Report the names that were not found.
- for name, found := range index {
- if !found {
- t.Errorf("%s(%d): expected: %v", file, line, name)
- }
- }
-}
-
-// check that the iterator produces a set of names. Since entries in the store
-// can have multiple names, the names are provided using a set of equivalence
-// classes. The requirement is that the iterator produces exactly one name from
-// each equivalence class. Order doesn't matter.
-func checkUniqueObjectsIterator(t *testing.T, sn *state.MutableSnapshot, id security.PublicID, filter state.IterFilter, names [][]string) {
- _, file, line, _ := runtime.Caller(1)
-
- // Construct an index of name to equivalence class.
- index := map[string]int{}
- for i, equiv := range names {
- for _, name := range equiv {
- index[name] = i
- }
- }
-
- // Compute the found set of equivalence classes.
- found := map[int]bool{}
- for it := sn.NewIterator(id, storage.ParsePath("/"), state.ListObjects, filter); it.IsValid(); it.Next() {
- name := it.Name()
- if i, ok := index[name]; ok {
- if _, ok := found[i]; ok {
- t.Errorf("%s(%d): duplicate name %q", file, line, name)
- }
- found[i] = true
- } else {
- t.Errorf("%s(%d): unexpected name %q", file, line, name)
- }
- }
-
-	// Report the equivalence classes that were not found.
- for i, equiv := range names {
- if !found[i] {
- t.Errorf("%s(%d): expected one of: %v", file, line, equiv)
- }
- }
-}
-
-// Tests that an iterator returns all non-cyclic paths that reach an object.
-func TestDuplicatePaths(t *testing.T) {
- st := state.New(rootPublicID)
- sn := st.MutableSnapshot()
-
- // Add some objects
- put(t, sn, rootPublicID, "/", "")
- put(t, sn, rootPublicID, "/teams", "")
- put(t, sn, rootPublicID, "/teams/cardinals", "")
- put(t, sn, rootPublicID, "/players", "")
- mattID := put(t, sn, rootPublicID, "/players/matt", "")
-
- // Add some hard links
- put(t, sn, rootPublicID, "/teams/cardinals/mvp", mattID)
-
- checkAcyclicIterator(t, sn, rootPublicID, nil, []string{
- "",
- "teams",
- "players",
- "teams/cardinals",
- "players/matt",
- "teams/cardinals/mvp",
- })
- checkUniqueObjectsIterator(t, sn, rootPublicID, nil, [][]string{
- {""},
- {"teams"},
- {"players"},
- {"teams/cardinals"},
- {"players/matt", "teams/cardinals/mvp"},
- })
-
- // Test that the iterator does not revisit objects on previously rejected paths.
- rejected := false
- rejectMatt := func(fullPath *refs.FullPath, path *refs.Path) (bool, bool) {
- name := fullPath.Append(path.Suffix(1)).Name().String()
- if !rejected && (name == "players/matt" || name == "teams/cardinals/mvp") {
- rejected = true
- return false, true
- }
- return true, true
- }
- checkUniqueObjectsIterator(t, sn, rootPublicID, rejectMatt, [][]string{
- {""},
- {"teams"},
- {"players"},
- {"teams/cardinals"},
- })
-}
-
-// Test that an iterator doesn't get stuck in cycles.
-func TestCyclicStructure(t *testing.T) {
- st := state.New(rootPublicID)
- sn := st.MutableSnapshot()
-
- // Add some objects
- put(t, sn, rootPublicID, "/", "")
- put(t, sn, rootPublicID, "/teams", "")
- cardinalsID := put(t, sn, rootPublicID, "/teams/cardinals", "")
- put(t, sn, rootPublicID, "/players", "")
- mattID := put(t, sn, rootPublicID, "/players/matt", "")
- put(t, sn, rootPublicID, "/players/joe", "")
-
- // Add some hard links
- put(t, sn, rootPublicID, "/players/matt/team", cardinalsID)
- put(t, sn, rootPublicID, "/teams/cardinals/mvp", mattID)
-
- checkAcyclicIterator(t, sn, rootPublicID, nil, []string{
- "",
- "teams",
- "players",
- "players/joe",
- "players/matt",
- "teams/cardinals/mvp",
- "teams/cardinals",
- "players/matt/team",
- })
- checkUniqueObjectsIterator(t, sn, rootPublicID, nil, [][]string{
- {""},
- {"teams"},
- {"players"},
- {"players/joe"},
- {"players/matt", "teams/cardinals/mvp"},
- {"teams/cardinals", "players/matt/team"},
- })
-}
diff --git a/services/store/memstore/state/log.go b/services/store/memstore/state/log.go
deleted file mode 100644
index 426a751..0000000
--- a/services/store/memstore/state/log.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package state
-
-import (
- "veyron/services/store/memstore/refs"
- "veyron/services/store/raw"
-
- "veyron2/storage"
- "veyron2/verror"
- "veyron2/vom"
-)
-
-type LogVersion uint32
-
-const (
- CurrentLogVersion LogVersion = 1
-)
-
-var (
- errUnsupportedLogVersion = verror.Internalf("unsupported log version")
-)
-
-// header contains the meta-information for a log file.
-type header struct {
- // Version is the version of the log file.
- Version LogVersion
-
- // RootID is the ID for the root value in the initial snapshot.
- RootID storage.ID
-
- // StateLen is the number of entries in the initial snapshot.
- StateLen uint32
-
- // Timestamp is the timestamp of the snapshot, in nanoseconds since the epoch.
- Timestamp uint64
-}
-
-// value is a single entry in the initial state snapshot. It corresponds to the
-// Cell type.
-type value struct {
- ID storage.ID
- Value interface{}
- Dir []*raw.DEntry
- Version raw.Version
-}
-
-func (st *State) Write(enc *vom.Encoder) error {
- // Write the header.
- sn := st.snapshot
- h := header{
- Version: CurrentLogVersion,
- StateLen: uint32(sn.idTable.Len()),
- RootID: sn.rootID,
- Timestamp: st.timestamp,
- }
- if err := enc.Encode(&h); err != nil {
- return err
- }
-
- // Write the values.
- var err error
- sn.idTable.Iter(func(it interface{}) bool {
- c := it.(*Cell)
- v := value{ID: c.ID, Value: c.Value, Dir: refs.FlattenDir(c.Dir), Version: c.Version}
- err = enc.Encode(v)
- return err == nil
- })
- return err
-}
-
-func (st *State) Read(dec *vom.Decoder) error {
- sn := st.snapshot
-
- var header header
- if err := dec.Decode(&header); err != nil {
- return err
- }
- if header.Version != CurrentLogVersion {
- return errUnsupportedLogVersion
- }
-
- // Create the state without refcounts.
- t := emptyIDTable
- for i := uint32(0); i < header.StateLen; i++ {
- var v value
- if err := dec.Decode(&v); err != nil {
- return err
- }
- d := refs.BuildDir(v.Dir)
-
- // Calculate refs.
- r := refs.NewBuilder()
- r.AddValue(v.Value)
- r.AddDir(d)
-
- // Add the cell.
- c := &Cell{ID: v.ID, Value: v.Value, Dir: d, Version: v.Version, refs: r.Get(), inRefs: refs.Empty}
- t = t.Put(c)
- }
-
- sn.idTable = t
- sn.rootID = header.RootID
- st.timestamp = header.Timestamp
-
- // Update refcounts.
- t.Iter(func(it interface{}) bool {
- c := it.(*Cell)
- sn.addRefs(c.ID, c.refs)
- return true
- })
- return nil
-}
diff --git a/services/store/memstore/state/log_test.go b/services/store/memstore/state/log_test.go
deleted file mode 100644
index 54708b0..0000000
--- a/services/store/memstore/state/log_test.go
+++ /dev/null
@@ -1,176 +0,0 @@
-package state
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
- "runtime"
- "testing"
-
- "veyron/services/store/memstore/refs"
-
- "veyron/runtimes/google/lib/functional"
- "veyron2/vom"
-)
-
-type Data struct {
- Comment string
-}
-
-func init() {
- vom.Register(&Data{})
-}
-
-func newData(c string) *Data {
- return &Data{Comment: c}
-}
-
-func expectEqDir(t *testing.T, file string, line int, d1, d2 *Dir) {
- for name, id1 := range d1.Entries {
- id2, ok := d2.Entries[name]
- if !ok {
- t.Errorf("%s(%d): does not exist: %s", file, line, name)
- } else if id2 != id1 {
- t.Errorf("%s(%d): expected ID %s, got %s", file, line, id1, id2)
- }
- }
- for name, _ := range d2.Entries {
- _, ok := d1.Entries[name]
- if !ok {
- t.Errorf("%s(%d): should not exist: %s", file, line, name)
- }
- }
-}
-
-func expectEqData(t *testing.T, file string, line int, d1, d2 *Data) {
- if d1.Comment != d2.Comment {
-		t.Errorf("%s(%d): expected %q, got %q", file, line, d1.Comment, d2.Comment)
- }
-}
-
-// expectEqValues compares two items. They are equal only if they have the same
-// type, and their contents are equal.
-func expectEqValues(t *testing.T, file string, line int, v1, v2 interface{}) {
- switch x1 := v1.(type) {
- case *Dir:
- x2, ok := v2.(*Dir)
- if !ok {
- t.Errorf("%s(%d): not a Dir: %v", file, line, v2)
- } else {
- expectEqDir(t, file, line, x1, x2)
- }
- case *Data:
- x2, ok := v2.(*Data)
- if !ok {
- t.Errorf("%s(%d): not a Data: %v", file, line, v2)
- } else {
- expectEqData(t, file, line, x1, x2)
- }
- default:
- t.Errorf("Unknown type: %T, %v", v1, v1)
- }
-}
-
-// expectEqImplicitDir compares two directories.
-func expectEqImplicitDir(t *testing.T, file string, line int, d1, d2 refs.Dir) {
- l1 := refs.FlattenDir(d1)
- l2 := refs.FlattenDir(d2)
- i1 := 0
- i2 := 0
- for i1 < len(l1) && i2 < len(l2) {
- e1 := l1[i1]
- e2 := l2[i2]
- if e1.Name == e2.Name {
- if e1.ID != e2.ID {
- t.Errorf("%s(%d): expected id %s, got %s", file, line, e1.ID, e2.ID)
- }
- i1++
- i2++
- } else if e1.Name < e2.Name {
- t.Errorf("%s(%d): missing directory %s", file, line, e1.Name)
- i1++
- } else {
- t.Errorf("%s(%d): unexpected directory %s", file, line, e2.Name)
- i2++
- }
- }
- for _, e1 := range l1[i1:] {
- t.Errorf("%s(%d): missing directory %s", file, line, e1.Name)
- }
- for _, e2 := range l2[i2:] {
- t.Errorf("%s(%d): unexpected directory %s", file, line, e2.Name)
- }
-}
-
-// expectEqCells compares two *Cell values.
-func expectEqCells(t *testing.T, file string, line int, c1, c2 *Cell) {
- expectEqValues(t, file, line, c1.Value, c2.Value)
- expectEqImplicitDir(t, file, line, c1.Dir, c2.Dir)
-}
-
-// expectEqIDTables compares two states.
-func expectEqIDTables(t *testing.T, t1, t2 functional.Set) {
- _, file, line, _ := runtime.Caller(1)
- t1.Iter(func(it1 interface{}) bool {
- c1 := it1.(*Cell)
- it2, ok := t2.Get(c1)
- if !ok {
- t.Errorf("%s(%d): cell does not exist: %v", file, line, c1)
- } else {
- c2 := it2.(*Cell)
- expectEqCells(t, file, line, c1, c2)
- }
- return true
- })
- t2.Iter(func(it2 interface{}) bool {
- _, ok := t1.Get(it2)
- if !ok {
- t.Errorf("%s(%d): cell should not exist: %v", file, line, it2)
- }
- return true
- })
-}
-
-func TestState(t *testing.T) {
- dbName, err := ioutil.TempDir(os.TempDir(), "store")
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed: %v", err)
- }
-	defer os.RemoveAll(dbName)
-
- dbFile := filepath.Join(dbName, "db")
- ofile, err := os.Create(dbFile)
- if err != nil {
- t.Fatalf("Error opening log file: %s", err)
- }
- defer ofile.Close()
- enc := vom.NewEncoder(ofile)
-
- // Create an initial state.
- st1 := New(rootPublicID)
- sn := st1.MutableSnapshot()
- mkdir(t, sn, "/")
- mkdir(t, sn, "/a")
- mkdir(t, sn, "/a/b")
- mkdir(t, sn, "/a/b/c")
- mkdir(t, sn, "/a/b/c/d")
- if err := st1.ApplyMutations(sn.Mutations()); err != nil {
- t.Errorf("Unexpected error: %s", err)
- }
- if err := st1.Write(enc); err != nil {
- t.Errorf("Error writing log: %s", err)
- }
-
- ifile, err := os.Open(dbFile)
- if err != nil {
- t.Fatalf("Error opening log file: %s", err)
- }
- defer ifile.Close()
- dec := vom.NewDecoder(ifile)
- st2 := New(rootPublicID)
- if err := st2.Read(dec); err != nil {
- t.Fatalf("Can't read state: %s", err)
- }
-
- expectEqIDTables(t, st1.snapshot.idTable, st2.snapshot.idTable)
-}
diff --git a/services/store/memstore/state/mutable_snapshot.go b/services/store/memstore/state/mutable_snapshot.go
deleted file mode 100644
index 2ac285b..0000000
--- a/services/store/memstore/state/mutable_snapshot.go
+++ /dev/null
@@ -1,492 +0,0 @@
-package state
-
-import (
- "fmt"
-
- "veyron/services/store/memstore/field"
- "veyron/services/store/memstore/refs"
- "veyron/services/store/raw"
-
- "veyron/runtimes/google/lib/functional"
- "veyron2/security"
- "veyron2/storage"
- "veyron2/verror"
-)
-
-// MutableSnapshot is a mutable version of the snapshot. It contains a Snapshot
-// and a Mutations set.
-//
-// Reference counting is used to collect garbage that is no longer reachable
-// using a pathname. References can be cyclic, so the reference counting
-// includes cycle detection.
-//
- // References never dangle. This restricts what operations can be performed.
- // It is not allowed to add an object to the state that has a dangling
- // reference; so if you want to set up a cycle atomically, you should add the
- // objects to the state without references, then mutate the objects to add
- // the cyclic references. This can be done in a single transaction, so that
- // the intermediate state is not observable.
-//
-// TODO(jyh): Alternatively, we could relax the object operations so that
-// objects can be added to a transaction with dangling references. The
-// references would still be checked at Commit time, aborting the transaction if
-// dangling references are detected. However, it would mean that intermediate
-// states in the transaction would be inconsistent. This might be fine, but we
-// should decide whether transaction operations like Search() are allowed on
-// these inconsistent states while a transaction is being constructed. In the
-// meantime, we keep the strict approach, where intermediate states are
-// consistent.
-type MutableSnapshot struct {
- snapshot
-
- // gcRoots contains the nodes that should be considered for garbage
- // collection.
- gcRoots map[storage.ID]struct{}
-
- // mutations is the current set of changes.
- mutations *Mutations
-
- // deletions is the current set of deletions. The version is at
- // the point of deletion.
- deletions map[storage.ID]raw.Version
-}
-
-// Mutations represents a set of mutations to the state. This is used to
-// collect the operations in a transaction.
-type Mutations struct {
- // Timestamp corresponds to the time that the mutations were applied to the
- // state. It is set when applyMutations() is called. The value is based
- // on Unix time, the number of nanoseconds elapsed since January 1, 1970
- // UTC. However, it is monotonically increasing so that subsequent
- // mutations have increasing timestamps that differ by at least one.
- Timestamp uint64
-
- // RootID is the storage.ID of the root value. Valid only if SetRootID is true.
- RootID storage.ID
- SetRootID bool
-
- // Preconditions is the set of expected versions.
- Preconditions map[storage.ID]raw.Version
-
- // Delta is the set of changes.
- Delta map[storage.ID]*Mutation
-
- // Deletions contains the IDs for values that have been deleted. The
- // version is taken from the time of deletion. It is like a weak
- // precondition, where *if* the value exists, it should have the specified
- // version. The target snapshot is allowed to perform garbage collection
- // too, so the deleted value is not required to exist.
- Deletions map[storage.ID]raw.Version
-}
-
- // Mutation is an update to a single value in the state.
-type Mutation struct {
- // Postcondition is the version after the mutation.
- Postcondition raw.Version
-
- // Value is the new value.
- Value interface{}
-
- // Dir is the set of new directory entries.
- //
- // TODO(jyh): Replace this with a delta, to support large directories.
- Dir []*raw.DEntry
-
- // refs is the set of references in the Value and Dir.
- refs refs.Set
-}
-
-var (
- errBadPath = verror.BadArgf("malformed path")
- errBadRef = verror.BadArgf("value has dangling references")
- errBadValue = verror.BadArgf("value has the wrong type")
- errDuplicatePutMutation = verror.BadArgf("duplicate calls to PutMutation for the same ID")
- errNotFound = verror.NotFoundf("not found")
- errPreconditionFailed = verror.Abortedf("precondition failed")
-
- nullID storage.ID
-)
-
-// newMutations returns a fresh Mutations set.
-func newMutations() *Mutations {
- var m Mutations
- m.reset()
- return &m
-}
-
-// reset resets the Mutations state.
-func (m *Mutations) reset() {
- m.Preconditions = make(map[storage.ID]raw.Version)
- m.Delta = make(map[storage.ID]*Mutation)
- m.Deletions = make(map[storage.ID]raw.Version)
-}
-
- // addPrecondition adds a precondition if it does not already exist.
-func (m *Mutations) addPrecondition(c *Cell) {
- // Set the precondition if not already set. For cells that have been
- // created in the current Mutations/transaction, the value stored in
- // m.Preconditions[c.ID] will be zero, but c.Version is the initial non-zero
- // version number, so we guard against overriding it.
- if _, ok := m.Preconditions[c.ID]; !ok {
- m.Preconditions[c.ID] = c.Version
- }
-}
-
-// UpdateRefs updates the refs field in the Mutation.
-func (m *Mutation) UpdateRefs() {
- r := refs.NewBuilder()
- r.AddValue(m.Value)
- r.AddDEntries(m.Dir)
- m.refs = r.Get()
-}
-
- // newMutableSnapshot returns an empty mutable snapshot.
-func newMutableSnapshot(admin security.PublicID) *MutableSnapshot {
- return &MutableSnapshot{
- snapshot: newSnapshot(admin),
- gcRoots: make(map[storage.ID]struct{}),
- mutations: newMutations(),
- deletions: make(map[storage.ID]raw.Version),
- }
-}
-
-// Mutations returns the set of mutations in the snapshot.
-func (sn *MutableSnapshot) Mutations() *Mutations {
- return sn.mutations
-}
-
- // GetSnapshot creates a read-only copy of the snapshot.
-func (sn *MutableSnapshot) GetSnapshot() Snapshot {
- // Perform a GC to clear out gcRoots.
- sn.gc()
- cp := sn.snapshot
- return &cp
-}
-
-// deepCopy creates a copy of the snapshot. Mutations to the copy do not affect
-// the original, and vice versa.
-func (sn *MutableSnapshot) deepCopy() *MutableSnapshot {
- // Perform a GC to clear out gcRoots.
- sn.gc()
- cp := *sn
- cp.mutations = newMutations()
- cp.gcRoots = make(map[storage.ID]struct{})
- return &cp
-}
-
- // deref performs a lookup based on storage.ID, panicking if the cell is not found.
-// This is used internally during garbage collection when we can assume that
-// there are no dangling references.
-func (sn *MutableSnapshot) deref(id storage.ID) *Cell {
- c := sn.Find(id)
- if c == nil {
- panic(fmt.Sprintf("Dangling reference: %s", id))
- }
-
- // Copy the cell to ensure the original state is not modified.
- //
- // TODO(jyh): This can be avoided if the cell has already been copied in the
- // current transaction.
- cp := *c
- sn.idTable = sn.idTable.Put(&cp)
- return &cp
-}
-
-// delete removes the cell from the state.
-func (sn *MutableSnapshot) delete(c *Cell) {
- sn.idTable = sn.idTable.Remove(c)
- sn.deletions[c.ID] = c.Version
-}
-
-// put adds a cell to the state, also adding the new value to the Mutations set.
-func (sn *MutableSnapshot) put(c *Cell) {
- mu := sn.mutations
- d := refs.FlattenDir(c.Dir)
- m, ok := mu.Delta[c.ID]
- if ok {
- m.Value = c.Value
- m.refs = c.refs
- m.Dir = d
- } else {
- mu.Preconditions[c.ID] = c.Version
- m = &Mutation{
- Postcondition: raw.NewVersion(),
- Value: c.Value,
- Dir: d,
- refs: c.refs,
- }
- mu.Delta[c.ID] = m
- }
- c.Version = m.Postcondition
- sn.idTable = sn.idTable.Put(c)
-}
-
-// add adds a new Value to the state, updating reference counts. Fails if the
-// new value contains dangling references.
-func (sn *MutableSnapshot) add(id storage.ID, v interface{}) (*Cell, error) {
- c := sn.Find(id)
- if c == nil {
- // There is no current value, so create a new cell for the value and add
- // it.
- c = &Cell{
- ID: id,
- refcount: 0,
- Value: v,
- Dir: refs.EmptyDir,
- inRefs: refs.Empty,
- Version: raw.NoVersion,
- }
- c.setRefs()
- if !sn.refsExist(c.refs) {
- return nil, errBadRef
- }
- sn.put(c)
- sn.addRefs(id, c.refs)
- return c, nil
- }
-
- // There is already a value in the state, so replace it with the new value.
- return sn.replaceValue(c, v)
-}
-
- // replaceValue updates cell.Value.
-func (sn *MutableSnapshot) replaceValue(c *Cell, v interface{}) (*Cell, error) {
- cp := *c
- cp.Value = v
- cp.setRefs()
- if !sn.refsExist(cp.refs) {
- return nil, errBadRef
- }
- sn.put(&cp)
- sn.updateRefs(c.ID, c.refs, cp.refs)
- return &cp, nil
-}
-
- // replaceDir updates cell.Dir.
-func (sn *MutableSnapshot) replaceDir(c *Cell, d functional.Set) (*Cell, error) {
- cp := *c
- cp.Dir = d
- cp.setRefs()
- if !sn.refsExist(cp.refs) {
- return nil, errBadRef
- }
- sn.put(&cp)
- sn.updateRefs(c.ID, c.refs, cp.refs)
- return &cp, nil
-}
-
-// Get returns the value for a path.
-func (sn *MutableSnapshot) Get(pid security.PublicID, path storage.PathName) (*storage.Entry, error) {
- cell, suffix, v := sn.resolveCell(path, sn.mutations)
- if cell == nil {
- return nil, errNotFound
- }
- var e *storage.Entry
- if len(suffix) == 0 {
- e = cell.GetEntry()
- } else {
- e = newSubfieldEntry(v)
- }
- return e, nil
-}
-
-// Put adds a new value to the state or replaces an existing one. Returns
- // the *storage.Stat for the enclosing cell.
-func (sn *MutableSnapshot) Put(pid security.PublicID, path storage.PathName, v interface{}) (*storage.Stat, error) {
- c, err := sn.putValueByPath(path, v)
- if err != nil {
- return nil, err
- }
- return c.getStat(), nil
-}
-
-func (sn *MutableSnapshot) putValueByPath(path storage.PathName, v interface{}) (*Cell, error) {
- v = deepcopy(v)
-
- if path.IsRoot() {
- return sn.putRoot(v)
- }
- return sn.putValue(path, v)
-}
-
-// putValue is called for a normal Put() operation, where a new value is being
-// added, and as a consequence the containing "parent" value is being modified.
-// There are two cases: 1) the value <v> is written directly into the parent, or
-// 2) the field has type storage.ID. In the latter case, the <id> is assigned
-// into the parent, and the value id->v is added to the idTable.
-func (sn *MutableSnapshot) putValue(path storage.PathName, v interface{}) (*Cell, error) {
- // Find the parent object.
- c, suffix, _ := sn.resolveCell(path[:len(path)-1], sn.mutations)
- if c == nil {
- return nil, errNotFound
- }
- value := deepcopy(c.Value)
- p, s := field.Get(makeInnerReference(value), suffix)
- if len(s) != 0 {
- return nil, errNotFound
- }
-
- // Add value to the parent.
- name := path[len(path)-1]
- result, id := field.Set(p, name, v)
- switch result {
- case field.SetFailedNotFound:
- if len(suffix) != 0 {
- return nil, errNotFound
- }
- return sn.putDirEntry(c, name, v)
- case field.SetFailedWrongType:
- return nil, errBadValue
- case field.SetAsID:
- nc, err := sn.add(id, v)
- if err != nil {
- return nil, err
- }
- // The sn.add may have modified the cell, so fetch it again.
- if _, err = sn.replaceValue(sn.Find(c.ID), value); err != nil {
- return nil, err
- }
- return nc, nil
- case field.SetAsValue:
- return sn.replaceValue(c, value)
- }
- panic("not reached")
-}
-
-// putDirEntry replaces or adds a directory entry.
-func (sn *MutableSnapshot) putDirEntry(c *Cell, name string, v interface{}) (*Cell, error) {
- r := &refs.Ref{Path: refs.NewSingletonPath(name)}
- if id, ok := v.(storage.ID); ok {
- ncell := sn.Find(id)
- if ncell == nil {
- return nil, errNotFound
- }
- r.ID = id
- dir := c.Dir.Put(r)
- if _, err := sn.replaceDir(c, dir); err != nil {
- return nil, err
- }
- return ncell, nil
- }
-
- x, ok := c.Dir.Get(r)
- if !ok {
- // The entry does not exist yet; create it.
- id := storage.NewID()
- ncell, err := sn.add(id, v)
- if err != nil {
- return nil, err
- }
- r.ID = id
- // The sn.add may have modified the cell, so fetch it again.
- c = sn.Find(c.ID)
- dir := c.Dir.Put(r)
- if _, err := sn.replaceDir(c, dir); err != nil {
- return nil, err
- }
- return ncell, nil
- }
-
- // Replace the existing value.
- return sn.add(x.(*refs.Ref).ID, v)
-}
-
-// putRoot replaces the root.
-func (sn *MutableSnapshot) putRoot(v interface{}) (*Cell, error) {
- id := sn.rootID
- c := sn.Find(id)
- if c == nil {
- id = storage.NewID()
- }
-
- // Add the new element.
- ncell, err := sn.add(id, v)
- if err != nil {
- return nil, err
- }
-
- // Redirect the rootID.
- if c == nil {
- sn.ref(id)
- sn.rootID = id
- sn.mutations.RootID = id
- sn.mutations.SetRootID = true
- }
- return ncell, nil
-}
-
-// Remove removes a value.
-func (sn *MutableSnapshot) Remove(pid security.PublicID, path storage.PathName) error {
- if path.IsRoot() {
- sn.unref(sn.rootID)
- sn.rootID = nullID
- sn.mutations.RootID = nullID
- sn.mutations.SetRootID = true
- return nil
- }
-
- // Split the names into directory and field parts.
- cell, suffix, _ := sn.resolveCell(path[:len(path)-1], sn.mutations)
- if cell == nil {
- return errNotFound
- }
-
- // Remove the field.
- name := path[len(path)-1]
- r := &refs.Ref{Path: refs.NewSingletonPath(name)}
- if cell.Dir.Contains(r) {
- _, err := sn.replaceDir(cell, cell.Dir.Remove(r))
- return err
- }
- value := deepcopy(cell.Value)
- p, _ := field.Get(value, suffix)
- if !field.Remove(p, name) {
- return errNotFound
- }
-
- _, err := sn.replaceValue(cell, value)
- return err
-}
-
-// PutMutation puts an externally constructed mutation. Does not update cells
-// or refs, so regular Puts, Gets and Removes may be inconsistent.
-func (sn *MutableSnapshot) PutMutation(extmu raw.Mutation) error {
- mus := sn.mutations
- id := extmu.ID
- // Check that a mutation has not already been put for this id.
- if _, ok := mus.Delta[id]; ok {
- return errDuplicatePutMutation
- }
- // If the object has no version, it was deleted.
- if extmu.Version == raw.NoVersion {
- mus.Deletions[id] = extmu.PriorVersion
- if extmu.IsRoot {
- mus.SetRootID = true
- mus.RootID = nullID
- }
- return nil
- }
- if extmu.IsRoot {
- mus.SetRootID = true
- mus.RootID = id
- }
- mus.Preconditions[id] = extmu.PriorVersion
- mu := &Mutation{
- Postcondition: extmu.Version,
- Value: extmu.Value,
- Dir: unflattenDir(extmu.Dir),
- }
- mu.UpdateRefs()
- mus.Delta[id] = mu
- return nil
-}
-
-// TODO(tilaks): revisit when vsync.Mutation.Dir is of type []*raw.DEntry
-// (once we support optional structs in the idl).
-func unflattenDir(fdir []raw.DEntry) []*raw.DEntry {
- pdir := make([]*raw.DEntry, len(fdir))
- for i := range fdir {
- pdir[i] = &fdir[i]
- }
- return pdir
-}
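
The cycle discipline described in the MutableSnapshot comment above can be exercised with the Dir test type defined in the tests that follow. A minimal sketch, assuming those package-level test helpers (rootPublicID, Dir) and eliding error handling; sketchCycle is an illustrative name, not part of the package:

    func sketchCycle() {
            sn := newMutableSnapshot(rootPublicID)

            // First add both objects with no cross-references.
            rootStat, _ := sn.Put(rootPublicID, storage.ParsePath("/"), &Dir{})
            sn.Put(rootPublicID, storage.ParsePath("/Entries/b"), &Dir{})

            // Then mutate "b" to point back at the root. No reference ever
            // dangles, and both steps land in the same Mutations set, so the
            // intermediate state is not observable once committed.
            back := &Dir{Entries: map[string]storage.ID{"up": rootStat.ID}}
            sn.Put(rootPublicID, storage.ParsePath("/Entries/b"), back)
    }
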
diff --git a/services/store/memstore/state/mutable_snapshot_test.go b/services/store/memstore/state/mutable_snapshot_test.go
deleted file mode 100644
index df567ba..0000000
--- a/services/store/memstore/state/mutable_snapshot_test.go
+++ /dev/null
@@ -1,383 +0,0 @@
-package state
-
-import (
- "runtime"
- "testing"
-
- "veyron/services/store/memstore/refs"
-
- "veyron2/storage"
- "veyron2/verror"
- "veyron2/vom"
-)
-
-// Dir is a simple directory.
-type Dir struct {
- Entries map[string]storage.ID
-}
-
-// Value is a simple value.
-type Value struct {
- X int
-}
-
-// Nest is a struct that nests a Value.
-type Nest struct {
- V Value
-}
-
-var (
- root = &Dir{}
- rootPath = storage.ParsePath("/")
-)
-
-func init() {
- vom.Register(&Dir{})
-}
-
-func mkdir(t *testing.T, sn *MutableSnapshot, path string) (storage.ID, interface{}) {
- _, file, line, _ := runtime.Caller(1)
- dir := &Dir{}
- stat, err := sn.Put(rootPublicID, storage.ParsePath(path), dir)
- if err != nil || stat == nil {
- t.Errorf("%s(%d): mkdir %s: %s", file, line, path, err)
- return storage.ID{}, dir
- }
- m, ok := sn.mutations.Delta[stat.ID]
- if !ok {
- t.Errorf("%s(%d): Expected Mutation: %v %v", file, line, stat, sn.mutations)
- } else if _, ok := m.Value.(*Dir); !ok {
- t.Fatalf("%s(%d): %s: not a directory: %v -> %v", file, line, path, stat, m.Value)
- }
- return stat.ID, dir
-}
-
-func expectExists(t *testing.T, sn *MutableSnapshot, path string, id storage.ID) {
- _, file, line, _ := runtime.Caller(1)
- if !sn.idTable.Contains(&Cell{ID: id}) {
- t.Errorf("%s(%d): does not exist: %s", file, line, id)
- }
- e, err := sn.Get(rootPublicID, storage.ParsePath(path))
- if err != nil {
- t.Errorf("%s(%d): does not exist: %s", file, line, path)
- }
- if e.Stat.ID != id {
- t.Errorf("%s(%d): expected id to be %v, but was %v", file, line, id, e.Stat.ID)
- }
-}
-
-func expectNotExists(t *testing.T, sn *MutableSnapshot, id storage.ID) {
- if sn.idTable.Contains(&Cell{ID: id}) {
- _, file, line, _ := runtime.Caller(1)
- t.Errorf("%s(%d): should not exist: %s", file, line, id)
- }
-}
-
-func expectValue(t *testing.T, sn *MutableSnapshot, path string, v interface{}) {
- _, file, line, _ := runtime.Caller(1)
- cell, _, _ := sn.resolveCell(storage.ParsePath(path), nil)
- if cell == nil {
- t.Errorf("%s(%d): path does not exist: %s", file, line, path)
- }
- if cell.Value == nil {
- t.Errorf("%s(%d): cell has a nil value: %s", file, line, path)
- }
-}
-
-func checkInRefs(t *testing.T, sn *MutableSnapshot) {
- _, file, line, _ := runtime.Caller(1)
-
- sn.idTable.Iter(func(it interface{}) bool {
- c1 := it.(*Cell)
-
- // Check that each out-ref has an in-ref.
- c1.refs.Iter(func(it interface{}) bool {
- r := it.(*refs.Ref)
- c2 := sn.Find(r.ID)
- if c2 == nil {
- t.Errorf("%s(%d): dangling reference: %s", file, line, r.ID)
- } else if !c2.inRefs.Contains(&refs.Ref{ID: c1.ID, Path: r.Path}) {
- t.Errorf("%s(%d): inRef does not exist: %s <- %s", file, line, c1.ID, c2.ID)
- }
- return true
- })
-
- // Check that each in-ref has an out-ref.
- c1.inRefs.Iter(func(it interface{}) bool {
- r := it.(*refs.Ref)
- c2 := sn.Find(r.ID)
- if c2 == nil {
- t.Errorf("%s(%d): dangling reference: %s", file, line, r.ID)
- } else if !c2.refs.Contains(&refs.Ref{ID: c1.ID, Path: r.Path}) {
- t.Errorf("%s(%d): outRef does not exist: %s -> %s", file, line, c2.ID, c1.ID)
- }
- return true
- })
- return true
- })
-}
-
-// Set up a root directory.
-func TestRoot(t *testing.T) {
- sn := newMutableSnapshot(rootPublicID)
-
- // There should be no root.
- v, err := sn.Get(rootPublicID, rootPath)
- if v != nil {
- t.Errorf("Expected nil for /: %v", v)
- }
- if err == nil {
- t.Errorf("Expected error")
- }
-
- // Add the root object.
- stat, err := sn.Put(rootPublicID, rootPath, root)
- if err != nil {
- t.Errorf("Error adding root: %s", err)
- }
- if sn.mutations.RootID != stat.ID {
- t.Errorf("Expected root update")
- }
- {
- p, ok := sn.mutations.Preconditions[sn.mutations.RootID]
- if !ok {
- t.Errorf("Error fetching root")
- }
- if p != 0 {
- t.Errorf("Expected 0 precondition: %d", p)
- }
- }
-
- // Fetch the root object, and compare.
- v, err = sn.Get(rootPublicID, rootPath)
- if err != nil {
- t.Errorf("Error fetching root: %s", err)
- }
-
- checkInRefs(t, sn)
-}
-
-// Make a directory tree.
-func TestDirTree(t *testing.T) {
- sn := newMutableSnapshot(rootPublicID)
-
- id1, d1 := mkdir(t, sn, "/")
- id2, d2 := mkdir(t, sn, "/Entries/a")
- id3, d3 := mkdir(t, sn, "/Entries/a/Entries/b")
- id4, d4 := mkdir(t, sn, "/Entries/a/Entries/b/Entries/c")
- id5, d5 := mkdir(t, sn, "/Entries/a/Entries/b/Entries/d")
- expectExists(t, sn, "/", id1)
- expectExists(t, sn, "/Entries/a", id2)
- expectExists(t, sn, "/Entries/a/Entries/b", id3)
- expectExists(t, sn, "/Entries/a/Entries/b/Entries/c", id4)
- expectExists(t, sn, "/Entries/a/Entries/b/Entries/d", id5)
-
- // Parent directory has to exist.
- d := &Dir{}
- if _, err := sn.Put(rootPublicID, storage.ParsePath("/a/c/e"), d); err == nil {
- t.Errorf("Expected error")
- }
-
- expectValue(t, sn, "/", d1)
- expectValue(t, sn, "/Entries/a", d2)
- expectValue(t, sn, "/Entries/a/Entries/b", d3)
- expectValue(t, sn, "/Entries/a/Entries/b/Entries/c", d4)
- expectValue(t, sn, "/Entries/a/Entries/b/Entries/d", d5)
- checkInRefs(t, sn)
-
- // Remove part of the tree.
- if err := sn.Remove(rootPublicID, storage.ParsePath("/Entries/a/Entries/b")); err != nil {
- t.Errorf("Unexpected error: %s", err)
- }
- sn.gc()
- expectExists(t, sn, "/", id1)
- expectExists(t, sn, "/Entries/a", id2)
- expectNotExists(t, sn, id3)
- expectNotExists(t, sn, id4)
- expectNotExists(t, sn, id5)
- checkInRefs(t, sn)
-}
-
-// Make some references.
-func TestRef(t *testing.T) {
- sn := newMutableSnapshot(rootPublicID)
-
- rootID, _ := mkdir(t, sn, "/")
- ePath := storage.ParsePath("/Entries/a")
-
- // Not possible to create a Dir with a dangling reference.
- d := &Dir{Entries: map[string]storage.ID{"ref": storage.NewID()}}
- if _, err := sn.Put(rootPublicID, ePath, d); !verror.Is(err, verror.BadArg) {
- t.Errorf("Error should be %v: got %v", verror.BadArg, err)
- }
-
- // Set the Ref to refer to the root.
- d.Entries["ref"] = rootID
- stat, err := sn.Put(rootPublicID, ePath, d)
- if err != nil {
- t.Errorf("Unexpected error: %s", err)
- }
- expectExists(t, sn, "/Entries/a", stat.ID)
- checkInRefs(t, sn)
-
- // Change the ref to refer to itself.
- d.Entries["ref"] = stat.ID
- if stat2, err := sn.Put(rootPublicID, ePath, d); err != nil || stat2.ID != stat.ID {
- t.Errorf("Unexpected error: %s", err)
- }
- sn.gc()
- expectExists(t, sn, "/Entries/a", stat.ID)
- checkInRefs(t, sn)
-
- // Remove it.
- if err := sn.Remove(rootPublicID, ePath); err != nil {
- t.Errorf("Unexpected error: %s", err)
- }
- sn.gc()
- expectNotExists(t, sn, stat.ID)
- checkInRefs(t, sn)
-}
-
-// Make an implicit directory tree.
-func TestImplicitDirTree(t *testing.T) {
- sn := newMutableSnapshot(rootPublicID)
-
- id1, d1 := mkdir(t, sn, "/")
- id2, d2 := mkdir(t, sn, "/a")
- id3, d3 := mkdir(t, sn, "/a/b")
- id4, d4 := mkdir(t, sn, "/a/b/c")
- id5, d5 := mkdir(t, sn, "/a/b/c/d")
- expectExists(t, sn, "/", id1)
- expectExists(t, sn, "/a", id2)
- expectExists(t, sn, "/a/b", id3)
- expectExists(t, sn, "/a/b/c", id4)
- expectExists(t, sn, "/a/b/c/d", id5)
- checkInRefs(t, sn)
-
- // Parent directory has to exist.
- d := &Dir{}
- if _, err := sn.Put(rootPublicID, storage.ParsePath("/a/c/e"), d); err == nil {
- t.Errorf("Expected error")
- }
-
- expectValue(t, sn, "/", d1)
- expectValue(t, sn, "/a", d2)
- expectValue(t, sn, "/a/b", d3)
- expectValue(t, sn, "/a/b/c", d4)
- expectValue(t, sn, "/a/b/c/d", d5)
- checkInRefs(t, sn)
-
- // Remove part of the tree.
- if err := sn.Remove(rootPublicID, storage.ParsePath("/a/b")); err != nil {
- t.Errorf("Unexpected error: %s", err)
- }
- sn.gc()
- expectExists(t, sn, "/", id1)
- expectExists(t, sn, "/a", id2)
- expectNotExists(t, sn, id3)
- expectNotExists(t, sn, id4)
- expectNotExists(t, sn, id5)
- checkInRefs(t, sn)
-}
-
-// Tests that nil maps are converted to empty maps.
-func TestPutToNilMap(t *testing.T) {
- sn := newMutableSnapshot(rootPublicID)
-
- var m map[string]interface{}
- if _, err := sn.Put(rootPublicID, storage.PathName{}, m); err != nil {
- t.Error("failure during nil map put: ", err)
- }
- if _, err := sn.Put(rootPublicID, storage.PathName{"z"}, "z"); err != nil {
- t.Error("failure during put of child of nil map: ", err)
- }
-}
-
-// Tests that slices are settable so that we can append.
-func TestAppendToSlice(t *testing.T) {
- sn := newMutableSnapshot(rootPublicID)
- if _, err := sn.Put(rootPublicID, storage.ParsePath("/"), []int{}); err != nil {
- t.Error("failure during put of empty slice: ", err)
- }
- if _, err := sn.Put(rootPublicID, storage.ParsePath("/@"), 1); err != nil {
- t.Error("failure during append to slice: ", err)
- }
-}
-
-// Replace a struct value with a hard link.
-func TestReplaceStructWithLink(t *testing.T) {
- sn := newMutableSnapshot(rootPublicID)
-
- mkdir(t, sn, "/")
- x := &Value{X: 1}
- stat, err := sn.Put(rootPublicID, storage.ParsePath("/a"), x)
- if err != nil {
- t.Errorf("/a: %s", err)
- }
- x.X = 2
- if _, err := sn.Put(rootPublicID, storage.ParsePath("/b"), x); err != nil {
- t.Errorf("/b: %s", err)
- }
- if v, err := sn.Get(rootPublicID, storage.ParsePath("/a")); err != nil || v.Value.(*Value).X != 1 {
- t.Errorf("Expected 1, got %v", v)
- }
- if v, err := sn.Get(rootPublicID, storage.ParsePath("/b")); err != nil || v.Value.(*Value).X != 2 {
- t.Errorf("Expected 2, got %v", v)
- }
-
- // Create a link.
- if _, err := sn.Put(rootPublicID, storage.ParsePath("/b"), stat.ID); err != nil {
- t.Errorf("/b: %s", err)
- }
- if v, err := sn.Get(rootPublicID, storage.ParsePath("/b")); err != nil || v.Value.(*Value).X != 1 {
- t.Errorf("Expected 1, got %v", v)
- }
- x.X = 3
- if _, err := sn.Put(rootPublicID, storage.ParsePath("/b"), x); err != nil {
- t.Errorf("/b: %s", err)
- }
- if v, err := sn.Get(rootPublicID, storage.ParsePath("/a")); err != nil || v.Value.(*Value).X != 3 {
- t.Errorf("Expected 3, got %v", v)
- }
-}
-
-// Put a value of the wrong type to a subfield.
-func TestPutWrongType(t *testing.T) {
- sn := newMutableSnapshot(rootPublicID)
-
- _, err := sn.Put(rootPublicID, rootPath, Value{7})
- if err != nil {
- t.Errorf("/: %v", err)
- }
- _, err = sn.Put(rootPublicID, storage.ParsePath("/X"), "string")
- if !verror.Is(err, verror.BadArg) {
- t.Errorf("/X: %v", err)
- }
- v, err := sn.Get(rootPublicID, storage.ParsePath("/X"))
- if err != nil {
- t.Errorf("/X: %v", err)
- }
- if v.Value != 7 {
- t.Errorf("Expected 7, got %v", v.Value)
- }
-
- _, err = sn.Put(rootPublicID, storage.ParsePath("/a"), Nest{Value{42}})
- if err != nil {
- t.Errorf("/a: %v", err)
- }
- _, err = sn.Put(rootPublicID, storage.ParsePath("/a/V"), "string")
- if !verror.Is(err, verror.BadArg) {
- t.Errorf("/a/V: %v", err)
- }
- _, err = sn.Put(rootPublicID, storage.ParsePath("/a/V/X"), "string")
- if !verror.Is(err, verror.BadArg) {
- t.Errorf("/a/V/X: %v", err)
- }
- v, err = sn.Get(rootPublicID, storage.ParsePath("/a/V/X"))
- if err != nil {
- t.Errorf("/a/V/X: %v", err)
- }
- if v.Value != 42 {
- t.Errorf("Expected 42, got %v", v.Value)
- }
-}
diff --git a/services/store/memstore/state/perm.go b/services/store/memstore/state/perm.go
deleted file mode 100644
index 7bf2df5..0000000
--- a/services/store/memstore/state/perm.go
+++ /dev/null
@@ -1 +0,0 @@
-package state
diff --git a/services/store/memstore/state/refs.go b/services/store/memstore/state/refs.go
deleted file mode 100644
index 77c8560..0000000
--- a/services/store/memstore/state/refs.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package state
-
-import (
- "veyron/services/store/memstore/refs"
-
- "veyron2/storage"
-)
-
-// refsExist returns true iff there is a value in the state for each reference.
-func (sn *snapshot) refsExist(ids refs.Set) bool {
- for it := ids.Iterator(); it.IsValid(); it.Next() {
- id := it.Get().(*refs.Ref).ID
- if !sn.idTable.Contains(&Cell{ID: id}) {
- return false
- }
- }
- return true
-}
-
- // updateRefs takes the reference sets <before> and <after> an update. Any
- // reference in (<before> - <after>) is decremented, and any reference in
- // (<after> - <before>) is incremented.
-func (sn *MutableSnapshot) updateRefs(id storage.ID, beforeRefs, afterRefs refs.Set) {
- it1 := beforeRefs.Iterator()
- it2 := afterRefs.Iterator()
- for it1.IsValid() && it2.IsValid() {
- r1 := it1.Get().(*refs.Ref)
- r2 := it2.Get().(*refs.Ref)
- cmp := storage.CompareIDs(r1.ID, r2.ID)
- switch {
- case cmp < 0:
- sn.removeRef(id, r1)
- it1.Next()
- case cmp > 0:
- sn.addRef(id, r2)
- it2.Next()
- case cmp == 0:
- it1.Next()
- it2.Next()
- }
- }
- for ; it1.IsValid(); it1.Next() {
- sn.removeRef(id, it1.Get().(*refs.Ref))
- }
- for ; it2.IsValid(); it2.Next() {
- sn.addRef(id, it2.Get().(*refs.Ref))
- }
-}
-
-func (sn *MutableSnapshot) addRefs(id storage.ID, r refs.Set) {
- r.Iter(func(it interface{}) bool {
- sn.addRef(id, it.(*refs.Ref))
- return true
- })
-}
-
-func (sn *MutableSnapshot) removeRefs(id storage.ID, r refs.Set) {
- r.Iter(func(it interface{}) bool {
- sn.removeRef(id, it.(*refs.Ref))
- return true
- })
-}
-
-func (sn *MutableSnapshot) addRef(id storage.ID, r *refs.Ref) {
- // Update refcount.
- sn.ref(r.ID)
-
- // Add the inverse link.
- c := sn.Find(r.ID)
- c.inRefs = c.inRefs.Put(&refs.Ref{ID: id, Path: r.Path})
-}
-
-func (sn *MutableSnapshot) removeRef(id storage.ID, r *refs.Ref) {
- // Remove the inverse link.
- c := sn.deref(r.ID)
- c.inRefs = c.inRefs.Remove(&refs.Ref{ID: id, Path: r.Path})
-
- // Update refcount.
- sn.unref(r.ID)
-}
diff --git a/services/store/memstore/state/snapshot.go b/services/store/memstore/state/snapshot.go
deleted file mode 100644
index 80f6372..0000000
--- a/services/store/memstore/state/snapshot.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package state
-
-import (
- "reflect"
-
- "veyron/services/store/memstore/field"
- "veyron/services/store/memstore/refs"
-
- "veyron2/security"
- "veyron2/storage"
-)
-
-type Snapshot interface {
- // NewIterator returns an Iterator that starts with the value at <path>.
- // pathFilter is used to automatically limit traversal of certain paths.
- // If filter is given, it is used to limit traversal beneath certain paths
- // and to limit the results of the iteration. If filter is nil, all
- // descendants of the specified path are returned.
- NewIterator(pid security.PublicID, path storage.PathName, pathFilter PathFilter, filter IterFilter) Iterator
-
- // Find performs a lookup based on storage.ID, returning nil if the cell is not found.
- Find(id storage.ID) *Cell
-
- // Get returns the value for a path.
- Get(pid security.PublicID, path storage.PathName) (*storage.Entry, error)
-}
-
-// Snapshot keeps the state for the store. The snapshot contains a dictionary
- // and a root:
-//
-// idTable : storage.ID -> storage.Value
-// rootID : storage.ID
-//
-// Snapshots support isolation by using a functional/immutable dictionary for the
-// idTable.
-//
-// Paths are resolved by traversing the snapshot from the root, using reflection to
-// traverse fields within each of the values. For example, to resolve a path
-// /a/b/c/d/e/f/g/h, we perform the following steps.
-//
-// id1 := idTable[rootID].a.b.c
-// id2 := idTable[id1].d.e
-// id3 := idTable[id2].f.g.h
-// return id3
-//
-// If any of those resolution steps fails (if the idTable doesn't contain an
-// entry, or a field path like .a.b.c doesn't exist), then the resolution fails.
-type snapshot struct {
- // idTable is the dictionary of values. We use functional sets to make it
- // easy to perform snapshotting.
- idTable cellSet
-
- // rootID is the identifier of the root object.
- rootID storage.ID
-}
-
-// newSnapshot returns an empty snapshot.
-func newSnapshot(admin security.PublicID) snapshot {
- sn := snapshot{
- idTable: emptyIDTable,
- }
- return sn
-}
-
-// Find performs a lookup based on storage.ID, returning nil if the cell is not found.
-func (sn *snapshot) Find(id storage.ID) *Cell {
- v, ok := sn.idTable.Get(&Cell{ID: id})
- if !ok {
- return nil
- }
- return v.(*Cell)
-}
-
-// Get implements the Snapshot method.
-func (sn *snapshot) Get(pid security.PublicID, path storage.PathName) (*storage.Entry, error) {
- // Pass nil for 'mutations' since the snapshot is immutable.
- cell, suffix, v := sn.resolveCell(path, nil)
- if cell == nil {
- return nil, errNotFound
- }
- var e *storage.Entry
- if len(suffix) == 0 {
- e = cell.GetEntry()
- } else {
- e = newSubfieldEntry(v)
- }
- return e, nil
-}
-
-// resolveCell performs a path-based lookup, traversing the state from the root.
-//
- // Returns (cell, suffix, v), where cell is the cell containing the value,
- // suffix is the path to the value within that cell, and v is the value
- // itself. If the operation fails, the returned cell is nil.
-func (sn *snapshot) resolveCell(path storage.PathName, mu *Mutations) (*Cell, storage.PathName, interface{}) {
- cell := sn.Find(sn.rootID)
- if cell == nil {
- return nil, nil, nil
- }
- for {
- if mu != nil {
- mu.addPrecondition(cell)
- }
- var v reflect.Value
- var suffix storage.PathName
- v, suffix = field.Get(cell.Value, path)
- x := v.Interface()
- if id, ok := x.(storage.ID); ok {
- // Always dereference IDs.
- cell = sn.Find(id)
- path = suffix
- continue
- }
- switch len(suffix) {
- case 0:
- // The path is fully resolved. We're done.
- return cell, path, x
- case len(path):
- // The path couldn't be resolved at all. It must be an entry in the
- // implicit directory.
- r, ok := cell.Dir.Get(&refs.Ref{Path: refs.NewSingletonPath(path[0])})
- if !ok {
- return nil, nil, nil
- }
- cell = sn.Find(r.(*refs.Ref).ID)
- path = path[1:]
- default:
- // The path is partially resolved, but it does not resolve to a
- // storage.ID. This is an error.
- return nil, nil, nil
- }
- }
-}
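
Note how the suffix returned by resolveCell drives Get: an empty suffix means the path named a cell itself, so Get returns the cell's own entry; a non-empty suffix means the path ended inside a stored value, so Get wraps the subfield. A small sketch, assuming the Value test type and rootPublicID helper from this package's tests (sketchSubfieldGet is an illustrative name):

    func sketchSubfieldGet() {
            sn := newMutableSnapshot(rootPublicID)
            sn.Put(rootPublicID, storage.ParsePath("/"), Value{X: 7})

            // "/" resolves with an empty suffix: the cell's own entry.
            root, _ := sn.Get(rootPublicID, storage.ParsePath("/"))

            // "/X" leaves the suffix "X": a subfield entry wrapping the int 7.
            x, _ := sn.Get(rootPublicID, storage.ParsePath("/X"))
            _, _ = root, x
    }
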
diff --git a/services/store/memstore/state/snapshot_test.go b/services/store/memstore/state/snapshot_test.go
deleted file mode 100644
index c5976cd..0000000
--- a/services/store/memstore/state/snapshot_test.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package state
-
-import (
- "runtime"
- "testing"
- "veyron2/storage"
-)
-
-func expectExistsImmutable(t *testing.T, sn Snapshot, path string) {
- _, file, line, _ := runtime.Caller(1)
- if _, err := sn.Get(rootPublicID, storage.ParsePath(path)); err != nil {
- t.Errorf("%s(%d): does not exist: %s", file, line, path)
- }
-}
-
-func expectNotExistsImmutable(t *testing.T, sn Snapshot, path string) {
- _, file, line, _ := runtime.Caller(1)
- if _, err := sn.Get(rootPublicID, storage.ParsePath(path)); err == nil {
- t.Errorf("%s(%d): should not exist: %s", file, line, path)
- }
-}
-
-// TestImmutableGet is very similar to TestImplicitDirTree in
-// mutable_snapshot_test.go except that it uses the immutable Snapshot type
-// instead of MutableSnapshot. The implementations of Get are different
-// between the two types.
-func TestImmutableGet(t *testing.T) {
- sn := newMutableSnapshot(rootPublicID)
-
- mkdir(t, sn, "/")
- mkdir(t, sn, "/Entries/a")
- mkdir(t, sn, "/Entries/a/Entries/b")
- mkdir(t, sn, "/Entries/a/Entries/b/Entries/c")
- mkdir(t, sn, "/Entries/a/Entries/b/Entries/d")
- expectExistsImmutable(t, &sn.snapshot, "/")
- expectExistsImmutable(t, &sn.snapshot, "/Entries/a")
- expectExistsImmutable(t, &sn.snapshot, "/Entries/a/Entries/b")
- expectExistsImmutable(t, &sn.snapshot, "/Entries/a/Entries/b/Entries/c")
- expectExistsImmutable(t, &sn.snapshot, "/Entries/a/Entries/b/Entries/d")
-
- // Remove part of the tree.
- if err := sn.Remove(rootPublicID, storage.ParsePath("/Entries/a/Entries/b")); err != nil {
- t.Errorf("Unexpected error: %s", err)
- }
- sn.gc()
- expectExistsImmutable(t, &sn.snapshot, "/")
- expectExistsImmutable(t, &sn.snapshot, "/Entries/a")
- expectNotExistsImmutable(t, &sn.snapshot, "/Entries/a/Entries/b")
- expectNotExistsImmutable(t, &sn.snapshot, "/Entries/a/Entries/b/Entries/c")
- expectNotExistsImmutable(t, &sn.snapshot, "/Entries/a/Entries/b/Entries/d")
-}
diff --git a/services/store/memstore/state/state.go b/services/store/memstore/state/state.go
deleted file mode 100644
index e6212a8..0000000
--- a/services/store/memstore/state/state.go
+++ /dev/null
@@ -1,165 +0,0 @@
-package state
-
-import (
- "time"
-
- "veyron/services/store/memstore/refs"
- "veyron/services/store/raw"
-
- "veyron2/security"
- "veyron2/storage"
-)
-
-type State struct {
- snapshot *MutableSnapshot
-
- // timestamp is the time of the last mutation applied, in nanoseconds since
- // the epoch. See comment for snapshot.Mutations.Timestamp.
- timestamp uint64
-}
-
-// refUpdate represents a reference change to a value.
-type refUpdate struct {
- id storage.ID
- before refs.Set
- after refs.Set
-}
-
-// New returns an empty State.
-func New(admin security.PublicID) *State {
- return &State{snapshot: newMutableSnapshot(admin)}
-}
-
-// Timestamp returns the timestamp of the latest mutation to the state in
-// nanoseconds.
-func (st *State) Timestamp() uint64 {
- return st.timestamp
-}
-
-// DeepCopy creates a copy of the state. Mutations to the copy do not affect
-// the original, and vice versa.
-func (st *State) DeepCopy() *State {
- return &State{st.MutableSnapshot(), st.timestamp}
-}
-
-// GC performs a manual garbage collection.
-func (st *State) GC() {
- st.snapshot.gc()
-}
-
-// Snapshot returns a read-only copy of the state.
-func (st *State) Snapshot() Snapshot {
- return st.snapshot.GetSnapshot()
-}
-
-// MutableSnapshot creates a copy of the state. Mutations to the copy do not
-// affect the original, and vice versa.
-func (st *State) MutableSnapshot() *MutableSnapshot {
- return st.snapshot.deepCopy()
-}
-
-// Deletions returns the set of IDs for values that have been deleted from
-// the state. Returns nil iff there have been no deletions.
-func (st *State) Deletions() *Mutations {
- if len(st.snapshot.deletions) == 0 {
- return nil
- }
-
- // Package the deletions into a transaction.
- var mu Mutations
- ts := st.timestamp + 1
- mu.Timestamp = ts
- mu.Deletions = st.snapshot.deletions
- st.timestamp = ts
- st.snapshot.deletions = make(map[storage.ID]raw.Version)
- return &mu
-}
-
-// ApplyMutations applies a set of mutations atomically.
-func (st *State) ApplyMutations(mu *Mutations) error {
- // Assign a timestamp.
- ts := uint64(time.Now().UnixNano())
- if ts <= st.timestamp {
- ts = st.timestamp + 1
- }
- mu.Timestamp = ts
-
- if err := st.snapshot.applyMutations(mu); err != nil {
- return err
- }
-
- st.timestamp = ts
- return nil
-}
-
-func (sn *MutableSnapshot) applyMutations(mu *Mutations) error {
- // Check the preconditions.
- table := sn.idTable
- for id, pre := range mu.Preconditions {
- c, ok := table.Get(&Cell{ID: id})
- // If the precondition is 0, it means that the cell is being created,
- // and it must not already exist. We get a precondition failure if pre
- // is 0 and the cell already exists, or pre is not 0 and the cell does
- // not already exist or have the expected version.
- if pre == 0 && ok || pre != 0 && (!ok || c.(*Cell).Version != pre) {
- return errPreconditionFailed
- }
- }
- for id, pre := range mu.Deletions {
- c, ok := table.Get(&Cell{ID: id})
- // The target is not required to exist.
- if ok && c.(*Cell).Version != pre {
- return errPreconditionFailed
- }
- }
-
- // Changes to the state begin now. These changes should not fail,
- // as we don't support rollback.
-
- // Apply the mutations.
- updates := make([]*refUpdate, 0, len(mu.Delta))
- for id, m := range mu.Delta {
- d := refs.BuildDir(m.Dir)
- cl, ok := table.Get(&Cell{ID: id})
- if !ok {
- c := &Cell{
- ID: id,
- Version: m.Postcondition,
- Value: m.Value,
- Dir: d,
- refs: m.refs,
- inRefs: refs.Empty,
- }
- table = table.Put(c)
- updates = append(updates, &refUpdate{id: c.ID, before: refs.Empty, after: c.refs})
- } else {
- c := cl.(*Cell)
- cp := *c
- cp.Version = m.Postcondition
- cp.Value = m.Value
- cp.Dir = d
- cp.refs = m.refs
- table = table.Put(&cp)
- updates = append(updates, &refUpdate{id: c.ID, before: c.refs, after: cp.refs})
- }
- }
- sn.idTable = table
-
- // Add the refs.
- for _, u := range updates {
- sn.updateRefs(u.id, u.before, u.after)
- }
-
- // Redirect the rootID.
- if mu.SetRootID && mu.RootID != sn.rootID {
- if mu.RootID != nullID {
- sn.ref(mu.RootID)
- }
- if sn.rootID != nullID {
- sn.unref(sn.rootID)
- }
- sn.rootID = mu.RootID
- }
-
- return nil
-}
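
Taken together, the intended calling pattern is: take a MutableSnapshot from the State, apply operations to it, then commit its Mutations back with ApplyMutations, which assigns the monotonic timestamp and checks every precondition before mutating anything. A sketch of that round trip, with error handling elided and sketchCommit as an illustrative name:

    func sketchCommit() {
            st := New(rootPublicID) // rootPublicID as in this package's tests
            sn := st.MutableSnapshot()
            sn.Put(rootPublicID, storage.ParsePath("/"), "hello")

            if err := st.ApplyMutations(sn.Mutations()); err != nil {
                    // errPreconditionFailed: another writer committed first.
            }

            ro := st.Snapshot() // read-only view, isolated from later commits
            _ = ro
    }
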
diff --git a/services/store/memstore/state/state_test.go b/services/store/memstore/state/state_test.go
deleted file mode 100644
index 7a8045f..0000000
--- a/services/store/memstore/state/state_test.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package state_test
-
-import (
- "runtime"
- "testing"
-
- "veyron/services/store/memstore/state"
-
- "veyron2/security"
- "veyron2/storage"
-)
-
-type Node struct {
- E map[string]storage.ID
-}
-
-var (
- rootPublicID security.PublicID = security.FakePublicID("root")
-)
-
-// makeParentNodes creates the parent nodes if they do not already exist.
-func makeParentNodes(t *testing.T, sn *state.MutableSnapshot, user security.PublicID, path string) {
- _, file, line, _ := runtime.Caller(2)
- pl := storage.ParsePath(path)
- for i := 0; i < len(pl); i++ {
- name := pl[:i]
- if _, err := sn.Get(user, name); err != nil {
- if _, err := sn.Put(user, name, &Node{}); err != nil {
- t.Fatalf("%s(%d): can't put %s: %s", file, line, name, err)
- }
- }
- }
-}
-
-// mkdir creates an empty directory. Creates the parent directories if they do
-// not already exist.
-func mkdir(t *testing.T, sn *state.MutableSnapshot, user security.PublicID, path string) storage.ID {
- makeParentNodes(t, sn, user, path)
- name := storage.ParsePath(path)
- stat, err := sn.Put(user, name, &Node{})
- if err != nil {
- _, file, line, _ := runtime.Caller(1)
- t.Fatalf("%s(%d): can't put %s: %s", file, line, path, err)
- }
- return stat.ID
-}
-
-// link adds a hard link to another store value. Creates the parent directories
-// if they do not already exist.
-func link(t *testing.T, sn *state.MutableSnapshot, user security.PublicID, path string, id storage.ID) {
- makeParentNodes(t, sn, user, path)
- name := storage.ParsePath(path)
- if _, err := sn.Put(user, name, id); err != nil {
- _, file, line, _ := runtime.Caller(1)
- t.Fatalf("%s(%d): can't put %s: %s", file, line, path, err)
- }
-}
-
-// putPath adds a value to the store. Creates the parent directories if they do
-// not already exist.
-func putPath(t *testing.T, sn *state.MutableSnapshot, user security.PublicID, path string, v interface{}) storage.ID {
- makeParentNodes(t, sn, user, path)
- name := storage.ParsePath(path)
- stat, err := sn.Put(user, name, v)
- if err != nil {
- _, file, line, _ := runtime.Caller(1)
- t.Fatalf("%s(%d): can't put %s: %s", file, line, path, err)
- }
- return stat.ID
-}
-
-// put adds a value to the store.
-func put(t *testing.T, sn *state.MutableSnapshot, user security.PublicID, path string, v interface{}) storage.ID {
- name := storage.ParsePath(path)
- stat, err := sn.Put(user, name, v)
- if err != nil {
- _, file, line, _ := runtime.Caller(1)
- t.Fatalf("%s(%d): can't put %s: %s", file, line, path, err)
- }
- return stat.ID
-}
-
-// maybePut tries to add a value to the store.
-func maybePut(sn *state.MutableSnapshot, user security.PublicID, path string, v interface{}) (storage.ID, error) {
- name := storage.ParsePath(path)
- stat, err := sn.Put(user, name, v)
- if err != nil {
- return storage.ID{}, err
- }
- return stat.ID, nil
-}
-
- // maybeGet tries to fetch a value from the store.
-func maybeGet(sn *state.MutableSnapshot, user security.PublicID, path string) (interface{}, error) {
- name := storage.ParsePath(path)
- e, err := sn.Get(user, name)
- if err != nil {
- return nil, err
- }
- return e.Value, nil
-}
-
-// commit commits the state.
-func commit(t *testing.T, st *state.State, sn *state.MutableSnapshot) {
- if err := st.ApplyMutations(sn.Mutations()); err != nil {
- _, file, line, _ := runtime.Caller(1)
- t.Fatalf("%s(%d): can't commit: %s", file, line, err)
- }
-}
-
- // Simple DAG of nodes connected by hard links.
-func TestState(t *testing.T) {
- st := state.New(rootPublicID)
-
- // Add a Node under /a/b/c.
- sn1 := st.MutableSnapshot()
- nid := putPath(t, sn1, rootPublicID, "/a/b/c", &Node{})
- commit(t, st, sn1)
-
- // Create a link.
- {
- sn := st.MutableSnapshot()
- link(t, sn, rootPublicID, "/a/b/d", nid)
- commit(t, st, sn)
- }
-
- // Add a shared Entry.
- {
- sn := st.MutableSnapshot()
- mkdir(t, sn, rootPublicID, "/a/b/c/Entries/foo")
- commit(t, st, sn)
- }
-
- // Check that it exists.
- {
- sn := st.MutableSnapshot()
- if _, err := maybeGet(sn, rootPublicID, "/a/b/d/Entries/foo"); err != nil {
- t.Errorf("entry should exist")
- }
- }
-}
diff --git a/services/store/memstore/state/util.go b/services/store/memstore/state/util.go
deleted file mode 100644
index cb72db1..0000000
--- a/services/store/memstore/state/util.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package state
-
-import (
- "bytes"
- "fmt"
- "reflect"
- "sort"
-
- "veyron2/storage"
-)
-
- // makeInnerReference returns an interface holding a pointer to the element
- // in val. Note that this is different from returning &val, which returns a
- // pointer to val. This is required temporarily, until we switch to VOM
- // values, so that we can append to slices.
- // TODO(bprosnitz) This is hacky -- remove this when we switch to VOM Value
-func makeInnerReference(val interface{}) interface{} {
- rv := reflect.ValueOf(val)
- ptr := reflect.New(rv.Type())
- ptr.Elem().Set(rv)
- return ptr.Interface()
-}
-
-func deepcopyReflect(rv reflect.Value) reflect.Value {
- switch rv.Kind() {
- case reflect.Array:
- arr := reflect.New(rv.Type()).Elem()
- for i := 0; i < rv.Len(); i++ {
- valcopy := deepcopyReflect(rv.Index(i))
- arr.Index(i).Set(valcopy)
- }
- return arr
- case reflect.Slice:
- s := reflect.MakeSlice(rv.Type(), rv.Len(), rv.Cap())
- for i := 0; i < rv.Len(); i++ {
- valcopy := deepcopyReflect(rv.Index(i))
- s.Index(i).Set(valcopy)
- }
- ptr := reflect.New(rv.Type())
- ptr.Elem().Set(s)
- return ptr.Elem()
- case reflect.Map:
- m := reflect.MakeMap(rv.Type())
- keys := rv.MapKeys()
- for _, key := range keys {
- val := rv.MapIndex(key)
- keycopy := deepcopyReflect(key)
- valcopy := deepcopyReflect(val)
- m.SetMapIndex(keycopy, valcopy)
- }
- return m
- case reflect.Struct:
- s := reflect.New(rv.Type()).Elem()
- for i := 0; i < rv.NumField(); i++ {
- valcopy := deepcopyReflect(rv.Field(i))
- s.Field(i).Set(valcopy)
- }
- return s
- case reflect.Ptr:
- ptr := reflect.New(rv.Type()).Elem()
- elem := reflect.New(rv.Type().Elem())
- ptr.Set(elem)
- ptr.Elem().Set(deepcopyReflect(rv.Elem()))
- return ptr
- case reflect.Interface:
- intr := reflect.New(rv.Type()).Elem()
- intr.Set(deepcopyReflect(rv.Elem()))
- return intr
- case reflect.Chan, reflect.Func, reflect.UnsafePointer:
- panic(fmt.Sprintf("deepcopy of kind %v not supported", rv.Kind()))
- default:
- // Primitives (copy it so we can't set the original)
- return reflect.ValueOf(rv.Interface())
- }
-}
-
-// deepcopy performs a deep copy of a value. We need this to simulate secondary
-// storage where each time a value is stored, it is copied to secondary storage;
-// and when it is retrieved, it is copied out of secondary storage.
-func deepcopy(v interface{}) interface{} {
- if v == nil {
- return nil
- }
- return deepcopyReflect(reflect.ValueOf(v)).Interface()
-}
-
-// addIDToSort adds a storage.ID to a sorted array.
-func addIDToSort(id storage.ID, refs []storage.ID) []storage.ID {
- i := findIDInSort(id, refs)
- newRefs := make([]storage.ID, len(refs)+1)
- copy(newRefs[0:i], refs[0:i])
- newRefs[i] = id
- copy(newRefs[i+1:], refs[i:])
- return newRefs
-}
-
-// removeIDFromSort removes a storage.ID from a sorted array.
-func removeIDFromSort(id storage.ID, refs []storage.ID) []storage.ID {
- i := findIDInSort(id, refs)
- if i < len(refs) && refs[i] == id {
- newRefs := make([]storage.ID, len(refs)-1)
- copy(newRefs[0:i], refs[0:i])
- copy(newRefs[i:], refs[i+1:])
- return newRefs
- }
- return refs
-}
-
-func findIDInSort(id storage.ID, refs []storage.ID) int {
- return sort.Search(len(refs), func(i int) bool {
- return bytes.Compare(refs[i][:], id[:]) >= 0
- })
-}
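
The three helpers above keep an ID slice in the byte order that findIDInSort's sort.Search probe expects, so membership tests stay logarithmic. An illustrative sketch (sketchSortedIDs is not part of the package):

    func sketchSortedIDs() {
            var ids []storage.ID // kept sorted by byte order

            a, b := storage.NewID(), storage.NewID()
            ids = addIDToSort(a, ids)
            ids = addIDToSort(b, ids)

            // findIDInSort returns the insertion index; the ID is present
            // only if the slice actually holds it at that index.
            i := findIDInSort(a, ids)
            present := i < len(ids) && ids[i] == a
            _ = present

            ids = removeIDFromSort(b, ids) // no-op if b is absent
    }
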
diff --git a/services/store/memstore/state/util_test.go b/services/store/memstore/state/util_test.go
deleted file mode 100644
index 38653ef..0000000
--- a/services/store/memstore/state/util_test.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Modeled after veyron2/storage/vstore/blackbox/photoalbum_test.go.
-//
-// TODO(sadovsky): Maybe migrate this to be part of the public store API, to
-// help with writing tests that use storage.
-
-package state
-
-import (
- "reflect"
- "runtime"
- "testing"
-
- "veyron2/security"
- "veyron2/storage"
-)
-
-var (
- rootPublicID security.PublicID = security.FakePublicID("root")
-)
-
-func get(t *testing.T, sn *MutableSnapshot, path string) interface{} {
- _, file, line, _ := runtime.Caller(1)
- name := storage.ParsePath(path)
- e, err := sn.Get(rootPublicID, name)
- if err != nil {
- t.Fatalf("%s(%d): can't get %s: %s", file, line, path, err)
- }
- return e.Value
-}
-
-func put(t *testing.T, sn *MutableSnapshot, path string, v interface{}) storage.ID {
- _, file, line, _ := runtime.Caller(1)
- name := storage.ParsePath(path)
- stat, err := sn.Put(rootPublicID, name, v)
- if err != nil {
- t.Errorf("%s(%d): can't put %s: %s", file, line, path, err)
- }
- if _, err := sn.Get(rootPublicID, name); err != nil {
- t.Errorf("%s(%d): can't get %s: %s", file, line, path, err)
- }
- if stat != nil {
- return stat.ID
- }
- return storage.ID{}
-}
-
-func remove(t *testing.T, sn *MutableSnapshot, path string) {
- name := storage.ParsePath(path)
- if err := sn.Remove(rootPublicID, name); err != nil {
- _, file, line, _ := runtime.Caller(1)
- t.Errorf("%s(%d): can't remove %s: %s", file, line, path, err)
- }
-}
-
-func TestDeepcopy(t *testing.T) {
- type basicTestStruct struct {
- X int
- }
- type compositeHoldingTestStruct struct {
- A [2]int
- Sl []string
- M map[string]int
- St basicTestStruct
- }
-
- var intr interface{} = []string{"X"}
-
- tests := []interface{}{
- nil,
- 0,
- true,
- "str",
- [3]int{4, 5, 6},
- []string{"A", "B"},
- map[string]int{"A": 4, "B": 3},
- basicTestStruct{7},
- &basicTestStruct{7},
- compositeHoldingTestStruct{
- [2]int{3, 4},
- []string{"A"},
- map[string]int{"A": 5},
- basicTestStruct{X: 3},
- },
- intr,
- }
-
- for _, test := range tests {
- copiedVal := deepcopy(test)
- if !reflect.DeepEqual(copiedVal, test) {
- t.Errorf("failure in deepcopy. Expected %v, got %v", test, copiedVal)
- }
- }
-}
-
-func TestDeepcopySliceSettability(t *testing.T) {
- rvSliceCopy := deepcopyReflect(reflect.ValueOf([]int{3, 4}))
- if !rvSliceCopy.CanSet() {
- t.Errorf("can't set slice. This is required for appending to slices")
- }
-}
-
-func TestDeepcopyNilMap(t *testing.T) {
- var nilMap map[int]int
- mapCopy := deepcopy(nilMap)
- if !reflect.DeepEqual(mapCopy, map[int]int{}) {
- t.Errorf("expected an empty map, got %v", mapCopy)
- }
-
- type structWithMap struct {
- M map[int]int
- }
- s := deepcopy(&structWithMap{})
- if !reflect.DeepEqual(s, &structWithMap{map[int]int{}}) {
- t.Errorf("expected an empty map in the struct, got %v", s)
- }
-}
diff --git a/services/store/memstore/store.go b/services/store/memstore/store.go
deleted file mode 100644
index f1090d1..0000000
--- a/services/store/memstore/store.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package memstore
-
-import (
- "veyron/runtimes/google/lib/sync"
-
- "veyron/services/store/memstore/state"
- "veyron/services/store/raw"
-
- "veyron2/ipc"
- "veyron2/security"
- "veyron2/storage"
- "veyron2/verror"
-)
-
-// Store is the in-memory state of the store.
-type Store struct {
- sync.DebugMutex
-
- // state holds the current state of the store.
- State *state.State
-
- // log persists the state and any committed transactions to disk.
- // An ephemeral state has a nil log, and does not persist to disk.
- log *wlog
-}
-
-var (
- ErrRequestCancelled = verror.Abortedf("request cancelled")
-)
-
-// New creates a new store. admin is the public ID of the administrator, dbName
-// is the path of the database directory, to which logs are written.
-func New(admin security.PublicID, dbName string) (*Store, error) {
- rlog, err := openDB(dbName, false)
- if err != nil {
- return nil, err
- }
- if rlog == nil {
- return newState(admin, dbName)
- }
- st, err := readAndCloseDB(admin, rlog)
- if err != nil {
- return nil, err
- }
- // Rename the log file by date.
- if err := backupLog(dbName); err != nil {
- return nil, err
- }
- if err := st.setLog(dbName); err != nil {
- return nil, err
- }
- return st, nil
-}
-
-// newState returns an empty state. dbName is the path of the database directory,
-// to which logs are written.
-func newState(admin security.PublicID, dbName string) (*Store, error) {
- st := &Store{State: state.New(admin)}
- if err := st.setLog(dbName); err != nil {
- return nil, err
- }
- return st, nil
-}
-
- // setLog creates a fresh log file and writes the current state to it.
-func (st *Store) setLog(dbName string) error {
- if dbName != "" {
- log, err := createLog(dbName)
- if err != nil {
- return err
- }
- err = log.writeState(st)
- if err != nil {
- log.close()
- return err
- }
- st.log = log
- }
- return nil
-}
-
-// Bind returns an Object representing a value in the store. The value need not
-// exist; the Put method can be used to add the value if it doesn't already
-// exist.
-func (st *Store) Bind(path string) *Object {
- return &Object{path: storage.ParsePath(path), store: st}
-}
-
-func (st *Store) Close() error {
- st.Lock()
- st.closeLocked()
- st.Unlock()
- return nil
-}
-
-func (st *Store) closeLocked() {
- st.State = nil
- if st.log != nil {
- st.log.close()
- }
- st.log = nil
-}
-
-// GC performs a manual garbage collection.
-func (st *Store) GC() error {
- st.Lock()
- defer st.Unlock()
- st.State.GC()
-
- // Append a transaction containing deletions, if there are any.
- mu := st.State.Deletions()
- if st.log != nil && mu != nil {
- if err := st.log.appendTransaction(mu); err != nil {
- // We can't continue because the log failed. The state has already been updated,
- // but access to the state is blocked because we have the lock. Close the state
- // to ensure that it is never used again.
- st.closeLocked()
- return err
- }
- }
- return nil
-}
-
-// Snapshot returns a read-only state.
-func (st *Store) Snapshot() state.Snapshot {
- st.Lock()
- defer st.Unlock()
- return st.State.Snapshot()
-}
-
-// ApplyMutations applies the mutations to the state atomically.
-func (st *Store) ApplyMutations(mu *state.Mutations) error {
- st.Lock()
- defer st.Unlock()
- if err := st.State.ApplyMutations(mu); err != nil {
- return err
- }
- if st.log != nil {
- // Append the transaction to the log.
- if err := st.log.appendTransaction(mu); err != nil {
- // We can't continue because the log failed. The state has already been updated,
- // but access to the state is blocked because we have the lock. Close the state
- // to ensure that it is never used again.
- st.closeLocked()
- return err
- }
- }
- return nil
-}
-
-// PutMutations atomically commits a stream of Mutations when the stream is
-// closed. Mutations are not committed if the request is cancelled before the
-// stream has been closed.
-func (st *Store) PutMutations(ctx ipc.ServerContext, stream raw.StoreServicePutMutationsStream) error {
- tr := st.newNilTransaction()
- rStream := stream.RecvStream()
- for rStream.Advance() {
- mu := rStream.Value()
-
- if err := tr.snapshot.PutMutation(mu); err != nil {
- tr.Abort()
- return err
- }
- }
- err := rStream.Err()
- if err != nil {
- tr.Abort()
- return err
- }
-
- select {
- case <-ctx.Done():
- tr.Abort()
- return ErrRequestCancelled
- default:
- return tr.Commit()
- }
-}
diff --git a/services/store/memstore/store_test.go b/services/store/memstore/store_test.go
deleted file mode 100644
index 000dceb..0000000
--- a/services/store/memstore/store_test.go
+++ /dev/null
@@ -1,379 +0,0 @@
-package memstore
-
-import (
- "io/ioutil"
- "os"
- "testing"
-
- storetesting "veyron/services/store/memstore/testing"
- "veyron/services/store/raw"
-
- "veyron2/rt"
- "veyron2/storage"
- "veyron2/verror"
-)
-
-func TestLogWrite(t *testing.T) {
- dbName, err := ioutil.TempDir(os.TempDir(), "vstore")
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed: %v", err)
- }
- defer os.RemoveAll(dbName)
-
- // Create the state. This should also initialize the log.
- st, err := New(rootPublicID, dbName)
- if err != nil {
- t.Fatalf("newState() failed: %v", err)
- }
-
- // Open the log for reading.
- log, err := OpenLog(dbName, true)
- if err != nil {
- t.Fatalf("OpenLog() failed: %v", err)
- }
-
- // The log should be synced; test reading the initial state.
- logst, err := log.ReadState(rootPublicID)
- if err != nil {
- t.Fatalf("ReadState() failed: %v", err)
- }
-
- // Construct a transaction.
- v1 := "v1"
- v2 := "v2"
- v3 := "v3"
- tr := NewTransaction()
- put(t, st, tr, "/", v1)
- put(t, st, tr, "/a", v2)
- put(t, st, tr, "/a/b", v3)
- commit(t, tr)
-
- // Check that the mutations were applied to the state.
- expectValue(t, st, nil, "/", v1)
- expectValue(t, st, nil, "/a", v2)
- expectValue(t, st, nil, "/a/b", v3)
-
- // The log should be synced; test reading the transaction.
- logmu, err := log.ReadTransaction()
- if err != nil {
- t.Fatalf("ReadTransaction() failed: %v", err)
- }
- logst.ApplyMutations(logmu)
- expectValue(t, logst, nil, "/", v1)
- expectValue(t, logst, nil, "/a", v2)
- expectValue(t, logst, nil, "/a/b", v3)
-}
-
-func TestFailedLogWrite(t *testing.T) {
- dbName, err := ioutil.TempDir(os.TempDir(), "vstore")
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed: %v", err)
-	}
-	defer os.RemoveAll(dbName)
-
- // Create the state. This should also initialize the log.
- st, err := New(rootPublicID, dbName)
- if err != nil {
-		t.Fatalf("New() failed: %v", err)
- }
-
- // Construct a transaction.
- v1 := "v1"
- tr := NewTransaction()
- put(t, st, tr, "/", v1)
- v2 := "v2"
- put(t, st, tr, "/a", v2)
-
- // Close the log file. Subsequent writes to the log should fail.
- st.log.close()
-
- // Commit the state. The call should fail.
- if err := st.log.appendTransaction(nil); !verror.Is(err, verror.Aborted) {
- t.Errorf("Expected error %v, got %v", verror.Aborted, err)
- }
-}
-
-func TestRecoverFromLog(t *testing.T) {
- dbName, err := ioutil.TempDir(os.TempDir(), "vstore")
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed: %v", err)
- }
- defer os.RemoveAll(dbName)
-
- // Create the state. This should also initialize the log.
- st, err := New(rootPublicID, dbName)
- if err != nil {
-		t.Fatalf("New() failed: %v", err)
- }
-
- // Construct a transaction.
- v1 := "v1"
- v2 := "v2"
- v3 := "v3"
- tr := NewTransaction()
- put(t, st, tr, "/", v1)
- put(t, st, tr, "/a", v2)
- put(t, st, tr, "/a/b", v3)
- commit(t, tr)
-
- // Recover state from the log.
- recoverst, err := New(rootPublicID, dbName)
- if err != nil {
-		t.Fatalf("New() failed: %v", err)
- }
- expectValue(t, recoverst, nil, "/", v1)
- expectValue(t, recoverst, nil, "/a", v2)
- expectValue(t, recoverst, nil, "/a/b", v3)
-}
-
-func TestPutMutations(t *testing.T) {
- rt.Init()
- dbName, err := ioutil.TempDir(os.TempDir(), "vstore")
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed: %v", err)
- }
- defer os.RemoveAll(dbName)
-
- // Create the state.
- st, err := New(rootPublicID, dbName)
- if err != nil {
- t.Fatalf("New() failed: %v", err)
- }
-
- // Add /, /a, /a/b
- id1, id2, id3 := storage.NewID(), storage.NewID(), storage.NewID()
- pre1, pre2, pre3 := raw.NoVersion, raw.NoVersion, raw.NoVersion
- post1, post2, post3 := raw.NewVersion(), raw.NewVersion(), raw.NewVersion()
- v1, v2, v3 := "v1", "v2", "v3"
-
- storetesting.PutMutationsBatch(t, rootPublicID, st.PutMutations, []raw.Mutation{
- raw.Mutation{
- ID: id1,
- PriorVersion: pre1,
- Version: post1,
- IsRoot: true,
- Value: v1,
- Dir: dir("a", id2),
- },
- raw.Mutation{
- ID: id2,
- PriorVersion: pre2,
- Version: post2,
- IsRoot: false,
- Value: v2,
- Dir: dir("b", id3),
- },
- raw.Mutation{
- ID: id3,
- PriorVersion: pre3,
- Version: post3,
- IsRoot: false,
- Value: v3,
- Dir: empty,
- },
- })
-
- expectValue(t, st, nil, "/", v1)
- expectValue(t, st, nil, "/a", v2)
- expectValue(t, st, nil, "/a/b", v3)
-
- // Remove /a/b
- pre1, pre2, pre3 = post1, post2, post3
- post2 = raw.NewVersion()
-
- storetesting.PutMutationsBatch(t, rootPublicID, st.PutMutations, []raw.Mutation{
- raw.Mutation{
- ID: id2, PriorVersion: pre2,
- Version: post2,
- IsRoot: false,
- Value: v2,
- Dir: empty,
- }})
-
- expectValue(t, st, nil, "/", v1)
- expectValue(t, st, nil, "/a", v2)
-	expectNotExists(t, st, nil, "/a/b")
-
- // Garbage-collect /a/b
- post3 = raw.NoVersion
-
- storetesting.PutMutationsBatch(t, rootPublicID, st.PutMutations, []raw.Mutation{
- raw.Mutation{
- ID: id3,
- PriorVersion: pre3,
- Version: post3,
- IsRoot: false,
- }})
-
- expectValue(t, st, nil, "/", v1)
- expectValue(t, st, nil, "/a", v2)
-	expectNotExists(t, st, nil, "/a/b")
-
- // Remove /
- pre1, pre2, pre3 = post1, post2, post3
- post1 = raw.NoVersion
-
- storetesting.PutMutationsBatch(t, rootPublicID, st.PutMutations, []raw.Mutation{
- raw.Mutation{
- ID: id1,
- PriorVersion: pre1,
- Version: post1,
- IsRoot: true,
- }})
-
- expectNotExists(t, st, nil, "/")
- expectNotExists(t, st, nil, "/a")
-	expectNotExists(t, st, nil, "/a/b")
-
- // Garbage-collect /a
- post2 = raw.NoVersion
-
- storetesting.PutMutationsBatch(t, rootPublicID, st.PutMutations, []raw.Mutation{
- raw.Mutation{
- ID: id2,
- PriorVersion: pre2,
- Version: post2,
- IsRoot: false,
- }})
-
- expectNotExists(t, st, nil, "/")
- expectNotExists(t, st, nil, "/a")
-	expectNotExists(t, st, nil, "/a/b")
-}
-
-func TestPutConflictingMutations(t *testing.T) {
- dbName, err := ioutil.TempDir(os.TempDir(), "vstore")
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed: %v", err)
- }
- defer os.RemoveAll(dbName)
-
- // Create the state.
- st, err := New(rootPublicID, dbName)
- if err != nil {
- t.Fatalf("New() failed: %v", err)
- }
-
- // Add /, /a
- id1, id2 := storage.NewID(), storage.NewID()
- pre1, pre2 := raw.NoVersion, raw.NoVersion
- post1, post2 := raw.NewVersion(), raw.NewVersion()
- v1, v2 := "v1", "v2"
-
- storetesting.PutMutationsBatch(t, rootPublicID, st.PutMutations, []raw.Mutation{
- raw.Mutation{
- ID: id1,
- PriorVersion: pre1,
- Version: post1,
- IsRoot: true,
- Value: v1,
- Dir: dir("a", id2),
- },
- raw.Mutation{
- ID: id2,
- PriorVersion: pre2,
- Version: post2,
- IsRoot: false,
- Value: v2,
- Dir: empty,
- },
- })
-
- expectValue(t, st, nil, "/", v1)
- expectValue(t, st, nil, "/a", v2)
-
- // Attempt to update /a with a bad precondition
- pre2 = raw.NewVersion()
- post2 = raw.NewVersion()
- v2 = "v4"
-
- s := storetesting.PutMutations(rootPublicID, st.PutMutations)
- s.SendStream().Send(raw.Mutation{
- ID: id2,
- PriorVersion: pre2,
- Version: post2,
- IsRoot: true,
- Value: v2,
- Dir: empty,
- })
- if err := s.Finish(); !verror.Is(err, verror.Aborted) {
- t.Errorf("Error should be %v: got %v", verror.Aborted, err)
- }
-}
-
-func TestPutDuplicateMutations(t *testing.T) {
- dbName, err := ioutil.TempDir(os.TempDir(), "vstore")
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed: %v", err)
- }
- defer os.RemoveAll(dbName)
-
- // Create the state.
- st, err := New(rootPublicID, dbName)
- if err != nil {
- t.Fatalf("New() failed: %v", err)
- }
-
- id := storage.NewID()
- s := storetesting.PutMutations(rootPublicID, st.PutMutations)
- s.SendStream().Send(raw.Mutation{
- ID: id,
- PriorVersion: raw.NoVersion,
- Version: raw.NewVersion(),
- IsRoot: true,
- Value: "v1",
- Dir: empty,
- })
- s.SendStream().Send(raw.Mutation{
- ID: id,
- PriorVersion: raw.NoVersion,
- Version: raw.NewVersion(),
- IsRoot: true,
- Value: "v2",
- Dir: empty,
- })
- if err := s.Finish(); !verror.Is(err, verror.BadArg) {
- t.Errorf("Error should be %v: got %v", verror.BadArg, err)
- }
-}
-
-func TestCancelPutMutation(t *testing.T) {
- dbName, err := ioutil.TempDir(os.TempDir(), "vstore")
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed: %v", err)
- }
- defer os.RemoveAll(dbName)
-
- // Create the state.
- st, err := New(rootPublicID, dbName)
- if err != nil {
- t.Fatalf("New() failed: %v", err)
- }
-
- s := storetesting.PutMutations(rootPublicID, st.PutMutations)
- s.SendStream().Send(raw.Mutation{
- ID: storage.NewID(),
- PriorVersion: raw.NoVersion,
- Version: raw.NewVersion(),
- IsRoot: true,
- Value: "v1",
- Dir: empty,
- })
- s.Cancel()
- if err := s.Finish(); !verror.Is(err, verror.Aborted) {
- t.Errorf("Error should be %v: got %v", verror.Aborted, err)
- }
-
- expectNotExists(t, st, nil, "/")
-}
-
-var (
- empty = []raw.DEntry{}
-)
-
-func dir(name string, id storage.ID) []raw.DEntry {
- return []raw.DEntry{raw.DEntry{
- Name: name,
- ID: id,
- }}
-}
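
The tests above all exercise one discipline: a mutation carries a PriorVersion precondition and a Version postcondition, it applies only if the precondition matches the stored version, and a NoVersion postcondition deletes. A toy sketch of that compare-and-set rule follows, with plain uint64 versions, a map standing in for the store, and 0 playing the role of raw.NoVersion; none of these names are the store's.

```go
package main

import (
	"errors"
	"fmt"
)

var errBadPrecondition = errors.New("aborted: version precondition failed")

type object struct {
	version uint64
	value   string
}

// store maps names to versioned objects; version 0 stands in for raw.NoVersion.
type store map[string]*object

// apply performs one compare-and-set mutation: it succeeds only if the
// caller's prior version matches the stored one, and a zero next version
// means "delete".
func (s store) apply(name string, prior, next uint64, value string) error {
	var cur uint64
	if o, ok := s[name]; ok {
		cur = o.version
	}
	if cur != prior {
		return errBadPrecondition
	}
	if next == 0 {
		delete(s, name)
		return nil
	}
	s[name] = &object{version: next, value: value}
	return nil
}

func main() {
	s := store{}
	fmt.Println(s.apply("/a", 0, 1, "v1")) // create: <nil>
	fmt.Println(s.apply("/a", 7, 2, "v2")) // stale precondition: error
	fmt.Println(s.apply("/a", 1, 0, ""))   // delete: <nil>
}
```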
diff --git a/services/store/memstore/testing/util.go b/services/store/memstore/testing/util.go
deleted file mode 100644
index be31dc9..0000000
--- a/services/store/memstore/testing/util.go
+++ /dev/null
@@ -1,431 +0,0 @@
-package testing
-
-import (
- "io"
- "runtime"
- "testing"
-
- "veyron/services/store/raw"
-
- "veyron2/context"
- "veyron2/ipc"
- "veyron2/naming"
- "veyron2/rt"
- "veyron2/security"
- "veyron2/services/watch"
- "veyron2/services/watch/types"
- "veyron2/storage"
-)
-
-// FakeServerContext implements ipc.ServerContext.
-type FakeServerContext struct {
- context.T
- cancel context.CancelFunc
- id security.PublicID
-}
-
-func NewFakeServerContext(id security.PublicID) *FakeServerContext {
- ctx, cancel := rt.R().NewContext().WithCancel()
-
- return &FakeServerContext{
- T: ctx,
- cancel: cancel,
- id: id,
- }
-}
-
-func (*FakeServerContext) Server() ipc.Server { return nil }
-func (*FakeServerContext) Method() string { return "" }
-func (*FakeServerContext) Name() string { return "" }
-func (*FakeServerContext) Suffix() string { return "" }
-func (*FakeServerContext) Label() (l security.Label) { return }
-func (*FakeServerContext) Discharges() map[string]security.Discharge { return nil }
-func (ctx *FakeServerContext) LocalID() security.PublicID { return ctx.id }
-func (ctx *FakeServerContext) RemoteID() security.PublicID { return ctx.id }
-func (*FakeServerContext) Blessing() security.PublicID { return nil }
-func (*FakeServerContext) LocalEndpoint() naming.Endpoint { return nil }
-func (*FakeServerContext) RemoteEndpoint() naming.Endpoint { return nil }
-func (ctx *FakeServerContext) Cancel() { ctx.cancel() }
-
-// Utilities for PutMutations.
-
-// storeServicePutMutationsStream implements raw.StoreServicePutMutationsStream
-type storeServicePutMutationsStream struct {
- mus <-chan raw.Mutation
- value raw.Mutation
-}
-
-func (s *storeServicePutMutationsStream) RecvStream() interface {
- Advance() bool
- Value() raw.Mutation
- Err() error
-} {
- return s
-}
-
-func (s *storeServicePutMutationsStream) Advance() bool {
- var ok bool
- s.value, ok = <-s.mus
- return ok
-}
-
-func (s *storeServicePutMutationsStream) Value() raw.Mutation {
- return s.value
-}
-
-func (s *storeServicePutMutationsStream) Err() error {
- return nil
-}
-
-// storePutMutationsStream implements raw.StorePutMutationsStream
-type storePutMutationsStream struct {
- closed bool
- mus chan<- raw.Mutation
-}
-
-func (s *storePutMutationsStream) Send(mu raw.Mutation) error {
- s.mus <- mu
- return nil
-}
-
-func (s *storePutMutationsStream) Close() error {
- if !s.closed {
- s.closed = true
- close(s.mus)
- }
- return nil
-}
-
-type storePutMutationsCall struct {
- ctx ipc.ServerContext
- stream storePutMutationsStream
- err <-chan error
-}
-
-func (s *storePutMutationsCall) SendStream() interface {
- Send(mu raw.Mutation) error
- Close() error
-} {
- return &s.stream
-}
-
-func (s *storePutMutationsCall) Finish() error {
- s.stream.Close()
- return <-s.err
-}
-
-func (s *storePutMutationsCall) Cancel() {
- s.ctx.(*FakeServerContext).Cancel()
- s.stream.Close()
-}
-
-func PutMutations(id security.PublicID, putMutationsFn func(ipc.ServerContext, raw.StoreServicePutMutationsStream) error) raw.StorePutMutationsCall {
- ctx := NewFakeServerContext(id)
- mus := make(chan raw.Mutation)
- err := make(chan error)
- go func() {
- err <- putMutationsFn(ctx, &storeServicePutMutationsStream{mus: mus})
- close(err)
- }()
- return &storePutMutationsCall{
- ctx: ctx,
- err: err,
- stream: storePutMutationsStream{
- mus: mus,
- },
- }
-}
-
-func PutMutationsBatch(t *testing.T, id security.PublicID, putMutationsFn func(ipc.ServerContext, raw.StoreServicePutMutationsStream) error, mus []raw.Mutation) {
- storePutMutationsStream := PutMutations(id, putMutationsFn)
- for _, mu := range mus {
- storePutMutationsStream.SendStream().Send(mu)
- }
- if err := storePutMutationsStream.Finish(); err != nil {
- _, file, line, _ := runtime.Caller(1)
-		t.Errorf("%s(%d): can't put mutations %v: %v", file, line, mus, err)
- }
-}
-
-// Utilities for Watch.
-
-// watcherServiceWatchStreamSender implements watch.WatcherServiceWatchStreamSender
-type watcherServiceWatchStreamSender struct {
- ctx ipc.ServerContext
- output chan<- types.Change
-}
-
-func (s *watcherServiceWatchStreamSender) Send(cb types.Change) error {
- select {
- case s.output <- cb:
- return nil
- case <-s.ctx.Done():
- return io.EOF
- }
-}
-
-// watcherServiceWatchStream implements watch.WatcherServiceWatchStream
-type watcherServiceWatchStream struct {
- watcherServiceWatchStreamSender
-}
-
-func (s *watcherServiceWatchStream) SendStream() interface {
- Send(cb types.Change) error
-} {
- return s
-}
-func (*watcherServiceWatchStream) Cancel() {}
-
-// watcherWatchStream implements watch.WatcherWatchStream.
-type watcherWatchStream struct {
- ctx *FakeServerContext
- value types.Change
- input <-chan types.Change
- err <-chan error
-}
-
-func (s *watcherWatchStream) Advance() bool {
- var ok bool
- s.value, ok = <-s.input
- return ok
-}
-
-func (s *watcherWatchStream) Value() types.Change {
- return s.value
-}
-
-func (*watcherWatchStream) Err() error {
- return nil
-}
-
-func (s *watcherWatchStream) Finish() error {
- <-s.input
- return <-s.err
-}
-
-func (s *watcherWatchStream) Cancel() {
- s.ctx.Cancel()
-}
-
-func (s *watcherWatchStream) RecvStream() interface {
- Advance() bool
- Value() types.Change
- Err() error
-} {
- return s
-}
-
-func watchImpl(id security.PublicID, watchFn func(ipc.ServerContext, *watcherServiceWatchStream) error) *watcherWatchStream {
- ctx := NewFakeServerContext(id)
- outputc := make(chan types.Change)
- inputc := make(chan types.Change)
- // This goroutine ensures that inputs will eventually stop going through
- // once the context is done. Send could handle this, but running a separate
- // goroutine is easier as we do not control invocations of Send.
- go func() {
- for {
- select {
- case change := <-outputc:
- inputc <- change
- case <-ctx.Done():
- close(inputc)
- return
- }
- }
- }()
- errc := make(chan error, 1)
- go func() {
- stream := &watcherServiceWatchStream{
- watcherServiceWatchStreamSender{
- ctx: ctx,
- output: outputc,
- },
- }
- err := watchFn(ctx, stream)
- errc <- err
- close(errc)
- ctx.Cancel()
- }()
- return &watcherWatchStream{
- ctx: ctx,
- input: inputc,
- err: errc,
- }
-}
-
-func WatchRaw(id security.PublicID, watchFn func(ipc.ServerContext, raw.Request, raw.StoreServiceWatchStream) error, req raw.Request) raw.StoreWatchCall {
- return watchImpl(id, func(ctx ipc.ServerContext, stream *watcherServiceWatchStream) error {
- return watchFn(ctx, req, stream)
- })
-}
-
-func WatchGlob(id security.PublicID, watchFn func(ipc.ServerContext, types.GlobRequest, watch.GlobWatcherServiceWatchGlobStream) error, req types.GlobRequest) watch.GlobWatcherWatchGlobCall {
- return watchImpl(id, func(ctx ipc.ServerContext, iterator *watcherServiceWatchStream) error {
- return watchFn(ctx, req, iterator)
- })
-}
-
-func WatchGlobOnPath(id security.PublicID, watchFn func(ipc.ServerContext, storage.PathName, types.GlobRequest, watch.GlobWatcherServiceWatchGlobStream) error, path storage.PathName, req types.GlobRequest) watch.GlobWatcherWatchGlobCall {
- return watchImpl(id, func(ctx ipc.ServerContext, stream *watcherServiceWatchStream) error {
- return watchFn(ctx, path, req, stream)
- })
-}
-
-func ExpectInitialStateSkipped(t *testing.T, change types.Change) {
- if change.Name != "" {
-		t.Fatalf("Expected Name to be \"\" but was: %v", change.Name)
- }
- if change.State != types.InitialStateSkipped {
-		t.Fatalf("Expected State to be InitialStateSkipped but was: %v", change.State)
- }
- if len(change.ResumeMarker) != 0 {
-		t.Fatalf("Expected no ResumeMarker but was: %v", change.ResumeMarker)
- }
-}
-
-func ExpectEntryExists(t *testing.T, changes []types.Change, name string, id storage.ID, value string) {
- change := findEntry(t, changes, name)
- if change.State != types.Exists {
- t.Fatalf("Expected name to exist: %v", name)
- }
- cv, ok := change.Value.(*storage.Entry)
- if !ok {
- t.Fatalf("Expected an Entry")
- }
- if cv.Stat.ID != id {
- t.Fatalf("Expected ID to be %v, but was: %v", id, cv.Stat.ID)
- }
- if cv.Value != value {
- t.Fatalf("Expected Value to be %v, but was: %v", value, cv.Value)
- }
-}
-
-func ExpectEntryExistsNameOnly(t *testing.T, changes []types.Change, name string) {
- change := findEntry(t, changes, name)
- if change.State != types.Exists {
- t.Fatalf("Expected name to exist: %v", name)
- }
- _, ok := change.Value.(*storage.Entry)
- if !ok {
- t.Fatalf("Expected an Entry")
- }
-}
-
-func ExpectEntryDoesNotExist(t *testing.T, changes []types.Change, name string) {
- change := findEntry(t, changes, name)
- if change.State != types.DoesNotExist {
- t.Fatalf("Expected name to not exist: %v", name)
- }
- if change.Value != nil {
- t.Fatalf("Expected entry to be nil")
- }
-}
-
-func findEntry(t *testing.T, changes []types.Change, name string) types.Change {
- for _, change := range changes {
- if change.Name == name {
- return change
- }
- }
- t.Fatalf("Expected a change for name: %v", name)
- panic("Should not reach here")
-}
-
-var (
- EmptyDir = []raw.DEntry{}
-)
-
-func DirOf(name string, id storage.ID) []raw.DEntry {
- return []raw.DEntry{raw.DEntry{
- Name: name,
- ID: id,
- }}
-}
-
-func ExpectMutationExists(t *testing.T, changes []types.Change, id storage.ID, pre, post raw.Version, isRoot bool, value string, dir []raw.DEntry) {
- change := findMutation(t, changes, id)
- if change.State != types.Exists {
- t.Fatalf("Expected id to exist: %v", id)
- }
- cv := change.Value.(*raw.Mutation)
- if cv.PriorVersion != pre {
- t.Fatalf("Expected PriorVersion to be %v, but was: %v", pre, cv.PriorVersion)
- }
- if cv.Version != post {
- t.Fatalf("Expected Version to be %v, but was: %v", post, cv.Version)
- }
- if cv.IsRoot != isRoot {
- t.Fatalf("Expected IsRoot to be: %v, but was: %v", isRoot, cv.IsRoot)
- }
- if cv.Value != value {
- t.Fatalf("Expected Value to be: %v, but was: %v", value, cv.Value)
- }
- expectDirEquals(t, cv.Dir, dir)
-}
-
-func ExpectMutationDoesNotExist(t *testing.T, changes []types.Change, id storage.ID, pre raw.Version, isRoot bool) {
- change := findMutation(t, changes, id)
- if change.State != types.DoesNotExist {
- t.Fatalf("Expected id to not exist: %v", id)
- }
- cv := change.Value.(*raw.Mutation)
- if cv.PriorVersion != pre {
- t.Fatalf("Expected PriorVersion to be %v, but was: %v", pre, cv.PriorVersion)
- }
- if cv.Version != raw.NoVersion {
- t.Fatalf("Expected Version to be NoVersion, but was: %v", cv.Version)
- }
- if cv.IsRoot != isRoot {
- t.Fatalf("Expected IsRoot to be: %v, but was: %v", isRoot, cv.IsRoot)
- }
- if cv.Value != nil {
- t.Fatalf("Expected Value to be nil")
- }
- if cv.Dir != nil {
- t.Fatalf("Expected Dir to be nil")
- }
-}
-
-func ExpectMutationExistsNoVersionCheck(t *testing.T, changes []types.Change, id storage.ID, value string) {
- change := findMutation(t, changes, id)
- if change.State != types.Exists {
- t.Fatalf("Expected id to exist: %v", id)
- }
- cv := change.Value.(*raw.Mutation)
- if cv.Value != value {
- t.Fatalf("Expected Value to be: %v, but was: %v", value, cv.Value)
- }
-}
-
-func ExpectMutationDoesNotExistNoVersionCheck(t *testing.T, changes []types.Change, id storage.ID) {
- change := findMutation(t, changes, id)
- if change.State != types.DoesNotExist {
- t.Fatalf("Expected id to not exist: %v", id)
- }
-}
-
-func findMutation(t *testing.T, changes []types.Change, id storage.ID) types.Change {
- for _, change := range changes {
- cv, ok := change.Value.(*raw.Mutation)
- if !ok {
- t.Fatalf("Expected a Mutation")
- }
- if cv.ID == id {
- return change
- }
- }
- t.Fatalf("Expected a change for id: %v", id)
- panic("Should not reach here")
-}
-
-func expectDirEquals(t *testing.T, actual, expected []raw.DEntry) {
- if len(actual) != len(expected) {
- t.Fatalf("Expected Dir to have %v refs, but had %v", len(expected), len(actual))
- }
- for i, e := range expected {
- a := actual[i]
- if a != e {
- t.Fatalf("Expected Dir entry %v to be %v, but was %v", i, e, a)
- }
- }
-}
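
The harness above follows one pattern throughout: back each fake stream with a channel, run the service-side function in its own goroutine, and have Finish close the send channel and then wait on an error channel. Stripped of the store types, the pattern reduces to the sketch below; `fakeStream` and `serve` are hypothetical names, with strings in place of raw.Mutation.

```go
package main

import "fmt"

// fakeStream is a channel-backed fake of a streaming RPC: send writes to the
// channel the service reads from, and finish closes the channel and waits
// for the service-side function to return.
type fakeStream struct {
	mus chan string
	err chan error
}

func newFakeStream(serve func(<-chan string) error) *fakeStream {
	s := &fakeStream{mus: make(chan string), err: make(chan error, 1)}
	go func() {
		s.err <- serve(s.mus)
		close(s.err)
	}()
	return s
}

func (s *fakeStream) send(m string) { s.mus <- m }

func (s *fakeStream) finish() error {
	close(s.mus)
	return <-s.err
}

func main() {
	s := newFakeStream(func(in <-chan string) error {
		for m := range in {
			fmt.Println("received:", m)
		}
		return nil
	})
	s.send("mutation-1")
	s.send("mutation-2")
	fmt.Println("finish:", s.finish())
}
```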
diff --git a/services/store/memstore/transaction.go b/services/store/memstore/transaction.go
deleted file mode 100644
index cf01271..0000000
--- a/services/store/memstore/transaction.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package memstore
-
-import (
- "sync"
-
- "veyron/services/store/memstore/state"
-)
-
-// Transaction is the type of transactions. Each transaction has a snapshot of
-// the entire state of the store; <store> is the shared *Store, assigned when
-// the transaction is first used, and <snapshot> is a MutableSnapshot that
-// includes any changes made in the transaction.
-//
-// Transactions are initially empty. The <store> and <snapshot> fields are
-// set when the transaction is first used.
-type Transaction struct {
- mutex sync.Mutex
- store *Store
- snapshot *state.MutableSnapshot
-}
-
-// newNilTransaction is used when nil is passed in as the transaction for an
-// object operation. This means that the operation is to be performed on the
-// state <st>.
-func (st *Store) newNilTransaction() *Transaction {
- st.Lock()
- defer st.Unlock()
- return &Transaction{store: st, snapshot: st.State.MutableSnapshot()}
-}
-
-// getTransaction returns the *Transaction value for the service.Transaction.
-// The returned commit bool is true iff the transaction argument is nil, which
-// means that the transaction's lifetime is the duration of the operation (so
-// the transaction should be committed immediately after the operation that
-// uses it is performed).
-func (st *Store) getTransaction(tr *Transaction) (*Transaction, bool, error) {
- if tr == nil {
- return st.newNilTransaction(), true, nil
- }
- tr.useState(st)
- return tr, false, nil
-}
-
-// GetTransactionSnapshot returns a read-only snapshot from the transaction.
-func (st *Store) GetTransactionSnapshot(tr *Transaction) (state.Snapshot, error) {
- t, _, err := st.getTransaction(tr)
- if err != nil {
- return nil, err
- }
- return t.Snapshot(), nil
-}
-
-// NewTransaction returns a fresh transaction containing no changes.
-func NewTransaction() *Transaction {
- return &Transaction{}
-}
-
-// useState sets the state in the transaction if it hasn't been set already.
-func (t *Transaction) useState(st *Store) {
- t.mutex.Lock()
- defer t.mutex.Unlock()
- if t.store == nil {
- t.store = st
- t.snapshot = st.State.MutableSnapshot()
- }
-}
-
-// Commit commits the transaction. The commit may abort if there have been
-// concurrent changes to the store that conflict with the changes in the
-// transaction. If so, Commit returns an error and leaves the store unchanged.
-// The Commit is atomic -- all of the changes in the transaction are applied to
-// the state, or none of them are.
-func (t *Transaction) Commit() error {
- t.mutex.Lock()
- st, sn := t.store, t.snapshot
- t.mutex.Unlock()
- if st == nil || sn == nil {
- return nil
- }
- err := st.ApplyMutations(sn.Mutations())
- // Log deleted objects via garbage collection. This occurs within the
- // transaction boundary, i.e. before the state lock is released.
- // TODO(tilaks): separate discovery and collection, collect lazily.
- st.maybeGC(err)
- return err
-}
-
-// maybeGC will run a garbage collection if the committed transaction may result
-// in an orphaned object.
-// For now, the heuristic is simple - if the transaction succeeded, run GC().
-func (st *Store) maybeGC(err error) {
- if err == nil {
- st.GC()
- }
-}
-
-// Abort discards a transaction. This is an optimization; transactions
-// eventually time out and get discarded. However, live transactions
-// consume resources, so it is good practice to clean up.
-func (t *Transaction) Abort() error {
- t.mutex.Lock()
- t.store = nil
- t.snapshot = nil
- t.mutex.Unlock()
- return nil
-}
-
-// Snapshot returns a read-only snapshot.
-func (t *Transaction) Snapshot() state.Snapshot {
- t.mutex.Lock()
- defer t.mutex.Unlock()
- return t.snapshot.GetSnapshot()
-}
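
The interesting wrinkle in Transaction above is lazy binding: a transaction is empty until its first use, at which point it captures both the store and a mutable snapshot under the transaction's own lock, and later uses against other stores are ignored. A small sketch of that idiom follows, with simplified `store`/`snapshot`/`txn` types that are stand-ins rather than the memstore API.

```go
package main

import (
	"fmt"
	"sync"
)

type snapshot struct{ data map[string]string }

type store struct {
	mu   sync.Mutex
	data map[string]string
}

// mutableSnapshot copies the store's data under its lock.
func (s *store) mutableSnapshot() *snapshot {
	s.mu.Lock()
	defer s.mu.Unlock()
	cp := make(map[string]string, len(s.data))
	for k, v := range s.data {
		cp[k] = v
	}
	return &snapshot{data: cp}
}

type txn struct {
	mu sync.Mutex
	st *store
	sn *snapshot
}

// useState binds the transaction to st on first use only; later calls with a
// different store are no-ops, matching the behavior of useState above.
func (t *txn) useState(st *store) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.st == nil {
		t.st = st
		t.sn = st.mutableSnapshot()
	}
}

func main() {
	st := &store{data: map[string]string{"/": "v1"}}
	t := &txn{}
	t.useState(st)
	t.useState(&store{}) // ignored: already bound
	fmt.Println(t.sn.data["/"])
}
```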
diff --git a/services/store/memstore/util_test.go b/services/store/memstore/util_test.go
deleted file mode 100644
index a7571ee..0000000
--- a/services/store/memstore/util_test.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package memstore
-
-import (
- "runtime"
- "testing"
-
- "veyron2/storage"
-)
-
-func mkdir(t *testing.T, st *Store, tr *Transaction, path string) (storage.ID, interface{}) {
- _, file, line, _ := runtime.Caller(1)
- dir := &Dir{}
- stat, err := st.Bind(path).Put(rootPublicID, tr, dir)
- if err != nil || stat == nil {
-		t.Fatalf("%s(%d): mkdir %s: %s", file, line, path, err)
- }
- return stat.ID, dir
-}
-
-func get(t *testing.T, st *Store, tr *Transaction, path string) interface{} {
- _, file, line, _ := runtime.Caller(1)
- e, err := st.Bind(path).Get(rootPublicID, tr)
- if err != nil {
- t.Fatalf("%s(%d): can't get %s: %s", file, line, path, err)
- }
- return e.Value
-}
-
-func put(t *testing.T, st *Store, tr *Transaction, path string, v interface{}) storage.ID {
- _, file, line, _ := runtime.Caller(1)
- stat, err := st.Bind(path).Put(rootPublicID, tr, v)
- if err != nil {
- t.Errorf("%s(%d): can't put %s: %s", file, line, path, err)
- }
- if _, err := st.Bind(path).Get(rootPublicID, tr); err != nil {
- t.Errorf("%s(%d): can't get %s: %s", file, line, path, err)
- }
- if stat != nil {
- return stat.ID
- }
- return storage.ID{}
-}
-
-func remove(t *testing.T, st *Store, tr *Transaction, path string) {
- if err := st.Bind(path).Remove(rootPublicID, tr); err != nil {
- _, file, line, _ := runtime.Caller(1)
- t.Errorf("%s(%d): can't remove %s: %s", file, line, path, err)
- }
-}
-
-func commit(t *testing.T, tr *Transaction) {
- if err := tr.Commit(); err != nil {
- _, file, line, _ := runtime.Caller(1)
- t.Fatalf("%s(%d): Transaction aborted: %s", file, line, err)
- }
-}
-
-func expectExists(t *testing.T, st *Store, tr *Transaction, path string) {
- _, file, line, _ := runtime.Caller(1)
- if ok, _ := st.Bind(path).Exists(rootPublicID, tr); !ok {
- t.Errorf("%s(%d): does not exist: %s", file, line, path)
- }
-}
-
-func expectNotExists(t *testing.T, st *Store, tr *Transaction, path string) {
- if e, err := st.Bind(path).Get(rootPublicID, tr); err == nil {
- _, file, line, _ := runtime.Caller(1)
- t.Errorf("%s(%d): should not exist: %s: got %+v", file, line, path, e.Value)
- }
-}
-
-func expectValue(t *testing.T, st *Store, tr *Transaction, path string, v interface{}) {
- _, file, line, _ := runtime.Caller(1)
- e, err := st.Bind(path).Get(rootPublicID, tr)
- if err != nil {
- t.Errorf("%s(%d): does not exist: %s", file, line, path)
- return
- }
- if e.Value != v {
-		t.Errorf("%s(%d): expected %+v, got %+v", file, line, v, e.Value)
- }
-}
diff --git a/services/store/memstore/watch/glob_processor.go b/services/store/memstore/watch/glob_processor.go
deleted file mode 100644
index 500e256..0000000
--- a/services/store/memstore/watch/glob_processor.go
+++ /dev/null
@@ -1,189 +0,0 @@
-package watch
-
-import (
- iquery "veyron/services/store/memstore/query"
- "veyron/services/store/memstore/state"
-
- "veyron2/security"
- "veyron2/services/watch/types"
- "veyron2/storage"
-)
-
-// globProcessor processes log entries into storage entries that match a pattern.
-type globProcessor struct {
- // hasProcessedState is true iff the initial state has been processed.
- hasProcessedState bool
- // pid is the identity of the client watching for changes.
- pid security.PublicID
- // path on which the watch is placed. Returned names are rooted at this path.
- path storage.PathName
- // pattern that the returned names match.
- pattern string
- // st is the store state as of the last processed event.
- st *state.State
- // matches is a map of each matching name to the id of the object at that
- // name, as of the last processed event.
- matches map[string]storage.ID
-}
-
-func newGlobProcessor(pid security.PublicID, path storage.PathName,
- pattern string) (reqProcessor, error) {
-
- return &globProcessor{
- hasProcessedState: false,
- pid: pid,
- path: path,
- pattern: pattern,
- }, nil
-}
-
-func (p *globProcessor) processState(st *state.State) ([]types.Change, error) {
- // Check that the initial state has not already been processed.
- if p.hasProcessedState {
- return nil, errInitialStateAlreadyProcessed
- }
- p.hasProcessedState = true
-
- // Find all names that match the pattern.
- sn := st.MutableSnapshot()
- matches, err := glob(sn, p.pid, p.path, p.pattern)
- if err != nil {
- return nil, err
- }
- p.st = st
- p.matches = matches
-
- var changes []types.Change
-
- // Create a change for every matching name.
- for name, id := range matches {
- cell := sn.Find(id)
- entry := cell.GetEntry()
- change := types.Change{
- Name: name,
- State: types.Exists,
- Value: entry,
- }
- // TODO(tilaks): don't clone change.
- changes = append(changes, change)
- }
-
- return changes, nil
-}
-
-func (p *globProcessor) processTransaction(mus *state.Mutations) ([]types.Change, error) {
- // Ensure that the initial state has been processed.
- if !p.hasProcessedState {
- return nil, errInitialStateNotProcessed
- }
-
- previousMatches := p.matches
- // Apply the transaction to the state.
- if err := p.st.ApplyMutations(mus); err != nil {
- return nil, err
- }
- // Find all names that match the pattern in the new state.
- sn := p.st.MutableSnapshot()
- newMatches, err := glob(sn, p.pid, p.path, p.pattern)
- if err != nil {
- return nil, err
- }
- p.matches = newMatches
-
- var changes []types.Change
-
- removed, updated := diffMatches(previousMatches, newMatches, mus.Delta)
-
- // Create a change for every matching name that was removed.
- for name := range removed {
- change := types.Change{
- Name: name,
- State: types.DoesNotExist,
- }
- // TODO(tilaks): don't clone change
- changes = append(changes, change)
- }
-
- // Create a change for every matching name that was updated.
- for name := range updated {
- id := newMatches[name]
- cell := sn.Find(id)
- entry := cell.GetEntry()
- change := types.Change{
- Name: name,
- State: types.Exists,
- Value: entry,
- }
- // TODO(tilaks): don't clone change.
- changes = append(changes, change)
- }
-
- return changes, nil
-}
-
-// diffMatches returns the names that have been removed or updated.
-//
-// A name is removed if it can no longer be resolved, or if the object at that
-// name is no longer accessible.
-//
-// A name is updated if
-// 1) it is newly added.
-// 2) the object at that name is now accessible.
-// 3) the object at the name has a new value or new references.
-// 4) the object at that name replaced a previous object.
-func diffMatches(previousMatches, newMatches map[string]storage.ID,
- delta map[storage.ID]*state.Mutation) (removed, updated map[string]struct{}) {
-
- removed = make(map[string]struct{})
- updated = make(map[string]struct{})
- present := struct{}{}
-
- for name, previousID := range previousMatches {
- if newID, ok := newMatches[name]; !ok {
- // There is no longer an object at this name.
- removed[name] = present
- } else if newID != previousID {
- // The object at this name was replaced.
- updated[name] = present
- }
- }
-
- for name, newID := range newMatches {
- if _, ok := previousMatches[name]; !ok {
- // An object was added at this name.
- updated[name] = present
- continue
- }
- if _, ok := delta[newID]; ok {
- // The value or implicit directory of the object at this name was
- // updated.
- updated[name] = present
- }
- }
-
- return
-}
-
-// glob returns all names in a snapshot that match a pattern. Each name maps to
-// the id of the object in the snapshot at that name.
-func glob(sn state.Snapshot, pid security.PublicID, path storage.PathName,
- pattern string) (map[string]storage.ID, error) {
-
- matches := make(map[string]storage.ID)
-
- it, err := iquery.GlobIterator(sn, pid, path, pattern)
- if err != nil {
- return nil, err
- }
-
- for it.IsValid() {
- name := it.Name()
- matchName := append(path, storage.ParsePath(name)...).String()
- entry := it.Get()
- id := entry.Stat.ID
- matches[matchName] = id
- it.Next()
- }
-
- return matches, nil
-}
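
diffMatches above is pure bookkeeping over two name-to-ID maps plus a delta set, so it is easy to exercise in isolation. Here is a standalone sketch that follows the same classification rules, with plain ints standing in for storage.ID and a bool set standing in for the mutation delta.

```go
package main

import "fmt"

// diffMatches classifies names between two glob results: a name is "removed"
// if it vanished, and "updated" if it is new, points at a different object,
// or its object appears in the delta.
func diffMatches(prev, next map[string]int, delta map[int]bool) (removed, updated map[string]bool) {
	removed = make(map[string]bool)
	updated = make(map[string]bool)
	for name, prevID := range prev {
		if nextID, ok := next[name]; !ok {
			removed[name] = true
		} else if nextID != prevID {
			updated[name] = true
		}
	}
	for name, nextID := range next {
		if _, ok := prev[name]; !ok {
			updated[name] = true
			continue
		}
		if delta[nextID] {
			updated[name] = true
		}
	}
	return
}

func main() {
	prev := map[string]int{"a": 1, "a/b": 2}
	next := map[string]int{"a": 1, "a/c": 3}
	delta := map[int]bool{1: true}
	removed, updated := diffMatches(prev, next, delta)
	fmt.Println("removed:", removed) // a/b
	fmt.Println("updated:", updated) // a (in delta), a/c (new)
}
```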
diff --git a/services/store/memstore/watch/glob_processor_test.go b/services/store/memstore/watch/glob_processor_test.go
deleted file mode 100644
index 02f73c7..0000000
--- a/services/store/memstore/watch/glob_processor_test.go
+++ /dev/null
@@ -1,405 +0,0 @@
-package watch
-
-import (
- "testing"
-
- "veyron/services/store/memstore"
- watchtesting "veyron/services/store/memstore/testing"
-
- "veyron2/storage"
-)
-
-func TestGlobProcessState(t *testing.T) {
- // Create a new store.
- dbName, st, cleanup := createStore(t)
- defer cleanup()
-
-	// Put /, /a, /a/b, /a/c, /a/d.
- tr := memstore.NewTransaction()
- id1 := put(t, st, tr, "/", "val1")
- id2 := put(t, st, tr, "/a", "val2")
- put(t, st, tr, "/a/b", "val3")
- id4 := put(t, st, tr, "/a/c", "val4")
- // Test duplicate paths to the same object.
- put(t, st, tr, "/a/d", id4)
- commit(t, tr)
-
- // Remove /a/b.
- tr = memstore.NewTransaction()
- remove(t, st, tr, "/a/b")
- commit(t, tr)
- gc(t, st)
-
- if err := st.Close(); err != nil {
- t.Fatalf("Close() failed: %v", err)
- }
-
- // Re-create a new store. This should compress the log, creating an initial
- // state containing / and /a.
- st, cleanup = openStore(t, dbName)
- defer cleanup()
-
- log, cleanup := openLog(t, dbName)
- defer cleanup()
-
- rootRecursiveProcessor := createGlobProcessor(t, storage.ParsePath("/"), "...")
- rootListProcessor := createGlobProcessor(t, storage.ParsePath("/"), "*")
- aRecursiveProcessor := createGlobProcessor(t, storage.ParsePath("/a"), "...")
- aListProcessor := createGlobProcessor(t, storage.ParsePath("/a"), "*")
-
- // Expect initial state that contains /, /a, /a/c and /a/d.
- logst := readState(t, log)
-
- changes := processState(t, rootRecursiveProcessor, logst, 4)
- watchtesting.ExpectEntryExists(t, changes, "", id1, "val1")
- watchtesting.ExpectEntryExists(t, changes, "a", id2, "val2")
- watchtesting.ExpectEntryExists(t, changes, "a/c", id4, "val4")
- watchtesting.ExpectEntryExists(t, changes, "a/d", id4, "val4")
-
- changes = processState(t, rootListProcessor, logst, 1)
- watchtesting.ExpectEntryExists(t, changes, "a", id2, "val2")
-
- changes = processState(t, aRecursiveProcessor, logst, 3)
- watchtesting.ExpectEntryExists(t, changes, "a", id2, "val2")
- watchtesting.ExpectEntryExists(t, changes, "a/c", id4, "val4")
- watchtesting.ExpectEntryExists(t, changes, "a/d", id4, "val4")
-
-	changes = processState(t, aListProcessor, logst, 2)
- watchtesting.ExpectEntryExists(t, changes, "a/c", id4, "val4")
- watchtesting.ExpectEntryExists(t, changes, "a/d", id4, "val4")
-}
-
-func TestGlobProcessTransactionAdd(t *testing.T) {
- dbName, st, cleanup := createStore(t)
- defer cleanup()
-
- log, cleanup := openLog(t, dbName)
- defer cleanup()
-
- rootRecursiveProcessor := createGlobProcessor(t, storage.ParsePath("/"), "...")
- rootListProcessor := createGlobProcessor(t, storage.ParsePath("/"), "*")
- aRecursiveProcessor := createGlobProcessor(t, storage.ParsePath("/a"), "...")
- aListProcessor := createGlobProcessor(t, storage.ParsePath("/a"), "*")
- bRecursiveProcessor := createGlobProcessor(t, storage.ParsePath("/a/b"), "...")
-
- logst := readState(t, log)
- processState(t, rootRecursiveProcessor, logst.DeepCopy(), 0)
- processState(t, rootListProcessor, logst.DeepCopy(), 0)
- processState(t, aRecursiveProcessor, logst.DeepCopy(), 0)
- processState(t, aListProcessor, logst.DeepCopy(), 0)
- processState(t, bRecursiveProcessor, logst.DeepCopy(), 0)
-
-	// First transaction, put /, /a, /a/b, /a/c, /a/d.
- tr := memstore.NewTransaction()
- id1 := put(t, st, tr, "/", "val1")
- id2 := put(t, st, tr, "/a", "val2")
- id3 := put(t, st, tr, "/a/b", "val3")
- id4 := put(t, st, tr, "/a/c", "val4")
- // Test duplicate paths to the same object.
- put(t, st, tr, "/a/d", id4)
- commit(t, tr)
-
-	// Expect transaction that adds /, /a, /a/b, /a/c and /a/d.
- mus := readTransaction(t, log)
-
- changes := processTransaction(t, rootRecursiveProcessor, mus, 5)
- watchtesting.ExpectEntryExists(t, changes, "", id1, "val1")
- watchtesting.ExpectEntryExists(t, changes, "a", id2, "val2")
- watchtesting.ExpectEntryExists(t, changes, "a/b", id3, "val3")
- watchtesting.ExpectEntryExists(t, changes, "a/c", id4, "val4")
- watchtesting.ExpectEntryExists(t, changes, "a/d", id4, "val4")
-
- changes = processTransaction(t, rootListProcessor, mus, 1)
- watchtesting.ExpectEntryExists(t, changes, "a", id2, "val2")
-
- changes = processTransaction(t, aRecursiveProcessor, mus, 4)
- watchtesting.ExpectEntryExists(t, changes, "a", id2, "val2")
- watchtesting.ExpectEntryExists(t, changes, "a/b", id3, "val3")
- watchtesting.ExpectEntryExists(t, changes, "a/c", id4, "val4")
- watchtesting.ExpectEntryExists(t, changes, "a/d", id4, "val4")
-
- changes = processTransaction(t, aListProcessor, mus, 3)
- watchtesting.ExpectEntryExists(t, changes, "a/b", id3, "val3")
- watchtesting.ExpectEntryExists(t, changes, "a/c", id4, "val4")
- watchtesting.ExpectEntryExists(t, changes, "a/d", id4, "val4")
-
- changes = processTransaction(t, bRecursiveProcessor, mus, 1)
- watchtesting.ExpectEntryExists(t, changes, "a/b", id3, "val3")
-}
-
-func TestGlobProcessTransactionEmptyPath(t *testing.T) {
- dbName, st, cleanup := createStore(t)
- defer cleanup()
-
- log, cleanup := openLog(t, dbName)
- defer cleanup()
-
- bRecursiveProcessor := createGlobProcessor(t, storage.ParsePath("/a/b"), "...")
-
- expectState(t, log, bRecursiveProcessor, 0)
-
- // First transaction, put /, /a.
- tr := memstore.NewTransaction()
- put(t, st, tr, "/", "val1")
- put(t, st, tr, "/a", "val2")
- commit(t, tr)
-
- // Expect no change.
- expectTransaction(t, log, bRecursiveProcessor, 0)
-
- // Next transaction, put /a/b.
- tr = memstore.NewTransaction()
- id3 := put(t, st, tr, "/a/b", "val3")
- commit(t, tr)
-
- // Expect transaction that adds /a/b.
- changes := expectTransaction(t, log, bRecursiveProcessor, 1)
- watchtesting.ExpectEntryExists(t, changes, "a/b", id3, "val3")
-}
-
-func TestGlobProcessTransactionUpdate(t *testing.T) {
- dbName, st, cleanup := createStore(t)
- defer cleanup()
-
- log, cleanup := openLog(t, dbName)
- defer cleanup()
-
- rootRecursiveProcessor := createGlobProcessor(t, storage.ParsePath("/"), "...")
- rootListProcessor := createGlobProcessor(t, storage.ParsePath("/"), "*")
- aRecursiveProcessor := createGlobProcessor(t, storage.ParsePath("/a"), "...")
- aListProcessor := createGlobProcessor(t, storage.ParsePath("/a"), "*")
- bRecursiveProcessor := createGlobProcessor(t, storage.ParsePath("/a/b"), "...")
-
- logst := readState(t, log)
- processState(t, rootRecursiveProcessor, logst.DeepCopy(), 0)
- processState(t, rootListProcessor, logst.DeepCopy(), 0)
- processState(t, aRecursiveProcessor, logst.DeepCopy(), 0)
- processState(t, aListProcessor, logst.DeepCopy(), 0)
- processState(t, bRecursiveProcessor, logst.DeepCopy(), 0)
-
- // First transaction, put /, /a, /a/b.
- tr := memstore.NewTransaction()
- put(t, st, tr, "/", "val1")
- put(t, st, tr, "/a", "val2")
- id3 := put(t, st, tr, "/a/b", "val3")
- commit(t, tr)
-
- mus := readTransaction(t, log)
-	processTransaction(t, rootRecursiveProcessor, mus, 3)
-	processTransaction(t, rootListProcessor, mus, 1)
-	processTransaction(t, aRecursiveProcessor, mus, 2)
-	processTransaction(t, aListProcessor, mus, 1)
-	processTransaction(t, bRecursiveProcessor, mus, 1)
-
-	// Next transaction, update /a/b.
- tr = memstore.NewTransaction()
- put(t, st, tr, "/a/b", "val4")
- commit(t, tr)
-
- // Expect transaction that updates /a/b.
- mus = readTransaction(t, log)
-
-	changes := processTransaction(t, rootRecursiveProcessor, mus, 1)
-	watchtesting.ExpectEntryExists(t, changes, "a/b", id3, "val4")
-
-	processTransaction(t, rootListProcessor, mus, 0)
-
-	changes = processTransaction(t, aRecursiveProcessor, mus, 1)
-	watchtesting.ExpectEntryExists(t, changes, "a/b", id3, "val4")
-
-	changes = processTransaction(t, aListProcessor, mus, 1)
-	watchtesting.ExpectEntryExists(t, changes, "a/b", id3, "val4")
-
-	changes = processTransaction(t, bRecursiveProcessor, mus, 1)
-	watchtesting.ExpectEntryExists(t, changes, "a/b", id3, "val4")
-}
-
-func TestGlobProcessTransactionRemove(t *testing.T) {
- dbName, st, cleanup := createStore(t)
- defer cleanup()
-
- log, cleanup := openLog(t, dbName)
- defer cleanup()
-
- rootRecursiveProcessor := createGlobProcessor(t, storage.ParsePath("/"), "...")
- rootListProcessor := createGlobProcessor(t, storage.ParsePath("/"), "*")
- aRecursiveProcessor := createGlobProcessor(t, storage.ParsePath("/a"), "...")
- aListProcessor := createGlobProcessor(t, storage.ParsePath("/a"), "*")
- bRecursiveProcessor := createGlobProcessor(t, storage.ParsePath("/a/b"), "...")
-
- logst := readState(t, log)
- processState(t, rootRecursiveProcessor, logst.DeepCopy(), 0)
- processState(t, rootListProcessor, logst.DeepCopy(), 0)
- processState(t, aRecursiveProcessor, logst.DeepCopy(), 0)
- processState(t, aListProcessor, logst.DeepCopy(), 0)
- processState(t, bRecursiveProcessor, logst.DeepCopy(), 0)
-
- // First transaction, put /, /a, /a/b.
- tr := memstore.NewTransaction()
- put(t, st, tr, "/", "val1")
- id2 := put(t, st, tr, "/a", "val2")
- put(t, st, tr, "/a/b", "val3")
- commit(t, tr)
-
- mus := readTransaction(t, log)
- processTransaction(t, rootRecursiveProcessor, mus, 3)
- processTransaction(t, rootListProcessor, mus, 1)
- processTransaction(t, aRecursiveProcessor, mus, 2)
- processTransaction(t, aListProcessor, mus, 1)
- processTransaction(t, bRecursiveProcessor, mus, 1)
-
- // Next transaction, remove /a/b.
- tr = memstore.NewTransaction()
- remove(t, st, tr, "/a/b")
- commit(t, tr)
-
- // Expect transaction that updates /a and removes /a/b.
- mus = readTransaction(t, log)
-
- changes := processTransaction(t, rootRecursiveProcessor, mus, 2)
- // TODO(tilaks): Should we report implicit directory changes?
- watchtesting.ExpectEntryExists(t, changes, "a", id2, "val2")
- watchtesting.ExpectEntryDoesNotExist(t, changes, "a/b")
-
- changes = processTransaction(t, rootListProcessor, mus, 1)
- watchtesting.ExpectEntryExists(t, changes, "a", id2, "val2")
-
- changes = processTransaction(t, aRecursiveProcessor, mus, 2)
- watchtesting.ExpectEntryExists(t, changes, "a", id2, "val2")
- watchtesting.ExpectEntryDoesNotExist(t, changes, "a/b")
-
- changes = processTransaction(t, aListProcessor, mus, 1)
- watchtesting.ExpectEntryDoesNotExist(t, changes, "a/b")
-
- changes = processTransaction(t, bRecursiveProcessor, mus, 1)
- watchtesting.ExpectEntryDoesNotExist(t, changes, "a/b")
-
- // Garbage-collect the node at /a/b.
- gc(t, st)
-
- // Expect no change.
- mus = readTransaction(t, log)
- processTransaction(t, rootRecursiveProcessor, mus, 0)
- processTransaction(t, rootListProcessor, mus, 0)
- processTransaction(t, aRecursiveProcessor, mus, 0)
- processTransaction(t, aListProcessor, mus, 0)
- processTransaction(t, bRecursiveProcessor, mus, 0)
-}
-
-func TestGlobProcessTransactionRemoveRecursive(t *testing.T) {
- dbName, st, cleanup := createStore(t)
- defer cleanup()
-
- log, cleanup := openLog(t, dbName)
- defer cleanup()
-
- rootRecursiveProcessor := createGlobProcessor(t, storage.ParsePath("/"), "...")
- rootListProcessor := createGlobProcessor(t, storage.ParsePath("/"), "*")
- aRecursiveProcessor := createGlobProcessor(t, storage.ParsePath("/a"), "...")
- aListProcessor := createGlobProcessor(t, storage.ParsePath("/a"), "*")
- bRecursiveProcessor := createGlobProcessor(t, storage.ParsePath("/a/b"), "...")
-
- logst := readState(t, log)
- processState(t, rootRecursiveProcessor, logst.DeepCopy(), 0)
- processState(t, rootListProcessor, logst.DeepCopy(), 0)
- processState(t, aRecursiveProcessor, logst.DeepCopy(), 0)
- processState(t, aListProcessor, logst.DeepCopy(), 0)
- processState(t, bRecursiveProcessor, logst.DeepCopy(), 0)
-
- // First transaction, put /, /a, /a/b.
- tr := memstore.NewTransaction()
- id1 := put(t, st, tr, "/", "val1")
- put(t, st, tr, "/a", "val2")
- put(t, st, tr, "/a/b", "val3")
- commit(t, tr)
-
- mus := readTransaction(t, log)
- processTransaction(t, rootRecursiveProcessor, mus, 3)
- processTransaction(t, rootListProcessor, mus, 1)
- processTransaction(t, aRecursiveProcessor, mus, 2)
- processTransaction(t, aListProcessor, mus, 1)
- processTransaction(t, bRecursiveProcessor, mus, 1)
-
- // Next transaction, remove /a.
- tr = memstore.NewTransaction()
- remove(t, st, tr, "/a")
- commit(t, tr)
-
- // Expect transaction that removes /a and /a/b.
- mus = readTransaction(t, log)
-
- changes := processTransaction(t, rootRecursiveProcessor, mus, 3)
- // TODO(tilaks): Should we report implicit directory changes?
- watchtesting.ExpectEntryExists(t, changes, "", id1, "val1")
- watchtesting.ExpectEntryDoesNotExist(t, changes, "a")
- watchtesting.ExpectEntryDoesNotExist(t, changes, "a/b")
-
- changes = processTransaction(t, rootListProcessor, mus, 1)
- watchtesting.ExpectEntryDoesNotExist(t, changes, "a")
-
- changes = processTransaction(t, aRecursiveProcessor, mus, 2)
- watchtesting.ExpectEntryDoesNotExist(t, changes, "a")
- watchtesting.ExpectEntryDoesNotExist(t, changes, "a/b")
-
- changes = processTransaction(t, aListProcessor, mus, 1)
- watchtesting.ExpectEntryDoesNotExist(t, changes, "a/b")
-
- changes = processTransaction(t, bRecursiveProcessor, mus, 1)
- watchtesting.ExpectEntryDoesNotExist(t, changes, "a/b")
-}
-
-func TestGlobProcessTransactionReplace(t *testing.T) {
- dbName, st, cleanup := createStore(t)
- defer cleanup()
-
- log, cleanup := openLog(t, dbName)
- defer cleanup()
-
- rootRecursiveProcessor := createGlobProcessor(t, storage.ParsePath("/"), "...")
- rootListProcessor := createGlobProcessor(t, storage.ParsePath("/"), "*")
- aRecursiveProcessor := createGlobProcessor(t, storage.ParsePath("/a"), "...")
- aListProcessor := createGlobProcessor(t, storage.ParsePath("/a"), "*")
-
- logst := readState(t, log)
-
- processState(t, rootRecursiveProcessor, logst.DeepCopy(), 0)
- processState(t, rootListProcessor, logst.DeepCopy(), 0)
- processState(t, aRecursiveProcessor, logst.DeepCopy(), 0)
- processState(t, aListProcessor, logst.DeepCopy(), 0)
-
-	// First transaction, put /, /a.
- tr := memstore.NewTransaction()
- id1 := put(t, st, tr, "/", "val1")
- put(t, st, tr, "/a", "val2")
- commit(t, tr)
-
- mus := readTransaction(t, log)
- processTransaction(t, rootRecursiveProcessor, mus, 2)
- processTransaction(t, rootListProcessor, mus, 1)
- processTransaction(t, aRecursiveProcessor, mus, 1)
- processTransaction(t, aListProcessor, mus, 0)
-
- // Next transaction, replace /a.
- tr = memstore.NewTransaction()
- remove(t, st, tr, "/a")
- id2 := put(t, st, tr, "/a", "val2")
- commit(t, tr)
-
- // Expect transaction that updates /a.
- mus = readTransaction(t, log)
-
- changes := processTransaction(t, rootRecursiveProcessor, mus, 2)
- // TODO(tilaks): Should we report implicit directory changes?
- watchtesting.ExpectEntryExists(t, changes, "", id1, "val1")
- watchtesting.ExpectEntryExists(t, changes, "a", id2, "val2")
-
- changes = processTransaction(t, rootListProcessor, mus, 1)
- watchtesting.ExpectEntryExists(t, changes, "a", id2, "val2")
-
- changes = processTransaction(t, aRecursiveProcessor, mus, 1)
- watchtesting.ExpectEntryExists(t, changes, "a", id2, "val2")
-
- processTransaction(t, aListProcessor, mus, 0)
-}
-
-// TODO(tilaks): test ACL update.
diff --git a/services/store/memstore/watch/processor.go b/services/store/memstore/watch/processor.go
deleted file mode 100644
index a3779e6..0000000
--- a/services/store/memstore/watch/processor.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package watch
-
-import (
- "veyron/services/store/memstore/state"
- "veyron2/services/watch/types"
- "veyron2/verror"
-)
-
-var (
- errInitialStateAlreadyProcessed = verror.Internalf("cannot process state after processing the initial state")
- errInitialStateNotProcessed = verror.Internalf("cannot process a transaction before processing the initial state")
-)
-
-// reqProcessor processes log entries into watch changes. At first,
-// processState() must be called with the initial state recorded in the log.
-// Subsequently, processTransaction() may be called with transactions recorded
-// consecutively in the log.
-type reqProcessor interface {
- // processState returns a set of changes that represent the initial state of
- // the store. The returned changes need not be the sequence of changes that
- // originally created the initial state (e.g. in the case of compress), but
- // are sufficient to re-construct the state viewable within the request.
- // processState may modify its input.
- processState(st *state.State) ([]types.Change, error)
-
- // processTransaction returns the set of changes made in some transaction.
- // The changes are returned in no specific order.
- // processTransaction may modify its input.
- processTransaction(mu *state.Mutations) ([]types.Change, error)
-}
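
The reqProcessor contract is a strict two-phase protocol: exactly one processState call, then any number of processTransaction calls, with errors for calls out of order. A toy implementation over simplified inputs (string slices instead of state.State and state.Mutations; all names here are hypothetical) makes the error cases concrete:

```go
package main

import (
	"errors"
	"fmt"
)

type change struct{ name string }

var (
	errStateTwice = errors.New("initial state already processed")
	errNoState    = errors.New("initial state not processed")
)

// countingProcessor enforces the state-then-transactions protocol and counts
// the changes it emits.
type countingProcessor struct {
	hasProcessedState bool
	seen              int
}

func (p *countingProcessor) processState(names []string) ([]change, error) {
	if p.hasProcessedState {
		return nil, errStateTwice
	}
	p.hasProcessedState = true
	return p.emit(names), nil
}

func (p *countingProcessor) processTransaction(names []string) ([]change, error) {
	if !p.hasProcessedState {
		return nil, errNoState
	}
	return p.emit(names), nil
}

func (p *countingProcessor) emit(names []string) []change {
	cs := make([]change, len(names))
	for i, n := range names {
		cs[i] = change{name: n}
	}
	p.seen += len(cs)
	return cs
}

func main() {
	p := &countingProcessor{}
	if _, err := p.processTransaction([]string{"a"}); err != nil {
		fmt.Println("expected error:", err) // transaction before state
	}
	p.processState([]string{"/", "a"})
	p.processTransaction([]string{"a/b"})
	fmt.Println("changes seen:", p.seen)
}
```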
diff --git a/services/store/memstore/watch/raw_processor.go b/services/store/memstore/watch/raw_processor.go
deleted file mode 100644
index a5c012e..0000000
--- a/services/store/memstore/watch/raw_processor.go
+++ /dev/null
@@ -1,206 +0,0 @@
-package watch
-
-import (
- "veyron/services/store/memstore/refs"
- "veyron/services/store/memstore/state"
- "veyron/services/store/raw"
-
- "veyron2/security"
- "veyron2/services/watch/types"
- "veyron2/storage"
- "veyron2/verror"
-)
-
-var (
- rootPath = storage.ParsePath("/")
- nullID storage.ID
-)
-
-// rawProcessor processes log entries into mutations.
-type rawProcessor struct {
-	// hasProcessedState is true iff the initial state has been processed.
- hasProcessedState bool
- // pid is the identity of the client watching for changes.
- pid security.PublicID
- // rootID is the id of the root object after processing a change.
- rootID storage.ID
- // rootVersion is the version of the store root after processing a change.
- rootVersion raw.Version
- // preparedDeletions is the set of ids for which deletion changes have been
- // sent by watch, but deleted entries have not been processed from the log.
- // This set consists of deleted store roots, because
- // 1) A root deletion is propagated as a deletion change on the root.
- // 2) A root deletion must be propagated immediately.
- // 3) GC is lazy, so we aggressively create a deletion change for the root.
- // An id is removed from preparedDeletions when the corresponding deleted
- // entry is processed from the log.
- preparedDeletions map[storage.ID]bool
-}
-
-func newRawProcessor(pid security.PublicID) (reqProcessor, error) {
- return &rawProcessor{
- hasProcessedState: false,
- pid: pid,
- preparedDeletions: make(map[storage.ID]bool),
- }, nil
-}
-
-func (p *rawProcessor) processState(st *state.State) ([]types.Change, error) {
- // Check that the initial state has not already been processed.
- if p.hasProcessedState {
- return nil, errInitialStateAlreadyProcessed
- }
- p.hasProcessedState = true
-
- sn := st.MutableSnapshot()
-
- rootID, err := rootID(p.pid, sn)
- if err != nil {
- return nil, err
- }
- p.rootID = rootID
-
- var changes []types.Change
-
- // Create a change for each id in the state. In each change, the object
- // exists, has no PriorVersion, has the Version of the new cell, and
- // has the Value, Tags and Dir of the new cell.
- for it := sn.NewIterator(p.pid, nil,
- state.ListObjects, state.RecursiveFilter); it.IsValid(); it.Next() {
-
- entry := it.Get()
- id := entry.Stat.ID
- // Retrieve Value, Tags and Dir from the corresponding cell.
- cell := sn.Find(id)
- // If this object is the root, update rootVersion.
- isRoot := id == p.rootID
- if isRoot {
- p.rootVersion = cell.Version
- }
- value := &raw.Mutation{
- ID: id,
- PriorVersion: raw.NoVersion,
- Version: cell.Version,
- IsRoot: isRoot,
- Value: cell.Value,
- Dir: flattenDir(refs.FlattenDir(cell.Dir)),
- }
- change := types.Change{
- State: types.Exists,
- Value: value,
- }
- // TODO(tilaks): don't clone change
- changes = append(changes, change)
- }
- return changes, nil
-}
-
-func (p *rawProcessor) processTransaction(mus *state.Mutations) ([]types.Change, error) {
- // Ensure that the initial state has been processed.
- if !p.hasProcessedState {
- return nil, errInitialStateNotProcessed
- }
-
- // If the root was deleted, add extra space for a prepared deletion.
- extra := 0
- if mus.SetRootID && !mus.RootID.IsValid() {
- extra = 1
- }
- changes := make([]types.Change, 0, len(mus.Delta)+len(mus.Deletions)+extra)
-
- if mus.SetRootID {
- if mus.RootID.IsValid() {
- p.rootID = mus.RootID
- } else {
- // The root was deleted, prepare a deletion change.
- value := &raw.Mutation{
- ID: p.rootID,
- PriorVersion: p.rootVersion,
- Version: raw.NoVersion,
- IsRoot: true,
- }
- // TODO(tilaks): don't clone value.
- change := types.Change{
- State: types.DoesNotExist,
- Value: value,
- }
- changes = append(changes, change)
-
- p.preparedDeletions[p.rootID] = true
- p.rootID = nullID
- p.rootVersion = raw.NoVersion
- }
- }
-
- // Create a change for each mutation. In each change, the object exists,
- // has the PriorVersion, Version, Value, Tags and Dir specified in
- // the mutation.
- for id, mu := range mus.Delta {
- // If this object is the root, update rootVersion.
- isRoot := id == p.rootID
- if isRoot {
- p.rootVersion = mu.Postcondition
- }
- value := &raw.Mutation{
- ID: id,
- PriorVersion: mus.Preconditions[id],
- Version: mu.Postcondition,
- IsRoot: isRoot,
- Value: mu.Value,
- Dir: flattenDir(mu.Dir),
- }
- // TODO(tilaks): don't clone value.
- change := types.Change{
- State: types.Exists,
- Value: value,
- }
- // TODO(tilaks): don't clone change.
- changes = append(changes, change)
- }
- // Create a change for each deletion (if one has not already been prepared).
- // In each change, the object does not exist, has the specified PriorVersion,
- // has no Version, and has nil Value, Tags and Dir.
- for id, precondition := range mus.Deletions {
- if p.preparedDeletions[id] {
- delete(p.preparedDeletions, id)
- continue
- }
- value := &raw.Mutation{
- ID: id,
- PriorVersion: precondition,
- Version: raw.NoVersion,
- IsRoot: false,
- }
- // TODO(tilaks): don't clone value.
- change := types.Change{
- State: types.DoesNotExist,
- Value: value,
- }
- // TODO(tilaks): don't clone change.
- changes = append(changes, change)
- }
- return changes, nil
-}
-
-// TODO(tilaks): revisit when raw.Mutation.Dir is of type []*raw.DEntry
-// (once we support optional structs in the idl).
-func flattenDir(pdir []*raw.DEntry) []raw.DEntry {
- fdir := make([]raw.DEntry, len(pdir))
- for i, p := range pdir {
- fdir[i] = *p
- }
- return fdir
-}
-
-// rootID returns the id of the root object in the snapshot. If the snapshot
-// does not have a root, nullID is returned.
-func rootID(pid security.PublicID, sn *state.MutableSnapshot) (storage.ID, error) {
- entry, err := sn.Get(pid, rootPath)
- if verror.Is(err, verror.NotFound) {
- return nullID, nil
- }
- if err != nil {
- return nullID, err
- }
- return entry.Stat.ID, nil
-}
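
The preparedDeletions bookkeeping above deserves a distilled view: a root deletion is reported eagerly, and the lazily garbage-collected deletion for the same ID must then be suppressed exactly once. The sketch below captures just that rule, with ints standing in for storage.ID and print statements standing in for watch changes.

```go
package main

import "fmt"

// processor tracks IDs whose deletion change was already sent eagerly.
type processor struct {
	prepared map[int]bool
}

// reportRootDeleted sends a deletion change for the root immediately and
// remembers that it was already reported.
func (p *processor) reportRootDeleted(id int) {
	fmt.Println("change: root", id, "does not exist")
	p.prepared[id] = true
}

// processDeletion handles a deletion observed later in the log; if the id was
// already reported as a deleted root, the change is suppressed exactly once.
func (p *processor) processDeletion(id int) {
	if p.prepared[id] {
		delete(p.prepared, id)
		return
	}
	fmt.Println("change:", id, "does not exist")
}

func main() {
	p := &processor{prepared: make(map[int]bool)}
	p.reportRootDeleted(1)
	p.processDeletion(1) // suppressed
	p.processDeletion(2) // reported
}
```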
diff --git a/services/store/memstore/watch/raw_processor_test.go b/services/store/memstore/watch/raw_processor_test.go
deleted file mode 100644
index f1a94bd..0000000
--- a/services/store/memstore/watch/raw_processor_test.go
+++ /dev/null
@@ -1,218 +0,0 @@
-package watch
-
-import (
- "testing"
-
- "veyron/services/store/memstore"
- watchtesting "veyron/services/store/memstore/testing"
- "veyron/services/store/raw"
-)
-
-func TestRawProcessState(t *testing.T) {
- // Create a new store.
- dbName, st, cleanup := createStore(t)
- defer cleanup()
-
- // Put /, /a, /a/b
- tr := memstore.NewTransaction()
- id1 := put(t, st, tr, "/", "val1")
- id2 := put(t, st, tr, "/a", "val2")
- put(t, st, tr, "/a/b", "val3")
- commit(t, tr)
-
- // Remove /a/b
- tr = memstore.NewTransaction()
- remove(t, st, tr, "/a/b")
- commit(t, tr)
- gc(t, st)
-
- if err := st.Close(); err != nil {
- t.Fatalf("Close() failed: %v", err)
- }
-
- // Re-create a new store. This should compress the log, creating an initial
- // state containing / and /a.
- st, cleanup = openStore(t, dbName)
- defer cleanup()
-
- log, cleanup := openLog(t, dbName)
- defer cleanup()
- processor := createRawProcessor(t)
-
- post1 := st.Snapshot().Find(id1).Version
- post2 := st.Snapshot().Find(id2).Version
-
- // Expect initial state that
- // 1) Contains / with value val1 and implicit directory entry /a
- // 2) Contains /a with value val2
- changes := expectState(t, log, processor, 2)
- watchtesting.ExpectMutationExists(t, changes, id1, raw.NoVersion, post1, true, "val1", watchtesting.DirOf("a", id2))
- watchtesting.ExpectMutationExists(t, changes, id2, raw.NoVersion, post2, false, "val2", watchtesting.EmptyDir)
-}
-
-func TestRawProcessTransactionAddRemove(t *testing.T) {
- dbName, st, cleanup := createStore(t)
- defer cleanup()
-
- log, cleanup := openLog(t, dbName)
- defer cleanup()
- processor := createRawProcessor(t)
-
- expectState(t, log, processor, 0)
-
- // First transaction, put /, /a, /a/b
- tr := memstore.NewTransaction()
- id1 := put(t, st, tr, "/", "val1")
- id2 := put(t, st, tr, "/a", "val2")
- id3 := put(t, st, tr, "/a/b", "val3")
- commit(t, tr)
-
- post1 := st.Snapshot().Find(id1).Version
- post2 := st.Snapshot().Find(id2).Version
- post3 := st.Snapshot().Find(id3).Version
-
- // Expect transaction that
- // 1) Adds / with value val1 and implicit directory entry /a
- // 2) Adds /a with value val2 and implicit directory entry /a/b
- // 3) Adds /a/b with value val3
- changes := expectTransaction(t, log, processor, 3)
- watchtesting.ExpectMutationExists(t, changes, id1, raw.NoVersion, post1, true, "val1", watchtesting.DirOf("a", id2))
- watchtesting.ExpectMutationExists(t, changes, id2, raw.NoVersion, post2, false, "val2", watchtesting.DirOf("b", id3))
- watchtesting.ExpectMutationExists(t, changes, id3, raw.NoVersion, post3, false, "val3", watchtesting.EmptyDir)
-
- // Next transaction, remove /a/b
- tr = memstore.NewTransaction()
- remove(t, st, tr, "/a/b")
- commit(t, tr)
-
- pre2 := post2
- pre3 := post3
- post2 = st.Snapshot().Find(id2).Version
-
- // Expect transaction that removes implicit dir entry /a/b from /a
- changes = expectTransaction(t, log, processor, 1)
- watchtesting.ExpectMutationExists(t, changes, id2, pre2, post2, false, "val2", watchtesting.EmptyDir)
-
- // Garbage-collect the node at /a/b
- gc(t, st)
-
- // Expect transaction that deletes the node at /a/b
- changes = expectTransaction(t, log, processor, 1)
- watchtesting.ExpectMutationDoesNotExist(t, changes, id3, pre3, false)
-}
-
-func TestRawProcessTransactionRemoveRecursive(t *testing.T) {
- dbName, st, cleanup := createStore(t)
- defer cleanup()
-
- log, cleanup := openLog(t, dbName)
- defer cleanup()
- processor := createRawProcessor(t)
-
- expectState(t, log, processor, 0)
-
- // First transaction, put /, /a, /a/b
- tr := memstore.NewTransaction()
- id1 := put(t, st, tr, "/", "val1")
- id2 := put(t, st, tr, "/a", "val2")
- id3 := put(t, st, tr, "/a/b", "val3")
- commit(t, tr)
-
- post1 := st.Snapshot().Find(id1).Version
- post2 := st.Snapshot().Find(id2).Version
- post3 := st.Snapshot().Find(id3).Version
-
- // Assume the first transaction
- // 1) Adds / with value val1 and implicit directory entry /a
- // 2) Adds /a with value val2 and implicit directory entry /a/b
- // 3) Adds /a/b with value val3
- expectTransaction(t, log, processor, 3)
-
- // Next transaction, remove /a
- tr = memstore.NewTransaction()
- remove(t, st, tr, "/a")
- commit(t, tr)
-
- pre1 := post1
- pre2 := post2
- pre3 := post3
- post1 = st.Snapshot().Find(id1).Version
-
- // Expect transaction that removes implicit dir entry /a from /
- changes := expectTransaction(t, log, processor, 1)
- watchtesting.ExpectMutationExists(t, changes, id1, pre1, post1, true, "val1", watchtesting.EmptyDir)
-
- // Garbage-collect the nodes at /a and /a/b
- gc(t, st)
-
- // Expect transaction that deletes the nodes at /a and /a/b
- changes = expectTransaction(t, log, processor, 2)
- watchtesting.ExpectMutationDoesNotExist(t, changes, id2, pre2, false)
- watchtesting.ExpectMutationDoesNotExist(t, changes, id3, pre3, false)
-}
-
-func TestRawProcessTransactionUpdateRemoveRoot(t *testing.T) {
- dbName, st, cleanup := createStore(t)
- defer cleanup()
-
- log, cleanup := openLog(t, dbName)
- defer cleanup()
- processor := createRawProcessor(t)
-
- expectState(t, log, processor, 0)
-
- // First transaction, put /, /a
- tr := memstore.NewTransaction()
- id1 := put(t, st, tr, "/", "val1")
- id2 := put(t, st, tr, "/a", "val2")
- commit(t, tr)
-
- post1 := st.Snapshot().Find(id1).Version
- post2 := st.Snapshot().Find(id2).Version
-
- // Assume the first transaction
- // 1) Adds / with value val1 and implicit directory entry /a
- // 2) Adds /a with value val2
- expectTransaction(t, log, processor, 2)
-
- // Next transaction, update /
- tr = memstore.NewTransaction()
- put(t, st, tr, "/", "val3")
- commit(t, tr)
-
- pre1 := post1
- post1 = st.Snapshot().Find(id1).Version
-
- // Expect transaction that updates / with value val3
- changes := expectTransaction(t, log, processor, 1)
- watchtesting.ExpectMutationExists(t, changes, id1, pre1, post1, true, "val3", watchtesting.DirOf("a", id2))
-
- // Next transaction, remove /
- tr = memstore.NewTransaction()
- remove(t, st, tr, "/")
- commit(t, tr)
-
- pre1 = post1
- pre2 := post2
-
- // Expect a transaction that deletes /
- changes = expectTransaction(t, log, processor, 1)
- watchtesting.ExpectMutationDoesNotExist(t, changes, id1, pre1, true)
-
- // Garbage-collect the nodes at / and /a
- gc(t, st)
-
- // Expect transaction that deletes the nodes at / and /a
- changes = expectTransaction(t, log, processor, 1)
- watchtesting.ExpectMutationDoesNotExist(t, changes, id2, pre2, false)
-}
diff --git a/services/store/memstore/watch/test_util.go b/services/store/memstore/watch/test_util.go
deleted file mode 100644
index 32735bc..0000000
--- a/services/store/memstore/watch/test_util.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package watch
-
-import (
- "io/ioutil"
- "os"
- "runtime"
- "testing"
-
- "veyron/services/store/memstore"
- "veyron/services/store/memstore/state"
-
- "veyron2/security"
- "veyron2/services/watch/types"
- "veyron2/storage"
-)
-
-var (
- rootPublicID security.PublicID = security.FakePublicID("root")
-)
-
-func get(t *testing.T, st *memstore.Store, tr *memstore.Transaction, path string) interface{} {
- _, file, line, _ := runtime.Caller(1)
- e, err := st.Bind(path).Get(rootPublicID, tr)
- if err != nil {
- t.Fatalf("%s(%d): can't get %s: %s", file, line, path, err)
- }
- return e.Value
-}
-
-func put(t *testing.T, st *memstore.Store, tr *memstore.Transaction, path string, v interface{}) storage.ID {
- _, file, line, _ := runtime.Caller(1)
- stat, err := st.Bind(path).Put(rootPublicID, tr, v)
- if err != nil {
- t.Fatalf("%s(%d): can't put %s: %s", file, line, path, err)
- }
- if _, err := st.Bind(path).Get(rootPublicID, tr); err != nil {
- t.Fatalf("%s(%d): can't get %s: %s", file, line, path, err)
- }
- if stat != nil {
- return stat.ID
- }
- return storage.ID{}
-}
-
-func remove(t *testing.T, st *memstore.Store, tr *memstore.Transaction, path string) {
- if err := st.Bind(path).Remove(rootPublicID, tr); err != nil {
- _, file, line, _ := runtime.Caller(1)
- t.Fatalf("%s(%d): can't remove %s: %s", file, line, path, err)
- }
-}
-
-func commit(t *testing.T, tr *memstore.Transaction) {
- if err := tr.Commit(); err != nil {
- _, file, line, _ := runtime.Caller(1)
- t.Fatalf("%s(%d): Transaction aborted: %s", file, line, err)
- }
-}
-
-func gc(t *testing.T, st *memstore.Store) {
- if err := st.GC(); err != nil {
- _, file, line, _ := runtime.Caller(1)
- t.Fatalf("%s(%d): can't gc: %s", file, line, err)
- }
-}
-
-func createStore(t *testing.T) (string, *memstore.Store, func()) {
- dbName, err := ioutil.TempDir(os.TempDir(), "vstore")
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed: %v", err)
- }
- cleanup := func() {
- os.RemoveAll(dbName)
- }
-
- st, err := memstore.New(rootPublicID, dbName)
- if err != nil {
- cleanup()
- t.Fatalf("memstore.New() failed: %v", err)
- }
-
- return dbName, st, cleanup
-}
-
-func openStore(t *testing.T, dbName string) (*memstore.Store, func()) {
- st, err := memstore.New(rootPublicID, dbName)
- if err != nil {
- t.Fatalf("memstore.New() failed: %v", err)
- }
-
- return st, func() {
- os.RemoveAll(dbName)
- }
-}
-
-func openLog(t *testing.T, dbName string) (*memstore.RLog, func()) {
- log, err := memstore.OpenLog(dbName, true)
- if err != nil {
- t.Fatalf("openLog() failed: %v", err)
- }
-
- return log, func() {
- log.Close()
- }
-}
-
-func createRawProcessor(t *testing.T) reqProcessor {
- processor, err := newRawProcessor(rootPublicID)
- if err != nil {
- t.Fatalf("newRawProcessor() failed: %v", err)
- }
- return processor
-}
-
-func createGlobProcessor(t *testing.T, path storage.PathName, pattern string) reqProcessor {
- processor, err := newGlobProcessor(rootPublicID, path, pattern)
- if err != nil {
- t.Fatalf("newGlobProcessor() failed: %v", err)
- }
- return processor
-}
-
-func createWatcher(t *testing.T, dbName string) (*Watcher, func()) {
- w, err := New(rootPublicID, dbName)
- if err != nil {
- t.Fatalf("New() failed: %v", err)
- }
- return w, func() {
- w.Close()
- }
-}
-
-func expectState(t *testing.T, log *memstore.RLog, processor reqProcessor, numChanges int) []types.Change {
- st := readState(t, log)
- return processState(t, processor, st, numChanges)
-}
-
-func readState(t *testing.T, log *memstore.RLog) *state.State {
- st, err := log.ReadState(rootPublicID)
- if err != nil {
- t.Fatalf("ReadState() failed: %v", err)
- }
- return st.State
-}
-
-func processState(t *testing.T, processor reqProcessor, st *state.State, numChanges int) []types.Change {
- changes, err := processor.processState(st)
- if err != nil {
- t.Fatalf("processState() failed: %v", err)
- }
- if len(changes) != numChanges {
- t.Fatalf("Expected state to have %d changes, got %d", numChanges, len(changes))
- }
- return changes
-}
-
-func expectTransaction(t *testing.T, log *memstore.RLog, processor reqProcessor, numChanges int) []types.Change {
- mus := readTransaction(t, log)
- return processTransaction(t, processor, mus, numChanges)
-}
-
-func readTransaction(t *testing.T, log *memstore.RLog) *state.Mutations {
- mus, err := log.ReadTransaction()
- if err != nil {
- t.Fatalf("ReadTransaction() failed: %v", err)
- }
- return mus
-}
-
-func processTransaction(t *testing.T, processor reqProcessor, mus *state.Mutations, numChanges int) []types.Change {
- changes, err := processor.processTransaction(mus)
- if err != nil {
- t.Fatalf("processTransaction() failed: %v", err)
- }
- if len(changes) != numChanges {
- t.Fatalf("Expected transaction to have %d changes, got %d", numChanges, len(changes))
- }
- return changes
-}
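
The test helpers above use runtime.Caller(1) so a failure message names the test's file and line rather than the helper's. A standalone sketch of that pattern (in modern Go, t.Helper() is the idiomatic replacement, but it postdates this code):

package main

import (
	"fmt"
	"runtime"
)

// where returns the file and line of its caller, the same trick the
// test helpers above use when formatting t.Fatalf messages.
func where() string {
	_, file, line, _ := runtime.Caller(1)
	return fmt.Sprintf("%s(%d)", file, line)
}

func main() {
	fmt.Println(where()) // prints this file and the calling line number
}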
diff --git a/services/store/memstore/watch/watcher.go b/services/store/memstore/watch/watcher.go
deleted file mode 100644
index 5896f5e..0000000
--- a/services/store/memstore/watch/watcher.go
+++ /dev/null
@@ -1,326 +0,0 @@
-package watch
-
-import (
- "bytes"
- "encoding/binary"
- "io"
- "time"
-
- "veyron/runtimes/google/lib/sync"
- "veyron/services/store/memstore"
- "veyron/services/store/raw"
-
- "veyron2/ipc"
- "veyron2/security"
- "veyron2/services/watch"
- "veyron2/services/watch/types"
- "veyron2/storage"
- "veyron2/verror"
-)
-
-var (
- errWatchClosed = io.EOF
- errUnknownResumeMarker = verror.BadArgf("Unknown ResumeMarker")
- nowResumeMarker = []byte("now") // UTF-8 conversion.
- initialStateSkippedChange = types.Change{
- Name: "",
- State: types.InitialStateSkipped,
- }
-)
-
-type Watcher struct {
- // admin is the public id of the store administrator.
- admin security.PublicID
- // dbName is the name of the store's database directory.
- dbName string
- // closed is a channel that is closed when the watcher is closed.
- // Watch invocations finish as soon as possible once the channel is closed.
- closed chan struct{}
- // pending records the number of Watch invocations on this watcher that
- // have not yet finished.
- pending sync.WaitGroup
-}
-
-// New returns a new watcher. The returned watcher supports repeated and
-// concurrent invocations of Watch until it is closed.
-// admin is the public id of the store administrator. dbName is the name of
-// the store's database directory.
-func New(admin security.PublicID, dbName string) (*Watcher, error) {
- return &Watcher{
- admin: admin,
- dbName: dbName,
- closed: make(chan struct{}),
- }, nil
-}
-
-// WatchRaw returns a stream of all changes.
-func (w *Watcher) WatchRaw(ctx ipc.ServerContext, req raw.Request,
- stream raw.StoreServiceWatchStream) error {
-
- processor, err := newRawProcessor(ctx.RemoteID())
- if err != nil {
- return err
- }
- return w.Watch(ctx, processor, req.ResumeMarker, stream.SendStream())
-}
-
-// WatchGlob returns a stream of changes that match a pattern.
-func (w *Watcher) WatchGlob(ctx ipc.ServerContext, path storage.PathName,
- req types.GlobRequest, stream watch.GlobWatcherServiceWatchGlobStream) error {
-
- processor, err := newGlobProcessor(ctx.RemoteID(), path, req.Pattern)
- if err != nil {
- return err
- }
- return w.Watch(ctx, processor, req.ResumeMarker, stream.SendStream())
-}
-
-// WatchQuery returns a stream of changes that satisfy a query.
-func (w *Watcher) WatchQuery(ctx ipc.ServerContext, path storage.PathName,
- req types.QueryRequest, stream watch.QueryWatcherServiceWatchQueryStream) error {
-
- return verror.Internalf("WatchQuery not yet implemented")
-}
-
-// WatchStream is the interface for streaming responses of Watch methods.
-type WatchStream interface {
- // Send places the item onto the output stream, blocking if there is no
- // buffer space available.
- Send(item types.Change) error
-}
-
-// Watch handles the specified request, processing records in the store log and
-// sending changes to the specified watch stream. If the call is cancelled or
-// otherwise closed early, Watch will terminate and return an error.
-// Watch implements the service.Watcher interface.
-func (w *Watcher) Watch(ctx ipc.ServerContext, processor reqProcessor,
- resumeMarker types.ResumeMarker, stream WatchStream) error {
-
- // Closing cancel terminates processRequest.
- cancel := make(chan struct{})
- defer close(cancel)
-
- done := make(chan error, 1)
-
- if !w.pending.TryAdd() {
- return errWatchClosed
- }
- // This goroutine does not leak because processRequest is always terminated.
- go func() {
- defer w.pending.Done()
- done <- w.processRequest(cancel, processor, resumeMarker, stream)
- close(done)
- }()
-
- select {
- case err := <-done:
- return err
- // Close cancel and terminate processRequest if:
- // 1) The watcher has been closed.
- // 2) The call closes. This is signalled on the context's closed channel.
- case <-w.closed:
- case <-ctx.Done():
- }
- return errWatchClosed
-}
-
-func (w *Watcher) processRequest(cancel <-chan struct{}, processor reqProcessor,
- resumeMarker types.ResumeMarker, stream WatchStream) error {
-
- log, err := memstore.OpenLog(w.dbName, true)
- if err != nil {
- return err
- }
- // This goroutine does not leak because cancel is always closed.
- go func() {
- <-cancel
-
- // Closing the log terminates any ongoing read, and processRequest
- // returns an error.
- log.Close()
-
- // stream.Send() is automatically cancelled when the call completes,
- // so we don't explicitly cancel sendChanges.
-
- // TODO(tilaks): cancel processState(), processTransaction().
- }()
-
- filter, err := newChangeFilter(resumeMarker)
- if err != nil {
- return err
- }
-
- if isNowResumeMarker(resumeMarker) {
- sendChanges(stream, []types.Change{initialStateSkippedChange})
- }
-
- // Process initial state.
- store, err := log.ReadState(w.admin)
- if err != nil {
- return err
- }
- st := store.State
- // Save timestamp as processState may modify st.
- timestamp := st.Timestamp()
- changes, err := processor.processState(st)
- if err != nil {
- return err
- }
- if send, err := filter.shouldProcessChanges(timestamp); err != nil {
- return err
- } else if send {
- if err := processChanges(stream, changes, timestamp); err != nil {
- return err
- }
- }
-
- for {
- // Process transactions.
- mu, err := log.ReadTransaction()
- if err != nil {
- return err
- }
- // Save timestamp as processTransaction may modify mu.
- timestamp := mu.Timestamp
- changes, err = processor.processTransaction(mu)
- if err != nil {
- return err
- }
- if send, err := filter.shouldProcessChanges(timestamp); err != nil {
- return err
- } else if send {
- if err := processChanges(stream, changes, timestamp); err != nil {
- return err
- }
- }
- }
-}
-
-// Close implements the service.Watcher interface.
-func (w *Watcher) Close() error {
- close(w.closed)
- w.pending.Wait()
- return nil
-}
-
-// isClosed returns true iff the watcher has been closed.
-func (w *Watcher) isClosed() bool {
- select {
- case <-w.closed:
- return true
- default:
- return false
- }
-}
-
-type changeFilter interface {
- // shouldProcessChanges determines whether to process changes with the given
- // timestamp. Changes should appear in the sequence of the store log, and
- // timestamps should be monotonically increasing.
- shouldProcessChanges(timestamp uint64) (bool, error)
-}
-
-type baseFilter struct {
- // initialTimestamp is the minimum timestamp of the first change sent.
- initialTimestamp uint64
- // crossedInitialTimestamp is true if a change with timestamp >=
- // initialTimestamp has already been sent.
- crossedInitialTimestamp bool
-}
-
-// onOrAfterFilter accepts any change with timestamp >= initialTimestamp.
-type onOrAfterFilter struct {
- baseFilter
-}
-
-// onAndAfterFilter accepts any change with timestamp >= initialTimestamp, but
-// requires the first change to have timestamp = initialTimestamp.
-type onAndAfterFilter struct {
- baseFilter
-}
-
-// newChangeFilter creates a changeFilter that processes changes only
-// at or after the requested resumeMarker.
-func newChangeFilter(resumeMarker []byte) (changeFilter, error) {
- if len(resumeMarker) == 0 {
- return &onOrAfterFilter{baseFilter{0, false}}, nil
- }
- if isNowResumeMarker(resumeMarker) {
- // TODO(tilaks): Get the current resume marker from the log.
- return &onOrAfterFilter{baseFilter{uint64(time.Now().UnixNano()), false}}, nil
- }
- if len(resumeMarker) != 8 {
- return nil, errUnknownResumeMarker
- }
- return &onAndAfterFilter{baseFilter{binary.BigEndian.Uint64(resumeMarker), false}}, nil
-}
-
-func (f *onOrAfterFilter) shouldProcessChanges(timestamp uint64) (bool, error) {
- // Bypass checks if a change with timestamp >= initialTimestamp has already
- // been sent.
- if !f.crossedInitialTimestamp {
- if timestamp < f.initialTimestamp {
- return false, nil
- }
- }
- f.crossedInitialTimestamp = true
- return true, nil
-}
-
-func (f *onAndAfterFilter) shouldProcessChanges(timestamp uint64) (bool, error) {
- // Bypass checks if a change with timestamp >= initialTimestamp has already
- // been sent.
- if !f.crossedInitialTimestamp {
- if timestamp < f.initialTimestamp {
- return false, nil
- }
- if timestamp > f.initialTimestamp {
- return false, errUnknownResumeMarker
- }
- // TODO(tilaks): if the most recent timestamp in the log is less than
-		// initialTimestamp, return errUnknownResumeMarker.
- }
- f.crossedInitialTimestamp = true
- return true, nil
-}
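
The two filters above differ only in how they treat the first change: onOrAfter tolerates a gap after initialTimestamp, while onAndAfter demands that the first processed timestamp match it exactly (an exact resume point). A condensed, self-contained sketch of the two policies (names here are illustrative, not the deleted code):

package main

import (
	"errors"
	"fmt"
)

var errUnknownResumeMarker = errors.New("unknown ResumeMarker")

// accept reports whether a change at ts should be sent, given the
// requested start time; exact selects the onAndAfter policy (first match
// must equal start), otherwise any ts >= start is fine (onOrAfter).
func accept(crossed *bool, start, ts uint64, exact bool) (bool, error) {
	if !*crossed {
		if ts < start {
			return false, nil
		}
		if exact && ts > start {
			return false, errUnknownResumeMarker
		}
	}
	*crossed = true
	return true, nil
}

func main() {
	var crossed bool
	fmt.Println(accept(&crossed, 10, 12, false)) // true <nil> (onOrAfter)
	crossed = false
	fmt.Println(accept(&crossed, 10, 12, true)) // false unknown ResumeMarker (onAndAfter)
}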
-
-func processChanges(stream WatchStream, changes []types.Change, timestamp uint64) error {
- addContinued(changes)
- addResumeMarkers(changes, timestampToResumeMarker(timestamp))
- return sendChanges(stream, changes)
-}
-
-func sendChanges(stream WatchStream, changes []types.Change) error {
- for _, change := range changes {
- if err := stream.Send(change); err != nil {
- return err
- }
- }
- return nil
-}
-
-func addContinued(changes []types.Change) {
- // Last change marks the end of the processed atomic group.
-	for i := range changes {
- changes[i].Continued = true
- }
- if len(changes) > 0 {
- changes[len(changes)-1].Continued = false
- }
-}
-
-func addResumeMarkers(changes []types.Change, resumeMarker []byte) {
-	for i := range changes {
- changes[i].ResumeMarker = resumeMarker
- }
-}
-
-func isNowResumeMarker(resumeMarker []byte) bool {
- return bytes.Equal(resumeMarker, nowResumeMarker)
-}
-
-func timestampToResumeMarker(timestamp uint64) []byte {
- buf := make([]byte, 8)
- binary.BigEndian.PutUint64(buf, timestamp)
- return buf
-}
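
The resume markers above are 8-byte big-endian log timestamps, with the literal bytes "now" reserved to mean "skip the initial state". A self-contained round trip of that encoding, using only the standard library:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

var nowResumeMarker = []byte("now")

// toMarker mirrors timestampToResumeMarker above.
func toMarker(timestamp uint64) []byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, timestamp)
	return buf
}

func main() {
	m := toMarker(1234567890)
	fmt.Println(len(m), binary.BigEndian.Uint64(m)) // 8 1234567890
	// "now" is 3 bytes, so it can never collide with a timestamp marker.
	fmt.Println(bytes.Equal(m, nowResumeMarker)) // false
}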
diff --git a/services/store/memstore/watch/watcher_test.go b/services/store/memstore/watch/watcher_test.go
deleted file mode 100644
index 46132a1..0000000
--- a/services/store/memstore/watch/watcher_test.go
+++ /dev/null
@@ -1,585 +0,0 @@
-package watch
-
-import (
- "bytes"
- "io"
- "sync"
- "testing"
- "time"
-
- "veyron/services/store/memstore"
- watchtesting "veyron/services/store/memstore/testing"
- "veyron/services/store/raw"
-
- "veyron2/rt"
- "veyron2/services/watch/types"
- "veyron2/storage"
- "veyron2/verror"
-)
-
-func TestWatchRaw(t *testing.T) {
- rt.Init()
-
- // Create a new store.
- dbName, st, cleanup := createStore(t)
- defer cleanup()
-
- // Create a new watcher.
- w, cleanup := createWatcher(t, dbName)
- defer cleanup()
-
- // Put /
- tr := memstore.NewTransaction()
- id1 := put(t, st, tr, "/", "val1")
- commit(t, tr)
-
- post1 := st.Snapshot().Find(id1).Version
-
- // Start a watch request.
- req := raw.Request{}
- ws := watchtesting.WatchRaw(rootPublicID, w.WatchRaw, req)
-
- rStream := ws.RecvStream()
-
- // Check that watch detects the changes in the first transaction.
- changes := []types.Change{}
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change := rStream.Value()
- changes = append(changes, change)
- if change.Continued {
- t.Fatal("Expected change to be the last in this transaction")
- }
- watchtesting.ExpectMutationExists(t, changes, id1, raw.NoVersion, post1, true, "val1", watchtesting.EmptyDir)
-
- // Put /a
- tr = memstore.NewTransaction()
- id2 := put(t, st, tr, "/a", "val2")
- commit(t, tr)
-
- pre1 := post1
- post1 = st.Snapshot().Find(id1).Version
- post2 := st.Snapshot().Find(id2).Version
-
- // Check that watch detects the changes in the second transaction.
- changes = []types.Change{}
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change = rStream.Value()
- changes = append(changes, change)
- if !change.Continued {
- t.Fatal("Expected change to continue the transaction")
- }
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change = rStream.Value()
- changes = append(changes, change)
- if change.Continued {
- t.Fatal("Expected change to be the last in this transaction")
- }
- watchtesting.ExpectMutationExists(t, changes, id1, pre1, post1, true, "val1", watchtesting.DirOf("a", id2))
- watchtesting.ExpectMutationExists(t, changes, id2, raw.NoVersion, post2, false, "val2", watchtesting.EmptyDir)
-}
-
-func TestWatchGlob(t *testing.T) {
- rt.Init()
-
- // Create a new store.
- dbName, st, cleanup := createStore(t)
- defer cleanup()
-
- // Create a new watcher.
- w, cleanup := createWatcher(t, dbName)
- defer cleanup()
-
- // Put /
- tr := memstore.NewTransaction()
- id1 := put(t, st, tr, "/", "val1")
- commit(t, tr)
-
- // Start a watch request.
- path := storage.ParsePath("/")
- req := types.GlobRequest{Pattern: "..."}
- ws := watchtesting.WatchGlobOnPath(rootPublicID, w.WatchGlob, path, req)
-
- rStream := ws.RecvStream()
-
- // Check that watch detects the changes in the first transaction.
- changes := []types.Change{}
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change := rStream.Value()
- changes = append(changes, change)
- if change.Continued {
- t.Fatal("Expected change to be the last in this transaction")
- }
- watchtesting.ExpectEntryExists(t, changes, "", id1, "val1")
-
- // Put /a
- tr = memstore.NewTransaction()
- id2 := put(t, st, tr, "/a", "val2")
- commit(t, tr)
-
- // Check that watch detects the changes in the second transaction.
- changes = []types.Change{}
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change = rStream.Value()
- changes = append(changes, change)
- if !change.Continued {
- t.Fatal("Expected change to continue the transaction")
- }
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change = rStream.Value()
- changes = append(changes, change)
- if change.Continued {
- t.Fatal("Expected change to be the last in this transaction")
- }
- watchtesting.ExpectEntryExists(t, changes, "", id1, "val1")
- watchtesting.ExpectEntryExists(t, changes, "a", id2, "val2")
-}
-
-func TestWatchCancellation(t *testing.T) {
- rt.Init()
-
- // Create a new store.
- dbName, st, cleanup := createStore(t)
- defer cleanup()
-
- // Create a new watcher.
- w, cleanup := createWatcher(t, dbName)
- defer cleanup()
-
- // Start a watch request.
- req := raw.Request{}
- ws := watchtesting.WatchRaw(rootPublicID, w.WatchRaw, req)
-
- // Commit a transaction.
- tr := memstore.NewTransaction()
- put(t, st, tr, "/", "val1")
- commit(t, tr)
-
- // Check that watch processed the first transaction.
- rStream := ws.RecvStream()
- if !rStream.Advance() {
- t.Fatal("Expected a change.")
- }
-
- // Cancel the watch request.
- ws.Cancel()
- // Give watch some time to process the cancellation.
- time.Sleep(time.Second)
-
- // Commit a second transaction.
- tr = memstore.NewTransaction()
- put(t, st, tr, "/a", "val2")
- commit(t, tr)
-
-	// Check that watch did not process the second transaction.
- if rStream.Advance() || rStream.Err() != nil {
- t.Errorf("Unexpected error: %v", rStream.Err())
- }
-
- // Check that io.EOF was returned.
- if err := ws.Finish(); err != io.EOF {
- t.Errorf("Unexpected error: %v", err)
- }
-}
-
-func TestWatchClosed(t *testing.T) {
- rt.Init()
-
- // Create a new store.
- dbName, st, cleanup := createStore(t)
- defer cleanup()
-
- // Create a new watcher.
- w, cleanup := createWatcher(t, dbName)
- var once sync.Once
- defer once.Do(cleanup)
-
- // Start a watch request.
- req := raw.Request{}
- ws := watchtesting.WatchRaw(rootPublicID, w.WatchRaw, req)
-
- // Commit a transaction.
- tr := memstore.NewTransaction()
- put(t, st, tr, "/", "val1")
- commit(t, tr)
-
- // Check that watch processed the first transaction.
- if !ws.RecvStream().Advance() {
- t.Fatal("Expected a change.")
- }
-
- // Close the watcher, check that io.EOF was returned.
- once.Do(cleanup)
- if err := ws.Finish(); err != io.EOF {
- t.Errorf("Unexpected error: %v", err)
- }
-}
-
-func TestStateResumeMarker(t *testing.T) {
- rt.Init()
-
- // Create a new store.
- dbName, st, cleanup := createStore(t)
- defer cleanup()
-
- // Put /
- tr := memstore.NewTransaction()
- id1 := put(t, st, tr, "/", "val1")
- commit(t, tr)
-
- post11 := st.Snapshot().Find(id1).Version
-
- if err := st.Close(); err != nil {
- t.Fatalf("Close() failed: %v", err)
- }
-
- // Re-create a new store. This should compress the log, creating an initial
-	// state containing /.
- st, cleanup = openStore(t, dbName)
- defer cleanup()
-
- // Create a new watcher.
- w, cleanup := createWatcher(t, dbName)
- defer cleanup()
-
- // Put /a
- tr = memstore.NewTransaction()
- id2 := put(t, st, tr, "/a", "val2")
- commit(t, tr)
-
- pre21 := post11
- post21 := st.Snapshot().Find(id1).Version
- post22 := st.Snapshot().Find(id2).Version
-
- // Start a watch request.
- req := raw.Request{}
- ws := watchtesting.WatchRaw(rootPublicID, w.WatchRaw, req)
-
- // Retrieve the resume marker for the initial state.
- rStream := ws.RecvStream()
-
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change := rStream.Value()
- resumeMarker1 := change.ResumeMarker
-
- // Cancel the watch request.
- ws.Cancel()
- ws.Finish()
-
- // Start a watch request after the initial state.
- req = raw.Request{ResumeMarker: resumeMarker1}
- ws = watchtesting.WatchRaw(rootPublicID, w.WatchRaw, req)
-
- rStream = ws.RecvStream()
-
- // Check that watch detects the changes in the state and the transaction.
- changes := []types.Change{}
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change = rStream.Value()
- changes = append(changes, change)
- if change.Continued {
- t.Fatal("Expected change to be the last in this transaction")
- }
- watchtesting.ExpectMutationExists(t, changes, id1, raw.NoVersion, post11, true, "val1", watchtesting.EmptyDir)
-
- // Check that watch detects the changes in the state and the transaction.
- changes = []types.Change{}
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change = rStream.Value()
- changes = append(changes, change)
- if !change.Continued {
- t.Fatal("Expected change to continue the transaction")
- }
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change = rStream.Value()
- changes = append(changes, change)
- if change.Continued {
- t.Fatal("Expected change to be the last in this transaction")
- }
- watchtesting.ExpectMutationExists(t, changes, id1, pre21, post21, true, "val1", watchtesting.DirOf("a", id2))
- watchtesting.ExpectMutationExists(t, changes, id2, raw.NoVersion, post22, false, "val2", watchtesting.EmptyDir)
-}
-
-func TestTransactionResumeMarker(t *testing.T) {
- rt.Init()
-
- // Create a new store.
- dbName, st, cleanup := createStore(t)
- defer cleanup()
-
- // Create a new watcher.
- w, cleanup := createWatcher(t, dbName)
- defer cleanup()
-
- // Put /
- tr := memstore.NewTransaction()
- id1 := put(t, st, tr, "/", "val1")
- commit(t, tr)
-
- post11 := st.Snapshot().Find(id1).Version
-
- // Put /a
- tr = memstore.NewTransaction()
- id2 := put(t, st, tr, "/a", "val2")
- commit(t, tr)
-
- pre21 := post11
- post21 := st.Snapshot().Find(id1).Version
- post22 := st.Snapshot().Find(id2).Version
-
- // Start a watch request.
- req := raw.Request{}
- ws := watchtesting.WatchRaw(rootPublicID, w.WatchRaw, req)
-
- // Retrieve the resume marker for the first transaction.
- rStream := ws.RecvStream()
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change := rStream.Value()
- resumeMarker1 := change.ResumeMarker
-
- // Cancel the watch request.
- ws.Cancel()
- ws.Finish()
-
- // Start a watch request after the first transaction.
- req = raw.Request{ResumeMarker: resumeMarker1}
- ws = watchtesting.WatchRaw(rootPublicID, w.WatchRaw, req)
-
- rStream = ws.RecvStream()
-
- // Check that watch detects the changes in the first transaction.
- changes := []types.Change{}
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change = rStream.Value()
- changes = append(changes, change)
- if change.Continued {
- t.Fatal("Expected change to be the last in this transaction")
- }
- watchtesting.ExpectMutationExists(t, changes, id1, raw.NoVersion, post11, true, "val1", watchtesting.EmptyDir)
-
- // Check that watch detects the changes in the second transaction.
- changes = []types.Change{}
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change = rStream.Value()
- changes = append(changes, change)
- if !change.Continued {
- t.Fatal("Expected change to continue the transaction")
- }
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change = rStream.Value()
- changes = append(changes, change)
- if change.Continued {
- t.Fatal("Expected change to be the last in this transaction")
- }
- resumeMarker2 := change.ResumeMarker
- watchtesting.ExpectMutationExists(t, changes, id1, pre21, post21, true, "val1", watchtesting.DirOf("a", id2))
- watchtesting.ExpectMutationExists(t, changes, id2, raw.NoVersion, post22, false, "val2", watchtesting.EmptyDir)
-
- // Cancel the watch request.
- ws.Cancel()
- ws.Finish()
-
- // Start a watch request at the second transaction.
- req = raw.Request{ResumeMarker: resumeMarker2}
- ws = watchtesting.WatchRaw(rootPublicID, w.WatchRaw, req)
-
- rStream = ws.RecvStream()
-
- // Check that watch detects the changes in the second transaction.
- changes = []types.Change{}
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change = rStream.Value()
- changes = append(changes, change)
- if !change.Continued {
- t.Fatal("Expected change to continue the transaction")
- }
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change = rStream.Value()
- changes = append(changes, change)
- if change.Continued {
- t.Fatal("Expected change to be the last in this transaction")
- }
- watchtesting.ExpectMutationExists(t, changes, id1, pre21, post21, true, "val1", watchtesting.DirOf("a", id2))
- watchtesting.ExpectMutationExists(t, changes, id2, raw.NoVersion, post22, false, "val2", watchtesting.EmptyDir)
-}
-
-func TestNowResumeMarker(t *testing.T) {
- rt.Init()
-
- // Create a new store.
- dbName, st, cleanup := createStore(t)
- defer cleanup()
-
- // Create a new watcher.
- w, cleanup := createWatcher(t, dbName)
- defer cleanup()
-
- // Put /
- tr := memstore.NewTransaction()
- put(t, st, tr, "/", "val1")
- commit(t, tr)
-
- // Put /a
- tr = memstore.NewTransaction()
- id2 := put(t, st, tr, "/a", "val2")
- commit(t, tr)
-
- post22 := st.Snapshot().Find(id2).Version
-
-	// Let some time pass so that the second transaction happens before "now".
- time.Sleep(time.Millisecond)
-
- // Start a watch request with the "now" resume marker.
- req := raw.Request{ResumeMarker: nowResumeMarker}
- ws := watchtesting.WatchRaw(rootPublicID, w.WatchRaw, req)
-
- // Check that watch announces that the initial state was skipped.
- // This also ensures that the third transaction happens after "now".
- rStream := ws.RecvStream()
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change := rStream.Value()
- watchtesting.ExpectInitialStateSkipped(t, change)
-
- // Put /a/b
- tr = memstore.NewTransaction()
- id3 := put(t, st, tr, "/a/b", "val3")
- commit(t, tr)
-
- pre32 := post22
- post32 := st.Snapshot().Find(id2).Version
- post33 := st.Snapshot().Find(id3).Version
-
- // Check that watch detects the changes in the third transaction.
- changes := []types.Change{}
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change = rStream.Value()
- changes = append(changes, change)
- if !change.Continued {
- t.Fatalf("Expected change to continue the transaction")
- }
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change = rStream.Value()
- changes = append(changes, change)
- if change.Continued {
- t.Fatalf("Expected change to be the last in this transaction")
- }
- watchtesting.ExpectMutationExists(t, changes, id2, pre32, post32, false, "val2", watchtesting.DirOf("b", id3))
- watchtesting.ExpectMutationExists(t, changes, id3, raw.NoVersion, post33, false, "val3", watchtesting.EmptyDir)
-}
-
-func TestUnknownResumeMarkers(t *testing.T) {
- rt.Init()
-
- // Create a new store.
- dbName, st, cleanup := createStore(t)
- defer cleanup()
-
- // Create a new watcher.
- w, cleanup := createWatcher(t, dbName)
- defer cleanup()
-
- // Put /
- tr := memstore.NewTransaction()
- put(t, st, tr, "/", "val1")
- commit(t, tr)
-
- // Start a watch request with a resume marker that's too early.
- resumeMarker := timestampToResumeMarker(1)
- req := raw.Request{ResumeMarker: resumeMarker}
- ws := watchtesting.WatchRaw(rootPublicID, w.WatchRaw, req)
-
- // The resume marker should be unknown.
- if err := ws.Finish(); !verror.Is(err, verror.BadArg) {
- t.Errorf("Error should be %v: got %v", verror.BadArg, err)
- }
-
- // Start a watch request with a resume marker that's too late.
-	resumeMarker = timestampToResumeMarker(1<<63 - 1)
- req = raw.Request{ResumeMarker: resumeMarker}
- ws = watchtesting.WatchRaw(rootPublicID, w.WatchRaw, req)
-
- // The resume marker should be unknown.
- if err := ws.Finish(); !verror.Is(err, verror.BadArg) {
- t.Errorf("Error should be %v: got %v", verror.BadArg, err)
- }
-}
-
-func TestConsistentResumeMarkers(t *testing.T) {
- rt.Init()
-
- // Create a new store.
- dbName, st, cleanup := createStore(t)
- defer cleanup()
-
- // Create a new watcher.
- w, cleanup := createWatcher(t, dbName)
- defer cleanup()
-
- // Put /
- tr := memstore.NewTransaction()
- put(t, st, tr, "/", "val1")
- commit(t, tr)
-
- // Start a watch request.
- path := storage.ParsePath("/")
- req := types.GlobRequest{Pattern: "..."}
- ws := watchtesting.WatchGlobOnPath(rootPublicID, w.WatchGlob, path, req)
-
- rStream := ws.RecvStream()
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change := rStream.Value()
- // Save the ResumeMarker of the change.
- r := change.ResumeMarker
-
- // Start another watch request.
- ws = watchtesting.WatchGlobOnPath(rootPublicID, w.WatchGlob, path, req)
-
- rStream = ws.RecvStream()
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change = rStream.Value()
- // Expect the same ResumeMarker.
- if !bytes.Equal(r, change.ResumeMarker) {
- t.Fatal("Inconsistent ResumeMarker.")
- }
-}
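
One subtlety in the "too late" marker above: Go's ^ operator is bitwise XOR, not exponentiation, so an expression like 2 ^ 63 - 1 evaluates to 60; the intended maximum timestamp is spelled 1<<63 - 1. A quick demonstration:

package main

import "fmt"

func main() {
	fmt.Println(2 ^ 63 - 1)        // 60: ^ is XOR, evaluated as (2 ^ 63) - 1
	fmt.Println(uint64(1)<<63 - 1) // 9223372036854775807
}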
diff --git a/services/store/raw/service.vdl b/services/store/raw/service.vdl
deleted file mode 100644
index 4d18639..0000000
--- a/services/store/raw/service.vdl
+++ /dev/null
@@ -1,79 +0,0 @@
-// Package raw defines a raw interface for the Veyron store.
-//
-// The raw interface supports synchronizing with remote stores by transporting
-// Mutations.
-
-package raw
-
-import (
- "veyron2/storage"
- "veyron2/services/watch/types"
-)
-
-const (
- // The raw Store has Object name "<mount>/.store.raw", where <mount> is the
- // Object name of the mount point.
- RawStoreSuffix = ".store.raw"
-
- // NoVersion means the entry is not present in the store.
- NoVersion = Version(0)
-)
-
-// Version identifies the value in the store for a key at some point in time.
-// The version is a numeric identifier that is globally unique within the space
-// of a single ID, meaning that if two stores contain an entry with the same ID
-// and version, then the entries represent the same thing, at the same point in
-// time (as agreed upon by the two stores).
-type Version uint64
-
-// DEntry is a directory entry.
-type DEntry struct {
- Name string
- ID storage.ID
-}
-
-// Mutation represents an update to an entry in the store, and contains enough
-// information for a privileged service to replicate the update elsewhere.
-type Mutation struct {
- // ID is the key that identifies the entry.
- ID storage.ID
-
- // The version of the entry immediately before the update. For new entries,
- // the PriorVersion is NoVersion.
- PriorVersion Version
-
- // The version of the entry immediately after the update. For deleted entries,
- // the Version is NoVersion.
- Version Version
-
- // IsRoot is true if
- // 1) The entry was the store root immediately before being deleted, or
- // 2) The entry is the store root immediately after the update.
- IsRoot bool
-
-	// Value is the value stored at this entry.
- Value any
-
- // Dir is the implicit directory of this entry, and may contain references
- // to other entries in the store.
- Dir []DEntry
-}
-
-// Request specifies how to resume from a previous Watch call.
-type Request struct {
- // ResumeMarker specifies how to resume from a previous Watch call.
- // See the ResumeMarker type for detailed comments.
- ResumeMarker types.ResumeMarker
-}
-
-// Store defines a raw interface for the Veyron store. Mutations can be received
-// via the Watcher interface, and committed via PutMutations.
-type Store interface {
- // Watch returns a stream of all changes.
- Watch(Req Request) stream<_, types.Change> error
-
- // PutMutations atomically commits a stream of Mutations when the stream is
- // closed. Mutations are not committed if the request is cancelled before
- // the stream has been closed.
- PutMutations() stream<Mutation, _> error
-}
diff --git a/services/store/raw/service.vdl.go b/services/store/raw/service.vdl.go
deleted file mode 100644
index 5e75600..0000000
--- a/services/store/raw/service.vdl.go
+++ /dev/null
@@ -1,577 +0,0 @@
-// This file was auto-generated by the veyron vdl tool.
-// Source: service.vdl
-
-package raw
-
-import (
- "veyron2/services/watch/types"
-
- "veyron2/storage"
-
- // The non-user imports are prefixed with "_gen_" to prevent collisions.
- _gen_io "io"
- _gen_veyron2 "veyron2"
- _gen_context "veyron2/context"
- _gen_ipc "veyron2/ipc"
- _gen_naming "veyron2/naming"
- _gen_vdlutil "veyron2/vdl/vdlutil"
- _gen_wiretype "veyron2/wiretype"
-)
-
-// Version identifies the value in the store for a key at some point in time.
-// The version is a numeric identifier that is globally unique within the space
-// of a single ID, meaning that if two stores contain an entry with the same ID
-// and version, then the entries represent the same thing, at the same point in
-// time (as agreed upon by the two stores).
-type Version uint64
-
-// DEntry is a directory entry.
-type DEntry struct {
- Name string
- ID storage.ID
-}
-
-// Mutation represents an update to an entry in the store, and contains enough
-// information for a privileged service to replicate the update elsewhere.
-type Mutation struct {
- // ID is the key that identifies the entry.
- ID storage.ID
- // The version of the entry immediately before the update. For new entries,
- // the PriorVersion is NoVersion.
- PriorVersion Version
- // The version of the entry immediately after the update. For deleted entries,
- // the Version is NoVersion.
- Version Version
- // IsRoot is true if
- // 1) The entry was the store root immediately before being deleted, or
- // 2) The entry is the store root immediately after the update.
- IsRoot bool
-	// Value is the value stored at this entry.
- Value _gen_vdlutil.Any
- // Dir is the implicit directory of this entry, and may contain references
- // to other entries in the store.
- Dir []DEntry
-}
-
-// Request specifies how to resume from a previous Watch call.
-type Request struct {
- // ResumeMarker specifies how to resume from a previous Watch call.
- // See the ResumeMarker type for detailed comments.
- ResumeMarker types.ResumeMarker
-}
-
-// The raw Store has Object name "<mount>/.store.raw", where <mount> is the
-// Object name of the mount point.
-const RawStoreSuffix = ".store.raw"
-
-// NoVersion means the entry is not present in the store.
-const NoVersion = Version(0)
-
-// TODO(bprosnitz) Remove this line once signatures are updated to use typevals.
-// It corrects a bug where _gen_wiretype is unused in VDL packages where only bootstrap types are used on interfaces.
-const _ = _gen_wiretype.TypeIDInvalid
-
-// Store defines a raw interface for the Veyron store. Mutations can be received
-// via the Watcher interface, and committed via PutMutations.
-// Store is the interface the client binds and uses.
-// Store_ExcludingUniversal is the interface without internal framework-added methods
-// to enable embedding without method collisions. Not to be used directly by clients.
-type Store_ExcludingUniversal interface {
- // Watch returns a stream of all changes.
- Watch(ctx _gen_context.T, Req Request, opts ..._gen_ipc.CallOpt) (reply StoreWatchCall, err error)
- // PutMutations atomically commits a stream of Mutations when the stream is
- // closed. Mutations are not committed if the request is cancelled before
- // the stream has been closed.
- PutMutations(ctx _gen_context.T, opts ..._gen_ipc.CallOpt) (reply StorePutMutationsCall, err error)
-}
-type Store interface {
- _gen_ipc.UniversalServiceMethods
- Store_ExcludingUniversal
-}
-
-// StoreService is the interface the server implements.
-type StoreService interface {
-
- // Watch returns a stream of all changes.
- Watch(context _gen_ipc.ServerContext, Req Request, stream StoreServiceWatchStream) (err error)
- // PutMutations atomically commits a stream of Mutations when the stream is
- // closed. Mutations are not committed if the request is cancelled before
- // the stream has been closed.
- PutMutations(context _gen_ipc.ServerContext, stream StoreServicePutMutationsStream) (err error)
-}
-
-// StoreWatchCall is the interface for call object of the method
-// Watch in the service interface Store.
-type StoreWatchCall interface {
- // RecvStream returns the recv portion of the stream
- RecvStream() interface {
- // Advance stages an element so the client can retrieve it
- // with Value. Advance returns true iff there is an
- // element to retrieve. The client must call Advance before
- // calling Value. Advance may block if an element is not
- // immediately available.
- Advance() bool
-
- // Value returns the element that was staged by Advance.
- // Value may panic if Advance returned false or was not
- // called at all. Value does not block.
- Value() types.Change
-
- // Err returns a non-nil error iff the stream encountered
- // any errors. Err does not block.
- Err() error
- }
-
- // Finish blocks until the server is done and returns the positional
- // return values for call.
- //
- // If Cancel has been called, Finish will return immediately; the output of
- // Finish could either be an error signalling cancelation, or the correct
- // positional return values from the server depending on the timing of the
- // call.
- //
- // Calling Finish is mandatory for releasing stream resources, unless Cancel
- // has been called or any of the other methods return an error.
- // Finish should be called at most once.
- Finish() (err error)
-
- // Cancel cancels the RPC, notifying the server to stop processing. It
- // is safe to call Cancel concurrently with any of the other stream methods.
- // Calling Cancel after Finish has returned is a no-op.
- Cancel()
-}
-
-type implStoreWatchStreamIterator struct {
- clientCall _gen_ipc.Call
- val types.Change
- err error
-}
-
-func (c *implStoreWatchStreamIterator) Advance() bool {
- c.val = types.Change{}
- c.err = c.clientCall.Recv(&c.val)
- return c.err == nil
-}
-
-func (c *implStoreWatchStreamIterator) Value() types.Change {
- return c.val
-}
-
-func (c *implStoreWatchStreamIterator) Err() error {
- if c.err == _gen_io.EOF {
- return nil
- }
- return c.err
-}
-
-// Implementation of the StoreWatchCall interface that is not exported.
-type implStoreWatchCall struct {
- clientCall _gen_ipc.Call
- readStream implStoreWatchStreamIterator
-}
-
-func (c *implStoreWatchCall) RecvStream() interface {
- Advance() bool
- Value() types.Change
- Err() error
-} {
- return &c.readStream
-}
-
-func (c *implStoreWatchCall) Finish() (err error) {
- if ierr := c.clientCall.Finish(&err); ierr != nil {
- err = ierr
- }
- return
-}
-
-func (c *implStoreWatchCall) Cancel() {
- c.clientCall.Cancel()
-}
-
-type implStoreServiceWatchStreamSender struct {
- serverCall _gen_ipc.ServerCall
-}
-
-func (s *implStoreServiceWatchStreamSender) Send(item types.Change) error {
- return s.serverCall.Send(item)
-}
-
-// StoreServiceWatchStream is the interface for streaming responses of the method
-// Watch in the service interface Store.
-type StoreServiceWatchStream interface {
- // SendStream returns the send portion of the stream.
- SendStream() interface {
- // Send places the item onto the output stream, blocking if there is no buffer
- // space available. If the client has canceled, an error is returned.
- Send(item types.Change) error
- }
-}
-
-// Implementation of the StoreServiceWatchStream interface that is not exported.
-type implStoreServiceWatchStream struct {
- writer implStoreServiceWatchStreamSender
-}
-
-func (s *implStoreServiceWatchStream) SendStream() interface {
- // Send places the item onto the output stream, blocking if there is no buffer
- // space available. If the client has canceled, an error is returned.
- Send(item types.Change) error
-} {
- return &s.writer
-}
-
-// StorePutMutationsCall is the interface for call object of the method
-// PutMutations in the service interface Store.
-type StorePutMutationsCall interface {
-
- // SendStream returns the send portion of the stream
- SendStream() interface {
- // Send places the item onto the output stream, blocking if there is no
- // buffer space available. Calls to Send after having called Close
- // or Cancel will fail. Any blocked Send calls will be unblocked upon
- // calling Cancel.
- Send(item Mutation) error
-
- // Close indicates to the server that no more items will be sent;
- // server Recv calls will receive io.EOF after all sent items. This is
- // an optional call - it's used by streaming clients that need the
- // server to receive the io.EOF terminator before the client calls
- // Finish (for example, if the client needs to continue receiving items
- // from the server after having finished sending).
- // Calls to Close after having called Cancel will fail.
- // Like Send, Close blocks when there's no buffer space available.
- Close() error
- }
-
- // Finish performs the equivalent of SendStream().Close, then blocks until the server
- // is done, and returns the positional return values for call.
- // If Cancel has been called, Finish will return immediately; the output of
- // Finish could either be an error signalling cancelation, or the correct
- // positional return values from the server depending on the timing of the
- // call.
- //
- // Calling Finish is mandatory for releasing stream resources, unless Cancel
- // has been called or any of the other methods return an error.
- // Finish should be called at most once.
- Finish() (err error)
-
- // Cancel cancels the RPC, notifying the server to stop processing. It
- // is safe to call Cancel concurrently with any of the other stream methods.
- // Calling Cancel after Finish has returned is a no-op.
- Cancel()
-}
-
-type implStorePutMutationsStreamSender struct {
- clientCall _gen_ipc.Call
-}
-
-func (c *implStorePutMutationsStreamSender) Send(item Mutation) error {
- return c.clientCall.Send(item)
-}
-
-func (c *implStorePutMutationsStreamSender) Close() error {
- return c.clientCall.CloseSend()
-}
-
-// Implementation of the StorePutMutationsCall interface that is not exported.
-type implStorePutMutationsCall struct {
- clientCall _gen_ipc.Call
- writeStream implStorePutMutationsStreamSender
-}
-
-func (c *implStorePutMutationsCall) SendStream() interface {
- Send(item Mutation) error
- Close() error
-} {
- return &c.writeStream
-}
-
-func (c *implStorePutMutationsCall) Finish() (err error) {
- if ierr := c.clientCall.Finish(&err); ierr != nil {
- err = ierr
- }
- return
-}
-
-func (c *implStorePutMutationsCall) Cancel() {
- c.clientCall.Cancel()
-}
-
-type implStoreServicePutMutationsStreamIterator struct {
- serverCall _gen_ipc.ServerCall
- val Mutation
- err error
-}
-
-func (s *implStoreServicePutMutationsStreamIterator) Advance() bool {
- s.val = Mutation{}
- s.err = s.serverCall.Recv(&s.val)
- return s.err == nil
-}
-
-func (s *implStoreServicePutMutationsStreamIterator) Value() Mutation {
- return s.val
-}
-
-func (s *implStoreServicePutMutationsStreamIterator) Err() error {
- if s.err == _gen_io.EOF {
- return nil
- }
- return s.err
-}
-
-// StoreServicePutMutationsStream is the interface for streaming responses of the method
-// PutMutations in the service interface Store.
-type StoreServicePutMutationsStream interface {
- // RecvStream returns the recv portion of the stream
- RecvStream() interface {
- // Advance stages an element so the client can retrieve it
- // with Value. Advance returns true iff there is an
- // element to retrieve. The client must call Advance before
- // calling Value. Advance may block if an element is not
- // immediately available.
- Advance() bool
-
- // Value returns the element that was staged by Advance.
- // Value may panic if Advance returned false or was not
- // called at all. Value does not block.
- Value() Mutation
-
- // Err returns a non-nil error iff the stream encountered
- // any errors. Err does not block.
- Err() error
- }
-}
-
-// Implementation of the StoreServicePutMutationsStream interface that is not exported.
-type implStoreServicePutMutationsStream struct {
- reader implStoreServicePutMutationsStreamIterator
-}
-
-func (s *implStoreServicePutMutationsStream) RecvStream() interface {
- // Advance stages an element so the client can retrieve it
- // with Value. Advance returns true iff there is an
- // element to retrieve. The client must call Advance before
- // calling Value. The client must call Cancel if it does
- // not iterate through all elements (i.e. until Advance
- // returns false). Advance may block if an element is not
- // immediately available.
- Advance() bool
-
- // Value returns the element that was staged by Advance.
- // Value may panic if Advance returned false or was not
- // called at all. Value does not block.
- Value() Mutation
-
- // Err returns a non-nil error iff the stream encountered
- // any errors. Err does not block.
- Err() error
-} {
- return &s.reader
-}
-
-// BindStore returns the client stub implementing the Store
-// interface.
-//
-// If no _gen_ipc.Client is specified, the default _gen_ipc.Client in the
-// global Runtime is used.
-func BindStore(name string, opts ..._gen_ipc.BindOpt) (Store, error) {
- var client _gen_ipc.Client
- switch len(opts) {
- case 0:
- // Do nothing.
- case 1:
- if clientOpt, ok := opts[0].(_gen_ipc.Client); opts[0] == nil || ok {
- client = clientOpt
- } else {
- return nil, _gen_vdlutil.ErrUnrecognizedOption
- }
- default:
- return nil, _gen_vdlutil.ErrTooManyOptionsToBind
- }
- stub := &clientStubStore{defaultClient: client, name: name}
-
- return stub, nil
-}
-
-// NewServerStore creates a new server stub.
-//
-// It takes a regular server implementing the StoreService
-// interface, and returns a new server stub.
-func NewServerStore(server StoreService) interface{} {
- return &ServerStubStore{
- service: server,
- }
-}
-
-// clientStubStore implements Store.
-type clientStubStore struct {
- defaultClient _gen_ipc.Client
- name string
-}
-
-func (__gen_c *clientStubStore) client(ctx _gen_context.T) _gen_ipc.Client {
- if __gen_c.defaultClient != nil {
- return __gen_c.defaultClient
- }
- return _gen_veyron2.RuntimeFromContext(ctx).Client()
-}
-
-func (__gen_c *clientStubStore) Watch(ctx _gen_context.T, Req Request, opts ..._gen_ipc.CallOpt) (reply StoreWatchCall, err error) {
- var call _gen_ipc.Call
- if call, err = __gen_c.client(ctx).StartCall(ctx, __gen_c.name, "Watch", []interface{}{Req}, opts...); err != nil {
- return
- }
- reply = &implStoreWatchCall{clientCall: call, readStream: implStoreWatchStreamIterator{clientCall: call}}
- return
-}
-
-func (__gen_c *clientStubStore) PutMutations(ctx _gen_context.T, opts ..._gen_ipc.CallOpt) (reply StorePutMutationsCall, err error) {
- var call _gen_ipc.Call
- if call, err = __gen_c.client(ctx).StartCall(ctx, __gen_c.name, "PutMutations", nil, opts...); err != nil {
- return
- }
- reply = &implStorePutMutationsCall{clientCall: call, writeStream: implStorePutMutationsStreamSender{clientCall: call}}
- return
-}
-
-func (__gen_c *clientStubStore) UnresolveStep(ctx _gen_context.T, opts ..._gen_ipc.CallOpt) (reply []string, err error) {
- var call _gen_ipc.Call
- if call, err = __gen_c.client(ctx).StartCall(ctx, __gen_c.name, "UnresolveStep", nil, opts...); err != nil {
- return
- }
- if ierr := call.Finish(&reply, &err); ierr != nil {
- err = ierr
- }
- return
-}
-
-func (__gen_c *clientStubStore) Signature(ctx _gen_context.T, opts ..._gen_ipc.CallOpt) (reply _gen_ipc.ServiceSignature, err error) {
- var call _gen_ipc.Call
- if call, err = __gen_c.client(ctx).StartCall(ctx, __gen_c.name, "Signature", nil, opts...); err != nil {
- return
- }
- if ierr := call.Finish(&reply, &err); ierr != nil {
- err = ierr
- }
- return
-}
-
-func (__gen_c *clientStubStore) GetMethodTags(ctx _gen_context.T, method string, opts ..._gen_ipc.CallOpt) (reply []interface{}, err error) {
- var call _gen_ipc.Call
- if call, err = __gen_c.client(ctx).StartCall(ctx, __gen_c.name, "GetMethodTags", []interface{}{method}, opts...); err != nil {
- return
- }
- if ierr := call.Finish(&reply, &err); ierr != nil {
- err = ierr
- }
- return
-}
-
-// ServerStubStore wraps a server that implements
-// StoreService and provides an object that satisfies
-// the requirements of veyron2/ipc.ReflectInvoker.
-type ServerStubStore struct {
- service StoreService
-}
-
-func (__gen_s *ServerStubStore) GetMethodTags(call _gen_ipc.ServerCall, method string) ([]interface{}, error) {
-	// TODO(bprosnitz) GetMethodTags() will be replaced with Signature().
- // Note: This exhibits some weird behavior like returning a nil error if the method isn't found.
- // This will change when it is replaced with Signature().
- switch method {
- case "Watch":
- return []interface{}{}, nil
- case "PutMutations":
- return []interface{}{}, nil
- default:
- return nil, nil
- }
-}
-
-func (__gen_s *ServerStubStore) Signature(call _gen_ipc.ServerCall) (_gen_ipc.ServiceSignature, error) {
- result := _gen_ipc.ServiceSignature{Methods: make(map[string]_gen_ipc.MethodSignature)}
- result.Methods["PutMutations"] = _gen_ipc.MethodSignature{
- InArgs: []_gen_ipc.MethodArgument{},
- OutArgs: []_gen_ipc.MethodArgument{
- {Name: "", Type: 68},
- },
- InStream: 75,
- }
- result.Methods["Watch"] = _gen_ipc.MethodSignature{
- InArgs: []_gen_ipc.MethodArgument{
- {Name: "Req", Type: 67},
- },
- OutArgs: []_gen_ipc.MethodArgument{
- {Name: "", Type: 68},
- },
-
- OutStream: 70,
- }
-
- result.TypeDefs = []_gen_vdlutil.Any{
- _gen_wiretype.NamedPrimitiveType{Type: 0x32, Name: "byte", Tags: []string(nil)}, _gen_wiretype.SliceType{Elem: 0x41, Name: "veyron2/services/watch/types.ResumeMarker", Tags: []string(nil)}, _gen_wiretype.StructType{
- []_gen_wiretype.FieldType{
- _gen_wiretype.FieldType{Type: 0x42, Name: "ResumeMarker"},
- },
- "veyron/services/store/raw.Request", []string(nil)},
- _gen_wiretype.NamedPrimitiveType{Type: 0x1, Name: "error", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x1, Name: "anydata", Tags: []string(nil)}, _gen_wiretype.StructType{
- []_gen_wiretype.FieldType{
- _gen_wiretype.FieldType{Type: 0x3, Name: "Name"},
- _gen_wiretype.FieldType{Type: 0x24, Name: "State"},
- _gen_wiretype.FieldType{Type: 0x45, Name: "Value"},
- _gen_wiretype.FieldType{Type: 0x42, Name: "ResumeMarker"},
- _gen_wiretype.FieldType{Type: 0x2, Name: "Continued"},
- },
- "veyron2/services/watch/types.Change", []string(nil)},
- _gen_wiretype.ArrayType{Elem: 0x41, Len: 0x10, Name: "veyron2/storage.ID", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x35, Name: "veyron/services/store/raw.Version", Tags: []string(nil)}, _gen_wiretype.StructType{
- []_gen_wiretype.FieldType{
- _gen_wiretype.FieldType{Type: 0x3, Name: "Name"},
- _gen_wiretype.FieldType{Type: 0x47, Name: "ID"},
- },
- "veyron/services/store/raw.DEntry", []string(nil)},
- _gen_wiretype.SliceType{Elem: 0x49, Name: "", Tags: []string(nil)}, _gen_wiretype.StructType{
- []_gen_wiretype.FieldType{
- _gen_wiretype.FieldType{Type: 0x47, Name: "ID"},
- _gen_wiretype.FieldType{Type: 0x48, Name: "PriorVersion"},
- _gen_wiretype.FieldType{Type: 0x48, Name: "Version"},
- _gen_wiretype.FieldType{Type: 0x2, Name: "IsRoot"},
- _gen_wiretype.FieldType{Type: 0x45, Name: "Value"},
- _gen_wiretype.FieldType{Type: 0x4a, Name: "Dir"},
- },
- "veyron/services/store/raw.Mutation", []string(nil)},
- }
-
- return result, nil
-}
-
-func (__gen_s *ServerStubStore) UnresolveStep(call _gen_ipc.ServerCall) (reply []string, err error) {
- if unresolver, ok := __gen_s.service.(_gen_ipc.Unresolver); ok {
- return unresolver.UnresolveStep(call)
- }
- if call.Server() == nil {
- return
- }
- var published []string
- if published, err = call.Server().Published(); err != nil || published == nil {
- return
- }
- reply = make([]string, len(published))
- for i, p := range published {
- reply[i] = _gen_naming.Join(p, call.Name())
- }
- return
-}
-
-func (__gen_s *ServerStubStore) Watch(call _gen_ipc.ServerCall, Req Request) (err error) {
- stream := &implStoreServiceWatchStream{writer: implStoreServiceWatchStreamSender{serverCall: call}}
- err = __gen_s.service.Watch(call, Req, stream)
- return
-}
-
-func (__gen_s *ServerStubStore) PutMutations(call _gen_ipc.ServerCall) (err error) {
- stream := &implStoreServicePutMutationsStream{reader: implStoreServicePutMutationsStreamIterator{serverCall: call}}
- err = __gen_s.service.PutMutations(call, stream)
- return
-}
diff --git a/services/store/raw/version.go b/services/store/raw/version.go
deleted file mode 100644
index aa5d385..0000000
--- a/services/store/raw/version.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package raw
-
-import (
- "math/rand"
- "time"
-)
-
-var rng *rand.Rand
-
-func init() {
- rng = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
-}
-
-// NewVersion returns a new version number.
-//
-// TODO(jyh): Choose a better version generator.
-func NewVersion() Version {
- for {
- if v := Version(rng.Int63()); v != 0 {
- return v
- }
- }
-}
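
The rejection loop above reserves zero as the "no version" sentinel. A minimal, self-contained sketch of the same pattern; the main harness is illustrative and not part of the deleted package:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

type Version int64

var rng = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))

// NewVersion draws from a seeded PRNG and rejects zero, which is
// reserved as the "no version" sentinel. rng.Int63() is uniform over
// [0, 1<<63), so the loop almost always terminates on the first draw.
func NewVersion() Version {
	for {
		if v := Version(rng.Int63()); v != 0 {
			return v
		}
	}
}

func main() {
	fmt.Println(NewVersion())
}
```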
diff --git a/services/store/server/server.go b/services/store/server/server.go
deleted file mode 100644
index e979c5a..0000000
--- a/services/store/server/server.go
+++ /dev/null
@@ -1,373 +0,0 @@
-// Package server implements a storage service.
-package server
-
-// This file defines Server, which implements the server-side Store API from
-// veyron2/services/store/service.vdl.
-
-import (
- "fmt"
- "math/rand"
- "reflect"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "veyron/services/store/memstore"
- memwatch "veyron/services/store/memstore/watch"
- "veyron/services/store/raw"
-
- "veyron2/ipc"
- "veyron2/security"
- "veyron2/verror"
-)
-
-const (
- // transactionMaxLifetime is the maximum duration before a transaction will
- // be garbage collected.
- //
- // TODO(jyh): This should probably be a configuration parameter.
- transactionMaxLifetime = 30 * time.Second
-)
-
-var (
- errNestedTransaction = verror.BadArgf("cannot create a nested Transaction")
-	// Returned if a client calls commit/abort on a name that's not part of a
-	// transaction.
- errNoTransaction = verror.NotFoundf("no transaction")
- // Note, this can happen e.g. due to expiration.
- errTransactionDoesNotExist = verror.NotFoundf("transaction does not exist")
- // Transaction exists, but may not be used by the caller.
- errPermissionDenied = verror.NotAuthorizedf("permission denied")
-
- rng = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
-
- nullTransactionID transactionID
-)
-
-// Server implements Store and uses memstore internally.
-type Server struct {
- mutex sync.RWMutex
-
- // store is the actual store implementation.
- store *memstore.Store
-
- // transactions is the set of active transactions.
- transactions map[transactionID]*transaction
-
- // Transaction garbage collection.
- pending sync.WaitGroup
- ticker *time.Ticker
- closed chan struct{}
-
- // watcher is the actual store watcher implementation.
- watcher *memwatch.Watcher
-}
-
-// transactionID is an internal transaction identifier chosen by the server.
-//
-// TODO(jyh): Consider using a larger identifier space to reduce chance of
-// collisions. (Note, createTransaction handles collisions when generating
-// transactionIDs.)
-type transactionID uint64
-
-// transactionContext defines the context in which a transaction is used. A
-// transaction may be used only in the context that created it.
-// transactionContext weakly identifies a session by the local and remote
-// principals involved in the RPC.
-// TODO(tilaks): Use the local and remote addresses to identify the session.
-// Does a session with a mobile device break if the remote address changes?
-type transactionContext interface {
- // LocalID returns the PublicID of the principal at the local end of the
- // request.
- LocalID() security.PublicID
- // RemoteID returns the PublicID of the principal at the remote end of the
- // request.
- RemoteID() security.PublicID
-}
-
-type transaction struct {
- trans *memstore.Transaction
- expires time.Time
- creatorCtx transactionContext
-}
-
-// ServerConfig provides the parameters needed to construct a Server.
-type ServerConfig struct {
- Admin security.PublicID // Administrator.
-	DBName string // DBName is the name of the database directory.
-}
-
-// New creates a new server.
-func New(config ServerConfig) (*Server, error) {
- mstore, err := memstore.New(config.Admin, config.DBName)
- if err != nil {
- return nil, err
- }
- mwatcher, err := memwatch.New(config.Admin, config.DBName)
- if err != nil {
- return nil, err
- }
- s := &Server{
- store: mstore,
- transactions: make(map[transactionID]*transaction),
- ticker: time.NewTicker(time.Second),
- closed: make(chan struct{}),
- watcher: mwatcher,
- }
- s.pending.Add(1)
- go s.gcLoop()
- // Start with an empty directory at root.
- rootDir := &thing{name: "", obj: s.store.Bind(""), tid: nullTransactionID, server: s}
- if err := rootDir.makeInternal(config.Admin, nil); err != nil {
- return nil, err
- }
- return s, nil
-}
-
-func (s *Server) Close() {
- close(s.closed)
- s.ticker.Stop()
- s.pending.Wait()
- s.store.Close()
- s.watcher.Close()
-}
-
-func (s *Server) String() string {
- return "StoreServer"
-}
-
-// Attributes returns the server status.
-func (s *Server) Attributes(arg string) map[string]string {
- return map[string]string{
- "health": "ok",
- "servertype": s.String(),
- }
-}
-
-// findTransactionComponent returns the (begin, end) offsets of the "$tid.*"
-// component in the given object name, or (-1, -1) if oname does not contain a
-// transaction component.
-func findTransactionComponent(oname string) (int, int) {
- begin := 0
- if !strings.HasPrefix(oname, "$tid") {
- begin = strings.Index(oname, "/$tid")
- }
- if begin == -1 {
- return -1, -1
- }
- end := strings.Index(oname[begin+1:], "/")
- if end == -1 {
- end = len(oname)
- } else {
- end += begin + 1
- }
- return begin, end
-}
-
-// TODO(sadovsky): One of the following:
-// - Reserve prefix string "$tid." for internal use.
-// - Reserve prefix char "$" for internal use.
-// - Require users to escape prefix char "$" when they are referring to their
-// own data, e.g. "\$foo".
-func makeTransactionComponent(id transactionID) string {
- return fmt.Sprintf("$tid.%d", id)
-}
-
-// stripTransactionComponent returns the given object name with its "$tid.*"
-// component removed, and also returns the stripped transactionID.
-// Examples:
-// "/foo/$tid.123/bar" => {"/foo/bar", transactionID(123)}
-// "/foo/bar" => {"/foo/bar", nullTransactionID}
-func stripTransactionComponent(oname string) (string, transactionID, error) {
- begin, end := findTransactionComponent(oname)
- if begin == -1 {
- return oname, nullTransactionID, nil
- }
- tc := oname[begin:end]
- id, err := strconv.ParseInt(tc[strings.LastIndex(tc, ".")+1:], 10, 64)
- if err != nil {
- return "", nullTransactionID, fmt.Errorf("Failed to extract id from %q", tc)
- }
- return oname[:begin] + oname[end:], transactionID(id), nil
-}
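
For reference, the stripping logic above can be exercised in isolation. A self-contained sketch under the same rules; the transactionID type and the example names are stand-ins:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

type transactionID uint64

// strip mirrors stripTransactionComponent: it locates a "$tid.N"
// component, parses N, and returns the name with the component removed.
func strip(oname string) (string, transactionID, error) {
	begin := 0
	if !strings.HasPrefix(oname, "$tid") {
		begin = strings.Index(oname, "/$tid")
	}
	if begin == -1 {
		return oname, 0, nil // no transaction component
	}
	end := strings.Index(oname[begin+1:], "/")
	if end == -1 {
		end = len(oname)
	} else {
		end += begin + 1
	}
	tc := oname[begin:end]
	id, err := strconv.ParseInt(tc[strings.LastIndex(tc, ".")+1:], 10, 64)
	if err != nil {
		return "", 0, fmt.Errorf("failed to extract id from %q", tc)
	}
	return oname[:begin] + oname[end:], transactionID(id), nil
}

func main() {
	fmt.Println(strip("/foo/$tid.123/bar")) // "/foo/bar" 123 <nil>
	fmt.Println(strip("/foo/bar"))          // "/foo/bar" 0 <nil>
}
```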
-
-// NOTE(sadovsky): The transaction's scope should be limited to oname's subtree
-// and its parent, but for now we expand it to the entire store (and don't use
-// oname below).
-func (s *Server) createTransaction(ctx transactionContext, oname string) (string, error) {
- s.mutex.Lock()
- defer s.mutex.Unlock()
-
- var id transactionID
- for {
- id = transactionID(rng.Int63())
- _, ok := s.transactions[id]
- if !ok {
- break
- }
- }
- tdata := &transaction{
- trans: memstore.NewTransaction(),
- expires: time.Now().Add(transactionMaxLifetime),
- creatorCtx: ctx,
- }
- s.transactions[id] = tdata
- return makeTransactionComponent(id), nil
-}
-
-// findTransaction returns the transaction for the given transaction ID.
-func (s *Server) findTransaction(ctx transactionContext, id transactionID) (*memstore.Transaction, error) {
- s.mutex.RLock()
- defer s.mutex.RUnlock()
- return s.findTransactionLocked(ctx, id)
-}
-
-func (s *Server) findTransactionLocked(ctx transactionContext, id transactionID) (*memstore.Transaction, error) {
- if id == nullTransactionID {
- return nil, nil
- }
- tdata, ok := s.transactions[id]
- if !ok {
- return nil, errTransactionDoesNotExist
- }
- // A transaction may be used only by the session (and therefore client)
- // that created it.
- if !tdata.matchesContext(ctx) {
- return nil, errPermissionDenied
- }
- return tdata.trans, nil
-}
-
-// commitTransaction commits the changes in the transaction to the store. The
-// operation is atomic: either all mutations are applied, or none are. Returns
-// an error if the transaction has been aborted.
-func (s *Server) commitTransaction(ctx transactionContext, id transactionID) error {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- t, err := s.findTransactionLocked(ctx, id)
- if err != nil {
- return err
- }
- if t == nil {
- return errNoTransaction
- }
- err = t.Commit()
- delete(s.transactions, id)
- return err
-}
-
-// abortTransaction discards a transaction. This is an optimization;
-// transactions eventually time out and are discarded. However, live
-// transactions consume resources, so if you know that you won't be using a
-// transaction anymore, you should discard it explicitly.
-func (s *Server) abortTransaction(ctx transactionContext, id transactionID) error {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- t, err := s.findTransactionLocked(ctx, id)
- if err != nil {
- return err
- }
- if t == nil {
- return errNoTransaction
- }
- err = t.Abort()
- delete(s.transactions, id)
- return err
-}
-
-func (t *transaction) matchesContext(ctx transactionContext) bool {
- creatorCtx := t.creatorCtx
- return membersEqual(creatorCtx.LocalID().Names(), ctx.LocalID().Names()) &&
- membersEqual(creatorCtx.RemoteID().Names(), ctx.RemoteID().Names())
-}
-
-// membersEqual reports whether two slices of strings have the same set of
-// members, regardless of order.
-func membersEqual(slice1, slice2 []string) bool {
- set1 := make(map[string]bool, len(slice1))
- for _, s := range slice1 {
- set1[s] = true
- }
- set2 := make(map[string]bool, len(slice2))
- for _, s := range slice2 {
- set2[s] = true
- }
- // DeepEqual tests keys for == equality, which is sufficient for strings.
- return reflect.DeepEqual(set1, set2)
-}
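
Usage-wise, membersEqual treats the slices as sets, so both ordering and duplicates are ignored. A runnable sketch under that reading; the name slices are made-up examples:

```go
package main

import (
	"fmt"
	"reflect"
)

// membersEqual builds a membership set from each slice and compares them.
func membersEqual(slice1, slice2 []string) bool {
	set1 := make(map[string]bool, len(slice1))
	for _, s := range slice1 {
		set1[s] = true
	}
	set2 := make(map[string]bool, len(slice2))
	for _, s := range slice2 {
		set2[s] = true
	}
	// DeepEqual compares map keys with ==, which suffices for strings.
	return reflect.DeepEqual(set1, set2)
}

func main() {
	fmt.Println(membersEqual([]string{"root", "root/blessed"}, []string{"root/blessed", "root"})) // true
	fmt.Println(membersEqual([]string{"root", "root"}, []string{"root"}))                         // true: duplicates are ignored
	fmt.Println(membersEqual([]string{"root"}, []string{"other"}))                                // false
}
```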
-
-// gcLoop drops transactions that have expired.
-func (s *Server) gcLoop() {
- for {
- select {
- case <-s.closed:
- s.pending.Done()
- return
- case <-s.ticker.C:
- }
-
- s.mutex.Lock()
- now := time.Now()
- for id, tdata := range s.transactions {
- if now.After(tdata.expires) {
- tdata.trans.Abort()
- delete(s.transactions, id)
- }
- }
- s.mutex.Unlock()
- }
-}
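
The gcLoop above follows a common shape: a ticker drives periodic sweeps under the lock, and a closed channel plus a WaitGroup give a clean shutdown. A generic, hypothetical sketch of that shape, independent of the store types:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type cache struct {
	mu      sync.Mutex
	expires map[string]time.Time
	ticker  *time.Ticker
	closed  chan struct{}
	pending sync.WaitGroup
}

// gcLoop sweeps expired entries on each tick and exits when closed is shut.
func (c *cache) gcLoop() {
	defer c.pending.Done()
	for {
		select {
		case <-c.closed:
			return
		case <-c.ticker.C:
		}
		c.mu.Lock()
		now := time.Now()
		for k, t := range c.expires {
			if now.After(t) {
				delete(c.expires, k)
			}
		}
		c.mu.Unlock()
	}
}

func main() {
	c := &cache{
		expires: map[string]time.Time{"t1": time.Now().Add(50 * time.Millisecond)},
		ticker:  time.NewTicker(20 * time.Millisecond),
		closed:  make(chan struct{}),
	}
	c.pending.Add(1)
	go c.gcLoop()
	time.Sleep(120 * time.Millisecond)
	close(c.closed)
	c.ticker.Stop()
	c.pending.Wait()
	fmt.Println(len(c.expires)) // 0: the expired entry was swept
}
```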
-
-// Watch returns a stream of all changes.
-func (s *Server) Watch(ctx ipc.ServerContext, req raw.Request, stream raw.StoreServiceWatchStream) error {
- return s.watcher.WatchRaw(ctx, req, stream)
-}
-
-// PutMutations atomically commits a stream of Mutations when the stream is
-// closed. Mutations are not committed if the request is cancelled before the
-// stream has been closed.
-func (s *Server) PutMutations(ctx ipc.ServerContext, stream raw.StoreServicePutMutationsStream) error {
- return s.store.PutMutations(ctx, stream)
-}
-
-type storeDispatcher struct {
- s *Server
- auth security.Authorizer
-}
-
-// NewStoreDispatcher returns an object dispatcher.
-func NewStoreDispatcher(s *Server, auth security.Authorizer) ipc.Dispatcher {
- return &storeDispatcher{s: s, auth: auth}
-}
-
-func (d *storeDispatcher) Lookup(suffix, method string) (ipc.Invoker, security.Authorizer, error) {
- serv, err := d.lookupServer(suffix)
- if err != nil {
- return nil, nil, err
- }
- return ipc.ReflectInvoker(serv), d.auth, nil
-}
-
-func (d *storeDispatcher) lookupServer(suffix string) (interface{}, error) {
- // Strip leading "/" if present so that server internals can assume a
- // particular form.
- suffix = strings.TrimPrefix(suffix, "/")
- if strings.HasSuffix(suffix, raw.RawStoreSuffix) {
- return raw.NewServerStore(d.s), nil
- } else {
- t, err := d.s.lookupThing(suffix)
- if err != nil {
- return nil, err
- }
- return NewServerstoreThing(t), nil
- }
-}
-
-func (s *Server) lookupThing(name string) (*thing, error) {
- name, tid, err := stripTransactionComponent(name)
- if err != nil {
- return nil, err
- }
- return &thing{name: name, obj: s.store.Bind(name), tid: tid, server: s}, nil
-}
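
lookupServer above routes purely on the object-name suffix: raw-store names get the raw service, everything else resolves to a per-object server. A toy sketch of that routing decision; the suffix constant and return values are placeholders for the real stubs:

```go
package main

import (
	"fmt"
	"strings"
)

const rawStoreSuffix = ".store.raw" // illustrative; the real constant lives in the raw package

// lookupServer strips a leading "/" and dispatches on the suffix.
func lookupServer(suffix string) string {
	suffix = strings.TrimPrefix(suffix, "/")
	if strings.HasSuffix(suffix, rawStoreSuffix) {
		return "rawStore"
	}
	return "thing(" + suffix + ")"
}

func main() {
	fmt.Println(lookupServer("/a/b/.store.raw")) // rawStore
	fmt.Println(lookupServer("/a/b/c"))          // thing(a/b/c)
}
```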
diff --git a/services/store/server/server_test.go b/services/store/server/server_test.go
deleted file mode 100644
index a0f3ca7..0000000
--- a/services/store/server/server_test.go
+++ /dev/null
@@ -1,672 +0,0 @@
-package server
-
-import (
- "fmt"
- "io/ioutil"
- "log"
- "os"
- "reflect"
- "runtime"
- "testing"
-
- _ "veyron/lib/testutil" // initialize vlog
- storetest "veyron/services/store/memstore/testing"
- "veyron/services/store/raw"
-
- "veyron2/ipc"
- "veyron2/naming"
- "veyron2/rt"
- "veyron2/security"
- "veyron2/services/watch/types"
- "veyron2/storage"
- _ "veyron2/vlog"
- "veyron2/vom"
-)
-
-var (
- rootPublicID security.PublicID = security.FakePublicID("root")
- rootName = fmt.Sprintf("%s", rootPublicID)
- blessedPublicId security.PublicID = security.FakePublicID("root/blessed")
-)
-
-// Dir is a simple directory.
-type Dir struct {
- Entries map[string]storage.ID
-}
-
-func init() {
- vom.Register(&Dir{})
-}
-
-func newValue() interface{} {
- return &Dir{}
-}
-
-func closeTest(config ServerConfig, s *Server) {
- s.Close()
-	os.RemoveAll(config.DBName) // DBName is a temp directory (see newServer); RemoveAll handles non-empty dirs
-}
-
-func newServer() (*Server, func()) {
- dbName, err := ioutil.TempDir(os.TempDir(), "test_server_test.db")
- if err != nil {
- log.Fatal("ioutil.TempDir() failed: ", err)
- }
- config := ServerConfig{
- Admin: rootPublicID,
- DBName: dbName,
- }
- s, err := New(config)
- if err != nil {
- log.Fatal("server.New() failed: ", err)
- }
- closer := func() { closeTest(config, s) }
- return s, closer
-}
-
-func lookupThingOrDie(s *Server, name string) *thing {
- t, err := s.lookupThing(name)
- if err != nil {
- panic(err)
- }
- return t
-}
-
-// createTransaction creates a new transaction and returns its name relative to
-// the root of the store.
-func createTransaction(t *testing.T, s *Server, ctx ipc.ServerContext, name string) string {
- _, file, line, _ := runtime.Caller(1)
- tid, err := lookupThingOrDie(s, name).NewTransaction(ctx, nil)
- if err != nil {
- t.Fatalf("%s(%d): can't create transaction %s: %s", file, line, name, err)
- }
- return naming.Join(name, tid)
-}
-
-func TestLookupInvalidTransactionName(t *testing.T) {
- s, c := newServer()
- defer c()
-
- _, err := s.lookupThing("/$tid.bad/foo")
- if err == nil {
- t.Fatalf("lookupThing should've failed, but didn't")
- }
-}
-
-func TestNestedTransactionError(t *testing.T) {
- rt.Init()
- s, c := newServer()
- defer c()
-
- rootCtx := storetest.NewFakeServerContext(rootPublicID)
- tname := createTransaction(t, s, rootCtx, "/")
- if _, err := lookupThingOrDie(s, tname).NewTransaction(rootCtx, nil); err == nil {
- t.Fatalf("creating nested transaction at %s should've failed, but didn't", tname)
- }
- // Try again with a valid object in between the two $tid components;
- // CreateTransaction should still fail.
- lookupThingOrDie(s, tname).Put(rootCtx, newValue())
- foo := naming.Join(tname, "foo")
- if _, err := lookupThingOrDie(s, foo).NewTransaction(rootCtx, nil); err == nil {
- t.Fatalf("creating nested transaction at %s should've failed, but didn't", foo)
- }
-}
-
-func TestPutGetRemoveObject(t *testing.T) {
- s, c := newServer()
- defer c()
-
- testPutGetRemove(t, s, "a")
-}
-
-func testPutGetRemove(t *testing.T, s *Server, name string) {
- rt.Init()
- rootCtx := storetest.NewFakeServerContext(rootPublicID)
- value := newValue()
- {
- // Check that the object does not exist.
- tobj := lookupThingOrDie(s, createTransaction(t, s, rootCtx, name))
- if ok, err := tobj.Exists(rootCtx); ok || err != nil {
- t.Fatalf("Should not exist: %s", err)
- }
- if v, err := tobj.Get(rootCtx); v.Stat.ID.IsValid() && err == nil {
- t.Fatalf("Should not exist: %v, %s", v, err)
- }
- }
-
- {
- // Add the object.
- tobj1 := lookupThingOrDie(s, createTransaction(t, s, rootCtx, name))
- if _, err := tobj1.Put(rootCtx, value); err != nil {
- t.Fatalf("Unexpected error: %s", err)
- }
- if ok, err := tobj1.Exists(rootCtx); !ok || err != nil {
- t.Fatalf("Should exist: %s", err)
- }
- if _, err := tobj1.Get(rootCtx); err != nil {
- t.Fatalf("Object should exist: %s", err)
- }
-
- // Transactions are isolated.
- tobj2 := lookupThingOrDie(s, createTransaction(t, s, rootCtx, name))
- if ok, err := tobj2.Exists(rootCtx); ok || err != nil {
- t.Fatalf("Should not exist: %s", err)
- }
- if v, err := tobj2.Get(rootCtx); v.Stat.ID.IsValid() && err == nil {
- t.Fatalf("Should not exist: %v, %s", v, err)
- }
-
- // Apply tobj1.
- if err := tobj1.Commit(rootCtx); err != nil {
- t.Fatalf("Unexpected error: %s", err)
- }
-
- // tobj2 is still isolated.
- if ok, err := tobj2.Exists(rootCtx); ok || err != nil {
- t.Fatalf("Should not exist: %s", err)
- }
- if v, err := tobj2.Get(rootCtx); v.Stat.ID.IsValid() && err == nil {
- t.Fatalf("Should not exist: %v, %s", v, err)
- }
-
- // tobj3 observes the commit.
- tobj3 := lookupThingOrDie(s, createTransaction(t, s, rootCtx, name))
- if ok, err := tobj3.Exists(rootCtx); !ok || err != nil {
- t.Fatalf("Should exist: %s", err)
- }
- if _, err := tobj3.Get(rootCtx); err != nil {
- t.Fatalf("Object should exist: %s", err)
- }
- }
-
- {
- // Remove the object.
- tobj1 := lookupThingOrDie(s, createTransaction(t, s, rootCtx, name))
- if err := tobj1.Remove(rootCtx); err != nil {
- t.Fatalf("Unexpected error: %s", err)
- }
- if ok, err := tobj1.Exists(rootCtx); ok || err != nil {
- t.Fatalf("Should not exist: %s", err)
- }
- if v, err := tobj1.Get(rootCtx); v.Stat.ID.IsValid() || err == nil {
- t.Fatalf("Object should not exist: %T, %v, %s", v, v, err)
- }
-
- // The removal is isolated.
- tobj2 := lookupThingOrDie(s, createTransaction(t, s, rootCtx, name))
- if ok, err := tobj2.Exists(rootCtx); !ok || err != nil {
- t.Fatalf("Should exist: %s", err)
- }
- if _, err := tobj2.Get(rootCtx); err != nil {
- t.Fatalf("Object should exist: %s", err)
- }
-
- // Apply tobj1.
- if err := tobj1.Commit(rootCtx); err != nil {
- t.Fatalf("Unexpected error: %s", err)
- }
-
- // The removal is isolated.
- if ok, err := tobj2.Exists(rootCtx); !ok || err != nil {
- t.Fatalf("Should exist: %s", err)
- }
- if _, err := tobj2.Get(rootCtx); err != nil {
- t.Fatalf("Object should exist: %s", err)
- }
- }
-
- {
- // Check that the object does not exist.
- tobj1 := lookupThingOrDie(s, createTransaction(t, s, rootCtx, name))
- if ok, err := tobj1.Exists(rootCtx); ok || err != nil {
- t.Fatalf("Should not exist")
- }
- if v, err := tobj1.Get(rootCtx); v.Stat.ID.IsValid() && err == nil {
- t.Fatalf("Should not exist: %v, %s", v, err)
- }
- }
-}
-
-// TODO(sadovsky): Add more test cases for Commit/Abort:
-// - expired transaction: server should return errTransactionDoesNotExist
-// - no transaction: server should return errNoTransaction
-
-func TestWatchGlob(t *testing.T) {
- rt.Init()
- rootCtx := storetest.NewFakeServerContext(rootPublicID)
-
- s, c := newServer()
- defer c()
-
- dirname, objname := "/a", "/a/b"
- dir, obj := lookupThingOrDie(s, dirname), lookupThingOrDie(s, objname)
-
- // Before the watch request has been made, commit a transaction that makes
- // directory /a.
- {
- tdir := lookupThingOrDie(s, createTransaction(t, s, rootCtx, dirname))
- err := tdir.Make(rootCtx)
- if err != nil {
- t.Fatalf("Unexpected error: %s", err)
- }
- if err := tdir.Commit(rootCtx); err != nil {
- t.Fatalf("Unexpected error: %s", err)
- }
- }
-
- // Start watch requests on /a and /a/b.
- req := types.GlobRequest{Pattern: "..."}
- wdir := storetest.WatchGlob(rootPublicID, dir.WatchGlob, req)
- wobj := storetest.WatchGlob(rootPublicID, obj.WatchGlob, req)
-
- rStreamDir := wdir.RecvStream()
- rStreamObj := wobj.RecvStream()
-
- // The watch on /a should send a change on /a.
- {
- if !rStreamDir.Advance() {
- t.Fatalf("Advance() failed: %v", rStreamDir.Err())
- }
- change := rStreamDir.Value()
- if change.Continued {
- t.Fatalf("Expected change to be the last in this transaction")
- }
- }
- // The watch on /a/b should send no change. The first change it sends is
- // verified below.
-
- value := "v"
- var id storage.ID
-
- // Commit a second transaction that puts /a/b.
- {
- tobj := lookupThingOrDie(s, createTransaction(t, s, rootCtx, objname))
- st, err := tobj.Put(rootCtx, value)
- if err != nil {
- t.Fatalf("Unexpected error: %s", err)
- }
- id = st.ID
- if err := tobj.Commit(rootCtx); err != nil {
- t.Fatalf("Unexpected error: %s", err)
- }
- }
-
- // The watch on /a should send changes on /a and /a/b.
- {
- changes := []types.Change{}
- if !rStreamDir.Advance() {
- t.Fatalf("Advance() failed: %v", rStreamDir.Err())
- }
- change := rStreamDir.Value()
- changes = append(changes, change)
- if !change.Continued {
- t.Fatalf("Expected change to NOT be the last in this transaction")
- }
- if !rStreamDir.Advance() {
- t.Fatalf("Advance() failed: %v", rStreamDir.Err())
- }
- change = rStreamDir.Value()
- changes = append(changes, change)
- if change.Continued {
- t.Fatalf("Expected change to be the last in this transaction")
- }
- storetest.ExpectEntryExistsNameOnly(t, changes, "a")
- storetest.ExpectEntryExists(t, changes, "a/b", id, value)
- }
- // The watch on /a/b should send a change on /a/b.
- {
- changes := []types.Change{}
- if !rStreamObj.Advance() {
- t.Fatalf("Advance() failed: %v", rStreamObj.Err())
- }
- change := rStreamObj.Value()
- changes = append(changes, change)
- if change.Continued {
- t.Fatalf("Expected change to be the last in this transaction")
- }
- storetest.ExpectEntryExists(t, changes, "a/b", id, value)
- }
-}
-
-func TestRawWatch(t *testing.T) {
- rt.Init()
- rootCtx := storetest.NewFakeServerContext(rootPublicID)
-
- s, c := newServer()
- defer c()
-
- name1 := "/a"
- value1 := "v1"
- var id1 storage.ID
-
- // Before the watch request has been made, commit a transaction that puts /a.
- {
- tobj := lookupThingOrDie(s, createTransaction(t, s, rootCtx, name1))
- st, err := tobj.Put(rootCtx, value1)
- if err != nil {
- t.Fatalf("Unexpected error: %s", err)
- }
- id1 = st.ID
- if err := tobj.Commit(rootCtx); err != nil {
- t.Fatalf("Unexpected error: %s", err)
- }
- }
-
- // Start a watch request.
- req := raw.Request{}
- ws := storetest.WatchRaw(rootPublicID, s.Watch, req)
-
- rStream := ws.RecvStream()
- // Check that watch detects the changes in the first transaction.
- {
- changes := []types.Change{}
- // First change is making the root dir (in server.go), second is updating
- // the root dir (adding a dir entry), third is putting /a.
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change := rStream.Value()
- changes = append(changes, change)
- if change.Continued {
- t.Fatalf("Expected change to be the last in this transaction")
- }
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change = rStream.Value()
- changes = append(changes, change)
- if !change.Continued {
- t.Fatalf("Expected change to NOT be the last in this transaction")
- }
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change = rStream.Value()
- changes = append(changes, change)
- if change.Continued {
- t.Fatalf("Expected change to be the last in this transaction")
- }
- storetest.ExpectMutationExistsNoVersionCheck(t, changes, id1, value1)
- }
-
- name2 := "/b"
- value2 := "v2"
- var id2 storage.ID
-
- // Commit a second transaction that puts /b.
- {
- tobj := lookupThingOrDie(s, createTransaction(t, s, rootCtx, name2))
- st, err := tobj.Put(rootCtx, value2)
- if err != nil {
- t.Fatalf("Unexpected error: %s", err)
- }
- id2 = st.ID
- if err := tobj.Commit(rootCtx); err != nil {
- t.Fatalf("Unexpected error: %s", err)
- }
- }
-
- // Check that watch detects the changes in the second transaction.
- {
- changes := []types.Change{}
- // First change is updating the root dir (adding a dir entry), second is
- // putting /b.
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change := rStream.Value()
- changes = append(changes, change)
- if !change.Continued {
- t.Fatalf("Expected change to NOT be the last in this transaction")
- }
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change = rStream.Value()
- changes = append(changes, change)
- if change.Continued {
- t.Fatalf("Expected change to be the last in this transaction")
- }
- // Note, we don't know the ID of the root dir so we can't check that it
- // exists in 'changes'.
- storetest.ExpectMutationExistsNoVersionCheck(t, changes, id2, value2)
- }
-}
-
-// Note, this test is identical to TestRawWatch up until the removal of /b.
-func TestGarbageCollectionOnCommit(t *testing.T) {
- rt.Init()
- rootCtx := storetest.NewFakeServerContext(rootPublicID)
-
- s, c := newServer()
- defer c()
-
- name1 := "/a"
- value1 := "v1"
- var id1 storage.ID
-
- // Before the watch request has been made, commit a transaction that puts /a.
- {
- tobj := lookupThingOrDie(s, createTransaction(t, s, rootCtx, name1))
- st, err := tobj.Put(rootCtx, value1)
- if err != nil {
- t.Fatalf("Unexpected error: %s", err)
- }
- id1 = st.ID
- if err := tobj.Commit(rootCtx); err != nil {
- t.Fatalf("Unexpected error: %s", err)
- }
- }
-
- // Start a watch request.
- req := raw.Request{}
- ws := storetest.WatchRaw(rootPublicID, s.Watch, req)
-
- rStream := ws.RecvStream()
- // Check that watch detects the changes in the first transaction.
- {
- changes := []types.Change{}
- // First change is making the root dir (in server.go), second is updating
- // the root dir (adding a dir entry), third is putting /a.
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change := rStream.Value()
- changes = append(changes, change)
- if change.Continued {
- t.Fatalf("Expected change to be the last in this transaction")
- }
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change = rStream.Value()
- changes = append(changes, change)
- if !change.Continued {
- t.Fatalf("Expected change to NOT be the last in this transaction")
- }
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change = rStream.Value()
- changes = append(changes, change)
- if change.Continued {
- t.Fatalf("Expected change to be the last in this transaction")
- }
- storetest.ExpectMutationExistsNoVersionCheck(t, changes, id1, value1)
- }
-
- name2 := "/b"
- value2 := "v2"
- var id2 storage.ID
-
- // Commit a second transaction that puts /b.
- {
- tobj := lookupThingOrDie(s, createTransaction(t, s, rootCtx, name2))
- st, err := tobj.Put(rootCtx, value2)
- if err != nil {
- t.Fatalf("Unexpected error: %s", err)
- }
- id2 = st.ID
- if err := tobj.Commit(rootCtx); err != nil {
- t.Fatalf("Unexpected error: %s", err)
- }
- }
-
- // Check that watch detects the changes in the second transaction.
- {
- changes := []types.Change{}
- // First change is updating the root dir (adding a dir entry), second is
- // putting /b.
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change := rStream.Value()
- changes = append(changes, change)
- if !change.Continued {
- t.Fatalf("Expected change to NOT be the last in this transaction")
- }
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change = rStream.Value()
- changes = append(changes, change)
- if change.Continued {
- t.Fatalf("Expected change to be the last in this transaction")
- }
- // Note, we don't know the ID of the root dir so we can't check that it
- // exists in 'changes'.
- storetest.ExpectMutationExistsNoVersionCheck(t, changes, id2, value2)
- }
-
- // Commit a third transaction that removes /b.
- {
- tobj := lookupThingOrDie(s, createTransaction(t, s, rootCtx, name2))
- if err := tobj.Remove(rootCtx); err != nil {
- t.Fatalf("Unexpected error: %s", err)
- }
- if err := tobj.Commit(rootCtx); err != nil {
- t.Fatalf("Unexpected error: %s", err)
- }
- }
-
- // Check that watch detects the changes in the third transaction.
- {
- changes := []types.Change{}
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change := rStream.Value()
- changes = append(changes, change)
- if change.Continued {
- t.Fatalf("Expected change to be the last in this transaction")
- }
- // Note, we don't know the ID of the root dir so we can't check that it
- // exists in 'changes'.
- }
-
- // Check that watch detects the garbage collection of /b.
- {
- changes := []types.Change{}
- if !rStream.Advance() {
- t.Fatalf("Advance() failed: %v", rStream.Err())
- }
- change := rStream.Value()
- changes = append(changes, change)
- if change.Continued {
- t.Fatalf("Expected change to be the last in this transaction")
- }
- storetest.ExpectMutationDoesNotExistNoVersionCheck(t, changes, id2)
- }
-}
-
-func TestTransactionSecurity(t *testing.T) {
- rt.Init()
- rootCtx := storetest.NewFakeServerContext(rootPublicID)
- blessedCtx := storetest.NewFakeServerContext(blessedPublicId)
-
- s, c := newServer()
- defer c()
-
- // Create a root.
- name := "/"
- value := newValue()
-
- // Create a transaction in the root's session.
- tobj := lookupThingOrDie(s, createTransaction(t, s, rootCtx, name))
-
- // Check that the transaction cannot be accessed by the blessee.
- if _, err := tobj.Exists(blessedCtx); err != errPermissionDenied {
- t.Fatalf("Unexpected error: %v", err)
- }
- if _, err := tobj.Get(blessedCtx); err != errPermissionDenied {
- t.Fatalf("Unexpected error: %v", err)
- }
- if _, err := tobj.Put(blessedCtx, value); err != errPermissionDenied {
- t.Fatalf("Unexpected error: %v", err)
- }
- if err := tobj.Remove(blessedCtx); err != errPermissionDenied {
- t.Fatalf("Unexpected error: %v", err)
- }
- if err := tobj.Abort(blessedCtx); err != errPermissionDenied {
- t.Fatalf("Unexpected error: %v", err)
- }
- if err := tobj.Commit(blessedCtx); err != errPermissionDenied {
- t.Fatalf("Unexpected error: %v", err)
- }
-
- // Create a transaction in the blessee's session.
- tobj = lookupThingOrDie(s, createTransaction(t, s, blessedCtx, name))
-
- // Check that the transaction cannot be accessed by the root.
- if _, err := tobj.Exists(rootCtx); err != errPermissionDenied {
- t.Fatalf("Unexpected error: %v", err)
- }
- if _, err := tobj.Get(rootCtx); err != errPermissionDenied {
- t.Fatalf("Unexpected error: %v", err)
- }
- if _, err := tobj.Put(rootCtx, value); err != errPermissionDenied {
- t.Fatalf("Unexpected error: %v", err)
- }
- if err := tobj.Remove(rootCtx); err != errPermissionDenied {
- t.Fatalf("Unexpected error: %v", err)
- }
- if err := tobj.Abort(rootCtx); err != errPermissionDenied {
- t.Fatalf("Unexpected error: %v", err)
- }
- if err := tobj.Commit(rootCtx); err != errPermissionDenied {
- t.Fatalf("Unexpected error: %v", err)
- }
-}
-
-func TestStoreDispatcher(t *testing.T) {
- rawType := reflect.PtrTo(reflect.TypeOf(raw.ServerStubStore{}))
- thingType := reflect.PtrTo(reflect.TypeOf(ServerStubstoreThing{}))
-
- tests := []struct {
- name string
- t reflect.Type
- }{
- {raw.RawStoreSuffix, rawType},
- {"a/b/" + raw.RawStoreSuffix, rawType},
- {"a/b/c" + raw.RawStoreSuffix, rawType},
- {"", thingType},
- {"a/b/", thingType},
- {"a/b/c", thingType},
- }
-
- s, c := newServer()
- defer c()
-
- // TODO(bprosnitz): Switch this to use just exported methods (using signature)
- // once signature stabilizes.
- d := NewStoreDispatcher(s, nil).(*storeDispatcher)
- for _, test := range tests {
- serv, err := d.lookupServer(test.name)
- if err != nil {
- t.Fatalf("error looking up %s: %s", test.name, err)
- }
- if reflect.TypeOf(serv) != test.t {
- t.Fatalf("error looking up %s. got %T, expected %v", test.name, serv, test.t)
- }
- }
-}
diff --git a/services/store/server/service.vdl b/services/store/server/service.vdl
deleted file mode 100644
index f4648ee..0000000
--- a/services/store/server/service.vdl
+++ /dev/null
@@ -1,14 +0,0 @@
-package server
-
-import (
- "veyron2/services/store"
-)
-
-// Named 'storeThing' instead of 'thing' so that the struct in thing.go can be
-// named 'thing'.
-type storeThing interface {
- store.DirSpecific
- store.ObjectSpecific
- store.DirOrObject
- store.Transaction
-}
diff --git a/services/store/server/service.vdl.go b/services/store/server/service.vdl.go
deleted file mode 100644
index f596c17..0000000
--- a/services/store/server/service.vdl.go
+++ /dev/null
@@ -1,408 +0,0 @@
-// This file was auto-generated by the veyron vdl tool.
-// Source: service.vdl
-
-package server
-
-import (
- "veyron2/services/store"
-
- // The non-user imports are prefixed with "_gen_" to prevent collisions.
- _gen_veyron2 "veyron2"
- _gen_context "veyron2/context"
- _gen_ipc "veyron2/ipc"
- _gen_naming "veyron2/naming"
- _gen_vdlutil "veyron2/vdl/vdlutil"
- _gen_wiretype "veyron2/wiretype"
-)
-
-// TODO(bprosnitz) Remove this line once signatures are updated to use typevals.
-// It corrects a bug where _gen_wiretype is unused in VDL packages where only bootstrap types are used on interfaces.
-const _ = _gen_wiretype.TypeIDInvalid
-
-// Named 'storeThing' instead of 'thing' so that the struct in thing.go can be
-// named 'thing'.
-// storeThing is the interface the client binds and uses.
-// storeThing_ExcludingUniversal is the interface without internal framework-added methods
-// to enable embedding without method collisions. Not to be used directly by clients.
-type storeThing_ExcludingUniversal interface {
- store.DirSpecific_ExcludingUniversal
- store.ObjectSpecific_ExcludingUniversal
- store.DirOrObject_ExcludingUniversal
- store.Transaction_ExcludingUniversal
-}
-type storeThing interface {
- _gen_ipc.UniversalServiceMethods
- storeThing_ExcludingUniversal
-}
-
-// storeThingService is the interface the server implements.
-type storeThingService interface {
- store.DirSpecificService
- store.ObjectSpecificService
- store.DirOrObjectService
- store.TransactionService
-}
-
-// BindstoreThing returns the client stub implementing the storeThing
-// interface.
-//
-// If no _gen_ipc.Client is specified, the default _gen_ipc.Client in the
-// global Runtime is used.
-func BindstoreThing(name string, opts ..._gen_ipc.BindOpt) (storeThing, error) {
- var client _gen_ipc.Client
- switch len(opts) {
- case 0:
- // Do nothing.
- case 1:
- if clientOpt, ok := opts[0].(_gen_ipc.Client); opts[0] == nil || ok {
- client = clientOpt
- } else {
- return nil, _gen_vdlutil.ErrUnrecognizedOption
- }
- default:
- return nil, _gen_vdlutil.ErrTooManyOptionsToBind
- }
- stub := &clientStubstoreThing{defaultClient: client, name: name}
- stub.DirSpecific_ExcludingUniversal, _ = store.BindDirSpecific(name, client)
- stub.ObjectSpecific_ExcludingUniversal, _ = store.BindObjectSpecific(name, client)
- stub.DirOrObject_ExcludingUniversal, _ = store.BindDirOrObject(name, client)
- stub.Transaction_ExcludingUniversal, _ = store.BindTransaction(name, client)
-
- return stub, nil
-}
-
-// NewServerstoreThing creates a new server stub.
-//
-// It takes a regular server implementing the storeThingService
-// interface, and returns a new server stub.
-func NewServerstoreThing(server storeThingService) interface{} {
- return &ServerStubstoreThing{
- ServerStubDirSpecific: *store.NewServerDirSpecific(server).(*store.ServerStubDirSpecific),
- ServerStubObjectSpecific: *store.NewServerObjectSpecific(server).(*store.ServerStubObjectSpecific),
- ServerStubDirOrObject: *store.NewServerDirOrObject(server).(*store.ServerStubDirOrObject),
- ServerStubTransaction: *store.NewServerTransaction(server).(*store.ServerStubTransaction),
- service: server,
- }
-}
-
-// clientStubstoreThing implements storeThing.
-type clientStubstoreThing struct {
- store.DirSpecific_ExcludingUniversal
- store.ObjectSpecific_ExcludingUniversal
- store.DirOrObject_ExcludingUniversal
- store.Transaction_ExcludingUniversal
-
- defaultClient _gen_ipc.Client
- name string
-}
-
-func (__gen_c *clientStubstoreThing) client(ctx _gen_context.T) _gen_ipc.Client {
- if __gen_c.defaultClient != nil {
- return __gen_c.defaultClient
- }
- return _gen_veyron2.RuntimeFromContext(ctx).Client()
-}
-
-func (__gen_c *clientStubstoreThing) UnresolveStep(ctx _gen_context.T, opts ..._gen_ipc.CallOpt) (reply []string, err error) {
- var call _gen_ipc.Call
- if call, err = __gen_c.client(ctx).StartCall(ctx, __gen_c.name, "UnresolveStep", nil, opts...); err != nil {
- return
- }
- if ierr := call.Finish(&reply, &err); ierr != nil {
- err = ierr
- }
- return
-}
-
-func (__gen_c *clientStubstoreThing) Signature(ctx _gen_context.T, opts ..._gen_ipc.CallOpt) (reply _gen_ipc.ServiceSignature, err error) {
- var call _gen_ipc.Call
- if call, err = __gen_c.client(ctx).StartCall(ctx, __gen_c.name, "Signature", nil, opts...); err != nil {
- return
- }
- if ierr := call.Finish(&reply, &err); ierr != nil {
- err = ierr
- }
- return
-}
-
-func (__gen_c *clientStubstoreThing) GetMethodTags(ctx _gen_context.T, method string, opts ..._gen_ipc.CallOpt) (reply []interface{}, err error) {
- var call _gen_ipc.Call
- if call, err = __gen_c.client(ctx).StartCall(ctx, __gen_c.name, "GetMethodTags", []interface{}{method}, opts...); err != nil {
- return
- }
- if ierr := call.Finish(&reply, &err); ierr != nil {
- err = ierr
- }
- return
-}
-
-// ServerStubstoreThing wraps a server that implements
-// storeThingService and provides an object that satisfies
-// the requirements of veyron2/ipc.ReflectInvoker.
-type ServerStubstoreThing struct {
- store.ServerStubDirSpecific
- store.ServerStubObjectSpecific
- store.ServerStubDirOrObject
- store.ServerStubTransaction
-
- service storeThingService
-}
-
-func (__gen_s *ServerStubstoreThing) GetMethodTags(call _gen_ipc.ServerCall, method string) ([]interface{}, error) {
-	// TODO(bprosnitz) GetMethodTags() will be replaced with Signature().
- // Note: This exhibits some weird behavior like returning a nil error if the method isn't found.
- // This will change when it is replaced with Signature().
- if resp, err := __gen_s.ServerStubDirSpecific.GetMethodTags(call, method); resp != nil || err != nil {
- return resp, err
- }
- if resp, err := __gen_s.ServerStubObjectSpecific.GetMethodTags(call, method); resp != nil || err != nil {
- return resp, err
- }
- if resp, err := __gen_s.ServerStubDirOrObject.GetMethodTags(call, method); resp != nil || err != nil {
- return resp, err
- }
- if resp, err := __gen_s.ServerStubTransaction.GetMethodTags(call, method); resp != nil || err != nil {
- return resp, err
- }
- return nil, nil
-}
-
-func (__gen_s *ServerStubstoreThing) Signature(call _gen_ipc.ServerCall) (_gen_ipc.ServiceSignature, error) {
- result := _gen_ipc.ServiceSignature{Methods: make(map[string]_gen_ipc.MethodSignature)}
-
- result.TypeDefs = []_gen_vdlutil.Any{}
- var ss _gen_ipc.ServiceSignature
- var firstAdded int
- ss, _ = __gen_s.ServerStubDirSpecific.Signature(call)
- firstAdded = len(result.TypeDefs)
- for k, v := range ss.Methods {
- for i, _ := range v.InArgs {
- if v.InArgs[i].Type >= _gen_wiretype.TypeIDFirst {
- v.InArgs[i].Type += _gen_wiretype.TypeID(firstAdded)
- }
- }
- for i, _ := range v.OutArgs {
- if v.OutArgs[i].Type >= _gen_wiretype.TypeIDFirst {
- v.OutArgs[i].Type += _gen_wiretype.TypeID(firstAdded)
- }
- }
- if v.InStream >= _gen_wiretype.TypeIDFirst {
- v.InStream += _gen_wiretype.TypeID(firstAdded)
- }
- if v.OutStream >= _gen_wiretype.TypeIDFirst {
- v.OutStream += _gen_wiretype.TypeID(firstAdded)
- }
- result.Methods[k] = v
- }
-	// TODO(bprosnitz) combine type definitions from embedded interfaces in a way that doesn't cause duplication.
- for _, d := range ss.TypeDefs {
- switch wt := d.(type) {
- case _gen_wiretype.SliceType:
- if wt.Elem >= _gen_wiretype.TypeIDFirst {
- wt.Elem += _gen_wiretype.TypeID(firstAdded)
- }
- d = wt
- case _gen_wiretype.ArrayType:
- if wt.Elem >= _gen_wiretype.TypeIDFirst {
- wt.Elem += _gen_wiretype.TypeID(firstAdded)
- }
- d = wt
- case _gen_wiretype.MapType:
- if wt.Key >= _gen_wiretype.TypeIDFirst {
- wt.Key += _gen_wiretype.TypeID(firstAdded)
- }
- if wt.Elem >= _gen_wiretype.TypeIDFirst {
- wt.Elem += _gen_wiretype.TypeID(firstAdded)
- }
- d = wt
- case _gen_wiretype.StructType:
- for i, fld := range wt.Fields {
- if fld.Type >= _gen_wiretype.TypeIDFirst {
- wt.Fields[i].Type += _gen_wiretype.TypeID(firstAdded)
- }
- }
- d = wt
-		// NOTE: other types are missing, but we are upgrading anyway.
- }
- result.TypeDefs = append(result.TypeDefs, d)
- }
- ss, _ = __gen_s.ServerStubObjectSpecific.Signature(call)
- firstAdded = len(result.TypeDefs)
- for k, v := range ss.Methods {
- for i, _ := range v.InArgs {
- if v.InArgs[i].Type >= _gen_wiretype.TypeIDFirst {
- v.InArgs[i].Type += _gen_wiretype.TypeID(firstAdded)
- }
- }
- for i, _ := range v.OutArgs {
- if v.OutArgs[i].Type >= _gen_wiretype.TypeIDFirst {
- v.OutArgs[i].Type += _gen_wiretype.TypeID(firstAdded)
- }
- }
- if v.InStream >= _gen_wiretype.TypeIDFirst {
- v.InStream += _gen_wiretype.TypeID(firstAdded)
- }
- if v.OutStream >= _gen_wiretype.TypeIDFirst {
- v.OutStream += _gen_wiretype.TypeID(firstAdded)
- }
- result.Methods[k] = v
- }
-	// TODO(bprosnitz) combine type definitions from embedded interfaces in a way that doesn't cause duplication.
- for _, d := range ss.TypeDefs {
- switch wt := d.(type) {
- case _gen_wiretype.SliceType:
- if wt.Elem >= _gen_wiretype.TypeIDFirst {
- wt.Elem += _gen_wiretype.TypeID(firstAdded)
- }
- d = wt
- case _gen_wiretype.ArrayType:
- if wt.Elem >= _gen_wiretype.TypeIDFirst {
- wt.Elem += _gen_wiretype.TypeID(firstAdded)
- }
- d = wt
- case _gen_wiretype.MapType:
- if wt.Key >= _gen_wiretype.TypeIDFirst {
- wt.Key += _gen_wiretype.TypeID(firstAdded)
- }
- if wt.Elem >= _gen_wiretype.TypeIDFirst {
- wt.Elem += _gen_wiretype.TypeID(firstAdded)
- }
- d = wt
- case _gen_wiretype.StructType:
- for i, fld := range wt.Fields {
- if fld.Type >= _gen_wiretype.TypeIDFirst {
- wt.Fields[i].Type += _gen_wiretype.TypeID(firstAdded)
- }
- }
- d = wt
-		// NOTE: other types are missing, but we are upgrading anyway.
- }
- result.TypeDefs = append(result.TypeDefs, d)
- }
- ss, _ = __gen_s.ServerStubDirOrObject.Signature(call)
- firstAdded = len(result.TypeDefs)
- for k, v := range ss.Methods {
- for i, _ := range v.InArgs {
- if v.InArgs[i].Type >= _gen_wiretype.TypeIDFirst {
- v.InArgs[i].Type += _gen_wiretype.TypeID(firstAdded)
- }
- }
- for i, _ := range v.OutArgs {
- if v.OutArgs[i].Type >= _gen_wiretype.TypeIDFirst {
- v.OutArgs[i].Type += _gen_wiretype.TypeID(firstAdded)
- }
- }
- if v.InStream >= _gen_wiretype.TypeIDFirst {
- v.InStream += _gen_wiretype.TypeID(firstAdded)
- }
- if v.OutStream >= _gen_wiretype.TypeIDFirst {
- v.OutStream += _gen_wiretype.TypeID(firstAdded)
- }
- result.Methods[k] = v
- }
-	// TODO(bprosnitz) combine type definitions from embedded interfaces in a way that doesn't cause duplication.
- for _, d := range ss.TypeDefs {
- switch wt := d.(type) {
- case _gen_wiretype.SliceType:
- if wt.Elem >= _gen_wiretype.TypeIDFirst {
- wt.Elem += _gen_wiretype.TypeID(firstAdded)
- }
- d = wt
- case _gen_wiretype.ArrayType:
- if wt.Elem >= _gen_wiretype.TypeIDFirst {
- wt.Elem += _gen_wiretype.TypeID(firstAdded)
- }
- d = wt
- case _gen_wiretype.MapType:
- if wt.Key >= _gen_wiretype.TypeIDFirst {
- wt.Key += _gen_wiretype.TypeID(firstAdded)
- }
- if wt.Elem >= _gen_wiretype.TypeIDFirst {
- wt.Elem += _gen_wiretype.TypeID(firstAdded)
- }
- d = wt
- case _gen_wiretype.StructType:
- for i, fld := range wt.Fields {
- if fld.Type >= _gen_wiretype.TypeIDFirst {
- wt.Fields[i].Type += _gen_wiretype.TypeID(firstAdded)
- }
- }
- d = wt
-		// NOTE: other types are missing, but we are upgrading anyway.
- }
- result.TypeDefs = append(result.TypeDefs, d)
- }
- ss, _ = __gen_s.ServerStubTransaction.Signature(call)
- firstAdded = len(result.TypeDefs)
- for k, v := range ss.Methods {
- for i, _ := range v.InArgs {
- if v.InArgs[i].Type >= _gen_wiretype.TypeIDFirst {
- v.InArgs[i].Type += _gen_wiretype.TypeID(firstAdded)
- }
- }
- for i, _ := range v.OutArgs {
- if v.OutArgs[i].Type >= _gen_wiretype.TypeIDFirst {
- v.OutArgs[i].Type += _gen_wiretype.TypeID(firstAdded)
- }
- }
- if v.InStream >= _gen_wiretype.TypeIDFirst {
- v.InStream += _gen_wiretype.TypeID(firstAdded)
- }
- if v.OutStream >= _gen_wiretype.TypeIDFirst {
- v.OutStream += _gen_wiretype.TypeID(firstAdded)
- }
- result.Methods[k] = v
- }
-	// TODO(bprosnitz) combine type definitions from embedded interfaces in a way that doesn't cause duplication.
- for _, d := range ss.TypeDefs {
- switch wt := d.(type) {
- case _gen_wiretype.SliceType:
- if wt.Elem >= _gen_wiretype.TypeIDFirst {
- wt.Elem += _gen_wiretype.TypeID(firstAdded)
- }
- d = wt
- case _gen_wiretype.ArrayType:
- if wt.Elem >= _gen_wiretype.TypeIDFirst {
- wt.Elem += _gen_wiretype.TypeID(firstAdded)
- }
- d = wt
- case _gen_wiretype.MapType:
- if wt.Key >= _gen_wiretype.TypeIDFirst {
- wt.Key += _gen_wiretype.TypeID(firstAdded)
- }
- if wt.Elem >= _gen_wiretype.TypeIDFirst {
- wt.Elem += _gen_wiretype.TypeID(firstAdded)
- }
- d = wt
- case _gen_wiretype.StructType:
- for i, fld := range wt.Fields {
- if fld.Type >= _gen_wiretype.TypeIDFirst {
- wt.Fields[i].Type += _gen_wiretype.TypeID(firstAdded)
- }
- }
- d = wt
-		// NOTE: other types are missing, but we are upgrading anyway.
- }
- result.TypeDefs = append(result.TypeDefs, d)
- }
-
- return result, nil
-}
-
-func (__gen_s *ServerStubstoreThing) UnresolveStep(call _gen_ipc.ServerCall) (reply []string, err error) {
- if unresolver, ok := __gen_s.service.(_gen_ipc.Unresolver); ok {
- return unresolver.UnresolveStep(call)
- }
- if call.Server() == nil {
- return
- }
- var published []string
- if published, err = call.Server().Published(); err != nil || published == nil {
- return
- }
- reply = make([]string, len(published))
- for i, p := range published {
- reply[i] = _gen_naming.Join(p, call.Name())
- }
- return
-}
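
The four near-identical blocks in the Signature method above splice several embedded signatures into one: each embedded interface's TypeDefs are appended to the combined table, and every type reference at or beyond the first user-defined ID is shifted by the number of definitions already accumulated. A simplified sketch of that re-homing step, with plain integers standing in for wiretype definitions:

```go
package main

import "fmt"

const typeIDFirst = 65 // user-defined type IDs start past the bootstrap range

type method struct{ argType int }

// merge appends one embedded signature's type table to the combined one,
// offsetting any user-defined type references by the entries already present.
func merge(dst map[string]method, defs *[]int, methods map[string]method, embeddedDefs []int) {
	firstAdded := len(*defs)
	for name, m := range methods {
		if m.argType >= typeIDFirst {
			m.argType += firstAdded // re-home the reference into the combined table
		}
		dst[name] = m
	}
	*defs = append(*defs, embeddedDefs...)
}

func main() {
	combined := map[string]method{}
	var defs []int
	merge(combined, &defs, map[string]method{"Get": {argType: 65}}, []int{100})
	merge(combined, &defs, map[string]method{"Put": {argType: 65}}, []int{200})
	fmt.Println(combined["Get"].argType, combined["Put"].argType) // 65 66
	fmt.Println(defs)                                             // [100 200]
}
```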
diff --git a/services/store/server/thing.go b/services/store/server/thing.go
deleted file mode 100644
index 37c0d73..0000000
--- a/services/store/server/thing.go
+++ /dev/null
@@ -1,394 +0,0 @@
-package server
-
-// This file defines thing, which implements the server-side Thing API from
-// veyron2/services/store/service.vdl.
-
-import (
- "veyron/services/store/memstore"
-
- "veyron2/ipc"
- "veyron2/query"
- "veyron2/security"
- "veyron2/services/mounttable"
- mttypes "veyron2/services/mounttable/types"
- "veyron2/services/store"
- "veyron2/services/watch"
- watchtypes "veyron2/services/watch/types"
- "veyron2/storage"
- "veyron2/vdl/vdlutil"
- "veyron2/verror"
-)
-
-const (
- // Large random value, used to indicate that this memstore object represents
- // a directory.
- dirValue int64 = 9380751577234
-)
-
-type thing struct {
- name string // will never contain a transaction id
- obj *memstore.Object
- tid transactionID // may be nullTransactionID
- server *Server
-}
-
-var (
- // TODO(sadovsky): Make sure all of these error cases are covered by tests.
- // TODO(sadovsky): Revisit these error types.
- errMakeDirObjectExists = verror.Existsf("Object exists in Dir.Make path")
- errCalledObjectMethodOnDir = verror.BadArgf("called Object method on Dir")
- errPutObjectInObject = verror.BadArgf("put Object in another Object")
- errCannotRemoveRootDir = verror.BadArgf("cannot remove root Dir")
-
- _ storeThingService = (*thing)(nil)
-
- nullEntry storage.Entry
- nullStat storage.Stat
-)
-
-func isDir(entry *storage.Entry) bool {
- value, ok := entry.Value.(int64)
- return ok && value == dirValue
-}
-
-func isObject(entry *storage.Entry) bool {
- return !isDir(entry)
-}
-
-func (t *thing) String() string {
- return t.name
-}
-
-func (t *thing) Attributes(arg string) map[string]string {
- return map[string]string{
- "health": "ok",
- "servertype": t.String(),
- }
-}
-
-////////////////////////////////////////
-// DirOrObject methods
-
-// Remove removes this thing.
-func (t *thing) Remove(ctx ipc.ServerContext) error {
- tx, err := t.server.findTransaction(ctx, t.tid)
- if err != nil {
- return err
- }
- path := storage.ParsePath(t.name)
- if len(path) == 0 {
- return errCannotRemoveRootDir
- }
- return t.obj.Remove(ctx.RemoteID(), tx)
-}
-
-// Query returns a sequence of objects that match the given query.
-func (t *thing) Query(ctx ipc.ServerContext, q query.Query, stream store.DirOrObjectServiceQueryStream) error {
- tx, err := t.server.findTransaction(ctx, t.tid)
- if err != nil {
- return err
- }
- it, err := t.obj.Query(ctx.RemoteID(), tx, q)
- if err != nil {
- return err
- }
- for it.Next() {
- if err := stream.SendStream().Send(*it.Get()); err != nil {
- it.Abort()
- return err
- }
- }
- return it.Err()
-}
-
-// Stat returns information about this thing.
-func (t *thing) Stat(ctx ipc.ServerContext) (storage.Stat, error) {
- tx, err := t.server.findTransaction(ctx, t.tid)
- if err != nil {
- return nullStat, err
- }
- s, err := t.obj.Stat(ctx.RemoteID(), tx)
- if err != nil {
- return nullStat, err
- }
- // Determine the Kind.
- entry, err := t.obj.Get(ctx.RemoteID(), tx)
- if err != nil {
- // TODO(sadovsky): Is this the right thing to return here? If obj.Get()
- // returns state.errNotFound, it probably makes more sense to return
- // nullStat and nil error. (Note, for now t.obj.Stat() is not implemented
- // and always returns an error, so we never actually get here.)
- return nullStat, err
- }
- if isDir(entry) {
- s.Kind = storage.DirKind
- } else {
- s.Kind = storage.ObjectKind
- }
- return *s, err
-}
-
-// Exists returns true iff this thing is present in the store.
-func (t *thing) Exists(ctx ipc.ServerContext) (bool, error) {
- tx, err := t.server.findTransaction(ctx, t.tid)
- if err != nil {
- return false, err
- }
- return t.obj.Exists(ctx.RemoteID(), tx)
-}
-
-// NewTransaction creates a transaction with the given options. It returns the
-// name of the transaction relative to this thing's name.
-func (t *thing) NewTransaction(ctx ipc.ServerContext, opts []vdlutil.Any) (string, error) {
- if t.tid != nullTransactionID {
- return "", errNestedTransaction
- }
- return t.server.createTransaction(ctx, t.name)
-}
-
-// Glob streams a series of names that match the given pattern.
-func (t *thing) Glob(ctx ipc.ServerContext, pattern string, stream mounttable.GlobbableServiceGlobStream) error {
- tx, err := t.server.findTransaction(ctx, t.tid)
- if err != nil {
- return err
- }
- it, err := t.obj.Glob(ctx.RemoteID(), tx, pattern)
- if err != nil {
- return err
- }
- gsa := &globStreamAdapter{stream}
- for ; it.IsValid(); it.Next() {
- if err := gsa.SendStream().Send(it.Name()); err != nil {
- return err
- }
- }
- return nil
-}
-
-// WatchGlob returns a stream of changes that match a pattern.
-func (t *thing) WatchGlob(ctx ipc.ServerContext, req watchtypes.GlobRequest, stream watch.GlobWatcherServiceWatchGlobStream) error {
- return t.server.watcher.WatchGlob(ctx, storage.ParsePath(t.name), req, stream)
-}
-
-// WatchQuery returns a stream of changes that satisfy a query.
-func (t *thing) WatchQuery(ctx ipc.ServerContext, req watchtypes.QueryRequest, stream watch.QueryWatcherServiceWatchQueryStream) error {
- return t.server.watcher.WatchQuery(ctx, storage.ParsePath(t.name), req, stream)
-}
-
-////////////////////////////////////////
-// Dir-only methods
-
-// Called by Make(), and also called directly by server.go with a nil
-// transaction to create the root directory.
-func (t *thing) makeInternal(remoteID security.PublicID, tx *memstore.Transaction) error {
- // Make dirs from the top down. Return error if we encounter an Object.
- parts := storage.PathName{""}
- parts = append(parts, storage.ParsePath(t.name)...)
- // Set to true once we encounter a path component that doesn't already
- // exist. Create new dirs from there on down.
- newTree := false
- for i, _ := range parts {
- obj := t.server.store.Bind(parts[:i+1].String())
- if !newTree {
- // Note, obj.Get() returns state.errNotFound if the entry does not exist.
- // TODO(sadovsky): Check for that specific error type and propagate all
- // other errors.
- entry, err := obj.Get(remoteID, tx)
- if err != nil {
- newTree = true
- } else if isObject(entry) {
- return errMakeDirObjectExists
- }
- }
- if newTree {
- obj.Put(remoteID, tx, dirValue)
- }
- }
- return nil
-}
-
-func (t *thing) Make(ctx ipc.ServerContext) error {
- tx, err := t.server.findTransaction(ctx, t.tid)
- if err != nil {
- return err
- }
- return t.makeInternal(ctx.RemoteID(), tx)
-}
-
-////////////////////////////////////////
-// Object-only methods
-
-// Get returns the value for the Object. The value returned is from the
-// most recent mutation of the entry in the Transaction, or from the
-// Transaction's snapshot if there is no mutation.
-func (t *thing) Get(ctx ipc.ServerContext) (storage.Entry, error) {
- tx, err := t.server.findTransaction(ctx, t.tid)
- if err != nil {
- return nullEntry, err
- }
- // Note, obj.Get() returns state.errNotFound if the entry does not exist.
- entry, err := t.obj.Get(ctx.RemoteID(), tx)
- if err != nil {
- return nullEntry, err
- }
- if isDir(entry) {
- return nullEntry, errCalledObjectMethodOnDir
- }
- return *entry, err
-}
-
-// Put modifies the value of the Object.
-func (t *thing) Put(ctx ipc.ServerContext, val vdlutil.Any) (storage.Stat, error) {
- tx, err := t.server.findTransaction(ctx, t.tid)
- if err != nil {
- return nullStat, err
- }
- // Verify that this entry either doesn't exist or exists and is an Object.
- // Note, obj.Get() returns state.errNotFound if the entry does not exist.
- // TODO(sadovsky): Check for that specific error type and propagate all
- // other errors.
- entry, err := t.obj.Get(ctx.RemoteID(), tx)
- if err == nil && isDir(entry) {
- return nullStat, errCalledObjectMethodOnDir
- }
- // Verify that the parent already exists and is a Dir.
-	// Note, at this point we know t.name isn't the root of the store, because if it
- // were, the check above would've failed.
- path := storage.ParsePath(t.name)
- path = path[:len(path)-1] // path to parent
- // Note, we don't return an error here if path doesn't exist -- we let the
- // memstore Put() code handle that case.
- entry, err = t.server.store.Bind(path.String()).Get(ctx.RemoteID(), tx)
- if err == nil && isObject(entry) {
- return nullStat, errPutObjectInObject
- }
- s, err := t.obj.Put(ctx.RemoteID(), tx, interface{}(val))
- if err != nil {
- return nullStat, err
- }
- // TODO(sadovsky): Add test for this.
- s.Kind = storage.ObjectKind
- return *s, err
-}
-
-////////////////////////////////////////
-// Transaction methods
-
-func (t *thing) Commit(ctx ipc.ServerContext) error {
- return t.server.commitTransaction(ctx, t.tid)
-}
-
-func (t *thing) Abort(ctx ipc.ServerContext) error {
- return t.server.abortTransaction(ctx, t.tid)
-}
-
-////////////////////////////////////////
-// SyncGroup methods.
-// See veyron2/services/store/service.vdl for detailed comments.
-// TODO(hpucha): Actually implement these methods.
-
-// GetSyncGroupNames returns the global names of all SyncGroups attached
-// to this directory.
-func (t *thing) GetSyncGroupNames(ctx ipc.ServerContext) (names []string, err error) {
- return nil, verror.Internalf("GetSyncGroupNames not yet implemented")
-}
-
-// CreateSyncGroup creates a new SyncGroup.
-func (t *thing) CreateSyncGroup(ctx ipc.ServerContext, name string, config store.SyncGroupConfig) error {
- // One approach is that all syncgroup operations are
- // serialized with a single lock. This enables easily checking
- // nesting of syncgroups.
-
- // sgop.Lock(), defer sgop.Unlock()
- // acl check
- // sanity checks
- // sgname in the dir does not exist
- // syncgroup is not nested
- // call syncd
- // if err != nil return err
- // update dir->sgname mapping
-
- return verror.Internalf("CreateSyncGroup not yet implemented")
-}
-
-// JoinSyncGroup joins a SyncGroup with the specified global Veyron name.
-func (t *thing) JoinSyncGroup(ctx ipc.ServerContext, name string) error {
- // sgop.Lock(), defer sgop.Unlock()
- // acl check (parentdir or dir)
- // sanity checks
- // if dir exists
- // sgname in the dir does not exist
- // syncgroup is not nested
- // call syncd
- // if err != nil return err
- // if err == nil && rootoid does not exist && dir does not exist
- // mkdir(rootoid) and setacl
- // update dir->sgname mapping
- // if err == nil && rootoid exists && dir exists && oid(dir) == rootoid
- // update dir->sgname mapping
- // All other cases are error, call syncd.leave(sg) and return err
-
- return verror.Internalf("JoinSyncGroup not yet implemented")
-}
-
-// LeaveSyncGroup leaves the SyncGroup.
-func (t *thing) LeaveSyncGroup(ctx ipc.ServerContext, name string) error {
- // sgop.Lock(), defer sgop.Unlock()
- // acl check
- // sanity checks (sgname in the dir exists)
- // call syncd
- // if err != nil return err
- // update dir->sgname mapping
- // if sglist in dir is empty, rewrite oids.
-
- return verror.Internalf("LeaveSyncGroup not yet implemented")
-}
-
-// DestroySyncGroup destroys the SyncGroup.
-func (t *thing) DestroySyncGroup(ctx ipc.ServerContext, name string) error {
- return verror.Internalf("DestroySyncGroup not yet implemented")
-}
-
-// EjectFromSyncGroup ejects a member from the SyncGroup.
-func (t *thing) EjectFromSyncGroup(ctx ipc.ServerContext, name, member string) error {
- return verror.Internalf("EjectFromSyncGroup not yet implemented")
-}
-
-// GetSyncGroupConfig gets the config info of the SyncGroup.
-func (t *thing) GetSyncGroupConfig(ctx ipc.ServerContext, name string) (config store.SyncGroupConfig, eTag string, err error) {
- return store.SyncGroupConfig{}, "", verror.Internalf("GetSyncGroupConfig not yet implemented")
-}
-
-// SetSyncGroupConfig sets the config info of the SyncGroup.
-func (t *thing) SetSyncGroupConfig(ctx ipc.ServerContext, name string, config store.SyncGroupConfig, eTag string) error {
- return verror.Internalf("SetSyncGroupConfig not yet implemented")
-}
-
-// GetMembersOfSyncGroup gets the Veyron names of the Stores that joined
-// this SyncGroup.
-func (t *thing) GetMembersOfSyncGroup(ctx ipc.ServerContext, name string) ([]string, error) {
- return nil, verror.Internalf("GetMembersOfSyncGroup not yet implemented")
-}
-
-////////////////////////////////////////
-// Internals
-
-type globStreamSenderAdapter struct {
- stream interface {
- Send(entry mttypes.MountEntry) error
- }
-}
-
-func (a *globStreamSenderAdapter) Send(item string) error {
- return a.stream.Send(mttypes.MountEntry{Name: item})
-}
-
-type globStreamAdapter struct {
- stream mounttable.GlobbableServiceGlobStream
-}
-
-func (a *globStreamAdapter) SendStream() interface {
- Send(item string) error
-} {
- return &globStreamSenderAdapter{a.stream.SendStream()}
-}
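
The two adapters above exist only to bridge an interface mismatch between the generated Glob stream stubs. As a minimal, self-contained sketch of the same pattern (the types here are hypothetical stand-ins, not the veyron2 stubs):

package main

import "fmt"

// MountEntry is a stand-in for mttypes.MountEntry.
type MountEntry struct{ Name string }

// entryStream is a stand-in for the generated stream with a typed Send.
type entryStream struct{}

func (entryStream) Send(e MountEntry) error {
	fmt.Println("sent:", e.Name)
	return nil
}

// senderAdapter wraps a typed stream so callers can send plain strings,
// exactly as globStreamSenderAdapter does above.
type senderAdapter struct {
	stream interface {
		Send(e MountEntry) error
	}
}

func (a senderAdapter) Send(item string) error {
	return a.stream.Send(MountEntry{Name: item})
}

func main() {
	a := senderAdapter{entryStream{}}
	a.Send("a/b/c") // prints: sent: a/b/c
}
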
diff --git a/services/store/stored/main.go b/services/store/stored/main.go
deleted file mode 100644
index 9c19912..0000000
--- a/services/store/stored/main.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// stored is a storage server.
-//
-// Usage:
-//
-// stored [--name=<name>] [--db=<dbName>]
-//
-// - <name> is the Veyron mount point name, default /global/vstore/<hostname>/<username>.
-// - <dbName> is the filename in which to store the data.
-//
-// The store service has the Object name <name>/.store.
-// The raw store service has the Object name <name>/.store.raw.
-// Individual values with path <path> have the name <name>/<path>.
-package main
-
-import (
- "flag"
- "log"
- "os"
- "os/user"
-
- vflag "veyron/security/flag"
- "veyron/services/store/server"
-
- "veyron2/rt"
-
- _ "veyron/services/store/typeregistryhack"
-)
-
-var (
- mountName string
-
- // TODO(rthellend): Remove the protocol and address flags when the config
- // manager is working.
- protocol = flag.String("protocol", "tcp", "protocol to listen on")
- address = flag.String("address", ":0", "address to listen on")
-
- dbName = flag.String("db", "/var/tmp/veyron_store.db",
- "Metadata database")
- viewerPort = flag.Int("viewerPort", 5000,
- "IPV4 port to serve viewer from, or 0 to disable viewer")
-)
-
-func init() {
- username := "unknown"
- if u, err := user.Current(); err == nil {
- username = u.Username
- }
- hostname := "unknown"
- if h, err := os.Hostname(); err == nil {
- hostname = h
- }
- dir := "global/vstore/" + hostname + "/" + username
-	flag.StringVar(&mountName, "name", dir, "Mount point for the store")
-}
-
-// main starts the store service, taking args from command line flags.
-func main() {
- r := rt.Init()
-
- // Create a new server instance.
- s, err := r.NewServer()
- if err != nil {
- log.Fatal("r.NewServer() failed: ", err)
- }
-
- // Create a new StoreService.
- storeService, err := server.New(
- server.ServerConfig{Admin: r.Identity().PublicID(), DBName: *dbName})
- if err != nil {
- log.Fatal("server.New() failed: ", err)
- }
- defer storeService.Close()
-
- // Create the authorizer.
- auth := vflag.NewAuthorizerOrDie()
-
- // Register the services.
- storeDisp := server.NewStoreDispatcher(storeService, auth)
- // Create an endpoint and start listening.
- ep, err := s.Listen(*protocol, *address)
- if err != nil {
- log.Fatal("s.Listen() failed: ", err)
- }
- // Publish the service in the mount table.
- log.Printf("Mounting store on %s, endpoint /%s", mountName, ep)
- if err := s.Serve(mountName, storeDisp); err != nil {
- log.Fatal("s.Serve() failed: ", err)
- }
-
- // Run viewer if requested.
- if *viewerPort > 0 {
- // TODO(kash): Port the viewer to the new dir/object store api.
- //go viewer.ListenAndServe(r, fmt.Sprintf(":%d", *viewerPort), mountName, vstore.New())
- }
-
- // Wait forever.
- done := make(chan struct{})
- <-done
-}
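
The final done channel blocks forever, which means the deferred storeService.Close() above never runs on shutdown. A common alternative (a sketch, not part of the original code) is to block until an interrupt signal arrives:

package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
)

// waitForSignal blocks until SIGINT or SIGTERM arrives, then returns,
// letting deferred cleanup (e.g. storeService.Close()) run before exit.
func waitForSignal() {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
	<-ch
}

func main() {
	defer log.Println("cleaned up") // stands in for storeService.Close()
	waitForSignal()
}
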
diff --git a/services/store/testutil/store.go b/services/store/testutil/store.go
deleted file mode 100644
index 9190f09..0000000
--- a/services/store/testutil/store.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package testutil
-
-import (
- "crypto/rand"
- "io/ioutil"
- "os"
- "testing"
-
- istore "veyron/services/store/server"
- _ "veyron/services/store/typeregistryhack"
-
- "veyron2/ipc"
- "veyron2/naming"
- "veyron2/security"
-)
-
-// NewStore creates a new testing instance of the store server and returns
-// an object name that identifies the instance and a closure that can
-// be used to terminate the instance and clean up.
-func NewStore(t *testing.T, server ipc.Server, id security.PublicID) (string, func()) {
- // Create a temporary directory for the store server.
- prefix := "vstore-test-db"
- dbName, err := ioutil.TempDir("", prefix)
- if err != nil {
- t.Fatalf("TempDir(%v, %v) failed: %v", "", prefix, err)
- }
-
- // Create a new StoreService.
- config := istore.ServerConfig{Admin: id, DBName: dbName}
- storeService, err := istore.New(config)
- if err != nil {
- t.Fatalf("New(%v) failed: %v", config, err)
- }
-
- var buf [16]byte
- if _, err := rand.Read(buf[:]); err != nil {
- t.Fatalf("rand.Read() failed: %v", err)
- }
-
- // Register the services.
- storeDispatcher := istore.NewStoreDispatcher(storeService, nil)
- if err := server.Serve("", storeDispatcher); err != nil {
- t.Fatalf("Register(%v) failed: %v", storeDispatcher, err)
- }
-
- // Create an endpoint and start listening.
- protocol, hostname := "tcp", "127.0.0.1:0"
- ep, err := server.Listen(protocol, hostname)
- if err != nil {
- t.Fatalf("Listen(%v, %v) failed: %v", protocol, hostname, err)
- }
-
- name := naming.JoinAddressName(ep.String(), "")
-
- // Create a closure that cleans things up.
- cleanup := func() {
- server.Stop()
- os.RemoveAll(dbName)
- }
-
- return name, cleanup
-}
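
For context, a test would typically use this helper as follows. This is a hypothetical sketch: obtaining the ipc.Server and security.PublicID from the runtime is elided, so the zero values below would need to be replaced in a real test.

package store_test

import (
	"testing"

	"veyron/services/store/testutil"

	"veyron2/ipc"
	"veyron2/security"
)

func TestWithStore(t *testing.T) {
	var server ipc.Server    // in a real test: from rt.Init() and r.NewServer()
	var id security.PublicID // in a real test: r.Identity().PublicID()
	name, cleanup := testutil.NewStore(t, server, id)
	defer cleanup()

	// name identifies the test store instance; clients bind to it,
	// e.g. naming.Join(name, ".store").
	_ = name
}
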
diff --git a/services/store/typeregistryhack/init.go b/services/store/typeregistryhack/init.go
deleted file mode 100644
index bea5d02..0000000
--- a/services/store/typeregistryhack/init.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Package typeregistryhack registers types that clients send to the store,
-// since VOM cannot decode objects of unknown type.
-//
-// TODO(tilaks): use val.Value to decode unknown types.
-package typeregistryhack
-
-import (
- // Register boxes types.
- "veyron/examples/boxes"
- // Register mdb types.
- _ "veyron/examples/mdb/schema"
- // Register todos types.
- _ "veyron/examples/todos/schema"
- // Register bank types.
- _ "veyron/examples/bank/schema"
- // Register stfortune types.
- _ "veyron/examples/stfortune/schema"
- // Register profile types.
- "veyron/services/mgmt/profile"
- // Register application types.
- "veyron2/services/mgmt/application"
- // Register build types.
- _ "veyron2/services/mgmt/build"
-
- "veyron2/vom"
-)
-
-func init() {
-	// Register concrete value types with VOM so stored values can be decoded.
- vom.Register(&struct{}{}) // directories have type struct{}.
- vom.Register(&profile.Specification{})
- vom.Register(&application.Envelope{})
- vom.Register(&boxes.Box{})
-}
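
An application with its own schema would register its types the same way; a minimal sketch with a hypothetical Recipe type:

package schema

import "veyron2/vom"

// Recipe is a hypothetical value type stored by an application.
type Recipe struct {
	Title string
	Steps []string
}

func init() {
	// Register a pointer to the type so VOM can decode stored values.
	vom.Register(&Recipe{})
}
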
diff --git a/services/store/viewer/entry.go b/services/store/viewer/entry.go
deleted file mode 100644
index a5e3991..0000000
--- a/services/store/viewer/entry.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// TODO(kash): Rewrite this to use the new dir/object store api.
-// +build ignore
-
-package viewer
-
-import (
- "path/filepath"
- "sort"
- "time"
-
- "veyron2/context"
- "veyron2/naming"
- "veyron2/storage"
-)
-
-// Entry is the type used to pass store values to user-defined templates.
-type Entry struct {
- ctx context.T
- storeRoot string
- store storage.Store
- Name string
- Value interface{}
-}
-
-// EntryForRawTemplate is the type used to pass store values to the "raw"
-// template defined in viewer.go.
-type EntryForRawTemplate struct {
- *Entry
- Subdirs []string // relative names
- RawSubdirs bool // whether to add "?raw" to subdir hrefs
-}
-
-// abspath returns the absolute path from a path relative to this value.
-func (e *Entry) abspath(path string) string {
- return naming.Join(e.storeRoot, e.Name, path)
-}
-
-// Date performs a Time conversion, given an integer argument that represents a
-// time in nanoseconds since the Unix epoch.
-func (e *Entry) Date(ns int64) time.Time {
- return time.Unix(0, ns)
-}
-
-// Join joins the path elements.
-func (e *Entry) Join(elem ...string) string {
- return filepath.ToSlash(filepath.Join(elem...))
-}
-
-// Base returns the last element of the path.
-func (e *Entry) Base(path string) string {
- return filepath.Base(path)
-}
-
-// Glob performs a glob expansion of the pattern. The results are sorted.
-func (e *Entry) Glob(pattern string) ([]string, error) {
- results := e.store.Bind(e.abspath("")).Glob(e.ctx, pattern)
- names := []string{}
- rStream := results.RecvStream()
- for rStream.Advance() {
- names = append(names, rStream.Value())
- }
- if err := rStream.Err(); err != nil {
- return nil, err
- }
- sort.Strings(names)
- return names, nil
-}
-
-// Get fetches a value from the store, where path is relative to this value.
-// The result is nil if the value does not exist.
-func (e *Entry) Get(path string) interface{} {
- en, err := e.store.Bind(e.abspath(path)).Get(e.ctx)
- if err != nil {
- return nil
- }
- return en.Value
-}
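
These methods are meant to be invoked from user-defined templates via the template's dot. A standalone sketch of that mechanism, with a hypothetical entry type mirroring the Date helper above:

package main

import (
	"html/template"
	"os"
	"time"
)

// entry is a stand-in for Entry; Date mirrors the helper above.
type entry struct{ Name string }

func (entry) Date(ns int64) time.Time { return time.Unix(0, ns) }

func main() {
	tmpl := template.Must(template.New("t").Parse(
		"<h1>{{.Name}}</h1><p>{{.Date 1400000000000000000}}</p>\n"))
	tmpl.Execute(os.Stdout, entry{Name: "movies/Inception"})
}
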
diff --git a/services/store/viewer/reflect.go b/services/store/viewer/reflect.go
deleted file mode 100644
index c7b4359..0000000
--- a/services/store/viewer/reflect.go
+++ /dev/null
@@ -1,150 +0,0 @@
-package viewer
-
-import (
- "bytes"
- "fmt"
- "reflect"
-
- "veyron2/storage"
-)
-
-// printer defines a pretty printer for Go values, using reflection to traverse
-// the values.
-type printer struct {
- buf bytes.Buffer
-}
-
-var (
- tyID = reflect.TypeOf(storage.ID{})
-)
-
-// stripTypePointers removes the outer Ptr and Interface types.
-func stripTypePointers(ty reflect.Type) reflect.Type {
- kind := ty.Kind()
- for kind == reflect.Ptr || kind == reflect.Interface {
- ty = ty.Elem()
- kind = ty.Kind()
- }
- return ty
-}
-
-// templatePath returns the path to the template for a value, defined as
-// /templates/<pkgPath>/<typeName>.
-func templatePath(v interface{}) string {
- ty := stripTypePointers(reflect.TypeOf(v))
- pkgPath := ty.PkgPath()
- tyName := ty.Name()
- if pkgPath == "" || tyName == "" {
- return ""
- }
- return fmt.Sprintf("templates/%s/%s", pkgPath, tyName)
-}
-
-// print formats the argument.
-func (p *printer) print(v interface{}) {
- p.printValue(0, reflect.ValueOf(v))
-}
-
-// printType formats the type.
-func (p *printer) printType(indent int, v reflect.Type) {
- p.printString(v.Name())
-}
-
-// printValue is the main pretty-printer method. It formats the value at the
-// indentation level specified by the argument.
-func (p *printer) printValue(indent int, v reflect.Value) {
- if !v.IsValid() {
- p.printString("<invalid>")
- return
- }
- ty := v.Type()
- if ty == tyID {
- p.printString(v.Interface().(storage.ID).String())
- return
- }
- switch ty.Kind() {
- case reflect.Ptr:
- p.printString("&")
- fallthrough
- case reflect.Interface:
- p.printValue(indent, v.Elem())
- case reflect.Array, reflect.Slice:
- p.printSliceValue(indent, v)
- case reflect.Map:
- p.printMapValue(indent, v)
- case reflect.Struct:
- p.printStructValue(indent, v)
- case reflect.String:
- fmt.Fprintf(&p.buf, "%q", v.Interface())
- default:
- fmt.Fprintf(&p.buf, "%+v", v.Interface())
- }
-}
-
-// printSliceValue formats a slice or array.
-func (p *printer) printSliceValue(indent int, v reflect.Value) {
- p.printType(indent, v.Type())
- p.printString("{")
- l := v.Len()
- for i := 0; i < l; i++ {
- p.printIndent(indent + 1)
- p.printValue(indent+1, v.Index(i))
- p.printString(",")
- }
- p.printIndent(indent)
- p.printString("}")
-}
-
-// printMapValue formats a map.
-func (p *printer) printMapValue(indent int, v reflect.Value) {
- p.printType(indent, v.Type())
- p.printString("{")
- for _, key := range v.MapKeys() {
- p.printIndent(indent + 1)
- p.printValue(indent+1, key)
- p.printString(": ")
- p.printValue(indent+2, v.MapIndex(key))
- p.printString(",")
- }
- p.printIndent(indent)
- p.printString("}")
-}
-
-// printStructValue formats a struct.
-func (p *printer) printStructValue(indent int, v reflect.Value) {
- ty := v.Type()
- p.printType(indent, ty)
- p.printString("{")
- l := ty.NumField()
- for i := 0; i < l; i++ {
- field := ty.Field(i)
- if field.PkgPath != "" {
- continue
- }
- p.printIndent(indent + 1)
- p.printString(field.Name)
- p.printString(": ")
- p.printValue(indent+1, v.Field(i))
- p.printString(",")
- }
- p.printIndent(indent)
- p.printString("}")
-}
-
-// printString adds a string to the output.
-func (p *printer) printString(s string) {
- p.buf.WriteString(s)
-}
-
-// printIndent prints a newline and then indents to the specified indentation.
-func (p *printer) printIndent(indent int) {
- p.buf.WriteRune('\n')
- for i := 0; i < indent; i++ {
- p.buf.WriteString(" ")
- }
-}
-
-// String returns the formatted text.
-func (p *printer) String() string {
- return p.buf.String()
-}
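
templatePath is the key computation behind the template lookup described in viewer.go; a standalone sketch showing that values and pointers map to the same path (the Movie type is hypothetical):

package main

import (
	"fmt"
	"reflect"
)

// Movie is a hypothetical stored type.
type Movie struct{ Title string }

// stripTypePointers and templatePath mirror the functions above.
func stripTypePointers(ty reflect.Type) reflect.Type {
	for k := ty.Kind(); k == reflect.Ptr || k == reflect.Interface; k = ty.Kind() {
		ty = ty.Elem()
	}
	return ty
}

func templatePath(v interface{}) string {
	ty := stripTypePointers(reflect.TypeOf(v))
	if ty.PkgPath() == "" || ty.Name() == "" {
		return ""
	}
	return fmt.Sprintf("templates/%s/%s", ty.PkgPath(), ty.Name())
}

func main() {
	fmt.Println(templatePath(Movie{}))  // templates/main/Movie
	fmt.Println(templatePath(&Movie{})) // templates/main/Movie
}
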
diff --git a/services/store/viewer/viewer.go b/services/store/viewer/viewer.go
deleted file mode 100644
index dd222c9..0000000
--- a/services/store/viewer/viewer.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// TODO(kash): Rewrite this to use the new dir/object store api.
-// +build ignore
-
-// Package viewer exports a store through an HTTP server, with the following
-// features.
-//
-// URL paths correspond to store paths. For example, if the URL is
-// http://myhost/a/b/c, the store value that is fetched is /a/b/c.
-//
-// Values can be formatted using html templates (documented in the
-// text/templates package). Templates are named according to the type of the
-// value that they format, using a path /templates/<pkgPath>/<typeName>. For
-// example, suppose we are viewing the page for /movies/Inception, and it
-// contains a value of type *examples/store/mdb/schema.Movie. We fetch the
-// template /templates/examples/store/mdb/schema/Movie, which must be a string
-// in html/template format. If it exists, the template is compiled and used to
-// print the value. If the template does not exist, the value is formatted in
-// raw form.
-//
-// String values that have a path ending with suffix .css are printed in raw
-// form.
-package viewer
-
-import (
- "fmt"
- "html"
- "html/template"
- "io"
- "net/http"
- "path/filepath"
-
- "veyron2"
- "veyron2/context"
- "veyron2/naming"
- "veyron2/storage"
- "veyron2/vlog"
-)
-
-// server is the HTTP server handler.
-type server struct {
- storeRoot string
- store storage.Store
- runtime veyron2.Runtime
-}
-
-var _ http.Handler = (*server)(nil)
-
-const (
- // rawTemplateText is used to format the output in a raw textual form.
- rawTemplateText = `<!DOCTYPE html>
-<html>
-{{$entry := .}}
-{{$prefix := $entry.Name}}
-{{$rawSubdirs := $entry.RawSubdirs}}
-<head>
-<title>{{.Name}}</title>
-</head>
-<body>
-<h1>{{.Name}}</h1>
-<pre>{{.Value}}</pre>
-{{with .Subdirs}}
-<h3>Subdirectories</h3>
-{{range .}}
-{{$name := $entry.Join $prefix .}}
-<p><a href="{{$name}}{{if $rawSubdirs}}?raw{{end}}">{{.}}</a></p>
-{{end}}
-{{end}}
-</body>
-</html>`
-)
-
-var (
- rawTemplate = mustParse("raw", rawTemplateText)
-)
-
-// mustParse parses the template text. It panics on error.
-func mustParse(name, text string) *template.Template {
- tmpl, err := template.New(name).Parse(text)
- if err != nil {
- panic(fmt.Sprintf("Error parsing template %q: %s", text, err))
- }
- return tmpl
-}
-
-// abspath returns the absolute path from a path relative to the store root.
-func (s *server) abspath(path string) string {
- return naming.Join(s.storeRoot, path)
-}
-
-// loadTemplate fetches the template for the value from the store. The template
-// is based on the type of the value, under /templates/<pkgPath>/<typeName>.
-func (s *server) loadTemplate(ctx context.T, v interface{}) *template.Template {
- path := templatePath(v)
- en, err := s.store.Bind(s.abspath(path)).Get(ctx)
- if err != nil {
- return nil
- }
- str, ok := en.Value.(string)
- if !ok {
- return nil
- }
- tmpl, err := template.New(path).Parse(str)
- if err != nil {
- vlog.Infof("Template error: %s: %s", path, err)
- return nil
- }
- return tmpl
-}
-
-// printRawValuePage prints the value in raw format.
-func (s *server) printRawValuePage(ctx context.T, w http.ResponseWriter, path string, v interface{}, rawSubdirs bool) {
- var p printer
- p.print(v)
- x := &EntryForRawTemplate{&Entry{ctx: ctx, storeRoot: s.storeRoot, store: s.store, Name: path, Value: p.String()}, []string{}, rawSubdirs}
- x.Subdirs, _ = x.Glob("*")
- if err := rawTemplate.Execute(w, x); err != nil {
- w.Write([]byte(html.EscapeString(err.Error())))
- }
-}
-
-// printValuePage prints the value using a template if possible. If a template
-// is not found, the value is printed in raw format instead.
-func (s *server) printValuePage(ctx context.T, w http.ResponseWriter, path string, v interface{}) {
- if tmpl := s.loadTemplate(ctx, v); tmpl != nil {
- x := &Entry{ctx: ctx, storeRoot: s.storeRoot, store: s.store, Name: path, Value: v}
- if err := tmpl.Execute(w, x); err != nil {
- w.Write([]byte(html.EscapeString(err.Error())))
- }
- return
- }
- s.printRawValuePage(ctx, w, path, v, false)
-}
-
-// printRawPage prints a string value directly, without processing.
-func (s *server) printRawPage(w http.ResponseWriter, v interface{}) {
- str, ok := v.(string)
- if !ok {
- fmt.Fprintf(w, "%s", v)
- } else {
- io.WriteString(w, str)
- }
-}
-
-// ServeHTTP is the main HTTP handler.
-func (s *server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- path := req.URL.Path
- ctx := s.runtime.NewContext()
- en, err := s.store.Bind(s.abspath(path)).Get(ctx)
- if err != nil {
- msg := fmt.Sprintf(
- "<html><body><h1>%s</h1><h2>Error: %s</h2></body></html>",
- html.EscapeString(path),
- html.EscapeString(err.Error()))
- w.WriteHeader(http.StatusNotFound)
- w.Write([]byte(msg))
- return
- }
-
- q := req.URL.Query()
- switch filepath.Ext(path) {
- case ".css":
- w.Header().Set("Content-Type", "text/css; charset=utf-8")
- s.printRawPage(w, en.Value)
- default:
- w.Header().Set("Content-Type", "text/html; charset=utf-8")
- if q["raw"] != nil {
- s.printRawValuePage(ctx, w, path, en.Value, true)
- } else {
- s.printValuePage(ctx, w, path, en.Value)
- }
- }
-}
-
-// ListenAndServe is the main entry point. It serves store at the specified
-// network address.
-func ListenAndServe(runtime veyron2.Runtime, addr string, storeRoot string, st storage.Store) error {
- s := &server{storeRoot: storeRoot, store: st, runtime: runtime}
- vlog.Infof("Viewer running at http://localhost%s", addr)
- return http.ListenAndServe(addr, s)
-}
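
For context, the wiring that stored/main.go intended (see its commented-out viewer call) would look roughly like this. This is a hedged sketch: treating vstore.New() as returning the storage.Store to browse is an assumption carried over from that comment, and the mount name is hypothetical.

package main

import (
	"flag"
	"fmt"
	"log"

	"veyron/services/store/viewer"

	"veyron2/rt"

	"veyron.io/store/veyron2/storage/vstore"
)

var viewerPort = flag.Int("viewerPort", 5000, "IPV4 port to serve viewer from")

func main() {
	r := rt.Init()
	mountName := "global/vstore/example" // hypothetical mount point
	addr := fmt.Sprintf(":%d", *viewerPort)
	// Serve the store rooted at mountName over HTTP until an error occurs.
	log.Fatal(viewer.ListenAndServe(r, addr, mountName, vstore.New()))
}
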
diff --git a/services/syncgroup/id.go b/services/syncgroup/id.go
deleted file mode 100644
index 1c1189a..0000000
--- a/services/syncgroup/id.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package syncgroup
-
-import (
- "encoding/hex"
- "veyron2/storage"
- "veyron2/verror"
-)
-
-// The current implementation of SyncGroupID assumes that an ID is a storage.ID.
-// ID is defined in syncgroup.vdl.
-
-// NewID returns a new ID.
-func NewID() ID {
- return ID(storage.NewID())
-}
-
-// String returns id as a hex string.
-func (id ID) String() string {
- return storage.ID(id).String()
-}
-
-// ParseID returns an ID formed from the string str, which will normally have
-// been created with ID.String().
-func ParseID(str string) (id ID, err error) {
- var tmp []byte
- tmp, err = hex.DecodeString(str)
- if err != nil {
- return id, err
- }
- if len(tmp) == 0 || len(tmp) > len(id) {
- return id, verror.BadProtocolf("string \"%v\" has wrong length to be a syncgroup.ID", str)
- }
- for i := 0; i != len(tmp); i++ {
- id[i] = tmp[i]
- }
- return id, err
-}
-
-// IsValid returns whether id is not the zero id.
-func (id ID) IsValid() bool {
- return storage.ID(id).IsValid()
-}
-
-// CompareIDs compares two IDs lexicographically.
-func CompareIDs(id1 ID, id2 ID) int {
- return storage.CompareIDs(storage.ID(id1), storage.ID(id2))
-}
diff --git a/services/syncgroup/id_test.go b/services/syncgroup/id_test.go
deleted file mode 100644
index 06fff56..0000000
--- a/services/syncgroup/id_test.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package syncgroup
-
-// This file tests syncgroup/id.go
-
-import (
- "testing"
-)
-
-// signum returns -1, 0, or 1 according to whether its argument is negative,
-// zero, or positive, respectively.
-func signum(x int) (result int) {
- if x < 0 {
- result = -1
- } else if x > 0 {
- result = 1
- }
- return result
-}
-
-// NewID is tested as a side-effect of the other routines.
-
-// TestIsValid tests IsValid().
-func TestIsValid(t *testing.T) {
- var zeroID ID
- var nonZeroID ID = NewID()
-
- if zeroID.IsValid() {
- t.Errorf("IsValid(%v) == true", zeroID)
- }
- if !nonZeroID.IsValid() {
- t.Errorf("IsValid(%v) == false", nonZeroID)
- }
-}
-
-// TestCompareIDs tests CompareIDs().
-func TestCompareIDs(t *testing.T) {
- ids := []ID{ID{}, NewID(), NewID()}
- for i := 0; i != len(ids); i++ {
- for j := 0; j != len(ids); j++ {
- if (i == j) != (CompareIDs(ids[i], ids[j]) == 0) {
- t.Errorf("(CompareIDs(%v, %v) == 0) == %v, expected %v",
- ids[i], ids[j],
- CompareIDs(ids[i], ids[j]) == 0,
- i == j)
- }
- if signum(CompareIDs(ids[i], ids[j])) !=
- -signum(CompareIDs(ids[j], ids[i])) {
- t.Errorf("(signum(CompareIDs(%v, %v)) == %v != -(%v == signum(CompareIDs(%v, %v)))",
- ids[i], ids[j], signum(CompareIDs(ids[i], ids[j])),
- signum(CompareIDs(ids[j], ids[i])), ids[j], ids[i])
- }
- }
- }
-}
-
-// TestStringAndParseID tests String() and ParseID().
-func TestStringAndParseID(t *testing.T) {
- var lenOfString int
- for i := 0; i != 10; i++ {
- var id ID
- var str string
- if i == 0 { // First time through, use the zero id, and remember the length.
- str = id.String()
- lenOfString = len(str)
- if lenOfString <= 0 {
- t.Errorf("len(id.String()) == %v <= 0", len(str))
- }
- } else { // Subsequently, use a new id.
- id = NewID()
- str = id.String()
- }
- if len(str) != lenOfString {
- t.Errorf("len(id.String()) == %v != %v", len(str), lenOfString)
- }
- var parsedID ID
- var err error
- parsedID, err = ParseID(str)
- if err != nil {
- t.Errorf("ParseID(%v) yields %v", str, err)
- }
- if CompareIDs(id, parsedID) != 0 {
- t.Errorf("ParseID(%v) == %v != %v", str, parsedID, id)
- }
- }
-}
diff --git a/services/syncgroup/syncgroup.vdl b/services/syncgroup/syncgroup.vdl
deleted file mode 100644
index 0531567..0000000
--- a/services/syncgroup/syncgroup.vdl
+++ /dev/null
@@ -1,164 +0,0 @@
-// Package syncgroup provides the means for Stores to set up SyncGroups for
-// subsequent synchronization of objects between them.
-//
-// The intent is that SyncGroup objects are created and administered by
-// SyncGroup servers, even though they are subsequently mirrored among SyncGroup
-// members by the normal synchronization mechanisms.
-//
-// SyncGroupServer also aids in discovering members of a particular SyncGroup.
-// SyncGroupServer maintains the names of joiners for a SyncGroup that are in
-// turn accessible to all members. Each SyncGroup is also associated with a
-// set of mount tables. A Store that joins a SyncGroup must advertise its name
-// (and optionally its SyncGroups) in these mount tables. Stores may also
-// advertise the SyncGroups they join in the local neighbourhood.
-package syncgroup
-
-import "veyron2/security"
-import "veyron2/services/security/access"
-// import "veyron2/services/watch" // Watch call is not yet implemented
-import "veyron2/storage"
-
-// An ID is a globally unique identifier for a SyncGroup.
-type ID storage.ID
-
-// A SyncGroupInfo is the conceptual state of a SyncGroup object.
-type SyncGroupInfo struct {
- Name string // Global Veyron name of object.
- Config SyncGroupConfig // Configuration parameters of this SyncGroup.
- RootOID storage.ID // ID of object at root of SyncGroup's tree.
- ETag string // Version ID for concurrency control.
-
- // The SyncGroup's object ID, which is chosen by the creating SyncGroupServer
- // and is globally unique.
- SGOID ID
-
- // A map from joiner names to the associated metaData for devices that
- // have called Join() or Create() and not subsequently called Leave()
- // or had Eject() called on them. The map returned by the calls below
- // may contain only a subset of joiners if the number is large.
- Joiners map[NameIdentity]JoinerMetaData
-}
-
-// A SyncGroupConfig contains some fields of SyncGroupInfo that
-// are passed at create time, but which can be changed later.
-type SyncGroupConfig struct {
- Desc string // Human readable description.
- PathPatterns []string // Global path patterns.
- Options map[string]any // Options for future evolution.
- ACL security.ACL // The object's ACL.
-
- // Mount tables used to advertise for synchronization.
- // Typically, we will have only one entry. However, an array allows
- // mount tables to be changed over time.
- MountTables []string
-}
-
-// A JoinerMetaData contains the non-name information stored per joiner.
-type JoinerMetaData struct {
- // SyncPriority is a hint to bias the choice of syncing partners.
- // Members of the SyncGroup should choose to synchronize more often
- // with partners with lower values.
- SyncPriority int32
-}
-
-// A NameIdentity gives a Veyron name and identity for a joiner.
-// TODO(m3b): When names include an identity, this should become a single
-// string.
-type NameIdentity struct {
- Name string // Global name of joiner.
- Identity string // Security identity of the joiner.
-}
-
-// SyncGroupServer is the collection of calls on SyncGroup objects at
-// a SyncGroup server. The calls used most often, like Create and Join, are
-// used almost exclusively by the Store. Clients typically call the Store to
-// cause these things to happen.
-//
-// Calls starting with "Set" take an eTag value that may be either empty, or
-// the value of ETag from a recent response to Get(), Watch(), or GetACL().
-type SyncGroupServer interface {
- // Create creates this SyncGroup with the given arguments, and if
- // joiner.Name!="", with {joiner, metaData} in its Joiners map. It is
- // expected that acl will give Read and Write access to any device that
- // the administrator expects to join and sync; if the acl is empty, a
- // default ACL giving access only to the caller is used. On success,
- // Create returns the SyncGroupInfo of the newly created object.
- //
- // Requires: this SyncGroup must not exist;
- // the caller must have write permission at the SyncGroup server;
- // the caller's identity must be a prefix of the SyncGroup's name.
- // Beware that for Create(), the access label is matched against a
- // server-wide ACL; for all other calls the access label is matched
- // against the object's ACL.
- Create(createArgs SyncGroupConfig, rootOID storage.ID,
- joiner NameIdentity, metaData JoinerMetaData) (
- sgInfo SyncGroupInfo, err error) {security.WriteLabel}
-
- // Join adds {joiner, metaData} to the SyncGroup's Joiners map and
- // returns the SyncGroupInfo for this SyncGroup. The act of joining
- // allows other devices to find the caller, which is still required to
- // have read+write access on the SyncGroup to participate in
- // synchronization. A device may call Join again with the same
- // NameIdentity in order to change metaData.
- // For SyncGroups with large numbers of joiners, Join may return
- // a subset of Joiners.
- //
- // Requires: this SyncGroup must exist;
- // the caller must have both read and write permission on the
- // SyncGroup.
- // TODO(m3b): The label should be read and write; can that be expressed?
- Join(joiner NameIdentity, metaData JoinerMetaData) (
- sgInfo SyncGroupInfo, err error) {security.WriteLabel}
-
- // Leave removes the joiner with the given name/identity from the
- // SyncGroup's Joiners map.
- //
- // Requires: this SyncGroup must exist;
- // the caller must assert the identity name.Identity.
- // (Thus, a device that Joined may Leave even if it would no longer
- // have permission to Join() the SyncGroup.)
- Leave(name NameIdentity) error
-
- // Eject is like Leave, but the caller must wield Admin
- // privilege on the group, and need not wield name.Identity.
- //
- // Requires: the SyncGroup must exist;
- // the caller must have admin permission on the SyncGroup.
- Eject(name NameIdentity) error {security.AdminLabel}
-
- // Destroy ejects all devices from the SyncGroup and removes it.
- // Devices that had joined will learn of this when their
- // SyncGroup object disappears.
- //
- // Requires: this SyncGroup must exist;
- // the caller must have admin permission on the SyncGroup.
- Destroy() error {security.AdminLabel}
-
- // Get returns the SyncGroupInfo for this SyncGroup. For SyncGroups
- // with a large number of joiners, Get may return a subset of Joiners.
- //
- // Requires: this SyncGroup must exist;
- // the caller must have read permission on the SyncGroup.
- // TODO(m3b): This call may be removed when Watch is implemented.
- Get() (sgInfo SyncGroupInfo, err error) {security.ReadLabel}
-
- // Watch returns stream of SyncGroupInfo for this SyncGroup as
- // it changes.
- //
- // Requires: this SyncGroup must exist;
- // the caller must have read permission on the SyncGroup.
- // TODO(m3b): This call is not yet implemented.
- // Watch() stream<_, watch.ChangeBatch> error {security.ReadLabel}
-
- // SetConfig sets the Config field of this SyncGroup.
- //
- // Requires: this SyncGroup must exist;
- // if non-empty, the eTag must match the value in the object;
- // the caller must have admin permission on the SyncGroup.
- SetConfig(config SyncGroupConfig, eTag string) error {security.AdminLabel}
-
- // SetACL and GetACL are included from access.Object.
- // TODO(m3b): This inherits AdminLabel for GetACL(); we might prefer ReadLabel.
- access.Object
-}
diff --git a/services/syncgroup/syncgroup.vdl.go b/services/syncgroup/syncgroup.vdl.go
deleted file mode 100644
index 78c38c5..0000000
--- a/services/syncgroup/syncgroup.vdl.go
+++ /dev/null
@@ -1,646 +0,0 @@
-// This file was auto-generated by the veyron vdl tool.
-// Source: syncgroup.vdl
-
-// Package syncgroup provides the means for Stores to set up SyncGroups for
-// subsequent synchronization of objects between them.
-//
-// The intent is that SyncGroup objects are created and administered by
-// SyncGroup servers, even though they are subsequently mirrored among SyncGroup
-// members by the normal synchronization mechanisms.
-//
-// SyncGroupServer also aids in discovering members of a particular SyncGroup.
-// SyncGroupServer maintains the names of joiners for a SyncGroup that are in
-// turn accessible to all members. Each SyncGroup is also associated with a
-// set of mount tables. A Store that joins a SyncGroup must advertise its name
-// (and optionally its SyncGroups) in these mount tables. Stores may also
-// advertise the SyncGroups they join in the local neighbourhood.
-package syncgroup
-
-import (
- "veyron2/security"
-
- "veyron2/services/security/access"
-
- "veyron2/storage"
-
- // The non-user imports are prefixed with "_gen_" to prevent collisions.
- _gen_veyron2 "veyron2"
- _gen_context "veyron2/context"
- _gen_ipc "veyron2/ipc"
- _gen_naming "veyron2/naming"
- _gen_vdlutil "veyron2/vdl/vdlutil"
- _gen_wiretype "veyron2/wiretype"
-)
-
-// An ID is a globally unique identifier for a SyncGroup.
-type ID storage.ID
-
-// A SyncGroupInfo is the conceptual state of a SyncGroup object.
-type SyncGroupInfo struct {
- Name string // Global Veyron name of object.
- Config SyncGroupConfig // Configuration parameters of this SyncGroup.
- RootOID storage.ID // ID of object at root of SyncGroup's tree.
- ETag string // Version ID for concurrency control.
- // The SyncGroup's object ID, which is chosen by the creating SyncGroupServer
- // and is globally unique.
- SGOID ID
- // A map from joiner names to the associated metaData for devices that
- // have called Join() or Create() and not subsequently called Leave()
- // or had Eject() called on them. The map returned by the calls below
- // may contain only a subset of joiners if the number is large.
- Joiners map[NameIdentity]JoinerMetaData
-}
-
-// A SyncGroupConfig contains some fields of SyncGroupInfo that
-// are passed at create time, but which can be changed later.
-type SyncGroupConfig struct {
- Desc string // Human readable description.
- PathPatterns []string // Global path patterns.
- Options map[string]_gen_vdlutil.Any // Options for future evolution.
- ACL security.ACL // The object's ACL.
- // Mount tables used to advertise for synchronization.
- // Typically, we will have only one entry. However, an array allows
- // mount tables to be changed over time.
- MountTables []string
-}
-
-// A JoinerMetaData contains the non-name information stored per joiner.
-type JoinerMetaData struct {
- // SyncPriority is a hint to bias the choice of syncing partners.
- // Members of the SyncGroup should choose to synchronize more often
- // with partners with lower values.
- SyncPriority int32
-}
-
-// A NameIdentity gives a Veyron name and identity for a joiner.
-// TODO(m3b): When names include an identity, this should become a single
-// string.
-type NameIdentity struct {
- Name string // Global name of joiner.
- Identity string // Security identity of the joiner.
-}
-
-// TODO(bprosnitz) Remove this line once signatures are updated to use typevals.
-// It corrects a bug where _gen_wiretype is unused in VDL packages where only bootstrap types are used on interfaces.
-const _ = _gen_wiretype.TypeIDInvalid
-
-// SyncGroupServer is the collection of calls on SyncGroup objects at
-// a SyncGroup server. The calls used most often, like Create and Join, are
-// used almost exclusively by the Store. Clients typically call the Store to
-// cause these things to happen.
-//
-// Calls starting with "Set" take an eTag value that may be either empty, or
-// the value of ETag from a recent response to Get(), Watch(), or GetACL().
-// SyncGroupServer is the interface the client binds and uses.
-// SyncGroupServer_ExcludingUniversal is the interface without internal framework-added methods
-// to enable embedding without method collisions. Not to be used directly by clients.
-type SyncGroupServer_ExcludingUniversal interface {
- // Object provides access control for Veyron objects.
- access.Object_ExcludingUniversal
- // Create creates this SyncGroup with the given arguments, and if
- // joiner.Name!="", with {joiner, metaData} in its Joiners map. It is
- // expected that acl will give Read and Write access to any device that
- // the administrator expects to join and sync; if the acl is empty, a
- // default ACL giving access only to the caller is used. On success,
- // Create returns the SyncGroupInfo of the newly created object.
- //
- // Requires: this SyncGroup must not exist;
- // the caller must have write permission at the SyncGroup server;
- // the caller's identity must be a prefix of the SyncGroup's name.
- // Beware that for Create(), the access label is matched against a
- // server-wide ACL; for all other calls the access label is matched
- // against the object's ACL.
- Create(ctx _gen_context.T, createArgs SyncGroupConfig, rootOID storage.ID, joiner NameIdentity, metaData JoinerMetaData, opts ..._gen_ipc.CallOpt) (reply SyncGroupInfo, err error)
- // Join adds {joiner, metaData} to the SyncGroup's Joiners map and
- // returns the SyncGroupInfo for this SyncGroup. The act of joining
- // allows other devices to find the caller, which is still required to
- // have read+write access on the SyncGroup to participate in
- // synchronization. A device may call Join again with the same
- // NameIdentity in order to change metaData.
- // For SyncGroups with large numbers of joiners, Join may return
- // a subset of Joiners.
- //
- // Requires: this SyncGroup must exist;
- // the caller must have both read and write permission on the
- // SyncGroup.
- // TODO(m3b): The label should be read and write; can that be expressed?
- Join(ctx _gen_context.T, joiner NameIdentity, metaData JoinerMetaData, opts ..._gen_ipc.CallOpt) (reply SyncGroupInfo, err error)
- // Leave removes the joiner with the given name/identity from the
- // SyncGroup's Joiners map.
- //
- // Requires: this SyncGroup must exist;
- // the caller must assert the identity name.Identity.
- // (Thus, a device that Joined may Leave even if it would no longer
- // have permission to Join() the SyncGroup.)
- Leave(ctx _gen_context.T, name NameIdentity, opts ..._gen_ipc.CallOpt) (err error)
- // Eject is like Leave, but the caller must wield Admin
- // privilege on the group, and need not wield name.Identity.
- //
- // Requires: the SyncGroup must exist;
- // the caller must have admin permission on the SyncGroup.
- Eject(ctx _gen_context.T, name NameIdentity, opts ..._gen_ipc.CallOpt) (err error)
- // Destroy ejects all devices from the SyncGroup and removes it.
- // Devices that had joined will learn of this when their
- // SyncGroup object disappears.
- //
- // Requires: this SyncGroup must exist;
- // the caller must have admin permission on the SyncGroup.
- Destroy(ctx _gen_context.T, opts ..._gen_ipc.CallOpt) (err error)
- // Get returns the SyncGroupInfo for this SyncGroup. For SyncGroups
- // with a large number of joiners, Get may return a subset of Joiners.
- //
- // Requires: this SyncGroup must exist;
- // the caller must have read permission on the SyncGroup.
- // TODO(m3b): This call may be removed when Watch is implemented.
- Get(ctx _gen_context.T, opts ..._gen_ipc.CallOpt) (reply SyncGroupInfo, err error)
- // SetConfig sets the Config field of this SyncGroup.
- //
- // Requires: this SyncGroup must exist;
- // if non-empty, the eTag must match the value in the object;
- // the caller must have admin permission on the SyncGroup.
- SetConfig(ctx _gen_context.T, config SyncGroupConfig, eTag string, opts ..._gen_ipc.CallOpt) (err error)
-}
-type SyncGroupServer interface {
- _gen_ipc.UniversalServiceMethods
- SyncGroupServer_ExcludingUniversal
-}
-
-// SyncGroupServerService is the interface the server implements.
-type SyncGroupServerService interface {
-
- // Object provides access control for Veyron objects.
- access.ObjectService
- // Create creates this SyncGroup with the given arguments, and if
- // joiner.Name!="", with {joiner, metaData} in its Joiners map. It is
- // expected that acl will give Read and Write access to any device that
- // the administrator expects to join and sync; if the acl is empty, a
- // default ACL giving access only to the caller is used. On success,
- // Create returns the SyncGroupInfo of the newly created object.
- //
- // Requires: this SyncGroup must not exist;
- // the caller must have write permission at the SyncGroup server;
- // the caller's identity must be a prefix of the SyncGroup's name.
- // Beware that for Create(), the access label is matched against a
- // server-wide ACL; for all other calls the access label is matched
- // against the object's ACL.
- Create(context _gen_ipc.ServerContext, createArgs SyncGroupConfig, rootOID storage.ID, joiner NameIdentity, metaData JoinerMetaData) (reply SyncGroupInfo, err error)
- // Join adds {joiner, metaData} to the SyncGroup's Joiners map and
- // returns the SyncGroupInfo for this SyncGroup. The act of joining
- // allows other devices to find the caller, which is still required to
- // have read+write access on the SyncGroup to participate in
- // synchronization. A device may call Join again with the same
- // NameIdentity in order to change metaData.
- // For SyncGroups with large numbers of joiners, Join may return
- // a subset of Joiners.
- //
- // Requires: this SyncGroup must exist;
- // the caller must have both read and write permission on the
- // SyncGroup.
- // TODO(m3b): The label should be read and write; can that be expressed?
- Join(context _gen_ipc.ServerContext, joiner NameIdentity, metaData JoinerMetaData) (reply SyncGroupInfo, err error)
- // Leave removes the joiner with the given name/identity from the
- // SyncGroup's Joiners map.
- //
- // Requires: this SyncGroup must exist;
- // the caller must assert the identity name.Identity.
- // (Thus, a device that Joined may Leave even if it would no longer
- // have permission to Join() the SyncGroup.)
- Leave(context _gen_ipc.ServerContext, name NameIdentity) (err error)
- // Eject is like Leave, but the caller must wield Admin
- // privilege on the group, and need not wield name.Identity.
- //
- // Requires: the SyncGroup must exist;
- // the caller must have admin permission on the SyncGroup.
- Eject(context _gen_ipc.ServerContext, name NameIdentity) (err error)
- // Destroy ejects all devices from the SyncGroup and removes it.
- // Devices that had joined will learn of this when their
- // SyncGroup object disappears.
- //
- // Requires: this SyncGroup must exist;
- // the caller must have admin permission on the SyncGroup.
- Destroy(context _gen_ipc.ServerContext) (err error)
- // Get returns the SyncGroupInfo for this SyncGroup. For SyncGroups
- // with a large number of joiners, Get may return a subset of Joiners.
- //
- // Requires: this SyncGroup must exist;
- // the caller must have read permission on the SyncGroup.
- // TODO(m3b): This call may be removed when Watch is implemented.
- Get(context _gen_ipc.ServerContext) (reply SyncGroupInfo, err error)
- // SetConfig sets the Config field of this SyncGroup.
- //
- // Requires: this SyncGroup must exist;
- // if non-empty, the eTag must match the value in the object;
- // the caller must have admin permission on the SyncGroup.
- SetConfig(context _gen_ipc.ServerContext, config SyncGroupConfig, eTag string) (err error)
-}
-
-// BindSyncGroupServer returns the client stub implementing the SyncGroupServer
-// interface.
-//
-// If no _gen_ipc.Client is specified, the default _gen_ipc.Client in the
-// global Runtime is used.
-func BindSyncGroupServer(name string, opts ..._gen_ipc.BindOpt) (SyncGroupServer, error) {
- var client _gen_ipc.Client
- switch len(opts) {
- case 0:
- // Do nothing.
- case 1:
- if clientOpt, ok := opts[0].(_gen_ipc.Client); opts[0] == nil || ok {
- client = clientOpt
- } else {
- return nil, _gen_vdlutil.ErrUnrecognizedOption
- }
- default:
- return nil, _gen_vdlutil.ErrTooManyOptionsToBind
- }
- stub := &clientStubSyncGroupServer{defaultClient: client, name: name}
- stub.Object_ExcludingUniversal, _ = access.BindObject(name, client)
-
- return stub, nil
-}
-
-// NewServerSyncGroupServer creates a new server stub.
-//
-// It takes a regular server implementing the SyncGroupServerService
-// interface, and returns a new server stub.
-func NewServerSyncGroupServer(server SyncGroupServerService) interface{} {
- return &ServerStubSyncGroupServer{
- ServerStubObject: *access.NewServerObject(server).(*access.ServerStubObject),
- service: server,
- }
-}
-
-// clientStubSyncGroupServer implements SyncGroupServer.
-type clientStubSyncGroupServer struct {
- access.Object_ExcludingUniversal
-
- defaultClient _gen_ipc.Client
- name string
-}
-
-func (__gen_c *clientStubSyncGroupServer) client(ctx _gen_context.T) _gen_ipc.Client {
- if __gen_c.defaultClient != nil {
- return __gen_c.defaultClient
- }
- return _gen_veyron2.RuntimeFromContext(ctx).Client()
-}
-
-func (__gen_c *clientStubSyncGroupServer) Create(ctx _gen_context.T, createArgs SyncGroupConfig, rootOID storage.ID, joiner NameIdentity, metaData JoinerMetaData, opts ..._gen_ipc.CallOpt) (reply SyncGroupInfo, err error) {
- var call _gen_ipc.Call
- if call, err = __gen_c.client(ctx).StartCall(ctx, __gen_c.name, "Create", []interface{}{createArgs, rootOID, joiner, metaData}, opts...); err != nil {
- return
- }
- if ierr := call.Finish(&reply, &err); ierr != nil {
- err = ierr
- }
- return
-}
-
-func (__gen_c *clientStubSyncGroupServer) Join(ctx _gen_context.T, joiner NameIdentity, metaData JoinerMetaData, opts ..._gen_ipc.CallOpt) (reply SyncGroupInfo, err error) {
- var call _gen_ipc.Call
- if call, err = __gen_c.client(ctx).StartCall(ctx, __gen_c.name, "Join", []interface{}{joiner, metaData}, opts...); err != nil {
- return
- }
- if ierr := call.Finish(&reply, &err); ierr != nil {
- err = ierr
- }
- return
-}
-
-func (__gen_c *clientStubSyncGroupServer) Leave(ctx _gen_context.T, name NameIdentity, opts ..._gen_ipc.CallOpt) (err error) {
- var call _gen_ipc.Call
- if call, err = __gen_c.client(ctx).StartCall(ctx, __gen_c.name, "Leave", []interface{}{name}, opts...); err != nil {
- return
- }
- if ierr := call.Finish(&err); ierr != nil {
- err = ierr
- }
- return
-}
-
-func (__gen_c *clientStubSyncGroupServer) Eject(ctx _gen_context.T, name NameIdentity, opts ..._gen_ipc.CallOpt) (err error) {
- var call _gen_ipc.Call
- if call, err = __gen_c.client(ctx).StartCall(ctx, __gen_c.name, "Eject", []interface{}{name}, opts...); err != nil {
- return
- }
- if ierr := call.Finish(&err); ierr != nil {
- err = ierr
- }
- return
-}
-
-func (__gen_c *clientStubSyncGroupServer) Destroy(ctx _gen_context.T, opts ..._gen_ipc.CallOpt) (err error) {
- var call _gen_ipc.Call
- if call, err = __gen_c.client(ctx).StartCall(ctx, __gen_c.name, "Destroy", nil, opts...); err != nil {
- return
- }
- if ierr := call.Finish(&err); ierr != nil {
- err = ierr
- }
- return
-}
-
-func (__gen_c *clientStubSyncGroupServer) Get(ctx _gen_context.T, opts ..._gen_ipc.CallOpt) (reply SyncGroupInfo, err error) {
- var call _gen_ipc.Call
- if call, err = __gen_c.client(ctx).StartCall(ctx, __gen_c.name, "Get", nil, opts...); err != nil {
- return
- }
- if ierr := call.Finish(&reply, &err); ierr != nil {
- err = ierr
- }
- return
-}
-
-func (__gen_c *clientStubSyncGroupServer) SetConfig(ctx _gen_context.T, config SyncGroupConfig, eTag string, opts ..._gen_ipc.CallOpt) (err error) {
- var call _gen_ipc.Call
- if call, err = __gen_c.client(ctx).StartCall(ctx, __gen_c.name, "SetConfig", []interface{}{config, eTag}, opts...); err != nil {
- return
- }
- if ierr := call.Finish(&err); ierr != nil {
- err = ierr
- }
- return
-}
-
-func (__gen_c *clientStubSyncGroupServer) UnresolveStep(ctx _gen_context.T, opts ..._gen_ipc.CallOpt) (reply []string, err error) {
- var call _gen_ipc.Call
- if call, err = __gen_c.client(ctx).StartCall(ctx, __gen_c.name, "UnresolveStep", nil, opts...); err != nil {
- return
- }
- if ierr := call.Finish(&reply, &err); ierr != nil {
- err = ierr
- }
- return
-}
-
-func (__gen_c *clientStubSyncGroupServer) Signature(ctx _gen_context.T, opts ..._gen_ipc.CallOpt) (reply _gen_ipc.ServiceSignature, err error) {
- var call _gen_ipc.Call
- if call, err = __gen_c.client(ctx).StartCall(ctx, __gen_c.name, "Signature", nil, opts...); err != nil {
- return
- }
- if ierr := call.Finish(&reply, &err); ierr != nil {
- err = ierr
- }
- return
-}
-
-func (__gen_c *clientStubSyncGroupServer) GetMethodTags(ctx _gen_context.T, method string, opts ..._gen_ipc.CallOpt) (reply []interface{}, err error) {
- var call _gen_ipc.Call
- if call, err = __gen_c.client(ctx).StartCall(ctx, __gen_c.name, "GetMethodTags", []interface{}{method}, opts...); err != nil {
- return
- }
- if ierr := call.Finish(&reply, &err); ierr != nil {
- err = ierr
- }
- return
-}
-
-// ServerStubSyncGroupServer wraps a server that implements
-// SyncGroupServerService and provides an object that satisfies
-// the requirements of veyron2/ipc.ReflectInvoker.
-type ServerStubSyncGroupServer struct {
- access.ServerStubObject
-
- service SyncGroupServerService
-}
-
-func (__gen_s *ServerStubSyncGroupServer) GetMethodTags(call _gen_ipc.ServerCall, method string) ([]interface{}, error) {
-	// TODO(bprosnitz) GetMethodTags() will be replaced with Signature().
- // Note: This exhibits some weird behavior like returning a nil error if the method isn't found.
- // This will change when it is replaced with Signature().
- if resp, err := __gen_s.ServerStubObject.GetMethodTags(call, method); resp != nil || err != nil {
- return resp, err
- }
- switch method {
- case "Create":
- return []interface{}{security.Label(4)}, nil
- case "Join":
- return []interface{}{security.Label(4)}, nil
- case "Leave":
- return []interface{}{}, nil
- case "Eject":
- return []interface{}{security.Label(8)}, nil
- case "Destroy":
- return []interface{}{security.Label(8)}, nil
- case "Get":
- return []interface{}{security.Label(2)}, nil
- case "SetConfig":
- return []interface{}{security.Label(8)}, nil
- default:
- return nil, nil
- }
-}
-
-func (__gen_s *ServerStubSyncGroupServer) Signature(call _gen_ipc.ServerCall) (_gen_ipc.ServiceSignature, error) {
- result := _gen_ipc.ServiceSignature{Methods: make(map[string]_gen_ipc.MethodSignature)}
- result.Methods["Create"] = _gen_ipc.MethodSignature{
- InArgs: []_gen_ipc.MethodArgument{
- {Name: "createArgs", Type: 72},
- {Name: "rootOID", Type: 74},
- {Name: "joiner", Type: 75},
- {Name: "metaData", Type: 76},
- },
- OutArgs: []_gen_ipc.MethodArgument{
- {Name: "sgInfo", Type: 79},
- {Name: "err", Type: 80},
- },
- }
- result.Methods["Destroy"] = _gen_ipc.MethodSignature{
- InArgs: []_gen_ipc.MethodArgument{},
- OutArgs: []_gen_ipc.MethodArgument{
- {Name: "", Type: 80},
- },
- }
- result.Methods["Eject"] = _gen_ipc.MethodSignature{
- InArgs: []_gen_ipc.MethodArgument{
- {Name: "name", Type: 75},
- },
- OutArgs: []_gen_ipc.MethodArgument{
- {Name: "", Type: 80},
- },
- }
- result.Methods["Get"] = _gen_ipc.MethodSignature{
- InArgs: []_gen_ipc.MethodArgument{},
- OutArgs: []_gen_ipc.MethodArgument{
- {Name: "sgInfo", Type: 79},
- {Name: "err", Type: 80},
- },
- }
- result.Methods["Join"] = _gen_ipc.MethodSignature{
- InArgs: []_gen_ipc.MethodArgument{
- {Name: "joiner", Type: 75},
- {Name: "metaData", Type: 76},
- },
- OutArgs: []_gen_ipc.MethodArgument{
- {Name: "sgInfo", Type: 79},
- {Name: "err", Type: 80},
- },
- }
- result.Methods["Leave"] = _gen_ipc.MethodSignature{
- InArgs: []_gen_ipc.MethodArgument{
- {Name: "name", Type: 75},
- },
- OutArgs: []_gen_ipc.MethodArgument{
- {Name: "", Type: 80},
- },
- }
- result.Methods["SetConfig"] = _gen_ipc.MethodSignature{
- InArgs: []_gen_ipc.MethodArgument{
- {Name: "config", Type: 72},
- {Name: "eTag", Type: 3},
- },
- OutArgs: []_gen_ipc.MethodArgument{
- {Name: "", Type: 80},
- },
- }
-
- result.TypeDefs = []_gen_vdlutil.Any{
- _gen_wiretype.NamedPrimitiveType{Type: 0x1, Name: "anydata", Tags: []string(nil)}, _gen_wiretype.MapType{Key: 0x3, Elem: 0x41, Name: "", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x3, Name: "veyron2/security.BlessingPattern", Tags: []string(nil)}, _gen_wiretype.NamedPrimitiveType{Type: 0x34, Name: "veyron2/security.LabelSet", Tags: []string(nil)}, _gen_wiretype.MapType{Key: 0x43, Elem: 0x44, Name: "", Tags: []string(nil)}, _gen_wiretype.MapType{Key: 0x3, Elem: 0x44, Name: "", Tags: []string(nil)}, _gen_wiretype.StructType{
- []_gen_wiretype.FieldType{
- _gen_wiretype.FieldType{Type: 0x45, Name: "In"},
- _gen_wiretype.FieldType{Type: 0x46, Name: "NotIn"},
- },
- "veyron2/security.ACL", []string(nil)},
- _gen_wiretype.StructType{
- []_gen_wiretype.FieldType{
- _gen_wiretype.FieldType{Type: 0x3, Name: "Desc"},
- _gen_wiretype.FieldType{Type: 0x3d, Name: "PathPatterns"},
- _gen_wiretype.FieldType{Type: 0x42, Name: "Options"},
- _gen_wiretype.FieldType{Type: 0x47, Name: "ACL"},
- _gen_wiretype.FieldType{Type: 0x3d, Name: "MountTables"},
- },
- "veyron/services/syncgroup.SyncGroupConfig", []string(nil)},
- _gen_wiretype.NamedPrimitiveType{Type: 0x32, Name: "byte", Tags: []string(nil)}, _gen_wiretype.ArrayType{Elem: 0x49, Len: 0x10, Name: "veyron2/storage.ID", Tags: []string(nil)}, _gen_wiretype.StructType{
- []_gen_wiretype.FieldType{
- _gen_wiretype.FieldType{Type: 0x3, Name: "Name"},
- _gen_wiretype.FieldType{Type: 0x3, Name: "Identity"},
- },
- "veyron/services/syncgroup.NameIdentity", []string(nil)},
- _gen_wiretype.StructType{
- []_gen_wiretype.FieldType{
- _gen_wiretype.FieldType{Type: 0x24, Name: "SyncPriority"},
- },
- "veyron/services/syncgroup.JoinerMetaData", []string(nil)},
- _gen_wiretype.ArrayType{Elem: 0x49, Len: 0x10, Name: "veyron/services/syncgroup.ID", Tags: []string(nil)}, _gen_wiretype.MapType{Key: 0x4b, Elem: 0x4c, Name: "", Tags: []string(nil)}, _gen_wiretype.StructType{
- []_gen_wiretype.FieldType{
- _gen_wiretype.FieldType{Type: 0x3, Name: "Name"},
- _gen_wiretype.FieldType{Type: 0x48, Name: "Config"},
- _gen_wiretype.FieldType{Type: 0x4a, Name: "RootOID"},
- _gen_wiretype.FieldType{Type: 0x3, Name: "ETag"},
- _gen_wiretype.FieldType{Type: 0x4d, Name: "SGOID"},
- _gen_wiretype.FieldType{Type: 0x4e, Name: "Joiners"},
- },
- "veyron/services/syncgroup.SyncGroupInfo", []string(nil)},
- _gen_wiretype.NamedPrimitiveType{Type: 0x1, Name: "error", Tags: []string(nil)}}
- var ss _gen_ipc.ServiceSignature
- var firstAdded int
- ss, _ = __gen_s.ServerStubObject.Signature(call)
- firstAdded = len(result.TypeDefs)
- for k, v := range ss.Methods {
- for i, _ := range v.InArgs {
- if v.InArgs[i].Type >= _gen_wiretype.TypeIDFirst {
- v.InArgs[i].Type += _gen_wiretype.TypeID(firstAdded)
- }
- }
- for i, _ := range v.OutArgs {
- if v.OutArgs[i].Type >= _gen_wiretype.TypeIDFirst {
- v.OutArgs[i].Type += _gen_wiretype.TypeID(firstAdded)
- }
- }
- if v.InStream >= _gen_wiretype.TypeIDFirst {
- v.InStream += _gen_wiretype.TypeID(firstAdded)
- }
- if v.OutStream >= _gen_wiretype.TypeIDFirst {
- v.OutStream += _gen_wiretype.TypeID(firstAdded)
- }
- result.Methods[k] = v
- }
-	// TODO(bprosnitz) combine type definitions from embedded interfaces in a way that doesn't cause duplication.
- for _, d := range ss.TypeDefs {
- switch wt := d.(type) {
- case _gen_wiretype.SliceType:
- if wt.Elem >= _gen_wiretype.TypeIDFirst {
- wt.Elem += _gen_wiretype.TypeID(firstAdded)
- }
- d = wt
- case _gen_wiretype.ArrayType:
- if wt.Elem >= _gen_wiretype.TypeIDFirst {
- wt.Elem += _gen_wiretype.TypeID(firstAdded)
- }
- d = wt
- case _gen_wiretype.MapType:
- if wt.Key >= _gen_wiretype.TypeIDFirst {
- wt.Key += _gen_wiretype.TypeID(firstAdded)
- }
- if wt.Elem >= _gen_wiretype.TypeIDFirst {
- wt.Elem += _gen_wiretype.TypeID(firstAdded)
- }
- d = wt
- case _gen_wiretype.StructType:
- for i, fld := range wt.Fields {
- if fld.Type >= _gen_wiretype.TypeIDFirst {
- wt.Fields[i].Type += _gen_wiretype.TypeID(firstAdded)
- }
- }
- d = wt
- // NOTE: other types are missing, but we are upgrading anyways.
- }
- result.TypeDefs = append(result.TypeDefs, d)
- }
-
- return result, nil
-}
-
-func (__gen_s *ServerStubSyncGroupServer) UnresolveStep(call _gen_ipc.ServerCall) (reply []string, err error) {
- if unresolver, ok := __gen_s.service.(_gen_ipc.Unresolver); ok {
- return unresolver.UnresolveStep(call)
- }
- if call.Server() == nil {
- return
- }
- var published []string
- if published, err = call.Server().Published(); err != nil || published == nil {
- return
- }
- reply = make([]string, len(published))
- for i, p := range published {
- reply[i] = _gen_naming.Join(p, call.Name())
- }
- return
-}
-
-func (__gen_s *ServerStubSyncGroupServer) Create(call _gen_ipc.ServerCall, createArgs SyncGroupConfig, rootOID storage.ID, joiner NameIdentity, metaData JoinerMetaData) (reply SyncGroupInfo, err error) {
- reply, err = __gen_s.service.Create(call, createArgs, rootOID, joiner, metaData)
- return
-}
-
-func (__gen_s *ServerStubSyncGroupServer) Join(call _gen_ipc.ServerCall, joiner NameIdentity, metaData JoinerMetaData) (reply SyncGroupInfo, err error) {
- reply, err = __gen_s.service.Join(call, joiner, metaData)
- return
-}
-
-func (__gen_s *ServerStubSyncGroupServer) Leave(call _gen_ipc.ServerCall, name NameIdentity) (err error) {
- err = __gen_s.service.Leave(call, name)
- return
-}
-
-func (__gen_s *ServerStubSyncGroupServer) Eject(call _gen_ipc.ServerCall, name NameIdentity) (err error) {
- err = __gen_s.service.Eject(call, name)
- return
-}
-
-func (__gen_s *ServerStubSyncGroupServer) Destroy(call _gen_ipc.ServerCall) (err error) {
- err = __gen_s.service.Destroy(call)
- return
-}
-
-func (__gen_s *ServerStubSyncGroupServer) Get(call _gen_ipc.ServerCall) (reply SyncGroupInfo, err error) {
- reply, err = __gen_s.service.Get(call)
- return
-}
-
-func (__gen_s *ServerStubSyncGroupServer) SetConfig(call _gen_ipc.ServerCall, config SyncGroupConfig, eTag string) (err error) {
- err = __gen_s.service.SetConfig(call, config, eTag)
- return
-}
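
The Signature stub deleted above merges the embedded Object interface's signature by offsetting each user-defined TypeID by the number of type definitions already present, then appending the embedded TypeDefs. A minimal, standalone sketch of that remapping idiom follows; TypeID, typeIDFirst, FieldType, and StructType are simplified stand-ins for the real veyron2/wiretype declarations, not its actual API:

    package main

    import "fmt"

    // Simplified stand-ins for the generated wiretype definitions; the real
    // declarations live in veyron2/wiretype and differ in detail.
    type TypeID uint64

    const typeIDFirst TypeID = 65 // IDs below this denote built-in types.

    type FieldType struct {
        Type TypeID
        Name string
    }

    type StructType struct {
        Fields []FieldType
        Name   string
    }

    // remap shifts every user-defined type ID in defs by offset, leaving
    // built-in IDs untouched, so embedded definitions can be appended after
    // the outer interface's own definitions without ID collisions.
    func remap(defs []StructType, offset int) []StructType {
        for i := range defs {
            for j, fld := range defs[i].Fields {
                if fld.Type >= typeIDFirst {
                    defs[i].Fields[j].Type += TypeID(offset)
                }
            }
        }
        return defs
    }

    func main() {
        embedded := []StructType{{
            Fields: []FieldType{{Type: 0x3, Name: "Name"}, {Type: 0x49, Name: "Config"}},
            Name:   "example.Info",
        }}
        // Suppose the outer signature already defines twelve types of its own.
        fmt.Println(remap(embedded, 12))
    }

The guard against IDs below typeIDFirst matters because built-in types (string, error, byte) share fixed IDs across all signatures and must not be shifted.
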
diff --git a/services/wsprd/lib/remove_this.go b/services/wsprd/lib/remove_this.go
index 32be65d..cd02607 100644
--- a/services/wsprd/lib/remove_this.go
+++ b/services/wsprd/lib/remove_this.go
@@ -1,11 +1,11 @@
package lib
import (
+ "veyron.io/store/veyron2/services/store"
+ "veyron.io/store/veyron2/storage"
rps "veyron/examples/rockpaperscissors"
mttypes "veyron2/services/mounttable/types"
- "veyron2/services/store"
watchtypes "veyron2/services/watch/types"
- "veyron2/storage"
"veyron2/vom"
)
diff --git a/tools/qsh/impl/impl.go b/tools/qsh/impl/impl.go
deleted file mode 100644
index d349a92..0000000
--- a/tools/qsh/impl/impl.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package impl
-
-import (
- "fmt"
- "io"
- "os"
- "sort"
-
- "veyron2/context"
- "veyron2/query"
- "veyron2/storage"
-
- // TODO(rjkroege@google.com): Replace with the appropriate vom2 functionality
- // when available.
- _ "veyron/services/store/typeregistryhack"
-)
-
-func indenter(w io.Writer, indent int) {
- for i := 0; i < indent; i++ {
- fmt.Fprintf(w, "\t")
- }
-}
-
-// printResult prints a single QueryResult to the provided io.Writer.
-func printResult(qr storage.QueryResult, w io.Writer, indent int) {
- // TODO(rjkroege@google.com): Consider permitting the user to provide a Go
- // template to format output.
- if v := qr.Value(); v != nil {
- indenter(w, indent)
- fmt.Fprintf(w, "%s: %#v\n", qr.Name(), v)
- } else {
- // Force fields to be consistently ordered.
- fields := qr.Fields()
- names := make([]string, 0, len(fields))
-		for k := range fields {
- names = append(names, k)
- }
- sort.Strings(names)
-
- indenter(w, indent)
- fmt.Fprintf(w, "%s: map[string]interface {}{\n", qr.Name())
- for _, k := range names {
- f := fields[k]
- switch v := f.(type) {
- case storage.QueryStream:
- indenter(w, indent+1)
- fmt.Fprintf(w, "%s: {\n", k)
- printStream(v, w, indent+2)
- indenter(w, indent+1)
- fmt.Fprintf(w, "},\n")
- default:
- indenter(w, indent+1)
- fmt.Fprintf(w, "\"%s\":%#v,\n", k, v)
- }
- }
- indenter(w, indent)
- fmt.Fprintf(w, "},\n")
- }
-}
-
-func printStream(qs storage.QueryStream, w io.Writer, indent int) error {
- for qs.Advance() {
- printResult(qs.Value(), w, indent)
- }
- if err := qs.Err(); err != nil {
- return err
- }
- return nil
-}
-
-func RunQuery(ctx context.T, queryRoot storage.Dir, queryString string) error {
- return printStream(queryRoot.Query(ctx, query.Query{queryString}), os.Stdout, 0)
-}
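
The deleted printStream shows the stream-iteration idiom these storage APIs use throughout: call Advance in a loop, read Value for each element, and check Err exactly once after the loop ends. Below is a self-contained sketch of the same idiom, including an early exit that Cancels the stream; queryStream and sliceStream are illustrative stand-ins for storage.QueryStream, not the real interface:

    package main

    import "fmt"

    // queryStream captures the iterator shape used above; the real interface
    // yields storage.QueryResult values rather than strings.
    type queryStream interface {
        Advance() bool
        Value() string
        Err() error
        Cancel()
    }

    // sliceStream is a trivial in-memory queryStream for the example.
    type sliceStream struct {
        items []string
        index int // starts at -1; Advance moves to the next element
        done  bool
    }

    func (s *sliceStream) Advance() bool {
        if s.done {
            return false
        }
        s.index++
        return s.index < len(s.items)
    }
    func (s *sliceStream) Value() string { return s.items[s.index] }
    func (s *sliceStream) Err() error    { return nil }
    func (s *sliceStream) Cancel()       { s.done = true }

    // firstN collects at most n results, cancelling the stream when it stops
    // early so the producer can release resources.
    func firstN(qs queryStream, n int) ([]string, error) {
        var out []string
        for qs.Advance() {
            out = append(out, qs.Value())
            if len(out) == n {
                qs.Cancel()
                break
            }
        }
        if err := qs.Err(); err != nil {
            return nil, err
        }
        return out, nil
    }

    func main() {
        s := &sliceStream{items: []string{"a", "b", "c"}, index: -1}
        fmt.Println(firstN(s, 2)) // [a b] <nil>
    }

Cancelling before abandoning a stream lets the producer stop work early; Err is still consulted afterwards so a genuine failure is not mistaken for normal exhaustion.
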
diff --git a/tools/qsh/impl/impl_test.go b/tools/qsh/impl/impl_test.go
deleted file mode 100644
index 8419b95..0000000
--- a/tools/qsh/impl/impl_test.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package impl
-
-import (
- "bytes"
- "testing"
-
- "veyron2/storage"
-)
-
-type mockQueryResult struct {
- name string
- value interface{}
- fields map[string]interface{}
-}
-
-func (mqr mockQueryResult) Name() string {
- return mqr.name
-}
-
-func (mqr mockQueryResult) Value() interface{} {
- return mqr.value
-}
-
-func (mqr mockQueryResult) Fields() map[string]interface{} {
- return mqr.fields
-}
-
-type mockQueryStream struct {
- index int
- results []mockQueryResult
- error error
-}
-
-func (mqs *mockQueryStream) Advance() bool {
- if mqs.error != nil {
- return false
- }
-	// index starts at -1, so the first Advance moves to element 0.
- mqs.index++
- if mqs.index >= len(mqs.results) {
- return false
- }
- return true
-}
-
-func (mqs *mockQueryStream) Value() storage.QueryResult {
- return mqs.results[mqs.index]
-}
-
-func (mqs *mockQueryStream) Err() error {
- return mqs.error
-}
-
-func (mqs *mockQueryStream) Cancel() {
- mqs.index = len(mqs.results) + 1
-}
-
-type testCase struct {
- result mockQueryResult
- expectedOutput string
-}
-
-const (
- result3Out = `result3: map[string]interface {}{
- qs: {
- resultNested1: 10
- resultNested2: 11
- },
-},
-`
-)
-
-func TestPrintResult(t *testing.T) {
- tests := []testCase{
- {
- result: mockQueryResult{
- name: "result1",
- value: 10,
- fields: nil,
- },
- expectedOutput: "result1: 10\n",
- },
-
- {
- result: mockQueryResult{
- name: "result2",
- value: nil,
- fields: map[string]interface{}{"a": 1, "b": 2},
- },
- expectedOutput: `result2: map[string]interface {}{
- "a":1,
- "b":2,
-},
-`,
- },
-
- {
- result: mockQueryResult{
- name: "result3",
- value: nil,
- fields: map[string]interface{}{
- "qs": storage.QueryStream(&mockQueryStream{
- index: -1,
- error: nil,
- results: []mockQueryResult{
- mockQueryResult{
- name: "resultNested1",
- value: 10,
- fields: nil,
- },
- mockQueryResult{
- name: "resultNested2",
- value: 11,
- fields: nil,
- },
- },
- }),
- },
- },
- expectedOutput: result3Out,
- },
- }
-
- for _, d := range tests {
- var b bytes.Buffer
- printResult(d.result, &b, 0)
-
- if got, want := b.String(), d.expectedOutput; got != want {
- t.Errorf("got <%s>, want <%s>", got, want)
- }
- }
-}
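
The deleted test is a standard table-driven test asserting against a bytes.Buffer. Were it being kept rather than moved, an easy improvement is wrapping each case in t.Run so cases fail and report independently; a self-contained sketch, with printValue standing in for the deleted printResult's plain-value branch:

    package impl

    import (
        "bytes"
        "fmt"
        "io"
        "testing"
    )

    // printValue is a stand-in for the deleted printResult; it handles only
    // the plain-value case, which is enough to show the subtest structure.
    func printValue(name string, v interface{}, w io.Writer) {
        fmt.Fprintf(w, "%s: %#v\n", name, v)
    }

    func TestPrintValue(t *testing.T) {
        tests := []struct {
            name  string
            value interface{}
            want  string
        }{
            {"result1", 10, "result1: 10\n"},
            {"result2", "x", "result2: \"x\"\n"},
        }
        for _, d := range tests {
            d := d
            t.Run(d.name, func(t *testing.T) {
                var b bytes.Buffer
                printValue(d.name, d.value, &b)
                if got, want := b.String(), d.want; got != want {
                    t.Errorf("got <%s>, want <%s>", got, want)
                }
            })
        }
    }
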
diff --git a/tools/qsh/main.go b/tools/qsh/main.go
deleted file mode 100644
index 2588b5e..0000000
--- a/tools/qsh/main.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package main
-
-import (
- "flag"
- "log"
-
- "veyron/tools/qsh/impl"
-
- "veyron2/rt"
- "veyron2/storage/vstore"
-)
-
-var flagQueryRoot = flag.String("queryroot", "",
- "An object name in the store to serve as the root of the query.")
-
-const usage = `
-Synopsis: qsh --queryroot=<object in the store> query...
-
-Runs a given query starting at the given root.
-`
-
-func main() {
- r := rt.Init()
-
- // TODO(rjkroege@google.com): Handle ^c nicely.
- flag.Parse()
- queryStringArgs := flag.Args()
- if len(queryStringArgs) != 1 {
- log.Fatalf("qsh: Expected only one query arg\n" + usage)
- }
-
- queryRoot := *flagQueryRoot
- if queryRoot == "" {
- log.Fatalf("qsh: No queryroot specified\n" + usage)
- }
-
- err := impl.RunQuery(r.NewContext(), vstore.BindDir(queryRoot), queryStringArgs[0])
- if err != nil {
-		log.Printf("qsh: error while attempting query %q: %v", queryStringArgs[0], err)
- }
-}
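
For the record, the deleted tool was driven entirely by its flag and arguments; per its own usage string it took exactly one query argument, along the lines of (both values are placeholders, not working examples):

    qsh --queryroot=<object-in-the-store> '<query>'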