Merge "remove unused gkvlite package from third_party"
diff --git a/go/src/github.com/steveyen/gkvlite/.gitignore b/go/src/github.com/steveyen/gkvlite/.gitignore
deleted file mode 100644
index 747689d..0000000
--- a/go/src/github.com/steveyen/gkvlite/.gitignore
+++ /dev/null
@@ -1,7 +0,0 @@
-#*
-*~
-*.test
-*.tmp
-tmp
-tools/view/view
-tools/slab/slab
diff --git a/go/src/github.com/steveyen/gkvlite/.travis.yml b/go/src/github.com/steveyen/gkvlite/.travis.yml
deleted file mode 100644
index 4f2ee4d..0000000
--- a/go/src/github.com/steveyen/gkvlite/.travis.yml
+++ /dev/null
@@ -1 +0,0 @@
-language: go
diff --git a/go/src/github.com/steveyen/gkvlite/BUILD b/go/src/github.com/steveyen/gkvlite/BUILD
deleted file mode 100644
index 64c3538..0000000
--- a/go/src/github.com/steveyen/gkvlite/BUILD
+++ /dev/null
@@ -1,7 +0,0 @@
-# Description:
-#  Key-value persistence library for Go.
-licenses(["notice"])  # MIT
-
-exports_files(["LICENSE"])
-
-package(default_visibility = ["//visibility:public"])
diff --git a/go/src/github.com/steveyen/gkvlite/LICENSE b/go/src/github.com/steveyen/gkvlite/LICENSE
deleted file mode 100644
index 2665630..0000000
--- a/go/src/github.com/steveyen/gkvlite/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (C) 2012 Steve Yen
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/go/src/github.com/steveyen/gkvlite/OWNERS b/go/src/github.com/steveyen/gkvlite/OWNERS
deleted file mode 100644
index 308a342..0000000
--- a/go/src/github.com/steveyen/gkvlite/OWNERS
+++ /dev/null
@@ -1,11 +0,0 @@
-ashankar
-caprita
-cnicolaou
-gauthamt
-hpucha
-jyh
-kash
-p
-rdaoud
-spetrovic
-toddw
diff --git a/go/src/github.com/steveyen/gkvlite/README.google b/go/src/github.com/steveyen/gkvlite/README.google
deleted file mode 100644
index 1260bcb..0000000
--- a/go/src/github.com/steveyen/gkvlite/README.google
+++ /dev/null
@@ -1,11 +0,0 @@
-URL: https://github.com/steveyen/gkvlite/archive/7470afc986957d095ff5a64ce7d5beb2943dfc7e.zip
-Version: 7470afc986957d095ff5a64ce7d5beb2943dfc7e
-License: MIT
-License File: LICENSE
-
-Description:
-gkvlite is a simple, ordered, ACID, key-value persistence library for Go.
-
-Local Modifications:
-- Remove slab package from build (to avoid dependency on the goslab library)
-- Reduce the loop count in TestPersistRefCountRandom() to speed up the test.
diff --git a/go/src/github.com/steveyen/gkvlite/README.md b/go/src/github.com/steveyen/gkvlite/README.md
deleted file mode 100644
index 7989ca5..0000000
--- a/go/src/github.com/steveyen/gkvlite/README.md
+++ /dev/null
@@ -1,313 +0,0 @@
-gkvlite
--------
-
-gkvlite is a simple, ordered, ACID, key-value persistence library for Go.
-
-Overview
-========
-
-gkvlite is a library that provides a simple key-value persistence
-store, inspired by SQLite and CouchDB/Couchstore.
-
-gkvlite has the following features...
-
-* 100% implemented in the Go Language (golang).
-* Open source license - MIT.
-* Keys are ordered, so range scans are supported.
-* On-disk storage for a "Store" is a single file.
-* ACID properties are supported via a simple, append-only,
-  copy-on-write storage design.
-* Concurrent goroutine support (multiple readers, single mutator).
-
-Key concepts
-============
-
-* A Store is held in a single file.
-* A Store can have zero or more Collections.
-* A Collection can have zero or more Items.
-* An Item is a key and value.
-* A key is a []byte, max length 64KB (length is uint16).
-* A value is a []byte, max length 4GB (length is uint32).
-
-ACID properties
-===============
-
-* Atomicity - all unpersisted changes from all Collections during a
-  Store.Flush() will be persisted atomically.
-* Consistency - simple key-value level consistency is supported.
-* Isolation - mutations won't affect concurrent readers or snapshots.
-* Durability - you control when you want to Flush() & fsync
-  so your application can address its performance-vs-safety tradeoffs
-  appropriately.
-
-Performance
-===========
-
-* In general, performance is similar to probabilistic balanced
-  binary tree performance.
-* O(log N) performance for item retrieval, insert, update, delete.
-* O(log N) performance to find the smallest or largest items (by key).
-* Range iteration performance is same as binary tree traversal
-  performance.
-* You can optionally retrieve just keys only, to save I/O & memory
-  resources.
-
-Snapshots
-=========
-
-* Read-only Store snapshots are supported.
-* Mutations on the original Store won't be seen by snapshots.
-* Snapshot creation is a fast O(1) operation per Collection.
-
-Concurrency
-===========
-
-The simplest way to use gkvlite is in single-threaded fashion, such as
-by using a go channel or other application-provided locking to
-serialize access to a Store.
-
-More advanced users may want to use gkvlite's support for concurrent
-goroutines.  The idea is that multiple read-only goroutines, a single
-read-write (mutation) goroutine, and a single persistence (flusher)
-goroutine do not need to block each other.
-
-More specifically, you should have only a single read-write (or
-mutation) goroutine per Store, and should have only a single
-persistence goroutine per Store (doing Flush()'s).  And, you may have
-multiple, concurrent read-only goroutines per Store (doing read-only
-operations like Get()'s, Visit()'s, Snapshot()'s, CopyTo()'s, etc).
-
-A read-only goroutine that performs a long or slow read operation,
-like a Get() that must retrieve from disk or a range scan, will see a
-consistent, isolated view of the collection.  That is, mutations that
-happened after the slow read started will not be seen by the reader.
-
-IMPORTANT: In concurrent usage, the user must provide a StoreFile
-implementation that is concurrent safe.
-
-Note that os.File is not a concurrent safe implementation of the
-StoreFile interface.  You will need to provide your own implementation
-of the StoreFile interface, such as by using a channel to serialize
-StoreFile method invocations.
-
-Finally, advanced users may also use a read-write (mutation) goroutine
-per Collection instead of per Store.  There should be, though,
-only a single persistence (flusher) goroutine per Store.
-
-Other features
-==============
-
-* In-memory-only mode is supported, where you can use the same API but
-  without any persistence.
-* You provide the os.File - this library just uses the os.File you
-  provide.
-* You provide the os.File.Sync() - if you want to fsync your file,
-  call file.Sync() after you do a Flush().
-* Similar to SQLite's VFS feature, you can supply your own StoreFile
-  interface implementation instead of an actual os.File, for your own
-  advanced testing or I/O interposing needs (e.g., compression,
-  checksums, I/O statistics, caching, enabling concurrency, etc).
-* You can specify your own KeyCompare function.  The default is
-  bytes.Compare().  See also the
-  StoreCallbacks.KeyCompareForCollection() callback function.
-* Collections are written to file sorted by Collection name.  This
-  allows users with advanced concurrency needs to reason about how
-  concurrent flushes interact with concurrent mutations.  For example,
-  if you have a main data collection and a secondary-index collection,
-  with clever collection naming you can know that the main collection
-  will always be "ahead of" the secondary-index collection even with
-  concurrent flushing.
-* A Store can be reverted using the FlushRevert() API to revert the
-  last Flush().  This brings the state of a Store back to where it was
-  as of the next-to-last Flush().  This allows the application to
-  rollback or undo changes on a persisted file.
-* A reverted snapshot (calling FlushRevert() on a snapshot) does not
-  affect (is isolated from) the original Store and does not affect the
-  underlying file.  Calling FlushRevert() on the main Store, however,
-  will adversely affect any active snapshots; the application
-  should stop using any snapshots that were created before the
-  FlushRevert() invocation on the main Store.
-* To evict O(log N) number of items from memory, call
-  Collection.EvictSomeItems(), which traverses a random tree branch
-  and evicts any clean (already persisted) items found during that
-  traversal.  Eviction means clearing out references to those clean
-  items, which means those items can be candidates for GC.
-* You can control item priority to access hotter items faster by
-  shuffling them closer to the top of balanced binary trees (warning:
-  intricate/advanced tradeoffs here).
-* Tree depth is provided by using the VisitItemsAscendEx() or
-  VisitItemsDescendEx() methods.
-* You can associate transient, ephemeral (non-persisted) data with
-  your items.  If you do use the Item.Transient field, you should use
-  sync/atomic pointer functions for concurrency correctness.
-  In general, Item's should be treated as immutable, except for the
-  Item.Transient field.
-* Application-level Item.Val buffer management is possible via the
-  optional ItemValAddRef/ItemValDecRef() store callbacks, to help
-  reduce garbage memory.  Libraries like github.com/steveyen/go-slab
-  may be helpful here.
-* Errors from file operations are propagated all the way back to your
-  code, so your application can respond appropriately.
-* Tested - "go test" unit tests.
-* Docs - "go doc" documentation.
-
-LICENSE
-=======
-
-Open source - MIT licensed.
-
-Examples
-========
-
-    import (
-        "os"
-        "github.com/steveyen/gkvlite"
-    )
-    
-    f, err := os.Create("/tmp/test.gkvlite")
-    s, err := gkvlite.NewStore(f)
-    c := s.SetCollection("cars", nil)
-    
-    // You can also retrieve the collection, where c == cc.
-    cc := s.GetCollection("cars")
-    
-    // Insert values.
-    c.Set([]byte("tesla"), []byte("$$$"))
-    c.Set([]byte("mercedes"), []byte("$$"))
-    c.Set([]byte("bmw"), []byte("$"))
-    
-    // Replace values.
-    c.Set([]byte("tesla"), []byte("$$$$"))
-    
-    // Retrieve values.
-    mercedesPrice, err := c.Get([]byte("mercedes"))
-    
-    // One of the most priceless vehicles is not in the collection.
-    thisIsNil, err := c.Get([]byte("the-apollo-15-moon-buggy"))
-    
-    // Iterate through items.
-    c.VisitItemsAscend([]byte("ford"), true, func(i *gkvlite.Item) bool {
-        // This visitor callback will be invoked with every item
-        // with key "ford" and onwards, in key-sorted order.
-        // So: "mercedes", "tesla" are visited, in that ascending order,
-        // but not "bmw".
-        // If we want to stop visiting, return false;
-        // otherwise return true to keep visiting.
-        return true
-    })
-    
-    // Let's get a snapshot.
-    snap := s.Snapshot()
-    snapCars := snap.GetCollection("cars")
-    
-    // The snapshot won't see modifications against the original Store.
-    c.Delete([]byte("mercedes"))
-    mercedesIsNil, err := c.Get([]byte("mercedes"))
-    mercedesPriceFromSnapshot, err := snapCars.Get([]byte("mercedes"))
-    
-    // Persist all the changes to disk.
-    s.Flush()
-    
-    f.Sync() // Some applications may also want to fsync the underlying file.
-    
-    // Now, other file readers can see the data, too.
-    f2, err := os.Open("/tmp/test.gkvlite")
-    s2, err := gkvlite.NewStore(f2)
-    c2 := s2.GetCollection("cars")
-    
-    bmwPrice, err := c2.Get([]byte("bmw"))
-
-Tips
-====
-
-Because all collections are persisted atomically when you flush a
-store to disk, you can implement consistent secondary indexes by
-maintaining additional collections per store.  For example, a "users"
-collection can hold a JSON document per user, keyed by userId.
-Another "userEmails" collection can be used like a secondary index,
-keyed by "emailAddress:userId", with empty values (e.g., []byte{}).
-
-Bulk inserts or batched mutations are roughly supported in gkvlite
-where your application should only occasionally invoke Flush() after N
-mutations or after a given amount of time, as opposed to invoking a
-Flush() after every Set/Delete().
-
-Implementation / design
-=======================
-
-The fundamental data structure is an immutable treap (tree + heap).
-
-When used with random heap item priorities, treaps have probabilistic
-balanced tree behavior with the usual O(log N) performance bounds
-expected of balanced binary trees.
-
-The persistence design is append-only, using ideas from Apache CouchDB
-and Couchstore / Couchbase, providing a simple approach to reaching
-ACID properties in the face of process or machine crashes.  On
-re-opening a file, the implementation scans the file backwards looking
-for the last good root record and logically "truncates" the file at
-that point.  New mutations are appended from that last good root
-location.  This follows the MVCC (multi-version concurrency control)
-and "the log is the database" approach of CouchDB / Couchstore /
-Couchbase.
-
-TRADEOFF: the append-only persistence design means file sizes will
-grow until there's a compaction.  To get a compacted file, use
-CopyTo() with a high "flushEvery" argument.
-
-The append-only file format allows the FlushRevert() API (undo the
-changes on a file) to have a simple implementation of scanning
-backwards in the file for the next-to-last good root record and
-physically truncating the file at that point.
-
-TRADEOFF: the append-only design means it's possible for an advanced
-adversary to corrupt a gkvlite file by cleverly storing the bytes of a
-valid gkvlite root record as a value; however, they would need to know
-the size of the containing gkvlite database file in order to compute a
-valid gkvlite root record and be able to force a process or machine
-crash after their fake root record is written but before the next good
-root record is written/sync'ed.
-
-The immutable, copy-on-write treap plus the append-only persistence
-design allows for fast and efficient MVCC snapshots.
-
-TRADEOFF: the immutable, copy-on-write design means more memory
-garbage may be created than other designs, meaning more work for the
-garbage collector (GC).
-
-TODO / ideas
-============
-
-* TODO: Performance: consider splitting item storage from node
-  storage, so we're not mixing metadata and data in same cache pages.
-  Need to measure how much win this could be in cases like compaction.
-  Tradeoff as this could mean no more single file simplicity.
-
-* TODO: Performance: persist items as log, and don't write treap nodes
-  on every Flush().
-
-* TODO: Keep stats on misses, disk fetches & writes, etc.
-
-* TODO: Provide public API for O(log N) collection splitting & joining.
-
-* TODO: Provide O(1) MidItem() or TopItem() implementation, so that
-  users can split collections at decent points.
-
-* TODO: Provide item priority shifting during CopyTo().
-
-* TODO: Build up perfectly balanced treap from large batch of
-  (potentially externally) sorted items.
-
-* TODO: Allow users to retrieve an item's value size (in bytes)
-  without having to first fetch the item into memory.
-
-* TODO: Allow more fine-grained cached item and node eviction.  Node
-  and item objects are currently cached in-memory by gkvlite for
-  higher retrieval performance, only for the nodes & items that you
-  either have updated or have fetched from disk.  Certain applications
-  may need that memory instead, though, for more important tasks.  The
-  current coarse workaround is to drop all your references to any
-  relevant Stores and Collections, start brand new Store/Collection
-  instances, and let GC reclaim memory.
-
-* See more TODO's throughout codebase / grep.
diff --git a/go/src/github.com/steveyen/gkvlite/alloc.go b/go/src/github.com/steveyen/gkvlite/alloc.go
deleted file mode 100644
index 803d758..0000000
--- a/go/src/github.com/steveyen/gkvlite/alloc.go
+++ /dev/null
@@ -1,252 +0,0 @@
-package gkvlite
-
-import (
-	"fmt"
-	"sync"
-	"sync/atomic"
-	"unsafe"
-)
-
-var freeNodeLock sync.Mutex
-var freeNodes *node
-
-var freeNodeLocLock sync.Mutex
-var freeNodeLocs *nodeLoc
-
-var freeRootNodeLocLock sync.Mutex
-var freeRootNodeLocs *rootNodeLoc
-
-var allocStats AllocStats
-
-type AllocStats struct {
-	MkNodes      int64
-	FreeNodes    int64 // Number of invocations of the freeNode() API.
-	AllocNodes   int64
-	CurFreeNodes int64 // Current length of freeNodes list.
-
-	MkNodeLocs      int64
-	FreeNodeLocs    int64 // Number of invocations of the freeNodeLoc() API.
-	AllocNodeLocs   int64
-	CurFreeNodeLocs int64 // Current length of freeNodeLocs list.
-
-	MkRootNodeLocs      int64
-	FreeRootNodeLocs    int64 // Number of invocations of the freeRootNodeLoc() API.
-	AllocRootNodeLocs   int64
-	CurFreeRootNodeLocs int64 // Current length of freeRootNodeLocs list.
-}
-
-func withAllocLocks(cb func()) {
-	freeNodeLock.Lock()
-	freeNodeLocLock.Lock()
-	freeRootNodeLocLock.Lock()
-	defer freeNodeLock.Unlock()
-	defer freeNodeLocLock.Unlock()
-	defer freeRootNodeLocLock.Unlock()
-	cb()
-}
-
-func (t *Collection) markReclaimable(n *node, reclaimMark *node) {
-	t.rootLock.Lock()
-	defer t.rootLock.Unlock()
-	if n == nil || n.next != nil || n == reclaimMark {
-		return
-	}
-	n.next = reclaimMark
-}
-
-func (t *Collection) reclaimMarkUpdate(nloc *nodeLoc,
-	oldReclaimMark, newReclaimMark *node) *node {
-	if nloc.isEmpty() {
-		return nil
-	}
-	n := nloc.Node()
-	t.rootLock.Lock()
-	if n != nil && n.next == oldReclaimMark {
-		n.next = newReclaimMark
-		t.rootLock.Unlock()
-		t.reclaimMarkUpdate(&n.left, oldReclaimMark, newReclaimMark)
-		t.reclaimMarkUpdate(&n.right, oldReclaimMark, newReclaimMark)
-	} else {
-		t.rootLock.Unlock()
-	}
-	return n
-}
-
-func (t *Collection) reclaimNodes_unlocked(n *node,
-	reclaimLater *[3]*node, reclaimMark *node) int64 {
-	if n == nil {
-		return 0
-	}
-	if reclaimLater != nil {
-		for i := 0; i < len(reclaimLater); i++ {
-			if reclaimLater[i] == n {
-				reclaimLater[i] = nil
-			}
-		}
-	}
-	if n.next != reclaimMark {
-		return 0
-	}
-	var left *node
-	var right *node
-	if !n.left.isEmpty() {
-		left = n.left.Node()
-	}
-	if !n.right.isEmpty() {
-		right = n.right.Node()
-	}
-	t.freeNode_unlocked(n, reclaimMark)
-	numLeft := t.reclaimNodes_unlocked(left, reclaimLater, reclaimMark)
-	numRight := t.reclaimNodes_unlocked(right, reclaimLater, reclaimMark)
-	return 1 + numLeft + numRight
-}
-
-// Assumes that the caller serializes invocations.
-func (t *Collection) mkNode(itemIn *itemLoc, leftIn *nodeLoc, rightIn *nodeLoc,
-	numNodesIn uint64, numBytesIn uint64) *node {
-	freeNodeLock.Lock()
-	allocStats.MkNodes++
-	t.allocStats.MkNodes++
-	n := freeNodes
-	if n == nil {
-		allocStats.AllocNodes++
-		t.allocStats.AllocNodes++
-		freeNodeLock.Unlock()
-		atomic.AddUint64(&t.store.nodeAllocs, 1)
-		n = &node{}
-	} else {
-		freeNodes = n.next
-		allocStats.CurFreeNodes--
-		freeNodeLock.Unlock()
-	}
-	if itemIn != nil {
-		i := itemIn.Item()
-		if i != nil {
-			t.store.ItemAddRef(t, i)
-		}
-	}
-	n.item.Copy(itemIn)
-	n.left.Copy(leftIn)
-	n.right.Copy(rightIn)
-	n.numNodes = numNodesIn
-	n.numBytes = numBytesIn
-	n.next = nil
-	return n
-}
-
-func (t *Collection) freeNode_unlocked(n *node, reclaimMark *node) {
-	if n == nil || n == reclaimMark {
-		return
-	}
-	if n.next != nil && n.next != reclaimMark {
-		panic("double free node")
-	}
-	i := n.item.Item()
-	if i != nil {
-		t.store.ItemDecRef(t, i)
-	}
-	n.item = *empty_itemLoc
-	n.left = *empty_nodeLoc
-	n.right = *empty_nodeLoc
-	n.numNodes = 0
-	n.numBytes = 0
-	n.next = freeNodes
-	freeNodes = n
-	allocStats.CurFreeNodes++
-	allocStats.FreeNodes++
-	t.allocStats.FreeNodes++
-}
-
-// Assumes that the caller serializes invocations.
-func (t *Collection) mkNodeLoc(n *node) *nodeLoc {
-	freeNodeLocLock.Lock()
-	allocStats.MkNodeLocs++
-	t.allocStats.MkNodeLocs++
-	nloc := freeNodeLocs
-	if nloc == nil {
-		allocStats.AllocNodeLocs++
-		t.allocStats.AllocNodeLocs++
-		freeNodeLocLock.Unlock()
-		nloc = &nodeLoc{}
-	} else {
-		freeNodeLocs = nloc.next
-		allocStats.CurFreeNodeLocs--
-		freeNodeLocLock.Unlock()
-	}
-	nloc.loc = unsafe.Pointer(nil)
-	nloc.node = unsafe.Pointer(n)
-	nloc.next = nil
-	return nloc
-}
-
-// Assumes that the caller serializes invocations.
-func (t *Collection) freeNodeLoc(nloc *nodeLoc) {
-	if nloc == nil || nloc == empty_nodeLoc {
-		return
-	}
-	if nloc.next != nil {
-		panic("double free nodeLoc")
-	}
-	nloc.loc = unsafe.Pointer(nil)
-	nloc.node = unsafe.Pointer(nil)
-
-	freeNodeLocLock.Lock()
-	nloc.next = freeNodeLocs
-	freeNodeLocs = nloc
-	allocStats.CurFreeNodeLocs++
-	allocStats.FreeNodeLocs++
-	t.allocStats.FreeNodeLocs++
-	freeNodeLocLock.Unlock()
-}
-
-func (t *Collection) mkRootNodeLoc(root *nodeLoc) *rootNodeLoc {
-	freeRootNodeLocLock.Lock()
-	allocStats.MkRootNodeLocs++
-	t.allocStats.MkRootNodeLocs++
-	rnl := freeRootNodeLocs
-	if rnl == nil {
-		allocStats.AllocRootNodeLocs++
-		t.allocStats.AllocRootNodeLocs++
-		freeRootNodeLocLock.Unlock()
-		rnl = &rootNodeLoc{}
-	} else {
-		freeRootNodeLocs = rnl.next
-		allocStats.CurFreeRootNodeLocs--
-		freeRootNodeLocLock.Unlock()
-	}
-	rnl.refs = 1
-	rnl.root = root
-	rnl.next = nil
-	rnl.chainedCollection = nil
-	rnl.chainedRootNodeLoc = nil
-	for i := 0; i < len(rnl.reclaimLater); i++ {
-		rnl.reclaimLater[i] = nil
-	}
-	return rnl
-}
-
-func (t *Collection) freeRootNodeLoc(rnl *rootNodeLoc) {
-	if rnl == nil {
-		return
-	}
-	if rnl.next != nil {
-		panic("double free rootNodeLoc")
-	}
-	rnl.refs = 0
-	rnl.root = nil
-	rnl.chainedCollection = nil
-	rnl.chainedRootNodeLoc = nil
-	for i := 0; i < len(rnl.reclaimLater); i++ {
-		if rnl.reclaimLater[i] != nil {
-			panic(fmt.Sprintf("non-nil rnl.reclaimLater[%d]: %v",
-				i, rnl.reclaimLater[i]))
-		}
-	}
-	freeRootNodeLocLock.Lock()
-	rnl.next = freeRootNodeLocs
-	freeRootNodeLocs = rnl
-	allocStats.CurFreeRootNodeLocs++
-	allocStats.FreeRootNodeLocs++
-	t.allocStats.FreeRootNodeLocs++
-	freeRootNodeLocLock.Unlock()
-}
diff --git a/go/src/github.com/steveyen/gkvlite/collection.go b/go/src/github.com/steveyen/gkvlite/collection.go
deleted file mode 100644
index 6e92191..0000000
--- a/go/src/github.com/steveyen/gkvlite/collection.go
+++ /dev/null
@@ -1,476 +0,0 @@
-package gkvlite
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"math/rand"
-	"sync"
-	"sync/atomic"
-	"unsafe"
-)
-
-// User-supplied key comparison func should return 0 if a == b,
-// -1 if a < b, and +1 if a > b.  For example: bytes.Compare()
-type KeyCompare func(a, b []byte) int
-
-// A persistable collection of ordered key-values (Item's).
-type Collection struct {
-	name    string // May be "" for a private collection.
-	store   *Store
-	compare KeyCompare
-
-	rootLock *sync.Mutex
-	root     *rootNodeLoc // Protected by rootLock.
-
-	allocStats AllocStats // User must serialize access (e.g., see locks in alloc.go).
-
-	AppData unsafe.Pointer // For app-specific data; atomic CAS recommended.
-}
-
-type rootNodeLoc struct {
-	// The rootNodeLoc fields are protected by Collection.rootLock.
-	refs int64 // Reference counter.
-	root *nodeLoc
-	next *rootNodeLoc // For free-list tracking.
-
-	reclaimMark node // Address is used as a sentinel.
-
-	// We might own a reference count on another Collection/rootNodeLoc.
-	// When our reference drops to 0 and we're free'd, then also release
-	// our reference count on the next guy in the chain.
-	chainedCollection  *Collection
-	chainedRootNodeLoc *rootNodeLoc
-
-	// More nodes to maybe reclaim when our reference count goes to 0.
-	// But they might be repeated, so we scan for them during reclamation.
-	reclaimLater [3]*node
-}
-
-func (t *Collection) Name() string {
-	return t.name
-}
-
-func (t *Collection) closeCollection() { // Just "close" is a keyword.
-	if t == nil {
-		return
-	}
-	t.rootLock.Lock()
-	r := t.root
-	t.root = nil
-	t.rootLock.Unlock()
-	if r != nil {
-		t.reclaimMarkUpdate(r.root, nil, &r.reclaimMark)
-		t.rootDecRef(r)
-	}
-}
-
-// Retrieve an item by its key.  Use withValue of false if you don't
-// need the item's value (Item.Val may be nil), which might be able
-// to save on I/O and memory resources, especially for large values.
-// The returned Item should be treated as immutable.
-func (t *Collection) GetItem(key []byte, withValue bool) (i *Item, err error) {
-	rnl := t.rootAddRef()
-	defer t.rootDecRef(rnl)
-	n := rnl.root
-	for {
-		nNode, err := n.read(t.store)
-		if err != nil || n.isEmpty() || nNode == nil {
-			return nil, err
-		}
-		i := &nNode.item
-		iItem, err := i.read(t, false)
-		if err != nil {
-			return nil, err
-		}
-		if iItem == nil || iItem.Key == nil {
-			return nil, errors.New("missing item after item.read() in GetItem()")
-		}
-		c := t.compare(key, iItem.Key)
-		if c < 0 {
-			n = &nNode.left
-		} else if c > 0 {
-			n = &nNode.right
-		} else {
-			if withValue {
-				iItem, err = i.read(t, withValue)
-				if err != nil {
-					return nil, err
-				}
-			}
-			t.store.ItemAddRef(t, iItem)
-			return iItem, nil
-		}
-	}
-}
-
-// Retrieve a value by its key.  Returns nil if the item is not in the
-// collection.  The returned value should be treated as immutable.
-func (t *Collection) Get(key []byte) (val []byte, err error) {
-	i, err := t.GetItem(key, true)
-	if err != nil {
-		return nil, err
-	}
-	if i != nil {
-		return i.Val, nil
-	}
-	return nil, nil
-}
-
-// Replace or insert an item of a given key.
-// A random item Priority (e.g., rand.Int31()) will usually work well,
-// but advanced users may consider using non-random item priorities
-// at the risk of unbalancing the lookup tree.  The input Item instance
-// should be considered immutable and owned by the Collection.
-func (t *Collection) SetItem(item *Item) (err error) {
-	if t.store.readOnly {
-		return errors.New("store is read only")
-	}
-	if item.Key == nil || len(item.Key) > 0xffff || len(item.Key) == 0 ||
-		item.Val == nil {
-		return errors.New("Item.Key/Val missing or too long")
-	}
-	if item.Priority < 0 {
-		return errors.New("Item.Priority must be non-negative")
-	}
-	rnl := t.rootAddRef()
-	defer t.rootDecRef(rnl)
-	root := rnl.root
-	n := t.mkNode(nil, nil, nil, 1, uint64(len(item.Key))+uint64(item.NumValBytes(t)))
-	t.store.ItemAddRef(t, item)
-	n.item.item = unsafe.Pointer(item) // Avoid garbage via separate init.
-	nloc := t.mkNodeLoc(n)
-	defer t.freeNodeLoc(nloc)
-	r, err := t.store.union(t, root, nloc, &rnl.reclaimMark)
-	if err != nil {
-		return err
-	}
-	rnlNew := t.mkRootNodeLoc(r)
-	// Can't reclaim n right now because r might point to n.
-	rnlNew.reclaimLater[0] = t.reclaimMarkUpdate(nloc,
-		&rnl.reclaimMark, &rnlNew.reclaimMark)
-	if !t.rootCAS(rnl, rnlNew) {
-		return errors.New("concurrent mutation attempted")
-	}
-	t.rootDecRef(rnl)
-	return nil
-}
-
-// Replace or insert an item of a given key.
-func (t *Collection) Set(key []byte, val []byte) error {
-	return t.SetItem(&Item{Key: key, Val: val, Priority: rand.Int31()})
-}
-
-// Deletes an item of a given key.
-func (t *Collection) Delete(key []byte) (wasDeleted bool, err error) {
-	if t.store.readOnly {
-		return false, errors.New("store is read only")
-	}
-	rnl := t.rootAddRef()
-	defer t.rootDecRef(rnl)
-	root := rnl.root
-	i, err := t.GetItem(key, false)
-	if err != nil || i == nil {
-		return false, err
-	}
-	t.store.ItemDecRef(t, i)
-	left, middle, right, err := t.store.split(t, root, key, &rnl.reclaimMark)
-	if err != nil {
-		return false, err
-	}
-	defer t.freeNodeLoc(left)
-	defer t.freeNodeLoc(right)
-	defer t.freeNodeLoc(middle)
-	if middle.isEmpty() {
-		return false, fmt.Errorf("concurrent delete, key: %v", key)
-	}
-	r, err := t.store.join(t, left, right, &rnl.reclaimMark)
-	if err != nil {
-		return false, err
-	}
-	rnlNew := t.mkRootNodeLoc(r)
-	// Can't reclaim immediately due to readers.
-	rnlNew.reclaimLater[0] = t.reclaimMarkUpdate(left,
-		&rnl.reclaimMark, &rnlNew.reclaimMark)
-	rnlNew.reclaimLater[1] = t.reclaimMarkUpdate(right,
-		&rnl.reclaimMark, &rnlNew.reclaimMark)
-	rnlNew.reclaimLater[2] = t.reclaimMarkUpdate(middle,
-		&rnl.reclaimMark, &rnlNew.reclaimMark)
-	t.markReclaimable(rnlNew.reclaimLater[2], &rnlNew.reclaimMark)
-	if !t.rootCAS(rnl, rnlNew) {
-		return false, errors.New("concurrent mutation attempted")
-	}
-	t.rootDecRef(rnl)
-	return true, nil
-}
-
-// Retrieves the item with the "smallest" key.
-// The returned item should be treated as immutable.
-func (t *Collection) MinItem(withValue bool) (*Item, error) {
-	return t.store.walk(t, withValue,
-		func(n *node) (*nodeLoc, bool) { return &n.left, true })
-}
-
-// Retrieves the item with the "largest" key.
-// The returned item should be treated as immutable.
-func (t *Collection) MaxItem(withValue bool) (*Item, error) {
-	return t.store.walk(t, withValue,
-		func(n *node) (*nodeLoc, bool) { return &n.right, true })
-}
-
-// Evict some clean items found by randomly walking a tree branch.
-// For concurrent users, only the single mutator thread should call
-// EvictSomeItems(), making it serialized with mutations.
-func (t *Collection) EvictSomeItems() (numEvicted uint64) {
-	if t.store.readOnly {
-		return 0
-	}
-	i, err := t.store.walk(t, false, func(n *node) (*nodeLoc, bool) {
-		if !n.item.Loc().isEmpty() {
-			i := n.item.Item()
-			if i != nil && atomic.CompareAndSwapPointer(&n.item.item,
-				unsafe.Pointer(i), unsafe.Pointer(nil)) {
-				t.store.ItemDecRef(t, i)
-				numEvicted++
-			}
-		}
-		next := &n.left
-		if (rand.Int() & 0x01) == 0x01 {
-			next = &n.right
-		}
-		if next.isEmpty() {
-			return nil, false
-		}
-		return next, true
-	})
-	if i != nil && err != nil {
-		t.store.ItemDecRef(t, i)
-	}
-	return numEvicted
-}
-
-type ItemVisitor func(i *Item) bool
-type ItemVisitorEx func(i *Item, depth uint64) bool
-
-// Visit items greater-than-or-equal to the target key in ascending order.
-func (t *Collection) VisitItemsAscend(target []byte, withValue bool, v ItemVisitor) error {
-	return t.VisitItemsAscendEx(target, withValue,
-		func(i *Item, depth uint64) bool { return v(i) })
-}
-
-// Visit items less-than the target key in descending order.
-func (t *Collection) VisitItemsDescend(target []byte, withValue bool, v ItemVisitor) error {
-	return t.VisitItemsDescendEx(target, withValue,
-		func(i *Item, depth uint64) bool { return v(i) })
-}
-
-// Visit items greater-than-or-equal to the target key in ascending order; with depth info.
-func (t *Collection) VisitItemsAscendEx(target []byte, withValue bool,
-	visitor ItemVisitorEx) error {
-	rnl := t.rootAddRef()
-	defer t.rootDecRef(rnl)
-
-	var prevVisitItem *Item
-	var errCheckedVisitor error
-
-	checkedVisitor := func(i *Item, depth uint64) bool {
-		if prevVisitItem != nil && t.compare(prevVisitItem.Key, i.Key) > 0 {
-			errCheckedVisitor = fmt.Errorf("corrupted / out-of-order index"+
-				", key: %s vs %s, coll: %p, collName: %s, store: %p, storeFile: %v",
-				string(prevVisitItem.Key), string(i.Key), t, t.name, t.store, t.store.file)
-			return false
-		}
-		prevVisitItem = i
-		return visitor(i, depth)
-	}
-
-	_, err := t.store.visitNodes(t, rnl.root,
-		target, withValue, checkedVisitor, 0, ascendChoice)
-	if errCheckedVisitor != nil {
-		return errCheckedVisitor
-	}
-	return err
-}
-
-// Visit items less-than the target key in descending order; with depth info.
-func (t *Collection) VisitItemsDescendEx(target []byte, withValue bool,
-	visitor ItemVisitorEx) error {
-	rnl := t.rootAddRef()
-	defer t.rootDecRef(rnl)
-
-	_, err := t.store.visitNodes(t, rnl.root,
-		target, withValue, visitor, 0, descendChoice)
-	return err
-}
-
-func ascendChoice(cmp int, n *node) (bool, *nodeLoc, *nodeLoc) {
-	return cmp <= 0, &n.left, &n.right
-}
-
-func descendChoice(cmp int, n *node) (bool, *nodeLoc, *nodeLoc) {
-	return cmp > 0, &n.right, &n.left
-}
-
-// Returns total number of items and total key bytes plus value bytes.
-func (t *Collection) GetTotals() (numItems uint64, numBytes uint64, err error) {
-	rnl := t.rootAddRef()
-	defer t.rootDecRef(rnl)
-	n := rnl.root
-	nNode, err := n.read(t.store)
-	if err != nil || n.isEmpty() || nNode == nil {
-		return 0, 0, err
-	}
-	return nNode.numNodes, nNode.numBytes, nil
-}
-
-// Returns JSON representation of root node file location.
-func (t *Collection) MarshalJSON() ([]byte, error) {
-	rnl := t.rootAddRef()
-	defer t.rootDecRef(rnl)
-	return rnl.MarshalJSON()
-}
-
-// Returns JSON representation of root node file location.
-func (rnl *rootNodeLoc) MarshalJSON() ([]byte, error) {
-	loc := rnl.root.Loc()
-	if loc.isEmpty() {
-		return json.Marshal(ploc_empty)
-	}
-	return json.Marshal(loc)
-}
-
-// Unmarshals JSON representation of root node file location.
-func (t *Collection) UnmarshalJSON(d []byte) error {
-	p := ploc{}
-	if err := json.Unmarshal(d, &p); err != nil {
-		return err
-	}
-	if t.rootLock == nil {
-		t.rootLock = &sync.Mutex{}
-	}
-	nloc := t.mkNodeLoc(nil)
-	nloc.loc = unsafe.Pointer(&p)
-	if !t.rootCAS(nil, t.mkRootNodeLoc(nloc)) {
-		return errors.New("concurrent mutation during UnmarshalJSON().")
-	}
-	return nil
-}
-
-func (t *Collection) AllocStats() (res AllocStats) {
-	withAllocLocks(func() { res = t.allocStats })
-	return res
-}
-
-// Writes dirty items of a collection BUT (WARNING) does NOT write new
-// root records.  Use Store.Flush() to write root records, which would
-// make these writes visible to the next file re-opening/re-loading.
-func (t *Collection) Write() error {
-	if t.store.readOnly {
-		return errors.New("store is read only")
-	}
-	rnl := t.rootAddRef()
-	defer t.rootDecRef(rnl)
-	return t.write(rnl.root)
-}
-
-func (t *Collection) write(nloc *nodeLoc) error {
-	if err := t.writeItems(nloc); err != nil {
-		return err
-	}
-	if err := t.writeNodes(nloc); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (t *Collection) writeItems(nloc *nodeLoc) (err error) {
-	if nloc == nil || !nloc.Loc().isEmpty() {
-		return nil // Write only unpersisted items of non-empty, unpersisted nodes.
-	}
-	node := nloc.Node()
-	if node == nil {
-		return nil
-	}
-	if err = t.writeItems(&node.left); err != nil {
-		return err
-	}
-	if err = node.item.write(t); err != nil { // Write items in key order.
-		return err
-	}
-	return t.writeItems(&node.right)
-}
-
-func (t *Collection) writeNodes(nloc *nodeLoc) (err error) {
-	if nloc == nil || !nloc.Loc().isEmpty() {
-		return nil // Write only non-empty, unpersisted nodes.
-	}
-	node := nloc.Node()
-	if node == nil {
-		return nil
-	}
-	if err = t.writeNodes(&node.left); err != nil {
-		return err
-	}
-	if err = t.writeNodes(&node.right); err != nil {
-		return err
-	}
-	return nloc.write(t.store) // Write nodes in children-first order.
-}
-
-func (t *Collection) rootCAS(prev, next *rootNodeLoc) bool {
-	t.rootLock.Lock()
-	defer t.rootLock.Unlock()
-
-	if t.root != prev {
-		return false // TODO: Callers need to release resources.
-	}
-	t.root = next
-
-	if prev != nil && prev.refs > 2 {
-		// Since the prev is in-use, hook up its chain to disallow
-		// next's nodes from being reclaimed until prev is done.
-		if prev.chainedCollection != nil ||
-			prev.chainedRootNodeLoc != nil {
-			panic(fmt.Sprintf("chain already taken, coll: %v", t.Name()))
-		}
-		prev.chainedCollection = t
-		prev.chainedRootNodeLoc = t.root
-		t.root.refs++ // This ref is owned by prev.
-	}
-
-	return true
-}
-
-func (t *Collection) rootAddRef() *rootNodeLoc {
-	t.rootLock.Lock()
-	defer t.rootLock.Unlock()
-	t.root.refs++
-	return t.root
-}
-
-func (t *Collection) rootDecRef(r *rootNodeLoc) {
-	t.rootLock.Lock()
-	freeNodeLock.Lock()
-	t.rootDecRef_unlocked(r)
-	freeNodeLock.Unlock()
-	t.rootLock.Unlock()
-}
-
-func (t *Collection) rootDecRef_unlocked(r *rootNodeLoc) {
-	r.refs--
-	if r.refs > 0 {
-		return
-	}
-	if r.chainedCollection != nil && r.chainedRootNodeLoc != nil {
-		r.chainedCollection.rootDecRef_unlocked(r.chainedRootNodeLoc)
-	}
-	t.reclaimNodes_unlocked(r.root.Node(), &r.reclaimLater, &r.reclaimMark)
-	for i := 0; i < len(r.reclaimLater); i++ {
-		if r.reclaimLater[i] != nil {
-			t.reclaimNodes_unlocked(r.reclaimLater[i], nil, &r.reclaimMark)
-			r.reclaimLater[i] = nil
-		}
-	}
-	t.freeNodeLoc(r.root)
-	t.freeRootNodeLoc(r)
-}
diff --git a/go/src/github.com/steveyen/gkvlite/item.go b/go/src/github.com/steveyen/gkvlite/item.go
deleted file mode 100644
index 51c7ece..0000000
--- a/go/src/github.com/steveyen/gkvlite/item.go
+++ /dev/null
@@ -1,196 +0,0 @@
-package gkvlite
-
-import (
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"sync/atomic"
-	"unsafe"
-)
-
-// A persistable item.
-type Item struct {
-	Transient unsafe.Pointer // For any ephemeral data; atomic CAS recommended.
-	Key, Val  []byte         // Val may be nil if not fetched into memory yet.
-	Priority  int32          // Use rand.Int31() for probabilistic balancing.
-}
-
-// A persistable item and its persistence location.
-type itemLoc struct {
-	loc  unsafe.Pointer // *ploc - can be nil if item is dirty (not yet persisted).
-	item unsafe.Pointer // *Item - can be nil if item is not fetched into memory yet.
-}
-
-var empty_itemLoc = &itemLoc{}
-
-// Number of Key bytes plus number of Val bytes.
-func (i *Item) NumBytes(c *Collection) int {
-	return len(i.Key) + i.NumValBytes(c)
-}
-
-func (i *Item) NumValBytes(c *Collection) int {
-	if c.store.callbacks.ItemValLength != nil {
-		return c.store.callbacks.ItemValLength(c, i)
-	}
-	return len(i.Val)
-}
-
-// The returned Item will not have been allocated through the optional
-// StoreCallbacks.ItemAlloc() callback.
-func (i *Item) Copy() *Item {
-	return &Item{
-		Key:       i.Key,
-		Val:       i.Val,
-		Priority:  i.Priority,
-		Transient: i.Transient,
-	}
-}
-
-func (i *itemLoc) Loc() *ploc {
-	return (*ploc)(atomic.LoadPointer(&i.loc))
-}
-
-func (i *itemLoc) Item() *Item {
-	return (*Item)(atomic.LoadPointer(&i.item))
-}
-
-func (i *itemLoc) Copy(src *itemLoc) {
-	if src == nil {
-		i.Copy(empty_itemLoc)
-		return
-	}
-	atomic.StorePointer(&i.loc, unsafe.Pointer(src.Loc()))
-	atomic.StorePointer(&i.item, unsafe.Pointer(src.Item()))
-}
-
-const itemLoc_hdrLength int = 4 + 2 + 4 + 4
-
-func (i *itemLoc) write(c *Collection) (err error) {
-	if i.Loc().isEmpty() {
-		iItem := i.Item()
-		if iItem == nil {
-			return errors.New("itemLoc.write with nil item")
-		}
-		if c.store.callbacks.BeforeItemWrite != nil {
-			iItem, err = c.store.callbacks.BeforeItemWrite(c, iItem)
-			if err != nil {
-				return err
-			}
-		}
-		offset := atomic.LoadInt64(&c.store.size)
-		hlength := itemLoc_hdrLength + len(iItem.Key)
-		vlength := iItem.NumValBytes(c)
-		ilength := hlength + vlength
-		b := make([]byte, hlength)
-		pos := 0
-		binary.BigEndian.PutUint32(b[pos:pos+4], uint32(ilength))
-		pos += 4
-		binary.BigEndian.PutUint16(b[pos:pos+2], uint16(len(iItem.Key)))
-		pos += 2
-		binary.BigEndian.PutUint32(b[pos:pos+4], uint32(vlength))
-		pos += 4
-		binary.BigEndian.PutUint32(b[pos:pos+4], uint32(iItem.Priority))
-		pos += 4
-		pos += copy(b[pos:], iItem.Key)
-		if pos != hlength {
-			return fmt.Errorf("itemLoc.write() pos: %v didn't match hlength: %v",
-				pos, hlength)
-		}
-		if _, err := c.store.file.WriteAt(b, offset); err != nil {
-			return err
-		}
-		err := c.store.ItemValWrite(c, iItem, c.store.file, offset+int64(pos))
-		if err != nil {
-			return err
-		}
-		atomic.StoreInt64(&c.store.size, offset+int64(ilength))
-		atomic.StorePointer(&i.loc,
-			unsafe.Pointer(&ploc{Offset: offset, Length: uint32(ilength)}))
-	}
-	return nil
-}
-
-func (iloc *itemLoc) read(c *Collection, withValue bool) (icur *Item, err error) {
-	if iloc == nil {
-		return nil, nil
-	}
-	icur = iloc.Item()
-	if icur == nil || (icur.Val == nil && withValue) {
-		loc := iloc.Loc()
-		if loc.isEmpty() {
-			return nil, nil
-		}
-		if loc.Length < uint32(itemLoc_hdrLength) {
-			return nil, fmt.Errorf("unexpected item loc.Length: %v < %v",
-				loc.Length, itemLoc_hdrLength)
-		}
-		b := make([]byte, itemLoc_hdrLength)
-		if _, err := c.store.file.ReadAt(b, loc.Offset); err != nil {
-			return nil, err
-		}
-		pos := 0
-		length := binary.BigEndian.Uint32(b[pos : pos+4])
-		pos += 4
-		keyLength := binary.BigEndian.Uint16(b[pos : pos+2])
-		pos += 2
-		valLength := binary.BigEndian.Uint32(b[pos : pos+4])
-		pos += 4
-		i := c.store.ItemAlloc(c, keyLength)
-		if i == nil {
-			return nil, errors.New("ItemAlloc() failed")
-		}
-		i.Priority = int32(binary.BigEndian.Uint32(b[pos : pos+4]))
-		pos += 4
-		if length != uint32(itemLoc_hdrLength)+uint32(keyLength)+valLength {
-			c.store.ItemDecRef(c, i)
-			return nil, errors.New("mismatched itemLoc lengths")
-		}
-		if pos != itemLoc_hdrLength {
-			c.store.ItemDecRef(c, i)
-			return nil, fmt.Errorf("read pos != itemLoc_hdrLength, %v != %v",
-				pos, itemLoc_hdrLength)
-		}
-		if _, err := c.store.file.ReadAt(i.Key,
-			loc.Offset+int64(itemLoc_hdrLength)); err != nil {
-			c.store.ItemDecRef(c, i)
-			return nil, err
-		}
-		if withValue {
-			err := c.store.ItemValRead(c, i, c.store.file,
-				loc.Offset+int64(itemLoc_hdrLength)+int64(keyLength), valLength)
-			if err != nil {
-				c.store.ItemDecRef(c, i)
-				return nil, err
-			}
-		}
-		if c.store.callbacks.AfterItemRead != nil {
-			i, err = c.store.callbacks.AfterItemRead(c, i)
-			if err != nil {
-				c.store.ItemDecRef(c, i)
-				return nil, err
-			}
-		}
-		if !atomic.CompareAndSwapPointer(&iloc.item,
-			unsafe.Pointer(icur), unsafe.Pointer(i)) {
-			c.store.ItemDecRef(c, i)
-			return iloc.read(c, withValue)
-		}
-		if icur != nil {
-			c.store.ItemDecRef(c, icur)
-		}
-		icur = i
-	}
-	return icur, nil
-}
-
-func (iloc *itemLoc) NumBytes(c *Collection) int {
-	loc := iloc.Loc()
-	if loc.isEmpty() {
-		i := iloc.Item()
-		if i == nil {
-			return 0
-		}
-		return i.NumBytes(c)
-	}
-	return int(loc.Length) - itemLoc_hdrLength
-}
diff --git a/go/src/github.com/steveyen/gkvlite/node.go b/go/src/github.com/steveyen/gkvlite/node.go
deleted file mode 100644
index b929f6c..0000000
--- a/go/src/github.com/steveyen/gkvlite/node.go
+++ /dev/null
@@ -1,164 +0,0 @@
-package gkvlite
-
-import (
-	"encoding/binary"
-	"fmt"
-	"sync/atomic"
-	"unsafe"
-)
-
-// A persistable node.
-type node struct {
-	numNodes, numBytes uint64
-	item               itemLoc
-	left, right        nodeLoc
-	next               *node // For free-list tracking.
-}
-
-// A persistable node and its persistence location.
-type nodeLoc struct {
-	loc  unsafe.Pointer // *ploc - can be nil if node is dirty (not yet persisted).
-	node unsafe.Pointer // *node - can be nil if node is not fetched into memory yet.
-	next *nodeLoc       // For free-list tracking.
-}
-
-var empty_nodeLoc = &nodeLoc{} // Sentinel.
-
-func (nloc *nodeLoc) Loc() *ploc {
-	return (*ploc)(atomic.LoadPointer(&nloc.loc))
-}
-
-func (nloc *nodeLoc) Node() *node {
-	return (*node)(atomic.LoadPointer(&nloc.node))
-}
-
-func (nloc *nodeLoc) Copy(src *nodeLoc) *nodeLoc {
-	if src == nil {
-		return nloc.Copy(empty_nodeLoc)
-	}
-	atomic.StorePointer(&nloc.loc, unsafe.Pointer(src.Loc()))
-	atomic.StorePointer(&nloc.node, unsafe.Pointer(src.Node()))
-	return nloc
-}
-
-func (nloc *nodeLoc) isEmpty() bool {
-	return nloc == nil || (nloc.Loc().isEmpty() && nloc.Node() == nil)
-}
-
-func (nloc *nodeLoc) write(o *Store) error {
-	if nloc != nil && nloc.Loc().isEmpty() {
-		node := nloc.Node()
-		if node == nil {
-			return nil
-		}
-		offset := atomic.LoadInt64(&o.size)
-		length := ploc_length + ploc_length + ploc_length + 8 + 8
-		b := make([]byte, length)
-		pos := 0
-		pos = node.item.Loc().write(b, pos)
-		pos = node.left.Loc().write(b, pos)
-		pos = node.right.Loc().write(b, pos)
-		binary.BigEndian.PutUint64(b[pos:pos+8], node.numNodes)
-		pos += 8
-		binary.BigEndian.PutUint64(b[pos:pos+8], node.numBytes)
-		pos += 8
-		if pos != length {
-			return fmt.Errorf("nodeLoc.write() pos: %v didn't match length: %v",
-				pos, length)
-		}
-		if _, err := o.file.WriteAt(b, offset); err != nil {
-			return err
-		}
-		atomic.StoreInt64(&o.size, offset+int64(length))
-		atomic.StorePointer(&nloc.loc,
-			unsafe.Pointer(&ploc{Offset: offset, Length: uint32(length)}))
-	}
-	return nil
-}
-
-func (nloc *nodeLoc) read(o *Store) (n *node, err error) {
-	if nloc == nil {
-		return nil, nil
-	}
-	n = nloc.Node()
-	if n != nil {
-		return n, nil
-	}
-	loc := nloc.Loc()
-	if loc.isEmpty() {
-		return nil, nil
-	}
-	if loc.Length != uint32(ploc_length+ploc_length+ploc_length+8+8) {
-		return nil, fmt.Errorf("unexpected node loc.Length: %v != %v",
-			loc.Length, ploc_length+ploc_length+ploc_length+8+8)
-	}
-	b := make([]byte, loc.Length)
-	if _, err := o.file.ReadAt(b, loc.Offset); err != nil {
-		return nil, err
-	}
-	pos := 0
-	atomic.AddUint64(&o.nodeAllocs, 1)
-	n = &node{}
-	var p *ploc
-	p = &ploc{}
-	p, pos = p.read(b, pos)
-	n.item.loc = unsafe.Pointer(p)
-	p = &ploc{}
-	p, pos = p.read(b, pos)
-	n.left.loc = unsafe.Pointer(p)
-	p = &ploc{}
-	p, pos = p.read(b, pos)
-	n.right.loc = unsafe.Pointer(p)
-	n.numNodes = binary.BigEndian.Uint64(b[pos : pos+8])
-	pos += 8
-	n.numBytes = binary.BigEndian.Uint64(b[pos : pos+8])
-	pos += 8
-	if pos != len(b) {
-		return nil, fmt.Errorf("nodeLoc.read() pos: %v didn't match length: %v",
-			pos, len(b))
-	}
-	atomic.StorePointer(&nloc.node, unsafe.Pointer(n))
-	return n, nil
-}
-
-func numInfo(o *Store, left *nodeLoc, right *nodeLoc) (
-	leftNum uint64, leftBytes uint64, rightNum uint64, rightBytes uint64, err error) {
-	leftNode, err := left.read(o)
-	if err != nil {
-		return 0, 0, 0, 0, err
-	}
-	rightNode, err := right.read(o)
-	if err != nil {
-		return 0, 0, 0, 0, err
-	}
-	if !left.isEmpty() && leftNode != nil {
-		leftNum = leftNode.numNodes
-		leftBytes = leftNode.numBytes
-	}
-	if !right.isEmpty() && rightNode != nil {
-		rightNum = rightNode.numNodes
-		rightBytes = rightNode.numBytes
-	}
-	return leftNum, leftBytes, rightNum, rightBytes, nil
-}
-
-func dump(o *Store, n *nodeLoc, level int) {
-	if n.isEmpty() {
-		return
-	}
-	nNode, _ := n.read(o)
-	dump(o, &nNode.left, level+1)
-	dumpIndent(level)
-	k := "<evicted>"
-	if nNode.item.Item() != nil {
-		k = string(nNode.item.Item().Key)
-	}
-	fmt.Printf("%p - %v\n", nNode, k)
-	dump(o, &nNode.right, level+1)
-}
-
-func dumpIndent(level int) {
-	for i := 0; i < level; i++ {
-		fmt.Print(" ")
-	}
-}
diff --git a/go/src/github.com/steveyen/gkvlite/ploc.go b/go/src/github.com/steveyen/gkvlite/ploc.go
deleted file mode 100644
index 0749387..0000000
--- a/go/src/github.com/steveyen/gkvlite/ploc.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package gkvlite
-
-import (
-	"encoding/binary"
-)
-
-// Offset/location of a persisted range of bytes.
-type ploc struct {
-	Offset int64  `json:"o"` // Usable for os.ReadAt/WriteAt() at file offset 0.
-	Length uint32 `json:"l"` // Number of bytes.
-}
-
-const ploc_length int = 8 + 4
-
-var ploc_empty *ploc = &ploc{} // Sentinel.
-
-func (p *ploc) isEmpty() bool {
-	return p == nil || (p.Offset == int64(0) && p.Length == uint32(0))
-}
-
-func (p *ploc) write(b []byte, pos int) int {
-	if p == nil {
-		return ploc_empty.write(b, pos)
-	}
-	binary.BigEndian.PutUint64(b[pos:pos+8], uint64(p.Offset))
-	pos += 8
-	binary.BigEndian.PutUint32(b[pos:pos+4], p.Length)
-	pos += 4
-	return pos
-}
-
-func (p *ploc) read(b []byte, pos int) (*ploc, int) {
-	p.Offset = int64(binary.BigEndian.Uint64(b[pos : pos+8]))
-	pos += 8
-	p.Length = binary.BigEndian.Uint32(b[pos : pos+4])
-	pos += 4
-	if p.isEmpty() {
-		return nil, pos
-	}
-	return p, pos
-}
diff --git a/go/src/github.com/steveyen/gkvlite/store.go b/go/src/github.com/steveyen/gkvlite/store.go
deleted file mode 100644
index c191823..0000000
--- a/go/src/github.com/steveyen/gkvlite/store.go
+++ /dev/null
@@ -1,495 +0,0 @@
-package gkvlite
-
-import (
-	"bytes"
-	"encoding/binary"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"os"
-	"reflect"
-	"sort"
-	"sync"
-	"sync/atomic"
-	"unsafe"
-)
-
-// A persistable store holding collections of ordered keys & values.
-type Store struct {
-	// Atomic CAS'ed int64/uint64's must be at the top for 32-bit compatibility.
-	size       int64          // Atomic protected; file size or next write position.
-	nodeAllocs uint64         // Atomic protected; total node allocation stats.
-	coll       unsafe.Pointer // Copy-on-write map[string]*Collection.
-	file       StoreFile      // When nil, we're memory-only or no persistence.
-	callbacks  StoreCallbacks // Optional / may be nil.
-	readOnly   bool           // When true, Flush()'ing is disallowed.
-}
-
-// The StoreFile interface is implemented by os.File.  Application
-// specific implementations may add concurrency, caching, stats, etc.
-type StoreFile interface {
-	io.ReaderAt
-	io.WriterAt
-	Stat() (os.FileInfo, error)
-	Truncate(size int64) error
-}
-
-// Allows applications to override or interpose before/after events.
-type StoreCallbacks struct {
-	BeforeItemWrite, AfterItemRead ItemCallback
-
-	// Optional callback to allocate an Item with an Item.Key.  If
-	// your app uses ref-counting, the returned Item should have
-	// logical ref-count of 1.
-	ItemAlloc func(c *Collection, keyLength uint16) *Item
-
-	// Optional callback to allow you to track gkvlite's ref-counts on
-	// an Item.  Apps might use this for buffer management and putting
-	// Item's on a free-list.
-	ItemAddRef func(c *Collection, i *Item)
-
-	// Optional callback to allow you to track gkvlite's ref-counts on
-	// an Item.  Apps might use this for buffer management and putting
-	// Item's on a free-list.
-	ItemDecRef func(c *Collection, i *Item)
-
-	// Optional callback to control on-disk size, in bytes, of an item's value.
-	ItemValLength func(c *Collection, i *Item) int
-
-	// Optional callback to allow you to write item bytes differently.
-	ItemValWrite func(c *Collection, i *Item,
-		w io.WriterAt, offset int64) error
-
-	// Optional callback to read item bytes differently.  For example,
-	// the app might have an optimization to just remember the reader
-	// & file offsets in the item.Transient field for lazy reading.
-	ItemValRead func(c *Collection, i *Item,
-		r io.ReaderAt, offset int64, valLength uint32) error
-
-	// Invoked when a Store is reloaded (during NewStoreEx()) from
-	// disk, this callback allows the user to optionally supply a key
-	// comparison func for each collection.  Otherwise, the default is
-	// the bytes.Compare func.
-	KeyCompareForCollection func(collName string) KeyCompare
-}
-
-type ItemCallback func(*Collection, *Item) (*Item, error)
-
-const VERSION = uint32(4)
-
-var MAGIC_BEG []byte = []byte("0g1t2r")
-var MAGIC_END []byte = []byte("3e4a5p")
-
-var rootsEndLen int = 8 + 4 + 2*len(MAGIC_END)
-var rootsLen int64 = int64(2*len(MAGIC_BEG) + 4 + 4 + rootsEndLen)
-
-// Provide a nil StoreFile for in-memory-only (non-persistent) usage.
-func NewStore(file StoreFile) (*Store, error) {
-	return NewStoreEx(file, StoreCallbacks{})
-}
-
-func NewStoreEx(file StoreFile,
-	callbacks StoreCallbacks) (*Store, error) {
-	coll := make(map[string]*Collection)
-	res := &Store{coll: unsafe.Pointer(&coll), callbacks: callbacks}
-	if file == nil || !reflect.ValueOf(file).Elem().IsValid() {
-		return res, nil // Memory-only Store.
-	}
-	res.file = file
-	if err := res.readRoots(); err != nil {
-		return nil, err
-	}
-	return res, nil
-}
-
-// SetCollection() is used to create a named Collection, or to modify
-// the KeyCompare function on an existing Collection.  In either case,
-// a new Collection to use is returned.  A newly created Collection
-// and any mutations on it won't be persisted until you do a Flush().
-func (s *Store) SetCollection(name string, compare KeyCompare) *Collection {
-	if compare == nil {
-		compare = bytes.Compare
-	}
-	for {
-		orig := atomic.LoadPointer(&s.coll)
-		coll := copyColl(*(*map[string]*Collection)(orig))
-		cnew := s.MakePrivateCollection(compare)
-		cnew.name = name
-		cold := coll[name]
-		if cold != nil {
-			cnew.rootLock = cold.rootLock
-			cnew.root = cold.rootAddRef()
-		}
-		coll[name] = cnew
-		if atomic.CompareAndSwapPointer(&s.coll, orig, unsafe.Pointer(&coll)) {
-			cold.closeCollection()
-			return cnew
-		}
-		cnew.closeCollection()
-	}
-}
-
-// Returns a new, unregistered (non-named) collection.  This allows
-// advanced users to manage collections of private collections.
-func (s *Store) MakePrivateCollection(compare KeyCompare) *Collection {
-	if compare == nil {
-		compare = bytes.Compare
-	}
-	return &Collection{
-		store:    s,
-		compare:  compare,
-		rootLock: &sync.Mutex{},
-		root:     &rootNodeLoc{refs: 1, root: empty_nodeLoc},
-	}
-}
-
-// Retrieves a named Collection.
-func (s *Store) GetCollection(name string) *Collection {
-	coll := *(*map[string]*Collection)(atomic.LoadPointer(&s.coll))
-	return coll[name]
-}
-
-func (s *Store) GetCollectionNames() []string {
-	return collNames(*(*map[string]*Collection)(atomic.LoadPointer(&s.coll)))
-}
-
-func collNames(coll map[string]*Collection) []string {
-	res := make([]string, 0, len(coll))
-	for name, _ := range coll {
-		res = append(res, name)
-	}
-	sort.Strings(res) // Sorting because common callers need stability.
-	return res
-}
-
-// The Collection removal won't be reflected into persistence until
-// you do a Flush().  Invoking RemoveCollection(x) and then
-// SetCollection(x) is a fast way to empty a Collection.
-func (s *Store) RemoveCollection(name string) {
-	for {
-		orig := atomic.LoadPointer(&s.coll)
-		coll := copyColl(*(*map[string]*Collection)(orig))
-		cold := coll[name]
-		delete(coll, name)
-		if atomic.CompareAndSwapPointer(&s.coll, orig, unsafe.Pointer(&coll)) {
-			cold.closeCollection()
-			return
-		}
-	}
-}
-
-func copyColl(orig map[string]*Collection) map[string]*Collection {
-	res := make(map[string]*Collection)
-	for name, c := range orig {
-		res[name] = c
-	}
-	return res
-}
-
-// Writes (appends) any dirty, unpersisted data to file.  As a
-// greater-window-of-data-loss versus higher-performance tradeoff,
-// consider having many mutations (Set()'s & Delete()'s) and then
-// have a less occasional Flush() instead of Flush()'ing after every
-// mutation.  Users may also wish to file.Sync() after a Flush() for
-// extra data-loss protection.
-func (s *Store) Flush() error {
-	if s.readOnly {
-		return errors.New("readonly, so cannot Flush()")
-	}
-	if s.file == nil {
-		return errors.New("no file / in-memory only, so cannot Flush()")
-	}
-	coll := *(*map[string]*Collection)(atomic.LoadPointer(&s.coll))
-	rnls := map[string]*rootNodeLoc{}
-	cnames := collNames(coll)
-	for _, name := range cnames {
-		c := coll[name]
-		rnls[name] = c.rootAddRef()
-	}
-	defer func() {
-		for _, name := range cnames {
-			coll[name].rootDecRef(rnls[name])
-		}
-	}()
-	for _, name := range cnames {
-		if err := coll[name].write(rnls[name].root); err != nil {
-			return err
-		}
-	}
-	return s.writeRoots(rnls)
-}
-
-// Reverts the last Flush(), bringing the Store back to its state at
-// its next-to-last Flush() or to an empty Store (with no Collections)
-// if there were no next-to-last Flush().  This call will truncate the
-// Store file.
-func (s *Store) FlushRevert() error {
-	if s.file == nil {
-		return errors.New("no file / in-memory only, so cannot FlushRevert()")
-	}
-	orig := atomic.LoadPointer(&s.coll)
-	coll := make(map[string]*Collection)
-	if atomic.CompareAndSwapPointer(&s.coll, orig, unsafe.Pointer(&coll)) {
-		for _, cold := range *(*map[string]*Collection)(orig) {
-			cold.closeCollection()
-		}
-	}
-	if atomic.LoadInt64(&s.size) > rootsLen {
-		atomic.AddInt64(&s.size, -1)
-	}
-	err := s.readRootsScan(true)
-	if err != nil {
-		return err
-	}
-	if s.readOnly {
-		return nil
-	}
-	return s.file.Truncate(atomic.LoadInt64(&s.size))
-}
-
-// Returns a read-only snapshot, including any mutations on the
-// original Store that have not been Flush()'ed to disk yet.  The
-// snapshot has its mutations and Flush() operations disabled because
-// the original store "owns" writes to the StoreFile.
-func (s *Store) Snapshot() (snapshot *Store) {
-	coll := copyColl(*(*map[string]*Collection)(atomic.LoadPointer(&s.coll)))
-	res := &Store{
-		coll:      unsafe.Pointer(&coll),
-		file:      s.file,
-		size:      atomic.LoadInt64(&s.size),
-		readOnly:  true,
-		callbacks: s.callbacks,
-	}
-	for _, name := range collNames(coll) {
-		collOrig := coll[name]
-		coll[name] = &Collection{
-			store:    res,
-			compare:  collOrig.compare,
-			rootLock: collOrig.rootLock,
-			root:     collOrig.rootAddRef(),
-		}
-	}
-	return res
-}
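
Illustrative sketch (not from the original sources) of the snapshot semantics described above: the snapshot keeps the state as of the Snapshot() call, including unflushed mutations, later writes to the original store stay invisible to it, and writes through the snapshot are expected to fail per the read-only contract:

package main

import (
	"bytes"
	"log"

	"github.com/steveyen/gkvlite"
)

func main() {
	s, _ := gkvlite.NewStore(nil) // in-memory store; errors elided
	c := s.SetCollection("c", nil)
	c.Set([]byte("k"), []byte("v1")) // not yet Flush()'ed

	snap := s.Snapshot() // read-only view of the state right now
	sc := snap.GetCollection("c")

	c.Set([]byte("k"), []byte("v2")) // later mutation on the original store

	v, _ := sc.Get([]byte("k"))
	if !bytes.Equal(v, []byte("v1")) {
		log.Fatalf("snapshot should still see v1, got: %q", v)
	}
	// Per the read-only contract above, writes through the snapshot fail.
	if err := sc.Set([]byte("x"), []byte("y")); err == nil {
		log.Fatal("expected snapshot Set() to be rejected")
	}
}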
-
-func (s *Store) Close() {
-	s.file = nil
-	cptr := atomic.LoadPointer(&s.coll)
-	if cptr == nil ||
-		!atomic.CompareAndSwapPointer(&s.coll, cptr, unsafe.Pointer(nil)) {
-		return
-	}
-	coll := *(*map[string]*Collection)(cptr)
-	for _, name := range collNames(coll) {
-		coll[name].closeCollection()
-	}
-}
-
-// Copy all active collections and their items to a different file.
-// If flushEvery > 0, then during the item copying, Flush() will be
-// invoked at every flushEvery'th item and at the end of the item
-// copying.  The copy will not include any old items or nodes so the
-// copy should be more compact if flushEvery is relatively large.
-func (s *Store) CopyTo(dstFile StoreFile, flushEvery int) (res *Store, err error) {
-	dstStore, err := NewStore(dstFile)
-	if err != nil {
-		return nil, err
-	}
-	coll := *(*map[string]*Collection)(atomic.LoadPointer(&s.coll))
-	for _, name := range collNames(coll) {
-		srcColl := coll[name]
-		dstColl := dstStore.SetCollection(name, srcColl.compare)
-		minItem, err := srcColl.MinItem(true)
-		if err != nil {
-			return nil, err
-		}
-		if minItem == nil {
-			continue
-		}
-		defer s.ItemDecRef(srcColl, minItem)
-		numItems := 0
-		var errCopyItem error
-		err = srcColl.VisitItemsAscend(minItem.Key, true, func(i *Item) bool {
-			if errCopyItem = dstColl.SetItem(i); errCopyItem != nil {
-				return false
-			}
-			numItems++
-			if flushEvery > 0 && numItems%flushEvery == 0 {
-				if errCopyItem = dstStore.Flush(); errCopyItem != nil {
-					return false
-				}
-			}
-			return true
-		})
-		if err != nil {
-			return nil, err
-		}
-		if errCopyItem != nil {
-			return nil, errCopyItem
-		}
-	}
-	if flushEvery > 0 {
-		if err = dstStore.Flush(); err != nil {
-			return nil, err
-		}
-	}
-	return dstStore, nil
-}
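
Illustrative compaction sketch (not from the original sources) built on CopyTo(), per the comment above: copying to a fresh file with a relatively large flushEvery leaves out old nodes and items, so the destination tends to be smaller than a long-lived source file; file names and the flushEvery value are made up:

package main

import (
	"log"
	"os"

	"github.com/steveyen/gkvlite"
)

func main() {
	src, err := os.Open("data.gkvlite") // existing, possibly bloated file
	if err != nil {
		log.Fatal(err)
	}
	s, err := gkvlite.NewStore(src)
	if err != nil {
		log.Fatal(err)
	}

	dst, err := os.Create("data-compacted.gkvlite")
	if err != nil {
		log.Fatal(err)
	}
	// With flushEvery > 0, CopyTo() also issues the final Flush() itself.
	s2, err := s.CopyTo(dst, 10000)
	if err != nil {
		log.Fatal(err)
	}

	stats := map[string]uint64{}
	s2.Stats(stats)
	log.Printf("compacted file size: %v bytes", stats["fileSize"])
	dst.Sync() // optional durability step
}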
-
-// Updates the provided map with statistics.
-func (s *Store) Stats(out map[string]uint64) {
-	out["fileSize"] = uint64(atomic.LoadInt64(&s.size))
-	out["nodeAllocs"] = atomic.LoadUint64(&s.nodeAllocs)
-}
-
-func (o *Store) writeRoots(rnls map[string]*rootNodeLoc) error {
-	sJSON, err := json.Marshal(rnls)
-	if err != nil {
-		return err
-	}
-	offset := atomic.LoadInt64(&o.size)
-	length := 2*len(MAGIC_BEG) + 4 + 4 + len(sJSON) + 8 + 4 + 2*len(MAGIC_END)
-	b := bytes.NewBuffer(make([]byte, length)[:0])
-	b.Write(MAGIC_BEG)
-	b.Write(MAGIC_BEG)
-	binary.Write(b, binary.BigEndian, uint32(VERSION))
-	binary.Write(b, binary.BigEndian, uint32(length))
-	b.Write(sJSON)
-	binary.Write(b, binary.BigEndian, int64(offset))
-	binary.Write(b, binary.BigEndian, uint32(length))
-	b.Write(MAGIC_END)
-	b.Write(MAGIC_END)
-	if _, err := o.file.WriteAt(b.Bytes()[:length], offset); err != nil {
-		return err
-	}
-	atomic.StoreInt64(&o.size, offset+int64(length))
-	return nil
-}
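
For readers following writeRoots() and readRootsScan(): the root record appended above has the following layout (all integers big-endian), which is what the backwards scan for the doubled MAGIC_END relies on:

	MAGIC_BEG, MAGIC_BEG,
	uint32 version, uint32 length,
	JSON-encoded roots (per-collection root locations),
	int64 offset, uint32 length,
	MAGIC_END, MAGIC_END

Here offset is the file position where the record starts and length is the total record size, so a reader that finds the trailing magic can jump back to offset and verify the leading magic, version, and length before trusting the JSON.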
-
-func (o *Store) readRoots() error {
-	finfo, err := o.file.Stat()
-	if err != nil {
-		return err
-	}
-	atomic.StoreInt64(&o.size, finfo.Size())
-	if o.size <= 0 {
-		return nil
-	}
-	return o.readRootsScan(false)
-}
-
-func (o *Store) readRootsScan(defaultToEmpty bool) (err error) {
-	rootsEnd := make([]byte, rootsEndLen)
-	for {
-		for { // Scan backwards for MAGIC_END.
-			if atomic.LoadInt64(&o.size) <= rootsLen {
-				if defaultToEmpty {
-					atomic.StoreInt64(&o.size, 0)
-					return nil
-				}
-				return errors.New("couldn't find roots; file corrupted or wrong?")
-			}
-			if _, err := o.file.ReadAt(rootsEnd,
-				atomic.LoadInt64(&o.size)-int64(len(rootsEnd))); err != nil {
-				return err
-			}
-			if bytes.Equal(MAGIC_END, rootsEnd[8+4:8+4+len(MAGIC_END)]) &&
-				bytes.Equal(MAGIC_END, rootsEnd[8+4+len(MAGIC_END):]) {
-				break
-			}
-			atomic.AddInt64(&o.size, -1) // TODO: optimizations to scan backwards faster.
-		}
-		// Read and check the roots.
-		var offset int64
-		var length uint32
-		endBuf := bytes.NewBuffer(rootsEnd)
-		err = binary.Read(endBuf, binary.BigEndian, &offset)
-		if err != nil {
-			return err
-		}
-		if err = binary.Read(endBuf, binary.BigEndian, &length); err != nil {
-			return err
-		}
-		if offset >= 0 && offset < atomic.LoadInt64(&o.size)-int64(rootsLen) &&
-			length == uint32(atomic.LoadInt64(&o.size)-offset) {
-			data := make([]byte, atomic.LoadInt64(&o.size)-offset-int64(len(rootsEnd)))
-			if _, err := o.file.ReadAt(data, offset); err != nil {
-				return err
-			}
-			if bytes.Equal(MAGIC_BEG, data[:len(MAGIC_BEG)]) &&
-				bytes.Equal(MAGIC_BEG, data[len(MAGIC_BEG):2*len(MAGIC_BEG)]) {
-				var version, length0 uint32
-				b := bytes.NewBuffer(data[2*len(MAGIC_BEG):])
-				if err = binary.Read(b, binary.BigEndian, &version); err != nil {
-					return err
-				}
-				if err = binary.Read(b, binary.BigEndian, &length0); err != nil {
-					return err
-				}
-				if version != VERSION {
-					return fmt.Errorf("version mismatch: "+
-						"current version: %v != found version: %v", VERSION, version)
-				}
-				if length0 != length {
-					return fmt.Errorf("length mismatch: "+
-						"wanted length: %v != found length: %v", length0, length)
-				}
-				m := make(map[string]*Collection)
-				if err = json.Unmarshal(data[2*len(MAGIC_BEG)+4+4:], &m); err != nil {
-					return err
-				}
-				for collName, t := range m {
-					t.name = collName
-					t.store = o
-					if o.callbacks.KeyCompareForCollection != nil {
-						t.compare = o.callbacks.KeyCompareForCollection(collName)
-					}
-					if t.compare == nil {
-						t.compare = bytes.Compare
-					}
-				}
-				atomic.StorePointer(&o.coll, unsafe.Pointer(&m))
-				return nil
-			} // else, perhaps value was unlucky in having MAGIC_END's.
-		} // else, perhaps a gkvlite file was stored as a value.
-		atomic.AddInt64(&o.size, -1) // Roots were wrong, so keep scanning.
-	}
-}
-
-func (o *Store) ItemAlloc(c *Collection, keyLength uint16) *Item {
-	if o.callbacks.ItemAlloc != nil {
-		return o.callbacks.ItemAlloc(c, keyLength)
-	}
-	return &Item{Key: make([]byte, keyLength)}
-}
-
-func (o *Store) ItemAddRef(c *Collection, i *Item) {
-	if o.callbacks.ItemAddRef != nil {
-		o.callbacks.ItemAddRef(c, i)
-	}
-}
-
-func (o *Store) ItemDecRef(c *Collection, i *Item) {
-	if o.callbacks.ItemDecRef != nil {
-		o.callbacks.ItemDecRef(c, i)
-	}
-}
-
-func (o *Store) ItemValRead(c *Collection, i *Item,
-	r io.ReaderAt, offset int64, valLength uint32) error {
-	if o.callbacks.ItemValRead != nil {
-		return o.callbacks.ItemValRead(c, i, r, offset, valLength)
-	}
-	i.Val = make([]byte, valLength)
-	_, err := r.ReadAt(i.Val, offset)
-	return err
-}
-
-func (o *Store) ItemValWrite(c *Collection, i *Item, w io.WriterAt, offset int64) error {
-	if o.callbacks.ItemValWrite != nil {
-		return o.callbacks.ItemValWrite(c, i, w, offset)
-	}
-	_, err := w.WriteAt(i.Val, offset)
-	return err
-}
diff --git a/go/src/github.com/steveyen/gkvlite/store_test.go b/go/src/github.com/steveyen/gkvlite/store_test.go
deleted file mode 100644
index 78d4acc..0000000
--- a/go/src/github.com/steveyen/gkvlite/store_test.go
+++ /dev/null
@@ -1,2806 +0,0 @@
-package gkvlite
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"math/rand"
-	"os"
-	"runtime"
-	"strconv"
-	"sync/atomic"
-	"testing"
-	"unsafe"
-)
-
-type mockfile struct {
-	f           *os.File
-	readat      func(p []byte, off int64) (n int, err error)
-	writeat     func(p []byte, off int64) (n int, err error)
-	stat        func() (fi os.FileInfo, err error)
-	numReadAt   int
-	numWriteAt  int
-	numStat     int
-	numTruncate int
-}
-
-func (m *mockfile) ReadAt(p []byte, off int64) (n int, err error) {
-	m.numReadAt++
-	if m.readat != nil {
-		return m.readat(p, off)
-	}
-	return m.f.ReadAt(p, off)
-}
-
-func (m *mockfile) WriteAt(p []byte, off int64) (n int, err error) {
-	m.numWriteAt++
-	if m.writeat != nil {
-		return m.writeat(p, off)
-	}
-	return m.f.WriteAt(p, off)
-}
-
-func (m *mockfile) Stat() (fi os.FileInfo, err error) {
-	m.numStat++
-	if m.stat != nil {
-		return m.stat()
-	}
-	return m.f.Stat()
-}
-
-func (m *mockfile) Truncate(size int64) error {
-	m.numTruncate++
-	return m.f.Truncate(size)
-}
-
-func TestStoreMem(t *testing.T) {
-	s, err := NewStore(nil)
-	if err != nil || s == nil {
-		t.Errorf("expected memory-only NewStore to work")
-	}
-	s.SetCollection("x", bytes.Compare)
-	x := s.GetCollection("x")
-	if x == nil {
-		t.Errorf("expected SetColl/GetColl to work")
-	}
-	if x.Name() != "x" {
-		t.Errorf("expected name to be same")
-	}
-	x2 := s.GetCollection("x")
-	if x2 != x {
-		t.Errorf("expected 2nd GetColl to work")
-	}
-	if x2.Name() != "x" {
-		t.Errorf("expected name to be same")
-	}
-	numItems, numBytes, err := x2.GetTotals()
-	if err != nil || numItems != 0 || numBytes != 0 {
-		t.Errorf("expected empty memory coll to be empty")
-	}
-
-	tests := []struct {
-		op  string
-		val string
-		pri int
-		exp string
-	}{
-		{"get", "not-there", 0, "NIL"},
-		{"ups", "a", 100, ""},
-		{"get", "a", 0, "a"},
-		{"ups", "b", 200, ""},
-		{"get", "a", 0, "a"},
-		{"get", "b", 0, "b"},
-		{"ups", "c", 300, ""},
-		{"get", "a", 0, "a"},
-		{"get", "b", 0, "b"},
-		{"get", "c", 0, "c"},
-		{"get", "not-there", 0, "NIL"},
-		{"ups", "a", 400, ""},
-		{"get", "a", 0, "a"},
-		{"get", "b", 0, "b"},
-		{"get", "c", 0, "c"},
-		{"get", "not-there", 0, "NIL"},
-		{"del", "a", 0, ""},
-		{"get", "a", 0, "NIL"},
-		{"get", "b", 0, "b"},
-		{"get", "c", 0, "c"},
-		{"get", "not-there", 0, "NIL"},
-		{"ups", "a", 10, ""},
-		{"get", "a", 0, "a"},
-		{"get", "b", 0, "b"},
-		{"get", "c", 0, "c"},
-		{"get", "not-there", 0, "NIL"},
-		{"del", "a", 0, ""},
-		{"del", "b", 0, ""},
-		{"del", "c", 0, ""},
-		{"get", "a", 0, "NIL"},
-		{"get", "b", 0, "NIL"},
-		{"get", "c", 0, "NIL"},
-		{"get", "not-there", 0, "NIL"},
-		{"del", "a", 0, ""},
-		{"del", "b", 0, ""},
-		{"del", "c", 0, ""},
-		{"get", "a", 0, "NIL"},
-		{"get", "b", 0, "NIL"},
-		{"get", "c", 0, "NIL"},
-		{"get", "not-there", 0, "NIL"},
-		{"ups", "a", 10, ""},
-		{"get", "a", 0, "a"},
-		{"get", "b", 0, "NIL"},
-		{"get", "c", 0, "NIL"},
-		{"get", "not-there", 0, "NIL"},
-	}
-
-	for testIdx, test := range tests {
-		switch test.op {
-		case "get":
-			i, err := x.GetItem([]byte(test.val), true)
-			if err != nil {
-				t.Errorf("test: %v, expected get nil error, got: %v",
-					testIdx, err)
-			}
-			if i == nil && test.exp == "NIL" {
-				continue
-			}
-			if string(i.Key) != test.exp {
-				t.Errorf("test: %v, on Get, expected key: %v, got: %v",
-					testIdx, test.exp, i.Key)
-			}
-			if string(i.Val) != test.exp {
-				t.Errorf("test: %v, on Get, expected val: %v, got: %v",
-					testIdx, test.exp, i.Val)
-			}
-		case "ups":
-			err := x.SetItem(&Item{
-				Key:      []byte(test.val),
-				Val:      []byte(test.val),
-				Priority: int32(test.pri),
-			})
-			if err != nil {
-				t.Errorf("test: %v, expected ups nil error, got: %v",
-					testIdx, err)
-			}
-		case "del":
-			_, err := x.Delete([]byte(test.val))
-			if err != nil {
-				t.Errorf("test: %v, expected del nil error, got: %v",
-					testIdx, err)
-			}
-		}
-	}
-
-	xx := s.SetCollection("xx", nil)
-
-	for testIdx, test := range tests {
-		switch test.op {
-		case "get":
-			i, err := xx.Get([]byte(test.val))
-			if err != nil {
-				t.Errorf("test: %v, expected get nil error, got: %v",
-					testIdx, err)
-			}
-			if i == nil && test.exp == "NIL" {
-				continue
-			}
-			if string(i) != test.exp {
-				t.Errorf("test: %v, on Get, expected val: %v, got: %v",
-					testIdx, test.exp, string(i))
-			}
-		case "ups":
-			err := xx.Set([]byte(test.val), []byte(test.val))
-			if err != nil {
-				t.Errorf("test: %v, expected ups nil error, got: %v",
-					testIdx, err)
-			}
-		case "del":
-			_, err := xx.Delete([]byte(test.val))
-			if err != nil {
-				t.Errorf("test: %v, expected del nil error, got: %v",
-					testIdx, err)
-			}
-		}
-	}
-
-	if xx.SetItem(&Item{}) == nil {
-		t.Error("expected error on empty item")
-	}
-	if xx.SetItem(&Item{Key: []byte("hello")}) == nil {
-		t.Error("expected error on nil item Val")
-	}
-	if xx.SetItem(&Item{Val: []byte("hello")}) == nil {
-		t.Error("expected error on nil item Key")
-	}
-	if xx.SetItem(&Item{Key: []byte{}, Val: []byte{}}) == nil {
-		t.Error("expected error on zero-length item Key")
-	}
-	if xx.SetItem(&Item{Key: make([]byte, 0xffff+1), Val: []byte{}}) == nil {
-		t.Error("expected error on too long item Key")
-	}
-	if xx.SetItem(&Item{Key: make([]byte, 0xffff-1), Val: []byte{}}) != nil {
-		t.Error("expected success on just under key length")
-	}
-	if xx.SetItem(&Item{Key: make([]byte, 0xffff), Val: []byte{}}) != nil {
-		t.Error("expected success on just perfect key length")
-	}
-	err = xx.SetItem(&Item{
-		Key:      make([]byte, 0xffff),
-		Val:      []byte{},
-		Priority: -1})
-	if err == nil {
-		t.Error("expected error on -1 Priority")
-	}
-}
-
-func loadCollection(x *Collection, arr []string) {
-	for i, s := range arr {
-		x.SetItem(&Item{
-			Key:      []byte(s),
-			Val:      []byte(s),
-			Priority: int32(i),
-		})
-	}
-}
-
-func visitExpectCollection(t *testing.T, x *Collection, start string,
-	arr []string, cb func(i *Item)) {
-	n := 0
-	err := x.VisitItemsAscend([]byte(start), true, func(i *Item) bool {
-		if cb != nil {
-			cb(i)
-		}
-		if string(i.Key) != arr[n] {
-			t.Fatalf("expected visit item: %v, saw key: %s, item: %#v",
-				arr[n], string(i.Key), i)
-		}
-		n++
-		return true
-	})
-	if err != nil {
-		t.Errorf("expected no visit error, got: %v", err)
-	}
-	if n != len(arr) {
-		t.Errorf("expected # visit callbacks: %v, saw: %v", len(arr), n)
-	}
-}
-
-func visitDescendExpectCollection(t *testing.T, x *Collection, tgt string,
-	arr []string, cb func(i *Item)) {
-	n := 0
-	err := x.VisitItemsDescend([]byte(tgt), true, func(i *Item) bool {
-		if cb != nil {
-			cb(i)
-		}
-		if string(i.Key) != arr[n] {
-			t.Errorf("expected visit item: %v, saw: %v", arr[n], i)
-		}
-		n++
-		return true
-	})
-	if err != nil {
-		t.Errorf("expected no visit error, got: %v", err)
-	}
-	if n != len(arr) {
-		t.Errorf("expected # visit callbacks: %v, saw: %v", len(arr), n)
-	}
-}
-
-func TestVisitStoreMem(t *testing.T) {
-	s, _ := NewStore(nil)
-	if s.Flush() == nil {
-		t.Errorf("expected in-memory store Flush() error")
-	}
-	if len(s.GetCollectionNames()) != 0 {
-		t.Errorf("expected no coll names on empty store")
-	}
-	x := s.SetCollection("x", bytes.Compare)
-	if s.Flush() == nil {
-		t.Errorf("expected in-memory store Flush() error")
-	}
-	if len(s.GetCollectionNames()) != 1 || s.GetCollectionNames()[0] != "x" {
-		t.Errorf("expected 1 coll name x")
-	}
-	if s.GetCollection("x") != x {
-		t.Errorf("expected coll x to be the same")
-	}
-	if s.GetCollection("y") != nil {
-		t.Errorf("expected coll y to be nil")
-	}
-
-	visitExpectCollection(t, x, "a", []string{}, nil)
-	min, err := x.MinItem(true)
-	if err != nil || min != nil {
-		t.Errorf("expected no min, got: %v, err: %v", min, err)
-	}
-	max, err := x.MaxItem(true)
-	if err != nil || max != nil {
-		t.Errorf("expected no max, got: %v, err: %v", max, err)
-	}
-
-	loadCollection(x, []string{"e", "d", "a", "c", "b", "c", "a"})
-	if s.Flush() == nil {
-		t.Errorf("expected in-memory store Flush() error")
-	}
-
-	visitX := func() {
-		visitExpectCollection(t, x, "a", []string{"a", "b", "c", "d", "e"}, nil)
-		visitExpectCollection(t, x, "a1", []string{"b", "c", "d", "e"}, nil)
-		visitExpectCollection(t, x, "b", []string{"b", "c", "d", "e"}, nil)
-		visitExpectCollection(t, x, "b1", []string{"c", "d", "e"}, nil)
-		visitExpectCollection(t, x, "c", []string{"c", "d", "e"}, nil)
-		visitExpectCollection(t, x, "c1", []string{"d", "e"}, nil)
-		visitExpectCollection(t, x, "d", []string{"d", "e"}, nil)
-		visitExpectCollection(t, x, "d1", []string{"e"}, nil)
-		visitExpectCollection(t, x, "e", []string{"e"}, nil)
-		visitExpectCollection(t, x, "f", []string{}, nil)
-	}
-	visitX()
-
-	min0, err := x.MinItem(false)
-	if err != nil || string(min0.Key) != "a" {
-		t.Errorf("expected min of a")
-	}
-	min1, err := x.MinItem(true)
-	if err != nil || string(min1.Key) != "a" || string(min1.Val) != "a" {
-		t.Errorf("expected min of a")
-	}
-	max0, err := x.MaxItem(false)
-	if err != nil || string(max0.Key) != "e" {
-		t.Errorf("expected max0 of e, got: %#v, err: %#v", max0, err)
-	}
-	max1, err := x.MaxItem(true)
-	if err != nil || string(max1.Key) != "e" || string(max1.Val) != "e" {
-		t.Errorf("expected max1 of e, got: %#v, err: %#v", max1, err)
-	}
-
-	if s.Flush() == nil {
-		t.Errorf("expected in-memory store Flush() error")
-	}
-
-	numItems, numBytes, err := x.GetTotals()
-	if err != nil || numItems != 5 || numBytes != 10 {
-		t.Errorf("mismatched memory coll totals, got: %v, %v, %v",
-			numItems, numBytes, err)
-	}
-}
-
-func TestStoreFile(t *testing.T) {
-	fname := "tmp.test"
-	os.Remove(fname)
-	f, err := os.Create(fname)
-	if err != nil || f == nil {
-		t.Errorf("could not create file: %v", fname)
-	}
-
-	s, err := NewStore(f)
-	if err != nil || s == nil {
-		t.Errorf("expected NewStore(f) to work, err: %v", err)
-	}
-	if len(s.GetCollectionNames()) != 0 {
-		t.Errorf("expected no coll names on empty store")
-	}
-	x := s.SetCollection("x", bytes.Compare)
-	if x == nil {
-		t.Errorf("expected SetCollection() to work")
-	}
-
-	if err := s.Flush(); err != nil {
-		t.Errorf("expected empty Flush() to have no error, err: %v", err)
-	}
-	finfo, err := f.Stat()
-	if err != nil {
-		t.Errorf("expected stat to work")
-	}
-	sizeAfter1stFlush := finfo.Size()
-	if sizeAfter1stFlush == 0 {
-		t.Errorf("expected non-empty file after 1st Flush(), got: %v",
-			sizeAfter1stFlush)
-	}
-
-	numItems, numBytes, err := x.GetTotals()
-	if err != nil || numItems != 0 || numBytes != 0 {
-		t.Errorf("mismatched coll totals, got: %v, %v, %v",
-			numItems, numBytes, err)
-	}
-
-	if len(s.GetCollectionNames()) != 1 || s.GetCollectionNames()[0] != "x" {
-		t.Errorf("expected 1 coll name x")
-	}
-	if s.GetCollection("x") != x {
-		t.Errorf("expected coll x to be the same")
-	}
-	if s.GetCollection("y") != nil {
-		t.Errorf("expected coll y to be nil")
-	}
-
-	loadCollection(x, []string{"a"})
-	if err := s.Flush(); err != nil {
-		t.Errorf("expected single key Flush() to have no error, err: %v", err)
-	}
-	f.Sync()
-
-	i, err := x.GetItem([]byte("a"), true)
-	if err != nil {
-		t.Errorf("expected s.GetItem(a) to not error, err: %v", err)
-	}
-	if i == nil {
-		t.Errorf("expected s.GetItem(a) to return non-nil item")
-	}
-	if string(i.Key) != "a" {
-		t.Errorf("expected s.GetItem(a) to return key a, got: %v", i.Key)
-	}
-	if string(i.Val) != "a" {
-		t.Errorf("expected s.GetItem(a) to return val a, got: %v", i.Val)
-	}
-
-	numItems, numBytes, err = x.GetTotals()
-	if err != nil || numItems != 1 || numBytes != 2 {
-		t.Errorf("mismatched coll totals, got: %v, %v, %v",
-			numItems, numBytes, err)
-	}
-
-	// ------------------------------------------------
-
-	f2, err := os.Open(fname) // Test reading the file.
-	if err != nil || f2 == nil {
-		t.Errorf("could not reopen file: %v", fname)
-	}
-	s2, err := NewStore(f2)
-	if err != nil || s2 == nil {
-		t.Errorf("expected NewStore(f) to work, err: %v", err)
-	}
-	if len(s2.GetCollectionNames()) != 1 || s2.GetCollectionNames()[0] != "x" {
-		t.Errorf("expected 1 coll name x")
-	}
-	x2 := s2.GetCollection("x")
-	if x2 == nil {
-		t.Errorf("expected x2 to be there")
-	}
-	i, err = x2.GetItem([]byte("a"), true)
-	if err != nil {
-		t.Errorf("expected s2.GetItem(a) to not error, err: %v", err)
-	}
-	if i == nil {
-		t.Errorf("expected s2.GetItem(a) to return non-nil item")
-	}
-	if string(i.Key) != "a" {
-		t.Errorf("expected s2.GetItem(a) to return key a, got: %v", i.Key)
-	}
-	if string(i.Val) != "a" {
-		t.Errorf("expected s2.GetItem(a) to return val a, got: %+v", i)
-	}
-	i2, err := x2.GetItem([]byte("not-there"), true)
-	if i2 != nil || err != nil {
-		t.Errorf("expected miss to miss nicely.")
-	}
-	numItems, numBytes, err = x2.GetTotals()
-	if err != nil || numItems != 1 || numBytes != 2 {
-		t.Errorf("mismatched coll totals, got: %v, %v, %v",
-			numItems, numBytes, err)
-	}
-
-	// ------------------------------------------------
-
-	loadCollection(x, []string{"c", "b"})
-
-	// x2 has its own snapshot, so should not see the new items.
-	i, err = x2.GetItem([]byte("b"), true)
-	if i != nil || err != nil {
-		t.Errorf("expected b miss to miss nicely.")
-	}
-	i, err = x2.GetItem([]byte("c"), true)
-	if i != nil || err != nil {
-		t.Errorf("expected c miss to miss nicely.")
-	}
-
-	// Even after Flush().
-	if err := s.Flush(); err != nil {
-		t.Errorf("expected Flush() to have no error, err: %v", err)
-	}
-	f.Sync()
-
-	i, err = x2.GetItem([]byte("b"), true)
-	if i != nil || err != nil {
-		t.Errorf("expected b miss to still miss nicely.")
-	}
-	i, err = x2.GetItem([]byte("c"), true)
-	if i != nil || err != nil {
-		t.Errorf("expected c miss to still miss nicely.")
-	}
-
-	i, err = x.GetItem([]byte("a"), true)
-	if i == nil || err != nil {
-		t.Errorf("expected a to be in x.")
-	}
-	i, err = x.GetItem([]byte("b"), true)
-	if i == nil || err != nil {
-		t.Errorf("expected b to be in x.")
-	}
-	i, err = x.GetItem([]byte("c"), true)
-	if i == nil || err != nil {
-		t.Errorf("expected c to be in x.")
-	}
-
-	visitExpectCollection(t, x, "a", []string{"a", "b", "c"}, nil)
-	visitExpectCollection(t, x2, "a", []string{"a"}, nil)
-
-	numItems, numBytes, err = x.GetTotals()
-	if err != nil || numItems != 3 || numBytes != 6 {
-		t.Errorf("mismatched coll totals, got: %v, %v, %v",
-			numItems, numBytes, err)
-	}
-	numItems, numBytes, err = x2.GetTotals()
-	if err != nil || numItems != 1 || numBytes != 2 {
-		t.Errorf("mismatched coll totals, got: %v, %v, %v",
-			numItems, numBytes, err)
-	}
-
-	// ------------------------------------------------
-
-	f3, err := os.Open(fname) // Another file reader.
-	if err != nil || f3 == nil {
-		t.Errorf("could not reopen file: %v", fname)
-	}
-	s3, err := NewStore(f3)
-	if err != nil || s3 == nil {
-		t.Errorf("expected NewStore(f) to work, err: %v", err)
-	}
-	if len(s3.GetCollectionNames()) != 1 || s3.GetCollectionNames()[0] != "x" {
-		t.Errorf("expected 1 coll name x")
-	}
-	x3 := s3.GetCollection("x")
-	if x3 == nil {
-		t.Errorf("expected x3 to be there")
-	}
-
-	visitExpectCollection(t, x, "a", []string{"a", "b", "c"}, nil)
-	visitExpectCollection(t, x2, "a", []string{"a"}, nil)
-	visitExpectCollection(t, x3, "a", []string{"a", "b", "c"}, nil)
-
-	i, err = x3.GetItem([]byte("a"), true)
-	if i == nil || err != nil {
-		t.Errorf("expected a to be in x3.")
-	}
-	i, err = x3.GetItem([]byte("b"), true)
-	if i == nil || err != nil {
-		t.Errorf("expected b to be in x3.")
-	}
-	i, err = x3.GetItem([]byte("c"), true)
-	if i == nil || err != nil {
-		t.Errorf("expected c to be in x3.")
-	}
-
-	// ------------------------------------------------
-
-	// Exercising deletion.
-	if _, err = x.Delete([]byte("b")); err != nil {
-		t.Errorf("expected Delete to have no error, err: %v", err)
-	}
-	if err := s.Flush(); err != nil {
-		t.Errorf("expected Flush() to have no error, err: %v", err)
-	}
-	f.Sync()
-
-	f4, err := os.Open(fname) // Another file reader.
-	s4, err := NewStore(f4)
-	x4 := s4.GetCollection("x")
-
-	visitExpectCollection(t, x, "a", []string{"a", "c"}, nil)
-	visitExpectCollection(t, x2, "a", []string{"a"}, nil)
-	visitExpectCollection(t, x3, "a", []string{"a", "b", "c"}, nil)
-	visitExpectCollection(t, x4, "a", []string{"a", "c"}, nil)
-
-	i, err = x4.GetItem([]byte("a"), true)
-	if i == nil || err != nil {
-		t.Errorf("expected a to be in x4.")
-	}
-	i, err = x4.GetItem([]byte("b"), true)
-	if i != nil || err != nil {
-		t.Errorf("expected b to not be in x4.")
-	}
-	i, err = x4.GetItem([]byte("c"), true)
-	if i == nil || err != nil {
-		t.Errorf("expected c to be in x4.")
-	}
-
-	numItems, numBytes, err = x.GetTotals()
-	if err != nil || numItems != 2 || numBytes != 4 {
-		t.Errorf("mismatched coll totals, got: %v, %v, %v",
-			numItems, numBytes, err)
-	}
-	numItems, numBytes, err = x2.GetTotals()
-	if err != nil || numItems != 1 || numBytes != 2 {
-		t.Errorf("mismatched coll totals, got: %v, %v, %v",
-			numItems, numBytes, err)
-	}
-	numItems, numBytes, err = x3.GetTotals()
-	if err != nil || numItems != 3 || numBytes != 6 {
-		t.Errorf("mismatched coll totals, got: %v, %v, %v",
-			numItems, numBytes, err)
-	}
-	numItems, numBytes, err = x4.GetTotals()
-	if err != nil || numItems != 2 || numBytes != 4 {
-		t.Errorf("mismatched coll totals, got: %v, %v, %v",
-			numItems, numBytes, err)
-	}
-
-	// ------------------------------------------------
-
-	// Exercising deletion more.
-	if _, err = x.Delete([]byte("c")); err != nil {
-		t.Errorf("expected Delete to have no error, err: %v", err)
-	}
-	loadCollection(x, []string{"d", "c", "b", "b", "c"})
-	if _, err = x.Delete([]byte("b")); err != nil {
-		t.Errorf("expected Delete to have no error, err: %v", err)
-	}
-	if _, err = x.Delete([]byte("c")); err != nil {
-		t.Errorf("expected Delete to have no error, err: %v", err)
-	}
-	if err := s.Flush(); err != nil {
-		t.Errorf("expected Flush() to have no error, err: %v", err)
-	}
-	f.Sync()
-
-	f5, err := os.Open(fname) // Another file reader.
-	s5, err := NewStore(f5)
-	x5 := s5.GetCollection("x")
-
-	visitExpectCollection(t, x, "a", []string{"a", "d"}, nil)
-	visitExpectCollection(t, x2, "a", []string{"a"}, nil)
-	visitExpectCollection(t, x3, "a", []string{"a", "b", "c"}, nil)
-	visitExpectCollection(t, x4, "a", []string{"a", "c"}, nil)
-	visitExpectCollection(t, x5, "a", []string{"a", "d"}, nil)
-
-	// ------------------------------------------------
-
-	// Exercise MinItem and MaxItem.
-	mmTests := []struct {
-		coll *Collection
-		min  string
-		max  string
-	}{
-		{x, "a", "d"},
-		{x2, "a", "a"},
-		{x3, "a", "c"},
-		{x4, "a", "c"},
-		{x5, "a", "d"},
-	}
-
-	for mmTestIdx, mmTest := range mmTests {
-		i, err = mmTest.coll.MinItem(true)
-		if err != nil {
-			t.Errorf("mmTestIdx: %v, expected no Min error, but got err: %v",
-				mmTestIdx, err)
-		}
-		if i == nil {
-			t.Errorf("mmTestIdx: %v, expected Min item, but got nil",
-				mmTestIdx)
-		}
-		if string(i.Key) != mmTest.min {
-			t.Errorf("mmTestIdx: %v, expected Min item key: %v, but got: %v",
-				mmTestIdx, mmTest.min, string(i.Key))
-		}
-		if string(i.Val) != mmTest.min {
-			t.Errorf("mmTestIdx: %v, expected Min item val: %v, but got: %v",
-				mmTestIdx, mmTest.min, string(i.Val))
-		}
-
-		i, err = mmTest.coll.MaxItem(true)
-		if err != nil {
-			t.Errorf("mmTestIdx: %v, expected no MaxItem error, but got err: %v",
-				mmTestIdx, err)
-		}
-		if i == nil {
-			t.Errorf("mmTestIdx: %v, expected MaxItem item, but got nil",
-				mmTestIdx)
-		}
-		if string(i.Key) != mmTest.max {
-			t.Errorf("mmTestIdx: %v, expected MaxItem item key: %v, but got: %v",
-				mmTestIdx, mmTest.max, string(i.Key))
-		}
-		if string(i.Val) != mmTest.max {
-			t.Errorf("mmTestIdx: %v, expected MaxItem item key: %v, but got: %v",
-			t.Errorf("mmTestIdx: %v, expected MaxItem item val: %v, but got: %v",
-		}
-	}
-
-	// ------------------------------------------------
-
-	// Try some withValue false.
-	f5a, err := os.Open(fname) // Another file reader.
-	s5a, err := NewStore(f5a)
-	x5a := s5a.GetCollection("x")
-
-	i, err = x5a.GetItem([]byte("a"), false)
-	if i == nil {
-		t.Error("was expecting item a")
-	}
-	if string(i.Key) != "a" {
-		t.Error("was expecting item a to have Key a")
-	}
-	if i.Val != nil {
-		t.Error("was expecting item a to have nil Val when withValue false")
-	}
-
-	i, err = x5a.GetItem([]byte("a"), true)
-	if i == nil {
-		t.Error("was expecting item a")
-	}
-	if string(i.Key) != "a" {
-		t.Error("was expecting item a to have Key a")
-	}
-	if string(i.Val) != "a" {
-		t.Error("was expecting item a to have Val a")
-	}
-
-	// ------------------------------------------------
-
-	// Exercising delete everything.
-	for _, k := range []string{"a", "b", "c", "d"} {
-		if _, err = x.Delete([]byte(k)); err != nil {
-			t.Errorf("expected Delete to have no error, err: %v", err)
-		}
-	}
-	if err := s.Flush(); err != nil {
-		t.Errorf("expected Flush() to have no error, err: %v", err)
-	}
-	f.Sync()
-
-	f6, err := os.Open(fname) // Another file reader.
-	s6, err := NewStore(f6)
-	x6 := s6.GetCollection("x")
-
-	visitExpectCollection(t, x, "a", []string{}, nil)
-	visitExpectCollection(t, x2, "a", []string{"a"}, nil)
-	visitExpectCollection(t, x3, "a", []string{"a", "b", "c"}, nil)
-	visitExpectCollection(t, x4, "a", []string{"a", "c"}, nil)
-	visitExpectCollection(t, x5, "a", []string{"a", "d"}, nil)
-	visitExpectCollection(t, x6, "a", []string{}, nil)
-
-	// ------------------------------------------------
-
-	ssnap := s.Snapshot()
-	if ssnap == nil {
-		t.Errorf("expected snapshot to work")
-	}
-	xsnap := ssnap.GetCollection("x")
-	if xsnap == nil {
-		t.Errorf("expected snapshot to have x")
-	}
-	err = xsnap.SetItem(&Item{
-		Key:      []byte("should-be-read-only"),
-		Val:      []byte("aaa"),
-		Priority: 100,
-	})
-	if err == nil {
-		t.Errorf("expected snapshot SetItem() error")
-	}
-	wd, err := xsnap.Delete([]byte("should-be-read-only"))
-	if err == nil || wd != false {
-		t.Errorf("expected snapshot Delete() error")
-	}
-	visitExpectCollection(t, xsnap, "a", []string{}, nil)
-	if ssnap.Flush() == nil {
-		t.Errorf("expected snapshot Flush() error")
-	}
-
-	s3snap := s3.Snapshot()
-	if s3snap == nil {
-		t.Errorf("expected snapshot to work")
-	}
-	x3snap := s3snap.GetCollection("x")
-	if x3snap == nil {
-		t.Errorf("expected snapshot to have x")
-	}
-	visitExpectCollection(t, x3snap, "a", []string{"a", "b", "c"}, nil)
-	if s3snap.Flush() == nil {
-		t.Errorf("expected snapshot Flush() error")
-	}
-
-	s3snap1 := s3.Snapshot()
-	if s3snap1 == nil {
-		t.Errorf("expected snapshot to work")
-	}
-	x3snap1 := s3snap1.GetCollection("x")
-	if x3snap1 == nil {
-		t.Errorf("expected snapshot to have x")
-	}
-	visitExpectCollection(t, x3snap1, "a", []string{"a", "b", "c"}, nil)
-	if s3snap1.Flush() == nil {
-		t.Errorf("expected snapshot Flush() error")
-	}
-
-	// ------------------------------------------------
-
-	// Exercise CopyTo.
-	ccTests := []struct {
-		src    *Store
-		fevery int
-		expect []string
-	}{
-		{s6, 1, []string{}},
-		{s5, 0, []string{"a", "d"}},
-		{s5, 2, []string{"a", "d"}},
-		{s5, 4, []string{"a", "d"}},
-		{s3snap, 2, []string{"a", "b", "c"}},
-		{s3snap1, 1, []string{"a", "b", "c"}},
-		{s3snap1, 0, []string{"a", "b", "c"}},
-		{s3, 1000, []string{"a", "b", "c"}},
-	}
-
-	for ccTestIdx, ccTest := range ccTests {
-		ccName := fmt.Sprintf("tmpCompactCopy-%v.test", ccTestIdx)
-		os.Remove(ccName)
-		ccFile, err := os.Create(ccName)
-		if err != nil || ccFile == nil {
-			t.Errorf("%v: could not create file: %v", ccTestIdx, ccName)
-		}
-		cc, err := ccTest.src.CopyTo(ccFile, ccTest.fevery)
-		if err != nil {
-			t.Errorf("%v: expected successful CopyTo, got: %v", ccTestIdx, err)
-		}
-		cx := cc.GetCollection("x")
-		if cx == nil {
-			t.Errorf("%v: expected successful CopyTo of x coll", ccTestIdx)
-		}
-		visitExpectCollection(t, cx, "a", ccTest.expect, nil)
-		if ccTest.fevery > 100 {
-			// Expect file to be more compact when there's less flushing.
-			finfoSrc, _ := ccTest.src.file.Stat()
-			finfoCpy, _ := cc.file.Stat()
-			if finfoSrc.Size() < finfoCpy.Size() {
-				t.Errorf("%v: expected copy to be smaller / compacted, "+
-					"src size: %v, cpy size: %v", ccTestIdx,
-					finfoSrc.Size(), finfoCpy.Size())
-			}
-		} else {
-			err = cc.Flush()
-			if err != nil {
-				t.Errorf("%v: expect Flush() to work on snapshot, got: %v",
-					ccTestIdx, err)
-			}
-		}
-		ccFile.Close()
-
-		// Reopen the snapshot and see that it's right.
-		ccFile, err = os.Open(ccName)
-		cc, err = NewStore(ccFile)
-		if err != nil {
-			t.Errorf("%v: expected successful NewStore against snapshot file, got: %v",
-				ccTestIdx, err)
-		}
-		cx = cc.GetCollection("x")
-		if cx == nil {
-			t.Errorf("%v: expected successful CopyTo of x coll", ccTestIdx)
-		}
-		visitExpectCollection(t, cx, "a", ccTest.expect, nil)
-		if ccTest.fevery > 100 {
-			// Expect file to be more compact when there's less flushing.
-			finfoSrc, _ := ccTest.src.file.Stat()
-			finfoCpy, _ := cc.file.Stat()
-			if finfoSrc.Size() < finfoCpy.Size() {
-				t.Errorf("%v: expected copy to be smaller / compacted, "+
-					"src size: %v, cpy size: %v", ccTestIdx,
-					finfoSrc.Size(), finfoCpy.Size())
-			}
-		}
-		ccFile.Close()
-		os.Remove(ccName)
-	}
-}
-
-func TestStoreMultipleCollections(t *testing.T) {
-	fname := "tmp.test"
-	os.Remove(fname)
-	f, err := os.Create(fname)
-	if err != nil || f == nil {
-		t.Errorf("could not create file: %v", fname)
-	}
-
-	s, err := NewStore(f)
-	if err != nil {
-		t.Errorf("expected NewStore to work")
-	}
-	if len(s.GetCollectionNames()) != 0 {
-		t.Errorf("expected no coll names on empty store")
-	}
-	x := s.SetCollection("x", nil)
-	y := s.SetCollection("y", nil)
-	z := s.SetCollection("z", nil)
-
-	loadCollection(x, []string{"e", "d", "a", "c", "b", "c", "a"})
-	loadCollection(y, []string{"1", "2", "3", "4", "5"})
-	loadCollection(z, []string{"D", "C", "B", "A"})
-
-	visitExpectCollection(t, x, "a", []string{"a", "b", "c", "d", "e"}, nil)
-	visitExpectCollection(t, y, "1", []string{"1", "2", "3", "4", "5"}, nil)
-	visitExpectCollection(t, z, "A", []string{"A", "B", "C", "D"}, nil)
-
-	numItems, numBytes, err := x.GetTotals()
-	if err != nil || numItems != 5 || numBytes != 10 {
-		t.Errorf("mismatched coll totals, got: %v, %v, %v",
-			numItems, numBytes, err)
-	}
-	numItems, numBytes, err = y.GetTotals()
-	if err != nil || numItems != 5 || numBytes != 10 {
-		t.Errorf("mismatched coll totals, got: %v, %v, %v",
-			numItems, numBytes, err)
-	}
-	numItems, numBytes, err = z.GetTotals()
-	if err != nil || numItems != 4 || numBytes != 8 {
-		t.Errorf("mismatched coll totals, got: %v, %v, %v",
-			numItems, numBytes, err)
-	}
-
-	err = s.Flush()
-	if err != nil {
-		t.Errorf("expected flush to work")
-	}
-	f.Close()
-
-	f1, err := os.OpenFile(fname, os.O_RDWR, 0666)
-	s1, err := NewStore(f1)
-	if err != nil {
-		t.Errorf("expected NewStore to work")
-	}
-	if len(s1.GetCollectionNames()) != 3 {
-		t.Errorf("expected 3 colls")
-	}
-
-	x1 := s1.GetCollection("x")
-	y1 := s1.GetCollection("y")
-	z1 := s1.GetCollection("z")
-
-	visitExpectCollection(t, x1, "a", []string{"a", "b", "c", "d", "e"}, nil)
-	visitExpectCollection(t, y1, "1", []string{"1", "2", "3", "4", "5"}, nil)
-	visitExpectCollection(t, z1, "A", []string{"A", "B", "C", "D"}, nil)
-
-	numItems, numBytes, err = x1.GetTotals()
-	if err != nil || numItems != 5 || numBytes != 10 {
-		t.Errorf("mismatched coll totals, got: %v, %v, %v",
-			numItems, numBytes, err)
-	}
-	numItems, numBytes, err = y1.GetTotals()
-	if err != nil || numItems != 5 || numBytes != 10 {
-		t.Errorf("mismatched coll totals, got: %v, %v, %v",
-			numItems, numBytes, err)
-	}
-	numItems, numBytes, err = z1.GetTotals()
-	if err != nil || numItems != 4 || numBytes != 8 {
-		t.Errorf("mismatched coll totals, got: %v, %v, %v",
-			numItems, numBytes, err)
-	}
-
-	s1.RemoveCollection("x")
-
-	// Reset the y collection.
-	s1.RemoveCollection("y")
-	s1.SetCollection("y", nil)
-
-	err = s1.Flush()
-	if err != nil {
-		t.Errorf("expected flush to work")
-	}
-	f1.Sync()
-	f1.Close()
-
-	f2, err := os.Open(fname)
-	s2, err := NewStore(f2)
-	if err != nil {
-		t.Errorf("expected NewStore to work")
-	}
-	if len(s2.GetCollectionNames()) != 2 {
-		t.Errorf("expected 2 colls, got %v", s2.GetCollectionNames())
-	}
-
-	x2 := s2.GetCollection("x")
-	y2 := s2.GetCollection("y")
-	z2 := s2.GetCollection("z")
-
-	if x2 != nil {
-		t.Errorf("expected x coll to be gone")
-	}
-
-	visitExpectCollection(t, y2, "1", []string{}, nil)
-	visitExpectCollection(t, z2, "A", []string{"A", "B", "C", "D"}, nil)
-
-	if err = z2.Set([]byte("hi"), []byte("world")); err != nil {
-		t.Errorf("expected set to work, got %v", err)
-	}
-
-	if err = s2.Flush(); err == nil {
-		t.Errorf("expected Flush() to fail on a readonly file")
-	}
-
-	numItems, numBytes, err = y2.GetTotals()
-	if err != nil || numItems != 0 || numBytes != 0 {
-		t.Errorf("mismatched coll totals, got: %v, %v, %v",
-			numItems, numBytes, err)
-	}
-	numItems, numBytes, err = z2.GetTotals()
-	if err != nil || numItems != 5 || numBytes != 8+2+5 {
-		t.Errorf("mismatched coll totals, got: %v, %v, %v",
-			numItems, numBytes, err)
-	}
-}
-
-func TestStoreConcurrentVisits(t *testing.T) {
-	fname := "tmp.test"
-	os.Remove(fname)
-	f, _ := os.Create(fname)
-	s, _ := NewStore(f)
-	x := s.SetCollection("x", nil)
-	loadCollection(x, []string{"e", "d", "a", "c", "b", "c", "a"})
-	visitExpectCollection(t, x, "a", []string{"a", "b", "c", "d", "e"}, nil)
-	s.Flush()
-	f.Close()
-
-	f1, _ := os.OpenFile(fname, os.O_RDWR, 0666)
-	s1, _ := NewStore(f1)
-	x1 := s1.GetCollection("x")
-
-	for i := 0; i < 100; i++ {
-		go func() {
-			visitExpectCollection(t, x1, "a", []string{"a", "b", "c", "d", "e"},
-				func(i *Item) {
-					runtime.Gosched() // Yield to test concurrency.
-				})
-		}()
-	}
-}
-
-func TestStoreConcurrentDeleteDuringVisits(t *testing.T) {
-	fname := "tmp.test"
-	os.Remove(fname)
-	f, _ := os.Create(fname)
-	s, _ := NewStore(f)
-	x := s.SetCollection("x", nil)
-	loadCollection(x, []string{"e", "d", "a", "c", "b", "c", "a"})
-	visitExpectCollection(t, x, "a", []string{"a", "b", "c", "d", "e"}, nil)
-	s.Flush()
-	f.Close()
-
-	f1, _ := os.OpenFile(fname, os.O_RDWR, 0666)
-	s1, _ := NewStore(f1)
-	x1 := s1.GetCollection("x")
-
-	exp := []string{"a", "b", "c", "d", "e"}
-	toDelete := int32(len(exp))
-
-	// Concurrent mutations like a delete should not affect a visit()
-	// that's already inflight.
-	visitExpectCollection(t, x1, "a", exp, func(i *Item) {
-		d := atomic.AddInt32(&toDelete, -1)
-		toDeleteKey := exp[d]
-		if _, err := x1.Delete([]byte(toDeleteKey)); err != nil {
-			t.Errorf("expected concurrent delete to work on key: %v, got: %v",
-				toDeleteKey, err)
-		}
-	})
-}
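
Illustrative sketch (not from the original sources) of the property the comment in this test relies on: a visit that is already in flight iterates the tree as of when it started, so mutating the same collection from inside the visitor callback does not disturb the iteration; the in-memory store and keys are made up:

package main

import (
	"fmt"
	"log"

	"github.com/steveyen/gkvlite"
)

func main() {
	s, _ := gkvlite.NewStore(nil) // in-memory; errors elided
	c := s.SetCollection("c", nil)
	for _, k := range []string{"a", "b", "c", "d", "e"} {
		c.Set([]byte(k), []byte(k))
	}

	seen := 0
	err := c.VisitItemsAscend([]byte("a"), true, func(i *gkvlite.Item) bool {
		seen++
		// Mutating mid-visit is safe: the visit keeps walking the
		// tree version it started from.
		c.Delete(i.Key)
		return true
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("visited", seen, "items") // expected: 5
}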
-
-func TestStoreConcurrentInsertDuringVisits(t *testing.T) {
-	fname := "tmp.test"
-	os.Remove(fname)
-	f, _ := os.Create(fname)
-	s, _ := NewStore(f)
-	x := s.SetCollection("x", nil)
-	loadCollection(x, []string{"e", "d", "a", "c", "b", "c", "a"})
-	visitExpectCollection(t, x, "a", []string{"a", "b", "c", "d", "e"}, nil)
-	s.Flush()
-	f.Close()
-
-	f1, _ := os.OpenFile(fname, os.O_RDWR, 0666)
-	s1, _ := NewStore(f1)
-	x1 := s1.GetCollection("x")
-
-	exp := []string{"a", "b", "c", "d", "e"}
-	add := []string{"A", "1", "E", "2", "C"}
-	toAdd := int32(0)
-
-	// Concurrent mutations like inserts should not affect a visit()
-	// that's already inflight.
-	visitExpectCollection(t, x1, "a", exp, func(i *Item) {
-		go func() {
-			a := atomic.AddInt32(&toAdd, 1)
-			toAddKey := []byte(add[a-1])
-			if err := x1.Set(toAddKey, toAddKey); err != nil {
-				t.Errorf("expected concurrent set to work on key: %v, got: %v",
-					toAddKey, err)
-			}
-		}()
-		runtime.Gosched() // Yield to test concurrency.
-	})
-}
-
-func TestWasDeleted(t *testing.T) {
-	s, _ := NewStore(nil)
-	x := s.SetCollection("x", bytes.Compare)
-	wasDeleted, err := x.Delete([]byte("notThere"))
-	if err != nil {
-		t.Errorf("expected no deletion error")
-	}
-	if wasDeleted {
-		t.Errorf("expected wasDeleted false")
-	}
-
-	loadCollection(x, []string{"a", "b"})
-	wasDeleted, err = x.Delete([]byte("notThere"))
-	if err != nil {
-		t.Errorf("expected no deletion error")
-	}
-	if wasDeleted {
-		t.Errorf("expected wasDeleted false")
-	}
-	wasDeleted, err = x.Delete([]byte("a"))
-	if err != nil {
-		t.Errorf("expected no deletion error")
-	}
-	if !wasDeleted {
-		t.Errorf("expected wasDeleted true")
-	}
-	wasDeleted, err = x.Delete([]byte("notThere"))
-	if err != nil {
-		t.Errorf("expected no deletion error")
-	}
-	if wasDeleted {
-		t.Errorf("expected wasDeleted false")
-	}
-}
-
-func BenchmarkSets(b *testing.B) {
-	s, _ := NewStore(nil)
-	x := s.SetCollection("x", nil)
-	keys := make([][]byte, 20)
-	for i := 0; i < len(keys); i++ {
-		keys[i] = []byte(strconv.Itoa(i % len(keys)))
-	}
-	v := []byte("")
-	b.ResetTimer() // Ignore time from above.
-	for i := 0; i < b.N; i++ {
-		x.Set(keys[i%len(keys)], v)
-	}
-}
-
-func BenchmarkGets(b *testing.B) {
-	s, _ := NewStore(nil)
-	x := s.SetCollection("x", nil)
-	v := []byte("")
-	for i := 0; i < b.N; i++ {
-		x.Set([]byte(strconv.Itoa(i)), v)
-	}
-	b.ResetTimer() // Ignore time from above.
-	for i := 0; i < b.N; i++ {
-		x.Get([]byte(strconv.Itoa(i)))
-	}
-}
-
-func TestSizeof(t *testing.T) {
-	t.Logf("sizeof various structs and types, in bytes...")
-	t.Logf("  node: %v", unsafe.Sizeof(node{}))
-	t.Logf("  nodeLoc: %v", unsafe.Sizeof(nodeLoc{}))
-	t.Logf("  Item: %v", unsafe.Sizeof(Item{}))
-	t.Logf("  itemLoc: %v", unsafe.Sizeof(itemLoc{}))
-	t.Logf("  ploc: %v", unsafe.Sizeof(ploc{}))
-	t.Logf("  []byte: %v", unsafe.Sizeof([]byte{}))
-	t.Logf("  uint32: %v", unsafe.Sizeof(uint32(0)))
-	t.Logf("  uint64: %v", unsafe.Sizeof(uint64(0)))
-}
-
-func TestPrivateCollection(t *testing.T) {
-	s, _ := NewStore(nil)
-	x := s.MakePrivateCollection(nil)
-	if x == nil {
-		t.Errorf("expected private collection")
-	}
-	if len(s.GetCollectionNames()) != 0 {
-		t.Errorf("expected private collection to be unlisted")
-	}
-}
-
-func TestBadStoreFile(t *testing.T) {
-	fname := "tmp.test"
-	os.Remove(fname)
-	ioutil.WriteFile(fname, []byte("not a real store file"), 0600)
-	defer os.Remove(fname)
-	f, err := os.Open(fname)
-	if err != nil || f == nil {
-		t.Errorf("could not reopen file: %v", fname)
-	}
-	s, err := NewStore(f)
-	if err == nil {
-		t.Errorf("expected NewStore(f) to fail")
-	}
-	if s != nil {
-		t.Errorf("expected NewStore(f) to fail with s")
-	}
-}
-
-func TestEvictSomeItems(t *testing.T) {
-	fname := "tmp.test"
-	os.Remove(fname)
-	f, _ := os.Create(fname)
-	s, _ := NewStore(f)
-	x := s.SetCollection("x", nil)
-	numEvicted := uint64(0)
-	loadCollection(x, []string{"e", "d", "a", "c", "b", "c", "a"})
-	for i := 0; i < 1000; i++ {
-		visitExpectCollection(t, x, "a", []string{"a", "b", "c", "d", "e"}, nil)
-		s.Flush()
-		numEvicted += x.EvictSomeItems()
-	}
-	for i := 0; i < 1000; i++ {
-		visitExpectCollection(t, x, "a", []string{"a", "b", "c", "d", "e"}, nil)
-		loadCollection(x, []string{"e", "d", "a"})
-		s.Flush()
-		numEvicted += x.EvictSomeItems()
-	}
-	if numEvicted == 0 {
-		t.Errorf("expected some evictions")
-	}
-}
-
-func TestJoinWithFileErrors(t *testing.T) {
-	fname := "tmp.test"
-	os.Remove(fname)
-	f, _ := os.Create(fname)
-	defer os.Remove(fname)
-
-	errAfter := 0x1000000
-	numReads := 0
-
-	m := &mockfile{
-		f: f,
-		readat: func(p []byte, off int64) (n int, err error) {
-			numReads++
-			if numReads >= errAfter {
-				return 0, errors.New("mockfile error")
-			}
-			return f.ReadAt(p, off)
-		},
-	}
-
-	sOrig, _ := NewStore(m)
-	xOrig := sOrig.SetCollection("x", nil)
-	if m.numStat != 1 {
-		t.Errorf("expected 1 numStat, got: %#v", m)
-	}
-	if m.numTruncate != 0 {
-		t.Errorf("expected 0 truncates, got: %#v", m)
-	}
-
-	loadCollection(xOrig, []string{"a", "b", "c", "d", "e", "f"})
-	sOrig.Flush()
-	if m.numStat != 1 {
-		t.Errorf("expected 1 numStat, got: %#v", m)
-	}
-	if m.numTruncate != 0 {
-		t.Errorf("expected 0 truncates, got: %#v", m)
-	}
-
-	fname2 := "tmp2.test"
-	os.Remove(fname2)
-	f2, _ := os.Create(fname2)
-	defer os.Remove(fname2)
-
-	sMore, _ := NewStore(f2)
-	xMore := sMore.SetCollection("x", nil)
-	loadCollection(xMore, []string{"5", "4", "3", "2", "1", "0"})
-	sMore.Flush()
-
-	var res *nodeLoc
-	var err error
-
-	// ----------------------------------------
-
-	errAfter = 0x10000000 // Attempt with no errors.
-	numReads = 0
-
-	s2, _ := NewStore(m)
-	x2 := s2.GetCollection("x")
-
-	rnl := x2.root
-	if rnl == nil {
-		t.Errorf("expected rnl")
-	}
-	root := rnl.root
-	if root == nil || root.isEmpty() {
-		t.Errorf("expected an x2 root")
-	}
-	if root.Loc().isEmpty() {
-		t.Errorf("expected an x2 root to be on disk")
-	}
-	if root.Node() != nil {
-		t.Errorf("expected an x2 root to not be loaded into memory yet")
-	}
-
-	errAfter = 0x10000000 // Attempt with no errors.
-	numReads = 0
-
-	res, err = s2.join(x2, root, empty_nodeLoc, nil)
-	if err != nil {
-		t.Errorf("expected no error")
-	}
-	if res.isEmpty() {
-		t.Errorf("expected non-empty res")
-	}
-	if numReads == 0 {
-		t.Errorf("expected some reads, got: %v", numReads)
-	}
-
-	// ----------------------------------------
-
-	for i := 1; i < 7; i++ {
-		errAfter = 0x10000000 // Attempt with no errors.
-		numReads = 0
-
-		s2, err = NewStore(m)
-		if err != nil {
-			t.Errorf("expected NewStore m to work on loop: %v", i)
-		}
-		s3, err := NewStore(f2)
-		if err != nil {
-			t.Errorf("expected NewStore f2 to work on loop: %v", i)
-		}
-
-		x2 = s2.GetCollection("x")
-		x3 := s3.GetCollection("x")
-
-		rnl2 := x2.root
-		root2 := rnl2.root
-		if root2 == nil || root2.isEmpty() {
-			t.Errorf("expected an x2 root")
-		}
-		if root2.Loc().isEmpty() {
-			t.Errorf("expected an x2 root to be on disk")
-		}
-		if root2.Node() != nil {
-			t.Errorf("expected an x2 root to not be loaded into memory yet")
-		}
-
-		rnl3 := x3.root
-		root3 := rnl3.root
-		if root3 == nil || root3.isEmpty() {
-			t.Errorf("expected an x3 root")
-		}
-		if root3.Loc().isEmpty() {
-			t.Errorf("expected an x3 root to be on disk")
-		}
-		if root3.Node() != nil {
-			t.Errorf("expected an x3 root to not be loaded into memory yet")
-		}
-
-		errAfter = i
-		numReads = 0
-
-		res, err = s2.join(x2, root2, root3, nil)
-		if err == nil {
-			t.Errorf("expected error due to mockfile errorAfter %v, got nil", errAfter)
-		}
-		if !res.isEmpty() {
-			t.Errorf("expected empty res due to mockfile error")
-		}
-		if numReads == 0 {
-			t.Errorf("expected some reads, got: %v", numReads)
-		}
-	}
-}
-
-func TestStoreStats(t *testing.T) {
-	freeNodes = nil
-
-	m := map[string]uint64{}
-	n := map[string]uint64{}
-
-	s, err := NewStore(nil)
-	if err != nil || s == nil {
-		t.Errorf("expected memory-only NewStore to work")
-	}
-	x := s.SetCollection("x", bytes.Compare)
-	s.Stats(m)
-	if m["fileSize"] != 0 {
-		t.Errorf("expected 0 fileSize, got: %v", m["fileSize"])
-	}
-	if m["nodeAllocs"] != 0 {
-		t.Errorf("expected 0 nodeAllocs, got: %v", m["nodeAllocs"])
-	}
-
-	x.Set([]byte("hello"), []byte("world"))
-	s.Stats(n)
-	if n["fileSize"] != m["fileSize"] {
-		t.Errorf("expected 0 fileSize, got: %#v, %#v", n, m)
-	}
-	if n["nodeAllocs"] != m["nodeAllocs"]+1 {
-		t.Errorf("expected 1 nodeAllocs, got: %#v, %#v", n, m)
-	}
-
-	x.Set([]byte("hello"), []byte("there"))
-	s.Stats(n)
-	if n["fileSize"] != m["fileSize"] {
-		t.Errorf("expected 0 fileSize, got: %#v, %#v", n, m)
-	}
-	if n["nodeAllocs"] != m["nodeAllocs"]+3 {
-		t.Errorf("expected 3 nodeAllocs, got: %#v, %#v", n, m)
-	}
-
-	x.Delete([]byte("hello"))
-	s.Stats(n)
-	if n["fileSize"] != m["fileSize"] {
-		t.Errorf("expected 0 fileSize, got: %#v, %#v", n, m)
-	}
-	if n["nodeAllocs"] != m["nodeAllocs"]+3 {
-		t.Errorf("expected 3 nodeAllocs, got: %#v, %#v", n, m)
-	}
-}
-
-func TestVisitItemsAscendEx(t *testing.T) {
-	s, err := NewStore(nil)
-	if err != nil || s == nil {
-		t.Errorf("expected memory-only NewStore to work")
-	}
-	x := s.SetCollection("x", bytes.Compare)
-	n := 40
-	for i := 0; i < n; i++ {
-		k := fmt.Sprintf("%v", i)
-		x.Set([]byte(k), []byte(k))
-	}
-	maxDepth := uint64(0)
-	t.Logf("dumping tree for manual balancedness check")
-	err = x.VisitItemsAscendEx(nil, true, func(i *Item, depth uint64) bool {
-		o := ""
-		for i := 0; uint64(i) < depth; i++ {
-			o = o + "-"
-		}
-		t.Logf("=%v%v", o, string(i.Key))
-		if maxDepth < depth {
-			maxDepth = depth
-		}
-		return true
-	})
-	if err != nil {
-		t.Errorf("expected visit ex to work, got: %v", err)
-	}
-	if maxDepth >= uint64(n*3/4) {
-		t.Errorf("expected maxDepth to not be so unlucky, got: %v, n: %v",
-			maxDepth, n)
-	}
-}
-
-func TestVisitItemsDescend(t *testing.T) {
-	s, err := NewStore(nil)
-	if err != nil || s == nil {
-		t.Errorf("expected memory-only NewStore to work")
-	}
-	x := s.SetCollection("x", bytes.Compare)
-	loadCollection(x, []string{"e", "d", "a", "c", "b", "c", "a"})
-
-	visitDescendExpectCollection(t, x, "z", []string{"e", "d", "c", "b", "a"}, nil)
-	visitDescendExpectCollection(t, x, "e", []string{"d", "c", "b", "a"}, nil)
-	visitDescendExpectCollection(t, x, "b", []string{"a"}, nil)
-	visitDescendExpectCollection(t, x, "a", []string{}, nil)
-	visitDescendExpectCollection(t, x, "", []string{}, nil)
-}
-
-func TestKeyCompareForCollectionCallback(t *testing.T) {
-	fname := "tmp.test"
-	os.Remove(fname)
-	f, _ := os.Create(fname)
-	s, _ := NewStore(f)
-	x := s.SetCollection("x", nil)
-	loadCollection(x, []string{"e", "d", "a", "c", "b", "c", "a"})
-	visitExpectCollection(t, x, "a", []string{"a", "b", "c", "d", "e"}, nil)
-	s.Flush()
-	f.Close()
-
-	comparisons := 0
-	myKeyCompare := func(a, b []byte) int {
-		comparisons++
-		return bytes.Compare(a, b)
-	}
-
-	f1, _ := os.OpenFile(fname, os.O_RDWR, 0666)
-	s1, err := NewStoreEx(f1, StoreCallbacks{
-		KeyCompareForCollection: func(collName string) KeyCompare {
-			return myKeyCompare
-		},
-	})
-	if err != nil {
-		t.Errorf("expected NewStoreEx with non-nil KeyCompareForCollection to work")
-	}
-	x1 := s1.GetCollection("x")
-	visitExpectCollection(t, x1, "a", []string{"a", "b", "c", "d", "e"}, nil)
-	x1.Get([]byte("a"))
-	x1.Get([]byte("b"))
-
-	if comparisons == 0 {
-		t.Errorf("expected invocations of myKeyCompare")
-	}
-	if x1.Name() != "x" {
-		t.Errorf("expected same name after reload")
-	}
-}
-
-func TestMemoryDeleteEveryItem(t *testing.T) {
-	s, err := NewStore(nil)
-	if err != nil || s == nil {
-		t.Errorf("expected memory-only NewStore to work")
-	}
-	testDeleteEveryItem(t, s, 10000, 100, func() {})
-}
-
-func TestPersistDeleteEveryItem(t *testing.T) {
-	fname := "tmp.test"
-	os.Remove(fname)
-	defer os.Remove(fname)
-	f, _ := os.Create(fname)
-	s, _ := NewStore(f)
-	c := 0
-	testDeleteEveryItem(t, s, 10000, 100, func() {
-		c++
-		err := s.Flush()
-		if err != nil {
-			t.Errorf("expected Flush to work, err: %v", err)
-		}
-	})
-	if c == 0 {
-		t.Errorf("expected cb to get invoked")
-	}
-	err := s.Flush()
-	if err != nil {
-		t.Errorf("expected last Flush to work, err: %v", err)
-	}
-	f.Close()
-
-	f2, err := os.Open(fname)
-	if err != nil {
-		t.Errorf("expected re-Open() to work, err: %v", err)
-	}
-	s2, err := NewStore(f2)
-	if err != nil {
-		t.Errorf("expected NewStore to work, err: %v", err)
-	}
-	x := s2.SetCollection("x", bytes.Compare)
-	if x == nil {
-		t.Errorf("expected x to be there")
-	}
-	m := 0
-	err = x.VisitItemsAscend(nil, true, func(i *Item) bool {
-		m++
-		return true
-	})
-	if err != nil {
-		t.Errorf("expected Visit to work, err: %v", err)
-	}
-	if m != 0 {
-		t.Errorf("expected 0 items, got: %v", m)
-	}
-	f2.Close()
-}
-
-func testDeleteEveryItem(t *testing.T, s *Store, n int, every int,
-	cb func()) {
-	x := s.SetCollection("x", bytes.Compare)
-	for i := 0; i < n; i++ {
-		err := x.Set([]byte(fmt.Sprintf("%d", i)), []byte{})
-		if err != nil {
-			t.Errorf("expected SetItem to work, %v", i)
-		}
-		if i%every == 0 {
-			cb()
-		}
-	}
-	m := 0
-	x.VisitItemsAscend(nil, true, func(i *Item) bool {
-		m++
-		return true
-	})
-	if m != n {
-		t.Errorf("expected %v items, got: %v", n, m)
-	}
-	for i := 0; i < n; i++ {
-		wasDeleted, err := x.Delete([]byte(fmt.Sprintf("%d", i)))
-		if err != nil {
-			t.Errorf("expected Delete to work, %v", i)
-		}
-		if !wasDeleted {
-			t.Errorf("expected Delete to actually delete")
-		}
-		if i%every == 0 {
-			cb()
-		}
-	}
-	m = 0
-	x.VisitItemsAscend(nil, true, func(i *Item) bool {
-		m++
-		return true
-	})
-	if m != 0 {
-		t.Errorf("expected 0 items, got: %v", m)
-	}
-}
-
-func TestItemCopy(t *testing.T) {
-	i := &Item{
-		Key:       []byte("hi"),
-		Val:       []byte("world"),
-		Priority:  1234,
-		Transient: unsafe.Pointer(&node{}),
-	}
-	j := i.Copy()
-	if !bytes.Equal(j.Key, i.Key) || !bytes.Equal(j.Val, i.Val) ||
-		j.Priority != i.Priority || j.Transient != i.Transient {
-		t.Errorf("expected item copy to work")
-	}
-}
-
-func TestCurFreeNodes(t *testing.T) {
-	s, err := NewStore(nil)
-	if err != nil || s == nil {
-		t.Errorf("expected memory-only NewStore to work")
-	}
-	x := s.SetCollection("x", bytes.Compare)
-	n := x.mkNode(empty_itemLoc, empty_nodeLoc, empty_nodeLoc, 0, 0)
-	f := allocStats
-	x.freeNode_unlocked(n, nil)
-	if f.FreeNodes+1 != allocStats.FreeNodes {
-		t.Errorf("expected freeNodes to increment")
-	}
-	if f.CurFreeNodes+1 != allocStats.CurFreeNodes {
-		t.Errorf("expected CurFreeNodes + 1 == allocStats.CurFreeNodes, got: %v, %v",
-			f.CurFreeNodes+1, allocStats.CurFreeNodes)
-	}
-}
-
-func TestCollectionStats(t *testing.T) {
-	s, err := NewStore(nil)
-	if err != nil || s == nil {
-		t.Errorf("expected memory-only NewStore to work")
-	}
-	x := s.SetCollection("x", bytes.Compare)
-	g := x.AllocStats()
-	loadCollection(x, []string{"e", "d", "a", "c", "b", "c", "a"})
-	h := x.AllocStats()
-	if h.MkNodes <= g.MkNodes {
-		t.Errorf("expected MkNodes to be more")
-	}
-	if h.MkNodeLocs <= g.MkNodeLocs {
-		t.Errorf("expected MkNodeLocs to be more")
-	}
-	if h.MkRootNodeLocs <= g.MkRootNodeLocs {
-		t.Errorf("expected MkRootNodeLocs to be more")
-	}
-}
-
-func TestDump(t *testing.T) {
-	s, err := NewStore(nil)
-	if err != nil || s == nil {
-		t.Errorf("expected memory-only NewStore to work")
-	}
-	x := s.SetCollection("x", bytes.Compare)
-	loadCollection(x, []string{"e", "d", "a", "c", "b", "c", "a"})
-	fmt.Printf("testing dump\n")
-	dump(s, x.root.root, 1)
-}
-
-func TestStoreClose(t *testing.T) {
-	s, err := NewStore(nil)
-	if err != nil || s == nil {
-		t.Errorf("expected memory-only NewStore to work")
-	}
-	if s.coll == unsafe.Pointer(nil) {
-		t.Errorf("expected coll before Close()")
-	}
-	s.Close()
-	if s.coll != unsafe.Pointer(nil) {
-		t.Errorf("expected no coll after Close()")
-	}
-	s.Close()
-	if s.coll != unsafe.Pointer(nil) {
-		t.Errorf("expected no coll after re-Close()")
-	}
-	s.Close()
-	if s.coll != unsafe.Pointer(nil) {
-		t.Errorf("expected no coll after re-Close()")
-	}
-
-	// Now, with a collection
-	s, _ = NewStore(nil)
-	s.SetCollection("x", bytes.Compare)
-	if s.coll == unsafe.Pointer(nil) {
-		t.Errorf("expected coll before Close()")
-	}
-	s.Close()
-	if s.coll != unsafe.Pointer(nil) {
-		t.Errorf("expected no coll after Close()")
-	}
-	s.Close()
-	if s.coll != unsafe.Pointer(nil) {
-		t.Errorf("expected no coll after Close()")
-	}
-}
-
-func TestItemNumValBytes(t *testing.T) {
-	var x *Collection
-	var h *Item
-	s, err := NewStoreEx(nil, StoreCallbacks{
-		ItemValLength: func(c *Collection, i *Item) int {
-			if c != x {
-				t.Errorf("expected colls to be the same")
-			}
-			if h != i {
-				t.Errorf("expected items to be the same")
-			}
-			return 1313
-		},
-	})
-	if err != nil || s == nil {
-		t.Errorf("expected memory-only NewStore to work")
-	}
-	x = s.SetCollection("x", bytes.Compare)
-	h = &Item{}
-	if 1313 != h.NumValBytes(x) {
-		t.Errorf("expected NumValBytes to be 1313")
-	}
-}
-
-func TestReclaimRootChain(t *testing.T) {
-	s, err := NewStore(nil)
-	if err != nil || s == nil {
-		t.Errorf("expected memory-only NewStore to work")
-	}
-	x := s.SetCollection("x", bytes.Compare)
-	x.SetItem(&Item{
-		Key:      []byte("a"),
-		Val:      []byte("aaa"),
-		Priority: 100,
-	})
-	x.SetItem(&Item{
-		Key:      []byte("b"),
-		Val:      []byte("bbb"),
-		Priority: 200,
-	})
-	s2 := s.Snapshot()
-	x2 := s2.GetCollection("x")
-	x.SetItem(&Item{
-		Key:      []byte("b"),
-		Val:      []byte("bbbb"),
-		Priority: 200,
-	})
-	x.SetItem(&Item{
-		Key:      []byte("a"),
-		Val:      []byte("aaaa"),
-		Priority: 100,
-	})
-	v, err := x.Get([]byte("a"))
-	if v == nil || !bytes.Equal(v, []byte("aaaa")) {
-		t.Errorf("expected aaaa, got: %v\n", v)
-	}
-	v, err = x2.Get([]byte("a"))
-	if v == nil || !bytes.Equal(v, []byte("aaa")) {
-		t.Errorf("expected aaa, got: %v\n", v)
-	}
-}
-
-func TestReclaimRootChainMultipleMutations(t *testing.T) {
-	s, err := NewStore(nil)
-	if err != nil || s == nil {
-		t.Errorf("expected memory-only NewStore to work")
-	}
-	x := s.SetCollection("x", bytes.Compare)
-	x.SetItem(&Item{
-		Key:      []byte("a"),
-		Val:      []byte("aaa"),
-		Priority: 100,
-	})
-	x.SetItem(&Item{
-		Key:      []byte("b"),
-		Val:      []byte("bbb"),
-		Priority: 200,
-	})
-	s2 := s.Snapshot()
-	x2 := s2.GetCollection("x")
-	x.SetItem(&Item{
-		Key:      []byte("b"),
-		Val:      []byte("bbbb"),
-		Priority: 200,
-	})
-	s3 := s.Snapshot()
-	x3 := s3.GetCollection("x")
-	x.SetItem(&Item{
-		Key:      []byte("a"),
-		Val:      []byte("aaaa"),
-		Priority: 100,
-	})
-	v, err := x.Get([]byte("a"))
-	if v == nil || !bytes.Equal(v, []byte("aaaa")) {
-		t.Errorf("expected aaaa, got: %v\n", v)
-	}
-	v, err = x2.Get([]byte("a"))
-	if v == nil || !bytes.Equal(v, []byte("aaa")) {
-		t.Errorf("expected aaa, got: %v\n", v)
-	}
-	v, err = x3.Get([]byte("a"))
-	if v == nil || !bytes.Equal(v, []byte("aaa")) {
-		t.Errorf("expected aaaa, got: %v\n", v)
-	}
-	s2.Close()
-	v, err = x.Get([]byte("a"))
-	if v == nil || !bytes.Equal(v, []byte("aaaa")) {
-		t.Errorf("expected aaaa, got: %v\n", v)
-	}
-	v, err = x3.Get([]byte("a"))
-	if v == nil || !bytes.Equal(v, []byte("aaa")) {
-		t.Errorf("expected aaa, got: %v\n", v)
-	}
-}
-
-func TestMemoryFlushRevert(t *testing.T) {
-	s, _ := NewStore(nil)
-	err := s.FlushRevert()
-	if err == nil {
-		t.Errorf("expected memory-only FlushRevert to fail")
-	}
-	x := s.SetCollection("x", bytes.Compare)
-	x.SetItem(&Item{
-		Key:      []byte("a"),
-		Val:      []byte("aaa"),
-		Priority: 100,
-	})
-	err = s.FlushRevert()
-	if err == nil {
-		t.Errorf("expected memory-only FlushRevert to fail")
-	}
-}
-
-func TestFlushRevertEmptyStore(t *testing.T) {
-	fname := "tmp.test"
-	os.Remove(fname)
-	f, _ := os.Create(fname)
-	s, _ := NewStore(f)
-	err := s.FlushRevert()
-	if err != nil {
-		t.Errorf("expected flush revert on empty store to work, err: %v", err)
-	}
-	s.SetCollection("x", nil)
-	err = s.FlushRevert()
-	if err != nil {
-		t.Errorf("expected flush revert on empty store to work, err: %v", err)
-	}
-	if s.GetCollection("x") != nil {
-		t.Errorf("expected flush revert to provide no collections")
-	}
-	s.Flush()
-	err = s.FlushRevert()
-	if err != nil {
-		t.Errorf("expected flush revert on empty store to work, err: %v", err)
-	}
-	if s.GetCollection("x") != nil {
-		t.Errorf("expected flush revert to provide no collections")
-	}
-	stat, err := f.Stat()
-	if err != nil {
-		t.Errorf("expected stat to work")
-	}
-	if stat.Size() != 0 {
-		t.Errorf("expected file size to be 0, got: %v", stat.Size())
-	}
-}
-
-func TestFlushRevert(t *testing.T) {
-	fname := "tmp.test"
-	os.Remove(fname)
-	f, err := os.Create(fname)
-	s, _ := NewStore(f)
-	x := s.SetCollection("x", nil)
-	x.SetItem(&Item{
-		Key:      []byte("a"),
-		Val:      []byte("aaa"),
-		Priority: 100,
-	})
-	s.Flush()
-	stat0, err := f.Stat()
-	if err != nil {
-		t.Errorf("expected stat to work")
-	}
-	if stat0.Size() <= rootsLen {
-		t.Errorf("expected file size to be >0, got: %v", stat0.Size())
-	}
-	x.SetItem(&Item{
-		Key:      []byte("b"),
-		Val:      []byte("bbb"),
-		Priority: 200,
-	})
-	s.Flush()
-	stat1, err := f.Stat()
-	if err != nil {
-		t.Errorf("expected stat to work")
-	}
-	if stat1.Size() <= stat0.Size()+rootsLen {
-		t.Errorf("expected file size to be larger, got: %v vs %v",
-			stat1.Size(), stat0.Size()+rootsLen)
-	}
-
-	ss := s.Snapshot()
-	err = ss.FlushRevert()
-	if err != nil {
-		t.Errorf("expected flush revert on empty store to work, err: %v", err)
-	}
-	x = ss.GetCollection("x")
-	if x == nil {
-		t.Errorf("expected flush revert to still have collection x")
-	}
-	sstat3, err := f.Stat()
-	if err != nil {
-		t.Errorf("expected stat to work")
-	}
-	if sstat3.Size() != stat1.Size() {
-		t.Errorf("expected snapshot post-flush-revert file size to same, got: %v vs %v",
-			sstat3.Size(), stat1.Size())
-	}
-	aval, err := x.Get([]byte("a"))
-	if err != nil || aval == nil || string(aval) != "aaa" {
-		t.Errorf("expected post-flush-revert a to be there, got: %v, %v", aval, err)
-	}
-	bval, err := x.Get([]byte("b"))
-	if err != nil || bval != nil {
-		t.Errorf("expected post-flush-revert b to be gone, got: %v, %v", bval, err)
-	}
-	visitExpectCollection(t, x, "a", []string{"a"}, nil)
-	for i := 0; i < 10; i++ {
-		err = ss.FlushRevert()
-		if err != nil {
-			t.Errorf("expected another flush revert to work, err: %v", err)
-		}
-		x = ss.GetCollection("x")
-		if x != nil {
-			t.Errorf("expected flush revert to still have no collection")
-		}
-		statx, err := f.Stat()
-		if err != nil {
-			t.Errorf("expected stat to work")
-		}
-		if statx.Size() != stat1.Size() {
-			t.Errorf("expected snapshot flush-revert to have same file size, got: %v",
-				statx.Size())
-		}
-	}
-
-	err = s.FlushRevert()
-	if err != nil {
-		t.Errorf("expected flush revert on empty store to work, err: %v", err)
-	}
-	x = s.GetCollection("x")
-	if x == nil {
-		t.Errorf("expected flush revert to still have collection x")
-	}
-	stat3, err := f.Stat()
-	if err != nil {
-		t.Errorf("expected stat to work")
-	}
-	if stat3.Size() != stat0.Size() {
-		t.Errorf("expected post-flush-revert file size to be reverted, got: %v vs %v",
-			stat3.Size(), stat0.Size())
-	}
-	aval, err = x.Get([]byte("a"))
-	if err != nil || aval == nil || string(aval) != "aaa" {
-		t.Errorf("expected post-flush-revert a to be there, got: %v, %v", aval, err)
-	}
-	bval, err = x.Get([]byte("b"))
-	if err != nil || bval != nil {
-		t.Errorf("expected post-flush-revert b to be gone, got: %v, %v", bval, err)
-	}
-	visitExpectCollection(t, x, "a", []string{"a"}, nil)
-	for i := 0; i < 10; i++ {
-		err = s.FlushRevert()
-		if err != nil {
-			t.Errorf("expected another flush revert to work, err: %v", err)
-		}
-		x = s.GetCollection("x")
-		if x != nil {
-			t.Errorf("expected flush revert to still have no collection")
-		}
-		statx, err := f.Stat()
-		if err != nil {
-			t.Errorf("expected stat to work")
-		}
-		if statx.Size() != 0 {
-			t.Errorf("expected too many flush-revert to bring file size 0, got: %v",
-				statx.Size())
-		}
-	}
-}
-
-func TestFlushRevertWithReadError(t *testing.T) {
-	fname := "tmp.test"
-	os.Remove(fname)
-	f, err := os.Create(fname)
-	defer os.Remove(fname)
-
-	readShouldErr := false
-	m := &mockfile{
-		f: f,
-		readat: func(p []byte, off int64) (n int, err error) {
-			if readShouldErr {
-				return 0, errors.New("mockfile error")
-			}
-			return f.ReadAt(p, off)
-		},
-	}
-
-	s, _ := NewStore(m)
-	x := s.SetCollection("x", nil)
-	x.SetItem(&Item{
-		Key:      []byte("a"),
-		Val:      []byte("aaa"),
-		Priority: 100,
-	})
-	s.Flush()
-
-	readShouldErr = true
-	err = s.FlushRevert()
-	if err == nil {
-		t.Errorf("expected FlushRevert to error")
-	}
-}
-
-func TestCollectionMisc(t *testing.T) {
-	s, err := NewStore(nil)
-	if err != nil || s == nil {
-		t.Errorf("expected memory-only NewStore to work")
-	}
-	x := s.SetCollection("x", bytes.Compare)
-
-	e0, e1, e2, err := s.split(x, empty_nodeLoc, nil, nil)
-	if err != nil ||
-		e0 != empty_nodeLoc ||
-		e1 != empty_nodeLoc ||
-		e2 != empty_nodeLoc {
-		t.Errorf("expected split of empty node loc to be empty")
-	}
-
-	b, err := x.MarshalJSON()
-	if err != nil {
-		t.Errorf("expected MarshalJSON to work")
-	}
-	c, err := x.rootAddRef().MarshalJSON()
-	if err != nil {
-		t.Errorf("expected MarshalJSON to work")
-	}
-	if !bytes.Equal(b, c) {
-		t.Errorf("expected MarshalJSON to be same")
-	}
-	if x.Write() != nil {
-		t.Errorf("expected Write to be nil")
-	}
-	if x.UnmarshalJSON([]byte{0}) == nil {
-		t.Errorf("expected UnmarshalJSON to fail")
-	}
-	if x.UnmarshalJSON([]byte("{}")) == nil {
-		t.Errorf("expected UnmarshalJSON to fail")
-	}
-	i := &Item{
-		Key:      []byte("a"),
-		Val:      []byte("aaa"),
-		Priority: 100,
-	}
-	x.SetItem(i)
-	i.Key = nil // Introduce bad condition.
-	_, err = x.Get([]byte("a"))
-	if err == nil {
-		t.Errorf("expected Get() to err")
-	}
-	i.Key = []byte("a")
-}
-
-func TestDoubleFreeNode(t *testing.T) {
-	s, err := NewStore(nil)
-	if err != nil || s == nil {
-		t.Errorf("expected memory-only NewStore to work")
-	}
-	x := s.SetCollection("x", bytes.Compare)
-	n := x.mkNode(nil, nil, nil, 1, 0)
-	c := 0
-	defer func() {
-		if r := recover(); r == nil {
-			t.Errorf("expected panic with double free")
-		}
-		if c != 2 {
-			t.Errorf("expected c to be 2")
-		}
-	}()
-	withAllocLocks(func() {
-		x.freeNode_unlocked(nil, nil)
-		c++
-		x.freeNode_unlocked(n, nil)
-		c++
-		x.freeNode_unlocked(n, nil)
-		c++
-	})
-}
-
-func TestDoubleFreeNodeLoc(t *testing.T) {
-	s, err := NewStore(nil)
-	if err != nil || s == nil {
-		t.Errorf("expected memory-only NewStore to work")
-	}
-	x := s.SetCollection("x", bytes.Compare)
-	n := x.mkNode(nil, nil, nil, 1, 0)
-	nl := x.mkNodeLoc(n)
-	c := 0
-	defer func() {
-		if r := recover(); r == nil {
-			t.Errorf("expected panic with double free")
-		}
-		if c != 2 {
-			t.Errorf("expected c to be 2")
-		}
-	}()
-	x.freeNodeLoc(nil)
-	c++
-	x.freeNodeLoc(nl)
-	c++
-	x.freeNodeLoc(nl)
-	c++
-}
-
-func TestDoubleFreeRootNodeLoc(t *testing.T) {
-	s, err := NewStore(nil)
-	if err != nil || s == nil {
-		t.Errorf("expected memory-only NewStore to work")
-	}
-	x := s.SetCollection("x", bytes.Compare)
-	n := x.mkNode(nil, nil, nil, 1, 0)
-	nl := x.mkNodeLoc(n)
-	rnl := x.mkRootNodeLoc(nl)
-	c := 0
-	defer func() {
-		if r := recover(); r == nil {
-			t.Errorf("expected panic with double free")
-		}
-		if c != 2 {
-			t.Errorf("expected c to be 2")
-		}
-	}()
-	x.freeRootNodeLoc(nil)
-	c++
-	x.freeRootNodeLoc(rnl)
-	c++
-	x.freeRootNodeLoc(rnl)
-	c++
-}
-
-func TestNodeLocRead(t *testing.T) {
-	var nl *nodeLoc
-	n, err := nl.read(nil)
-	if n != nil || err != nil {
-		t.Errorf("expected nodeLoc.read() err")
-	}
-}
-
-func TestNumInfo(t *testing.T) {
-	fname := "tmp.test"
-	os.Remove(fname)
-	f, err := os.Create(fname)
-	defer os.Remove(fname)
-
-	readShouldErr := false
-	m := &mockfile{
-		f: f,
-		readat: func(p []byte, off int64) (n int, err error) {
-			if readShouldErr {
-				return 0, errors.New("mockfile error")
-			}
-			return f.ReadAt(p, off)
-		},
-	}
-
-	s, _ := NewStore(m)
-	x := s.SetCollection("x", nil)
-	x.SetItem(&Item{
-		Key:      []byte("a"),
-		Val:      []byte("aaa"),
-		Priority: 100,
-	})
-	s.Flush()
-
-	readShouldErr = true
-
-	rnl := x.rootAddRef()
-	rnl.root.node = unsafe.Pointer(nil) // Evict node from memory to force reading.
-
-	_, _, _, _, err = numInfo(s, rnl.root, empty_nodeLoc)
-	if err == nil {
-		t.Errorf("expected numInfo to error")
-	}
-	_, _, _, _, err = numInfo(s, empty_nodeLoc, rnl.root)
-	if err == nil {
-		t.Errorf("expected numInfo to error")
-	}
-}
-
-func TestWriteEmptyItemsErr(t *testing.T) {
-	fname := "tmp.test"
-	os.Remove(fname)
-	f, _ := os.Create(fname)
-	defer os.Remove(fname)
-
-	writeShouldErr := false
-	m := &mockfile{
-		f: f,
-		writeat: func(p []byte, off int64) (n int, err error) {
-			if writeShouldErr {
-				return 0, errors.New("mockfile error")
-			}
-			return f.WriteAt(p, off)
-		},
-	}
-
-	s, _ := NewStore(m)
-
-	writeShouldErr = true
-	if s.Flush() == nil {
-		t.Errorf("expected Flush() to error")
-	}
-}
-
-func TestWriteItemsErr(t *testing.T) {
-	fname := "tmp.test"
-	os.Remove(fname)
-	f, _ := os.Create(fname)
-	defer os.Remove(fname)
-
-	writeShouldErr := false
-	m := &mockfile{
-		f: f,
-		writeat: func(p []byte, off int64) (n int, err error) {
-			if writeShouldErr {
-				return 0, errors.New("mockfile error")
-			}
-			return f.WriteAt(p, off)
-		},
-	}
-
-	s, _ := NewStore(m)
-	x := s.SetCollection("x", nil)
-	x.SetItem(&Item{
-		Key:      []byte("b"),
-		Val:      []byte("bbb"),
-		Priority: 100,
-	})
-	x.SetItem(&Item{
-		Key:      []byte("c"),
-		Val:      []byte("ccc"),
-		Priority: 10,
-	})
-	x.SetItem(&Item{
-		Key:      []byte("a"),
-		Val:      []byte("aaa"),
-		Priority: 10,
-	})
-
-	writeShouldErr = true
-	if s.Flush() == nil {
-		t.Errorf("expected Flush() to error")
-	}
-}
-
-func TestStatErr(t *testing.T) {
-	fname := "tmp.test"
-	os.Remove(fname)
-	f, err := os.Create(fname)
-	defer os.Remove(fname)
-
-	s, _ := NewStore(f)
-	x := s.SetCollection("x", nil)
-	x.SetItem(&Item{
-		Key:      []byte("a"),
-		Val:      []byte("aaa"),
-		Priority: 100,
-	})
-	s.Flush()
-
-	f2, err := os.Open(fname) // Test reading the file.
-
-	m := &mockfile{
-		f: f2,
-		stat: func() (fi os.FileInfo, err error) {
-			return nil, errors.New("mockfile error")
-		},
-	}
-	s2, err := NewStore(m)
-	if err == nil {
-		t.Errorf("expected store open to fail due to stat err")
-	}
-	if s2 != nil {
-		t.Errorf("expected store open to fail due to stat err")
-	}
-}
-
-func TestNodeLocWriteErr(t *testing.T) {
-	fname := "tmp.test"
-	os.Remove(fname)
-	f, _ := os.Create(fname)
-	defer os.Remove(fname)
-
-	writeShouldErr := false
-	m := &mockfile{
-		f: f,
-		writeat: func(p []byte, off int64) (n int, err error) {
-			if writeShouldErr {
-				return 0, errors.New("mockfile error")
-			}
-			return f.WriteAt(p, off)
-		},
-	}
-
-	s, _ := NewStore(m)
-	x := s.SetCollection("x", nil)
-	x.SetItem(&Item{
-		Key:      []byte("b"),
-		Val:      []byte("bbb"),
-		Priority: 100,
-	})
-	rnl := x.rootAddRef()
-
-	writeShouldErr = true
-	if rnl.root.write(s) == nil {
-		t.Errorf("expected write node to fail")
-	}
-	writeShouldErr = false
-
-	rnl.root.node = unsafe.Pointer(nil) // Force a nil node.
-	if rnl.root.write(s) != nil {
-		t.Errorf("expected write node on nil node to work")
-	}
-}
-
-func TestStoreRefCount(t *testing.T) {
-	counts := map[string]int{}
-	nadds := 0
-	ndecs := 0
-	s, err := NewStoreEx(nil, StoreCallbacks{
-		ItemAddRef: func(c *Collection, i *Item) {
-			counts[string(i.Key)]++
-			if counts[string(i.Key)] <= 0 {
-				t.Errorf("in ItemAddRef, count for k: %s was <= 0", string(i.Key))
-			}
-			nadds++
-		},
-		ItemDecRef: func(c *Collection, i *Item) {
-			counts[string(i.Key)]--
-			if counts[string(i.Key)] < 0 {
-				t.Errorf("in ItemDecRef, count for k: %s was <= 0", string(i.Key))
-			}
-			if counts[string(i.Key)] == 0 {
-				delete(counts, string(i.Key))
-			}
-			ndecs++
-		},
-	})
-	if err != nil || s == nil {
-		t.Errorf("expected memory-only NewStoreEx to work")
-	}
-	mustJustOneRef := func(msg string) {
-		for k, count := range counts {
-			if count != 1 {
-				t.Errorf("expect count 1 for k: %s, got: %v, msg: %s", k, count, msg)
-			}
-		}
-	}
-	mustn := func(ea, ed int, msg string) {
-		if ea != nadds {
-			t.Errorf("expected nadds: %v, got: %v, msg: %s", ea, nadds, msg)
-		}
-		if ed != ndecs {
-			t.Errorf("expected ndecs: %v, got: %v, msg: %s", ed, ndecs, msg)
-		}
-	}
-	mustn(0, 0, "")
-	s.SetCollection("x", bytes.Compare)
-	mustn(0, 0, "")
-	x := s.GetCollection("x")
-	if x == nil {
-		t.Errorf("expected SetColl/GetColl to work")
-	}
-	mustn(0, 0, "")
-	if x.Name() != "x" {
-		t.Errorf("expected name to be same")
-	}
-	x2 := s.GetCollection("x")
-	if x2 != x {
-		t.Errorf("expected 2nd GetColl to work")
-	}
-	mustn(0, 0, "")
-	if x2.Name() != "x" {
-		t.Errorf("expected name to be same")
-	}
-	mustn(0, 0, "")
-	numItems, numBytes, err := x2.GetTotals()
-	if err != nil || numItems != 0 || numBytes != 0 {
-		t.Errorf("expected empty memory coll to be empty")
-	}
-	mustn(0, 0, "")
-
-	tests := []struct {
-		op  string
-		val string
-		pri int
-		exp string
-	}{
-		{"ups", "a", 100, ""},
-		{"ups", "b", 200, ""},
-		{"ups", "a", 300, ""},
-		{"ups", "c", 200, ""},
-		{"ups", "a", 100, ""},
-		{"ups", "b", 200, ""},
-		{"ups", "a", 300, ""},
-		{"ups", "b", 100, ""},
-		{"ups", "c", 300, ""},
-		{"del", "a", 0, ""},
-		{"del", "b", 0, ""},
-		{"del", "c", 0, ""},
-	}
-
-	for testIdx, test := range tests {
-		switch test.op {
-		case "get":
-			i, err := x.GetItem([]byte(test.val), true)
-			if err != nil {
-				t.Errorf("test: %v, expected get nil error, got: %v",
-					testIdx, err)
-			}
-			if i != nil {
-				s.ItemDecRef(x, i)
-			}
-			if i == nil && test.exp == "NIL" {
-				continue
-			}
-			if string(i.Key) != test.exp {
-				t.Errorf("test: %v, on Get, expected key: %v, got: %v",
-					testIdx, test.exp, i.Key)
-			}
-			if string(i.Val) != test.exp {
-				t.Errorf("test: %v, on Get, expected val: %v, got: %v",
-					testIdx, test.exp, i.Val)
-			}
-		case "ups":
-			err := x.SetItem(&Item{
-				Key:      []byte(test.val),
-				Val:      []byte(test.val),
-				Priority: int32(test.pri),
-			})
-			if err != nil {
-				t.Errorf("test: %v, expected ups nil error, got: %v",
-					testIdx, err)
-			}
-		case "del":
-			_, err := x.Delete([]byte(test.val))
-			if err != nil {
-				t.Errorf("test: %v, expected del nil error, got: %v",
-					testIdx, err)
-			}
-		}
-	}
-
-	mustJustOneRef("final")
-}
-
-func TestStoreRefCountRandom(t *testing.T) {
-	counts := map[string]int{}
-	mustJustOneRef := func(msg string) {
-		for k, count := range counts {
-			if count != 1 {
-				t.Fatalf("expect count 1 for k: %s, got: %v, msg: %s",
-					k, count, msg)
-			}
-		}
-	}
-	mustJustOneRef("")
-
-	s, err := NewStoreEx(nil, StoreCallbacks{
-		ItemAddRef: func(c *Collection, i *Item) {
-			k := fmt.Sprintf("%s-%s", i.Key, string(i.Val))
-			counts[k]++
-			if counts[k] <= 0 {
-				t.Errorf("in ItemAddRef, count for k: %s was <= 0", k)
-			}
-		},
-		ItemDecRef: func(c *Collection, i *Item) {
-			k := fmt.Sprintf("%s-%s", i.Key, string(i.Val))
-			counts[k]--
-			if counts[k] < 0 {
-				t.Errorf("in ItemDecRef, count for k: %s was <= 0", k)
-			}
-			if counts[k] == 0 {
-				delete(counts, k)
-			}
-		},
-	})
-	if err != nil || s == nil {
-		t.Errorf("expected memory-only NewStoreEx to work")
-	}
-
-	x := s.SetCollection("x", bytes.Compare)
-
-	numSets := 0
-	numKeys := 10
-	for i := 0; i < 100; i++ {
-		for j := 0; j < 1000; j++ {
-			ks := fmt.Sprintf("%03d", rand.Int()%numKeys)
-			k := []byte(ks)
-			r := rand.Int() % 2
-			switch r {
-			case 0:
-				numSets++
-				v := fmt.Sprintf("%d", numSets)
-				pri := rand.Int31()
-				err := x.SetItem(&Item{
-					Key:      k,
-					Val:      []byte(v),
-					Priority: pri,
-				})
-				if err != nil {
-					t.Errorf("expected nil error, got: %v", err)
-				}
-			case 1:
-				_, err := x.Delete(k)
-				if err != nil {
-					t.Errorf("expected nil error, got: %v", err)
-				}
-			}
-		}
-		for k := 0; k < numKeys; k++ {
-			_, err := x.Delete([]byte(fmt.Sprintf("%03d", k)))
-			if err != nil {
-				t.Fatalf("expected nil error, got: %v", err)
-			}
-		}
-		mustJustOneRef(fmt.Sprintf("i: %d", i))
-	}
-}
-
-func TestPersistRefCountRandom(t *testing.T) {
-	fname := "tmp.test"
-	os.Remove(fname)
-	f, _ := os.Create(fname)
-	defer os.Remove(fname)
-
-	counts := map[string]int{}
-
-	itemAddRef := func(c *Collection, i *Item) {
-		if i.Val == nil {
-			return
-		}
-		k := fmt.Sprintf("%s-%s", i.Key, string(i.Val))
-		counts[k]++
-		if counts[k] <= 0 {
-			t.Fatalf("in ItemAddRef, count for k: %s was <= 0", k)
-		}
-	}
-
-	itemDecRef := func(c *Collection, i *Item) {
-		if i.Val == nil {
-			return
-		}
-		k := fmt.Sprintf("%s-%s", i.Key, string(i.Val))
-		if counts[k] == 0 {
-			t.Fatalf("in ItemDecRef, count for k: %s at 0", k)
-		}
-		counts[k]--
-		if counts[k] == 0 {
-			delete(counts, k)
-		}
-	}
-
-	start := func(f *os.File) (*Store, *Collection, map[string]int) {
-		s, err := NewStoreEx(f, StoreCallbacks{
-			ItemAddRef: itemAddRef,
-			ItemDecRef: itemDecRef,
-			ItemValRead: func(c *Collection, i *Item,
-				r io.ReaderAt, offset int64, valLength uint32) error {
-				i.Val = make([]byte, valLength)
-				_, err := r.ReadAt(i.Val, offset)
-				itemAddRef(c, i)
-				return err
-			},
-		})
-		if err != nil || s == nil {
-			t.Errorf("expected memory-only NewStoreEx to work")
-		}
-		x := s.SetCollection("x", bytes.Compare)
-		return s, x, counts
-	}
-
-	s, x, counts := start(f)
-
-	stop := func() {
-		s.Flush()
-		s.Close()
-		if len(counts) != 0 {
-			t.Errorf("counts not empty after Close(), got: %#v\n", counts)
-		}
-		f.Close()
-	}
-
-	mustJustOneRef := func(msg string) {
-		for k, count := range counts {
-			if count != 1 {
-				t.Fatalf("expect count 1 for k: %s, got: %v, msg: %s, counts: %#v",
-					k, count, msg, counts)
-			}
-		}
-	}
-	mustJustOneRef("")
-
-	numSets := 0
-	numKeys := 10
-	for i := 0; i < 10; i++ {
-		for j := 0; j < 1000; j++ {
-			ks := fmt.Sprintf("%03d", rand.Int()%numKeys)
-			k := []byte(ks)
-			r := rand.Int() % 100
-			if r < 60 {
-				numSets++
-				v := fmt.Sprintf("%d", numSets)
-				pri := rand.Int31()
-				err := x.SetItem(&Item{
-					Key:      k,
-					Val:      []byte(v),
-					Priority: pri,
-				})
-				if err != nil {
-					t.Errorf("expected nil error, got: %v", err)
-				}
-			} else if r < 90 {
-				_, err := x.Delete(k)
-				if err != nil {
-					t.Errorf("expected nil error, got: %v", err)
-				}
-			} else {
-				// Close and reopen the store.
-				stop()
-				f, _ = os.OpenFile(fname, os.O_RDWR, 0666)
-				s, x, counts = start(f)
-			}
-		}
-		for k := 0; k < numKeys; k++ {
-			_, err := x.Delete([]byte(fmt.Sprintf("%03d", k)))
-			if err != nil {
-				t.Fatalf("expected nil error, got: %v", err)
-			}
-		}
-		mustJustOneRef(fmt.Sprintf("i: %d", i))
-	}
-}
-
-func TestEvictRefCountRandom(t *testing.T) {
-	fname := "tmp.test"
-	os.Remove(fname)
-	f, _ := os.Create(fname)
-	defer os.Remove(fname)
-
-	start := func(f *os.File) (*Store, *Collection, map[string]int) {
-		counts := map[string]int{}
-		var s *Store
-		var x *Collection
-		var err error
-		itemAddRef := func(c *Collection, i *Item) {
-			k := fmt.Sprintf("%s-%s", i.Key, string(i.Val))
-			if i.Val == nil {
-				return
-			}
-			counts[k]++
-			if counts[k] <= 0 {
-				t.Fatalf("in ItemAddRef, count for k: %s was <= 0", k)
-			}
-		}
-		itemDecRef := func(c *Collection, i *Item) {
-			k := fmt.Sprintf("%s-%s", i.Key, string(i.Val))
-			if i.Val == nil {
-				return
-			}
-			if counts[k] == 0 {
-				if x.root != nil {
-					dump(s, x.root.root, 1)
-				}
-				t.Fatalf("in ItemDecRef, count for k: %s at 0, counts: %#v",
-					k, counts)
-			}
-			counts[k]--
-			if counts[k] == 0 {
-				delete(counts, k)
-			}
-		}
-		s, err = NewStoreEx(f, StoreCallbacks{
-			ItemAddRef: itemAddRef,
-			ItemDecRef: itemDecRef,
-			ItemValRead: func(c *Collection, i *Item,
-				r io.ReaderAt, offset int64, valLength uint32) error {
-				i.Val = make([]byte, valLength)
-				_, err := r.ReadAt(i.Val, offset)
-				itemAddRef(c, i)
-				return err
-			},
-		})
-		if err != nil || s == nil {
-			t.Errorf("expected memory-only NewStoreEx to work")
-		}
-		x = s.SetCollection("x", bytes.Compare)
-		return s, x, counts
-	}
-
-	s, x, counts := start(f)
-
-	stop := func() {
-		s.Flush()
-		s.Close()
-		if len(counts) != 0 {
-			t.Errorf("counts not empty after Close(), got: %#v\n", counts)
-		}
-		f.Close()
-		f = nil
-	}
-
-	mustJustOneRef := func(msg string) {
-		for k, count := range counts {
-			if count != 1 {
-				t.Fatalf("expect count 1 for k: %s, got: %v, msg: %s, counts: %#v",
-					k, count, msg, counts)
-			}
-		}
-	}
-	mustJustOneRef("")
-
-	numSets := 0
-	numKeys := 10
-	for i := 0; i < 10; i++ {
-		for j := 0; j < 1000; j++ {
-			ks := fmt.Sprintf("%03d", rand.Int()%numKeys)
-			k := []byte(ks)
-			r := rand.Int() % 100
-			if r < 30 {
-				i, err := x.GetItem(k, true)
-				if err != nil {
-					t.Errorf("expected nil error, got: %v", err)
-				}
-				if i != nil {
-					s.ItemDecRef(x, i)
-				}
-			} else if r < 60 {
-				numSets++
-				v := fmt.Sprintf("%d", numSets)
-				pri := rand.Int31()
-				err := x.SetItem(&Item{
-					Key:      k,
-					Val:      []byte(v),
-					Priority: pri,
-				})
-				if err != nil {
-					t.Errorf("expected nil error, got: %v", err)
-				}
-			} else if r < 75 {
-				_, err := x.Delete(k)
-				if err != nil {
-					t.Errorf("expected nil error, got: %v", err)
-				}
-			} else if r < 90 {
-				x.EvictSomeItems()
-			} else {
-				// Close and reopen the store.
-				stop()
-				f, _ = os.OpenFile(fname, os.O_RDWR, 0666)
-				s, x, counts = start(f)
-			}
-		}
-		x.EvictSomeItems()
-		for k := 0; k < numKeys; k++ {
-			_, err := x.Delete([]byte(fmt.Sprintf("%03d", k)))
-			if err != nil {
-				t.Fatalf("expected nil error, got: %v", err)
-			}
-		}
-		mustJustOneRef(fmt.Sprintf("i: %d", i))
-	}
-}
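The four ref-count tests above all wire counting callbacks through NewStoreEx. As a compact editorial illustration (this sketch is not part of the deleted test file; error handling is elided), the same ItemAddRef/ItemDecRef callback contract can be exercised against the public gkvlite API like so:

package main

import (
	"bytes"
	"fmt"

	"github.com/steveyen/gkvlite"
)

func main() {
	refs := map[string]int{} // outstanding references per key
	s, err := gkvlite.NewStoreEx(nil, gkvlite.StoreCallbacks{
		ItemAddRef: func(c *gkvlite.Collection, i *gkvlite.Item) { refs[string(i.Key)]++ },
		ItemDecRef: func(c *gkvlite.Collection, i *gkvlite.Item) { refs[string(i.Key)]-- },
	})
	if err != nil {
		panic(err)
	}
	x := s.SetCollection("x", bytes.Compare)
	x.SetItem(&gkvlite.Item{Key: []byte("a"), Val: []byte("aaa"), Priority: 100})
	x.Delete([]byte("a"))
	// The tests above expect any key still counted at this point to hold
	// exactly one outstanding reference.
	fmt.Printf("outstanding refs: %v\n", refs)
}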
diff --git a/go/src/github.com/steveyen/gkvlite/tools/slab/slab.go b/go/src/github.com/steveyen/gkvlite/tools/slab/slab.go
deleted file mode 100644
index 2c78d22..0000000
--- a/go/src/github.com/steveyen/gkvlite/tools/slab/slab.go
+++ /dev/null
@@ -1,363 +0,0 @@
-// Ignore this package; it is not needed by veyron yet and avoids a dependency on go-slab.
-// +build ignore
-
-package main
-
-// Test integration of gkvlite with go-slab, using gkvlite's optional
-// ItemAddRef/ItemDecRef() callbacks to integrate with a slab memory
-// allocator.
-
-import (
-	"bytes"
-	"flag"
-	"fmt"
-	"io"
-	"log"
-	"math/rand"
-	"os"
-	"sort"
-	"testing"
-	"unsafe"
-
-	"github.com/steveyen/gkvlite"
-	"github.com/steveyen/go-slab"
-)
-
-var maxOps = flag.Int("ops", 0,
-	"max number of ops; 0 means run forever")
-var maxItems = flag.Int("n", 10000,
-	"max number of items")
-var maxItemBytes = flag.Int("maxItemBytes", 20000,
-	"max bytes for an item")
-var pctSets = flag.Int("sets", 50,
-	"percentage of sets; 50 means 50% sets")
-var pctDeletes = flag.Int("deletes", 5,
-	"percentage of deletes; 5 means 5% deletes")
-var pctEvicts = flag.Int("evicts", 3,
-	"percentage of evicts; 3 means 3% evicts")
-var pctReopens = flag.Int("reopens", 0,
-	"percentage of reopens; 0 means 0% reopens")
-var useSlab = flag.Bool("useSlab", true,
-	"whether to use slab allocator")
-var flushEvery = flag.Int("flushEvery", 0,
-	"flush every N ops; 0 means no flushing")
-var slabStats = flag.Bool("slabStats", false,
-	"whether to emit slab stats")
-
-func usage() {
-	fmt.Fprintf(os.Stderr, "gkvlite slab testing tool\n")
-	fmt.Fprintf(os.Stderr, "\nusage: %s [flags] dbFileName\n", os.Args[0])
-	fmt.Fprintf(os.Stderr, "  dbFileName - file name used during test\n")
-	fmt.Fprintf(os.Stderr, "\nexamples:\n")
-	fmt.Fprintf(os.Stderr, "  ./slab -n 1000 -sets 30 test.tmp\n")
-	fmt.Fprintf(os.Stderr, "\nflags:\n")
-	flag.PrintDefaults()
-	fmt.Fprintf(os.Stderr, "  -h: print this usage/help message\n")
-}
-
-func main() {
-	flag.Usage = usage
-	flag.Parse()
-	args := flag.Args()
-	if len(args) < 1 {
-		log.Fatalf("error: missing dbFileName")
-	}
-	if *pctSets+*pctDeletes+*pctEvicts+*pctReopens > 100 {
-		log.Fatalf("error: percentages are > 100%% (%d%%+%d%%+%d%%+%d%%)",
-			*pctSets, *pctDeletes, *pctEvicts, *pctReopens)
-	}
-	run(args[0], *useSlab, *flushEvery, *maxItemBytes,
-		*maxOps, *maxItems, *pctSets, *pctDeletes, *pctEvicts, *pctReopens)
-}
-
-// Helper function to read a contiguous byte sequence, splitting
-// it up into chained buf's of maxBufSize.  The last buf in the
-// chain can have length <= maxBufSize.
-func readBufChain(arena *slab.Arena, maxBufSize int, r io.ReaderAt, offset int64,
-	valLength uint32) ([]byte, error) {
-	n := int(valLength)
-	if n > maxBufSize {
-		n = maxBufSize
-	}
-	b := arena.Alloc(n)
-	_, err := r.ReadAt(b, offset)
-	if err != nil {
-		arena.DecRef(b)
-		return nil, err
-	}
-	remaining := valLength - uint32(n)
-	if remaining > 0 {
-		next, err := readBufChain(arena, maxBufSize, r, offset + int64(n), remaining)
-		if err != nil {
-			arena.DecRef(b)
-			return nil, err
-		}
-		arena.SetNext(b, next)
-	}
-	return b, nil
-}
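As a hedged aside (editorial, not from the original tool): the chained-buffer layout readBufChain builds can be pictured with just the go-slab calls used in this file. A logical value longer than maxBufSize becomes a linked chain of arena buffers, and its total length is recovered by walking GetNext, much as the ItemValLength callback below does.

package main

import (
	"fmt"

	"github.com/steveyen/go-slab"
)

func main() {
	// Same arena parameters as setupStoreArena below.
	arena := slab.NewArena(48, 1024*1024, 2, nil)

	// Pretend maxBufSize is 64 and the logical value is 100 bytes:
	// a 64-byte head buffer chained to a 36-byte tail buffer.
	head := arena.Alloc(64)
	tail := arena.Alloc(36)
	arena.SetNext(head, tail)

	// Walk the chain to recover the logical value length. (Arena
	// ref-count bookkeeping is elided in this sketch.)
	total := 0
	for b := head; b != nil; b = arena.GetNext(b) {
		total += len(b)
	}
	fmt.Println(total) // 100
}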
-
-type ItemNode struct {
-	refs int
-	next *ItemNode
-	item gkvlite.Item
-}
-
-var freeItemNodes *ItemNode
-
-func setupStoreArena(t *testing.T, maxBufSize int) (
-	*slab.Arena, gkvlite.StoreCallbacks) {
-	arena := slab.NewArena(48, // The smallest slab class "chunk size" is 48 bytes.
-		1024*1024, // Each slab will be 1MB in size.
-		2,         // Power of 2 growth in "chunk sizes".
-		nil)       // Use default make([]byte) for slab memory.
-	if arena == nil {
-		t.Errorf("expected arena")
-	}
-
-	itemValLength := func(c *gkvlite.Collection, i *gkvlite.Item) int {
-		if i.Val == nil {
-			if t != nil {
-				t.Fatalf("itemValLength on nil i.Val, i: %#v", i)
-			} else {
-				panic(fmt.Sprintf("itemValLength on nil i.Val, i: %#v", i))
-			}
-		}
-		s := 0
-		b := i.Val
-		for b != nil {
-			s = s + len(b)
-			n := arena.GetNext(b)
-			if s > len(b) {
-				arena.DecRef(b)
-			}
-			b = n
-		}
-		return s
-	}
-	itemValWrite := func(c *gkvlite.Collection,
-		i *gkvlite.Item, w io.WriterAt, offset int64) error {
-		s := 0
-		b := i.Val
-		for b != nil {
-			_, err := w.WriteAt(b, offset + int64(s))
-			if err != nil {
-				return err
-			}
-			s = s + len(b)
-			n := arena.GetNext(b)
-			if s > len(b) {
-				arena.DecRef(b)
-			}
-			b = n
-		}
-		return nil
-	}
-	itemValRead := func(c *gkvlite.Collection,
-		i *gkvlite.Item, r io.ReaderAt, offset int64, valLength uint32) error {
-		b, err := readBufChain(arena, maxBufSize, r, offset, valLength)
-		if err != nil {
-			return err
-		}
-		i.Val = b
-		return nil
-	}
-	itemAlloc := func(c *gkvlite.Collection, keyLength uint16) *gkvlite.Item {
-		var n *ItemNode
-		if freeItemNodes != nil {
-			n = freeItemNodes
-			freeItemNodes = freeItemNodes.next
-			n.next = nil
-		} else {
-			n = &ItemNode{}
-			n.item.Transient = unsafe.Pointer(n)
-		}
-		if n.refs != 0 ||
-			n.item.Key != nil || n.item.Val != nil || n.item.Priority != 0 ||
-			n.item.Transient != unsafe.Pointer(n) {
-			panic("unexpected ItemNode refs or item fields")
-		}
-		n.refs = 1
-		n.next = nil
-		n.item.Key = arena.Alloc(int(keyLength))
-		return &n.item
-	}
-	itemAddRef := func(c *gkvlite.Collection, i *gkvlite.Item) {
-		n := (*ItemNode)(i.Transient)
-		n.refs++
-	}
-	itemDecRef := func(c *gkvlite.Collection, i *gkvlite.Item) {
-		n := (*ItemNode)(i.Transient)
-		n.refs--
-		if n.refs == 0 {
-			if i.Key == nil {
-				panic("expected a Key")
-			}
-			arena.DecRef(i.Key)
-			i.Key = nil
-			if i.Val != nil {
-				arena.DecRef(i.Val)
-				i.Val = nil
-			}
-			i.Priority = 0
-			n.next = freeItemNodes
-			freeItemNodes = n
-		}
-	}
-
-	scb := gkvlite.StoreCallbacks{
-		ItemValLength: itemValLength,
-		ItemValWrite:  itemValWrite,
-		ItemValRead:   itemValRead,
-		ItemAlloc:     itemAlloc,
-		ItemAddRef:    itemAddRef,
-		ItemDecRef:    itemDecRef,
-	}
-	return arena, scb
-}
-
-func run(fname string, useSlab bool, flushEvery int, maxItemBytes int,
-	maxOps, maxItems, pctSets, pctDeletes, pctEvicts, pctReopens int) {
-	fmt.Printf("fname: %s, useSlab: %v, flushEvery: %d" +
-		", maxItemBytes: %d, maxOps: %d, maxItems: %d" +
-		", pctSets: %d, pctDeletes; %d, pctEvicts: %d, pctReopens: %d\n",
-		fname, useSlab, flushEvery, maxItemBytes,
-		maxOps, maxItems, pctSets, pctDeletes, pctEvicts, pctReopens)
-
-	os.Remove(fname)
-	f, err := os.Create(fname)
-	if err != nil {
-		panic(fmt.Sprintf("error: could not create file: %s, err: %v", fname, err))
-	}
-
-	arenaStats := map[string]int64{}
-	arena, scb := setupStoreArena(nil, 256)
-	if !useSlab {
-		arena = nil
-		scb = gkvlite.StoreCallbacks{
-			ItemValLength: func(c *gkvlite.Collection, i *gkvlite.Item) int {
-				return len(i.Val)
-			},
-		}
-	}
-
-	start := func(f *os.File) (*gkvlite.Store, *gkvlite.Collection) {
-		s, err := gkvlite.NewStoreEx(f, scb)
-		if err != nil || s == nil {
-			panic(fmt.Sprintf("error: expected NewStoreEx to work, err: %v", err))
-		}
-		x := s.SetCollection("x", bytes.Compare)
-		if x == nil {
-			panic(fmt.Sprintf("error: expected SetColl/GetColl to work"))
-		}
-		return s, x
-	}
-
-	s, x := start(f)
-
-	stop := func() {
-		s.Flush()
-		s.Close()
-		f.Close()
-		f = nil
-	}
-
-	numGets := 0
-	numSets := 0
-	numDeletes := 0
-	numEvicts := 0
-	numReopens := 0
-	numFlushes := 0
-
-	psd := pctSets + pctDeletes
-	psde := pctSets + pctDeletes + pctEvicts
-	psder := pctSets + pctDeletes + pctEvicts + pctReopens
-
-	for i := 0; maxOps <= 0 || i < maxOps; i++ {
-		kr := rand.Int() % maxItems
-		kr4 := kr * kr * kr * kr
-		if kr4 > maxItemBytes {
-			kr4 = maxItemBytes
-		}
-		ks := fmt.Sprintf("%03d", kr)
-		k := []byte(ks)
-		r := rand.Int() % 100
-		if r < pctSets {
-			numSets++
-			var b []byte
-			if arena != nil {
-				b = arena.Alloc(kr4)
-			} else {
-				b = make([]byte, kr4)
-			}
-			pri := rand.Int31()
-			var it *gkvlite.Item
-			if scb.ItemAlloc != nil {
-				it = scb.ItemAlloc(x, uint16(len(k)))
-			} else {
-				it = &gkvlite.Item{Key: make([]byte, len(k))}
-			}
-			copy(it.Key, k)
-			it.Val = b
-			it.Priority = pri
-			err := x.SetItem(it)
-			if err != nil {
-				panic(fmt.Sprintf("error: expected nil error, got: %v", err))
-			}
-			if scb.ItemDecRef != nil {
-				scb.ItemDecRef(x, it)
-			}
-		} else if r < psd {
-			numDeletes++
-			_, err := x.Delete(k)
-			if err != nil {
-				panic(fmt.Sprintf("error: expected nil error, got: %v", err))
-			}
-		} else if r < psde {
-			numEvicts++
-			x.EvictSomeItems()
-		} else if r < psder {
-			numReopens++
-			stop()
-			f, _ = os.OpenFile(fname, os.O_RDWR, 0666)
-			s, x = start(f)
-		} else {
-			numGets++
-			i, err := x.GetItem(k, true)
-			if err != nil {
-				panic(fmt.Sprintf("error: expected nil error, got: %v", err))
-			}
-			if i != nil {
-				if scb.ItemValLength(x, i) != kr4 {
-					panic(fmt.Sprintf("error: expected len: %d, got %d",
-						kr4, scb.ItemValLength(x, i)))
-				}
-				if scb.ItemDecRef != nil {
-					scb.ItemDecRef(x, i)
-				}
-			}
-		}
-
-		if flushEvery > 0 && i % flushEvery == 0 {
-			numFlushes++
-			s.Flush()
-		}
-
-		if i % 10000 == 0 {
-			if arena != nil && *slabStats {
-				arena.Stats(arenaStats)
-				mk := []string{}
-				for k := range arenaStats {
-					mk = append(mk, k)
-				}
-				sort.Strings(mk)
-				for _, k := range mk {
-					log.Printf("%s = %d", k, arenaStats[k])
-				}
-			}
-			log.Printf("i: %d, numGets: %d, numSets: %d, numDeletes: %d" +
-				", numEvicts: %d, numReopens: %d, numFlushes: %d\n",
-				i, numGets, numSets, numDeletes, numEvicts, numReopens, numFlushes)
-		}
-	}
-}
diff --git a/go/src/github.com/steveyen/gkvlite/tools/slab/slab_test.go b/go/src/github.com/steveyen/gkvlite/tools/slab/slab_test.go
deleted file mode 100644
index 0cb8bd9..0000000
--- a/go/src/github.com/steveyen/gkvlite/tools/slab/slab_test.go
+++ /dev/null
@@ -1,186 +0,0 @@
-// Ignore this package; it is not needed by veyron yet and avoids a dependency on go-slab.
-// +build ignore
-
-package main
-
-// Test integration of gkvlite with go-slab, using gkvlite's optional
-// ItemAddRef/ItemDecRef() callbacks to integrate with a slab memory
-// allocator.
-
-import (
-	"bytes"
-	"fmt"
-	"math/rand"
-	"os"
-	"testing"
-
-	"github.com/steveyen/gkvlite"
-)
-
-func TestSlabStore(t *testing.T) {
-	fname := "tmp.test"
-	os.Remove(fname)
-	f, err := os.Create(fname)
-	if err != nil {
-		t.Errorf("expected to create file: " + fname)
-	}
-	defer os.Remove(fname)
-
-	arena, scb := setupStoreArena(t, 256)
-	s, err := gkvlite.NewStoreEx(f, scb)
-	if err != nil || s == nil {
-		t.Errorf("expected NewStoreEx to work")
-	}
-	s.SetCollection("x", bytes.Compare)
-	x := s.GetCollection("x")
-	if x == nil {
-		t.Errorf("expected SetColl/GetColl to work")
-	}
-
-	b := arena.Alloc(5)
-	if b == nil {
-		t.Errorf("expected buf")
-	}
-	copy(b, []byte("hello"))
-	i := scb.ItemAlloc(x, 1)
-	copy(i.Key, []byte("a"))
-	i.Val = b
-	i.Priority = 100
-	x.SetItem(i)
-	scb.ItemDecRef(x, i)
-
-	i = scb.ItemAlloc(x, 3)
-	copy(i.Key, []byte("big"))
-	i.Val = arena.Alloc(1234)
-	i.Priority = 100
-	x.SetItem(i)
-	scb.ItemDecRef(x, i)
-
-	err = s.Flush()
-	if err != nil {
-		t.Errorf("expected Flush() to error")
-	}
-	s.Close()
-	f.Close()
-
-	f, _ = os.OpenFile(fname, os.O_RDWR, 0666)
-	arena, scb = setupStoreArena(t, 64) // Read with a different buf-size.
-	s, err = gkvlite.NewStoreEx(f, scb)
-	if err != nil || s == nil {
-		t.Errorf("expected NewStoreEx to work")
-	}
-	x = s.SetCollection("x", bytes.Compare)
-	if x == nil {
-		t.Errorf("expected SetColl/GetColl to work")
-	}
-	i, err = x.GetItem([]byte("a"), true)
-	if err != nil || i == nil {
-		t.Errorf("expected no GetItem() err, got: %v", err)
-	}
-	if string(i.Val) != "hello" {
-		t.Errorf("expected hello, got: %#v", i)
-	}
-	s.ItemDecRef(x, i)
-	i, err = x.GetItem([]byte("big"), true)
-	if err != nil || i == nil {
-		t.Errorf("expected no GetItem() err, got: %v", err)
-	}
-	if len(i.Val) != 64 {
-		t.Errorf("expected 64, got: %d", len(i.Val))
-	}
-	if scb.ItemValLength(x, i) != 1234 {
-		t.Errorf("expected 1234, got: %d", scb.ItemValLength(x, i))
-	}
-	s.ItemDecRef(x, i)
-}
-
-func TestSlabStoreRandom(t *testing.T) {
-	fname := "tmp.test"
-	os.Remove(fname)
-	f, err := os.Create(fname)
-	if err != nil {
-		t.Errorf("expected to create file: " + fname)
-	}
-	defer os.Remove(fname)
-
-	arena, scb := setupStoreArena(t, 256)
-
-	start := func(f *os.File) (*gkvlite.Store, *gkvlite.Collection) {
-		s, err := gkvlite.NewStoreEx(f, scb)
-		if err != nil || s == nil {
-			t.Errorf("expected NewStoreEx to work")
-		}
-		s.SetCollection("x", bytes.Compare)
-		x := s.GetCollection("x")
-		if x == nil {
-			t.Errorf("expected SetColl/GetColl to work")
-		}
-		return s, x
-	}
-
-	s, x := start(f)
-
-	stop := func() {
-		s.Flush()
-		s.Close()
-		f.Close()
-		f = nil
-	}
-
-	numSets := 0
-	numKeys := 10
-	for i := 0; i < 100; i++ {
-		for j := 0; j < 1000; j++ {
-			kr := rand.Int() % numKeys
-			ks := fmt.Sprintf("%03d", kr)
-			k := []byte(ks)
-			r := rand.Int() % 100
-			if r < 20 {
-				i, err := x.GetItem(k, true)
-				if err != nil {
-					t.Errorf("expected nil error, got: %v", err)
-				}
-				if i != nil {
-					kr4 := kr * kr * kr * kr
-					if scb.ItemValLength(x, i) != kr4 {
-						t.Errorf("expected len: %d, got %d",
-							kr4, scb.ItemValLength(x, i))
-					}
-					s.ItemDecRef(x, i)
-				}
-			} else if r < 60 {
-				numSets++
-				b := arena.Alloc(kr * kr * kr * kr)
-				pri := rand.Int31()
-				it := scb.ItemAlloc(x, uint16(len(k)))
-				copy(it.Key, k)
-				it.Val = b
-				it.Priority = pri
-				err := x.SetItem(it)
-				if err != nil {
-					t.Errorf("expected nil error, got: %v", err)
-				}
-				scb.ItemDecRef(x, it)
-			} else if r < 80 {
-				_, err := x.Delete(k)
-				if err != nil {
-					t.Errorf("expected nil error, got: %v", err)
-				}
-			} else if r < 90 {
-				x.EvictSomeItems()
-			} else {
-				// Close and reopen the store.
-				stop()
-				f, _ = os.OpenFile(fname, os.O_RDWR, 0666)
-				s, x = start(f)
-			}
-		}
-		x.EvictSomeItems()
-		for k := 0; k < numKeys; k++ {
-			_, err := x.Delete([]byte(fmt.Sprintf("%03d", k)))
-			if err != nil {
-				t.Fatalf("expected nil error, got: %v", err)
-			}
-		}
-	}
-}
diff --git a/go/src/github.com/steveyen/gkvlite/tools/view/view.go b/go/src/github.com/steveyen/gkvlite/tools/view/view.go
deleted file mode 100644
index b975136..0000000
--- a/go/src/github.com/steveyen/gkvlite/tools/view/view.go
+++ /dev/null
@@ -1,115 +0,0 @@
-package main
-
-import (
-	"flag"
-	"fmt"
-	"os"
-
-	"github.com/steveyen/gkvlite"
-)
-
-var keyFormat = flag.String("key-format", "string",
-	"format item key as string, bytes, raw, or none")
-var valFormat = flag.String("val-format", "string",
-	"format item val as string, bytes, raw, or none")
-var indent = flag.Bool("indent", false,
-	"show tree depth using indentation")
-
-func usage() {
-	fmt.Fprintf(os.Stderr, "gkvlite file view/inspect tool\n")
-	fmt.Fprintf(os.Stderr, "  suports gkvlite file versions: %d\n", gkvlite.VERSION)
-	fmt.Fprintf(os.Stderr, "\nusage: %s [flags] gkvlite-file [cmd [cmd-specific-args ...]]\n",
-		os.Args[0])
-	fmt.Fprintf(os.Stderr, "\nexamples:\n")
-	fmt.Fprintf(os.Stderr, "  ./view /tmp/my-data.gkvlite\n")
-	fmt.Fprintf(os.Stderr, "  ./view /tmp/my-data.gkvlite names\n")
-	fmt.Fprintf(os.Stderr, "  ./view /tmp/my-data.gkvlite items warehouse-inventory\n")
-	fmt.Fprintf(os.Stderr, "\ncmd:\n")
-	fmt.Fprintf(os.Stderr, "  names - lists collection names (default cmd)\n")
-	fmt.Fprintf(os.Stderr, "  items <collection-name> - emits items in a collection\n")
-	fmt.Fprintf(os.Stderr, "\nflags:\n")
-	flag.PrintDefaults()
-	fmt.Fprintf(os.Stderr, "  -h: print this usage/help message\n")
-}
-
-func main() {
-	flag.Usage = usage
-	flag.Parse()
-
-	if err := mainDo(flag.Args()); err != nil {
-		fmt.Printf("error: %v\n", err)
-		os.Exit(1)
-	}
-}
-
-func mainDo(args []string) error {
-	if len(args) < 1 {
-		return fmt.Errorf("missing gkvlite file arg")
-	}
-	fname, args := args[0], args[1:]
-
-	f, err := os.Open(fname)
-	if err != nil || f == nil {
-		return fmt.Errorf("could not open file: %v", fname)
-	}
-	defer f.Close()
-
-	s, err := gkvlite.NewStore(f)
-	if err != nil || s == nil {
-		return fmt.Errorf("could not create store from file: %v", fname)
-	}
-
-	cmd := "names"
-	if len(args) > 0 {
-		cmd, args = args[0], args[1:]
-	}
-	switch cmd {
-	case "names":
-		collNames := s.GetCollectionNames()
-		for _, collName := range collNames {
-			fmt.Printf("%s\n", collName)
-		}
-	case "items":
-		if len(args) < 1 {
-			return fmt.Errorf("missing 'items <collection-name>' param")
-		}
-		collName := args[0]
-		coll := s.GetCollection(collName)
-		if coll == nil {
-			return fmt.Errorf("could not find collection: %v", collName)
-		}
-		return coll.VisitItemsAscendEx(nil, true, emitItem)
-	default:
-		return fmt.Errorf("unknown command: %v", cmd)
-	}
-
-	return nil
-}
-
-func emitItem(i *gkvlite.Item, depth uint64) bool {
-	if *indent {
-		for i := 0; i < int(depth); i++ {
-			fmt.Printf(" ")
-		}
-	}
-	emit(i.Key, *keyFormat)
-	if *keyFormat != "none" && *valFormat != "none" {
-		fmt.Printf("=")
-	}
-	emit(i.Val, *valFormat)
-	fmt.Printf("\n")
-	return true
-}
-
-func emit(v []byte, format string) {
-	switch format {
-	default:
-	case "string":
-		fmt.Printf("%s", string(v))
-	case "bytes":
-		fmt.Printf("%#v", v)
-	case "raw":
-		fmt.Printf("%v", v)
-	case "none":
-	}
-}
diff --git a/go/src/github.com/steveyen/gkvlite/treap.go b/go/src/github.com/steveyen/gkvlite/treap.go
deleted file mode 100644
index 6308fc9..0000000
--- a/go/src/github.com/steveyen/gkvlite/treap.go
+++ /dev/null
@@ -1,327 +0,0 @@
-package gkvlite
-
-import (
-	"fmt"
-)
-
-// The core algorithms for treaps are straightforward.  However, that
-// algorithmic simplicity is obscured by the additional useful
-// features, such as persistence, garbage-avoidance, stats tracking,
-// and error handling.  For a simple, memory-only implementation of
-// the union/split/join treap algorithms that may be easier to
-// understand, see:
-// https://github.com/steveyen/gtreap/blob/master/treap.go
-
-// Memory management rules: the union/split/join functions will not
-// free their input nodeLoc's, but are responsible instead for
-// invoking markReclaimable() on the directly pointed-at input nodes.
-// The union/split/join functions must copy the input nodeLoc's if
-// they wish to keep the nodeLoc's data.
-//
-// The caller is responsible for freeing the returned nodeLoc's and
-// (if appropriate) the input nodeLoc's.  The caller also takes
-// responsibility for markReclaimable() on returned output nodes.
-
-// Returns a treap that is the union of this treap and that treap.
-func (o *Store) union(t *Collection, this *nodeLoc, that *nodeLoc,
-	reclaimMark *node) (
-	res *nodeLoc, err error) {
-	thisNode, err := this.read(o)
-	if err != nil {
-		return empty_nodeLoc, err
-	}
-	thatNode, err := that.read(o)
-	if err != nil {
-		return empty_nodeLoc, err
-	}
-	if this.isEmpty() || thisNode == nil {
-		return t.mkNodeLoc(nil).Copy(that), nil
-	}
-	if that.isEmpty() || thatNode == nil {
-		return t.mkNodeLoc(nil).Copy(this), nil
-	}
-	thisItemLoc := &thisNode.item
-	thisItem, err := thisItemLoc.read(t, false)
-	if err != nil {
-		return empty_nodeLoc, err
-	}
-	thatItemLoc := &thatNode.item
-	thatItem, err := thatItemLoc.read(t, false)
-	if err != nil {
-		return empty_nodeLoc, err
-	}
-	if thisItem.Priority > thatItem.Priority {
-		left, middle, right, err :=
-			o.split(t, that, thisItem.Key, reclaimMark)
-		if err != nil {
-			return empty_nodeLoc, err
-		}
-		newLeft, err := o.union(t, &thisNode.left, left, reclaimMark)
-		if err != nil {
-			return empty_nodeLoc, err
-		}
-		newRight, err := o.union(t, &thisNode.right, right, reclaimMark)
-		if err != nil {
-			return empty_nodeLoc, err
-		}
-		leftNum, leftBytes, rightNum, rightBytes, err :=
-			numInfo(o, newLeft, newRight)
-		if err != nil {
-			return empty_nodeLoc, err
-		}
-		var middleNode *node
-		if !middle.isEmpty() {
-			middleNode, err = middle.read(o)
-			if err != nil {
-				return empty_nodeLoc, err
-			}
-			middleItemLoc := &middleNode.item
-			res = t.mkNodeLoc(t.mkNode(middleItemLoc, newLeft, newRight,
-				leftNum+rightNum+1,
-				leftBytes+rightBytes+uint64(middleItemLoc.NumBytes(t))))
-		} else {
-			res = t.mkNodeLoc(t.mkNode(thisItemLoc, newLeft, newRight,
-				leftNum+rightNum+1,
-				leftBytes+rightBytes+uint64(thisItemLoc.NumBytes(t))))
-		}
-		t.freeNodeLoc(left)
-		t.freeNodeLoc(right)
-		t.freeNodeLoc(middle)
-		t.freeNodeLoc(newLeft)
-		t.freeNodeLoc(newRight)
-		t.markReclaimable(thisNode, reclaimMark)
-		t.markReclaimable(middleNode, reclaimMark)
-		return res, nil
-	}
-	// We don't use middle because the "that" node has precedence.
-	left, middle, right, err :=
-		o.split(t, this, thatItem.Key, reclaimMark)
-	if err != nil {
-		return empty_nodeLoc, err
-	}
-	newLeft, err := o.union(t, left, &thatNode.left, reclaimMark)
-	if err != nil {
-		return empty_nodeLoc, err
-	}
-	newRight, err := o.union(t, right, &thatNode.right, reclaimMark)
-	if err != nil {
-		return empty_nodeLoc, err
-	}
-	leftNum, leftBytes, rightNum, rightBytes, err :=
-		numInfo(o, newLeft, newRight)
-	if err != nil {
-		return empty_nodeLoc, err
-	}
-	res = t.mkNodeLoc(t.mkNode(thatItemLoc, newLeft, newRight,
-		leftNum+rightNum+1,
-		leftBytes+rightBytes+uint64(thatItemLoc.NumBytes(t))))
-	middleNode := middle.Node()
-	t.freeNodeLoc(left)
-	t.freeNodeLoc(right)
-	t.freeNodeLoc(middle)
-	t.freeNodeLoc(newLeft)
-	t.freeNodeLoc(newRight)
-	t.markReclaimable(thatNode, reclaimMark)
-	t.markReclaimable(middleNode, reclaimMark)
-	return res, nil
-}
-
-// Splits a treap into two treaps based on a split key "s".  The
-// result is (left, middle, right), where left treap has keys < s,
-// right treap has keys > s, and middle is either...
-// * empty/nil - meaning key s was not in the original treap.
-// * non-empty - returning the original nodeLoc/item that had key s.
-func (o *Store) split(t *Collection, n *nodeLoc, s []byte,
-	reclaimMark *node) (
-	*nodeLoc, *nodeLoc, *nodeLoc, error) {
-	nNode, err := n.read(o)
-	if err != nil || n.isEmpty() || nNode == nil {
-		return empty_nodeLoc, empty_nodeLoc, empty_nodeLoc, err
-	}
-
-	nItemLoc := &nNode.item
-	nItem, err := nItemLoc.read(t, false)
-	if err != nil {
-		return empty_nodeLoc, empty_nodeLoc, empty_nodeLoc, err
-	}
-
-	c := t.compare(s, nItem.Key)
-	if c == 0 {
-		left := t.mkNodeLoc(nil).Copy(&nNode.left)
-		right := t.mkNodeLoc(nil).Copy(&nNode.right)
-		middle := t.mkNodeLoc(nil).Copy(n)
-		return left, middle, right, nil
-	}
-
-	if c < 0 {
-		if nNode.left.isEmpty() {
-			return empty_nodeLoc, empty_nodeLoc, t.mkNodeLoc(nil).Copy(n), nil
-		}
-		left, middle, right, err :=
-			o.split(t, &nNode.left, s, reclaimMark)
-		if err != nil {
-			return empty_nodeLoc, empty_nodeLoc, empty_nodeLoc, err
-		}
-		leftNum, leftBytes, rightNum, rightBytes, err := numInfo(o, right, &nNode.right)
-		if err != nil {
-			return empty_nodeLoc, empty_nodeLoc, empty_nodeLoc, err
-		}
-		newRight := t.mkNodeLoc(t.mkNode(nItemLoc, right, &nNode.right,
-			leftNum+rightNum+1,
-			leftBytes+rightBytes+uint64(nItemLoc.NumBytes(t))))
-		t.freeNodeLoc(right)
-		t.markReclaimable(nNode, reclaimMark)
-		return left, middle, newRight, nil
-	}
-
-	if nNode.right.isEmpty() {
-		return t.mkNodeLoc(nil).Copy(n), empty_nodeLoc, empty_nodeLoc, nil
-	}
-	left, middle, right, err :=
-		o.split(t, &nNode.right, s, reclaimMark)
-	if err != nil {
-		return empty_nodeLoc, empty_nodeLoc, empty_nodeLoc, err
-	}
-	leftNum, leftBytes, rightNum, rightBytes, err := numInfo(o, &nNode.left, left)
-	if err != nil {
-		return empty_nodeLoc, empty_nodeLoc, empty_nodeLoc, err
-	}
-	newLeft := t.mkNodeLoc(t.mkNode(nItemLoc, &nNode.left, left,
-		leftNum+rightNum+1,
-		leftBytes+rightBytes+uint64(nItemLoc.NumBytes(t))))
-	t.freeNodeLoc(left)
-	t.markReclaimable(nNode, reclaimMark)
-	return newLeft, middle, right, nil
-}
-
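To make the (left, middle, right) contract documented for split above concrete without gkvlite's persistence machinery, here is a small editorial, memory-only treap sketch; its tnode type and split function are illustrative stand-ins, not gkvlite internals.

package main

import "fmt"

// tnode is an illustrative in-memory treap node (max-heap by priority).
type tnode struct {
	key         string
	priority    int
	left, right *tnode
}

// split returns (left, middle, right) for split key s: left holds keys < s,
// right holds keys > s, and middle is the node with key s, or nil if absent.
func split(n *tnode, s string) (left, middle, right *tnode) {
	if n == nil {
		return nil, nil, nil
	}
	if s == n.key {
		return n.left, n, n.right
	}
	if s < n.key {
		l, m, r := split(n.left, s)
		// n had the highest priority in its subtree, so putting it back on
		// top with r as its new left child preserves the heap property.
		return l, m, &tnode{n.key, n.priority, r, n.right}
	}
	l, m, r := split(n.right, s)
	return &tnode{n.key, n.priority, n.left, l}, m, r
}

func main() {
	root := &tnode{"m", 100,
		&tnode{"d", 50, nil, nil},
		&tnode{"t", 40, nil, nil}}
	l, m, r := split(root, "m")
	fmt.Println(l.key, m.key, r.key) // d m t
}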
-// Joins this treap and that treap into one treap.  Unlike union(),
-// the join() function assumes all keys from this treap should be less
-// than keys from that treap.
-func (o *Store) join(t *Collection, this *nodeLoc, that *nodeLoc,
-	reclaimMark *node) (
-	res *nodeLoc, err error) {
-	thisNode, err := this.read(o)
-	if err != nil {
-		return empty_nodeLoc, err
-	}
-	thatNode, err := that.read(o)
-	if err != nil {
-		return empty_nodeLoc, err
-	}
-	if this.isEmpty() || thisNode == nil {
-		return t.mkNodeLoc(nil).Copy(that), nil
-	}
-	if that.isEmpty() || thatNode == nil {
-		return t.mkNodeLoc(nil).Copy(this), nil
-	}
-	thisItemLoc := &thisNode.item
-	thisItem, err := thisItemLoc.read(t, false)
-	if err != nil {
-		return empty_nodeLoc, err
-	}
-	thatItemLoc := &thatNode.item
-	thatItem, err := thatItemLoc.read(t, false)
-	if err != nil {
-		return empty_nodeLoc, err
-	}
-	if thisItem.Priority > thatItem.Priority {
-		newRight, err :=
-			o.join(t, &thisNode.right, that, reclaimMark)
-		if err != nil {
-			return empty_nodeLoc, err
-		}
-		leftNum, leftBytes, rightNum, rightBytes, err :=
-			numInfo(o, &thisNode.left, newRight)
-		if err != nil {
-			return empty_nodeLoc, err
-		}
-		res = t.mkNodeLoc(t.mkNode(thisItemLoc, &thisNode.left, newRight,
-			leftNum+rightNum+1,
-			leftBytes+rightBytes+uint64(thisItemLoc.NumBytes(t))))
-		t.markReclaimable(thisNode, reclaimMark)
-		t.freeNodeLoc(newRight)
-		return res, nil
-	}
-	newLeft, err :=
-		o.join(t, this, &thatNode.left, reclaimMark)
-	if err != nil {
-		return empty_nodeLoc, err
-	}
-	leftNum, leftBytes, rightNum, rightBytes, err :=
-		numInfo(o, newLeft, &thatNode.right)
-	if err != nil {
-		return empty_nodeLoc, err
-	}
-	res = t.mkNodeLoc(t.mkNode(thatItemLoc, newLeft, &thatNode.right,
-		leftNum+rightNum+1,
-		leftBytes+rightBytes+uint64(thatItemLoc.NumBytes(t))))
-	t.markReclaimable(thatNode, reclaimMark)
-	t.freeNodeLoc(newLeft)
-	return res, nil
-}
-
-func (o *Store) walk(t *Collection, withValue bool, cfn func(*node) (*nodeLoc, bool)) (
-	res *Item, err error) {
-	rnl := t.rootAddRef()
-	defer t.rootDecRef(rnl)
-	n := rnl.root
-	nNode, err := n.read(o)
-	if err != nil || n.isEmpty() || nNode == nil {
-		return nil, err
-	}
-	for {
-		child, ok := cfn(nNode)
-		if !ok {
-			return nil, nil
-		}
-		childNode, err := child.read(o)
-		if err != nil {
-			return nil, err
-		}
-		if child.isEmpty() || childNode == nil {
-			i, err := nNode.item.read(t, withValue)
-			if err != nil {
-				return nil, err
-			}
-			o.ItemAddRef(t, i)
-			return i, nil
-		}
-		nNode = childNode
-	}
-}
-
-func (o *Store) visitNodes(t *Collection, n *nodeLoc, target []byte,
-	withValue bool, visitor ItemVisitorEx, depth uint64,
-	choiceFunc func(int, *node) (bool, *nodeLoc, *nodeLoc)) (bool, error) {
-	nNode, err := n.read(o)
-	if err != nil {
-		return false, err
-	}
-	if n.isEmpty() || nNode == nil {
-		return true, nil
-	}
-	nItemLoc := &nNode.item
-	nItem, err := nItemLoc.read(t, false)
-	if err != nil {
-		return false, err
-	}
-	if nItem == nil {
-		panic(fmt.Sprintf("visitNodes nItem nil: %#v", nNode))
-	}
-	choice, choiceT, choiceF := choiceFunc(t.compare(target, nItem.Key), nNode)
-	if choice {
-		keepGoing, err :=
-			o.visitNodes(t, choiceT, target, withValue, visitor, depth+1, choiceFunc)
-		if err != nil || !keepGoing {
-			return false, err
-		}
-		nItem, err := nItemLoc.read(t, withValue)
-		if err != nil {
-			return false, err
-		}
-		if !visitor(nItem, depth) {
-			return false, nil
-		}
-	}
-	return o.visitNodes(t, choiceF, target, withValue, visitor, depth+1, choiceFunc)
-}