v.io/syncbase/x/ref/services/syncbase/localblobstore/blobmap: rename chunkmap to blobmap

This CL renames v.io/syncbase/x/ref/services/syncbase/localblobstore/chunkmap
to v.io/syncbase/x/ref/services/syncbase/localblobstore/blobmap.

This is in preparation for storing more data about blobs in the underlying leveldb.
The comments will change to reflect that in the next CL.

This rename was put in a CL by itself to ease review of the automated parts of the change.

The CL was generated as follows:

	git mv x/ref/services/syncbase/localblobstore/chunkmap x/ref/services/syncbase/localblobstore/blobmap
	git mv x/ref/services/syncbase/localblobstore/blobmap/chunkmap.go x/ref/services/syncbase/localblobstore/blobmap/blobmap.go
	git mv x/ref/services/syncbase/localblobstore/blobmap/chunkmap_test.go x/ref/services/syncbase/localblobstore/blobmap/blobmap_test.go

	for x in \
			x/ref/services/syncbase/localblobstore/blobmap/blobmap.go \
			x/ref/services/syncbase/localblobstore/blobmap/blobmap_test.go \
			x/ref/services/syncbase/localblobstore/fs_cablobstore/fs_cablobstore.go; do
		sed 's/ChunkMap/BlobMap/g; s/chunkmap/blobmap/g; s/\<cm\>/bm/g' < $x | gofmt > /tmp/x
		cp /tmp/x $x
	done
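
As a sanity check for reviewers, here is a minimal sketch of a caller
before and after the rename (signatures as they appear in the diff below;
ctx, dir, chunkHash, and blobID are illustrative placeholders, not code
from this repository):

	// Only the package name, type name, and receiver name change.
	bm, err := blobmap.New(ctx, dir) // was: cm, err := chunkmap.New(ctx, dir)
	if err != nil {
		return err
	}
	defer bm.Close()
	err = bm.AssociateChunkWithLocation(ctx, chunkHash,
		blobmap.Location{BlobID: blobID, Offset: 0, Size: 1})
	if err == nil {
		var loc blobmap.Location
		loc, err = bm.LookupChunk(ctx, chunkHash)
		_ = loc // loc.BlobID, loc.Offset, loc.Size are unchanged fields
	}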

Change-Id: I66151ea04e0252253b458c65362cc3f01c5284eb
diff --git a/x/ref/services/syncbase/localblobstore/chunkmap/chunkmap.go b/x/ref/services/syncbase/localblobstore/blobmap/blobmap.go
similarity index 83%
rename from x/ref/services/syncbase/localblobstore/chunkmap/chunkmap.go
rename to x/ref/services/syncbase/localblobstore/blobmap/blobmap.go
index a13cf9f..c674f68 100644
--- a/x/ref/services/syncbase/localblobstore/chunkmap/chunkmap.go
+++ b/x/ref/services/syncbase/localblobstore/blobmap/blobmap.go
@@ -2,10 +2,10 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Package chunkmap implements a map from chunk checksums to chunk locations
+// Package blobmap implements a map from chunk checksums to chunk locations
 // and vice versa, using a store.Store (currently, one implemented with
 // leveldb).
-package chunkmap
+package blobmap
 
 import "encoding/binary"
 import "sync"
@@ -15,15 +15,15 @@
 import "v.io/v23/context"
 import "v.io/v23/verror"
 
-const pkgPath = "v.io/syncbase/x/ref/services/syncbase/localblobstore/chunkmap"
+const pkgPath = "v.io/syncbase/x/ref/services/syncbase/localblobstore/blobmap"
 
 var (
-	errBadBlobIDLen        = verror.Register(pkgPath+".errBadBlobIDLen", verror.NoRetry, "{1:}{2:} chunkmap {3}: bad blob length {4} should be {5}{:_}")
-	errBadChunkHashLen     = verror.Register(pkgPath+".errBadChunkHashLen", verror.NoRetry, "{1:}{2:} chunkmap {3}: bad chunk hash length {4} should be {5}{:_}")
-	errNoSuchBlob          = verror.Register(pkgPath+".errNoSuchBlob", verror.NoRetry, "{1:}{2:} chunkmap {3}: no such blob{:_}")
-	errMalformedChunkEntry = verror.Register(pkgPath+".errMalformedChunkEntry", verror.NoRetry, "{1:}{2:} chunkmap {3}: malfored chunk entry{:_}")
-	errNoSuchChunk         = verror.Register(pkgPath+".errNoSuchChunk", verror.NoRetry, "{1:}{2:} chunkmap {3}: no such chunk{:_}")
-	errMalformedBlobEntry  = verror.Register(pkgPath+".errMalformedBlobEntry", verror.NoRetry, "{1:}{2:} chunkmap {3}: malfored blob entry{:_}")
+	errBadBlobIDLen        = verror.Register(pkgPath+".errBadBlobIDLen", verror.NoRetry, "{1:}{2:} blobmap {3}: bad blob length {4} should be {5}{:_}")
+	errBadChunkHashLen     = verror.Register(pkgPath+".errBadChunkHashLen", verror.NoRetry, "{1:}{2:} blobmap {3}: bad chunk hash length {4} should be {5}{:_}")
+	errNoSuchBlob          = verror.Register(pkgPath+".errNoSuchBlob", verror.NoRetry, "{1:}{2:} blobmap {3}: no such blob{:_}")
+	errMalformedChunkEntry = verror.Register(pkgPath+".errMalformedChunkEntry", verror.NoRetry, "{1:}{2:} blobmap {3}: malformed chunk entry{:_}")
+	errNoSuchChunk         = verror.Register(pkgPath+".errNoSuchChunk", verror.NoRetry, "{1:}{2:} blobmap {3}: no such chunk{:_}")
+	errMalformedBlobEntry  = verror.Register(pkgPath+".errMalformedBlobEntry", verror.NoRetry, "{1:}{2:} blobmap {3}: malformed blob entry{:_}")
 )
 
 // There are two tables: chunk-to-location, and blob-to-chunk.
@@ -81,34 +81,34 @@
 	Size   int64  // size of chunk
 }
 
-// A ChunkMap maps chunk checksums to Locations, and vice versa.
-type ChunkMap struct {
+// A BlobMap maps chunk checksums to Locations, and vice versa.
+type BlobMap struct {
 	dir string      // the directory where the store is held
 	st  store.Store // private store that holds the mapping.
 }
 
-// New() returns a pointer to a ChunkMap, backed by storage in directory dir.
-func New(ctx *context.T, dir string) (cm *ChunkMap, err error) {
-	cm = new(ChunkMap)
-	cm.dir = dir
-	cm.st, err = leveldb.Open(dir, leveldb.OpenOptions{CreateIfMissing: true, ErrorIfExists: false})
-	return cm, err
+// New() returns a pointer to a BlobMap, backed by storage in directory dir.
+func New(ctx *context.T, dir string) (bm *BlobMap, err error) {
+	bm = new(BlobMap)
+	bm.dir = dir
+	bm.st, err = leveldb.Open(dir, leveldb.OpenOptions{CreateIfMissing: true, ErrorIfExists: false})
+	return bm, err
 }
 
-// Close() closes any files or other resources associated with *cm.
-// No other methods on cm may be called after Close().
-func (cm *ChunkMap) Close() error {
-	return cm.st.Close()
+// Close() closes any files or other resources associated with *bm.
+// No other methods on bm may be called after Close().
+func (bm *BlobMap) Close() error {
+	return bm.st.Close()
 }
 
 // AssociateChunkWithLocation() remembers that the specified chunk hash is
 // associated with the specified Location.
-func (cm *ChunkMap) AssociateChunkWithLocation(ctx *context.T, chunk []byte, loc Location) (err error) {
+func (bm *BlobMap) AssociateChunkWithLocation(ctx *context.T, chunk []byte, loc Location) (err error) {
 	// Check of expected lengths explicitly in routines that modify the database.
 	if len(loc.BlobID) != blobIDLen {
-		err = verror.New(errBadBlobIDLen, ctx, cm.dir, len(loc.BlobID), blobIDLen)
+		err = verror.New(errBadBlobIDLen, ctx, bm.dir, len(loc.BlobID), blobIDLen)
 	} else if len(chunk) != chunkHashLen {
-		err = verror.New(errBadChunkHashLen, ctx, cm.dir, len(chunk), chunkHashLen)
+		err = verror.New(errBadChunkHashLen, ctx, bm.dir, len(chunk), chunkHashLen)
 	} else {
 		var key [maxKeyLen]byte
 		var val [maxValLen]byte
@@ -122,7 +122,7 @@
 
 		valLen := copy(val[:], chunk)
 		valLen += binary.PutVarint(val[valLen:], loc.Size)
-		err = cm.st.Put(key[:keyLen], val[:valLen])
+		err = bm.st.Put(key[:keyLen], val[:valLen])
 
 		if err == nil {
 			keyLen = copy(key[:], chunkPrefix)
@@ -132,7 +132,7 @@
 			valLen = binary.PutVarint(val[:], loc.Offset)
 			valLen += binary.PutVarint(val[valLen:], loc.Size)
 
-			err = cm.st.Put(key[:keyLen], val[:valLen])
+			err = bm.st.Put(key[:keyLen], val[:valLen])
 		}
 	}
 
@@ -141,10 +141,10 @@
 
 // DeleteBlob() deletes any of the chunk associations previously added with
 // AssociateChunkWithLocation(..., chunk, ...).
-func (cm *ChunkMap) DeleteBlob(ctx *context.T, blob []byte) (err error) {
+func (bm *BlobMap) DeleteBlob(ctx *context.T, blob []byte) (err error) {
 	// Check of expected lengths explicitly in routines that modify the database.
 	if len(blob) != blobIDLen {
-		err = verror.New(errBadBlobIDLen, ctx, cm.dir, len(blob), blobIDLen)
+		err = verror.New(errBadBlobIDLen, ctx, bm.dir, len(blob), blobIDLen)
 	} else {
 		var start [maxKeyLen]byte
 		var limit [maxKeyLen]byte
@@ -163,7 +163,7 @@
 
 		seenAValue := false
 
-		s := cm.st.Scan(start[:startLen], limit[:limitLen])
+		s := bm.st.Scan(start[:startLen], limit[:limitLen])
 		for s.Advance() && err == nil {
 			seenAValue = true
 
@@ -174,13 +174,13 @@
 				deleteKeyLen := deletePrefixLen
 				deleteKeyLen += copy(deleteKey[deleteKeyLen:], value[:chunkHashLen])
 				deleteKeyLen += copy(deleteKey[deleteKeyLen:], blob)
-				err = cm.st.Delete(deleteKey[:deleteKeyLen])
+				err = bm.st.Delete(deleteKey[:deleteKeyLen])
 			}
 
 			if err == nil {
 				// Delete the blob-to-chunk entry last, as it's
 				// used to find the chunk-to-location entry.
-				err = cm.st.Delete(key)
+				err = bm.st.Delete(key)
 			}
 		}
 
@@ -189,7 +189,7 @@
 		} else {
 			err = s.Err()
 			if err == nil && !seenAValue {
-				err = verror.New(errNoSuchBlob, ctx, cm.dir, blob)
+				err = verror.New(errNoSuchBlob, ctx, bm.dir, blob)
 			}
 		}
 	}
@@ -200,10 +200,10 @@
 // LookupChunk() returns a Location for the specified chunk.  Only one Location
 // is returned, even if several are available in the database.  If the client
 // finds that the Location is not available, perhaps because its blob has
-// been deleted, the client should remove the blob from the ChunkMap using
+// been deleted, the client should remove the blob from the BlobMap using
 // DeleteBlob(loc.Blob), and try again.  (The client may also wish to
 // arrange at some point to call GC() on the blob store.)
-func (cm *ChunkMap) LookupChunk(ctx *context.T, chunkHash []byte) (loc Location, err error) {
+func (bm *BlobMap) LookupChunk(ctx *context.T, chunkHash []byte) (loc Location, err error) {
 	var start [maxKeyLen]byte
 	var limit [maxKeyLen]byte
 
@@ -216,7 +216,7 @@
 	var keyBuf [maxKeyLen]byte // buffer for keys returned by stream
 	var valBuf [maxValLen]byte // buffer for values returned by stream
 
-	s := cm.st.Scan(start[:startLen], limit[:limitLen])
+	s := bm.st.Scan(start[:startLen], limit[:limitLen])
 	if s.Advance() {
 		var n int
 		key := s.Key(keyBuf[:])
@@ -227,7 +227,7 @@
 			loc.Size, n = binary.Varint(value[n:])
 		}
 		if n <= 0 {
-			err = verror.New(errMalformedChunkEntry, ctx, cm.dir, chunkHash, key, value)
+			err = verror.New(errMalformedChunkEntry, ctx, bm.dir, chunkHash, key, value)
 		}
 		s.Cancel()
 	} else {
@@ -235,7 +235,7 @@
 			err = s.Err()
 		}
 		if err == nil {
-			err = verror.New(errNoSuchChunk, ctx, cm.dir, chunkHash)
+			err = verror.New(errNoSuchChunk, ctx, bm.dir, chunkHash)
 		}
 	}
 
@@ -243,7 +243,7 @@
 }
 
 // A ChunkStream allows the client to iterate over the chunks in a blob:
-//	cs := cm.NewChunkStream(ctx, blob)
+//	cs := bm.NewChunkStream(ctx, blob)
 //	for cs.Advance() {
 //		chunkHash := cs.Value()
 //		...process chunkHash...
@@ -252,7 +252,7 @@
 //		...there was an error...
 //	}
 type ChunkStream struct {
-	cm     *ChunkMap
+	bm     *BlobMap
 	ctx    *context.T
 	stream store.Stream
 
@@ -267,7 +267,7 @@
 
 // NewChunkStream() returns a pointer to a new ChunkStream that allows the client
 // to enumerate the chunk hashes in a blob, in order.
-func (cm *ChunkMap) NewChunkStream(ctx *context.T, blob []byte) *ChunkStream {
+func (bm *BlobMap) NewChunkStream(ctx *context.T, blob []byte) *ChunkStream {
 	var start [maxKeyLen]byte
 	var limit [maxKeyLen]byte
 
@@ -278,9 +278,9 @@
 	limitLen += copy(limit[limitLen:], offsetLimit)
 
 	cs := new(ChunkStream)
-	cs.cm = cm
+	cs.bm = bm
 	cs.ctx = ctx
-	cs.stream = cm.st.Scan(start[:startLen], limit[:limitLen])
+	cs.stream = bm.st.Scan(start[:startLen], limit[:limitLen])
 	cs.more = true
 
 	return cs
@@ -311,7 +311,7 @@
 				ok = (n > 0)
 			}
 			if !ok {
-				cs.err = verror.New(errMalformedBlobEntry, cs.ctx, cs.cm.dir, cs.key, cs.value)
+				cs.err = verror.New(errMalformedBlobEntry, cs.ctx, cs.bm.dir, cs.key, cs.value)
 				cs.stream.Cancel()
 			}
 		}
@@ -355,8 +355,8 @@
 	cs.stream.Cancel()
 }
 
-// A BlobStream allows the client to iterate over the blobs in ChunkMap:
-//	bs := cm.NewBlobStream(ctx)
+// A BlobStream allows the client to iterate over the blobs in BlobMap:
+//	bs := bm.NewBlobStream(ctx)
 //	for bs.Advance() {
 //		blobID := bs.Value()
 //		...process blobID...
@@ -365,7 +365,7 @@
 //		...there was an error...
 //	}
 type BlobStream struct {
-	cm  *ChunkMap
+	bm  *BlobMap
 	ctx *context.T
 
 	key    []byte          // key for current element
@@ -387,10 +387,10 @@
 }
 
 // NewBlobStream() returns a pointer to a new BlobStream that allows the client
-// to enumerate the blobs ChunkMap, in lexicographic order.
-func (cm *ChunkMap) NewBlobStream(ctx *context.T) *BlobStream {
+// to enumerate the blobs in the BlobMap, in lexicographic order.
+func (bm *BlobMap) NewBlobStream(ctx *context.T) *BlobStream {
 	bs := new(BlobStream)
-	bs.cm = cm
+	bs.bm = bm
 	bs.ctx = ctx
 	bs.more = true
 	return bs
@@ -426,14 +426,14 @@
 			bs.key = bs.keyBuf[:prefixAndKeyLen]
 		}
 		if ok {
-			stream := bs.cm.st.Scan(bs.key, keyLimit)
+			stream := bs.bm.st.Scan(bs.key, keyLimit)
 			if !stream.Advance() {
 				bs.err = stream.Err()
 				ok = false // no more stream, even if no error
 			} else {
 				bs.key = stream.Key(bs.keyBuf[:])
 				if len(bs.key) < prefixAndKeyLen {
-					bs.err = verror.New(errMalformedBlobEntry, bs.ctx, bs.cm.dir, bs.key, stream.Value(nil))
+					bs.err = verror.New(errMalformedBlobEntry, bs.ctx, bs.bm.dir, bs.key, stream.Value(nil))
 					ok = false
 				}
 				stream.Cancel() // We get at most one element from each stream.
diff --git a/x/ref/services/syncbase/localblobstore/blobmap/blobmap_test.go b/x/ref/services/syncbase/localblobstore/blobmap/blobmap_test.go
new file mode 100644
index 0000000..450049a
--- /dev/null
+++ b/x/ref/services/syncbase/localblobstore/blobmap/blobmap_test.go
@@ -0,0 +1,278 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// A test for blobmap.
+package blobmap_test
+
+import "bytes"
+import "io/ioutil"
+import "math/rand"
+import "os"
+import "runtime"
+import "testing"
+
+import "v.io/syncbase/x/ref/services/syncbase/localblobstore/blobmap"
+import "v.io/v23/context"
+
+import "v.io/x/ref/test"
+import _ "v.io/x/ref/runtime/factories/generic"
+
+// id() returns a new random 16-byte byte vector.
+func id() []byte {
+	v := make([]byte, 16)
+	for i := 0; i != len(v); i++ {
+		v[i] = byte(rand.Int31n(256))
+	}
+	return v
+}
+
+// verifyBlobs() tests that the blobs in *bm are those in b[], as revealed via
+// the BlobStream() interface.
+func verifyBlobs(t *testing.T, ctx *context.T, bm *blobmap.BlobMap, b [][]byte) {
+	_, _, callerLine, _ := runtime.Caller(1)
+	seen := make([]bool, len(b)) // seen[i] == whether b[i] seen in *bm
+	bs := bm.NewBlobStream(ctx)
+	var i int
+	for i = 0; bs.Advance(); i++ {
+		blob := bs.Value(nil)
+		var j int
+		for j = 0; j != len(b) && bytes.Compare(b[j], blob) != 0; j++ {
+		}
+		if j == len(b) {
+			t.Errorf("blobmap_test: line %d: unexpected blob %v present in BlobMap",
+				callerLine, blob)
+		} else if seen[j] {
+			t.Errorf("blobmap_test: line %d: blob %v seen twice in BlobMap",
+				callerLine, blob)
+		} else {
+			seen[j] = true
+		}
+	}
+	if i != len(b) {
+		t.Errorf("blobmap_test: line %d: found %d blobs in BlobMap, but expected %d",
+			callerLine, i, len(b))
+	}
+	for j := range seen {
+		if !seen[j] {
+			t.Errorf("blobmap_test: line %d: blob %v not seen in BlobMap",
+				callerLine, b[j])
+		}
+	}
+	if bs.Err() != nil {
+		t.Errorf("blobmap_test: line %d: BlobStream.Advance: unexpected error %v",
+			callerLine, bs.Err())
+	}
+}
+
+// verifyNoChunksInBlob() tests that blob b[blobi] has no chunks in *bm, as
+// revealed by the ChunkStream interface.
+func verifyNoChunksInBlob(t *testing.T, ctx *context.T, bm *blobmap.BlobMap, blobi int, b [][]byte) {
+	_, _, callerLine, _ := runtime.Caller(1)
+	cs := bm.NewChunkStream(ctx, b[blobi])
+	for i := 0; cs.Advance(); i++ {
+		t.Errorf("blobmap_test: line %d: blob %d: chunk %d: %v",
+			callerLine, blobi, i, cs.Value(nil))
+	}
+	if cs.Err() != nil {
+		t.Errorf("blobmap_test: line %d: blob %d: ChunkStream.Advance: unexpected error %v",
+			callerLine, blobi, cs.Err())
+	}
+}
+
+// verifyChunksInBlob() tests that blob b[blobi] in *bm contains the expected
+// chunks from c[].  Each blob is expected to have 8 chunks, 0...7, except that
+// b[1] has c[8] instead of c[4] for chunk 4.
+func verifyChunksInBlob(t *testing.T, ctx *context.T, bm *blobmap.BlobMap, blobi int, b [][]byte, c [][]byte) {
+	_, _, callerLine, _ := runtime.Caller(1)
+	var err error
+	var i int
+	cs := bm.NewChunkStream(ctx, b[blobi])
+	for i = 0; cs.Advance(); i++ {
+		chunk := cs.Value(nil)
+		chunki := i
+		if blobi == 1 && i == 4 { // In blob 1, c[4] is replaced by c[8]
+			chunki = 8
+		}
+		if bytes.Compare(c[chunki], chunk) != 0 {
+			t.Errorf("blobmap_test: line %d: blob %d: chunk %d: got %v, expected %v",
+				callerLine, blobi, i, chunk, c[chunki])
+		}
+
+		var loc blobmap.Location
+		loc, err = bm.LookupChunk(ctx, chunk)
+		if err != nil {
+			t.Errorf("blobmap_test: line %d: blob %d: chunk %d: LookupChunk got unexpected error: %v",
+				callerLine, blobi, i, err)
+		} else {
+			if i == 4 {
+				if bytes.Compare(loc.BlobID, b[blobi]) != 0 {
+					t.Errorf("blobmap_test: line %d: blob %d: chunk %d: Location.BlobID got %v, expected %v",
+						callerLine, blobi, i, loc.BlobID, b[blobi])
+				}
+			} else {
+				if bytes.Compare(loc.BlobID, b[0]) != 0 && bytes.Compare(loc.BlobID, b[1]) != 0 {
+					t.Errorf("blobmap_test: line %d: blob %d: chunk %d: Location.BlobID got %v, expected %v",
+						callerLine, blobi, i, loc.BlobID, b[blobi])
+				}
+			}
+			if loc.Offset != int64(i) {
+				t.Errorf("blobmap_test: line %d: blob %d: chunk %d: Location.Offset got %d, expected %d",
+					callerLine, blobi, i, loc.Offset, i)
+			}
+			if loc.Size != 1 {
+				t.Errorf("blobmap_test: line %d: blob %d: chunk %d: Location.Size got %d, expected 1",
+					callerLine, blobi, i, loc.Size)
+			}
+
+			// The offsets and sizes will match, between the result
+			// from the stream and the result from LookupChunk(),
+			// because for all chunks written to both, they are
+			// written to the same places.  However, the blob need
+			// not match, since LookupChunk() will return an
+			// arbitrary Location in the store that contains the
+			// chunk.
+			loc2 := cs.Location()
+			if loc.Offset != loc2.Offset || loc.Size != loc2.Size {
+				t.Errorf("blobmap_test: line %d: blob %d: chunk %d: disagreement about location: LookupChunk %v vs ChunkStream %v",
+					callerLine, blobi, i, loc, loc2)
+			}
+		}
+	}
+	if cs.Err() != nil {
+		t.Errorf("blobmap_test: line %d: blob %d: ChunkStream.Err(): unexpected error %v",
+			callerLine, blobi, cs.Err())
+	}
+	if i != 8 {
+		t.Errorf("blobmap_test: line %d: blob %d: ChunkStream.Advance unexpectedly saw %d chunks, expected 8",
+			callerLine, blobi, i)
+	}
+}
+
+// TestAddRetrieveAndDelete() tests insertion, retrieval, and deletion of blobs
+// from a BlobMap.  It's all done in one test case, because one cannot retrieve
+// or delete blobs that have not been inserted.
+func TestAddRetrieveAndDelete(t *testing.T) {
+	ctx, shutdown := test.V23Init()
+	defer shutdown()
+
+	// Make a temporary directory.
+	var err error
+	var testDirName string
+	testDirName, err = ioutil.TempDir("", "blobmap_test")
+	if err != nil {
+		t.Fatalf("blobmap_test: can't make tmp directory: %v", err)
+	}
+	defer os.RemoveAll(testDirName)
+
+	// Create a blobmap.
+	var bm *blobmap.BlobMap
+	bm, err = blobmap.New(ctx, testDirName)
+	if err != nil {
+		t.Fatalf("blobmap_test: blobmap.New failed: %v", err)
+	}
+
+	// Two blobs: b[0] and b[1].
+	b := [][]byte{id(), id()}
+
+	// Nine chunks: c[0 .. 8]
+	c := [][]byte{id(), id(), id(), id(), id(), id(), id(), id(), id()}
+
+	// Verify that there are no blobs, or chunks in blobs initially.
+	verifyBlobs(t, ctx, bm, nil)
+	verifyNoChunksInBlob(t, ctx, bm, 0, b)
+	verifyNoChunksInBlob(t, ctx, bm, 1, b)
+
+	// Verify that all chunks have no locations initially.
+	for chunki := range c {
+		_, err = bm.LookupChunk(ctx, c[chunki])
+		if err == nil {
+			t.Errorf("blobmap_test: chunk %d: LookupChunk: unexpected lack of error", chunki)
+		}
+	}
+
+	// Put chunks 0..7 into blob 0, and chunks 0..3, 8, 5..7 into blob 1.
+	// Each chunk is treated as size 1.
+	for blobi := 0; blobi != 2; blobi++ {
+		for i := 0; i != 8; i++ {
+			chunki := i
+			if blobi == 1 && i == 4 { // In blob 1, c[4] is replaced by c[8]
+				chunki = 8
+			}
+			err = bm.AssociateChunkWithLocation(ctx, c[chunki],
+				blobmap.Location{BlobID: b[blobi], Offset: int64(i), Size: 1})
+			if err != nil {
+				t.Errorf("blobmap_test: blob %d: AssociateChunkWithLocation: unexpected error: %v",
+					blobi, err)
+			}
+		}
+	}
+
+	// Verify that the blobs are present, with the chunks specified.
+	verifyBlobs(t, ctx, bm, b)
+	verifyChunksInBlob(t, ctx, bm, 0, b, c)
+	verifyChunksInBlob(t, ctx, bm, 1, b, c)
+
+	// Verify that all chunks now have locations.
+	for chunki := range c {
+		_, err = bm.LookupChunk(ctx, c[chunki])
+		if err != nil {
+			t.Errorf("blobmap_test: chunk %d: LookupChunk: unexpected error: %v",
+				chunki, err)
+		}
+	}
+
+	// Delete b[0].
+	err = bm.DeleteBlob(ctx, b[0])
+	if err != nil {
+		t.Errorf("blobmap_test: blob 0: DeleteBlob: unexpected error: %v", err)
+	}
+
+	// Verify that all chunks except chunk 4 (which was in only blob 0)
+	// still have locations.
+	for chunki := range c {
+		_, err = bm.LookupChunk(ctx, c[chunki])
+		if chunki == 4 {
+			if err == nil {
+				t.Errorf("blobmap_test: chunk %d: LookupChunk: unexpected lack of error",
+					chunki)
+			}
+		} else {
+			if err != nil {
+				t.Errorf("blobmap_test: chunk %d: LookupChunk: unexpected error: %v",
+					chunki, err)
+			}
+		}
+	}
+
+	// Verify that blob 0 is gone, but blob 1 remains.
+	verifyBlobs(t, ctx, bm, b[1:])
+	verifyNoChunksInBlob(t, ctx, bm, 0, b)
+	verifyChunksInBlob(t, ctx, bm, 1, b, c)
+
+	// Delete b[1].
+	err = bm.DeleteBlob(ctx, b[1])
+	if err != nil {
+		t.Errorf("blobmap_test: blob 1: DeleteBlob: unexpected error: %v",
+			err)
+	}
+
+	// Verify that there are no blobs, or chunks in blobs once more.
+	verifyBlobs(t, ctx, bm, nil)
+	verifyNoChunksInBlob(t, ctx, bm, 0, b)
+	verifyNoChunksInBlob(t, ctx, bm, 1, b)
+
+	// Verify that all chunks have no locations once more.
+	for chunki := range c {
+		_, err = bm.LookupChunk(ctx, c[chunki])
+		if err == nil {
+			t.Errorf("blobmap_test: chunk %d: LookupChunk: unexpected lack of error",
+				chunki)
+		}
+	}
+
+	err = bm.Close()
+	if err != nil {
+		t.Errorf("blobmap_test: unexpected error closing BlobMap: %v", err)
+	}
+}
diff --git a/x/ref/services/syncbase/localblobstore/chunkmap/chunkmap_test.go b/x/ref/services/syncbase/localblobstore/chunkmap/chunkmap_test.go
deleted file mode 100644
index b7ab2df..0000000
--- a/x/ref/services/syncbase/localblobstore/chunkmap/chunkmap_test.go
+++ /dev/null
@@ -1,278 +0,0 @@
-// Copyright 2015 The Vanadium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// A test for chunkmap.
-package chunkmap_test
-
-import "bytes"
-import "io/ioutil"
-import "math/rand"
-import "os"
-import "runtime"
-import "testing"
-
-import "v.io/syncbase/x/ref/services/syncbase/localblobstore/chunkmap"
-import "v.io/v23/context"
-
-import "v.io/x/ref/test"
-import _ "v.io/x/ref/runtime/factories/generic"
-
-// id() returns a new random 16-byte byte vector.
-func id() []byte {
-	v := make([]byte, 16)
-	for i := 0; i != len(v); i++ {
-		v[i] = byte(rand.Int31n(256))
-	}
-	return v
-}
-
-// verifyBlobs() tests that the blobs in *cm are those in b[], as revealed via
-// the BlobStream() interface.
-func verifyBlobs(t *testing.T, ctx *context.T, cm *chunkmap.ChunkMap, b [][]byte) {
-	_, _, callerLine, _ := runtime.Caller(1)
-	seen := make([]bool, len(b)) // seen[i] == whether b[i] seen in *cm
-	bs := cm.NewBlobStream(ctx)
-	var i int
-	for i = 0; bs.Advance(); i++ {
-		blob := bs.Value(nil)
-		var j int
-		for j = 0; j != len(b) && bytes.Compare(b[j], blob) != 0; j++ {
-		}
-		if j == len(b) {
-			t.Errorf("chunkmap_test: line %d: unexpected blob %v present in ChunkMap",
-				callerLine, blob)
-		} else if seen[j] {
-			t.Errorf("chunkmap_test: line %d: blob %v seen twice in ChunkMap",
-				callerLine, blob)
-		} else {
-			seen[j] = true
-		}
-	}
-	if i != len(b) {
-		t.Errorf("chunkmap_test: line %d: found %d blobs in ChunkMap, but expected %d",
-			callerLine, i, len(b))
-	}
-	for j := range seen {
-		if !seen[j] {
-			t.Errorf("chunkmap_test: line %d: blob %v not seen un ChunkMap",
-				callerLine, b[j])
-		}
-	}
-	if bs.Err() != nil {
-		t.Errorf("chunkmap_test: line %d: BlobStream.Advance: unexpected error %v",
-			callerLine, bs.Err())
-	}
-}
-
-// verifyNoChunksInBlob() tests that blob b[blobi] has no chunks in *cm, as
-// revealed by the ChunkStream interface.
-func verifyNoChunksInBlob(t *testing.T, ctx *context.T, cm *chunkmap.ChunkMap, blobi int, b [][]byte) {
-	_, _, callerLine, _ := runtime.Caller(1)
-	cs := cm.NewChunkStream(ctx, b[blobi])
-	for i := 0; cs.Advance(); i++ {
-		t.Errorf("chunkmap_test: line %d: blob %d: chunk %d: %v",
-			callerLine, blobi, i, cs.Value(nil))
-	}
-	if cs.Err() != nil {
-		t.Errorf("chunkmap_test: line %d: blob %d: ChunkStream.Advance: unexpected error %v",
-			callerLine, blobi, cs.Err())
-	}
-}
-
-// verifyChunksInBlob() tests that blob b[blobi] in *cm contains the expected
-// chunks from c[].  Each blob is expected to have 8 chunks, 0...7, except that
-// b[1] has c[8] instead of c[4] for chunk 4.
-func verifyChunksInBlob(t *testing.T, ctx *context.T, cm *chunkmap.ChunkMap, blobi int, b [][]byte, c [][]byte) {
-	_, _, callerLine, _ := runtime.Caller(1)
-	var err error
-	var i int
-	cs := cm.NewChunkStream(ctx, b[blobi])
-	for i = 0; cs.Advance(); i++ {
-		chunk := cs.Value(nil)
-		chunki := i
-		if blobi == 1 && i == 4 { // In blob 1, c[4] is replaced by c[8]
-			chunki = 8
-		}
-		if bytes.Compare(c[chunki], chunk) != 0 {
-			t.Errorf("chunkmap_test: line %d: blob %d: chunk %d: got %v, expected %v",
-				callerLine, blobi, i, chunk, c[chunki])
-		}
-
-		var loc chunkmap.Location
-		loc, err = cm.LookupChunk(ctx, chunk)
-		if err != nil {
-			t.Errorf("chunkmap_test: line %d: blob %d: chunk %d: LookupChunk got unexpected error: %v",
-				callerLine, blobi, i, err)
-		} else {
-			if i == 4 {
-				if bytes.Compare(loc.BlobID, b[blobi]) != 0 {
-					t.Errorf("chunkmap_test: line %d: blob %d: chunk %d: Location.BlobID got %v, expected %v",
-						callerLine, blobi, i, loc.BlobID, b[blobi])
-				}
-			} else {
-				if bytes.Compare(loc.BlobID, b[0]) != 0 && bytes.Compare(loc.BlobID, b[1]) != 0 {
-					t.Errorf("chunkmap_test: line %d: blob %d: chunk %d: Location.BlobID got %v, expected %v",
-						callerLine, blobi, i, loc.BlobID, b[blobi])
-				}
-			}
-			if loc.Offset != int64(i) {
-				t.Errorf("chunkmap_test: line %d: blob %d: chunk %d: Location.Offset got %d, expected %d",
-					callerLine, blobi, i, loc.Offset, i)
-			}
-			if loc.Size != 1 {
-				t.Errorf("chunkmap_test: line %d: blob %d: chunk %d: Location.Size got %d, expected 1",
-					callerLine, blobi, i, loc.Size)
-			}
-
-			// The offsets and sizes will match, between the result
-			// from the stream and the result from LookupChunk(),
-			// because for all chunks written to both, they are
-			// written to the same places.  However, the blob need
-			// not match, since LookupChunk() will return an
-			// arbitrary Location in the store that contains the
-			// chunk.
-			loc2 := cs.Location()
-			if loc.Offset != loc2.Offset || loc.Size != loc2.Size {
-				t.Errorf("chunkmap_test: line %d: blob %d: chunk %d: disagreement about location: LookupChunk %v vs ChunkStream %v",
-					callerLine, blobi, i, loc, loc2)
-			}
-		}
-	}
-	if cs.Err() != nil {
-		t.Errorf("chunkmap_test: line %d: blob %d: ChunkStream.Err() unepxected error %v",
-			callerLine, blobi, cs.Err())
-	}
-	if i != 8 {
-		t.Errorf("chunkmap_test: line %d: blob %d: ChunkStream.Advance unexpectedly saw %d chunks, expected 8",
-			callerLine, blobi, i)
-	}
-}
-
-// TestAddRetrieveAndDelete() tests insertion, retrieval, and deletion of blobs
-// from a ChunkMap.  It's all done in one test case, because one cannot retrieve
-// or delete blobs that have not been inserted.
-func TestAddRetrieveAndDelete(t *testing.T) {
-	ctx, shutdown := test.V23Init()
-	defer shutdown()
-
-	// Make a temporary directory.
-	var err error
-	var testDirName string
-	testDirName, err = ioutil.TempDir("", "chunkmap_test")
-	if err != nil {
-		t.Fatalf("chunkmap_test: can't make tmp directory: %v", err)
-	}
-	defer os.RemoveAll(testDirName)
-
-	// Create a chunkmap.
-	var cm *chunkmap.ChunkMap
-	cm, err = chunkmap.New(ctx, testDirName)
-	if err != nil {
-		t.Fatalf("chunkmap_test: chunkmap.New failed: %v", err)
-	}
-
-	// Two blobs: b[0] and b[1].
-	b := [][]byte{id(), id()}
-
-	// Nine chunks: c[0 .. 8]
-	c := [][]byte{id(), id(), id(), id(), id(), id(), id(), id(), id()}
-
-	// Verify that there are no blobs, or chunks in blobs initially.
-	verifyBlobs(t, ctx, cm, nil)
-	verifyNoChunksInBlob(t, ctx, cm, 0, b)
-	verifyNoChunksInBlob(t, ctx, cm, 1, b)
-
-	// Verify that all chunks have no locations initially.
-	for chunki := range c {
-		_, err = cm.LookupChunk(ctx, c[chunki])
-		if err == nil {
-			t.Errorf("chunkmap_test: chunk %d: LookupChunk: unexpected lack of error", chunki)
-		}
-	}
-
-	// Put chunks 0..7 into blob 0, and chunks 0..3, 8, 5..7 into blob 1.
-	// Each blob is treated as size 1.
-	for blobi := 0; blobi != 2; blobi++ {
-		for i := 0; i != 8; i++ {
-			chunki := i
-			if blobi == 1 && i == 4 { // In blob 1, c[4] 4 is replaced by c[8]
-				chunki = 8
-			}
-			err = cm.AssociateChunkWithLocation(ctx, c[chunki],
-				chunkmap.Location{BlobID: b[blobi], Offset: int64(i), Size: 1})
-			if err != nil {
-				t.Errorf("chunkmap_test: blob %d: AssociateChunkWithLocation: unexpected error: %v",
-					blobi, err)
-			}
-		}
-	}
-
-	// Verify that the blobs are present, with the chunks specified.
-	verifyBlobs(t, ctx, cm, b)
-	verifyChunksInBlob(t, ctx, cm, 0, b, c)
-	verifyChunksInBlob(t, ctx, cm, 1, b, c)
-
-	// Verify that all chunks now have locations.
-	for chunki := range c {
-		_, err = cm.LookupChunk(ctx, c[chunki])
-		if err != nil {
-			t.Errorf("chunkmap_test: chunk %d: LookupChunk: unexpected error: %v",
-				chunki, err)
-		}
-	}
-
-	// Delete b[0].
-	err = cm.DeleteBlob(ctx, b[0])
-	if err != nil {
-		t.Errorf("chunkmap_test: blob 0: DeleteBlob: unexpected error: %v", err)
-	}
-
-	// Verify that all chunks except chunk 4 (which was in only blob 0)
-	// still have locations.
-	for chunki := range c {
-		_, err = cm.LookupChunk(ctx, c[chunki])
-		if chunki == 4 {
-			if err == nil {
-				t.Errorf("chunkmap_test: chunk %d: LookupChunk: expected lack of error",
-					chunki)
-			}
-		} else {
-			if err != nil {
-				t.Errorf("chunkmap_test: chunk %d: LookupChunk: unexpected error: %v",
-					chunki, err)
-			}
-		}
-	}
-
-	// Verify that blob 0 is gone, but blob 1 remains.
-	verifyBlobs(t, ctx, cm, b[1:])
-	verifyNoChunksInBlob(t, ctx, cm, 0, b)
-	verifyChunksInBlob(t, ctx, cm, 1, b, c)
-
-	// Delete b[1].
-	err = cm.DeleteBlob(ctx, b[1])
-	if err != nil {
-		t.Errorf("chunkmap_test: blob 1: DeleteBlob: unexpected error: %v",
-			err)
-	}
-
-	// Verify that there are no blobs, or chunks in blobs once more.
-	verifyBlobs(t, ctx, cm, nil)
-	verifyNoChunksInBlob(t, ctx, cm, 0, b)
-	verifyNoChunksInBlob(t, ctx, cm, 1, b)
-
-	// Verify that all chunks have no locations once more.
-	for chunki := range c {
-		_, err = cm.LookupChunk(ctx, c[chunki])
-		if err == nil {
-			t.Errorf("chunkmap_test: chunk %d: LookupChunk: unexpected lack of error",
-				chunki)
-		}
-	}
-
-	err = cm.Close()
-	if err != nil {
-		t.Errorf("chunkmap_test: unexpected error closing ChunkMap: %v", err)
-	}
-}
diff --git a/x/ref/services/syncbase/localblobstore/fs_cablobstore/fs_cablobstore.go b/x/ref/services/syncbase/localblobstore/fs_cablobstore/fs_cablobstore.go
index 4a67bf3..4fb7500 100644
--- a/x/ref/services/syncbase/localblobstore/fs_cablobstore/fs_cablobstore.go
+++ b/x/ref/services/syncbase/localblobstore/fs_cablobstore/fs_cablobstore.go
@@ -51,7 +51,7 @@
 
 import "v.io/syncbase/x/ref/services/syncbase/localblobstore"
 import "v.io/syncbase/x/ref/services/syncbase/localblobstore/chunker"
-import "v.io/syncbase/x/ref/services/syncbase/localblobstore/chunkmap"
+import "v.io/syncbase/x/ref/services/syncbase/localblobstore/blobmap"
 import "v.io/v23/context"
 import "v.io/v23/verror"
 
@@ -90,8 +90,8 @@
 
 // An FsCaBlobStore represents a simple, content-addressable store.
 type FsCaBlobStore struct {
-	rootName string             // The name of the root of the store.
-	cm       *chunkmap.ChunkMap // Mapping from chunks to blob locations and vice versa.
+	rootName string           // The name of the root of the store.
+	bm       *blobmap.BlobMap // Mapping from chunks to blob locations and vice versa.
 
 	// mu protects fields below, plus most fields in each blobDesc when used from a BlobWriter.
 	mu         sync.Mutex
@@ -182,21 +182,21 @@
 			err = verror.New(errNotADir, ctx, fullName)
 		}
 	}
-	var cm *chunkmap.ChunkMap
+	var bm *blobmap.BlobMap
 	if err == nil {
-		cm, err = chunkmap.New(ctx, filepath.Join(rootName, chunkDir))
+		bm, err = blobmap.New(ctx, filepath.Join(rootName, chunkDir))
 	}
 	if err == nil {
 		fscabs = new(FsCaBlobStore)
 		fscabs.rootName = rootName
-		fscabs.cm = cm
+		fscabs.bm = bm
 	}
 	return fscabs, err
 }
 
 // Close() closes the FsCaBlobStore. {
 func (fscabs *FsCaBlobStore) Close() error {
-	return fscabs.cm.Close()
+	return fscabs.bm.Close()
 }
 
 // Root() returns the name of the root directory where *fscabs is stored.
@@ -216,7 +216,7 @@
 		if err != nil {
 			err = verror.New(errCantDeleteBlob, ctx, blobName, err)
 		} else {
-			err = fscabs.cm.DeleteBlob(ctx, blobID)
+			err = fscabs.bm.DeleteBlob(ctx, blobID)
 		}
 	}
 	return err
@@ -410,7 +410,7 @@
 	} else { // commit the change by updating the size
 		fscabs.mu.Lock()
 		desc.size += size
-		desc.cv.Broadcast() // Tell chunkmap BlobReader there's more to read.
+		desc.cv.Broadcast() // Tell blobmap BlobReader there's more to read.
 		fscabs.mu.Unlock()
 	}
 
@@ -584,10 +584,10 @@
 	f      *file     // The file being written.
 	hasher hash.Hash // Running hash of blob.
 
-	// Fields to allow the ChunkMap to be written.
+	// Fields to allow the BlobMap to be written.
 	csBr  *BlobReader     // Reader over the blob that's currently being written.
 	cs    *chunker.Stream // Stream of chunks derived from csBr
-	csErr chan error      // writeChunkMap() sends its result here; Close/CloseWithoutFinalize receives it.
+	csErr chan error      // writeBlobMap() sends its result here; Close/CloseWithoutFinalize receives it.
 }
 
 // NewBlobWriter() returns a pointer to a newly allocated BlobWriter on
@@ -623,9 +623,9 @@
 			// Can't happen; descriptor refers to no fragments.
 			panic(verror.New(errBlobDeleted, ctx, bw.desc.name))
 		}
-		// Write the chunks of this blob into the ChunkMap, as they are
+		// Write the chunks of this blob into the BlobMap, as they are
 		// written by this writer.
-		bw.forkWriteChunkMap()
+		bw.forkWriteBlobMap()
 	}
 	return bw, err
 }
@@ -668,20 +668,20 @@
 			err = nil
 		}
 		if err == nil {
-			// Write the chunks of this blob into the ChunkMap, as
+			// Write the chunks of this blob into the BlobMap, as
 			// they are written by this writer.
-			bw.forkWriteChunkMap()
+			bw.forkWriteBlobMap()
 		}
 	}
 	return bw, err
 }
 
-// forkWriteChunkMap() creates a new thread to run writeChunkMap().  It adds
-// the chunks written to *bw to the blob store's ChunkMap.  The caller is
-// expected to call joinWriteChunkMap() at some later point.
-func (bw *BlobWriter) forkWriteChunkMap() {
+// forkWriteBlobMap() creates a new thread to run writeBlobMap().  It adds
+// the chunks written to *bw to the blob store's BlobMap.  The caller is
+// expected to call joinWriteBlobMap() at some later point.
+func (bw *BlobWriter) forkWriteBlobMap() {
 	// The descRef's ref count is incremented here to compensate
-	// for the decrement it will receive in br.Close() in joinWriteChunkMap.
+	// for the decrement it will receive in br.Close() in joinWriteBlobMap.
 	if !bw.fscabs.descRef(bw.desc) {
 		// Can't happen; descriptor's ref count was already non-zero.
 		panic(verror.New(errBlobDeleted, bw.ctx, bw.desc.name))
@@ -689,24 +689,24 @@
 	bw.csBr = bw.fscabs.blobReaderFromDesc(bw.ctx, bw.desc, waitForWriter)
 	bw.cs = chunker.NewStream(bw.ctx, &chunker.DefaultParam, bw.csBr)
 	bw.csErr = make(chan error)
-	go bw.writeChunkMap()
+	go bw.writeBlobMap()
 }
 
-// insertChunk() inserts chunk into the blob store's ChunkMap, associating it
+// insertChunk() inserts chunk into the blob store's BlobMap, associating it
 // with the specified byte offset in the blob blobID being written by *bw.  The byte
 // offset of the next chunk is returned.
 func (bw *BlobWriter) insertChunk(blobID []byte, chunkHash []byte, offset int64, size int64) (int64, error) {
-	err := bw.fscabs.cm.AssociateChunkWithLocation(bw.ctx, chunkHash[:],
-		chunkmap.Location{BlobID: blobID, Offset: offset, Size: size})
+	err := bw.fscabs.bm.AssociateChunkWithLocation(bw.ctx, chunkHash[:],
+		blobmap.Location{BlobID: blobID, Offset: offset, Size: size})
 	if err != nil {
 		bw.cs.Cancel()
 	}
 	return offset + size, err
 }
 
-// writeChunkMap() iterates over the chunk in stream bw.cs, and associates each
+// writeBlobMap() iterates over the chunks in stream bw.cs, and associates each
 // one with the blob being written.
-func (bw *BlobWriter) writeChunkMap() {
+func (bw *BlobWriter) writeBlobMap() {
 	var err error
 	var offset int64
 	blobID := fileNameToHash(blobDir, bw.desc.name)
@@ -736,13 +736,13 @@
 		offset, err = bw.insertChunk(blobID, chunkHash[:], offset, chunkLen)
 	}
 	bw.fscabs.mu.Unlock()
-	bw.csErr <- err // wake joinWriteChunkMap()
+	bw.csErr <- err // wake joinWriteBlobMap()
 }
 
-// joinWriteChunkMap waits for the completion of the thread forked by forkWriteChunkMap().
-// It returns when the chunks in the blob have been written to the blob store's ChunkMap.
-func (bw *BlobWriter) joinWriteChunkMap(err error) error {
-	err2 := <-bw.csErr // read error from end of writeChunkMap()
+// joinWriteBlobMap waits for the completion of the thread forked by forkWriteBlobMap().
+// It returns when the chunks in the blob have been written to the blob store's BlobMap.
+func (bw *BlobWriter) joinWriteBlobMap(err error) error {
+	err2 := <-bw.csErr // read error from end of writeBlobMap()
 	if err == nil {
 		err = err2
 	}
@@ -765,9 +765,9 @@
 		bw.fscabs.mu.Lock()
 		bw.desc.finalized = true
 		bw.desc.openWriter = false
-		bw.desc.cv.Broadcast() // Tell chunkmap BlobReader that writing has ceased.
+		bw.desc.cv.Broadcast() // Tell blobmap BlobReader that writing has ceased.
 		bw.fscabs.mu.Unlock()
-		err = bw.joinWriteChunkMap(err)
+		err = bw.joinWriteBlobMap(err)
 		bw.fscabs.descUnref(bw.desc)
 	}
 	return err
@@ -783,11 +783,11 @@
 	} else {
 		bw.fscabs.mu.Lock()
 		bw.desc.openWriter = false
-		bw.desc.cv.Broadcast() // Tell chunkmap BlobReader that writing has ceased.
+		bw.desc.cv.Broadcast() // Tell blobmap BlobReader that writing has ceased.
 		bw.fscabs.mu.Unlock()
 		_, err = bw.f.close(bw.ctx, err)
 		bw.f = nil
-		err = bw.joinWriteChunkMap(err)
+		err = bw.joinWriteBlobMap(err)
 		bw.fscabs.descUnref(bw.desc)
 	}
 	return err
@@ -852,7 +852,7 @@
 						offset:   offset + desc.fragment[i].offset,
 						fileName: desc.fragment[i].fileName})
 					bw.desc.size += consume
-					bw.desc.cv.Broadcast() // Tell chunkmap BlobReader there's more to read.
+					bw.desc.cv.Broadcast() // Tell blobmap BlobReader there's more to read.
 					bw.fscabs.mu.Unlock()
 				}
 				offset = 0
@@ -1286,7 +1286,7 @@
 	if blobID == nil {
 		cs = &errorChunkStream{err: verror.New(errInvalidBlobName, ctx, blobName)}
 	} else {
-		cs = fscabs.cm.NewChunkStream(ctx, blobID)
+		cs = fscabs.bm.NewChunkStream(ctx, blobID)
 	}
 	return cs
 }
@@ -1296,8 +1296,8 @@
 // LookupChunk returns the location of a chunk with the specified chunk hash
 // within the store.
 func (fscabs *FsCaBlobStore) LookupChunk(ctx *context.T, chunkHash []byte) (loc localblobstore.Location, err error) {
-	var chunkMapLoc chunkmap.Location
-	chunkMapLoc, err = fscabs.cm.LookupChunk(ctx, chunkHash)
+	var chunkMapLoc blobmap.Location
+	chunkMapLoc, err = fscabs.bm.LookupChunk(ctx, chunkHash)
 	if err == nil {
 		loc.BlobName = hashToFileName(blobDir, chunkMapLoc.BlobID)
 		loc.Size = chunkMapLoc.Size
@@ -1353,17 +1353,17 @@
 	}
 	for !ok && rs.pendingChunk != nil && !rs.isCancelled() {
 		var err error
-		var loc0 chunkmap.Location
-		loc0, err = rs.fscabs.cm.LookupChunk(rs.ctx, rs.pendingChunk)
+		var loc0 blobmap.Location
+		loc0, err = rs.fscabs.bm.LookupChunk(rs.ctx, rs.pendingChunk)
 		if err == nil {
 			blobName := hashToFileName(blobDir, loc0.BlobID)
 			var blobDesc *blobDesc
 			if blobDesc, err = rs.fscabs.getBlob(rs.ctx, blobName); err != nil {
-				// The ChunkMap contained a reference to a
+				// The BlobMap contained a reference to a
 				// deleted blob.  Delete the reference in the
-				// ChunkMap; the next loop iteration will
+				// BlobMap; the next loop iteration will
 				// consider the chunk again.
-				rs.fscabs.cm.DeleteBlob(rs.ctx, loc0.BlobID)
+				rs.fscabs.bm.DeleteBlob(rs.ctx, loc0.BlobID)
 			} else {
 				rs.fscabs.descUnref(blobDesc)
 				// The chunk is in a known blob.  Combine
@@ -1372,8 +1372,8 @@
 				rs.pendingChunk = nil // consumed
 				for rs.pendingChunk == nil && rs.chunkStream.Advance() {
 					rs.pendingChunk = rs.chunkStream.Value(rs.pendingChunkBuf[:])
-					var loc chunkmap.Location
-					loc, err = rs.fscabs.cm.LookupChunk(rs.ctx, rs.pendingChunk)
+					var loc blobmap.Location
+					loc, err = rs.fscabs.bm.LookupChunk(rs.ctx, rs.pendingChunk)
 					if err == nil && bytes.Compare(loc0.BlobID, loc.BlobID) == 0 && loc.Offset == loc0.Offset+loc0.Size {
 						loc0.Size += loc.Size
 						rs.pendingChunk = nil // consumed
@@ -1382,7 +1382,7 @@
 				rs.step = localblobstore.RecipeStep{Blob: blobName, Offset: loc0.Offset, Size: loc0.Size}
 				ok = true
 			}
-		} else { // The chunk is not in the ChunkMap; yield a single chunk hash.
+		} else { // The chunk is not in the BlobMap; yield a single chunk hash.
 			rs.step = localblobstore.RecipeStep{Chunk: rs.pendingChunk}
 			rs.pendingChunk = nil // consumed
 			ok = true
@@ -1446,14 +1446,14 @@
 	}
 	err = caIter.Err()
 
-	// cmBlobs maps the names of blobs found in the ChunkMap to their IDs.
+	// cmBlobs maps the names of blobs found in the BlobMap to their IDs.
 	// (The IDs can be derived from the names; the map is really being used
 	// to record which blobs exist, and the value merely avoids repeated
 	// conversions.)
 	cmBlobs := make(map[string][]byte)
 	if err == nil {
-		// Record all the blobs known to the ChunkMap;
-		bs := fscabs.cm.NewBlobStream(ctx)
+		// Record all the blobs known to the BlobMap;
+		bs := fscabs.bm.NewBlobStream(ctx)
 		for bs.Advance() {
 			blobID := bs.Value(nil)
 			cmBlobs[hashToFileName(blobDir, blobID)] = blobID
@@ -1477,10 +1477,10 @@
 	}
 
 	if err == nil {
-		// Remove all blobs still mentioned in cmBlobs from the ChunkMap;
+		// Remove all blobs still mentioned in cmBlobs from the BlobMap;
 		// these are the ones that no longer exist in the blobs directory.
 		for _, blobID := range cmBlobs {
-			err = fscabs.cm.DeleteBlob(ctx, blobID)
+			err = fscabs.bm.DeleteBlob(ctx, blobID)
 			if err != nil {
 				break
 			}