Merge "disable openssl for android/amd64"
diff --git a/rpc/.api b/rpc/.api
index 0da22cf..d2b6325 100644
--- a/rpc/.api
+++ b/rpc/.api
@@ -28,9 +28,6 @@
 pkg rpc, type ArgDesc struct
 pkg rpc, type ArgDesc struct, Doc string
 pkg rpc, type ArgDesc struct, Name string
-pkg rpc, type BlessingsRequest struct
-pkg rpc, type BlessingsRequest struct, Blessings *security.Blessings
-pkg rpc, type BlessingsRequest struct, Key uint64
 pkg rpc, type CallOpt interface { RPCCallOpt }
 pkg rpc, type CallOpt interface, RPCCallOpt()
 pkg rpc, type ChildrenGlobber interface { GlobChildren__ }
@@ -118,9 +115,7 @@
 pkg rpc, type MountStatus struct, Server string
 pkg rpc, type MountStatus struct, TTL time.Duration
 pkg rpc, type Request struct
-pkg rpc, type Request struct, Blessings BlessingsRequest
 pkg rpc, type Request struct, Deadline time.Deadline
-pkg rpc, type Request struct, Discharges []security.Discharge
 pkg rpc, type Request struct, EndStreamArgs bool
 pkg rpc, type Request struct, GrantedBlessings security.Blessings
 pkg rpc, type Request struct, Language string
diff --git a/rpc/protocol.vdl b/rpc/protocol.vdl
index 70cb2cd..ec4ea11 100644
--- a/rpc/protocol.vdl
+++ b/rpc/protocol.vdl
@@ -108,13 +108,6 @@
 	// provided by the client.
 	GrantedBlessings security.WireBlessings
 
-	// Blessings is the blessings of the Client used for the current RPC.
-	Blessings BlessingsRequest
-
-	// Discharges are third party caveat discharges that may be required
-	// to make Blessings valid.
-	Discharges []security.WireDischarge
-
 	// TraceRequest maintains the vtrace context between clients and servers
 	// and specifies additional parameters that control how tracing behaves.
 	TraceRequest vtrace.Request
@@ -151,23 +144,6 @@
 	AckBlessings bool
 }
 
-// BlessingsRequest represents security.Blessings for a particular request.
-// Since multiple requests on the same authenticated connection often use the
-// same blessings, we implement a caching scheme, where the client picks an
-// unused key and sends it along with the blessings.  After the client receives
-// confirmation that the server has received the key, subsequent client requests
-// only send the key.
-//
-// If BlessingRequest.Key is 0, the Client is running in SecurityNone and the
-// Blessings information should ignored.
-type BlessingsRequest struct {
-	// Key is the required key that the Client has cached Blessings with.
-	Key uint64
-
-	// Blessings is optional if the Server has already been notified of Key.
-	Blessings ?security.WireBlessings
-}
-
 // The reserved method names that we currently understand.
 const (
 	// TODO(toddw): Rename GlobMethod to ReservedGlob.
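
The doc comment deleted above describes a small caching protocol: the client picks an unused uint64 key, sends it along with its wire blessings, and once the server acknowledges the key, later requests on the same connection carry only the key, with key 0 reserved for SecurityNone. For readers without the original code, a minimal sketch of that kind of client-side cache (hypothetical names, not the implementation this CL removes):

```go
package rpccache

import (
	"sync"

	"v.io/v23/security"
)

// blessingsCache tracks which cache keys the server has acknowledged.
// Key 0 is reserved to mean SecurityNone.
type blessingsCache struct {
	mu      sync.Mutex
	nextKey uint64          // next unused key; starts at 1
	acked   map[uint64]bool // keys the server has confirmed receiving
}

// newKey allocates a fresh key under which blessings will be cached.
func (c *blessingsCache) newKey() uint64 {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.nextKey++
	return c.nextKey
}

// forRequest returns the key to put on the wire and, unless the server has
// already acknowledged the key, the blessings themselves.
func (c *blessingsCache) forRequest(key uint64, b *security.Blessings) (uint64, *security.Blessings) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.acked[key] {
		return key, nil // server already knows this key; send only the key
	}
	return key, b
}
```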
diff --git a/rpc/protocol.vdl.go b/rpc/protocol.vdl.go
index b7a4bd2..de3c91e 100644
--- a/rpc/protocol.vdl.go
+++ b/rpc/protocol.vdl.go
@@ -46,11 +46,6 @@
 	// GrantedBlessings are blessings bound to the principal running the server,
 	// provided by the client.
 	GrantedBlessings security.Blessings
-	// Blessings is the blessings of the Client used for the current RPC.
-	Blessings BlessingsRequest
-	// Discharges are third party caveat discharges that may be required
-	// to make Blessings valid.
-	Discharges []security.Discharge
 	// TraceRequest maintains the vtrace context between clients and servers
 	// and specifies additional parameters that control how tracing behaves.
 	TraceRequest vtrace.Request
@@ -92,31 +87,9 @@
 }) {
 }
 
-// BlessingsRequest represents security.Blessings for a particular request.
-// Since multiple requests on the same authenticated connection often use the
-// same blessings, we implement a caching scheme, where the client picks an
-// unused key and sends it along with the blessings.  After the client receives
-// confirmation that the server has received the key, subsequent client requests
-// only send the key.
-//
-// If BlessingRequest.Key is 0, the Client is running in SecurityNone and the
-// Blessings information should ignored.
-type BlessingsRequest struct {
-	// Key is the required key that the Client has cached Blessings with.
-	Key uint64
-	// Blessings is optional if the Server has already been notified of Key.
-	Blessings *security.Blessings
-}
-
-func (BlessingsRequest) __VDLReflect(struct {
-	Name string `vdl:"v.io/v23/rpc.BlessingsRequest"`
-}) {
-}
-
 func init() {
 	vdl.Register((*Request)(nil))
 	vdl.Register((*Response)(nil))
-	vdl.Register((*BlessingsRequest)(nil))
 }
 
 // TODO(toddw): Rename GlobMethod to ReservedGlob.
diff --git a/syncbase/featuretests/blob_v23_test.go b/syncbase/featuretests/blob_v23_test.go
index f586531..8896f35 100644
--- a/syncbase/featuretests/blob_v23_test.go
+++ b/syncbase/featuretests/blob_v23_test.go
@@ -9,24 +9,23 @@
 	"crypto/rand"
 	"fmt"
 	"reflect"
+	"testing"
 	"time"
 
 	"v.io/v23/context"
 	"v.io/v23/naming"
 	wire "v.io/v23/services/syncbase/nosql"
 	"v.io/v23/syncbase"
-	_ "v.io/x/ref/runtime/factories/generic"
+	"v.io/x/ref/lib/v23test"
 	"v.io/x/ref/services/syncbase/server/util"
-	"v.io/x/ref/test/v23tests"
 )
 
-//go:generate jiri test generate
+func TestV23BlobWholeTransfer(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
 
-func V23TestBlobWholeTransfer(t *v23tests.T) {
-	v23tests.RunRootMT(t, "--v23.tcp.address=127.0.0.1:0")
-
-	sbs, cleanup := setupSyncbases(t, 2)
-	defer cleanup()
+	sbs := setupSyncbases(t, sh, 2)
 
 	sgName := naming.Join("s0", util.SyncbaseSuffix, "SG1")
 
diff --git a/syncbase/featuretests/client_v23_test.go b/syncbase/featuretests/client_v23_test.go
index d65d55e..9907ff4 100644
--- a/syncbase/featuretests/client_v23_test.go
+++ b/syncbase/featuretests/client_v23_test.go
@@ -5,24 +5,24 @@
 package featuretests_test
 
 import (
+	"testing"
+
 	"v.io/v23/syncbase"
-	_ "v.io/x/ref/runtime/factories/generic"
-	tu "v.io/x/ref/services/syncbase/testutil"
-	"v.io/x/ref/test/v23tests"
+	"v.io/x/ref/lib/v23test"
 )
 
-//go:generate jiri test generate
-
-func V23TestSyncbasedPutGet(t *v23tests.T) {
-	v23tests.RunRootMT(t, "--v23.tcp.address=127.0.0.1:0")
+func TestV23SyncbasedPutGet(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
 
 	// Start syncbased.
-	serverCreds := forkCredentials(t, "server")
-	cleanup := tu.StartSyncbased(t, serverCreds, testSbName, "", `{"Read": {"In":["root:server", "root:client"]}, "Write": {"In":["root:server", "root:client"]}}`)
-	defer cleanup()
+	serverCreds := sh.ForkCredentials("server")
+	sh.StartSyncbase(serverCreds, testSbName, "", `{"Read": {"In":["root:server", "root:client"]}, "Write": {"In":["root:server", "root:client"]}}`)
 
 	// Create app, database and table.
-	ctx := forkContext(t, "client")
+	// TODO(ivanpi): Use setupAppA.
+	ctx := sh.ForkContext("client")
 	a := syncbase.NewService(testSbName).App("a")
 	if err := a.Create(ctx, nil); err != nil {
 		t.Fatalf("unable to create an app: %v", err)
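
Both files above, and the remaining test files below, follow the same mechanical pattern: each V23Test* function driven by the generated v23_test.go becomes an ordinary Test* function that builds its own shell. A skeleton of the new style, using only calls that appear in this CL (the test name is a placeholder):

```go
func TestV23Example(t *testing.T) {
	sh := v23test.NewShell(t, v23test.Opts{Large: true}) // Large marks an expensive multi-process test
	defer sh.Cleanup()                                   // tears down every child the shell started
	sh.StartRootMountTable()                             // replaces v23tests.RunRootMT(t, "--v23.tcp.address=127.0.0.1:0")

	serverCreds := sh.ForkCredentials("server") // fresh principal blessed as root:server
	clientCtx := sh.ForkContext("client")       // context whose principal is blessed as root:client
	_, _ = serverCreds, clientCtx               // real tests start syncbased and issue RPCs here
}
```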
diff --git a/syncbase/featuretests/cr_v23_test.go b/syncbase/featuretests/cr_v23_test.go
index 2e19071..9d64f4f 100644
--- a/syncbase/featuretests/cr_v23_test.go
+++ b/syncbase/featuretests/cr_v23_test.go
@@ -7,6 +7,7 @@
 import (
 	"fmt"
 	"strings"
+	"testing"
 	"time"
 
 	"v.io/v23/context"
@@ -15,13 +16,10 @@
 	"v.io/v23/syncbase"
 	"v.io/v23/syncbase/nosql"
 	"v.io/v23/verror"
-	_ "v.io/x/ref/runtime/factories/generic"
+	"v.io/x/ref/lib/v23test"
 	"v.io/x/ref/services/syncbase/server/util"
-	"v.io/x/ref/test/v23tests"
 )
 
-//go:generate jiri test generate
-
 // Tests the conflict resolution configuration rules.
 // Setup:
 // S0 and S1 both have data for keys foo0 to foo9 prepopulated.
@@ -34,31 +32,33 @@
 //
 // TODO(jlodhia): Add more rules based on value type and combination of key
 // prefix and value type once its implemented.
-func V23TestCRRuleConfig(t *v23tests.T) {
-	runTestWithSetup(t, 10, func(client0Ctx, client1Ctx *context.T, sgName string) {
-		// Turn off syncing on both s0 and s1 by removing each other from syncgroup ACLs.
-		ok(t, toggleSync(client0Ctx, "s0", sgName, "root:s0"))
-		ok(t, toggleSync(client1Ctx, "s1", sgName, "root:s1"))
+func TestV23CRRuleConfig(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	client0Ctx, client1Ctx, sgName := setupCRTest(t, sh, 10)
 
-		// Since sync is paused, the following updates are concurrent and not
-		// racy as long as Put() is sufficiently synchronous.
-		ok(t, updateData(client0Ctx, "s0", 0, 10, "concurrentUpdate"))
-		ok(t, updateData(client1Ctx, "s1", 0, 10, "concurrentUpdate"))
+	// Turn off syncing on both s0 and s1 by removing each other from syncgroup ACLs.
+	ok(t, toggleSync(client0Ctx, "s0", sgName, "root:s0"))
+	ok(t, toggleSync(client1Ctx, "s1", sgName, "root:s1"))
 
-		schemaKeyPrefix := "foo0"
-		runWithAppBasedResolver(t, client0Ctx, client1Ctx, schemaKeyPrefix, 2, func() {
-			// Re enable sync between the two syncbases and wait for a bit to let the
-			// syncbases sync and call conflict resolution.
-			ok(t, toggleSync(client0Ctx, "s0", sgName, "root:s0;root:s1"))
-			ok(t, toggleSync(client1Ctx, "s1", sgName, "root:s0;root:s1"))
+	// Since sync is paused, the following updates are concurrent and not
+	// racy as long as Put() is sufficiently synchronous.
+	ok(t, updateData(client0Ctx, "s0", 0, 10, "concurrentUpdate"))
+	ok(t, updateData(client1Ctx, "s1", 0, 10, "concurrentUpdate"))
 
-			// Verify that the resolved data looks correct.
-			ok(t, waitForValue(client0Ctx, "s0", "foo0", "AppResolvedVal", schemaKeyPrefix))
-			ok(t, waitForValue(client0Ctx, "s0", "foo1", "concurrentUpdate"+"s1", schemaKeyPrefix))
+	schemaKeyPrefix := "foo0"
+	runWithAppBasedResolver(t, client0Ctx, client1Ctx, schemaKeyPrefix, 2, func() {
+		// Re-enable sync between the two syncbases and wait for a bit to let the
+		// syncbases sync and call conflict resolution.
+		ok(t, toggleSync(client0Ctx, "s0", sgName, "root:s0;root:s1"))
+		ok(t, toggleSync(client1Ctx, "s1", sgName, "root:s0;root:s1"))
 
-			ok(t, waitForValue(client1Ctx, "s1", "foo0", "AppResolvedVal", schemaKeyPrefix))
-			ok(t, waitForValue(client1Ctx, "s1", "foo1", "concurrentUpdate"+"s1", schemaKeyPrefix))
-		})
+		// Verify that the resolved data looks correct.
+		ok(t, waitForValue(client0Ctx, "s0", "foo0", "AppResolvedVal", schemaKeyPrefix))
+		ok(t, waitForValue(client0Ctx, "s0", "foo1", "concurrentUpdate"+"s1", schemaKeyPrefix))
+
+		ok(t, waitForValue(client1Ctx, "s1", "foo0", "AppResolvedVal", schemaKeyPrefix))
+		ok(t, waitForValue(client1Ctx, "s1", "foo1", "concurrentUpdate"+"s1", schemaKeyPrefix))
 	})
 }
 
@@ -69,36 +69,38 @@
 // value for foo0 concurrently where S1's write has a newer timestamp.
 // Result:
 // The value for foo0 after sync settles on what S1 wrote for both syncbases.
-func V23TestCRDefault(t *v23tests.T) {
-	runTestWithSetup(t, 1, func(client0Ctx, client1Ctx *context.T, sgName string) {
-		// Turn off syncing on both s0 and s1 by removing each other from syncgroup ACLs.
-		ok(t, toggleSync(client0Ctx, "s0", sgName, "root:s0"))
-		ok(t, toggleSync(client1Ctx, "s1", sgName, "root:s1"))
+func TestV23CRDefault(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	client0Ctx, client1Ctx, sgName := setupCRTest(t, sh, 1)
 
-		// Since sync is paused, the following updates are concurrent and not
-		// racy as long as Put() is sufficiently synchronous.
-		ok(t, updateData(client0Ctx, "s0", 0, 1, "concurrentUpdate"))
-		time.Sleep(5 * time.Millisecond) // make sure that the clock moves forwared between the two updates.
-		ok(t, updateData(client1Ctx, "s1", 0, 1, "concurrentUpdate"))
+	// Turn off syncing on both s0 and s1 by removing each other from syncgroup ACLs.
+	ok(t, toggleSync(client0Ctx, "s0", sgName, "root:s0"))
+	ok(t, toggleSync(client1Ctx, "s1", sgName, "root:s1"))
 
-		// Add new seperate keys to each syncbase so that we can verify if sync
-		// has happened between the two syncbases by waiting on the other's key.
-		ok(t, populateData(client0Ctx, "s0", "foo", 22, 23))
-		ok(t, populateData(client1Ctx, "s1", "foo", 44, 45))
+	// Since sync is paused, the following updates are concurrent and not
+	// racy as long as Put() is sufficiently synchronous.
+	ok(t, updateData(client0Ctx, "s0", 0, 1, "concurrentUpdate"))
+	time.Sleep(5 * time.Millisecond) // make sure that the clock moves forward between the two updates.
+	ok(t, updateData(client1Ctx, "s1", 0, 1, "concurrentUpdate"))
 
-		// Re enable sync between the two syncbases and wait for a bit to let the
-		// syncbases sync and call conflict resolution.
-		ok(t, toggleSync(client0Ctx, "s0", sgName, "root:s0;root:s1"))
-		ok(t, toggleSync(client1Ctx, "s1", sgName, "root:s0;root:s1"))
+	// Add new separate keys to each syncbase so that we can verify if sync
+	// has happened between the two syncbases by waiting on the other's key.
+	ok(t, populateData(client0Ctx, "s0", "foo", 22, 23))
+	ok(t, populateData(client1Ctx, "s1", "foo", 44, 45))
 
-		// Verify that both sides have synced with the other.
-		ok(t, waitForValue(client0Ctx, "s0", "foo44", "testkey", "")) // 44 is written by S1
-		ok(t, waitForValue(client1Ctx, "s1", "foo22", "testkey", "")) // 22 is written by S0
+	// Re-enable sync between the two syncbases and wait for a bit to let the
+	// syncbases sync and call conflict resolution.
+	ok(t, toggleSync(client0Ctx, "s0", sgName, "root:s0;root:s1"))
+	ok(t, toggleSync(client1Ctx, "s1", sgName, "root:s0;root:s1"))
 
-		// Verify that the resolved data looks correct.
-		ok(t, waitForValue(client0Ctx, "s0", "foo0", "concurrentUpdate"+"s1", ""))
-		ok(t, waitForValue(client1Ctx, "s1", "foo0", "concurrentUpdate"+"s1", ""))
-	})
+	// Verify that both sides have synced with the other.
+	ok(t, waitForValue(client0Ctx, "s0", "foo44", "testkey", "")) // 44 is written by S1
+	ok(t, waitForValue(client1Ctx, "s1", "foo22", "testkey", "")) // 22 is written by S0
+
+	// Verify that the resolved data looks correct.
+	ok(t, waitForValue(client0Ctx, "s0", "foo0", "concurrentUpdate"+"s1", ""))
+	ok(t, waitForValue(client1Ctx, "s1", "foo0", "concurrentUpdate"+"s1", ""))
 }
 
 // Tests last timestamp wins for batches under conflict.
@@ -110,33 +112,35 @@
 // Result:
 // After conflict resolution, final values for all rows within the batch must
 // come from either S0 or S1 but not a mixture of the two.
-func V23TestCRWithAtomicBatch(t *v23tests.T) {
-	runTestWithSetup(t, 100, func(client0Ctx, client1Ctx *context.T, sgName string) {
-		// Turn off syncing on both s0 and s1 by removing each other from syncgroup ACLs.
-		ok(t, toggleSync(client0Ctx, "s0", sgName, "root:s0"))
-		ok(t, toggleSync(client1Ctx, "s1", sgName, "root:s1"))
+func TestV23CRWithAtomicBatch(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	client0Ctx, client1Ctx, sgName := setupCRTest(t, sh, 100)
 
-		// Since sync is paused, the following updates are concurrent and not
-		// racy as long as Put() is sufficiently synchronous.
-		go ok(t, updateDataInBatch(client0Ctx, "s0", 0, 100, "concurrentBatchUpdate", "batchDoneKey1"))
-		go ok(t, updateDataInBatch(client1Ctx, "s1", 0, 100, "concurrentBatchUpdate", "batchDoneKey2"))
-		time.Sleep(1 * time.Second) // let the above go routine get scheduled.
+	// Turn off syncing on both s0 and s1 by removing each other from syncgroup ACLs.
+	ok(t, toggleSync(client0Ctx, "s0", sgName, "root:s0"))
+	ok(t, toggleSync(client1Ctx, "s1", sgName, "root:s1"))
 
-		// Re enable sync between the two syncbases and wait for a bit to let the
-		// syncbases sync and call conflict resolution.
-		ok(t, toggleSync(client0Ctx, "s0", sgName, "root:s0;root:s1"))
-		ok(t, toggleSync(client1Ctx, "s1", sgName, "root:s0;root:s1"))
+	// Since sync is paused, the following updates are concurrent and not
+	// racy as long as Put() is sufficiently synchronous.
+	go ok(t, updateDataInBatch(client0Ctx, "s0", 0, 100, "concurrentBatchUpdate", "batchDoneKey1"))
+	go ok(t, updateDataInBatch(client1Ctx, "s1", 0, 100, "concurrentBatchUpdate", "batchDoneKey2"))
+	time.Sleep(1 * time.Second) // let the above goroutines get scheduled.
 
-		// Make sure that the sync has completed by injecting a row on s0 and
-		// reading it on s1.
-		ok(t, populateData(client0Ctx, "s0", "foo", 200, 201))
-		ok(t, waitForValue(client1Ctx, "s1", "foo200", "testkey", ""))
-		ok(t, populateData(client1Ctx, "s1", "foo", 400, 401))
-		ok(t, waitForValue(client0Ctx, "s0", "foo400", "testkey", ""))
+	// Re-enable sync between the two syncbases and wait for a bit to let the
+	// syncbases sync and call conflict resolution.
+	ok(t, toggleSync(client0Ctx, "s0", sgName, "root:s0;root:s1"))
+	ok(t, toggleSync(client1Ctx, "s1", sgName, "root:s0;root:s1"))
 
-		ok(t, verifyConflictResolvedBatch(client0Ctx, "s0", "foo", 0, 100, "concurrentBatchUpdate"))
-		ok(t, verifyConflictResolvedBatch(client1Ctx, "s1", "foo", 0, 100, "concurrentBatchUpdate"))
-	})
+	// Make sure that the sync has completed by injecting a row on s0 and
+	// reading it on s1.
+	ok(t, populateData(client0Ctx, "s0", "foo", 200, 201))
+	ok(t, waitForValue(client1Ctx, "s1", "foo200", "testkey", ""))
+	ok(t, populateData(client1Ctx, "s1", "foo", 400, 401))
+	ok(t, waitForValue(client0Ctx, "s0", "foo400", "testkey", ""))
+
+	ok(t, verifyConflictResolvedBatch(client0Ctx, "s0", "foo", 0, 100, "concurrentBatchUpdate"))
+	ok(t, verifyConflictResolvedBatch(client1Ctx, "s1", "foo", 0, 100, "concurrentBatchUpdate"))
 }
 
 // Tests AppResolves resolution policy by creating conflicts for rows that will
@@ -145,40 +149,42 @@
 //    calls to the app.
 // 2) 5 rows written as a single batch on both syncbases resulting into a
 //    single conflict for the batch.
-func V23TestCRAppResolved(t *v23tests.T) {
-	runTestWithSetup(t, 10, func(client0Ctx, client1Ctx *context.T, sgName string) {
-		// Turn off syncing on both s0 and s1 by removing each other from syncgroup ACLs.
-		ok(t, toggleSync(client0Ctx, "s0", sgName, "root:s0"))
-		ok(t, toggleSync(client1Ctx, "s1", sgName, "root:s1"))
+func TestV23CRAppResolved(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	client0Ctx, client1Ctx, sgName := setupCRTest(t, sh, 10)
 
-		// Since sync is paused, the following updates are concurrent and not
-		// racy as long as Put() is sufficiently synchronous.
-		ok(t, updateData(client0Ctx, "s0", 0, 5, "concurrentUpdate"))
-		ok(t, updateData(client1Ctx, "s1", 0, 5, "concurrentUpdate"))
+	// Turn off syncing on both s0 and s1 by removing each other from syncgroup ACLs.
+	ok(t, toggleSync(client0Ctx, "s0", sgName, "root:s0"))
+	ok(t, toggleSync(client1Ctx, "s1", sgName, "root:s1"))
 
-		ok(t, updateDataInBatch(client0Ctx, "s0", 5, 10, "concurrentBatchUpdate", ""))
-		ok(t, updateDataInBatch(client1Ctx, "s1", 5, 10, "concurrentBatchUpdate", ""))
+	// Since sync is paused, the following updates are concurrent and not
+	// racy as long as Put() is sufficiently synchronous.
+	ok(t, updateData(client0Ctx, "s0", 0, 5, "concurrentUpdate"))
+	ok(t, updateData(client1Ctx, "s1", 0, 5, "concurrentUpdate"))
 
-		schemaPrefix := "foo"
-		keyPrefix := "foo"
-		// TODO(jlodhia): change the expected num conflicts from 12 to 6 once
-		// sync's cr code handles duplicate resolutions internally.
-		runWithAppBasedResolver(t, client0Ctx, client1Ctx, schemaPrefix, 12, func() {
-			// Re enable sync between the two syncbases and wait for a bit to let the
-			// syncbases sync and call conflict resolution.
-			ok(t, toggleSync(client0Ctx, "s0", sgName, "root:s0;root:s1"))
-			ok(t, toggleSync(client1Ctx, "s1", sgName, "root:s0;root:s1"))
+	ok(t, updateDataInBatch(client0Ctx, "s0", 5, 10, "concurrentBatchUpdate", ""))
+	ok(t, updateDataInBatch(client1Ctx, "s1", 5, 10, "concurrentBatchUpdate", ""))
 
-			// Verify that the resolved data looks correct.
-			keyUnderConflict := "foo8" // one of the keys under conflict
-			ok(t, waitForValue(client0Ctx, "s0", keyUnderConflict, "AppResolvedVal", schemaPrefix))
-			ok(t, verifyConflictResolvedData(client0Ctx, "s0", keyPrefix, schemaPrefix, 0, 5, "AppResolvedVal"))
-			ok(t, verifyConflictResolvedData(client0Ctx, "s0", keyPrefix, schemaPrefix, 5, 10, "AppResolvedVal"))
+	schemaPrefix := "foo"
+	keyPrefix := "foo"
+	// TODO(jlodhia): change the expected num conflicts from 12 to 6 once
+	// sync's cr code handles duplicate resolutions internally.
+	runWithAppBasedResolver(t, client0Ctx, client1Ctx, schemaPrefix, 12, func() {
+		// Re-enable sync between the two syncbases and wait for a bit to let the
+		// syncbases sync and call conflict resolution.
+		ok(t, toggleSync(client0Ctx, "s0", sgName, "root:s0;root:s1"))
+		ok(t, toggleSync(client1Ctx, "s1", sgName, "root:s0;root:s1"))
 
-			ok(t, waitForValue(client1Ctx, "s1", keyUnderConflict, "AppResolvedVal", schemaPrefix))
-			ok(t, verifyConflictResolvedData(client1Ctx, "s1", keyPrefix, schemaPrefix, 0, 5, "AppResolvedVal"))
-			ok(t, verifyConflictResolvedData(client1Ctx, "s1", keyPrefix, schemaPrefix, 5, 10, "AppResolvedVal"))
-		})
+		// Verify that the resolved data looks correct.
+		keyUnderConflict := "foo8" // one of the keys under conflict
+		ok(t, waitForValue(client0Ctx, "s0", keyUnderConflict, "AppResolvedVal", schemaPrefix))
+		ok(t, verifyConflictResolvedData(client0Ctx, "s0", keyPrefix, schemaPrefix, 0, 5, "AppResolvedVal"))
+		ok(t, verifyConflictResolvedData(client0Ctx, "s0", keyPrefix, schemaPrefix, 5, 10, "AppResolvedVal"))
+
+		ok(t, waitForValue(client1Ctx, "s1", keyUnderConflict, "AppResolvedVal", schemaPrefix))
+		ok(t, verifyConflictResolvedData(client1Ctx, "s1", keyPrefix, schemaPrefix, 0, 5, "AppResolvedVal"))
+		ok(t, verifyConflictResolvedData(client1Ctx, "s1", keyPrefix, schemaPrefix, 5, 10, "AppResolvedVal"))
 	})
 }
 
@@ -193,35 +199,37 @@
 // are newer than S0's.
 // Result:
 // All rows are resolved via AppResolves.
-func V23TestCRAppBasedResolutionOverridesOthers(t *v23tests.T) {
-	runTestWithSetup(t, 20, func(client0Ctx, client1Ctx *context.T, sgName string) {
-		// Turn off syncing on both s0 and s1 by removing each other from syncgroup ACLs.
-		ok(t, toggleSync(client0Ctx, "s0", sgName, "root:s0"))
-		ok(t, toggleSync(client1Ctx, "s1", sgName, "root:s1"))
+func TestV23CRAppBasedResolutionOverridesOthers(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	client0Ctx, client1Ctx, sgName := setupCRTest(t, sh, 20)
 
-		// Since sync is paused, the following updates are concurrent and not
-		// racy as long as Put() is sufficiently synchronous.
-		ok(t, updateDataInBatch(client0Ctx, "s0", 0, 20, "concurrentBatchUpdate", ""))
-		ok(t, updateDataInBatch(client1Ctx, "s1", 0, 20, "concurrentBatchUpdate", ""))
+	// Turn off syncing on both s0 and s1 by removing each other from syncgroup ACLs.
+	ok(t, toggleSync(client0Ctx, "s0", sgName, "root:s0"))
+	ok(t, toggleSync(client1Ctx, "s1", sgName, "root:s1"))
 
-		schemaPrefix := "foo1"
-		keyPrefix := "foo"
-		// TODO(jlodhia): change the expected num conflicts from 2 to 1 once
-		// sync's cr code handles duplicate resolutions internally.
-		runWithAppBasedResolver(t, client0Ctx, client1Ctx, schemaPrefix, 2, func() {
-			// Re enable sync between the two syncbases and wait for a bit to let the
-			// syncbases sync and call conflict resolution.
-			ok(t, toggleSync(client0Ctx, "s0", sgName, "root:s0;root:s1"))
-			ok(t, toggleSync(client1Ctx, "s1", sgName, "root:s0;root:s1"))
+	// Since sync is paused, the following updates are concurrent and not
+	// racy as long as Put() is sufficiently synchronous.
+	ok(t, updateDataInBatch(client0Ctx, "s0", 0, 20, "concurrentBatchUpdate", ""))
+	ok(t, updateDataInBatch(client1Ctx, "s1", 0, 20, "concurrentBatchUpdate", ""))
 
-			// Verify that the resolved data looks correct.
-			keyUnderConflict := "foo11" // one of the keys under conflict
-			ok(t, waitForValue(client0Ctx, "s0", keyUnderConflict, "AppResolvedVal", schemaPrefix))
-			ok(t, verifyConflictResolvedData(client0Ctx, "s0", keyPrefix, schemaPrefix, 0, 20, "AppResolvedVal"))
+	schemaPrefix := "foo1"
+	keyPrefix := "foo"
+	// TODO(jlodhia): change the expected num conflicts from 2 to 1 once
+	// sync's cr code handles duplicate resolutions internally.
+	runWithAppBasedResolver(t, client0Ctx, client1Ctx, schemaPrefix, 2, func() {
+		// Re-enable sync between the two syncbases and wait for a bit to let the
+		// syncbases sync and call conflict resolution.
+		ok(t, toggleSync(client0Ctx, "s0", sgName, "root:s0;root:s1"))
+		ok(t, toggleSync(client1Ctx, "s1", sgName, "root:s0;root:s1"))
 
-			ok(t, waitForValue(client1Ctx, "s1", keyUnderConflict, "AppResolvedVal", schemaPrefix))
-			ok(t, verifyConflictResolvedData(client1Ctx, "s1", keyPrefix, schemaPrefix, 0, 20, "AppResolvedVal"))
-		})
+		// Verify that the resolved data looks correct.
+		keyUnderConflict := "foo11" // one of the keys under conflict
+		ok(t, waitForValue(client0Ctx, "s0", keyUnderConflict, "AppResolvedVal", schemaPrefix))
+		ok(t, verifyConflictResolvedData(client0Ctx, "s0", keyPrefix, schemaPrefix, 0, 20, "AppResolvedVal"))
+
+		ok(t, waitForValue(client1Ctx, "s1", keyUnderConflict, "AppResolvedVal", schemaPrefix))
+		ok(t, verifyConflictResolvedData(client1Ctx, "s1", keyPrefix, schemaPrefix, 0, 20, "AppResolvedVal"))
 	})
 }
 
@@ -235,59 +243,54 @@
 // S1 concurrently writes batches B2{foo3 to foo6}
 // Result:
 // All rows are resolved via AppResolves as a single conflict call.
-func V23TestCRMultipleBatchesAsSingleConflict(t *v23tests.T) {
+func TestV23CRMultipleBatchesAsSingleConflict(t *testing.T) {
 	// TODO(hpucha): Start running this test once sync handles insertion of
 	// local objects by conflict resolution which originally were not under
 	// conflict.
 	t.Skip()
 
-	runTestWithSetup(t, 10, func(client0Ctx, client1Ctx *context.T, sgName string) {
-		// Turn off syncing on both s0 and s1 by removing each other from syncgroup ACLs.
-		ok(t, toggleSync(client0Ctx, "s0", sgName, "root:s0"))
-		ok(t, toggleSync(client1Ctx, "s1", sgName, "root:s1"))
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	client0Ctx, client1Ctx, sgName := setupCRTest(t, sh, 10)
 
-		// Since sync is paused, the following updates are concurrent and not
-		// racy as long as Put() is sufficiently synchronous.
-		// Batch1 has 0, 1, 2, 3 on S0
-		ok(t, updateDataInBatch(client0Ctx, "s0", 0, 4, "concurrentBatchUpdate", ""))
-		// Batch2 has 6, 7, 8, 9 on S0
-		ok(t, updateDataInBatch(client0Ctx, "s0", 6, 10, "concurrentBatchUpdate", ""))
-		// Batch3 has 3, 4, 5, 6 on S1
-		ok(t, updateDataInBatch(client1Ctx, "s1", 3, 7, "concurrentBatchUpdate", ""))
+	// Turn off syncing on both s0 and s1 by removing each other from syncgroup ACLs.
+	ok(t, toggleSync(client0Ctx, "s0", sgName, "root:s0"))
+	ok(t, toggleSync(client1Ctx, "s1", sgName, "root:s1"))
 
-		schemaPrefix := "foo"
-		keyPrefix := "foo"
-		// TODO(jlodhia): change the expected num conflicts from 2 to 1 once
-		// sync's cr code handles duplicate resolutions internally.
-		runWithAppBasedResolver(t, client0Ctx, client1Ctx, schemaPrefix, 2, func() {
-			// Re enable sync between the two syncbases and wait for a bit to let the
-			// syncbases sync and call conflict resolution.
-			ok(t, toggleSync(client0Ctx, "s0", sgName, "root:s0;root:s1"))
-			ok(t, toggleSync(client1Ctx, "s1", sgName, "root:s0;root:s1"))
+	// Since sync is paused, the following updates are concurrent and not
+	// racy as long as Put() is sufficiently synchronous.
+	// Batch1 has 0, 1, 2, 3 on S0
+	ok(t, updateDataInBatch(client0Ctx, "s0", 0, 4, "concurrentBatchUpdate", ""))
+	// Batch2 has 6, 7, 8, 9 on S0
+	ok(t, updateDataInBatch(client0Ctx, "s0", 6, 10, "concurrentBatchUpdate", ""))
+	// Batch3 has 3, 4, 5, 6 on S1
+	ok(t, updateDataInBatch(client1Ctx, "s1", 3, 7, "concurrentBatchUpdate", ""))
 
-			// Verify that the resolved data looks correct.
-			keyUnderConflict := "foo8" // one of the keys under conflict
-			ok(t, waitForValue(client0Ctx, "s0", keyUnderConflict, "AppResolvedVal", schemaPrefix))
-			ok(t, verifyConflictResolvedData(client0Ctx, "s0", keyPrefix, schemaPrefix, 0, 10, "AppResolvedVal"))
+	schemaPrefix := "foo"
+	keyPrefix := "foo"
+	// TODO(jlodhia): change the expected num conflicts from 2 to 1 once
+	// sync's cr code handles duplicate resolutions internally.
+	runWithAppBasedResolver(t, client0Ctx, client1Ctx, schemaPrefix, 2, func() {
+		// Re-enable sync between the two syncbases and wait for a bit to let the
+		// syncbases sync and call conflict resolution.
+		ok(t, toggleSync(client0Ctx, "s0", sgName, "root:s0;root:s1"))
+		ok(t, toggleSync(client1Ctx, "s1", sgName, "root:s0;root:s1"))
 
-			ok(t, waitForValue(client1Ctx, "s1", keyUnderConflict, "AppResolvedVal", schemaPrefix))
-			ok(t, verifyConflictResolvedData(client1Ctx, "s1", keyPrefix, schemaPrefix, 0, 10, "AppResolvedVal"))
-		})
+		// Verify that the resolved data looks correct.
+		keyUnderConflict := "foo8" // one of the keys under conflict
+		ok(t, waitForValue(client0Ctx, "s0", keyUnderConflict, "AppResolvedVal", schemaPrefix))
+		ok(t, verifyConflictResolvedData(client0Ctx, "s0", keyPrefix, schemaPrefix, 0, 10, "AppResolvedVal"))
+
+		ok(t, waitForValue(client1Ctx, "s1", keyUnderConflict, "AppResolvedVal", schemaPrefix))
+		ok(t, verifyConflictResolvedData(client1Ctx, "s1", keyPrefix, schemaPrefix, 0, 10, "AppResolvedVal"))
 	})
 }
 
-// TODO(sadovsky): This "runTestWithSetup" pattern is not ideal; it would be
-// better to have tests do the following:
-//     client0, client1, sgName, cleanup := setup(t, numInitRows)
-//     defer cleanup()
-//     <implementation of fn>
-func runTestWithSetup(t *v23tests.T, numInitRows int, fn func(client0, client1 *context.T, sgName string)) {
-	v23tests.RunRootMT(t, "--v23.tcp.address=127.0.0.1:0")
+func setupCRTest(t *testing.T, sh *v23test.Shell, numInitRows int) (client0, client1 *context.T, sgName string) {
+	sh.StartRootMountTable()
+	sbs := setupSyncbases(t, sh, 2)
 
-	sbs, cleanup := setupSyncbases(t, 2)
-	defer cleanup()
-
-	sgName := naming.Join("s0", util.SyncbaseSuffix, "SG1")
+	sgName = naming.Join("s0", util.SyncbaseSuffix, "SG1")
 
 	// Create syncgroup and populate data on s0.
 	ok(t, createSyncgroup(sbs[0].clientCtx, "s0", sgName, "tb:foo", "", sbBlessings(sbs), nil))
@@ -297,15 +300,14 @@
 	ok(t, joinSyncgroup(sbs[1].clientCtx, "s1", sgName))
 	ok(t, verifySyncgroupData(sbs[1].clientCtx, "s1", "foo", 0, numInitRows))
 
-	fn(sbs[0].clientCtx, sbs[1].clientCtx, sgName)
+	return sbs[0].clientCtx, sbs[1].clientCtx, sgName
 }
 
-// TODO(sadovsky): As with runTestWithSetup, this pattern is not ideal in that
-// it makes the test code harder to follow. It would be better to define two
-// helper functions: one to do the stuff before fn(), and another to do the
-// stuff after fn(). (Also, switching to channel-based signalling should
-// simplify things substantially.)
-func runWithAppBasedResolver(t *v23tests.T, client0Ctx, client1Ctx *context.T, schemaPrefix string, maxCallCount int, fn func()) {
+// TODO(sadovsky): This pattern is not ideal in that it makes the test code
+// harder to follow. It would be better to define two helper functions: one
+// to do the stuff before fn(), and another to do the stuff after fn(). (Also,
+// switching to channel-based signalling should simplify things substantially.)
+func runWithAppBasedResolver(t *testing.T, client0Ctx, client1Ctx *context.T, schemaPrefix string, maxCallCount int, fn func()) {
 	// Create and hold a conflict resolution connection on s0 and s1 to receive
 	// future conflicts. The expected call count is 2 * the number of batches
 	// because each batch is being concurrently resolved on s0 and s1 creating new
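
Two notes on the concurrency in these tests. First, in a statement like go ok(t, updateDataInBatch(...)), Go evaluates the arguments of ok, including the updateDataInBatch call, in the calling goroutine; only the ok itself runs asynchronously, so the two batch updates in TestV23CRWithAtomicBatch actually execute sequentially in the test goroutine. Second, the channel-based signalling suggested in the TODO above could take the following shape (an illustrative sketch, not something this CL implements):

```go
// runConcurrently runs each fn in its own goroutine, waits for all of them,
// and returns the first error encountered. Errors come back on a channel
// rather than via t.Fatal, which must not be called off the test goroutine.
func runConcurrently(fns ...func() error) error {
	errc := make(chan error, len(fns))
	for _, fn := range fns {
		go func(fn func() error) { errc <- fn() }(fn)
	}
	var first error
	for range fns {
		if err := <-errc; err != nil && first == nil {
			first = err
		}
	}
	return first
}
```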
diff --git a/syncbase/featuretests/mgmt_v23_test.go b/syncbase/featuretests/mgmt_v23_test.go
index 73681e3..1732142 100644
--- a/syncbase/featuretests/mgmt_v23_test.go
+++ b/syncbase/featuretests/mgmt_v23_test.go
@@ -4,6 +4,10 @@
 
+// TODO(ivanpi): Port and reenable.
+
+// +build ignore
+
 package featuretests_test
 
 import (
 	"errors"
 	"fmt"
@@ -19,8 +23,6 @@
 	"v.io/x/ref/test/v23tests"
 )
 
-//go:generate jiri test generate
-
 var (
 	hostname   string
 	errTimeout = errors.New("timeout")
diff --git a/syncbase/featuretests/restartability_v23_test.go b/syncbase/featuretests/restartability_v23_test.go
index 3e10d44..d1b7263 100644
--- a/syncbase/featuretests/restartability_v23_test.go
+++ b/syncbase/featuretests/restartability_v23_test.go
@@ -5,14 +5,13 @@
 package featuretests_test
 
 import (
-	"bytes"
 	"errors"
 	"io/ioutil"
 	"os"
 	"path/filepath"
 	"reflect"
 	"regexp"
-	"syscall"
+	"testing"
 	"time"
 
 	"v.io/v23/context"
@@ -22,33 +21,25 @@
 	"v.io/v23/syncbase/nosql"
 	"v.io/v23/verror"
 	"v.io/v23/vom"
-	_ "v.io/x/ref/runtime/factories/generic"
+	"v.io/x/ref/lib/v23test"
 	tu "v.io/x/ref/services/syncbase/testutil"
-	"v.io/x/ref/test/modules"
-	"v.io/x/ref/test/v23tests"
 )
 
-//go:generate jiri test generate
-
 const (
 	acl = `{"Read": {"In":["root:u:client"]}, "Write": {"In":["root:u:client"]}, "Resolve": {"In":["root:u:client"]}}`
 )
 
-func restartabilityInit(t *v23tests.T) (rootDir string, clientCtx *context.T, serverCreds *modules.CustomCredentials) {
-	v23tests.RunRootMT(t, "--v23.tcp.address=127.0.0.1:0")
+func restartabilityInit(sh *v23test.Shell) (rootDir string, clientCtx *context.T, serverCreds *v23test.Credentials) {
+	sh.StartRootMountTable()
 
-	var err error
-	rootDir, err = ioutil.TempDir("", "syncbase_leveldb")
-	if err != nil {
-		tu.V23Fatalf(t, "can't create temp dir: %v", err)
-	}
-
-	clientCtx = forkContext(t, "u:client")
-	serverCreds = forkCredentials(t, "r:server")
+	rootDir = sh.MakeTempDir()
+	clientCtx = sh.ForkContext("u:client")
+	serverCreds = sh.ForkCredentials("r:server")
 	return
 }
 
-func createAppDatabaseTable(t *v23tests.T, clientCtx *context.T) nosql.Database {
+// TODO(ivanpi): Duplicate of setupAppA.
+func createAppDatabaseTable(t *testing.T, clientCtx *context.T) nosql.Database {
 	a := syncbase.NewService(testSbName).App("a")
 	if err := a.Create(clientCtx, nil); err != nil {
 		t.Fatalf("unable to create an app: %v", err)
@@ -63,36 +54,38 @@
 	return d
 }
 
-func V23TestRestartabilityHierarchy(t *v23tests.T) {
-	rootDir, clientCtx, serverCreds := restartabilityInit(t)
-	cleanup := tu.StartSyncbased(t, serverCreds, testSbName, rootDir, acl)
+func TestV23RestartabilityHierarchy(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	rootDir, clientCtx, serverCreds := restartabilityInit(sh)
+	cleanup := sh.StartSyncbase(serverCreds, testSbName, rootDir, acl)
 
 	createHierarchy(t, clientCtx)
 	checkHierarchy(t, clientCtx)
-	cleanup()
+	cleanup(os.Interrupt)
 
-	cleanup = tu.StartSyncbased(t, serverCreds, testSbName, rootDir, acl)
-	defer cleanup()
+	_ = sh.StartSyncbase(serverCreds, testSbName, rootDir, acl)
 	checkHierarchy(t, clientCtx)
 }
 
-// Same as V23TestRestartabilityHierarchy except the first syncbase is killed
+// Same as TestV23RestartabilityHierarchy except the first syncbase is killed
 // with SIGKILL instead of SIGINT.
-func V23TestRestartabilityCrash(t *v23tests.T) {
-	rootDir, clientCtx, serverCreds := restartabilityInit(t)
-	cleanup := tu.StartKillableSyncbased(t, serverCreds, testSbName, rootDir, acl)
+func TestV23RestartabilityCrash(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	rootDir, clientCtx, serverCreds := restartabilityInit(sh)
+	cleanup := sh.StartSyncbase(serverCreds, testSbName, rootDir, acl)
 
 	createHierarchy(t, clientCtx)
 	checkHierarchy(t, clientCtx)
-	cleanup(syscall.SIGKILL)
+	cleanup(os.Kill)
 
-	cleanup2 := tu.StartSyncbased(t, serverCreds, testSbName, rootDir, acl)
-	defer cleanup2()
+	_ = sh.StartSyncbase(serverCreds, testSbName, rootDir, acl)
 	checkHierarchy(t, clientCtx)
 }
 
 // Creates apps, dbs, tables, and rows.
-func createHierarchy(t *v23tests.T, ctx *context.T) {
+func createHierarchy(t *testing.T, ctx *context.T) {
 	s := syncbase.NewService(testSbName)
 	for _, a := range []syncbase.App{s.App("a1"), s.App("a2")} {
 		if err := a.Create(ctx, nil); err != nil {
@@ -117,7 +110,7 @@
 }
 
 // Checks for the apps, dbs, tables, and rows created by runCreateHierarchy.
-func checkHierarchy(t *v23tests.T, ctx *context.T) {
+func checkHierarchy(t *testing.T, ctx *context.T) {
 	s := syncbase.NewService(testSbName)
 	var got, want []string
 	var err error
@@ -156,9 +149,11 @@
 	}
 }
 
-func V23TestRestartabilityQuiescent(t *v23tests.T) {
-	rootDir, clientCtx, serverCreds := restartabilityInit(t)
-	cleanup := tu.StartKillableSyncbased(t, serverCreds, testSbName, rootDir, acl)
+func TestV23RestartabilityQuiescent(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	rootDir, clientCtx, serverCreds := restartabilityInit(sh)
+	cleanup := sh.StartSyncbase(serverCreds, testSbName, rootDir, acl)
 	d := createAppDatabaseTable(t, clientCtx)
 
 	tb := d.Table("tb")
@@ -176,10 +171,9 @@
 		t.Fatalf("unexpected value: got %q, want %q", got, want)
 	}
 
-	cleanup(syscall.SIGKILL)
+	cleanup(os.Kill)
 	// Restart syncbase.
-	cleanup2 := tu.StartSyncbased(t, serverCreds, testSbName, rootDir, acl)
-	defer cleanup2()
+	_ = sh.StartSyncbase(serverCreds, testSbName, rootDir, acl)
 
 	if err := r.Get(clientCtx, &result); err != nil {
 		t.Fatalf("r.Get() failed: %v", err)
@@ -190,9 +184,11 @@
 }
 
 // A read-only batch should fail if the server crashes in the middle.
-func V23TestRestartabilityReadOnlyBatch(t *v23tests.T) {
-	rootDir, clientCtx, serverCreds := restartabilityInit(t)
-	cleanup := tu.StartKillableSyncbased(t, serverCreds, testSbName, rootDir, acl)
+func TestV23RestartabilityReadOnlyBatch(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	rootDir, clientCtx, serverCreds := restartabilityInit(sh)
+	cleanup := sh.StartSyncbase(serverCreds, testSbName, rootDir, acl)
 	d := createAppDatabaseTable(t, clientCtx)
 
 	// Add one row.
@@ -215,7 +211,7 @@
 		t.Fatalf("unexpected value: got %q, want %q", got, want)
 	}
 
-	cleanup(syscall.SIGKILL)
+	cleanup(os.Kill)
 	expectedFailCtx, _ := context.WithTimeout(clientCtx, time.Second)
 	// We get a variety of errors depending on how much of the network state of
 	// syncbased has been reclaimed when this rpc goes out.
@@ -224,8 +220,7 @@
 	}
 
 	// Restart syncbase.
-	cleanup2 := tu.StartSyncbased(t, serverCreds, testSbName, rootDir, acl)
-	defer cleanup2()
+	_ = sh.StartSyncbase(serverCreds, testSbName, rootDir, acl)
 
 	if err := r.Get(clientCtx, &result); verror.ErrorID(err) != sbwire.ErrUnknownBatch.ID {
 		t.Fatalf("expected r.Get() to fail because of ErrUnknownBatch.  got: %v", err)
@@ -241,9 +236,11 @@
 }
 
 // A read/write batch should fail if the server crashes in the middle.
-func V23TestRestartabilityReadWriteBatch(t *v23tests.T) {
-	rootDir, clientCtx, serverCreds := restartabilityInit(t)
-	cleanup := tu.StartKillableSyncbased(t, serverCreds, testSbName, rootDir, acl)
+func TestV23RestartabilityReadWriteBatch(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	rootDir, clientCtx, serverCreds := restartabilityInit(sh)
+	cleanup := sh.StartSyncbase(serverCreds, testSbName, rootDir, acl)
 	d := createAppDatabaseTable(t, clientCtx)
 
 	batch, err := d.BeginBatch(clientCtx, nosqlwire.BatchOptions{})
@@ -265,7 +262,7 @@
 		t.Fatalf("unexpected value: got %q, want %q", got, want)
 	}
 
-	cleanup(syscall.SIGKILL)
+	cleanup(os.Kill)
 	expectedFailCtx, _ := context.WithTimeout(clientCtx, time.Second)
 	// We get a variety of errors depending on how much of the network state of
 	// syncbased has been reclaimed when this rpc goes out.
@@ -274,8 +271,7 @@
 	}
 
 	// Restart syncbase.
-	cleanup2 := tu.StartSyncbased(t, serverCreds, testSbName, rootDir, acl)
-	defer cleanup2()
+	_ = sh.StartSyncbase(serverCreds, testSbName, rootDir, acl)
 
 	if err := r.Get(clientCtx, &result); verror.ErrorID(err) != sbwire.ErrUnknownBatch.ID {
 		t.Fatalf("expected r.Get() to fail because of ErrUnknownBatch.  got: %v", err)
@@ -290,7 +286,7 @@
 	}
 }
 
-func decodeString(t *v23tests.T, val []byte) string {
+func decodeString(t *testing.T, val []byte) string {
 	var ret string
 	if err := vom.Decode(val, &ret); err != nil {
 		t.Fatalf("unable to decode: %v", err)
@@ -298,9 +294,11 @@
 	return ret
 }
 
-func V23TestRestartabilityWatch(t *v23tests.T) {
-	rootDir, clientCtx, serverCreds := restartabilityInit(t)
-	cleanup := tu.StartKillableSyncbased(t, serverCreds, testSbName, rootDir, acl)
+func TestV23RestartabilityWatch(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	rootDir, clientCtx, serverCreds := restartabilityInit(sh)
+	cleanup := sh.StartSyncbase(serverCreds, testSbName, rootDir, acl)
 	d := createAppDatabaseTable(t, clientCtx)
 
 	// Put one row as well as get the initial ResumeMarker.
@@ -327,7 +325,7 @@
 		t.Fatalf("unexpected error: %v", err)
 	}
 	if !stream.Advance() {
-		cleanup(syscall.SIGINT)
+		cleanup(os.Interrupt)
 		t.Fatalf("expected to be able to Advance: %v", stream.Err())
 	}
 	change := stream.Change()
@@ -338,7 +336,7 @@
 	marker = change.ResumeMarker
 
 	// Kill syncbased.
-	cleanup(syscall.SIGKILL)
+	cleanup(os.Kill)
 
 	// The stream should break when the server crashes.
 	if stream.Advance() {
@@ -346,7 +344,7 @@
 	}
 
 	// Restart syncbased.
-	cleanup = tu.StartKillableSyncbased(t, serverCreds, testSbName, rootDir, acl)
+	cleanup = sh.StartSyncbase(serverCreds, testSbName, rootDir, acl)
 
 	// Put another row.
 	r = d.Table("tb").Row("r")
@@ -369,7 +367,7 @@
 		t.Fatalf("unexpected row: %s, %s", change.Row, val)
 	}
 
-	cleanup(syscall.SIGKILL)
+	cleanup(os.Kill)
 
 	// The stream should break when the server crashes.
 	if stream.Advance() {
@@ -377,7 +375,7 @@
 	}
 }
 
-func corruptFile(t *v23tests.T, rootDir, pathRegex string) {
+func corruptFile(t *testing.T, rootDir, pathRegex string) {
 	var fileToCorrupt string
 	filepath.Walk(rootDir, func(path string, info os.FileInfo, err error) error {
 		if fileToCorrupt != "" {
@@ -409,64 +407,71 @@
 	}
 }
 
-func V23TestRestartabilityServiceDBCorruption(t *v23tests.T) {
-	rootDir, clientCtx, serverCreds := restartabilityInit(t)
-	cleanup := tu.StartKillableSyncbased(t, serverCreds, testSbName, rootDir, acl)
+func TestV23RestartabilityServiceDBCorruption(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	rootDir, clientCtx, serverCreds := restartabilityInit(sh)
+	cleanup := sh.StartSyncbase(serverCreds, testSbName, rootDir, acl)
 
 	createHierarchy(t, clientCtx)
 	checkHierarchy(t, clientCtx)
-	cleanup(syscall.SIGKILL)
+	cleanup(os.Kill)
 
 	corruptFile(t, rootDir, filepath.Join(rootDir, `leveldb/.*\.log`))
 
+	// TODO(ivanpi): Repeated below, refactor into method.
 	// Expect syncbase to fail to start.
-	syncbased := t.BuildV23Pkg("v.io/x/ref/services/syncbase/syncbased")
-	startOpts := syncbased.StartOpts().WithCustomCredentials(serverCreds)
-	invocation := syncbased.WithStartOpts(startOpts).Start(
+	syncbasedPath := sh.JiriBuildGoPkg("v.io/x/ref/services/syncbase/syncbased")
+	syncbased := sh.Cmd(syncbasedPath,
 		"--alsologtostderr=true",
 		"--v23.tcp.address=127.0.0.1:0",
 		"--v23.permissions.literal", acl,
 		"--name="+testSbName,
 		"--root-dir="+rootDir)
-	stdout, stderr := bytes.NewBuffer(nil), bytes.NewBuffer(nil)
-	if err := invocation.Wait(stdout, stderr); err == nil {
-		t.Fatalf("Expected syncbased to fail to start.")
+	syncbased = syncbased.WithCredentials(serverCreds)
+	syncbased.ExitErrorIsOk = true
+	stdout, stderr := syncbased.Output()
+	if syncbased.Err == nil {
+		t.Fatal("Expected syncbased to fail to start.")
 	}
 	t.Logf("syncbased terminated\nstdout: %v\nstderr: %v\n", stdout, stderr)
 
-	cleanup = tu.StartKillableSyncbased(t, serverCreds, testSbName, rootDir, acl)
+	cleanup = sh.StartSyncbase(serverCreds, testSbName, rootDir, acl)
 
 	createHierarchy(t, clientCtx)
 	checkHierarchy(t, clientCtx)
-	cleanup(syscall.SIGKILL)
+	cleanup(os.Kill)
 }
 
-func V23TestRestartabilityAppDBCorruption(t *v23tests.T) {
-	rootDir, clientCtx, serverCreds := restartabilityInit(t)
-	cleanup := tu.StartKillableSyncbased(t, serverCreds, testSbName, rootDir, acl)
+func TestV23RestartabilityAppDBCorruption(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	rootDir, clientCtx, serverCreds := restartabilityInit(sh)
+	cleanup := sh.StartSyncbase(serverCreds, testSbName, rootDir, acl)
 
 	createHierarchy(t, clientCtx)
 	checkHierarchy(t, clientCtx)
-	cleanup(syscall.SIGKILL)
+	cleanup(os.Kill)
 
 	corruptFile(t, rootDir, `apps/[^/]*/dbs/[^/]*/leveldb/.*\.log`)
 
 	// Expect syncbase to fail to start.
-	syncbased := t.BuildV23Pkg("v.io/x/ref/services/syncbase/syncbased")
-	startOpts := syncbased.StartOpts().WithCustomCredentials(serverCreds)
-	invocation := syncbased.WithStartOpts(startOpts).Start(
+	syncbasedPath := sh.JiriBuildGoPkg("v.io/x/ref/services/syncbase/syncbased")
+	syncbased := sh.Cmd(syncbasedPath,
 		"--alsologtostderr=true",
 		"--v23.tcp.address=127.0.0.1:0",
 		"--v23.permissions.literal", acl,
 		"--name="+testSbName,
 		"--root-dir="+rootDir)
-	stdout, stderr := bytes.NewBuffer(nil), bytes.NewBuffer(nil)
-	if err := invocation.Wait(stdout, stderr); err == nil {
-		t.Fatalf("Expected syncbased to fail to start.")
+	syncbased = syncbased.WithCredentials(serverCreds)
+	syncbased.ExitErrorIsOk = true
+	stdout, stderr := syncbased.Output()
+	if syncbased.Err == nil {
+		t.Fatal("Expected syncbased to fail to start.")
 	}
 	t.Logf("syncbased terminated\nstdout: %v\nstderr: %v\n", stdout, stderr)
 
-	cleanup = tu.StartKillableSyncbased(t, serverCreds, testSbName, rootDir, acl)
+	cleanup = sh.StartSyncbase(serverCreds, testSbName, rootDir, acl)
 
 	// Recreate a1/d1 since that is the one that got corrupted.
 	d := syncbase.NewService(testSbName).App("a1").NoSQLDatabase("d1", nil)
@@ -485,15 +490,17 @@
 	}
 
 	checkHierarchy(t, clientCtx)
-	cleanup(syscall.SIGKILL)
+	cleanup(os.Kill)
 }
 
-func V23TestRestartabilityStoreGarbageCollect(t *v23tests.T) {
+func TestV23RestartabilityStoreGarbageCollect(t *testing.T) {
 	// TODO(ivanpi): Fully testing store garbage collection requires fault
 	// injection or mocking out the store.
 	// NOTE: Test assumes that leveldb destroy is implemented as 'rm -r'.
-	rootDir, clientCtx, serverCreds := restartabilityInit(t)
-	cleanup := tu.StartKillableSyncbased(t, serverCreds, testSbName, rootDir, acl)
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	rootDir, clientCtx, serverCreds := restartabilityInit(sh)
+	cleanup := sh.StartSyncbase(serverCreds, testSbName, rootDir, acl)
 
 	createHierarchy(t, clientCtx)
 	checkHierarchy(t, clientCtx)
@@ -557,7 +564,7 @@
 	createHierarchy(t, clientCtx)
 	checkHierarchy(t, clientCtx)
 
-	cleanup(syscall.SIGKILL)
+	cleanup(os.Kill)
 
 	// leveldbDir should still exist.
 	if _, err := os.Stat(leveldbDir); err != nil {
@@ -566,9 +573,9 @@
 
 	// Restarting syncbased should not affect the hierarchy. Garbage collection
 	// should again fail to destroy leveldbDir.
-	cleanup = tu.StartKillableSyncbased(t, serverCreds, testSbName, rootDir, acl)
+	cleanup = sh.StartSyncbase(serverCreds, testSbName, rootDir, acl)
 	checkHierarchy(t, clientCtx)
-	cleanup(syscall.SIGKILL)
+	cleanup(os.Kill)
 
 	// leveldbDir should still exist.
 	if _, err := os.Stat(leveldbDir); err != nil {
@@ -581,7 +588,7 @@
 	}
 
 	// Restart syncbased. Garbage collection should now succeed.
-	cleanup2 := tu.StartSyncbased(t, serverCreds, testSbName, rootDir, acl)
+	_ = sh.StartSyncbase(serverCreds, testSbName, rootDir, acl)
 
 	// leveldbDir should not exist anymore.
 	if _, err := os.Stat(leveldbDir); !os.IsNotExist(err) {
@@ -590,6 +597,4 @@
 
 	// The hierarchy should not have been affected.
 	checkHierarchy(t, clientCtx)
-
-	cleanup2()
 }
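
Two things worth noting about this file's port. The cleanup functions returned by sh.StartSyncbase now take a portable os.Signal (os.Interrupt and os.Kill are the two signal values the os package guarantees on every platform) instead of the unix-only syscall constants. And the "expect syncbased to fail to start" block is duplicated in the two corruption tests; the refactor flagged by the TODO could look like this sketch (hypothetical helper name, built only from calls already used above):

```go
// expectSyncbasedFailsToStart builds and runs syncbased against rootDir and
// asserts that it exits with an error, e.g. because its store is corrupted.
func expectSyncbasedFailsToStart(t *testing.T, sh *v23test.Shell, creds *v23test.Credentials, rootDir string) {
	syncbasedPath := sh.JiriBuildGoPkg("v.io/x/ref/services/syncbase/syncbased")
	syncbased := sh.Cmd(syncbasedPath,
		"--alsologtostderr=true",
		"--v23.tcp.address=127.0.0.1:0",
		"--v23.permissions.literal", acl,
		"--name="+testSbName,
		"--root-dir="+rootDir).WithCredentials(creds)
	syncbased.ExitErrorIsOk = true // a non-zero exit is the expected outcome
	stdout, stderr := syncbased.Output()
	if syncbased.Err == nil {
		t.Fatal("Expected syncbased to fail to start.")
	}
	t.Logf("syncbased terminated\nstdout: %v\nstderr: %v\n", stdout, stderr)
}
```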
diff --git a/syncbase/featuretests/syncgroup_v23_test.go b/syncbase/featuretests/syncgroup_v23_test.go
index 4cae2a7..b33bd9c 100644
--- a/syncbase/featuretests/syncgroup_v23_test.go
+++ b/syncbase/featuretests/syncgroup_v23_test.go
@@ -10,29 +10,28 @@
 import (
 	"fmt"
 	"strings"
+	"testing"
 	"time"
 
 	"v.io/v23/context"
 	"v.io/v23/naming"
 	"v.io/v23/security"
 	"v.io/v23/security/access"
-	_ "v.io/x/ref/runtime/factories/generic"
+	"v.io/x/ref/lib/v23test"
 	constants "v.io/x/ref/services/syncbase/server/util"
-	"v.io/x/ref/test/v23tests"
 )
 
-//go:generate jiri test generate
-
-// V23TestSyncgroupRendezvousOnline tests that Syncbases can join a syncgroup
+// TestV23SyncgroupRendezvousOnline tests that Syncbases can join a syncgroup
 // when: all Syncbases are online and a creator creates the syncgroup and shares
 // the syncgroup name with all the joiners.
-func V23TestSyncgroupRendezvousOnline(t *v23tests.T) {
-	v23tests.RunRootMT(t, "--v23.tcp.address=127.0.0.1:0")
+func TestV23SyncgroupRendezvousOnline(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
 
 	N := 5
 	// Setup N Syncbases.
-	sbs, cleanup := setupSyncbases(t, N)
-	defer cleanup()
+	sbs := setupSyncbases(t, sh, N)
 
 	// Syncbase s0 is the creator.
 	sgName := naming.Join(sbs[0].sbName, constants.SyncbaseSuffix, "SG1")
@@ -56,23 +55,24 @@
 		ok(t, verifySyncgroupMembers(sb.clientCtx, sb.sbName, sgName, N))
 	}
 
-	fmt.Println("V23TestSyncgroupRendezvousOnline=====Phase 1 Done")
+	fmt.Println("TestV23SyncgroupRendezvousOnline=====Phase 1 Done")
 }
 
-// V23TestSyncgroupRendezvousOnlineCloud tests that Syncbases can join a
+// TestV23SyncgroupRendezvousOnlineCloud tests that Syncbases can join a
 // syncgroup when: all Syncbases are online and a creator creates the syncgroup
 // and nominates a cloud syncbase for the other joiners to join at.
-func V23TestSyncgroupRendezvousOnlineCloud(t *v23tests.T) {
+func TestV23SyncgroupRendezvousOnlineCloud(t *testing.T) {
 	// TODO(hpucha): There is a potential bug that is currently preventing
 	// this test from succeeding.
 	t.Skip()
 
-	v23tests.RunRootMT(t, "--v23.tcp.address=127.0.0.1:0")
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
 
 	N := 5
 	// Setup N+1 Syncbases (1 for the cloud instance).
-	sbs, cleanup := setupSyncbases(t, N+1)
-	defer cleanup()
+	sbs := setupSyncbases(t, sh, N+1)
 
 	// Syncbase s0 is the creator, and sN is the cloud.
 	sgName := naming.Join(sbs[N].sbName, constants.SyncbaseSuffix, "SG1")
@@ -96,23 +96,24 @@
 		// ok(t, verifySyncgroupMembers(sbs[i].clientCtx, sbs[i].sbName, sgName, N+1))
 	}
 
-	fmt.Println("V23TestSyncgroupRendezvousOnlineCloud=====Phase 1 Done")
+	fmt.Println("TestV23SyncgroupRendezvousOnlineCloud=====Phase 1 Done")
 }
 
-// V23TestSyncgroupNeighborhoodOnly tests that Syncbases can join a syncgroup
+// TestV23SyncgroupNeighborhoodOnly tests that Syncbases can join a syncgroup
 // when: all Syncbases do not have general connectivity but can reach each other
 // over neighborhood, and a creator creates the syncgroup and shares the
 // syncgroup name with all the joiners. Restricted connectivity is simulated by
 // picking a syncgroup name that is not reachable and a syncgroup mount table
 // that doesn't exist.
-func V23TestSyncgroupNeighborhoodOnly(t *v23tests.T) {
-	v23tests.RunRootMT(t, "--v23.tcp.address=127.0.0.1:0")
+func TestV23SyncgroupNeighborhoodOnly(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
 
 	N := 5
 
 	// Setup N Syncbases.
-	sbs, cleanup := setupSyncbases(t, N)
-	defer cleanup()
+	sbs := setupSyncbases(t, sh, N)
 
 	// Syncbase s0 is the creator, but the syncgroup refers to non-existent
 	// Syncbase "s6".
@@ -151,20 +152,21 @@
 		ok(t, verifySyncgroupMembers(sb.clientCtx, sb.sbName, sgName, N))
 	}
 
-	fmt.Println("V23TestSyncgroupNeighborhoodOnly=====Phase 1 Done")
+	fmt.Println("TestV23SyncgroupNeighborhoodOnly=====Phase 1 Done")
 }
 
-// V23TestSyncgroupPreknownStaggered tests that Syncbases can join a syncgroup
+// TestV23SyncgroupPreknownStaggered tests that Syncbases can join a syncgroup
 // when: all Syncbases come online in a staggered fashion. Each Syncbase always
 // tries to join a syncgroup with a predetermined name, and if join fails,
 // creates the syncgroup.
-func V23TestSyncgroupPreknownStaggered(t *v23tests.T) {
-	v23tests.RunRootMT(t, "--v23.tcp.address=127.0.0.1:0")
+func TestV23SyncgroupPreknownStaggered(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
 
 	N := 5
 	// Setup N Syncbases.
-	sbs, cleanup := setupSyncbases(t, N)
-	defer cleanup()
+	sbs := setupSyncbases(t, sh, N)
 
 	// Syncbase s0 is the first to join or create. Run s0 separately to
 	// stagger the process.
@@ -192,7 +194,7 @@
 		ok(t, verifySyncgroupMembers(sb.clientCtx, sb.sbName, sgName, N))
 	}
 
-	fmt.Println("V23TestSyncgroupPreknownStaggered=====Phase 1 Done")
+	fmt.Println("TestV23SyncgroupPreknownStaggered=====Phase 1 Done")
 }
 
 ////////////////////////////////////////
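
The join-or-create step described in the TestV23SyncgroupPreknownStaggered comment reduces to the following shape, written with this package's own helpers (a fragment for illustration; the real test runs s0 first to stagger the race):

```go
// Each Syncbase tries to join the predetermined syncgroup name and falls
// back to creating it, so whichever instance gets there first becomes the
// creator and the rest become joiners.
if err := joinSyncgroup(sb.clientCtx, sb.sbName, sgName); err != nil {
	ok(t, createSyncgroup(sb.clientCtx, sb.sbName, sgName, "tb:foo", "", sbBlessings(sbs), nil))
}
```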
diff --git a/syncbase/featuretests/test_util_test.go b/syncbase/featuretests/test_util_test.go
index a9ed6fc..9cc07c1 100644
--- a/syncbase/featuretests/test_util_test.go
+++ b/syncbase/featuretests/test_util_test.go
@@ -8,8 +8,8 @@
 	"errors"
 	"fmt"
 	"reflect"
-	"runtime/debug"
 	"strings"
+	"testing"
 	"time"
 
 	"v.io/v23"
@@ -18,9 +18,8 @@
 	wire "v.io/v23/services/syncbase/nosql"
 	"v.io/v23/syncbase"
 	"v.io/v23/syncbase/nosql"
+	"v.io/x/ref/lib/v23test"
 	tu "v.io/x/ref/services/syncbase/testutil"
-	"v.io/x/ref/test/modules"
-	"v.io/x/ref/test/v23tests"
 )
 
 const (
@@ -47,38 +46,31 @@
 
 type testSyncbase struct {
 	sbName    string
-	sbCreds   *modules.CustomCredentials
+	sbCreds   *v23test.Credentials
 	clientId  string
 	clientCtx *context.T
-	cleanup   func()
 }
 
-// Spawns "num" Syncbase instances and returns handles to them, along with a
-// cleanup function.
-func setupSyncbases(t *v23tests.T, num int) ([]*testSyncbase, func()) {
+// Spawns "num" Syncbase instances and returns handles to them.
+func setupSyncbases(t *testing.T, sh *v23test.Shell, num int, args ...string) []*testSyncbase {
 	sbs := make([]*testSyncbase, num)
 	for i, _ := range sbs {
 		sbName, clientId := fmt.Sprintf("s%d", i), fmt.Sprintf("c%d", i)
 		sbs[i] = &testSyncbase{
 			sbName:    sbName,
-			sbCreds:   forkCredentials(t, sbName),
+			sbCreds:   sh.ForkCredentials(sbName),
 			clientId:  clientId,
-			clientCtx: forkContext(t, clientId),
+			clientCtx: sh.ForkContext(clientId),
 		}
 		// Give RWA permissions to this Syncbase's client.
 		acl := fmt.Sprintf(`{"Read":{"In":["root:%s"]},"Write":{"In":["root:%s"]},"Admin":{"In":["root:%s"]}}`, clientId, clientId, clientId)
-		sbs[i].cleanup = tu.StartSyncbased(t, sbs[i].sbCreds, sbs[i].sbName, "", acl)
+		sh.StartSyncbase(sbs[i].sbCreds, sbs[i].sbName, "", acl, args...)
 	}
 	// Call setupHierarchy on each Syncbase.
 	for _, sb := range sbs {
 		ok(t, setupHierarchy(sb.clientCtx, sb.sbName))
 	}
-	cleanup := func() {
-		for _, sb := range sbs {
-			sb.cleanup()
-		}
-	}
-	return sbs, cleanup
+	return sbs
 }
 
 // Returns a ";"-separated list of Syncbase blessing names.
@@ -162,6 +154,8 @@
 	return nil
 }
 
+// TODO(ivanpi): Remove sendSignal now that all functions using it are in the
+// same process.
 func sendSignal(ctx *context.T, d nosql.Database, signalKey string) error {
 	tb := d.Table(testTable)
 	r := tb.Row(signalKey)
@@ -332,57 +326,29 @@
 	return res
 }
 
-// forkCredentials returns a new *modules.CustomCredentials with a fresh
-// principal, blessed by t with the given extension.
-// TODO(sadovsky): Maybe move to tu.
-func forkCredentials(t *v23tests.T, extension string) *modules.CustomCredentials {
-	c, err := t.Shell().NewChildCredentials(extension)
-	if err != nil {
-		t.Fatal(err)
-	}
-	return c
-}
-
-// forkContext returns a new *context.T with a fresh principal, blessed by t
-// with the given extension.
-// TODO(sadovsky): Maybe move to tu.
-func forkContext(t *v23tests.T, extension string) *context.T {
-	return tu.NewCtx(t.Context(), v23.GetPrincipal(t.Context()), extension)
-}
-
 ////////////////////////////////////////////////////////////
 // Generic testing helpers
 
-func fatal(t *v23tests.T, args ...interface{}) {
-	debug.PrintStack()
-	t.Fatal(args...)
-}
-
-func fatalf(t *v23tests.T, format string, args ...interface{}) {
-	debug.PrintStack()
-	t.Fatalf(format, args...)
-}
-
-func ok(t *v23tests.T, err error) {
+func ok(t *testing.T, err error) {
 	if err != nil {
-		fatal(t, err)
+		tu.Fatal(t, err)
 	}
 }
 
-func nok(t *v23tests.T, err error) {
+func nok(t *testing.T, err error) {
 	if err == nil {
-		fatal(t, "nil err")
+		tu.Fatal(t, "nil err")
 	}
 }
 
-func eq(t *v23tests.T, got, want interface{}) {
+func eq(t *testing.T, got, want interface{}) {
 	if !reflect.DeepEqual(got, want) {
-		fatalf(t, "got %v, want %v", got, want)
+		tu.Fatalf(t, "got %v, want %v", got, want)
 	}
 }
 
-func neq(t *v23tests.T, got, notWant interface{}) {
+func neq(t *testing.T, got, notWant interface{}) {
 	if reflect.DeepEqual(got, notWant) {
-		fatalf(t, "got %v", got)
+		tu.Fatalf(t, "got %v", got)
 	}
 }
diff --git a/syncbase/featuretests/v23_main_test.go b/syncbase/featuretests/v23_main_test.go
new file mode 100644
index 0000000..42b1a4b
--- /dev/null
+++ b/syncbase/featuretests/v23_main_test.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package featuretests_test
+
+import (
+	"os"
+	"testing"
+
+	"v.io/x/ref/lib/v23test"
+	_ "v.io/x/ref/runtime/factories/generic"
+)
+
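+// v23test.Run wraps m.Run with the framework's process-level setup and
+// teardown (replacing the old modules dispatch and shared bin dir logic) and
+// returns the exit code to pass to os.Exit.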
+func TestMain(m *testing.M) {
+	os.Exit(v23test.Run(m.Run))
+}
diff --git a/syncbase/featuretests/v23_test.go b/syncbase/featuretests/v23_test.go
deleted file mode 100644
index 1891ab5..0000000
--- a/syncbase/featuretests/v23_test.go
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2015 The Vanadium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file was auto-generated via go generate.
-// DO NOT UPDATE MANUALLY
-
-package featuretests_test
-
-import (
-	"os"
-	"testing"
-
-	"v.io/x/ref/test/modules"
-	"v.io/x/ref/test/v23tests"
-)
-
-func TestMain(m *testing.M) {
-	modules.DispatchAndExitIfChild()
-	cleanup := v23tests.UseSharedBinDir()
-	r := m.Run()
-	cleanup()
-	os.Exit(r)
-}
-
-func TestV23BlobWholeTransfer(t *testing.T) {
-	v23tests.RunTest(t, V23TestBlobWholeTransfer)
-}
-
-func TestV23SyncbasedPutGet(t *testing.T) {
-	v23tests.RunTest(t, V23TestSyncbasedPutGet)
-}
-
-func TestV23CRRuleConfig(t *testing.T) {
-	v23tests.RunTest(t, V23TestCRRuleConfig)
-}
-
-func TestV23CRDefault(t *testing.T) {
-	v23tests.RunTest(t, V23TestCRDefault)
-}
-
-func TestV23CRWithAtomicBatch(t *testing.T) {
-	v23tests.RunTest(t, V23TestCRWithAtomicBatch)
-}
-
-func TestV23CRAppResolved(t *testing.T) {
-	v23tests.RunTest(t, V23TestCRAppResolved)
-}
-
-func TestV23CRAppBasedResolutionOverridesOthers(t *testing.T) {
-	v23tests.RunTest(t, V23TestCRAppBasedResolutionOverridesOthers)
-}
-
-func TestV23CRMultipleBatchesAsSingleConflict(t *testing.T) {
-	v23tests.RunTest(t, V23TestCRMultipleBatchesAsSingleConflict)
-}
-
-func TestV23DeviceManager(t *testing.T) {
-	v23tests.RunTest(t, V23TestDeviceManager)
-}
-
-func TestV23RestartabilityHierarchy(t *testing.T) {
-	v23tests.RunTest(t, V23TestRestartabilityHierarchy)
-}
-
-func TestV23RestartabilityCrash(t *testing.T) {
-	v23tests.RunTest(t, V23TestRestartabilityCrash)
-}
-
-func TestV23RestartabilityQuiescent(t *testing.T) {
-	v23tests.RunTest(t, V23TestRestartabilityQuiescent)
-}
-
-func TestV23RestartabilityReadOnlyBatch(t *testing.T) {
-	v23tests.RunTest(t, V23TestRestartabilityReadOnlyBatch)
-}
-
-func TestV23RestartabilityReadWriteBatch(t *testing.T) {
-	v23tests.RunTest(t, V23TestRestartabilityReadWriteBatch)
-}
-
-func TestV23RestartabilityWatch(t *testing.T) {
-	v23tests.RunTest(t, V23TestRestartabilityWatch)
-}
-
-func TestV23RestartabilityServiceDBCorruption(t *testing.T) {
-	v23tests.RunTest(t, V23TestRestartabilityServiceDBCorruption)
-}
-
-func TestV23RestartabilityAppDBCorruption(t *testing.T) {
-	v23tests.RunTest(t, V23TestRestartabilityAppDBCorruption)
-}
-
-func TestV23RestartabilityStoreGarbageCollect(t *testing.T) {
-	v23tests.RunTest(t, V23TestRestartabilityStoreGarbageCollect)
-}
-
-func TestV23SyncgroupRendezvousOnline(t *testing.T) {
-	v23tests.RunTest(t, V23TestSyncgroupRendezvousOnline)
-}
-
-func TestV23SyncgroupRendezvousOnlineCloud(t *testing.T) {
-	v23tests.RunTest(t, V23TestSyncgroupRendezvousOnlineCloud)
-}
-
-func TestV23SyncgroupNeighborhoodOnly(t *testing.T) {
-	v23tests.RunTest(t, V23TestSyncgroupNeighborhoodOnly)
-}
-
-func TestV23SyncgroupPreknownStaggered(t *testing.T) {
-	v23tests.RunTest(t, V23TestSyncgroupPreknownStaggered)
-}
-
-func TestV23VClockMovesForward(t *testing.T) {
-	v23tests.RunTest(t, V23TestVClockMovesForward)
-}
-
-func TestV23VClockSystemClockUpdate(t *testing.T) {
-	v23tests.RunTest(t, V23TestVClockSystemClockUpdate)
-}
-
-func TestV23VClockSystemClockFrequency(t *testing.T) {
-	v23tests.RunTest(t, V23TestVClockSystemClockFrequency)
-}
-
-func TestV23VClockNtpUpdate(t *testing.T) {
-	v23tests.RunTest(t, V23TestVClockNtpUpdate)
-}
-
-func TestV23VClockNtpSkewAfterReboot(t *testing.T) {
-	v23tests.RunTest(t, V23TestVClockNtpSkewAfterReboot)
-}
-
-func TestV23VClockNtpFrequency(t *testing.T) {
-	v23tests.RunTest(t, V23TestVClockNtpFrequency)
-}
-
-func TestV23VClockSyncBasic(t *testing.T) {
-	v23tests.RunTest(t, V23TestVClockSyncBasic)
-}
-
-func TestV23VClockSyncWithLocalNtp(t *testing.T) {
-	v23tests.RunTest(t, V23TestVClockSyncWithLocalNtp)
-}
-
-func TestV23VClockSyncWithReboots(t *testing.T) {
-	v23tests.RunTest(t, V23TestVClockSyncWithReboots)
-}
diff --git a/syncbase/featuretests/vclock_v23_test.go b/syncbase/featuretests/vclock_v23_test.go
index 8551787..ebf7e99 100644
--- a/syncbase/featuretests/vclock_v23_test.go
+++ b/syncbase/featuretests/vclock_v23_test.go
@@ -5,19 +5,18 @@
 package featuretests_test
 
 import (
+	"testing"
 	"time"
 
 	"v.io/v23/context"
 	"v.io/v23/naming"
 	wire "v.io/v23/services/syncbase"
-	_ "v.io/x/ref/runtime/factories/generic"
+	"v.io/x/ref/lib/v23test"
 	"v.io/x/ref/services/syncbase/server/util"
 	tu "v.io/x/ref/services/syncbase/testutil"
-	"v.io/x/ref/test/v23tests"
+	"v.io/x/ref/test/expect"
 )
 
-//go:generate jiri test generate
-
 const (
 	openPerms = `{"Read": {"In":["..."]}, "Write": {"In":["..."]}, "Resolve": {"In":["..."]}, "Admin": {"In":["..."]}}`
 )
@@ -29,20 +28,17 @@
 	fiveSecs = 5 * time.Second
 )
 
-func testInit(t *v23tests.T) {
-	v23tests.RunRootMT(t, "--v23.tcp.address=127.0.0.1:0")
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 // Tests for local vclock updates
 
 // Tests that the virtual clock moves forward.
-func V23TestVClockMovesForward(t *v23tests.T) {
-	testInit(t)
-	ctx := forkContext(t, "c0")
-	s0Creds := forkCredentials(t, "s0")
-	cleanup := tu.StartSyncbased(t, s0Creds, "s0", "", openPerms)
-	defer cleanup()
+func TestV23VClockMovesForward(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
+	ctx := sh.ForkContext("c0")
+	s0Creds := sh.ForkCredentials("s0")
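+	// The --dev flag is assumed to enable the DevMode* RPCs exercised below.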
+	sh.StartSyncbase(s0Creds, "s0", "", openPerms, "--dev")
 
 	t0, err := sc("s0").DevModeGetTime(ctx)
 	ok(t, err)
@@ -55,12 +51,13 @@
 }
 
 // Tests that system clock updates affect the virtual clock.
-func V23TestVClockSystemClockUpdate(t *v23tests.T) {
-	testInit(t)
-	ctx := forkContext(t, "c0")
-	s0Creds := forkCredentials(t, "s0")
-	cleanup := tu.StartSyncbased(t, s0Creds, "s0", "", openPerms)
-	defer cleanup()
+func TestV23VClockSystemClockUpdate(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
+	ctx := sh.ForkContext("c0")
+	s0Creds := sh.ForkCredentials("s0")
+	sh.StartSyncbase(s0Creds, "s0", "", openPerms, "--dev")
 
 	// Initialize system time.
 	ok(t, sc("s0").DevModeUpdateVClock(ctx, wire.DevModeUpdateVClockOpts{
@@ -111,12 +108,13 @@
 
 // Tests that the virtual clock daemon checks for system clock updates at the
 // expected frequency (loosely speaking).
-func V23TestVClockSystemClockFrequency(t *v23tests.T) {
-	testInit(t)
-	ctx := forkContext(t, "c0")
-	s0Creds := forkCredentials(t, "s0")
-	cleanup := tu.StartSyncbased(t, s0Creds, "s0", "", openPerms)
-	defer cleanup()
+func TestV23VClockSystemClockFrequency(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
+	ctx := sh.ForkContext("c0")
+	s0Creds := sh.ForkCredentials("s0")
+	sh.StartSyncbase(s0Creds, "s0", "", openPerms, "--dev")
 
 	checkSbTimeNotEq(t, "s0", ctx, jan2015)
 
@@ -140,37 +138,39 @@
 
 // Tests that NTP sync affects virtual clock state (e.g. clock can move
 // backward).
-func V23TestVClockNtpUpdate(t *v23tests.T) {
-	testInit(t)
-	ctx := forkContext(t, "c0")
-	s0Creds := forkCredentials(t, "s0")
-	cleanup := tu.StartSyncbased(t, s0Creds, "s0", "", openPerms)
-	defer cleanup()
+func TestV23VClockNtpUpdate(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
+	ctx := sh.ForkContext("c0")
+	s0Creds := sh.ForkCredentials("s0")
+	sh.StartSyncbase(s0Creds, "s0", "", openPerms, "--dev")
 
 	checkSbTimeNotEq(t, "s0", ctx, jan2015)
 
 	// Use NTP to set the clock to jan2015.
 	ok(t, sc("s0").DevModeUpdateVClock(ctx, wire.DevModeUpdateVClockOpts{
-		NtpHost:     startFakeNtpServer(t, jan2015),
+		NtpHost:     startFakeNtpServer(t, sh, jan2015),
 		DoNtpUpdate: true,
 	}))
 	checkSbTimeApproxEq(t, "s0", ctx, jan2015)
 
 	// Use NTP to move the clock forward to feb2015.
 	ok(t, sc("s0").DevModeUpdateVClock(ctx, wire.DevModeUpdateVClockOpts{
-		NtpHost:     startFakeNtpServer(t, feb2015),
+		NtpHost:     startFakeNtpServer(t, sh, feb2015),
 		DoNtpUpdate: true,
 	}))
 	checkSbTimeApproxEq(t, "s0", ctx, feb2015)
 }
 
 // Tests that NTP skew persists across reboots.
-func V23TestVClockNtpSkewAfterReboot(t *v23tests.T) {
-	testInit(t)
-	ctx := forkContext(t, "c0")
-	s0Creds := forkCredentials(t, "s0")
-	cleanup := tu.StartSyncbased(t, s0Creds, "s0", "", openPerms)
-	defer cleanup()
+func TestV23VClockNtpSkewAfterReboot(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
+	ctx := sh.ForkContext("c0")
+	s0Creds := sh.ForkCredentials("s0")
+	sh.StartSyncbase(s0Creds, "s0", "", openPerms, "--dev")
 
 	// Set s0's local clock.
 	ok(t, sc("s0").DevModeUpdateVClock(ctx, wire.DevModeUpdateVClockOpts{
@@ -183,7 +183,7 @@
 	// Do NTP at s0. As a result, s0 will think it has a one hour NTP skew, i.e.
 	// NTP time minus system clock time equals one hour.
 	ok(t, sc("s0").DevModeUpdateVClock(ctx, wire.DevModeUpdateVClockOpts{
-		NtpHost:     startFakeNtpServer(t, jan2015),
+		NtpHost:     startFakeNtpServer(t, sh, jan2015),
 		DoNtpUpdate: true,
 	}))
 	checkSbTimeApproxEq(t, "s0", ctx, jan2015)
@@ -201,12 +201,13 @@
 
 // Tests that the virtual clock daemon checks in with NTP at the expected
 // frequency (loosely speaking).
-func V23TestVClockNtpFrequency(t *v23tests.T) {
-	testInit(t)
-	ctx := forkContext(t, "c0")
-	s0Creds := forkCredentials(t, "s0")
-	cleanup := tu.StartSyncbased(t, s0Creds, "s0", "", openPerms)
-	defer cleanup()
+func TestV23VClockNtpFrequency(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
+	ctx := sh.ForkContext("c0")
+	s0Creds := sh.ForkCredentials("s0")
+	sh.StartSyncbase(s0Creds, "s0", "", openPerms, "--dev")
 
 	t0, err := sc("s0").DevModeGetTime(ctx)
 	ok(t, err)
@@ -216,7 +217,7 @@
 
 	// Use NTP to set the clock to jan2015.
 	ok(t, sc("s0").DevModeUpdateVClock(ctx, wire.DevModeUpdateVClockOpts{
-		NtpHost:     startFakeNtpServer(t, jan2015),
+		NtpHost:     startFakeNtpServer(t, sh, jan2015),
 		DoNtpUpdate: true,
 	}))
 	checkSbTimeApproxEq(t, "s0", ctx, jan2015)
@@ -226,7 +227,7 @@
 	// virtual clock should continue reporting the old time, even after we sleep
 	// for several seconds.
 	ok(t, sc("s0").DevModeUpdateVClock(ctx, wire.DevModeUpdateVClockOpts{
-		NtpHost:     startFakeNtpServer(t, feb2015),
+		NtpHost:     startFakeNtpServer(t, sh, feb2015),
 		DoNtpUpdate: false,
 	}))
 
@@ -239,16 +240,17 @@
 
 // Tests p2p clock sync where local is not NTP-synced and is 1, 2, or 3 hops
 // away from an NTP-synced device.
-func V23TestVClockSyncBasic(t *v23tests.T) {
-	testInit(t)
-	sbs, cleanup := setupSyncbases(t, 4)
-	defer cleanup()
+func TestV23VClockSyncBasic(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
+	sbs := setupSyncbases(t, sh, 4, "--dev")
 
 	checkSbTimeNotEq(t, "s0", sbs[0].clientCtx, jan2015)
 
 	// Do NTP at s0.
 	ok(t, sc("s0").DevModeUpdateVClock(sbs[0].clientCtx, wire.DevModeUpdateVClockOpts{
-		NtpHost:     startFakeNtpServer(t, jan2015),
+		NtpHost:     startFakeNtpServer(t, sh, jan2015),
 		DoNtpUpdate: true,
 	}))
 	checkSbTimeApproxEq(t, "s0", sbs[0].clientCtx, jan2015)
@@ -267,20 +269,21 @@
 }
 
 // Tests p2p clock sync where multiple devices are NTP-synced.
-func V23TestVClockSyncWithLocalNtp(t *v23tests.T) {
-	testInit(t)
-	sbs, cleanup := setupSyncbases(t, 3)
-	defer cleanup()
+func TestV23VClockSyncWithLocalNtp(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
+	sbs := setupSyncbases(t, sh, 3, "--dev")
 
 	// Do NTP at s0 and s2.
 	ok(t, sc("s0").DevModeUpdateVClock(sbs[0].clientCtx, wire.DevModeUpdateVClockOpts{
-		NtpHost:     startFakeNtpServer(t, jan2015),
+		NtpHost:     startFakeNtpServer(t, sh, jan2015),
 		DoNtpUpdate: true,
 	}))
 	checkSbTimeApproxEq(t, "s0", sbs[0].clientCtx, jan2015)
 
 	ok(t, sc("s2").DevModeUpdateVClock(sbs[2].clientCtx, wire.DevModeUpdateVClockOpts{
-		NtpHost:     startFakeNtpServer(t, feb2015),
+		NtpHost:     startFakeNtpServer(t, sh, feb2015),
 		DoNtpUpdate: true,
 	}))
 	checkSbTimeApproxEq(t, "s2", sbs[2].clientCtx, feb2015)
@@ -306,7 +309,7 @@
 	// Do NTP at s0 again; the update should propagate through the existing
 	// syncgroups.
 	ok(t, sc("s0").DevModeUpdateVClock(sbs[0].clientCtx, wire.DevModeUpdateVClockOpts{
-		NtpHost:     startFakeNtpServer(t, mar2015),
+		NtpHost:     startFakeNtpServer(t, sh, mar2015),
 		DoNtpUpdate: true,
 	}))
 
@@ -318,10 +321,11 @@
 
 // Tests p2p clock sync where local is not NTP-synced and is 1 hop away from an
 // NTP-synced device with >0 reboots.
-func V23TestVClockSyncWithReboots(t *v23tests.T) {
-	testInit(t)
-	sbs, cleanup := setupSyncbases(t, 2)
-	defer cleanup()
+func TestV23VClockSyncWithReboots(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
+	sbs := setupSyncbases(t, sh, 2, "--dev")
 
 	// Set s0's local clock.
 	ok(t, sc("s0").DevModeUpdateVClock(sbs[0].clientCtx, wire.DevModeUpdateVClockOpts{
@@ -334,7 +338,7 @@
 	// Do NTP at s0. As a result, s0 will think it has a one hour NTP skew, i.e.
 	// NTP time minus system clock time equals one hour.
 	ok(t, sc("s0").DevModeUpdateVClock(sbs[0].clientCtx, wire.DevModeUpdateVClockOpts{
-		NtpHost:     startFakeNtpServer(t, jan2015),
+		NtpHost:     startFakeNtpServer(t, sh, jan2015),
 		DoNtpUpdate: true,
 	}))
 	checkSbTimeApproxEq(t, "s0", sbs[0].clientCtx, jan2015)
@@ -373,7 +377,7 @@
 
 // Creates a "chain" of syncgroups, where each adjacent pair of Syncbases {A,B}
 // share a syncgroup with key prefix "AB".
-func setupChain(t *v23tests.T, sbs []*testSyncbase) {
+func setupChain(t *testing.T, sbs []*testSyncbase) {
 	for i := range sbs {
 		if i == len(sbs)-1 {
 			break
@@ -387,12 +391,16 @@
 	}
 }
 
-func startFakeNtpServer(t *v23tests.T, now time.Time) string {
+func startFakeNtpServer(t *testing.T, sh *v23test.Shell, now time.Time) string {
 	nowBuf, err := now.MarshalText()
 	ok(t, err)
-	ntpd := t.BuildV23Pkg("v.io/x/ref/services/syncbase/testutil/fake_ntp_server")
-	invocation := ntpd.WithStartOpts(ntpd.StartOpts().NoExecProgram().WithSessions(t, time.Second)).Start("--now=" + string(nowBuf))
-	host := invocation.ExpectVar("HOST")
+	ntpd := sh.JiriBuildGoPkg("v.io/x/ref/services/syncbase/testutil/fake_ntp_server")
+	inv := sh.Cmd(ntpd, "--now="+string(nowBuf))
+	// TODO(ivanpi): Use Session built into v23test.Shell when checked in.
+	// TODO(ivanpi): A 1-second timeout is potentially flaky; is it safe to bump it?
+	exp := expect.NewSession(t, inv.StdoutPipe(), time.Second)
+	inv.Start()
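+	// The fake server prints a "HOST=<address>" line once it is listening.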
+	host := exp.ExpectVar("HOST")
 	if host == "" {
 		t.Fatalf("fake_ntp_server failed to start")
 	}
@@ -400,7 +408,7 @@
 }
 
 // sbTimeDelta returns sbTime, abs(target-sbTime).
-func sbTimeDelta(t *v23tests.T, sbName string, ctx *context.T, target time.Time) (time.Time, time.Duration) {
+func sbTimeDelta(t *testing.T, sbName string, ctx *context.T, target time.Time) (time.Time, time.Duration) {
 	sbTime, err := sc(sbName).DevModeGetTime(ctx)
 	ok(t, err)
 	delta := target.Sub(sbTime)
@@ -412,18 +420,18 @@
 
 // checkSbTimeApproxEq checks that the given Syncbase's virtual clock time is
 // within 10 seconds of target.
-func checkSbTimeApproxEq(t *v23tests.T, sbName string, ctx *context.T, target time.Time) {
+func checkSbTimeApproxEq(t *testing.T, sbName string, ctx *context.T, target time.Time) {
 	sbTime, delta := sbTimeDelta(t, sbName, ctx, target)
 	if delta > 10*time.Second {
-		fatalf(t, "unexpected time: got %v, target %v", sbTime, target)
+		tu.Fatalf(t, "unexpected time: got %v, target %v", sbTime, target)
 	}
 }
 
 // checkSbTimeNotEq checks that the given Syncbase's virtual clock time is not
 // within 1 minute of target.
-func checkSbTimeNotEq(t *v23tests.T, sbName string, ctx *context.T, target time.Time) {
+func checkSbTimeNotEq(t *testing.T, sbName string, ctx *context.T, target time.Time) {
 	sbTime, delta := sbTimeDelta(t, sbName, ctx, target)
 	if delta < time.Minute {
-		fatalf(t, "expected different times: got %v, target %v", sbTime, target)
+		tu.Fatalf(t, "expected different times: got %v, target %v", sbTime, target)
 	}
 }
diff --git a/syncbase/nosql/syncgroup_v23_test.go b/syncbase/nosql/syncgroup_v23_test.go
index 9807984..eb969cf 100644
--- a/syncbase/nosql/syncgroup_v23_test.go
+++ b/syncbase/nosql/syncgroup_v23_test.go
@@ -6,26 +6,24 @@
 
 import (
 	"fmt"
-	"io/ioutil"
 	"math/rand"
 	"os"
 	"reflect"
-	"strconv"
+	"runtime/debug"
 	"strings"
+	"testing"
 	"time"
 
 	"v.io/v23"
+	"v.io/v23/context"
 	"v.io/v23/naming"
 	wire "v.io/v23/services/syncbase/nosql"
 	"v.io/v23/syncbase"
 	"v.io/v23/syncbase/nosql"
 	"v.io/v23/verror"
-	"v.io/x/ref"
+	"v.io/x/ref/lib/v23test"
 	_ "v.io/x/ref/runtime/factories/generic"
 	constants "v.io/x/ref/services/syncbase/server/util"
-	tu "v.io/x/ref/services/syncbase/testutil"
-	"v.io/x/ref/test/modules"
-	"v.io/x/ref/test/v23tests"
 )
 
 const (
@@ -36,68 +34,68 @@
 // on my MacBook Pro! Various instances of time.Sleep() below likely contribute
 // to the problem.
 
-//go:generate jiri test generate
+// TODO(ivanpi): Move to featuretests and deduplicate helpers.
 
-// V23TestSyncbasedJoinSyncgroup tests the creation and joining of a syncgroup.
+// TestV23SyncbasedJoinSyncgroup tests the creation and joining of a syncgroup.
 // Client0 creates a syncgroup at Syncbase0. Client1 requests to join the
 // syncgroup at Syncbase1. Syncbase1 in turn requests Syncbase0 to join the
 // syncgroup.
-func V23TestSyncbasedJoinSyncgroup(t *v23tests.T) {
-	v23tests.RunRootMT(t, "--v23.tcp.address=127.0.0.1:0")
-	server0Creds, _ := t.Shell().NewChildCredentials("s0")
-	client0Creds, _ := t.Shell().NewChildCredentials("c0")
-	cleanSync0 := tu.StartSyncbased(t, server0Creds, "sync0", "",
+func TestV23SyncbasedJoinSyncgroup(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
+	server0Creds := sh.ForkCredentials("s0")
+	client0Ctx := sh.ForkContext("c0")
+	sh.StartSyncbase(server0Creds, "sync0", "",
 		`{"Read": {"In":["root:c0"]}, "Write": {"In":["root:c0"]}}`)
-	defer cleanSync0()
 
-	server1Creds, _ := t.Shell().NewChildCredentials("s1")
-	client1Creds, _ := t.Shell().NewChildCredentials("c1")
-	cleanSync1 := tu.StartSyncbased(t, server1Creds, "sync1", "",
+	server1Creds := sh.ForkCredentials("s1")
+	client1Ctx := sh.ForkContext("c1")
+	sh.StartSyncbase(server1Creds, "sync1", "",
 		`{"Read": {"In":["root:c1"]}, "Write": {"In":["root:c1"]}}`)
-	defer cleanSync1()
 
 	sgName := naming.Join("sync0", constants.SyncbaseSuffix, "SG1")
 
-	tu.RunClient(t, client0Creds, runSetupAppA, "sync0")
-	tu.RunClient(t, client0Creds, runCreateSyncgroup, "sync0", sgName, "tb:foo", "", "root:s0", "root:s1")
+	ok(t, runSetupAppA(client0Ctx, "sync0"))
+	ok(t, runCreateSyncgroup(client0Ctx, "sync0", sgName, "tb:foo", "", "root:s0", "root:s1"))
 
-	tu.RunClient(t, client1Creds, runSetupAppA, "sync1")
-	tu.RunClient(t, client1Creds, runJoinSyncgroup, "sync1", sgName)
+	ok(t, runSetupAppA(client1Ctx, "sync1"))
+	ok(t, runJoinSyncgroup(client1Ctx, "sync1", sgName))
 }
 
-// V23TestSyncbasedGetDeltas tests the sending of deltas between two Syncbase
+// TestV23SyncbasedGetDeltas tests the sending of deltas between two Syncbase
 // instances and their clients.  The 1st client creates a syncgroup and puts
 // some database entries in it.  The 2nd client joins that syncgroup and reads
 // the database entries.  This verifies the end-to-end synchronization of data
 // along the path: client0--Syncbase0--Syncbase1--client1.
-func V23TestSyncbasedGetDeltas(t *v23tests.T) {
-	v23tests.RunRootMT(t, "--v23.tcp.address=127.0.0.1:0")
-	server0Creds, _ := t.Shell().NewChildCredentials("s0")
-	client0Creds, _ := t.Shell().NewChildCredentials("c0")
-	cleanSync0 := tu.StartSyncbased(t, server0Creds, "sync0", "",
+func TestV23SyncbasedGetDeltas(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
+	server0Creds := sh.ForkCredentials("s0")
+	client0Ctx := sh.ForkContext("c0")
+	sh.StartSyncbase(server0Creds, "sync0", "",
 		`{"Read": {"In":["root:c0"]}, "Write": {"In":["root:c0"]}}`)
-	defer cleanSync0()
 
-	server1Creds, _ := t.Shell().NewChildCredentials("s1")
-	client1Creds, _ := t.Shell().NewChildCredentials("c1")
-	cleanSync1 := tu.StartSyncbased(t, server1Creds, "sync1", "",
+	server1Creds := sh.ForkCredentials("s1")
+	client1Ctx := sh.ForkContext("c1")
+	sh.StartSyncbase(server1Creds, "sync1", "",
 		`{"Read": {"In":["root:c1"]}, "Write": {"In":["root:c1"]}}`)
-	defer cleanSync1()
 
 	sgName := naming.Join("sync0", constants.SyncbaseSuffix, "SG1")
 
-	tu.RunClient(t, client0Creds, runSetupAppA, "sync0")
-	tu.RunClient(t, client0Creds, runCreateSyncgroup, "sync0", sgName, "tb:foo,tb:bar", "", "root:s0", "root:s1")
-	tu.RunClient(t, client0Creds, runPopulateData, "sync0", "foo", "0")
-	tu.RunClient(t, client0Creds, runPopulateNonVomData, "sync0", "bar", "0")
+	ok(t, runSetupAppA(client0Ctx, "sync0"))
+	ok(t, runCreateSyncgroup(client0Ctx, "sync0", sgName, "tb:foo,tb:bar", "", "root:s0", "root:s1"))
+	ok(t, runPopulateData(client0Ctx, "sync0", "foo", 0))
+	ok(t, runPopulateNonVomData(client0Ctx, "sync0", "bar", 0))
 
-	tu.RunClient(t, client1Creds, runSetupAppA, "sync1")
-	tu.RunClient(t, client1Creds, runJoinSyncgroup, "sync1", sgName)
-	tu.RunClient(t, client1Creds, runVerifySyncgroupData, "sync1", "foo", "0", "10", "false")
-	tu.RunClient(t, client1Creds, runVerifySyncgroupNonVomData, "sync1", "bar", "0", "10")
+	ok(t, runSetupAppA(client1Ctx, "sync1"))
+	ok(t, runJoinSyncgroup(client1Ctx, "sync1", sgName))
+	ok(t, runVerifySyncgroupData(client1Ctx, "sync1", "foo", 0, 10, false))
+	ok(t, runVerifySyncgroupNonVomData(client1Ctx, "sync1", "bar", 0, 10))
 }
 
-// V23TestSyncbasedGetDeltasWithDel tests the sending of deltas between two
+// TestV23SyncbasedGetDeltasWithDel tests the sending of deltas between two
 // Syncbase instances and their clients. The 1st client creates a syncgroup and
 // puts some database entries in it. The 2nd client joins that syncgroup and
 // reads the database entries. The 1st client then deletes a portion of this
@@ -105,39 +103,39 @@
 // correctly synced. This verifies the end-to-end synchronization of data along
 // the path: client0--Syncbase0--Syncbase1--client1 with a workload of puts and
 // deletes.
-func V23TestSyncbasedGetDeltasWithDel(t *v23tests.T) {
-	v23tests.RunRootMT(t, "--v23.tcp.address=127.0.0.1:0")
-	server0Creds, _ := t.Shell().NewChildCredentials("s0")
-	client0Creds, _ := t.Shell().NewChildCredentials("c0")
-	cleanSync0 := tu.StartSyncbased(t, server0Creds, "sync0", "",
+func TestV23SyncbasedGetDeltasWithDel(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
+	server0Creds := sh.ForkCredentials("s0")
+	client0Ctx := sh.ForkContext("c0")
+	sh.StartSyncbase(server0Creds, "sync0", "",
 		`{"Read": {"In":["root:c0"]}, "Write": {"In":["root:c0"]}}`)
-	defer cleanSync0()
 
-	server1Creds, _ := t.Shell().NewChildCredentials("s1")
-	client1Creds, _ := t.Shell().NewChildCredentials("c1")
-	cleanSync1 := tu.StartSyncbased(t, server1Creds, "sync1", "",
+	server1Creds := sh.ForkCredentials("s1")
+	client1Ctx := sh.ForkContext("c1")
+	sh.StartSyncbase(server1Creds, "sync1", "",
 		`{"Read": {"In":["root:c1"]}, "Write": {"In":["root:c1"]}}`)
-	defer cleanSync1()
 
 	sgName := naming.Join("sync0", constants.SyncbaseSuffix, "SG1")
 
-	tu.RunClient(t, client0Creds, runSetupAppA, "sync0")
-	tu.RunClient(t, client0Creds, runCreateSyncgroup, "sync0", sgName, "tb:foo,tb:bar", "", "root:s0", "root:s1")
-	tu.RunClient(t, client0Creds, runPopulateData, "sync0", "foo", "0")
+	ok(t, runSetupAppA(client0Ctx, "sync0"))
+	ok(t, runCreateSyncgroup(client0Ctx, "sync0", sgName, "tb:foo,tb:bar", "", "root:s0", "root:s1"))
+	ok(t, runPopulateData(client0Ctx, "sync0", "foo", 0))
 
-	tu.RunClient(t, client1Creds, runSetupAppA, "sync1")
-	tu.RunClient(t, client1Creds, runJoinSyncgroup, "sync1", sgName)
-	tu.RunClient(t, client1Creds, runVerifySyncgroupData, "sync1", "foo", "0", "10", "false")
+	ok(t, runSetupAppA(client1Ctx, "sync1"))
+	ok(t, runJoinSyncgroup(client1Ctx, "sync1", sgName))
+	ok(t, runVerifySyncgroupData(client1Ctx, "sync1", "foo", 0, 10, false))
 
-	tu.RunClient(t, client0Creds, runDeleteData, "sync0", "foo", "0")
-	tu.RunClient(t, client0Creds, runVerifyDeletedData, "sync0", "foo")
-	tu.RunClient(t, client1Creds, runVerifyDeletedData, "sync1", "foo")
+	ok(t, runDeleteData(client0Ctx, "sync0", 0))
+	ok(t, runVerifyDeletedData(client0Ctx, "sync0", "foo"))
+	ok(t, runVerifyDeletedData(client1Ctx, "sync1", "foo"))
 
-	tu.RunClient(t, client0Creds, runPopulateData, "sync0", "bar", "0")
-	tu.RunClient(t, client1Creds, runVerifySyncgroupData, "sync1", "bar", "0", "10", "false")
+	ok(t, runPopulateData(client0Ctx, "sync0", "bar", 0))
+	ok(t, runVerifySyncgroupData(client1Ctx, "sync1", "bar", 0, 10, false))
 }
 
-// V23TestSyncbasedCompEval is a comprehensive sniff test for core sync
+// TestV23SyncbasedCompEval is a comprehensive sniff test for core sync
 // functionality. It tests the exchange of deltas between two Syncbase instances
 // and their clients. The 1st client creates a syncgroup and puts some database
 // entries in it. The 2nd client joins that syncgroup and reads the database
@@ -148,31 +146,27 @@
 // the bi-directional exchange of syncgroup deltas. After the first phase is
 // done, both Syncbase instances are shutdown and restarted, and new data is
 // synced once again.
-func V23TestSyncbasedCompEval(t *v23tests.T) {
-	v23tests.RunRootMT(t, "--v23.tcp.address=127.0.0.1:0")
-	server0Creds, _ := t.Shell().NewChildCredentials("s0")
-	client0Creds, _ := t.Shell().NewChildCredentials("c0")
-	server0RDir, err := ioutil.TempDir("", "sync0")
-	if err != nil {
-		tu.V23Fatalf(t, "can't create temp dir: %v", err)
-	}
-	cleanSync0 := tu.StartSyncbased(t, server0Creds, "sync0", server0RDir,
+func TestV23SyncbasedCompEval(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
+	server0Creds := sh.ForkCredentials("s0")
+	client0Ctx := sh.ForkContext("c0")
+	server0RDir := sh.MakeTempDir()
+	cleanSync0 := sh.StartSyncbase(server0Creds, "sync0", server0RDir,
 		`{"Read": {"In":["root:c0"]}, "Write": {"In":["root:c0"]}}`)
 
-	server1Creds, _ := t.Shell().NewChildCredentials("s1")
-	client1Creds, _ := t.Shell().NewChildCredentials("c1")
-	server1RDir, err := ioutil.TempDir("", "sync1")
-	if err != nil {
-		tu.V23Fatalf(t, "can't create temp dir: %v", err)
-	}
-	cleanSync1 := tu.StartSyncbased(t, server1Creds, "sync1", server1RDir,
+	server1Creds := sh.ForkCredentials("s1")
+	client1Ctx := sh.ForkContext("c1")
+	server1RDir := sh.MakeTempDir()
+	cleanSync1 := sh.StartSyncbase(server1Creds, "sync1", server1RDir,
 		`{"Read": {"In":["root:c1"]}, "Write": {"In":["root:c1"]}}`)
 
 	sgName := naming.Join("sync0", constants.SyncbaseSuffix, "SG1")
 
-	tu.RunClient(t, client0Creds, runSetupAppA, "sync0")
-	tu.RunClient(t, client0Creds, runCreateSyncgroup, "sync0", sgName, "tb:foo", "", "root:s0", "root:s1")
-	tu.RunClient(t, client0Creds, runPopulateData, "sync0", "foo", "0")
+	ok(t, runSetupAppA(client0Ctx, "sync0"))
+	ok(t, runCreateSyncgroup(client0Ctx, "sync0", sgName, "tb:foo", "", "root:s0", "root:s1"))
+	ok(t, runPopulateData(client0Ctx, "sync0", "foo", 0))
 
 	// This is a decoy syncgroup that no other Syncbase joins, but is on the
 	// same database as the first syncgroup. Populating it after the first
@@ -181,58 +175,48 @@
 	// syncgroup. This triggers the handling of filtered log records in the
 	// restartability code.
 	sgName1 := naming.Join("sync0", constants.SyncbaseSuffix, "SG2")
-	tu.RunClient(t, client0Creds, runCreateSyncgroup, "sync0", sgName1, "tb:bar", "", "root:s0", "root:s1")
-	tu.RunClient(t, client0Creds, runPopulateData, "sync0", "bar", "0")
+	ok(t, runCreateSyncgroup(client0Ctx, "sync0", sgName1, "tb:bar", "", "root:s0", "root:s1"))
+	ok(t, runPopulateData(client0Ctx, "sync0", "bar", 0))
 
-	tu.RunClient(t, client1Creds, runSetupAppA, "sync1")
-	tu.RunClient(t, client1Creds, runJoinSyncgroup, "sync1", sgName)
-	tu.RunClient(t, client1Creds, runVerifySyncgroupData, "sync1", "foo", "0", "10", "false")
+	ok(t, runSetupAppA(client1Ctx, "sync1"))
+	ok(t, runJoinSyncgroup(client1Ctx, "sync1", sgName))
+	ok(t, runVerifySyncgroupData(client1Ctx, "sync1", "foo", 0, 10, false))
 
 	// Shutdown and restart Syncbase instances.
-	cleanSync0()
-	cleanSync1()
+	cleanSync0(os.Interrupt)
+	cleanSync1(os.Interrupt)
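+	// os.Interrupt lets each syncbased shut down gracefully so its on-disk
+	// state stays consistent for the restart below.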
 
-	cleanSync0 = tu.StartSyncbased(t, server0Creds, "sync0", server0RDir,
+	cleanSync0 = sh.StartSyncbase(server0Creds, "sync0", server0RDir,
 		`{"Read": {"In":["root:c0"]}, "Write": {"In":["root:c0"]}}`)
-	cleanSync1 = tu.StartSyncbased(t, server1Creds, "sync1", server1RDir,
+	cleanSync1 = sh.StartSyncbase(server1Creds, "sync1", server1RDir,
 		`{"Read": {"In":["root:c1"]}, "Write": {"In":["root:c1"]}}`)
 
-	tu.RunClient(t, client0Creds, runSetSyncgroupSpec, "sync0", sgName, "v2", "tb:foo", "root:s0", "root:s1", "root:s3")
-	tu.RunClient(t, client1Creds, runGetSyncgroupSpec, "sync1", sgName, "v2", "tb:foo", "root:s0", "root:s1", "root:s3")
+	ok(t, runSetSyncgroupSpec(client0Ctx, "sync0", sgName, "v2", "tb:foo", "root:s0", "root:s1", "root:s3"))
+	ok(t, runGetSyncgroupSpec(client1Ctx, "sync1", sgName, "v2", "tb:foo", "root:s0", "root:s1", "root:s3"))
 
-	tu.RunClient(t, client1Creds, runUpdateData, "sync1", "5")
-	tu.RunClient(t, client1Creds, runPopulateData, "sync1", "foo", "10")
-	tu.RunClient(t, client1Creds, runSetSyncgroupSpec, "sync1", sgName, "v3", "tb:foo", "root:s0", "root:s1", "root:s4")
+	ok(t, runUpdateData(client1Ctx, "sync1", 5))
+	ok(t, runPopulateData(client1Ctx, "sync1", "foo", 10))
+	ok(t, runSetSyncgroupSpec(client1Ctx, "sync1", sgName, "v3", "tb:foo", "root:s0", "root:s1", "root:s4"))
 
-	tu.RunClient(t, client0Creds, runVerifyLocalAndRemoteData, "sync0")
-	tu.RunClient(t, client0Creds, runGetSyncgroupSpec, "sync0", sgName, "v3", "tb:foo", "root:s0", "root:s1", "root:s4")
+	ok(t, runVerifyLocalAndRemoteData(client0Ctx, "sync0"))
+	ok(t, runGetSyncgroupSpec(client0Ctx, "sync0", sgName, "v3", "tb:foo", "root:s0", "root:s1", "root:s4"))
 
 	// Shutdown and restart Syncbase instances.
-	cleanSync0()
-	cleanSync1()
+	cleanSync0(os.Interrupt)
+	cleanSync1(os.Interrupt)
 
-	cleanSync0 = tu.StartSyncbased(t, server0Creds, "sync0", server0RDir,
+	_ = sh.StartSyncbase(server0Creds, "sync0", server0RDir,
 		`{"Read": {"In":["root:c0"]}, "Write": {"In":["root:c0"]}}`)
-	cleanSync1 = tu.StartSyncbased(t, server1Creds, "sync1", server1RDir,
+	_ = sh.StartSyncbase(server1Creds, "sync1", server1RDir,
 		`{"Read": {"In":["root:c1"]}, "Write": {"In":["root:c1"]}}`)
 
-	tu.RunClient(t, client0Creds, runGetSyncgroupSpec, "sync0", sgName, "v3", "tb:foo", "root:s0", "root:s1", "root:s4")
-	tu.RunClient(t, client1Creds, runGetSyncgroupSpec, "sync1", sgName, "v3", "tb:foo", "root:s0", "root:s1", "root:s4")
-	tu.RunClient(t, client0Creds, runPopulateData, "sync0", "foo", "20")
-	tu.RunClient(t, client1Creds, runVerifySyncgroupData, "sync1", "foo", "20", "10", "true")
-
-	cleanSync0()
-	cleanSync1()
-
-	rdirs := []string{server0RDir, server1RDir}
-	for _, r := range rdirs {
-		if err := os.RemoveAll(r); err != nil {
-			tu.V23Fatalf(t, "can't remove dir %v: %v", r, err)
-		}
-	}
+	ok(t, runGetSyncgroupSpec(client0Ctx, "sync0", sgName, "v3", "tb:foo", "root:s0", "root:s1", "root:s4"))
+	ok(t, runGetSyncgroupSpec(client1Ctx, "sync1", sgName, "v3", "tb:foo", "root:s0", "root:s1", "root:s4"))
+	ok(t, runPopulateData(client0Ctx, "sync0", "foo", 20))
+	ok(t, runVerifySyncgroupData(client1Ctx, "sync1", "foo", 20, 10, true))
 }
 
-// V23TestSyncbasedExchangeDeltasWithAcls tests the exchange of deltas including
+// TestV23SyncbasedExchangeDeltasWithAcls tests the exchange of deltas including
 // acls between two Syncbase instances and their clients.  The 1st client
 // creates a syncgroup at "foo", sets an acl, and puts some database entries in
 // it.  The 2nd client joins that syncgroup and reads the database entries.  The
@@ -240,42 +224,42 @@
 // The 1st client should be unable to access the subset of keys under
 // "foobar". The 2nd client then modifies the prefix acl at "foobar" with access
 // to both clients. The 1st client should regain access.
-func V23TestSyncbasedExchangeDeltasWithAcls(t *v23tests.T) {
-	v23tests.RunRootMT(t, "--v23.tcp.address=127.0.0.1:0")
-	server0Creds, _ := t.Shell().NewChildCredentials("s0")
-	client0Creds, _ := t.Shell().NewChildCredentials("c0")
-	cleanSync0 := tu.StartSyncbased(t, server0Creds, "sync0", "",
+func TestV23SyncbasedExchangeDeltasWithAcls(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
+	server0Creds := sh.ForkCredentials("s0")
+	client0Ctx := sh.ForkContext("c0")
+	sh.StartSyncbase(server0Creds, "sync0", "",
 		`{"Read": {"In":["root:c0"]}, "Write": {"In":["root:c0"]}, "Admin": {"In":["root:c0"]}}`)
-	defer cleanSync0()
 
-	server1Creds, _ := t.Shell().NewChildCredentials("s1")
-	client1Creds, _ := t.Shell().NewChildCredentials("c1")
-	cleanSync1 := tu.StartSyncbased(t, server1Creds, "sync1", "",
+	server1Creds := sh.ForkCredentials("s1")
+	client1Ctx := sh.ForkContext("c1")
+	sh.StartSyncbase(server1Creds, "sync1", "",
 		`{"Read": {"In":["root:c1"]}, "Write": {"In":["root:c1"]}, "Admin": {"In":["root:c1"]}}`)
-	defer cleanSync1()
 
 	sgName := naming.Join("sync0", constants.SyncbaseSuffix, "SG1")
 
-	tu.RunClient(t, client0Creds, runSetupAppA, "sync0")
-	tu.RunClient(t, client0Creds, runCreateSyncgroup, "sync0", sgName, "tb:foo", "", "root:s0", "root:s1")
-	tu.RunClient(t, client0Creds, runPopulateData, "sync0", "foobarbaz", "0")
-	tu.RunClient(t, client0Creds, runPopulateData, "sync0", "foo", "0")
-	tu.RunClient(t, client0Creds, runSetPrefixPermissions, "sync0", "foo", "root:c0", "root:c1")
+	ok(t, runSetupAppA(client0Ctx, "sync0"))
+	ok(t, runCreateSyncgroup(client0Ctx, "sync0", sgName, "tb:foo", "", "root:s0", "root:s1"))
+	ok(t, runPopulateData(client0Ctx, "sync0", "foobarbaz", 0))
+	ok(t, runPopulateData(client0Ctx, "sync0", "foo", 0))
+	ok(t, runSetPrefixPermissions(client0Ctx, "sync0", "foo", "root:c0", "root:c1"))
 
-	tu.RunClient(t, client1Creds, runSetupAppA, "sync1")
-	tu.RunClient(t, client1Creds, runJoinSyncgroup, "sync1", sgName)
-	tu.RunClient(t, client1Creds, runVerifySyncgroupData, "sync1", "foobarbaz", "0", "10", "false")
-	tu.RunClient(t, client1Creds, runVerifySyncgroupData, "sync1", "foo", "0", "10", "true")
+	ok(t, runSetupAppA(client1Ctx, "sync1"))
+	ok(t, runJoinSyncgroup(client1Ctx, "sync1", sgName))
+	ok(t, runVerifySyncgroupData(client1Ctx, "sync1", "foobarbaz", 0, 10, false))
+	ok(t, runVerifySyncgroupData(client1Ctx, "sync1", "foo", 0, 10, true))
 
-	tu.RunClient(t, client1Creds, runSetPrefixPermissions, "sync1", "foobar", "root:c1")
-	tu.RunClient(t, client0Creds, runVerifyLostAccess, "sync0", "foobarbaz", "0", "10")
-	tu.RunClient(t, client0Creds, runVerifySyncgroupData, "sync0", "foo", "0", "10", "true")
+	ok(t, runSetPrefixPermissions(client1Ctx, "sync1", "foobar", "root:c1"))
+	ok(t, runVerifyLostAccess(client0Ctx, "sync0", "foobarbaz", 0, 10))
+	ok(t, runVerifySyncgroupData(client0Ctx, "sync0", "foo", 0, 10, true))
 
-	tu.RunClient(t, client1Creds, runSetPrefixPermissions, "sync1", "foobar", "root:c0", "root:c1")
-	tu.RunClient(t, client0Creds, runVerifySyncgroupData, "sync0", "foobarbaz", "0", "10", "false")
+	ok(t, runSetPrefixPermissions(client1Ctx, "sync1", "foobar", "root:c0", "root:c1"))
+	ok(t, runVerifySyncgroupData(client0Ctx, "sync0", "foobarbaz", 0, 10, false))
 }
 
-// V23 TestSyncbasedExchangeDeltasWithConflicts tests the exchange of deltas
+// TestV23SyncbasedExchangeDeltasWithConflicts tests the exchange of deltas
 // between two Syncbase instances and their clients in the presence of
 // conflicting updates. The 1st client creates a syncgroup and puts some
 // database entries in it. The 2nd client joins that syncgroup and reads the
@@ -298,87 +282,90 @@
 // TODO(hpucha): We could diff the states of the two clients and ensure they are
 // identical. Optionally we could expose inner state of syncbased via some
 // debug methods.
-func V23TestSyncbasedExchangeDeltasWithConflicts(t *v23tests.T) {
+func TestV23SyncbasedExchangeDeltasWithConflicts(t *testing.T) {
 	// Run it multiple times to exercise different interactions between sync
 	// and local updates that change every run due to timing.
 	for i := 0; i < 10; i++ {
-		testSyncbasedExchangeDeltasWithConflicts(t)
+		// TODO(ivanpi): Hack: the shell is created here to satisfy the
+		// requirement that NewShell for large tests be called directly from a
+		// TestV23* function.
+		sh := v23test.NewShell(t, v23test.Opts{Large: true})
+		defer sh.Cleanup()
+		testSyncbasedExchangeDeltasWithConflicts(t, sh)
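+		// Clean up eagerly at the end of each iteration; the deferred Cleanup
+		// above is assumed to be an idempotent safety net for early failures.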
+		sh.Cleanup()
 	}
 }
 
-func testSyncbasedExchangeDeltasWithConflicts(t *v23tests.T) {
-	v23tests.RunRootMT(t, "--v23.tcp.address=127.0.0.1:0")
-	server0Creds, _ := t.Shell().NewChildCredentials("s0")
-	client0Creds, _ := t.Shell().NewChildCredentials("c0")
-	cleanSync0 := tu.StartSyncbased(t, server0Creds, "sync0", "",
+func testSyncbasedExchangeDeltasWithConflicts(t *testing.T, sh *v23test.Shell) {
+	sh.StartRootMountTable()
+	server0Creds := sh.ForkCredentials("s0")
+	client0Ctx := sh.ForkContext("c0")
+	sh.StartSyncbase(server0Creds, "sync0", "",
 		`{"Read": {"In":["root:c0"]}, "Write": {"In":["root:c0"]}}`)
-	defer cleanSync0()
 
-	server1Creds, _ := t.Shell().NewChildCredentials("s1")
-	client1Creds, _ := t.Shell().NewChildCredentials("c1")
-	cleanSync1 := tu.StartSyncbased(t, server1Creds, "sync1", "",
+	server1Creds := sh.ForkCredentials("s1")
+	client1Ctx := sh.ForkContext("c1")
+	sh.StartSyncbase(server1Creds, "sync1", "",
 		`{"Read": {"In":["root:c1"]}, "Write": {"In":["root:c1"]}}`)
-	defer cleanSync1()
 
 	sgName := naming.Join("sync0", constants.SyncbaseSuffix, "SG1")
 
-	tu.RunClient(t, client0Creds, runSetupAppA, "sync0")
-	tu.RunClient(t, client0Creds, runCreateSyncgroup, "sync0", sgName, "tb:foo", "", "root:s0", "root:s1")
-	tu.RunClient(t, client0Creds, runPopulateData, "sync0", "foo", "0")
+	ok(t, runSetupAppA(client0Ctx, "sync0"))
+	ok(t, runCreateSyncgroup(client0Ctx, "sync0", sgName, "tb:foo", "", "root:s0", "root:s1"))
+	ok(t, runPopulateData(client0Ctx, "sync0", "foo", 0))
 
-	tu.RunClient(t, client1Creds, runSetupAppA, "sync1")
-	tu.RunClient(t, client1Creds, runJoinSyncgroup, "sync1", sgName)
-	tu.RunClient(t, client1Creds, runVerifySyncgroupData, "sync1", "foo", "0", "10", "false")
+	ok(t, runSetupAppA(client1Ctx, "sync1"))
+	ok(t, runJoinSyncgroup(client1Ctx, "sync1", sgName))
+	ok(t, runVerifySyncgroupData(client1Ctx, "sync1", "foo", 0, 10, false))
 
-	go tu.RunClient(t, client0Creds, runUpdateData, "sync0", "5")
+	// Run the first update concurrently, staggered by a random delay so the
+	// conflicting writes interleave differently on each run. Its error comes
+	// back over a channel, since t.Fatal may only be called from the goroutine
+	// running the test.
+	errCh := make(chan error, 1)
+	go func() { errCh <- runUpdateData(client0Ctx, "sync0", 5) }()
 	d := time.Duration(rand.Int63n(50)) * time.Millisecond
 	time.Sleep(d)
-	tu.RunClient(t, client1Creds, runUpdateData, "sync1", "5")
+	ok(t, runUpdateData(client1Ctx, "sync1", 5))
+	ok(t, <-errCh)
 
 	time.Sleep(10 * time.Second)
 
-	tu.RunClient(t, client0Creds, runVerifyConflictResolution, "sync0")
-	tu.RunClient(t, client1Creds, runVerifyConflictResolution, "sync1")
+	ok(t, runVerifyConflictResolution(client0Ctx, "sync0"))
+	ok(t, runVerifyConflictResolution(client1Ctx, "sync1"))
 }
 
-// V23TestNestedSyncgroups tests the exchange of deltas between two Syncbase
+// TestV23NestedSyncgroups tests the exchange of deltas between two Syncbase
 // instances and their clients with nested syncgroups. The 1st client creates
 // two syncgroups at prefixes "f" and "foo" and puts some database entries in
 // both of them.  The 2nd client first joins the syncgroup with prefix "foo" and
 // verifies that it reads the corresponding database entries.  The 2nd client
 // then joins the syncgroup with prefix "f" and verifies that it can read the
 // "f" keys.
-func V23TestNestedSyncgroups(t *v23tests.T) {
-	v23tests.RunRootMT(t, "--v23.tcp.address=127.0.0.1:0")
-	server0Creds, _ := t.Shell().NewChildCredentials("s0")
-	client0Creds, _ := t.Shell().NewChildCredentials("c0")
-	cleanSync0 := tu.StartSyncbased(t, server0Creds, "sync0", "",
+func TestV23NestedSyncgroups(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
+	server0Creds := sh.ForkCredentials("s0")
+	client0Ctx := sh.ForkContext("c0")
+	sh.StartSyncbase(server0Creds, "sync0", "",
 		`{"Read": {"In":["root:c0"]}, "Write": {"In":["root:c0"]}}`)
-	defer cleanSync0()
 
-	server1Creds, _ := t.Shell().NewChildCredentials("s1")
-	client1Creds, _ := t.Shell().NewChildCredentials("c1")
-	cleanSync1 := tu.StartSyncbased(t, server1Creds, "sync1", "",
+	server1Creds := sh.ForkCredentials("s1")
+	client1Ctx := sh.ForkContext("c1")
+	sh.StartSyncbase(server1Creds, "sync1", "",
 		`{"Read": {"In":["root:c1"]}, "Write": {"In":["root:c1"]}}`)
-	defer cleanSync1()
 
 	sg1Name := naming.Join("sync0", constants.SyncbaseSuffix, "SG1")
 	sg2Name := naming.Join("sync0", constants.SyncbaseSuffix, "SG2")
 
-	tu.RunClient(t, client0Creds, runSetupAppA, "sync0")
-	tu.RunClient(t, client0Creds, runCreateSyncgroup, "sync0", sg1Name, "tb:foo", "", "root:s0", "root:s1")
-	tu.RunClient(t, client0Creds, runCreateSyncgroup, "sync0", sg2Name, "tb:f", "", "root:s0", "root:s1")
-	tu.RunClient(t, client0Creds, runPopulateData, "sync0", "f", "0")
-	tu.RunClient(t, client0Creds, runPopulateData, "sync0", "foo", "0")
+	ok(t, runSetupAppA(client0Ctx, "sync0"))
+	ok(t, runCreateSyncgroup(client0Ctx, "sync0", sg1Name, "tb:foo", "", "root:s0", "root:s1"))
+	ok(t, runCreateSyncgroup(client0Ctx, "sync0", sg2Name, "tb:f", "", "root:s0", "root:s1"))
+	ok(t, runPopulateData(client0Ctx, "sync0", "f", 0))
+	ok(t, runPopulateData(client0Ctx, "sync0", "foo", 0))
 
-	tu.RunClient(t, client1Creds, runSetupAppA, "sync1")
-	tu.RunClient(t, client1Creds, runJoinSyncgroup, "sync1", sg1Name)
-	tu.RunClient(t, client1Creds, runVerifySyncgroupData, "sync1", "foo", "0", "10", "false")
-	tu.RunClient(t, client1Creds, runJoinSyncgroup, "sync1", sg2Name)
-	tu.RunClient(t, client1Creds, runVerifyNestedSyncgroupData, "sync1")
+	ok(t, runSetupAppA(client1Ctx, "sync1"))
+	ok(t, runJoinSyncgroup(client1Ctx, "sync1", sg1Name))
+	ok(t, runVerifySyncgroupData(client1Ctx, "sync1", "foo", 0, 10, false))
+	ok(t, runJoinSyncgroup(client1Ctx, "sync1", sg2Name))
+	ok(t, runVerifyNestedSyncgroupData(client1Ctx, "sync1"))
 }
 
-// V23TestNestedAndPeerSyncgroups tests the exchange of deltas between three
+// TestV23NestedAndPeerSyncgroups tests the exchange of deltas between three
 // Syncbase instances and their clients consisting of nested/peer
 // syncgroups. The 1st client creates two syncgroups: SG1 at prefix "foo" and
 // SG2 at "f" and puts some database entries in both of them.  The 2nd client
@@ -387,154 +374,152 @@
 // syncgroups SG2 and SG3 and verifies that it can read all the "f" and "foo"
 // keys created by client 1. Client 2 also verifies that it can read all the "f"
 // and "foo" keys created by client 1.
-func V23TestNestedAndPeerSyncgroups(t *v23tests.T) {
-	v23tests.RunRootMT(t, "--v23.tcp.address=127.0.0.1:0")
-	server0Creds, _ := t.Shell().NewChildCredentials("s0")
-	client0Creds, _ := t.Shell().NewChildCredentials("c0")
-	cleanSync0 := tu.StartSyncbased(t, server0Creds, "sync0", "",
+func TestV23NestedAndPeerSyncgroups(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
+	server0Creds := sh.ForkCredentials("s0")
+	client0Ctx := sh.ForkContext("c0")
+	sh.StartSyncbase(server0Creds, "sync0", "",
 		`{"Read": {"In":["root:c0"]}, "Write": {"In":["root:c0"]}}`)
-	defer cleanSync0()
 
-	server1Creds, _ := t.Shell().NewChildCredentials("s1")
-	client1Creds, _ := t.Shell().NewChildCredentials("c1")
-	cleanSync1 := tu.StartSyncbased(t, server1Creds, "sync1", "",
+	server1Creds := sh.ForkCredentials("s1")
+	client1Ctx := sh.ForkContext("c1")
+	sh.StartSyncbase(server1Creds, "sync1", "",
 		`{"Read": {"In":["root:c1"]}, "Write": {"In":["root:c1"]}}`)
-	defer cleanSync1()
 
-	server2Creds, _ := t.Shell().NewChildCredentials("s2")
-	client2Creds, _ := t.Shell().NewChildCredentials("c2")
-	cleanSync2 := tu.StartSyncbased(t, server2Creds, "sync2", "",
+	server2Creds := sh.ForkCredentials("s2")
+	client2Ctx := sh.ForkContext("c2")
+	sh.StartSyncbase(server2Creds, "sync2", "",
 		`{"Read": {"In":["root:c2"]}, "Write": {"In":["root:c2"]}}`)
-	defer cleanSync2()
 
 	sg1Name := naming.Join("sync0", constants.SyncbaseSuffix, "SG1")
 	sg2Name := naming.Join("sync0", constants.SyncbaseSuffix, "SG2")
 	sg3Name := naming.Join("sync1", constants.SyncbaseSuffix, "SG3")
 
-	tu.RunClient(t, client0Creds, runSetupAppA, "sync0")
-	tu.RunClient(t, client0Creds, runCreateSyncgroup, "sync0", sg1Name, "tb:foo", "", "root:s0", "root:s1", "root:s2")
-	tu.RunClient(t, client0Creds, runCreateSyncgroup, "sync0", sg2Name, "tb:f", "", "root:s0", "root:s2")
-	tu.RunClient(t, client0Creds, runPopulateData, "sync0", "f", "0")
-	tu.RunClient(t, client0Creds, runPopulateData, "sync0", "foo", "0")
+	ok(t, runSetupAppA(client0Ctx, "sync0"))
+	ok(t, runCreateSyncgroup(client0Ctx, "sync0", sg1Name, "tb:foo", "", "root:s0", "root:s1", "root:s2"))
+	ok(t, runCreateSyncgroup(client0Ctx, "sync0", sg2Name, "tb:f", "", "root:s0", "root:s2"))
+	ok(t, runPopulateData(client0Ctx, "sync0", "f", 0))
+	ok(t, runPopulateData(client0Ctx, "sync0", "foo", 0))
 
-	tu.RunClient(t, client1Creds, runSetupAppA, "sync1")
-	tu.RunClient(t, client1Creds, runJoinSyncgroup, "sync1", sg1Name)
-	tu.RunClient(t, client1Creds, runVerifySyncgroupData, "sync1", "foo", "0", "10", "false")
-	tu.RunClient(t, client1Creds, runCreateSyncgroup, "sync1", sg3Name, "tb:f", "", "root:s1", "root:s2")
+	ok(t, runSetupAppA(client1Ctx, "sync1"))
+	ok(t, runJoinSyncgroup(client1Ctx, "sync1", sg1Name))
+	ok(t, runVerifySyncgroupData(client1Ctx, "sync1", "foo", 0, 10, false))
+	ok(t, runCreateSyncgroup(client1Ctx, "sync1", sg3Name, "tb:f", "", "root:s1", "root:s2"))
 
-	tu.RunClient(t, client2Creds, runSetupAppA, "sync2")
-	tu.RunClient(t, client2Creds, runJoinSyncgroup, "sync2", sg2Name)
-	tu.RunClient(t, client2Creds, runJoinSyncgroup, "sync2", sg3Name)
-	tu.RunClient(t, client2Creds, runVerifyNestedSyncgroupData, "sync2")
+	ok(t, runSetupAppA(client2Ctx, "sync2"))
+	ok(t, runJoinSyncgroup(client2Ctx, "sync2", sg2Name))
+	ok(t, runJoinSyncgroup(client2Ctx, "sync2", sg3Name))
+	ok(t, runVerifyNestedSyncgroupData(client2Ctx, "sync2"))
 
-	tu.RunClient(t, client1Creds, runVerifyNestedSyncgroupData, "sync1")
+	ok(t, runVerifyNestedSyncgroupData(client1Ctx, "sync1"))
 }
 
-// V23TestSyncbasedGetDeltasPrePopulate tests the sending of deltas between two
+// TestV23SyncbasedGetDeltasPrePopulate tests the sending of deltas between two
 // Syncbase instances and their clients with data existing before the creation
 // of a syncgroup.  The 1st client puts entries in a database then creates a
 // syncgroup over that data.  The 2nd client joins that syncgroup and reads the
 // database entries.
-func V23TestSyncbasedGetDeltasPrePopulate(t *v23tests.T) {
-	v23tests.RunRootMT(t, "--v23.tcp.address=127.0.0.1:0")
-	server0Creds, _ := t.Shell().NewChildCredentials("s0")
-	client0Creds, _ := t.Shell().NewChildCredentials("c0")
-	cleanSync0 := tu.StartSyncbased(t, server0Creds, "sync0", "",
+func TestV23SyncbasedGetDeltasPrePopulate(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
+	server0Creds := sh.ForkCredentials("s0")
+	client0Ctx := sh.ForkContext("c0")
+	sh.StartSyncbase(server0Creds, "sync0", "",
 		`{"Read": {"In":["root:c0"]}, "Write": {"In":["root:c0"]}}`)
-	defer cleanSync0()
 
-	server1Creds, _ := t.Shell().NewChildCredentials("s1")
-	client1Creds, _ := t.Shell().NewChildCredentials("c1")
-	cleanSync1 := tu.StartSyncbased(t, server1Creds, "sync1", "",
+	server1Creds := sh.ForkCredentials("s1")
+	client1Ctx := sh.ForkContext("c1")
+	sh.StartSyncbase(server1Creds, "sync1", "",
 		`{"Read": {"In":["root:c1"]}, "Write": {"In":["root:c1"]}}`)
-	defer cleanSync1()
 
 	sgName := naming.Join("sync0", constants.SyncbaseSuffix, "SG1")
 
 	// Populate table data before creating the syncgroup.  Also populate
 	// with data that is not part of the syncgroup to verify filtering.
-	tu.RunClient(t, client0Creds, runSetupAppA, "sync0")
-	tu.RunClient(t, client0Creds, runPopulateData, "sync0", "foo", "0")
-	tu.RunClient(t, client0Creds, runPopulateData, "sync0", "bar", "0")
-	tu.RunClient(t, client0Creds, runCreateSyncgroup, "sync0", sgName, "tb:foo", "", "root:s0", "root:s1")
+	ok(t, runSetupAppA(client0Ctx, "sync0"))
+	ok(t, runPopulateData(client0Ctx, "sync0", "foo", 0))
+	ok(t, runPopulateData(client0Ctx, "sync0", "bar", 0))
+	ok(t, runCreateSyncgroup(client0Ctx, "sync0", sgName, "tb:foo", "", "root:s0", "root:s1"))
 
-	tu.RunClient(t, client1Creds, runSetupAppA, "sync1")
-	tu.RunClient(t, client1Creds, runJoinSyncgroup, "sync1", sgName)
-	tu.RunClient(t, client1Creds, runVerifySyncgroupData, "sync1", "foo", "0", "10", "false")
-	tu.RunClient(t, client1Creds, runVerifyNonSyncgroupData, "sync1", "bar")
+	ok(t, runSetupAppA(client1Ctx, "sync1"))
+	ok(t, runJoinSyncgroup(client1Ctx, "sync1", sgName))
+	ok(t, runVerifySyncgroupData(client1Ctx, "sync1", "foo", 0, 10, false))
+	ok(t, runVerifyNonSyncgroupData(client1Ctx, "sync1", "bar"))
 }
 
-// V23TestSyncbasedGetDeltasMultiApp tests the sending of deltas between two
+// TestV23SyncbasedGetDeltasMultiApp tests the sending of deltas between two
 // Syncbase instances and their clients across multiple apps, databases, and
 // tables.  The 1st client puts entries in multiple tables across multiple
 // app databases then creates multiple syncgroups (one per database) over that
 // data.  The 2nd client joins these syncgroups and reads all the data.
-func V23TestSyncbasedGetDeltasMultiApp(t *v23tests.T) {
-	v23tests.RunRootMT(t, "--v23.tcp.address=127.0.0.1:0")
-	server0Creds, _ := t.Shell().NewChildCredentials("s0")
-	client0Creds, _ := t.Shell().NewChildCredentials("c0")
-	cleanSync0 := tu.StartSyncbased(t, server0Creds, "sync0", "",
+func TestV23SyncbasedGetDeltasMultiApp(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
+	server0Creds := sh.ForkCredentials("s0")
+	client0Ctx := sh.ForkContext("c0")
+	sh.StartSyncbase(server0Creds, "sync0", "",
 		`{"Read": {"In":["root:c0"]}, "Write": {"In":["root:c0"]}}`)
-	defer cleanSync0()
 
-	server1Creds, _ := t.Shell().NewChildCredentials("s1")
-	client1Creds, _ := t.Shell().NewChildCredentials("c1")
-	cleanSync1 := tu.StartSyncbased(t, server1Creds, "sync1", "",
+	server1Creds := sh.ForkCredentials("s1")
+	client1Ctx := sh.ForkContext("c1")
+	sh.StartSyncbase(server1Creds, "sync1", "",
 		`{"Read": {"In":["root:c1"]}, "Write": {"In":["root:c1"]}}`)
-	defer cleanSync1()
 
 	sgNamePrefix := naming.Join("sync0", constants.SyncbaseSuffix)
-	na, nd, nt := "2", "2", "2" // number of apps, dbs, tables
+	na, nd, nt := 2, 2, 2 // number of apps, dbs, tables
 
-	tu.RunClient(t, client0Creds, runSetupAppMulti, "sync0", na, nd, nt)
-	tu.RunClient(t, client0Creds, runPopulateSyncgroupMulti, "sync0", sgNamePrefix, na, nd, nt, "foo", "bar")
+	ok(t, runSetupAppMulti(client0Ctx, "sync0", na, nd, nt))
+	ok(t, runPopulateSyncgroupMulti(client0Ctx, "sync0", sgNamePrefix, na, nd, nt, "foo", "bar"))
 
-	tu.RunClient(t, client1Creds, runSetupAppMulti, "sync1", na, nd, nt)
-	tu.RunClient(t, client1Creds, runJoinSyncgroupMulti, "sync1", sgNamePrefix, na, nd)
-	tu.RunClient(t, client1Creds, runVerifySyncgroupDataMulti, "sync1", na, nd, nt, "foo", "bar")
+	ok(t, runSetupAppMulti(client1Ctx, "sync1", na, nd, nt))
+	ok(t, runJoinSyncgroupMulti(client1Ctx, "sync1", sgNamePrefix, na, nd))
+	ok(t, runVerifySyncgroupDataMulti(client1Ctx, "sync1", na, nd, nt, "foo", "bar"))
 }
 
-// V23TestSyncgroupSync tests the syncing of syncgroup metadata. The 1st client
+// TestV23SyncgroupSync tests the syncing of syncgroup metadata. The 1st client
 // creates the syncgroup SG1, and clients 2 and 3 join this syncgroup. All three
 // clients must learn of the remaining two. Note that client 2 relies on
 // syncgroup metadata syncing to learn of client 3.
-func V23TestSyncgroupSync(t *v23tests.T) {
-	v23tests.RunRootMT(t, "--v23.tcp.address=127.0.0.1:0")
-	server0Creds, _ := t.Shell().NewChildCredentials("s0")
-	client0Creds, _ := t.Shell().NewChildCredentials("c0")
-	cleanSync0 := tu.StartSyncbased(t, server0Creds, "sync0", "",
+func TestV23SyncgroupSync(t *testing.T) {
+	sh := v23test.NewShell(t, v23test.Opts{Large: true})
+	defer sh.Cleanup()
+	sh.StartRootMountTable()
+	server0Creds := sh.ForkCredentials("s0")
+	client0Ctx := sh.ForkContext("c0")
+	sh.StartSyncbase(server0Creds, "sync0", "",
 		`{"Read": {"In":["root:c0"]}, "Write": {"In":["root:c0"]}}`)
-	defer cleanSync0()
 
-	server1Creds, _ := t.Shell().NewChildCredentials("s1")
-	client1Creds, _ := t.Shell().NewChildCredentials("c1")
-	cleanSync1 := tu.StartSyncbased(t, server1Creds, "sync1", "",
+	server1Creds := sh.ForkCredentials("s1")
+	client1Ctx := sh.ForkContext("c1")
+	sh.StartSyncbase(server1Creds, "sync1", "",
 		`{"Read": {"In":["root:c1"]}, "Write": {"In":["root:c1"]}}`)
-	defer cleanSync1()
 
-	server2Creds, _ := t.Shell().NewChildCredentials("s2")
-	client2Creds, _ := t.Shell().NewChildCredentials("c2")
-	cleanSync2 := tu.StartSyncbased(t, server2Creds, "sync2", "",
+	server2Creds := sh.ForkCredentials("s2")
+	client2Ctx := sh.ForkContext("c2")
+	sh.StartSyncbase(server2Creds, "sync2", "",
 		`{"Read": {"In":["root:c2"]}, "Write": {"In":["root:c2"]}}`)
-	defer cleanSync2()
 
 	sgName := naming.Join("sync0", constants.SyncbaseSuffix, "SG1")
 
-	tu.RunClient(t, client0Creds, runSetupAppA, "sync0")
-	tu.RunClient(t, client0Creds, runCreateSyncgroup, "sync0", sgName, "tb:foo", "", "root:s0", "root:s1", "root:s2")
-	tu.RunClient(t, client0Creds, runPopulateData, "sync0", "foo", "0")
+	ok(t, runSetupAppA(client0Ctx, "sync0"))
+	ok(t, runCreateSyncgroup(client0Ctx, "sync0", sgName, "tb:foo", "", "root:s0", "root:s1", "root:s2"))
+	ok(t, runPopulateData(client0Ctx, "sync0", "foo", 0))
 
-	tu.RunClient(t, client1Creds, runSetupAppA, "sync1")
-	tu.RunClient(t, client1Creds, runJoinSyncgroup, "sync1", sgName)
+	ok(t, runSetupAppA(client1Ctx, "sync1"))
+	ok(t, runJoinSyncgroup(client1Ctx, "sync1", sgName))
 
-	tu.RunClient(t, client2Creds, runSetupAppA, "sync2")
-	tu.RunClient(t, client2Creds, runJoinSyncgroup, "sync2", sgName)
+	ok(t, runSetupAppA(client2Ctx, "sync2"))
+	ok(t, runJoinSyncgroup(client2Ctx, "sync2", sgName))
 
-	tu.RunClient(t, client1Creds, runGetSyncgroupMembers, "sync1", sgName, "3")
-	tu.RunClient(t, client2Creds, runGetSyncgroupMembers, "sync2", sgName, "3")
+	ok(t, runGetSyncgroupMembers(client1Ctx, "sync1", sgName, 3))
+	ok(t, runGetSyncgroupMembers(client2Ctx, "sync2", sgName, 3))
 
-	tu.RunClient(t, client1Creds, runVerifySyncgroupData, "sync1", "foo", "0", "10", "false")
-	tu.RunClient(t, client2Creds, runVerifySyncgroupData, "sync2", "foo", "0", "10", "false")
+	ok(t, runVerifySyncgroupData(client1Ctx, "sync1", "foo", 0, 10, false))
+	ok(t, runVerifySyncgroupData(client2Ctx, "sync2", "foo", 0, 10, false))
 }
 
 ////////////////////////////////////
@@ -558,102 +543,87 @@
 // TODO(hpucha): Look into refactoring scan logic out of the helpers, and
 // avoiding gets when we can scan.
 
-var runSetupAppA = modules.Register(func(env *modules.Env, args ...string) error {
-	ctx, shutdown := v23.Init()
-	defer shutdown()
-
-	a := syncbase.NewService(args[0]).App("a")
-	a.Create(ctx, nil)
+func runSetupAppA(ctx *context.T, serviceName string) error {
+	a := syncbase.NewService(serviceName).App("a")
+	if err := a.Create(ctx, nil); err != nil {
+		return err
+	}
 	d := a.NoSQLDatabase("d", nil)
-	d.Create(ctx, nil)
-	d.Table(testTable).Create(ctx, nil)
+	if err := d.Create(ctx, nil); err != nil {
+		return err
+	}
+	tb := d.Table(testTable)
+	if err := tb.Create(ctx, nil); err != nil {
+		return err
+	}
 
 	return nil
-}, "runSetupAppA")
+}
 
-// Arguments: 0: Syncbase name, 1: syncgroup name, 2: prefixes, 3: mount table,
-// 4 onwards: syncgroup permission blessings.
-var runCreateSyncgroup = modules.Register(func(env *modules.Env, args ...string) error {
-	ctx, shutdown := v23.Init()
-	defer shutdown()
-
-	a := syncbase.NewService(args[0]).App("a")
+func runCreateSyncgroup(ctx *context.T, serviceName, sgName, sgPrefixes, mtName string, sgBlessings ...string) error {
+	a := syncbase.NewService(serviceName).App("a")
 	d := a.NoSQLDatabase("d", nil)
 
-	mtName := args[3]
-	if mtName == "" {
-		mtName = env.Vars[ref.EnvNamespacePrefix]
+	mtNames := v23.GetNamespace(ctx).Roots()
+	if mtName != "" {
+		mtNames = []string{mtName}
 	}
 
 	spec := wire.SyncgroupSpec{
 		Description: "test syncgroup sg",
-		Perms:       perms(args[4:]...),
-		Prefixes:    toSgPrefixes(args[2]),
-		MountTables: []string{mtName},
+		Perms:       perms(sgBlessings...),
+		Prefixes:    toSgPrefixes(sgPrefixes),
+		MountTables: mtNames,
 	}
 
-	sg := d.Syncgroup(args[1])
+	sg := d.Syncgroup(sgName)
 	info := wire.SyncgroupMemberInfo{SyncPriority: 8}
 	if err := sg.Create(ctx, spec, info); err != nil {
-		return fmt.Errorf("Create SG %q failed: %v\n", args[1], err)
+		return fmt.Errorf("Create SG %q failed: %v\n", sgName, err)
 	}
 	return nil
-}, "runCreateSyncgroup")
+}
 
-var runJoinSyncgroup = modules.Register(func(env *modules.Env, args ...string) error {
-	ctx, shutdown := v23.Init()
-	defer shutdown()
-
-	a := syncbase.NewService(args[0]).App("a")
+func runJoinSyncgroup(ctx *context.T, serviceName, sgName string) error {
+	a := syncbase.NewService(serviceName).App("a")
 	d := a.NoSQLDatabase("d", nil)
 
-	sg := d.Syncgroup(args[1])
+	sg := d.Syncgroup(sgName)
 	info := wire.SyncgroupMemberInfo{SyncPriority: 10}
 	if _, err := sg.Join(ctx, info); err != nil {
-		return fmt.Errorf("Join SG %q failed: %v\n", args[1], err)
+		return fmt.Errorf("Join SG %q failed: %v\n", sgName, err)
 	}
 	return nil
-}, "runJoinSyncgroup")
+}
 
-// Arguments: 0: Syncbase name, 1: syncgroup name, 2: syncgroup description, 3:
-// prefixes, 4 onwards: syncgroup permission blessings.
-var runSetSyncgroupSpec = modules.Register(func(env *modules.Env, args ...string) error {
-	ctx, shutdown := v23.Init()
-	defer shutdown()
-
-	a := syncbase.NewService(args[0]).App("a")
+func runSetSyncgroupSpec(ctx *context.T, serviceName, sgName, sgDesc, sgPrefixes string, sgBlessings ...string) error {
+	a := syncbase.NewService(serviceName).App("a")
 	d := a.NoSQLDatabase("d", nil)
 
-	sg := d.Syncgroup(args[1])
+	sg := d.Syncgroup(sgName)
 
-	mtName := env.Vars[ref.EnvNamespacePrefix]
+	mtNames := v23.GetNamespace(ctx).Roots()
 	spec := wire.SyncgroupSpec{
-		Description: args[2],
-		Prefixes:    toSgPrefixes(args[3]),
-		Perms:       perms(args[4:]...),
-		MountTables: []string{mtName},
+		Description: sgDesc,
+		Prefixes:    toSgPrefixes(sgPrefixes),
+		Perms:       perms(sgBlessings...),
+		MountTables: mtNames,
 	}
 
 	if err := sg.SetSpec(ctx, spec, ""); err != nil {
-		return fmt.Errorf("SetSpec SG %q failed: %v\n", args[1], err)
+		return fmt.Errorf("SetSpec SG %q failed: %v\n", sgName, err)
 	}
 	return nil
-}, "runSetSyncgroupSpec")
+}
 
-// Arguments: 0: Syncbase name, 1: syncgroup name, 2: syncgroup description, 3:
-// prefixes, 4 onwards: syncgroup permission blessings.
-var runGetSyncgroupSpec = modules.Register(func(env *modules.Env, args ...string) error {
-	ctx, shutdown := v23.Init()
-	defer shutdown()
-
-	a := syncbase.NewService(args[0]).App("a")
+func runGetSyncgroupSpec(ctx *context.T, serviceName, sgName, wantDesc, wantPrefixes string, wantBlessings ...string) error {
+	a := syncbase.NewService(serviceName).App("a")
 	d := a.NoSQLDatabase("d", nil)
 
-	sg := d.Syncgroup(args[1])
+	sg := d.Syncgroup(sgName)
 
-	wantDesc := args[2]
-	wantPfxs := toSgPrefixes(args[3])
-	wantPerms := perms(args[4:]...)
+	wantPfxs := toSgPrefixes(wantPrefixes)
+	wantPerms := perms(wantBlessings...)
 
 	var spec wire.SyncgroupSpec
 	var err error
@@ -661,37 +631,31 @@
 		time.Sleep(500 * time.Millisecond)
 		spec, _, err = sg.GetSpec(ctx)
 		if err != nil {
-			return fmt.Errorf("GetSpec SG %q failed: %v\n", args[1], err)
+			return fmt.Errorf("GetSpec SG %q failed: %v\n", sgName, err)
 		}
 		if spec.Description == wantDesc {
 			break
 		}
 	}
 	if spec.Description != wantDesc || !reflect.DeepEqual(spec.Prefixes, wantPfxs) || !reflect.DeepEqual(spec.Perms, wantPerms) {
-		return fmt.Errorf("GetSpec SG %q failed: description got %v, want %v, prefixes got %v, want %v, perms got %v, want %v\n", args[1], spec.Description, wantDesc, spec.Prefixes, wantPfxs, spec.Perms, wantPerms)
+		return fmt.Errorf("GetSpec SG %q failed: description got %v, want %v, prefixes got %v, want %v, perms got %v, want %v\n", sgName, spec.Description, wantDesc, spec.Prefixes, wantPfxs, spec.Perms, wantPerms)
 	}
 	return nil
-}, "runGetSyncgroupSpec")
+}
 
-// Arguments: 0: Syncbase name, 1: syncgroup name, 2: number of syncgroup
-// members.
-var runGetSyncgroupMembers = modules.Register(func(env *modules.Env, args ...string) error {
-	ctx, shutdown := v23.Init()
-	defer shutdown()
-
-	a := syncbase.NewService(args[0]).App("a")
+func runGetSyncgroupMembers(ctx *context.T, serviceName, sgName string, wantMembers uint64) error {
+	a := syncbase.NewService(serviceName).App("a")
 	d := a.NoSQLDatabase("d", nil)
 
-	sg := d.Syncgroup(args[1])
+	sg := d.Syncgroup(sgName)
 
-	wantMembers, _ := strconv.ParseUint(args[2], 10, 64)
 	var gotMembers uint64
 
 	for i := 0; i < 8; i++ {
 		time.Sleep(500 * time.Millisecond)
 		members, err := sg.GetMembers(ctx)
 		if err != nil {
-			return fmt.Errorf("GetMembers SG %q failed: %v\n", args[1], err)
+			return fmt.Errorf("GetMembers SG %q failed: %v\n", sgName, err)
 		}
 		gotMembers = uint64(len(members))
 		if wantMembers == gotMembers {
@@ -699,47 +663,37 @@
 		}
 	}
 	if wantMembers != gotMembers {
-		return fmt.Errorf("GetMembers SG %q failed: members got %v, want %v\n", args[1], gotMembers, wantMembers)
+		return fmt.Errorf("GetMembers SG %q failed: members got %v, want %v\n", sgName, gotMembers, wantMembers)
 	}
 	return nil
-}, "runGetSyncgroupMembers")
+}
 
-// Arguments: 0: Syncbase name, 1: key prefix, 2: start index.
-var runPopulateData = modules.Register(func(env *modules.Env, args ...string) error {
-	ctx, shutdown := v23.Init()
-	defer shutdown()
-
-	a := syncbase.NewService(args[0]).App("a")
+func runPopulateData(ctx *context.T, serviceName, keyPrefix string, start uint64) error {
+	a := syncbase.NewService(serviceName).App("a")
 	d := a.NoSQLDatabase("d", nil)
 
 	// Do Puts.
 	tb := d.Table(testTable)
-	start, _ := strconv.ParseUint(args[2], 10, 64)
 
 	for i := start; i < start+10; i++ {
-		key := fmt.Sprintf("%s%d", args[1], i)
+		key := fmt.Sprintf("%s%d", keyPrefix, i)
 		r := tb.Row(key)
 		if err := r.Put(ctx, "testkey"+key); err != nil {
 			return fmt.Errorf("r.Put() failed: %v\n", err)
 		}
 	}
 	return nil
-}, "runPopulateData")
+}
 
-// Arguments: 0: Syncbase name, 1: key prefix, 2: start index.
-var runPopulateNonVomData = modules.Register(func(env *modules.Env, args ...string) error {
-	ctx, shutdown := v23.Init()
-	defer shutdown()
-
-	a := syncbase.NewService(args[0]).App("a")
+func runPopulateNonVomData(ctx *context.T, serviceName, keyPrefix string, start uint64) error {
+	a := syncbase.NewService(serviceName).App("a")
 	d := a.NoSQLDatabase("d", nil)
 
 	// Do Puts.
 	tb := d.Table("tb")
-	start, _ := strconv.ParseUint(args[2], 10, 64)
 
 	for i := start; i < start+10; i++ {
-		key := fmt.Sprintf("%s%d", args[1], i)
+		key := fmt.Sprintf("%s%d", keyPrefix, i)
 		r := tb.Row(key)
 		c := wire.RowClient(r.FullName())
 		val := []byte("nonvomtestkey" + key)
@@ -748,56 +702,32 @@
 		}
 	}
 	return nil
-}, "runPopulateNonVomData")
+}
 
-// Arguments: 0: Syncbase name, 1: start index.
-// Optional args: 2: end index, 3: value prefix.
-// Values are from [start,end) or [start, start+5) depending on whether end
-// param was provided.
-var runUpdateData = modules.Register(func(env *modules.Env, args ...string) error {
-	ctx, shutdown := v23.Init()
-	defer shutdown()
-
-	serviceName, startStr := args[0], args[1]
-	start, _ := strconv.ParseUint(startStr, 10, 64)
-	end, prefix := start+5, "testkey"
-	if len(args) > 2 {
-		end, _ = strconv.ParseUint(args[2], 10, 64)
-		if end <= start {
-			return fmt.Errorf("Test error: end <= start. start: %d, end: %d", start, end)
-		}
-	}
-	if len(args) > 3 {
-		prefix = args[3]
-	}
-
+func runUpdateData(ctx *context.T, serviceName string, start uint64) error {
 	a := syncbase.NewService(serviceName).App("a")
 	d := a.NoSQLDatabase("d", nil)
 
 	// Do Puts.
 	tb := d.Table(testTable)
 
-	for i := start; i < end; i++ {
+	for i := start; i < start+5; i++ {
 		key := fmt.Sprintf("foo%d", i)
 		r := tb.Row(key)
-		if err := r.Put(ctx, prefix+serviceName+key); err != nil {
+		if err := r.Put(ctx, "testkey"+serviceName+key); err != nil {
 			return fmt.Errorf("r.Put() failed: %v\n", err)
 		}
 	}
 
 	return nil
-}, "runUpdateData")
+}
 
-var runDeleteData = modules.Register(func(env *modules.Env, args ...string) error {
-	ctx, shutdown := v23.Init()
-	defer shutdown()
-
-	a := syncbase.NewService(args[0]).App("a")
+func runDeleteData(ctx *context.T, serviceName string, start uint64) error {
+	a := syncbase.NewService(serviceName).App("a")
 	d := a.NoSQLDatabase("d", nil)
 
 	// Do Deletes.
 	tb := d.Table(testTable)
-	start, _ := strconv.ParseUint(args[1], 10, 64)
 
 	for i := start; i < start+5; i++ {
 		key := fmt.Sprintf("foo%d", i)
@@ -808,41 +738,30 @@
 	}
 
 	return nil
-}, "runDeleteData")
+}
 
-// Arguments: 0: syncbase name, 1: key prefix, 2: blessing for acl.
-var runSetPrefixPermissions = modules.Register(func(env *modules.Env, args ...string) error {
-	ctx, shutdown := v23.Init()
-	defer shutdown()
-
-	a := syncbase.NewService(args[0]).App("a")
+func runSetPrefixPermissions(ctx *context.T, serviceName, keyPrefix string, aclBlessings ...string) error {
+	a := syncbase.NewService(serviceName).App("a")
 	d := a.NoSQLDatabase("d", nil)
 
 	// Set acl.
 	tb := d.Table(testTable)
 
-	if err := tb.SetPrefixPermissions(ctx, nosql.Prefix(args[1]), perms(args[2:]...)); err != nil {
+	if err := tb.SetPrefixPermissions(ctx, nosql.Prefix(keyPrefix), perms(aclBlessings...)); err != nil {
 		return fmt.Errorf("tb.SetPrefixPermissions() failed: %v\n", err)
 	}
 
 	return nil
-}, "runSetPrefixPermissions")
+}
 
-// Arguments: 0: syncbase name, 1: key prefix, 2: start index, 3: number of keys, 4: skip scan.
-var runVerifySyncgroupData = modules.Register(func(env *modules.Env, args ...string) error {
-	ctx, shutdown := v23.Init()
-	defer shutdown()
-
-	a := syncbase.NewService(args[0]).App("a")
+func runVerifySyncgroupData(ctx *context.T, serviceName, keyPrefix string, start, count uint64, skipScan bool) error {
+	a := syncbase.NewService(serviceName).App("a")
 	d := a.NoSQLDatabase("d", nil)
 
 	// Wait for a bit (up to 4 sec) until the last key appears.
 	tb := d.Table(testTable)
 
-	start, _ := strconv.ParseUint(args[2], 10, 64)
-	count, _ := strconv.ParseUint(args[3], 10, 64)
-	skipScan, _ := strconv.ParseBool(args[4])
-	lastKey := fmt.Sprintf("%s%d", args[1], start+count-1)
+	lastKey := fmt.Sprintf("%s%d", keyPrefix, start+count-1)
 
 	r := tb.Row(lastKey)
 	for i := 0; i < 8; i++ {
@@ -855,7 +774,7 @@
 
 	// Verify that all keys and values made it correctly.
 	for i := start; i < start+count; i++ {
-		key := fmt.Sprintf("%s%d", args[1], i)
+		key := fmt.Sprintf("%s%d", keyPrefix, i)
 		r := tb.Row(key)
 		var got string
 		if err := r.Get(ctx, &got); err != nil {
@@ -869,9 +788,9 @@
 
 	if !skipScan {
 		// Re-verify using a scan operation.
-		stream := tb.Scan(ctx, nosql.Prefix(args[1]))
+		stream := tb.Scan(ctx, nosql.Prefix(keyPrefix))
 		for i := 0; stream.Advance(); i++ {
-			want := fmt.Sprintf("%s%d", args[1], i)
+			want := fmt.Sprintf("%s%d", keyPrefix, i)
 			got := stream.Key()
 			if got != want {
 				return fmt.Errorf("unexpected key in scan: got %q, want %q\n", got, want)
@@ -890,22 +809,16 @@
 		}
 	}
 	return nil
-}, "runVerifySyncgroupData")
+}
 
-// Arguments: 0: syncbase name, 1: key prefix, 2: start index, 3: number of keys.
-var runVerifySyncgroupNonVomData = modules.Register(func(env *modules.Env, args ...string) error {
-	ctx, shutdown := v23.Init()
-	defer shutdown()
-
-	a := syncbase.NewService(args[0]).App("a")
+func runVerifySyncgroupNonVomData(ctx *context.T, serviceName, keyPrefix string, start, count uint64) error {
+	a := syncbase.NewService(serviceName).App("a")
 	d := a.NoSQLDatabase("d", nil)
 
 	// Wait for a bit (up to 4 sec) until the last key appears.
 	tb := d.Table("tb")
 
-	start, _ := strconv.ParseUint(args[2], 10, 64)
-	count, _ := strconv.ParseUint(args[3], 10, 64)
-	lastKey := fmt.Sprintf("%s%d", args[1], start+count-1)
+	lastKey := fmt.Sprintf("%s%d", keyPrefix, start+count-1)
 
 	r := tb.Row(lastKey)
 	c := wire.RowClient(r.FullName())
@@ -918,7 +831,7 @@
 
 	// Verify that all keys and values made it correctly.
 	for i := start; i < start+count; i++ {
-		key := fmt.Sprintf("%s%d", args[1], i)
+		key := fmt.Sprintf("%s%d", keyPrefix, i)
 		r := tb.Row(key)
 		c := wire.RowClient(r.FullName())
 		var got string
@@ -933,13 +846,10 @@
 		}
 	}
 	return nil
-}, "runVerifySyncgroupNonVomData")
+}
 
-var runVerifyDeletedData = modules.Register(func(env *modules.Env, args ...string) error {
-	ctx, shutdown := v23.Init()
-	defer shutdown()
-
-	a := syncbase.NewService(args[0]).App("a")
+func runVerifyDeletedData(ctx *context.T, serviceName, keyPrefix string) error {
+	a := syncbase.NewService(serviceName).App("a")
 	d := a.NoSQLDatabase("d", nil)
 
 	// Wait for a bit for deletions to propagate.
@@ -955,10 +865,10 @@
 	}
 
 	// Verify using a scan operation.
-	stream := tb.Scan(ctx, nosql.Prefix(args[1]))
+	stream := tb.Scan(ctx, nosql.Prefix(keyPrefix))
 	count := 0
 	for i := 5; stream.Advance(); i++ {
-		want := fmt.Sprintf("%s%d", args[1], i)
+		want := fmt.Sprintf("%s%d", keyPrefix, i)
 		got := stream.Key()
 		if got != want {
 			return fmt.Errorf("unexpected key in scan: got %q, want %q\n", got, want)
@@ -982,13 +892,10 @@
 	}
 
 	return nil
-}, "runVerifyDeletedData")
+}
 
-var runVerifyConflictResolution = modules.Register(func(env *modules.Env, args ...string) error {
-	ctx, shutdown := v23.Init()
-	defer shutdown()
-
-	a := syncbase.NewService(args[0]).App("a")
+func runVerifyConflictResolution(ctx *context.T, serviceName string) error {
+	a := syncbase.NewService(serviceName).App("a")
 	d := a.NoSQLDatabase("d", nil)
 	tb := d.Table(testTable)
 
@@ -1023,19 +930,16 @@
 		}
 	}
 	return nil
-}, "runVerifyConflictResolution")
+}
 
-var runVerifyNonSyncgroupData = modules.Register(func(env *modules.Env, args ...string) error {
-	ctx, shutdown := v23.Init()
-	defer shutdown()
-
-	a := syncbase.NewService(args[0]).App("a")
+func runVerifyNonSyncgroupData(ctx *context.T, serviceName, keyPrefix string) error {
+	a := syncbase.NewService(serviceName).App("a")
 	d := a.NoSQLDatabase("d", nil)
 	tb := d.Table(testTable)
 
 	// Verify through a scan that none of that data exists.
 	count := 0
-	stream := tb.Scan(ctx, nosql.Prefix(args[1]))
+	stream := tb.Scan(ctx, nosql.Prefix(keyPrefix))
 	for stream.Advance() {
 		count++
 	}
@@ -1044,17 +948,14 @@
 		return fmt.Errorf("scan stream error: %v\n", err)
 	}
 	if count > 0 {
-		return fmt.Errorf("found %d entries in %s prefix that should not be there\n", count, args[1])
+		return fmt.Errorf("found %d entries in %s prefix that should not be there\n", count, keyPrefix)
 	}
 
 	return nil
-}, "runVerifyNonSyncgroupData")
+}
 
-var runVerifyLocalAndRemoteData = modules.Register(func(env *modules.Env, args ...string) error {
-	ctx, shutdown := v23.Init()
-	defer shutdown()
-
-	a := syncbase.NewService(args[0]).App("a")
+func runVerifyLocalAndRemoteData(ctx *context.T, serviceName string) error {
+	a := syncbase.NewService(serviceName).App("a")
 	d := a.NoSQLDatabase("d", nil)
 	tb := d.Table(testTable)
 
@@ -1094,22 +995,16 @@
 		}
 	}
 	return nil
-}, "runVerifyLocalAndRemoteData")
+}
 
-// Arguments: 0: syncbase name, 1: key prefix, 2: start pos for key, 3: number of keys.
-var runVerifyLostAccess = modules.Register(func(env *modules.Env, args ...string) error {
-	ctx, shutdown := v23.Init()
-	defer shutdown()
-
-	a := syncbase.NewService(args[0]).App("a")
+func runVerifyLostAccess(ctx *context.T, serviceName, keyPrefix string, start, count uint64) error {
+	a := syncbase.NewService(serviceName).App("a")
 	d := a.NoSQLDatabase("d", nil)
 
 	// Wait for a bit (up to 4 sec) until the last key disappears.
 	tb := d.Table(testTable)
 
-	start, _ := strconv.ParseUint(args[2], 10, 64)
-	count, _ := strconv.ParseUint(args[3], 10, 64)
-	lastKey := fmt.Sprintf("%s%d", args[1], start+count-1)
+	lastKey := fmt.Sprintf("%s%d", keyPrefix, start+count-1)
 
 	r := tb.Row(lastKey)
 	for i := 0; i < 8; i++ {
@@ -1122,7 +1017,7 @@
 
 	// Verify that all keys and values have lost access.
 	for i := start; i < start+count; i++ {
-		key := fmt.Sprintf("%s%d", args[1], i)
+		key := fmt.Sprintf("%s%d", keyPrefix, i)
 		r := tb.Row(key)
 		var got string
 		if err := r.Get(ctx, &got); verror.ErrorID(err) != verror.ErrNoAccess.ID {
@@ -1131,13 +1026,10 @@
 	}
 
 	return nil
-}, "runVerifyLostAccess")
+}
 
-var runVerifyNestedSyncgroupData = modules.Register(func(env *modules.Env, args ...string) error {
-	ctx, shutdown := v23.Init()
-	defer shutdown()
-
-	a := syncbase.NewService(args[0]).App("a")
+func runVerifyNestedSyncgroupData(ctx *context.T, serviceName string) error {
+	a := syncbase.NewService(serviceName).App("a")
 	d := a.NoSQLDatabase("d", nil)
 
 	// Wait for a bit (up to 8 sec) until the last key appears. This chosen
@@ -1176,16 +1068,10 @@
 		}
 	}
 	return nil
-}, "runVerifyNestedSyncgroupData")
+}
 
-var runSetupAppMulti = modules.Register(func(env *modules.Env, args ...string) error {
-	ctx, shutdown := v23.Init()
-	defer shutdown()
-
-	svc := syncbase.NewService(args[0])
-	numApps, _ := strconv.Atoi(args[1])
-	numDbs, _ := strconv.Atoi(args[2])
-	numTbs, _ := strconv.Atoi(args[3])
+func runSetupAppMulti(ctx *context.T, serviceName string, numApps, numDbs, numTbs int) error {
+	svc := syncbase.NewService(serviceName)
 
 	for i := 0; i < numApps; i++ {
 		appName := fmt.Sprintf("a%d", i)
@@ -1205,20 +1091,12 @@
 	}
 
 	return nil
-}, "runSetupAppMulti")
+}
 
-var runPopulateSyncgroupMulti = modules.Register(func(env *modules.Env, args ...string) error {
-	ctx, shutdown := v23.Init()
-	defer shutdown()
+func runPopulateSyncgroupMulti(ctx *context.T, serviceName, sgNamePrefix string, numApps, numDbs, numTbs int, prefixes ...string) error {
+	mtNames := v23.GetNamespace(ctx).Roots()
 
-	mtName := env.Vars[ref.EnvNamespacePrefix]
-
-	svc := syncbase.NewService(args[0])
-	sgNamePrefix := args[1]
-	numApps, _ := strconv.Atoi(args[2])
-	numDbs, _ := strconv.Atoi(args[3])
-	numTbs, _ := strconv.Atoi(args[4])
-	prefixes := args[5:]
+	svc := syncbase.NewService(serviceName)
 
 	// For each app...
 	for i := 0; i < numApps; i++ {
@@ -1258,7 +1136,7 @@
 				Description: fmt.Sprintf("test sg %s/%s", appName, dbName),
 				Perms:       perms("root:s0", "root:s1"),
 				Prefixes:    toSgPrefixes(strings.Join(sgPrefixes, ",")),
-				MountTables: []string{mtName},
+				MountTables: mtNames,
 			}
 
 			sg := d.Syncgroup(sgName)
@@ -1270,16 +1148,10 @@
 	}
 
 	return nil
-}, "runPopulateSyncgroupMulti")
+}
 
-var runJoinSyncgroupMulti = modules.Register(func(env *modules.Env, args ...string) error {
-	ctx, shutdown := v23.Init()
-	defer shutdown()
-
-	svc := syncbase.NewService(args[0])
-	sgNamePrefix := args[1]
-	numApps, _ := strconv.Atoi(args[2])
-	numDbs, _ := strconv.Atoi(args[3])
+func runJoinSyncgroupMulti(ctx *context.T, serviceName, sgNamePrefix string, numApps, numDbs int) error {
+	svc := syncbase.NewService(serviceName)
 
 	for i := 0; i < numApps; i++ {
 		appName := fmt.Sprintf("a%d", i)
@@ -1299,17 +1171,10 @@
 	}
 
 	return nil
-}, "runJoinSyncgroupMulti")
+}
 
-var runVerifySyncgroupDataMulti = modules.Register(func(env *modules.Env, args ...string) error {
-	ctx, shutdown := v23.Init()
-	defer shutdown()
-
-	svc := syncbase.NewService(args[0])
-	numApps, _ := strconv.Atoi(args[1])
-	numDbs, _ := strconv.Atoi(args[2])
-	numTbs, _ := strconv.Atoi(args[3])
-	prefixes := args[4:]
+func runVerifySyncgroupDataMulti(ctx *context.T, serviceName string, numApps, numDbs, numTbs int, prefixes ...string) error {
+	svc := syncbase.NewService(serviceName)
 
 	time.Sleep(20 * time.Second)
 
@@ -1345,4 +1210,15 @@
 	}
 
 	return nil
-}, "runVerifySyncgroupDataMulti")
+}
+
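+// ok fails the test via t.Fatal when err is non-nil, printing a stack
+// trace first so the failing call site inside the helpers is visible.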
+func ok(t *testing.T, err error) {
+	if err != nil {
+		debug.PrintStack()
+		t.Fatal(err)
+	}
+}
+
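+// TestMain hands control to v23test.Run, which performs the v23 test
+// environment setup and teardown that the deleted generated shim
+// (v23_test.go, below) previously provided.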
+func TestMain(m *testing.M) {
+	os.Exit(v23test.Run(m.Run))
+}
diff --git a/syncbase/nosql/v23_test.go b/syncbase/nosql/v23_test.go
deleted file mode 100644
index 959eeba..0000000
--- a/syncbase/nosql/v23_test.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2015 The Vanadium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file was auto-generated via go generate.
-// DO NOT UPDATE MANUALLY
-
-package nosql_test
-
-import (
-	"os"
-	"testing"
-
-	"v.io/x/ref/test/modules"
-	"v.io/x/ref/test/v23tests"
-)
-
-func TestMain(m *testing.M) {
-	modules.DispatchAndExitIfChild()
-	cleanup := v23tests.UseSharedBinDir()
-	r := m.Run()
-	cleanup()
-	os.Exit(r)
-}
-
-func TestV23SyncbasedJoinSyncgroup(t *testing.T) {
-	v23tests.RunTest(t, V23TestSyncbasedJoinSyncgroup)
-}
-
-func TestV23SyncbasedGetDeltas(t *testing.T) {
-	v23tests.RunTest(t, V23TestSyncbasedGetDeltas)
-}
-
-func TestV23SyncbasedGetDeltasWithDel(t *testing.T) {
-	v23tests.RunTest(t, V23TestSyncbasedGetDeltasWithDel)
-}
-
-func TestV23SyncbasedCompEval(t *testing.T) {
-	v23tests.RunTest(t, V23TestSyncbasedCompEval)
-}
-
-func TestV23SyncbasedExchangeDeltasWithAcls(t *testing.T) {
-	v23tests.RunTest(t, V23TestSyncbasedExchangeDeltasWithAcls)
-}
-
-func TestV23SyncbasedExchangeDeltasWithConflicts(t *testing.T) {
-	v23tests.RunTest(t, V23TestSyncbasedExchangeDeltasWithConflicts)
-}
-
-func TestV23NestedSyncgroups(t *testing.T) {
-	v23tests.RunTest(t, V23TestNestedSyncgroups)
-}
-
-func TestV23NestedAndPeerSyncgroups(t *testing.T) {
-	v23tests.RunTest(t, V23TestNestedAndPeerSyncgroups)
-}
-
-func TestV23SyncbasedGetDeltasPrePopulate(t *testing.T) {
-	v23tests.RunTest(t, V23TestSyncbasedGetDeltasPrePopulate)
-}
-
-func TestV23SyncbasedGetDeltasMultiApp(t *testing.T) {
-	v23tests.RunTest(t, V23TestSyncbasedGetDeltasMultiApp)
-}
-
-func TestV23SyncgroupSync(t *testing.T) {
-	v23tests.RunTest(t, V23TestSyncgroupSync)
-}
diff --git a/vdl/convert.go b/vdl/convert.go
index 6255594..d958257 100644
--- a/vdl/convert.go
+++ b/vdl/convert.go
@@ -939,6 +939,12 @@
 	return fmt.Errorf("invalid conversion from bytes to %v", c.tt)
 }
 
+// settable exists to avoid invoking Set() via reflect.Call(), which
+// allocates on every call; a direct interface type assertion is used instead.
+type settable interface {
+	Set(string) error
+}
+
 func (c convTarget) fromString(src string) error {
 	if c.vv == nil {
 		tt := removeOptional(c.tt)
@@ -948,10 +954,9 @@
 			// that TypeFromReflect has already validated the Assign method, so we
 			// can call without error checking.
 			if c.rv.CanAddr() {
-				in := []reflect.Value{reflect.ValueOf(src)}
-				out := c.rv.Addr().MethodByName("Set").Call(in)
-				if ierr := out[0].Interface(); ierr != nil {
-					return ierr.(error)
+				err := c.rv.Addr().Interface().(settable).Set(src)
+				if err != nil {
+					return err
 				}
 				return nil
 			}
@@ -1204,7 +1209,7 @@
 			if tt.Kind() == Union {
 				// Special-case: the fill target is a union concrete field struct.  This
 				// means that we should only return a field if the field name matches.
-				name := c.rv.MethodByName("Name").Call(nil)[0].String()
+				name := c.rv.Interface().(nameable).Name()
 				if name != key.rv.String() {
 					return convTarget{}, ErrFieldNoExist
 				}
diff --git a/vdl/target.go b/vdl/target.go
index ca4c578..83c17aa 100644
--- a/vdl/target.go
+++ b/vdl/target.go
@@ -149,6 +149,18 @@
 	return result, nil
 }
 
+// stringable exists to avoid invoking String() via reflect.Call(), which
+// allocates on every call; a direct interface type assertion is used instead.
+type stringable interface {
+	String() string
+}
+
+// nameable exists to avoid invoking Name() via reflect.Call(), which
+// allocates on every call; a direct interface type assertion is used instead.
+type nameable interface {
+	Name() string
+}
+
 // FromReflect converts from rv to the target, by walking through rv and calling
 // the appropriate methods on the target.
 func FromReflect(target Target, rv reflect.Value) error {
@@ -236,11 +248,11 @@
 	// has already validated the methods, so we can call without error checking.
 	switch tt.Kind() {
 	case Enum:
-		label := rv.MethodByName("String").Call(nil)[0].String()
+		label := rv.Interface().(stringable).String()
 		return target.FromEnumLabel(label, ttFrom)
 	case Union:
 		// We're guaranteed rv is the concrete field struct.
-		name := rv.MethodByName("Name").Call(nil)[0].String()
+		name := rv.Interface().(nameable).Name()
 		fieldsTarget, err := target.StartFields(ttFrom)
 		if err != nil {
 			return err
diff --git a/vom/encoder.go b/vom/encoder.go
index adc89fd..abcf330 100644
--- a/vom/encoder.go
+++ b/vom/encoder.go
@@ -331,9 +331,10 @@
 }
 
 // Implementation of vdl.Target interface.
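+// The per-kind "allowed" slices below are hoisted to package scope so that
+// each call hands prepareType a preallocated slice rather than allocating a
+// fresh variadic slice on every encode.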
+var boolAllowed = []vdl.Kind{vdl.Bool}
 
 func (e *encoder) FromBool(src bool, tt *vdl.Type) error {
-	if err := e.prepareType(tt, vdl.Bool); err != nil {
+	if err := e.prepareType(tt, boolAllowed...); err != nil {
 		return err
 	}
 	if e.isStructFieldValue() && e.topType().Kind() != vdl.Any {
@@ -347,8 +348,10 @@
 	return nil
 }
 
+var uintAllowed = []vdl.Kind{vdl.Byte, vdl.Uint16, vdl.Uint32, vdl.Uint64}
+
 func (e *encoder) FromUint(src uint64, tt *vdl.Type) error {
-	if err := e.prepareType(tt, vdl.Byte, vdl.Uint16, vdl.Uint32, vdl.Uint64); err != nil {
+	if err := e.prepareType(tt, uintAllowed...); err != nil {
 		return err
 	}
 	if e.isStructFieldValue() && e.topType().Kind() != vdl.Any {
@@ -366,8 +369,10 @@
 	return nil
 }
 
+var intAllowed = []vdl.Kind{vdl.Int8, vdl.Int16, vdl.Int32, vdl.Int64}
+
 func (e *encoder) FromInt(src int64, tt *vdl.Type) error {
-	if err := e.prepareType(tt, vdl.Int8, vdl.Int16, vdl.Int32, vdl.Int64); err != nil {
+	if err := e.prepareType(tt, intAllowed...); err != nil {
 		return err
 	}
 	if e.version == Version80 && tt.Kind() == vdl.Int8 {
@@ -384,8 +389,10 @@
 	return nil
 }
 
+var floatAllowed = []vdl.Kind{vdl.Float32, vdl.Float64}
+
 func (e *encoder) FromFloat(src float64, tt *vdl.Type) error {
-	if err := e.prepareType(tt, vdl.Float32, vdl.Float64); err != nil {
+	if err := e.prepareType(tt, floatAllowed...); err != nil {
 		return err
 	}
 	if e.isStructFieldValue() && e.topType().Kind() != vdl.Any {
@@ -399,8 +406,10 @@
 	return nil
 }
 
+var complexAllowed = []vdl.Kind{vdl.Complex64, vdl.Complex128}
+
 func (e *encoder) FromComplex(src complex128, tt *vdl.Type) error {
-	if err := e.prepareType(tt, vdl.Complex64, vdl.Complex128); err != nil {
+	if err := e.prepareType(tt, complexAllowed...); err != nil {
 		return err
 	}
 	if e.isStructFieldValue() && e.topType().Kind() != vdl.Any {
@@ -451,8 +460,10 @@
 	return nil
 }
 
+var stringAllowed = []vdl.Kind{vdl.String}
+
 func (e *encoder) FromString(src string, tt *vdl.Type) error {
-	if err := e.prepareType(tt, vdl.String); err != nil {
+	if err := e.prepareType(tt, stringAllowed...); err != nil {
 		return err
 	}
 	if e.isStructFieldValue() && e.topType().Kind() != vdl.Any {
@@ -466,8 +477,10 @@
 	return nil
 }
 
+var enumAllowed = []vdl.Kind{vdl.Enum}
+
 func (e *encoder) FromEnumLabel(src string, tt *vdl.Type) error {
-	if err := e.prepareType(tt, vdl.Enum); err != nil {
+	if err := e.prepareType(tt, enumAllowed...); err != nil {
 		return err
 	}
 	index := tt.EnumIndex(src)
@@ -485,8 +498,10 @@
 	return nil
 }
 
+var typeObjectAllowed = []vdl.Kind{vdl.TypeObject}
+
 func (e *encoder) FromTypeObject(src *vdl.Type) error {
-	if err := e.prepareType(vdl.TypeObjectType, vdl.TypeObject); err != nil {
+	if err := e.prepareType(vdl.TypeObjectType, typeObjectAllowed...); err != nil {
 		return err
 	}
 	// Note that this function should never be called for wire types.
@@ -516,9 +531,11 @@
 	return nil
 }
 
+var nilAllowed = []vdl.Kind{vdl.Any, vdl.Optional}
+
 func (e *encoder) FromNil(tt *vdl.Type) error {
 	if !tt.CanBeNil() {
-		return errTypeMismatch(tt, vdl.Any, vdl.Optional)
+		return errTypeMismatch(tt, nilAllowed...)
 	}
 	if err := e.prepareTypeHelper(tt, true); err != nil {
 		return err
@@ -545,8 +562,10 @@
 	return nil
 }
 
+var listAllowed = []vdl.Kind{vdl.Array, vdl.List}
+
 func (e *encoder) StartList(tt *vdl.Type, len int) (vdl.ListTarget, error) {
-	if err := e.prepareType(tt, vdl.Array, vdl.List); err != nil {
+	if err := e.prepareType(tt, listAllowed...); err != nil {
 		return nil, err
 	}
 	if tt.Kind() == vdl.List {
@@ -573,8 +592,10 @@
 	return e, nil
 }
 
+var setAllowed = []vdl.Kind{vdl.Set}
+
 func (e *encoder) StartSet(tt *vdl.Type, len int) (vdl.SetTarget, error) {
-	if err := e.prepareType(tt, vdl.Set); err != nil {
+	if err := e.prepareType(tt, setAllowed...); err != nil {
 		return nil, err
 	}
 	if e.isStructFieldValue() && e.topType().Kind() != vdl.Any {
@@ -589,8 +610,10 @@
 	return e, nil
 }
 
+var mapAllowed = []vdl.Kind{vdl.Map}
+
 func (e *encoder) StartMap(tt *vdl.Type, len int) (vdl.MapTarget, error) {
-	if err := e.prepareType(tt, vdl.Map); err != nil {
+	if err := e.prepareType(tt, mapAllowed...); err != nil {
 		return nil, err
 	}
 	if e.isStructFieldValue() && e.topType().Kind() != vdl.Any {
@@ -605,12 +628,14 @@
 	return e, nil
 }
 
+var fieldsAllowed = []vdl.Kind{vdl.Struct, vdl.Union}
+
 func (e *encoder) StartFields(tt *vdl.Type) (vdl.FieldsTarget, error) {
 	if e.isStructFieldValue() && e.topType().Kind() != vdl.Any {
 		// TODO(bprosnitz) We shouldn't need to write the struct field index for fields that are empty structs/unions
 		binaryEncodeUint(e.buf, uint64(e.topTypeFieldIndex()))
 	}
-	if err := e.prepareType(tt, vdl.Struct, vdl.Union); err != nil {
+	if err := e.prepareType(tt, fieldsAllowed...); err != nil {
 		return nil, err
 	}
 	e.pushType(tt)