syncbase: Enforce ACL spec on non-sync RPCs.

Non-sync RPCs now check permissions according to the Syncbase ACL
specification document, including recursively checking for Resolve
access.
Expanded tests to cover most of the updated RPCs.
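
To illustrate the recursive Resolve rule only (a minimal sketch; the layer,
hasTag, and authorize names below are hypothetical and are not the Syncbase
implementation): a method tagged, say, Read on Collection also requires
Resolve on every enclosing level (Service, Database), but not on the
Collection itself.

package main

import "fmt"

// layer is one level of the hierarchy with its ACL: tag -> allowed blessings.
type layer struct {
	name string
	acl  map[string][]string
}

// hasTag reports whether caller is allowed for tag on layer l.
func hasTag(l layer, tag, caller string) bool {
	for _, b := range l.acl[tag] {
		if b == caller {
			return true
		}
	}
	return false
}

// authorize checks the required tag on the final level and Resolve on all
// levels above it, mirroring the rule stated in service.vdl.
func authorize(hierarchy []layer, tag, caller string) error {
	last := len(hierarchy) - 1
	for _, l := range hierarchy[:last] {
		if !hasTag(l, "Resolve", caller) {
			return fmt.Errorf("no Resolve on %s", l.name)
		}
	}
	if !hasTag(hierarchy[last], tag, caller) {
		return fmt.Errorf("no %s on %s", tag, hierarchy[last].name)
	}
	return nil
}

func main() {
	service := layer{"Service", map[string][]string{"Resolve": {"alice"}}}
	database := layer{"Database", map[string][]string{"Resolve": {"alice"}}}
	collection := layer{"Collection", map[string][]string{"Read": {"alice"}}}
	// Row.Get requires Read on Collection, plus Resolve on Service and Database.
	fmt.Println(authorize([]layer{service, database, collection}, "Read", "alice"))
}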

MultiPart: 1/2
Change-Id: I67ffc43a1d745cfcef1e4205fc3a6cdd56c4a8f8
diff --git a/services/syncbase/service.vdl b/services/syncbase/service.vdl
index 45e5d10..c8a5248 100644
--- a/services/syncbase/service.vdl
+++ b/services/syncbase/service.vdl
@@ -19,7 +19,7 @@
 // Unless stated otherwise, each permissions tag requirement on a method also
 // implies requiring Resolve on all levels of hierarchy up to, but excluding,
 // the level requiring the tag.
-// TODO(ivanpi): Implemented on Exists, implement elsewhere.
+// TODO(ivanpi): Implement on SyncgroupManager methods.
 // ErrNoAccess, Err[No]Exist, ErrUnknownBatch are only returned if the caller
 // is allowed to call Exists on the receiver of the RPC (or the first missing
 // component of the hierarchy to the receiver); otherwise, the returned error
@@ -51,36 +51,48 @@
 // RPC methods if the --dev flag is not set.
 
 // Service represents a Vanadium Syncbase service.
-// Service.Glob operates over Database ids.
+// Service.Glob operates over Database ids, requiring Read on Service.
 type Service interface {
 	// DevModeUpdateVClock updates various bits of Syncbase virtual clock and
 	// clock daemon state based on the specified options.
-	// Requires --dev flag to be set (in addition to Admin check).
+	//
+	// Requires: Admin on Service.
+	// Also requires --dev flag to be set.
 	DevModeUpdateVClock(uco DevModeUpdateVClockOpts) error {access.Admin}
 
 	// DevModeGetTime returns the current time per the Syncbase clock.
-	// Requires --dev flag to be set (in addition to Admin check).
+	//
+	// Requires: Admin on Service.
+	// Also requires --dev flag to be set.
 	DevModeGetTime() (time.Time | error) {access.Admin}
 
 	// SetPermissions and GetPermissions are included from the Object interface.
 	// Permissions must include at least one admin.
+	//
+	// Requires: Admin on Service.
 	permissions.Object
 }
 
 // Database represents a set of Collections. Batches, queries, syncgroups, and
 // watch all operate at the Database level.
-// Database.Glob operates over Collection ids.
+// Database.Glob operates over Collection ids, requiring Read on Database.
 type Database interface {
 	// Create creates this Database. Permissions must be non-nil and include at
 	// least one admin.
-	// Create requires the caller to have Write permission at the Service.
+	//
+	// Requires: Write on Service.
+	// Also requires the creator's blessing to match the pattern in the newly
+	// created Database's id. This requirement is waived for Admin on Service.
 	Create(metadata ?SchemaMetadata, perms access.Permissions) error {access.Write}
 
 	// Destroy destroys this Database, permanently removing all of its data.
 	// TODO(sadovsky): Specify what happens to syncgroups.
-	Destroy() error {access.Write}
+	//
+	// Requires: Admin on Database or Service.
+	Destroy() error {access.Admin}
 
 	// Exists returns true only if this Database exists.
+	//
 	// Requires: at least one tag on Database, or Read or Write on Service.
 	// Otherwise, ErrNoExistOrNoAccess is returned.
 	Exists() (bool | error)
@@ -92,17 +104,20 @@
 	// visible in a new snapshot of the Database, ignoring user batches.
 	// (Note that the same issue is present in glob on Collection, where Scan can
 	// be used instead if batch awareness is required.)
-	// Note, the glob client library checks Resolve access on every component
-	// along the path (by doing a Dispatcher.Lookup), whereas this doesn't happen
-	// for other RPCs.
-	// TODO(ivanpi): Resolve should be checked on all RPCs.
 	// TODO(sadovsky): Maybe switch to streaming RPC.
+	//
+	// Requires: Read on Database.
 	ListCollections(bh BatchHandle) ([]Id | error) {access.Read}
 
 	// Exec executes a syncQL query with positional parameters and returns all
 	// results as specified by the query's select/delete statement.
 	// Concurrency semantics are documented in model.go.
-	Exec(bh BatchHandle, query string, params []any) stream<_, []any> error {access.Read}
+	//
+	// Requires: Read and/or Write on Collection, depending on the query:
+	// - Read for select
+	// - Read and Write for delete
+	// TODO(ivanpi): Write should suffice for delete without v in WHERE clause.
+	Exec(bh BatchHandle, query string, params []any) stream<_, []any> error
 
 	// BeginBatch creates a new batch. It returns a batch handle to pass in when
 	// calling batch-aware RPCs.
@@ -112,30 +127,48 @@
 	// a batch. Note that glob RPCs are not batch-aware.
 	// TODO(sadovsky): Maybe make BatchOptions optional. Also, rename 'bo' to
 	// 'opts' once v.io/i/912 is resolved for Java.
-	BeginBatch(bo BatchOptions) (BatchHandle | error) {access.Read}
+	//
+	// Requires: at least one tag on Database.
+	BeginBatch(bo BatchOptions) (BatchHandle | error)
 
 	// Commit persists the pending changes to the database.
 	// If the batch is readonly, Commit() will fail with ErrReadOnlyBatch; Abort()
 	// should be used instead.
 	// If the BatchHandle is empty, Commit() will fail with ErrNotBoundToBatch.
-	Commit(bh BatchHandle) error {access.Read}
+	//
+	// Requires: at least one tag on Database.
+	// Also verifies that any changes to data and ACLs are allowed for the caller,
+	// since the batch is signed by the committer. Since only the final value for
+	// each key is committed and synced, changes to data need to be allowed by
+	// the ACL before or after the batch. Specifically, adding Write permission,
+	// changing a value based on it, then removing Write permission within a batch
+	// is not allowed because it cannot be verified by remote peers.
+	Commit(bh BatchHandle) error
 
 	// Abort notifies the server that any pending changes can be discarded.
 	// It is not strictly required, but it may allow the server to release locks
 	// or other resources sooner than if it was not called.
 	// If the BatchHandle is empty, Abort() will fail with ErrNotBoundToBatch.
-	Abort(bh BatchHandle) error {access.Read}
+	//
+	// Requires: at least one tag on Database.
+	Abort(bh BatchHandle) error
 
 	// PauseSync pauses sync for this database. Incoming sync, as well as outgoing
 	// sync of subsequent writes, will be disabled until ResumeSync is called.
 	// PauseSync is idempotent.
-	PauseSync() error {access.Write}
+	//
+	// Requires: Admin on Database.
+	PauseSync() error {access.Admin}
 
 	// ResumeSync resumes sync for this database. ResumeSync is idempotent.
-	ResumeSync() error {access.Write}
+	//
+	// Requires: Admin on Database.
+	ResumeSync() error {access.Admin}
 
 	// SetPermissions and GetPermissions are included from the Object interface.
 	// Permissions must include at least one admin.
+	//
+	// Requires: Admin on Database.
 	permissions.Object
 
 	// DatabaseWatcher implements the API to watch for updates in the database.
@@ -159,17 +192,25 @@
 }
 
 // Collection represents a set of Rows.
-// Collection.Glob operates over keys of Rows in the Collection.
+// Collection.Glob operates over keys of Rows in the Collection, requiring Read
+// on Collection.
 type Collection interface {
 	// Create creates this Collection. Permissions must be non-nil and include at
 	// least one admin.
+	//
+	// Requires: Write on Database.
+	// Also requires the creator's blessing to match the pattern in the newly
+	// created Collection's id.
 	Create(bh BatchHandle, perms access.Permissions) error {access.Write}
 
 	// Destroy destroys this Collection, permanently removing all of its data.
-	// TODO(sadovsky): Specify what happens to syncgroups.
-	Destroy(bh BatchHandle) error {access.Write}
+	//
+	// Requires: Admin on Collection or on Database.
+	// TODO(ivanpi): Prevent for synced Collections.
+	Destroy(bh BatchHandle) error {access.Admin}
 
 	// Exists returns true only if this Collection exists.
+	//
 	// Requires: at least one tag on Collection, or Read or Write on Database.
 	// Otherwise, ErrNoExistOrNoAccess is returned.
 	// If Database does not exist, returned value is identical to
@@ -177,14 +218,20 @@
 	Exists(bh BatchHandle) (bool | error)
 
 	// GetPermissions returns the current Permissions for the Collection.
+	//
+	// Requires: Admin on Collection.
 	GetPermissions(bh BatchHandle) (access.Permissions | error) {access.Admin}
 
 	// SetPermissions replaces the current Permissions for the Collection.
 	// Permissions must include at least one admin.
+	//
+	// Requires: Admin on Collection.
 	SetPermissions(bh BatchHandle, perms access.Permissions) error {access.Admin}
 
 	// DeleteRange deletes all rows in the given half-open range [start, limit).
 	// If limit is "", all rows with keys >= start are included.
+	//
+	// Requires: Write on Collection.
 	DeleteRange(bh BatchHandle, start, limit []byte) error {access.Write}
 
 	// Scan returns all rows in the given half-open range [start, limit). If limit
@@ -192,6 +239,8 @@
 	// Concurrency semantics are documented in model.go.
 	// Note, we use []byte rather than string for start and limit because they
 	// need not be valid UTF-8; VDL expects strings to be valid UTF-8.
+	//
+	// Requires: Read on Collection.
 	Scan(bh BatchHandle, start, limit []byte) stream<_, KeyValue> error {access.Read}
 }
 
@@ -199,6 +248,7 @@
 // All access checks are performed against the Collection ACL.
 type Row interface {
 	// Exists returns true only if this Row exists.
+	//
 	// Requires: Read or Write on Collection.
 	// Otherwise, ErrNoExistOrNoAccess is returned.
 	// If Collection does not exist, returned value is identical to
@@ -209,12 +259,18 @@
 	Exists(bh BatchHandle) (bool | error)
 
 	// Get returns the value for this Row.
+	//
+	// Requires: Read on Collection.
 	Get(bh BatchHandle) (any | error) {access.Read}
 
 	// Put writes the given value for this Row.
+	//
+	// Requires: Write on Collection.
 	Put(bh BatchHandle, value any) error {access.Write}
 
 	// Delete deletes this Row.
+	//
+	// Requires: Write on Collection.
 	Delete(bh BatchHandle) error {access.Write}
 }
 
@@ -291,13 +347,13 @@
 type SchemaManager interface {
 	// GetSchemaMetadata retrieves schema metadata for this database.
 	//
-	// Requires: Client must have at least Read access on the Database.
+	// Requires: Read on Database.
 	GetSchemaMetadata() (SchemaMetadata | error) {access.Read}
 
 	// SetSchemaMetadata stores schema metadata for this database.
 	//
-	// Requires: Client must have at least Write access on the Database.
-	SetSchemaMetadata(metadata SchemaMetadata) error {access.Write}
+	// Requires: Admin on Database.
+	SetSchemaMetadata(metadata SchemaMetadata) error {access.Admin}
 }
 
 // ConflictManager interface provides all the methods necessary to handle
@@ -321,7 +377,9 @@
 	// the batch sent for conflict resolution will be {key1, key2, key3}.
 	// If there was another concurrent batch {key2, key4}, then the batch sent
 	// for conflict resolution will be {key1, key2, key3, key4}.
-	StartConflictResolver() stream<ResolutionInfo, ConflictInfo> error {access.Write}
+	//
+	// Requires: Admin on Database.
+	StartConflictResolver() stream<ResolutionInfo, ConflictInfo> error {access.Admin}
 }
 
 // BlobManager is the interface for blob operations.
@@ -336,40 +394,60 @@
 //   after commit.
 type BlobManager interface {
 	// CreateBlob returns a BlobRef for a newly created blob.
+	//
+	// Requires: Write on Database.
 	CreateBlob() (br BlobRef | error) {access.Write}
 
 	// PutBlob appends the byte stream to the blob.
+	//
+	// Requires: Write on Database and valid BlobRef.
 	PutBlob(br BlobRef) stream<[]byte, _> error {access.Write}
 
 	// CommitBlob marks the blob as immutable.
+	//
+	// Requires: Write on Database and valid BlobRef.
 	CommitBlob(br BlobRef) error {access.Write}
 
 	// GetBlobSize returns the count of bytes written as part of the blob
 	// (committed or uncommitted).
-	GetBlobSize(br BlobRef) (int64 | error) {access.Read}
+	//
+	// Requires: at least one tag on Database and valid BlobRef.
+	GetBlobSize(br BlobRef) (int64 | error)
 
 	// DeleteBlob locally deletes the blob (committed or uncommitted).
-	DeleteBlob(br BlobRef) error {access.Write}
+	//
+	// Requires: at least one tag on Database and valid BlobRef.
+	DeleteBlob(br BlobRef) error
 
 	// GetBlob returns the byte stream from a committed blob starting at offset.
-	GetBlob(br BlobRef, offset int64) stream<_, []byte> error {access.Read}
+	//
+	// Requires: at least one tag on Database and valid BlobRef.
+	GetBlob(br BlobRef, offset int64) stream<_, []byte> error
 
 	// FetchBlob initiates fetching a blob if not locally found. priority
 	// controls the network priority of the blob. Higher priority blobs are
 	// fetched before the lower priority ones. However, an ongoing blob
 	// transfer is not interrupted. Status updates are streamed back to the
 	// client as fetch is in progress.
-	FetchBlob(br BlobRef, priority uint64) stream<_, BlobFetchStatus> error {access.Read}
+	//
+	// Requires: at least one tag on Database and valid BlobRef.
+	FetchBlob(br BlobRef, priority uint64) stream<_, BlobFetchStatus> error
 
 	// PinBlob locally pins the blob so that it is not evicted.
+	//
+	// Requires: Write on Database and valid BlobRef.
 	PinBlob(br BlobRef) error {access.Write}
 
 	// UnpinBlob locally unpins the blob so that it can be evicted if needed.
-	UnpinBlob(br BlobRef) error {access.Write}
+	//
+	// Requires: at least one tag on Database and valid BlobRef.
+	UnpinBlob(br BlobRef) error
 
 	// KeepBlob locally caches the blob with the specified rank. Lower
 	// ranked blobs are more eagerly evicted.
-	KeepBlob(br BlobRef, rank uint64) error {access.Write}
+	//
+	// Requires: at least one tag on Database and valid BlobRef.
+	KeepBlob(br BlobRef, rank uint64) error
 
 	// TODO(hpucha): Clarify how to pick priority and rank. Add API for
 	// efficient blob cloning. Options include: (1) CloneBlob RPC with an
@@ -405,7 +483,9 @@
 // - "" for the initial root entity update
 // The Value field is a StoreChange.
 // If the client has no access to a row specified in a change, that change is
-// excluded from the result stream.
+// excluded from the result stream. Collection updates are always sent and can
+// be used to determine that access to a collection is denied, potentially
+// skipping rows.
 //
 // Note: A single Watch Change batch may contain changes from more than one
 // batch as originally committed on a remote Syncbase or obtained from conflict
@@ -414,10 +494,14 @@
 type DatabaseWatcher interface {
 	// GetResumeMarker returns the ResumeMarker that points to the current end
 	// of the event log. GetResumeMarker() can be called on a batch.
-	GetResumeMarker(bh BatchHandle) (watch.ResumeMarker | error) {access.Read}
+	//
+	// Requires: at least one tag on Database.
+	GetResumeMarker(bh BatchHandle) (watch.ResumeMarker | error)
 
 	// WatchPatterns returns a stream of changes that match any of the specified
 	// patterns. At least one pattern must be specified.
+	//
+	// Requires: Read on Database.
 	WatchPatterns(resumeMarker watch.ResumeMarker, patterns []CollectionRowPattern) stream<_, watch.Change> error {access.Read}
 
 	watch.GlobWatcher
diff --git a/services/syncbase/syncbase.vdl.go b/services/syncbase/syncbase.vdl.go
index f0ff63a..e6925f7 100644
--- a/services/syncbase/syncbase.vdl.go
+++ b/services/syncbase/syncbase.vdl.go
@@ -22,7 +22,7 @@
 // Unless stated otherwise, each permissions tag requirement on a method also
 // implies requiring Resolve on all levels of hierarchy up to, but excluding,
 // the level requiring the tag.
-// TODO(ivanpi): Implemented on Exists, implement elsewhere.
+// TODO(ivanpi): Implement on SyncgroupManager methods.
 // ErrNoAccess, Err[No]Exist, ErrUnknownBatch are only returned if the caller
 // is allowed to call Exists on the receiver of the RPC (or the first missing
 // component of the hierarchy to the receiver); otherwise, the returned error
@@ -3227,7 +3227,7 @@
 // containing Service methods.
 //
 // Service represents a Vanadium Syncbase service.
-// Service.Glob operates over Database ids.
+// Service.Glob operates over Database ids, requiring Read on Service.
 type ServiceClientMethods interface {
 	// Object provides access control for Vanadium objects.
 	//
@@ -3276,10 +3276,14 @@
 	permissions.ObjectClientMethods
 	// DevModeUpdateVClock updates various bits of Syncbase virtual clock and
 	// clock daemon state based on the specified options.
-	// Requires --dev flag to be set (in addition to Admin check).
+	//
+	// Requires: Admin on Service.
+	// Also requires --dev flag to be set.
 	DevModeUpdateVClock(_ *context.T, uco DevModeUpdateVClockOpts, _ ...rpc.CallOpt) error
 	// DevModeGetTime returns the current time per the Syncbase clock.
-	// Requires --dev flag to be set (in addition to Admin check).
+	//
+	// Requires: Admin on Service.
+	// Also requires --dev flag to be set.
 	DevModeGetTime(*context.T, ...rpc.CallOpt) (time.Time, error)
 }
 
@@ -3314,7 +3318,7 @@
 // implements for Service.
 //
 // Service represents a Vanadium Syncbase service.
-// Service.Glob operates over Database ids.
+// Service.Glob operates over Database ids, requiring Read on Service.
 type ServiceServerMethods interface {
 	// Object provides access control for Vanadium objects.
 	//
@@ -3363,10 +3367,14 @@
 	permissions.ObjectServerMethods
 	// DevModeUpdateVClock updates various bits of Syncbase virtual clock and
 	// clock daemon state based on the specified options.
-	// Requires --dev flag to be set (in addition to Admin check).
+	//
+	// Requires: Admin on Service.
+	// Also requires --dev flag to be set.
 	DevModeUpdateVClock(_ *context.T, _ rpc.ServerCall, uco DevModeUpdateVClockOpts) error
 	// DevModeGetTime returns the current time per the Syncbase clock.
-	// Requires --dev flag to be set (in addition to Admin check).
+	//
+	// Requires: Admin on Service.
+	// Also requires --dev flag to be set.
 	DevModeGetTime(*context.T, rpc.ServerCall) (time.Time, error)
 }
 
@@ -3430,14 +3438,14 @@
 var descService = rpc.InterfaceDesc{
 	Name:    "Service",
 	PkgPath: "v.io/v23/services/syncbase",
-	Doc:     "// Service represents a Vanadium Syncbase service.\n// Service.Glob operates over Database ids.",
+	Doc:     "// Service represents a Vanadium Syncbase service.\n// Service.Glob operates over Database ids, requiring Read on Service.",
 	Embeds: []rpc.EmbedDesc{
 		{"Object", "v.io/v23/services/permissions", "// Object provides access control for Vanadium objects.\n//\n// Vanadium services implementing dynamic access control would typically embed\n// this interface and tag additional methods defined by the service with one of\n// Admin, Read, Write, Resolve etc. For example, the VDL definition of the\n// object would be:\n//\n//   package mypackage\n//\n//   import \"v.io/v23/security/access\"\n//   import \"v.io/v23/services/permissions\"\n//\n//   type MyObject interface {\n//     permissions.Object\n//     MyRead() (string, error) {access.Read}\n//     MyWrite(string) error    {access.Write}\n//   }\n//\n// If the set of pre-defined tags is insufficient, services may define their\n// own tag type and annotate all methods with this new type.\n//\n// Instead of embedding this Object interface, define SetPermissions and\n// GetPermissions in their own interface. Authorization policies will typically\n// respect annotations of a single type. For example, the VDL definition of an\n// object would be:\n//\n//  package mypackage\n//\n//  import \"v.io/v23/security/access\"\n//\n//  type MyTag string\n//\n//  const (\n//    Blue = MyTag(\"Blue\")\n//    Red  = MyTag(\"Red\")\n//  )\n//\n//  type MyObject interface {\n//    MyMethod() (string, error) {Blue}\n//\n//    // Allow clients to change access via the access.Object interface:\n//    SetPermissions(perms access.Permissions, version string) error         {Red}\n//    GetPermissions() (perms access.Permissions, version string, err error) {Blue}\n//  }"},
 	},
 	Methods: []rpc.MethodDesc{
 		{
 			Name: "DevModeUpdateVClock",
-			Doc:  "// DevModeUpdateVClock updates various bits of Syncbase virtual clock and\n// clock daemon state based on the specified options.\n// Requires --dev flag to be set (in addition to Admin check).",
+			Doc:  "// DevModeUpdateVClock updates various bits of Syncbase virtual clock and\n// clock daemon state based on the specified options.\n//\n// Requires: Admin on Service.\n// Also requires --dev flag to be set.",
 			InArgs: []rpc.ArgDesc{
 				{"uco", ``}, // DevModeUpdateVClockOpts
 			},
@@ -3445,7 +3453,7 @@
 		},
 		{
 			Name: "DevModeGetTime",
-			Doc:  "// DevModeGetTime returns the current time per the Syncbase clock.\n// Requires --dev flag to be set (in addition to Admin check).",
+			Doc:  "// DevModeGetTime returns the current time per the Syncbase clock.\n//\n// Requires: Admin on Service.\n// Also requires --dev flag to be set.",
 			OutArgs: []rpc.ArgDesc{
 				{"", ``}, // time.Time
 			},
@@ -3479,7 +3487,9 @@
 // - "" for the initial root entity update
 // The Value field is a StoreChange.
 // If the client has no access to a row specified in a change, that change is
-// excluded from the result stream.
+// excluded from the result stream. Collection updates are always sent and can
+// be used to determine that access to a collection is denied, potentially
+// skipping rows.
 //
 // Note: A single Watch Change batch may contain changes from more than one
 // batch as originally committed on a remote Syncbase or obtained from conflict
@@ -3491,9 +3501,13 @@
 	watch.GlobWatcherClientMethods
 	// GetResumeMarker returns the ResumeMarker that points to the current end
 	// of the event log. GetResumeMarker() can be called on a batch.
+	//
+	// Requires: at least one tag on Database.
 	GetResumeMarker(_ *context.T, bh BatchHandle, _ ...rpc.CallOpt) (watch.ResumeMarker, error)
 	// WatchPatterns returns a stream of changes that match any of the specified
 	// patterns. At least one pattern must be specified.
+	//
+	// Requires: Read on Database.
 	WatchPatterns(_ *context.T, resumeMarker watch.ResumeMarker, patterns []CollectionRowPattern, _ ...rpc.CallOpt) (DatabaseWatcherWatchPatternsClientCall, error)
 }
 
@@ -3622,7 +3636,9 @@
 // - "" for the initial root entity update
 // The Value field is a StoreChange.
 // If the client has no access to a row specified in a change, that change is
-// excluded from the result stream.
+// excluded from the result stream. Collection updates are always sent and can
+// be used to determine that access to a collection is denied, potentially
+// skipping rows.
 //
 // Note: A single Watch Change batch may contain changes from more than one
 // batch as originally committed on a remote Syncbase or obtained from conflict
@@ -3634,9 +3650,13 @@
 	watch.GlobWatcherServerMethods
 	// GetResumeMarker returns the ResumeMarker that points to the current end
 	// of the event log. GetResumeMarker() can be called on a batch.
+	//
+	// Requires: at least one tag on Database.
 	GetResumeMarker(_ *context.T, _ rpc.ServerCall, bh BatchHandle) (watch.ResumeMarker, error)
 	// WatchPatterns returns a stream of changes that match any of the specified
 	// patterns. At least one pattern must be specified.
+	//
+	// Requires: Read on Database.
 	WatchPatterns(_ *context.T, _ DatabaseWatcherWatchPatternsServerCall, resumeMarker watch.ResumeMarker, patterns []CollectionRowPattern) error
 }
 
@@ -3650,9 +3670,13 @@
 	watch.GlobWatcherServerStubMethods
 	// GetResumeMarker returns the ResumeMarker that points to the current end
 	// of the event log. GetResumeMarker() can be called on a batch.
+	//
+	// Requires: at least one tag on Database.
 	GetResumeMarker(_ *context.T, _ rpc.ServerCall, bh BatchHandle) (watch.ResumeMarker, error)
 	// WatchPatterns returns a stream of changes that match any of the specified
 	// patterns. At least one pattern must be specified.
+	//
+	// Requires: Read on Database.
 	WatchPatterns(_ *context.T, _ *DatabaseWatcherWatchPatternsServerCallStub, resumeMarker watch.ResumeMarker, patterns []CollectionRowPattern) error
 }
 
@@ -3710,25 +3734,24 @@
 var descDatabaseWatcher = rpc.InterfaceDesc{
 	Name:    "DatabaseWatcher",
 	PkgPath: "v.io/v23/services/syncbase",
-	Doc:     "// DatabaseWatcher allows a client to watch for updates to the database. For\n// each watch request, the client will receive a reliable stream of watch events\n// without re-ordering. Only rows and collections matching at least one of the\n// patterns are returned. Rows in collections with no Read access are also\n// filtered out.\n//\n// Watching is done by starting a streaming RPC. The RPC takes a ResumeMarker\n// argument that points to a particular place in the database event log. If an\n// empty ResumeMarker is provided, the WatchStream will begin with a Change\n// batch containing the initial state, always starting with an empty update for\n// the root entity. Otherwise, the WatchStream will contain only changes since\n// the provided ResumeMarker.\n// See watch.GlobWatcher for a detailed explanation of the behavior.\n//\n// The result stream consists of a never-ending sequence of Change messages\n// (until the call fails or is canceled). Each Change contains the Name field\n// with the Vanadium name of the watched entity relative to the database:\n// - \"<encCxId>/<rowKey>\" for row updates\n// - \"<encCxId>\" for collection updates\n// - \"\" for the initial root entity update\n// The Value field is a StoreChange.\n// If the client has no access to a row specified in a change, that change is\n// excluded from the result stream.\n//\n// Note: A single Watch Change batch may contain changes from more than one\n// batch as originally committed on a remote Syncbase or obtained from conflict\n// resolution. However, changes from a single original batch will always appear\n// in the same Change batch.",
+	Doc:     "// DatabaseWatcher allows a client to watch for updates to the database. For\n// each watch request, the client will receive a reliable stream of watch events\n// without re-ordering. Only rows and collections matching at least one of the\n// patterns are returned. Rows in collections with no Read access are also\n// filtered out.\n//\n// Watching is done by starting a streaming RPC. The RPC takes a ResumeMarker\n// argument that points to a particular place in the database event log. If an\n// empty ResumeMarker is provided, the WatchStream will begin with a Change\n// batch containing the initial state, always starting with an empty update for\n// the root entity. Otherwise, the WatchStream will contain only changes since\n// the provided ResumeMarker.\n// See watch.GlobWatcher for a detailed explanation of the behavior.\n//\n// The result stream consists of a never-ending sequence of Change messages\n// (until the call fails or is canceled). Each Change contains the Name field\n// with the Vanadium name of the watched entity relative to the database:\n// - \"<encCxId>/<rowKey>\" for row updates\n// - \"<encCxId>\" for collection updates\n// - \"\" for the initial root entity update\n// The Value field is a StoreChange.\n// If the client has no access to a row specified in a change, that change is\n// excluded from the result stream. Collection updates are always sent and can\n// be used to determine that access to a collection is denied, potentially\n// skipping rows.\n//\n// Note: A single Watch Change batch may contain changes from more than one\n// batch as originally committed on a remote Syncbase or obtained from conflict\n// resolution. However, changes from a single original batch will always appear\n// in the same Change batch.",
 	Embeds: []rpc.EmbedDesc{
 		{"GlobWatcher", "v.io/v23/services/watch", "// GlobWatcher allows a client to receive updates for changes to objects\n// that match a pattern.  See the package comments for details."},
 	},
 	Methods: []rpc.MethodDesc{
 		{
 			Name: "GetResumeMarker",
-			Doc:  "// GetResumeMarker returns the ResumeMarker that points to the current end\n// of the event log. GetResumeMarker() can be called on a batch.",
+			Doc:  "// GetResumeMarker returns the ResumeMarker that points to the current end\n// of the event log. GetResumeMarker() can be called on a batch.\n//\n// Requires: at least one tag on Database.",
 			InArgs: []rpc.ArgDesc{
 				{"bh", ``}, // BatchHandle
 			},
 			OutArgs: []rpc.ArgDesc{
 				{"", ``}, // watch.ResumeMarker
 			},
-			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Read"))},
 		},
 		{
 			Name: "WatchPatterns",
-			Doc:  "// WatchPatterns returns a stream of changes that match any of the specified\n// patterns. At least one pattern must be specified.",
+			Doc:  "// WatchPatterns returns a stream of changes that match any of the specified\n// patterns. At least one pattern must be specified.\n//\n// Requires: Read on Database.",
 			InArgs: []rpc.ArgDesc{
 				{"resumeMarker", ``}, // watch.ResumeMarker
 				{"patterns", ``},     // []CollectionRowPattern
@@ -4153,30 +4176,50 @@
 //   after commit.
 type BlobManagerClientMethods interface {
 	// CreateBlob returns a BlobRef for a newly created blob.
+	//
+	// Requires: Write on Database.
 	CreateBlob(*context.T, ...rpc.CallOpt) (br BlobRef, _ error)
 	// PutBlob appends the byte stream to the blob.
+	//
+	// Requires: Write on Database and valid BlobRef.
 	PutBlob(_ *context.T, br BlobRef, _ ...rpc.CallOpt) (BlobManagerPutBlobClientCall, error)
 	// CommitBlob marks the blob as immutable.
+	//
+	// Requires: Write on Database and valid BlobRef.
 	CommitBlob(_ *context.T, br BlobRef, _ ...rpc.CallOpt) error
 	// GetBlobSize returns the count of bytes written as part of the blob
 	// (committed or uncommitted).
+	//
+	// Requires: at least one tag on Database and valid BlobRef.
 	GetBlobSize(_ *context.T, br BlobRef, _ ...rpc.CallOpt) (int64, error)
 	// DeleteBlob locally deletes the blob (committed or uncommitted).
+	//
+	// Requires: at least one tag on Database and valid BlobRef.
 	DeleteBlob(_ *context.T, br BlobRef, _ ...rpc.CallOpt) error
 	// GetBlob returns the byte stream from a committed blob starting at offset.
+	//
+	// Requires: at least one tag on Database and valid BlobRef.
 	GetBlob(_ *context.T, br BlobRef, offset int64, _ ...rpc.CallOpt) (BlobManagerGetBlobClientCall, error)
 	// FetchBlob initiates fetching a blob if not locally found. priority
 	// controls the network priority of the blob. Higher priority blobs are
 	// fetched before the lower priority ones. However, an ongoing blob
 	// transfer is not interrupted. Status updates are streamed back to the
 	// client as fetch is in progress.
+	//
+	// Requires: at least one tag on Database and valid BlobRef.
 	FetchBlob(_ *context.T, br BlobRef, priority uint64, _ ...rpc.CallOpt) (BlobManagerFetchBlobClientCall, error)
 	// PinBlob locally pins the blob so that it is not evicted.
+	//
+	// Requires: Write on Database and valid BlobRef.
 	PinBlob(_ *context.T, br BlobRef, _ ...rpc.CallOpt) error
 	// UnpinBlob locally unpins the blob so that it can be evicted if needed.
+	//
+	// Requires: at least one tag on Database and valid BlobRef.
 	UnpinBlob(_ *context.T, br BlobRef, _ ...rpc.CallOpt) error
 	// KeepBlob locally caches the blob with the specified rank. Lower
 	// ranked blobs are more eagerly evicted.
+	//
+	// Requires: at least one tag on Database and valid BlobRef.
 	KeepBlob(_ *context.T, br BlobRef, rank uint64, _ ...rpc.CallOpt) error
 }
 
@@ -4472,30 +4515,50 @@
 //   after commit.
 type BlobManagerServerMethods interface {
 	// CreateBlob returns a BlobRef for a newly created blob.
+	//
+	// Requires: Write on Database.
 	CreateBlob(*context.T, rpc.ServerCall) (br BlobRef, _ error)
 	// PutBlob appends the byte stream to the blob.
+	//
+	// Requires: Write on Database and valid BlobRef.
 	PutBlob(_ *context.T, _ BlobManagerPutBlobServerCall, br BlobRef) error
 	// CommitBlob marks the blob as immutable.
+	//
+	// Requires: Write on Database and valid BlobRef.
 	CommitBlob(_ *context.T, _ rpc.ServerCall, br BlobRef) error
 	// GetBlobSize returns the count of bytes written as part of the blob
 	// (committed or uncommitted).
+	//
+	// Requires: at least one tag on Database and valid BlobRef.
 	GetBlobSize(_ *context.T, _ rpc.ServerCall, br BlobRef) (int64, error)
 	// DeleteBlob locally deletes the blob (committed or uncommitted).
+	//
+	// Requires: at least one tag on Database and valid BlobRef.
 	DeleteBlob(_ *context.T, _ rpc.ServerCall, br BlobRef) error
 	// GetBlob returns the byte stream from a committed blob starting at offset.
+	//
+	// Requires: at least one tag on Database and valid BlobRef.
 	GetBlob(_ *context.T, _ BlobManagerGetBlobServerCall, br BlobRef, offset int64) error
 	// FetchBlob initiates fetching a blob if not locally found. priority
 	// controls the network priority of the blob. Higher priority blobs are
 	// fetched before the lower priority ones. However, an ongoing blob
 	// transfer is not interrupted. Status updates are streamed back to the
 	// client as fetch is in progress.
+	//
+	// Requires: at least one tag on Database and valid BlobRef.
 	FetchBlob(_ *context.T, _ BlobManagerFetchBlobServerCall, br BlobRef, priority uint64) error
 	// PinBlob locally pins the blob so that it is not evicted.
+	//
+	// Requires: Write on Database and valid BlobRef.
 	PinBlob(_ *context.T, _ rpc.ServerCall, br BlobRef) error
 	// UnpinBlob locally unpins the blob so that it can be evicted if needed.
+	//
+	// Requires: at least one tag on Database and valid BlobRef.
 	UnpinBlob(_ *context.T, _ rpc.ServerCall, br BlobRef) error
 	// KeepBlob locally caches the blob with the specified rank. Lower
 	// ranked blobs are more eagerly evicted.
+	//
+	// Requires: at least one tag on Database and valid BlobRef.
 	KeepBlob(_ *context.T, _ rpc.ServerCall, br BlobRef, rank uint64) error
 }
 
@@ -4505,30 +4568,50 @@
 // is the streaming methods.
 type BlobManagerServerStubMethods interface {
 	// CreateBlob returns a BlobRef for a newly created blob.
+	//
+	// Requires: Write on Database.
 	CreateBlob(*context.T, rpc.ServerCall) (br BlobRef, _ error)
 	// PutBlob appends the byte stream to the blob.
+	//
+	// Requires: Write on Database and valid BlobRef.
 	PutBlob(_ *context.T, _ *BlobManagerPutBlobServerCallStub, br BlobRef) error
 	// CommitBlob marks the blob as immutable.
+	//
+	// Requires: Write on Database and valid BlobRef.
 	CommitBlob(_ *context.T, _ rpc.ServerCall, br BlobRef) error
 	// GetBlobSize returns the count of bytes written as part of the blob
 	// (committed or uncommitted).
+	//
+	// Requires: at least one tag on Database and valid BlobRef.
 	GetBlobSize(_ *context.T, _ rpc.ServerCall, br BlobRef) (int64, error)
 	// DeleteBlob locally deletes the blob (committed or uncommitted).
+	//
+	// Requires: at least one tag on Database and valid BlobRef.
 	DeleteBlob(_ *context.T, _ rpc.ServerCall, br BlobRef) error
 	// GetBlob returns the byte stream from a committed blob starting at offset.
+	//
+	// Requires: at least one tag on Database and valid BlobRef.
 	GetBlob(_ *context.T, _ *BlobManagerGetBlobServerCallStub, br BlobRef, offset int64) error
 	// FetchBlob initiates fetching a blob if not locally found. priority
 	// controls the network priority of the blob. Higher priority blobs are
 	// fetched before the lower priority ones. However, an ongoing blob
 	// transfer is not interrupted. Status updates are streamed back to the
 	// client as fetch is in progress.
+	//
+	// Requires: at least one tag on Database and valid BlobRef.
 	FetchBlob(_ *context.T, _ *BlobManagerFetchBlobServerCallStub, br BlobRef, priority uint64) error
 	// PinBlob locally pins the blob so that it is not evicted.
+	//
+	// Requires: Write on Database and valid BlobRef.
 	PinBlob(_ *context.T, _ rpc.ServerCall, br BlobRef) error
 	// UnpinBlob locally unpins the blob so that it can be evicted if needed.
+	//
+	// Requires: at least one tag on Database and valid BlobRef.
 	UnpinBlob(_ *context.T, _ rpc.ServerCall, br BlobRef) error
 	// KeepBlob locally caches the blob with the specified rank. Lower
 	// ranked blobs are more eagerly evicted.
+	//
+	// Requires: at least one tag on Database and valid BlobRef.
 	KeepBlob(_ *context.T, _ rpc.ServerCall, br BlobRef, rank uint64) error
 }
 
@@ -4620,7 +4703,7 @@
 	Methods: []rpc.MethodDesc{
 		{
 			Name: "CreateBlob",
-			Doc:  "// CreateBlob returns a BlobRef for a newly created blob.",
+			Doc:  "// CreateBlob returns a BlobRef for a newly created blob.\n//\n// Requires: Write on Database.",
 			OutArgs: []rpc.ArgDesc{
 				{"br", ``}, // BlobRef
 			},
@@ -4628,7 +4711,7 @@
 		},
 		{
 			Name: "PutBlob",
-			Doc:  "// PutBlob appends the byte stream to the blob.",
+			Doc:  "// PutBlob appends the byte stream to the blob.\n//\n// Requires: Write on Database and valid BlobRef.",
 			InArgs: []rpc.ArgDesc{
 				{"br", ``}, // BlobRef
 			},
@@ -4636,7 +4719,7 @@
 		},
 		{
 			Name: "CommitBlob",
-			Doc:  "// CommitBlob marks the blob as immutable.",
+			Doc:  "// CommitBlob marks the blob as immutable.\n//\n// Requires: Write on Database and valid BlobRef.",
 			InArgs: []rpc.ArgDesc{
 				{"br", ``}, // BlobRef
 			},
@@ -4644,44 +4727,40 @@
 		},
 		{
 			Name: "GetBlobSize",
-			Doc:  "// GetBlobSize returns the count of bytes written as part of the blob\n// (committed or uncommitted).",
+			Doc:  "// GetBlobSize returns the count of bytes written as part of the blob\n// (committed or uncommitted).\n//\n// Requires: at least one tag on Database and valid BlobRef.",
 			InArgs: []rpc.ArgDesc{
 				{"br", ``}, // BlobRef
 			},
 			OutArgs: []rpc.ArgDesc{
 				{"", ``}, // int64
 			},
-			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Read"))},
 		},
 		{
 			Name: "DeleteBlob",
-			Doc:  "// DeleteBlob locally deletes the blob (committed or uncommitted).",
+			Doc:  "// DeleteBlob locally deletes the blob (committed or uncommitted).\n//\n// Requires: at least one tag on Database and valid BlobRef.",
 			InArgs: []rpc.ArgDesc{
 				{"br", ``}, // BlobRef
 			},
-			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Write"))},
 		},
 		{
 			Name: "GetBlob",
-			Doc:  "// GetBlob returns the byte stream from a committed blob starting at offset.",
+			Doc:  "// GetBlob returns the byte stream from a committed blob starting at offset.\n//\n// Requires: at least one tag on Database and valid BlobRef.",
 			InArgs: []rpc.ArgDesc{
 				{"br", ``},     // BlobRef
 				{"offset", ``}, // int64
 			},
-			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Read"))},
 		},
 		{
 			Name: "FetchBlob",
-			Doc:  "// FetchBlob initiates fetching a blob if not locally found. priority\n// controls the network priority of the blob. Higher priority blobs are\n// fetched before the lower priority ones. However, an ongoing blob\n// transfer is not interrupted. Status updates are streamed back to the\n// client as fetch is in progress.",
+			Doc:  "// FetchBlob initiates fetching a blob if not locally found. priority\n// controls the network priority of the blob. Higher priority blobs are\n// fetched before the lower priority ones. However, an ongoing blob\n// transfer is not interrupted. Status updates are streamed back to the\n// client as fetch is in progress.\n//\n// Requires: at least one tag on Database and valid BlobRef.",
 			InArgs: []rpc.ArgDesc{
 				{"br", ``},       // BlobRef
 				{"priority", ``}, // uint64
 			},
-			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Read"))},
 		},
 		{
 			Name: "PinBlob",
-			Doc:  "// PinBlob locally pins the blob so that it is not evicted.",
+			Doc:  "// PinBlob locally pins the blob so that it is not evicted.\n//\n// Requires: Write on Database and valid BlobRef.",
 			InArgs: []rpc.ArgDesc{
 				{"br", ``}, // BlobRef
 			},
@@ -4689,20 +4768,18 @@
 		},
 		{
 			Name: "UnpinBlob",
-			Doc:  "// UnpinBlob locally unpins the blob so that it can be evicted if needed.",
+			Doc:  "// UnpinBlob locally unpins the blob so that it can be evicted if needed.\n//\n// Requires: at least one tag on Database and valid BlobRef.",
 			InArgs: []rpc.ArgDesc{
 				{"br", ``}, // BlobRef
 			},
-			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Write"))},
 		},
 		{
 			Name: "KeepBlob",
-			Doc:  "// KeepBlob locally caches the blob with the specified rank. Lower\n// ranked blobs are more eagerly evicted.",
+			Doc:  "// KeepBlob locally caches the blob with the specified rank. Lower\n// ranked blobs are more eagerly evicted.\n//\n// Requires: at least one tag on Database and valid BlobRef.",
 			InArgs: []rpc.ArgDesc{
 				{"br", ``},   // BlobRef
 				{"rank", ``}, // uint64
 			},
-			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Write"))},
 		},
 	},
 }
@@ -4863,11 +4940,11 @@
 type SchemaManagerClientMethods interface {
 	// GetSchemaMetadata retrieves schema metadata for this database.
 	//
-	// Requires: Client must have at least Read access on the Database.
+	// Requires: Read on Database.
 	GetSchemaMetadata(*context.T, ...rpc.CallOpt) (SchemaMetadata, error)
 	// SetSchemaMetadata stores schema metadata for this database.
 	//
-	// Requires: Client must have at least Write access on the Database.
+	// Requires: Admin on Database.
 	SetSchemaMetadata(_ *context.T, metadata SchemaMetadata, _ ...rpc.CallOpt) error
 }
 
@@ -4904,11 +4981,11 @@
 type SchemaManagerServerMethods interface {
 	// GetSchemaMetadata retrieves schema metadata for this database.
 	//
-	// Requires: Client must have at least Read access on the Database.
+	// Requires: Read on Database.
 	GetSchemaMetadata(*context.T, rpc.ServerCall) (SchemaMetadata, error)
 	// SetSchemaMetadata stores schema metadata for this database.
 	//
-	// Requires: Client must have at least Write access on the Database.
+	// Requires: Admin on Database.
 	SetSchemaMetadata(_ *context.T, _ rpc.ServerCall, metadata SchemaMetadata) error
 }
 
@@ -4974,7 +5051,7 @@
 	Methods: []rpc.MethodDesc{
 		{
 			Name: "GetSchemaMetadata",
-			Doc:  "// GetSchemaMetadata retrieves schema metadata for this database.\n//\n// Requires: Client must have at least Read access on the Database.",
+			Doc:  "// GetSchemaMetadata retrieves schema metadata for this database.\n//\n// Requires: Read on Database.",
 			OutArgs: []rpc.ArgDesc{
 				{"", ``}, // SchemaMetadata
 			},
@@ -4982,11 +5059,11 @@
 		},
 		{
 			Name: "SetSchemaMetadata",
-			Doc:  "// SetSchemaMetadata stores schema metadata for this database.\n//\n// Requires: Client must have at least Write access on the Database.",
+			Doc:  "// SetSchemaMetadata stores schema metadata for this database.\n//\n// Requires: Admin on Database.",
 			InArgs: []rpc.ArgDesc{
 				{"metadata", ``}, // SchemaMetadata
 			},
-			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Write"))},
+			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Admin"))},
 		},
 	},
 }
@@ -5015,6 +5092,8 @@
 	// the batch sent for conflict resolution will be {key1, key2, key3}.
 	// If there was another concurrent batch {key2, key4}, then the batch sent
 	// for conflict resolution will be {key1, key2, key3, key4}.
+	//
+	// Requires: Admin on Database.
 	StartConflictResolver(*context.T, ...rpc.CallOpt) (ConflictManagerStartConflictResolverClientCall, error)
 }
 
@@ -5169,6 +5248,8 @@
 	// the batch sent for conflict resolution will be {key1, key2, key3}.
 	// If there was another concurrent batch {key2, key4}, then the batch sent
 	// for conflict resolution will be {key1, key2, key3, key4}.
+	//
+	// Requires: Admin on Database.
 	StartConflictResolver(*context.T, ConflictManagerStartConflictResolverServerCall) error
 }
 
@@ -5195,6 +5276,8 @@
 	// the batch sent for conflict resolution will be {key1, key2, key3}.
 	// If there was another concurrent batch {key2, key4}, then the batch sent
 	// for conflict resolution will be {key1, key2, key3, key4}.
+	//
+	// Requires: Admin on Database.
 	StartConflictResolver(*context.T, *ConflictManagerStartConflictResolverServerCallStub) error
 }
 
@@ -5250,8 +5333,8 @@
 	Methods: []rpc.MethodDesc{
 		{
 			Name: "StartConflictResolver",
-			Doc:  "// StartConflictResolver registers a resolver for the database that is\n// associated with this ConflictManager and creates a stream to receive\n// conflicts and send resolutions.\n// Batches of ConflictInfos will be sent over with the Continued field\n// within the ConflictInfo representing the batch boundary. Client must\n// respond with a batch of ResolutionInfos in the same fashion.\n// A key is under conflict if two different values were written to it\n// concurrently (in logical time), i.e. neither value is an ancestor of the\n// other in the history graph.\n// A key under conflict can be a part of a batch committed on local or\n// remote or both syncbases. ConflictInfos for all keys in these two batches\n// are grouped together. These keys may themselves be under conflict; the\n// presented batch is a transitive closure of all batches containing keys\n// under conflict.\n// For example, for local batch {key1, key2} and remote batch {key1, key3},\n// the batch sent for conflict resolution will be {key1, key2, key3}.\n// If there was another concurrent batch {key2, key4}, then the batch sent\n// for conflict resolution will be {key1, key2, key3, key4}.",
-			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Write"))},
+			Doc:  "// StartConflictResolver registers a resolver for the database that is\n// associated with this ConflictManager and creates a stream to receive\n// conflicts and send resolutions.\n// Batches of ConflictInfos will be sent over with the Continued field\n// within the ConflictInfo representing the batch boundary. Client must\n// respond with a batch of ResolutionInfos in the same fashion.\n// A key is under conflict if two different values were written to it\n// concurrently (in logical time), i.e. neither value is an ancestor of the\n// other in the history graph.\n// A key under conflict can be a part of a batch committed on local or\n// remote or both syncbases. ConflictInfos for all keys in these two batches\n// are grouped together. These keys may themselves be under conflict; the\n// presented batch is a transitive closure of all batches containing keys\n// under conflict.\n// For example, for local batch {key1, key2} and remote batch {key1, key3},\n// the batch sent for conflict resolution will be {key1, key2, key3}.\n// If there was another concurrent batch {key2, key4}, then the batch sent\n// for conflict resolution will be {key1, key2, key3, key4}.\n//\n// Requires: Admin on Database.",
+			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Admin"))},
 		},
 	},
 }
@@ -5346,7 +5429,7 @@
 //
 // Database represents a set of Collections. Batches, queries, syncgroups, and
 // watch all operate at the Database level.
-// Database.Glob operates over Collection ids.
+// Database.Glob operates over Collection ids, requiring Read on Database.
 type DatabaseClientMethods interface {
 	// Object provides access control for Vanadium objects.
 	//
@@ -5415,7 +5498,9 @@
 	// - "" for the initial root entity update
 	// The Value field is a StoreChange.
 	// If the client has no access to a row specified in a change, that change is
-	// excluded from the result stream.
+	// excluded from the result stream. Collection updates are always sent and can
+	// be used to determine that access to a collection is denied, potentially
+	// skipping rows.
 	//
 	// Note: A single Watch Change batch may contain changes from more than one
 	// batch as originally committed on a remote Syncbase or obtained from conflict
@@ -5444,12 +5529,18 @@
 	ConflictManagerClientMethods
 	// Create creates this Database. Permissions must be non-nil and include at
 	// least one admin.
-	// Create requires the caller to have Write permission at the Service.
+	//
+	// Requires: Write on Service.
+	// Also requires the creator's blessing to match the pattern in the newly
+	// created Database's id. This requirement is waived for Admin on Service.
 	Create(_ *context.T, metadata *SchemaMetadata, perms access.Permissions, _ ...rpc.CallOpt) error
 	// Destroy destroys this Database, permanently removing all of its data.
 	// TODO(sadovsky): Specify what happens to syncgroups.
+	//
+	// Requires: Admin on Database or Service.
 	Destroy(*context.T, ...rpc.CallOpt) error
 	// Exists returns true only if this Database exists.
+	//
 	// Requires: at least one tag on Database, or Read or Write on Service.
 	// Otherwise, ErrNoExistOrNoAccess is returned.
 	Exists(*context.T, ...rpc.CallOpt) (bool, error)
@@ -5460,15 +5551,18 @@
 	// visible in a new snapshot of the Database, ignoring user batches.
 	// (Note that the same issue is present in glob on Collection, where Scan can
 	// be used instead if batch awareness is required.)
-	// Note, the glob client library checks Resolve access on every component
-	// along the path (by doing a Dispatcher.Lookup), whereas this doesn't happen
-	// for other RPCs.
-	// TODO(ivanpi): Resolve should be checked on all RPCs.
 	// TODO(sadovsky): Maybe switch to streaming RPC.
+	//
+	// Requires: Read on Database.
 	ListCollections(_ *context.T, bh BatchHandle, _ ...rpc.CallOpt) ([]Id, error)
 	// Exec executes a syncQL query with positional parameters and returns all
 	// results as specified by the query's select/delete statement.
 	// Concurrency semantics are documented in model.go.
+	//
+	// Requires: Read and/or Write on Collection, depending on the query:
+	// - Read for select
+	// - Read and Write for delete
+	// TODO(ivanpi): Write should suffice for delete without v in WHERE clause.
 	Exec(_ *context.T, bh BatchHandle, query string, params []*vom.RawBytes, _ ...rpc.CallOpt) (DatabaseExecClientCall, error)
 	// BeginBatch creates a new batch. It returns a batch handle to pass in when
 	// calling batch-aware RPCs.
@@ -5478,22 +5572,38 @@
 	// a batch. Note that glob RPCs are not batch-aware.
 	// TODO(sadovsky): Maybe make BatchOptions optional. Also, rename 'bo' to
 	// 'opts' once v.io/i/912 is resolved for Java.
+	//
+	// Requires: at least one tag on Database.
 	BeginBatch(_ *context.T, bo BatchOptions, _ ...rpc.CallOpt) (BatchHandle, error)
 	// Commit persists the pending changes to the database.
 	// If the batch is readonly, Commit() will fail with ErrReadOnlyBatch; Abort()
 	// should be used instead.
 	// If the BatchHandle is empty, Commit() will fail with ErrNotBoundToBatch.
+	//
+	// Requires: at least one tag on Database.
+	// Also verifies that any changes to data and ACLs are allowed for the caller,
+	// since the batch is signed by the committer. Since only the final value for
+	// each key is committed and synced, changes to data need to be allowed by
+	// the ACL before or after the batch. Specifically, adding Write permission,
+	// changing a value based on it, then removing Write permission within a batch
+	// is not allowed because it cannot be verified by remote peers.
 	Commit(_ *context.T, bh BatchHandle, _ ...rpc.CallOpt) error
 	// Abort notifies the server that any pending changes can be discarded.
 	// It is not strictly required, but it may allow the server to release locks
 	// or other resources sooner than if it was not called.
 	// If the BatchHandle is empty, Abort() will fail with ErrNotBoundToBatch.
+	//
+	// Requires: at least one tag on Database.
 	Abort(_ *context.T, bh BatchHandle, _ ...rpc.CallOpt) error
 	// PauseSync pauses sync for this database. Incoming sync, as well as outgoing
 	// sync of subsequent writes, will be disabled until ResumeSync is called.
 	// PauseSync is idempotent.
+	//
+	// Requires: Admin on Database.
 	PauseSync(*context.T, ...rpc.CallOpt) error
 	// ResumeSync resumes sync for this database. ResumeSync is idempotent.
+	//
+	// Requires: Admin on Database.
 	ResumeSync(*context.T, ...rpc.CallOpt) error
 }
 
@@ -5646,7 +5756,7 @@
 //
 // Database represents a set of Collections. Batches, queries, syncgroups, and
 // watch all operate at the Database level.
-// Database.Glob operates over Collection ids.
+// Database.Glob operates over Collection ids, requiring Read on Database.
 type DatabaseServerMethods interface {
 	// Object provides access control for Vanadium objects.
 	//
@@ -5715,7 +5825,9 @@
 	// - "" for the initial root entity update
 	// The Value field is a StoreChange.
 	// If the client has no access to a row specified in a change, that change is
-	// excluded from the result stream.
+	// excluded from the result stream. Collection updates are always sent and can
+	// be used to determine that access to a collection is denied, potentially
+	// skipping rows.
 	//
 	// Note: A single Watch Change batch may contain changes from more than one
 	// batch as originally committed on a remote Syncbase or obtained from conflict
@@ -5744,12 +5856,18 @@
 	ConflictManagerServerMethods
 	// Create creates this Database. Permissions must be non-nil and include at
 	// least one admin.
-	// Create requires the caller to have Write permission at the Service.
+	//
+	// Requires: Write on Service.
+	// Also requires the creator's blessing to match the pattern in the newly
+	// created Database's id. This requirement is waived for Admin on Service.
 	Create(_ *context.T, _ rpc.ServerCall, metadata *SchemaMetadata, perms access.Permissions) error
 	// Destroy destroys this Database, permanently removing all of its data.
 	// TODO(sadovsky): Specify what happens to syncgroups.
+	//
+	// Requires: Admin on Database or Service.
 	Destroy(*context.T, rpc.ServerCall) error
 	// Exists returns true only if this Database exists.
+	//
 	// Requires: at least one tag on Database, or Read or Write on Service.
 	// Otherwise, ErrNoExistOrNoAccess is returned.
 	Exists(*context.T, rpc.ServerCall) (bool, error)
@@ -5760,15 +5878,18 @@
 	// visible in a new snapshot of the Database, ignoring user batches.
 	// (Note that the same issue is present in glob on Collection, where Scan can
 	// be used instead if batch awareness is required.)
-	// Note, the glob client library checks Resolve access on every component
-	// along the path (by doing a Dispatcher.Lookup), whereas this doesn't happen
-	// for other RPCs.
-	// TODO(ivanpi): Resolve should be checked on all RPCs.
 	// TODO(sadovsky): Maybe switch to streaming RPC.
+	//
+	// Requires: Read on Database.
 	ListCollections(_ *context.T, _ rpc.ServerCall, bh BatchHandle) ([]Id, error)
 	// Exec executes a syncQL query with positional parameters and returns all
 	// results as specified by the query's select/delete statement.
 	// Concurrency semantics are documented in model.go.
+	//
+	// Requires: Read and/or Write on Collection, depending on the query:
+	// - Read for select
+	// - Read and Write for delete
+	// TODO(ivanpi): Write should suffice for delete without v in WHERE clause.
 	Exec(_ *context.T, _ DatabaseExecServerCall, bh BatchHandle, query string, params []*vom.RawBytes) error
 	// BeginBatch creates a new batch. It returns a batch handle to pass in when
 	// calling batch-aware RPCs.
@@ -5778,22 +5899,38 @@
 	// a batch. Note that glob RPCs are not batch-aware.
 	// TODO(sadovsky): Maybe make BatchOptions optional. Also, rename 'bo' to
 	// 'opts' once v.io/i/912 is resolved for Java.
+	//
+	// Requires: at least one tag on Database.
 	BeginBatch(_ *context.T, _ rpc.ServerCall, bo BatchOptions) (BatchHandle, error)
 	// Commit persists the pending changes to the database.
 	// If the batch is readonly, Commit() will fail with ErrReadOnlyBatch; Abort()
 	// should be used instead.
 	// If the BatchHandle is empty, Commit() will fail with ErrNotBoundToBatch.
+	//
+	// Requires: at least one tag on Database.
+	// Also verifies that any changes to data and ACLs are allowed for the caller,
+	// since the batch is signed by the committer. Since only the final value for
+	// each key is committed and synced, changes to data need to be allowed by
+	// the ACL before or after the batch. Specifically, adding Write permission,
+	// changing a value based on it, then removing Write permission within a batch
+	// is not allowed because it cannot be verified by remote peers.
 	Commit(_ *context.T, _ rpc.ServerCall, bh BatchHandle) error
 	// Abort notifies the server that any pending changes can be discarded.
 	// It is not strictly required, but it may allow the server to release locks
 	// or other resources sooner than if it was not called.
 	// If the BatchHandle is empty, Abort() will fail with ErrNotBoundToBatch.
+	//
+	// Requires: at least one tag on Database.
 	Abort(_ *context.T, _ rpc.ServerCall, bh BatchHandle) error
 	// PauseSync pauses sync for this database. Incoming sync, as well as outgoing
 	// sync of subsequent writes, will be disabled until ResumeSync is called.
 	// PauseSync is idempotent.
+	//
+	// Requires: Admin on Database.
 	PauseSync(*context.T, rpc.ServerCall) error
 	// ResumeSync resumes sync for this database. ResumeSync is idempotent.
+	//
+	// Requires: Admin on Database.
 	ResumeSync(*context.T, rpc.ServerCall) error
 }
 
@@ -5869,7 +6006,9 @@
 	// - "" for the initial root entity update
 	// The Value field is a StoreChange.
 	// If the client has no access to a row specified in a change, that change is
-	// excluded from the result stream.
+	// excluded from the result stream. Collection updates are always sent and can
+	// be used to determine that access to a collection is denied, potentially
+	// skipping rows.
 	//
 	// Note: A single Watch Change batch may contain changes from more than one
 	// batch as originally committed on a remote Syncbase or obtained from conflict
@@ -5898,12 +6037,18 @@
 	ConflictManagerServerStubMethods
 	// Create creates this Database. Permissions must be non-nil and include at
 	// least one admin.
-	// Create requires the caller to have Write permission at the Service.
+	//
+	// Requires: Write on Service.
+	// Also requires the creator's blessing to match the pattern in the newly
+	// created Database's id. This requirement is waived for Admin on Service.
 	Create(_ *context.T, _ rpc.ServerCall, metadata *SchemaMetadata, perms access.Permissions) error
 	// Destroy destroys this Database, permanently removing all of its data.
 	// TODO(sadovsky): Specify what happens to syncgroups.
+	//
+	// Requires: Admin on Database or Service.
 	Destroy(*context.T, rpc.ServerCall) error
 	// Exists returns true only if this Database exists.
+	//
 	// Requires: at least one tag on Database, or Read or Write on Service.
 	// Otherwise, ErrNoExistOrNoAccess is returned.
 	Exists(*context.T, rpc.ServerCall) (bool, error)
@@ -5914,15 +6059,18 @@
 	// visible in a new snapshot of the Database, ignoring user batches.
 	// (Note that the same issue is present in glob on Collection, where Scan can
 	// be used instead if batch awareness is required.)
-	// Note, the glob client library checks Resolve access on every component
-	// along the path (by doing a Dispatcher.Lookup), whereas this doesn't happen
-	// for other RPCs.
-	// TODO(ivanpi): Resolve should be checked on all RPCs.
 	// TODO(sadovsky): Maybe switch to streaming RPC.
+	//
+	// Requires: Read on Database.
 	ListCollections(_ *context.T, _ rpc.ServerCall, bh BatchHandle) ([]Id, error)
 	// Exec executes a syncQL query with positional parameters and returns all
 	// results as specified by the query's select/delete statement.
 	// Concurrency semantics are documented in model.go.
+	//
+	// Requires: Read and/or Write on Collection, depending on the query:
+	// - Read for select
+	// - Read and Write for delete
+	// TODO(ivanpi): Write should suffice for delete without v in WHERE clause.
 	Exec(_ *context.T, _ *DatabaseExecServerCallStub, bh BatchHandle, query string, params []*vom.RawBytes) error
 	// BeginBatch creates a new batch. It returns a batch handle to pass in when
 	// calling batch-aware RPCs.
@@ -5932,22 +6080,38 @@
 	// a batch. Note that glob RPCs are not batch-aware.
 	// TODO(sadovsky): Maybe make BatchOptions optional. Also, rename 'bo' to
 	// 'opts' once v.io/i/912 is resolved for Java.
+	//
+	// Requires: at least one tag on Database.
 	BeginBatch(_ *context.T, _ rpc.ServerCall, bo BatchOptions) (BatchHandle, error)
 	// Commit persists the pending changes to the database.
 	// If the batch is readonly, Commit() will fail with ErrReadOnlyBatch; Abort()
 	// should be used instead.
 	// If the BatchHandle is empty, Commit() will fail with ErrNotBoundToBatch.
+	//
+	// Requires: at least one tag on Database.
+	// Also verifies that any changes to data and ACLs are allowed for the caller,
+	// since the batch is signed by the committer. Since only the final value for
+	// each key is committed and synced, changes to data need to be allowed by
+	// the ACL before or after the batch. Specifically, adding Write permission,
+	// changing a value based on it, then removing Write permission within a batch
+	// is not allowed because it cannot be verified by remote peers.
 	Commit(_ *context.T, _ rpc.ServerCall, bh BatchHandle) error
 	// Abort notifies the server that any pending changes can be discarded.
 	// It is not strictly required, but it may allow the server to release locks
 	// or other resources sooner than if it was not called.
 	// If the BatchHandle is empty, Abort() will fail with ErrNotBoundToBatch.
+	//
+	// Requires: at least one tag on Database.
 	Abort(_ *context.T, _ rpc.ServerCall, bh BatchHandle) error
 	// PauseSync pauses sync for this database. Incoming sync, as well as outgoing
 	// sync of subsequent writes, will be disabled until ResumeSync is called.
 	// PauseSync is idempotent.
+	//
+	// Requires: Admin on Database.
 	PauseSync(*context.T, rpc.ServerCall) error
 	// ResumeSync resumes sync for this database. ResumeSync is idempotent.
+	//
+	// Requires: Admin on Database.
 	ResumeSync(*context.T, rpc.ServerCall) error
 }
 
@@ -6047,10 +6211,10 @@
 var descDatabase = rpc.InterfaceDesc{
 	Name:    "Database",
 	PkgPath: "v.io/v23/services/syncbase",
-	Doc:     "// Database represents a set of Collections. Batches, queries, syncgroups, and\n// watch all operate at the Database level.\n// Database.Glob operates over Collection ids.",
+	Doc:     "// Database represents a set of Collections. Batches, queries, syncgroups, and\n// watch all operate at the Database level.\n// Database.Glob operates over Collection ids, requiring Read on Database.",
 	Embeds: []rpc.EmbedDesc{
 		{"Object", "v.io/v23/services/permissions", "// Object provides access control for Vanadium objects.\n//\n// Vanadium services implementing dynamic access control would typically embed\n// this interface and tag additional methods defined by the service with one of\n// Admin, Read, Write, Resolve etc. For example, the VDL definition of the\n// object would be:\n//\n//   package mypackage\n//\n//   import \"v.io/v23/security/access\"\n//   import \"v.io/v23/services/permissions\"\n//\n//   type MyObject interface {\n//     permissions.Object\n//     MyRead() (string, error) {access.Read}\n//     MyWrite(string) error    {access.Write}\n//   }\n//\n// If the set of pre-defined tags is insufficient, services may define their\n// own tag type and annotate all methods with this new type.\n//\n// Instead of embedding this Object interface, define SetPermissions and\n// GetPermissions in their own interface. Authorization policies will typically\n// respect annotations of a single type. For example, the VDL definition of an\n// object would be:\n//\n//  package mypackage\n//\n//  import \"v.io/v23/security/access\"\n//\n//  type MyTag string\n//\n//  const (\n//    Blue = MyTag(\"Blue\")\n//    Red  = MyTag(\"Red\")\n//  )\n//\n//  type MyObject interface {\n//    MyMethod() (string, error) {Blue}\n//\n//    // Allow clients to change access via the access.Object interface:\n//    SetPermissions(perms access.Permissions, version string) error         {Red}\n//    GetPermissions() (perms access.Permissions, version string, err error) {Blue}\n//  }"},
-		{"DatabaseWatcher", "v.io/v23/services/syncbase", "// DatabaseWatcher allows a client to watch for updates to the database. For\n// each watch request, the client will receive a reliable stream of watch events\n// without re-ordering. Only rows and collections matching at least one of the\n// patterns are returned. Rows in collections with no Read access are also\n// filtered out.\n//\n// Watching is done by starting a streaming RPC. The RPC takes a ResumeMarker\n// argument that points to a particular place in the database event log. If an\n// empty ResumeMarker is provided, the WatchStream will begin with a Change\n// batch containing the initial state, always starting with an empty update for\n// the root entity. Otherwise, the WatchStream will contain only changes since\n// the provided ResumeMarker.\n// See watch.GlobWatcher for a detailed explanation of the behavior.\n//\n// The result stream consists of a never-ending sequence of Change messages\n// (until the call fails or is canceled). Each Change contains the Name field\n// with the Vanadium name of the watched entity relative to the database:\n// - \"<encCxId>/<rowKey>\" for row updates\n// - \"<encCxId>\" for collection updates\n// - \"\" for the initial root entity update\n// The Value field is a StoreChange.\n// If the client has no access to a row specified in a change, that change is\n// excluded from the result stream.\n//\n// Note: A single Watch Change batch may contain changes from more than one\n// batch as originally committed on a remote Syncbase or obtained from conflict\n// resolution. However, changes from a single original batch will always appear\n// in the same Change batch."},
+		{"DatabaseWatcher", "v.io/v23/services/syncbase", "// DatabaseWatcher allows a client to watch for updates to the database. For\n// each watch request, the client will receive a reliable stream of watch events\n// without re-ordering. Only rows and collections matching at least one of the\n// patterns are returned. Rows in collections with no Read access are also\n// filtered out.\n//\n// Watching is done by starting a streaming RPC. The RPC takes a ResumeMarker\n// argument that points to a particular place in the database event log. If an\n// empty ResumeMarker is provided, the WatchStream will begin with a Change\n// batch containing the initial state, always starting with an empty update for\n// the root entity. Otherwise, the WatchStream will contain only changes since\n// the provided ResumeMarker.\n// See watch.GlobWatcher for a detailed explanation of the behavior.\n//\n// The result stream consists of a never-ending sequence of Change messages\n// (until the call fails or is canceled). Each Change contains the Name field\n// with the Vanadium name of the watched entity relative to the database:\n// - \"<encCxId>/<rowKey>\" for row updates\n// - \"<encCxId>\" for collection updates\n// - \"\" for the initial root entity update\n// The Value field is a StoreChange.\n// If the client has no access to a row specified in a change, that change is\n// excluded from the result stream. Collection updates are always sent and can\n// be used to determine that access to a collection is denied, potentially\n// skipping rows.\n//\n// Note: A single Watch Change batch may contain changes from more than one\n// batch as originally committed on a remote Syncbase or obtained from conflict\n// resolution. However, changes from a single original batch will always appear\n// in the same Change batch."},
 		{"SyncgroupManager", "v.io/v23/services/syncbase", "// SyncgroupManager is the interface for syncgroup operations.\n// TODO(hpucha): Add blessings to create/join and add a refresh method."},
 		{"BlobManager", "v.io/v23/services/syncbase", "// BlobManager is the interface for blob operations.\n//\n// Description of API for resumable blob creation (append-only):\n// - Up until commit, a BlobRef may be used with PutBlob, GetBlobSize,\n//   DeleteBlob, and CommitBlob. Blob creation may be resumed by obtaining the\n//   current blob size via GetBlobSize and appending to the blob via PutBlob.\n// - After commit, a blob is immutable, at which point PutBlob and CommitBlob\n//   may no longer be used.\n// - All other methods (GetBlob, FetchBlob, PinBlob, etc.) may only be used\n//   after commit."},
 		{"SchemaManager", "v.io/v23/services/syncbase", "// SchemaManager implements the API for managing schema metadata attached\n// to a Database."},
@@ -6059,7 +6223,7 @@
 	Methods: []rpc.MethodDesc{
 		{
 			Name: "Create",
-			Doc:  "// Create creates this Database. Permissions must be non-nil and include at\n// least one admin.\n// Create requires the caller to have Write permission at the Service.",
+			Doc:  "// Create creates this Database. Permissions must be non-nil and include at\n// least one admin.\n//\n// Requires: Write on Service.\n// Also requires the creator's blessing to match the pattern in the newly\n// created Database's id. This requirement is waived for Admin on Service.",
 			InArgs: []rpc.ArgDesc{
 				{"metadata", ``}, // *SchemaMetadata
 				{"perms", ``},    // access.Permissions
@@ -6068,19 +6232,19 @@
 		},
 		{
 			Name: "Destroy",
-			Doc:  "// Destroy destroys this Database, permanently removing all of its data.\n// TODO(sadovsky): Specify what happens to syncgroups.",
-			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Write"))},
+			Doc:  "// Destroy destroys this Database, permanently removing all of its data.\n// TODO(sadovsky): Specify what happens to syncgroups.\n//\n// Requires: Admin on Database or Service.",
+			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Admin"))},
 		},
 		{
 			Name: "Exists",
-			Doc:  "// Exists returns true only if this Database exists.\n// Requires: at least one tag on Database, or Read or Write on Service.\n// Otherwise, ErrNoExistOrNoAccess is returned.",
+			Doc:  "// Exists returns true only if this Database exists.\n//\n// Requires: at least one tag on Database, or Read or Write on Service.\n// Otherwise, ErrNoExistOrNoAccess is returned.",
 			OutArgs: []rpc.ArgDesc{
 				{"", ``}, // bool
 			},
 		},
 		{
 			Name: "ListCollections",
-			Doc:  "// ListCollections returns an unsorted list of all Collection ids that the\n// caller is allowed to see.\n// This method exists on Database but not on Service because for the latter\n// we can simply use glob, while for the former glob lists only Collections\n// visible in a new snapshot of the Database, ignoring user batches.\n// (Note that the same issue is present in glob on Collection, where Scan can\n// be used instead if batch awareness is required.)\n// Note, the glob client library checks Resolve access on every component\n// along the path (by doing a Dispatcher.Lookup), whereas this doesn't happen\n// for other RPCs.\n// TODO(ivanpi): Resolve should be checked on all RPCs.\n// TODO(sadovsky): Maybe switch to streaming RPC.",
+			Doc:  "// ListCollections returns an unsorted list of all Collection ids that the\n// caller is allowed to see.\n// This method exists on Database but not on Service because for the latter\n// we can simply use glob, while for the former glob lists only Collections\n// visible in a new snapshot of the Database, ignoring user batches.\n// (Note that the same issue is present in glob on Collection, where Scan can\n// be used instead if batch awareness is required.)\n// TODO(sadovsky): Maybe switch to streaming RPC.\n//\n// Requires: Read on Database.",
 			InArgs: []rpc.ArgDesc{
 				{"bh", ``}, // BatchHandle
 			},
@@ -6091,50 +6255,46 @@
 		},
 		{
 			Name: "Exec",
-			Doc:  "// Exec executes a syncQL query with positional parameters and returns all\n// results as specified by the query's select/delete statement.\n// Concurrency semantics are documented in model.go.",
+			Doc:  "// Exec executes a syncQL query with positional parameters and returns all\n// results as specified by the query's select/delete statement.\n// Concurrency semantics are documented in model.go.\n//\n// Requires: Read and/or Write on Collection, depending on the query:\n// - Read for select\n// - Read and Write for delete\n// TODO(ivanpi): Write should suffice for delete without v in WHERE clause.",
 			InArgs: []rpc.ArgDesc{
 				{"bh", ``},     // BatchHandle
 				{"query", ``},  // string
 				{"params", ``}, // []*vom.RawBytes
 			},
-			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Read"))},
 		},
 		{
 			Name: "BeginBatch",
-			Doc:  "// BeginBatch creates a new batch. It returns a batch handle to pass in when\n// calling batch-aware RPCs.\n// Concurrency semantics are documented in model.go.\n// All batch-aware RPCs can also be called outside a batch (with an empty\n// handle), with the exception of Commit and Abort which only make sense on\n// a batch. Note that glob RPCs are not batch-aware.\n// TODO(sadovsky): Maybe make BatchOptions optional. Also, rename 'bo' to\n// 'opts' once v.io/i/912 is resolved for Java.",
+			Doc:  "// BeginBatch creates a new batch. It returns a batch handle to pass in when\n// calling batch-aware RPCs.\n// Concurrency semantics are documented in model.go.\n// All batch-aware RPCs can also be called outside a batch (with an empty\n// handle), with the exception of Commit and Abort which only make sense on\n// a batch. Note that glob RPCs are not batch-aware.\n// TODO(sadovsky): Maybe make BatchOptions optional. Also, rename 'bo' to\n// 'opts' once v.io/i/912 is resolved for Java.\n//\n// Requires: at least one tag on Database.",
 			InArgs: []rpc.ArgDesc{
 				{"bo", ``}, // BatchOptions
 			},
 			OutArgs: []rpc.ArgDesc{
 				{"", ``}, // BatchHandle
 			},
-			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Read"))},
 		},
 		{
 			Name: "Commit",
-			Doc:  "// Commit persists the pending changes to the database.\n// If the batch is readonly, Commit() will fail with ErrReadOnlyBatch; Abort()\n// should be used instead.\n// If the BatchHandle is empty, Commit() will fail with ErrNotBoundToBatch.",
+			Doc:  "// Commit persists the pending changes to the database.\n// If the batch is readonly, Commit() will fail with ErrReadOnlyBatch; Abort()\n// should be used instead.\n// If the BatchHandle is empty, Commit() will fail with ErrNotBoundToBatch.\n//\n// Requires: at least one tag on Database.\n// Also verifies that any changes to data and ACLs are allowed for the caller,\n// since the batch is signed by the committer. Since only the final value for\n// each key is committed and synced, changes to data need to be allowed by\n// the ACL before or after the batch. Specifically, adding Write permission,\n// changing a value based on it, then removing Write permission within a batch\n// is not allowed because it cannot be verified by remote peers.",
 			InArgs: []rpc.ArgDesc{
 				{"bh", ``}, // BatchHandle
 			},
-			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Read"))},
 		},
 		{
 			Name: "Abort",
-			Doc:  "// Abort notifies the server that any pending changes can be discarded.\n// It is not strictly required, but it may allow the server to release locks\n// or other resources sooner than if it was not called.\n// If the BatchHandle is empty, Abort() will fail with ErrNotBoundToBatch.",
+			Doc:  "// Abort notifies the server that any pending changes can be discarded.\n// It is not strictly required, but it may allow the server to release locks\n// or other resources sooner than if it was not called.\n// If the BatchHandle is empty, Abort() will fail with ErrNotBoundToBatch.\n//\n// Requires: at least one tag on Database.",
 			InArgs: []rpc.ArgDesc{
 				{"bh", ``}, // BatchHandle
 			},
-			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Read"))},
 		},
 		{
 			Name: "PauseSync",
-			Doc:  "// PauseSync pauses sync for this database. Incoming sync, as well as outgoing\n// sync of subsequent writes, will be disabled until ResumeSync is called.\n// PauseSync is idempotent.",
-			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Write"))},
+			Doc:  "// PauseSync pauses sync for this database. Incoming sync, as well as outgoing\n// sync of subsequent writes, will be disabled until ResumeSync is called.\n// PauseSync is idempotent.\n//\n// Requires: Admin on Database.",
+			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Admin"))},
 		},
 		{
 			Name: "ResumeSync",
-			Doc:  "// ResumeSync resumes sync for this database. ResumeSync is idempotent.",
-			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Write"))},
+			Doc:  "// ResumeSync resumes sync for this database. ResumeSync is idempotent.\n//\n// Requires: Admin on Database.",
+			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Admin"))},
 		},
 	},
 }
@@ -6186,33 +6346,49 @@
 // containing Collection methods.
 //
 // Collection represents a set of Rows.
-// Collection.Glob operates over keys of Rows in the Collection.
+// Collection.Glob operates over keys of Rows in the Collection, requiring Read
+// on Collection.
 type CollectionClientMethods interface {
 	// Create creates this Collection. Permissions must be non-nil and include at
 	// least one admin.
+	//
+	// Requires: Write on Database.
+	// Also requires the creator's blessing to match the pattern in the newly
+	// created Collection's id.
 	Create(_ *context.T, bh BatchHandle, perms access.Permissions, _ ...rpc.CallOpt) error
 	// Destroy destroys this Collection, permanently removing all of its data.
-	// TODO(sadovsky): Specify what happens to syncgroups.
+	//
+	// Requires: Admin on Collection or on Database.
+	// TODO(ivanpi): Prevent for synced Collections.
 	Destroy(_ *context.T, bh BatchHandle, _ ...rpc.CallOpt) error
 	// Exists returns true only if this Collection exists.
+	//
 	// Requires: at least one tag on Collection, or Read or Write on Database.
 	// Otherwise, ErrNoExistOrNoAccess is returned.
 	// If Database does not exist, returned value is identical to
 	// Database.Exists().
 	Exists(_ *context.T, bh BatchHandle, _ ...rpc.CallOpt) (bool, error)
 	// GetPermissions returns the current Permissions for the Collection.
+	//
+	// Requires: Admin on Collection.
 	GetPermissions(_ *context.T, bh BatchHandle, _ ...rpc.CallOpt) (access.Permissions, error)
 	// SetPermissions replaces the current Permissions for the Collection.
 	// Permissions must include at least one admin.
+	//
+	// Requires: Admin on Collection.
 	SetPermissions(_ *context.T, bh BatchHandle, perms access.Permissions, _ ...rpc.CallOpt) error
 	// DeleteRange deletes all rows in the given half-open range [start, limit).
 	// If limit is "", all rows with keys >= start are included.
+	//
+	// Requires: Write on Collection.
 	DeleteRange(_ *context.T, bh BatchHandle, start []byte, limit []byte, _ ...rpc.CallOpt) error
 	// Scan returns all rows in the given half-open range [start, limit). If limit
 	// is "", all rows with keys >= start are included.
 	// Concurrency semantics are documented in model.go.
 	// Note, we use []byte rather than string for start and limit because they
 	// need not be valid UTF-8; VDL expects strings to be valid UTF-8.
+	//
+	// Requires: Read on Collection.
 	Scan(_ *context.T, bh BatchHandle, start []byte, limit []byte, _ ...rpc.CallOpt) (CollectionScanClientCall, error)
 }
 
@@ -6343,33 +6519,49 @@
 // implements for Collection.
 //
 // Collection represents a set of Rows.
-// Collection.Glob operates over keys of Rows in the Collection.
+// Collection.Glob operates over keys of Rows in the Collection, requiring Read
+// on Collection.
 type CollectionServerMethods interface {
 	// Create creates this Collection. Permissions must be non-nil and include at
 	// least one admin.
+	//
+	// Requires: Write on Database.
+	// Also requires the creator's blessing to match the pattern in the newly
+	// created Collection's id.
 	Create(_ *context.T, _ rpc.ServerCall, bh BatchHandle, perms access.Permissions) error
 	// Destroy destroys this Collection, permanently removing all of its data.
-	// TODO(sadovsky): Specify what happens to syncgroups.
+	//
+	// Requires: Admin on Collection or on Database.
+	// TODO(ivanpi): Prevent for synced Collections.
 	Destroy(_ *context.T, _ rpc.ServerCall, bh BatchHandle) error
 	// Exists returns true only if this Collection exists.
+	//
 	// Requires: at least one tag on Collection, or Read or Write on Database.
 	// Otherwise, ErrNoExistOrNoAccess is returned.
 	// If Database does not exist, returned value is identical to
 	// Database.Exists().
 	Exists(_ *context.T, _ rpc.ServerCall, bh BatchHandle) (bool, error)
 	// GetPermissions returns the current Permissions for the Collection.
+	//
+	// Requires: Admin on Collection.
 	GetPermissions(_ *context.T, _ rpc.ServerCall, bh BatchHandle) (access.Permissions, error)
 	// SetPermissions replaces the current Permissions for the Collection.
 	// Permissions must include at least one admin.
+	//
+	// Requires: Admin on Collection.
 	SetPermissions(_ *context.T, _ rpc.ServerCall, bh BatchHandle, perms access.Permissions) error
 	// DeleteRange deletes all rows in the given half-open range [start, limit).
 	// If limit is "", all rows with keys >= start are included.
+	//
+	// Requires: Write on Collection.
 	DeleteRange(_ *context.T, _ rpc.ServerCall, bh BatchHandle, start []byte, limit []byte) error
 	// Scan returns all rows in the given half-open range [start, limit). If limit
 	// is "", all rows with keys >= start are included.
 	// Concurrency semantics are documented in model.go.
 	// Note, we use []byte rather than string for start and limit because they
 	// need not be valid UTF-8; VDL expects strings to be valid UTF-8.
+	//
+	// Requires: Read on Collection.
 	Scan(_ *context.T, _ CollectionScanServerCall, bh BatchHandle, start []byte, limit []byte) error
 }
 
@@ -6380,29 +6572,44 @@
 type CollectionServerStubMethods interface {
 	// Create creates this Collection. Permissions must be non-nil and include at
 	// least one admin.
+	//
+	// Requires: Write on Database.
+	// Also requires the creator's blessing to match the pattern in the newly
+	// created Collection's id.
 	Create(_ *context.T, _ rpc.ServerCall, bh BatchHandle, perms access.Permissions) error
 	// Destroy destroys this Collection, permanently removing all of its data.
-	// TODO(sadovsky): Specify what happens to syncgroups.
+	//
+	// Requires: Admin on Collection or on Database.
+	// TODO(ivanpi): Prevent for synced Collections.
 	Destroy(_ *context.T, _ rpc.ServerCall, bh BatchHandle) error
 	// Exists returns true only if this Collection exists.
+	//
 	// Requires: at least one tag on Collection, or Read or Write on Database.
 	// Otherwise, ErrNoExistOrNoAccess is returned.
 	// If Database does not exist, returned value is identical to
 	// Database.Exists().
 	Exists(_ *context.T, _ rpc.ServerCall, bh BatchHandle) (bool, error)
 	// GetPermissions returns the current Permissions for the Collection.
+	//
+	// Requires: Admin on Collection.
 	GetPermissions(_ *context.T, _ rpc.ServerCall, bh BatchHandle) (access.Permissions, error)
 	// SetPermissions replaces the current Permissions for the Collection.
 	// Permissions must include at least one admin.
+	//
+	// Requires: Admin on Collection.
 	SetPermissions(_ *context.T, _ rpc.ServerCall, bh BatchHandle, perms access.Permissions) error
 	// DeleteRange deletes all rows in the given half-open range [start, limit).
 	// If limit is "", all rows with keys >= start are included.
+	//
+	// Requires: Write on Collection.
 	DeleteRange(_ *context.T, _ rpc.ServerCall, bh BatchHandle, start []byte, limit []byte) error
 	// Scan returns all rows in the given half-open range [start, limit). If limit
 	// is "", all rows with keys >= start are included.
 	// Concurrency semantics are documented in model.go.
 	// Note, we use []byte rather than string for start and limit because they
 	// need not be valid UTF-8; VDL expects strings to be valid UTF-8.
+	//
+	// Requires: Read on Collection.
 	Scan(_ *context.T, _ *CollectionScanServerCallStub, bh BatchHandle, start []byte, limit []byte) error
 }
 
@@ -6478,11 +6685,11 @@
 var descCollection = rpc.InterfaceDesc{
 	Name:    "Collection",
 	PkgPath: "v.io/v23/services/syncbase",
-	Doc:     "// Collection represents a set of Rows.\n// Collection.Glob operates over keys of Rows in the Collection.",
+	Doc:     "// Collection represents a set of Rows.\n// Collection.Glob operates over keys of Rows in the Collection, requiring Read\n// on Collection.",
 	Methods: []rpc.MethodDesc{
 		{
 			Name: "Create",
-			Doc:  "// Create creates this Collection. Permissions must be non-nil and include at\n// least one admin.",
+			Doc:  "// Create creates this Collection. Permissions must be non-nil and include at\n// least one admin.\n//\n// Requires: Write on Database.\n// Also requires the creator's blessing to match the pattern in the newly\n// created Collection's id.",
 			InArgs: []rpc.ArgDesc{
 				{"bh", ``},    // BatchHandle
 				{"perms", ``}, // access.Permissions
@@ -6491,15 +6698,15 @@
 		},
 		{
 			Name: "Destroy",
-			Doc:  "// Destroy destroys this Collection, permanently removing all of its data.\n// TODO(sadovsky): Specify what happens to syncgroups.",
+			Doc:  "// Destroy destroys this Collection, permanently removing all of its data.\n//\n// Requires: Admin on Collection or on Database.\n// TODO(ivanpi): Prevent for synced Collections.",
 			InArgs: []rpc.ArgDesc{
 				{"bh", ``}, // BatchHandle
 			},
-			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Write"))},
+			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Admin"))},
 		},
 		{
 			Name: "Exists",
-			Doc:  "// Exists returns true only if this Collection exists.\n// Requires: at least one tag on Collection, or Read or Write on Database.\n// Otherwise, ErrNoExistOrNoAccess is returned.\n// If Database does not exist, returned value is identical to\n// Database.Exists().",
+			Doc:  "// Exists returns true only if this Collection exists.\n//\n// Requires: at least one tag on Collection, or Read or Write on Database.\n// Otherwise, ErrNoExistOrNoAccess is returned.\n// If Database does not exist, returned value is identical to\n// Database.Exists().",
 			InArgs: []rpc.ArgDesc{
 				{"bh", ``}, // BatchHandle
 			},
@@ -6509,7 +6716,7 @@
 		},
 		{
 			Name: "GetPermissions",
-			Doc:  "// GetPermissions returns the current Permissions for the Collection.",
+			Doc:  "// GetPermissions returns the current Permissions for the Collection.\n//\n// Requires: Admin on Collection.",
 			InArgs: []rpc.ArgDesc{
 				{"bh", ``}, // BatchHandle
 			},
@@ -6520,7 +6727,7 @@
 		},
 		{
 			Name: "SetPermissions",
-			Doc:  "// SetPermissions replaces the current Permissions for the Collection.\n// Permissions must include at least one admin.",
+			Doc:  "// SetPermissions replaces the current Permissions for the Collection.\n// Permissions must include at least one admin.\n//\n// Requires: Admin on Collection.",
 			InArgs: []rpc.ArgDesc{
 				{"bh", ``},    // BatchHandle
 				{"perms", ``}, // access.Permissions
@@ -6529,7 +6736,7 @@
 		},
 		{
 			Name: "DeleteRange",
-			Doc:  "// DeleteRange deletes all rows in the given half-open range [start, limit).\n// If limit is \"\", all rows with keys >= start are included.",
+			Doc:  "// DeleteRange deletes all rows in the given half-open range [start, limit).\n// If limit is \"\", all rows with keys >= start are included.\n//\n// Requires: Write on Collection.",
 			InArgs: []rpc.ArgDesc{
 				{"bh", ``},    // BatchHandle
 				{"start", ``}, // []byte
@@ -6539,7 +6746,7 @@
 		},
 		{
 			Name: "Scan",
-			Doc:  "// Scan returns all rows in the given half-open range [start, limit). If limit\n// is \"\", all rows with keys >= start are included.\n// Concurrency semantics are documented in model.go.\n// Note, we use []byte rather than string for start and limit because they\n// need not be valid UTF-8; VDL expects strings to be valid UTF-8.",
+			Doc:  "// Scan returns all rows in the given half-open range [start, limit). If limit\n// is \"\", all rows with keys >= start are included.\n// Concurrency semantics are documented in model.go.\n// Note, we use []byte rather than string for start and limit because they\n// need not be valid UTF-8; VDL expects strings to be valid UTF-8.\n//\n// Requires: Read on Collection.",
 			InArgs: []rpc.ArgDesc{
 				{"bh", ``},    // BatchHandle
 				{"start", ``}, // []byte
@@ -6600,6 +6807,7 @@
 // All access checks are performed against the Collection ACL.
 type RowClientMethods interface {
 	// Exists returns true only if this Row exists.
+	//
 	// Requires: Read or Write on Collection.
 	// Otherwise, ErrNoExistOrNoAccess is returned.
 	// If Collection does not exist, returned value is identical to
@@ -6609,10 +6817,16 @@
 	// Row data and listing, but not Row existence.
 	Exists(_ *context.T, bh BatchHandle, _ ...rpc.CallOpt) (bool, error)
 	// Get returns the value for this Row.
+	//
+	// Requires: Read on Collection.
 	Get(_ *context.T, bh BatchHandle, _ ...rpc.CallOpt) (*vom.RawBytes, error)
 	// Put writes the given value for this Row.
+	//
+	// Requires: Write on Collection.
 	Put(_ *context.T, bh BatchHandle, value *vom.RawBytes, _ ...rpc.CallOpt) error
 	// Delete deletes this Row.
+	//
+	// Requires: Write on Collection.
 	Delete(_ *context.T, bh BatchHandle, _ ...rpc.CallOpt) error
 }
 
@@ -6658,6 +6872,7 @@
 // All access checks are performed against the Collection ACL.
 type RowServerMethods interface {
 	// Exists returns true only if this Row exists.
+	//
 	// Requires: Read or Write on Collection.
 	// Otherwise, ErrNoExistOrNoAccess is returned.
 	// If Collection does not exist, returned value is identical to
@@ -6667,10 +6882,16 @@
 	// Row data and listing, but not Row existence.
 	Exists(_ *context.T, _ rpc.ServerCall, bh BatchHandle) (bool, error)
 	// Get returns the value for this Row.
+	//
+	// Requires: Read on Collection.
 	Get(_ *context.T, _ rpc.ServerCall, bh BatchHandle) (*vom.RawBytes, error)
 	// Put writes the given value for this Row.
+	//
+	// Requires: Write on Collection.
 	Put(_ *context.T, _ rpc.ServerCall, bh BatchHandle, value *vom.RawBytes) error
 	// Delete deletes this Row.
+	//
+	// Requires: Write on Collection.
 	Delete(_ *context.T, _ rpc.ServerCall, bh BatchHandle) error
 }
 
@@ -6744,7 +6965,7 @@
 	Methods: []rpc.MethodDesc{
 		{
 			Name: "Exists",
-			Doc:  "// Exists returns true only if this Row exists.\n// Requires: Read or Write on Collection.\n// Otherwise, ErrNoExistOrNoAccess is returned.\n// If Collection does not exist, returned value is identical to\n// Collection.Exists().\n// Note, write methods on Row do not leak information whether the Row existed\n// before, but Write is sufficient to call Exists. Therefore, Read protects\n// Row data and listing, but not Row existence.",
+			Doc:  "// Exists returns true only if this Row exists.\n//\n// Requires: Read or Write on Collection.\n// Otherwise, ErrNoExistOrNoAccess is returned.\n// If Collection does not exist, returned value is identical to\n// Collection.Exists().\n// Note, write methods on Row do not leak information whether the Row existed\n// before, but Write is sufficient to call Exists. Therefore, Read protects\n// Row data and listing, but not Row existence.",
 			InArgs: []rpc.ArgDesc{
 				{"bh", ``}, // BatchHandle
 			},
@@ -6754,7 +6975,7 @@
 		},
 		{
 			Name: "Get",
-			Doc:  "// Get returns the value for this Row.",
+			Doc:  "// Get returns the value for this Row.\n//\n// Requires: Read on Collection.",
 			InArgs: []rpc.ArgDesc{
 				{"bh", ``}, // BatchHandle
 			},
@@ -6765,7 +6986,7 @@
 		},
 		{
 			Name: "Put",
-			Doc:  "// Put writes the given value for this Row.",
+			Doc:  "// Put writes the given value for this Row.\n//\n// Requires: Write on Collection.",
 			InArgs: []rpc.ArgDesc{
 				{"bh", ``},    // BatchHandle
 				{"value", ``}, // *vom.RawBytes
@@ -6774,7 +6995,7 @@
 		},
 		{
 			Name: "Delete",
-			Doc:  "// Delete deletes this Row.",
+			Doc:  "// Delete deletes this Row.\n//\n// Requires: Write on Collection.",
 			InArgs: []rpc.ArgDesc{
 				{"bh", ``}, // BatchHandle
 			},
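
A brief illustration of the practical effect of the tag changes above (not part of the generated code): Destroy, PauseSync, and ResumeSync moved from the Write tag to Admin, so a caller holding only Write on the Database should now be rejected. This is only a sketch; it assumes the test helpers used later in this CL and that verror.ErrNoAccess is the error signaling the permission failure.

	func checkWriteOnlyCallerRejected(t *testing.T, ctx *context.T, d syncbase.Database) {
		// Destroy is now tagged Admin; Write alone no longer suffices.
		if err := d.Destroy(ctx); verror.ErrorID(err) != verror.ErrNoAccess.ID {
			t.Errorf("Destroy by Write-only caller: want ErrNoAccess, got %v", err)
		}
		// PauseSync similarly requires Admin on Database.
		if err := wire.DatabaseClient(d.FullName()).PauseSync(ctx); verror.ErrorID(err) != verror.ErrNoAccess.ID {
			t.Errorf("PauseSync by Write-only caller: want ErrNoAccess, got %v", err)
		}
	}
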
diff --git a/syncbase/batch_test.go b/syncbase/batch_test.go
index 46e9e78..b9a3958 100644
--- a/syncbase/batch_test.go
+++ b/syncbase/batch_test.go
@@ -29,7 +29,7 @@
 func TestName(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 
 	b, err := d.BeginBatch(ctx, wire.BatchOptions{})
 	if err != nil {
@@ -56,7 +56,7 @@
 	ctx, serverName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
 	service := syncbase.NewService(serverName)
-	db := tu.CreateDatabase(test, ctx, service, "d")
+	db := tu.CreateDatabase(test, ctx, service, "d", nil)
 
 	// Create the collection outside of batch, ensuring that the initial perms
 	// do not have write permission. (Had the collection been created inside the
@@ -116,7 +116,7 @@
 func TestBatchBasics(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	c := tu.CreateCollection(t, ctx, d, "c")
 
 	tu.CheckScan(t, ctx, c, syncbase.Prefix(""), []string{}, []interface{}{})
@@ -238,7 +238,7 @@
 func TestBatchListCollections(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	tu.CreateCollection(t, ctx, d, "c")
 	user := "root:o:app:client"
 	b, err := d.BeginBatch(ctx, wire.BatchOptions{})
@@ -317,7 +317,7 @@
 func TestBatchExecIsolation(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	c := tu.CreateCollection(t, ctx, d, "c")
 
 	foo := Foo{I: 4, S: "f"}
@@ -394,7 +394,7 @@
 func TestBatchReadonlyExecDelete(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	c := tu.CreateCollection(t, ctx, d, "c")
 
 	foo := Foo{I: 4, S: "f"}
@@ -433,7 +433,7 @@
 func TestBatchExec(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	c := tu.CreateCollection(t, ctx, d, "c")
 
 	foo := Foo{I: 4, S: "f"}
@@ -553,7 +553,7 @@
 func TestReadOnlyBatch(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	c := tu.CreateCollection(t, ctx, d, "c")
 
 	if err := c.Put(ctx, "fooKey", "fooValue"); err != nil {
@@ -581,7 +581,7 @@
 func TestOpAfterFinalize(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	c := tu.CreateCollection(t, ctx, d, "c")
 
 	// TODO(sadovsky): Add some sort of "op after finalize" error type and check
@@ -665,7 +665,7 @@
 func TestDisallowedMethods(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 
 	dc := wire.DatabaseClient(d.FullName())
 	if err := dc.Commit(ctx, ""); verror.ErrorID(err) != wire.ErrNotBoundToBatch.ID {
@@ -717,7 +717,7 @@
 func TestRunInBatchRetry(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	c := tu.CreateCollection(t, ctx, d, "c")
 
 	// Succeed (no conflict) on second try.
@@ -734,7 +734,7 @@
 func TestRunInBatchMaxRetries(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	c := tu.CreateCollection(t, ctx, d, "c")
 
 	// Succeed (no conflict) on 10th try. RunInBatch will retry 3 times and give
@@ -752,7 +752,7 @@
 func TestRunInBatchError(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	c := tu.CreateCollection(t, ctx, d, "c")
 
 	// Return error from fn. Errors other than ErrConcurrentTransaction are not
@@ -771,7 +771,7 @@
 func TestRunInBatchReadOnly(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	c := tu.CreateCollection(t, ctx, d, "c")
 
 	// Test readonly batch.
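
The mechanical edits in this test file (and in the test files below) add a trailing argument to tu.CreateDatabase. Judging from the call sites in this CL, the new parameter carries the database's initial permissions, with nil falling back to default permissions for the creator; the explicit form appears in client_test.go. A minimal sketch under that assumption:

	func createDatabasesSketch(t *testing.T, ctx, adminCtx *context.T, sName string) (syncbase.Database, syncbase.Database) {
		// nil: the helper supplies default permissions for the creator.
		d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
		// Explicit permissions: additionally grant a second client Resolve and Read,
		// mirroring the changes in client_test.go below.
		d2 := tu.CreateDatabase(t, adminCtx, syncbase.NewService(sName), "d2",
			tu.DefaultPerms(wire.AllDatabaseTags, "root:u:admin").Add("root:u:client", string(access.Resolve), string(access.Read)))
		return d, d2
	}
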
diff --git a/syncbase/blob_test.go b/syncbase/blob_test.go
index 082b4f3..f5d1d95 100644
--- a/syncbase/blob_test.go
+++ b/syncbase/blob_test.go
@@ -19,7 +19,7 @@
 func TestLocalBlobPutGet(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 
 	b, err := d.CreateBlob(ctx)
 	if err != nil {
diff --git a/syncbase/client_test.go b/syncbase/client_test.go
index ec07050..622a04f 100644
--- a/syncbase/client_test.go
+++ b/syncbase/client_test.go
@@ -99,7 +99,7 @@
 func TestExec(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	c := tu.CreateCollection(t, ctx, d, "c")
 
 	foo := Foo{I: 4, S: "f"}
@@ -198,7 +198,7 @@
 func TestDatabasePerms(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	tu.TestPerms(t, ctx, d)
 }
 
@@ -206,7 +206,7 @@
 func TestCollectionCreate(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	tu.TestCreate(t, ctx, d)
 }
 
@@ -216,7 +216,7 @@
 func TestCollectionCreateNameValidation(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	tu.TestCreateNameValidation(t, ctx, d, tu.OkDbCxNames, tu.NotOkDbCxNames)
 }
 
@@ -224,7 +224,7 @@
 func TestCollectionDestroy(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	tu.TestDestroy(t, ctx, d)
 }
 
@@ -232,7 +232,7 @@
 func TestCollectionDestroyAndRecreate(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	c := tu.CreateCollection(t, ctx, d, "c")
 	// Write some data.
 	if err := c.Put(ctx, "bar/baz", "A"); err != nil {
@@ -293,7 +293,7 @@
 func TestCollectionScan(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	c := tu.CreateCollection(t, ctx, d, "c")
 
 	tu.CheckScan(t, ctx, c, syncbase.Prefix(""), []string{}, []interface{}{})
@@ -337,7 +337,7 @@
 func TestCollectionDeleteRange(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	c := tu.CreateCollection(t, ctx, d, "c")
 
 	tu.CheckScan(t, ctx, c, syncbase.Prefix(""), []string{}, []interface{}{})
@@ -376,7 +376,7 @@
 func TestCollectionRowMethods(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	c := tu.CreateCollection(t, ctx, d, "c")
 
 	got, want := Foo{}, Foo{I: 4, S: "foo"}
@@ -415,7 +415,7 @@
 func TestRowMethods(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	c := tu.CreateCollection(t, ctx, d, "c")
 
 	r := c.Row("f")
@@ -455,7 +455,7 @@
 func TestRowKeyValidation(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	c := tu.CreateCollection(t, ctx, d, "c")
 	tu.TestCreateNameValidation(t, ctx, c, tu.OkRowKeys, tu.NotOkRowKeys)
 }
@@ -466,7 +466,7 @@
 func TestRowPermissions(t *testing.T) {
 	_, clientACtx, sName, _, cleanup := tu.SetupOrDieCustom("u:clientA", "server", nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, clientACtx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, clientACtx, syncbase.NewService(sName), "d", nil)
 	c := tu.CreateCollection(t, clientACtx, d, "c")
 
 	// Add some key-value pairs.
@@ -507,10 +507,12 @@
 // Tests collection perms where get is allowed but put is not.
 // TODO(ivanpi): Redundant with permissions_test?
 func TestMixedCollectionPerms(t *testing.T) {
-	ctx, clientACtx, sName, rootp, cleanup := tu.SetupOrDieCustom("u:clientA", "server", nil)
+	ctx, clientACtx, sName, rootp, cleanup := tu.SetupOrDieCustom("u:clientA", "server",
+		tu.DefaultPerms(access.AllTypicalTags(), "root:u:clientA").Add("root:u:clientB", string(access.Resolve)))
 	defer cleanup()
 	clientBCtx := tu.NewCtx(ctx, rootp, "u:clientB")
-	d := tu.CreateDatabase(t, clientACtx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, clientACtx, syncbase.NewService(sName), "d",
+		tu.DefaultPerms(wire.AllDatabaseTags, "root:u:clientA").Add("root:u:clientB", string(access.Resolve)))
 	c := tu.CreateCollection(t, clientACtx, d, "c")
 
 	// Set permissions.
@@ -548,7 +550,7 @@
 func TestWatchBasic(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	c := tu.CreateCollection(t, ctx, d, "c")
 	var resumeMarkers []watch.ResumeMarker
 
@@ -614,10 +616,12 @@
 // TestWatchWithBatchAndInitialState tests that the client watch correctly
 // handles batches, perms, and fetching initial state on empty resume marker.
 func TestWatchWithBatchAndInitialState(t *testing.T) {
-	ctx, adminCtx, sName, rootp, cleanup := tu.SetupOrDieCustom("u:admin", "server", nil)
+	ctx, adminCtx, sName, rootp, cleanup := tu.SetupOrDieCustom("u:admin", "server",
+		tu.DefaultPerms(access.AllTypicalTags(), "root:u:admin").Add("root:u:client", string(access.Resolve)))
 	defer cleanup()
 	clientCtx := tu.NewCtx(ctx, rootp, "u:client")
-	d := tu.CreateDatabase(t, adminCtx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, adminCtx, syncbase.NewService(sName), "d",
+		tu.DefaultPerms(wire.AllDatabaseTags, "root:u:admin").Add("root:u:client", string(access.Resolve), string(access.Read)))
 	cp := tu.CreateCollection(t, adminCtx, d, "cpublic")
 	ch := tu.CreateCollection(t, adminCtx, d, "chidden")
 
@@ -840,7 +844,7 @@
 func TestBlockingWatch(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	c := tu.CreateCollection(t, ctx, d, "c")
 
 	resumeMarker, err := d.GetResumeMarker(ctx)
@@ -877,7 +881,7 @@
 func TestBlockedWatchCancel(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 
 	resumeMarker, err := d.GetResumeMarker(ctx)
 	if err != nil {
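
The extra Resolve grants for the second client in TestMixedCollectionPerms and TestWatchWithBatchAndInitialState follow from the hierarchy traversal now being enforced: the caller needs Resolve on each enclosing level before its Collection-level tags matter, which is why clientB and the watching client receive Resolve on the Service and on the Database in addition to their Collection grants. A schematic check, purely illustrative and not the Syncbase implementation:

	// levels are ordered from the Service down to the level the RPC targets;
	// hasTag reports whether the caller holds the given tag at that level.
	func allowed(hasTag func(level, tag string) bool, levels []string, requiredTag string) bool {
		for _, l := range levels[:len(levels)-1] {
			if !hasTag(l, "Resolve") {
				return false // missing Resolve on an enclosing level
			}
		}
		return hasTag(levels[len(levels)-1], requiredTag)
	}
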
diff --git a/syncbase/exec_test.go b/syncbase/exec_test.go
index 7ec7e68..950b8c4 100644
--- a/syncbase/exec_test.go
+++ b/syncbase/exec_test.go
@@ -60,7 +60,7 @@
 func setup(t *testing.T) {
 	var sName string
 	ctx, sName, cleanup = tu.SetupOrDie(nil)
-	db = tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	db = tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	customerCollection = tu.CreateCollection(t, ctx, db, "Customer")
 	numbersCollection = tu.CreateCollection(t, ctx, db, "Numbers")
 	fooCollection = tu.CreateCollection(t, ctx, db, "Foo")
@@ -2314,9 +2314,7 @@
 		{
 			"select v from Unknown",
 			// The following error text is dependent on the implementation of the query.Database interface.
-			// TODO(sadovsky): Error messages should never contain storage engine
-			// prefixes ("c") and delimiters ("\xfe").
-			syncql.NewErrTableCantAccess(ctx, 14, "Unknown", errors.New("syncbase.test:\"root:o:app,d\".Exec: Does not exist: c\xferoot:o:app:client,Unknown\xfe")),
+			syncql.NewErrTableCantAccess(ctx, 14, "Unknown", errors.New("syncbase.test:\"root:o:app,d\".Exec: Does not exist: root:o:app:client,Unknown")),
 		},
 		{
 			"select v from Customer offset -1",
@@ -2485,9 +2483,7 @@
 		// *ScanError Cannot produce a [collection].ScanError.
 		{
 			"select k from Blah",
-			// TODO(sadovsky): Error messages should never contain storage engine
-			// prefixes ("c") and delimiters ("\xfe").
-			syncql.NewErrTableCantAccess(ctx, 14, "Blah", errors.New("syncbase.test:\"root:o:app,d\".Exec: Does not exist: c\xferoot:o:app:client,Blah\xfe")),
+			syncql.NewErrTableCantAccess(ctx, 14, "Blah", errors.New("syncbase.test:\"root:o:app,d\".Exec: Does not exist: root:o:app:client,Blah")),
 		},
 		{
 			"select k, v from Customer where a = b)",
diff --git a/syncbase/model.go b/syncbase/model.go
index bb0d989..206e2ea 100644
--- a/syncbase/model.go
+++ b/syncbase/model.go
@@ -10,6 +10,9 @@
 //   <encCxId> is encode(<cxId>), where <cxId> is <userBlessing>,<cxName>
 // (Note that blessing strings cannot contain ",".)
 
+// Refer to v.io/v23/services/syncbase/service.vdl for the access control policy
+// specification.
+
 // NOTE(sadovsky): Various methods below may end up needing additional options.
 // One can add options to a Go method in a backwards-compatible way by making
 // the method variadic.
@@ -240,7 +243,7 @@
 	Create(ctx *context.T, perms access.Permissions) error
 
 	// Destroy destroys this Collection, permanently removing all of its data.
-	// TODO(sadovsky): Specify what happens to syncgroups.
+	// TODO(ivanpi): Prevent for synced Collections.
 	Destroy(ctx *context.T) error
 
 	// GetPermissions returns the current Permissions for the Collection.
diff --git a/syncbase/permissions_test.go b/syncbase/permissions_test.go
index 1b73600..4260fca 100644
--- a/syncbase/permissions_test.go
+++ b/syncbase/permissions_test.go
@@ -8,6 +8,7 @@
 	"fmt"
 	"reflect"
 	"testing"
+	"time"
 
 	"v.io/v23"
 	"v.io/v23/context"
@@ -30,6 +31,10 @@
 	f func(ctx *context.T, d syncbase.Database) error
 }
 
+type batchDatabaseTest struct {
+	f func(ctx *context.T, b syncbase.BatchDatabase) error
+}
+
 type collectionTest struct {
 	f func(ctx *context.T, c syncbase.Collection) error
 }
@@ -68,6 +73,7 @@
 		}},
 		name:     "service.DevModeUpdateVClock",
 		patterns: []string{"A___"},
+		mutating: true,
 	},
 	{
 		layer: serviceTest{f: func(ctx *context.T, s syncbase.Service) error {
@@ -85,6 +91,14 @@
 		patterns: []string{"A___"},
 		mutating: true,
 	},
+	{
+		layer: serviceTest{f: func(ctx *context.T, s syncbase.Service) error {
+			_, err := s.ListDatabases(ctx)
+			return err
+		}},
+		name:     "service.ListDatabases",
+		patterns: []string{"R___"},
+	},
 
 	// Database tests.
 	{
@@ -100,7 +114,7 @@
 			return d.Destroy(ctx)
 		}},
 		name:     "database.Destroy",
-		patterns: []string{"_W__"},
+		patterns: []string{"XA__", "A___"},
 		mutating: true,
 	},
 	{
@@ -110,7 +124,6 @@
 		}},
 		name:     "database.Exists",
 		patterns: []string{"XX__", "XR__", "XW__", "XA__", "R___", "W___"},
-		mutating: false,
 	},
 	{
 		layer: databaseTest{f: func(ctx *context.T, d syncbase.Database) error {
@@ -118,26 +131,159 @@
 			return err
 		}},
 		name:     "database.GetPermissions",
-		patterns: []string{"_A__"},
+		patterns: []string{"XA__"},
 	},
 	{
 		layer: databaseTest{f: func(ctx *context.T, d syncbase.Database) error {
 			return d.SetPermissions(ctx, tu.DefaultPerms(wire.AllDatabaseTags, "root"), "")
 		}},
 		name:     "database.SetPermissions",
-		patterns: []string{"_A__"},
+		patterns: []string{"XA__"},
+		mutating: true,
+	},
+	{
+		layer: databaseTest{f: func(ctx *context.T, d syncbase.Database) error {
+			_, err := d.ListCollections(ctx)
+			return err
+		}},
+		name:     "database.ListCollections",
+		patterns: []string{"XR__"},
+	},
+	{
+		layer: databaseTest{f: func(ctx *context.T, d syncbase.Database) error {
+			ws := d.Watch(ctx, nil, []wire.CollectionRowPattern{util.RowPrefixPattern(wire.Id{"u", "c"}, "")})
+			ws.Advance()
+			return ws.Err()
+		}},
+		name:     "database.Watch",
+		patterns: []string{"XR__"},
+	},
+	{
+		layer: databaseTest{f: func(ctx *context.T, d syncbase.Database) error {
+			_, err := d.BeginBatch(ctx, wire.BatchOptions{})
+			return err
+		}},
+		name:     "database.BeginBatch",
+		patterns: []string{"XX__", "XR__", "XW__", "XA__"},
+	},
+	{
+		layer: databaseTest{f: func(ctx *context.T, d syncbase.Database) error {
+			_, err := d.GetResumeMarker(ctx)
+			return err
+		}},
+		name:     "database.GetResumeMarker",
+		patterns: []string{"XX__", "XR__", "XW__", "XA__"},
+	},
+	// TODO(ivanpi): Test Exec.
+	// TODO(ivanpi): Test RPC-only methods such as Glob.
+
+	// Batch database tests.
+	{
+		layer: batchDatabaseTest{f: func(ctx *context.T, b syncbase.BatchDatabase) error {
+			return b.Commit(ctx)
+		}},
+		name:     "database.Commit",
+		patterns: []string{"XX__", "XR__", "XW__", "XA__"},
+		mutating: true,
+	},
+	{
+		layer: batchDatabaseTest{f: func(ctx *context.T, b syncbase.BatchDatabase) error {
+			return b.Abort(ctx)
+		}},
+		name:     "database.Abort",
+		patterns: []string{"XX__", "XR__", "XW__", "XA__"},
+	},
+
+	// Schema manager, sync control, and conflict resolver tests.
+	{
+		layer: databaseTest{f: func(ctx *context.T, d syncbase.Database) error {
+			_, err := wire.DatabaseClient(d.FullName()).GetSchemaMetadata(ctx)
+			if verror.ErrorID(err) == verror.ErrNoExist.ID {
+				return nil
+			}
+			return err
+		}},
+		name:     "database.GetSchemaMetadata",
+		patterns: []string{"XR__"},
+	},
+	{
+		layer: databaseTest{f: func(ctx *context.T, d syncbase.Database) error {
+			return wire.DatabaseClient(d.FullName()).SetSchemaMetadata(ctx, wire.SchemaMetadata{})
+		}},
+		name:     "database.SetSchemaMetadata",
+		patterns: []string{"XA__"},
+		mutating: true,
+	},
+	{
+		layer: databaseTest{f: func(ctx *context.T, d syncbase.Database) error {
+			return wire.DatabaseClient(d.FullName()).PauseSync(ctx)
+		}},
+		name:     "database.PauseSync",
+		patterns: []string{"XA__"},
+		mutating: true,
+	},
+	{
+		layer: databaseTest{f: func(ctx *context.T, d syncbase.Database) error {
+			return wire.DatabaseClient(d.FullName()).ResumeSync(ctx)
+		}},
+		name:     "database.ResumeSync",
+		patterns: []string{"XA__"},
+		mutating: true,
+	},
+	{
+		layer: databaseTest{f: func(ctx *context.T, d syncbase.Database) error {
+			s, err := wire.DatabaseClient(d.FullName()).StartConflictResolver(ctx)
+			if err != nil {
+				return err
+			}
+			go s.RecvStream().Advance()
+			<-time.After(1 * time.Second)
+			return s.RecvStream().Err()
+		}},
+		name:     "database.StartConflictResolver",
+		patterns: []string{"XA__"},
 		mutating: true,
 	},
 
+	// Blob manager tests.
+	{
+		layer: databaseTest{f: func(ctx *context.T, d syncbase.Database) error {
+			_, err := d.CreateBlob(ctx)
+			return err
+		}},
+		name:     "database.CreateBlob",
+		patterns: []string{"XW__"},
+		mutating: true,
+	},
+	// TODO(ivanpi): Test other blob RPCs.
+
+	// Syncgroup manager tests.
+	// TODO(ivanpi): Add.
+
 	// Collection tests.
 	{
+		layer: databaseTest{f: func(ctx *context.T, d syncbase.Database) error {
+			return d.CollectionForId(wire.Id{"root", "cNew"}).Create(ctx, tu.DefaultPerms(wire.AllCollectionTags, "root"))
+		}},
+		name:     "collection.Create",
+		patterns: []string{"XW__"},
+		mutating: true,
+	},
+	{
+		layer: collectionTest{f: func(ctx *context.T, c syncbase.Collection) error {
+			return c.Destroy(ctx)
+		}},
+		name:     "collection.Destroy",
+		patterns: []string{"XXA_", "XA__"},
+		mutating: true,
+	},
+	{
 		layer: collectionTest{f: func(ctx *context.T, c syncbase.Collection) error {
 			_, err := c.Exists(ctx)
 			return err
 		}},
 		name:     "collection.Exists",
 		patterns: []string{"XXR_", "XXW_", "XXA_", "XR__", "XW__"},
-		mutating: false,
 	},
 	{
 		layer: collectionTest{f: func(ctx *context.T, c syncbase.Collection) error {
@@ -145,8 +291,34 @@
 			return err
 		}},
 		name:     "collection.GetPermissions",
-		patterns: []string{"__A_"},
+		patterns: []string{"XXA_"},
 	},
+	{
+		layer: collectionTest{f: func(ctx *context.T, c syncbase.Collection) error {
+			return c.SetPermissions(ctx, tu.DefaultPerms(wire.AllCollectionTags, "root"))
+		}},
+		name:     "collection.SetPermissions",
+		patterns: []string{"XXA_"},
+		mutating: true,
+	},
+	{
+		layer: collectionTest{f: func(ctx *context.T, c syncbase.Collection) error {
+			ss := c.Scan(ctx, syncbase.Prefix(""))
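+			// A single Advance suffices: if the client lacks Read access, Advance
+			// returns false and Err reports the access error.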
+			ss.Advance()
+			return ss.Err()
+		}},
+		name:     "collection.Scan",
+		patterns: []string{"XXR_"},
+	},
+	{
+		layer: collectionTest{f: func(ctx *context.T, c syncbase.Collection) error {
+			return c.DeleteRange(ctx, syncbase.Prefix(""))
+		}},
+		name:     "collection.DeleteRange",
+		patterns: []string{"XXW_"},
+		mutating: true,
+	},
+	// TODO(ivanpi): Test RPC-only methods such as Glob.
 
 	// Row tests.
 	{
@@ -156,7 +328,6 @@
 		}},
 		name:     "row.Exists",
 		patterns: []string{"XXR_", "XXW_"},
-		mutating: false,
 	},
 	{
 		layer: rowTest{f: func(ctx *context.T, r syncbase.Row) error {
@@ -164,7 +335,24 @@
 			return r.Get(ctx, &value)
 		}},
 		name:     "row.Get",
-		patterns: []string{"__R_"},
+		patterns: []string{"XXR_"},
+	},
+	{
+		layer: rowTest{f: func(ctx *context.T, r syncbase.Row) error {
+			value := "NCC-1701-D"
+			return r.Put(ctx, &value)
+		}},
+		name:     "row.Put",
+		patterns: []string{"XXW_"},
+		mutating: true,
+	},
+	{
+		layer: rowTest{f: func(ctx *context.T, r syncbase.Row) error {
+			return r.Delete(ctx)
+		}},
+		name:     "row.Delete",
+		patterns: []string{"XXW_"},
+		mutating: true,
 	},
 }
 
@@ -312,10 +500,18 @@
 			err = layer.f(clientCtx, s)
 		case databaseTest:
 			err = layer.f(clientCtx, d)
+		case batchDatabaseTest:
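+			// Begin the batch as admin so that only the tested RPC itself runs
+			// with the client's credentials.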
+			b, bErr := d.BeginBatch(adminCtx, wire.BatchOptions{})
+			if bErr != nil {
+				tu.Fatalf(t, "d.BeginBatch failed: %v", bErr)
+			}
+			err = layer.f(clientCtx, b)
 		case collectionTest:
 			err = layer.f(clientCtx, c)
 		case rowTest:
 			err = layer.f(clientCtx, r)
+		default:
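+			// Catch layer types that are missing a case above.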
+			tu.Fatal(t, "unknown test type")
 		}
 		if expectSuccess && err != nil {
 			tu.Fatalf(t, "test %v failed with non-nil error: %v", test, err)
@@ -667,7 +863,7 @@
 	ctx, sName, cleanup := tu.SetupOrDie(nil)
 	defer cleanup()
 	s := syncbase.NewService(sName)
-	rd := tu.CreateDatabase(t, ctx, s, "anchor")
+	rd := tu.CreateDatabase(t, ctx, s, "anchor", nil)
 	rc := tu.CreateCollection(t, ctx, rd, "anchor")
 	rsg := tu.CreateSyncgroup(t, ctx, rd, rc, "anchor", "anchor syncgroup")
 
diff --git a/syncbase/syncgroup_test.go b/syncbase/syncgroup_test.go
index 4ddd5e8..86da93c 100644
--- a/syncbase/syncgroup_test.go
+++ b/syncbase/syncgroup_test.go
@@ -20,7 +20,7 @@
 func TestCreateSyncgroup(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(tu.DefaultPerms(access.AllTypicalTags(), "root:o:app:client"))
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 
 	// Check if create fails with empty spec.
 	spec := wire.SyncgroupSpec{}
@@ -91,10 +91,11 @@
 // join it.
 func TestJoinSyncgroup(t *testing.T) {
 	// Create client1-server pair.
-	ctx, ctx1, sName, rootp, cleanup := tu.SetupOrDieCustom("o:app:client1", "server", tu.DefaultPerms(access.AllTypicalTags(), "root:o:app:client1"))
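+	// Grant client2 Resolve on the Service up front: per the ACL spec, Resolve
+	// is required on every level of the hierarchy above the one being accessed,
+	// so client2 needs it to reach the Database it is later given access to.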
+	ctx, ctx1, sName, rootp, cleanup := tu.SetupOrDieCustom("o:app:client1", "server",
+		tu.DefaultPerms(access.AllTypicalTags(), "root:o:app:client1").Add("root:o:app:client2", string(access.Resolve)))
 	defer cleanup()
 
-	d1 := tu.CreateDatabase(t, ctx1, syncbase.NewService(sName), "d")
+	d1 := tu.CreateDatabase(t, ctx1, syncbase.NewService(sName), "d", nil)
 	c := tu.CreateCollection(t, ctx1, d1, "c")
 	specA := wire.SyncgroupSpec{
 		Description: "test syncgroup sgA",
@@ -118,12 +119,12 @@
 
 	// Client1 gives access to client2.
 	if err := d1.SetPermissions(ctx1, tu.DefaultPerms(wire.AllDatabaseTags, "root:o:app:client1", "root:o:app:client2"), ""); err != nil {
-		t.Fatalf("d.SetPermissions() failed: %v", err)
+		t.Fatalf("d1.SetPermissions() failed: %v", err)
 	}
 
 	// Verify client2 has access.
 	if err := d2.SetPermissions(ctx2, tu.DefaultPerms(wire.AllDatabaseTags, "root:o:app:client1", "root:o:app:client2"), ""); err != nil {
-		t.Fatalf("d.SetPermissions() failed: %v", err)
+		t.Fatalf("d2.SetPermissions() failed: %v", err)
 	}
 
 	// Check that client2's join still fails since the SG ACL disallows access.
@@ -154,7 +155,7 @@
 func TestSetSpecSyncgroup(t *testing.T) {
 	ctx, sName, cleanup := tu.SetupOrDie(tu.DefaultPerms(access.AllTypicalTags(), "root:o:app:client"))
 	defer cleanup()
-	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d")
+	d := tu.CreateDatabase(t, ctx, syncbase.NewService(sName), "d", nil)
 	c := tu.CreateCollection(t, ctx, d, "c")
 
 	// Create successfully.
diff --git a/syncbase/util/list_child_ids.go b/syncbase/util/list_child_ids.go
index b5833f2..cf4d5ad 100644
--- a/syncbase/util/list_child_ids.go
+++ b/syncbase/util/list_child_ids.go
@@ -54,7 +54,8 @@
 			}
 			ids = append(ids, id)
 		case *naming.GlobReplyError:
-			// TODO(sadovsky): Surface these errors somehow.
+			// Glob for a layer is currently all-or-nothing, so we fail on error.
+			return nil, v.Value.Error
 		}
 	}
 	SortIds(ids)