diff --git a/.gitignore b/.gitignore
index 53191fd..8c5d116 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,5 @@
+/go/bin
+/node_modules
+/tmp
+
 /.v23
diff --git a/.jshintignore b/.jshintignore
new file mode 100644
index 0000000..6804343
--- /dev/null
+++ b/.jshintignore
@@ -0,0 +1,2 @@
+node_modules
+src/gen-vdl
diff --git a/.jshintrc b/.jshintrc
new file mode 100644
index 0000000..f846f31
--- /dev/null
+++ b/.jshintrc
@@ -0,0 +1,28 @@
+{
+  "camelcase": true,
+  "eqeqeq": true,
+  "expr": true,
+  "forin": true,
+  "freeze": true,
+  "immed": true,
+  "indent": 2,
+  "latedef": "nofunc",
+  "maxlen": 80,
+  "newcap": true,
+  "noarg": true,
+  "nonbsp": true,
+  "nonew": true,
+  "quotmark": "single",
+  "sub": true,
+  "trailing": true,
+  "undef": true,
+  "unused": "vars",
+  "esnext": true,
+  "browser": true,
+  "devel": true,
+  "node": true,
+
+  "globals": {
+    "Promise": true
+  }
+}
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 0000000..574583c
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,9 @@
+# This is the official list of Vanadium authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as:
+#   Name or Organization <email address>
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
diff --git a/CONTRIBUTORS b/CONTRIBUTORS
new file mode 100644
index 0000000..b294e50
--- /dev/null
+++ b/CONTRIBUTORS
@@ -0,0 +1,10 @@
+# People who have agreed to one of the CLAs and can contribute patches.
+# The AUTHORS file lists the copyright holders; this file
+# lists people.  For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# https://developers.google.com/open-source/cla/individual
+# https://developers.google.com/open-source/cla/corporate
+#
+# Names should be added to this file as:
+#     Name <email address>
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..411db13
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2015 The Vanadium Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..988c630
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,122 @@
+SHELL := /bin/bash -euo pipefail
+export PATH := ./go/bin:$(V23_ROOT)/release/go/bin:$(V23_ROOT)/roadmap/go/bin:node_modules/.bin:$(V23_ROOT)/third_party/cout/node/bin:$(PATH)
+
+# This special target causes target files to be deleted if their recipe fails.
+.DELETE_ON_ERROR:
+
+UNAME := $(shell uname)
+
+# When running browser tests on non-Darwin machines, set the --headless flag.
+# This uses Xvfb under the hood (inside prova => browser-launcher =>
+# headless), which is not supported on OS X.
+# See: https://github.com/kesla/node-headless/
+ifndef NOHEADLESS
+	ifneq ($(UNAME),Darwin)
+		HEADLESS := --headless
+	endif
+endif
+
+ifdef STOPONFAIL
+	STOPONFAIL := --stopOnFirstFailure
+endif
+
+ifndef NOTAP
+	TAP := --tap
+endif
+
+ifndef NOQUIT
+	QUIT := --quit
+endif
+
+ifdef XUNIT
+	TAP := --tap # TAP must be set for xunit to work
+	OUTPUT_TRANSFORM := tap-xunit
+endif
+
+ifdef BROWSER_OUTPUT
+	BROWSER_OUTPUT_LOCAL = $(BROWSER_OUTPUT)
+	ifdef OUTPUT_TRANSFORM
+		BROWSER_OUTPUT_LOCAL := >($(OUTPUT_TRANSFORM) --package=javascript.browser > $(BROWSER_OUTPUT_LOCAL))
+	endif
+	BROWSER_OUTPUT_LOCAL := | tee $(BROWSER_OUTPUT_LOCAL)
+endif
+
+PROVA_OPTS := --includeFilenameAsPackage $(TAP) $(QUIT) $(STOPONFAIL)
+
+BROWSER_OPTS := --browser --launch chrome $(HEADLESS) --log=./tmp/chrome.log
+
+.DEFAULT_GOAL := all
+
+.PHONY: all
+all:
+
+go/bin: $(shell find $(V23_ROOT) -name "*.go")
+	v23 go build -a -o $@/principal v.io/x/ref/cmd/principal
+	v23 go build -a -tags wspr -o $@/servicerunner v.io/x/ref/cmd/servicerunner
+	v23 go build -a -o $@/syncbased v.io/syncbase/x/ref/services/syncbase/syncbased
+
+.PHONY: gen-vdl
+gen-vdl:
+	v23 run vdl generate --lang=javascript --js-out-dir=src/gen-vdl v.io/syncbase/v23/services/syncbase/...
+
+node_modules: package.json
+	npm prune
+	npm install
+	# Link Vanadium from V23_ROOT.
+	rm -rf ./node_modules/vanadium
+	cd "$(V23_ROOT)/release/javascript/core" && npm link
+	npm link vanadium
+	touch node_modules
+
+# We use the same test runner as vanadium.js.  It handles starting and stopping
+# all required services (proxy, wspr, mounttabled), and runs tests in chrome
+# with prova.
+# TODO(sadovsky): Some of the deps in our package.json are needed solely for
+# runner.js. We should restructure things so that runner.js is its own npm
+# package with its own deps.
+.NOTPARALLEL: test
+.PHONY: test
+test: test-integration
+
+.NOTPARALLEL: test-integration
+.PHONY: test-integration
+test-integration: test-integration-browser test-integration-node
+
+.PHONY: test-integration-node
+test-integration-node: export PATH := ./test:$(PATH)
+test-integration-node: go/bin lint node_modules
+	node ./node_modules/vanadium/test/integration/runner.js --services=start-syncbased.sh -- \
+	prova test/integration/test-*.js $(PROVA_OPTS) $(NODE_OUTPUT_LOCAL)
+
+.PHONY: test-integration-browser
+test-integration-browser: export PATH := ./test:$(PATH)
+test-integration-browser: go/bin lint node_modules
+	node ./node_modules/vanadium/test/integration/runner.js --services=start-syncbased.sh -- \
+	make test-integration-browser-runner
+
+# Note: runner.js sets the V23_NAMESPACE and PROXY_ADDR env vars for the
+# spawned test subprocess; we specify "make test-integration-browser-runner" as
+# the test command so that we can then reference these vars in the Vanadium
+# extension and our prova command.
+.PHONY: test-integration-browser-runner
+test-integration-browser-runner: VANADIUM_JS := $(V23_ROOT)/release/javascript/core
+test-integration-browser-runner: BROWSER_OPTS := --options="--load-extension=$(VANADIUM_JS)/extension/build-test/,--ignore-certificate-errors,--enable-logging=stderr" $(BROWSER_OPTS)
+test-integration-browser-runner:
+	$(MAKE) -C $(VANADIUM_JS)/extension clean
+	$(MAKE) -C $(VANADIUM_JS)/extension build-test
+	prova ./test/integration/test-*.js $(PROVA_OPTS) $(BROWSER_OPTS) $(BROWSER_OUTPUT_LOCAL)
+
+.PHONY: clean
+clean:
+	rm -rf \
+		go/bin \
+		node_modules \
+		tmp
+
+.PHONY: lint
+lint: node_modules
+ifdef NOLINT
+	@echo "Skipping lint - disabled by NOLINT environment variable"
+else
+	jshint .
+endif
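
The test-integration-node target above hands prova a glob of test/integration/test-*.js files, with runner.js starting and stopping the required services around the run. As a rough sketch (not part of this change), such a test file could look like the following; the tape-style prova API and the reliance on the V23_NAMESPACE env var being visible to node tests are assumptions for illustration.

    // Hypothetical test/integration/test-example.js
    var test = require('prova');

    test('placeholder syncbase integration test', function(t) {
      // runner.js sets V23_NAMESPACE and PROXY_ADDR for the spawned test
      // subprocess (per the Makefile comment); assume node tests can read them.
      t.ok(process.env.V23_NAMESPACE, 'V23_NAMESPACE is set by runner.js');
      t.end();
    });
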
diff --git a/PATENTS b/PATENTS
new file mode 100644
index 0000000..d52cc55
--- /dev/null
+++ b/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Vanadium project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Vanadium, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Vanadium. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Vanadium or any code incorporated within this
+implementation of Vanadium constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Vanadium
+shall terminate as of the date such litigation is filed.
diff --git a/VERSION b/VERSION
new file mode 100644
index 0000000..6d6ff85
--- /dev/null
+++ b/VERSION
@@ -0,0 +1 @@
+v23-0.1
diff --git a/package.json b/package.json
new file mode 100644
index 0000000..4a86cd2
--- /dev/null
+++ b/package.json
@@ -0,0 +1,33 @@
+{
+  "name": "syncbase",
+  "version": "0.0.0",
+  "description": "",
+  "main": "src/syncbase.js",
+  "dependencies": {
+    "debug": "~2.2.0",
+    "inherits": "~2.0.1",
+    "through2": "~0.6.5"
+  },
+  "devDependencies": {
+    "async": "~1.0.0",
+    "deep-equal": "~1.0.0",
+    "format": "~0.2.1",
+    "jshint": "~2.7.0",
+    "json-stable-stringify": "~1.0.0",
+    "minimist": "~1.1.1",
+    "mkdirp": "~0.5.1",
+    "prova": "aghassemi/prova#0.0.4",
+    "run-parallel": "~1.1.1",
+    "stream-array": "~1.1.0",
+    "stream-to-array": "~2.0.2",
+    "tap-xunit": "~1.1.1",
+    "which": "~1.1.1",
+    "xtend": "~4.0.0"
+  },
+  "repository": {
+    "type": "git",
+    "url": "https://vanadium.googlesource.com/roadmap.js.syncbase"
+  },
+  "author": "",
+  "license": "BSD"
+}
diff --git a/src/app.js b/src/app.js
new file mode 100644
index 0000000..ba39037
--- /dev/null
+++ b/src/app.js
@@ -0,0 +1,82 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+var vanadium = require('vanadium');
+
+var Database = require('./nosql/database');
+var util = require('./util');
+var vdl = require('./gen-vdl/v.io/syncbase/v23/services/syncbase');
+
+var wireSignature = vdl.App.prototype._serviceDescription;
+
+module.exports = App;
+
+function App(parentFullName, relativeName) {
+  if (!(this instanceof App)) {
+    return new App(parentFullName, relativeName);
+  }
+
+  util.addNameProperties(this, parentFullName, relativeName);
+
+  // TODO(nlacasse): Use the prr module to simplify all the
+  // 'Object.defineProperty' calls scattered throughout the project.
+  // https://www.npmjs.com/package/prr
+
+  /**
+   * Caches the database wire object.
+   * @private
+   */
+  Object.defineProperty(this, '_wireObj', {
+    enumerable: false,
+    value: null,
+    writable: true
+  });
+}
+
+// noSqlDatabase returns the Database with the given relative name.
+// relativeName must not contain slashes. schema can be null or undefined
+// only if a schema was never set for the database in the first place.
+App.prototype.noSqlDatabase = function(relativeName, schema) {
+  return new Database(this.fullName, relativeName, schema);
+};
+
+// listDatabases returns a list of all database names.
+App.prototype.listDatabases = function(ctx, cb) {
+  util.getChildNames(ctx, this.fullName, cb);
+};
+
+// create creates this app.  If perms is empty, we inherit (copy) the Service
+// perms.
+App.prototype.create = function(ctx, perms, cb) {
+  this._wire(ctx).create(ctx, perms, cb);
+};
+
+// delete deletes this app.
+App.prototype.delete = function(ctx, cb) {
+  this._wire(ctx).delete(ctx, cb);
+};
+
+// exists returns true only if this app exists. Insufficient permissions cause
+// exists to return false instead of an error.
+App.prototype.exists = function(ctx, cb) {
+  this._wire(ctx).exists(ctx, cb);
+};
+
+App.prototype.getPermissions = function(ctx, cb) {
+  this._wire(ctx).getPermissions(ctx, cb);
+};
+
+App.prototype.setPermissions = function(ctx, perms, version, cb) {
+  this._wire(ctx).setPermissions(ctx, perms, version, cb);
+};
+
+App.prototype._wire = function(ctx) {
+  if (!this._wireObj) {
+    var rt = vanadium.runtimeForContext(ctx);
+    var client = rt.newClient();
+    this._wireObj = client.bindWithSignature(this.fullName, [wireSignature]);
+  }
+
+  return this._wireObj;
+};
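
For orientation, here is a minimal client-side sketch of the App API defined above. It assumes the vanadium.init(config, cb) and runtime.getContext() flow from the vanadium module, uses placeholder names ('my-syncbase', 'my-app', 'my-db'), and is an illustration of the call shapes rather than part of this change.

    var vanadium = require('vanadium');
    var App = require('./src/app');

    // Assumed init flow for the vanadium module; the empty config is a placeholder.
    vanadium.init({}, function(err, runtime) {
      if (err) { throw err; }
      var ctx = runtime.getContext();

      // parentFullName and relativeName are placeholders.
      var app = new App('my-syncbase', 'my-app');

      // Passing null perms inherits (copies) the Service perms, per the comment
      // on App.prototype.create above.
      app.create(ctx, null, function(err) {
        if (err) { throw err; }

        // No schema argument: allowed only if a schema was never set.
        // db.fullName assumes Database exposes fullName the same way App does.
        var db = app.noSqlDatabase('my-db');
        console.log('database full name:', db.fullName);

        app.listDatabases(ctx, function(err, names) {
          if (err) { throw err; }
          console.log('databases:', names);
        });
      });
    });
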
diff --git a/src/gen-vdl/v.io/syncbase/v23/services/syncbase/index.js b/src/gen-vdl/v.io/syncbase/v23/services/syncbase/index.js
new file mode 100644
index 0000000..3777938
--- /dev/null
+++ b/src/gen-vdl/v.io/syncbase/v23/services/syncbase/index.js
@@ -0,0 +1,255 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated by the vanadium vdl tool.
+var vdl = require('vanadium').vdl;
+var makeError = require('vanadium').verror.makeError;
+var actions = require('vanadium').verror.actions;
+var canonicalize = require('vanadium').vdl.canonicalize;
+
+
+
+
+
+var access = require('./../../../../v23/security/access');
+var permissions = require('./../../../../v23/services/permissions');
+
+module.exports = {};
+
+
+
+// Types:
+
+
+
+
+// Consts:
+
+
+
+// Errors:
+
+module.exports.InvalidNameError = makeError('v.io/syncbase/v23/services/syncbase.InvalidName', actions.NO_RETRY, {
+  'en': '{1:}{2:} invalid name: {3}',
+}, [
+  vdl.types.STRING,
+]);
+
+
+
+
+// Services:
+
+  
+    
+function Service(){}
+module.exports.Service = Service;
+
+    
+      
+Service.prototype.setPermissions = function(ctx, serverCall, perms, version) {
+  throw new Error('Method SetPermissions not implemented');
+};
+    
+      
+Service.prototype.getPermissions = function(ctx, serverCall) {
+  throw new Error('Method GetPermissions not implemented');
+};
+     
+
+    
+Service.prototype._serviceDescription = {
+  name: 'Service',
+  pkgPath: 'v.io/syncbase/v23/services/syncbase',
+  doc: "// Service represents a Vanadium Syncbase service.\n// Service.Glob operates over App names.",
+  embeds: [{
+      name: 'Object',
+      pkgPath: 'v.io/v23/services/permissions',
+      doc: "// Object provides access control for Vanadium objects.\n//\n// Vanadium services implementing dynamic access control would typically embed\n// this interface and tag additional methods defined by the service with one of\n// Admin, Read, Write, Resolve etc. For example, the VDL definition of the\n// object would be:\n//\n//   package mypackage\n//\n//   import \"v.io/v23/security/access\"\n//   import \"v.io/v23/services/permissions\"\n//\n//   type MyObject interface {\n//     permissions.Object\n//     MyRead() (string, error) {access.Read}\n//     MyWrite(string) error    {access.Write}\n//   }\n//\n// If the set of pre-defined tags is insufficient, services may define their\n// own tag type and annotate all methods with this new type.\n//\n// Instead of embedding this Object interface, define SetPermissions and\n// GetPermissions in their own interface. Authorization policies will typically\n// respect annotations of a single type. For example, the VDL definition of an\n// object would be:\n//\n//  package mypackage\n//\n//  import \"v.io/v23/security/access\"\n//\n//  type MyTag string\n//\n//  const (\n//    Blue = MyTag(\"Blue\")\n//    Red  = MyTag(\"Red\")\n//  )\n//\n//  type MyObject interface {\n//    MyMethod() (string, error) {Blue}\n//\n//    // Allow clients to change access via the access.Object interface:\n//    SetPermissions(perms access.Permissions, version string) error         {Red}\n//    GetPermissions() (perms access.Permissions, version string, err error) {Blue}\n//  }"
+    },
+    ],
+  methods: [
+    
+      
+    {
+    name: 'SetPermissions',
+    doc: "// SetPermissions replaces the current Permissions for an object.  version\n// allows for optional, optimistic concurrency control.  If non-empty,\n// version's value must come from GetPermissions.  If any client has\n// successfully called SetPermissions in the meantime, the version will be\n// stale and SetPermissions will fail.  If empty, SetPermissions performs an\n// unconditional update.\n//\n// Permissions objects are expected to be small.  It is up to the\n// implementation to define the exact limit, though it should probably be\n// around 100KB.  Large lists of principals can be represented concisely using\n// blessings.\n//\n// There is some ambiguity when calling SetPermissions on a mount point.\n// Does it affect the mount itself or does it affect the service endpoint\n// that the mount points to?  The chosen behavior is that it affects the\n// service endpoint.  To modify the mount point's Permissions, use\n// ResolveToMountTable to get an endpoint and call SetPermissions on that.\n// This means that clients must know when a name refers to a mount point to\n// change its Permissions.",
+    inArgs: [{
+      name: 'perms',
+      doc: "",
+      type: new access.Permissions()._type
+    },
+    {
+      name: 'version',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Admin", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'GetPermissions',
+    doc: "// GetPermissions returns the complete, current Permissions for an object. The\n// returned version can be passed to a subsequent call to SetPermissions for\n// optimistic concurrency control. A successful call to SetPermissions will\n// invalidate version, and the client must call GetPermissions again to get\n// the current version.",
+    inArgs: [],
+    outArgs: [{
+      name: 'perms',
+      doc: "",
+      type: new access.Permissions()._type
+    },
+    {
+      name: 'version',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Admin", true), new access.Tag()._type), ]
+  },
+     
+  ]
+};
+
+  
+    
+function App(){}
+module.exports.App = App;
+
+    
+      
+App.prototype.create = function(ctx, serverCall, perms) {
+  throw new Error('Method Create not implemented');
+};
+    
+      
+App.prototype.delete = function(ctx, serverCall) {
+  throw new Error('Method Delete not implemented');
+};
+    
+      
+App.prototype.exists = function(ctx, serverCall) {
+  throw new Error('Method Exists not implemented');
+};
+    
+      
+App.prototype.setPermissions = function(ctx, serverCall, perms, version) {
+  throw new Error('Method SetPermissions not implemented');
+};
+    
+      
+App.prototype.getPermissions = function(ctx, serverCall) {
+  throw new Error('Method GetPermissions not implemented');
+};
+     
+
+    
+App.prototype._serviceDescription = {
+  name: 'App',
+  pkgPath: 'v.io/syncbase/v23/services/syncbase',
+  doc: "// App represents the data for a specific app instance (possibly a combination\n// of user, device, and app).\n// App.Glob operates over Database names.",
+  embeds: [{
+      name: 'Object',
+      pkgPath: 'v.io/v23/services/permissions',
+      doc: "// Object provides access control for Vanadium objects.\n//\n// Vanadium services implementing dynamic access control would typically embed\n// this interface and tag additional methods defined by the service with one of\n// Admin, Read, Write, Resolve etc. For example, the VDL definition of the\n// object would be:\n//\n//   package mypackage\n//\n//   import \"v.io/v23/security/access\"\n//   import \"v.io/v23/services/permissions\"\n//\n//   type MyObject interface {\n//     permissions.Object\n//     MyRead() (string, error) {access.Read}\n//     MyWrite(string) error    {access.Write}\n//   }\n//\n// If the set of pre-defined tags is insufficient, services may define their\n// own tag type and annotate all methods with this new type.\n//\n// Instead of embedding this Object interface, define SetPermissions and\n// GetPermissions in their own interface. Authorization policies will typically\n// respect annotations of a single type. For example, the VDL definition of an\n// object would be:\n//\n//  package mypackage\n//\n//  import \"v.io/v23/security/access\"\n//\n//  type MyTag string\n//\n//  const (\n//    Blue = MyTag(\"Blue\")\n//    Red  = MyTag(\"Red\")\n//  )\n//\n//  type MyObject interface {\n//    MyMethod() (string, error) {Blue}\n//\n//    // Allow clients to change access via the access.Object interface:\n//    SetPermissions(perms access.Permissions, version string) error         {Red}\n//    GetPermissions() (perms access.Permissions, version string, err error) {Blue}\n//  }"
+    },
+    ],
+  methods: [
+    
+      
+    {
+    name: 'Create',
+    doc: "// Create creates this App.\n// If perms is nil, we inherit (copy) the Service perms.\n// Create requires the caller to have Write permission at the Service.",
+    inArgs: [{
+      name: 'perms',
+      doc: "",
+      type: new access.Permissions()._type
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Write", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'Delete',
+    doc: "// Delete deletes this App.",
+    inArgs: [],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Write", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'Exists',
+    doc: "// Exists returns true only if this App exists. Insufficient permissions\n// cause Exists to return false instead of an error.",
+    inArgs: [],
+    outArgs: [{
+      name: '',
+      doc: "",
+      type: vdl.types.BOOL
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'SetPermissions',
+    doc: "// SetPermissions replaces the current Permissions for an object.  version\n// allows for optional, optimistic concurrency control.  If non-empty,\n// version's value must come from GetPermissions.  If any client has\n// successfully called SetPermissions in the meantime, the version will be\n// stale and SetPermissions will fail.  If empty, SetPermissions performs an\n// unconditional update.\n//\n// Permissions objects are expected to be small.  It is up to the\n// implementation to define the exact limit, though it should probably be\n// around 100KB.  Large lists of principals can be represented concisely using\n// blessings.\n//\n// There is some ambiguity when calling SetPermissions on a mount point.\n// Does it affect the mount itself or does it affect the service endpoint\n// that the mount points to?  The chosen behavior is that it affects the\n// service endpoint.  To modify the mount point's Permissions, use\n// ResolveToMountTable to get an endpoint and call SetPermissions on that.\n// This means that clients must know when a name refers to a mount point to\n// change its Permissions.",
+    inArgs: [{
+      name: 'perms',
+      doc: "",
+      type: new access.Permissions()._type
+    },
+    {
+      name: 'version',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Admin", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'GetPermissions',
+    doc: "// GetPermissions returns the complete, current Permissions for an object. The\n// returned version can be passed to a subsequent call to SetPermissions for\n// optimistic concurrency control. A successful call to SetPermissions will\n// invalidate version, and the client must call GetPermissions again to get\n// the current version.",
+    inArgs: [],
+    outArgs: [{
+      name: 'perms',
+      doc: "",
+      type: new access.Permissions()._type
+    },
+    {
+      name: 'version',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Admin", true), new access.Tag()._type), ]
+  },
+     
+  ]
+};
+
+   
+ 
+
+
diff --git a/src/gen-vdl/v.io/syncbase/v23/services/syncbase/nosql/index.js b/src/gen-vdl/v.io/syncbase/v23/services/syncbase/nosql/index.js
new file mode 100644
index 0000000..168e415
--- /dev/null
+++ b/src/gen-vdl/v.io/syncbase/v23/services/syncbase/nosql/index.js
@@ -0,0 +1,2051 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated by the vanadium vdl tool.
+var vdl = require('vanadium').vdl;
+var makeError = require('vanadium').verror.makeError;
+var actions = require('vanadium').verror.actions;
+var canonicalize = require('vanadium').vdl.canonicalize;
+
+
+
+
+
+var access = require('./../../../../../v23/security/access');
+var permissions = require('./../../../../../v23/services/permissions');
+var watch = require('./../../../../../v23/services/watch');
+
+module.exports = {};
+
+
+
+// Types:
+var _type1 = new vdl.Type();
+var _type2 = new vdl.Type();
+var _type3 = new vdl.Type();
+var _type4 = new vdl.Type();
+var _type5 = new vdl.Type();
+var _type6 = new vdl.Type();
+var _type7 = new vdl.Type();
+var _typeBatchOptions = new vdl.Type();
+var _typeBlobFetchState = new vdl.Type();
+var _typeBlobFetchStatus = new vdl.Type();
+var _typeBlobRef = new vdl.Type();
+var _typeCrPolicy = new vdl.Type();
+var _typeCrRule = new vdl.Type();
+var _typeKeyValue = new vdl.Type();
+var _typePrefixPermissions = new vdl.Type();
+var _typeResolverType = new vdl.Type();
+var _typeSchemaMetadata = new vdl.Type();
+var _typeStoreChange = new vdl.Type();
+var _typeSyncGroupMemberInfo = new vdl.Type();
+var _typeSyncGroupSpec = new vdl.Type();
+_type1.kind = vdl.kind.LIST;
+_type1.name = "";
+_type1.elem = vdl.types.STRING;
+_type2.kind = vdl.kind.MAP;
+_type2.name = "";
+_type2.elem = _typeSyncGroupMemberInfo;
+_type2.key = vdl.types.STRING;
+_type3.kind = vdl.kind.LIST;
+_type3.name = "";
+_type3.elem = vdl.types.BYTE;
+_type4.kind = vdl.kind.LIST;
+_type4.name = "";
+_type4.elem = _typeCrRule;
+_type5.kind = vdl.kind.OPTIONAL;
+_type5.name = "";
+_type5.elem = _typeSchemaMetadata;
+_type6.kind = vdl.kind.LIST;
+_type6.name = "";
+_type6.elem = vdl.types.ANY;
+_type7.kind = vdl.kind.LIST;
+_type7.name = "";
+_type7.elem = _typePrefixPermissions;
+_typeBatchOptions.kind = vdl.kind.STRUCT;
+_typeBatchOptions.name = "v.io/syncbase/v23/services/syncbase/nosql.BatchOptions";
+_typeBatchOptions.fields = [{name: "Hint", type: vdl.types.STRING}, {name: "ReadOnly", type: vdl.types.BOOL}];
+_typeBlobFetchState.kind = vdl.kind.ENUM;
+_typeBlobFetchState.name = "v.io/syncbase/v23/services/syncbase/nosql.BlobFetchState";
+_typeBlobFetchState.labels = ["Pending", "Locating", "Fetching", "Done"];
+_typeBlobFetchStatus.kind = vdl.kind.STRUCT;
+_typeBlobFetchStatus.name = "v.io/syncbase/v23/services/syncbase/nosql.BlobFetchStatus";
+_typeBlobFetchStatus.fields = [{name: "State", type: _typeBlobFetchState}, {name: "Received", type: vdl.types.INT64}, {name: "Total", type: vdl.types.INT64}];
+_typeBlobRef.kind = vdl.kind.STRING;
+_typeBlobRef.name = "v.io/syncbase/v23/services/syncbase/nosql.BlobRef";
+_typeCrPolicy.kind = vdl.kind.STRUCT;
+_typeCrPolicy.name = "v.io/syncbase/v23/services/syncbase/nosql.CrPolicy";
+_typeCrPolicy.fields = [{name: "Rules", type: _type4}];
+_typeCrRule.kind = vdl.kind.STRUCT;
+_typeCrRule.name = "v.io/syncbase/v23/services/syncbase/nosql.CrRule";
+_typeCrRule.fields = [{name: "TableName", type: vdl.types.STRING}, {name: "KeyPrefix", type: vdl.types.STRING}, {name: "Type", type: vdl.types.STRING}, {name: "Resolver", type: _typeResolverType}];
+_typeKeyValue.kind = vdl.kind.STRUCT;
+_typeKeyValue.name = "v.io/syncbase/v23/services/syncbase/nosql.KeyValue";
+_typeKeyValue.fields = [{name: "Key", type: vdl.types.STRING}, {name: "Value", type: _type3}];
+_typePrefixPermissions.kind = vdl.kind.STRUCT;
+_typePrefixPermissions.name = "v.io/syncbase/v23/services/syncbase/nosql.PrefixPermissions";
+_typePrefixPermissions.fields = [{name: "Prefix", type: vdl.types.STRING}, {name: "Perms", type: new access.Permissions()._type}];
+_typeResolverType.kind = vdl.kind.ENUM;
+_typeResolverType.name = "v.io/syncbase/v23/services/syncbase/nosql.ResolverType";
+_typeResolverType.labels = ["LastWins", "AppResolves", "Defer"];
+_typeSchemaMetadata.kind = vdl.kind.STRUCT;
+_typeSchemaMetadata.name = "v.io/syncbase/v23/services/syncbase/nosql.SchemaMetadata";
+_typeSchemaMetadata.fields = [{name: "Version", type: vdl.types.INT32}, {name: "Policy", type: _typeCrPolicy}];
+_typeStoreChange.kind = vdl.kind.STRUCT;
+_typeStoreChange.name = "v.io/syncbase/v23/services/syncbase/nosql.StoreChange";
+_typeStoreChange.fields = [{name: "Value", type: _type3}, {name: "FromSync", type: vdl.types.BOOL}];
+_typeSyncGroupMemberInfo.kind = vdl.kind.STRUCT;
+_typeSyncGroupMemberInfo.name = "v.io/syncbase/v23/services/syncbase/nosql.SyncGroupMemberInfo";
+_typeSyncGroupMemberInfo.fields = [{name: "SyncPriority", type: vdl.types.BYTE}];
+_typeSyncGroupSpec.kind = vdl.kind.STRUCT;
+_typeSyncGroupSpec.name = "v.io/syncbase/v23/services/syncbase/nosql.SyncGroupSpec";
+_typeSyncGroupSpec.fields = [{name: "Description", type: vdl.types.STRING}, {name: "Perms", type: new access.Permissions()._type}, {name: "Prefixes", type: _type1}, {name: "MountTables", type: _type1}, {name: "IsPrivate", type: vdl.types.BOOL}];
+_type1.freeze();
+_type2.freeze();
+_type3.freeze();
+_type4.freeze();
+_type5.freeze();
+_type6.freeze();
+_type7.freeze();
+_typeBatchOptions.freeze();
+_typeBlobFetchState.freeze();
+_typeBlobFetchStatus.freeze();
+_typeBlobRef.freeze();
+_typeCrPolicy.freeze();
+_typeCrRule.freeze();
+_typeKeyValue.freeze();
+_typePrefixPermissions.freeze();
+_typeResolverType.freeze();
+_typeSchemaMetadata.freeze();
+_typeStoreChange.freeze();
+_typeSyncGroupMemberInfo.freeze();
+_typeSyncGroupSpec.freeze();
+module.exports.BatchOptions = (vdl.registry.lookupOrCreateConstructor(_typeBatchOptions));
+module.exports.BlobFetchState = {
+  PENDING: canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(_typeBlobFetchState))('Pending', true), _typeBlobFetchState),
+  LOCATING: canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(_typeBlobFetchState))('Locating', true), _typeBlobFetchState),
+  FETCHING: canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(_typeBlobFetchState))('Fetching', true), _typeBlobFetchState),
+  DONE: canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(_typeBlobFetchState))('Done', true), _typeBlobFetchState),
+};
+module.exports.BlobFetchStatus = (vdl.registry.lookupOrCreateConstructor(_typeBlobFetchStatus));
+module.exports.BlobRef = (vdl.registry.lookupOrCreateConstructor(_typeBlobRef));
+module.exports.CrPolicy = (vdl.registry.lookupOrCreateConstructor(_typeCrPolicy));
+module.exports.CrRule = (vdl.registry.lookupOrCreateConstructor(_typeCrRule));
+module.exports.KeyValue = (vdl.registry.lookupOrCreateConstructor(_typeKeyValue));
+module.exports.PrefixPermissions = (vdl.registry.lookupOrCreateConstructor(_typePrefixPermissions));
+module.exports.ResolverType = {
+  LAST_WINS: canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(_typeResolverType))('LastWins', true), _typeResolverType),
+  APP_RESOLVES: canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(_typeResolverType))('AppResolves', true), _typeResolverType),
+  DEFER: canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(_typeResolverType))('Defer', true), _typeResolverType),
+};
+module.exports.SchemaMetadata = (vdl.registry.lookupOrCreateConstructor(_typeSchemaMetadata));
+module.exports.StoreChange = (vdl.registry.lookupOrCreateConstructor(_typeStoreChange));
+module.exports.SyncGroupMemberInfo = (vdl.registry.lookupOrCreateConstructor(_typeSyncGroupMemberInfo));
+module.exports.SyncGroupSpec = (vdl.registry.lookupOrCreateConstructor(_typeSyncGroupSpec));
+
+
+
+
+// Consts:
+
+  module.exports.NullBlobRef = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(_typeBlobRef))("", true), _typeBlobRef);
+
+
+
+// Errors:
+
+module.exports.BoundToBatchError = makeError('v.io/syncbase/v23/services/syncbase/nosql.BoundToBatch', actions.NO_RETRY, {
+  'en': '{1:}{2:} bound to batch',
+}, [
+]);
+
+
+module.exports.NotBoundToBatchError = makeError('v.io/syncbase/v23/services/syncbase/nosql.NotBoundToBatch', actions.NO_RETRY, {
+  'en': '{1:}{2:} not bound to batch',
+}, [
+]);
+
+
+module.exports.ReadOnlyBatchError = makeError('v.io/syncbase/v23/services/syncbase/nosql.ReadOnlyBatch', actions.NO_RETRY, {
+  'en': '{1:}{2:} batch is read-only',
+}, [
+]);
+
+
+module.exports.ConcurrentBatchError = makeError('v.io/syncbase/v23/services/syncbase/nosql.ConcurrentBatch', actions.NO_RETRY, {
+  'en': '{1:}{2:} concurrent batch',
+}, [
+]);
+
+
+module.exports.SchemaVersionMismatchError = makeError('v.io/syncbase/v23/services/syncbase/nosql.SchemaVersionMismatch', actions.NO_RETRY, {
+  'en': '{1:}{2:} actual schema version does not match the provided one',
+}, [
+]);
+
+
+module.exports.BlobNotCommittedError = makeError('v.io/syncbase/v23/services/syncbase/nosql.BlobNotCommitted', actions.NO_RETRY, {
+  'en': '{1:}{2:} blob is not yet committed',
+}, [
+]);
+
+
+
+
+// Services:
+
+  
+    
+function DatabaseWatcher(){}
+module.exports.DatabaseWatcher = DatabaseWatcher;
+
+    
+      
+DatabaseWatcher.prototype.getResumeMarker = function(ctx, serverCall) {
+  throw new Error('Method GetResumeMarker not implemented');
+};
+    
+      
+DatabaseWatcher.prototype.watchGlob = function(ctx, serverCall, req) {
+  throw new Error('Method WatchGlob not implemented');
+};
+     
+
+    
+DatabaseWatcher.prototype._serviceDescription = {
+  name: 'DatabaseWatcher',
+  pkgPath: 'v.io/syncbase/v23/services/syncbase/nosql',
+  doc: "// DatabaseWatcher allows a client to watch for updates in the database.\n// For each watched request, the client will receive a reliable stream of watch\n// events without re-ordering. See watch.GlobWatcher for a detailed explanation\n// of the behavior.\n// TODO(rogulenko): Currently the only supported watch patterns are\n// 'table/row*'. Consider changing that.\n//\n// The watching is done by starting a streaming RPC. The argument to the RPC\n// contains the ResumeMarker that points to a particular place in the database\n// event log. The result stream consists of a never-ending sequence of Change\n// messages (until the call fails or is canceled). Each Change contains the\n// Name field in the form \"<tableName>/<rowKey>\" and the Value field of the\n// StoreChange type. If the client has no access to a row specified in a change,\n// that change is excluded from the result stream.\n//\n// The DatabaseWatcher is designed to be used in the following way:\n// 1) begin a read-only batch\n// 2) read all information your app needs\n// 3) read the ResumeMarker\n// 4) abort the batch\n// 5) start watching changes to the data using the ResumeMarker\n// In this configuration the client doesn't miss any changes.",
+  embeds: [{
+      name: 'GlobWatcher',
+      pkgPath: 'v.io/v23/services/watch',
+      doc: "// GlobWatcher allows a client to receive updates for changes to objects\n// that match a pattern.  See the package comments for details."
+    },
+    ],
+  methods: [
+    
+      
+    {
+    name: 'GetResumeMarker',
+    doc: "// GetResumeMarker returns the ResumeMarker that points to the current end\n// of the event log. GetResumeMarker() can be called on a batch.",
+    inArgs: [],
+    outArgs: [{
+      name: '',
+      doc: "",
+      type: new watch.ResumeMarker()._type
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'WatchGlob',
+    doc: "// WatchGlob returns a stream of changes that match a pattern.",
+    inArgs: [{
+      name: 'req',
+      doc: "",
+      type: new watch.GlobRequest()._type
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: {
+      name: '',
+      doc: '',
+      type: new watch.Change()._type
+    },
+    tags: [canonicalize.reduce(new access.Tag("Resolve", true), new access.Tag()._type), ]
+  },
+     
+  ]
+};
+
+  
+    
+function SyncGroupManager(){}
+module.exports.SyncGroupManager = SyncGroupManager;
+
+    
+      
+SyncGroupManager.prototype.getSyncGroupNames = function(ctx, serverCall) {
+  throw new Error('Method GetSyncGroupNames not implemented');
+};
+    
+      
+SyncGroupManager.prototype.createSyncGroup = function(ctx, serverCall, sgName, spec, myInfo) {
+  throw new Error('Method CreateSyncGroup not implemented');
+};
+    
+      
+SyncGroupManager.prototype.joinSyncGroup = function(ctx, serverCall, sgName, myInfo) {
+  throw new Error('Method JoinSyncGroup not implemented');
+};
+    
+      
+SyncGroupManager.prototype.leaveSyncGroup = function(ctx, serverCall, sgName) {
+  throw new Error('Method LeaveSyncGroup not implemented');
+};
+    
+      
+SyncGroupManager.prototype.destroySyncGroup = function(ctx, serverCall, sgName) {
+  throw new Error('Method DestroySyncGroup not implemented');
+};
+    
+      
+SyncGroupManager.prototype.ejectFromSyncGroup = function(ctx, serverCall, sgName, member) {
+  throw new Error('Method EjectFromSyncGroup not implemented');
+};
+    
+      
+SyncGroupManager.prototype.getSyncGroupSpec = function(ctx, serverCall, sgName) {
+  throw new Error('Method GetSyncGroupSpec not implemented');
+};
+    
+      
+SyncGroupManager.prototype.setSyncGroupSpec = function(ctx, serverCall, sgName, spec, version) {
+  throw new Error('Method SetSyncGroupSpec not implemented');
+};
+    
+      
+SyncGroupManager.prototype.getSyncGroupMembers = function(ctx, serverCall, sgName) {
+  throw new Error('Method GetSyncGroupMembers not implemented');
+};
+     
+
+    
+SyncGroupManager.prototype._serviceDescription = {
+  name: 'SyncGroupManager',
+  pkgPath: 'v.io/syncbase/v23/services/syncbase/nosql',
+  doc: "// SyncGroupManager is the interface for SyncGroup operations.\n// TODO(hpucha): Add blessings to create/join and add a refresh method.",
+  embeds: [],
+  methods: [
+    
+      
+    {
+    name: 'GetSyncGroupNames',
+    doc: "// GetSyncGroupNames returns the global names of all SyncGroups attached to\n// this database.",
+    inArgs: [],
+    outArgs: [{
+      name: '',
+      doc: "",
+      type: _type1
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'CreateSyncGroup',
+    doc: "// CreateSyncGroup creates a new SyncGroup with the given spec.\n//\n// Requires: Client must have at least Read access on the Database; prefix ACL\n// must exist at each SyncGroup prefix; Client must have at least Read access\n// on each of these prefix ACLs.",
+    inArgs: [{
+      name: 'sgName',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    {
+      name: 'spec',
+      doc: "",
+      type: _typeSyncGroupSpec
+    },
+    {
+      name: 'myInfo',
+      doc: "",
+      type: _typeSyncGroupMemberInfo
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'JoinSyncGroup',
+    doc: "// JoinSyncGroup joins the SyncGroup.\n//\n// Requires: Client must have at least Read access on the Database and on the\n// SyncGroup ACL.",
+    inArgs: [{
+      name: 'sgName',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    {
+      name: 'myInfo',
+      doc: "",
+      type: _typeSyncGroupMemberInfo
+    },
+    ],
+    outArgs: [{
+      name: 'spec',
+      doc: "",
+      type: _typeSyncGroupSpec
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'LeaveSyncGroup',
+    doc: "// LeaveSyncGroup leaves the SyncGroup. Previously synced data will continue\n// to be available.\n//\n// Requires: Client must have at least Read access on the Database.",
+    inArgs: [{
+      name: 'sgName',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'DestroySyncGroup',
+    doc: "// DestroySyncGroup destroys the SyncGroup. Previously synced data will\n// continue to be available to all members.\n//\n// Requires: Client must have at least Read access on the Database, and must\n// have Admin access on the SyncGroup ACL.",
+    inArgs: [{
+      name: 'sgName',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'EjectFromSyncGroup',
+    doc: "// EjectFromSyncGroup ejects a member from the SyncGroup. The ejected member\n// will not be able to sync further, but will retain any data it has already\n// synced.\n//\n// Requires: Client must have at least Read access on the Database, and must\n// have Admin access on the SyncGroup ACL.",
+    inArgs: [{
+      name: 'sgName',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    {
+      name: 'member',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'GetSyncGroupSpec',
+    doc: "// GetSyncGroupSpec gets the SyncGroup spec. version allows for atomic\n// read-modify-write of the spec - see comment for SetSyncGroupSpec.\n//\n// Requires: Client must have at least Read access on the Database and on the\n// SyncGroup ACL.",
+    inArgs: [{
+      name: 'sgName',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    outArgs: [{
+      name: 'spec',
+      doc: "",
+      type: _typeSyncGroupSpec
+    },
+    {
+      name: 'version',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'SetSyncGroupSpec',
+    doc: "// SetSyncGroupSpec sets the SyncGroup spec. version may be either empty or\n// the value from a previous Get. If not empty, Set will only succeed if the\n// current version matches the specified one.\n//\n// Requires: Client must have at least Read access on the Database, and must\n// have Admin access on the SyncGroup ACL.",
+    inArgs: [{
+      name: 'sgName',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    {
+      name: 'spec',
+      doc: "",
+      type: _typeSyncGroupSpec
+    },
+    {
+      name: 'version',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'GetSyncGroupMembers',
+    doc: "// GetSyncGroupMembers gets the info objects for members of the SyncGroup.\n//\n// Requires: Client must have at least Read access on the Database and on the\n// SyncGroup ACL.",
+    inArgs: [{
+      name: 'sgName',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    outArgs: [{
+      name: 'members',
+      doc: "",
+      type: _type2
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+     
+  ]
+};
+
+  
+    
+function BlobManager(){}
+module.exports.BlobManager = BlobManager;
+
+    
+      
+BlobManager.prototype.createBlob = function(ctx, serverCall) {
+  throw new Error('Method CreateBlob not implemented');
+};
+    
+      
+BlobManager.prototype.putBlob = function(ctx, serverCall, br) {
+  throw new Error('Method PutBlob not implemented');
+};
+    
+      
+BlobManager.prototype.commitBlob = function(ctx, serverCall, br) {
+  throw new Error('Method CommitBlob not implemented');
+};
+    
+      
+BlobManager.prototype.getBlobSize = function(ctx, serverCall, br) {
+  throw new Error('Method GetBlobSize not implemented');
+};
+    
+      
+BlobManager.prototype.deleteBlob = function(ctx, serverCall, br) {
+  throw new Error('Method DeleteBlob not implemented');
+};
+    
+      
+BlobManager.prototype.getBlob = function(ctx, serverCall, br, offset) {
+  throw new Error('Method GetBlob not implemented');
+};
+    
+      
+BlobManager.prototype.fetchBlob = function(ctx, serverCall, br, priority) {
+  throw new Error('Method FetchBlob not implemented');
+};
+    
+      
+BlobManager.prototype.pinBlob = function(ctx, serverCall, br) {
+  throw new Error('Method PinBlob not implemented');
+};
+    
+      
+BlobManager.prototype.unpinBlob = function(ctx, serverCall, br) {
+  throw new Error('Method UnpinBlob not implemented');
+};
+    
+      
+BlobManager.prototype.keepBlob = function(ctx, serverCall, br, rank) {
+  throw new Error('Method KeepBlob not implemented');
+};
+     
+
+    
+BlobManager.prototype._serviceDescription = {
+  name: 'BlobManager',
+  pkgPath: 'v.io/syncbase/v23/services/syncbase/nosql',
+  doc: "// BlobManager is the interface for blob operations.",
+  embeds: [],
+  methods: [
+    
+      
+    {
+    name: 'CreateBlob',
+    doc: "// API for resumable blob creation (append-only). After commit, a blob\n// is immutable. Before commit, the BlobRef can be used with PutBlob,\n// GetBlobSize, DeleteBlob, and CommitBlob.\n//\n// CreateBlob returns a BlobRef for a newly created blob.",
+    inArgs: [],
+    outArgs: [{
+      name: 'br',
+      doc: "",
+      type: _typeBlobRef
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Write", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'PutBlob',
+    doc: "// PutBlob appends the byte stream to the blob.",
+    inArgs: [{
+      name: 'br',
+      doc: "",
+      type: _typeBlobRef
+    },
+    ],
+    outArgs: [],
+    inStream: {
+      name: '',
+      doc: '',
+      type: _type3
+    },
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Write", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'CommitBlob',
+    doc: "// CommitBlob marks the blob as immutable.",
+    inArgs: [{
+      name: 'br',
+      doc: "",
+      type: _typeBlobRef
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Write", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'GetBlobSize',
+    doc: "// GetBlobSize returns the count of bytes written as part of the blob\n// (committed or uncommitted).",
+    inArgs: [{
+      name: 'br',
+      doc: "",
+      type: _typeBlobRef
+    },
+    ],
+    outArgs: [{
+      name: '',
+      doc: "",
+      type: vdl.types.INT64
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'DeleteBlob',
+    doc: "// DeleteBlob locally deletes the blob (committed or uncommitted).",
+    inArgs: [{
+      name: 'br',
+      doc: "",
+      type: _typeBlobRef
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Write", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'GetBlob',
+    doc: "// GetBlob returns the byte stream from a committed blob starting at offset.",
+    inArgs: [{
+      name: 'br',
+      doc: "",
+      type: _typeBlobRef
+    },
+    {
+      name: 'offset',
+      doc: "",
+      type: vdl.types.INT64
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: {
+      name: '',
+      doc: '',
+      type: _type3
+    },
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'FetchBlob',
+    doc: "// FetchBlob initiates fetching a blob if not locally found. priority\n// controls the network priority of the blob. Higher priority blobs are\n// fetched before the lower priority ones. However an ongoing blob\n// transfer is not interrupted. Status updates are streamed back to the\n// client as fetch is in progress.",
+    inArgs: [{
+      name: 'br',
+      doc: "",
+      type: _typeBlobRef
+    },
+    {
+      name: 'priority',
+      doc: "",
+      type: vdl.types.UINT64
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: {
+      name: '',
+      doc: '',
+      type: _typeBlobFetchStatus
+    },
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'PinBlob',
+    doc: "// PinBlob locally pins the blob so that it is not evicted.",
+    inArgs: [{
+      name: 'br',
+      doc: "",
+      type: _typeBlobRef
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Write", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'UnpinBlob',
+    doc: "// UnpinBlob locally unpins the blob so that it can be evicted if needed.",
+    inArgs: [{
+      name: 'br',
+      doc: "",
+      type: _typeBlobRef
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Write", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'KeepBlob',
+    doc: "// KeepBlob locally caches the blob with the specified rank. Lower\n// ranked blobs are more eagerly evicted.",
+    inArgs: [{
+      name: 'br',
+      doc: "",
+      type: _typeBlobRef
+    },
+    {
+      name: 'rank',
+      doc: "",
+      type: vdl.types.UINT64
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Write", true), new access.Tag()._type), ]
+  },
+     
+  ]
+};
+
+  
+    
+function SchemaManager(){}
+module.exports.SchemaManager = SchemaManager;
+
+    
+      
+SchemaManager.prototype.getSchemaMetadata = function(ctx, serverCall) {
+  throw new Error('Method GetSchemaMetadata not implemented');
+};
+    
+      
+SchemaManager.prototype.setSchemaMetadata = function(ctx, serverCall, metadata) {
+  throw new Error('Method SetSchemaMetadata not implemented');
+};
+     
+
+    
+SchemaManager.prototype._serviceDescription = {
+  name: 'SchemaManager',
+  pkgPath: 'v.io/syncbase/v23/services/syncbase/nosql',
+  doc: "// SchemaManager implements the API for managing schema metadata attached\n// to a Database.",
+  embeds: [],
+  methods: [
+    
+      
+    {
+    name: 'GetSchemaMetadata',
+    doc: "// GetSchemaMetadata retrieves schema metadata for this database.\n//\n// Requires: Client must have at least Read access on the Database.",
+    inArgs: [],
+    outArgs: [{
+      name: '',
+      doc: "",
+      type: _typeSchemaMetadata
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'SetSchemaMetadata',
+    doc: "// SetSchemaMetadata stores schema metadata for this database.\n//\n// Requires: Client must have at least Write access on the Database.",
+    inArgs: [{
+      name: 'metadata',
+      doc: "",
+      type: _typeSchemaMetadata
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Write", true), new access.Tag()._type), ]
+  },
+     
+  ]
+};
+
+  
+    
+function Database(){}
+module.exports.Database = Database;
+
+    
+      
+Database.prototype.create = function(ctx, serverCall, metadata, perms) {
+  throw new Error('Method Create not implemented');
+};
+    
+      
+Database.prototype.delete = function(ctx, serverCall, schemaVersion) {
+  throw new Error('Method Delete not implemented');
+};
+    
+      
+Database.prototype.exists = function(ctx, serverCall, schemaVersion) {
+  throw new Error('Method Exists not implemented');
+};
+    
+      
+Database.prototype.exec = function(ctx, serverCall, schemaVersion, query) {
+  throw new Error('Method Exec not implemented');
+};
+    
+      
+Database.prototype.beginBatch = function(ctx, serverCall, schemaVersion, bo) {
+  throw new Error('Method BeginBatch not implemented');
+};
+    
+      
+Database.prototype.commit = function(ctx, serverCall, schemaVersion) {
+  throw new Error('Method Commit not implemented');
+};
+    
+      
+Database.prototype.abort = function(ctx, serverCall, schemaVersion) {
+  throw new Error('Method Abort not implemented');
+};
+    
+      
+Database.prototype.setPermissions = function(ctx, serverCall, perms, version) {
+  throw new Error('Method SetPermissions not implemented');
+};
+    
+      
+Database.prototype.getPermissions = function(ctx, serverCall) {
+  throw new Error('Method GetPermissions not implemented');
+};
+    
+      
+Database.prototype.getResumeMarker = function(ctx, serverCall) {
+  throw new Error('Method GetResumeMarker not implemented');
+};
+    
+      
+Database.prototype.watchGlob = function(ctx, serverCall, req) {
+  throw new Error('Method WatchGlob not implemented');
+};
+    
+      
+Database.prototype.getSyncGroupNames = function(ctx, serverCall) {
+  throw new Error('Method GetSyncGroupNames not implemented');
+};
+    
+      
+Database.prototype.createSyncGroup = function(ctx, serverCall, sgName, spec, myInfo) {
+  throw new Error('Method CreateSyncGroup not implemented');
+};
+    
+      
+Database.prototype.joinSyncGroup = function(ctx, serverCall, sgName, myInfo) {
+  throw new Error('Method JoinSyncGroup not implemented');
+};
+    
+      
+Database.prototype.leaveSyncGroup = function(ctx, serverCall, sgName) {
+  throw new Error('Method LeaveSyncGroup not implemented');
+};
+    
+      
+Database.prototype.destroySyncGroup = function(ctx, serverCall, sgName) {
+  throw new Error('Method DestroySyncGroup not implemented');
+};
+    
+      
+Database.prototype.ejectFromSyncGroup = function(ctx, serverCall, sgName, member) {
+  throw new Error('Method EjectFromSyncGroup not implemented');
+};
+    
+      
+Database.prototype.getSyncGroupSpec = function(ctx, serverCall, sgName) {
+  throw new Error('Method GetSyncGroupSpec not implemented');
+};
+    
+      
+Database.prototype.setSyncGroupSpec = function(ctx, serverCall, sgName, spec, version) {
+  throw new Error('Method SetSyncGroupSpec not implemented');
+};
+    
+      
+Database.prototype.getSyncGroupMembers = function(ctx, serverCall, sgName) {
+  throw new Error('Method GetSyncGroupMembers not implemented');
+};
+    
+      
+Database.prototype.createBlob = function(ctx, serverCall) {
+  throw new Error('Method CreateBlob not implemented');
+};
+    
+      
+Database.prototype.putBlob = function(ctx, serverCall, br) {
+  throw new Error('Method PutBlob not implemented');
+};
+    
+      
+Database.prototype.commitBlob = function(ctx, serverCall, br) {
+  throw new Error('Method CommitBlob not implemented');
+};
+    
+      
+Database.prototype.getBlobSize = function(ctx, serverCall, br) {
+  throw new Error('Method GetBlobSize not implemented');
+};
+    
+      
+Database.prototype.deleteBlob = function(ctx, serverCall, br) {
+  throw new Error('Method DeleteBlob not implemented');
+};
+    
+      
+Database.prototype.getBlob = function(ctx, serverCall, br, offset) {
+  throw new Error('Method GetBlob not implemented');
+};
+    
+      
+Database.prototype.fetchBlob = function(ctx, serverCall, br, priority) {
+  throw new Error('Method FetchBlob not implemented');
+};
+    
+      
+Database.prototype.pinBlob = function(ctx, serverCall, br) {
+  throw new Error('Method PinBlob not implemented');
+};
+    
+      
+Database.prototype.unpinBlob = function(ctx, serverCall, br) {
+  throw new Error('Method UnpinBlob not implemented');
+};
+    
+      
+Database.prototype.keepBlob = function(ctx, serverCall, br, rank) {
+  throw new Error('Method KeepBlob not implemented');
+};
+    
+      
+Database.prototype.getSchemaMetadata = function(ctx, serverCall) {
+  throw new Error('Method GetSchemaMetadata not implemented');
+};
+    
+      
+Database.prototype.setSchemaMetadata = function(ctx, serverCall, metadata) {
+  throw new Error('Method SetSchemaMetadata not implemented');
+};
+     
+
+    
+Database.prototype._serviceDescription = {
+  name: 'Database',
+  pkgPath: 'v.io/syncbase/v23/services/syncbase/nosql',
+  doc: "// Database represents a collection of Tables. Batches, queries, sync, watch,\n// etc. all operate at the Database level.\n// Database.Glob operates over Table names.\n// Param schemaVersion is the version number that the client expects the database\n// to be at. To disable schema version checking, pass -1.\n//\n// TODO(sadovsky): Add Watch method.",
+  embeds: [{
+      name: 'Object',
+      pkgPath: 'v.io/v23/services/permissions',
+      doc: "// Object provides access control for Vanadium objects.\n//\n// Vanadium services implementing dynamic access control would typically embed\n// this interface and tag additional methods defined by the service with one of\n// Admin, Read, Write, Resolve etc. For example, the VDL definition of the\n// object would be:\n//\n//   package mypackage\n//\n//   import \"v.io/v23/security/access\"\n//   import \"v.io/v23/services/permissions\"\n//\n//   type MyObject interface {\n//     permissions.Object\n//     MyRead() (string, error) {access.Read}\n//     MyWrite(string) error    {access.Write}\n//   }\n//\n// If the set of pre-defined tags is insufficient, services may define their\n// own tag type and annotate all methods with this new type.\n//\n// Instead of embedding this Object interface, define SetPermissions and\n// GetPermissions in their own interface. Authorization policies will typically\n// respect annotations of a single type. For example, the VDL definition of an\n// object would be:\n//\n//  package mypackage\n//\n//  import \"v.io/v23/security/access\"\n//\n//  type MyTag string\n//\n//  const (\n//    Blue = MyTag(\"Blue\")\n//    Red  = MyTag(\"Red\")\n//  )\n//\n//  type MyObject interface {\n//    MyMethod() (string, error) {Blue}\n//\n//    // Allow clients to change access via the access.Object interface:\n//    SetPermissions(perms access.Permissions, version string) error         {Red}\n//    GetPermissions() (perms access.Permissions, version string, err error) {Blue}\n//  }"
+    },
+    {
+      name: 'DatabaseWatcher',
+      pkgPath: 'v.io/syncbase/v23/services/syncbase/nosql',
+      doc: "// DatabaseWatcher allows a client to watch for updates in the database.\n// For each watched request, the client will receive a reliable stream of watch\n// events without re-ordering. See watch.GlobWatcher for a detailed explanation\n// of the behavior.\n// TODO(rogulenko): Currently the only supported watch patterns are\n// 'table/row*'. Consider changing that.\n//\n// The watching is done by starting a streaming RPC. The argument to the RPC\n// contains the ResumeMarker that points to a particular place in the database\n// event log. The result stream consists of a never-ending sequence of Change\n// messages (until the call fails or is canceled). Each Change contains the\n// Name field in the form \"<tableName>/<rowKey>\" and the Value field of the\n// StoreChange type. If the client has no access to a row specified in a change,\n// that change is excluded from the result stream.\n//\n// The DatabaseWatcher is designed to be used in the following way:\n// 1) begin a read-only batch\n// 2) read all information your app needs\n// 3) read the ResumeMarker\n// 4) abort the batch\n// 5) start watching changes to the data using the ResumeMarker\n// In this configuration the client doesn't miss any changes."
+    },
+    {
+      name: 'SyncGroupManager',
+      pkgPath: 'v.io/syncbase/v23/services/syncbase/nosql',
+      doc: "// SyncGroupManager is the interface for SyncGroup operations.\n// TODO(hpucha): Add blessings to create/join and add a refresh method."
+    },
+    {
+      name: 'BlobManager',
+      pkgPath: 'v.io/syncbase/v23/services/syncbase/nosql',
+      doc: "// BlobManager is the interface for blob operations."
+    },
+    {
+      name: 'SchemaManager',
+      pkgPath: 'v.io/syncbase/v23/services/syncbase/nosql',
+      doc: "// SchemaManager implements the API for managing schema metadata attached\n// to a Database."
+    },
+    ],
+  methods: [
+    
+      
+    {
+    name: 'Create',
+    doc: "// Create creates this Database.\n// If perms is nil, we inherit (copy) the App perms.\n// Create requires the caller to have Write permission at the App.",
+    inArgs: [{
+      name: 'metadata',
+      doc: "",
+      type: _type5
+    },
+    {
+      name: 'perms',
+      doc: "",
+      type: new access.Permissions()._type
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Write", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'Delete',
+    doc: "// Delete deletes this Database.",
+    inArgs: [{
+      name: 'schemaVersion',
+      doc: "",
+      type: vdl.types.INT32
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Write", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'Exists',
+    doc: "// Exists returns true only if this Database exists. Insufficient permissions\n// cause Exists to return false instead of an error.\n// TODO(ivanpi): Exists may fail with an error if higher levels of hierarchy\n// do not exist.",
+    inArgs: [{
+      name: 'schemaVersion',
+      doc: "",
+      type: vdl.types.INT32
+    },
+    ],
+    outArgs: [{
+      name: '',
+      doc: "",
+      type: vdl.types.BOOL
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'Exec',
+    doc: "// Exec executes a syncQL query and returns all results as specified by the\n// query's select clause. Concurrency semantics are documented in model.go.",
+    inArgs: [{
+      name: 'schemaVersion',
+      doc: "",
+      type: vdl.types.INT32
+    },
+    {
+      name: 'query',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: {
+      name: '',
+      doc: '',
+      type: _type6
+    },
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'BeginBatch',
+    doc: "// BeginBatch creates a new batch. It returns an App-relative name for a\n// Database handle bound to this batch. If this Database is already bound to a\n// batch, BeginBatch() will fail with ErrBoundToBatch. Concurrency semantics\n// are documented in model.go.\n// TODO(sadovsky): make BatchOptions optional",
+    inArgs: [{
+      name: 'schemaVersion',
+      doc: "",
+      type: vdl.types.INT32
+    },
+    {
+      name: 'bo',
+      doc: "",
+      type: _typeBatchOptions
+    },
+    ],
+    outArgs: [{
+      name: '',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'Commit',
+    doc: "// Commit persists the pending changes to the database.\n// If this Database is not bound to a batch, Commit() will fail with\n// ErrNotBoundToBatch.",
+    inArgs: [{
+      name: 'schemaVersion',
+      doc: "",
+      type: vdl.types.INT32
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'Abort',
+    doc: "// Abort notifies the server that any pending changes can be discarded.\n// It is not strictly required, but it may allow the server to release locks\n// or other resources sooner than if it was not called.\n// If this Database is not bound to a batch, Abort() will fail with\n// ErrNotBoundToBatch.",
+    inArgs: [{
+      name: 'schemaVersion',
+      doc: "",
+      type: vdl.types.INT32
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'SetPermissions',
+    doc: "// SetPermissions replaces the current Permissions for an object.  version\n// allows for optional, optimistic concurrency control.  If non-empty,\n// version's value must come from GetPermissions.  If any client has\n// successfully called SetPermissions in the meantime, the version will be\n// stale and SetPermissions will fail.  If empty, SetPermissions performs an\n// unconditional update.\n//\n// Permissions objects are expected to be small.  It is up to the\n// implementation to define the exact limit, though it should probably be\n// around 100KB.  Large lists of principals can be represented concisely using\n// blessings.\n//\n// There is some ambiguity when calling SetPermissions on a mount point.\n// Does it affect the mount itself or does it affect the service endpoint\n// that the mount points to?  The chosen behavior is that it affects the\n// service endpoint.  To modify the mount point's Permissions, use\n// ResolveToMountTable to get an endpoint and call SetPermissions on that.\n// This means that clients must know when a name refers to a mount point to\n// change its Permissions.",
+    inArgs: [{
+      name: 'perms',
+      doc: "",
+      type: new access.Permissions()._type
+    },
+    {
+      name: 'version',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Admin", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'GetPermissions',
+    doc: "// GetPermissions returns the complete, current Permissions for an object. The\n// returned version can be passed to a subsequent call to SetPermissions for\n// optimistic concurrency control. A successful call to SetPermissions will\n// invalidate version, and the client must call GetPermissions again to get\n// the current version.",
+    inArgs: [],
+    outArgs: [{
+      name: 'perms',
+      doc: "",
+      type: new access.Permissions()._type
+    },
+    {
+      name: 'version',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Admin", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'GetResumeMarker',
+    doc: "// GetResumeMarker returns the ResumeMarker that points to the current end\n// of the event log. GetResumeMarker() can be called on a batch.",
+    inArgs: [],
+    outArgs: [{
+      name: '',
+      doc: "",
+      type: new watch.ResumeMarker()._type
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'WatchGlob',
+    doc: "// WatchGlob returns a stream of changes that match a pattern.",
+    inArgs: [{
+      name: 'req',
+      doc: "",
+      type: new watch.GlobRequest()._type
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: {
+      name: '',
+      doc: '',
+      type: new watch.Change()._type
+    },
+    tags: [canonicalize.reduce(new access.Tag("Resolve", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'GetSyncGroupNames',
+    doc: "// GetSyncGroupNames returns the global names of all SyncGroups attached to\n// this database.",
+    inArgs: [],
+    outArgs: [{
+      name: '',
+      doc: "",
+      type: _type1
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'CreateSyncGroup',
+    doc: "// CreateSyncGroup creates a new SyncGroup with the given spec.\n//\n// Requires: Client must have at least Read access on the Database; prefix ACL\n// must exist at each SyncGroup prefix; Client must have at least Read access\n// on each of these prefix ACLs.",
+    inArgs: [{
+      name: 'sgName',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    {
+      name: 'spec',
+      doc: "",
+      type: _typeSyncGroupSpec
+    },
+    {
+      name: 'myInfo',
+      doc: "",
+      type: _typeSyncGroupMemberInfo
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'JoinSyncGroup',
+    doc: "// JoinSyncGroup joins the SyncGroup.\n//\n// Requires: Client must have at least Read access on the Database and on the\n// SyncGroup ACL.",
+    inArgs: [{
+      name: 'sgName',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    {
+      name: 'myInfo',
+      doc: "",
+      type: _typeSyncGroupMemberInfo
+    },
+    ],
+    outArgs: [{
+      name: 'spec',
+      doc: "",
+      type: _typeSyncGroupSpec
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'LeaveSyncGroup',
+    doc: "// LeaveSyncGroup leaves the SyncGroup. Previously synced data will continue\n// to be available.\n//\n// Requires: Client must have at least Read access on the Database.",
+    inArgs: [{
+      name: 'sgName',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'DestroySyncGroup',
+    doc: "// DestroySyncGroup destroys the SyncGroup. Previously synced data will\n// continue to be available to all members.\n//\n// Requires: Client must have at least Read access on the Database, and must\n// have Admin access on the SyncGroup ACL.",
+    inArgs: [{
+      name: 'sgName',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'EjectFromSyncGroup',
+    doc: "// EjectFromSyncGroup ejects a member from the SyncGroup. The ejected member\n// will not be able to sync further, but will retain any data it has already\n// synced.\n//\n// Requires: Client must have at least Read access on the Database, and must\n// have Admin access on the SyncGroup ACL.",
+    inArgs: [{
+      name: 'sgName',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    {
+      name: 'member',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'GetSyncGroupSpec',
+    doc: "// GetSyncGroupSpec gets the SyncGroup spec. version allows for atomic\n// read-modify-write of the spec - see comment for SetSyncGroupSpec.\n//\n// Requires: Client must have at least Read access on the Database and on the\n// SyncGroup ACL.",
+    inArgs: [{
+      name: 'sgName',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    outArgs: [{
+      name: 'spec',
+      doc: "",
+      type: _typeSyncGroupSpec
+    },
+    {
+      name: 'version',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'SetSyncGroupSpec',
+    doc: "// SetSyncGroupSpec sets the SyncGroup spec. version may be either empty or\n// the value from a previous Get. If not empty, Set will only succeed if the\n// current version matches the specified one.\n//\n// Requires: Client must have at least Read access on the Database, and must\n// have Admin access on the SyncGroup ACL.",
+    inArgs: [{
+      name: 'sgName',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    {
+      name: 'spec',
+      doc: "",
+      type: _typeSyncGroupSpec
+    },
+    {
+      name: 'version',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'GetSyncGroupMembers',
+    doc: "// GetSyncGroupMembers gets the info objects for members of the SyncGroup.\n//\n// Requires: Client must have at least Read access on the Database and on the\n// SyncGroup ACL.",
+    inArgs: [{
+      name: 'sgName',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    outArgs: [{
+      name: 'members',
+      doc: "",
+      type: _type2
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'CreateBlob',
+    doc: "// API for resumable blob creation (append-only). After commit, a blob\n// is immutable. Before commit, the BlobRef can be used with PutBlob,\n// GetBlobSize, DeleteBlob, and CommitBlob.\n//\n// CreateBlob returns a BlobRef for a newly created blob.",
+    inArgs: [],
+    outArgs: [{
+      name: 'br',
+      doc: "",
+      type: _typeBlobRef
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Write", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'PutBlob',
+    doc: "// PutBlob appends the byte stream to the blob.",
+    inArgs: [{
+      name: 'br',
+      doc: "",
+      type: _typeBlobRef
+    },
+    ],
+    outArgs: [],
+    inStream: {
+      name: '',
+      doc: '',
+      type: _type3
+    },
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Write", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'CommitBlob',
+    doc: "// CommitBlob marks the blob as immutable.",
+    inArgs: [{
+      name: 'br',
+      doc: "",
+      type: _typeBlobRef
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Write", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'GetBlobSize',
+    doc: "// GetBlobSize returns the count of bytes written as part of the blob\n// (committed or uncommitted).",
+    inArgs: [{
+      name: 'br',
+      doc: "",
+      type: _typeBlobRef
+    },
+    ],
+    outArgs: [{
+      name: '',
+      doc: "",
+      type: vdl.types.INT64
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'DeleteBlob',
+    doc: "// DeleteBlob locally deletes the blob (committed or uncommitted).",
+    inArgs: [{
+      name: 'br',
+      doc: "",
+      type: _typeBlobRef
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Write", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'GetBlob',
+    doc: "// GetBlob returns the byte stream from a committed blob starting at offset.",
+    inArgs: [{
+      name: 'br',
+      doc: "",
+      type: _typeBlobRef
+    },
+    {
+      name: 'offset',
+      doc: "",
+      type: vdl.types.INT64
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: {
+      name: '',
+      doc: '',
+      type: _type3
+    },
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'FetchBlob',
+    doc: "// FetchBlob initiates fetching a blob if not locally found. priority\n// controls the network priority of the blob. Higher priority blobs are\n// fetched before the lower priority ones. However, an ongoing blob\n// transfer is not interrupted. Status updates are streamed back to the\n// client as fetch is in progress.",
+    inArgs: [{
+      name: 'br',
+      doc: "",
+      type: _typeBlobRef
+    },
+    {
+      name: 'priority',
+      doc: "",
+      type: vdl.types.UINT64
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: {
+      name: '',
+      doc: '',
+      type: _typeBlobFetchStatus
+    },
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'PinBlob',
+    doc: "// PinBlob locally pins the blob so that it is not evicted.",
+    inArgs: [{
+      name: 'br',
+      doc: "",
+      type: _typeBlobRef
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Write", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'UnpinBlob',
+    doc: "// UnpinBlob locally unpins the blob so that it can be evicted if needed.",
+    inArgs: [{
+      name: 'br',
+      doc: "",
+      type: _typeBlobRef
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Write", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'KeepBlob',
+    doc: "// KeepBlob locally caches the blob with the specified rank. Lower\n// ranked blobs are more eagerly evicted.",
+    inArgs: [{
+      name: 'br',
+      doc: "",
+      type: _typeBlobRef
+    },
+    {
+      name: 'rank',
+      doc: "",
+      type: vdl.types.UINT64
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Write", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'GetSchemaMetadata',
+    doc: "// GetSchemaMetadata retrieves schema metadata for this database.\n//\n// Requires: Client must have at least Read access on the Database.",
+    inArgs: [],
+    outArgs: [{
+      name: '',
+      doc: "",
+      type: _typeSchemaMetadata
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'SetSchemaMetadata',
+    doc: "// SetSchemaMetadata stores schema metadata for this database.\n//\n// Requires: Client must have at least Write access on the Database.",
+    inArgs: [{
+      name: 'metadata',
+      doc: "",
+      type: _typeSchemaMetadata
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Write", true), new access.Tag()._type), ]
+  },
+     
+  ]
+};
+
+  
+    
+function Table(){}
+module.exports.Table = Table;
+
+    
+      
+Table.prototype.create = function(ctx, serverCall, schemaVersion, perms) {
+  throw new Error('Method Create not implemented');
+};
+    
+      
+Table.prototype.delete = function(ctx, serverCall, schemaVersion) {
+  throw new Error('Method Delete not implemented');
+};
+    
+      
+Table.prototype.exists = function(ctx, serverCall, schemaVersion) {
+  throw new Error('Method Exists not implemented');
+};
+    
+      
+Table.prototype.deleteRowRange = function(ctx, serverCall, schemaVersion, start, limit) {
+  throw new Error('Method DeleteRowRange not implemented');
+};
+    
+      
+Table.prototype.scan = function(ctx, serverCall, schemaVersion, start, limit) {
+  throw new Error('Method Scan not implemented');
+};
+    
+      
+Table.prototype.getPermissions = function(ctx, serverCall, schemaVersion, key) {
+  throw new Error('Method GetPermissions not implemented');
+};
+    
+      
+Table.prototype.setPermissions = function(ctx, serverCall, schemaVersion, prefix, perms) {
+  throw new Error('Method SetPermissions not implemented');
+};
+    
+      
+Table.prototype.deletePermissions = function(ctx, serverCall, schemaVersion, prefix) {
+  throw new Error('Method DeletePermissions not implemented');
+};
+     
+
+    
+Table.prototype._serviceDescription = {
+  name: 'Table',
+  pkgPath: 'v.io/syncbase/v23/services/syncbase/nosql',
+  doc: "// Table represents a collection of Rows.\n// Table.Glob operates over the primary keys of Rows in the Table.\n// SchemaVersion is the version number that the client expects the database\n// to be at. To disable schema version checking, pass -1.",
+  embeds: [],
+  methods: [
+    
+      
+    {
+    name: 'Create',
+    doc: "// Create creates this Table.\n// If perms is nil, we inherit (copy) the Database perms.",
+    inArgs: [{
+      name: 'schemaVersion',
+      doc: "",
+      type: vdl.types.INT32
+    },
+    {
+      name: 'perms',
+      doc: "",
+      type: new access.Permissions()._type
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Write", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'Delete',
+    doc: "// Delete deletes this Table.",
+    inArgs: [{
+      name: 'schemaVersion',
+      doc: "",
+      type: vdl.types.INT32
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Write", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'Exists',
+    doc: "// Exists returns true only if this Table exists. Insufficient permissions\n// cause Exists to return false instead of an error.\n// TODO(ivanpi): Exists may fail with an error if higher levels of hierarchy\n// do not exist.",
+    inArgs: [{
+      name: 'schemaVersion',
+      doc: "",
+      type: vdl.types.INT32
+    },
+    ],
+    outArgs: [{
+      name: '',
+      doc: "",
+      type: vdl.types.BOOL
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'DeleteRowRange',
+    doc: "// DeleteRowRange deletes all rows in the given half-open range [start, limit). If\n// limit is \"\", all rows with keys >= start are included.\n// TODO(sadovsky): Delete prefix perms fully covered by the row range?",
+    inArgs: [{
+      name: 'schemaVersion',
+      doc: "",
+      type: vdl.types.INT32
+    },
+    {
+      name: 'start',
+      doc: "",
+      type: _type3
+    },
+    {
+      name: 'limit',
+      doc: "",
+      type: _type3
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Write", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'Scan',
+    doc: "// Scan returns all rows in the given half-open range [start, limit). If limit\n// is \"\", all rows with keys >= start are included. Concurrency semantics are\n// documented in model.go.",
+    inArgs: [{
+      name: 'schemaVersion',
+      doc: "",
+      type: vdl.types.INT32
+    },
+    {
+      name: 'start',
+      doc: "",
+      type: _type3
+    },
+    {
+      name: 'limit',
+      doc: "",
+      type: _type3
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: {
+      name: '',
+      doc: '',
+      type: _typeKeyValue
+    },
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'GetPermissions',
+    doc: "// GetPermissions returns an array of (prefix, perms) pairs. The array is\n// sorted from longest prefix to shortest, so element zero is the one that\n// applies to the row with the given key. The last element is always the\n// prefix \"\" which represents the table's permissions -- the array will always\n// have at least one element.",
+    inArgs: [{
+      name: 'schemaVersion',
+      doc: "",
+      type: vdl.types.INT32
+    },
+    {
+      name: 'key',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    outArgs: [{
+      name: '',
+      doc: "",
+      type: _type7
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Admin", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'SetPermissions',
+    doc: "// SetPermissions sets the permissions for all current and future rows with\n// the given prefix. If the prefix overlaps with an existing prefix, the\n// longest prefix that matches a row applies. For example:\n//     SetPermissions(ctx, Prefix(\"a/b\"), perms1)\n//     SetPermissions(ctx, Prefix(\"a/b/c\"), perms2)\n// The permissions for row \"a/b/1\" are perms1, and the permissions for row\n// \"a/b/c/1\" are perms2.",
+    inArgs: [{
+      name: 'schemaVersion',
+      doc: "",
+      type: vdl.types.INT32
+    },
+    {
+      name: 'prefix',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    {
+      name: 'perms',
+      doc: "",
+      type: new access.Permissions()._type
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Admin", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'DeletePermissions',
+    doc: "// DeletePermissions deletes the permissions for the specified prefix. Any\n// rows covered by this prefix will use the next longest prefix's permissions\n// (see the array returned by GetPermissions).",
+    inArgs: [{
+      name: 'schemaVersion',
+      doc: "",
+      type: vdl.types.INT32
+    },
+    {
+      name: 'prefix',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Admin", true), new access.Tag()._type), ]
+  },
+     
+  ]
+};
+
+  
+    
+function Row(){}
+module.exports.Row = Row;
+
+    
+      
+Row.prototype.exists = function(ctx, serverCall, schemaVersion) {
+  throw new Error('Method Exists not implemented');
+};
+    
+      
+Row.prototype.get = function(ctx, serverCall, schemaVersion) {
+  throw new Error('Method Get not implemented');
+};
+    
+      
+Row.prototype.put = function(ctx, serverCall, schemaVersion, value) {
+  throw new Error('Method Put not implemented');
+};
+    
+      
+Row.prototype.delete = function(ctx, serverCall, schemaVersion) {
+  throw new Error('Method Delete not implemented');
+};
+     
+
+    
+Row.prototype._serviceDescription = {
+  name: 'Row',
+  pkgPath: 'v.io/syncbase/v23/services/syncbase/nosql',
+  doc: "// Row represents a single row in a Table.\n// All access checks are performed against the most specific matching prefix\n// permissions in the Table.\n// SchemaVersion is the version number that the client expects the database\n// to be at. To disable schema version checking, pass -1.\n// NOTE(sadovsky): Currently we send []byte values over the wire for Get, Put,\n// and Scan. If there's a way to avoid encoding/decoding on the server side, we\n// can use vdl.Value everywhere without sacrificing performance.",
+  embeds: [],
+  methods: [
+    
+      
+    {
+    name: 'Exists',
+    doc: "// Exists returns true only if this Row exists. Insufficient permissions\n// cause Exists to return false instead of an error.\n// TODO(ivanpi): Exists may fail with an error if higher levels of hierarchy\n// do not exist.",
+    inArgs: [{
+      name: 'schemaVersion',
+      doc: "",
+      type: vdl.types.INT32
+    },
+    ],
+    outArgs: [{
+      name: '',
+      doc: "",
+      type: vdl.types.BOOL
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'Get',
+    doc: "// Get returns the value for this Row.",
+    inArgs: [{
+      name: 'schemaVersion',
+      doc: "",
+      type: vdl.types.INT32
+    },
+    ],
+    outArgs: [{
+      name: '',
+      doc: "",
+      type: _type3
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Read", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'Put',
+    doc: "// Put writes the given value for this Row.",
+    inArgs: [{
+      name: 'schemaVersion',
+      doc: "",
+      type: vdl.types.INT32
+    },
+    {
+      name: 'value',
+      doc: "",
+      type: _type3
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Write", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'Delete',
+    doc: "// Delete deletes this Row.",
+    inArgs: [{
+      name: 'schemaVersion',
+      doc: "",
+      type: vdl.types.INT32
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Write", true), new access.Tag()._type), ]
+  },
+     
+  ]
+};
+
+   
+
+   
+ 
+
+
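The generated nosql stubs above (Database, Table, Row) only throw 'not implemented'; a concrete service takes a stub instance as its prototype and overrides each method it supports. Below is a minimal sketch for Row, assuming an in-memory plain-object store; the require path and every name outside the generated module are illustrative, not part of this change.

var nosql = require('./src/gen-vdl/v.io/syncbase/v23/services/syncbase/nosql');

// Hypothetical in-memory Row; 'key' and 'store' are not part of the stub.
function MemoryRow(key, store) {
  this.key = key;      // row key within its Table
  this.store = store;  // plain object used as the key/value store
}
MemoryRow.prototype = new nosql.Row();

MemoryRow.prototype.exists = function(ctx, serverCall, schemaVersion) {
  return this.store.hasOwnProperty(this.key);
};
MemoryRow.prototype.get = function(ctx, serverCall, schemaVersion) {
  return this.store[this.key];  // byte values, per the Row doc above
};
MemoryRow.prototype.put = function(ctx, serverCall, schemaVersion, value) {
  this.store[this.key] = value;
};
MemoryRow.prototype.delete = function(ctx, serverCall, schemaVersion) {
  delete this.store[this.key];
};

Database and Table implementations follow the same pattern, overriding the methods listed in their _serviceDescription blocks.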
diff --git a/src/gen-vdl/v.io/v23/security/access/index.js b/src/gen-vdl/v.io/v23/security/access/index.js
new file mode 100644
index 0000000..a929339
--- /dev/null
+++ b/src/gen-vdl/v.io/v23/security/access/index.js
@@ -0,0 +1,118 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated by the vanadium vdl tool.
+var vdl = require('vanadium').vdl;
+var makeError = require('vanadium').verror.makeError;
+var actions = require('vanadium').verror.actions;
+var canonicalize = require('vanadium').vdl.canonicalize;
+
+
+
+
+
+var security = require('./..');
+
+module.exports = {};
+
+
+
+// Types:
+var _type1 = new vdl.Type();
+var _type2 = new vdl.Type();
+var _type3 = new vdl.Type();
+var _typeAccessList = new vdl.Type();
+var _typePermissions = new vdl.Type();
+var _typeTag = new vdl.Type();
+_type1.kind = vdl.kind.LIST;
+_type1.name = "";
+_type1.elem = new security.BlessingPattern()._type;
+_type2.kind = vdl.kind.LIST;
+_type2.name = "";
+_type2.elem = vdl.types.STRING;
+_type3.kind = vdl.kind.LIST;
+_type3.name = "";
+_type3.elem = new security.RejectedBlessing()._type;
+_typeAccessList.kind = vdl.kind.STRUCT;
+_typeAccessList.name = "v.io/v23/security/access.AccessList";
+_typeAccessList.fields = [{name: "In", type: _type1}, {name: "NotIn", type: _type2}];
+_typePermissions.kind = vdl.kind.MAP;
+_typePermissions.name = "v.io/v23/security/access.Permissions";
+_typePermissions.elem = _typeAccessList;
+_typePermissions.key = vdl.types.STRING;
+_typeTag.kind = vdl.kind.STRING;
+_typeTag.name = "v.io/v23/security/access.Tag";
+_type1.freeze();
+_type2.freeze();
+_type3.freeze();
+_typeAccessList.freeze();
+_typePermissions.freeze();
+_typeTag.freeze();
+module.exports.AccessList = (vdl.registry.lookupOrCreateConstructor(_typeAccessList));
+module.exports.Permissions = (vdl.registry.lookupOrCreateConstructor(_typePermissions));
+module.exports.Tag = (vdl.registry.lookupOrCreateConstructor(_typeTag));
+
+
+
+
+// Consts:
+
+  module.exports.Admin = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(_typeTag))("Admin", true), _typeTag);
+
+  module.exports.Debug = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(_typeTag))("Debug", true), _typeTag);
+
+  module.exports.Read = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(_typeTag))("Read", true), _typeTag);
+
+  module.exports.Write = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(_typeTag))("Write", true), _typeTag);
+
+  module.exports.Resolve = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(_typeTag))("Resolve", true), _typeTag);
+
+
+
+// Errors:
+
+module.exports.TooBigError = makeError('v.io/v23/security/access.TooBig', actions.NO_RETRY, {
+  'en': '{1:}{2:} AccessList is too big',
+}, [
+]);
+
+
+module.exports.NoPermissionsError = makeError('v.io/v23/security/access.NoPermissions', actions.NO_RETRY, {
+  'en': '{1:}{2:} {3} does not have {5} access (rejected blessings: {4})',
+}, [
+  _type2,
+  _type3,
+  vdl.types.STRING,
+]);
+
+
+module.exports.AccessListMatchError = makeError('v.io/v23/security/access.AccessListMatch', actions.NO_RETRY, {
+  'en': '{1:}{2:} {3} does not match the access list (rejected blessings: {4})',
+}, [
+  _type2,
+  _type3,
+]);
+
+
+module.exports.UnenforceablePatternsError = makeError('v.io/v23/security/access.UnenforceablePatterns', actions.NO_RETRY, {
+  'en': '{1:}{2:} AccessList contains the following invalid or unrecognized patterns in the In list: {3}',
+}, [
+  _type1,
+]);
+
+
+module.exports.InvalidOpenAccessListError = makeError('v.io/v23/security/access.InvalidOpenAccessList', actions.NO_RETRY, {
+  'en': '{1:}{2:} AccessList with the pattern ... in its In list must have no other patterns in the In or NotIn lists',
+}, [
+]);
+
+
+
+
+// Services:
+
+   
+ 
+
+
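The AccessList, Permissions, and Tag constructors exported above can be used directly to build ACL values. A small sketch, assuming the lower-camel-case field names that the generated consts use elsewhere ('in'/'notIn' mirroring the VDL fields In/NotIn); the require path and blessing patterns are examples only.

var access = require('./src/gen-vdl/v.io/v23/security/access');

// An AccessList granting access to two example blessing patterns.
var acl = new access.AccessList({
  in: ['dev.v.io/u/alice', 'dev.v.io/u/bob'],
  notIn: []
});

// The exported tag constants (access.Read, access.Write, ...) are the same
// canonicalized Tag values that the service descriptions attach to methods.
var readTag = access.Read;

A Permissions value then maps tag names such as 'Read' or 'Admin' to AccessList values like the one above.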
diff --git a/src/gen-vdl/v.io/v23/security/index.js b/src/gen-vdl/v.io/v23/security/index.js
new file mode 100644
index 0000000..de0e2eb
--- /dev/null
+++ b/src/gen-vdl/v.io/v23/security/index.js
@@ -0,0 +1,394 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated by the vanadium vdl tool.
+var vdl = require('vanadium').vdl;
+var makeError = require('vanadium').verror.makeError;
+var actions = require('vanadium').verror.actions;
+var canonicalize = require('vanadium').vdl.canonicalize;
+
+
+
+
+
+var time = require('./../vdlroot/time');
+var uniqueid = require('./../uniqueid');
+
+module.exports = {};
+
+
+
+// Types:
+var _type1 = new vdl.Type();
+var _type2 = new vdl.Type();
+var _type3 = new vdl.Type();
+var _type4 = new vdl.Type();
+var _type5 = new vdl.Type();
+var _type6 = new vdl.Type();
+var _type7 = new vdl.Type();
+var _type8 = new vdl.Type();
+var _typeBlessingPattern = new vdl.Type();
+var _typeCaveat = new vdl.Type();
+var _typeCaveatDescriptor = new vdl.Type();
+var _typeCertificate = new vdl.Type();
+var _typeDischargeImpetus = new vdl.Type();
+var _typeHash = new vdl.Type();
+var _typeRejectedBlessing = new vdl.Type();
+var _typeSignature = new vdl.Type();
+var _typeThirdPartyRequirements = new vdl.Type();
+var _typeWireBlessings = new vdl.Type();
+var _typeWireDischarge = new vdl.Type();
+var _typenonce = new vdl.Type();
+var _typepublicKeyDischarge = new vdl.Type();
+var _typepublicKeyThirdPartyCaveatParam = new vdl.Type();
+_type1.kind = vdl.kind.LIST;
+_type1.name = "";
+_type1.elem = _typeCaveat;
+_type2.kind = vdl.kind.LIST;
+_type2.name = "";
+_type2.elem = vdl.types.BYTE;
+_type3.kind = vdl.kind.LIST;
+_type3.name = "";
+_type3.elem = vdl.types.STRING;
+_type4.kind = vdl.kind.LIST;
+_type4.name = "";
+_type4.elem = _typeBlessingPattern;
+_type5.kind = vdl.kind.LIST;
+_type5.name = "";
+_type5.elem = vdl.types.ANY;
+_type6.kind = vdl.kind.LIST;
+_type6.name = "";
+_type6.elem = _type7;
+_type7.kind = vdl.kind.LIST;
+_type7.name = "";
+_type7.elem = _typeCertificate;
+_type8.kind = vdl.kind.LIST;
+_type8.name = "";
+_type8.elem = _typeRejectedBlessing;
+_typeBlessingPattern.kind = vdl.kind.STRING;
+_typeBlessingPattern.name = "v.io/v23/security.BlessingPattern";
+_typeCaveat.kind = vdl.kind.STRUCT;
+_typeCaveat.name = "v.io/v23/security.Caveat";
+_typeCaveat.fields = [{name: "Id", type: new uniqueid.Id()._type}, {name: "ParamVom", type: _type2}];
+_typeCaveatDescriptor.kind = vdl.kind.STRUCT;
+_typeCaveatDescriptor.name = "v.io/v23/security.CaveatDescriptor";
+_typeCaveatDescriptor.fields = [{name: "Id", type: new uniqueid.Id()._type}, {name: "ParamType", type: vdl.types.TYPEOBJECT}];
+_typeCertificate.kind = vdl.kind.STRUCT;
+_typeCertificate.name = "v.io/v23/security.Certificate";
+_typeCertificate.fields = [{name: "Extension", type: vdl.types.STRING}, {name: "PublicKey", type: _type2}, {name: "Caveats", type: _type1}, {name: "Signature", type: _typeSignature}];
+_typeDischargeImpetus.kind = vdl.kind.STRUCT;
+_typeDischargeImpetus.name = "v.io/v23/security.DischargeImpetus";
+_typeDischargeImpetus.fields = [{name: "Server", type: _type4}, {name: "Method", type: vdl.types.STRING}, {name: "Arguments", type: _type5}];
+_typeHash.kind = vdl.kind.STRING;
+_typeHash.name = "v.io/v23/security.Hash";
+_typeRejectedBlessing.kind = vdl.kind.STRUCT;
+_typeRejectedBlessing.name = "v.io/v23/security.RejectedBlessing";
+_typeRejectedBlessing.fields = [{name: "Blessing", type: vdl.types.STRING}, {name: "Err", type: vdl.types.ERROR}];
+_typeSignature.kind = vdl.kind.STRUCT;
+_typeSignature.name = "v.io/v23/security.Signature";
+_typeSignature.fields = [{name: "Purpose", type: _type2}, {name: "Hash", type: _typeHash}, {name: "R", type: _type2}, {name: "S", type: _type2}];
+_typeThirdPartyRequirements.kind = vdl.kind.STRUCT;
+_typeThirdPartyRequirements.name = "v.io/v23/security.ThirdPartyRequirements";
+_typeThirdPartyRequirements.fields = [{name: "ReportServer", type: vdl.types.BOOL}, {name: "ReportMethod", type: vdl.types.BOOL}, {name: "ReportArguments", type: vdl.types.BOOL}];
+_typeWireBlessings.kind = vdl.kind.STRUCT;
+_typeWireBlessings.name = "v.io/v23/security.WireBlessings";
+_typeWireBlessings.fields = [{name: "CertificateChains", type: _type6}];
+_typeWireDischarge.kind = vdl.kind.UNION;
+_typeWireDischarge.name = "v.io/v23/security.WireDischarge";
+_typeWireDischarge.fields = [{name: "PublicKey", type: _typepublicKeyDischarge}];
+_typenonce.kind = vdl.kind.ARRAY;
+_typenonce.name = "v.io/v23/security.nonce";
+_typenonce.len = 16;
+_typenonce.elem = vdl.types.BYTE;
+_typepublicKeyDischarge.kind = vdl.kind.STRUCT;
+_typepublicKeyDischarge.name = "v.io/v23/security.publicKeyDischarge";
+_typepublicKeyDischarge.fields = [{name: "ThirdPartyCaveatId", type: vdl.types.STRING}, {name: "Caveats", type: _type1}, {name: "Signature", type: _typeSignature}];
+_typepublicKeyThirdPartyCaveatParam.kind = vdl.kind.STRUCT;
+_typepublicKeyThirdPartyCaveatParam.name = "v.io/v23/security.publicKeyThirdPartyCaveatParam";
+_typepublicKeyThirdPartyCaveatParam.fields = [{name: "Nonce", type: _typenonce}, {name: "Caveats", type: _type1}, {name: "DischargerKey", type: _type2}, {name: "DischargerLocation", type: vdl.types.STRING}, {name: "DischargerRequirements", type: _typeThirdPartyRequirements}];
+_type1.freeze();
+_type2.freeze();
+_type3.freeze();
+_type4.freeze();
+_type5.freeze();
+_type6.freeze();
+_type7.freeze();
+_type8.freeze();
+_typeBlessingPattern.freeze();
+_typeCaveat.freeze();
+_typeCaveatDescriptor.freeze();
+_typeCertificate.freeze();
+_typeDischargeImpetus.freeze();
+_typeHash.freeze();
+_typeRejectedBlessing.freeze();
+_typeSignature.freeze();
+_typeThirdPartyRequirements.freeze();
+_typeWireBlessings.freeze();
+_typeWireDischarge.freeze();
+_typenonce.freeze();
+_typepublicKeyDischarge.freeze();
+_typepublicKeyThirdPartyCaveatParam.freeze();
+module.exports.BlessingPattern = (vdl.registry.lookupOrCreateConstructor(_typeBlessingPattern));
+module.exports.Caveat = (vdl.registry.lookupOrCreateConstructor(_typeCaveat));
+module.exports.CaveatDescriptor = (vdl.registry.lookupOrCreateConstructor(_typeCaveatDescriptor));
+module.exports.Certificate = (vdl.registry.lookupOrCreateConstructor(_typeCertificate));
+module.exports.DischargeImpetus = (vdl.registry.lookupOrCreateConstructor(_typeDischargeImpetus));
+module.exports.Hash = (vdl.registry.lookupOrCreateConstructor(_typeHash));
+module.exports.RejectedBlessing = (vdl.registry.lookupOrCreateConstructor(_typeRejectedBlessing));
+module.exports.Signature = (vdl.registry.lookupOrCreateConstructor(_typeSignature));
+module.exports.ThirdPartyRequirements = (vdl.registry.lookupOrCreateConstructor(_typeThirdPartyRequirements));
+module.exports.WireBlessings = (vdl.registry.lookupOrCreateConstructor(_typeWireBlessings));
+module.exports.WireDischarge = (vdl.registry.lookupOrCreateConstructor(_typeWireDischarge));
+module.exports.nonce = (vdl.registry.lookupOrCreateConstructor(_typenonce));
+module.exports.publicKeyDischarge = (vdl.registry.lookupOrCreateConstructor(_typepublicKeyDischarge));
+module.exports.publicKeyThirdPartyCaveatParam = (vdl.registry.lookupOrCreateConstructor(_typepublicKeyThirdPartyCaveatParam));
+
+
+
+
+// Consts:
+
+  module.exports.ConstCaveat = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(_typeCaveatDescriptor))({
+  'id': new Uint8Array([
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+]),
+  'paramType': vdl.types.BOOL,
+}, true), _typeCaveatDescriptor);
+
+  module.exports.ExpiryCaveat = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(_typeCaveatDescriptor))({
+  'id': new Uint8Array([
+166,
+76,
+45,
+1,
+25,
+251,
+163,
+52,
+128,
+113,
+254,
+235,
+47,
+48,
+128,
+0,
+]),
+  'paramType': new time.Time()._type,
+}, true), _typeCaveatDescriptor);
+
+  module.exports.MethodCaveat = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(_typeCaveatDescriptor))({
+  'id': new Uint8Array([
+84,
+166,
+118,
+57,
+129,
+55,
+24,
+126,
+205,
+178,
+109,
+45,
+105,
+186,
+0,
+3,
+]),
+  'paramType': _type3,
+}, true), _typeCaveatDescriptor);
+
+  module.exports.PublicKeyThirdPartyCaveat = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(_typeCaveatDescriptor))({
+  'id': new Uint8Array([
+121,
+114,
+206,
+23,
+74,
+123,
+169,
+63,
+121,
+84,
+125,
+118,
+156,
+145,
+128,
+0,
+]),
+  'paramType': _typepublicKeyThirdPartyCaveatParam,
+}, true), _typeCaveatDescriptor);
+
+  module.exports.PeerBlessingsCaveat = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(_typeCaveatDescriptor))({
+  'id': new Uint8Array([
+5,
+119,
+248,
+86,
+76,
+142,
+95,
+254,
+255,
+142,
+43,
+31,
+77,
+109,
+128,
+0,
+]),
+  'paramType': _type4,
+}, true), _typeCaveatDescriptor);
+
+  module.exports.NoExtension = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(_typeBlessingPattern))("$", true), _typeBlessingPattern);
+
+  module.exports.AllPrincipals = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(_typeBlessingPattern))("...", true), _typeBlessingPattern);
+
+  module.exports.ChainSeparator = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(vdl.types.STRING))("/", true), vdl.types.STRING);
+
+  module.exports.SHA1Hash = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(_typeHash))("SHA1", true), _typeHash);
+
+  module.exports.SHA256Hash = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(_typeHash))("SHA256", true), _typeHash);
+
+  module.exports.SHA384Hash = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(_typeHash))("SHA384", true), _typeHash);
+
+  module.exports.SHA512Hash = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(_typeHash))("SHA512", true), _typeHash);
+
+  module.exports.SignatureForMessageSigning = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(vdl.types.STRING))("S1", true), vdl.types.STRING);
+
+  module.exports.SignatureForBlessingCertificates = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(vdl.types.STRING))("B1", true), vdl.types.STRING);
+
+  module.exports.SignatureForDischarge = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(vdl.types.STRING))("D1", true), vdl.types.STRING);
+
+  module.exports.SignatureForMessageSigningV0 = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(vdl.types.STRING))("S", true), vdl.types.STRING);
+
+  module.exports.SignatureForBlessingCertificatesV0 = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(vdl.types.STRING))("B", true), vdl.types.STRING);
+
+  module.exports.SignatureForDischargeV0 = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(vdl.types.STRING))("D", true), vdl.types.STRING);
+
+
+
+// Errors:
+
+module.exports.CaveatNotRegisteredError = makeError('v.io/v23/security.CaveatNotRegistered', actions.NO_RETRY, {
+  'en': '{1:}{2:} no validation function registered for caveat id {3}',
+}, [
+  new uniqueid.Id()._type,
+]);
+
+
+module.exports.CaveatParamAnyError = makeError('v.io/v23/security.CaveatParamAny', actions.NO_RETRY, {
+  'en': '{1:}{2:} caveat {3} uses illegal param type any',
+}, [
+  new uniqueid.Id()._type,
+]);
+
+
+module.exports.CaveatParamTypeMismatchError = makeError('v.io/v23/security.CaveatParamTypeMismatch', actions.NO_RETRY, {
+  'en': '{1:}{2:} bad param type: caveat {3} got {4}, want {5}',
+}, [
+  new uniqueid.Id()._type,
+  vdl.types.TYPEOBJECT,
+  vdl.types.TYPEOBJECT,
+]);
+
+
+module.exports.CaveatParamCodingError = makeError('v.io/v23/security.CaveatParamCoding', actions.NO_RETRY, {
+  'en': '{1:}{2:} unable to encode/decode caveat param(type={4}) for caveat {3}: {5}',
+}, [
+  new uniqueid.Id()._type,
+  vdl.types.TYPEOBJECT,
+  vdl.types.ERROR,
+]);
+
+
+module.exports.CaveatValidationError = makeError('v.io/v23/security.CaveatValidation', actions.NO_RETRY, {
+  'en': '{1:}{2:} caveat validation failed: {3}',
+}, [
+  vdl.types.ERROR,
+]);
+
+
+module.exports.ConstCaveatValidationError = makeError('v.io/v23/security.ConstCaveatValidation', actions.NO_RETRY, {
+  'en': '{1:}{2:} false const caveat always fails validation',
+}, [
+]);
+
+
+module.exports.ExpiryCaveatValidationError = makeError('v.io/v23/security.ExpiryCaveatValidation', actions.NO_RETRY, {
+  'en': '{1:}{2:} now({4}) is after expiry({3})',
+}, [
+  new time.Time()._type,
+  new time.Time()._type,
+]);
+
+
+module.exports.MethodCaveatValidationError = makeError('v.io/v23/security.MethodCaveatValidation', actions.NO_RETRY, {
+  'en': '{1:}{2:} method {3} not in list {4}',
+}, [
+  vdl.types.STRING,
+  _type3,
+]);
+
+
+module.exports.PeerBlessingsCaveatValidationError = makeError('v.io/v23/security.PeerBlessingsCaveatValidation', actions.NO_RETRY, {
+  'en': '{1:}{2:} patterns in peer blessings caveat {4} not matched by the peer {3}',
+}, [
+  _type3,
+  _type4,
+]);
+
+
+module.exports.UnrecognizedRootError = makeError('v.io/v23/security.UnrecognizedRoot', actions.NO_RETRY, {
+  'en': '{1:}{2:} unrecognized public key {3} in root certificate{:4}',
+}, [
+  vdl.types.STRING,
+  vdl.types.ERROR,
+]);
+
+
+module.exports.AuthorizationFailedError = makeError('v.io/v23/security.AuthorizationFailed', actions.NO_RETRY, {
+  'en': '{1:}{2:} principal with blessings {3} (rejected {4}) is not authorized by principal with blessings {5}',
+}, [
+  _type3,
+  _type8,
+  _type3,
+]);
+
+
+module.exports.InvalidSigningBlessingCaveatError = makeError('v.io/v23/security.InvalidSigningBlessingCaveat', actions.NO_RETRY, {
+  'en': '{1:}{2:} blessing has caveat with UUID {3} which makes it unsuitable for signing -- please use blessings with just Expiry caveats',
+}, [
+  new uniqueid.Id()._type,
+]);
+
+
+
+
+// Services:
+
+   
+
+   
+ 
+
+
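Each error exported above is a constructor produced by verror.makeError; by convention its first argument is a context and the remaining arguments fill the typed parameter list declared next to it. A hedged usage sketch, where the surrounding function and the ctx value are assumptions:

var security = require('./src/gen-vdl/v.io/v23/security');

// Wrap a lower-level failure as a CaveatValidationError; 'ctx' is assumed to
// be the context handed to the surrounding caveat-validation function.
function failValidation(ctx, cause) {
  throw new security.CaveatValidationError(ctx, cause);
}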
diff --git a/src/gen-vdl/v.io/v23/services/permissions/index.js b/src/gen-vdl/v.io/v23/services/permissions/index.js
new file mode 100644
index 0000000..3befb3f
--- /dev/null
+++ b/src/gen-vdl/v.io/v23/services/permissions/index.js
@@ -0,0 +1,107 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated by the vanadium vdl tool.
+var vdl = require('vanadium').vdl;
+var canonicalize = require('vanadium').vdl.canonicalize;
+
+
+
+
+
+var access = require('./../../security/access');
+
+module.exports = {};
+
+
+
+// Types:
+
+
+
+
+// Consts:
+
+
+
+// Errors:
+
+
+
+// Services:
+
+  
+    
+function Object(){}
+module.exports.Object = Object;
+
+    
+      
+Object.prototype.setPermissions = function(ctx, serverCall, perms, version) {
+  throw new Error('Method SetPermissions not implemented');
+};
+    
+      
+Object.prototype.getPermissions = function(ctx, serverCall) {
+  throw new Error('Method GetPermissions not implemented');
+};
+     
+
+    
+Object.prototype._serviceDescription = {
+  name: 'Object',
+  pkgPath: 'v.io/v23/services/permissions',
+  doc: "// Object provides access control for Vanadium objects.\n//\n// Vanadium services implementing dynamic access control would typically embed\n// this interface and tag additional methods defined by the service with one of\n// Admin, Read, Write, Resolve etc. For example, the VDL definition of the\n// object would be:\n//\n//   package mypackage\n//\n//   import \"v.io/v23/security/access\"\n//   import \"v.io/v23/services/permissions\"\n//\n//   type MyObject interface {\n//     permissions.Object\n//     MyRead() (string, error) {access.Read}\n//     MyWrite(string) error    {access.Write}\n//   }\n//\n// If the set of pre-defined tags is insufficient, services may define their\n// own tag type and annotate all methods with this new type.\n//\n// Instead of embedding this Object interface, define SetPermissions and\n// GetPermissions in their own interface. Authorization policies will typically\n// respect annotations of a single type. For example, the VDL definition of an\n// object would be:\n//\n//  package mypackage\n//\n//  import \"v.io/v23/security/access\"\n//\n//  type MyTag string\n//\n//  const (\n//    Blue = MyTag(\"Blue\")\n//    Red  = MyTag(\"Red\")\n//  )\n//\n//  type MyObject interface {\n//    MyMethod() (string, error) {Blue}\n//\n//    // Allow clients to change access via the access.Object interface:\n//    SetPermissions(perms access.Permissions, version string) error         {Red}\n//    GetPermissions() (perms access.Permissions, version string, err error) {Blue}\n//  }",
+  embeds: [],
+  methods: [
+    
+      
+    {
+    name: 'SetPermissions',
+    doc: "// SetPermissions replaces the current Permissions for an object.  version\n// allows for optional, optimistic concurrency control.  If non-empty,\n// version's value must come from GetPermissions.  If any client has\n// successfully called SetPermissions in the meantime, the version will be\n// stale and SetPermissions will fail.  If empty, SetPermissions performs an\n// unconditional update.\n//\n// Permissions objects are expected to be small.  It is up to the\n// implementation to define the exact limit, though it should probably be\n// around 100KB.  Large lists of principals can be represented concisely using\n// blessings.\n//\n// There is some ambiguity when calling SetPermissions on a mount point.\n// Does it affect the mount itself or does it affect the service endpoint\n// that the mount points to?  The chosen behavior is that it affects the\n// service endpoint.  To modify the mount point's Permissions, use\n// ResolveToMountTable to get an endpoint and call SetPermissions on that.\n// This means that clients must know when a name refers to a mount point to\n// change its Permissions.",
+    inArgs: [{
+      name: 'perms',
+      doc: "",
+      type: new access.Permissions()._type
+    },
+    {
+      name: 'version',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Admin", true), new access.Tag()._type), ]
+  },
+    
+      
+    {
+    name: 'GetPermissions',
+    doc: "// GetPermissions returns the complete, current Permissions for an object. The\n// returned version can be passed to a subsequent call to SetPermissions for\n// optimistic concurrency control. A successful call to SetPermissions will\n// invalidate version, and the client must call GetPermissions again to get\n// the current version.",
+    inArgs: [],
+    outArgs: [{
+      name: 'perms',
+      doc: "",
+      type: new access.Permissions()._type
+    },
+    {
+      name: 'version',
+      doc: "",
+      type: vdl.types.STRING
+    },
+    ],
+    inStream: null,
+    outStream: null,
+    tags: [canonicalize.reduce(new access.Tag("Admin", true), new access.Tag()._type), ]
+  },
+     
+  ]
+};
+
+   
+ 
+
+
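As with the other generated stubs, a real service overrides the Object methods. A minimal in-memory sketch follows; the PermsService name is illustrative, and returning the two out-args (perms, version) as an array is an assumption about how the JS runtime maps multiple results.

var permissions = require('./src/gen-vdl/v.io/v23/services/permissions');

function PermsService(initialPerms) {
  this.perms = initialPerms;  // an access.Permissions value
  this.version = '0';
}
PermsService.prototype = new permissions.Object();

PermsService.prototype.setPermissions = function(ctx, call, perms, version) {
  if (version !== '' && version !== this.version) {
    throw new Error('stale version');  // a real service would use a verror
  }
  this.perms = perms;
  this.version = String(Number(this.version) + 1);
};

PermsService.prototype.getPermissions = function(ctx, call) {
  return [this.perms, this.version];  // assumption: out-args as an array
};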
diff --git a/src/gen-vdl/v.io/v23/services/watch/index.js b/src/gen-vdl/v.io/v23/services/watch/index.js
new file mode 100644
index 0000000..426a80c
--- /dev/null
+++ b/src/gen-vdl/v.io/v23/services/watch/index.js
@@ -0,0 +1,112 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated by the vanadium vdl tool.
+var vdl = require('vanadium').vdl;
+var makeError = require('vanadium').verror.makeError;
+var actions = require('vanadium').verror.actions;
+var canonicalize = require('vanadium').vdl.canonicalize;
+
+
+
+
+
+var access = require('./../../security/access');
+
+module.exports = {};
+
+
+
+// Types:
+var _typeChange = new vdl.Type();
+var _typeGlobRequest = new vdl.Type();
+var _typeResumeMarker = new vdl.Type();
+_typeChange.kind = vdl.kind.STRUCT;
+_typeChange.name = "v.io/v23/services/watch.Change";
+_typeChange.fields = [{name: "Name", type: vdl.types.STRING}, {name: "State", type: vdl.types.INT32}, {name: "Value", type: vdl.types.ANY}, {name: "ResumeMarker", type: _typeResumeMarker}, {name: "Continued", type: vdl.types.BOOL}];
+_typeGlobRequest.kind = vdl.kind.STRUCT;
+_typeGlobRequest.name = "v.io/v23/services/watch.GlobRequest";
+_typeGlobRequest.fields = [{name: "Pattern", type: vdl.types.STRING}, {name: "ResumeMarker", type: _typeResumeMarker}];
+_typeResumeMarker.kind = vdl.kind.LIST;
+_typeResumeMarker.name = "v.io/v23/services/watch.ResumeMarker";
+_typeResumeMarker.elem = vdl.types.BYTE;
+_typeChange.freeze();
+_typeGlobRequest.freeze();
+_typeResumeMarker.freeze();
+module.exports.Change = (vdl.registry.lookupOrCreateConstructor(_typeChange));
+module.exports.GlobRequest = (vdl.registry.lookupOrCreateConstructor(_typeGlobRequest));
+module.exports.ResumeMarker = (vdl.registry.lookupOrCreateConstructor(_typeResumeMarker));
+
+
+
+
+// Consts:
+
+  module.exports.Exists = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(vdl.types.INT32))(0, true), vdl.types.INT32);
+
+  module.exports.DoesNotExist = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(vdl.types.INT32))(1, true), vdl.types.INT32);
+
+  module.exports.InitialStateSkipped = canonicalize.reduce(new (vdl.registry.lookupOrCreateConstructor(vdl.types.INT32))(2, true), vdl.types.INT32);
+
+
+
+// Errors:
+
+module.exports.UnknownResumeMarkerError = makeError('v.io/v23/services/watch.UnknownResumeMarker', actions.NO_RETRY, {
+  'en': '{1:}{2:} unknown resume marker {_}',
+}, [
+]);
+
+
+
+
+// Services:
+
+  
+    
+function GlobWatcher(){}
+module.exports.GlobWatcher = GlobWatcher;
+
+    
+      
+GlobWatcher.prototype.watchGlob = function(ctx, serverCall, req) {
+  throw new Error('Method WatchGlob not implemented');
+};
+     
+
+    
+GlobWatcher.prototype._serviceDescription = {
+  name: 'GlobWatcher',
+  pkgPath: 'v.io/v23/services/watch',
+  doc: "// GlobWatcher allows a client to receive updates for changes to objects\n// that match a pattern.  See the package comments for details.",
+  embeds: [],
+  methods: [
+    
+      
+    {
+    name: 'WatchGlob',
+    doc: "// WatchGlob returns a stream of changes that match a pattern.",
+    inArgs: [{
+      name: 'req',
+      doc: "",
+      type: _typeGlobRequest
+    },
+    ],
+    outArgs: [],
+    inStream: null,
+    outStream: {
+      name: '',
+      doc: '',
+      type: _typeChange
+    },
+    tags: [canonicalize.reduce(new access.Tag("Resolve", true), new access.Tag()._type), ]
+  },
+     
+  ]
+};
+
+   
+ 
+
+
diff --git a/src/gen-vdl/v.io/v23/uniqueid/index.js b/src/gen-vdl/v.io/v23/uniqueid/index.js
new file mode 100644
index 0000000..84dc28f
--- /dev/null
+++ b/src/gen-vdl/v.io/v23/uniqueid/index.js
@@ -0,0 +1,42 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated by the vanadium vdl tool.
+var vdl = require('vanadium').vdl;
+
+
+
+
+
+
+module.exports = {};
+
+
+
+// Types:
+var _typeId = new vdl.Type();
+_typeId.kind = vdl.kind.ARRAY;
+_typeId.name = "v.io/v23/uniqueid.Id";
+_typeId.len = 16;
+_typeId.elem = vdl.types.BYTE;
+_typeId.freeze();
+module.exports.Id = (vdl.registry.lookupOrCreateConstructor(_typeId));
+
+
+
+
+// Consts:
+
+
+
+// Errors:
+
+
+
+// Services:
+
+   
+ 
+
+
diff --git a/src/gen-vdl/v.io/v23/vdlroot/time/index.js b/src/gen-vdl/v.io/v23/vdlroot/time/index.js
new file mode 100644
index 0000000..1ca8aac
--- /dev/null
+++ b/src/gen-vdl/v.io/v23/vdlroot/time/index.js
@@ -0,0 +1,53 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated by the vanadium vdl tool.
+var vdl = require('vanadium').vdl;
+
+
+
+
+
+
+module.exports = {};
+
+
+
+// Types:
+var _typeDuration = new vdl.Type();
+var _typeTime = new vdl.Type();
+var _typeWireDeadline = new vdl.Type();
+_typeDuration.kind = vdl.kind.STRUCT;
+_typeDuration.name = "time.Duration";
+_typeDuration.fields = [{name: "Seconds", type: vdl.types.INT64}, {name: "Nanos", type: vdl.types.INT32}];
+_typeTime.kind = vdl.kind.STRUCT;
+_typeTime.name = "time.Time";
+_typeTime.fields = [{name: "Seconds", type: vdl.types.INT64}, {name: "Nanos", type: vdl.types.INT32}];
+_typeWireDeadline.kind = vdl.kind.STRUCT;
+_typeWireDeadline.name = "time.WireDeadline";
+_typeWireDeadline.fields = [{name: "FromNow", type: _typeDuration}, {name: "NoDeadline", type: vdl.types.BOOL}];
+_typeDuration.freeze();
+_typeTime.freeze();
+_typeWireDeadline.freeze();
+module.exports.Duration = (vdl.registry.lookupOrCreateConstructor(_typeDuration));
+module.exports.Time = (vdl.registry.lookupOrCreateConstructor(_typeTime));
+module.exports.WireDeadline = (vdl.registry.lookupOrCreateConstructor(_typeWireDeadline));
+
+
+
+
+// Consts:
+
+
+
+// Errors:
+
+
+
+// Services:
+
+   
+ 
+
+
diff --git a/src/nosql/batch-database.js b/src/nosql/batch-database.js
new file mode 100644
index 0000000..2329dde
--- /dev/null
+++ b/src/nosql/batch-database.js
@@ -0,0 +1,85 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+module.exports = BatchDatabase;
+
+/*
+ * A handle to a set of reads and writes to the database that should be
+ * considered an atomic unit. See beginBatch() for concurrency semantics.
+ *
+ * This constructor is private.  Use [database.beginBatch]{@link
+ * module:syncbase.nosql.Database.beginBatch} or [nosql.runInBatch]{@link
+ * module:syncbase.nosql~runInBatch} instead.
+ * @constructor
+ * @inner
+ * @param {module:syncbase.database.Database} db Database.
+ * @param {number} schemaVersion Database schema version expected by client.
+ */
+function BatchDatabase(db, schemaVersion) {
+  if (!(this instanceof BatchDatabase)) {
+    return new BatchDatabase(db, schemaVersion);
+  }
+
+  this.schemaVersion = schemaVersion;
+  Object.defineProperty(this, '_db', {
+    enumerable: false,
+    value: db,
+    writable: false
+  });
+}
+
+/**
+ * Returns the Table with the given name.
+ * @param {string} relativeName Table name.  Must not contain slashes.
+ * @return {module:syncbase.table.Table} Table object.
+ */
+BatchDatabase.prototype.table = function(relativeName) {
+  return this._db.table(relativeName);
+};
+
+/**
+ * Returns a list of all Table names.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ */
+BatchDatabase.prototype.listTables = function(ctx, cb) {
+  this._db.listTables(ctx, cb);
+};
+
+/**
+ * Persists the pending changes to the database.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ */
+BatchDatabase.prototype.commit = function(ctx, cb) {
+  this._db._wire(ctx).commit(ctx, this.schemaVersion, cb);
+};
+
+/**
+ * Notifies the server that any pending changes can be discarded.  Calling
+ * abort is not strictly required, but it may allow the server to release
+ * locks or other resources sooner than if it were not called.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ */
+BatchDatabase.prototype.abort = function(ctx, cb) {
+  this._db._wire(ctx).abort(ctx, this.schemaVersion, cb);
+};
+
+/**
+ * Executes a syncQL query.
+ *
+ * Returns a stream of rows.  The first row contains an array of headers (i.e.
+ * column names).  Subsequent rows contain an array of values for each row that
+ * matches the query.  The number of values returned in each row will match the
+ * size of the headers array.
+ *
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {string} query Query string.
+ * @param {function} cb Callback.
+ * @returns {stream} Stream of rows.
+ */
+BatchDatabase.prototype.exec = function(ctx, query, cb) {
+  this._db.exec(ctx, query, cb);
+};
diff --git a/src/nosql/batch.js b/src/nosql/batch.js
new file mode 100644
index 0000000..157a21d
--- /dev/null
+++ b/src/nosql/batch.js
@@ -0,0 +1,62 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+module.exports = runInBatch;
+
+/**
+ * @summary
+ * runInBatch runs a function with a newly created batch. If the function
+ * errors, the batch is aborted. If the function succeeds, the batch is
+ * committed.
+ *
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {module:syncbase.database.Database} db Database.
+ * @param {module:syncbase.nosql.BatchOptions} opts BatchOptions.
+ * @param {module:syncbase.nosql~runInBatchFn} fn Function to run inside a
+ * batch.
+ * @param {module:vanadium~voidCb} cb Callback that will be called after the
+ * batch has been committed or aborted.
+ */
+function runInBatch(ctx, db, opts, fn, cb) {
+  function attempt(cb) {
+    db.beginBatch(ctx, opts, function(err, batchDb) {
+      if (err) {
+        return cb(err);
+      }
+      fn(batchDb, function(err) {
+        if (err) {
+          return batchDb.abort(ctx, function() {
+            return cb(err);  // return fn error, not abort error
+          });
+        }
+        // TODO(sadovsky): commit() can fail for a number of reasons, e.g. RPC
+        // failure or ErrConcurrentTransaction. Depending on the cause of
+        // failure, it may be desirable to retry the commit() and/or to call
+        // abort().
+        batchDb.commit(ctx, cb);
+      });
+    });
+  }
+
+  function retryLoop(i) {
+    attempt(function(err) {
+      if (err && i < 2) {
+        retryLoop(i + 1);
+      } else {
+        cb(err);
+      }
+    });
+  }
+
+  retryLoop(0);
+}
+
+/**
+ * A function that is run inside a batch by [runInBatch]{@link
+ * module:syncbase.nosql~runInBatch}.
+ * @callback module:syncbase.nosql~runInBatchFn
+ * @param {module:syncbase.batchDatabase.BatchDatabase} batch BatchDatabase.
+ * @param {function} cb Callback to call when the work inside the batch is
+ * done.
+ */
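+
+// A minimal usage sketch (illustrative only): it assumes `ctx` is an existing
+// Vanadium context, `db` is a Database handle, `nosql` refers to this
+// package's nosql module, and the table/key names are hypothetical.  Writes
+// made through `batchDb` are committed if the function succeeds and aborted
+// if it passes an error to its callback.
+//
+//   nosql.runInBatch(ctx, db, {}, function(batchDb, done) {
+//     batchDb.table('users').put(ctx, 'alice', 'some value', done);
+//   }, function(err) {
+//     if (err) { console.error('batch failed:', err); }
+//   });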
diff --git a/src/nosql/blob.js b/src/nosql/blob.js
new file mode 100644
index 0000000..c406165
--- /dev/null
+++ b/src/nosql/blob.js
@@ -0,0 +1,122 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+module.exports = Blob;
+
+function Blob(db, blobRef) {
+  if (!(this instanceof Blob)) {
+    return new Blob(db, blobRef);
+  }
+
+  /**
+   * @private
+   */
+  Object.defineProperty(this, '_db', {
+    enumerable: false,
+    value: db,
+    writable: false
+  });
+
+  /**
+   * @property ref
+   * @type {string}
+   */
+  Object.defineProperty(this, 'ref', {
+    enumerable: true,
+    value: blobRef,
+    writable: false
+  });
+}
+
+/**
+ * Appends the byte stream to the blob.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ * @returns {Stream<Uint8Array>} Stream of bytes to append to blob.
+ */
+Blob.prototype.put = function(ctx, cb) {
+  return this._db._wire(ctx).putBlob(ctx, this.ref, cb).stream;
+};
+
+/**
+ * Marks the blob as immutable.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ */
+Blob.prototype.commit = function(ctx, cb) {
+  this._db._wire(ctx).commitBlob(ctx, this.ref, cb);
+};
+
+/**
+ * Gets the count of bytes written as part of the blob (committed or
+ * uncommitted).
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ */
+Blob.prototype.size = function(ctx, cb) {
+  this._db._wire(ctx).getBlobSize(ctx, this.ref, cb);
+};
+
+/**
+ * Locally deletes the blob (committed or uncommitted).
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ */
+Blob.prototype.delete = function(ctx, cb) {
+  this._db._wire(ctx).deleteBlob(ctx, this.ref, cb);
+};
+
+/**
+ * Returns the byte stream from a committed blob starting at offset.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {number} offset Offset in bytes.
+ * @param {function} cb Callback.
+ * @returns {Stream<Uint8Array>} Stream of blob bytes.
+ */
+Blob.prototype.get = function(ctx, offset, cb) {
+  return this._db._wire(ctx).getBlob(ctx, this.ref, offset, cb).stream;
+};
+
+/**
+ * Initiates fetching a blob if it is not found locally.  The priority
+ * controls the network priority of the blob: higher priority blobs are
+ * fetched before lower priority ones.  However, an ongoing blob transfer is
+ * not interrupted.
+ * Status updates are streamed back to the client as fetch is in progress.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {number} priority Priority.
+ * @param {function} cb Callback.
+ * @returns {Stream<BlobFetchStatus>} Stream of blob statuses.
+ */
+Blob.prototype.fetch = function(ctx, priority, cb) {
+  return this._db._wire(ctx).fetchBlob(ctx, this.ref, priority, cb).stream;
+};
+
+/**
+ * Locally pins the blob so that it is not evicted.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ */
+Blob.prototype.pin = function(ctx, cb) {
+  this._db._wire(ctx).pinBlob(ctx, this.ref, cb);
+};
+
+/**
+ * Locally unpins the blob so that it can be evicted if needed.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ */
+Blob.prototype.unpin = function(ctx, cb) {
+  this._db._wire(ctx).unpinBlob(ctx, this.ref, cb);
+};
+
+/**
+ * Locally caches the blob with the specified rank.  Lower ranked blobs are
+ * more eagerly evicted.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {number} rank Rank of blob.
+ * @param {function} cb Callback.
+ */
+Blob.prototype.keep = function(ctx, rank, cb) {
+  this._db._wire(ctx).keepBlob(ctx, this.ref, rank, cb);
+};
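+
+// A minimal usage sketch (illustrative only): it assumes `ctx` is an existing
+// Vanadium context and `db` is a Database handle.  It creates a blob, streams
+// a few bytes into it, commits it, and then reads back its size.
+//
+//   db.createBlob(ctx, function(err, blob) {
+//     if (err) { return console.error(err); }
+//     var writer = blob.put(ctx, function(err) {
+//       if (err) { return console.error(err); }
+//       blob.commit(ctx, function(err) {
+//         if (err) { return console.error(err); }
+//         blob.size(ctx, console.log);
+//       });
+//     });
+//     writer.write(new Uint8Array([1, 2, 3]));
+//     writer.end();
+//   });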
diff --git a/src/nosql/database.js b/src/nosql/database.js
new file mode 100644
index 0000000..ce2c35f
--- /dev/null
+++ b/src/nosql/database.js
@@ -0,0 +1,519 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+module.exports = Database;
+
+var through2 = require('through2');
+var vanadium = require('vanadium');
+// TODO(nlacasse): We should put unwrap and other type util methods on
+// vanadium.vdl object.
+var unwrap = require('vanadium/src/vdl/type-util').unwrap;
+var verror = vanadium.verror;
+
+var nosqlVdl = require('../gen-vdl/v.io/syncbase/v23/services/syncbase/nosql');
+var watchVdl = require('../gen-vdl/v.io/v23/services/watch');
+
+var BatchDatabase = require('./batch-database');
+/* jshint -W079 */
+// Silence jshint's error about redefining 'Blob'.
+var Blob = require('./blob');
+/* jshint +W079 */
+var SyncGroup = require('./syncgroup');
+var Table = require('./table');
+var util = require('../util');
+var watch = require('./watch');
+
+/**
+ * Database represents a collection of Tables. Batches, queries, sync, watch,
+ * etc. all operate at the Database level.
+ * @constructor
+ * @param {string} parentFullName Full name of App which contains this
+ * Database.
+ * @param {string} relativeName Relative name of this Database.  Must not
+ * contain slashes.
+ * @param {module:syncbase.schema.Schema} schema Schema for the database.
+ */
+function Database(parentFullName, relativeName, schema) {
+  if (!(this instanceof Database)) {
+    return new Database(parentFullName, relativeName, schema);
+  }
+
+  util.addNameProperties(this, parentFullName, relativeName);
+
+  Object.defineProperty(this, 'schema', {
+    enumerable: false,
+    value: schema,
+    writable: false
+  });
+
+  Object.defineProperty(this, 'schemaVersion', {
+    enumerable: false,
+    value: schema ? schema.metadata.version : -1,
+    writable: false
+  });
+
+  /**
+   * Caches the database wire object.
+   * @private
+   */
+  Object.defineProperty(this, '_wireObj', {
+    enumerable: false,
+    value: null,
+    writable: true
+  });
+}
+
+/**
+ * @private
+ */
+Database.prototype._wire = function(ctx) {
+  if (this._wireObj) {
+    return this._wireObj;
+  }
+  var client = vanadium.runtimeForContext(ctx).newClient();
+  var signature = [nosqlVdl.Database.prototype._serviceDescription];
+
+  this._wireObj = client.bindWithSignature(this.fullName, signature);
+  return this._wireObj;
+};
+
+/**
+ * Creates this Database.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {module:vanadium.security.access.Permissions} perms Permissions for
+ * the new database.  If perms is null, we inherit (copy) the App perms.
+ * @param {function} cb Callback.
+ */
+Database.prototype.create = function(ctx, perms, cb) {
+  var schemaMetadata = null;
+  if (this.schema) {
+    schemaMetadata = this.schema.metadata;
+  }
+  this._wire(ctx).create(ctx, schemaMetadata, perms, cb);
+};
+
+/**
+ * Deletes this Database.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ */
+Database.prototype.delete = function(ctx, cb) {
+  this._wire(ctx).delete(ctx, this.schemaVersion, cb);
+};
+
+/**
+ * Returns true only if this Database exists.
+ * Insufficient permissions cause exists to return false instead of an error.
+ * TODO(ivanpi): exists may fail with an error if higher levels of hierarchy
+ * do not exist.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ */
+Database.prototype.exists = function(ctx, cb) {
+  this._wire(ctx).exists(ctx, this.schemaVersion, cb);
+};
+
+/**
+ * Executes a syncQL query.
+ *
+ * Returns a stream of rows.  The first row contains an array of headers (i.e.
+ * column names).  Subsequent rows contain an array of values for each row that
+ * matches the query.  The number of values returned in each row will match the
+ * size of the headers array.
+ * Concurrency semantics: It is legal to perform writes concurrently with
+ * Exec. The returned stream reads from a consistent snapshot taken at the
+ * time of the RPC, and will not reflect subsequent writes to keys not yet
+ * reached by the stream.
+ *
+ * NOTE(nlacasse): The Go client library returns the headers separately from
+ * the stream.  We could potentially do something similar in JavaScript, by
+ * pulling the headers off the stream and passing them to the callback.
+ * However, by Vanadium JS convention the callback gets called at the *end* of
+ * the RPC, so a developer would have to wait for the stream to finish before
+ * seeing what the headers are, which is not ideal.  We also cannot return the
+ * headers directly because reading from the stream is async.
+ *
+ * TODO(nlacasse): Syncbase queries don't work on values that were put without
+ * type information.  When JavaScript encodes values with no type infomation,
+ * it uses "vdl.Value" for the type.  Presumably, syncbase does not know how to
+ * decode such objects, so queries that involve inspecting the object or its
+ * type don't work.
+ *
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {string} query Query string.
+ * @param {function} cb Callback.
+ * @returns {stream} Stream of rows.
+ */
+Database.prototype.exec = function(ctx, query, cb) {
+  var streamUnwrapper = through2({
+    objectMode: true
+  }, function(res, enc, cb) {
+    return cb(null, res.map(unwrap));
+  });
+
+  var stream = this._wire(ctx).exec(ctx, this.schemaVersion, query, cb).stream;
+
+  var decodedStream = stream.pipe(streamUnwrapper);
+  stream.on('error', function(err) {
+    decodedStream.emit('error', err);
+  });
+
+  return decodedStream;
+};
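+
+// A minimal sketch of consuming the exec() stream (illustrative only; `ctx`
+// is assumed to be an existing Vanadium context, `db` a Database handle, and
+// the syncQL query and table name are hypothetical).  The first row holds the
+// column headers; every later row holds the corresponding values.
+//
+//   var headers;
+//   var stream = db.exec(ctx, 'select k, v from users', function(err) {
+//     if (err) { console.error('exec failed:', err); }
+//   });
+//   stream.on('data', function(row) {
+//     if (!headers) {
+//       headers = row;              // e.g. ['k', 'v']
+//     } else {
+//       console.log(headers, row);  // values for one matching row
+//     }
+//   });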
+
+/**
+ * Returns the Table with the given name.
+ * @param {string} relativeName Table name.  Must not contain slashes.
+ * @return {module:syncbase.table.Table} Table object.
+ */
+Database.prototype.table = function(relativeName) {
+  return new Table(this.fullName, relativeName, this.schemaVersion);
+};
+
+/**
+ * Returns a list of all Table names.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ */
+Database.prototype.listTables = function(ctx, cb) {
+  util.getChildNames(ctx, this.fullName, cb);
+};
+
+/**
+ * @private
+ */
+Database.prototype._tableWire = function(ctx, relativeName) {
+  if (relativeName.indexOf('/') >= 0) {
+    throw new Error('relativeName must not contain slashes.');
+  }
+
+  var client = vanadium.runtimeForContext(ctx).newClient();
+  var signature = [nosqlVdl.Table.prototype._serviceDescription];
+
+  var fullTableName = vanadium.naming.join(this.fullName, relativeName);
+  return client.bindWithSignature(fullTableName, signature);
+};
+
+// TODO(nlacasse): It's strange that we create a Database with:
+//   var db = new Database();
+//   db.create();
+// But we create a Table with:
+//   db.createTable();
+// The .delete method is similarly confusing.  db.delete deletes a database,
+// but table.delete deletes a row (or row range).
+// Consider putting all 'create' and 'delete' methods on the parent class for
+// consistency.
+// TODO(aghassemi): If we keep this, it should return "table" in the CB instead
+// of being void.
+/**
+ * Creates the specified Table.
+ * If perms is null, we inherit (copy) the Database perms.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {string} relativeName Table name.  Must not contain slashes.
+ * @param {module:vanadium.security.access.Permissions} perms Permissions for
+ * the new table.  If perms is null, we inherit (copy) the Database perms.
+ * @param {function} cb Callback.
+ */
+Database.prototype.createTable = function(ctx, relativeName, perms, cb) {
+  this._tableWire(ctx, relativeName).create(ctx, this.schemaVersion, perms, cb);
+};
+
+/**
+ * Deletes the specified Table.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {string} relativeName Relative name of Table to delete.  Must not
+ * contain slashes.
+ * @param {function} cb Callback.
+ */
+Database.prototype.deleteTable = function(ctx, relativeName, cb) {
+  this._tableWire(ctx, relativeName).delete(ctx, this.schemaVersion, cb);
+};
+
+/**
+ * Watches for updates to the database. For each watch request, the client will
+ * receive a reliable stream of watch events without re-ordering.
+ *
+ * This method is designed to be used in the following way:
+ * 1) begin a read-only batch
+ * 2) read all information your app needs
+ * 3) read the ResumeMarker
+ * 4) abort the batch
+ * 5) start watching for changes to the data using the ResumeMarker
+ *
+ * In this configuration the client doesn't miss any changes.
+ *
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {string} tableName Name of table to watch.
+ * @param {string} prefix Prefix of keys to watch.
+ * @param {module:syncbase.nosql.watch.ResumeMarker} resumeMarker ResumeMarker
+ * to resume watching from.
+ * @param {function} [cb] Optional callback that will be called after watch RPC
+ * finishes.
+ * @returns {stream} Stream of WatchChange objects.
+ */
+Database.prototype.watch = function(ctx, tableName, prefix, resumeMarker, cb) {
+  var globReq = new watchVdl.GlobRequest({
+    pattern: vanadium.naming.join(tableName, prefix + '*'),
+    resumeMarker: resumeMarker
+  });
+
+  var watchChangeEncoder = through2({
+    objectMode: true
+  }, function(change, enc, cb) {
+    var changeType;
+    switch (change.state) {
+      case watchVdl.Exists.val:
+        changeType = 'put';
+        break;
+      case watchVdl.DoesNotExist.val:
+        changeType = 'delete';
+        break;
+      default:
+        return cb(new Error('invalid change state ' + change.state));
+    }
+
+    var wc = new watch.WatchChange({
+      tableName: vanadium.naming.stripBasename(change.name),
+      rowName: vanadium.naming.basename(change.name),
+      changeType: changeType,
+      valueBytes: changeType === 'put' ? change.value.value : null,
+      resumeMarker: change.resumeMarker,
+      fromSync: change.value.fromSync,
+      continued: change.continued
+    });
+    return cb(null, wc);
+  });
+
+  var stream = this._wire(ctx).watchGlob(ctx, globReq, cb).stream;
+
+  var watchChangeStream = stream.pipe(watchChangeEncoder);
+  stream.on('error', function(err) {
+    watchChangeStream.emit('error', err);
+  });
+
+  return watchChangeStream;
+};
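+
+// A minimal sketch of the watch flow (illustrative only; it skips the
+// read-only batch described above, and assumes `ctx` is an existing Vanadium
+// context, `db` a Database handle, and the table name and prefix are
+// hypothetical).
+//
+//   db.getResumeMarker(ctx, function(err, marker) {
+//     if (err) { return console.error(err); }
+//     var stream = db.watch(ctx, 'users', 'alice', marker, function(err) {
+//       if (err) { console.error('watch ended:', err); }
+//     });
+//     stream.on('data', function(change) {
+//       console.log(change.changeType, change.tableName, change.rowName);
+//     });
+//   });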
+
+/**
+ * Gets the ResumeMarker that points to the current end of the event log.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ */
+Database.prototype.getResumeMarker = function(ctx, cb) {
+  this._wire(ctx).getResumeMarker(ctx, cb);
+};
+
+/**
+ * Replaces the current Permissions for the Database.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {module:vanadium.security.access.Permissions} perms Permissions for
+ * the database.
+ * @param {string} version Version of the current Permissions object which will
+ * be overwritten. If empty, SetPermissions will perform an unconditional
+ * update.
+ * @param {function} cb Callback.
+ */
+Database.prototype.setPermissions = function(ctx, perms, version, cb) {
+  this._wire(ctx).setPermissions(ctx, perms, version, cb);
+};
+
+/**
+ * Returns the current Permissions for the Database.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ */
+Database.prototype.getPermissions = function(ctx, cb) {
+  this._wire(ctx).getPermissions(ctx, cb);
+};
+
+/**
+ * Creates a new batch. Instead of calling this function directly, clients are
+ * encouraged to use the runInBatch() helper function, which detects "concurrent
+ * batch" errors and handles retries internally.
+ *
+ * Default concurrency semantics:
+ * - Reads (e.g. gets, scans) inside a batch operate over a consistent snapshot
+ *   taken during beginBatch(), and will see the effects of prior writes
+ *   performed inside the batch.
+ * - commit() may fail with errConcurrentBatch, indicating that after
+ *   beginBatch() but before commit(), some concurrent routine wrote to a key
+ *   that matches a key or row-range read inside this batch.
+ * - Other methods will never fail with errConcurrentBatch, even if it is
+ *   known that commit() will fail with this error.
+ *
+ * Once a batch has been committed or aborted, subsequent method calls will
+ * fail with no effect.
+ *
+ * Concurrency semantics can be configured using BatchOptions.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {module:syncbase.nosql.BatchOptions} opts BatchOptions.
+ * @param {function} cb Callback.
+ */
+Database.prototype.beginBatch = function(ctx, opts, cb) {
+  var self = this;
+  this._wire(ctx).beginBatch(ctx, this.schemaVersion, opts,
+    function(err, relativeName) {
+      if (err) {
+        return cb(err);
+      }
+
+      // The relativeName returned from the beginBatch() call above is different
+      // than the relativeName of the current database. We must create a new
+      // Database with this new relativeName, and then create a BatchDatabase
+      // from that new Database.
+      var db = new Database(self._parentFullName, relativeName, self.schema);
+      return cb(null, new BatchDatabase(db, self.schemaVersion));
+    });
+};
+
+/**
+ * Gets a handle to the SyncGroup with the given name.
+ *
+ * @param {string} name SyncGroup name.
+ */
+Database.prototype.syncGroup = function(name) {
+  return new SyncGroup(this, name);
+};
+
+/**
+ * Gets the global names of all SyncGroups attached to this database.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ */
+Database.prototype.getSyncGroupNames = function(ctx, cb) {
+  this._wire(ctx).getSyncGroupNames(ctx, cb);
+};
+
+/**
+ * Compares the current schema version of the database with the schema version
+ * provided while creating this database handle. If the current database schema
+ * version is lower, then schema.upgrader is called. If schema.upgrader is
+ * successful, this method stores the new schema metadata in the database.
+ *
+ * It is important not to access or modify the database until upgradeIfOutdated
+ * has called its callback.
+ *
+ * TODO(nlacasse): Consider locking the database in some way so that the
+ * upgrader function can access it, but all other attempts fail immediately
+ * with a helpful error.
+ *
+ * Note: this method returns an error if the database handle was created
+ * without a schema.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ */
+Database.prototype.upgradeIfOutdated = function(ctx, cb) {
+  var self = this;
+  if (!self.schema) {
+    return process.nextTick(function() {
+      cb(new verror.BadStateError(ctx,
+          'schema or schema.metadata cannot be nil.  ' +
+          'A valid schema needs to be used when creating Database handle.'));
+    });
+  }
+
+  if (self.schema.metadata.version < 0) {
+    return process.nextTick(function() {
+      cb(new verror.BadStateError(ctx,
+          'schema version cannot be less than zero'));
+    });
+  }
+
+  self._getSchemaMetadata(ctx, function(err, currMeta) {
+    if (err) {
+      if (!(err instanceof verror.NoExistError)) {
+        return cb(err);
+      }
+
+      // If the client app did not set a schema as part of creating the
+      // database, getSchemaMetadata() will return a NoExistError. If so, we
+      // set the schema here.
+      self._setSchemaMetadata(ctx, self.schema.metadata, function(err) {
+
+        // The database may not yet exist. If so, the above call will return
+        // NoExistError and we return without error. If the error is
+        // different, we return it to the caller.
+        if (err && !(err instanceof verror.NoExistError)) {
+          return cb(err);
+        }
+        return cb(null, false);
+      });
+
+      return;
+    }
+
+    if (currMeta.version >= self.schema.metadata.version) {
+      return cb(null, false);
+    }
+
+    // Call the Upgrader provided by the app to upgrade the schema.
+    //
+    // TODO(nlacasse,jlodhia): disable sync before running Upgrader and
+    // reenable once Upgrader is finished.
+    //
+    // TODO(nlacasse,jlodhia): prevent other processes (local/remote) from
+    // accessing the database while upgrade is in progress.
+    self.schema.upgrader(self, currMeta.version, self.schema.metadata.version,
+        function(err) {
+      if (err) {
+        return cb(err);
+      }
+
+      // Update the schema metadata in db to the latest version.
+      self._setSchemaMetadata(ctx, self.schema.metadata, function(err) {
+        if (err) {
+          return cb(err);
+        }
+        cb(null, true);
+      });
+    });
+  });
+};
+
+/**
+ * Retrieves the schema metadata for the database.
+ * @private
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ */
+Database.prototype._getSchemaMetadata = function(ctx, cb) {
+  return this._wire(ctx).getSchemaMetadata(ctx, cb);
+};
+
+/**
+ * Stores the schema metadata for the database.
+ * @private
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {module:syncbase.schema.SchemaMetadata} metadata Schema metadata.
+ * @param {function} cb Callback.
+ */
+Database.prototype._setSchemaMetadata = function(ctx, metadata, cb) {
+  return this._wire(ctx).setSchemaMetadata(ctx, metadata, cb);
+};
+
+/**
+ * Returns a handle to the blob with the given blobRef.
+ * @param {module:syncbase.nosql.BlobRef} blobRef BlobRef of blob to get.
+ *
+ */
+Database.prototype.blob = function(blobRef) {
+  return new Blob(this, blobRef);
+};
+
+/**
+ * Creates a new blob.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ *
+ */
+Database.prototype.createBlob = function(ctx, cb) {
+  var self = this;
+  this._wire(ctx).createBlob(ctx, function(err, blobRef) {
+    if (err) {
+      return cb(err);
+    }
+    return cb(null, new Blob(self, blobRef));
+  });
+};
+
diff --git a/src/nosql/index.js b/src/nosql/index.js
new file mode 100644
index 0000000..469e1ed
--- /dev/null
+++ b/src/nosql/index.js
@@ -0,0 +1,30 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+var rowrange = require('./rowrange');
+var runInBatch = require('./batch');
+var Schema = require('./schema');
+var vdl = require('../gen-vdl/v.io/syncbase/v23/services/syncbase/nosql');
+var watch = require('./watch');
+
+/**
+ * @summary
+ * Defines the client API for the NoSQL part of Syncbase.
+ * @namespace
+ * @name nosql
+ * @memberof module:syncbase
+ */
+module.exports = {
+  BatchOptions: vdl.BatchOptions,
+  BlobRef: vdl.BlobRef,
+  ReadOnlyBatchError: vdl.ReadOnlyBatchError,
+  ResumeMarker: watch.ResumeMarker,
+  rowrange: rowrange,
+  runInBatch: runInBatch,
+  Schema: Schema,
+  SchemaMetadata: vdl.SchemaMetadata,
+  SyncGroupMemberInfo: vdl.SyncGroupMemberInfo,
+  SyncGroupSpec: vdl.SyncGroupSpec,
+  WatchChange: watch.WatchChange
+};
diff --git a/src/nosql/row.js b/src/nosql/row.js
new file mode 100644
index 0000000..ae679b1
--- /dev/null
+++ b/src/nosql/row.js
@@ -0,0 +1,136 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+var vanadium = require('vanadium');
+
+var nosqlVdl = require('../gen-vdl/v.io/syncbase/v23/services/syncbase/nosql');
+
+module.exports = Row;
+
+/**
+ * @summary
+ * Represents a single row in a Table.
+ * Private constructor, use table.row() to get an instance.
+ * @param {string} parentFullName Full name of Table which contains this Row.
+ * @param {string} key Primary key of this Row.
+ * @param {number} schemaVersion Database schema version expected by client.
+ * @inner
+ * @constructor
+ * @memberof module:syncbase.nosql
+ */
+function Row(parentFullName, key, schemaVersion) {
+  if (!(this instanceof Row)) {
+    return new Row(parentFullName, key, schemaVersion);
+  }
+
+  // TODO(aghassemi) We may need to escape the key. Align with Go implementation
+  // when that decision is made.
+  // Also for Database and Table, we throw error if name has a slash.
+  // Should they all behave the same or is row key really different?
+  var fullName = vanadium.naming.join(parentFullName, key);
+
+  this.schemaVersion = schemaVersion;
+  /**
+   * The key of this Row.
+   * @property key
+   * @type {string}
+   */
+  Object.defineProperty(this, 'key', {
+    value: key,
+    writable: false,
+    enumerable: true
+  });
+
+  /**
+   * The full name (object name) of this Row.
+   * @property fullName
+   * @type {string}
+   */
+  Object.defineProperty(this, 'fullName', {
+    value: fullName,
+    writable: false,
+    enumerable: true
+  });
+
+  /**
+   * Caches the table wire object.
+   * @private
+   */
+  Object.defineProperty(this, '_wireObj', {
+    enumerable: false,
+    value: null,
+    writable: true
+  });
+}
+
+/**
+ * @private
+ */
+Row.prototype._wire = function(ctx) {
+  if (this._wireObj) {
+    return this._wireObj;
+  }
+  var client = vanadium.runtimeForContext(ctx).newClient();
+  var signature = [nosqlVdl.Row.prototype._serviceDescription];
+
+  this._wireObj = client.bindWithSignature(this.fullName, signature);
+  return this._wireObj;
+};
+
+/**
+ * Returns true only if this Row exists.
+ * Insufficient permissions cause exists to return false instead of an error.
+ * TODO(ivanpi): exists may fail with an error if higher levels of hierarchy
+ * do not exist.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ */
+Row.prototype.exists = function(ctx, cb) {
+  this._wire(ctx).exists(ctx, this.schemaVersion, cb);
+};
+
+/**
+ * Returns the value for this Row.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ */
+Row.prototype.get = function(ctx, cb) {
+  this._wire(ctx).get(ctx, this.schemaVersion, function(err, value) {
+    if (err) {
+      return cb(err);
+    }
+
+    vanadium.vom.decode(value, false, null, cb);
+  });
+};
+
+/**
+ * Writes the given value for this Row.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {*} value Value to write.
+ * @param {module:vanadium.vdl.Type} [type] Type of value.
+ * @param {function} cb Callback.
+ */
+Row.prototype.put = function(ctx, value, type, cb) {
+  if (typeof cb === 'undefined' && typeof type === 'function') {
+    cb = type;
+    type = undefined;
+  }
+
+  // NOTE(aghassemi) Currently server side does not want to encode for
+  // performance reasons, so encoding/decoding is happening on the client side.
+  var encodedVal;
+  try {
+    encodedVal = vanadium.vom.encode(value, type);
+  } catch (e) {
+    return cb(e);
+  }
+  this._wire(ctx).put(ctx, this.schemaVersion, encodedVal, cb);
+};
+
+/**
+ * Deletes this Row.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ */
+Row.prototype.delete = function(ctx, cb) {
+  this._wire(ctx).delete(ctx, this.schemaVersion, cb);
+};
diff --git a/src/nosql/rowrange.js b/src/nosql/rowrange.js
new file mode 100644
index 0000000..98ed55a
--- /dev/null
+++ b/src/nosql/rowrange.js
@@ -0,0 +1,129 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+var inherits = require('inherits');
+
+var util = require('../util');
+
+/**
+ * @summary
+ * Provides utility methods to create different rowranges.
+ * @namespace
+ * @name rowrange
+ * @memberof module:syncbase.nosql
+ */
+module.exports = {
+  range: range,
+  singleRow: singleRow,
+  prefix: prefix
+};
+
+/**
+ * Creates a range for the given start and limit.
+ * RowRange represents all rows with keys in [start, limit). If limit is "", all
+ * rows with keys >= start are included.
+ * @param {string} start Start of the range.
+ * @param {string} limit Range limit.
+ * @return {module:syncbase.nosql~RowRange} A RowRange object.
+ */
+function range(start, limit) {
+  var startBytes = util.stringToUTF8Bytes(start);
+  var limitBytes = util.stringToUTF8Bytes(limit);
+
+  return new RowRange(startBytes, limitBytes);
+}
+
+/**
+ * Creates a range that only matches items of the given prefix.
+ * @param {string} p Key prefix.
+ * @return {module:syncbase.nosql~PrefixRange} A PrefixRange object. PrefixRange
+ * inherits from {@link module:syncbase.nosql~RowRange}
+ */
+function prefix(p) {
+  return new PrefixRange(p);
+}
+
+var ASCII_NULL = '\x00';
+/**
+ * Creates a range that only matches a single row of the given key.
+ * @param {string} row Row key.
+ * @return {module:syncbase.nosql~RowRange} A RowRange object.
+ */
+function singleRow(row) {
+  var startBytes = util.stringToUTF8Bytes(row);
+  var limitBytes = util.stringToUTF8Bytes(row + ASCII_NULL);
+  return new RowRange(startBytes, limitBytes);
+}
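+
+// Illustrative examples of the three helpers (the key strings are
+// hypothetical):
+//
+//   range('a', 'b');     // all rows with keys in ['a', 'b')
+//   prefix('foo');       // all rows whose keys start with 'foo'
+//   singleRow('foo1');   // exactly the row with key 'foo1'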
+
+/*
+ * @summary
+ * Represents a range of row values.
+ * Private constructor. Use one of the utility methods such as
+ * {@link module:syncbase.nosql.rowrange#range},
+ * {@link module:syncbase.nosql.rowrange#prefix},
+ * {@link module:syncbase.nosql.rowrange#singleRow}
+ * to create instances.
+ * @inner
+ * @constructor
+ * @memberof {module:syncbase.nosql.rowrange}
+ */
+function RowRange(start, limit) {
+  if (!(this instanceof RowRange)) {
+    return new RowRange(start, limit);
+  }
+
+  /**
+   * Start of range as byte[]
+   * @type {Uint8Array}
+   */
+  Object.defineProperty(this, 'start', {
+    value: start,
+    writable: false,
+    enumerable: true
+  });
+
+  /**
+   * Limit of range as byte[]
+   * @type {Uint8Array}
+   */
+  Object.defineProperty(this, 'limit', {
+    value: limit,
+    writable: false,
+    enumerable: true
+  });
+}
+
+/*
+ * @summary
+ * PrefixRange is a sub type of {@link module:syncbase.nosql.rowrange~RowRange}
+ * that indicates all ranges matching a prefix.
+ * Private constructor, use {@link module:syncbase.nosql.rowrange#prefix} to
+ * create an instance.
+ * @inherits module:syncbase.nosql.rowrange~RowRange
+ * @inner
+ * @constructor
+ * @memberof {module:syncbase.nosql.rowrange}
+ */
+function PrefixRange(prefix) {
+  if (!(this instanceof PrefixRange)) {
+    return new PrefixRange(prefix);
+  }
+
+  var startBytes = util.stringToUTF8Bytes(prefix);
+  var limitBytes = util.stringToUTF8Bytes(prefix);
+  util.prefixRangeLimit(limitBytes);
+
+  /**
+   * Prefix
+   * @type {string}
+   */
+  Object.defineProperty(this, 'prefix', {
+    value: prefix,
+    writable: false,
+    enumerable: true
+  });
+
+  RowRange.call(this, startBytes, limitBytes);
+}
+inherits(PrefixRange, RowRange);
diff --git a/src/nosql/schema.js b/src/nosql/schema.js
new file mode 100644
index 0000000..b98f77a
--- /dev/null
+++ b/src/nosql/schema.js
@@ -0,0 +1,43 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+module.exports = Schema;
+
+/**
+ * Each database has a Schema associated with it which defines the current
+ * version of the database. When a new version of app wishes to change its data
+ * in a way that it is not compatible with the old app's data, the app must
+ * change the schema version and provide relevant upgrade logic in the
+ * Upgrader. The conflict resolution rules are also associated with the schema
+ * version. Hence if the conflict resolution rules change then the schema
+ * version also must be bumped.
+ *
+ * Schema provides metadata and an upgrader for a given database.
+ *
+ * @constructor
+ * @param {module:syncbase.nosql.SchemaMetadata} metadata Schema metadata.
+ * @param {module:syncbase.nosql.Schema~upgrader} upgrader Upgrader function.
+ */
+function Schema(metadata, upgrader) {
+  Object.defineProperty(this, 'metadata', {
+    value: metadata,
+    writable: false,
+    enumerable: false
+  });
+
+  Object.defineProperty(this, 'upgrader', {
+    value: upgrader,
+    writable: false,
+    enumerable: false
+  });
+}
+
+/**
+ * Schema upgrader function.
+ * @callback module:syncbase.nosql.Schema~upgrader
+ * @param {module:syncbase.nosql.Database} db Database.
+ * @param {number} oldVersion Old version.
+ * @param {number} newVersion New version.
+ * @param {function} cb Callback to call when done.
+ */
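+
+// A minimal sketch of defining a schema with an upgrader (illustrative only;
+// `nosql` is assumed to refer to this package's nosql module and the version
+// number is hypothetical).  The resulting schema is passed when constructing
+// the Database handle, and db.upgradeIfOutdated(ctx, cb) runs the upgrader
+// when the stored version is older.
+//
+//   var metadata = new nosql.SchemaMetadata({version: 2});
+//   var schema = new nosql.Schema(metadata,
+//       function upgrader(db, oldVersion, newVersion, cb) {
+//         // Migrate data from oldVersion to newVersion, then call cb.
+//         cb(null);
+//       });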
diff --git a/src/nosql/syncgroup.js b/src/nosql/syncgroup.js
new file mode 100644
index 0000000..f7615b6
--- /dev/null
+++ b/src/nosql/syncgroup.js
@@ -0,0 +1,153 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+module.exports = SyncGroup;
+
+/**
+ * SyncGroup is the interface for a SyncGroup in the store.
+ */
+function SyncGroup(db, name) {
+  if (!(this instanceof SyncGroup)) {
+    return new SyncGroup(db, name);
+  }
+
+  /**
+   * @private
+   */
+  Object.defineProperty(this, '_db', {
+    enumerable: false,
+    value: db,
+    writable: false
+  });
+
+  /**
+   * @property name
+   * @type {string}
+   */
+  Object.defineProperty(this, 'name', {
+    enumerable: true,
+    value: name,
+    writable: false
+  });
+}
+
+/**
+ * Creates a new SyncGroup with the given spec.
+ *
+ * Requires: Client must have at least Read access on the Database; prefix ACL
+ * must exist at each SyncGroup prefix; Client must have at least Read access
+ * on each of these prefix ACLs.
+ *
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {module:syncbase.nosql.SyncGroupSpec} spec SyncGroupSpec.
+ * @param {module:syncbase.nosql.SyncGroupMemberInfo} myInfo
+ * SyncGroupMemberInfo.
+ * @param {function} cb Callback.
+ */
+SyncGroup.prototype.create = function(ctx, spec, myInfo, cb) {
+  this._db._wire(ctx).createSyncGroup(ctx, this.name, spec, myInfo, cb);
+};
+
+/**
+ * Joins a SyncGroup.
+ *
+ * Requires: Client must have at least Read access on the Database and on the
+ * SyncGroup ACL.
+ *
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {module:syncbase.nosql.SyncGroupMemberInfo} myInfo
+ * SyncGroupMemberInfo.
+ * @param {function} cb Callback.
+ */
+SyncGroup.prototype.join = function(ctx, myInfo, cb) {
+  this._db._wire(ctx).joinSyncGroup(ctx, this.name, myInfo, cb);
+};
+
+/**
+ * Leaves the SyncGroup. Previously synced data will continue to be
+ * available.
+ *
+ * Requires: Client must have at least Read access on the Database.
+ *
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ */
+SyncGroup.prototype.leave = function(ctx, cb) {
+  this._db._wire(ctx).leaveSyncGroup(ctx, this.name, cb);
+};
+
+/**
+ * Destroys a SyncGroup. Previously synced data will continue to be available
+ * to all members.
+ *
+ * Requires: Client must have at least Read access on the Database, and must
+ * have Admin access on the SyncGroup ACL.
+ *
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ */
+SyncGroup.prototype.destroy = function(ctx, cb) {
+  this._db._wire(ctx).destroySyncGroup(ctx, this.name, cb);
+};
+
+/**
+ * Ejects a member from the SyncGroup. The ejected member will not be able to
+ * sync further, but will retain any data it has already synced.
+ *
+ * Requires: Client must have at least Read access on the Database, and must
+ * have Admin access on the SyncGroup ACL.
+ *
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {module:syncbase.nosql.SyncGroupMemberInfo} member
+ * SyncGroupMemberInfo.
+ * @param {function} cb Callback.
+ */
+SyncGroup.prototype.eject = function(ctx, member, cb) {
+  this._db._wire(ctx).ejectFromSyncGroup(ctx, this.name, member, cb);
+};
+
+/**
+ * Gets the SyncGroup spec. The returned version allows for atomic
+ * read-modify-write of the spec - see the comment for setSpec.
+ *
+ * Requires: Client must have at least Read access on the Database and on the
+ * SyncGroup ACL.
+ *
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ */
+SyncGroup.prototype.getSpec = function(ctx, cb) {
+  this._db._wire(ctx).getSyncGroupSpec(ctx, this.name, cb);
+};
+
+/**
+ * Sets the SyncGroup spec. The version may be either empty or the value from
+ * a previous getSpec. If not empty, setSpec will only succeed if the current
+ * version matches the specified one.
+ *
+ * Requires: Client must have at least Read access on the Database, and must
+ * have Admin access on the SyncGroup ACL.
+ *
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {module:syncbase.nosql.SyncGroupSpec} spec SyncGroupSpec.
+ * @param {string} version Version of the current SyncGroupSpec object which
+ * will be overwritten. If empty, setSpec will perform an unconditional update.
+ * @param {function} cb Callback.
+ */
+SyncGroup.prototype.setSpec = function(ctx, spec, version, cb) {
+  this._db._wire(ctx).setSyncGroupSpec(ctx, this.name, spec, version, cb);
+};
+
+/**
+ * Gets the info objects for members of the SyncGroup.
+ *
+ * Requires: Client must have at least Read access on the Database and on the
+ * SyncGroup ACL.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ */
+SyncGroup.prototype.getMembers = function(ctx, cb) {
+  this._db._wire(ctx).getSyncGroupMembers(ctx, this.name, cb);
+};
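+
+// A minimal sketch of joining an existing SyncGroup (illustrative only; it
+// assumes `ctx` is an existing Vanadium context, `db` is a Database handle,
+// `nosql` refers to this package's nosql module, and `sgName` is the global
+// name of the SyncGroup).
+//
+//   var sg = db.syncGroup(sgName);
+//   sg.join(ctx, new nosql.SyncGroupMemberInfo({}), function(err) {
+//     if (err) { return console.error('join failed:', err); }
+//     sg.getSpec(ctx, console.log);
+//   });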
diff --git a/src/nosql/table.js b/src/nosql/table.js
new file mode 100644
index 0000000..96a6e98
--- /dev/null
+++ b/src/nosql/table.js
@@ -0,0 +1,272 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+var through2 = require('through2');
+var vanadium = require('vanadium');
+
+var nosqlVdl = require('../gen-vdl/v.io/syncbase/v23/services/syncbase/nosql');
+var prefix = require('./rowrange').prefix;
+var Row = require('./row');
+
+module.exports = Table;
+
+var util = require('../util');
+
+/**
+ * @summary
+ * Table represents a collection of Rows.
+ * Private constructor. Use database.table() to get an instance.
+ * @param {string} parentFullName Full name of Database which contains this
+ * Table.
+ * @param {string} relativeName Relative name of this Table.  Must not
+ * contain slashes.
+ * @param {number} schemaVersion Database schema version expected by client.
+ * @constructor
+ * @inner
+ * @memberof {module:syncbase.nosql}
+ */
+function Table(parentFullName, relativeName, schemaVersion) {
+  if (!(this instanceof Table)) {
+    return new Table(parentFullName, relativeName, schemaVersion);
+  }
+
+  util.addNameProperties(this, parentFullName, relativeName);
+
+  this.schemaVersion = schemaVersion;
+
+  /**
+   * Caches the table wire object.
+   * @private
+   */
+  Object.defineProperty(this, '_wireObj', {
+    enumerable: false,
+    value: null,
+    writable: true
+  });
+}
+
+/**
+ * @private
+ */
+Table.prototype._wire = function(ctx) {
+  if (this._wireObj) {
+    return this._wireObj;
+  }
+  var client = vanadium.runtimeForContext(ctx).newClient();
+  var signature = [nosqlVdl.Table.prototype._serviceDescription];
+
+  this._wireObj = client.bindWithSignature(this.fullName, signature);
+  return this._wireObj;
+};
+
+/**
+ * Returns true only if this Table exists.
+ * Insufficient permissions cause exists to return false instead of an error.
+ * TODO(ivanpi): exists may fail with an error if higher levels of hierarchy
+ * do not exist.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {function} cb Callback.
+ */
+Table.prototype.exists = function(ctx, cb) {
+  this._wire(ctx).exists(ctx, this.schemaVersion, cb);
+};
+
+/**
+ * Creates a Row object for the given primary key in this table.
+ * @param {string} key Primary key for the row.
+ * @return {module:syncbase.row.Row} Row object.
+ */
+Table.prototype.row = function(key) {
+  return new Row(this.fullName, key, this.schemaVersion);
+};
+
+/**
+ * Get returns the value stored under the given primary key via the callback.
+ * If the stored value cannot be decoded, an error is returned.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {string} key Primary key of the row.
+ * @param {function} cb Callback.
+ */
+Table.prototype.get = function(ctx, key, cb) {
+  this.row(key).get(ctx, cb);
+};
+
+/**
+ * Put writes the given value to this Table under the given primary key.
+ *
+ * Note that if you want to sync data with a Go syncbase client, or if you want
+ * to use syncbase queries, you must either specify the type of the value, or
+ * use a vdl value that includes its type.
+ *
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {string} key Primary key of the row.
+ * @param {*} value Value to put in the row.
+ * @param {module:vanadium.vdl.Type} [type] Type of value.
+ * @param {function} cb Callback.
+ */
+Table.prototype.put = function(ctx, key, value, type, cb) {
+  this.row(key).put(ctx, value, type, cb);
+};
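+
+// A minimal sketch of a put with an explicit VDL type, so that Go clients and
+// syncQL queries can interpret the value (illustrative only; `ctx` is assumed
+// to be an existing Vanadium context, `table` a Table handle, and the key is
+// hypothetical).
+//
+//   var vdl = require('vanadium').vdl;
+//   table.put(ctx, 'alice', 'hello world', vdl.types.STRING, function(err) {
+//     if (err) { console.error('put failed:', err); }
+//   });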
+
+/**
+ * Delete deletes all rows in the given range. If the last row that is covered
+ * by a prefix from SetPermissions is deleted, that (prefix, perms) pair is
+ * removed.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {module:syncbase.nosql.rowrange.RowRange} range Row ranges to delete.
+ * @param {function} cb Callback.
+ */
+Table.prototype.delete = function(ctx, range, cb) {
+  this._wire(ctx).deleteRowRange(
+        ctx, this.schemaVersion, range.start, range.limit, cb);
+};
+
+/**
+ * Scan returns all rows in the given range.
+ * Concurrency semantics: It is legal to perform writes concurrently with
+ * Scan. The returned stream reads from a consistent snapshot taken at the
+ * time of the RPC, and will not reflect subsequent writes to keys not yet
+ * reached by the stream.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {module:syncbase.nosql.rowrange.RowRange} range Row ranges to scan.
+ * @param {function} cb Callback.
+ * @returns {stream} Stream of row objects.
+ */
+Table.prototype.scan = function(ctx, range, cb) {
+  var vomStreamDecoder = through2({
+    objectMode: true
+  }, function(row, enc, cb) {
+    vanadium.vom.decode(row.value, false, null, function(err, decodedVal) {
+      if (err) {
+        return cb(err);
+      }
+      row.value = decodedVal;
+      cb(null, row);
+    });
+  });
+
+  var stream = this._wire(ctx)
+        .scan(ctx, this.schemaVersion, range.start, range.limit, cb).stream;
+  var decodedStream = stream.pipe(vomStreamDecoder);
+  stream.on('error', function(err) {
+    decodedStream.emit('error', err);
+  });
+
+  return decodedStream;
+};
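+
+// A minimal sketch of scanning all rows under a prefix (illustrative only;
+// `ctx` is assumed to be an existing Vanadium context and `table` a Table
+// handle; the field names on each streamed row are assumed to follow the
+// wire KeyValue struct).
+//
+//   var stream = table.scan(ctx, prefix('foo'), function(err) {
+//     if (err) { console.error('scan failed:', err); }
+//   });
+//   stream.on('data', function(row) {
+//     console.log(row.key, row.value);  // value is already VOM-decoded
+//   });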
+
+/**
+ * SetPermissions sets the permissions for all current and future rows with
+ * the given prefix. If the prefix overlaps with an existing prefix, the
+ * longest prefix that matches a row applies. For example:
+ *     setPermissions(ctx, prefix('a/b'), perms1)
+ *     setPermissions(ctx, prefix('a/b/c'), perms2)
+ * The permissions for row "a/b/1" are perms1, and the permissions for row
+ * "a/b/c/1" are perms2.
+ *
+ * SetPermissions will fail if called with a prefix that does not match any
+ * rows.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {module:syncbase.nosql.rowrange.PrefixRange|string} prefix Prefix or
+ * PrefixRange.
+ * @param {module:vanadium.security.access.Permissions} perms Permissions
+ * for the rows matching the prefix.
+ * @param {function} cb Callback.
+ */
+Table.prototype.setPermissions = function(ctx, prefix, perms, cb) {
+  this._wire(ctx).setPermissions(
+        ctx, this.schemaVersion, stringifyPrefix(prefix), perms, cb);
+};
+
+/**
+ * GetPermissions returns an array of (prefix, perms) pairs. The array is
+ * sorted from longest prefix to shortest, so element zero is the one that
+ * applies to the row with the given key. The last element is always the
+ * prefix "" which represents the table's permissions -- the array will always
+ * have at least one element.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {string} key Row key to get permissions for.
+ * @param {function} cb Callback.
+ */
+Table.prototype.getPermissions = function(ctx, key, cb) {
+  // There are two PrefixPermissions types: the wire type, where the prefix
+  // is a string, and the client type, where the prefix is a PrefixRange.
+  // We therefore convert from the wire type to the client type here.
+  this._wire(ctx).getPermissions(ctx, this.schemaVersion, key,
+      function(err, wirePerms) {
+        if (err) {
+          return cb(err);
+        }
+
+        var perms = wirePerms.map(function(v) {
+          return new PrefixPermissions(
+            prefix(v.prefix),
+            v.perms
+          );
+        });
+
+        cb(null, perms);
+      }
+  );
+};
+
+/**
+ * DeletePermissions deletes the permissions for the specified prefix. Any
+ * rows covered by this prefix will use the next longest prefix's permissions.
+ * @param {module:vanadium.context.Context} ctx Vanadium context.
+ * @param {module:syncbase.nosql.rowrange.PrefixRange|string} prefix Prefix or
+ * PrefixRange.
+ * @param {function} cb Callback.
+ */
+Table.prototype.deletePermissions = function(ctx, prefix, cb) {
+  //TODO(aghassemi): Why is prefix a PrefixRange in Go?
+  this._wire(ctx).deletePermissions(
+        ctx, this.schemaVersion, stringifyPrefix(prefix), cb);
+};
+
+function stringifyPrefix(prefix) {
+  var prefixStr = prefix;
+  if (typeof prefix === 'object') {
+    // Assume it is a PrefixRange.
+    prefixStr = prefix.prefix;
+  }
+  return prefixStr;
+}
+
+/**
+ * @summary
+ * Represents a pair of {@link module:syncbase.nosql~PrefixRange} and
+ * {@link module:vanadium.security.access.Permissions}.
+ * @constructor
+ * @inner
+ * @memberof {module:syncbase.nosql}
+ */
+function PrefixPermissions(prefixRange, perms) {
+  if (!(this instanceof PrefixPermissions)) {
+    return new PrefixPermissions(prefixRange, perms);
+  }
+
+  /**
+   * Prefix
+   * @type {module:syncbase.nosql~PrefixRange}
+   */
+  Object.defineProperty(this, 'prefix', {
+    value: prefixRange,
+    writable: false,
+    enumerable: true
+  });
+
+  /**
+   * Permissions
+   * @type {module:vanadium.security.access.Permissions}
+   */
+  Object.defineProperty(this, 'perms', {
+    value: perms,
+    writable: false,
+    enumerable: true
+  });
+}
diff --git a/src/nosql/watch.js b/src/nosql/watch.js
new file mode 100644
index 0000000..dba87e2
--- /dev/null
+++ b/src/nosql/watch.js
@@ -0,0 +1,107 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+var vom = require('vanadium').vom;
+
+var watchVdl = require('../gen-vdl/v.io/v23/services/watch');
+
+module.exports = {
+  ResumeMarker: watchVdl.ResumeMarker,
+  WatchChange: WatchChange
+};
+
+/**
+ * WatchChange represents the new value for a watched entity.
+ * @constructor
+ */
+function WatchChange(opts) {
+  /**
+   * @property tableName
+   * The name of the table that contains the changed row.
+   */
+  Object.defineProperty(this, 'tableName', {
+    enumerable: true,
+    value: opts.tableName,
+    writable: false
+  });
+
+  /**
+   * @property rowName
+   * Name of the changed row.
+   */
+  Object.defineProperty(this, 'rowName', {
+    enumerable: true,
+    value: opts.rowName,
+    writable: false
+  });
+
+  /**
+   * @property changeType
+   * Describes the type of the change. If the changeType equals 'put', then
+   * the row exists in the table and the value contains the new value for this
+   * row. If the changeType equals 'delete', then the row was removed from the
+   * table.
+   */
+  Object.defineProperty(this, 'changeType', {
+    enumerable: true,
+    value: opts.changeType,
+    writable: false
+  });
+
+  /**
+   * @property valueBytes
+   * The new VOM-encoded value for the row if the changeType is 'put', or null
+   * otherwise.
+   */
+  Object.defineProperty(this, 'valueBytes', {
+    enumerable: true,
+    value: opts.valueBytes,
+    writable: false
+  });
+
+  /**
+   * @property resumeMarker
+   * Provides a compact representation of all the messages that have been
+   * received by the caller for the given watch call.  This marker can be
+   * provided in the request message to allow the caller to resume watching
+   * the stream from a specific point without fetching the initial state.
+   */
+  Object.defineProperty(this, 'resumeMarker', {
+    enumerable: true,
+    value: opts.resumeMarker,
+    writable: false
+  });
+
+  /**
+   * @property fromSync
+   * Indicates whether the change came from sync. If fromSync is false, then
+   * the change originated from the local device.
+   */
+  Object.defineProperty(this, 'fromSync', {
+    enumerable: true,
+    value: opts.fromSync || false,
+    writable: false
+  });
+
+  /**
+   * @property continued
+   * If true, this WatchChange is followed by more WatchChanges that are in
+   * the same batch as this WatchChange.
+   */
+  Object.defineProperty(this, 'continued', {
+    enumerable: true,
+    value: opts.continued || false,
+    writable: false
+  });
+}
+
+/**
+ * Decodes the new value of the watched element. Fails if the changeType is
+ * 'delete', since deleted rows have no value.
+ * @param {function} cb Callback.
+ */
+WatchChange.prototype.getValue = function(cb) {
+  if (this.changeType === 'delete') {
+    return cb(new Error('invalid change type'));
+  }
+
+  vom.decode(this.valueBytes, false, null, cb);
+};
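+
+// Example (illustrative sketch): handle a WatchChange delivered by a watch
+// stream. Assumes `change` is a WatchChange instance:
+//
+//   if (change.changeType === 'put') {
+//     change.getValue(function(err, value) {
+//       if (err) { return console.error(err); }
+//       console.log('row', change.rowName, 'in', change.tableName,
+//                   '=', value);
+//     });
+//   } else {
+//     console.log('row', change.rowName, 'was deleted');
+//   }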
diff --git a/src/service.js b/src/service.js
new file mode 100644
index 0000000..a8c61c1
--- /dev/null
+++ b/src/service.js
@@ -0,0 +1,70 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+var vanadium = require('vanadium');
+
+var App = require('./app');
+var util = require('./util');
+var vdl = require('./gen-vdl/v.io/syncbase/v23/services/syncbase');
+
+// TODO(aghassemi): This looks clunky,
+// https://github.com/vanadium/issues/issues/499 to deal with it.
+var wireSignature = vdl.Service.prototype._serviceDescription;
+
+module.exports = Service;
+
+function Service(fullName) {
+  if (!(this instanceof Service)) {
+    return new Service(fullName);
+  }
+
+  /**
+   * @property fullName
+   * @type {string}
+   */
+  Object.defineProperty(this, 'fullName', {
+    value: fullName,
+    writable: false,
+    enumerable: true
+  });
+
+  /**
+   * Caches the service wire object.
+   * @private
+   */
+  Object.defineProperty(this, '_wireObj', {
+    enumerable: false,
+    value: null,
+    writable: true
+  });
+}
+
+// app returns the app with the given name. relativeName should not contain
+// slashes.
+Service.prototype.app = function(relativeName) {
+  return new App(this.fullName, relativeName);
+};
+
+// listApps returns a list of all app names.
+Service.prototype.listApps = function(ctx, cb) {
+  util.getChildNames(ctx, this.fullName, cb);
+};
+
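+// getPermissions returns the permissions for the service.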
+Service.prototype.getPermissions = function(ctx, cb) {
+  this._wire(ctx).getPermissions(ctx, cb);
+};
+
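+// setPermissions replaces the permissions for the service. The version, if
+// non-empty, is used for optimistic concurrency (the call fails on mismatch).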
+Service.prototype.setPermissions = function(ctx, perms, version, cb) {
+  this._wire(ctx).setPermissions(ctx, perms, version, cb);
+};
+
+Service.prototype._wire = function(ctx, cb) {
+  if (!this._wireObj) {
+    var rt = vanadium.runtimeForContext(ctx);
+    var client = rt.newClient();
+    this._wireObj = client.bindWithSignature(this.fullName, [wireSignature]);
+  }
+
+  return this._wireObj;
+};
diff --git a/src/syncbase.js b/src/syncbase.js
new file mode 100644
index 0000000..4ef1c4a
--- /dev/null
+++ b/src/syncbase.js
@@ -0,0 +1,24 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+var Service = require('./service');
+var nosql = require('./nosql');
+
+module.exports = {
+  newService: newService,
+  nosql: nosql,
+  // syncbaseSuffix is used for Syncbase-to-Syncbase RPCs.  It should be
+  // completely internal to syncbase, but currently syncgroup names must
+  // include it for implementation-dependent reasons.
+  //
+  // TODO(nlacasse): This suffix should go away.  One possibility is to detect
+  // "internal" RPCs by the method they call, and dispatch to different object
+  // based on that method.  We could also have the client or server inject the
+  // suffix automatically.
+  syncbaseSuffix: '$sync'
+};
+
+function newService(fullName) {
+  return new Service(fullName);
+}
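+
+// Example (illustrative sketch): connect to a syncbase service and get
+// handles to an app and a database. Assumes the service is mounted under
+// 'test/syncbased' and this package is available as 'syncbase':
+//
+//   var syncbase = require('syncbase');
+//   var service = syncbase.newService('test/syncbased');
+//   var app = service.app('myApp');
+//   var db = app.noSqlDatabase('myDb');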
diff --git a/src/util.js b/src/util.js
new file mode 100644
index 0000000..815738c
--- /dev/null
+++ b/src/util.js
@@ -0,0 +1,142 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+var inherits = require('inherits');
+var vanadium = require('vanadium');
+
+module.exports = {
+  addNameProperties: addNameProperties,
+  getChildNames: getChildNames,
+  prefixRangeLimit: prefixRangeLimit,
+  InvalidNameError: InvalidNameError,
+  stringToUTF8Bytes: stringToUTF8Bytes
+};
+
+/**
+ * Creates the 'name' and 'fullName' properties on an object.
+ * @private
+ */
+function addNameProperties(self, parentFullName, relativeName) {
+  if (relativeName.indexOf('/') >= 0) {
+    throw new InvalidNameError(relativeName);
+  }
+
+  var fullName = vanadium.naming.join(parentFullName, relativeName);
+
+  /**
+   * @property _parentFullName
+   * @private
+   * @type {string}
+   */
+  Object.defineProperty(self, '_parentFullName', {
+    value: parentFullName,
+    writable: false,
+    enumerable: false
+  });
+
+  /**
+   * @property name
+   * @type {string}
+   */
+  Object.defineProperty(self, 'name', {
+    value: relativeName,
+    writable: false,
+    enumerable: true
+  });
+
+  /**
+   * @property fullName
+   * @type {string}
+   */
+  Object.defineProperty(self, 'fullName', {
+    value: fullName,
+    writable: false,
+    enumerable: true
+  });
+}
+
+function InvalidNameError(name) {
+  Error.call(this);
+  this.message = 'Invalid name "' + name + '". ' +
+    'Use vanadium.naming.encodeAsNamePart() to escape.';
+}
+inherits(InvalidNameError, Error);
+
+/**
+ * getChildNames returns all names that are children of the parentFullName.
+ * @private
+ */
+function getChildNames(ctx, parentFullName, cb) {
+  var rt = vanadium.runtimeForContext(ctx);
+  var namespace = rt.namespace();
+  var childNames = [];
+
+  var globPattern = vanadium.naming.join(parentFullName, '*');
+
+  var streamErr = null;
+
+  var stream = namespace.glob(ctx, globPattern, function(err) {
+    if (err) {
+      return cb(err);
+    }
+
+    if (streamErr) {
+      return cb(streamErr);
+    }
+
+    cb(null, childNames);
+  }).stream;
+
+  stream.on('data', function(globResult) {
+    var fullName = globResult.name;
+    var name = vanadium.naming.basename(fullName);
+    childNames.push(name);
+  });
+
+  stream.on('error', function(err) {
+    console.error('Stream error: ' + JSON.stringify(err));
+    // Store the first stream error in streamErr.
+    streamErr = streamErr || err.error;
+  });
+}
+
+/**
+ * prefixRangeLimit modifies the given bytes in place so that they become the
+ * limit of the row range for the given prefix.
+ * @private
+ * @param {Uint8Array} bytes UTF-8 bytes of the prefix, modified in place.
+ */
+function prefixRangeLimit(bytes) {
+  // The loop below conceptually adds 1 to the byte string and then chops off
+  // any trailing \x00 bytes produced by the carry. If the input consists
+  // entirely of \xff bytes, the buffer ends up empty.
+  while (bytes.length > 0) {
+    var last = bytes.length - 1;
+    if (bytes[last] === 255) {
+      bytes = bytes.slice(0, last); // drop trailing 0xff (a \x00 after carry)
+    } else {
+      bytes[last] += 1; // add 1
+      return; // no carry
+    }
+  }
+}
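+
+// Example (illustrative sketch): the UTF-8 bytes of 'a/b' are
+// [0x61, 0x2f, 0x62]; the last byte is not 0xff, so it is incremented in
+// place and the buffer becomes [0x61, 0x2f, 0x63], i.e. 'a/c':
+//
+//   var bytes = stringToUTF8Bytes('a/b');
+//   prefixRangeLimit(bytes);
+//   // bytes is now the UTF-8 encoding of 'a/c'.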
+
+/**
+ * stringToUTF8Bytes converts a JavaScript string to an array of bytes
+ * representing the string in UTF-8 format.
+ * @private
+ * @param {string} str String to convert to UTF8 bytes.
+ * @return {Uint8Array} UTF8 bytes.
+ */
+function stringToUTF8Bytes(str) {
+  var utf8String = unescape(encodeURIComponent(str)); //jshint ignore:line
+  var bytes = new Uint8Array(utf8String.length);
+
+  for (var i = 0; i < utf8String.length; i++) {
+    bytes[i] = utf8String.charCodeAt(i);
+  }
+
+  return bytes;
+}
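+
+// Example (illustrative sketch): ASCII characters map to single bytes, while
+// non-ASCII characters expand to multiple bytes:
+//
+//   stringToUTF8Bytes('ab'); // Uint8Array [0x61, 0x62]
+//   stringToUTF8Bytes('é');  // Uint8Array [0xc3, 0xa9]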
diff --git a/test/integration/service-name.js b/test/integration/service-name.js
new file mode 100644
index 0000000..4c6431a
--- /dev/null
+++ b/test/integration/service-name.js
@@ -0,0 +1,5 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+module.exports = 'test/syncbased';
diff --git a/test/integration/test-app.js b/test/integration/test-app.js
new file mode 100644
index 0000000..597a569
--- /dev/null
+++ b/test/integration/test-app.js
@@ -0,0 +1,144 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+var async = require('async');
+var test = require('prova');
+var vanadium = require('vanadium');
+
+var syncbase = require('../..');
+
+var testUtil = require('./util');
+var setupApp = testUtil.setupApp;
+var setupService = testUtil.setupService;
+var uniqueName = testUtil.uniqueName;
+
+test('Creating a service and checking its full name', function(t) {
+  var mockServiceName = 'foo/bar/baz';
+
+  var service = syncbase.newService(mockServiceName);
+  t.equals(service.fullName, mockServiceName, 'Service name matches');
+  t.end();
+});
+
+test('Getting a handle to an app', function(t) {
+  setupService(t, function(err, o) {
+    if (err) {
+      return t.end(err, 'Failed to setup');
+    }
+
+    var appName = uniqueName('app');
+
+    var app = o.service.app(appName);
+
+    t.equals(app.name, appName, 'App name matches');
+    t.equals(app.fullName, vanadium.naming.join(o.service.fullName, appName),
+      'App full name matches');
+
+    o.teardown(t.end);
+  });
+});
+
+test('Creating and listing apps', function(t) {
+  setupService(t, function(err, o) {
+    if (err) {
+      return t.end(err, 'Failed to setup');
+    }
+
+    // Create multiple apps.
+    var appNames = [
+      uniqueName('app'),
+      uniqueName('app'),
+      uniqueName('app')
+    ];
+
+    async.waterfall([
+      // Verify none of the apps exist using exists().
+      async.apply(async.map, appNames, function(appName, cb) {
+        o.service.app(appName).exists(o.ctx, cb);
+      }),
+      function(existsArray, cb) {
+        t.deepEqual(existsArray, [false, false, false],
+          'exists: no apps exist');
+        cb(null);
+      },
+
+      // Verify none of the apps exist using listApps().
+      o.service.listApps.bind(o.service, o.ctx),
+      function(appList, cb) {
+        t.deepEqual(appList, [],
+          'listApps: no apps exist');
+        cb(null);
+      },
+
+      // Create all apps.
+      async.apply(async.forEach, appNames, function(appName, cb) {
+        o.service.app(appName).create(o.ctx, {}, cb);
+      }),
+
+      // Verify each app exists using exists().
+      async.apply(async.map, appNames, function(appName, cb) {
+        o.service.app(appName).exists(o.ctx, cb);
+      }),
+      function(existsArray, cb) {
+        t.deepEqual(existsArray, [true, true, true],
+          'exists: all apps exist');
+        cb(null);
+      },
+
+      // Verify all the apps exist using listApps().
+      o.service.listApps.bind(o.service, o.ctx),
+      function(appList, cb) {
+        t.deepEqual(appList.sort(), appNames.sort(),
+          'listApps: all apps exist');
+        cb(null);
+      }
+    ], function(err) {
+      t.error(err);
+      o.teardown(t.end);
+    });
+  });
+});
+
+test('Deleting an app', function(t) {
+  setupApp(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    async.waterfall([
+      // Verify app exists.
+      o.app.exists.bind(o.app, o.ctx),
+      function(exists, cb) {
+        t.ok(exists, 'app exists');
+        cb(null);
+      },
+
+      // Delete app.
+      o.app.delete.bind(o.app, o.ctx),
+
+      // Verify app no longer exists.
+      o.app.exists.bind(o.app, o.ctx),
+      function(exists, cb) {
+        t.notok(exists, 'app no longer exists');
+        cb(null);
+      }
+    ], function(err, arg) {
+      t.error(err);
+      o.teardown(t.end);
+    });
+  });
+});
+
+test('Getting/Setting permissions of an app', function(t) {
+  setupApp(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    testUtil.testGetSetPermissions(t, o.ctx, o.app, function(err) {
+      t.error(err);
+      return o.teardown(t.end);
+    });
+  });
+});
diff --git a/test/integration/test-batch.js b/test/integration/test-batch.js
new file mode 100644
index 0000000..2b200d5
--- /dev/null
+++ b/test/integration/test-batch.js
@@ -0,0 +1,499 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+var async = require('async');
+var test = require('prova');
+
+var BatchDatabase = require('../../src/nosql/batch-database');
+
+var nosql = require('../..').nosql;
+var BatchOptions = nosql.BatchOptions;
+var range = nosql.rowrange;
+var ReadOnlyBatchError = nosql.ReadOnlyBatchError;
+
+var testUtil = require('./util');
+var assertScanRows = testUtil.assertScanRows;
+var setupDatabase = testUtil.setupDatabase;
+var setupTable = testUtil.setupTable;
+var uniqueName = testUtil.uniqueName;
+
+test('db.beginBatch creates a BatchDatabase with name', function(t) {
+  setupDatabase(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    o.database.beginBatch(o.ctx, new BatchOptions({}), function(err, batch) {
+      if (err) {
+        t.error(err);
+        return o.teardown(t.end);
+      }
+
+      t.ok(batch instanceof BatchDatabase, 'batch is a BatchDatabase');
+      t.notEqual(batch.name, o.database.name,
+                 'batch has different name than database');
+      t.notEqual(batch.fullName, o.database.fullName,
+                 'batch has different fullName than database');
+
+      o.teardown(t.end);
+    });
+  });
+});
+
+test('transactions are not visible until commit', function(t) {
+  setupTable(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var ctx = o.ctx;
+    var db = o.database;
+    var table = o.table;
+
+    var keyName = uniqueName('key');
+    var value = uniqueName('val');
+
+    var emptyPrefix = range.prefix('');
+
+    db.beginBatch(ctx, new BatchOptions({}), put);
+
+    var batch;
+    function put(err, _batch) {
+      if (err) {
+        t.error(err);
+        return o.teardown(t.end);
+      }
+
+      batch = _batch;
+      var batchTable = batch.table(table.name);
+      batchTable.put(ctx, keyName, value, assertNoRows);
+    }
+
+    function assertNoRows(err) {
+      if (err) {
+        return end(err);
+      }
+
+      assertScanRows(ctx, table, emptyPrefix, [], commit);
+    }
+
+    function commit(err) {
+      if (err) {
+        return end(err);
+      }
+
+      batch.commit(ctx, assertRow);
+    }
+
+    function assertRow(err) {
+      if (err) {
+        return end(err);
+      }
+
+      var wantRows = [{
+        key: keyName,
+        value: value
+      }];
+
+      assertScanRows(ctx, table, emptyPrefix, wantRows, end);
+    }
+
+    function end(err) {
+      t.error(err);
+      o.teardown(t.end);
+    }
+  });
+});
+
+test('concurrent transactions are isolated', function(t) {
+  setupTable(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var ctx = o.ctx;
+    var db = o.database;
+    var table = o.table;
+
+    var batches;
+    var batchTables;
+    var rows;
+
+    var emptyPrefix = range.prefix('');
+
+    startBatches();
+
+    // Create two batches.
+    function startBatches() {
+      async.times(2, function(n, cb) {
+        db.beginBatch(ctx, {}, cb);
+      }, addRows);
+    }
+
+    // Each batch adds a new row.
+    function addRows(err, _batches) {
+      if (err) {
+        return end(err);
+      }
+
+      batches = _batches;
+      batchTables = batches.map(function(batch) {
+        return batch.table(table.name);
+      });
+
+      // Put to the same key in each batch.
+      var key = uniqueName('key');
+      async.mapSeries(batchTables, function(batchTable, cb) {
+        // Put different value in each batch.
+        var value = uniqueName('value');
+        batchTable.put(ctx, key, value, function(err) {
+          if (err) {
+            return cb(err);
+          }
+          return cb(null, [{key: key, value: value}]);
+        });
+      }, assertBatchesSeeCorrectRows);
+    }
+
+    // Verify that each batch sees only its own rows.
+    function assertBatchesSeeCorrectRows(err, _rows) {
+      if (err) {
+        return end(err);
+      }
+
+      rows = _rows;
+      async.forEachOfSeries(batchTables, function(batchTable, idx, cb) {
+        // NOTE(nlacasse): Currently, a scan() inside a batch will return only
+        // the rows that existed in the snapshot when the batch was started.
+        // Thus, we can't use assertScanRows() to check that the batch has the
+        // correct rows.  Instead we must call get() on the table directly to
+        // ensure that the new rows exist in the snapshot.
+        batchTable.get(ctx, rows[idx][0].key, function(err, value) {
+          if (err) {
+            return cb(err);
+          }
+          t.equal(value, rows[idx][0].value, 'batch sees the value it put');
+          return cb(null);
+        });
+      }, commitFirstBatch);
+    }
+
+    function commitFirstBatch(err) {
+      if (err) {
+        return end(err);
+      }
+
+      batches[0].commit(ctx, commitSecondBatch);
+    }
+
+    function commitSecondBatch(err) {
+      if (err) {
+        return end(err);
+      }
+
+      // Second batch should fail on commit.
+      batches[1].commit(ctx, function(err) {
+        t.ok(err, 'second batch should fail on commit');
+
+        assertFirstBatchesRowsExist();
+      });
+    }
+
+    function assertFirstBatchesRowsExist() {
+      // Check that only first batch's rows exist in table.
+      assertScanRows(ctx, table, emptyPrefix, rows[0], end);
+    }
+
+    function end(err) {
+      t.error(err);
+      o.teardown(t.end);
+    }
+  });
+});
+
+test('readonly batches', function(t) {
+  setupTable(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var ctx = o.ctx;
+    var db = o.database;
+    var table = o.table;
+
+    var key = uniqueName('key');
+    var value = uniqueName('value');
+
+    var batch;
+    var batchTable;
+
+    table.put(ctx, key, value, startReadOnlyBatch);
+
+    function startReadOnlyBatch(err) {
+      if (err) {
+        return end(err);
+      }
+
+      var opts = new BatchOptions(new Map([
+        ['ReadOnly', true]
+      ]));
+
+      db.beginBatch(ctx, opts, attemptBatchPut);
+    }
+
+    function attemptBatchPut(err, _batch) {
+      if (err) {
+        return end(err);
+      }
+
+      batch = _batch;
+      batchTable = batch.table(table.name);
+
+      batchTable.put(ctx, uniqueName('key'), uniqueName('val'), function(err) {
+        assertReadOnlyBatchError(err);
+        attemptBatchDeletePrefix();
+      });
+    }
+
+    function attemptBatchDeletePrefix() {
+      batchTable.delete(ctx, range.prefix(key), function(err) {
+        assertReadOnlyBatchError(err);
+        attemptBatchDeleteRow();
+      });
+    }
+
+    function attemptBatchDeleteRow() {
+      batchTable.row(key).delete(ctx, function(err) {
+        assertReadOnlyBatchError(err);
+        end();
+      });
+    }
+
+    function end(err) {
+      t.error(err);
+      o.teardown(t.end);
+    }
+
+    function assertReadOnlyBatchError(err) {
+      t.ok(err, 'should error');
+      t.ok(err instanceof ReadOnlyBatchError,
+           'err should be ReadOnlyBatchError');
+    }
+
+  });
+});
+
+test('new batch operations fail after successful batch commit', function(t) {
+  setupTable(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var ctx = o.ctx;
+    var db = o.database;
+    var table = o.table;
+
+    db.beginBatch(ctx, {}, put);
+
+    var batch;
+
+    function put(err, _batch) {
+      if (err) {
+        return end(err);
+      }
+
+      batch = _batch;
+      var batchTable = batch.table(table.name);
+
+      batchTable.put(ctx, uniqueName('key'), uniqueName('val'), commit);
+    }
+
+    function commit(err) {
+      if (err) {
+        return end(err);
+      }
+
+      batch.commit(ctx, function(err) {
+        if (err) {
+          return end(err);
+        }
+
+        assertOpsFail(t, ctx, batch, table.name, end);
+      });
+    }
+
+    function end(err) {
+      t.error(err);
+      o.teardown(t.end);
+    }
+  });
+});
+
+test('new batch operations fail after unsuccessful batch commit', function(t) {
+  setupTable(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var ctx = o.ctx;
+    var db = o.database;
+    var table = o.table;
+
+    db.beginBatch(ctx, {}, readBatchTable);
+
+    var key = uniqueName('key');
+    var value = uniqueName('value');
+
+    var batch;
+    var batchTable;
+
+    function readBatchTable(err, _batch) {
+      if (err) {
+        return t.end(err);
+      }
+
+      batch = _batch;
+      batchTable = batch.table(table.name);
+
+      batchTable.get(ctx, key, function(err) {
+        // Should error because the key does not exist yet.
+        t.ok(err, 'get should error when key does not exist');
+        putTable();
+      });
+    }
+
+    function putTable(err) {
+      if (err) {
+        return end(err);
+      }
+
+      // Put on the table directly, not the batch table.  This will conflict
+      // with future batchTable.put() call.
+      table.put(ctx, key, value, putBatchTable);
+    }
+
+    function putBatchTable(err) {
+      if (err) {
+        return end(err);
+      }
+
+      var newValue = uniqueName('value');
+
+      batchTable.put(ctx, key, newValue, commit);
+    }
+
+    function commit(err) {
+      if (err) {
+        return end(err);
+      }
+
+      batch.commit(ctx, function(err) {
+        t.ok(err, 'commit() should error');
+        assertOpsFail(t, ctx, batch, table.name, end);
+      });
+    }
+
+    function end(err) {
+      t.error(err);
+      o.teardown(t.end);
+    }
+  });
+});
+
+test('new batch operations fail after batch is aborted', function(t) {
+  setupTable(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var ctx = o.ctx;
+    var db = o.database;
+    var table = o.table;
+
+    db.beginBatch(ctx, {}, abort);
+
+    function abort(err, batch) {
+      if (err) {
+        return end(err);
+      }
+
+      batch.abort(ctx, function(err) {
+        if (err) {
+          return end(err);
+        }
+
+        assertOpsFail(t, ctx, batch, table.name, end);
+      });
+    }
+
+    function end(err) {
+      t.error(err);
+      o.teardown(t.end);
+    }
+  });
+});
+
+function assertOpsFail(t, ctx, batch, tableName, cb) {
+  var batchTable = batch.table(tableName);
+
+  async.series([
+    assertGetFails,
+    assertScanFails,
+    assertPutFails,
+    assertDeleteFails,
+    assertRowDeleteFails,
+    assertCommitFails
+  ], cb);
+
+  function assertGetFails(cb) {
+    batchTable.get(ctx, uniqueName('key'), function(err) {
+      t.ok(err, 'get() should error');
+      cb(null);
+    });
+  }
+
+  function assertScanFails(cb) {
+    var streamGotError = false;
+
+    var stream = batchTable.scan(ctx, range.prefix(''), function(err) {
+      t.ok(err, 'scan() should pass error to callback');
+      t.ok(streamGotError, 'scan() should send error to stream');
+      cb(null);
+    });
+
+    stream.on('error', function(err) {
+      streamGotError = true;
+    });
+  }
+
+  function assertPutFails(cb) {
+    batchTable.put(ctx, uniqueName('key'), uniqueName('val'),
+                   function(err) {
+      t.ok(err, 'put() should error');
+      cb(null);
+    });
+  }
+
+  function assertDeleteFails(cb) {
+    batchTable.delete(ctx, range.prefix(uniqueName('key')), function(err) {
+      t.ok(err, 'delete() should error');
+      cb(null);
+    });
+  }
+
+  function assertRowDeleteFails(cb) {
+    batchTable.row(uniqueName('key')).delete(ctx, function(err) {
+      t.ok(err, 'row.delete() should error');
+      cb(null);
+    });
+  }
+
+  function assertCommitFails(cb) {
+    batch.commit(ctx, function(err) {
+      t.ok(err, 'commit() should error');
+      cb(null);
+    });
+  }
+}
diff --git a/test/integration/test-blob.js b/test/integration/test-blob.js
new file mode 100644
index 0000000..d1aee34
--- /dev/null
+++ b/test/integration/test-blob.js
@@ -0,0 +1,150 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+var arrayToStream = require('stream-array');
+var streamToArray = require('stream-to-array');
+var test = require('prova');
+
+/* jshint -W079 */
+// Silence jshint's error about redefining 'Blob'.
+var Blob = require('../../src/nosql/blob');
+/* jshint +W079 */
+var vdl =
+  require('../../src/gen-vdl/v.io/syncbase/v23/services/syncbase/nosql');
+
+var testUtil = require('./util');
+var setupDatabase = testUtil.setupDatabase;
+var uniqueName = testUtil.uniqueName;
+
+test('db.blob returns the correct blob', function(t) {
+  setupDatabase(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var blobRef = uniqueName('blobRef');
+    var blob = o.database.blob(blobRef);
+
+    t.ok(blob instanceof Blob);
+    t.equals(blob.ref, blobRef);
+
+    o.teardown(t.end);
+  });
+});
+
+// Tests local blob get before and after a put.
+test('blob put then get', function(t) {
+  setupDatabase(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var ctx = o.ctx;
+    var db = o.database;
+
+    var blob;
+
+    var data = new Uint8Array(new ArrayBuffer(256));
+
+    db.createBlob(ctx, function(err, _blob) {
+      if (err) {
+        return end(err);
+      }
+
+      blob = _blob;
+      t.ok(blob instanceof Blob, 'createBlob returns a new blob');
+      t.equals(typeof blob.ref, 'string', 'blob has blobRef');
+
+      getEmptyBlob();
+    });
+
+    function getEmptyBlob() {
+      var blobStream = blob.get(ctx, 0, function(err) {
+        t.ok(err instanceof vdl.BlobNotCommittedError,
+             'blob.get should fail for uncommitted blobs');
+      });
+
+      streamToArray(blobStream, function(err) {
+        t.ok(err instanceof vdl.BlobNotCommittedError,
+             'blob.get should fail for uncommitted blobs');
+        fetchEmptyBlob();
+      });
+    }
+
+    function fetchEmptyBlob() {
+      var blobStatusStream = blob.fetch(ctx, 100, function(err) {
+        t.ok(err instanceof vdl.BlobNotCommittedError,
+             'blob.fetch should fail for uncommitted blobs');
+      });
+
+      streamToArray(blobStatusStream, function(err) {
+        t.ok(err instanceof vdl.BlobNotCommittedError,
+             'blob status stream should fail for uncommitted blobs');
+        assertBlobIsEmpty();
+      });
+    }
+
+    function assertBlobIsEmpty() {
+      blob.size(ctx, function(err, size) {
+        if (err) {
+          return end(err);
+        }
+        t.equals(size.toNativeNumber(), 0, 'blob is empty');
+
+        putToBlob();
+      });
+    }
+
+    function putToBlob() {
+      var byteStream = blob.put(ctx, function(err) {
+        if (err) {
+          return end(err);
+        }
+
+        assertBlobSize();
+      });
+
+      arrayToStream([data]).pipe(byteStream);
+    }
+
+    function assertBlobSize() {
+      blob.size(ctx, function(err, size) {
+        if (err) {
+          return end(err);
+        }
+        t.equals(size.toNativeNumber(), data.length, 'blob has correct size');
+
+        commitBlob();
+      });
+    }
+
+    function commitBlob() {
+      blob.commit(ctx, function(err) {
+        if (err) {
+          return end(err);
+        }
+
+        assertGetBlob();
+      });
+    }
+
+    function assertGetBlob() {
+      var blobStream = blob.get(ctx, 0, t.error);
+
+      streamToArray(blobStream, function(err, gotData) {
+        if (err) {
+          return end(err);
+        }
+
+        t.deepEquals(gotData, [data], 'blob has correct data');
+        end();
+      });
+    }
+
+    function end(err) {
+      t.error(err);
+      o.teardown(t.end);
+    }
+  });
+});
diff --git a/test/integration/test-database.js b/test/integration/test-database.js
new file mode 100644
index 0000000..007c477
--- /dev/null
+++ b/test/integration/test-database.js
@@ -0,0 +1,616 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+var async = require('async');
+var format = require('format');
+var stringify = require('json-stable-stringify');
+var test = require('prova');
+var toArray = require('stream-to-array');
+
+var vanadium = require('vanadium');
+var naming = vanadium.naming;
+var vdl = vanadium.vdl;
+
+var Database = require('../../src/nosql/database');
+var Table = require('../../src/nosql/table');
+
+var nosql = require('../..').nosql;
+var Schema = nosql.Schema;
+var SchemaMetadata = nosql.SchemaMetadata;
+
+var testUtil = require('./util');
+var setupApp = testUtil.setupApp;
+var setupDatabase = testUtil.setupDatabase;
+var setupTable = testUtil.setupTable;
+var uniqueName = testUtil.uniqueName;
+
+test('app.noSqlDatabase(name) returns a database with correct name',
+     function(t) {
+  setupApp(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var dbName = uniqueName('db');
+    var db = o.app.noSqlDatabase(dbName);
+
+    t.ok(db, 'Database is constructed.');
+    t.ok(db instanceof Database, 'database is a Database object.');
+    t.equal(db.name, dbName, 'Database has the correct name.');
+
+    db.name = 'foo';
+    t.equal(db.name, dbName, 'Setting the name has no effect.');
+
+    var expectedFullName = naming.join(o.app.fullName, dbName);
+    t.equal(db.fullName, expectedFullName, 'Database has correct fullName.');
+
+    db.fullName = 'bar';
+    t.equal(db.fullName, expectedFullName, 'Setting fullName has no effect.');
+
+    o.teardown(t.end);
+  });
+});
+
+test('app.noSqlDatabase(name, schema) returns a database with correct schema',
+     function(t) {
+  setupApp(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var version = 123;
+    var md = new SchemaMetadata({version: version});
+    var updater = function() {};
+    var schema = new Schema(md, updater);
+
+    var dbName = uniqueName('db');
+    var db = o.app.noSqlDatabase(dbName, schema);
+
+    t.ok(db, 'Database is constructed.');
+    t.ok(db instanceof Database, 'database is a Database object.');
+    t.equal(db.schema, schema, 'database has correct schema.');
+    t.equal(db.schemaVersion, version, 'database has correct schemaVersion');
+
+    o.teardown(t.end);
+  });
+});
+
+test('app.noSqlDatabase with slashes in the name', function(t) {
+  setupApp(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var dbName = 'bad/name';
+    t.throws(function() {
+      o.app.noSqlDatabase(dbName);
+    }, 'should throw');
+
+    o.teardown(t.end);
+  });
+});
+
+test('db.create() creates a database', function(t) {
+  setupApp(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var db = o.app.noSqlDatabase(uniqueName('db'));
+    var db2 = o.app.noSqlDatabase(uniqueName('db'));
+
+    async.waterfall([
+      // Verify database does not exist yet.
+      db.exists.bind(db, o.ctx),
+      function(exists, cb) {
+        t.notok(exists, 'exists: database doesn\'t exist yet');
+        cb(null);
+      },
+
+      // Verify database list is empty.
+      o.app.listDatabases.bind(o.app, o.ctx),
+      function(dbList, cb) {
+        t.deepEqual(dbList, [],
+          'listDatabases: no databases exist');
+        cb(null);
+      },
+
+      // Create database.
+      db.create.bind(db, o.ctx, {}),
+
+      // Verify database exists.
+      db.exists.bind(db, o.ctx),
+      function(exists, cb) {
+        t.ok(exists, 'exists: database exists');
+        cb(null);
+      },
+
+      // Verify database list contains the database.
+      o.app.listDatabases.bind(o.app, o.ctx),
+      function(dbList, cb) {
+        t.deepEqual(dbList, [db.name],
+          'listDatabases: database exists');
+        cb(null);
+      },
+
+      // Create another database.
+      db2.create.bind(db2, o.ctx, {}),
+
+      // Verify database list contains both databases.
+      o.app.listDatabases.bind(o.app, o.ctx),
+      function(dbList, cb) {
+        t.deepEqual(dbList.sort(), [db.name, db2.name].sort(),
+          'listDatabases: both databases exist');
+        cb(null);
+      },
+    ], function(err, arg) {
+      t.error(err);
+      o.teardown(t.end);
+    });
+  });
+});
+
+test('creating a database twice should error', function(t) {
+  setupApp(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var db = o.app.noSqlDatabase(uniqueName('db'));
+
+    // Create once.
+    db.create(o.ctx, {}, function(err) {
+      if (err) {
+        t.error(err);
+        return o.teardown(t.end);
+      }
+
+      // Create again.
+      db.create(o.ctx, {}, function(err) {
+        t.ok(err, 'should error.');
+        o.teardown(t.end);
+      });
+    });
+  });
+});
+
+test('db.delete() deletes a database', function(t) {
+  setupApp(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var db = o.app.noSqlDatabase(uniqueName('db'));
+
+    async.waterfall([
+      // Create database.
+      db.create.bind(db, o.ctx, {}),
+
+      // Verify database exists.
+      db.exists.bind(db, o.ctx),
+      function(exists, cb) {
+        t.ok(exists, 'database exists');
+        cb(null);
+      },
+
+      // Delete database.
+      db.delete.bind(db, o.ctx),
+
+      // Verify database no longer exists.
+      db.exists.bind(db, o.ctx),
+      function(exists, cb) {
+        t.notok(exists, 'database no longer exists');
+        cb(null);
+      },
+    ], function(err, arg) {
+      t.error(err);
+      o.teardown(t.end);
+    });
+  });
+});
+
+test('deleting a db that has not been created should not error', function(t) {
+  setupApp(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var db = o.app.noSqlDatabase(uniqueName('db'));
+
+    db.delete(o.ctx, function(err) {
+      t.error(err);
+      o.teardown(t.end);
+    });
+  });
+});
+
+test('db.table() returns a table', function(t) {
+  setupApp(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var db = o.app.noSqlDatabase(uniqueName('db'));
+    var tableName = uniqueName('table');
+    var table = db.table(tableName);
+
+    t.ok(table, 'table is created.');
+    t.ok(table instanceof Table, 'table is a Table object.');
+    t.equal(table.name, tableName, 'table has the correct name.');
+    t.equal(table.fullName, vanadium.naming.join(db.fullName, tableName),
+      'table has the correct fullName.');
+
+    o.teardown(t.end);
+  });
+});
+
+test('db.createTable() creates a table', function(t) {
+  setupDatabase(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var db = o.database;
+    var table = db.table(uniqueName('table'));
+    var table2 = db.table(uniqueName('table'));
+
+    async.waterfall([
+      // Verify table does not exist yet.
+      table.exists.bind(table, o.ctx),
+      function(exists, cb) {
+        t.notok(exists, 'exists: table doesn\'t exist yet');
+        cb(null);
+      },
+
+      // Verify table list is empty.
+      db.listTables.bind(db, o.ctx),
+      function(tableList, cb) {
+        t.deepEqual(tableList, [],
+          'listTables: no tables exist');
+        cb(null);
+      },
+
+      // Create table.
+      db.createTable.bind(db, o.ctx, table.name, {}),
+
+      // Verify table exists.
+      table.exists.bind(table, o.ctx),
+      function(exists, cb) {
+        t.ok(exists, 'exists: table exists');
+        cb(null);
+      },
+
+      // Verify table list contains the table.
+      db.listTables.bind(db, o.ctx),
+      function(tableList, cb) {
+        t.deepEqual(tableList, [table.name],
+          'listTables: table exists');
+        cb(null);
+      },
+
+      // Create another table.
+      db.createTable.bind(db, o.ctx, table2.name, {}),
+
+      // Verify table list contains both tables.
+      db.listTables.bind(db, o.ctx),
+      function(tableList, cb) {
+        t.deepEqual(tableList.sort(), [table.name, table2.name].sort(),
+          'listTables: both tables exist');
+        cb(null);
+      },
+    ], function(err, arg) {
+      t.error(err);
+      o.teardown(t.end);
+    });
+  });
+});
+
+test('db.deleteTable() deletes a table', function(t) {
+  setupDatabase(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var db = o.database;
+    var table = db.table(uniqueName('table'));
+
+    async.waterfall([
+      // Create table.
+      db.createTable.bind(db, o.ctx, table.name, {}),
+
+      // Verify table exists.
+      table.exists.bind(table, o.ctx),
+      function(exists, cb) {
+        t.ok(exists, 'table exists');
+        cb(null);
+      },
+
+      // Delete table.
+      db.deleteTable.bind(db, o.ctx, table.name),
+
+      // Verify table no longer exists.
+      table.exists.bind(table, o.ctx),
+      function(exists, cb) {
+        t.notok(exists, 'table no longer exists');
+        cb(null);
+      },
+    ], function(err, arg) {
+      t.error(err);
+      o.teardown(t.end);
+    });
+  });
+});
+
+test('deleting a table that does not exist should not error', function(t) {
+  setupDatabase(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var db = o.database;
+    var tableName = uniqueName('table');
+
+    db.deleteTable(o.ctx, tableName, function(err) {
+      t.error(err);
+      o.teardown(t.end);
+    });
+  });
+});
+
+test('Getting/Setting permissions of a database', function(t) {
+  setupDatabase(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    testUtil.testGetSetPermissions(t, o.ctx, o.database, function(err) {
+      t.error(err);
+      return o.teardown(t.end);
+    });
+  });
+});
+
+test('database.exec', function(t) {
+  setupTable(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var ctx = o.ctx;
+    var db = o.database;
+    var table = o.table;
+
+    var personType = new vdl.Type({
+      kind: vdl.kind.STRUCT,
+      name: 'personType',
+      fields: [
+        {
+          name: 'first',
+          type: vdl.types.STRING
+        },
+        {
+          name: 'last',
+          type: vdl.types.STRING
+        },
+        {
+          name: 'employed',
+          type: vdl.types.BOOL
+        },
+        {
+          name: 'age',
+          type: vdl.types.INT32
+        }
+      ]
+    });
+
+    var homer = {
+      first: 'Homer',
+      last: 'Simpson',
+      employed: true,
+      age: 38
+    };
+
+    var bart = {
+      first: 'Bart',
+      last: 'Simpson',
+      employed: false,
+      age: 10
+    };
+
+    var maggie = {
+      first: 'Maggie',
+      last: 'Simpson',
+      employed: false,
+      age: 1
+    };
+
+    var moe = {
+      first: 'Moe',
+      last: 'Syzlak',
+      employed: true,
+      age: 46
+    };
+
+    var people = [homer, bart, maggie, moe];
+
+    var cityType = new vdl.Type({
+      kind: vdl.kind.STRUCT,
+      name: 'cityType',
+      fields: [
+        {
+          name: 'name',
+          type: vdl.types.STRING
+        },
+        {
+          name: 'population',
+          type: vdl.types.INT32
+        },
+        {
+          name: 'age',
+          type: vdl.types.INT32
+        }
+      ]
+    });
+
+    var springfield = {
+      name: 'Springfield',
+      population: 30720,
+      age: 219
+    };
+
+    var shelbyville = {
+      name: 'Shelbyville',
+      population: 600000,
+      age: 220
+    };
+
+    var cities = [springfield, shelbyville];
+
+    var testCases = [
+      {
+        q: 'select k, v from %s',
+        want: [
+          ['k', 'v'],
+          ['Homer', homer],
+          ['Bart', bart],
+          ['Moe', moe],
+          ['Maggie', maggie],
+          ['Springfield', springfield],
+          ['Shelbyville', shelbyville]
+        ]
+      },
+      {
+        q: 'select k, v.Age from %s',
+        want: [
+          ['k', 'v.Age'],
+          ['Homer', homer.age],
+          ['Bart', bart.age],
+          ['Moe', moe.age],
+          ['Maggie', maggie.age],
+          ['Springfield', springfield.age],
+          ['Shelbyville', shelbyville.age]
+        ]
+      },
+      {
+        q: 'select k, v.First from %s where Type(v) = "personType"',
+        want: [
+          ['k', 'v.First'],
+          ['Homer', homer.first],
+          ['Bart', bart.first],
+          ['Moe', moe.first],
+          ['Maggie', maggie.first]
+        ]
+      },
+      {
+        q: 'select k, v.Population from %s where Type(v) = "cityType"',
+        want: [
+          ['k', 'v.Population'],
+          ['Shelbyville', shelbyville.population],
+          ['Springfield', springfield.population],
+        ]
+      },
+      {
+        q: 'select k, v from %s where v.Age = 10',
+        want: [
+          ['k', 'v'],
+          ['Bart', bart]
+        ]
+      },
+      {
+        q: 'select k, v from %s where k = "Homer"',
+        want: [
+          ['k', 'v'],
+          ['Homer', homer],
+        ]
+      },
+      {
+        // Note the double-percent below. The query is passed through 'format'
+        // to insert the table name. The double %% will be replaced with a
+        // single %.
+        q: 'select k, v from %s where k like "M%%"',
+        want: [
+          ['k', 'v'],
+          ['Moe', moe],
+          ['Maggie', maggie],
+        ]
+      },
+      {
+        q: 'select k, v from %s where v.Employed = true',
+        want: [
+          ['k', 'v'],
+          ['Homer', homer],
+          ['Moe', moe],
+        ]
+      },
+    ];
+
+    putPeople();
+
+    // Put all people, keyed by their first name.
+    function putPeople() {
+      async.forEach(people, function(person, cb) {
+        table.put(ctx, person.first, person, personType, cb);
+      }, putCities);
+    }
+
+    // Put all cities, keyed by their name.
+    function putCities(err) {
+      if (err) {
+        return end(err);
+      }
+
+      async.forEach(cities, function(city, cb) {
+        table.put(ctx, city.name, city, cityType, cb);
+      }, runTestCases);
+    }
+
+    // Check all the test cases.
+    function runTestCases(err) {
+      if (err) {
+        return end(err);
+      }
+
+      async.forEachSeries(testCases, function(testCase, cb) {
+        assertExec(format(testCase.q, table.name), testCase.want, cb);
+      }, end);
+    }
+
+    function end(err) {
+      t.error(err);
+      o.teardown(t.end);
+    }
+
+    // Assert that query 'q' returns the rows in 'want'.
+    function assertExec(q, want, cb) {
+      var stream = db.exec(ctx, q, function(err) {
+        t.error(err);
+        cb();
+      });
+      stream.on('error', t.error);
+      toArray(stream, function(err, got) {
+        t.error(err);
+        got.sort(arrayCompare);
+        want.sort(arrayCompare);
+
+        var msg = 'query: "' + q + '" returns the correct values';
+        t.deepEqual(got, want, msg);
+      });
+    }
+  });
+});
+
+// Compare two arrays by json-encoding all items, then joining and treating it
+// as string.  Used to sort an array of arrays deterministically.
+function arrayCompare(a1, a2) {
+  var a1s = a1.map(stringify).join('/');
+  var a2s = a2.map(stringify).join('/');
+
+  if (a1s < a2s) {
+    return -1;
+  }
+  if (a1s > a2s) {
+    return 1;
+  }
+  return 0;
+}
diff --git a/test/integration/test-run-in-batch.js b/test/integration/test-run-in-batch.js
new file mode 100644
index 0000000..cc29d5a
--- /dev/null
+++ b/test/integration/test-run-in-batch.js
@@ -0,0 +1,97 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+var test = require('prova');
+
+var runInBatch = require('../..').nosql.runInBatch;
+
+function MockBatchDb(failOnCommit) {
+  this.abortCalled = false;
+  this.commitCalled = false;
+  this._failOnCommit = failOnCommit;
+}
+
+MockBatchDb.prototype.abort = function(ctx, cb) {
+  this.abortCalled = true;
+  cb();
+};
+
+MockBatchDb.prototype.commit = function(ctx, cb) {
+  this.commitCalled = true;
+  if (this._failOnCommit) {
+    return cb(new Error('error committing'));
+  }
+  cb();
+};
+
+function MockDb(failOnCommit) {
+  this.batchDb = null;
+  this._failOnCommit = failOnCommit;
+}
+
+MockDb.prototype.beginBatch = function(ctx, opts, cb) {
+  this.batchDb = new MockBatchDb(this._failOnCommit);
+  return cb(null, this.batchDb);
+};
+
+test('runInBatch commits on success', function(t) {
+  var ctx = {};
+  var db = new MockDb();
+
+  function willSucceed(db, cb) {
+    cb(null);
+  }
+
+  runInBatch(ctx, db, {}, willSucceed, function(err) {
+    if (err) {
+      return t.end(err);
+    }
+
+    t.ok(db.batchDb, 'batch db is created');
+    t.ok(db.batchDb.commitCalled, 'batchDb.commit() was called');
+    t.notok(db.batchDb.abortCalled, 'batchDb.abort() was not called');
+
+    t.end();
+  });
+});
+
+test('runInBatch aborts on failure', function(t) {
+  var ctx = {};
+  var db = new MockDb();
+  var error = new Error('boom!');
+
+  function willFail(db, cb) {
+    cb(error);
+  }
+
+  runInBatch(ctx, db, {}, willFail, function(err) {
+    t.ok(err, 'runInBatch should return an error');
+    t.equal(err, error, 'runInBatch returns the correct error');
+
+    t.ok(db.batchDb, 'batch db is created');
+    t.notok(db.batchDb.commitCalled, 'batchDb.commit() was not called');
+    t.ok(db.batchDb.abortCalled, 'batchDb.abort() was called');
+
+    t.end();
+  });
+});
+
+test('runInBatch does not abort if commit fails', function(t) {
+  var ctx = {};
+  var db = new MockDb(true);
+
+  function willSucceed(db, cb) {
+    cb(null);
+  }
+
+  runInBatch(ctx, db, {}, willSucceed, function(err) {
+    t.ok(err, 'runInBatch should return an error');
+
+    t.ok(db.batchDb, 'batch db is created');
+    t.ok(db.batchDb.commitCalled, 'batchDb.commit() was called');
+    t.notok(db.batchDb.abortCalled, 'batchDb.abort() was not called');
+
+    t.end();
+  });
+});
diff --git a/test/integration/test-schema.js b/test/integration/test-schema.js
new file mode 100644
index 0000000..299777c
--- /dev/null
+++ b/test/integration/test-schema.js
@@ -0,0 +1,107 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+var async = require('async');
+var test = require('prova');
+
+var testUtil = require('./util');
+var setupApp = testUtil.setupApp;
+var uniqueName = testUtil.uniqueName;
+
+var nosql = require('../..').nosql;
+var Schema = nosql.Schema;
+var SchemaMetadata = nosql.SchemaMetadata;
+
+test('schema check', function(t) {
+  setupApp(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var app = o.app;
+    var ctx = o.ctx;
+
+    var dbName = uniqueName('db');
+
+    var upgraderCallCount = 0;
+    var upgrader = function(db, oldVer, newVer, cb) {
+      upgraderCallCount++;
+      process.nextTick(function() {
+        cb(null);
+      });
+    };
+
+    var version = 123;
+    var md = new SchemaMetadata({version: version});
+    var schema = new Schema(md, upgrader);
+
+    var otherDb, otherSchema, newVersion;
+
+    var db = app.noSqlDatabase(dbName, schema);
+
+    async.waterfall([
+      // Verify that calling upgradeIfOutdated on a nonexistent database does
+      // not error.
+      db.upgradeIfOutdated.bind(db, ctx),
+      function(upgraded, cb) {
+        t.equal(upgraded, false, 'upgradeIfOutdated should return false');
+        t.equal(upgraderCallCount, 0,
+                'upgrader function should not have been called');
+        cb(null);
+      },
+
+      // Create db, this step also stores the schema provided above
+      db.create.bind(db, ctx, new Map()),
+      // Verify schema was stored as part of create.
+      function(cb) {
+        cb(null);
+      },
+
+      db._getSchemaMetadata.bind(db, ctx),
+
+      function(metadata, cb) {
+        t.equal(metadata.version, version, 'metadata has correct version');
+        cb(null);
+      },
+
+      // Make a redundant call to upgradeIfOutdated to verify it is a no-op.
+      db.upgradeIfOutdated.bind(db, ctx),
+      function(res, cb) {
+        t.notOk(res, 'upgradeIfOutdated should not return true');
+        t.equal(upgraderCallCount, 0,
+                'upgrader function should not have been called');
+        cb(null);
+      },
+
+      // Try to make a new database object for the same database but with an
+      // incremented schema version.
+      function(cb) {
+        newVersion = version + 1;
+        var otherMd = new SchemaMetadata({version: newVersion});
+        otherSchema = new Schema(otherMd, upgrader);
+        otherDb = app.noSqlDatabase(dbName, otherSchema);
+        otherDb.upgradeIfOutdated(ctx, cb);
+      },
+
+      function(res, cb) {
+        t.ok(res, 'otherDb.upgradeIfOutdated expected to return true');
+        t.equal(upgraderCallCount, 1, 'upgrader should have been called once');
+        cb(null);
+      },
+
+      // Check that the contents of SchemaMetadata are correctly stored in
+      // the db.
+      function(cb) {
+        otherDb._getSchemaMetadata(ctx, cb);
+      },
+
+      function(metadata, cb) {
+        t.equal(metadata.version, newVersion, 'metadata has correct version');
+        cb(null);
+      }
+    ], function(err) {
+      t.error(err);
+      o.teardown(t.end);
+    });
+  });
+});
diff --git a/test/integration/test-syncgroup.js b/test/integration/test-syncgroup.js
new file mode 100644
index 0000000..80a2666
--- /dev/null
+++ b/test/integration/test-syncgroup.js
@@ -0,0 +1,332 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+var async = require('async');
+var naming = require('vanadium').naming;
+var test = require('prova');
+
+var syncbase = require('../..');
+var nosql = syncbase.nosql;
+var syncbaseSuffix = syncbase.syncbaseSuffix;
+var SyncGroup = require('../../src/nosql/syncgroup');
+var verror = require('vanadium').verror;
+
+var testUtil = require('./util');
+var setupDatabase = testUtil.setupDatabase;
+var setupSyncGroup = testUtil.setupSyncGroup;
+var uniqueName = testUtil.uniqueName;
+
+// TODO(nlacasse): Where does this magic number 8 come from? It's in
+// syncgroup_test.go.
+var myInfo = new nosql.SyncGroupMemberInfo({
+  syncPriority: 8
+});
+
+test('db.syncGroup returns a SyncGroup with name', function(t) {
+  setupDatabase(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var db = o.database;
+
+    var sgName = uniqueName('syncgroup');
+    var sg = db.syncGroup(sgName);
+    t.ok(sg instanceof SyncGroup, 'syncgroup is instanceof SyncGroup');
+    t.equal(sg.name, sgName, 'syncgroup has correct name');
+    o.teardown(t.end);
+  });
+});
+
+test('syncgroup.create with empty spec', function(t) {
+  setupDatabase(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var db = o.database;
+    var ctx = o.ctx;
+
+    var spec = new nosql.SyncGroupSpec();
+    var name = uniqueName('syncgroup');
+
+    db.syncGroup(name).create(ctx, spec, myInfo, function(err) {
+      t.ok(err, 'should error');
+      t.ok(err instanceof verror.BadArgError, 'err is BadArgError');
+      o.teardown(t.end);
+    });
+  });
+});
+
+test('syncgroup.create with valid spec', function(t) {
+  setupDatabase(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var db = o.database;
+    var ctx = o.ctx;
+
+    // TODO(nlacasse): It's not obvious that the syncgroup name needs to be
+    // appended to a syncbase service name.
+    var name = naming.join(o.service.fullName,
+                           syncbaseSuffix,
+                           uniqueName('syncgroup'));
+    var prefix = 't1/foo';
+
+    var spec = new nosql.SyncGroupSpec({
+      description: 'test syncgroup ' + name,
+      perms: {},
+      prefixes: [prefix]
+    });
+
+    db.syncGroup(name).create(ctx, spec, myInfo, function(err) {
+      t.error(err, 'should not error');
+      o.teardown(t.end);
+    });
+  });
+});
+
+test('creating a nested syncgroup', function(t) {
+  var perms = {};
+  var prefixes = ['t1/foo'];
+
+  setupSyncGroup(t, perms, prefixes, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var db = o.database;
+    var ctx = o.ctx;
+
+    var prefixes2 = ['t1/foobar'];
+
+    // TODO(nlacasse): It's not obvious that the syncgroup name needs to be
+    // appended to a syncbase service name.
+    var name = naming.join(o.service.fullName,
+                           syncbaseSuffix,
+                           uniqueName('syncgroup'));
+
+    var spec = new nosql.SyncGroupSpec({
+      description: 'another syncgroup named ' + name,
+      perms: {},
+      prefixes: prefixes2
+    });
+
+    var sg2 = db.syncGroup(name);
+    sg2.create(ctx, spec, myInfo, function(err) {
+      t.error(err, 'should not error');
+      o.teardown(t.end);
+    });
+  });
+});
+
+test('creating a syncgroup that already exists', function(t) {
+  var perms = {};
+  var prefixes = ['t1/foo'];
+
+  setupSyncGroup(t, perms, prefixes, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var db = o.database;
+    var ctx = o.ctx;
+
+    var name = o.syncgroup.name;
+
+    var spec = new nosql.SyncGroupSpec({
+      description: 'another syncgroup named ' + name,
+      perms: {},
+      prefixes: ['another/prefix']
+    });
+
+    var sg2 = db.syncGroup(name);
+    sg2.create(ctx, spec, myInfo, function(err) {
+      t.ok(err, 'should error');
+      t.ok(err instanceof verror.ExistError, 'err is ExistError');
+      o.teardown(t.end);
+    });
+  });
+});
+
+test('syncgroup.join succeeds if user has Read access', function(t) {
+  var perms = new Map([
+    ['Read', {
+      'in': ['...']
+    }]
+  ]);
+  var prefixes = ['t1/foo'];
+
+  setupSyncGroup(t, perms, prefixes, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    o.syncgroup.join(o.ctx, myInfo, function(err) {
+      t.error(err, 'should not error');
+      o.teardown(t.end);
+    });
+  });
+});
+
+test('syncgroup.join fails if user does not have Read access', function(t) {
+  var perms = new Map([
+    ['Read', {
+      'in': ['some/blessing/name']
+    }]
+  ]);
+  var prefixes = ['t1/foo'];
+
+  setupSyncGroup(t, perms, prefixes, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var sg = o.syncgroup;
+    var ctx = o.ctx;
+
+    sg.join(ctx, myInfo, function(err) {
+      t.ok(err, 'should error');
+      t.ok(err instanceof verror.NoAccessError, 'err is NoAccessError');
+      o.teardown(t.end);
+    });
+  });
+});
+
+// TODO(nlacasse): Enable this test once Syncbase server implements
+// Database.GetSyncGroupNames.
+test.skip('db.getSyncGroupNames returns the correct names', function(t) {
+  setupDatabase(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var ctx = o.ctx;
+    var db = o.database;
+
+    var names = [
+      uniqueName('syncgroup'),
+      uniqueName('syncgroup'),
+      uniqueName('syncgroup')
+    ];
+
+    var fullNames = names.map(function(name) {
+      return naming.join(o.service.fullName,
+                         syncbaseSuffix,
+                         name);
+    });
+
+    createSyncGroups();
+
+    function createSyncGroups() {
+      async.forEach(fullNames, function(fullName, cb) {
+        var spec = new nosql.SyncGroupSpec({
+          description: 'syncgroup named ' + fullName,
+          prefixes: ['']
+        });
+
+        db.syncGroup(fullName).create(ctx, spec, myInfo, cb);
+      }, getSyncGroupNames);
+    }
+
+    function getSyncGroupNames(err) {
+      if (err) {
+        t.error(err);
+        return o.teardown(t.end);
+      }
+
+      db.getSyncGroupNames(ctx, assertSyncGroupNames);
+    }
+
+    function assertSyncGroupNames(err, gotNames) {
+      if (err) {
+        t.error(err);
+        return o.teardown(t.end);
+      }
+
+      fullNames.sort();
+      gotNames.sort();
+      t.deepEqual(fullNames, gotNames, 'syncgroup names are correct');
+      o.teardown(t.end);
+    }
+  });
+});
+
+test('syncgroup.get/setSpec', function(t) {
+  var perms = {};
+  var prefixes = ['biz/bazz'];
+
+  var firstVersion;
+
+  var newSpec = new nosql.SyncGroupSpec({
+    description: 'new spec'
+  });
+
+  var newSpec2 = new nosql.SyncGroupSpec({
+    description: 'another new spec'
+  });
+
+  setupSyncGroup(t, perms, prefixes, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var sg = o.syncgroup;
+    var ctx = o.ctx;
+    sg.getSpec(ctx, assertSpec);
+
+    function assertSpec(err, spec, version) {
+      if (err) {
+        return done(err);
+      }
+
+      firstVersion = version;
+
+      t.deepEqual(spec.perms, perms, 'sg has correct perms');
+      t.deepEqual(spec.prefixes, prefixes, 'sg has correct prefixes');
+      t.equal(typeof version, 'string', 'version is string');
+
+      // Set spec with bogus version.
+      var bogusVersion = 'totally-bogus';
+
+      sg.setSpec(ctx, newSpec, bogusVersion, assertSetSpecFails);
+    }
+
+    function assertSetSpecFails(err) {
+      // TODO(nlacasse): Syncbase does not currently enforce that the version
+      // sent on SetSpec matches the current version.  Once it does enforce
+      // this, the following assertion should be uncommented.
+      // t.ok(err, 'setting spec with bogus version should fail');
+
+      // Set spec with empty version.
+      sg.setSpec(ctx, newSpec, '', assertSetSpecSucceeds);
+    }
+
+    function assertSetSpecSucceeds(err) {
+      if (err) {
+        return done(err);
+      }
+
+      sg.getSpec(ctx, assertGetSpec);
+    }
+
+    function assertGetSpec(err, spec, version) {
+      if (err) {
+        return done(err);
+      }
+
+      t.equal(spec.description, newSpec.description,
+              'spec has the correct description');
+      t.equal(typeof version, 'string', 'version is string');
+
+      // Set spec with previous version.
+      sg.setSpec(ctx, newSpec2, version, done);
+    }
+
+    function done(err) {
+      t.error(err);
+      o.teardown(t.end);
+    }
+  });
+});
diff --git a/test/integration/test-table.js b/test/integration/test-table.js
new file mode 100644
index 0000000..7e924f8
--- /dev/null
+++ b/test/integration/test-table.js
@@ -0,0 +1,323 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+var async = require('async');
+var test = require('prova');
+
+var syncbase = require('../..');
+
+var testUtil = require('./util');
+var assertScanRows = testUtil.assertScanRows;
+var setupTable = testUtil.setupTable;
+var uniqueName = testUtil.uniqueName;
+
+// TODO(aghassemi): We fail to bind to Unicode names; investigate.
+//var ROW_KEY = 'چשKEYઑᜰ';
+//var ROW_VAL = '⛓⛸VALϦӪ';
+var ROW_KEY = 'row_key';
+var ROW_VAL = 'row value';
+
+test('Putting a string value in a row', function(t) {
+  setupTable(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var key = uniqueName(ROW_KEY);
+    var value = uniqueName(ROW_VAL);
+
+    var table = o.table;
+    table.put(o.ctx, key, value, function(err) {
+      if (err) {
+        t.error(err);
+        return o.teardown(t.end);
+      }
+
+      table.get(o.ctx, key, function(err, val) {
+        t.error(err);
+        t.equals(val, value, 'put was successful');
+        o.teardown(t.end);
+      });
+    });
+  });
+});
+
+test('Deleting a row', function(t) {
+  setupTable(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var key = uniqueName(ROW_KEY);
+    var value = uniqueName(ROW_VAL);
+
+    var table = o.table;
+    var row = o.table.row(key);
+
+    async.waterfall([
+      // Verify row doesn't exist yet.
+      row.exists.bind(row, o.ctx),
+      function(exists, cb) {
+        t.notok(exists, 'row doesn\'t exist yet');
+        cb(null);
+      },
+
+      // Put row.
+      table.put.bind(table, o.ctx, key, value),
+
+      // Verify row exists.
+      row.exists.bind(row, o.ctx),
+      function(exists, cb) {
+        t.ok(exists, 'row exists');
+        cb(null);
+      },
+
+      // Delete row.
+      row.delete.bind(row, o.ctx),
+
+      // Verify row no longer exists.
+      row.exists.bind(row, o.ctx),
+      function(exists, cb) {
+        t.notok(exists, 'row no longer exists');
+        cb(null);
+      },
+    ], function(err, arg) {
+      if (err) {
+        t.error(err);
+        return o.teardown(t.end);
+      }
+
+      table.get(o.ctx, key, function(err, val) {
+        t.ok(err, 'get should error after row is deleted');
+        o.teardown(t.end);
+      });
+    });
+  });
+});
+
+test('Scanning table by single row', function(t) {
+  setupTable(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var key = uniqueName(ROW_KEY);
+    var value = uniqueName(ROW_VAL);
+
+    var table = o.table;
+    table.put(o.ctx, key, value, scan);
+
+    function scan(err) {
+      if (err) {
+        t.error(err);
+        return o.teardown(t.end);
+      }
+
+      var wantRows = [{
+        key: key,
+        value: value
+      }];
+      var range = syncbase.nosql.rowrange.singleRow(key);
+      assertScanRows(o.ctx, table, range, wantRows, function(err) {
+        t.error(err);
+        o.teardown(t.end);
+      });
+    }
+  });
+});
+
+test('Scanning table by a prefix range', function(t) {
+  setupTable(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var table = o.table;
+
+    // Create multiple rows, all with ROW_KEY as the prefix.
+    var prefixedRows = [{
+      key: uniqueName(ROW_KEY),
+      value: uniqueName(ROW_VAL)
+    }, {
+      key: uniqueName(ROW_KEY),
+      value: uniqueName(ROW_VAL)
+    }];
+
+    // Create multiple rows with a different prefix.
+    var otherRows = [{
+      key: uniqueName('misc_row_keys'),
+      value: uniqueName(ROW_VAL)
+    }];
+
+    var allRows = prefixedRows.concat(otherRows);
+    async.forEach(allRows, function(pair, cb) {
+      table.put(o.ctx, pair.key, pair.value, cb);
+    }, scan);
+
+    function scan(err) {
+      if (err) {
+        t.error(err);
+        return o.teardown(t.end);
+      }
+
+      var range = syncbase.nosql.rowrange.prefix(ROW_KEY);
+      assertScanRows(o.ctx, table, range, prefixedRows, function(err) {
+        t.error(err);
+        o.teardown(t.end);
+      });
+    }
+  });
+});
+
+test('Deleting rows by a prefix range', function(t) {
+  setupTable(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var table = o.table;
+    var range = syncbase.nosql.rowrange.prefix(ROW_KEY);
+
+    // Create multiple rows, all with ROW_KEY as the prefix.
+    var rows = [{
+      key: uniqueName(ROW_KEY),
+      value: uniqueName(ROW_VAL)
+    }, {
+      key: uniqueName(ROW_KEY),
+      value: uniqueName(ROW_VAL)
+    }];
+
+    async.forEach(rows, function(pair, cb) {
+      table.put(o.ctx, pair.key, pair.value, cb);
+    }, deleteRows);
+
+    function deleteRows(err) {
+      if (err) {
+        t.error(err);
+        return o.teardown(t.end);
+      }
+
+      table.delete(o.ctx, range, scan);
+    }
+
+    function scan(err) {
+      if (err) {
+        t.error(err);
+        return o.teardown(t.end);
+      }
+
+      var wantRows = [];
+      assertScanRows(o.ctx, table, range, wantRows, function(err) {
+        t.error(err);
+        o.teardown(t.end);
+      });
+    }
+  });
+});
+
+// TODO(aghassemi): Enable this test once setting permissions for a prefix
+// other than "" is implemented.
+test.skip('Getting/Setting permissions on rows', function(t) {
+  setupTable(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var table = o.table;
+
+    // Create rows under two different key prefixes.
+    var prefix1Rows = [{
+      key: uniqueName('prefix1'),
+      value: uniqueName(ROW_VAL)
+    }];
+
+    var prefix2Rows = [{
+      key: uniqueName('prefix2'),
+      value: uniqueName(ROW_VAL)
+    }];
+
+    var allRows = prefix1Rows.concat(prefix2Rows);
+    async.forEach(allRows, function(pair, cb) {
+      table.put(o.ctx, pair.key, pair.value, cb);
+    }, setPermissions);
+
+    function setPermissions(err) {
+      if (err) {
+        t.error(err);
+        return o.teardown(t.end);
+      }
+
+      // Set up different ACLs for the two prefixes.
+      var prefix1Perms = new Map([
+        ['Read', {
+          'in': ['...', 'canRead1'],
+          'notIn': ['cantRead1']
+        }],
+        ['Write', {
+          'in': ['...', 'canWrite1'],
+          'notIn': ['cantWrite1']
+        }],
+        ['Admin', {
+          'in': ['...', 'canAdmin1'],
+          'notIn': ['cantAdmin1']
+        }]
+      ]);
+
+      var prefix2Perms = new Map([
+        ['Read', {
+          'in': ['...', 'canRead2'],
+          'notIn': ['cantRead2']
+        }],
+        ['Write', {
+          'in': ['...', 'canWrite2'],
+          'notIn': ['cantWrite2']
+        }],
+        ['Admin', {
+          'in': ['...', 'canAdmin2'],
+          'notIn': ['cantAdmin2']
+        }]
+      ]);
+
+      var prefix1 = syncbase.nosql.rowrange.prefix('prefix1');
+      var prefix2 = syncbase.nosql.rowrange.prefix('prefix2');
+
+      table.setPermissions(o.ctx, prefix1, prefix1Perms, function(err) {
+        if (err) {
+          t.error(err);
+          return o.teardown(t.end);
+        }
+
+        table.setPermissions(o.ctx, prefix2, prefix2Perms, getPermissions);
+      });
+
+      function getPermissions(err) {
+        if (err) {
+          t.error(err);
+          return o.teardown(t.end);
+        }
+
+        table.getPermissions(o.ctx, prefix1Rows[0].key, function(err, perms) {
+          if (err) {
+            t.error(err);
+            return o.teardown(t.end);
+          }
+
+          t.deepEquals(perms.prefix, prefix1);
+          t.deepEquals(perms.perms, prefix1Perms);
+
+          table.getPermissions(o.ctx, prefix2Rows[0].key, function(err, perms) {
+            if (err) {
+              t.error(err);
+              return o.teardown(t.end);
+            }
+
+            t.deepEquals(perms.prefix, prefix2);
+            t.deepEquals(perms.perms, prefix2Perms);
+            o.teardown(t.end);
+          });
+        });
+      }
+    }
+  });
+});
diff --git a/test/integration/test-watch.js b/test/integration/test-watch.js
new file mode 100644
index 0000000..fca1cec
--- /dev/null
+++ b/test/integration/test-watch.js
@@ -0,0 +1,132 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+var async = require('async');
+var test = require('prova');
+var vom = require('vanadium').vom;
+
+var syncbase = require('../..');
+var WatchChange = syncbase.nosql.WatchChange;
+
+var testUtil = require('./util');
+var setupTable = testUtil.setupTable;
+var uniqueName = testUtil.uniqueName;
+
+// Tests the basic client watch functionality (no perms or batches).  First
+// does some puts and deletes, fetching a ResumeMarker after each operation.
+// Then calls 'watch' with different prefixes and ResumeMarkers and verifies
+// that the resulting stream contains the correct changes.
+test('basic client watch', function(t) {
+  setupTable(t, function(err, o) {
+    if (err) {
+      return t.end(err);
+    }
+
+    var ctx = o.ctx;
+    var db = o.database;
+    var table = o.table;
+
+    var row1Prefix = 'row-abc';
+    var row1 = table.row(uniqueName(row1Prefix));
+    var value1 = uniqueName('value');
+
+    var row2Prefix = 'row-a';
+    var row2 = table.row(uniqueName(row2Prefix));
+    var value2 = uniqueName('value');
+
+    var resumeMarkers = [];
+
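+    // Fetches the database's current ResumeMarker and records it so that
+    // the watch calls below can start from that point in the update stream.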
+    function getAndAppendResumeMarker(cb) {
+      db.getResumeMarker(ctx, function(err, rm) {
+        if (err) {
+          return cb(err);
+        }
+        resumeMarkers.push(rm);
+        cb(null);
+      });
+    }
+
+    // Generate the data and resume markers.
+    async.waterfall([
+      // Initial state.
+      getAndAppendResumeMarker,
+
+      // Put to row1.
+      row1.put.bind(row1, ctx, value1),
+      getAndAppendResumeMarker,
+
+      // Delete row1.
+      row1.delete.bind(row1, ctx),
+      getAndAppendResumeMarker,
+
+      // Put to row2.
+      row2.put.bind(row2, ctx, value2),
+      getAndAppendResumeMarker
+    ], assertCorrectChanges);
+
+    function assertCorrectChanges(err) {
+      if (err) {
+        t.error(err);
+        return o.teardown(t.end);
+      }
+
+      var allExpectedChanges = [new WatchChange({
+        tableName: table.name,
+        rowName: row1.key,
+        changeType: 'put',
+        valueBytes: vom.encode(value1),
+        resumeMarker: resumeMarkers[1]
+      }), new WatchChange({
+        tableName: table.name,
+        rowName: row1.key,
+        changeType: 'delete',
+        valueBytes: null,
+        resumeMarker: resumeMarkers[2]
+      }), new WatchChange({
+        tableName: table.name,
+        rowName: row2.key,
+        changeType: 'put',
+        valueBytes: vom.encode(value2),
+        resumeMarker: resumeMarkers[3]
+      })];
+
+      async.series([
+        assertWatch.bind(null, t, ctx, db, table.name, row2Prefix,
+                         resumeMarkers[0], allExpectedChanges),
+        assertWatch.bind(null, t, ctx, db, table.name, row2Prefix,
+                         resumeMarkers[1], allExpectedChanges.slice(1)),
+        assertWatch.bind(null, t, ctx, db, table.name, row2Prefix,
+                         resumeMarkers[2], allExpectedChanges.slice(2)),
+
+        assertWatch.bind(null, t, ctx, db, table.name, row1Prefix,
+                         resumeMarkers[0], allExpectedChanges.slice(0,2)),
+        assertWatch.bind(null, t, ctx, db, table.name, row1Prefix,
+                         resumeMarkers[1], allExpectedChanges.slice(1,2)),
+      ], function(err) {
+        t.error(err);
+        o.teardown(t.end);
+      });
+    }
+  });
+});
+
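+// Watches rows in 'tableName' with the given prefix, starting from
+// 'resumeMarker', and asserts that the changes received match
+// 'expectedWatchChanges' in order before cancelling the watch.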
+function assertWatch(t, ctx, db, tableName, rowPrefix, resumeMarker,
+                     expectedWatchChanges, cb) {
+  var cctx = ctx.withCancel();
+  var stream = db.watch(cctx, tableName, rowPrefix, resumeMarker);
+
+  async.timesSeries(expectedWatchChanges.length, function(i, next) {
+    stream.once('data', function(gotWatchChange) {
+      t.deepEqual(gotWatchChange, expectedWatchChanges[i]);
+
+      next(null);
+    });
+  }, function(err) {
+    cctx.finish();
+    if (err) {
+      return cb(err);
+    }
+    cb(null);
+  });
+}
diff --git a/test/integration/util.js b/test/integration/util.js
new file mode 100644
index 0000000..7242f07
--- /dev/null
+++ b/test/integration/util.js
@@ -0,0 +1,263 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+module.exports = {
+  setupApp: setupApp,
+  setupDatabase: setupDatabase,
+  setupService: setupService,
+  setupSyncGroup: setupSyncGroup,
+  setupTable: setupTable,
+
+  assertScanRows: assertScanRows,
+  testGetSetPermissions: testGetSetPermissions,
+  uniqueName: uniqueName
+};
+
+var deepEqual = require('deep-equal');
+var extend = require('xtend');
+var streamToArray = require('stream-to-array');
+var vanadium = require('vanadium');
+
+var syncbase = require('../..');
+var syncbaseSuffix = syncbase.syncbaseSuffix;
+
+var SERVICE_NAME = require('./service-name');
+
+// Helper function to generate unique names.
+var nameCounter = Date.now();
+
+function uniqueName(prefix) {
+  prefix = prefix || 'name';
+  return prefix + '_' + nameCounter++;
+}
+
+// Initializes Vanadium runtime.
+function setupService(t, cb) {
+  vanadium.init(function(err, rt) {
+    if (err) {
+      return cb(err);
+    }
+
+    function teardown(cb) {
+      rt.close(function(err) {
+        t.error(err, 'rt.close should not error.');
+        cb(null);
+      });
+    }
+
+    var service = syncbase.newService(SERVICE_NAME);
+
+    return cb(null, {
+      ctx: rt.getContext(),
+      rt: rt,
+      service: service,
+      teardown: teardown
+    });
+  });
+}
+
+// Initializes Vanadium runtime and creates an App.
+function setupApp(t, cb) {
+  setupService(t, function(err, o) {
+    if (err) {
+      return cb(err);
+    }
+
+    var app = o.service.app(uniqueName('app'));
+
+    app.create(o.ctx, {}, function(err) {
+      if (err) {
+        o.rt.close(t.error);
+        return cb(err);
+      }
+
+      return cb(null, extend(o, {
+        app: app
+      }));
+    });
+  });
+}
+
+// Initializes Vanadium runtime and creates an App and a Database.
+function setupDatabase(t, cb) {
+  setupApp(t, function(err, o) {
+    if (err) {
+      return cb(err);
+    }
+
+    var db = o.app.noSqlDatabase(uniqueName('db'));
+
+    db.create(o.ctx, {}, function(err) {
+      if (err) {
+        o.rt.close(t.error);
+        return cb(err);
+      }
+
+      return cb(null, extend(o, {
+        database: db
+      }));
+    });
+  });
+}
+
+// Initializes Vanadium runtime and creates an App, a Database and a SyncGroup.
+function setupSyncGroup(t, perms, prefixes, cb) {
+  setupDatabase(t, function(err, o) {
+    if (err) {
+      return cb(err);
+    }
+
+    var sgName = uniqueName('syncgroup');
+    var fullSgName = vanadium.naming.join(o.service.fullName,
+                                          syncbaseSuffix,
+                                          sgName);
+
+    // TODO(nlacasse): Where does this magic number 8 come from? It's in
+    // syncgroup_test.go.
+    var myInfo = new syncbase.nosql.SyncGroupMemberInfo({
+      syncPriority: 8
+    });
+
+    var spec = new syncbase.nosql.SyncGroupSpec({
+      description: 'test syncgroup ' + fullSgName,
+      perms: perms,
+      prefixes: prefixes
+    });
+
+    var sg = o.database.syncGroup(fullSgName);
+    sg.create(o.ctx, spec, myInfo, function(err) {
+      if (err) {
+        o.rt.close(t.error);
+        return cb(err);
+      }
+
+      return cb(null, extend(o, {
+        syncgroup: sg
+      }));
+    });
+  });
+}
+
+// Initializes Vanadium runtime and creates an App, a Database and a Table.
+function setupTable(t, cb) {
+  setupDatabase(t, function(err, o) {
+    if (err) {
+      return cb(err);
+    }
+    var db = o.database;
+
+    var tableName = uniqueName('table');
+    db.createTable(o.ctx, tableName, {}, function(err) {
+      if (err) {
+        o.rt.close(t.error);
+        return cb(err);
+      }
+
+      return cb(null, extend(o, {
+        table: db.table(tableName)
+      }));
+    });
+  });
+}
+
+// Assert that two permissions objects are equal.
+function assertPermissionsEqual(t, got, want) {
+  t.equal(got.size, want.size, 'Permissions size matches');
+  want.forEach(function(value, key) {
+    t.deepEqual(got.get(key), value, 'Permission value matches');
+  });
+}
+
+// For any object that implements get/setPermissions, test that getting and
+// setting permissions behaves as it should.
+function testGetSetPermissions(t, ctx, obj, cb) {
+  obj.getPermissions(ctx, function(err, perms, version) {
+    if (err) {
+      t.error(err, 'error getting permissions');
+      return cb(err);
+    }
+
+    t.ok(perms, 'Has permissions');
+    t.ok(version, 'Has a version');
+
+    var newPerms = new Map([
+      ['Read', {
+        'in': ['...', 'canRead'],
+        'notIn': ['cantRead']
+      }],
+      ['Write', {
+        'in': ['...', 'canWrite'],
+        'notIn': ['cantWrite']
+      }],
+      ['Admin', {
+        'in': ['...', 'canAdmin'],
+        'notIn': ['cantAdmin']
+      }]
+    ]);
+
+    obj.setPermissions(ctx, newPerms, version, function(err) {
+      if (err) {
+        t.error(err, 'error setting permissions');
+        return cb(err);
+      }
+
+      obj.getPermissions(ctx, function(err, gotPerms, gotVersion) {
+        if (err) {
+          t.error(err, 'error getting permissions');
+          return cb(err);
+        }
+
+        t.ok(gotPerms, 'Has permissions');
+        t.ok(gotVersion, 'Has a version');
+
+        t.notEqual(version, gotVersion, 'should have a new version');
+        assertPermissionsEqual(t, gotPerms, newPerms);
+        return cb(null);
+      });
+    });
+  });
+}
+
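+// Comparator that orders rows by key, then by value, so scan results can be
+// compared independently of order.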
+function compareRows(r1, r2) {
+  if (r1.key > r2.key) {
+    return 1;
+  }
+  if (r1.key < r2.key) {
+    return -1;
+  }
+  if (r1.value > r2.value) {
+    return 1;
+  }
+  if (r1.value < r2.value) {
+    return -1;
+  }
+  return 0;
+}
+
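+// Scans 'table' over 'range' and verifies that the returned rows match
+// 'wantRows', ignoring order.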
+function assertScanRows(ctx, table, range, wantRows, cb) {
+  var stream = table.scan(ctx, range, function(err) {
+    if (err) {
+      return cb(err);
+    }
+  });
+
+  streamToArray(stream, function(err, rows) {
+    if (err) {
+      return cb(err);
+    }
+
+    rows = rows || [];
+
+    rows.sort(compareRows);
+    wantRows.sort(compareRows);
+
+    if (!deepEqual(rows, wantRows)) {
+      var error = new Error('Expected rows to be ' + JSON.stringify(wantRows) +
+                        ' but got ' + JSON.stringify(rows));
+      return cb(error);
+    }
+
+    cb(null);
+  });
+}
diff --git a/test/start-syncbased.sh b/test/start-syncbased.sh
new file mode 100755
index 0000000..bedc838
--- /dev/null
+++ b/test/start-syncbased.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Copyright 2015 The Vanadium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Start syncbased and mount in the mounttable.
+
+# TODO(nlacasse): This file is needed because the javascript service-runner
+# does not allow flags or arguments to the executables it starts.  We should
+# fix service-runner to allow flags/arguments, and then have it start syncbased
+# directly with the appropriate flags.  Then we can delete this file.
+
+TESTDIR="$(mktemp -d "${TMPDIR:-/tmp}"/sbtest.XXXXXXXX)"
+# Delete TESTDIR and stop syncbased on exit.
+function cleanup {
+	rm -rf "${TESTDIR}"
+	kill -TERM "${CHILD}" 2>/dev/null
+	exit 0
+}
+trap cleanup SIGINT SIGTERM EXIT
+
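+# -v=3 raises log verbosity, --name mounts the server in the mounttable as
+# 'test/syncbased', and --v23.tcp.address 127.0.0.1:0 listens on an
+# OS-assigned local port.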
+syncbased -v=3 --name test/syncbased --engine "${STORAGE_ENGINE:-leveldb}" --root-dir "${TESTDIR}" --v23.tcp.address 127.0.0.1:0 &
+
+CHILD=$!
+wait "${CHILD}"