Use sqlx library for db access, and sql-migrate for migrations.

This CL splits the DB migration and schema logic out of the compilerd
code.

The migrations are now handled by sql-migrate. The migration files live
in go/src/playground/migrations. The README has been updated with
instructions for writing, running, and rolling back migrations.

I've added tests that will perform all migrations, and then roll them
back, three times.

The lsql library has been replaced with sqlx library. The storage model
lives in a package at compilerd/storage/model.go, and has tests.

The compilerd/storage.go file now only contains the http handlers for
load and store, and calls methods on the storage package for all
database work.

The CL at go/vcl/9522 adds all new dependencies to third_party repo.

There's still more cleanup to do, but this CL should provide a clean
foundation for changes to come.

MultiPart: 1/3

Change-Id: Ide1b4c01134bae44fd2e286e70af6a75d0f175d4
diff --git a/go/src/playground/Makefile b/go/src/playground/Makefile
index b8d33f6..6f4bfa0 100644
--- a/go/src/playground/Makefile
+++ b/go/src/playground/Makefile
@@ -34,7 +34,6 @@
 start: config/db.json all
 	compilerd \
 		--sqlconf=$< \
-		--setupdb=true \
 		--listen-timeout=0 \
 		--address=$(host):$(port) \
 		--use-docker=false
diff --git a/go/src/playground/README.md b/go/src/playground/README.md
index a838980..5b3a58a 100644
--- a/go/src/playground/README.md
+++ b/go/src/playground/README.md
@@ -45,6 +45,7 @@
     $ cd $V23_ROOT/release/projects/playground/client && make src/example_bundles
     $ docker run -i playground < $V23_ROOT/release/projects/playground/client/bundles/fortune/bundle_js_go.json
 
+
 ## Running the playground server (compilerd)
 
 Install the playground binaries:
@@ -66,4 +67,98 @@
 make the client talk to your server. Add `?debug=1` to see debug info from
 the builder.
 
-TODO(ivanpi): Describe storage.
+## Running local SQL database
+
+NOTE: These instructions should only be used for local development and testing,
+and not for production deploys.
+
+Install MariaDB:
+
+    $ sudo apt-get install mariadb-server
+
+Sign in to Maria as root:
+
+    $ mysql -u root -p
+
+Create playground databases:
+
+    MariaDB [(none)]> CREATE DATABASE IF NOT EXISTS playground;
+
+Create a playground user who has access to the playground database:
+
+    MariaDB [(none)]> GRANT ALL PRIVILEGES ON playground.* TO 'playground'@'localhost';
+
+Create config/db.json from example:
+
+    $ cp config/db-local-example.json config/db.json
+
+Edit config/db.json and set username, password, and database.
+
+TODO(ivanpi): Describe cloud storage.
+
+# Running tests
+
+Make sure you have built a docker playground image, following the steps above.
+
+Make sure that MariaDB is installed, following the steps above.
+
+Run sql_test_setup.sh. You will be prompted for your MariaDB password for the
+account. You only need to do this once.
+
+    $ ./sql_test_setup.sh
+
+This script will create a playground_test database, and a playground_test user
+that can access it.
+
+Run the tests:
+
+    $ GOPATH=$V23_ROOT/release/projects/playground/go v23 go test playground/compilerd/...
+
+
+# Database migrations
+
+## Running migrations
+
+Build the `sql-migrate` tool:
+
+    $ v23 go install github.com/rubenv/sql-migrate/sql-migrate
+
+Edit config/migrate.yml. Find or define whatever environment you plan to
+migrate, and make sure the datasource is correct.
+
+To see the current migration status, run:
+
+    $ $V23_ROOT/third_party/go/bin/sql-migrate status -config=./config/migrate.yml -env=<environment>
+
+To migrate up, first run with -dryrun:
+
+    $ $V23_ROOT/third_party/go/bin/sql-migrate up -config=./config/migrate.yml -env=<environment> -dryrun
+
+If everything looks good, run the tool without -dryrun:
+
+    $ $V23_ROOT/third_party/go/bin/sql-migrate up -config=./config/migrate.yml -env=<environment>
+
+You can undo the last migration with:
+
+    $ $V23_ROOT/third_party/go/bin/sql-migrate down -limit=1 -config=./config/migrate.yml -env=<environment>
+
+For more options and information, see https://github.com/rubenv/sql-migrate#usage
+
+## Writing migrations
+
+Migrations are kept in the `migrations` directory. They are ordered
+alphabetically, so please name each migration consecutively. Never delete or
+modify an existing migration. Only add new ones.
+
+Each migration file must define an "up" section, which begins with the comment
+
+    -- +migrate Up
+
+and a "down" section, which begins with the comment
+
+    -- +migrate Down
+
+Applying a single migration "up" and then "down" should leave the database in
+the same state it was in to begin with.
+
+For more information on writing migrations, see https://github.com/rubenv/sql-migrate#writing-migrations
diff --git a/go/src/playground/compilerd/compile.go b/go/src/playground/compilerd/compile.go
index a63a549..206799d 100644
--- a/go/src/playground/compilerd/compile.go
+++ b/go/src/playground/compilerd/compile.go
@@ -22,6 +22,7 @@
 	"playground/compilerd/jobqueue"
 	"playground/lib"
 	"playground/lib/event"
+	"playground/lib/hash"
 )
 
 var (
@@ -111,7 +112,7 @@
 	// response status and body.
 	// NOTE(sadovsky): In the client we may shift timestamps (based on current
 	// time) and introduce a fake delay.
-	requestBodyHash := rawHash(requestBody)
+	requestBodyHash := hash.Raw(requestBody)
 	if cr, ok := cache.Get(requestBodyHash); ok {
 		if cachedResponseStruct, ok := cr.(cachedResponse); ok {
 			res := openResponse(cachedResponseStruct.Status)
diff --git a/go/src/playground/compilerd/compile_test.go b/go/src/playground/compilerd/compile_test.go
index 040b230..8cd1a43 100644
--- a/go/src/playground/compilerd/compile_test.go
+++ b/go/src/playground/compilerd/compile_test.go
@@ -14,6 +14,7 @@
 
 	"playground/compilerd/jobqueue"
 	"playground/lib/event"
+	"playground/lib/hash"
 )
 
 // mockDispatcher implements the jobqueue.Dispatcher interface.
@@ -103,7 +104,7 @@
 	bodyString := "foobar"
 	body := bytes.NewBufferString(bodyString)
 	bodyBytes := body.Bytes()
-	requestBodyHash := rawHash(bodyBytes)
+	requestBodyHash := hash.Raw(bodyBytes)
 
 	// Check that body is not already in cache.
 	if _, ok := cache.Get(requestBodyHash); ok {
@@ -153,7 +154,7 @@
 	bodyString := "bazbar"
 	body := bytes.NewBufferString(bodyString)
 	bodyBytes := body.Bytes()
-	requestBodyHash := rawHash(bodyBytes)
+	requestBodyHash := hash.Raw(bodyBytes)
 
 	// Check that body is not already in cache.
 	if _, ok := cache.Get(requestBodyHash); ok {
diff --git a/go/src/playground/compilerd/main.go b/go/src/playground/compilerd/main.go
index a47adab..118a0e1 100644
--- a/go/src/playground/compilerd/main.go
+++ b/go/src/playground/compilerd/main.go
@@ -12,9 +12,7 @@
 import (
 	"bytes"
 	crand "crypto/rand"
-	"crypto/sha256"
 	"encoding/binary"
-	"encoding/hex"
 	"flag"
 	"fmt"
 	"io"
@@ -26,6 +24,10 @@
 	"runtime"
 	"syscall"
 	"time"
+
+	"v.io/x/lib/dbutil"
+
+	"playground/compilerd/storage"
 )
 
 func init() {
@@ -57,6 +59,9 @@
 	// Maximum time to finish serving currently running requests before exiting
 	// cleanly. No new requests are accepted during this time.
 	exitDelay = 30 * time.Second
+
+	// Path to SQL configuration file, as described in v.io/x/lib/dbutil/mysql.go.
+	sqlConf = flag.String("sqlconf", "", "Path to SQL configuration file. If empty, load and save requests are disabled. "+dbutil.SqlConfigFileDescription)
 )
 
 // Seeds the non-secure random number generator.
@@ -90,13 +95,28 @@
 		go waitForExit(c, time.Nanosecond*time.Duration(delayNs))
 	}
 
-	if err := initDBHandles(); err != nil {
-		log.Fatal(err)
+	if *sqlConf != "" {
+		// Parse SQL configuration file and set up TLS.
+		dbConfig, err := dbutil.ActivateSqlConfigFromFile(*sqlConf)
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		// Connect to storage backend.
+		if err := storage.Connect(dbConfig); err != nil {
+			log.Fatal(err)
+		}
+
+		// Add routes for storage.
+		http.HandleFunc("/load", handlerLoad)
+		http.HandleFunc("/save", handlerSave)
+	} else {
+		// Return 501 Not Implemented for the /load and /save routes.
+		http.HandleFunc("/load", handlerNotImplemented)
+		http.HandleFunc("/save", handlerNotImplemented)
 	}
 
 	http.HandleFunc("/compile", c.handlerCompile)
-	http.HandleFunc("/load", handlerLoad)
-	http.HandleFunc("/save", handlerSave)
 	http.HandleFunc("/healthz", handlerHealthz)
 
 	log.Printf("Serving %s\n", *address)
@@ -143,6 +163,13 @@
 	// queued to be sent.
 	time.Sleep(2 * time.Second)
 
+	// Close database connections.
+	if *sqlConf != "" {
+		if err := storage.Close(); err != nil {
+			log.Printf("storage.Close() failed: %v", err)
+		}
+	}
+
 	os.Exit(0)
 }
 
@@ -203,14 +230,7 @@
 	}
 }
 
-//////////////////////////////////////////
-// Shared helper functions
-
-func stringHash(data []byte) string {
-	hv := rawHash(data)
-	return hex.EncodeToString(hv[:])
-}
-
-func rawHash(data []byte) [32]byte {
-	return sha256.Sum256(data)
+func handlerNotImplemented(w http.ResponseWriter, r *http.Request) {
+	w.WriteHeader(http.StatusNotImplemented)
+	return
 }
diff --git a/go/src/playground/compilerd/storage.go b/go/src/playground/compilerd/storage.go
index 6a7d960..30e4797 100644
--- a/go/src/playground/compilerd/storage.go
+++ b/go/src/playground/compilerd/storage.go
@@ -13,118 +13,14 @@
 package main
 
 import (
-	crand "crypto/rand"
-	"encoding/binary"
-	"encoding/hex"
 	"encoding/json"
-	"flag"
 	"fmt"
 	"log"
 	"net/http"
-	"time"
 
-	"v.io/x/lib/dbutil"
-
-	"playground/lib/lsql"
+	"playground/compilerd/storage"
 )
 
-var (
-	// Path to SQL configuration file, as described in v.io/x/lib/dbutil/mysql.go.
-	sqlConf = flag.String("sqlconf", "", "Path to SQL configuration file. If empty, load and save requests are disabled. "+dbutil.SqlConfigFileDescription)
-
-	// Testing parameter, use default value for production.
-	// Name of dataset to use. Used as table name prefix (a single SQL database
-	// can contain multiple datasets).
-	dataset = flag.String("dataset", "pg", "Testing: Name of dataset to use (and create if needed, when allowed by setupdb).")
-
-	// If set, will attempt to create any missing database tables for the given
-	// dataset. Idempotent.
-	setupDB = flag.Bool("setupdb", false, "Whether to create missing database tables for dataset.")
-)
-
-//////////////////////////////////////////
-// SqlData type definitions
-
-// === High-level schema ===
-// Each bundle save request generates a unique BundleID.
-// Every BundleID corresponds to exactly one bundle Json, stored as a Unicode
-// text blob. However, a single bundle Json can correspond to an unlimited
-// number of BundleIDs.
-// BundleIDs are mapped to Jsons via the BundleHash, which is a hash value of
-// the Json. The bundleLink table stores records of BundleID (primary key) and
-// BundleHash, as well as creation and last access timestamps for each record.
-// The bundleData table stores records of BundleHash (primary key) and Json.
-// The bundleLink.BundleHash column references bundleData.BundleHash, but can
-// be set to NULL to allow deletion of bundleData entries.
-// The additional layer of indirection allows storing identical bundles more
-// efficiently and makes the bundle ID independent of its contents, allowing
-// implementation of change history, sharing, expiration etc.
-// TODO(ivanpi): Revisit ON DELETE SET NULL when delete functionality is added.
-// === Schema type details ===
-// The BundleID is a 64-character string consisting of URL-friendly characters
-// (alphanumeric, '-', '_', '.', '~'), beginning with an underscore.
-// The timestamps are UTC DATETIME values.
-// The BundleHash is a raw (sequence of 32 bytes) SHA256 hash.
-// The Json is a MEDIUMTEXT (up to 16 MiB) Unicode (utf8mb4) blob.
-// Note: If bundles larger than ~1 MiB are to be stored, the max_allowed_packed
-// SQL connection parameter must be increased.
-// TODO(ivanpi): Normalize the Json (e.g. file order).
-
-// All SqlData types should be added here, in order of table initialization.
-var dataTypes = []lsql.SqlData{&bundleData{}, &bundleLink{}}
-
-type bundleData struct {
-	// Raw SHA256 of the bundle contents
-	BundleHash []byte // primary key
-	// The bundle contents
-	Json string
-}
-
-func (bd *bundleData) TableName() string {
-	return *dataset + "_bundleData"
-}
-
-func (bd *bundleData) TableDef() *lsql.SqlTable {
-	return lsql.NewSqlTable(bd, []lsql.SqlColumn{
-		{Name: "BundleHash", Type: "BINARY(32)", Null: false},
-		{Name: "Json", Type: "MEDIUMTEXT", Null: false},
-	}, []string{})
-}
-
-func (bd *bundleData) QueryRefs() []interface{} {
-	return []interface{}{&bd.BundleHash, &bd.Json}
-}
-
-type bundleLink struct {
-	// 64-byte printable ASCII string
-	BundleID string // primary key
-	// Raw SHA256 of the bundle contents
-	BundleHash []byte // foreign key => bundleData.BundleHash
-	// Link record creation time
-	TCreated time.Time
-	// Link record last access time
-	TLastAccessed time.Time
-}
-
-func (bl *bundleLink) TableName() string {
-	return *dataset + "_bundleLink"
-}
-
-func (bl *bundleLink) TableDef() *lsql.SqlTable {
-	return lsql.NewSqlTable(bl, []lsql.SqlColumn{
-		{Name: "BundleID", Type: "CHAR(64) CHARACTER SET ascii", Null: false},
-		{Name: "BundleHash", Type: "BINARY(32)", Null: true},
-		{Name: "TCreated", Type: "DATETIME", Null: false},
-		{Name: "TLastAccessed", Type: "DATETIME", Null: false},
-	}, []string{
-		"FOREIGN KEY (BundleHash) REFERENCES " + (&bundleData{}).TableName() + "(BundleHash) ON DELETE SET NULL",
-	})
-}
-
-func (bl *bundleLink) QueryRefs() []interface{} {
-	return []interface{}{&bl.BundleID, &bl.BundleHash, &bl.TCreated, &bl.TLastAccessed}
-}
-
 //////////////////////////////////////////
 // HTTP request handlers
 
@@ -143,15 +39,8 @@
 		storageError(w, http.StatusBadRequest, "Must specify id to load.")
 		return
 	}
-
-	if !checkDBInit(w) {
-		return
-	}
-
-	var bLink bundleLink
-	// Get the entry for the provided id.
-	err := dbhRead.EFetch(bId, &bLink)
-	if err == lsql.ErrNoSuchEntity {
+	bData, err := storage.GetBundleDataByLinkId(bId)
+	if err == storage.ErrNotFound {
 		storageError(w, http.StatusNotFound, "No data found for provided id.")
 		return
 	} else if err != nil {
@@ -159,34 +48,6 @@
 		return
 	}
 
-	var bData bundleData
-	// Get the bundle data for the hash linked in the entry.
-	// Note: This can fail if the bundle is deleted between fetching bundleLink
-	// and bundleData. However, it is highly unlikely, costly to mitigate (using
-	// a serializable transaction), and unimportant (error 500 instead of 404).
-	err = dbhRead.EFetch(bLink.BundleHash, &bData)
-	if err != nil {
-		storageInternalError(w, "Error getting bundleData for id", bId, ":", err)
-		return
-	}
-
-	// Update the last access timestamp for the link.
-	// Since we don't use transactions, this might get clobbered by a slightly
-	// older timestamp.
-	for try := 0; try < 3; try++ {
-		bLink.TLastAccessed = time.Now()
-		// Ignore no rows affected. It means either the timestamp is up to date (no
-		// change was needed) or the record had been deleted.
-		if err = dbhSeq.EUpdate(&bLink); err == nil || err == lsql.ErrNoRowsAffected {
-			err = nil
-			break
-		}
-	}
-	if err != nil {
-		storageInternalError(w, "Error updating last access timestamp for id", bId, ":", err)
-		return
-	}
-
 	storageRespond(w, http.StatusOK, &StorageResponse{
 		Link: bId,
 		Data: bData.Json,
@@ -212,88 +73,18 @@
 		return
 	}
 
-	if !checkDBInit(w) {
+	// TODO(ivanpi): Check if bundle is parseable. Format/lint?
+
+	bLink, bData, err := storage.StoreBundleLinkAndData(requestBody)
+	if err != nil {
+		storageInternalError(w, err)
 		return
 	}
 
-	// TODO(ivanpi): Check if bundle is parseable. Format/lint?
-
-	bHashFixed := rawHash(requestBody)
-	bHash := bHashFixed[:]
-
-	randomLink := func() string {
-		h := make([]byte, 16, 16+len(bHash))
-		err := binary.Read(crand.Reader, binary.LittleEndian, h)
-		if err != nil {
-			panic(fmt.Errorf("rng failed: %v", err))
-		}
-		return "_" + stringHash(append(h, bHash...))[1:]
-	}
-
-	bNewData := bundleData{
-		BundleHash: bHash,
-		Json:       string(requestBody),
-	}
-	bNewLink := bundleLink{
-		BundleID:   randomLink(),
-		BundleHash: bHash,
-	}
-
-	// TODO(ivanpi): The function is not idempotent, there is a small probability
-	// of making a duplicate entry (if a commit succeeds, but a transient network
-	// issue reports it as failed).
-	err := dbhSeq.RunInTransaction(3, func(txh *lsql.DBHandle) error {
-		// If a bundleLink entry exists for the generated BundleID, regenerate
-		// BundleID and retry. Buying lottery ticket optional.
-		bLinkFound, err := txh.EExists(bNewLink.BundleID, &bNewLink)
-		if err != nil {
-			log.Println("error checking bundleLink existence for id", bNewLink.BundleID, ":", err)
-			return err
-		} else if bLinkFound {
-			log.Println("random generation resulted in duplicate id", bNewLink.BundleID)
-			bNewLink.BundleID = randomLink()
-			return lsql.RetryTransaction
-		}
-
-		// Check if a bundleData entry exists for this BundleHash.
-		bDataFound, err := txh.EExists(bNewData.BundleHash, &bNewData)
-		if err != nil {
-			log.Println("error checking bundleData existence for hash", hex.EncodeToString(bHash), ":", err)
-			return err
-		} else if !bDataFound {
-			// If not, save the bundleData.
-			err = txh.EInsert(&bNewData)
-			if err != nil {
-				log.Println("error storing bundleData for hash", hex.EncodeToString(bHash), ":", err)
-				return err
-			}
-		}
-
-		// Save the bundleLink with the generated BundleID referring to the
-		// bundleData. Assign timestamps just before saving.
-		bNewLink.TCreated = time.Now()
-		bNewLink.TLastAccessed = bNewLink.TCreated
-		err = txh.EInsert(&bNewLink)
-		if err != nil {
-			log.Println("error storing bundleLink for id", bNewLink.BundleID, ":", err)
-			return err
-		}
-
-		return nil
+	storageRespond(w, http.StatusOK, &StorageResponse{
+		Link: bLink.Id,
+		Data: bData.Json,
 	})
-
-	if err == nil {
-		storageRespond(w, http.StatusOK, &StorageResponse{
-			Link: bNewLink.BundleID,
-			Data: bNewData.Json,
-		})
-	} else if err == lsql.ErrTooManyRetries {
-		storageInternalError(w, err)
-	} else {
-		// An informative error message has already been printed.
-		storageInternalError(w)
-	}
-	return
 }
 
 //////////////////////////////////////////
@@ -331,80 +122,3 @@
 	}
 	storageError(w, http.StatusInternalServerError, "Internal error, please retry.")
 }
-
-//////////////////////////////////////////
-// SQL database handles
-
-// Data writes for the schema are complex enough to require transactions with
-// SERIALIZABLE isolation. However, reads do not require SERIALIZABLE. Since
-// database/sql only allows setting transaction isolation per connection,
-// a separate connection with only READ-COMMITTED isolation is used for reads
-// to reduce lock contention and deadlock frequency.
-
-var (
-	// Database handle with SERIALIZABLE transaction isolation.
-	// Used for read-write transactions.
-	dbhSeq *lsql.DBHandle
-	// Database handle with READ_COMMITTED transaction isolation.
-	// Used for non-transactional reads.
-	dbhRead *lsql.DBHandle
-)
-
-func newDBHandle(sqlConfig *dbutil.ActiveSqlConfig, txIsolation string, dataTypes []lsql.SqlData, setupdb, readonly bool) (*lsql.DBHandle, error) {
-	// Create a database handle.
-	dbc, err := sqlConfig.NewSqlDBConn(txIsolation)
-	if err != nil {
-		return nil, err
-	}
-	dbh := lsql.NewDBHandle(dbc)
-	if setupdb {
-		// Create missing database tables.
-		for _, t := range dataTypes {
-			if err := dbh.CreateTable(t, true, dbutil.SqlCreateTableSuffix); err != nil {
-				return nil, fmt.Errorf("failed initializing database tables: %v", err)
-			}
-			// TODO(ivanpi): Initialize database with fixed-ID examples?
-		}
-	}
-	// Prepare simple database queries.
-	for _, t := range dataTypes {
-		if err := dbh.RegisterType(t, readonly); err != nil {
-			return nil, fmt.Errorf("failed preparing database queries: %v", err)
-		}
-	}
-	return dbh, nil
-}
-
-func initDBHandles() error {
-	if *sqlConf == "" {
-		return nil
-	}
-
-	// Parse SQL configuration file and set up TLS.
-	sqlConfig, err := dbutil.ActivateSqlConfigFromFile(*sqlConf)
-	if err != nil {
-		return err
-	}
-
-	// When setupDB is set, tables should be initialized only once, on the
-	// handle that is opened first.
-	if dbhSeq, err = newDBHandle(sqlConfig, "SERIALIZABLE", dataTypes, *setupDB, false); err != nil {
-		return err
-	}
-	// The READ-COMMITTED connection is used only for reads, so it is not
-	// necessary to prepare writing statements such as INSERT.
-	if dbhRead, err = newDBHandle(sqlConfig, "READ-COMMITTED", dataTypes, false, true); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func checkDBInit(w http.ResponseWriter) bool {
-	if dbhSeq == nil || dbhRead == nil {
-		storageError(w, http.StatusInternalServerError, "Internal error: cannot connect to database.")
-		return false
-	}
-
-	return true
-}
diff --git a/go/src/playground/compilerd/storage/db.go b/go/src/playground/compilerd/storage/db.go
new file mode 100644
index 0000000..9214252
--- /dev/null
+++ b/go/src/playground/compilerd/storage/db.go
@@ -0,0 +1,79 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package storage
+
+import (
+	"fmt"
+
+	_ "github.com/go-sql-driver/mysql"
+	"github.com/jmoiron/sqlx"
+
+	"v.io/x/lib/dbutil"
+)
+
+var (
+	// Database handle with READ_COMMITTED transaction isolation.
+	// Used for non-transactional reads.
+	dbRead *sqlx.DB
+
+	// Database handle with SERIALIZABLE transaction isolation.
+	// Used for read-write transactions.
+	dbSeq *sqlx.DB
+)
+
+// connectDb is a helper method to connect a single database with the given
+// isolation parameter.
+func connectDb(sqlConfig *dbutil.ActiveSqlConfig, isolation string) (*sqlx.DB, error) {
+	// Open db connection from config.
+	conn, err := sqlConfig.NewSqlDBConn(isolation)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create sqlx DB.
+	db := sqlx.NewDb(conn, "mysql")
+
+	// Ping db to check connection.
+	if err := db.Ping(); err != nil {
+		return nil, fmt.Errorf("Error connecting to database: %v", err)
+	}
+	return db, nil
+}
+
+// Connect opens 2 connections to the database, one read-only, and one
+// serializable.
+func Connect(sqlConfig *dbutil.ActiveSqlConfig) (err error) {
+
+	// Data writes for the schema are complex enough to require transactions with
+	// SERIALIZABLE isolation. However, reads do not require SERIALIZABLE. Since
+	// database/sql only allows setting transaction isolation per connection,
+	// a separate connection with only READ-COMMITTED isolation is used for reads
+	// to reduce lock contention and deadlock frequency.
+
+	dbRead, err = connectDb(sqlConfig, "READ-COMMITTED")
+	if err != nil {
+		return err
+	}
+
+	dbSeq, err = connectDb(sqlConfig, "SERIALIZABLE")
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Close closes both databases.
+func Close() error {
+	if err := dbRead.Close(); err != nil {
+		return err
+	}
+
+	if err := dbSeq.Close(); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/go/src/playground/compilerd/storage/migration_test.go b/go/src/playground/compilerd/storage/migration_test.go
new file mode 100644
index 0000000..2c3f523
--- /dev/null
+++ b/go/src/playground/compilerd/storage/migration_test.go
@@ -0,0 +1,95 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests that the migrations succeed up and down.
+//
+// NOTE: These tests cannot be run in parallel on the same machine because they
+// interact with a fixed database on the machine.
+
+package storage
+
+import (
+	"fmt"
+	"testing"
+
+	"v.io/x/lib/dbutil"
+
+	_ "github.com/go-sql-driver/mysql"
+	"github.com/rubenv/sql-migrate"
+)
+
+var (
+	dataSourceName = "playground_test@tcp(localhost:3306)/playground_test?parseTime=true"
+)
+
+// Tests that migrations can be applied to a database and rolled back multiple
+// times.
+func TestMigrationsUpAndDown(t *testing.T) {
+	// TODO(nlacasse): This setup is very similar to the setup() func in
+	// model_test.go. Consider combining them.
+	migrationSource := &migrate.FileMigrationSource{
+		Dir: "../../migrations",
+	}
+	migrate.SetTable("migrations")
+
+	sqlConfig := dbutil.SqlConfig{
+		DataSourceName: dataSourceName,
+		TLSDisable:     true,
+	}
+	activeSqlConfig, err := sqlConfig.Activate("")
+	db, err := activeSqlConfig.NewSqlDBConn("SERIALIZABLE")
+	if err != nil {
+		t.Fatalf("Error opening database: %v", err)
+	}
+
+	// Remove any existing tables.
+	tableNames := []string{"bundle_link", "bundle_data", "migrations"}
+	for _, tableName := range tableNames {
+		db.Exec("DROP TABLE " + tableName)
+	}
+
+	// Run all migrations up and down three times.
+	for i := 0; i < 3; i++ {
+		up, err := migrate.Exec(db, "mysql", migrationSource, migrate.Up)
+		if err != nil {
+			t.Fatalf("Error migrating up: %v", err)
+		}
+		fmt.Printf("Applied %v migrations up.\n", up)
+
+		down, err := migrate.Exec(db, "mysql", migrationSource, migrate.Down)
+		if err != nil {
+			t.Fatalf("Error migrating down: %v", err)
+		}
+		fmt.Printf("Applied %v migrations down.\n", down)
+	}
+
+	// Run each migration up, down, up individually.
+	migrations, err := migrationSource.FindMigrations()
+	if err != nil {
+		t.Fatalf("migrationSource.FindMigrations() failed: %v", err)
+	}
+	for i, migration := range migrations {
+		memMigrationSource := &migrate.MemoryMigrationSource{
+			Migrations: []*migrate.Migration{migration},
+		}
+
+		// Migrate up.
+		if _, err := migrate.Exec(db, "mysql", memMigrationSource, migrate.Up); err != nil {
+			t.Fatalf("Error migrating migration %v up: %v", i, err)
+		}
+		fmt.Printf("Applied migration %v up.\n", i)
+
+		// Migrate down.
+		if _, err := migrate.Exec(db, "mysql", memMigrationSource, migrate.Down); err != nil {
+			t.Fatalf("Error migrating migration %v down: %v", i, err)
+		}
+		fmt.Printf("Applied migration %v down.\n", i)
+
+		// Migrate up.
+		if _, err := migrate.Exec(db, "mysql", memMigrationSource, migrate.Up); err != nil {
+			t.Fatalf("Error migrating migration %v up: %v", i, err)
+		}
+		fmt.Printf("Applied migration %v up.\n", i)
+	}
+}
diff --git a/go/src/playground/compilerd/storage/model.go b/go/src/playground/compilerd/storage/model.go
new file mode 100644
index 0000000..ecd5167
--- /dev/null
+++ b/go/src/playground/compilerd/storage/model.go
@@ -0,0 +1,264 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//=== High-level schema ===
+// Each playground bundle is stored once as a BundleData in the bundle_data
+// table. The BundleData contains the json string corresponding to the bundle
+// files, and is indexed by the hash of the json.
+//
+// Links to a BundleData are stored in BundleLinks. There can be multiple
+// BundleLinks corresponding to a single BundleData. BundleLinks are indexed by
+// a unique id, and contain the hash of the BundleData that they correspond to.
+//
+// The additional layer of indirection provided by BundleLinks allows storing
+// identical bundles more efficiently and makes the bundle Id independent of
+// its contents, allowing implementation of change history, sharing, expiration
+// etc.
+//
+// Each bundle save request first generates and stores a new BundleLink object,
+// and will store a new BundleData only if it does not already exist in the
+// database.
+//
+// Note: If bundles larger than ~1 MiB are to be stored, the max_allowed_packet
+// SQL connection parameter must be increased.
+//
+// TODO(ivanpi): Normalize the Json (e.g. file order).
+
+package storage
+
+import (
+	crand "crypto/rand"
+	"database/sql"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"time"
+
+	_ "github.com/go-sql-driver/mysql"
+	"github.com/jmoiron/sqlx"
+
+	"playground/lib/hash"
+)
+
+var (
+	// Error returned when requested item is not found in the database.
+	ErrNotFound = errors.New("Not found")
+
+	// Error returned when retries are exhausted.
+	errTooManyRetries = errors.New("Too many retries")
+
+	// Error returned from a transaction callback to trigger a rollback and
+	// retry. Other errors cause a rollback and abort.
+	errRetryTransaction = errors.New("Retry transaction")
+)
+
+//////////////////////////////////////////
+// SqlData type definitions
+
+type BundleData struct {
+	// Raw SHA256 of the bundle contents
+	Hash []byte `db:"hash"` // primary key
+	// The bundle contents
+	Json string `db:"json"`
+}
+
+type BundleLink struct {
+	// 64-byte printable ASCII string
+	Id string `db:"id"` // primary key
+	// Raw SHA256 of the bundle contents
+	Hash []byte `db:"hash"` // foreign key => BundleData.Hash
+	// Link record creation time
+	CreatedAt time.Time `db:"created_at"`
+}
+
+///////////////////////////////////////
+// DB read-only methods
+
+// TODO(nlacasse): Use prepared statements, otherwise we have an extra
+// round-trip to the db, which is slow on cloud sql.
+
+func getBundleLinkById(q sqlx.Queryer, id string) (*BundleLink, error) {
+	bLink := BundleLink{}
+	if err := sqlx.Get(q, &bLink, "SELECT * FROM bundle_link WHERE id=?", id); err != nil {
+		if err == sql.ErrNoRows {
+			err = ErrNotFound
+		}
+		return nil, err
+	}
+	return &bLink, nil
+}
+
+func getBundleDataByHash(q sqlx.Queryer, hash []byte) (*BundleData, error) {
+	bData := BundleData{}
+	if err := sqlx.Get(q, &bData, "SELECT * FROM bundle_data WHERE hash=?", hash); err != nil {
+		if err == sql.ErrNoRows {
+			err = ErrNotFound
+		}
+		return nil, err
+	}
+	return &bData, nil
+}
+
+// GetBundleDataByLinkId returns a BundleData object linked to by a BundleLink
+// with a particular id.
+// Note: This can fail if the bundle is deleted between fetching BundleLink
+// and BundleData. However, it is highly unlikely, costly to mitigate (using
+// a serializable transaction), and unimportant (error 500 instead of 404).
+func GetBundleDataByLinkId(id string) (*BundleData, error) {
+	bLink, err := getBundleLinkById(dbRead, id)
+	if err != nil {
+		return nil, err
+	}
+	bData, err := getBundleDataByHash(dbRead, bLink.Hash)
+	if err != nil {
+		return nil, err
+	}
+	return bData, nil
+}
+
+////////////////////////////////////
+// DB write methods
+
+func storeBundleData(ext sqlx.Ext, bData *BundleData) error {
+	_, err := sqlx.NamedExec(ext, "INSERT INTO bundle_data (hash, json) VALUES (:hash, :json)", bData)
+	return err
+}
+
+func storeBundleLink(ext sqlx.Ext, bLink *BundleLink) error {
+	_, err := sqlx.NamedExec(ext, "INSERT INTO bundle_link (id, hash) VALUES (:id, :hash)", bLink)
+	return err
+}
+
+// StoreBundleLinkAndData creates a new bundle data for a given json byte slice
+// if one does not already exist. It will create a new bundle link pointing to
+// that data. All DB access is done in a transaction, which will retry up to 3
+// times. Both the link and the data are returned, or an error if one occurred.
+func StoreBundleLinkAndData(json []byte) (bLink *BundleLink, bData *BundleData, retErr error) {
+	bHashRaw := hash.Raw(json)
+	bHash := bHashRaw[:]
+
+	// Attempt transaction up to 3 times.
+	retErr = runInTransaction(3, func(tx *sqlx.Tx) error {
+		// Generate a random id for the bundle link.
+		id, err := randomLink(bHash)
+		if err != nil {
+			return fmt.Errorf("Error creating link id: %v", err)
+		}
+
+		// Check if bundle link with this id already exists in DB.
+		if _, err := getBundleLinkById(tx, id); err == nil {
+			// Bundle was found. Retry with new id.
+			return errRetryTransaction
+		} else if err != ErrNotFound {
+			return fmt.Errorf("Error getting bundle link: %v", err)
+		}
+
+		// Check if bundle data with this hash already exists in DB.
+		bData, err = getBundleDataByHash(tx, bHash)
+		if err != nil {
+			if err != ErrNotFound {
+				return fmt.Errorf("Error getting bundle data: %v", err)
+			}
+
+			// Bundle does not exist in DB. Store it.
+			bData = &BundleData{
+				Hash: bHash,
+				Json: string(json),
+			}
+			if err = storeBundleData(tx, bData); err != nil {
+				return fmt.Errorf("Error storing bundle data: %v", err)
+			}
+		}
+
+		// Store the bundle link.
+		bLink = &BundleLink{
+			Id:   id,
+			Hash: bHash,
+		}
+		if err = storeBundleLink(tx, bLink); err != nil {
+			return fmt.Errorf("Error storing bundle link: %v", err)
+		}
+
+		return nil
+	})
+
+	return
+}
+
+//////////////////////////////////////////
+// Transaction support
+
+// Runs function txf inside a SQL transaction. txf should only use the database
+// handle passed to it, which shares the prepared transaction cache with the
+// original handle. If txf returns nil, the transaction is committed.
+// Otherwise, it is rolled back.
+// txf is retried at most maxRetries times, with a fresh transaction for every
+// attempt, until the commit is successful. txf should not have side effects
+// that could affect subsequent retries (apart from database operations, which
+// are rolled back).
+// If the error returned from txf is errRetryTransaction, txf is retried as if
+// the commit failed. Otherwise, txf is not retried, and runInTransaction
+// returns the error.
+// In rare cases, txf may be retried even if the transaction was successfully
+// committed (when commit falsely returns an error). txf should be idempotent
+// or able to detect this case.
+// If maxRetries is exhausted, runInTransaction returns errTooManyRetries.
+// Nested transactions are not supported and result in undefined behaviour.
+// Inspired by https://cloud.google.com/appengine/docs/go/datastore/reference#RunInTransaction
+func runInTransaction(maxRetries int, txf func(tx *sqlx.Tx) error) error {
+	for i := 0; i < maxRetries; i++ {
+		err := attemptInTransaction(txf)
+		if err == nil {
+			return nil
+		} else if err != errRetryTransaction {
+			return err
+		}
+	}
+	return errTooManyRetries
+}
+
+func attemptInTransaction(txf func(tx *sqlx.Tx) error) (rerr error) {
+	tx, err := dbSeq.Beginx()
+	if err != nil {
+		return fmt.Errorf("Failed opening transaction: %v", err)
+	}
+	defer func() {
+		// UPSTREAM BUG WORKAROUND: Rollback anyway to release transaction after
+		// manual commit.
+		//if rerr != nil {
+		if true {
+			// Silently ignore rollback error, we cannot do anything. Transaction
+			// will timeout eventually.
+			// UPSTREAM BUG: Transaction does not timeout, connection gets reused.
+			// This case is unlikely, but dangerous.
+			// TODO(ivanpi): Remove workaround when bug is resolved.
+			_ = tx.Rollback()
+		}
+	}()
+	// Call txf with the transaction handle - a shallow copy of the database
+	// handle (sharing the mutex, database connection, queries) with the
+	// transaction object added.
+	if err := txf(tx); err != nil {
+		return err
+	}
+	// UPSTREAM BUG WORKAROUND: Commit manually.
+	//if err = tx.Commit(); err != nil {
+	if _, err = tx.Exec("COMMIT"); err != nil {
+		return errRetryTransaction
+	}
+	return nil
+}
+
+////////////////////////////////////////////
+// Helper methods
+
+// randomLink creates a random link id for a given hash.
+func randomLink(bHash []byte) (string, error) {
+	h := make([]byte, 16, 16+len(bHash))
+	err := binary.Read(crand.Reader, binary.LittleEndian, h)
+	if err != nil {
+		return "", fmt.Errorf("RNG failed: %v", err)
+	}
+	return "_" + hash.String(append(h, bHash...))[1:], nil
+}
diff --git a/go/src/playground/compilerd/storage/model_test.go b/go/src/playground/compilerd/storage/model_test.go
new file mode 100644
index 0000000..12b580d
--- /dev/null
+++ b/go/src/playground/compilerd/storage/model_test.go
@@ -0,0 +1,150 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests for the storage model.
+// These tests only test the exported API of the storage model.
+//
+// NOTE: These tests cannot be run in parallel on the same machine because they
+// interact with a fixed database on the machine.
+
+package storage_test
+
+import (
+	"fmt"
+	"testing"
+
+	_ "github.com/go-sql-driver/mysql"
+	"github.com/rubenv/sql-migrate"
+
+	"v.io/x/lib/dbutil"
+
+	"playground/compilerd/storage"
+)
+
+var (
+	// dataSourceName is the DSN of the local test database created by
+	// sql_test_setup.sh. parseTime=true makes the MySQL driver scan
+	// TIMESTAMP/DATETIME columns into time.Time values.
+	dataSourceName = "playground_test@tcp(localhost:3306)/playground_test?parseTime=true"
+)
+
+// setup cleans the test database, runs all migrations from scratch, and
+// connects the storage package to the database. It returns a teardown
+// function that closes the storage connection; call it as `defer setup(t)()`.
+func setup(t *testing.T) func() {
+	// Drop any stale tables, then migrate up from a clean slate.
+	migrations := &migrate.FileMigrationSource{
+		Dir: "../../migrations",
+	}
+	migrate.SetTable("migrations")
+
+	sqlConfig := dbutil.SqlConfig{
+		DataSourceName: dataSourceName,
+		TLSDisable:     true,
+	}
+	activeSqlConfig, err := sqlConfig.Activate("")
+	if err != nil {
+		t.Fatalf("Error activating SQL config: %v", err)
+	}
+
+	db, err := activeSqlConfig.NewSqlDBConn("SERIALIZABLE")
+	if err != nil {
+		t.Fatalf("Error opening database: %v", err)
+	}
+
+	// Remove any existing tables. Errors are deliberately ignored: the tables
+	// may not exist on a fresh database. bundle_link is dropped before
+	// bundle_data because it holds a foreign key into bundle_data.
+	tableNames := []string{"bundle_link", "bundle_data", "migrations"}
+	for _, tableName := range tableNames {
+		db.Exec("DROP TABLE " + tableName)
+	}
+
+	if _, err = migrate.Exec(db, "mysql", migrations, migrate.Up); err != nil {
+		t.Fatalf("Error migrating up: %v", err)
+	}
+	if err := db.Close(); err != nil {
+		t.Fatalf("db.Close() failed: %v", err)
+	}
+
+	// Connect to the storage.
+	if err := storage.Connect(activeSqlConfig); err != nil {
+		t.Fatalf("storage.Connect(%v) failed: %v", activeSqlConfig, err)
+	}
+
+	teardown := func() {
+		if err := storage.Close(); err != nil {
+			t.Fatalf("storage.Close() failed: %v", err)
+		}
+	}
+	return teardown
+}
+
+// TestGetBundleDataByLinkId verifies that looking up bundle data by link id
+// returns ErrNotFound for an unknown id, and returns the originally stored
+// JSON for a bundle stored via StoreBundleLinkAndData.
+func TestGetBundleDataByLinkId(t *testing.T) {
+	defer setup(t)()
+
+	// Get with an unknown id should return ErrNotFound.
+	id := "foobar"
+	if _, err := storage.GetBundleDataByLinkId(id); err != storage.ErrNotFound {
+		t.Errorf("Expected GetBundleDataByLinkId with unknown id to return ErrNotFound, but instead got %v", err)
+	}
+
+	// Add a bundle.
+	json := []byte("mock_json_data")
+	bLink, _, err := storage.StoreBundleLinkAndData(json)
+	if err != nil {
+		t.Fatalf("Expected StoreBundleLinkAndData(%v) not to error, but got %v", json, err)
+	}
+
+	// Bundle should exist.
+	gotBdata, err := storage.GetBundleDataByLinkId(bLink.Id)
+	if err != nil {
+		t.Errorf("Expected GetBundleDataByLinkId(%v) not to error, but got %v", bLink.Id, err)
+	}
+
+	// Bundle should have expected json.
+	if got, want := gotBdata.Json, string(json); got != want {
+		t.Errorf("Expected %v to equal %v.", got, want)
+	}
+}
+
+// assertValidLinkDataPair checks the invariants of a stored (link, data)
+// pair: the link and data hashes match, the link has a non-empty id, and the
+// data carries the expected JSON payload. Returns nil if all hold.
+func assertValidLinkDataPair(json string, bLink *storage.BundleLink, bData *storage.BundleData) error {
+	if string(bLink.Hash) != string(bData.Hash) {
+		return fmt.Errorf("Expected %v to equal %v", string(bLink.Hash), string(bData.Hash))
+	}
+
+	if bLink.Id == "" {
+		return fmt.Errorf("Expected bundle link to have id.")
+	}
+
+	if bData.Json != json {
+		return fmt.Errorf("Expected %v to equal %v", bData.Json, json)
+	}
+	return nil
+}
+
+// TestStoreBundleLinkAndData verifies that storing the same JSON twice
+// produces two valid link/data pairs that share the same data hash but have
+// distinct link ids.
+func TestStoreBundleLinkAndData(t *testing.T) {
+	defer setup(t)()
+
+	mockJson := []byte("bizbaz")
+
+	// Storing the json once should succeed.
+	bLink1, bData1, err := storage.StoreBundleLinkAndData(mockJson)
+	if err != nil {
+		t.Fatalf("StoreBundleLinkAndData(%v) failed: %v", mockJson, err)
+	}
+	if err := assertValidLinkDataPair(string(mockJson), bLink1, bData1); err != nil {
+		t.Fatalf("Got invalid link data pair: %v", err)
+	}
+
+	// Storing the bundle again should succeed.
+	bLink2, bData2, err := storage.StoreBundleLinkAndData(mockJson)
+	if err != nil {
+		t.Fatalf("StoreBundleLinkAndData(%v) failed: %v", mockJson, err)
+	}
+	if err := assertValidLinkDataPair(string(mockJson), bLink2, bData2); err != nil {
+		// Fatalf (not Error with a format verb) keeps this consistent with the
+		// first-store check above and actually formats err.
+		t.Fatalf("Got invalid link data pair: %v", err)
+	}
+
+	// Bundle links should have different ids.
+	if bLink1.Id == bLink2.Id {
+		t.Errorf("Expected bundle links to have different ids, but got %v and %v", bLink1.Id, bLink2.Id)
+	}
+
+	// Bundle datas should have equal hashes.
+	if want, got := string(bData1.Hash), string(bData2.Hash); want != got {
+		t.Errorf("Expected bundle datas to have equal hashes, but got %v and %v", want, got)
+	}
+}
diff --git a/go/src/playground/config/migrate.yml b/go/src/playground/config/migrate.yml
new file mode 100644
index 0000000..84dc6e8
--- /dev/null
+++ b/go/src/playground/config/migrate.yml
@@ -0,0 +1,12 @@
+# Environments for the sql-migrate tool; select one with the -env flag,
+# e.g. `sql-migrate up -env=development`.
+development:
+    dialect: mysql
+    datasource: playground:@tcp(localhost:3306)/playground?parseTime=true
+    dir: ./migrations
+    table: migrations
+
+# Replace the <...> placeholders with real production credentials before use.
+production:
+    dialect: mysql
+    datasource: <username>:<password>@tcp(<ip_address>:3306)/<database>
+    dir: ./migrations
+    table: migrations
+
diff --git a/go/src/playground/lib/hash/hash.go b/go/src/playground/lib/hash/hash.go
new file mode 100644
index 0000000..e7f76a7
--- /dev/null
+++ b/go/src/playground/lib/hash/hash.go
@@ -0,0 +1,19 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hash
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+)
+
+// Raw returns the SHA-256 digest of data as a 32-byte array.
+func Raw(data []byte) [32]byte {
+	return sha256.Sum256(data)
+}
+
+// String returns the SHA-256 digest of data as a lowercase hex string
+// (64 characters).
+func String(data []byte) string {
+	hv := Raw(data)
+	return hex.EncodeToString(hv[:])
+}
diff --git a/go/src/playground/lib/lsql/doc.go b/go/src/playground/lib/lsql/doc.go
deleted file mode 100644
index 0eb5e40..0000000
--- a/go/src/playground/lib/lsql/doc.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2015 The Vanadium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package lsql implements a utility wrapper around the database/sql library.
-//
-// It simplifies common operations on data entities. An entity type corresponds
-// to a SQL table, and an instance of the type corresponds to a single row in
-// the table.
-// Each entity type must implement the SqlData interface, which specifies the
-// table schema and mapping of entity instance fields to SQL table columns.
-// Each entity must have exactly one primary key. More complex relations, such
-// as foreign key constraints, need to be handled with care (for example,
-// executing statements in the correct order).
-//
-// The library supports creating database tables from the schema, building
-// query/statement strings for common operations such as SELECT and INSERT,
-// caching prepared queries keyed by labels and entity types, and transactional
-// execution. The most common CRUD operations are supported directly by
-// automatically prepared queries and helper functions.
-package lsql
diff --git a/go/src/playground/lib/lsql/handle.go b/go/src/playground/lib/lsql/handle.go
deleted file mode 100644
index c7d83b6..0000000
--- a/go/src/playground/lib/lsql/handle.go
+++ /dev/null
@@ -1,259 +0,0 @@
-// Copyright 2015 The Vanadium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// lsql database connection wrapper providing prepared statement caching with
-// transaction support and simple CRUD operations over SqlData types.
-//
-// DBHandle wraps a SQL database connection. It provides Prepare*() and Get*()
-// methods, which allow saving and retrieving prepared SQL statements keyed by
-// a label and, optionally, SqlData type.
-// E*() methods are convenience methods for common CRUD operations on a SqlData
-// entity. The entity type must be registered beforehand using RegisterType()
-// and the corresponding table created.
-// RunInTransaction() supports executing a sequence of operations inside of a
-// transaction with retry on failure support.
-
-package lsql
-
-import (
-	"database/sql"
-	"errors"
-	"fmt"
-	"sync"
-)
-
-var (
-	// Error returned by EFetch when no entity is found for the given key.
-	ErrNoSuchEntity = errors.New("lsql: no such entity")
-	// Error returned by RunInTransaction when retries are exhausted.
-	ErrTooManyRetries = errors.New("lsql: too many retries")
-	// Error returned by EInsert and EUpdate when no rows are affected.
-	// Note: In case of EUpdate, this can happen if the entity had been deleted,
-	// but also (depending on database configuration) if the entity was unchanged
-	// by the update.
-	ErrNoRowsAffected = errors.New("lsql: no rows affected")
-	// Error that should be returned from a transaction callback to trigger a
-	// rollback and retry. Other errors cause a rollback and abort.
-	RetryTransaction = errors.New("lsql: retry transaction")
-)
-
-// SQL database handle storing prepared statements.
-// Initialize using NewDBHandle.
-type DBHandle struct {
-	dbc *sql.DB
-	tx  *sql.Tx
-
-	mu      *sync.RWMutex
-	queries map[string]*sql.Stmt
-}
-
-func NewDBHandle(dbc *sql.DB) *DBHandle {
-	return &DBHandle{
-		dbc:     dbc,
-		mu:      &sync.RWMutex{},
-		queries: make(map[string]*sql.Stmt),
-	}
-}
-
-//////////////////////////////////////////
-// Transaction support
-
-// Runs function txf inside a SQL transaction. txf should only use the database
-// handle passed to it, which shares the prepared transaction cache with the
-// original handle. If txf returns nil, the transaction is committed.
-// Otherwise, it is rolled back.
-// txf is retried at most maxRetries times, with a fresh transaction for every
-// attempt, until the commit is successful. txf should not have side effects
-// that could affect subsequent retries (apart from database operations, which
-// are rolled back).
-// If the error returned from txf is RetryTransaction, txf is retried as if the
-// commit failed. Otherwise, txf is not retried, and RunInTransaction returns
-// the error.
-// In rare cases, txf may be retried even if the transaction was successfully
-// committed (when commit falsely returns an error). txf should be idempotent
-// or able to detect this case.
-// If maxRetries is exhausted, RunInTransaction returns ErrTooManyRetries.
-// Nested transactions are not supported and result in undefined behaviour.
-// Inspired by https://cloud.google.com/appengine/docs/go/datastore/reference#RunInTransaction
-func (h *DBHandle) RunInTransaction(maxRetries int, txf func(txh *DBHandle) error) error {
-	for i := 0; i < maxRetries; i++ {
-		err := h.attemptInTransaction(txf)
-		if err == nil {
-			return nil
-		} else if err != RetryTransaction {
-			return err
-		}
-	}
-	return ErrTooManyRetries
-}
-
-func (h *DBHandle) attemptInTransaction(txf func(txh *DBHandle) error) (rerr error) {
-	tx, err := h.dbc.Begin()
-	if err != nil {
-		return fmt.Errorf("lsql: failed opening transaction: %v", err)
-	}
-	defer func() {
-		// UPSTREAM BUG WORKAROUND: Rollback anyway to release transaction after
-		// manual commit.
-		//if rerr != nil {
-		if true {
-			// Silently ignore rollback error, we cannot do anything. Transaction
-			// will timeout eventually.
-			// UPSTREAM BUG: Transaction does not timeout, connection gets reused.
-			// This case is unlikely, but dangerous.
-			// TODO(ivanpi): Remove workaround when bug is resolved.
-			_ = tx.Rollback()
-		}
-	}()
-	// Call txf with the transaction handle - a shallow copy of the database
-	// handle (sharing the mutex, database connection, queries) with the
-	// transaction object added.
-	err = txf(&DBHandle{
-		dbc:     h.dbc,
-		mu:      h.mu,
-		queries: h.queries,
-		tx:      tx,
-	})
-	if err != nil {
-		return err
-	}
-	// UPSTREAM BUG WORKAROUND: Commit manually.
-	//if err = tx.Commit(); err != nil {
-	if _, err = tx.Exec("COMMIT"); err != nil {
-		return RetryTransaction
-	}
-	return nil
-}
-
-//////////////////////////////////////////
-// Prepared statement caching support
-
-// Prepares the SQL statement/query and saves it under the provided label.
-func (h *DBHandle) Prepare(label, query string) error {
-	return h.prepareInternal("c:"+label, query)
-}
-
-// Retrieves the previously prepared SQL statement/query for the label.
-func (h *DBHandle) Get(label string) *sql.Stmt {
-	return h.getInternal("c:" + label)
-}
-
-// PrepareFor and GetFor are equivalent to Prepare and Get, with the label
-// interpreted in the namespace of the SqlData type.
-
-func (h *DBHandle) PrepareFor(proto SqlData, label, query string) error {
-	return h.prepareInternal("t:"+proto.TableName()+":"+label, query)
-}
-
-func (h *DBHandle) GetFor(proto SqlData, label string) *sql.Stmt {
-	return h.getInternal("t:" + proto.TableName() + ":" + label)
-}
-
-func (h *DBHandle) prepareInternal(label, query string) error {
-	stmt, err := h.dbc.Prepare(query)
-	if err != nil {
-		return err
-	}
-	h.mu.Lock()
-	defer h.mu.Unlock()
-	h.queries[label] = stmt
-	return nil
-}
-
-func (h *DBHandle) getInternal(label string) (stmt *sql.Stmt) {
-	if h.tx != nil {
-		defer func() {
-			stmt = h.tx.Stmt(stmt)
-		}()
-	}
-	h.mu.RLock()
-	defer h.mu.RUnlock()
-	return h.queries[label]
-}
-
-//////////////////////////////////////////
-// SqlData common operation support
-
-// Creates the table for the given SqlData prototype.
-func (h *DBHandle) CreateTable(proto SqlData, ifNotExists bool, stmtSuffix string) error {
-	_, err := h.dbc.Exec(proto.TableDef().GetCreateTable(ifNotExists) + " " + stmtSuffix)
-	return err
-}
-
-// Prepares common operation queries/statements for the given SqlData type.
-// If readonly is set, prepares only reading queries, not writing statements.
-func (h *DBHandle) RegisterType(proto SqlData, readonly bool) error {
-	t := proto.TableDef()
-	if err := h.PrepareFor(proto, "fetch", t.GetSelectQuery(t.GetWhereKey())); err != nil {
-		return err
-	}
-	if err := h.PrepareFor(proto, "exists", t.GetCountQuery(t.GetWhereKey())); err != nil {
-		return err
-	}
-	if !readonly {
-		if err := h.PrepareFor(proto, "insert", t.GetInsertQuery()); err != nil {
-			return err
-		}
-		if err := h.PrepareFor(proto, "update", t.GetUpdateQuery(t.GetWhereKey())); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// Functions below execute common operations for the SqlData type.
-// The type must be registered with RegisterType beforehand.
-
-// Fetches the entity with the given key and stores it into dst.
-// If no entity for key is found, returns ErrNoSuchEntity.
-func (h *DBHandle) EFetch(key interface{}, dst SqlData) error {
-	err := h.GetFor(dst, "fetch").QueryRow(key).Scan(dst.QueryRefs()...)
-	if err == sql.ErrNoRows {
-		err = ErrNoSuchEntity
-	}
-	return err
-}
-
-// Checks if an entity of the given SqlData prototype with the given key
-// exists in the database.
-func (h *DBHandle) EExists(key interface{}, proto SqlData) (bool, error) {
-	var cnt int
-	err := h.GetFor(proto, "exists").QueryRow(key).Scan(&cnt)
-	return cnt > 0, err
-}
-
-// Inserts the entity stored in src into the database.
-func (h *DBHandle) EInsert(src SqlData) error {
-	res, err := h.GetFor(src, "insert").Exec(src.QueryRefs()...)
-	if err == nil {
-		err = checkOneRowAffected(res)
-	}
-	return err
-}
-
-// Updates the database record for the entity stored in src.
-func (h *DBHandle) EUpdate(src SqlData) error {
-	qrefs := src.QueryRefs()
-	// Move primary key reference to the end for WHERE clause.
-	qrefs = append(qrefs[1:], qrefs[0])
-	res, err := h.GetFor(src, "update").Exec(qrefs...)
-	if err == nil {
-		err = checkOneRowAffected(res)
-	}
-	return err
-}
-
-func checkOneRowAffected(res sql.Result) error {
-	raf, err := res.RowsAffected()
-	if err != nil {
-		return fmt.Errorf("lsql: failed checking number of rows affected: %v", err)
-	}
-	if raf > 1 {
-		return fmt.Errorf("lsql: more than one row affected")
-	}
-	if raf == 0 {
-		return ErrNoRowsAffected
-	}
-	return nil
-}
diff --git a/go/src/playground/lib/lsql/model.go b/go/src/playground/lib/lsql/model.go
deleted file mode 100644
index 0d1a601..0000000
--- a/go/src/playground/lib/lsql/model.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2015 The Vanadium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// lsql support for representing an entity type as a SQL table.
-//
-// Each entity type (for example, a struct) corresponding to a single SQL
-// table should implement the SqlData interface, specifying the table name,
-// definition of all table columns and constraints (as a SqlTable), and
-// mapping of entity instance data fields to columns.
-//
-// The SqlTable::Get*() methods generate SQL query/statement strings for
-// creating the table and other common SQL operations over the specified
-// type. Generated strings and string fragments are in prepared statement
-// format (with question mark placeholders for data values). Default values
-// and projection are not supported.
-
-package lsql
-
-import (
-	"strings"
-)
-
-// An entity type representable as a SQL table row.
-// Should be implemented by each type to be persisted in a SQL table via lsql.
-type SqlData interface {
-	// SQL table name (must be a valid SQL identifier).
-	// Return value must depend only on entity type, not entity instance.
-	TableName() string
-	// SQL table definition (refer to NewSqlTable).
-	// Return value must depend only on entity type, not entity instance.
-	TableDef() *SqlTable
-	// Array of references to entity instance data fields mapping to SQL columns,
-	// in the same order as specified in the table definition. Every column must
-	// have a corresponding reference of the appropriate type.
-	QueryRefs() []interface{}
-}
-
-//////////////////////////////////////////
-// Table schema description
-
-// Read-only specification of a SQL column.
-type SqlColumn struct {
-	// Column name (must be a valid SQL identifier).
-	Name string
-	// Column SQL type, as specified in a CREATE TABLE statement.
-	Type string
-	// Whether the column is allowed to be NULL.
-	Null bool
-}
-
-// Initialize using NewSqlTable.
-type SqlTable struct {
-	tableName   string
-	columns     []SqlColumn
-	constraints []string
-}
-
-// proto: Instance of the corresponding data entity.
-// columns: SqlColumn specifications for each column.
-// The first column is used as the primary key.
-// constraints: Table constraints in SQL syntax (excluding PRIMARY KEY).
-func NewSqlTable(proto SqlData, columns []SqlColumn, constraints []string) *SqlTable {
-	return &SqlTable{
-		tableName:   proto.TableName(),
-		columns:     columns,
-		constraints: constraints,
-	}
-}
-
-func (t *SqlTable) TableName() string {
-	return t.tableName
-}
-
-func (t *SqlTable) KeyName() string {
-	return t.columns[0].Name
-}
-
-// List of column names.
-func (t *SqlTable) ColumnNames() []string {
-	colNames := make([]string, 0, len(t.columns))
-	for _, c := range t.columns {
-		colNames = append(colNames, c.Name)
-	}
-	return colNames
-}
-
-func (c *SqlColumn) getColumnSpec() string {
-	nullSpec := "NULL"
-	if !c.Null {
-		nullSpec = "NOT NULL"
-	}
-	return c.Name + " " + c.Type + " " + nullSpec
-}
-
-//////////////////////////////////////////
-// Statement/query string generation
-
-// SQL statement to create the table.
-func (t *SqlTable) GetCreateTable(ifNotExists bool) string {
-	createCmd := "CREATE TABLE "
-	if ifNotExists {
-		createCmd += "IF NOT EXISTS "
-	}
-	clauses := make([]string, 0, len(t.columns)+1+len(t.constraints))
-	for _, c := range t.columns {
-		clauses = append(clauses, c.getColumnSpec())
-	}
-	clauses = append(clauses, "PRIMARY KEY ("+t.KeyName()+")")
-	clauses = append(clauses, t.constraints...)
-	return createCmd + t.TableName() + " ( " + strings.Join(clauses, ", ") + " )"
-}
-
-// Get* methods below return SQL query/statement strings with placeholders for
-// common CRUD operations over the corresponding type.
-
-// WHERE constraint fragment for matching primary key.
-func (t *SqlTable) GetWhereKey() string {
-	return "(" + t.KeyName() + "=?)"
-}
-
-// SELECT query for entities matching WHERE clause. No projection.
-func (t *SqlTable) GetSelectQuery(whereClause string) string {
-	query := "SELECT " + strings.Join(t.ColumnNames(), ",") + " FROM " + t.TableName()
-	return appendWhere(query, whereClause)
-}
-
-// COUNT query for entities matching WHERE clause.
-func (t *SqlTable) GetCountQuery(whereClause string) string {
-	query := "SELECT COUNT(" + t.KeyName() + ") FROM " + t.TableName()
-	return appendWhere(query, whereClause)
-}
-
-// INSERT query for entity. Default values not supported, all columns mapped.
-func (t *SqlTable) GetInsertQuery() string {
-	colNames := t.ColumnNames()
-	query := "INSERT INTO " + t.TableName() +
-		" (" + strings.Join(colNames, ",") + ")" +
-		" VALUES (?" + strings.Repeat(",?", len(colNames)-1) + ")"
-	return query
-}
-
-// UPDATE query for entity. Updates all columns (except the primary key).
-func (t *SqlTable) GetUpdateQuery(whereClause string) string {
-	query := "UPDATE " + t.TableName() + " SET "
-	colUps := t.ColumnNames()[1:]
-	for i := range colUps {
-		colUps[i] += "=?"
-	}
-	query += strings.Join(colUps, ",")
-	return appendWhere(query, whereClause)
-}
-
-func appendWhere(query, whereClause string) string {
-	return query + " WHERE (" + whereClause + ")"
-}
diff --git a/go/src/playground/migrations/001-create-tables.sql b/go/src/playground/migrations/001-create-tables.sql
new file mode 100644
index 0000000..8e6626b
--- /dev/null
+++ b/go/src/playground/migrations/001-create-tables.sql
@@ -0,0 +1,18 @@
+-- +migrate Up
+
+-- bundle_data stores each unique bundle payload, keyed by a 32-byte content
+-- hash of its JSON.
+CREATE TABLE bundle_data (
+	hash BINARY(32) NOT NULL PRIMARY KEY,
+	json MEDIUMTEXT NOT NULL
+) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci;
+
+-- bundle_link maps a shareable link id to a bundle. If the referenced
+-- bundle_data row is deleted, hash is set to NULL rather than cascading.
+CREATE TABLE bundle_link (
+	id CHAR(64) CHARACTER SET ascii NOT NULL PRIMARY KEY,
+	hash BINARY(32),
+	created_at TIMESTAMP NOT NULL DEFAULT now(),
+	FOREIGN KEY (hash) REFERENCES bundle_data(hash) ON DELETE SET NULL
+) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci;
+
+-- +migrate Down
+
+-- Drop bundle_link first: it holds the foreign key into bundle_data.
+DROP TABLE bundle_link;
+DROP TABLE bundle_data;
diff --git a/go/src/playground/sql_test_setup.sh b/go/src/playground/sql_test_setup.sh
new file mode 100755
index 0000000..f69e223
--- /dev/null
+++ b/go/src/playground/sql_test_setup.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+# Copyright 2015 The Vanadium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Creates a playground_test database and playground_test user with access
+# permissions to that database.
+#
+# The -p flag makes mysql prompt interactively for the root password.
+
+mysql -u root -p -e "CREATE DATABASE IF NOT EXISTS playground_test; \
+	GRANT ALL PRIVILEGES ON playground_test.* TO 'playground_test'@'localhost';"