playground: Add basic bundle save/load support to compilerd.

When a database connection is configured, compilerd accepts POST
requests at /save to store a bundle under a randomly generated ID, and
GET requests at /load to retrieve the bundle saved under a given ID.
Includes a lightweight SQL library (lib/lsql) for database setup and
common operations.
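
A rough sketch of client usage in Go (the server address and bundle
contents below are illustrative, not defaults from this change;
responses are JSON-encoded StorageResponse values with Error, Link and
Data fields):

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "net/http"
    )

    type storageResponse struct {
        Error, Link, Data string
    }

    func main() {
        bundle := []byte(`{"files":[]}`) // placeholder bundle JSON
        // Save the bundle; Link in the response holds the generated ID.
        resp, err := http.Post("http://localhost:8181/save",
            "application/json", bytes.NewReader(bundle))
        if err != nil {
            panic(err)
        }
        var saved storageResponse
        json.NewDecoder(resp.Body).Decode(&saved)
        resp.Body.Close()

        // Load the bundle back by ID; Data holds the saved contents.
        resp, err = http.Get("http://localhost:8181/load?id=" + saved.Link)
        if err != nil {
            panic(err)
        }
        var loaded storageResponse
        json.NewDecoder(resp.Body).Decode(&loaded)
        resp.Body.Close()
        fmt.Println(loaded.Data)
    }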

Change-Id: I952f1ed1904f0b45c5292818a2b470ec468d12a8
diff --git a/compilerd/compile.go b/compilerd/compile.go
index 4ceb271..e110990 100644
--- a/compilerd/compile.go
+++ b/compilerd/compile.go
@@ -1,3 +1,10 @@
+// Handler for HTTP requests to compile and run playground examples.
+//
+// handlerCompile() handles a POST request with bundled example source code.
+// The bundle is passed to the builder command, which is run inside a Docker
+// sandbox. Builder output is streamed back to the client in realtime and
+// cached.
+
 package main
 
 import (
@@ -19,11 +26,6 @@
 	"v.io/playground/lib/event"
 )
 
-type CachedResponse struct {
-	Status int
-	Events []event.Event
-}
-
 var (
 	useDocker = flag.Bool("use-docker", true, "Whether to use Docker to run builder; if false, we run the builder directly.")
 
@@ -39,6 +41,9 @@
 	cache = lru.New(10000)
 )
 
+//////////////////////////////////////////
+// HTTP request handler
+
 // POST request that compiles and runs the bundle and streams output to client.
 func handlerCompile(w http.ResponseWriter, r *http.Request) {
 	if !handleCORS(w, r) {
@@ -225,6 +230,14 @@
 	}
 }
 
+//////////////////////////////////////////
+// Event write and cache support
+
+type CachedResponse struct {
+	Status int
+	Events []event.Event
+}
+
 // Each line written to the returned writer, up to limit bytes total, is parsed
 // into an Event and written to Sink.
 // If the limit is reached or an invalid line read, the corresponding callback
@@ -302,6 +315,9 @@
 	return events
 }
 
+//////////////////////////////////////////
+// Miscellaneous helper functions
+
 func docker(args ...string) *exec.Cmd {
 	fullArgs := []string{"docker"}
 	fullArgs = append(fullArgs, args...)
diff --git a/compilerd/main.go b/compilerd/main.go
index 6437979..ad1c432 100644
--- a/compilerd/main.go
+++ b/compilerd/main.go
@@ -1,3 +1,5 @@
+// HTTP server for saving, loading, and executing playground examples.
+
 package main
 
 import (
@@ -5,6 +7,7 @@
 	crand "crypto/rand"
 	"crypto/sha256"
 	"encoding/binary"
+	"encoding/hex"
 	"flag"
 	"fmt"
 	"log"
@@ -47,7 +50,8 @@
 	return nil
 }
 
-//// HTTP server
+//////////////////////////////////////////
+// HTTP server
 
 func healthz(w http.ResponseWriter, r *http.Request) {
 	select {
@@ -75,8 +79,14 @@
 		go waitForShutdown(time.Minute * time.Duration(delay_min))
 	}
 
+	if err := initDBHandles(); err != nil {
+		log.Fatal(err)
+	}
+
 	http.HandleFunc("/healthz", healthz)
 	http.HandleFunc("/compile", handlerCompile)
+	http.HandleFunc("/load", handlerLoad)
+	http.HandleFunc("/save", handlerSave)
 
 	log.Printf("Serving %s\n", *address)
 	http.ListenAndServe(*address, nil)
@@ -123,7 +133,8 @@
 	os.Exit(0)
 }
 
-//// HTTP request helpers
+//////////////////////////////////////////
+// HTTP request helpers
 
 // Handles CORS options and pre-flight requests.
 // Returns false iff response processing should not continue.
@@ -145,6 +156,17 @@
 	return true
 }
 
+// Checks if the GET method was used.
+// Returns false iff response processing should not continue.
+func checkGetMethod(w http.ResponseWriter, r *http.Request) bool {
+	if r.Method != "GET" {
+		w.WriteHeader(http.StatusBadRequest)
+		return false
+	}
+
+	return true
+}
+
 // Checks if the POST method was used and returns the request body.
 // Returns nil iff response processing should not continue.
 func getPostBody(w http.ResponseWriter, r *http.Request) []byte {
@@ -158,7 +180,13 @@
 	return buf.Bytes()
 }
 
-//// Shared helper functions
+//////////////////////////////////////////
+// Shared helper functions
+
+func stringHash(data []byte) string {
+	hv := rawHash(data)
+	return hex.EncodeToString(hv[:])
+}
 
 func rawHash(data []byte) [32]byte {
 	return sha256.Sum256(data)
diff --git a/compilerd/storage.go b/compilerd/storage.go
new file mode 100644
index 0000000..e6bde9c
--- /dev/null
+++ b/compilerd/storage.go
@@ -0,0 +1,373 @@
+// Handlers for HTTP requests to save and load playground examples.
+//
+// handlerSave() handles a POST request with bundled playground source code.
+// The bundle is persisted in a database and a unique ID returned.
+// handlerLoad() handles a GET request with an id parameter. It returns the
+// bundle saved under the provided ID, if any.
+// The current implementation uses a MySQL-like SQL database for persistence.
+
+package main
+
+import (
+	crand "crypto/rand"
+	"encoding/binary"
+	"encoding/hex"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"log"
+	"net/http"
+
+	"v.io/playground/lib"
+	"v.io/playground/lib/lsql"
+)
+
+var (
+	// Database connection string as specified in
+	// https://github.com/go-sql-driver/mysql/#dsn-data-source-name
+	// Query parameters are not supported.
+	sqlConf = flag.String("sqlconf", "", "The go-sql-driver database connection string. If empty, load and save requests are disabled.")
+
+	// Testing parameter, use default value for production.
+	// Name of dataset to use. Used as table name prefix (a single SQL database
+	// can contain multiple datasets).
+	dataset = flag.String("dataset", "pg", "Testing: Name of dataset to use (and create if needed, when allowed by setupdb).")
+
+	// If set, will attempt to create any missing database tables for the given
+	// dataset. Idempotent.
+	setupDB = flag.Bool("setupdb", false, "Whether to create missing database tables for dataset.")
+)
+
+//////////////////////////////////////////
+// SqlData type definitions
+
+// === High-level schema ===
+// Each bundle save request generates a unique BundleID.
+// Every BundleID corresponds to exactly one bundle Json, stored as a Unicode
+// text blob. However, a single bundle Json can correspond to an unlimited
+// number of BundleIDs.
+// BundleIDs are mapped to Jsons via the BundleHash, which is a hash value of
+// the Json. The bundleLink table stores records of BundleID (primary key) and
+// BundleHash. The bundleData table stores records of BundleHash (primary key)
+// and Json. The bundleLink.BundleHash column references bundleData.BundleHash,
+// but can be set to NULL to allow deletion of bundleData entries.
+// The additional layer of indirection allows storing identical bundles more
+// efficiently and makes the bundle ID independent of its contents, allowing
+// implementation of change history, sharing, expiration etc.
+// TODO(ivanpi): Revisit ON DELETE SET NULL when delete functionality is added.
+// === Schema type details ===
+// The BundleID is a 64-character string consisting of URL-friendly characters
+// (alphanumeric, '-', '_', '.', '~'), beginning with an underscore.
+// The BundleHash is a raw (sequence of 32 bytes) SHA256 hash.
+// The Json is a MEDIUMTEXT (up to 16 MiB) Unicode (utf8mb4) blob.
+// Note: If bundles larger than ~1 MiB are to be stored, the max_allowed_packet
+// SQL connection parameter must be increased.
+// TODO(ivanpi): Normalize the Json (e.g. file order).
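+// === Example generated DDL (illustrative) ===
+// With the default -dataset ("pg") and the lib.CreateTableSuffix applied by
+// newDBHandle, the tables created when -setupdb is set look roughly like
+// (whitespace reformatted for readability):
+//
+//   CREATE TABLE IF NOT EXISTS pg_bundleData (
+//     BundleHash BINARY(32) NOT NULL,
+//     Json MEDIUMTEXT NOT NULL,
+//     PRIMARY KEY (BundleHash)
+//   ) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci
+//
+//   CREATE TABLE IF NOT EXISTS pg_bundleLink (
+//     BundleID CHAR(64) CHARACTER SET ascii NOT NULL,
+//     BundleHash BINARY(32) NULL,
+//     PRIMARY KEY (BundleID),
+//     FOREIGN KEY (BundleHash) REFERENCES pg_bundleData(BundleHash)
+//       ON DELETE SET NULL
+//   ) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci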
+
+// All SqlData types should be added here, in order of table initialization.
+var dataTypes = []lsql.SqlData{&bundleData{}, &bundleLink{}}
+
+type bundleData struct {
+	// Raw SHA256 of the bundle contents
+	BundleHash []byte // primary key
+	// The bundle contents
+	Json string
+}
+
+func (bd *bundleData) TableName() string {
+	return *dataset + "_bundleData"
+}
+
+func (bd *bundleData) TableDef() *lsql.SqlTable {
+	return lsql.NewSqlTable(bd, "BundleHash", []lsql.SqlColumn{
+		{Name: "BundleHash", Type: "BINARY(32)", Null: false},
+		{Name: "Json", Type: "MEDIUMTEXT", Null: false},
+	}, []string{})
+}
+
+func (bd *bundleData) QueryRefs() []interface{} {
+	return []interface{}{&bd.BundleHash, &bd.Json}
+}
+
+type bundleLink struct {
+	// 64-byte printable ASCII string
+	BundleID string // primary key
+	// Raw SHA256 of the bundle contents
+	BundleHash []byte // foreign key => bundleData.BundleHash
+	// TODO(ivanpi): Add creation (and expiration, last access?) timestamps.
+}
+
+func (bl *bundleLink) TableName() string {
+	return *dataset + "_bundleLink"
+}
+
+func (bl *bundleLink) TableDef() *lsql.SqlTable {
+	return lsql.NewSqlTable(bl, "BundleID", []lsql.SqlColumn{
+		{Name: "BundleID", Type: "CHAR(64) CHARACTER SET ascii", Null: false},
+		{Name: "BundleHash", Type: "BINARY(32)", Null: true},
+	}, []string{
+		"FOREIGN KEY (BundleHash) REFERENCES " + (&bundleData{}).TableName() + "(BundleHash) ON DELETE SET NULL",
+	})
+}
+
+func (bl *bundleLink) QueryRefs() []interface{} {
+	return []interface{}{&bl.BundleID, &bl.BundleHash}
+}
+
+//////////////////////////////////////////
+// HTTP request handlers
+
+// GET request that returns the saved bundle for the given id.
+func handlerLoad(w http.ResponseWriter, r *http.Request) {
+	if !handleCORS(w, r) {
+		return
+	}
+
+	// Check method and read GET parameters.
+	if !checkGetMethod(w, r) {
+		return
+	}
+	bId := r.FormValue("id")
+	if bId == "" {
+		storageError(w, http.StatusBadRequest, "Must specify id to load.")
+		return
+	}
+
+	if !checkDBInit(w) {
+		return
+	}
+
+	var bLink bundleLink
+	// Get the entry for the provided id.
+	err := dbhRead.QFetch(bId, &bLink)
+	if err == lsql.ErrNoSuchEntity {
+		storageError(w, http.StatusNotFound, "No data found for provided id.")
+		return
+	} else if err != nil {
+		storageInternalError(w, "Error getting bundleLink for id", bId, ":", err)
+		return
+	}
+
+	var bData bundleData
+	// Get the bundle data for the hash linked in the entry.
+	// Note: This can fail if the bundle is deleted between fetching bundleLink
+	// and bundleData. However, it is highly unlikely, costly to mitigate (using
+	// a serializable transaction), and unimportant (error 500 instead of 404).
+	err = dbhRead.QFetch(bLink.BundleHash, &bData)
+	if err != nil {
+		storageInternalError(w, "Error getting bundleData for id", bId, ":", err)
+		return
+	}
+
+	storageRespond(w, http.StatusOK, &StorageResponse{
+		Link: bId,
+		Data: bData.Json,
+	})
+	return
+}
+
+// POST request that saves the body as a new bundle and returns the bundle id.
+func handlerSave(w http.ResponseWriter, r *http.Request) {
+	if !handleCORS(w, r) {
+		return
+	}
+
+	// Check method and read POST body.
+	requestBody := getPostBody(w, r)
+	if requestBody == nil {
+		return
+	}
+	if len(requestBody) > maxSize {
+		storageError(w, http.StatusBadRequest, "Program too large.")
+		return
+	}
+
+	if !checkDBInit(w) {
+		return
+	}
+
+	// TODO(ivanpi): Check if bundle is parseable. Format/lint?
+
+	bHashFixed := rawHash(requestBody)
+	bHash := bHashFixed[:]
+
+	randomLink := func() string {
+		h := make([]byte, 16, 16+len(bHash))
+		err := binary.Read(crand.Reader, binary.LittleEndian, h)
+		if err != nil {
+			panic(fmt.Errorf("rng failed: %v", err))
+		}
+		return "_" + stringHash(append(h, bHash...))[1:]
+	}
+
+	bNewData := bundleData{
+		BundleHash: bHash,
+		Json:       string(requestBody),
+	}
+	bNewLink := bundleLink{
+		BundleID:   randomLink(),
+		BundleHash: bHash,
+	}
+
+	// TODO(ivanpi): The function is not idempotent; there is a small probability
+	// of making a duplicate entry (if a commit succeeds, but a transient network
+	// issue reports it as failed).
+	err := dbhSeq.RunInTransaction(3, func(txh *lsql.DBHandle) error {
+		// If a bundleLink entry exists for the generated BundleID, regenerate
+		// BundleID and retry. Buying lottery ticket optional.
+		bLinkFound, err := txh.QExists(bNewLink.BundleID, &bNewLink)
+		if err != nil {
+			log.Println("error checking bundleLink existence for id", bNewLink.BundleID, ":", err)
+			return err
+		} else if bLinkFound {
+			log.Println("random generation resulted in duplicate id", bNewLink.BundleID)
+			bNewLink.BundleID = randomLink()
+			return lsql.RetryTransaction
+		}
+
+		// Check if a bundleData entry exists for this BundleHash.
+		bDataFound, err := txh.QExists(bNewData.BundleHash, &bNewData)
+		if err != nil {
+			log.Println("error checking bundleData existence for hash", hex.EncodeToString(bHash), ":", err)
+			return err
+		} else if !bDataFound {
+			// If not, save the bundleData.
+			err = txh.QInsert(&bNewData)
+			if err != nil {
+				log.Println("error storing bundleData for hash", hex.EncodeToString(bHash), ":", err)
+				return err
+			}
+		}
+
+		// Save the bundleLink with the generated BundleID referring to the
+		// bundleData.
+		err = txh.QInsert(&bNewLink)
+		if err != nil {
+			log.Println("error storing bundleLink for id", bNewLink.BundleID, ":", err)
+			return err
+		}
+
+		return nil
+	})
+
+	if err == nil {
+		storageRespond(w, http.StatusOK, &StorageResponse{
+			Link: bNewLink.BundleID,
+			Data: bNewData.Json,
+		})
+	} else if err == lsql.ErrTooManyRetries {
+		storageInternalError(w, err)
+	} else {
+		// An informative error message has already been printed.
+		storageInternalError(w)
+	}
+	return
+}
+
+//////////////////////////////////////////
+// Response handling
+
+type StorageResponse struct {
+	// Error message. If empty, request was successful.
+	Error string
+	// Bundle ID for the saved/loaded bundle.
+	Link string
+	// Contents of the loaded bundle.
+	Data string
+}
+
+// Sends response to client. Request handler should exit after this call.
+func storageRespond(w http.ResponseWriter, status int, body *StorageResponse) {
+	bodyJson, _ := json.Marshal(body)
+	w.Header().Add("Content-Type", "application/json")
+	w.Header().Add("Content-Length", fmt.Sprintf("%d", len(bodyJson)))
+	w.WriteHeader(status)
+	w.Write(bodyJson)
+}
+
+// Sends error response with specified message to client.
+func storageError(w http.ResponseWriter, status int, msg string) {
+	storageRespond(w, status, &StorageResponse{
+		Error: msg,
+	})
+}
+
+// Logs error internally and sends non-specific error response to client.
+func storageInternalError(w http.ResponseWriter, v ...interface{}) {
+	if len(v) > 0 {
+		log.Println(v...)
+	}
+	storageError(w, http.StatusInternalServerError, "Internal error, please retry.")
+}
+
+//////////////////////////////////////////
+// SQL database handles
+
+// Data writes for the schema are complex enough to require transactions with
+// SERIALIZABLE isolation. However, reads do not require SERIALIZABLE. Since
+// database/sql only allows setting transaction isolation per connection,
+// a separate connection with only READ-COMMITTED isolation is used for reads
+// to reduce lock contention and deadlock frequency.
+
+var (
+	// Database handle with SERIALIZABLE transaction isolation.
+	// Used for read-write transactions.
+	dbhSeq *lsql.DBHandle
+	// Database handle with READ-COMMITTED transaction isolation.
+	// Used for non-transactional reads.
+	dbhRead *lsql.DBHandle
+)
+
+func newDBHandle(sqlConfig, txIsolation string, dataTypes []lsql.SqlData, setupdb, readonly bool) (*lsql.DBHandle, error) {
+	// Create a database handle.
+	dbc, err := lib.NewDBConn(sqlConfig, txIsolation)
+	if err != nil {
+		return nil, err
+	}
+	dbh := lsql.NewDBHandle(dbc)
+	if setupdb {
+		// Create missing database tables.
+		for _, t := range dataTypes {
+			if err := dbh.CreateTable(t, true, lib.CreateTableSuffix); err != nil {
+				return nil, fmt.Errorf("failed initializing database tables: %v", err)
+			}
+			// TODO(ivanpi): Initialize database with fixed-ID examples?
+		}
+	}
+	// Prepare simple database queries.
+	for _, t := range dataTypes {
+		if err := dbh.RegisterType(t, readonly); err != nil {
+			return nil, fmt.Errorf("failed preparing database queries: %v", err)
+		}
+	}
+	return dbh, nil
+}
+
+func initDBHandles() error {
+	if *sqlConf == "" {
+		return nil
+	}
+
+	var err error
+	// If setupDB is set, tables should be initialized only once, on the handle
+	// that is opened first.
+	if dbhSeq, err = newDBHandle(*sqlConf, "SERIALIZABLE", dataTypes, *setupDB, false); err != nil {
+		return err
+	}
+	// The READ-COMMITTED connection is used only for reads, so it is not
+	// necessary to prepare writing statements such as INSERT.
+	if dbhRead, err = newDBHandle(*sqlConf, "READ-COMMITTED", dataTypes, false, true); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func checkDBInit(w http.ResponseWriter) bool {
+	if dbhSeq == nil || dbhRead == nil {
+		storageError(w, http.StatusInternalServerError, "Internal error: cannot connect to database.")
+		return false
+	}
+
+	return true
+}
diff --git a/lib/lsql/doc.go b/lib/lsql/doc.go
new file mode 100644
index 0000000..e0ae71f
--- /dev/null
+++ b/lib/lsql/doc.go
@@ -0,0 +1,17 @@
+// Package lsql implements a utility wrapper around the database/sql library.
+//
+// It simplifies common operations on data entities. An entity type corresponds
+// to a SQL table, and an instance of the type corresponds to a single row in
+// the table.
+// Each entity type must implement the SqlData interface, which specifies the
+// table schema and mapping of entity instance fields to SQL table columns.
+// Each entity must have exactly one primary key. More complex relations, such
+// as foreign key constraints, need to be handled with care (for example,
+// executing statements in the correct order).
+//
+// The library supports creating database tables from the schema, building
+// query/statement strings for common operations such as SELECT and INSERT,
+// caching prepared queries keyed by labels and entity types, and transactional
+// execution. The most common CRUD operations are supported directly by
+// automatically prepared queries and helper functions.
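+//
+// A minimal usage sketch (the "user" type and "users" table are hypothetical,
+// not part of this library; dbc is a *sql.DB, e.g. from lib.NewDBConn):
+//
+//   type user struct {
+//     ID   string
+//     Name string
+//   }
+//
+//   func (u *user) TableName() string { return "users" }
+//   func (u *user) TableDef() *lsql.SqlTable {
+//     return lsql.NewSqlTable(u, "ID", []lsql.SqlColumn{
+//       {Name: "ID", Type: "CHAR(16)", Null: false},
+//       {Name: "Name", Type: "TEXT", Null: false},
+//     }, []string{})
+//   }
+//   func (u *user) QueryRefs() []interface{} { return []interface{}{&u.ID, &u.Name} }
+//
+//   dbh := lsql.NewDBHandle(dbc)
+//   _ = dbh.CreateTable(&user{}, true, "")
+//   _ = dbh.RegisterType(&user{}, false)
+//   _ = dbh.QInsert(&user{ID: "u1", Name: "Ann"})
+//   var u user
+//   if err := dbh.QFetch("u1", &u); err == lsql.ErrNoSuchEntity {
+//     // not found
+//   }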
+package lsql
diff --git a/lib/lsql/handle.go b/lib/lsql/handle.go
new file mode 100644
index 0000000..510d13d
--- /dev/null
+++ b/lib/lsql/handle.go
@@ -0,0 +1,218 @@
+// lsql database connection wrapper providing prepared statement caching with
+// transaction support and simple CRUD operations over SqlData types.
+//
+// DBHandle wraps a SQL database connection. It provides Prepare*() and Get*()
+// methods, which allow saving and retrieving prepared SQL statements keyed by
+// a label and, optionally, SqlData type.
+// Q*() methods are convenience methods for common CRUD operations on a SqlData
+// entity. The entity type must be registered beforehand using RegisterType()
+// and the corresponding table created.
+// RunInTransaction() executes a sequence of operations inside a transaction,
+// retrying on failure.
+
+package lsql
+
+import (
+	"database/sql"
+	"errors"
+	"fmt"
+	"sync"
+)
+
+var (
+	// Error returned by QFetch when no entity is found for the given key.
+	ErrNoSuchEntity = errors.New("lsql: no such entity")
+	// Error returned by RunInTransaction when retries are exhausted.
+	ErrTooManyRetries = errors.New("lsql: too many retries")
+	// Error that should be returned from a transaction callback to trigger a
+	// rollback and retry. Other errors cause a rollback and abort.
+	RetryTransaction = errors.New("lsql: retry transaction")
+)
+
+// SQL database handle storing prepared statements.
+// Initialize using NewDBHandle.
+type DBHandle struct {
+	dbc *sql.DB
+	tx  *sql.Tx
+
+	mu      *sync.RWMutex
+	queries map[string]*sql.Stmt
+}
+
+func NewDBHandle(dbc *sql.DB) *DBHandle {
+	return &DBHandle{
+		dbc:     dbc,
+		mu:      &sync.RWMutex{},
+		queries: make(map[string]*sql.Stmt),
+	}
+}
+
+//////////////////////////////////////////
+// Transaction support
+
+// Runs function txf inside a SQL transaction. txf should only use the database
+// handle passed to it, which shares the prepared statement cache with the
+// original handle. If txf returns nil, the transaction is committed.
+// Otherwise, it is rolled back.
+// txf is retried at most maxRetries times, with a fresh transaction for every
+// attempt, until the commit is successful. txf should not have side effects
+// that could affect subsequent retries (apart from database operations, which
+// are rolled back).
+// If the error returned from txf is RetryTransaction, txf is retried as if the
+// commit failed. Otherwise, txf is not retried, and RunInTransaction returns
+// the error.
+// In rare cases, txf may be retried even if the transaction was successfully
+// committed (when commit falsely returns an error). txf should be idempotent
+// or able to detect this case.
+// If maxRetries is exhausted, RunInTransaction returns ErrTooManyRetries.
+// Nested transactions are not supported and result in undefined behaviour.
+// Inspired by https://cloud.google.com/appengine/docs/go/datastore/reference#RunInTransaction
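+//
+// A minimal usage sketch (rec is a hypothetical registered SqlData value and
+// newRandomID() a hypothetical key generator):
+//
+//   err := dbh.RunInTransaction(3, func(txh *lsql.DBHandle) error {
+//     found, err := txh.QExists(rec.ID, &rec)
+//     if err != nil {
+//       return err // rolled back, not retried
+//     } else if found {
+//       rec.ID = newRandomID() // pick a fresh key and retry
+//       return lsql.RetryTransaction
+//     }
+//     return txh.QInsert(&rec) // committed iff txf returns nil
+//   })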
+func (h *DBHandle) RunInTransaction(maxRetries int, txf func(txh *DBHandle) error) error {
+	for i := 0; i < maxRetries; i++ {
+		err := h.attemptInTransaction(txf)
+		if err == nil {
+			return nil
+		} else if err != RetryTransaction {
+			return err
+		}
+	}
+	return ErrTooManyRetries
+}
+
+func (h *DBHandle) attemptInTransaction(txf func(txh *DBHandle) error) (rerr error) {
+	tx, err := h.dbc.Begin()
+	if err != nil {
+		return fmt.Errorf("lsql: failed opening transaction: %v", err)
+	}
+	defer func() {
+		// UPSTREAM BUG WORKAROUND: Rollback anyway to release transaction after
+		// manual commit.
+		//if rerr != nil {
+		if true {
+			// Silently ignore rollback error, we cannot do anything. Transaction
+			// will timeout eventually.
+			// UPSTREAM BUG: Transaction does not timeout, connection gets reused.
+			// This case is unlikely, but dangerous.
+			// TODO(ivanpi): Remove workaround when bug is resolved.
+			_ = tx.Rollback()
+		}
+	}()
+	// Call txf with the transaction handle - a shallow copy of the database
+	// handle (sharing the mutex, database connection, queries) with the
+	// transaction object added.
+	err = txf(&DBHandle{
+		dbc:     h.dbc,
+		mu:      h.mu,
+		queries: h.queries,
+		tx:      tx,
+	})
+	if err != nil {
+		return err
+	}
+	// UPSTREAM BUG WORKAROUND: Commit manually.
+	//if err = tx.Commit(); err != nil {
+	if _, err = tx.Exec("COMMIT"); err != nil {
+		return RetryTransaction
+	}
+	return nil
+}
+
+//////////////////////////////////////////
+// Prepared statement caching support
+
+// Prepares the SQL statement/query and saves it under the provided label.
+func (h *DBHandle) Prepare(label, query string) error {
+	return h.prepareInternal("c:"+label, query)
+}
+
+// Retrieves the previously prepared SQL statement/query for the label.
+func (h *DBHandle) Get(label string) *sql.Stmt {
+	return h.getInternal("c:" + label)
+}
+
+// PrepareFor and GetFor are equivalent to Prepare and Get, with the label
+// interpreted in the namespace of the SqlData type.
+
+func (h *DBHandle) PrepareFor(proto SqlData, label, query string) error {
+	return h.prepareInternal("t:"+proto.TableName()+":"+label, query)
+}
+
+func (h *DBHandle) GetFor(proto SqlData, label string) *sql.Stmt {
+	return h.getInternal("t:" + proto.TableName() + ":" + label)
+}
+
+func (h *DBHandle) prepareInternal(label, query string) error {
+	stmt, err := h.dbc.Prepare(query)
+	if err != nil {
+		return err
+	}
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	h.queries[label] = stmt
+	return nil
+}
+
+func (h *DBHandle) getInternal(label string) (stmt *sql.Stmt) {
+	if h.tx != nil {
+		defer func() {
+			stmt = h.tx.Stmt(stmt)
+		}()
+	}
+	h.mu.RLock()
+	defer h.mu.RUnlock()
+	return h.queries[label]
+}
+
+//////////////////////////////////////////
+// SqlData common operation support
+
+// Creates the table for the given SqlData prototype.
+func (h *DBHandle) CreateTable(proto SqlData, ifNotExists bool, stmtSuffix string) error {
+	_, err := h.dbc.Exec(proto.TableDef().GetCreateTable(ifNotExists) + " " + stmtSuffix)
+	return err
+}
+
+// Prepares common operation queries/statements for the given SqlData type.
+// If readonly is set, prepares only reading queries, not writing statements.
+func (h *DBHandle) RegisterType(proto SqlData, readonly bool) error {
+	t := proto.TableDef()
+	if err := h.PrepareFor(proto, "fetch", t.GetSelectQuery(t.GetWhereKey())); err != nil {
+		return err
+	}
+	if err := h.PrepareFor(proto, "exists", t.GetCountQuery(t.GetWhereKey())); err != nil {
+		return err
+	}
+	if !readonly {
+		if err := h.PrepareFor(proto, "insert", t.GetInsertQuery()); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Functions below execute common operations for the SqlData type.
+// The type must be registered with RegisterType beforehand.
+
+// Fetches the entity with the given key and stores it into dst.
+// If no entity for key is found, returns ErrNoSuchEntity.
+func (h *DBHandle) QFetch(key interface{}, dst SqlData) error {
+	err := h.GetFor(dst, "fetch").QueryRow(key).Scan(dst.QueryRefs()...)
+	if err == sql.ErrNoRows {
+		err = ErrNoSuchEntity
+	}
+	return err
+}
+
+// Checks if an entity of the given SqlData prototype with the given key
+// exists in the database.
+func (h *DBHandle) QExists(key interface{}, proto SqlData) (bool, error) {
+	var cnt int
+	err := h.GetFor(proto, "exists").QueryRow(key).Scan(&cnt)
+	return cnt > 0, err
+}
+
+// Inserts the entity stored in src into the database.
+func (h *DBHandle) QInsert(src SqlData) error {
+	_, err := h.GetFor(src, "insert").Exec(src.QueryRefs()...)
+	return err
+}
diff --git a/lib/lsql/model.go b/lib/lsql/model.go
new file mode 100644
index 0000000..f9b0798
--- /dev/null
+++ b/lib/lsql/model.go
@@ -0,0 +1,143 @@
+// lsql support for representing an entity type as a SQL table.
+//
+// Each entity type (for example, a struct) corresponding to a single SQL
+// table should implement the SqlData interface, specifying the table name,
+// definition of all table columns and constraints (as a SqlTable), and
+// mapping of entity instance data fields to columns.
+//
+// The SqlTable.Get*() methods generate SQL query/statement strings for
+// creating the table and other common SQL operations over the specified
+// type. Generated strings and string fragments are in prepared statement
+// format (with question mark placeholders for data values). Default values
+// and projection are not supported.
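+//
+// For illustration, for a hypothetical table "users" with primary key "ID"
+// and columns (ID, Name), the generated strings are roughly:
+//
+//   GetWhereKey()                 -> "(ID=?)"
+//   GetSelectQuery(GetWhereKey()) -> "SELECT ID,Name FROM users WHERE ((ID=?))"
+//   GetCountQuery(GetWhereKey())  -> "SELECT COUNT(ID) FROM users WHERE ((ID=?))"
+//   GetInsertQuery()              -> "INSERT INTO users (ID,Name) VALUES (?,?)"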
+
+package lsql
+
+import (
+	"strings"
+)
+
+// An entity type representable as a SQL table row.
+// Should be implemented by each type to be persisted in a SQL table via lsql.
+type SqlData interface {
+	// SQL table name (must be a valid SQL identifier).
+	// Return value must depend only on entity type, not entity instance.
+	TableName() string
+	// SQL table definition (refer to NewSqlTable).
+	// Return value must depend only on entity type, not entity instance.
+	TableDef() *SqlTable
+	// Array of references to entity instance data fields mapping to SQL columns,
+	// in the same order as specified in the table definition. Every column must
+	// have a corresponding reference of the appropriate type.
+	QueryRefs() []interface{}
+}
+
+//////////////////////////////////////////
+// Table schema description
+
+// Read-only specification of a SQL column.
+type SqlColumn struct {
+	// Column name (must be a valid SQL identifier).
+	Name string
+	// Column SQL type, as specified in a CREATE TABLE statement.
+	Type string
+	// Whether the column is allowed to be NULL.
+	Null bool
+}
+
+// Initialize using NewSqlTable.
+type SqlTable struct {
+	tableName   string
+	keyName     string
+	columns     []SqlColumn
+	constraints []string
+}
+
+// proto: Instance of the corresponding data entity.
+// keyName: Name of the primary key column.
+// columns: SqlColumn specifications for each column.
+// constraints: Table constraints in SQL syntax (excluding PRIMARY KEY).
+func NewSqlTable(proto SqlData, keyName string, columns []SqlColumn, constraints []string) *SqlTable {
+	return &SqlTable{
+		tableName:   proto.TableName(),
+		keyName:     keyName,
+		columns:     columns,
+		constraints: constraints,
+	}
+}
+
+func (t *SqlTable) TableName() string {
+	return t.tableName
+}
+
+func (t *SqlTable) KeyName() string {
+	return t.keyName
+}
+
+// List of column names.
+func (t *SqlTable) ColumnNames() []string {
+	colNames := make([]string, 0, len(t.columns))
+	for _, c := range t.columns {
+		colNames = append(colNames, c.Name)
+	}
+	return colNames
+}
+
+func (c *SqlColumn) getColumnSpec() string {
+	nullSpec := "NULL"
+	if !c.Null {
+		nullSpec = "NOT NULL"
+	}
+	return c.Name + " " + c.Type + " " + nullSpec
+}
+
+//////////////////////////////////////////
+// Statement/query string generation
+
+// SQL statement to create the table.
+func (t *SqlTable) GetCreateTable(ifNotExists bool) string {
+	createCmd := "CREATE TABLE "
+	if ifNotExists {
+		createCmd += "IF NOT EXISTS "
+	}
+	clauses := make([]string, 0, len(t.columns)+1+len(t.constraints))
+	for _, c := range t.columns {
+		clauses = append(clauses, c.getColumnSpec())
+	}
+	clauses = append(clauses, "PRIMARY KEY ("+t.KeyName()+")")
+	clauses = append(clauses, t.constraints...)
+	return createCmd + t.TableName() + " ( " + strings.Join(clauses, ", ") + " )"
+}
+
+// Get* methods below return SQL query/statement strings with placeholders for
+// common CRUD operations over the corresponding type.
+
+// WHERE constraint fragment for matching primary key.
+func (t *SqlTable) GetWhereKey() string {
+	return "(" + t.KeyName() + "=?)"
+}
+
+// SELECT query for entities matching WHERE clause. No projection.
+func (t *SqlTable) GetSelectQuery(whereClause string) string {
+	query := "SELECT " + strings.Join(t.ColumnNames(), ",") + " FROM " + t.TableName()
+	return appendWhere(query, whereClause)
+}
+
+// COUNT query for entities matching WHERE clause.
+func (t *SqlTable) GetCountQuery(whereClause string) string {
+	query := "SELECT COUNT(" + t.KeyName() + ") FROM " + t.TableName()
+	return appendWhere(query, whereClause)
+}
+
+// INSERT query for entity. Default values not supported, all columns mapped.
+func (t *SqlTable) GetInsertQuery() string {
+	colNames := t.ColumnNames()
+	query := "INSERT INTO " + t.TableName() +
+		" (" + strings.Join(colNames, ",") + ")" +
+		" VALUES (?" + strings.Repeat(",?", len(colNames)-1) + ")"
+	return query
+}
+
+func appendWhere(query, whereClause string) string {
+	return query + " WHERE (" + whereClause + ")"
+}
diff --git a/lib/mysql.go b/lib/mysql.go
new file mode 100644
index 0000000..d6df75a
--- /dev/null
+++ b/lib/mysql.go
@@ -0,0 +1,48 @@
+// Functions for opening and configuring a MySQL-like database connection.
+
+package lib
+
+import (
+	"database/sql"
+	"fmt"
+	"net/url"
+
+	_ "github.com/go-sql-driver/mysql"
+)
+
+// SQL statement suffix to be appended when creating tables.
+const CreateTableSuffix = "CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci"
+
+// Opens a connection to the SQL database using the provided sqlConfig
+// connection string (see (1); query parameters are not supported) and
+// transaction isolation (see (2)).
+// 1 -> https://github.com/go-sql-driver/mysql/#dsn-data-source-name
+// 2 -> https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation
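+//
+// A usage sketch (credentials, host, and database name are hypothetical):
+//
+//   db, err := lib.NewDBConn("pguser:pgpass@tcp(localhost:3306)/playground",
+//     "READ-COMMITTED")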
+func NewDBConn(sqlConfig, txIsolation string) (*sql.DB, error) {
+	return openDBConn(configureDBConn(sqlConfig, txIsolation))
+}
+
+func configureDBConn(sqlConfig, txIsolation string) string {
+	// TODO(ivanpi): Parse parameters from sqlConfig?
+	params := url.Values{}
+	// Setting charset is unnecessary when collation is set, according to
+	// https://github.com/go-sql-driver/mysql/#charset
+	params.Set("collation", "utf8mb4_general_ci")
+	params.Set("parseTime", "true")
+	params.Set("loc", "UTC")
+	params.Set("time_zone", "'+00:00'")
+	// TODO(ivanpi): Configure TLS certificates for Cloud SQL connection.
+	params.Set("tx_isolation", "'"+txIsolation+"'")
+	return sqlConfig + "?" + params.Encode()
+}
+
+func openDBConn(sqlConfig string) (*sql.DB, error) {
+	db, err := sql.Open("mysql", sqlConfig)
+	if err != nil {
+		return nil, fmt.Errorf("failed opening database connection at %s : %v", sqlConfig, err)
+	}
+	if err := db.Ping(); err != nil {
+		return nil, fmt.Errorf("failed connecting to database at %s : %v", sqlConfig, err)
+	}
+	return db, nil
+}