veyron/tools/playground: Add response caching to the playground.

Uses an LRU cache (golang/groupcache/lru). Depends on this CL:
https://veyron-review.googlesource.com/#/c/4253/

Also enforces a maximum size of 64 KiB (65,536 bytes) for both the program
and the response.
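
For reference, here is a minimal standalone sketch of the sha1-keyed
groupcache/lru pattern this change uses (assuming the dependency from the
CL above; the program body and entry count below are illustrative):

    package main

    import (
    	"crypto/sha1"
    	"fmt"

    	"github.com/golang/groupcache/lru"
    )

    func main() {
    	// Keys are sha1 sums of request bodies; [20]byte arrays are
    	// comparable, so they can be used directly as cache keys.
    	cache := lru.New(10000)

    	body := []byte("package main\nfunc main() {}\n")
    	key := sha1.Sum(body)

    	cache.Add(key, "cached response for this program")
    	if v, ok := cache.Get(key); ok {
    		fmt.Println(v) // served from cache; no need to re-run the program
    	}
    }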

NOTE: This is the same CL as https://veyron-review.googlesource.com/#/c/4251/.

That CL was merged before the groupcache dependency made it into
third-party, which broke the build, so I reverted it.

Now that the groupcache dependency is in third-party
(https://veyron-review.googlesource.com/#/c/4253/), this CL should be
good to go again.

Change-Id: Ieeab07a49bddbca4e8243e3633828f0361a30ada
diff --git a/tools/playground/compilerd/main.go b/tools/playground/compilerd/main.go
index d2bcf02..aba5af6 100644
--- a/tools/playground/compilerd/main.go
+++ b/tools/playground/compilerd/main.go
@@ -2,9 +2,11 @@
 
 import (
 	"bytes"
+	"crypto/sha1"
 	"encoding/json"
 	"flag"
 	"fmt"
+	"io"
 	"math/rand"
 	"net/http"
 	"os"
@@ -12,6 +14,8 @@
 	"os/signal"
 	"syscall"
 	"time"
+
+	"github.com/golang/groupcache/lru"
 )
 
 type Event struct {
@@ -19,11 +23,16 @@
 	Message string
 }
 
-type Response struct {
+type ResponseBody struct {
 	Errors string
 	Events []Event
 }
 
+type CachedResponse struct {
+	Status int
+	Body   ResponseBody
+}
+
 var (
 	// This channel is closed when the server begins shutting down.
 	// No values are ever sent to it.
@@ -33,6 +42,14 @@
 
 	// Note, shutdown triggers on SIGTERM or when the time limit is hit.
 	shutdown = flag.Bool("shutdown", true, "whether to ever shutdown the machine")
+
+	// Maximum request and response size. Same limit imposed by the go-tour.
+	maxSize = 1 << 16
+
+	// In-memory LRU cache of responses, keyed by the sha1 sums of request
+	// bodies (20 bytes each); values are of type CachedResponse.
+	// TODO(nlacasse): Figure out the optimal number of entries. Using 10k for now.
+	cache = lru.New(10000)
 )
 
 func healthz(w http.ResponseWriter, r *http.Request) {
@@ -64,9 +81,35 @@
 		return
 	}
 
+	requestBody := streamToBytes(r.Body)
+
+	if len(requestBody) > maxSize {
+		responseBody := new(ResponseBody)
+		responseBody.Errors = "Program too large."
+		respondWithBody(w, http.StatusBadRequest, responseBody)
+		return
+	}
+
+	// Hash the body and see if it's been cached. If so, return the cached
+	// response status and body.
+	requestBodyHash := sha1.Sum(requestBody)
+	if cachedResponse, ok := cache.Get(requestBodyHash); ok {
+		if cachedResponseStruct, ok := cachedResponse.(CachedResponse); ok {
+			respondWithBody(w, cachedResponseStruct.Status, cachedResponseStruct.Body)
+			return
+		} else {
+			fmt.Printf("Invalid cached response: %v\n", cachedResponse)
+			cache.Remove(requestBodyHash)
+		}
+	}
+
+	// TODO(nlacasse): It would be cool if we could stream the output
+	// messages while the process is running, rather than waiting for it to
+	// exit and dumping all the output then.
+
 	id := <-uniq
 	cmd := Docker("run", "-i", "--name", id, "playground")
-	cmd.Stdin = r.Body
+	cmd.Stdin = bytes.NewReader(requestBody)
 	buf := new(bytes.Buffer)
 	cmd.Stdout = buf
 	cmd.Stderr = buf
@@ -83,12 +126,43 @@
 	}
 	Docker("rm", "-f", id).Run()
 
-	response := new(Response)
-	response.Events = append(response.Events, Event{0, buf.String()})
-	body, _ := json.Marshal(response)
+	// If the output is too large, cache an error response and return it.
+	if buf.Len() > maxSize {
+		status := http.StatusBadRequest
+		responseBody := new(ResponseBody)
+		responseBody.Errors = "Program output too large."
+		cache.Add(requestBodyHash, CachedResponse{
+			Status: status,
+			Body:   *responseBody,
+		})
+		respondWithBody(w, status, responseBody)
+		return
+	}
+
+	responseBody := new(ResponseBody)
+	responseBody.Events = append(responseBody.Events, Event{0, buf.String()})
+
+	cache.Add(requestBodyHash, CachedResponse{
+		Status: http.StatusOK,
+		Body:   *responseBody,
+	})
+	respondWithBody(w, http.StatusOK, responseBody)
+}
+
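+// respondWithBody marshals body to JSON and writes it to w with the given HTTP status.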
+func respondWithBody(w http.ResponseWriter, status int, body interface{}) {
+	bodyJson, _ := json.Marshal(body)
 	w.Header().Add("Content-Type", "application/json")
-	w.Header().Add("Content-Length", fmt.Sprintf("%d", len(body)))
-	w.Write(body)
+	w.Header().Add("Content-Length", fmt.Sprintf("%d", len(bodyJson)))
+	w.WriteHeader(status)
+	w.Write(bodyJson)
+}
+
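+// streamToBytes reads the entire stream into memory and returns its contents as a byte slice.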
+func streamToBytes(stream io.Reader) []byte {
+	buf := new(bytes.Buffer)
+	buf.ReadFrom(stream)
+	return buf.Bytes()
 }
 
 func Docker(args ...string) *exec.Cmd {