Merge "playground: adding missing README.md"
diff --git a/go/src/v.io/x/playground/compilerd/compile.go b/go/src/v.io/x/playground/compilerd/compile.go
index 2eeef40..0a37eca 100644
--- a/go/src/v.io/x/playground/compilerd/compile.go
+++ b/go/src/v.io/x/playground/compilerd/compile.go
@@ -13,7 +13,6 @@
 
 import (
 	"flag"
-	"log"
 	"net/http"
 	"time"
 
@@ -23,6 +22,7 @@
 	"v.io/x/playground/lib"
 	"v.io/x/playground/lib/event"
 	"v.io/x/playground/lib/hash"
+	"v.io/x/playground/lib/log"
 )
 
 var (
@@ -98,7 +98,7 @@
 		// The response is hard limited to 2*maxSize: maxSize for builder stdout,
 		// and another maxSize for compilerd error and status messages.
 		return event.NewResponseEventSink(lib.NewLimitedWriter(w, 2*(*maxSize), lib.DoOnce(func() {
-			log.Println("Hard response size limit reached.")
+			log.Debugln("Hard response size limit reached.")
 		})), !wantDebug)
 	}
 
@@ -108,6 +108,8 @@
 		return
 	}
 
+	log.Debugln("Got valid compile request.")
+
 	// Hash the body and see if it's been cached. If so, return the cached
 	// response status and body.
 	// NOTE(sadovsky): In the client we may shift timestamps (based on current
@@ -117,6 +119,7 @@
 		if cachedResponseStruct, ok := cr.(cachedResponse); ok {
 			res := openResponse(cachedResponseStruct.Status)
 			event.Debug(res, "Sending cached response")
+			log.Debugln("Sending cached response.")
 			res.Write(cachedResponseStruct.Events...)
 			return
 		} else {
@@ -158,17 +161,19 @@
 			// If the client disconnects before job finishes, cancel the job.
 			// If job has already started, the job will finish and the results
 			// will be cached.
-			log.Printf("Client disconnected. Cancelling job.")
+			log.Debugln("Client disconnected. Cancelling job.")
 			job.Cancel()
 		case result := <-resultChan:
 			if result.Success {
 				event.Debug(res, "Caching response")
+				log.Debugln("Caching response.")
 				cache.Add(requestBodyHash, cachedResponse{
 					Status: http.StatusOK,
 					Events: result.Events,
 				})
 			} else {
-				event.Debug(res, "Internal errors encountered, not caching response")
+				event.Debug(res, "Internal errors encountered, not caching response.")
+				log.Warnln("Internal errors encountered, not caching response.")
 			}
 			return
 		}
diff --git a/go/src/v.io/x/playground/compilerd/jobqueue/jobqueue.go b/go/src/v.io/x/playground/compilerd/jobqueue/jobqueue.go
index e9df960..3b182d5 100644
--- a/go/src/v.io/x/playground/compilerd/jobqueue/jobqueue.go
+++ b/go/src/v.io/x/playground/compilerd/jobqueue/jobqueue.go
@@ -33,7 +33,6 @@
 	"bytes"
 	"fmt"
 	"io/ioutil"
-	"log"
 	"os"
 	"os/exec"
 	"sync"
@@ -41,6 +40,7 @@
 
 	"v.io/x/playground/lib"
 	"v.io/x/playground/lib/event"
+	"v.io/x/playground/lib/log"
 )
 
 var (
@@ -99,7 +99,7 @@
 func (j *Job) Cancel() {
 	j.mu.Lock()
 	defer j.mu.Unlock()
-	log.Printf("Cancelling job %v.\n", j.id)
+	log.Debugf("Cancelling job %v.\n", j.id)
 	j.cancelled = true
 }
 
@@ -125,6 +125,7 @@
 var _ = Dispatcher((*dispatcherImpl)(nil))
 
 func NewDispatcher(workers int, jobQueueCap int) Dispatcher {
+	log.Debugf("Creating new dispatcher with %v workers and %v queue capacity.\n", workers, jobQueueCap)
 	d := &dispatcherImpl{
 		jobQueue: make(chan *Job, jobQueueCap),
 		stopped:  make(chan bool),
@@ -137,7 +138,7 @@
 // start starts a given number of workers, then reads from the jobQueue and
 // assigns jobs to free workers.
 func (d *dispatcherImpl) start(num int) {
-	log.Printf("Dispatcher starting %d workers.\n", num)
+	log.Debugf("Dispatcher starting.\n", num)
 
 	// Workers are published on the workerQueue when they are free.
 	workerQueue := make(chan *worker, num)
@@ -166,18 +167,18 @@
 					cancelled := job.cancelled
 					job.mu.Unlock()
 					if cancelled {
-						log.Printf("Dispatcher encountered cancelled job %v, rejecting.\n", job.id)
+						log.Debugf("Dispatcher encountered cancelled job %v, rejecting.\n", job.id)
 						job.resultChan <- Result{
 							Success: false,
 							Events:  nil,
 						}
 						workerQueue <- worker
 					} else {
-						log.Printf("Dispatching job %v to worker %v.\n", job.id, worker.id)
+						log.Debugf("Dispatching job %v to worker %v.\n", job.id, worker.id)
 						d.wg.Add(1)
 						go func() {
 							job.resultChan <- worker.run(job)
-							log.Printf("Job %v finished on worker %v.\n", job.id, worker.id)
+							log.Debugf("Job %v finished on worker %v.\n", job.id, worker.id)
 							d.wg.Done()
 							workerQueue <- worker
 						}()
@@ -186,19 +187,19 @@
 			}
 		}
 
-		log.Printf("Dispatcher stopped.\n")
+		log.Debugf("Dispatcher stopped.\n")
 
 		// Dispatcher stopped, treat all remaining jobs as cancelled.
 		for {
 			select {
 			case job := <-d.jobQueue:
-				log.Printf("Dispatcher is stopped, rejecting job %v.\n", job.id)
+				log.Debugf("Dispatcher is stopped, rejecting job %v.\n", job.id)
 				job.resultChan <- Result{
 					Success: false,
 					Events:  nil,
 				}
 			default:
-				log.Printf("Dispatcher job queue drained.\n")
+				log.Debugf("Dispatcher job queue drained.\n")
 				d.wg.Done()
 				return
 			}
@@ -213,7 +214,7 @@
 // jobs, rather than rejecting them.  Or, put logic in the client to retry
 // cancelled jobs.
 func (d *dispatcherImpl) Stop() {
-	log.Printf("Stopping dispatcher.\n")
+	log.Debugf("Stopping dispatcher.\n")
 	d.stopped <- true
 
 	// Wait for workers to finish their current jobs.
@@ -282,7 +283,7 @@
 		// each other.
 		tmpDir, err := ioutil.TempDir("", "pg-builder-")
 		if err != nil {
-			panic(fmt.Errorf("Error creating temp dir for builder: %v", err))
+			log.Panicf("Error creating temp dir for builder: %v\n", err)
 		} else {
 			cmd.Dir = tmpDir
 		}
@@ -320,7 +321,7 @@
 	userErrorCallback := func(err error) {
 		// A relay error can result from unparseable JSON caused by a builder bug
 		// or a malicious exploit inside Docker. Panicking could lead to a DoS.
-		log.Println(j.id, "builder stdout relay error:", err)
+		log.Errorln(j.id, "builder stdout relay error:", err)
 		erroredOut = true
 		cmdKill()
 	}
@@ -376,7 +377,7 @@
 	// Log builder internal errors, if any.
 	// TODO(ivanpi): Prevent caching? Report to client if debug requested?
 	if errBuffer.Len() > 0 {
-		log.Println(j.id, "builder stderr:", errBuffer.String())
+		log.Warnln(j.id, "builder stderr:", errBuffer.String())
 	}
 
 	event.Debug(j.res, "Response finished")
diff --git a/go/src/v.io/x/playground/compilerd/main.go b/go/src/v.io/x/playground/compilerd/main.go
index 5dd9b00..136f9d9 100644
--- a/go/src/v.io/x/playground/compilerd/main.go
+++ b/go/src/v.io/x/playground/compilerd/main.go
@@ -16,7 +16,6 @@
 	"flag"
 	"fmt"
 	"io"
-	"log"
 	"math/rand"
 	"net/http"
 	"os"
@@ -27,6 +26,7 @@
 
 	"v.io/x/lib/dbutil"
 	"v.io/x/playground/compilerd/storage"
+	"v.io/x/playground/lib/log"
 )
 
 func init() {
@@ -75,10 +75,13 @@
 }
 
 func main() {
+	log.InitSyslogLoggers()
+
+	log.Debugf("Compilerd starting.\n")
 	flag.Parse()
 
 	if err := seedRNG(); err != nil {
-		panic(err)
+		log.Panic(err)
 	}
 
 	c := newCompiler()
@@ -94,33 +97,48 @@
 		go waitForExit(c, time.Nanosecond*time.Duration(delayNs))
 	}
 
+	serveMux := http.NewServeMux()
+
 	if *sqlConf != "" {
+		log.Debugf("Using sql config %v\n", *sqlConf)
+
 		// Parse SQL configuration file and set up TLS.
 		dbConfig, err := dbutil.ActivateSqlConfigFromFile(*sqlConf)
 		if err != nil {
-			log.Fatal(err)
+			log.Panic(err)
 		}
 
 		// Connect to storage backend.
 		if err := storage.Connect(dbConfig); err != nil {
-			log.Fatal(err)
+			log.Panic(err)
 		}
 
 		// Add routes for storage.
-		http.HandleFunc("/load", handlerLoad)
-		http.HandleFunc("/save", handlerSave)
+		serveMux.HandleFunc("/load", handlerLoad)
+		serveMux.HandleFunc("/save", handlerSave)
 	} else {
+		log.Debugln("No sql config provided. Disabling /load and /save routes.")
+
 		// Return 501 Not Implemented for the /load and /save routes.
-		http.HandleFunc("/load", handlerNotImplemented)
-		http.HandleFunc("/save", handlerNotImplemented)
+		serveMux.HandleFunc("/load", handlerNotImplemented)
+		serveMux.HandleFunc("/save", handlerNotImplemented)
 	}
 
-	http.HandleFunc("/compile", c.handlerCompile)
-	http.HandleFunc("/healthz", handlerHealthz)
+	serveMux.HandleFunc("/compile", c.handlerCompile)
+	serveMux.HandleFunc("/healthz", handlerHealthz)
 
-	log.Printf("Serving %s\n", *address)
-	if err := http.ListenAndServe(*address, nil); err != nil {
-		panic(err)
+	log.Debugf("Serving %s\n", *address)
+	s := http.Server{
+		Addr:     *address,
+		Handler:  serveMux,
+		ErrorLog: log.ErrorLogger,
+	}
+
+	if err := s.ListenAndServe(); err != nil {
+		log.Panic(err)
 	}
 }
 
@@ -131,15 +149,15 @@
 
 	// Or if the time limit expires.
 	deadline := time.After(limit)
-	log.Println("Exiting at", time.Now().Add(limit))
+	log.Debugln("Exiting at", time.Now().Add(limit))
 Loop:
 	for {
 		select {
 		case <-deadline:
-			log.Println("Deadline expired, exiting in at most", exitDelay)
+			log.Debugln("Deadline expired, exiting in at most", exitDelay)
 			break Loop
 		case <-term:
-			log.Println("Got SIGTERM, exiting in at most", exitDelay)
+			log.Debugln("Got SIGTERM, exiting in at most", exitDelay)
 			break Loop
 		}
 	}
@@ -150,7 +168,7 @@
 	go func() {
 		select {
 		case <-time.After(exitDelay):
-			fmt.Errorf("Dispatcher did not stop in %v, exiting.", exitDelay)
+			log.Warnf("Dispatcher did not stop in %v, exiting.", exitDelay)
 			os.Exit(1)
 		}
 	}()
@@ -165,7 +183,7 @@
 	// Close database connections.
 	if *sqlConf != "" {
 		if err := storage.Close(); err != nil {
-			fmt.Errorf("storage.Close() failed: %v", err)
+			log.Errorf("storage.Close() failed: %v", err)
 		}
 	}
 
diff --git a/go/src/v.io/x/playground/compilerd/storage.go b/go/src/v.io/x/playground/compilerd/storage.go
index 5529625..52ab94c 100644
--- a/go/src/v.io/x/playground/compilerd/storage.go
+++ b/go/src/v.io/x/playground/compilerd/storage.go
@@ -15,10 +15,10 @@
 import (
 	"encoding/json"
 	"fmt"
-	"log"
 	"net/http"
 
 	"v.io/x/playground/compilerd/storage"
+	"v.io/x/playground/lib/log"
 )
 
 //////////////////////////////////////////
@@ -118,7 +118,7 @@
 // Logs error internally and sends non-specific error response to client.
 func storageInternalError(w http.ResponseWriter, v ...interface{}) {
 	if len(v) > 0 {
-		log.Println(v...)
+		log.Errorln(v...)
 	}
 	storageError(w, http.StatusInternalServerError, "Internal error, please retry.")
 }
diff --git a/go/src/v.io/x/playground/lib/log/log.go b/go/src/v.io/x/playground/lib/log/log.go
new file mode 100644
index 0000000..1b8dd08
--- /dev/null
+++ b/go/src/v.io/x/playground/lib/log/log.go
@@ -0,0 +1,125 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package log exports three loggers, ErrorLogger, WarnLogger, and
+// DebugLogger, along with convenience functions for logging messages at the
+// corresponding levels.
+//
+// By default, the loggers write only to stdout. The standard library's
+// default logger is also configured to write to stdout rather than stderr.
+//
+// Calling "InitSyslogLoggers", will create new loggers which will log to both
+// syslog and stdout, and also set the default logger to log to syslog and
+// stdout. The function will panic if syslog is unavailable.
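+//
+// A minimal usage sketch (doWork is a hypothetical helper, not part of this
+// package):
+//
+//	log.InitSyslogLoggers()
+//	log.Debugln("server starting")
+//	if err := doWork(); err != nil {
+//		log.Errorf("doWork failed: %v", err)
+//	}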
+package log
+
+import (
+	"fmt"
+	"io"
+	"log"
+	"log/syslog"
+	"os"
+)
+
+func init() {
+	// By default, the loggers only log to stdout.
+	ErrorLogger = newLogger(os.Stdout)
+	WarnLogger = newLogger(os.Stdout)
+	DebugLogger = newLogger(os.Stdout)
+
+	// Default logger should also log to stdout.
+	log.SetOutput(os.Stdout)
+}
+
+// InitSyslogLoggers creates loggers that will log to syslog as well as stdout.
+// It will panic if syslog is unavailable.
+func InitSyslogLoggers() {
+	ErrorLogger = newLogger(newSyslogStdoutWriter(syslog.LOG_ERR))
+	WarnLogger = newLogger(newSyslogStdoutWriter(syslog.LOG_WARNING))
+	DebugLogger = newLogger(newSyslogStdoutWriter(syslog.LOG_DEBUG))
+
+	// Default logger should also log to syslog and stdout.
+	log.SetOutput(newSyslogStdoutWriter(syslog.LOG_DEBUG))
+}
+
+var (
+	ErrorLogger *log.Logger
+	WarnLogger  *log.Logger
+	DebugLogger *log.Logger
+)
+
+// Debug functions use DebugLogger.
+func Debug(args ...interface{}) {
+	DebugLogger.Print(args...)
+}
+
+func Debugf(s string, args ...interface{}) {
+	DebugLogger.Printf(s, args...)
+}
+
+func Debugln(args ...interface{}) {
+	DebugLogger.Println(args...)
+}
+
+// Warn functions use WarnLogger.
+func Warn(args ...interface{}) {
+	WarnLogger.Print(args...)
+}
+
+func Warnf(s string, args ...interface{}) {
+	WarnLogger.Printf(s, args...)
+}
+
+func Warnln(args ...interface{}) {
+	WarnLogger.Println(args...)
+}
+
+// Error functions use ErrorLogger.
+func Error(args ...interface{}) {
+	ErrorLogger.Print(args...)
+}
+
+func Errorf(s string, args ...interface{}) {
+	ErrorLogger.Printf(s, args...)
+}
+
+func Errorln(args ...interface{}) {
+	ErrorLogger.Println(args...)
+}
+
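+// Panic functions log to ErrorLogger, then panic.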
+func Panic(args ...interface{}) {
+	ErrorLogger.Panic(args...)
+}
+
+func Panicf(s string, args ...interface{}) {
+	ErrorLogger.Panicf(s, args...)
+}
+
+func Panicln(args ...interface{}) {
+	ErrorLogger.Panicln(args...)
+}
+
+// newLogger creates a logger that writes to the given writer.
+func newLogger(w io.Writer) *log.Logger {
+	return log.New(w, "", log.LstdFlags)
+}
+
+// newSyslogStdoutWriter creates a writer that writes to both syslog and stdout.
+func newSyslogStdoutWriter(level syslog.Priority) io.Writer {
+	syslogWriter, err := syslog.New(level|syslog.LOG_USER, "playground")
+	if err != nil {
+		panic(fmt.Errorf("Error connecting to syslog: %v", err))
+	}
+	return io.MultiWriter(syslogWriter, os.Stdout)
+}
diff --git a/go/src/v.io/x/playground/lib/multi_writer.go b/go/src/v.io/x/playground/lib/multi_writer.go
index 015d64e..69f5cb7 100644
--- a/go/src/v.io/x/playground/lib/multi_writer.go
+++ b/go/src/v.io/x/playground/lib/multi_writer.go
@@ -12,6 +12,8 @@
 import (
 	"io"
 	"sync"
+
+	"v.io/x/playground/lib/log"
 )
 
 // Initialize using NewMultiWriter.
@@ -32,7 +34,7 @@
 	t.mu.Lock()
 	defer t.mu.Unlock()
 	if t.wrote {
-		panic("Tried to add writer after data has been written.")
+		log.Panic("Tried to add writer after data has been written.")
 	}
 	t.writers = append(t.writers, w)
 	return t