playground: Logger lint.

Removed redundant *ln() functions from the logger and redundant trailing newlines from log messages.
Reverted the logger to defaulting to stdout when not connected to syslog.
Removed logger from libraries used by builder.
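
For illustration, the shape of the cleanup at call sites (both examples are
taken from the diff below; the log package is v.io/x/playground/lib/log):

  // Before: *ln() variants and explicit "\n" in format strings.
  log.Debugln("Got valid compile request.")
  log.Debugf("Serving %s\n", *address)

  // After: Print-style functions only, no trailing newlines.
  log.Debug("Got valid compile request.")
  log.Debugf("Serving %s", *address)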

Change-Id: I48b7481561c7535b53c2981a0147d543bc4f0b0d
diff --git a/go/src/v.io/x/playground/builder/main.go b/go/src/v.io/x/playground/builder/main.go
index 6a1670d..05fe76f 100644
--- a/go/src/v.io/x/playground/builder/main.go
+++ b/go/src/v.io/x/playground/builder/main.go
@@ -352,7 +352,7 @@
 		case "js":
 			return f.startJs()
 		default:
-			return fmt.Errorf("Cannot run file: %v", f.Name)
+			return fmt.Errorf("Cannot run file %q", f.Name)
 		}
 	}()
 	if err != nil {
diff --git a/go/src/v.io/x/playground/compilerd/compile.go b/go/src/v.io/x/playground/compilerd/compile.go
index 0a37eca..d205196 100644
--- a/go/src/v.io/x/playground/compilerd/compile.go
+++ b/go/src/v.io/x/playground/compilerd/compile.go
@@ -98,7 +98,7 @@
 		// The response is hard limited to 2*maxSize: maxSize for builder stdout,
 		// and another maxSize for compilerd error and status messages.
 		return event.NewResponseEventSink(lib.NewLimitedWriter(w, 2*(*maxSize), lib.DoOnce(func() {
-			log.Debugln("Hard response size limit reached.")
+			log.Error("Hard response size limit reached.")
 		})), !wantDebug)
 	}
 
@@ -108,7 +108,7 @@
 		return
 	}
 
-	log.Debugln("Got valid compile request.")
+	log.Debug("Got valid compile request.")
 
 	// Hash the body and see if it's been cached. If so, return the cached
 	// response status and body.
@@ -119,11 +119,11 @@
 		if cachedResponseStruct, ok := cr.(cachedResponse); ok {
 			res := openResponse(cachedResponseStruct.Status)
 			event.Debug(res, "Sending cached response")
-			log.Debugln("Sending cached response.")
+			log.Debug("Sending cached response.")
 			res.Write(cachedResponseStruct.Events...)
 			return
 		} else {
-			log.Panicf("Invalid cached response: %v\n", cr)
+			log.Panicf("Invalid cached response: %v", cr)
 		}
 	}
 
@@ -161,19 +161,19 @@
 			// If the client disconnects before job finishes, cancel the job.
 			// If job has already started, the job will finish and the results
 			// will be cached.
-			log.Debugln("Client disconnected. Cancelling job.")
+			log.Debug("Client disconnected. Cancelling job.")
 			job.Cancel()
 		case result := <-resultChan:
 			if result.Success {
 				event.Debug(res, "Caching response")
-				log.Debugln("Caching response.")
+				log.Debug("Caching response.")
 				cache.Add(requestBodyHash, cachedResponse{
 					Status: http.StatusOK,
 					Events: result.Events,
 				})
 			} else {
 				event.Debug(res, "Internal errors encountered, not caching response.")
-				log.Warnln("Internal errors encountered, not caching response.")
+				log.Warn("Internal errors encountered, not caching response.")
 			}
 			return
 		}
diff --git a/go/src/v.io/x/playground/compilerd/jobqueue/jobqueue.go b/go/src/v.io/x/playground/compilerd/jobqueue/jobqueue.go
index c87baaf..29810d9 100644
--- a/go/src/v.io/x/playground/compilerd/jobqueue/jobqueue.go
+++ b/go/src/v.io/x/playground/compilerd/jobqueue/jobqueue.go
@@ -99,7 +99,7 @@
 func (j *Job) Cancel() {
 	j.mu.Lock()
 	defer j.mu.Unlock()
-	log.Debugf("Cancelling job %v.\n", j.id)
+	log.Debugf("Cancelling job %v.", j.id)
 	j.cancelled = true
 }
 
@@ -125,7 +125,7 @@
 var _ = Dispatcher((*dispatcherImpl)(nil))
 
 func NewDispatcher(workers int, jobQueueCap int) Dispatcher {
-	log.Debugf("Creating new dispatcher with %v workers and %v queue capacity.\n", workers, jobQueueCap)
+	log.Debugf("Creating new dispatcher with %v workers and %v queue capacity.", workers, jobQueueCap)
 	d := &dispatcherImpl{
 		jobQueue: make(chan *Job, jobQueueCap),
 		stopped:  make(chan bool),
@@ -138,7 +138,7 @@
 // start starts a given number of workers, then reads from the jobQueue and
 // assigns jobs to free workers.
 func (d *dispatcherImpl) start(num int) {
-	log.Debugf("Dispatcher starting.\n")
+	log.Debug("Dispatcher starting.")
 
 	// Workers are published on the workerQueue when they are free.
 	workerQueue := make(chan *worker, num)
@@ -167,18 +167,18 @@
 					cancelled := job.cancelled
 					job.mu.Unlock()
 					if cancelled {
-						log.Debugf("Dispatcher encountered cancelled job %v, rejecting.\n", job.id)
+						log.Debugf("Dispatcher encountered cancelled job %v, rejecting.", job.id)
 						job.resultChan <- Result{
 							Success: false,
 							Events:  nil,
 						}
 						workerQueue <- worker
 					} else {
-						log.Debugf("Dispatching job %v to worker %v.\n", job.id, worker.id)
+						log.Debugf("Dispatching job %v to worker %v.", job.id, worker.id)
 						d.wg.Add(1)
 						go func() {
 							job.resultChan <- worker.run(job)
-							log.Debugf("Job %v finished on worker %v.\n", job.id, worker.id)
+							log.Debugf("Job %v finished on worker %v.", job.id, worker.id)
 							d.wg.Done()
 							workerQueue <- worker
 						}()
@@ -187,19 +187,19 @@
 			}
 		}
 
-		log.Debugf("Dispatcher stopped.\n")
+		log.Debug("Dispatcher stopped.")
 
 		// Dispatcher stopped, treat all remaining jobs as cancelled.
 		for {
 			select {
 			case job := <-d.jobQueue:
-				log.Debugf("Dispatcher is stopped, rejecting job %v.\n", job.id)
+				log.Debugf("Dispatcher is stopped, rejecting job %v.", job.id)
 				job.resultChan <- Result{
 					Success: false,
 					Events:  nil,
 				}
 			default:
-				log.Debugf("Dispatcher job queue drained.\n")
+				log.Debug("Dispatcher job queue drained.")
 				d.wg.Done()
 				return
 			}
@@ -214,7 +214,7 @@
 // jobs, rather than rejecting them.  Or, put logic in the client to retry
 // cancelled jobs.
 func (d *dispatcherImpl) Stop() {
-	log.Debugf("Stopping dispatcher.\n")
+	log.Debug("Stopping dispatcher.")
 	d.stopped <- true
 
 	// Wait for workers to finish their current jobs.
@@ -315,13 +315,14 @@
 		cmdKill()
 	}
 	systemLimitCallback := func() {
+		log.Warn(j.id, " builder stderr output too large, killing.")
 		erroredOut = true
 		cmdKill()
 	}
 	userErrorCallback := func(err error) {
 		// A relay error can result from unparseable JSON caused by a builder bug
 		// or a malicious exploit inside Docker. Panicking could lead to a DoS.
-		log.Errorln(j.id, "builder stdout relay error:", err)
+		log.Error(j.id, " builder stdout relay error: ", err)
 		erroredOut = true
 		cmdKill()
 	}
@@ -377,7 +378,7 @@
 	// Log builder internal errors, if any.
 	// TODO(ivanpi): Prevent caching? Report to client if debug requested?
 	if errBuffer.Len() > 0 {
-		log.Warnln(j.id, "builder stderr:", errBuffer.String())
+		log.Warn(j.id, " builder stderr: ", errBuffer.String())
 	}
 
 	event.Debug(j.res, "Response finished")
diff --git a/go/src/v.io/x/playground/compilerd/main.go b/go/src/v.io/x/playground/compilerd/main.go
index b2c53f7..fc9ce91 100644
--- a/go/src/v.io/x/playground/compilerd/main.go
+++ b/go/src/v.io/x/playground/compilerd/main.go
@@ -77,7 +77,7 @@
 func main() {
 	log.InitSyslogLoggers()
 
-	log.Debugf("Compilerd starting.\n")
+	log.Debug("Compilerd starting.")
 	flag.Parse()
 
 	if err := seedRNG(); err != nil {
@@ -100,7 +100,7 @@
 	serveMux := http.NewServeMux()
 
 	if *sqlConf != "" {
-		log.Debugf("Using sql config %v\n", *sqlConf)
+		log.Debugf("Using sql config %q", *sqlConf)
 
 		// Parse SQL configuration file and set up TLS.
 		dbConfig, err := dbutil.ActivateSqlConfigFromFile(*sqlConf)
@@ -117,7 +117,7 @@
 		serveMux.HandleFunc("/load", handlerLoad)
 		serveMux.HandleFunc("/save", handlerSave)
 	} else {
-		log.Debugln("No sql config provided. Disabling /load and /save routes.")
+		log.Debug("No sql config provided. Disabling /load and /save routes.")
 
 		// Return 501 Not Implemented for the /load and /save routes.
 		serveMux.HandleFunc("/load", handlerNotImplemented)
@@ -127,7 +127,7 @@
 	serveMux.HandleFunc("/compile", c.handlerCompile)
 	serveMux.HandleFunc("/healthz", handlerHealthz)
 
-	log.Debugf("Serving %s\n", *address)
+	log.Debugf("Serving %s", *address)
 	s := http.Server{
 		Addr:     *address,
 		Handler:  serveMux,
@@ -146,15 +146,15 @@
 
 	// Or if the time limit expires.
 	deadline := time.After(limit)
-	log.Debugln("Exiting at", time.Now().Add(limit))
+	log.Debug("Exiting at ", time.Now().Add(limit))
 Loop:
 	for {
 		select {
 		case <-deadline:
-			log.Debugln("Deadline expired, exiting in at most", exitDelay)
+			log.Debug("Deadline expired, exiting in at most ", exitDelay)
 			break Loop
 		case <-term:
-			log.Debugln("Got SIGTERM, exiting in at most", exitDelay)
+			log.Debug("Got SIGTERM, exiting in at most ", exitDelay)
 			break Loop
 		}
 	}
diff --git a/go/src/v.io/x/playground/compilerd/storage.go b/go/src/v.io/x/playground/compilerd/storage.go
index 52ab94c..b9f5ae1 100644
--- a/go/src/v.io/x/playground/compilerd/storage.go
+++ b/go/src/v.io/x/playground/compilerd/storage.go
@@ -44,7 +44,7 @@
 		storageError(w, http.StatusNotFound, "No data found for provided id.")
 		return
 	} else if err != nil {
-		storageInternalError(w, "Error getting bundleLink for id", bId, ":", err)
+		storageInternalError(w, "Error getting bundleLink for id ", bId, ": ", err)
 		return
 	}
 
@@ -77,7 +77,7 @@
 
 	bLink, bData, err := storage.StoreBundleLinkAndData(requestBody)
 	if err != nil {
-		storageInternalError(w, err)
+		storageInternalError(w, "Error storing bundle: ", err)
 		return
 	}
 
@@ -118,7 +118,7 @@
 // Logs error internally and sends non-specific error response to client.
 func storageInternalError(w http.ResponseWriter, v ...interface{}) {
 	if len(v) > 0 {
-		log.Errorln(v...)
+		log.Error(v...)
 	}
 	storageError(w, http.StatusInternalServerError, "Internal error, please retry.")
 }
diff --git a/go/src/v.io/x/playground/compilerd/storage/model.go b/go/src/v.io/x/playground/compilerd/storage/model.go
index f0c711c..084f44b 100644
--- a/go/src/v.io/x/playground/compilerd/storage/model.go
+++ b/go/src/v.io/x/playground/compilerd/storage/model.go
@@ -143,7 +143,7 @@
 		// Generate a random id for the bundle link.
 		id, err := randomLink(bHash)
 		if err != nil {
-			return fmt.Errorf("Error creating link id: %v", err)
+			return fmt.Errorf("error creating link id: %v", err)
 		}
 
 		// Check if bundle link with this id already exists in DB.
@@ -151,14 +151,14 @@
 			// Bundle was found. Retry with new id.
 			return errRetryTransaction
 		} else if err != ErrNotFound {
-			return fmt.Errorf("Error getting bundle link: %v", err)
+			return fmt.Errorf("error checking for bundle link: %v", err)
 		}
 
 		// Check if bundle data with this hash already exists in DB.
 		bData, err = getBundleDataByHash(tx, bHash)
 		if err != nil {
 			if err != ErrNotFound {
-				return fmt.Errorf("Error getting bundle data: %v", err)
+				return fmt.Errorf("error checking for bundle data: %v", err)
 			}
 
 			// Bundle does not exist in DB. Store it.
@@ -167,7 +167,7 @@
 				Json: string(json),
 			}
 			if err = storeBundleData(tx, bData); err != nil {
-				return fmt.Errorf("Error storing bundle data: %v", err)
+				return fmt.Errorf("error storing bundle data: %v", err)
 			}
 		}
 
@@ -177,7 +177,7 @@
 			Hash: bHash,
 		}
 		if err = storeBundleLink(tx, bLink); err != nil {
-			return fmt.Errorf("Error storing bundle link: %v", err)
+			return fmt.Errorf("error storing bundle link: %v", err)
 		}
 
 		return nil
diff --git a/go/src/v.io/x/playground/lib/log/log.go b/go/src/v.io/x/playground/lib/log/log.go
index 1b8dd08..70c8032 100644
--- a/go/src/v.io/x/playground/lib/log/log.go
+++ b/go/src/v.io/x/playground/lib/log/log.go
@@ -25,9 +25,9 @@
 
 func init() {
 	// By default, the loggers only log to stdout.
-	ErrorLogger = newLogger(os.Stdout)
-	WarnLogger = newLogger(os.Stdout)
-	DebugLogger = newLogger(os.Stdout)
+	ErrorLogger = newLogger(os.Stdout, "ERROR: ")
+	WarnLogger = newLogger(os.Stdout, "WARN: ")
+	DebugLogger = newLogger(os.Stdout, "")
 
 	// Default logger should also log to stdout.
 	log.SetOutput(os.Stdout)
@@ -36,12 +36,12 @@
 // InitSyslogLoggers creates loggers that will log to syslog as well as stdout.
 // It will panic if syslog is unavailable.
 func InitSyslogLoggers() {
-	ErrorLogger = newLogger(newSyslogStdoutWriter(syslog.LOG_ERR))
-	WarnLogger = newLogger(newSyslogStdoutWriter(syslog.LOG_WARNING))
-	DebugLogger = newLogger(newSyslogStdoutWriter(syslog.LOG_DEBUG))
+	ErrorLogger = newLogger(newSyslogStdoutWriter(syslog.LOG_ERR), "ERROR: ")
+	WarnLogger = newLogger(newSyslogStdoutWriter(syslog.LOG_WARNING), "WARN: ")
+	DebugLogger = newLogger(newSyslogStdoutWriter(syslog.LOG_DEBUG), "")
 
 	// Default logger should also log to syslog and stdout.
-	log.SetOutput(newSyslogStdoutWriter(syslog.LOG_DEBUG))
+	log.SetOutput(newSyslogStdoutWriter(syslog.LOG_WARNING))
 }
 
 var (
@@ -59,10 +59,6 @@
 	DebugLogger.Printf(s, args...)
 }
 
-func Debugln(args ...interface{}) {
-	DebugLogger.Println(args...)
-}
-
 // Warn functions use WarnLogger.
 func Warn(args ...interface{}) {
 	WarnLogger.Print(args...)
@@ -72,10 +68,6 @@
 	WarnLogger.Printf(s, args...)
 }
 
-func Warnln(args ...interface{}) {
-	WarnLogger.Println(args...)
-}
-
 // Error functions use ErrorLogger.
 func Error(args ...interface{}) {
 	ErrorLogger.Print(args...)
@@ -85,10 +77,6 @@
 	ErrorLogger.Printf(s, args...)
 }
 
-func Errorln(args ...interface{}) {
-	ErrorLogger.Println(args...)
-}
-
 func Panic(args ...interface{}) {
 	ErrorLogger.Panic(args...)
 }
@@ -97,13 +85,9 @@
 	ErrorLogger.Panicf(s, args...)
 }
 
-func Panicln(args ...interface{}) {
-	ErrorLogger.Panicln(args...)
-}
-
 // Helper method to create a logger with given writer.
-func newLogger(w io.Writer) *log.Logger {
-	return log.New(w, "", log.LstdFlags)
+func newLogger(w io.Writer, prefix string) *log.Logger {
+	return log.New(w, prefix, log.LstdFlags)
 }
 
 // Helper method to create a writer that writes to syslog and stdout.
@@ -111,6 +95,6 @@
 	if syslogWriter, err := syslog.New(level|syslog.LOG_USER, "playground"); err != nil {
 		panic(fmt.Errorf("Error connecting to syslog: %v", err))
 	} else {
-		return io.MultiWriter(io.MultiWriter(syslogWriter, os.Stdout))
+		return io.MultiWriter(syslogWriter, os.Stdout)
 	}
 }
diff --git a/go/src/v.io/x/playground/lib/multi_writer.go b/go/src/v.io/x/playground/lib/multi_writer.go
index 69f5cb7..015d64e 100644
--- a/go/src/v.io/x/playground/lib/multi_writer.go
+++ b/go/src/v.io/x/playground/lib/multi_writer.go
@@ -12,8 +12,6 @@
 import (
 	"io"
 	"sync"
-
-	"v.io/x/playground/lib/log"
 )
 
 // Initialize using NewMultiWriter.
@@ -34,7 +32,7 @@
 	t.mu.Lock()
 	defer t.mu.Unlock()
 	if t.wrote {
-		log.Panic("Tried to add writer after data has been written.")
+		panic("Tried to add writer after data has been written.")
 	}
 	t.writers = append(t.writers, w)
 	return t