Merge client from www into release.go.playground/client.
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..e315280
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,5 @@
+builder/gitcookies
+builder/hgrc
+
+# Vanadium
+/.v23
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..d4e5a64
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,67 @@
+FROM ubuntu
+RUN /usr/sbin/useradd -d /home/playground -m playground
+
+# Install various prereqs.
+RUN apt-get update
+RUN apt-get install -y curl g++ git libc6-i386 make mercurial python
+
+# Install Go. Note, the apt-get "golang" target is too old.
+RUN (cd /tmp; curl -O https://storage.googleapis.com/golang/go1.4.linux-amd64.tar.gz)
+RUN tar -C /usr/local -xzf /tmp/go1.4.linux-amd64.tar.gz
+ENV PATH /usr/local/go/bin:$PATH
+
+ENV HOME /root
+ENV VANADIUM_ROOT /usr/local/vanadium
+ENV GOPATH /home/playground:$VANADIUM_ROOT/release/go
+ENV VDLPATH $GOPATH
+
+# Setup Vanadium and Vanadium profiles.
+# Note: This will be cached! If you want to re-build the docker image using
+# fresh Vanadium code, you must pass "--no-cache" to the docker build command.
+# See README.md.
+ADD builder/gitcookies /root/.gitcookies
+RUN git config --global http.cookiefile ~/.gitcookies
+# SECURITY(review): the plaintext password below, and the gitcookies file ADDed
+# above, are baked into intermediate image layers and remain visible via
+# "docker history" even after the "rm" steps. Move these to build-time secrets.
+RUN curl -u vanadium:D6HT]P,LrJ7e https://dev.v.io/noproxy/vanadium-setup.sh | bash
+RUN rm /root/.gitcookies
+ADD builder/hgrc /root/.hgrc
+RUN $VANADIUM_ROOT/bin/v23 profile setup web
+RUN rm /root/.hgrc
+
+# Install the release/javascript/core library.
+# TODO(nlacasse): Switch to "npm install -g veyron" once release/javascript/core is publicly
+# visible in NPM.
+WORKDIR /usr/local/vanadium/release/javascript/core
+# NOTE(sadovsky): NPM is flaky. If any of the NPM commands below fail, simply
+# retry them.
+RUN $VANADIUM_ROOT/environment/cout/node/bin/npm install --production
+RUN $VANADIUM_ROOT/environment/cout/node/bin/npm link
+WORKDIR /home/playground
+RUN $VANADIUM_ROOT/environment/cout/node/bin/npm link veyron
+
+# Install Vanadium Go dependencies.
+WORKDIR /usr/local/vanadium/release
+ENV PATH $VANADIUM_ROOT/release/go/bin:$VANADIUM_ROOT/bin:$PATH
+RUN v23 go install v.io/core/...
+RUN v23 go install v.io/playground/...
+
+# Uncomment the following lines to install a version of the builder tool using
+# your local version of the code. This is useful when developing and testing
+# local changes.
+#RUN rm $VANADIUM_ROOT/release/go/bin/builder
+#RUN rm -rf $VANADIUM_ROOT/release/go/src/v.io/playground/builder/
+#RUN rm -rf $VANADIUM_ROOT/release/go/src/v.io/playground/lib/
+#ADD builder/ $VANADIUM_ROOT/release/go/src/v.io/playground/builder/
+#ADD lib/ $VANADIUM_ROOT/release/go/src/v.io/playground/lib/
+#RUN v23 go install v.io/playground/builder/...
+
+# Copy proxyd's main.go to ./proxyd_main.go, then uncomment the following
+# lines to install a version of proxyd (used by the builder tool) using your
+# local version of the code. This is useful when developing and testing local
+# changes.
+#RUN rm $VANADIUM_ROOT/release/go/bin/proxyd
+#ADD proxyd_main.go $VANADIUM_ROOT/release/go/src/v.io/core/veyron/services/proxy/proxyd/main.go
+#RUN v23 go install v.io/core/veyron/services/proxy/proxyd
+
+USER playground
+WORKDIR /home/playground
+ENTRYPOINT /usr/local/vanadium/release/go/bin/builder
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..3bcf818
--- /dev/null
+++ b/README.md
@@ -0,0 +1,57 @@
+# Building a Docker image and running the playground server locally
+
+## Docker setup
+
+Install Docker:
+
+* Goobuntu: http://go/installdocker
+* OS X: https://github.com/boot2docker/osx-installer/releases
+
+On Goobuntu, we recommend overriding the default graph dir (`/var/lib/docker`)
+to avoid filling up the root filesystem partition, which is quite small. To do
+so, add the following line to your `/etc/default/docker`:
+
+ DOCKER_OPTS="${DOCKER_OPTS} -g /usr/local/google/docker"
+
+Start (or restart) the Docker daemon:
+
+ $ sudo service docker restart
+
+Build the playground Docker image (this will take a while...):
+
+ $ cp ~/.gitcookies $VANADIUM_ROOT/release/go/src/v.io/playground/builder/gitcookies
+ $ cp ~/.hgrc $VANADIUM_ROOT/release/go/src/v.io/playground/builder/hgrc
+ $ sudo docker build -t playground $VANADIUM_ROOT/release/go/src/v.io/playground/.
+
+Note: If you want to ensure an up-to-date version of Vanadium is installed in
+the Docker image, run the above command with the "--no-cache" flag.
+
+The 'docker build' command above will compile builder from the main Vanadium
+repository. If you want to use local code instead, open Dockerfile and
+uncomment marked lines before running the command.
+
+Test your image (without running compilerd):
+
+    $ sudo docker run -i playground < /path/to/veyron-www/content/playgrounds/code/fortune/ex0-go/bundle.json
+
+## Running the playground server (compilerd)
+
+Install the playground binaries:
+
+ $ v23 go install v.io/playground/...
+
+Run the compiler binary as root:
+
+ $ sudo $VANADIUM_ROOT/release/go/bin/compilerd --shutdown=false --address=localhost:8181
+
+Or, run it without Docker (for faster iterations during development):
+
+ $ cd $(mktemp -d "/tmp/XXXXXXXX")
+ $ PATH=$VANADIUM_ROOT/release/go/bin:$PATH compilerd --shutdown=false --address=localhost:8181 --use-docker=false
+
+The server should now be running at http://localhost:8181 and responding to
+compile requests at http://localhost:8181/compile.
+
+Add `?pgaddr=//localhost:8181` to any veyron-www page to make its embedded
+playgrounds talk to your server. Add `?debug=1` to see debug info from the
+builder.
diff --git a/builder/credentials.go b/builder/credentials.go
new file mode 100644
index 0000000..7bcc1f6
--- /dev/null
+++ b/builder/credentials.go
@@ -0,0 +1,108 @@
+// Functions to create and bless principals.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "os/exec"
+ "path"
+
+ "v.io/core/veyron/lib/flags/consts"
+)
+
// credentials describes a principal to create for the playground run: its
// name, an optional blesser principal, an optional blessing duration, and the
// basenames of the code files that should run as this principal.
type credentials struct {
	Name string
	Blesser string
	Duration string
	Files []string
}

// create initializes the principal on disk, then either self-blesses with an
// expiry (Duration set, no Blesser) or obtains a blessing from Blesser.
func (c credentials) create() error {
	if err := c.init(); err != nil {
		return err
	}
	if c.Blesser == "" && c.Duration != "" {
		return c.initWithDuration()
	}
	if c.Blesser != "" {
		return c.getblessed()
	}
	return nil
}

// init creates the principal directory "credentials/<Name>" via the principal
// tool. Idempotent: an existing directory is left untouched.
func (c credentials) init() error {
	dir := path.Join("credentials", c.Name)
	if _, err := os.Stat(dir); os.IsNotExist(err) {
		return c.toolCmd("", "create", dir, c.Name).Run()
	}
	return nil
}

// initWithDuration self-blesses the principal for c.Duration and installs the
// result as both the default blessing and the blessing shared with all peers.
func (c credentials) initWithDuration() error {
	// (1) principal blessself --for=<duration> <c.Name> | principal store setdefault -
	// (2) principal store default | principal store set - ...
	if err := c.pipe(c.toolCmd(c.Name, "blessself", "--for", c.Duration),
		c.toolCmd(c.Name, "store", "setdefault", "-")); err != nil {
		return err
	}
	if err := c.pipe(c.toolCmd(c.Name, "store", "default"),
		c.toolCmd(c.Name, "store", "set", "-", "...")); err != nil {
		return err
	}
	return nil
}

// getblessed has c.Blesser bless this principal (default duration 1h) and
// installs the blessing as default and as the blessing shared with all peers.
func (c credentials) getblessed() error {
	// (1) VEYRON_CREDENTIALS=<c.Blesser> principal bless <c.Name> --for=<c.Duration> <c.Name> | VEYRON_CREDENTIALS=<c.Name> principal store setdefault -
	// (2) principal store default | principal store set - ...
	duration := c.Duration
	if duration == "" {
		duration = "1h"
	}
	if err := c.pipe(c.toolCmd(c.Blesser, "bless", "--for", duration, path.Join("credentials", c.Name), c.Name),
		c.toolCmd(c.Name, "store", "setdefault", "-")); err != nil {
		return err
	}
	if err := c.pipe(c.toolCmd(c.Name, "store", "default"),
		c.toolCmd(c.Name, "store", "set", "-", "...")); err != nil {
		return err
	}
	return nil
}

// pipe runs "from" to completion, buffering its entire stdout in memory, then
// feeds that buffer to "to" as stdin. Note this is sequential, not a live
// pipe, which is fine for the small outputs of the principal tool.
func (c credentials) pipe(from, to *exec.Cmd) error {
	buf := new(bytes.Buffer)
	from.Stdout = buf
	to.Stdin = buf
	if err := from.Run(); err != nil {
		return fmt.Errorf("%v %v: %v", from.Path, from.Args, err)
	}
	if err := to.Run(); err != nil {
		return fmt.Errorf("%v %v: %v", to.Path, to.Args, err)
	}
	return nil
}

// toolCmd builds a "principal" command; if "as" is non-empty, the command runs
// with VEYRON_CREDENTIALS pointing at that principal's directory.
func (c credentials) toolCmd(as string, args ...string) *exec.Cmd {
	cmd := makeCmd("<principal>", false, "principal", args...)
	if as != "" {
		cmd.Env = append(cmd.Env, fmt.Sprintf("%v=%s", consts.VeyronCredentials, path.Join("credentials", as)))
	}
	// Set Stdout to /dev/null so that output does not leak into the
	// playground output. If the output is needed, it can be overridden by
	// clients of this method. (A nil Stdout makes os/exec use /dev/null.)
	cmd.Stdout = nil
	return cmd
}

// createCredentials creates every principal in creds, stopping at the first
// failure.
func createCredentials(creds []credentials) error {
	debug("Generating credentials")
	for _, c := range creds {
		if err := c.create(); err != nil {
			return err
		}
	}
	return nil
}
diff --git a/builder/main.go b/builder/main.go
new file mode 100644
index 0000000..8ab5c5b
--- /dev/null
+++ b/builder/main.go
@@ -0,0 +1,411 @@
+// Compiles and runs code for the Vanadium playground. Code is passed via
+// os.Stdin as a JSON encoded request struct.
+
+// NOTE(nlacasse): We use log.Panic() instead of log.Fatal() everywhere in this
+// file. We do this because log.Panic calls panic(), which allows any deferred
+// function to run. In particular, this will cause the mounttable and proxy
+// processes to be killed in the event of a compilation error. log.Fatal, on
+// the other hand, calls os.Exit(1), which does not call deferred functions,
+// and will leave proxy and mounttable processes running. This is not a big
+// deal for production environment, because the Docker instance gets cleaned up
+// after each run, but during development and testing these extra processes can
+// cause issues.
+
+package main
+
+import (
+ "encoding/json"
+ "flag"
+ "fmt"
+ "go/parser"
+ "go/token"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+
+ "v.io/core/veyron/lib/flags/consts"
+ "v.io/playground/lib"
+ "v.io/playground/lib/event"
+)
+
+const runTimeout = 3 * time.Second
+
var (
	verbose = flag.Bool("v", true, "Whether to output debug messages.")

	includeServiceOutput = flag.Bool("includeServiceOutput", false, "Whether to stream service (mounttable, wspr, proxy) output to clients.")

	includeV23Env = flag.Bool("includeV23Env", false, "Whether to log the output of \"v23 env\" before compilation.")

	// Sink for writing events (debug and run output) to stdout as JSON, one event per line.
	out event.Sink

	// Whether we have stopped execution of running files.
	// Guarded by mu (see stopAll and codeFile.run).
	stopped = false

	mu sync.Mutex
)

// Type of data sent to the builder on stdin. Input should contain Files. We
// look for a file whose Name ends with .id, and parse that into credentials.
//
// TODO(ribrdb): Consider moving credentials parsing into the http server.
type request struct {
	Files []*codeFile
	Credentials []credentials
}

// Type of file data. Only Name and Body should be initially set. The other
// fields are added as the file is parsed.
type codeFile struct {
	Name string
	Body string
	// Language the file is written in. Inferred from the file extension.
	lang string
	// Credentials to associate with the file's process.
	credentials string
	// The executable flag denotes whether the file should be executed as
	// part of the playground run. This is currently used only for
	// javascript files, and go files with package "main".
	executable bool
	// Name of the binary (for go files).
	binaryName string
	// Running cmd process for the file.
	cmd *exec.Cmd
	// Any subprocesses that are needed to support running the file (e.g. wspr).
	subprocs []*os.Process
	// The index of the file in the request.
	index int
}

// exit is the message delivered when a file's process terminates; err is nil
// on a clean exit.
type exit struct {
	name string
	err error
}
+
// debug emits a debug event to the global event sink.
func debug(args ...interface{}) {
	event.Debug(out, args...)
}

// panicOnError panics via log.Panic on any non-nil error, so that deferred
// cleanup (killing mounttable/proxy) still runs — see the NOTE at the top of
// this file.
func panicOnError(err error) {
	if err != nil {
		log.Panic(err)
	}
}

// logV23Env runs "v23 env" (streaming its output as events) when the
// -includeV23Env flag is set; otherwise it is a no-op.
func logV23Env() error {
	if *includeV23Env {
		return makeCmd("<environment>", false, "v23", "env").Run()
	}
	return nil
}
+
+func parseRequest(in io.Reader) (r request, err error) {
+ debug("Parsing input")
+ data, err := ioutil.ReadAll(in)
+ if err == nil {
+ err = json.Unmarshal(data, &r)
+ }
+ m := make(map[string]*codeFile)
+ for i := 0; i < len(r.Files); i++ {
+ f := r.Files[i]
+ f.index = i
+ if path.Ext(f.Name) == ".id" {
+ err = json.Unmarshal([]byte(f.Body), &r.Credentials)
+ if err != nil {
+ return
+ }
+ r.Files = append(r.Files[:i], r.Files[i+1:]...)
+ i--
+ } else {
+ switch path.Ext(f.Name) {
+ case ".js":
+ // JavaScript files are always executable.
+ f.executable = true
+ f.lang = "js"
+ case ".go":
+ // Go files will be marked as executable if their package name is
+ // "main". This happens in the "maybeSetExecutableAndBinaryName"
+ // function.
+ f.lang = "go"
+ case ".vdl":
+ f.lang = "vdl"
+ default:
+ return r, fmt.Errorf("Unknown file type: %q", f.Name)
+ }
+
+ basename := path.Base(f.Name)
+ if _, ok := m[basename]; ok {
+ return r, fmt.Errorf("Two files with same basename: %q", basename)
+ }
+ m[basename] = f
+ }
+ }
+ if len(r.Credentials) == 0 {
+ // Run everything with the same credentials if none are specified.
+ r.Credentials = append(r.Credentials, credentials{Name: "default"})
+ for _, f := range r.Files {
+ f.credentials = "default"
+ }
+ } else {
+ for _, creds := range r.Credentials {
+ for _, basename := range creds.Files {
+ // Check that the file associated with the credentials exists. We ignore
+ // cases where it doesn't because the test .id files get used for
+ // multiple different code files. See testdata/ids/authorized.id, for
+ // example.
+ if m[basename] != nil {
+ m[basename].credentials = creds.Name
+ }
+ }
+ }
+ }
+ return
+}
+
+func writeFiles(files []*codeFile) error {
+ debug("Writing files")
+ for _, f := range files {
+ if err := f.write(); err != nil {
+ return fmt.Errorf("Error writing %s: %v", f.Name, err)
+ }
+ }
+ return nil
+}
+
// If compilation failed due to user error (bad input), returns badInput=true
// and err=nil. Only internal errors return non-nil err.
// Skips compilation entirely when no .go/.vdl files are present.
func compileFiles(files []*codeFile) (badInput bool, err error) {
	needToCompile := false
	for _, f := range files {
		if f.lang == "vdl" || f.lang == "go" {
			needToCompile = true
			break
		}
	}
	if !needToCompile {
		return
	}

	debug("Compiling files")
	pwd, err := os.Getwd()
	if err != nil {
		return
	}
	// Prepend the working directory (where writeFiles placed the sources) so
	// the Go and VDL toolchains can find them. Note: mutates process-wide env.
	os.Setenv("GOPATH", pwd+":"+os.Getenv("GOPATH"))
	os.Setenv("VDLPATH", pwd+":"+os.Getenv("VDLPATH"))
	// We set isService=false for compilation because "go install" only produces
	// output on error, and we always want clients to see such errors.
	err = makeCmd("<compile>", false, "v23", "go", "install", "./...").Run()
	// TODO(ivanpi): We assume *exec.ExitError results from uncompilable input
	// files; other cases can result from bugs in playground backend or compiler
	// itself.
	if _, ok := err.(*exec.ExitError); ok {
		badInput, err = true, nil
	}
	return
}
+
// runFiles starts every executable file, then waits until all of them have
// exited or the global runTimeout elapses. Exit statuses are streamed to the
// client as events. The first exit (or the timeout) triggers stopAll, so one
// program finishing ends the whole run.
func runFiles(files []*codeFile) {
	debug("Running files")
	// Note: the local "exit" channel shadows the exit struct type above.
	exit := make(chan exit)
	running := 0
	for _, f := range files {
		if f.executable {
			f.run(exit)
			running++
		}
	}

	timeout := time.After(runTimeout)

	for running > 0 {
		select {
		case <-timeout:
			panicOnError(out.Write(event.New("", "stderr", "Ran for too long; terminated.")))
			// running is not decremented here: stopAll signals the processes,
			// whose exits then arrive on the exit channel and drain the loop.
			stopAll(files)
		case status := <-exit:
			if status.err == nil {
				panicOnError(out.Write(event.New(status.name, "stdout", "Exited cleanly.")))
			} else {
				panicOnError(out.Write(event.New(status.name, "stderr", fmt.Sprintf("Exited with error: %v", status.err))))
			}
			running--
			stopAll(files)
		}
	}
}
+
+func stopAll(files []*codeFile) {
+ mu.Lock()
+ defer mu.Unlock()
+ if !stopped {
+ stopped = true
+ for _, f := range files {
+ f.stop()
+ }
+ }
+}
+
+func (f *codeFile) maybeSetExecutableAndBinaryName() error {
+ debug("Parsing package from", f.Name)
+ file, err := parser.ParseFile(token.NewFileSet(), f.Name,
+ strings.NewReader(f.Body), parser.PackageClauseOnly)
+ if err != nil {
+ return err
+ }
+ pkg := file.Name.String()
+ if pkg == "main" {
+ f.executable = true
+ basename := path.Base(f.Name)
+ f.binaryName = basename[:len(basename)-len(path.Ext(basename))]
+ }
+ return nil
+}
+
+func (f *codeFile) write() error {
+ debug("Writing file", f.Name)
+ if f.lang == "go" || f.lang == "vdl" {
+ if err := f.maybeSetExecutableAndBinaryName(); err != nil {
+ return err
+ }
+ }
+ // Retain the original file tree structure.
+ if err := os.MkdirAll(path.Dir(f.Name), 0755); err != nil {
+ return err
+ }
+ return ioutil.WriteFile(f.Name, []byte(f.Body), 0644)
+}
+
// startJs starts a dedicated wsprd for this file, points the WSPR env var at
// it, and then runs the file under node.
// NOTE(review): os.Setenv mutates process-wide state; this is safe only
// because starts are serialized under mu in run() — confirm if that changes.
func (f *codeFile) startJs() error {
	wsprProc, wsprPort, err := startWspr(f.Name, f.credentials)
	if err != nil {
		return fmt.Errorf("Error starting wspr: %v", err)
	}
	f.subprocs = append(f.subprocs, wsprProc)
	os.Setenv("WSPR", "http://localhost:"+strconv.Itoa(wsprPort))
	node := filepath.Join(os.Getenv("VANADIUM_ROOT"), "environment", "cout", "node", "bin", "node")
	f.cmd = makeCmd(f.Name, false, node, f.Name)
	return f.cmd.Start()
}
+
+func (f *codeFile) startGo() error {
+ f.cmd = makeCmd(f.Name, false, filepath.Join("bin", f.binaryName))
+ if f.credentials != "" {
+ f.cmd.Env = append(f.cmd.Env, fmt.Sprintf("%v=%s", consts.VeyronCredentials, filepath.Join("credentials", f.credentials)))
+ }
+ return f.cmd.Start()
+}
+
// run starts the file's process (Go binary or node script). Exactly one exit
// message — including a failure to start — is eventually delivered on ch.
func (f *codeFile) run(ch chan exit) {
	debug("Running", f.Name)
	err := func() error {
		// Hold mu so starts are serialized and cannot race with stopAll.
		mu.Lock()
		defer mu.Unlock()
		if stopped {
			return fmt.Errorf("Execution has stopped; not running %s", f.Name)
		}

		switch f.lang {
		case "go":
			return f.startGo()
		case "js":
			return f.startJs()
		default:
			return fmt.Errorf("Cannot run file: %v", f.Name)
		}
	}()
	if err != nil {
		debug("Failed to start", f.Name, "-", err)
		// Use a goroutine to avoid deadlock.
		go func() {
			ch <- exit{f.Name, err}
		}()
		return
	}

	// Wait for the process to exit and send result to channel.
	go func() {
		debug("Waiting for", f.Name)
		err := f.cmd.Wait()
		debug("Done waiting for", f.Name)
		ch <- exit{f.Name, err}
	}()
}
+
+func (f *codeFile) stop() {
+ debug("Attempting to stop", f.Name)
+ if f.cmd == nil {
+ debug("Cannot stop:", f.Name, "cmd is nil")
+ } else if f.cmd.Process == nil {
+ debug("Cannot stop:", f.Name, "cmd is not nil, but cmd.Process is nil")
+ } else {
+ debug("Sending SIGTERM to", f.Name)
+ f.cmd.Process.Signal(syscall.SIGTERM)
+ }
+ for i, subproc := range f.subprocs {
+ debug("Killing subprocess", i, "for", f.Name)
+ subproc.Kill()
+ }
+}
+
// Creates a cmd whose outputs (stdout and stderr) are streamed to stdout as
// Event objects. If you want to watch the output streams yourself, add your
// own writer(s) to the MultiWriter before starting the command.
// fileName labels the emitted events. isService prefixes the stream names
// with "svc-" and suppresses them unless -includeServiceOutput is set.
func makeCmd(fileName string, isService bool, progName string, args ...string) *exec.Cmd {
	cmd := exec.Command(progName, args...)
	// Start from the current environment; callers append extra vars to cmd.Env.
	cmd.Env = os.Environ()
	stdout, stderr := lib.NewMultiWriter(), lib.NewMultiWriter()
	prefix := ""
	if isService {
		prefix = "svc-"
	}
	if !isService || *includeServiceOutput {
		stdout.Add(event.NewStreamWriter(out, fileName, prefix+"stdout"))
		stderr.Add(event.NewStreamWriter(out, fileName, prefix+"stderr"))
	}
	cmd.Stdout, cmd.Stderr = stdout, stderr
	return cmd
}
+
// main reads a JSON request from stdin, creates the requested credentials,
// starts the shared mounttable and proxy services, writes and compiles the
// request's files, and runs the executable ones. Internal errors panic (see
// the NOTE at the top of this file); user compile errors are reported as
// events and end the run cleanly.
func main() {
	flag.Parse()

	out = event.NewJsonSink(os.Stdout, !*verbose)

	r, err := parseRequest(os.Stdin)
	panicOnError(err)

	panicOnError(createCredentials(r.Credentials))

	mt, err := startMount(runTimeout)
	panicOnError(err)
	defer mt.Kill()

	proxy, err := startProxy()
	panicOnError(err)
	defer proxy.Kill()

	panicOnError(writeFiles(r.Files))

	// Best-effort debug logging; the error is intentionally ignored.
	logV23Env()

	badInput, err := compileFiles(r.Files)
	// Panic on internal error, but not on user error.
	panicOnError(err)
	if badInput {
		panicOnError(out.Write(event.New("<compile>", "stderr", "Compilation error.")))
		return
	}

	runFiles(r.Files)
}
diff --git a/builder/services.go b/builder/services.go
new file mode 100644
index 0000000..f9febc5
--- /dev/null
+++ b/builder/services.go
@@ -0,0 +1,143 @@
+// Functions to start services needed by the Vanadium playground.
+// These should never trigger program exit.
+// TODO(ivanpi): Use the modules library to start the services instead.
+
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "math/rand"
+ "os"
+ "os/exec"
+ "path"
+ "regexp"
+ "strconv"
+ "syscall"
+ "time"
+
+ "v.io/core/veyron/lib/flags/consts"
+ "v.io/playground/lib"
+)
+
var (
	// proxyName is the name under which proxyd mounts itself; wsprd instances
	// are pointed at it via -veyron.proxy.
	proxyName = "proxy"
)
+
// Note: This was copied from release/go/src/v.io/core/veyron/tools/findunusedport.
// I would like to be able to import that package directly, but it defines a
// main(), so can't be imported. An alternative solution would be to call the
// 'findunusedport' binary, but that would require starting another process and
// parsing the output. It seemed simpler to just copy the function here.
//
// findUnusedPort probes random ports in [1024, 65536) until it finds one that
// can be bound, returning that port number. Note the result is inherently
// racy: the port may be taken by another process before the caller binds it.
// Fix: the socket fd is now closed on every path; previously a failed Bind
// leaked the fd, allowing up to 1000 leaked descriptors per call.
func findUnusedPort() (int, error) {
	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
	for i := 0; i < 1000; i++ {
		port := 1024 + rnd.Int31n(64512)
		fd, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_TCP)
		if err != nil {
			continue
		}
		sa := &syscall.SockaddrInet4{Port: int(port)}
		err = syscall.Bind(fd, sa)
		// Close the fd whether or not Bind succeeded, to avoid leaking it.
		syscall.Close(fd)
		if err != nil {
			continue
		}
		return int(port), nil
	}
	return 0, fmt.Errorf("Can't find unused port.")
}
+
// startMount starts a mounttabled process, and sets the NAMESPACE_ROOT env
// variable to the mounttable's location. We run one mounttabled process for
// the entire environment.
func startMount(timeLimit time.Duration) (proc *os.Process, err error) {
	cmd := makeCmd("<mounttabled>", true, "mounttabled", "-veyron.tcp.address=127.0.0.1:0")
	// Wait (up to timeLimit) for mounttabled to log its endpoint, then parse
	// the endpoint out of the matched log line (capture group 1).
	matches, err := startAndWaitFor(cmd, timeLimit, regexp.MustCompile("Mount table .+ endpoint: (.+)\n"))
	if err != nil {
		return nil, fmt.Errorf("Error starting mounttabled: %v", err)
	}
	endpoint := matches[1]
	if endpoint == "" {
		return nil, fmt.Errorf("Failed to get mounttable endpoint")
	}
	return cmd.Process, os.Setenv(consts.NamespaceRootPrefix, endpoint)
}
+
+// startProxy starts a proxyd process. We run one proxyd process for the
+// entire environment.
+func startProxy() (proc *os.Process, err error) {
+ cmd := makeCmd("<proxyd>", true, "proxyd", "-name="+proxyName, "-address=127.0.0.1:0", "-http=")
+ err = cmd.Start()
+ if err != nil {
+ return nil, err
+ }
+ return cmd.Process, err
+}
+
// startWspr starts a wsprd process. We run one wsprd process for each
// javascript file being run. The wsprd listens on a freshly probed port
// (returned to the caller) and runs under the given credentials, if any.
func startWspr(fileName, credentials string) (proc *os.Process, port int, err error) {
	port, err = findUnusedPort()
	if err != nil {
		return nil, port, err
	}
	cmd := makeCmd("<wsprd>:"+fileName, true,
		"wsprd",
		// Verbose logging so we can watch the output for "Listening" log line.
		"-v=3",
		"-veyron.proxy="+proxyName,
		"-veyron.tcp.address=127.0.0.1:0",
		"-port="+strconv.Itoa(port),
		// Retry RPC calls for 1 second. If a client makes an RPC call before the
		// server is running, it won't immediately fail, but will retry while the
		// server is starting.
		// TODO(nlacasse): Remove this when javascript can tell wspr how long to
		// retry for. Right now it's a global setting in wspr.
		"-retry-timeout=1",
		// The identd server won't be used, so pass a fake name.
		"-identd=/unused")
	if credentials != "" {
		cmd.Env = append(cmd.Env, fmt.Sprintf("%v=%s", consts.VeyronCredentials, path.Join("credentials", credentials)))
	}
	// Wait up to 3s for wsprd to log "Listening". Note findUnusedPort is racy:
	// the port could be taken by another process before wsprd binds it.
	if _, err := startAndWaitFor(cmd, 3*time.Second, regexp.MustCompile("Listening")); err != nil {
		return nil, 0, fmt.Errorf("Error starting wspr: %v", err)
	}
	return cmd.Process, port, nil
}
+
+// Helper function to start a command and wait for output. Arguments are a cmd
+// to run, a timeout, and a regexp. The slice of strings matched by the regexp
+// is returned.
+// TODO(nlacasse): Consider standardizing how services log when they start
+// listening, and their endpoints (if any). Then this could become a common
+// util function.
+func startAndWaitFor(cmd *exec.Cmd, timeout time.Duration, outputRegexp *regexp.Regexp) ([]string, error) {
+ reader, writer := io.Pipe()
+ // TODO(sadovsky): Why must we listen to both stdout and stderr? We should
+ // know which one produces the "Listening" log line...
+ cmd.Stdout.(*lib.MultiWriter).Add(writer)
+ cmd.Stderr.(*lib.MultiWriter).Add(writer)
+ err := cmd.Start()
+ if err != nil {
+ return nil, err
+ }
+
+ buf := bufio.NewReader(reader)
+ t := time.After(timeout)
+ ch := make(chan []string)
+ go (func() {
+ for line, err := buf.ReadString('\n'); err == nil; line, err = buf.ReadString('\n') {
+ if matches := outputRegexp.FindStringSubmatch(line); matches != nil {
+ ch <- matches
+ }
+ }
+ close(ch)
+ })()
+ select {
+ case <-t:
+ return nil, fmt.Errorf("Timeout starting service.")
+ case matches := <-ch:
+ return matches, nil
+ }
+}
diff --git a/compilerd/compile.go b/compilerd/compile.go
new file mode 100644
index 0000000..4ceb271
--- /dev/null
+++ b/compilerd/compile.go
@@ -0,0 +1,322 @@
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "os/exec"
+ "sync"
+ "time"
+
+ "github.com/golang/groupcache/lru"
+
+ "v.io/playground/lib"
+ "v.io/playground/lib/event"
+)
+
// CachedResponse is the value stored in the LRU response cache: the HTTP
// status and the full sequence of events that were returned for a given
// request body.
type CachedResponse struct {
	Status int
	Events []event.Event
}

var (
	useDocker = flag.Bool("use-docker", true, "Whether to use Docker to run builder; if false, we run the builder directly.")

	// Arbitrary deadline (enough to compile, run, shutdown).
	// TODO(sadovsky): For now this is set high to avoid spurious timeouts.
	// Playground execution speed needs to be optimized.
	maxTime = 10 * time.Second

	// In-memory LRU cache of request/response bodies. Keys are sha256 sums of
	// request bodies (32 bytes each), values are of type CachedResponse.
	// NOTE(nlacasse): The cache size (10k) was chosen arbitrarily and should
	// perhaps be optimized.
	cache = lru.New(10000)
)
+
// POST request that compiles and runs the bundle and streams output to client.
// The flow is: CORS check -> read body -> size check -> cache lookup ->
// run builder (in Docker or directly) with the body on stdin, relaying its
// JSON event stream to the response -> cache the result on success.
func handlerCompile(w http.ResponseWriter, r *http.Request) {
	if !handleCORS(w, r) {
		return
	}

	// Check method and read POST body.
	requestBody := getPostBody(w, r)
	if requestBody == nil {
		return
	}

	// If the request does not include query param debug=true, strip any debug
	// events produced by the builder. Note, these events don't contain any
	// sensitive information, so guarding with a query parameter is sufficient.
	wantDebug := r.FormValue("debug") == "1"

	openResponse := func(status int) *responseEventSink {
		w.Header().Add("Content-Type", "application/json")
		// No Content-Length, using chunked encoding.
		w.WriteHeader(status)
		// The response is hard limited to 2*maxSize: maxSize for builder stdout,
		// and another maxSize for compilerd error and status messages.
		return newResponseEventSink(lib.NewLimitedWriter(w, 2*maxSize, lib.DoOnce(func() {
			log.Println("Hard response size limit reached.")
		})), !wantDebug)
	}

	if len(requestBody) > maxSize {
		res := openResponse(http.StatusBadRequest)
		res.Write(event.New("", "stderr", "Program too large."))
		return
	}

	// Hash the body and see if it's been cached. If so, return the cached
	// response status and body.
	// NOTE(sadovsky): In the client we may shift timestamps (based on current
	// time) and introduce a fake delay.
	requestBodyHash := rawHash(requestBody)
	if cachedResponse, ok := cache.Get(requestBodyHash); ok {
		if cachedResponseStruct, ok := cachedResponse.(CachedResponse); ok {
			res := openResponse(cachedResponseStruct.Status)
			event.Debug(res, "Sending cached response")
			res.Write(cachedResponseStruct.Events...)
			return
		} else {
			log.Panicf("Invalid cached response: %v\n", cachedResponse)
		}
	}

	res := openResponse(http.StatusOK)

	// Unique name for this run's container/process (see the uniq channel).
	id := <-uniq

	event.Debug(res, "Preparing to run program")

	// TODO(sadovsky): Set runtime constraints on CPU and memory usage.
	// http://docs.docker.com/reference/run/#runtime-constraints-on-cpu-and-memory
	var cmd *exec.Cmd
	if *useDocker {
		cmd = docker("run", "-i", "--name", id, "playground")
	} else {
		cmd = exec.Command("builder")
	}
	// cmdKill is idempotent (DoOnce) and shared by all the limit/error
	// callbacks below.
	cmdKill := lib.DoOnce(func() {
		event.Debug(res, "Killing program")
		cmd.Process.Kill()
		if *useDocker {
			// Sudo doesn't pass sigkill to child processes, so we need to find and
			// kill the docker process directly.
			// The docker client can get in a state where stopping/killing/rm-ing
			// the container will not kill the client. The opposite should work
			// correctly (killing the docker client stops the container).
			// If not, the docker rm call below will.
			exec.Command("sudo", "pkill", "-SIGKILL", "-f", id).Run()
		}
	})

	cmd.Stdin = bytes.NewReader(requestBody)

	// Builder will return all normal output as JSON Events on stdout, and will
	// return unexpected errors on stderr.
	// TODO(sadovsky): Security issue: what happens if the program output is huge?
	// We can restrict memory use of the Docker container, but these buffers are
	// outside Docker.
	// TODO(ivanpi): Revisit above comment.
	sizedOut := false
	erroredOut := false

	userLimitCallback := func() {
		sizedOut = true
		cmdKill()
	}
	systemLimitCallback := func() {
		erroredOut = true
		cmdKill()
	}
	userErrorCallback := func(err error) {
		// A relay error can result from unparseable JSON caused by a builder bug
		// or a malicious exploit inside Docker. Panicking could lead to a DoS.
		log.Println(id, "builder stdout relay error:", err)
		erroredOut = true
		cmdKill()
	}

	outRelay, outStop := limitedEventRelay(res, maxSize, userLimitCallback, userErrorCallback)
	// Builder stdout should already contain a JSON Event stream.
	cmd.Stdout = outRelay

	// Any stderr is unexpected, most likely a bug (panic) in builder, but could
	// also result from a malicious exploit inside Docker.
	// It is quietly logged as long as it doesn't exceed maxSize.
	errBuffer := new(bytes.Buffer)
	cmd.Stderr = lib.NewLimitedWriter(errBuffer, maxSize, systemLimitCallback)

	event.Debug(res, "Running program")

	timeout := time.After(maxTime)
	// User code execution is time limited in builder.
	// This flag signals only unexpected timeouts. maxTime should be sufficient
	// for end-to-end request processing by builder for worst-case user input.
	// TODO(ivanpi): builder doesn't currently time compilation, so builder
	// worst-case execution time is not clearly bounded.
	timedOut := false

	exit := make(chan error)
	go func() { exit <- cmd.Run() }()

	select {
	case err := <-exit:
		if err != nil && !sizedOut {
			erroredOut = true
		}
	case <-timeout:
		timedOut = true
		cmdKill()
		// Wait for the killed process to be reaped before proceeding.
		<-exit
	}

	// Close and wait for the output relay.
	outStop()

	event.Debug(res, "Program exited")

	// Return the appropriate error message to the client.
	if timedOut {
		res.Write(event.New("", "stderr", "Internal timeout, please retry."))
	} else if erroredOut {
		res.Write(event.New("", "stderr", "Internal error, please retry."))
	} else if sizedOut {
		res.Write(event.New("", "stderr", "Program output too large, killed."))
	}

	// Log builder internal errors, if any.
	// TODO(ivanpi): Prevent caching? Report to client if debug requested?
	if errBuffer.Len() > 0 {
		log.Println(id, "builder stderr:", errBuffer.String())
	}

	event.Debug(res, "Response finished")

	// If we timed out or errored out, do not cache anything.
	// TODO(sadovsky): This policy is helpful for development, but may not be wise
	// for production. Revisit.
	if !timedOut && !erroredOut {
		cache.Add(requestBodyHash, CachedResponse{
			Status: http.StatusOK,
			Events: res.popWrittenEvents(),
		})
		event.Debug(res, "Caching response")
	} else {
		event.Debug(res, "Internal errors encountered, not caching response")
	}

	// TODO(nlacasse): This "docker rm" can be slow (several seconds), and seems
	// to block other Docker commands, thereby slowing down other concurrent
	// requests. We should figure out how to make it not block other Docker
	// commands. Setting GOMAXPROCS may or may not help.
	// See: https://github.com/docker/docker/issues/6480
	if *useDocker {
		go func() {
			docker("rm", "-f", id).Run()
		}()
	}
}
+
// Each line written to the returned writer, up to limit bytes total, is parsed
// into an Event and written to Sink.
// If the limit is reached or an invalid line read, the corresponding callback
// is called and the relay stopped.
// The returned stop() function stops the relaying.
func limitedEventRelay(sink event.Sink, limit int, limitCallback func(), errorCallback func(err error)) (writer io.Writer, stop func()) {
	pipeReader, pipeWriter := io.Pipe()
	// done signals that the relay goroutine below has finished.
	done := make(chan bool)
	stop = lib.DoOnce(func() {
		// Closing the pipe will cause the main relay loop to stop reading (EOF).
		// Writes will fail with ErrClosedPipe.
		pipeReader.Close()
		pipeWriter.Close()
		// Wait for the relay goroutine to finish.
		<-done
	})
	writer = lib.NewLimitedWriter(pipeWriter, limit, func() {
		limitCallback()
		stop()
	})
	go func() {
		bufr := bufio.NewReaderSize(pipeReader, limit)
		var line []byte
		var err error
		// Relay complete lines (events) until EOF or a read error is encountered.
		for line, err = bufr.ReadBytes('\n'); err == nil; line, err = bufr.ReadBytes('\n') {
			var e event.Event
			err = json.Unmarshal(line, &e)
			if err != nil {
				err = fmt.Errorf("failed unmarshalling event: %s", line)
				break
			}
			sink.Write(e)
		}
		// EOF / ErrClosedPipe are the expected shutdown signals; anything else
		// is reported to the caller.
		if err != io.EOF && err != io.ErrClosedPipe {
			errorCallback(err)
			// Use goroutine to prevent deadlock on done channel.
			go stop()
		}
		done <- true
	}()
	return
}
+
// Initialize using newResponseEventSink.
// An event.Sink which also saves all written Events regardless of successful
// writes to the underlying ResponseWriter. The saved events are later cached
// (see handlerCompile).
type responseEventSink struct {
	// The mutex is used to ensure the same sequence of events being written to
	// both the JsonSink and the written Event array.
	mu sync.Mutex
	event.JsonSink
	written []event.Event
}

// newResponseEventSink wraps writer in a JsonSink; filterDebug suppresses
// debug events from the client-visible stream (but not from the saved list).
func newResponseEventSink(writer io.Writer, filterDebug bool) *responseEventSink {
	return &responseEventSink{
		JsonSink: *event.NewJsonSink(writer, filterDebug),
	}
}

// Write records the events and forwards them to the underlying JsonSink.
func (r *responseEventSink) Write(events ...event.Event) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.written = append(r.written, events...)
	return r.JsonSink.Write(events...)
}

// Returns and clears the history of Events written to the responseEventSink.
func (r *responseEventSink) popWrittenEvents() []event.Event {
	r.mu.Lock()
	defer r.mu.Unlock()
	events := r.written
	r.written = nil
	return events
}
+
// docker builds a "sudo docker <args...>" command (the daemon requires root).
func docker(args ...string) *exec.Cmd {
	return exec.Command("sudo", append([]string{"docker"}, args...)...)
}
+
// A channel which returns unique ids for the containers.
var uniq = make(chan string)

// init feeds uniq from a goroutine with monotonically increasing ids seeded
// from the current time, so container names are unique per server process.
func init() {
	val := time.Now().UnixNano()
	go func() {
		for {
			uniq <- fmt.Sprintf("playground_%d", val)
			val++
		}
	}()
}
diff --git a/compilerd/main.go b/compilerd/main.go
new file mode 100644
index 0000000..6437979
--- /dev/null
+++ b/compilerd/main.go
@@ -0,0 +1,165 @@
+package main
+
+import (
+ "bytes"
+ crand "crypto/rand"
+ "crypto/sha256"
+ "encoding/binary"
+ "flag"
+ "fmt"
+ "log"
+ "math/rand"
+ "net/http"
+ "os"
+ "os/exec"
+ "os/signal"
+ "syscall"
+ "time"
+)
+
+var (
+ // This channel is closed when the server begins shutting down.
+ // No values are ever sent to it.
+ // Closed by waitForShutdown; healthz watches it to fail health checks.
+ lameduck chan bool = make(chan bool)
+
+ address = flag.String("address", ":8181", "Address to listen on.")
+
+ // Note, shutdown triggers on SIGTERM or when the time limit is hit.
+ enableShutdown = flag.Bool("shutdown", true, "Whether to ever shutdown the machine.")
+
+ // Maximum request and output size. Same limit as imposed by Go tour.
+ // Note: The response includes error and status messages as well as output,
+ // so it can be larger (usually by a small constant, hard limited to
+ // 2*maxSize).
+ // maxSize should be large enough to fit all error and status messages
+ // written by compilerd to prevent reaching the hard limit.
+ maxSize = 1 << 16
+)
+
+// Seeds the non-secure random number generator.
+// The seed is read from crypto/rand so each process gets an unpredictable
+// math/rand sequence (used for the shutdown delay jitter in main).
+func seedRNG() error {
+ var seed int64
+ err := binary.Read(crand.Reader, binary.LittleEndian, &seed)
+ if err != nil {
+ return fmt.Errorf("reseed failed: %v", err)
+ }
+ rand.Seed(seed)
+ return nil
+}
+
+//// HTTP server
+
+// healthz reports server health: "ok" while serving; 500 once lameduck has
+// been closed (a receive on a closed channel succeeds immediately, so the
+// first select case wins after shutdown begins).
+func healthz(w http.ResponseWriter, r *http.Request) {
+ select {
+ case <-lameduck:
+ w.WriteHeader(http.StatusInternalServerError)
+ default:
+ w.Write([]byte("ok"))
+ }
+}
+
+// main seeds the RNG, optionally schedules a jittered self-shutdown, and
+// serves the /healthz and /compile endpoints until the listener fails.
+func main() {
+ flag.Parse()
+
+ if err := seedRNG(); err != nil {
+ panic(err)
+ }
+
+ if *enableShutdown {
+ limit_min := 60
+ // Random delay in [limit_min/2, limit_min) minutes so replicas don't
+ // all restart at the same moment.
+ delay_min := limit_min/2 + rand.Intn(limit_min/2)
+
+ // VMs will be periodically killed to prevent any owned VMs from causing
+ // damage. We want to shutdown cleanly before then so we don't cause
+ // requests to fail.
+ go waitForShutdown(time.Minute * time.Duration(delay_min))
+ }
+
+ http.HandleFunc("/healthz", healthz)
+ http.HandleFunc("/compile", handlerCompile)
+
+ log.Printf("Serving %s\n", *address)
+ // ListenAndServe only returns on failure. Without this check a bind
+ // error (e.g. port already in use) would exit silently with status 0.
+ if err := http.ListenAndServe(*address, nil); err != nil {
+ log.Fatal("ListenAndServe: ", err)
+ }
+}
+
+// waitForShutdown blocks until the time limit expires or SIGTERM arrives,
+// then fails health checks, waits for in-flight requests to drain, halts
+// the VM if necessary, and exits the process.
+func waitForShutdown(limit time.Duration) {
+ // Set only in the deadline case, where we must halt the VM ourselves;
+ // stays nil for SIGTERM, when the VM is already going down.
+ var beforeExit func() error
+
+ // Shutdown if we get a SIGTERM.
+ term := make(chan os.Signal, 1)
+ signal.Notify(term, syscall.SIGTERM)
+
+ // Or if the time limit expires.
+ deadline := time.After(limit)
+ log.Println("Shutting down at", time.Now().Add(limit))
+Loop:
+ for {
+ select {
+ case <-deadline:
+ // Shutdown the VM.
+ log.Println("Deadline expired, shutting down.")
+ // Defer the actual halt until after the drain period below.
+ beforeExit = exec.Command("sudo", "halt").Run
+ break Loop
+ case <-term:
+ log.Println("Got SIGTERM, shutting down.")
+ // VM is probably already shutting down, so just exit.
+ break Loop
+ }
+ }
+
+ // Fail health checks so we stop getting requests.
+ close(lameduck)
+
+ // Give running requests time to finish.
+ time.Sleep(30 * time.Second)
+
+ // Go ahead and shutdown.
+ if beforeExit != nil {
+ err := beforeExit()
+ if err != nil {
+ panic(err)
+ }
+ }
+ os.Exit(0)
+}
+
+//// HTTP request helpers
+
+// Handles CORS options and pre-flight requests.
+// Returns false iff response processing should not continue.
+// The CORS headers are set on every response, including pre-flight ones.
+func handleCORS(w http.ResponseWriter, r *http.Request) bool {
+ // CORS headers.
+ // TODO(nlacasse): Fill the origin header in with actual playground origin
+ // before going to production.
+ w.Header().Set("Access-Control-Allow-Origin", "*")
+ w.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS")
+ w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Accept-Encoding")
+
+ // CORS sends an OPTIONS pre-flight request to make sure the request will be
+ // allowed.
+ if r.Method == "OPTIONS" {
+ w.WriteHeader(http.StatusOK)
+ return false
+ }
+
+ return true
+}
+
+// Checks if the POST method was used and returns the request body.
+// Returns nil iff response processing should not continue, having already
+// written the appropriate HTTP status code.
+func getPostBody(w http.ResponseWriter, r *http.Request) []byte {
+ if r.Body == nil || r.Method != "POST" {
+ w.WriteHeader(http.StatusBadRequest)
+ return nil
+ }
+
+ buf := new(bytes.Buffer)
+ if _, err := buf.ReadFrom(r.Body); err != nil {
+ // Previously this error was ignored, silently treating a truncated
+ // body as complete. A failed read is a transport problem, not a
+ // client error, so report 500 and stop processing.
+ w.WriteHeader(http.StatusInternalServerError)
+ return nil
+ }
+ return buf.Bytes()
+}
+
+//// Shared helper functions
+
+// rawHash returns the SHA-256 digest of data as a fixed-size 32-byte array.
+func rawHash(data []byte) [32]byte {
+ return sha256.Sum256(data)
+}
diff --git a/compilerd/pool_template.json b/compilerd/pool_template.json
new file mode 100644
index 0000000..a8d077a
--- /dev/null
+++ b/compilerd/pool_template.json
@@ -0,0 +1,38 @@
+{
+ "template": {
+ "action": {
+ "commands": [
+ "sudo mount /dev/sdb1 /mnt",
+ "sudo docker load < /mnt/playground.tar.gz",
+ "sudo docker run playground &> /dev/null || true",
+ "start-stop-daemon --start -c ribrdb --exec /mnt/compilerd &> /tmp/compilerd.out &"
+ ]
+ },
+ "healthChecks": [{
+ "name": "healthz",
+ "path": "/healthz",
+ "port": "8181"
+ }],
+ "vmParams": {
+ "machineType": "n1-standard-1",
+ "baseInstanceName": "pg-replica",
+ "disksToCreate": [{
+ "boot": "true",
+ "initializeParams": {
+ "sourceImage": "https://www.googleapis.com/compute/v1/projects/google-containers/global/images/container-vm-v20140522",
+ "diskSizeGb": "200"
+ }
+ }],
+ "disksToAttach": [{
+ "source": "pg-data-20140820"
+ }],
+ "networkInterfaces": [{
+ "network": "playground",
+ "accessConfigs": [{
+ "type": "ONE_TO_ONE_NAT",
+ "name": "External NAT"
+ }]
+ }]
+ }
+ }
+}
diff --git a/compilerd/update.sh b/compilerd/update.sh
new file mode 100755
index 0000000..3ea4d61
--- /dev/null
+++ b/compilerd/update.sh
@@ -0,0 +1,99 @@
+#!/bin/bash
+
+# Script to rebuild and deploy compilerd and the Docker image (builder) to the
+# playground backends.
+#
+# Usage:
+# gcutil ssh --project google.com:veyron playground-master
+# sudo su - veyron
+# v23 update
+# bash $VANADIUM_ROOT/release/go/src/v.io/playground/compilerd/update.sh
+
+set -e
+set -u
+
+readonly DATE=$(date +"%Y%m%d-%H%M%S")
+readonly DISK="pg-data-${DATE}"
+
+# Unmounts the data disk and detaches it from this (master) instance so
+# backends can attach it.
+function unmount() {
+ sudo umount /mnt
+ gcloud compute --project "google.com:veyron" instances detach-disk --disk=${DISK} $(hostname) --zone us-central1-a
+}
+
+# Bash resolves the trap handler by name when the signal fires, so it is
+# safe to install the trap before cleanup is defined below.
+trap cleanup INT TERM EXIT
+
+function cleanup() {
+ # Unset the trap so that it doesn't run again on exit.
+ trap - INT TERM EXIT
+ if [[ -e /mnt/compilerd ]]; then
+ # The disk is still mounted on the master, which means it's not yet mounted
+ # on any backends. It's safe to unmount and delete it.
+ unmount
+ gcloud compute --project "google.com:veyron" disks delete ${DISK} --zone "us-central1-a"
+ fi
+ # Best-effort removal of the container main may have created with this name.
+ sudo docker rm ${DISK} &> /dev/null || true
+}
+
+# Builds the playground docker image and compilerd, publishes them on a new
+# data disk, points the replica-pool template at it, and restarts replicas.
+function main() {
+ if [[ ! -e ~/.gitcookies ]]; then
+ echo "Unable to access git, missing ~/.gitcookies"
+ exit 1
+ fi
+ if [[ ! -e ~/.hgrc ]]; then
+ echo "Unable to access mercurial, missing ~/.hgrc"
+ exit 1
+ fi
+
+ # With --no-rolling all replicas restart without the 5m stagger below
+ # (faster, but briefly takes the whole pool down).
+ local ROLLING="1"
+ if [[ $# -gt 0 && ("$1" == "--no-rolling") ]]; then
+ local ROLLING="0"
+ fi
+
+ gcloud compute --project "google.com:veyron" disks create ${DISK} --size "200" --zone "us-central1-a" --source-snapshot "pg-data-20140702" --type "pd-standard"
+ gcloud compute --project "google.com:veyron" instances attach-disk --disk=${DISK} $(hostname) --zone us-central1-a
+ sudo mount /dev/sdb1 /mnt
+
+ # Build the docker image.
+ cd ${VANADIUM_ROOT}/release/go/src/v.io/playground
+ cp ~/.gitcookies ./builder/gitcookies
+ cp ~/.hgrc ./builder/hgrc
+ sudo docker build --no-cache -t playground .
+ rm -f ./builder/gitcookies
+ rm -f ./builder/hgrc
+
+ # Export the docker image to disk.
+ sudo docker save -o /mnt/playground.tar.gz playground
+
+ # TODO(sadovsky): Before deploying the new playground image, we should run it
+ # with real input and make sure it works (produces the expected output).
+
+ # Copy the compilerd binary from the docker image to the disk.
+ # NOTE(sadovsky): The purpose of the following line is to create a container
+ # out of the docker image, so that we can copy out the compilerd binary.
+ # Annoyingly, the only way to create the container is to run the image.
+ # TODO(sadovsky): Why don't we just build compilerd using "v23 go install"?
+ sudo docker run --name=${DISK} playground &> /dev/null || true
+ sudo docker cp ${DISK}:/usr/local/vanadium/release/go/bin/compilerd /tmp
+ sudo mv /tmp/compilerd /mnt/compilerd
+ sudo docker rm ${DISK}
+
+ # Detach the disk so the backends can mount it.
+ unmount
+
+ # Update the template to use the new disk, then restore the checked-in
+ # template so the working tree stays clean.
+ cd compilerd
+ sed -i -e s/pg-data-20140820/${DISK}/ pool_template.json
+ gcloud preview replica-pools --zone=us-central1-a update-template --template=pool_template.json playground-pool
+ git checkout -- pool_template.json
+
+ # Perform a rolling restart of all the replicas.
+ INSTANCES=$(gcloud preview replica-pools --zone=us-central1-a replicas --pool=playground-pool list|grep name:|cut -d: -f2)
+ for i in ${INSTANCES}; do
+ gcloud preview replica-pools --zone=us-central1-a replicas --pool=playground-pool restart ${i}
+ if [[ "$ROLLING" == "1" ]]; then
+ sleep 5m
+ fi
+ done
+}
+
+main "$@"
diff --git a/dummy.go b/dummy.go
new file mode 100644
index 0000000..f9eb780
--- /dev/null
+++ b/dummy.go
@@ -0,0 +1,4 @@
+package playground
+
+// This empty file is needed by test.sh to find the absolute path of this
+// directory.
diff --git a/lib/event/event.go b/lib/event/event.go
new file mode 100644
index 0000000..22b996a
--- /dev/null
+++ b/lib/event/event.go
@@ -0,0 +1,37 @@
+package event
+
+import (
+ "fmt"
+ "time"
+)
+
+// Typed representation of data written to stdout/stderr by a command. These
+// will be JSON-encoded and sent to the client.
+type Event struct {
+ // File associated with the command.
+ File string
+ // The text that was written to the stream.
+ Message string
+ // Stream that the message was sent to: "stdout", "stderr", or "debug"
+ // (the latter produced by the Debug helper below).
+ Stream string
+ // Unix time, the number of nanoseconds elapsed since January 1, 1970 UTC.
+ Timestamp int64
+}
+
+// New returns an Event for the given file/stream/message, stamped with the
+// current time.
+func New(file string, stream string, message string) Event {
+ return Event{
+ File: file,
+ Message: message,
+ Stream: stream,
+ Timestamp: time.Now().UnixNano(),
+ }
+}
+
+// Stream for writing Events to.
+type Sink interface {
+ Write(events ...Event) error
+}
+
+// Debug writes args to es as a single "debug" Event. The write error is
+// deliberately ignored: debug output is best-effort.
+func Debug(es Sink, args ...interface{}) {
+ es.Write(New("", "debug", fmt.Sprintln(args...)))
+}
diff --git a/lib/event/json_sink.go b/lib/event/json_sink.go
new file mode 100644
index 0000000..03c8e7a
--- /dev/null
+++ b/lib/event/json_sink.go
@@ -0,0 +1,87 @@
+// JsonSink is an Event Sink that serializes written Events to JSON and writes
+// them one per line.
+//
+// JsonSink.Write is thread-safe. The underlying io.Writer is flushed after
+// every write, if it supports flushing. Optionally filters out debug Events.
+
+package event
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+ "sync"
+)
+
+// Initialize using NewJsonSink.
+type JsonSink struct {
+ // When true, Events on the "debug" stream are dropped instead of written.
+ filterDebug bool
+
+ // mu serializes writes so concurrent callers cannot interleave JSON lines.
+ mu sync.Mutex
+ w io.Writer
+}
+
+// Compile-time check that *JsonSink satisfies Sink.
+var _ Sink = (*JsonSink)(nil)
+
+// NewJsonSink returns a JsonSink writing one JSON line per Event to writer;
+// if filterDebug is true, debug Events are silently discarded.
+func NewJsonSink(writer io.Writer, filterDebug bool) *JsonSink {
+ return &JsonSink{
+ w: writer,
+ filterDebug: filterDebug,
+ }
+}
+
+// Write serializes each Event to JSON and writes them all to the underlying
+// writer, flushing afterwards. Marshalling errors abort before anything is
+// written.
+func (es *JsonSink) Write(events ...Event) error {
+ if es.filterDebug {
+ events = filter(events...)
+ }
+ evJson, err := jsonize(events...)
+ if err != nil {
+ return err
+ }
+ return es.writeJson(evJson...)
+}
+
+// Filters out debug Events, returning the remaining Events in order.
+func filter(events ...Event) []Event {
+ filtered := make([]Event, 0, len(events))
+ for _, ev := range events {
+ if ev.Stream != "debug" {
+ filtered = append(filtered, ev)
+ }
+ }
+ return filtered
+}
+
+// Converts Events to JSON. Returns on the first marshalling error, in which
+// case evJson holds only the Events marshalled so far.
+func jsonize(events ...Event) (evJson [][]byte, err error) {
+ evJson = make([][]byte, 0, len(events))
+ for _, ev := range events {
+ var js []byte
+ js, err = json.Marshal(&ev)
+ if err != nil {
+ return
+ }
+ evJson = append(evJson, js)
+ }
+ return
+}
+
+// Writes JSON lines and flushes output. Holds the mutex for the whole batch
+// so lines from concurrent Write calls never interleave; the flush runs even
+// if a write fails partway through.
+func (es *JsonSink) writeJson(evJson ...[]byte) error {
+ es.mu.Lock()
+ defer es.mu.Unlock()
+ defer es.flush()
+ for _, js := range evJson {
+ _, err := es.w.Write(append(js, '\n'))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// flush pushes buffered output to the client if the underlying writer
+// supports http.Flusher; otherwise it is a no-op.
+func (es *JsonSink) flush() {
+ if f, ok := es.w.(http.Flusher); ok {
+ f.Flush()
+ }
+}
diff --git a/lib/event/stream_writer.go b/lib/event/stream_writer.go
new file mode 100644
index 0000000..358ec5f
--- /dev/null
+++ b/lib/event/stream_writer.go
@@ -0,0 +1,31 @@
+// Implementation of io.Writer that streams each write as an Event to the
+// wrapped Sink.
+
+package event
+
+import (
+ "io"
+)
+
+// Initialize using NewStreamWriter.
+type streamWriter struct {
+ es Sink
+ fileName string
+ streamName string
+}
+
+var _ io.Writer = (*streamWriter)(nil)
+
+// NewStreamWriter wraps es as an io.Writer that emits each Write as one
+// Event tagged with fileName/streamName.
+// NOTE(review): exported constructor returns an unexported type; callers can
+// only use the result as an io.Writer — confirm this is intended.
+func NewStreamWriter(es Sink, fileName, streamName string) *streamWriter {
+ return &streamWriter{es: es, fileName: fileName, streamName: streamName}
+}
+
+// Write converts p into a single Event and writes it to the Sink.
+// Empty writes are dropped without producing an Event.
+func (ew *streamWriter) Write(p []byte) (n int, err error) {
+ if len(p) == 0 {
+ return 0, nil
+ }
+ if err := ew.es.Write(New(ew.fileName, ew.streamName, string(p))); err != nil {
+ return 0, err
+ }
+ return len(p), nil
+}
diff --git a/lib/limited_writer.go b/lib/limited_writer.go
new file mode 100644
index 0000000..a93464b
--- /dev/null
+++ b/lib/limited_writer.go
@@ -0,0 +1,64 @@
+// LimitedWriter is an io.Writer wrapper that limits the total number of bytes
+// written to the underlying writer.
+//
+// All attempted writes count against the limit, regardless of whether they
+// succeed.
+// Not thread-safe.
+
+package lib
+
+import (
+ "errors"
+ "io"
+ "net/http"
+ "sync"
+)
+
+// Returned by Write once the cumulative byte limit has been reached.
+var ErrWriteLimitExceeded = errors.New("LimitedWriter: write limit exceeded")
+
+// Initialize using NewLimitedWriter.
+type LimitedWriter struct {
+ io.Writer
+ // Maximum total bytes accepted across all Write calls.
+ maxLen int
+ // Invoked (if non-nil) each time a write pushes the total over maxLen.
+ maxLenExceededCb func()
+ // Running total of bytes counted so far; capped at maxLen.
+ lenWritten int
+}
+
+// NewLimitedWriter wraps writer so that at most maxLen bytes are passed
+// through; maxLenExceededCb may be nil.
+func NewLimitedWriter(writer io.Writer, maxLen int, maxLenExceededCb func()) *LimitedWriter {
+ return &LimitedWriter{
+ Writer: writer,
+ maxLen: maxLen,
+ maxLenExceededCb: maxLenExceededCb,
+ }
+}
+
+// Write forwards p to the wrapped writer unless doing so would exceed the
+// limit, in which case nothing is written and ErrWriteLimitExceeded is
+// returned. Note the callback fires on every over-limit attempt, not just
+// the first.
+func (t *LimitedWriter) Write(p []byte) (n int, err error) {
+ if t.lenWritten+len(p) > t.maxLen {
+ // Saturate the counter so later (even empty-ish) accounting stays sane.
+ t.lenWritten = t.maxLen
+ if t.maxLenExceededCb != nil {
+ t.maxLenExceededCb()
+ }
+ return 0, ErrWriteLimitExceeded
+ }
+ if len(p) == 0 {
+ return 0, nil
+ }
+ t.lenWritten += len(p)
+ return t.Writer.Write(p)
+}
+
+var _ http.Flusher = (*LimitedWriter)(nil)
+
+// Flush forwards to the wrapped writer when it supports http.Flusher;
+// otherwise a no-op.
+func (t *LimitedWriter) Flush() {
+ if f, ok := t.Writer.(http.Flusher); ok {
+ f.Flush()
+ }
+}
+
+// Wraps a function to prevent it from executing more than once.
+func DoOnce(f func()) func() {
+ var once sync.Once
+ return func() {
+ once.Do(f)
+ }
+}
diff --git a/lib/multi_writer.go b/lib/multi_writer.go
new file mode 100644
index 0000000..f38203e
--- /dev/null
+++ b/lib/multi_writer.go
@@ -0,0 +1,52 @@
+// MultiWriter creates a writer that duplicates its writes to all the
+// provided writers, similar to the Unix tee(1) command.
+//
+// Similar to http://golang.org/src/pkg/io/multi.go.
+
+package lib
+
+import (
+ "io"
+ "sync"
+)
+
+// Initialize using NewMultiWriter.
+type MultiWriter struct {
+ writers []io.Writer
+ mu sync.Mutex
+ // Set on the first Write; afterwards Add panics to prevent writers
+ // joining mid-stream and missing earlier data.
+ wrote bool
+}
+
+var _ io.Writer = (*MultiWriter)(nil)
+
+// NewMultiWriter returns a MultiWriter with no destinations; use Add to
+// register writers before the first Write.
+func NewMultiWriter() *MultiWriter {
+ return &MultiWriter{writers: []io.Writer{}}
+}
+
+// Returns self for convenience.
+func (t *MultiWriter) Add(w io.Writer) *MultiWriter {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.wrote {
+ panic("Tried to add writer after data has been written.")
+ }
+ t.writers = append(t.writers, w)
+ return t
+}
+
+// Write copies p to every registered writer, stopping at the first error or
+// short write. The writers slice is iterated outside the lock; this relies
+// on the Add-then-Write phase discipline enforced by the wrote flag.
+func (t *MultiWriter) Write(p []byte) (n int, err error) {
+ t.mu.Lock()
+ t.wrote = true
+ t.mu.Unlock()
+ for _, w := range t.writers {
+ n, err = w.Write(p)
+ if err != nil {
+ return
+ }
+ if n != len(p) {
+ err = io.ErrShortWrite
+ return
+ }
+ }
+ return len(p), nil
+}
diff --git a/lib/pg_test_util.sh b/lib/pg_test_util.sh
new file mode 100755
index 0000000..42a19a3
--- /dev/null
+++ b/lib/pg_test_util.sh
@@ -0,0 +1,72 @@
+#!/bin/bash
+
+# Utilities for testing the playground builder tool.
+# Used by tests in v.io/playground and veyron-www.
+
+source "$(go list -f {{.Dir}} v.io/core/shell/lib)/shell_test.sh"
+
+# Sets up environment variables required for the tests.
+setup_environment() {
+ export GOPATH="$(pwd):$(v23 env GOPATH)"
+ export VDLPATH="$(pwd):$(v23 env VDLPATH)"
+ export PATH="$(pwd):${shell_test_BIN_DIR}:${VANADIUM_ROOT}/environment/cout/node/bin:${PATH}"
+
+ # We unset all environment variables that supply a principal in order to
+ # simulate production playground setup.
+ unset VEYRON_CREDENTIALS
+ unset VEYRON_AGENT_FD
+}
+
+# Installs the release/javascript/core library and makes it accessible to
+# Javascript files in the Vanadium playground test under the module name
+# 'veyron'.
+install_vanadium_js() {
+ # TODO(nlacasse): Once release/javascript/core is publicly available in npm, replace this
+ # with "npm install vanadium".
+
+ pushd "${VANADIUM_ROOT}/release/javascript/vom"
+ npm link
+ popd
+ pushd "${VANADIUM_ROOT}/release/javascript/core"
+ npm link vom
+ npm link
+ popd
+ npm link veyron
+}
+
+# Installs the pgbundle tool.
+install_pgbundle() {
+ pushd "${VANADIUM_ROOT}/release/javascript/pgbundle"
+ npm link
+ popd
+ npm link pgbundle
+}
+
+# Installs various go binaries.
+build_go_binaries() {
+ shell_test::build_go_binary 'v.io/core/veyron/tools/principal'
+ shell_test::build_go_binary 'v.io/core/veyron/services/proxy/proxyd'
+ shell_test::build_go_binary 'v.io/core/veyron/services/mounttable/mounttabled'
+ shell_test::build_go_binary 'v.io/core/veyron2/vdl/vdl'
+ shell_test::build_go_binary 'v.io/playground/builder'
+ shell_test::build_go_binary 'v.io/wspr/veyron/services/wsprd'
+}
+
+# Bundles a playground example and tests it using builder.
+# $1: root directory of example to test
+# $2: arguments to call builder with
+test_pg_example() {
+ local -r PGBUNDLE_DIR="$1"
+ local -r BUILDER_ARGS="$2"
+
+ ./node_modules/.bin/pgbundle "${PGBUNDLE_DIR}"
+
+ # Create a fresh dir to run builder in.
+ local -r ORIG_DIR=$(pwd)
+ pushd $(shell::tmp_dir)
+ ln -s "${ORIG_DIR}/node_modules" ./ # for release/javascript/core
+ "${shell_test_BIN_DIR}/builder" ${BUILDER_ARGS} < "${PGBUNDLE_DIR}/bundle.json" 2>&1 | tee builder.out
+ # Move builder output to original dir for verification.
+ mv builder.out "${ORIG_DIR}"
+ popd
+}
diff --git a/monitor.py b/monitor.py
new file mode 100755
index 0000000..ce7b86b
--- /dev/null
+++ b/monitor.py
@@ -0,0 +1,69 @@
+#!/usr/bin/python2.7
+
+"""Playground GCE monitoring script.
+
+This needs to run on a GCE VM with the replica pool service account scope
+(https://www.googleapis.com/auth/ndev.cloudman).
+
+You also need to enable preview in gcloud:
+$ gcloud components update preview
+
+Then add it to your crontab, e.g.
+*/10 * * * * gcloud preview replica-pools --zone us-central1-a replicas --pool playground-pool list|monitor.py
+"""
+
+import datetime
+import subprocess
+import sys
+import yaml
+
+DESIRED = 2
+MAX_ALIVE_MIN = 60
+POOL = 'playground-pool'
+
+
+def RunCommand(*args):
+  """Runs a gcloud replica-pools command in zone us-central1-a, raising on failure."""
+ cmd = ['gcloud', 'preview', 'replica-pools', '--zone', 'us-central1-a']
+ cmd.extend(args)
+ subprocess.check_call(cmd)
+
+
+def ResizePool(size):
+  """Resizes the replica pool to the given number of replicas."""
+ RunCommand('resize', '--new-size', str(size), POOL)
+
+
+def ShouldRestart(replica):
+  """Returns True if the replica has permanently failed or exceeded max uptime."""
+ if replica['status']['state'] == 'PERMANENTLY_FAILING':
+ print 'Replica %s failed: %s' % (
+ replica['name'], replica['status']['details'])
+ return True
+ return IsTooOld(replica)
+
+
+def IsTooOld(replica):
+ start_text = replica['status']['vmStartTime']
+ if start_text:
+ start = yaml.load(start_text)
+ uptime = datetime.datetime.now() - start
+ return uptime.seconds > MAX_ALIVE_MIN * 60
+
+
+def RestartReplica(replica):
+  """Replaces a replica: grow the pool by one, then delete the bad replica.
+
+  Growing first keeps serving capacity at DESIRED during the swap.
+  """
+ print 'Restarting replica ' + replica['name']
+ ResizePool(DESIRED + 1)
+ RunCommand('replicas', '--pool', POOL, 'delete', replica['name'])
+
+
+def MaybeRestartReplica(replica):
+  """Restarts the replica only if ShouldRestart says so."""
+ if ShouldRestart(replica):
+ RestartReplica(replica)
+
+
+def main():
+  """Reads `gcloud ... replicas list` YAML from stdin and culls bad replicas."""
+ replicas = yaml.load_all(sys.stdin.read())
+ for replica in replicas:
+ MaybeRestartReplica(replica)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test.sh b/test.sh
new file mode 100755
index 0000000..ace87a2
--- /dev/null
+++ b/test.sh
@@ -0,0 +1,79 @@
+#!/bin/bash
+
+# Tests the playground builder tool.
+
+source "$(go list -f {{.Dir}} v.io/core/shell/lib)/shell_test.sh"
+source "$(go list -f {{.Dir}} v.io/playground)/lib/pg_test_util.sh"
+
+# Sets up a directory with the given files, then runs builder.
+# Each argument is a path relative to testdata/; output lands in builder.out.
+test_with_files() {
+ local -r TESTDATA_DIR="$(go list -f {{.Dir}} v.io/playground)/testdata"
+
+ # Write input files to a fresh dir before bundling and running them.
+ # Note: $@ is deliberately unquoted — file names are whitespace-free
+ # relative paths and rely on word splitting here.
+ local -r PGBUNDLE_DIR=$(shell::tmp_dir)
+ for f in $@; do
+ fdir="${PGBUNDLE_DIR}/$(dirname ${f})"
+ mkdir -p "${fdir}"
+ cp "${TESTDATA_DIR}/${f}" "${fdir}/"
+ done
+
+ test_pg_example "${PGBUNDLE_DIR}" "-v=false --includeV23Env=true"
+}
+
+# Runs the builder against every go/js client/server combination and checks
+# the expected PING/PONG (or authorization failure) output in builder.out.
+main() {
+ cd "${shell_test_WORK_DIR}"
+
+ setup_environment
+
+ build_go_binaries
+ install_vanadium_js
+ install_pgbundle
+
+ echo -e "\n\n>>>>> Test as the same principal\n\n"
+
+ test_with_files "src/pingpong/wire.vdl" "src/pong/pong.go" "src/ping/ping.go" || shell_test::fail "line ${LINENO}: basic ping (go -> go)"
+ grep -q PING builder.out || shell_test::fail "line ${LINENO}: no PING"
+ grep -q PONG builder.out || shell_test::fail "line ${LINENO}: no PONG"
+
+ test_with_files "src/pong/pong.js" "src/ping/ping.js" || shell_test::fail "line ${LINENO}: basic ping (js -> js)"
+ grep -q PING builder.out || shell_test::fail "line ${LINENO}: no PING"
+ grep -q PONG builder.out || shell_test::fail "line ${LINENO}: no PONG"
+
+ test_with_files "src/pong/pong.go" "src/ping/ping.js" "src/pingpong/wire.vdl" || shell_test::fail "line ${LINENO}: basic ping (js -> go)"
+ grep -q PING builder.out || shell_test::fail "line ${LINENO}: no PING"
+ grep -q PONG builder.out || shell_test::fail "line ${LINENO}: no PONG"
+
+ test_with_files "src/pong/pong.js" "src/ping/ping.go" "src/pingpong/wire.vdl" || shell_test::fail "line ${LINENO}: basic ping (go -> js)"
+ grep -q PING builder.out || shell_test::fail "line ${LINENO}: no PING"
+ grep -q PONG builder.out || shell_test::fail "line ${LINENO}: no PONG"
+
+ echo -e "\n\n>>>>> Test with authorized blessings\n\n"
+
+ test_with_files "src/pong/pong.go" "src/ping/ping.go" "src/pingpong/wire.vdl" "src/ids/authorized.id" || shell_test::fail "line ${LINENO}: authorized id (go -> go)"
+ grep -q PING builder.out || shell_test::fail "line ${LINENO}: no PING"
+ grep -q PONG builder.out || shell_test::fail "line ${LINENO}: no PONG"
+
+ test_with_files "src/pong/pong.js" "src/ping/ping.js" "src/ids/authorized.id" || shell_test::fail "line ${LINENO}: authorized id (js -> js)"
+ grep -q PING builder.out || shell_test::fail "line ${LINENO}: no PING"
+ grep -q PONG builder.out || shell_test::fail "line ${LINENO}: no PONG"
+
+ echo -e "\n\n>>>>> Test with expired blessings\n\n"
+
+ test_with_files "src/pong/pong.go" "src/ping/ping.go" "src/pingpong/wire.vdl" "src/ids/expired.id" || shell_test::fail "line ${LINENO}: expired id (go -> go)"
+ grep -q "not authorized" builder.out || shell_test::fail "line ${LINENO}: rpc with expired id succeeded (go -> go)"
+
+ test_with_files "src/pong/pong.js" "src/ping/ping.js" "src/ids/expired.id" || shell_test::fail "line ${LINENO}: expired id (js -> js)"
+ grep -q "not authorized" builder.out || shell_test::fail "line ${LINENO}: rpc with expired id succeeded (js -> js)"
+
+ echo -e "\n\n>>>>> Test with unauthorized blessings\n\n"
+
+ test_with_files "src/pong/pong.go" "src/ping/ping.go" "src/pingpong/wire.vdl" "src/ids/unauthorized.id" || shell_test::fail "line ${LINENO}: unauthorized id (go -> go)"
+ grep -q "not authorized" builder.out || shell_test::fail "line ${LINENO}: rpc with unauthorized id succeeded (go -> go)"
+
+ test_with_files "src/pong/pong.js" "src/ping/ping.js" "src/ids/unauthorized.id" || shell_test::fail "line ${LINENO}: unauthorized id (js -> js)"
+ grep -q "not authorized" builder.out || shell_test::fail "line ${LINENO}: rpc with unauthorized id succeeded (js -> js)"
+
+ shell_test::pass
+}
+
+main "$@"
diff --git a/testdata/src/ids/authorized.id b/testdata/src/ids/authorized.id
new file mode 100644
index 0000000..2df5321
--- /dev/null
+++ b/testdata/src/ids/authorized.id
@@ -0,0 +1,11 @@
+[
+{
+ "Name": "myserver",
+ "Files": ["pong.go", "pong.js"]
+},
+{
+ "Name": "myclient",
+ "Blesser": "myserver",
+ "Files": ["ping.go", "ping.js"]
+}
+]
diff --git a/testdata/src/ids/expired.id b/testdata/src/ids/expired.id
new file mode 100644
index 0000000..446851f
--- /dev/null
+++ b/testdata/src/ids/expired.id
@@ -0,0 +1,12 @@
+[
+{
+ "Name": "myserver",
+ "Files": ["pong.go", "pong.js"]
+},
+{
+ "Name": "myclient",
+ "Blesser": "myserver",
+ "Duration": "0s",
+ "Files": ["ping.go", "ping.js"]
+}
+]
diff --git a/testdata/src/ids/unauthorized.id b/testdata/src/ids/unauthorized.id
new file mode 100644
index 0000000..cd39f52
--- /dev/null
+++ b/testdata/src/ids/unauthorized.id
@@ -0,0 +1,15 @@
+[
+{
+ "Name": "myorg"
+},
+{
+ "Name": "myserver",
+ "Blesser": "myorg",
+ "Files": ["pong.go", "pong.js"]
+},
+{
+ "Name": "myclient",
+ "Blesser": "myorg",
+ "Files": ["ping.go", "ping.js"]
+}
+]
diff --git a/testdata/src/ping/ping.go b/testdata/src/ping/ping.go
new file mode 100644
index 0000000..368516d
--- /dev/null
+++ b/testdata/src/ping/ping.go
@@ -0,0 +1,24 @@
+// +build OMIT
+// NOTE(review): a build constraint must be followed by a blank line before
+// the package clause to take effect; as written the go tool treats this as
+// an ordinary comment — confirm whether exclusion was intended.
+package main
+
+import (
+ "fmt"
+
+ _ "v.io/core/veyron/profiles"
+ "v.io/core/veyron2"
+
+ "pingpong"
+)
+
+// Playground example client: sends a single Ping RPC to the "pingpong"
+// server and prints the reply.
+func main() {
+ ctx, shutdown := veyron2.Init()
+ defer shutdown()
+ log := veyron2.GetLogger(ctx)
+
+ s := pingpong.PingPongClient("pingpong")
+ pong, err := s.Ping(ctx, "PING")
+ if err != nil {
+ log.Fatal("error pinging: ", err)
+ }
+ fmt.Println(pong)
+}
diff --git a/testdata/src/ping/ping.js b/testdata/src/ping/ping.js
new file mode 100644
index 0000000..6b8129e
--- /dev/null
+++ b/testdata/src/ping/ping.js
@@ -0,0 +1,19 @@
+// Playground example client: binds to the "pingpong" service, sends one
+// ping, prints the reply, and exits.
+var veyron = require('veyron');
+var context = veyron.context;
+
+veyron.init(function(err, rt) {
+ if (err) throw err;
+
+ var ctx = new context.Context();
+
+ rt.bindTo(ctx, 'pingpong', function(err, s) {
+ if (err) throw err;
+
+ s.ping(ctx, 'PING', function(err, pong) {
+ if (err) throw err;
+
+ console.log(pong);
+ // Exit explicitly; the runtime would otherwise keep the process alive.
+ process.exit(0);
+ });
+ });
+});
diff --git a/testdata/src/pingpong/wire.vdl b/testdata/src/pingpong/wire.vdl
new file mode 100644
index 0000000..dba0c57
--- /dev/null
+++ b/testdata/src/pingpong/wire.vdl
@@ -0,0 +1,5 @@
+// Wire definition for the playground ping-pong example.
+package pingpong
+
+// PingPong is a service with a single Ping method returning a reply string.
+type PingPong interface {
+ Ping(message string) (string | error)
+}
diff --git a/testdata/src/pingpong/wire.vdl.go b/testdata/src/pingpong/wire.vdl.go
new file mode 100644
index 0000000..ee9612e
--- /dev/null
+++ b/testdata/src/pingpong/wire.vdl.go
@@ -0,0 +1,131 @@
+// This file was auto-generated by the veyron vdl tool.
+// Source: wire.vdl
+
+package pingpong
+
+import (
+ // The non-user imports are prefixed with "__" to prevent collisions.
+ __veyron2 "v.io/core/veyron2"
+ __context "v.io/core/veyron2/context"
+ __ipc "v.io/core/veyron2/ipc"
+)
+
+// PingPongClientMethods is the client interface
+// containing PingPong methods.
+type PingPongClientMethods interface {
+ Ping(ctx *__context.T, message string, opts ...__ipc.CallOpt) (string, error)
+}
+
+// PingPongClientStub adds universal methods to PingPongClientMethods.
+type PingPongClientStub interface {
+ PingPongClientMethods
+ __ipc.UniversalServiceMethods
+}
+
+// PingPongClient returns a client stub for PingPong.
+func PingPongClient(name string, opts ...__ipc.BindOpt) PingPongClientStub {
+ var client __ipc.Client
+ for _, opt := range opts {
+ if clientOpt, ok := opt.(__ipc.Client); ok {
+ client = clientOpt
+ }
+ }
+ return implPingPongClientStub{name, client}
+}
+
+type implPingPongClientStub struct {
+ name string
+ client __ipc.Client
+}
+
+func (c implPingPongClientStub) c(ctx *__context.T) __ipc.Client {
+ if c.client != nil {
+ return c.client
+ }
+ return __veyron2.GetClient(ctx)
+}
+
+func (c implPingPongClientStub) Ping(ctx *__context.T, i0 string, opts ...__ipc.CallOpt) (o0 string, err error) {
+ var call __ipc.Call
+ if call, err = c.c(ctx).StartCall(ctx, c.name, "Ping", []interface{}{i0}, opts...); err != nil {
+ return
+ }
+ if ierr := call.Finish(&o0, &err); ierr != nil {
+ err = ierr
+ }
+ return
+}
+
+// PingPongServerMethods is the interface a server writer
+// implements for PingPong.
+type PingPongServerMethods interface {
+ Ping(ctx __ipc.ServerContext, message string) (string, error)
+}
+
+// PingPongServerStubMethods is the server interface containing
+// PingPong methods, as expected by ipc.Server.
+// There is no difference between this interface and PingPongServerMethods
+// since there are no streaming methods.
+type PingPongServerStubMethods PingPongServerMethods
+
+// PingPongServerStub adds universal methods to PingPongServerStubMethods.
+type PingPongServerStub interface {
+ PingPongServerStubMethods
+ // Describe the PingPong interfaces.
+ Describe__() []__ipc.InterfaceDesc
+}
+
+// PingPongServer returns a server stub for PingPong.
+// It converts an implementation of PingPongServerMethods into
+// an object that may be used by ipc.Server.
+func PingPongServer(impl PingPongServerMethods) PingPongServerStub {
+ stub := implPingPongServerStub{
+ impl: impl,
+ }
+ // Initialize GlobState; always check the stub itself first, to handle the
+ // case where the user has the Glob method defined in their VDL source.
+ if gs := __ipc.NewGlobState(stub); gs != nil {
+ stub.gs = gs
+ } else if gs := __ipc.NewGlobState(impl); gs != nil {
+ stub.gs = gs
+ }
+ return stub
+}
+
+type implPingPongServerStub struct {
+ impl PingPongServerMethods
+ gs *__ipc.GlobState
+}
+
+func (s implPingPongServerStub) Ping(ctx __ipc.ServerContext, i0 string) (string, error) {
+ return s.impl.Ping(ctx, i0)
+}
+
+func (s implPingPongServerStub) Globber() *__ipc.GlobState {
+ return s.gs
+}
+
+func (s implPingPongServerStub) Describe__() []__ipc.InterfaceDesc {
+ return []__ipc.InterfaceDesc{PingPongDesc}
+}
+
+// PingPongDesc describes the PingPong interface.
+var PingPongDesc __ipc.InterfaceDesc = descPingPong
+
+// descPingPong hides the desc to keep godoc clean.
+var descPingPong = __ipc.InterfaceDesc{
+ Name: "PingPong",
+ PkgPath: "v.io/playground/testdata/src/pingpong",
+ Methods: []__ipc.MethodDesc{
+ {
+ Name: "Ping",
+ InArgs: []__ipc.ArgDesc{
+ {"message", ``}, // string
+ },
+ OutArgs: []__ipc.ArgDesc{
+ {"", ``}, // string
+ {"", ``}, // error
+ },
+ },
+ },
+}
diff --git a/testdata/src/pong/pong.go b/testdata/src/pong/pong.go
new file mode 100644
index 0000000..6336f7f
--- /dev/null
+++ b/testdata/src/pong/pong.go
@@ -0,0 +1,48 @@
+// +build OMIT
+// NOTE(review): a build constraint must be followed by a blank line before
+// the package clause to take effect; as written the go tool treats this as
+// an ordinary comment — confirm whether exclusion was intended.
+package main
+
+import (
+ "fmt"
+
+ "v.io/core/veyron/lib/signals"
+ "v.io/core/veyron/profiles"
+ "v.io/core/veyron2"
+ "v.io/core/veyron2/ipc"
+
+ "pingpong"
+)
+
+// pongd implements the PingPong server interface.
+type pongd struct{}
+
+// Ping logs the caller's blessings and message, then replies "PONG".
+func (f *pongd) Ping(ctx ipc.ServerContext, message string) (result string, err error) {
+ remote := ctx.RemoteBlessings().ForContext(ctx)
+ fmt.Printf("%v: %q\n", remote, message)
+ return "PONG", nil
+}
+
+// Playground example server: mounts a PingPong service at "pingpong" and
+// serves until shutdown is signalled.
+func main() {
+ ctx, shutdown := veyron2.Init()
+ defer shutdown()
+ log := veyron2.GetLogger(ctx)
+
+ s, err := veyron2.NewServer(ctx)
+ if err != nil {
+ log.Fatal("failure creating server: ", err)
+ }
+ log.Info("Waiting for ping")
+
+ serverPong := pingpong.PingPongServer(&pongd{})
+
+ if endpoint, err := s.Listen(profiles.LocalListenSpec); err == nil {
+ fmt.Printf("Listening at: %v\n", endpoint)
+ } else {
+ log.Fatal("error listening to service: ", err)
+ }
+
+ if err := s.Serve("pingpong", serverPong, nil); err != nil {
+ log.Fatal("error serving service: ", err)
+ }
+
+ // Wait forever.
+ <-signals.ShutdownOnSignals(ctx)
+}
diff --git a/testdata/src/pong/pong.js b/testdata/src/pong/pong.js
new file mode 100644
index 0000000..5fa5ce6
--- /dev/null
+++ b/testdata/src/pong/pong.js
@@ -0,0 +1,16 @@
+// Playground example server: serves a pingpong service that logs each
+// caller's blessings and replies 'PONG'.
+var veyron = require('veyron');
+
+var pingPongService = {
+ ping: function(ctx, msg){
+ console.log('['+ctx.remoteBlessingStrings+'] '+msg);
+ return 'PONG';
+ }
+};
+
+veyron.init(function(err, rt) {
+ if (err) throw err;
+
+ // The process stays alive serving requests; no explicit exit here.
+ rt.serve('pingpong', pingPongService, function(err) {
+ if (err) throw err;
+ });
+});