TBR
v: renaming the v directory to go
Change-Id: I4fd9f6ee2895d8034c23b65927eb118980b3c17a
diff --git a/services/mgmt/node/impl/counter.go b/services/mgmt/node/impl/counter.go
new file mode 100644
index 0000000..e70f4bf
--- /dev/null
+++ b/services/mgmt/node/impl/counter.go
@@ -0,0 +1,17 @@
+package impl
+
+import "strconv"
+
+// counter returns a closure that generates unique string identifiers.
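+//
+// A minimal usage sketch:
+//
+//	next := counter()
+//	id := next() // "1"; subsequent calls return "2", "3", ...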
+func counter() func() string {
+ var n int
+ return func() string {
+ n++
+ return strconv.Itoa(n)
+ }
+}
diff --git a/services/mgmt/node/impl/dispatcher.go b/services/mgmt/node/impl/dispatcher.go
new file mode 100644
index 0000000..53c606b
--- /dev/null
+++ b/services/mgmt/node/impl/dispatcher.go
@@ -0,0 +1,34 @@
+package impl
+
+import (
+ "veyron2/ipc"
+ "veyron2/security"
+ "veyron2/services/mgmt/application"
+ "veyron2/services/mgmt/node"
+)
+
+// dispatcher holds the state of the node manager dispatcher.
+type dispatcher struct {
+ envelope *application.Envelope
+ origin string
+}
+
+// NewDispatcher is the dispatcher factory.
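+//
+// A minimal wiring sketch (server is assumed to be an existing ipc.Server):
+//
+//	dispatcher := NewDispatcher(envelope, origin)
+//	err := server.Register("", dispatcher)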
+func NewDispatcher(envelope *application.Envelope, origin string) *dispatcher {
+ return &dispatcher{
+ envelope: envelope,
+ origin: origin,
+ }
+}
+
+// DISPATCHER INTERFACE IMPLEMENTATION
+
+func (d *dispatcher) Lookup(suffix string) (ipc.Invoker, security.Authorizer, error) {
+ invoker := ipc.ReflectInvoker(node.NewServerNode(NewInvoker(d.envelope, d.origin, suffix)))
+ return invoker, nil, nil
+}
diff --git a/services/mgmt/node/impl/impl_test.go b/services/mgmt/node/impl/impl_test.go
new file mode 100644
index 0000000..c8ebeb8
--- /dev/null
+++ b/services/mgmt/node/impl/impl_test.go
@@ -0,0 +1,294 @@
+package impl_test
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "testing"
+
+ "veyron/lib/signals"
+ "veyron/lib/testutil"
+ "veyron/lib/testutil/blackbox"
+ "veyron/services/mgmt/node/impl"
+ mtlib "veyron/services/mounttable/lib"
+
+ "veyron2"
+ "veyron2/ipc"
+ "veyron2/naming"
+ "veyron2/rt"
+ "veyron2/services/mgmt/application"
+ "veyron2/services/mgmt/content"
+ "veyron2/services/mgmt/node"
+ "veyron2/vlog"
+)
+
+var (
+ errOperationFailed = errors.New("operation failed")
+)
+
+type arInvoker struct {
+ envelope *application.Envelope
+}
+
+func (i *arInvoker) Match(_ ipc.Context, _ []string) (application.Envelope, error) {
+ vlog.VI(1).Infof("Match()")
+ return *i.envelope, nil
+}
+
+const bufferLength = 1024
+
+type cmInvoker struct{}
+
+func (i *cmInvoker) Delete(_ ipc.Context) error {
+ return nil
+}
+
+func (i *cmInvoker) Download(_ ipc.Context, stream content.ContentServiceDownloadStream) error {
+ vlog.VI(1).Infof("Download()")
+ file, err := os.Open(os.Args[0])
+ if err != nil {
+ vlog.Errorf("Open() failed: %v", err)
+ return errOperationFailed
+ }
+ defer file.Close()
+ buffer := make([]byte, bufferLength)
+ for {
+ n, err := file.Read(buffer)
+ if err != nil && err != io.EOF {
+ vlog.Errorf("Read() failed: %v", err)
+ return errOperationFailed
+ }
+ if n == 0 {
+ break
+ }
+ if err := stream.Send(buffer[:n]); err != nil {
+ vlog.Errorf("Send() failed: %v", err)
+ return errOperationFailed
+ }
+ }
+ return nil
+}
+
+func (i *cmInvoker) Upload(_ ipc.Context, _ content.ContentServiceUploadStream) (string, error) {
+ return "", nil
+}
+
+func init() {
+ blackbox.CommandTable["nodeManager"] = nodeManager
+}
+
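+// getProcessID reads a line from the child process and parses it as the
+// process ID that the child printed at startup.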
+func getProcessID(t *testing.T, child *blackbox.Child) int {
+ line, err := child.ReadLineFromChild()
+ if err != nil {
+ child.Cleanup()
+ t.Fatalf("ReadLineFromChild() failed: %v", err)
+ }
+ pid, err := strconv.Atoi(line)
+ if err != nil {
+ t.Fatalf("Atoi(%v) failed: %v", line, err)
+ }
+ return pid
+}
+
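+// invokeUpdate calls the Update() method of the node manager mounted under
+// the name "nm" at the given address.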
+func invokeUpdate(t *testing.T, nmAddress string) {
+ address := naming.JoinAddressName(nmAddress, "nm")
+ nmClient, err := node.BindNode(address)
+ if err != nil {
+ t.Fatalf("BindNode(%v) failed: %v", address, err)
+ }
+ if err := nmClient.Update(); err != nil {
+ t.Fatalf("%v.Update() failed: %v", address, err)
+ }
+}
+
+// nodeManager is the entry point for the node manager blackbox process.
+func nodeManager(argv []string) {
+ origin := argv[0]
+ runtime := rt.Init()
+ defer runtime.Shutdown()
+
+ _, nmCleanup := startNodeManager(runtime, origin)
+ defer nmCleanup()
+ // Wait until shutdown.
+ <-signals.ShutdownOnSignals()
+ blackbox.WaitForEOFOnStdin()
+}
+
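+// spawnNodeManager starts the node manager blackbox process, passing it the
+// mount table root and the identity file through environment variables.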
+func spawnNodeManager(t *testing.T, arAddress, mtAddress string, idFile string) *blackbox.Child {
+ child := blackbox.HelperCommand(t, "nodeManager", arAddress)
+ child.Cmd.Env = append(child.Cmd.Env, fmt.Sprintf("MOUNTTABLE_ROOT=%v", mtAddress), fmt.Sprintf("VEYRON_IDENTITY=%v", idFile))
+ if err := child.Cmd.Start(); err != nil {
+ t.Fatalf("Start() failed: %v", err)
+ }
+ return child
+}
+
+func startApplicationRepository(t *testing.T, runtime veyron2.Runtime, cmAddress string, envelope *application.Envelope) (string, func()) {
+ server, err := runtime.NewServer()
+ if err != nil {
+ t.Fatalf("NewServer() failed: %v", err)
+ }
+ suffix, dispatcher := "ar", ipc.SoloDispatcher(application.NewServerRepository(&arInvoker{envelope: envelope}), nil)
+ if err := server.Register(suffix, dispatcher); err != nil {
+ t.Fatalf("Register(%v, %v) failed: %v", suffix, dispatcher, err)
+ }
+ protocol, hostname := "tcp", "localhost:0"
+ endpoint, err := server.Listen(protocol, hostname)
+ if err != nil {
+ t.Fatalf("Listen(%v, %v) failed: %v", protocol, hostname, err)
+ }
+ address := naming.JoinAddressName(endpoint.String(), suffix)
+ vlog.VI(1).Infof("Application repository running at endpoint: %s", address)
+ return address, func() {
+ if err := server.Stop(); err != nil {
+ t.Fatalf("Stop() failed: %v", err)
+ }
+ }
+}
+
+func startContentManager(t *testing.T, runtime veyron2.Runtime) (string, func()) {
+ server, err := runtime.NewServer()
+ if err != nil {
+ t.Fatalf("NewServer() failed: %v", err)
+ }
+ suffix, dispatcher := "cm", ipc.SoloDispatcher(content.NewServerContent(&cmInvoker{}), nil)
+ if err := server.Register(suffix, dispatcher); err != nil {
+ t.Fatalf("Register(%v, %v) failed: %v", suffix, dispatcher, err)
+ }
+ protocol, hostname := "tcp", "localhost:0"
+ endpoint, err := server.Listen(protocol, hostname)
+ if err != nil {
+ t.Fatalf("Listen(%v, %v) failed: %v", protocol, hostname, err)
+ }
+ address := naming.JoinAddressName(endpoint.String(), suffix)
+ vlog.VI(1).Infof("Content manager running at endpoint: %s", address)
+ return address, func() {
+ if err := server.Stop(); err != nil {
+ t.Fatalf("Stop() failed: %v", err)
+ }
+ }
+}
+
+func startMountTable(t *testing.T, runtime veyron2.Runtime) (string, func()) {
+ server, err := runtime.NewServer()
+ if err != nil {
+ t.Fatalf("NewServer() failed: %v", err)
+ }
+ suffix, dispatcher := "mt", mtlib.NewMountTable()
+ if err := server.Register(suffix, dispatcher); err != nil {
+ t.Fatalf("Register(%v, %v) failed: %v", suffix, dispatcher, err)
+ }
+ protocol, hostname := "tcp", "localhost:0"
+ endpoint, err := server.Listen(protocol, hostname)
+ if err != nil {
+ t.Fatalf("Listen(%v, %v) failed: %v", protocol, hostname, err)
+ }
+ address := naming.JoinAddressName(endpoint.String(), suffix)
+ vlog.VI(1).Infof("Mount table running at endpoint: %s", address)
+ return address, func() {
+ if err := server.Stop(); err != nil {
+ t.Fatalf("Stop() failed: %v", err)
+ }
+ }
+}
+
+func startNodeManager(runtime veyron2.Runtime, origin string) (string, func()) {
+ server, err := runtime.NewServer()
+ if err != nil {
+ vlog.Fatalf("NewServer() failed: %v", err)
+ }
+ protocol, hostname := "tcp", "localhost:0"
+ endpoint, err := server.Listen(protocol, hostname)
+ if err != nil {
+ vlog.Fatalf("Listen(%v, %v) failed: %v", protocol, hostname, err)
+ }
+ suffix, dispatcher := "", impl.NewDispatcher(&application.Envelope{}, origin)
+ if err := server.Register(suffix, dispatcher); err != nil {
+ vlog.Fatalf("Register(%v, %v) failed: %v", suffix, dispatcher, err)
+ }
+ address := naming.JoinAddressName(endpoint.String(), suffix)
+ vlog.VI(1).Infof("Node manager running at endpoint: %q", address)
+ name := "nm"
+ if err := server.Publish(name); err != nil {
+ vlog.Fatalf("Publish(%v) failed: %v", name, err)
+ }
+ fmt.Printf("%d\n", os.Getpid())
+ return address, func() {
+ if err := server.Stop(); err != nil {
+ vlog.Fatalf("Stop() failed: %v", err)
+ }
+ }
+}
+
+func TestHelperProcess(t *testing.T) {
+ blackbox.HelperProcess(t)
+}
+
+func TestUpdate(t *testing.T) {
+ // Set up a mount table, a content manager, and an application repository.
+ runtime := rt.Init()
+ defer runtime.Shutdown()
+ mtAddress, mtCleanup := startMountTable(t, runtime)
+ defer mtCleanup()
+ mt := runtime.MountTable()
+ cmAddress, cmCleanup := startContentManager(t, runtime)
+ defer cmCleanup()
+ envelope := application.Envelope{}
+ arAddress, arCleanup := startApplicationRepository(t, runtime, cmAddress, &envelope)
+ defer arCleanup()
+
+ // Spawn a node manager with an identity blessed by the mount table's
+ // identity under the name "test", and obtain its endpoint.
+ // TODO(ataly): Eventually we want to use the same identity the node manager
+ // would have if it was running in production.
+
+ idFile := testutil.SaveIdentityToFile(testutil.NewBlessedIdentity(runtime.Identity(), "test"))
+ defer os.Remove(idFile)
+ child := spawnNodeManager(t, arAddress, mtAddress, idFile)
+ defer child.Cleanup()
+ _ = getProcessID(t, child) // sync with the child
+ envelope.Args = child.Cmd.Args[1:]
+ envelope.Env = child.Cmd.Env
+ envelope.Binary = cmAddress
+
+ name := naming.Join(mtAddress, "nm")
+ results, err := mt.Resolve(name)
+ if err != nil {
+ t.Fatalf("Resolve(%v) failed: %v", name, err)
+ }
+ if expected, got := 1, len(results); expected != got {
+ t.Fatalf("Unexpected number of results: expected %d, got %d", expected, got)
+ }
+ nmAddress := results[0]
+ vlog.VI(1).Infof("Node manager running at endpoint: %q -> %s", name, nmAddress)
+
+ // Invoke the Update method and check that another instance of the
+ // node manager binary has been started.
+ invokeUpdate(t, nmAddress)
+ pid := getProcessID(t, child)
+
+ if results, err := mt.Resolve(name); err != nil {
+ t.Fatalf("Resolve(%v) failed: %v", name, err)
+ } else {
+ if expected, got := 2, len(results); expected != got {
+ t.Fatalf("Unexpected number of results: expected %d, got %d", expected, got)
+ }
+ }
+
+ // Terminate the node manager process started by the update.
+ process, err := os.FindProcess(pid)
+ if err != nil {
+ t.Fatalf("FindProcess(%v) failed: %v", pid, err)
+ }
+ if err := process.Kill(); err != nil {
+ t.Fatalf("Kill() failed: %v", err)
+ }
+}
diff --git a/services/mgmt/node/impl/invoker.go b/services/mgmt/node/impl/invoker.go
new file mode 100644
index 0000000..132146f
--- /dev/null
+++ b/services/mgmt/node/impl/invoker.go
@@ -0,0 +1,463 @@
+package impl
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "reflect"
+ "regexp"
+ "runtime"
+ "strings"
+ "syscall"
+
+ vexec "veyron/runtimes/google/lib/exec"
+ ibuild "veyron/services/mgmt/build"
+ "veyron/services/mgmt/profile"
+ "veyron2/ipc"
+ "veyron2/services/mgmt/application"
+ "veyron2/services/mgmt/build"
+ "veyron2/services/mgmt/content"
+ "veyron2/services/mgmt/node"
+ "veyron2/vlog"
+)
+
+var updateSuffix = regexp.MustCompile(`^apps\/.*$`)
+
+// invoker holds the state of a node manager invocation.
+type invoker struct {
+ // envelope is the node manager application envelope.
+ envelope *application.Envelope
+ // origin is a veyron name that resolves to the node manager
+ // envelope.
+ origin string
+ // suffix is the suffix of the current invocation that is assumed to
+ // be used as a relative veyron name to identify an application,
+ // installation, or instance.
+ suffix string
+}
+
+var (
+ errInvalidSuffix = errors.New("invalid suffix")
+ errOperationFailed = errors.New("operation failed")
+)
+
+// NewInvoker is the invoker factory.
+func NewInvoker(envelope *application.Envelope, origin, suffix string) *invoker {
+ return &invoker{
+ envelope: envelope,
+ origin: origin,
+ suffix: suffix,
+ }
+}
+
+// NODE INTERFACE IMPLEMENTATION
+
+// computeNodeProfile generates a description of the runtime
+// environment (supported file format, OS, architecture, libraries) of
+// the host node.
+//
+// TODO(jsimsa): Avoid computing the host node description from
+// scratch if a recent cached copy exists.
+func (i *invoker) computeNodeProfile() (*profile.Specification, error) {
+ result := profile.Specification{Format: profile.Format{Attributes: make(map[string]string)}}
+
+ // Find out what the supported file format, operating system, and
+ // architecture are.
+ switch runtime.GOOS {
+ case "linux":
+ result.Format.Name = ibuild.ELF.String()
+ result.Format.Attributes["os"] = ibuild.LINUX.String()
+ case "darwin":
+ result.Format.Name = ibuild.MACH.String()
+ result.Format.Attributes["os"] = ibuild.DARWIN.String()
+ case "windows":
+ result.Format.Name = ibuild.PE.String()
+ result.Format.Attributes["os"] = ibuild.WINDOWS.String()
+ default:
+ return nil, errors.New("Unsupported operating system: " + runtime.GOOS)
+ }
+ switch runtime.GOARCH {
+ case "amd64":
+ result.Format.Attributes["arch"] = ibuild.AMD64.String()
+ case "arm":
+ result.Format.Attributes["arch"] = ibuild.AMD64.String()
+ case "x86":
+ result.Format.Attributes["arch"] = ibuild.AMD64.String()
+ default:
+ return nil, errors.New("Unsupported hardware architecture: " + runtime.GOARCH)
+ }
+
+ // Find out what the installed dynamically linked libraries are.
+ switch runtime.GOOS {
+ case "linux":
+ // For Linux, we identify which dynamically linked libraries are
+ // installed by parsing the output of "ldconfig -p".
+ command := exec.Command("ldconfig", "-p")
+ output, err := command.CombinedOutput()
+ if err != nil {
+ return nil, err
+ }
+ buf := bytes.NewBuffer(output)
+ // Throw away the first line of output from ldconfig.
+ if _, err := buf.ReadString('\n'); err != nil {
+ return nil, errors.New("Could not identify libraries.")
+ }
+ // Extract the library name and version from every subsequent line.
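+ // For example, the line
+ //   libc.so.6 (libc6,x86-64) => /lib/x86_64-linux-gnu/libc.so.6
+ // yields Library{Name: "c", MajorVersion: "6", MinorVersion: ""}.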
+ result.Libraries = make(map[profile.Library]struct{})
+ line, err := buf.ReadString('\n')
+ for err == nil {
+ words := strings.Split(strings.Trim(line, " \t\n"), " ")
+ if len(words) > 0 {
+ tokens := strings.Split(words[0], ".so")
+ if len(tokens) != 2 {
+ return nil, errors.New("Could not identify library: " + words[0])
+ }
+ name := strings.TrimPrefix(tokens[0], "lib")
+ major, minor := "", ""
+ tokens = strings.SplitN(tokens[1], ".", 3)
+ if len(tokens) >= 2 {
+ major = tokens[1]
+ }
+ if len(tokens) >= 3 {
+ minor = tokens[2]
+ }
+ result.Libraries[profile.Library{Name: name, MajorVersion: major, MinorVersion: minor}] = struct{}{}
+ }
+ line, err = buf.ReadString('\n')
+ }
+ case "darwin":
+ // TODO(jsimsa): Implement.
+ case "windows":
+ // TODO(jsimsa): Implement.
+ default:
+ return nil, errors.New("Unsupported operating system: " + runtime.GOOS)
+ }
+ return &result, nil
+}
+
+// getProfile gets a profile description for the given profile.
+//
+// TODO(jsimsa): Avoid retrieving the list of known profiles from a
+// remote server if a recent cached copy exists.
+func (i *invoker) getProfile(name string) (*profile.Specification, error) {
+ // TODO(jsimsa): This function assumes the existence of a profile
+ // server from which the profiles can be retrieved. The profile
+ // server is a work in progress. When it exists, the commented out
+ // code below should work.
+ var spec profile.Specification
+ /*
+ client, err := r.NewClient()
+ if err != nil {
+ vlog.Errorf("NewClient() failed: %v", err)
+ return nil, err
+ }
+ defer client.Close()
+ server := // TODO
+ method := "Specification"
+ inputs := make([]interface{}, 0)
+ call, err := client.StartCall(server + "/" + name, method, inputs)
+ if err != nil {
+ vlog.Errorf("StartCall(%s, %q, %v) failed: %v\n", server + "/" + name, method, inputs, err)
+ return nil, err
+ }
+ if err := call.Finish(&profiles); err != nil {
+ vlog.Errorf("Finish(%v) failed: %v\n", &profiles, err)
+ return nil, err
+ }
+ */
+ return &profile, nil
+}
+
+// getKnownProfiles gets a list of descriptions for all publicly known
+// profiles.
+//
+// TODO(jsimsa): Avoid retrieving the list of known profiles from a
+// remote server if a recent cached copy exists.
+func (i *invoker) getKnownProfiles() ([]profile.Specification, error) {
+ // TODO(jsimsa): This function assumes the existence of a profile
+ // server from which a list of known profiles can be retrieved. The
+ // profile server is a work in progress. When it exists, the
+ // commented out code below should work.
+ knownProfiles := make([]profile.Specification, 0)
+ /*
+ client, err := r.NewClient()
+ if err != nil {
+ vlog.Errorf("NewClient() failed: %v\n", err)
+ return nil, err
+ }
+ defer client.Close()
+ server := // TODO
+ method := "List"
+ inputs := make([]interface{}, 0)
+ call, err := client.StartCall(server, method, inputs)
+ if err != nil {
+ vlog.Errorf("StartCall(%s, %q, %v) failed: %v\n", server, method, inputs, err)
+ return nil, err
+ }
+ if err := call.Finish(&knownProfiles); err != nil {
+ vlog.Errorf("Finish(&knownProfile) failed: %v\n", err)
+ return nil, err
+ }
+ */
+ return knownProfiles, nil
+}
+
+// matchProfiles takes a profile that describes the host node and a set of
+// publicly known profiles, and returns a node description identifying the
+// publicly known profiles that the host node supports.
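+//
+// For example, a known profile with format ELF, os linux, and arch amd64
+// whose required libraries are all installed on the host is included in
+// the result; any format, attribute, or library mismatch skips it.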
+func (i *invoker) matchProfiles(p *profile.Specification, known []profile.Specification) node.Description {
+ result := node.Description{Profiles: make(map[string]struct{})}
+loop:
+ for _, profile := range known {
+ if profile.Format.Name != p.Format.Name {
+ continue
+ }
+ if profile.Format.Attributes["os"] != p.Format.Attributes["os"] {
+ continue
+ }
+ if profile.Format.Attributes["arch"] != p.Format.Attributes["arch"] {
+ continue
+ }
+ for library := range profile.Libraries {
+ // Current implementation requires exact library name and version match.
+ if _, found := p.Libraries[library]; !found {
+ continue loop
+ }
+ }
+ result.Profiles[profile.Label] = struct{}{}
+ }
+ return result
+}
+
+func (i *invoker) Describe(call ipc.Context) (node.Description, error) {
+ vlog.VI(0).Infof("%v.Describe()", i.suffix)
+ empty := node.Description{}
+ nodeProfile, err := i.computeNodeProfile()
+ if err != nil {
+ return empty, err
+ }
+ knownProfiles, err := i.getKnownProfiles()
+ if err != nil {
+ return empty, err
+ }
+ result := i.matchProfiles(nodeProfile, knownProfiles)
+ return result, nil
+}
+
+func (i *invoker) IsRunnable(call ipc.Context, binary build.BinaryDescription) (bool, error) {
+ vlog.VI(0).Infof("%v.IsRunnable(%v)", i.suffix, binary)
+ nodeProfile, err := i.computeNodeProfile()
+ if err != nil {
+ return false, err
+ }
+ binaryProfiles := make([]profile.Specification, 0)
+ for name := range binary.Profiles {
+ profile, err := i.getProfile(name)
+ if err != nil {
+ return false, err
+ }
+ binaryProfiles = append(binaryProfiles, *profile)
+ }
+ result := i.matchProfiles(nodeProfile, binaryProfiles)
+ return len(result.Profiles) > 0, nil
+}
+
+func (i *invoker) Reset(call ipc.Context, deadline uint64) error {
+ vlog.VI(0).Infof("%v.Reset(%v)", i.suffix, deadline)
+ // TODO(jsimsa): Implement.
+ return nil
+}
+
+// APPLICATION INTERFACE IMPLEMENTATION
+
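+// downloadBinary streams the binary identified by the given content service
+// name into a fresh temporary file, makes the file executable, and returns
+// its path.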
+func downloadBinary(binary string) (string, error) {
+ stub, err := content.BindContent(binary)
+ if err != nil {
+ vlog.Errorf("BindContent(%q) failed: %v", binary, err)
+ return "", errOperationFailed
+ }
+ stream, err := stub.Download()
+ if err != nil {
+ vlog.Errorf("Download() failed: %v", err)
+ return "", errOperationFailed
+ }
+ tmpDir, prefix := "", ""
+ file, err := ioutil.TempFile(tmpDir, prefix)
+ if err != nil {
+ vlog.Errorf("TempFile(%q, %q) failed: %v", tmpDir, prefix, err)
+ return "", errOperationFailed
+ }
+ defer file.Close()
+ for {
+ bytes, err := stream.Recv()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ vlog.Errorf("Recv() failed: %v", err)
+ os.Remove(file.Name())
+ return "", errOperationFailed
+ }
+ if _, err := file.Write(bytes); err != nil {
+ vlog.Errorf("Write() failed: %v", err)
+ os.Remove(file.Name())
+ return "", errOperationFailed
+ }
+ }
+ if err := stream.Finish(); err != nil {
+ vlog.Errorf("Finish() failed: %v", err)
+ os.Remove(file.Name())
+ return "", errOperationFailed
+ }
+ mode := os.FileMode(0755)
+ if err := file.Chmod(mode); err != nil {
+ vlog.Errorf("Chmod(%v) failed: %v", mode, err)
+ os.Remove(file.Name())
+ return "", errOperationFailed
+ }
+ return file.Name(), nil
+}
+
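+// fetchEnvelope retrieves the current application envelope for the node
+// manager from the given application repository.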
+func fetchEnvelope(origin string) (*application.Envelope, error) {
+ stub, err := application.BindRepository(origin)
+ if err != nil {
+ vlog.Errorf("BindRepository(%v) failed: %v", origin, err)
+ return nil, errOperationFailed
+ }
+ // TODO(jsimsa): Include logic that computes the set of supported
+ // profiles.
+ profiles := []string{}
+ envelope, err := stub.Match(profiles)
+ if err != nil {
+ vlog.Errorf("Match(%v) failed: %v", profiles, err)
+ return nil, errOperationFailed
+ }
+ return &envelope, nil
+}
+
+func replaceBinary(oldBinary, newBinary string) error {
+ // Replace the old binary with the new one.
+ if err := syscall.Unlink(oldBinary); err != nil {
+ vlog.Errorf("Unlink(%v) failed: %v", oldBinary, err)
+ return errOperationFailed
+ }
+ if err := os.Rename(newBinary, oldBinary); err != nil {
+ vlog.Errorf("Rename(%v, %v) failed: %v", newBinary, oldBinary, err)
+ return errOperationFailed
+ }
+ return nil
+}
+
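+// spawnNodeManager starts a new node manager process using the binary,
+// arguments, and environment specified by the given envelope.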
+func spawnNodeManager(envelope *application.Envelope) error {
+ cmd := exec.Command(os.Args[0], envelope.Args...)
+ cmd.Env = envelope.Env
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ handle := vexec.NewParentHandle(cmd, "")
+ if err := handle.Start(); err != nil {
+ vlog.Errorf("Start() failed: %v", err)
+ return errOperationFailed
+ }
+ return nil
+}
+
+func (i *invoker) Install(call ipc.Context) (string, error) {
+ vlog.VI(0).Infof("%v.Install()", i.suffix)
+ // TODO(jsimsa): Implement.
+ return "", nil
+}
+
+func (i *invoker) Start(call ipc.Context) ([]string, error) {
+ vlog.VI(0).Infof("%v.Start()", i.suffix)
+ // TODO(jsimsa): Implement.
+ return make([]string, 0), nil
+}
+
+func (i *invoker) Uninstall(call ipc.Context) error {
+ vlog.VI(0).Infof("%v.Uninstall()", i.suffix)
+ // TODO(jsimsa): Implement.
+ return nil
+}
+
+func (i *invoker) Update(call ipc.Context) error {
+ vlog.VI(0).Infof("%v.Update()", i.suffix)
+ switch {
+ case i.suffix == "nm":
+ // This branch attempts to update the node manager itself.
+ envelope, err := fetchEnvelope(i.origin)
+ if err != nil {
+ return err
+ }
+ if envelope.Binary != i.envelope.Binary {
+ file, err := downloadBinary(envelope.Binary)
+ if err != nil {
+ return err
+ }
+ if err := replaceBinary(os.Args[0], file); err != nil {
+ os.Remove(file)
+ return err
+ }
+ }
+ if !reflect.DeepEqual(envelope, i.envelope) {
+ i.envelope = envelope
+ if err := spawnNodeManager(i.envelope); err != nil {
+ return err
+ }
+ // TODO(jsimsa): When Bogdan implements the shutdown API, use it
+ // to stop itself (or have the caller do that).
+ }
+ return nil
+ case updateSuffix.MatchString(i.suffix):
+ // TODO(jsimsa): Implement.
+ return nil
+ default:
+ return errInvalidSuffix
+ }
+}
+
+func (i *invoker) Refresh(call ipc.Context) error {
+ vlog.VI(0).Infof("%v.Refresh()", i.suffix)
+ // TODO(jsimsa): Implement.
+ return nil
+}
+
+func (i *invoker) Restart(call ipc.Context) error {
+ vlog.VI(0).Infof("%v.Restart()", i.suffix)
+ // TODO(jsimsa): Implement.
+ return nil
+}
+
+func (i *invoker) Resume(call ipc.Context) error {
+ vlog.VI(0).Infof("%v.Resume()", i.suffix)
+ // TODO(jsimsa): Implement.
+ return nil
+}
+
+func (i *invoker) Shutdown(call ipc.Context, deadline uint64) error {
+ vlog.VI(0).Infof("%v.Shutdown(%v)", i.suffix, deadline)
+ // TODO(jsimsa): Implement.
+ return nil
+}
+
+func (i *invoker) Suspend(call ipc.Context) error {
+ vlog.VI(0).Infof("%v.Suspend()", i.suffix)
+ // TODO(jsimsa): Implement.
+ return nil
+}
diff --git a/services/mgmt/node/noded/main.go b/services/mgmt/node/noded/main.go
new file mode 100644
index 0000000..7330204
--- /dev/null
+++ b/services/mgmt/node/noded/main.go
@@ -0,0 +1,54 @@
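+// Command noded runs the node manager daemon. An example invocation (flag
+// values are illustrative):
+//
+//	noded --origin=<name> --name=nm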
+package main
+
+import (
+ "flag"
+
+ "veyron/lib/signals"
+ "veyron/services/mgmt/node/impl"
+ "veyron2/rt"
+ "veyron2/services/mgmt/application"
+ "veyron2/vlog"
+)
+
+func main() {
+ var name, origin string
+ flag.StringVar(&name, "name", "", "name to publish the node manager at")
+ flag.StringVar(&origin, "origin", "", "node manager application repository")
+ flag.Parse()
+ if origin == "" {
+ vlog.Fatalf("Specify an origin using --origin=<name>")
+ }
+ runtime := rt.Init()
+ defer runtime.Shutdown()
+ server, err := runtime.NewServer()
+ if err != nil {
+ vlog.Fatalf("NewServer() failed: %v", err)
+ }
+ defer server.Stop()
+ envelope := &application.Envelope{}
+ dispatcher := impl.NewDispatcher(envelope, origin)
+ suffix := ""
+ if err := server.Register(suffix, dispatcher); err != nil {
+ vlog.Errorf("Register(%v, %v) failed: %v", suffix, dispatcher, err)
+ return
+ }
+ protocol, hostname := "tcp", "localhost:0"
+ endpoint, err := server.Listen(protocol, hostname)
+ if err != nil {
+ vlog.Errorf("Listen(%v, %v) failed: %v", protocol, hostname, err)
+ return
+ }
+ vlog.VI(0).Infof("Listening on %v", endpoint)
+ if len(name) > 0 {
+ if err := server.Publish(name); err != nil {
+ vlog.Errorf("Publish(%v) failed: %v", name, err)
+ return
+ }
+ }
+ // Wait until shutdown.
+ <-signals.ShutdownOnSignals()
+}