Merge "veyron/services/node/impl/node*: helper has uname associations"
diff --git a/services/mgmt/node/impl/app_invoker.go b/services/mgmt/node/impl/app_invoker.go
index 7dc6543..6c2ab54 100644
--- a/services/mgmt/node/impl/app_invoker.go
+++ b/services/mgmt/node/impl/app_invoker.go
@@ -105,6 +105,7 @@
"veyron.io/veyron/veyron2/services/mgmt/application"
"veyron.io/veyron/veyron2/services/mounttable"
"veyron.io/veyron/veyron2/services/mounttable/types"
+ "veyron.io/veyron/veyron2/verror"
"veyron.io/veyron/veyron2/vlog"
vexec "veyron.io/veyron/veyron/lib/exec"
@@ -153,6 +154,7 @@
// suffix. It is used to identify an application, installation, or
// instance.
suffix []string
+ uat systemNameIdentityAssociation
}
func saveEnvelope(dir string, envelope *application.Envelope) error {
@@ -387,9 +389,52 @@
return instanceDir, instanceID, nil
}
+// isSetuid is a function variable so that tests can override its
+// implementation.
+var isSetuid = func(fileStat os.FileInfo) bool {
+ vlog.VI(2).Infof("running the original isSetuid")
+ return fileStat.Mode()&os.ModeSetuid == os.ModeSetuid
+}
+
+// systemAccountForHelper returns the uname that the helper will use to
+// invoke the application. If the helper exists and is setuid, the node
+// manager requires a uname to be associated with the Veyron identity
+// that requested starting the application.
+// TODO(rjkroege): This function assumes a desktop installation target
+// and is probably not a good fit in other contexts. Revisit the design
+// as appropriate. It also internalizes a decision about when an
+// application may be started; that decision should be made explicit.
+func systemAccountForHelper(helperStat os.FileInfo, identityNames []string, uat systemNameIdentityAssociation) (systemName string, err error) {
+ haveHelper := isSetuid(helperStat)
+ systemName, present := uat.associatedSystemAccount(identityNames)
+
+ switch {
+ case haveHelper && present:
+ return systemName, nil
+ case haveHelper && !present:
+ // The helper is owned by the node manager and installed as setuid root.
+ // Therefore, the node manager must never run an app as itself to
+ // prevent an app trivially granting itself root permissions.
+ // In this case, the invoking identity must have an associated uname.
+ return "", verror.NoAccessf("use of setuid helper requires an associated uname")
+ case !haveHelper:
+ // When the helper is not setuid, the helper can't change the
+ // app's uid so just run the app as the node manager's uname
+ // whether or not there is an association.
+ vlog.VI(1).Infof("helper not setuid. Node manager will invoke app with its own userid")
+ user, err := user.Current()
+ if err != nil {
+ vlog.Errorf("user.Current() failed: %v", err)
+ return "", errOperationFailed
+ }
+ return user.Username, nil
+ }
+ return "", errOperationFailed
+}
+
// TODO(rjkroege): Turning on the setuid feature of the suidhelper
// requires an installer with root permissions to install it in <config.Root>/helper
-func genCmd(instanceDir string, helperPath string) (*exec.Cmd, error) {
+func genCmd(instanceDir string, helperPath string, uat systemNameIdentityAssociation, identityNames []string) (*exec.Cmd, error) {
versionLink := filepath.Join(instanceDir, "version")
versionDir, err := filepath.EvalSymlinks(versionLink)
if err != nil {
@@ -414,18 +459,11 @@
cmd := exec.Command(helperPath)
cmd.Args = append(cmd.Args, "--username")
- if helperStat.Mode()&os.ModeSetuid == 0 {
- vlog.Errorf("helper not setuid. Node manager will invoke app with its own userid")
- user, err := user.Current()
- if err != nil {
- vlog.Errorf("user.Current() failed: %v", err)
- return nil, errOperationFailed
- }
- cmd.Args = append(cmd.Args, user.Username)
- } else {
- // TODO(rjkroege): Use the username associated with the veyron identity.
- return nil, errOperationFailed
+ uname, err := systemAccountForHelper(helperStat, identityNames, uat)
+ if err != nil {
+ return nil, err
}
+ cmd.Args = append(cmd.Args, uname)
// TODO(caprita): Also pass in configuration info like NAMESPACE_ROOT to
// the app (to point to the device mounttable).
@@ -510,11 +548,11 @@
return nil
}
-func (i *appInvoker) run(instanceDir string) error {
+func (i *appInvoker) run(instanceDir string, blessings []string) error {
if err := transitionInstance(instanceDir, suspended, starting); err != nil {
return err
}
- cmd, err := genCmd(instanceDir, filepath.Join(i.config.Root, "helper"))
+ cmd, err := genCmd(instanceDir, filepath.Join(i.config.Root, "helper"), i.uat, blessings)
if err == nil {
err = i.startCmd(instanceDir, cmd)
}
@@ -525,10 +563,10 @@
return transitionInstance(instanceDir, starting, started)
}
-func (i *appInvoker) Start(ipc.ServerContext) ([]string, error) {
+func (i *appInvoker) Start(call ipc.ServerContext) ([]string, error) {
instanceDir, instanceID, err := i.newInstance()
if err == nil {
- err = i.run(instanceDir)
+ err = i.run(instanceDir, call.RemoteBlessings().ForContext(call))
}
if err != nil {
cleanupDir(instanceDir)
@@ -551,12 +589,13 @@
return instanceDir, nil
}
-func (i *appInvoker) Resume(ipc.ServerContext) error {
+// TODO(rjkroege): Enforce that only the original invoking identity may resume an application.
+func (i *appInvoker) Resume(call ipc.ServerContext) error {
instanceDir, err := i.instanceDir()
if err != nil {
return err
}
- return i.run(instanceDir)
+ return i.run(instanceDir, call.RemoteBlessings().ForContext(call))
}
func stopAppRemotely(appVON string) error {
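
The heart of this change is the setuid-bit test that systemAccountForHelper makes through the isSetuid variable. Below is a minimal standalone sketch of that check, outside the patch; the /usr/bin/passwd path is only an example of a commonly setuid binary:

    package main

    import (
    	"fmt"
    	"os"
    )

    // isSetuid mirrors the check introduced in app_invoker.go: the setuid
    // bit in the file mode decides whether the helper can change the
    // app's uid.
    func isSetuid(fi os.FileInfo) bool {
    	return fi.Mode()&os.ModeSetuid == os.ModeSetuid
    }

    func main() {
    	fi, err := os.Stat("/usr/bin/passwd") // commonly setuid root
    	if err != nil {
    		fmt.Println("stat failed:", err)
    		return
    	}
    	fmt.Printf("%s: setuid=%v\n", fi.Name(), isSetuid(fi))
    }

Only when this bit is set does the node manager insist on an identity-to-uname association; otherwise it falls back to its own user.
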
diff --git a/services/mgmt/node/impl/args_darwin_test.go b/services/mgmt/node/impl/args_darwin_test.go
new file mode 100644
index 0000000..bf4b58d
--- /dev/null
+++ b/services/mgmt/node/impl/args_darwin_test.go
@@ -0,0 +1,5 @@
+package impl_test
+
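+// testUserName is the system account name used in the node manager
+// tests. Go's build rules select this file only when GOOS=darwin,
+// where system accounts are conventionally prefixed with an underscore.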
+const (
+ testUserName = "_uucp"
+)
diff --git a/services/mgmt/node/impl/args_linux_test.go b/services/mgmt/node/impl/args_linux_test.go
new file mode 100644
index 0000000..07c557f
--- /dev/null
+++ b/services/mgmt/node/impl/args_linux_test.go
@@ -0,0 +1,5 @@
+package impl_test
+
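+// testUserName is the system account name used in the node manager
+// tests. Go's build rules select this file only when GOOS=linux.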
+const (
+ testUserName = "uucp"
+)
diff --git a/services/mgmt/node/impl/association_state.go b/services/mgmt/node/impl/association_state.go
new file mode 100644
index 0000000..04f04f3
--- /dev/null
+++ b/services/mgmt/node/impl/association_state.go
@@ -0,0 +1,50 @@
+package impl
+
+import (
+ "veyron.io/veyron/veyron2/services/mgmt/node"
+)
+
+// TODO(rjk): Replace this with disk-backed storage in the node
+// manager's directory hierarchy.
+type systemNameIdentityAssociation map[string]string
+
+// associatedSystemAccount returns a system name from the identity-to-system-name
+// association store if one exists for any of the listed identities.
+func (u systemNameIdentityAssociation) associatedSystemAccount(identityNames []string) (string, bool) {
+ systemName := ""
+ present := false
+
+ for _, n := range identityNames {
+ if systemName, present = u[n]; present {
+ break
+ }
+ }
+ return systemName, present
+}
+
+func (u systemNameIdentityAssociation) getAssociations() ([]node.Association, error) {
+ assocs := make([]node.Association, 0)
+ for k, v := range u {
+ assocs = append(assocs, node.Association{k, v})
+ }
+ return assocs, nil
+}
+
+func (u systemNameIdentityAssociation) addAssociations(identityNames []string, systemName string) error {
+ for _, n := range identityNames {
+ u[n] = systemName
+ }
+ return nil
+}
+
+func (u systemNameIdentityAssociation) deleteAssociations(identityNames []string) error {
+ for _, n := range identityNames {
+ delete(u, n)
+ }
+ return nil
+}
+
+func newSystemNameIdentityAssociation() systemNameIdentityAssociation {
+ return make(map[string]string)
+}
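
association_state.go is a thin wrapper over a map, and the lookup honours the order of the caller's identity names. A self-contained sketch of the same behaviour (type and method names re-declared here for illustration only):

    package main

    import "fmt"

    // assoc re-declares the shape of systemNameIdentityAssociation.
    type assoc map[string]string

    // lookup returns the account for the first identity that has one,
    // as associatedSystemAccount does.
    func (u assoc) lookup(ids []string) (string, bool) {
    	for _, n := range ids {
    		if acct, ok := u[n]; ok {
    			return acct, true
    		}
    	}
    	return "", false
    }

    func main() {
    	u := assoc{"root/self": "uucp", "root/other": "uucp"}
    	if acct, ok := u.lookup([]string{"root/self"}); ok {
    		fmt.Println("app runs as", acct) // app runs as uucp
    	}
    	delete(u, "root/other") // what deleteAssociations does per name
    	fmt.Println(u)          // map[root/self:uucp]
    }
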
diff --git a/services/mgmt/node/impl/dispatcher.go b/services/mgmt/node/impl/dispatcher.go
index ed930d9..3631710 100644
--- a/services/mgmt/node/impl/dispatcher.go
+++ b/services/mgmt/node/impl/dispatcher.go
@@ -49,7 +49,8 @@
config *config.State
// dispatcherMutex is a lock for coordinating concurrent access to some
// dispatcher methods.
- mu sync.RWMutex
+ mu sync.RWMutex
+ uat systemNameIdentityAssociation
}
var _ ipc.Dispatcher = (*dispatcher)(nil)
@@ -83,6 +84,7 @@
updating: newUpdatingState(),
},
config: config,
+ uat: newSystemNameIdentityAssociation(),
}
// If there exists a signed ACL from a previous instance we prefer that.
aclFile, sigFile, _ := d.getACLFilePaths()
@@ -235,6 +237,7 @@
updating: d.internal.updating,
config: d.config,
disp: d,
+ uat: d.uat,
})
return ipc.ReflectInvoker(receiver), d.auth, nil
case appsSuffix:
@@ -242,6 +245,7 @@
callback: d.internal.callback,
config: d.config,
suffix: components[1:],
+ uat: d.uat,
})
// TODO(caprita,rjkroege): Once we implement per-object ACLs
// (i.e. each installation and instance), replace d.auth with
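
The dispatcher change threads a single uat table into both the node and app invokers. Since Go maps are reference types, an association written through the node object is immediately visible when an app is started; a toy sketch of the aliasing (hypothetical types):

    package main

    import "fmt"

    type table map[string]string // stands in for systemNameIdentityAssociation

    type nodeInvoker struct{ uat table }
    type appInvoker struct{ uat table }

    func main() {
    	shared := table{}
    	n := nodeInvoker{uat: shared}
    	a := appInvoker{uat: shared}
    	n.uat["root/self"] = "uucp"     // written via the node invoker...
    	fmt.Println(a.uat["root/self"]) // ...read by the app invoker: uucp
    }
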
diff --git a/services/mgmt/node/impl/impl_test.go b/services/mgmt/node/impl/impl_test.go
index 961fded..83510d5 100644
--- a/services/mgmt/node/impl/impl_test.go
+++ b/services/mgmt/node/impl/impl_test.go
@@ -47,7 +47,7 @@
// TestHelperProcess is blackbox boilerplate.
func TestHelperProcess(t *testing.T) {
// All TestHelperProcess invocations need a Runtime. Create it here.
- rt.Init()
+ rt.Init(veyron2.ForceNewSecurityModel{})
// Disable the cache because we will be manipulating/using the namespace
// across multiple processes and want predictable behaviour without
@@ -232,7 +232,7 @@
output := "#!/bin/bash\n"
output += "VEYRON_SUIDHELPER_TEST=1"
output += " "
- output += "exec" + " " + os.Args[0] + " -test.run=TestSuidHelper $*"
+ output += "exec" + " " + os.Args[0] + " " + "-minuid=1" + " " + "-test.run=TestSuidHelper $*"
output += "\n"
vlog.VI(1).Infof("script\n%s", output)
@@ -578,11 +578,7 @@
// Start an instance of the app.
instance1ID := startApp(t, appID)
- u, err := user.Current()
- if err != nil {
- t.Fatalf("user.Current() failed: %v", err)
- }
- verifyHelperArgs(t, <-pingCh, u.Username) // Wait until the app pings us that it's ready.
+ verifyHelperArgs(t, <-pingCh, userName(t)) // Wait until the app pings us that it's ready.
v1EP1 := resolve(t, "appV1", 1)[0]
@@ -591,7 +587,7 @@
resolveExpectNotFound(t, "appV1")
resumeApp(t, appID, instance1ID)
- verifyHelperArgs(t, <-pingCh, u.Username) // Wait until the app pings us that it's ready.
+ verifyHelperArgs(t, <-pingCh, userName(t)) // Wait until the app pings us that it's ready.
oldV1EP1 := v1EP1
if v1EP1 = resolve(t, "appV1", 1)[0]; v1EP1 == oldV1EP1 {
t.Fatalf("Expected a new endpoint for the app after suspend/resume")
@@ -599,7 +595,7 @@
// Start a second instance.
instance2ID := startApp(t, appID)
- verifyHelperArgs(t, <-pingCh, u.Username) // Wait until the app pings us that it's ready.
+ verifyHelperArgs(t, <-pingCh, userName(t)) // Wait until the app pings us that it's ready.
// There should be two endpoints mounted as "appV1", one for each
// instance of the app.
@@ -645,7 +641,7 @@
// Resume first instance.
resumeApp(t, appID, instance1ID)
- verifyHelperArgs(t, <-pingCh, u.Username) // Wait until the app pings us that it's ready.
+ verifyHelperArgs(t, <-pingCh, userName(t)) // Wait until the app pings us that it's ready.
// Both instances should still be running the first version of the app.
// Check that the mounttable contains two endpoints, one of which is
// v1EP2.
@@ -669,7 +665,7 @@
// Start a third instance.
instance3ID := startApp(t, appID)
- verifyHelperArgs(t, <-pingCh, u.Username) // Wait until the app pings us that it's ready.
+ verifyHelperArgs(t, <-pingCh, userName(t)) // Wait until the app pings us that it's ready.
resolve(t, "appV2", 1)
// Stop second instance.
@@ -685,7 +681,7 @@
// Start a fourth instance. It should be started from version 1.
instance4ID := startApp(t, appID)
- verifyHelperArgs(t, <-pingCh, u.Username) // Wait until the app pings us that it's ready.
+ verifyHelperArgs(t, <-pingCh, userName(t)) // Wait until the app pings us that it's ready.
resolve(t, "appV1", 1)
stopApp(t, appID, instance4ID)
resolveExpectNotFound(t, "appV1")
@@ -1079,3 +1075,233 @@
tsecurity.SetDefaultBlessings(p, b)
return nil
}
+
+// Code to make Association lists sortable.
+type byIdentity []node.Association
+
+func (a byIdentity) Len() int { return len(a) }
+func (a byIdentity) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byIdentity) Less(i, j int) bool { return a[i].IdentityName < a[j].IdentityName }
+
+func listAndVerifyAssociations(t *testing.T, stub node.Node, run veyron2.Runtime, expected []node.Association) {
+ assocs, err := stub.ListAssociations(run.NewContext())
+ if err != nil {
+ t.Fatalf("ListAssociations failed %v", err)
+ }
+ sort.Sort(byIdentity(assocs))
+ sort.Sort(byIdentity(expected))
+ if !reflect.DeepEqual(assocs, expected) {
+ t.Fatalf("ListAssociations() got %v, expected %v", assocs, expected)
+ }
+}
+
+// TODO(rjkroege): Verify that associations persist across restarts
+// once permanent storage is added.
+func TestAccountAssociation(t *testing.T) {
+ defer setupLocalNamespace(t)()
+
+ root, cleanup := setupRootDir(t)
+ defer cleanup()
+
+ var (
+ proot = newRootPrincipal("root")
+ // The two "processes"/runtimes which will act as IPC clients to
+ // the nodemanager process.
+ selfRT = rt.R()
+ otherRT = newRuntime(t)
+ )
+ defer otherRT.Cleanup()
+ // By default, selfRT and otherRT will have blessings generated based
+ // on the username/machine name running this process. Since these
+ // blessings will appear in test expectations, give them readable
+ // names.
+ if err := setDefaultBlessings(selfRT.Principal(), proot, "self"); err != nil {
+ t.Fatal(err)
+ }
+ if err := setDefaultBlessings(otherRT.Principal(), proot, "other"); err != nil {
+ t.Fatal(err)
+ }
+
+ // Set up the node manager.
+ nm := blackbox.HelperCommand(t, "nodeManager", "nm", root, "unused app repo name", "unused curr link")
+ defer setupChildCommand(nm)()
+ if err := nm.Cmd.Start(); err != nil {
+ t.Fatalf("Start() failed: %v", err)
+ }
+ defer nm.Cleanup()
+ readPID(t, nm)
+
+ nodeStub, err := node.BindNode("nm//nm")
+ if err != nil {
+ t.Fatalf("BindNode failed %v", err)
+ }
+
+ // Attempt to list associations on the node manager without having
+ // claimed it.
+ if list, err := nodeStub.ListAssociations(otherRT.NewContext()); err != nil || list != nil {
+ t.Fatalf("ListAssociations should fail on unclaimed node manager but did not: %v", err)
+ }
+ // self claims the node manager.
+ if err = nodeStub.Claim(selfRT.NewContext(), &granter{p: selfRT.Principal(), extension: "alice"}); err != nil {
+ t.Fatalf("Claim failed: %v", err)
+ }
+
+ vlog.VI(2).Info("Verify that associations start out empty.")
+ listAndVerifyAssociations(t, nodeStub, selfRT, []node.Association(nil))
+
+ if err = nodeStub.AssociateAccount(selfRT.NewContext(), []string{"root/self", "root/other"}, "alice_system_account"); err != nil {
+ t.Fatalf("ListAssociations failed %v", err)
+ }
+ vlog.VI(2).Info("Added association should appear.")
+ listAndVerifyAssociations(t, nodeStub, selfRT, []node.Association{
+ {
+ "root/self",
+ "alice_system_account",
+ },
+ {
+ "root/other",
+ "alice_system_account",
+ },
+ })
+
+ if err = nodeStub.AssociateAccount(selfRT.NewContext(), []string{"root/self", "root/other"}, "alice_other_account"); err != nil {
+ t.Fatalf("AssociateAccount failed %v", err)
+ }
+ vlog.VI(2).Info("Change the associations and the change should appear.")
+ listAndVerifyAssociations(t, nodeStub, selfRT, []node.Association{
+ {
+ "root/self",
+ "alice_other_account",
+ },
+ {
+ "root/other",
+ "alice_other_account",
+ },
+ })
+
+ err = nodeStub.AssociateAccount(selfRT.NewContext(), []string{"root/other"}, "")
+ if err != nil {
+ t.Fatalf("AssociateAccount failed %v", err)
+ }
+ vlog.VI(2).Info("Verify that we can remove an association.")
+ listAndVerifyAssociations(t, nodeStub, selfRT, []node.Association{
+ {
+ "root/self",
+ "alice_other_account",
+ },
+ })
+}
+
+// userName returns the system name that the test is running under.
+func userName(t *testing.T) string {
+ u, err := user.Current()
+ if err != nil {
+ t.Fatalf("user.Current() failed: %v", err)
+ }
+ return u.Username
+}
+
+func TestAppWithSuidHelper(t *testing.T) {
+ // Set up mount table, application, and binary repositories.
+ defer setupLocalNamespace(t)()
+ envelope, cleanup := startApplicationRepository()
+ defer cleanup()
+ defer startBinaryRepository()()
+
+ root, cleanup := setupRootDir(t)
+ defer cleanup()
+
+ var (
+ proot = newRootPrincipal("root")
+ // The two "processes"/runtimes which will act as IPC clients to
+ // the nodemanager process.
+ selfRT = rt.R()
+ otherRT = newRuntime(t)
+ )
+ defer otherRT.Cleanup()
+
+ // By default, selfRT and otherRT will have blessings generated
+ // based on the username/machine name running this process. Since
+ // these blessings can appear in debugging output, give them
+ // recognizable names.
+ if err := setDefaultBlessings(selfRT.Principal(), proot, "self"); err != nil {
+ t.Fatal(err)
+ }
+ if err := setDefaultBlessings(otherRT.Principal(), proot, "other"); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a script wrapping the test target that implements
+ // suidhelper.
+ generateSuidHelperScript(t, root)
+
+ // Set up the node manager. Since we won't do node manager updates,
+ // don't worry about its application envelope and current link.
+ nm := blackbox.HelperCommand(t, "nodeManager", "-mocksetuid", "nm", root, "unused app repo name", "unused curr link")
+ defer setupChildCommand(nm)()
+ if err := nm.Cmd.Start(); err != nil {
+ t.Fatalf("Start() failed: %v", err)
+ }
+ defer nm.Cleanup()
+ readPID(t, nm)
+
+ nodeStub, err := node.BindNode("nm//nm")
+ if err != nil {
+ t.Fatalf("BindNode failed %v", err)
+ }
+
+ // Create the local server that the app uses to report which system
+ // name the node manager chose to run it as.
+ server, _ := newServer()
+ defer server.Stop()
+ pingCh := make(chan string, 1)
+ if err := server.Serve("pingserver", ipc.LeafDispatcher(pingServerDisp(pingCh), nil)); err != nil {
+ t.Fatalf("Serve(%q, <dispatcher>) failed: %v", "pingserver", err)
+ }
+
+ // Create an envelope for the app.
+ app := blackbox.HelperCommand(t, "app", "appV1")
+ defer setupChildCommandWithBlessing(app, "alice/child")()
+ appTitle := "google naps"
+ *envelope = *envelopeFromCmd(appTitle, app.Cmd)
+
+ // Install and start the app as root/self.
+ appID := installApp(t, selfRT)
+
+ // Claim the node manager with selfRT as root/self/alice.
+ if err = nodeStub.Claim(selfRT.NewContext(), &granter{p: selfRT.Principal(), extension: "alice"}); err != nil {
+ t.Fatal(err)
+ }
+
+ // Starting an instance of the app should fail at this point: there is
+ // no associated uname for the invoking identity.
+ startAppExpectError(t, appID, verror.NoAccess, selfRT)
+
+ // Create an association for selfRT
+ if err = nodeStub.AssociateAccount(selfRT.NewContext(), []string{"root/self"}, testUserName); err != nil {
+ t.Fatalf("AssociateAccount failed %v", err)
+ }
+
+ startApp(t, appID, selfRT)
+ verifyHelperArgs(t, <-pingCh, testUserName) // Wait until the app pings us that it's ready.
+
+ vlog.VI(2).Infof("other attempting to run an app without access. Should fail.")
+ startAppExpectError(t, appID, verror.NoAccess, otherRT)
+
+ // Self will now let other also run apps.
+ if err = nodeStub.AssociateAccount(selfRT.NewContext(), []string{"root/other"}, testUserName); err != nil {
+ t.Fatalf("AssociateAccount failed %v", err)
+ }
+ // Add root/other to the ACL so that it may call Start.
+ newACL := security.ACL{In: map[security.BlessingPattern]security.LabelSet{"root/other/...": security.AllLabels}}
+ if err = nodeStub.SetACL(selfRT.NewContext(), newACL, ""); err != nil {
+ t.Fatalf("SetACL failed %v", err)
+ }
+
+ vlog.VI(2).Infof("other attempting to run an app with access. Should succeed.")
+ startApp(t, appID, otherRT)
+ verifyHelperArgs(t, <-pingCh, testUserName) // Wait until the app pings us that it's ready.
+
+ // TODO(rjkroege): Make sure that all apps have been terminated.
+}
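
Reduced to its RPC sequence, the association lifecycle that the two new tests exercise looks roughly like this (a fragment; stub and helper names are taken from the tests above, and error handling is elided):

    stub, _ := node.BindNode("nm//nm")

    // Take ownership; associations are only usable on a claimed node manager.
    _ = stub.Claim(selfRT.NewContext(), &granter{p: selfRT.Principal(), extension: "alice"})

    // Map a blessing to a local account; ListAssociations reflects the change.
    _ = stub.AssociateAccount(selfRT.NewContext(), []string{"root/self"}, testUserName)
    assocs, _ := stub.ListAssociations(selfRT.NewContext())
    vlog.VI(2).Infof("associations: %v", assocs)
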
diff --git a/services/mgmt/node/impl/node_invoker.go b/services/mgmt/node/impl/node_invoker.go
index 92bbb80..22a0431 100644
--- a/services/mgmt/node/impl/node_invoker.go
+++ b/services/mgmt/node/impl/node_invoker.go
@@ -49,6 +49,7 @@
vexec "veyron.io/veyron/veyron/lib/exec"
"veyron.io/veyron/veyron/lib/glob"
+ "veyron.io/veyron/veyron/lib/netstate"
"veyron.io/veyron/veyron/services/mgmt/node/config"
"veyron.io/veyron/veyron/services/mgmt/profile"
)
@@ -88,6 +89,7 @@
callback *callbackState
config *config.State
disp *dispatcher
+ uat systemNameIdentityAssociation
}
func (i *nodeInvoker) Claim(call ipc.ServerContext) error {
@@ -400,3 +402,39 @@
}
return nil
}
+
+func sameMachineCheck(call ipc.ServerContext) error {
+ switch local, err := netstate.SameMachine(call.RemoteEndpoint().Addr()); {
+ case err != nil:
+ return err
+ case !local:
+ vlog.Errorf("SameMachine() indicates that endpoint is not on the same node")
+ return errOperationFailed
+ }
+ return nil
+}
+
+// TODO(rjkroege): Make it possible for users on the same system to also
+// associate their accounts with their identities.
+func (i *nodeInvoker) AssociateAccount(call ipc.ServerContext, identityNames []string, accountName string) error {
+ if err := sameMachineCheck(call); err != nil {
+ return err
+ }
+
+ if accountName == "" {
+ return i.uat.deleteAssociations(identityNames)
+ } else {
+ // TODO(rjkroege): Optionally verify here that the required uname is a valid.
+ return i.uat.addAssociations(identityNames, accountName)
+ }
+}
+
+func (i *nodeInvoker) ListAssociations(call ipc.ServerContext) (associations []node.Association, err error) {
+ // Temporary debugging code; remove this.
+ vlog.VI(2).Infof("ListAssociations given blessings: %v", call.RemoteBlessings().ForContext(call))
+
+ if err := sameMachineCheck(call); err != nil {
+ return nil, err
+ }
+ return i.uat.getAssociations()
+}
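
Both new methods are gated by sameMachineCheck, and AssociateAccount overloads the empty account name as a deletion request, so a single method covers add, update, and remove. From a caller on the same machine (a fragment; names as in the tests):

    // Add or update an association.
    _ = nodeStub.AssociateAccount(selfRT.NewContext(), []string{"root/other"}, testUserName)
    // An empty account name deletes it again.
    _ = nodeStub.AssociateAccount(selfRT.NewContext(), []string{"root/other"}, "")
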
diff --git a/services/mgmt/node/impl/only_for_test.go b/services/mgmt/node/impl/only_for_test.go
index a0a1d71..3261c42 100644
--- a/services/mgmt/node/impl/only_for_test.go
+++ b/services/mgmt/node/impl/only_for_test.go
@@ -1,6 +1,7 @@
package impl
import (
+ "flag"
"os"
"path/filepath"
@@ -10,6 +11,8 @@
// This file contains code in the impl package that we only want built for tests
// (it exposes public API methods that we don't want to normally expose).
+var mockIsSetuid = flag.Bool("mocksetuid", false, "set flag to pretend to have a helper with setuid permissions")
+
func (c *callbackState) leaking() bool {
c.Lock()
defer c.Unlock()
@@ -28,4 +31,10 @@
vlog.Errorf("Rename(%v, %v) failed: %v", dir, renamed, err)
}
}
+ isSetuid = possiblyMockIsSetuid
+}
+
+func possiblyMockIsSetuid(fileStat os.FileInfo) bool {
+ vlog.VI(2).Infof("Mock isSetuid is reporting: %v", *mockIsSetuid)
+ return *mockIsSetuid
}
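
only_for_test.go swaps the isSetuid function variable at test setup, driven by the -mocksetuid flag. The seam in miniature (a sketch with hypothetical names, not the node manager's code):

    package main

    import (
    	"flag"
    	"fmt"
    )

    var mockSetuid = flag.Bool("mocksetuid", false, "pretend the helper is setuid")

    // isSetuid is a function variable precisely so it can be replaced.
    var isSetuid = func() bool { return false } // real stat-based check elided

    func main() {
    	flag.Parse()
    	if *mockSetuid {
    		isSetuid = func() bool { return true } // test override
    	}
    	fmt.Println("setuid:", isSetuid())
    }
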
diff --git a/services/mgmt/node/impl/util_test.go b/services/mgmt/node/impl/util_test.go
index ea0fed8..3788352 100644
--- a/services/mgmt/node/impl/util_test.go
+++ b/services/mgmt/node/impl/util_test.go
@@ -180,6 +180,14 @@
// The following set of functions are convenience wrappers around various app
// management methods.
+// ort returns the first runtime in opt if one was supplied, and the
+// default runtime otherwise.
+func ort(opt []veyron2.Runtime) veyron2.Runtime {
+ if len(opt) > 0 {
+ return opt[0]
+ }
+ return rt.R()
+}
+
func appStub(t *testing.T, nameComponents ...string) node.Application {
appsName := "nm//apps"
appName := naming.Join(append([]string{appsName}, nameComponents...)...)
@@ -190,16 +198,16 @@
return stub
}
-func installApp(t *testing.T) string {
- appID, err := appStub(t).Install(rt.R().NewContext(), "ar")
+func installApp(t *testing.T, opt ...veyron2.Runtime) string {
+ appID, err := appStub(t).Install(ort(opt).NewContext(), "ar")
if err != nil {
t.Fatalf("Install failed: %v", err)
}
return appID
}
-func startAppImpl(t *testing.T, appID string) (string, error) {
- if instanceIDs, err := appStub(t, appID).Start(rt.R().NewContext()); err != nil {
+func startAppImpl(t *testing.T, appID string, opt []veyron2.Runtime) (string, error) {
+ if instanceIDs, err := appStub(t, appID).Start(ort(opt).NewContext()); err != nil {
return "", err
} else {
if want, got := 1, len(instanceIDs); want != got {
@@ -209,40 +217,40 @@
}
}
-func startApp(t *testing.T, appID string) string {
- instanceID, err := startAppImpl(t, appID)
+func startApp(t *testing.T, appID string, opt ...veyron2.Runtime) string {
+ instanceID, err := startAppImpl(t, appID, opt)
if err != nil {
t.Fatalf("Start(%v) failed: %v", appID, err)
}
return instanceID
}
-func startAppExpectError(t *testing.T, appID string, expectedError verror.ID) {
- if _, err := startAppImpl(t, appID); err == nil || !verror.Is(err, expectedError) {
+func startAppExpectError(t *testing.T, appID string, expectedError verror.ID, opt ...veyron2.Runtime) {
+ if _, err := startAppImpl(t, appID, opt); err == nil || !verror.Is(err, expectedError) {
t.Fatalf("Start(%v) expected to fail with %v, got %v instead", appID, expectedError, err)
}
}
-func stopApp(t *testing.T, appID, instanceID string) {
- if err := appStub(t, appID, instanceID).Stop(rt.R().NewContext(), 5); err != nil {
+func stopApp(t *testing.T, appID, instanceID string, opt ...veyron2.Runtime) {
+ if err := appStub(t, appID, instanceID).Stop(ort(opt).NewContext(), 5); err != nil {
t.Fatalf("Stop(%v/%v) failed: %v", appID, instanceID, err)
}
}
-func suspendApp(t *testing.T, appID, instanceID string) {
- if err := appStub(t, appID, instanceID).Suspend(rt.R().NewContext()); err != nil {
+func suspendApp(t *testing.T, appID, instanceID string, opt ...veyron2.Runtime) {
+ if err := appStub(t, appID, instanceID).Suspend(ort(opt).NewContext()); err != nil {
t.Fatalf("Suspend(%v/%v) failed: %v", appID, instanceID, err)
}
}
-func resumeApp(t *testing.T, appID, instanceID string) {
- if err := appStub(t, appID, instanceID).Resume(rt.R().NewContext()); err != nil {
+func resumeApp(t *testing.T, appID, instanceID string, opt ...veyron2.Runtime) {
+ if err := appStub(t, appID, instanceID).Resume(ort(opt).NewContext()); err != nil {
t.Fatalf("Resume(%v/%v) failed: %v", appID, instanceID, err)
}
}
-func updateApp(t *testing.T, appID string) {
- if err := appStub(t, appID).Update(rt.R().NewContext()); err != nil {
+func updateApp(t *testing.T, appID string, opt ...veyron2.Runtime) {
+ if err := appStub(t, appID).Update(ort(opt).NewContext()); err != nil {
t.Fatalf("Update(%v) failed: %v", appID, err)
}
}
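
The ort helper gives every test wrapper an optional trailing runtime argument without breaking existing call sites. The idiom in isolation (a sketch with a toy Runtime type):

    package main

    import "fmt"

    type Runtime string

    func defaultRT() Runtime { return "selfRT" }

    // ort picks the caller-supplied runtime if present, else the default.
    func ort(opt []Runtime) Runtime {
    	if len(opt) > 0 {
    		return opt[0]
    	}
    	return defaultRT()
    }

    func startApp(appID string, opt ...Runtime) {
    	fmt.Printf("Start(%s) as %s\n", appID, ort(opt))
    }

    func main() {
    	startApp("app1")            // uses the default runtime
    	startApp("app1", "otherRT") // uses an explicit runtime
    }
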