Merge "services/device/deviced/internal/impl: add stats on app runs and restarts"
diff --git a/lib/exec/exec_test.go b/lib/exec/exec_test.go
index 33aa710..00e58ec 100644
--- a/lib/exec/exec_test.go
+++ b/lib/exec/exec_test.go
@@ -10,6 +10,7 @@
"log"
"os"
"os/exec"
+ "runtime"
"strings"
"sync"
"syscall"
@@ -31,6 +32,14 @@
const baselineOpenFiles = 3
func init() {
+ if os.Getenv("GOMAXPROCS") == "" {
+ // Set the number of logical processors to 1 if GOMAXPROCS is
+ // not set in the environment.
+ //
+ // TODO(caprita): the default in Go 1.5 is the number of
+ // CPUs, which causes flakiness. Figure out why.
+ runtime.GOMAXPROCS(1)
+ }
if os.Getenv("GO_WANT_HELPER_PROCESS_EXEC") == "1" {
return
}
@@ -379,7 +388,6 @@
}
func TestToCompletion(t *testing.T) {
- t.Skip("failing on Go1.5, see http://v.io/i/682")
ph := readyHelper(t, "TestToCompletion", "testSuccess", "...ok")
e := ph.Wait(time.Second)
if e != nil {
@@ -398,7 +406,6 @@
}
func TestExtraFiles(t *testing.T) {
- t.Skip("failing on Go1.5, see http://v.io/i/682")
cmd := helperCommand("testExtraFiles")
rd, wr, err := os.Pipe()
if err != nil {
diff --git a/lib/pubsub/config_test.go b/lib/pubsub/config_test.go
index fcad24b..b354437 100644
--- a/lib/pubsub/config_test.go
+++ b/lib/pubsub/config_test.go
@@ -200,9 +200,15 @@
}
}
-func consumer(t *testing.T, pub *pubsub.Publisher, limit, bufsize int, waiter *sync.WaitGroup) {
+func consumer(t *testing.T, pub *pubsub.Publisher, limit, bufsize int, errch chan error, starter, waiter *sync.WaitGroup) {
+ defer close(errch)
ch := make(chan pubsub.Setting, bufsize)
- st, _ := pub.ForkStream("net", ch)
+ st, err := pub.ForkStream("net", ch)
+ if err != nil {
+ errch <- err
+ return
+ }
+ starter.Done()
i, i2 := 0, 0
if st.Latest["i"] != nil {
i = int(st.Latest["i"].Value().(int))
@@ -218,25 +224,28 @@
switch v := s.Value().(type) {
case int:
if i%2 != 0 {
- t.Errorf("expected a float, got an int")
- break
+ errch <- fmt.Errorf("expected a float, got an int")
+ return
}
if v != i {
- t.Errorf("got %d, want %d", v, i)
+ errch <- fmt.Errorf("got %d, want %d", v, i)
+ return
}
case float64:
if i%2 != 1 {
- t.Errorf("expected an int, got a float")
- break
+ errch <- fmt.Errorf("expected an int, got a float")
+ return
}
if v != float64(i) {
- t.Errorf("got %f, want %f", v, float64(i))
+ errch <- fmt.Errorf("got %f, want %f", v, float64(i))
+ return
}
}
i++
}
if i < limit {
- t.Errorf("didn't read enough settings: got %d, want >= %d", i, limit)
+ errch <- fmt.Errorf("didn't read enough settings: got %d, want >= %d", i, limit)
+ return
}
waiter.Done()
}
@@ -244,7 +253,10 @@
func testStream(t *testing.T, consumerBufSize int) {
in := make(chan pubsub.Setting)
pub := pubsub.NewPublisher()
- stop, _ := pub.CreateStream("net", "network settings", in)
+ stop, err := pub.CreateStream("net", "network settings", in)
+ if err != nil {
+ t.Fatal(err)
+ }
rand.Seed(time.Now().UnixNano())
limit := rand.Intn(5000)
@@ -262,16 +274,34 @@
i := <-progress
t.Logf("limit/2 = %d", i)
- // We use a lot of buffering in this unittest to ensure that
- // we never miss any settings.
- go consumer(t, pub, limit, consumerBufSize, &waiter)
- go consumer(t, pub, limit, consumerBufSize, &waiter)
+ err1 := make(chan error, 1)
+ err2 := make(chan error, 1)
+ var starter sync.WaitGroup
+ starter.Add(2)
+ go consumer(t, pub, limit, consumerBufSize, err1, &starter, &waiter)
+ go consumer(t, pub, limit, consumerBufSize, err2, &starter, &waiter)
reached := <-progress
+ // Give the consumers a chance to get going before shutting down
+ // the producer.
+ starter.Wait()
+ time.Sleep(100 * time.Millisecond)
pub.Shutdown()
shutdown := <-progress
t.Logf("reached %d, shut down at %d", reached, shutdown)
+ // This is a little awkward: we check the error channels for errors only
+ // once everything has run its course, since t.Fatal must not be called
+ // from a goroutine other than the one running the test. Waiting until
+ // here also avoids blocking on errors that never occur when the test passes.
+ err = <-err1
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = <-err2
+ if err != nil {
+ t.Fatal(err)
+ }
// Wait for all goroutines to finish.
waiter.Wait()
}
diff --git a/lib/vdl/codegen/java/file_struct.go b/lib/vdl/codegen/java/file_struct.go
index 3c7190f..941b9fa 100644
--- a/lib/vdl/codegen/java/file_struct.go
+++ b/lib/vdl/codegen/java/file_struct.go
@@ -119,9 +119,9 @@
result += ", ";
{{ end }}
{{ if .IsArray }}
- result += "{{$field.LowercaseName}}:" + java.util.Arrays.toString({{$field.LowercaseName}});
+ result += "{{$field.LowercaseName}}:" + java.util.Arrays.toString(this.{{$field.LowercaseName}});
{{ else }}
- result += "{{$field.LowercaseName}}:" + {{$field.LowercaseName}};
+ result += "{{$field.LowercaseName}}:" + this.{{$field.LowercaseName}};
{{ end}} {{/* if is array */}}
{{ end }} {{/* range over fields */}}
return result + "}";
@@ -164,7 +164,7 @@
AccessModifier: accessModifierForName(fld.Name),
Class: javaType(fld.Type, true, env),
Doc: javaDoc(tdef.FieldDoc[i], tdef.FieldDocSuffix[i]),
- HashcodeComputation: javaHashCode(vdlutil.FirstRuneToLower(fld.Name), fld.Type, env),
+ HashcodeComputation: javaHashCode("this." + vdlutil.FirstRuneToLower(fld.Name), fld.Type, env),
IsClass: isClass(fld.Type, env),
IsArray: isJavaNativeArray(fld.Type, env),
LowercaseName: vdlutil.FirstRuneToLower(fld.Name),
diff --git a/runtime/internal/rpc/benchmark/benchmark/doc.go b/runtime/internal/rpc/benchmark/benchmark/doc.go
index 22a524e..8016f5e 100644
--- a/runtime/internal/rpc/benchmark/benchmark/doc.go
+++ b/runtime/internal/rpc/benchmark/benchmark/doc.go
@@ -64,7 +64,7 @@
if >=0, sets runtime.MemProfileRate
-test.outputdir=
directory in which to write profiles
- -test.parallel=12
+ -test.parallel=<number of threads>
maximum test parallelism
-test.run=
regular expression to select tests and examples to run
diff --git a/runtime/internal/rpc/benchmark/benchmarkd/doc.go b/runtime/internal/rpc/benchmark/benchmarkd/doc.go
index 990f4ad..a2bf3f8 100644
--- a/runtime/internal/rpc/benchmark/benchmarkd/doc.go
+++ b/runtime/internal/rpc/benchmark/benchmarkd/doc.go
@@ -50,7 +50,7 @@
if >=0, sets runtime.MemProfileRate
-test.outputdir=
directory in which to write profiles
- -test.parallel=12
+ -test.parallel=<number of threads>
maximum test parallelism
-test.run=
regular expression to select tests and examples to run