ref: Move v.io/x/ref/runtimes/google to v.io/x/ref/profiles/internal
This better captures the rule that you should not depend on the runtime
implementation; instead, you should use the public API by including a
profile.
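For example, an application binary is expected to pull in the
implementation only through a profile import and otherwise program
against the public v23 API. A minimal sketch (assuming the generic
profile and the v23.Init entry point):

    package main

    import (
        "v.io/v23"

        // Importing a profile registers the runtime implementation;
        // application code never imports profiles/internal directly.
        _ "v.io/x/ref/profiles"
    )

    func main() {
        ctx, shutdown := v23.Init()
        defer shutdown()
        _ = ctx // all further use of the runtime goes through the public API
    }
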
In order to make this move, I had to move some libraries that are commonly
used by outside code from runtimes/google/lib to lib. These are:
- upcqueue
- deque
- sync
- publisher
I will revisit these and see if they can be moved back separately.
In addition, the package runtimes/google/ipc/stream/proxy had many
dependents. In the short term I have moved it to profiles/proxy.
This is certainly the wrong place for this package, but it is
the only place that allows all dependency restrictions to be met.
I will work on resolving that issue separately.
MultiPart: 2/2
Change-Id: I1d3d02b4a311ee4412d2b4f327c3c4bfe2086cf3
diff --git a/profiles/chrome/chromeinit.go b/profiles/chrome/chromeinit.go
index 45f2511..776e74d 100644
--- a/profiles/chrome/chromeinit.go
+++ b/profiles/chrome/chromeinit.go
@@ -13,9 +13,9 @@
"v.io/x/ref/lib/flags"
"v.io/x/ref/lib/websocket"
"v.io/x/ref/profiles/internal"
- _ "v.io/x/ref/runtimes/google/ipc/protocols/ws"
- _ "v.io/x/ref/runtimes/google/ipc/protocols/wsh_nacl"
- grt "v.io/x/ref/runtimes/google/rt"
+ _ "v.io/x/ref/profiles/internal/ipc/protocols/ws"
+ _ "v.io/x/ref/profiles/internal/ipc/protocols/wsh_nacl"
+ grt "v.io/x/ref/profiles/internal/rt"
)
var commonFlags *flags.Flags
diff --git a/profiles/gce/init.go b/profiles/gce/init.go
index 663f07b..ac9d9f1 100644
--- a/profiles/gce/init.go
+++ b/profiles/gce/init.go
@@ -20,10 +20,10 @@
"v.io/x/ref/lib/websocket"
"v.io/x/ref/profiles/internal"
"v.io/x/ref/profiles/internal/gce"
- _ "v.io/x/ref/runtimes/google/ipc/protocols/tcp"
- _ "v.io/x/ref/runtimes/google/ipc/protocols/ws"
- _ "v.io/x/ref/runtimes/google/ipc/protocols/wsh"
- grt "v.io/x/ref/runtimes/google/rt"
+ _ "v.io/x/ref/profiles/internal/ipc/protocols/tcp"
+ _ "v.io/x/ref/profiles/internal/ipc/protocols/ws"
+ _ "v.io/x/ref/profiles/internal/ipc/protocols/wsh"
+ grt "v.io/x/ref/profiles/internal/rt"
)
var commonFlags *flags.Flags
diff --git a/profiles/genericinit.go b/profiles/genericinit.go
index 1758b8d..f401981 100644
--- a/profiles/genericinit.go
+++ b/profiles/genericinit.go
@@ -12,10 +12,10 @@
"v.io/x/ref/lib/flags"
"v.io/x/ref/lib/websocket"
"v.io/x/ref/profiles/internal"
- _ "v.io/x/ref/runtimes/google/ipc/protocols/tcp"
- _ "v.io/x/ref/runtimes/google/ipc/protocols/ws"
- _ "v.io/x/ref/runtimes/google/ipc/protocols/wsh"
- grt "v.io/x/ref/runtimes/google/rt"
+ _ "v.io/x/ref/profiles/internal/ipc/protocols/tcp"
+ _ "v.io/x/ref/profiles/internal/ipc/protocols/ws"
+ _ "v.io/x/ref/profiles/internal/ipc/protocols/wsh"
+ grt "v.io/x/ref/profiles/internal/rt"
)
var commonFlags *flags.Flags
diff --git a/profiles/internal/GO.PACKAGE b/profiles/internal/GO.PACKAGE
deleted file mode 100644
index eaa997e..0000000
--- a/profiles/internal/GO.PACKAGE
+++ /dev/null
@@ -1,7 +0,0 @@
-{
- "dependencies": {
- "incoming": [
- {"allow": "v.io/x/ref/runtimes/google/ipc", "comment":"temporarily allowing dependency from veyron/runtimes/google/ipc"}
- ]
- }
-}
diff --git a/profiles/internal/README b/profiles/internal/README
new file mode 100644
index 0000000..875a4e1
--- /dev/null
+++ b/profiles/internal/README
@@ -0,0 +1,2 @@
+This directory and all of its subdirectories contain an implementation of the
+public APIs defined in v23.
diff --git a/profiles/internal/ipc/benchmark/README.txt b/profiles/internal/ipc/benchmark/README.txt
new file mode 100644
index 0000000..5b511f0
--- /dev/null
+++ b/profiles/internal/ipc/benchmark/README.txt
@@ -0,0 +1,129 @@
+This directory contains code used to measure the performance of the Vanadium IPC
+stack.
+
+================================================================================
+
+The ipc_test.go file uses Go's testing package to run benchmarks. Each
+benchmark involves one server and one client. The server has two very simple
+methods that echo the data received from the client back to the client.
+
+client ---- Echo(payload) ----> server
+client <--- return payload ---- server
+
+There are two versions of the Echo method:
+ - Echo(payload []byte) ([]byte, error)
+ - EchoStream() stream<[]byte,[]byte> error
+
+The first set of benchmarks uses the non-streaming version of Echo with a
+varying payload size. The second set uses the streaming version with a varying
+number of chunks and payload sizes. The third set measures the performance
+with multiple clients hosted in the same process.
+
+This test creates a VC before the benchmark begins, so the VC creation
+overhead is excluded.
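+
+As a rough sketch (names here are illustrative, not the exact code in
+ipc_test.go), the non-streaming benchmarks follow the standard Go testing
+pattern, with the context and client stub created before the timer starts:
+
+  func benchmarkEcho(b *testing.B, ctx *context.T, client BenchmarkClientStub, size int) {
+          payload := make([]byte, size)
+          b.SetBytes(int64(size))
+          b.ResetTimer()
+          for i := 0; i < b.N; i++ {
+                  // One round trip: the server echoes the payload back.
+                  if _, err := client.Echo(ctx, payload); err != nil {
+                          b.Fatal(err)
+                  }
+          }
+  }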
+
+$ v23 go test -bench=. -timeout=1h -cpu=1 -benchtime=5s \
+ v.io/x/ref/profiles/internal/ipc/benchmark
+PASS
+Benchmark____1B 1000 8301357 ns/op 0.00 MB/s
+--- Histogram (unit: ms)
+ Count: 1000 Min: 7 Max: 17 Avg: 7.89
+ ------------------------------------------------------------
+ [ 7, 8) 505 50.5% 50.5% #####
+ [ 8, 9) 389 38.9% 89.4% ####
+ [ 9, 10) 38 3.8% 93.2%
+ [ 10, 11) 12 1.2% 94.4%
+ [ 11, 12) 4 0.4% 94.8%
+ [ 12, 14) 19 1.9% 96.7%
+ [ 14, 16) 23 2.3% 99.0%
+ [ 16, 18) 10 1.0% 100.0%
+ [ 18, 21) 0 0.0% 100.0%
+ [ 21, 24) 0 0.0% 100.0%
+ [ 24, inf) 0 0.0% 100.0%
+Benchmark___10B 1000 8587341 ns/op 0.00 MB/s
+...
+
+RESULTS.txt has the full benchmark results.
+
+================================================================================
+
+benchmarkd/main.go and benchmark/main.go are simple command-line tools to run the
+benchmark server and client as separate processes. Unlike the benchmarks above,
+this test includes the startup cost of name resolution, creating the VC, etc. in
+the first RPC.
+
+$ v23 go run benchmarkd/main.go \
+ -veyron.tcp.address=localhost:8888 -veyron.acl.literal='{"Read": {"In": ["..."]}}'
+
+(In a different shell)
+$ v23 go run benchmark/main.go \
+ -server=/localhost:8888 -iterations=100 -chunk_count=0 -payload_size=10
+iterations: 100 chunk_count: 0 payload_size: 10
+elapsed time: 1.369034277s
+Histogram (unit: ms)
+Count: 100 Min: 7 Max: 94 Avg: 13.17
+------------------------------------------------------------
+[ 7, 8) 1 1.0% 1.0%
+[ 8, 9) 4 4.0% 5.0%
+[ 9, 10) 17 17.0% 22.0% ##
+[ 10, 12) 24 24.0% 46.0% ##
+[ 12, 15) 24 24.0% 70.0% ##
+[ 15, 19) 28 28.0% 98.0% ###
+[ 19, 24) 1 1.0% 99.0%
+[ 24, 32) 0 0.0% 99.0%
+[ 32, 42) 0 0.0% 99.0%
+[ 42, 56) 0 0.0% 99.0%
+[ 56, 75) 0 0.0% 99.0%
+[ 75, 101) 1 1.0% 100.0%
+[101, 136) 0 0.0% 100.0%
+[136, 183) 0 0.0% 100.0%
+[183, 247) 0 0.0% 100.0%
+[247, 334) 0 0.0% 100.0%
+[334, inf) 0 0.0% 100.0%
+
+
+On a Raspberry Pi, everything is much slower. The same tests show the following
+results:
+
+$ ./benchmarks.test -test.bench=. -test.cpu=1 -test.benchtime=5s 2>/dev/null
+PASS
+Benchmark____1B 500 21316148 ns/op
+Benchmark___10B 500 23304638 ns/op
+Benchmark__100B 500 21860446 ns/op
+Benchmark___1KB 500 24000346 ns/op
+Benchmark__10KB 200 37530575 ns/op
+Benchmark_100KB 100 136243310 ns/op
+Benchmark_N_RPCs____1_chunk_____1B 500 19957506 ns/op
+Benchmark_N_RPCs____1_chunk____10B 500 22868392 ns/op
+Benchmark_N_RPCs____1_chunk___100B 500 19635412 ns/op
+Benchmark_N_RPCs____1_chunk____1KB 500 22572190 ns/op
+Benchmark_N_RPCs____1_chunk___10KB 500 37570948 ns/op
+Benchmark_N_RPCs___10_chunks___1KB 100 51670740 ns/op
+Benchmark_N_RPCs__100_chunks___1KB 50 364938740 ns/op
+Benchmark_N_RPCs_1000_chunks___1KB 2 3586374500 ns/op
+Benchmark_1_RPC_N_chunks_____1B 10000 1034042 ns/op
+Benchmark_1_RPC_N_chunks____10B 5000 1894875 ns/op
+Benchmark_1_RPC_N_chunks___100B 5000 2857289 ns/op
+Benchmark_1_RPC_N_chunks____1KB 5000 6465839 ns/op
+Benchmark_1_RPC_N_chunks___10KB 100 80019430 ns/op
+Benchmark_1_RPC_N_chunks__100KB Killed
+
+The simple 1 KB RPCs take an average of 24 ms. The streaming equivalent takes
+about 22 ms, and streaming many 1 KB chunks takes about 6.5 ms per chunk.
+
+
+$ ./benchmarkd --address=localhost:8888 --veyron.acl.literal='{"...":"A"}'
+
+$ ./benchmark --server=/localhost:8888 --count=10 --payload_size=1000
+CallEcho 0 2573406000
+CallEcho 1 44669000
+CallEcho 2 54442000
+CallEcho 3 33934000
+CallEcho 4 47985000
+CallEcho 5 61324000
+CallEcho 6 51654000
+CallEcho 7 47043000
+CallEcho 8 44995000
+CallEcho 9 53166000
+
+On the Pi, the first RPC takes ~2.5 seconds to execute.
diff --git a/profiles/internal/ipc/benchmark/RESULTS.txt b/profiles/internal/ipc/benchmark/RESULTS.txt
new file mode 100644
index 0000000..eec067a
--- /dev/null
+++ b/profiles/internal/ipc/benchmark/RESULTS.txt
@@ -0,0 +1,1613 @@
+* 'Benchmark___NNB' shows the average time to execute a simple Echo RPC with a payload
+ of NN bytes.
+* 'Benchmark___CC_chunk____NNB' shows the average time to execute a streaming RPC with
+ a payload of CC chunks of NN bytes.
+* 'Benchmark__per_chunk___NNB' shows the average time to send one chunk of NN bytes.
+* 'Benchmark___NNB_mux___CC_chunks___MMB' shows the average time to execute a simple
+ Echo RPC with a payload of NN bytes while streaming payloads of CC chunks of MM bytes
+ continuously in the same process.
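+
+(The results below can be reproduced with a run along the lines of the one in
+the README, e.g. "v23 go test -bench=. -timeout=1h -benchtime=5s
+v.io/x/ref/profiles/internal/ipc/benchmark"; the "-2" variants correspond to
+running with -cpu=2.)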
+
+================================================================================
+Date: 02/03/2015
+Platform: Intel(R) Xeon(R) CPU E5-2689 0 @ 2.60GHz, 66114888KB Memory
+
+Benchmark____1B 5000 1911937 ns/op 0.00 MB/s
+--- Histogram (unit: ms)
+ Count: 5000 Min: 1 Max: 4 Avg: 1.33
+ ------------------------------------------------------------
+ [ 1, 2) 3837 76.7% 76.7% ########
+ [ 2, 3) 720 14.4% 91.1% #
+ [ 3, 4) 386 7.7% 98.9% #
+ [ 4, inf) 57 1.1% 100.0%
+Benchmark____1B-2 5000 1735328 ns/op 0.00 MB/s
+--- Histogram (unit: ms)
+ Count: 5000 Min: 1 Max: 4 Avg: 1.16
+ ------------------------------------------------------------
+ [ 1, 2) 4518 90.4% 90.4% #########
+ [ 2, 3) 149 3.0% 93.3%
+ [ 3, 4) 332 6.6% 100.0% #
+ [ 4, inf) 1 0.0% 100.0%
+Benchmark___10B 5000 1976083 ns/op 0.01 MB/s
+--- Histogram (unit: ms)
+ Count: 5000 Min: 1 Max: 8 Avg: 1.37
+ ------------------------------------------------------------
+ [ 1, 2) 4554 91.1% 91.1% #########
+ [ 2, 3) 19 0.4% 91.5%
+ [ 3, 4) 102 2.0% 93.5%
+ [ 4, 5) 23 0.5% 94.0%
+ [ 5, 6) 74 1.5% 95.4%
+ [ 6, 7) 136 2.7% 98.2%
+ [ 7, 9) 92 1.8% 100.0%
+ [ 9, inf) 0 0.0% 100.0%
+Benchmark___10B-2 5000 1764546 ns/op 0.01 MB/s
+--- Histogram (unit: ms)
+ Count: 5000 Min: 1 Max: 6 Avg: 1.17
+ ------------------------------------------------------------
+ [ 1, 2) 4752 95.0% 95.0% ##########
+ [ 2, 3) 0 0.0% 95.0%
+ [ 3, 4) 0 0.0% 95.0%
+ [ 4, 5) 123 2.5% 97.5%
+ [ 5, 6) 123 2.5% 100.0%
+ [ 6, inf) 2 0.0% 100.0%
+Benchmark__100B 5000 2065635 ns/op 0.10 MB/s
+--- Histogram (unit: ms)
+ Count: 5000 Min: 1 Max: 14 Avg: 1.44
+ ------------------------------------------------------------
+ [ 1, 2) 4711 94.2% 94.2% #########
+ [ 2, 3) 6 0.1% 94.3%
+ [ 3, 4) 11 0.2% 94.6%
+ [ 4, 5) 41 0.8% 95.4%
+ [ 5, 6) 29 0.6% 96.0%
+ [ 6, 8) 1 0.0% 96.0%
+ [ 8, 10) 64 1.3% 97.3%
+ [ 10, 13) 111 2.2% 99.5%
+ [ 13, 16) 26 0.5% 100.0%
+ [ 16, 20) 0 0.0% 100.0%
+ [ 20, 25) 0 0.0% 100.0%
+ [ 25, 31) 0 0.0% 100.0%
+ [ 31, 38) 0 0.0% 100.0%
+ [ 38, inf) 0 0.0% 100.0%
+Benchmark__100B-2 5000 1800099 ns/op 0.11 MB/s
+--- Histogram (unit: ms)
+ Count: 5000 Min: 1 Max: 9 Avg: 1.19
+ ------------------------------------------------------------
+ [ 1, 2) 4834 96.7% 96.7% ##########
+ [ 2, 3) 1 0.0% 96.7%
+ [ 3, 4) 0 0.0% 96.7%
+ [ 4, 5) 0 0.0% 96.7%
+ [ 5, 6) 0 0.0% 96.7%
+ [ 6, 8) 157 3.1% 99.8%
+ [ 8, 10) 8 0.2% 100.0%
+ [ 10, 12) 0 0.0% 100.0%
+ [ 12, inf) 0 0.0% 100.0%
+Benchmark___1KB 3000 2160307 ns/op 0.93 MB/s
+--- Histogram (unit: ms)
+ Count: 3000 Min: 1 Max: 19 Avg: 1.50
+ ------------------------------------------------------------
+ [ 1, 2) 2883 96.1% 96.1% ##########
+ [ 2, 3) 0 0.0% 96.1%
+ [ 3, 4) 0 0.0% 96.1%
+ [ 4, 5) 0 0.0% 96.1%
+ [ 5, 7) 8 0.3% 96.4%
+ [ 7, 9) 20 0.7% 97.0%
+ [ 9, 12) 0 0.0% 97.0%
+ [ 12, 15) 28 0.9% 98.0%
+ [ 15, 19) 53 1.8% 99.7%
+ [ 19, 24) 8 0.3% 100.0%
+ [ 24, 30) 0 0.0% 100.0%
+ [ 30, 38) 0 0.0% 100.0%
+ [ 38, 48) 0 0.0% 100.0%
+ [ 48, 60) 0 0.0% 100.0%
+ [ 60, 74) 0 0.0% 100.0%
+ [ 74, 91) 0 0.0% 100.0%
+ [ 91, inf) 0 0.0% 100.0%
+Benchmark___1KB-2 5000 1845717 ns/op 1.08 MB/s
+--- Histogram (unit: ms)
+ Count: 5000 Min: 1 Max: 10 Avg: 1.22
+ ------------------------------------------------------------
+ [ 1, 2) 4857 97.1% 97.1% ##########
+ [ 2, 3) 7 0.1% 97.3%
+ [ 3, 4) 0 0.0% 97.3%
+ [ 4, 5) 0 0.0% 97.3%
+ [ 5, 6) 0 0.0% 97.3%
+ [ 6, 8) 0 0.0% 97.3%
+ [ 8, 10) 123 2.5% 99.7%
+ [ 10, 12) 13 0.3% 100.0%
+ [ 12, 15) 0 0.0% 100.0%
+ [ 15, inf) 0 0.0% 100.0%
+Benchmark__10KB 3000 2461125 ns/op 8.13 MB/s
+--- Histogram (unit: ms)
+ Count: 3000 Min: 1 Max: 24 Avg: 1.65
+ ------------------------------------------------------------
+ [ 1, 2) 2865 95.5% 95.5% ##########
+ [ 2, 3) 19 0.6% 96.1%
+ [ 3, 4) 0 0.0% 96.1%
+ [ 4, 5) 0 0.0% 96.1%
+ [ 5, 7) 0 0.0% 96.1%
+ [ 7, 9) 27 0.9% 97.0%
+ [ 9, 12) 1 0.0% 97.1%
+ [ 12, 16) 0 0.0% 97.1%
+ [ 16, 21) 28 0.9% 98.0%
+ [ 21, 27) 60 2.0% 100.0%
+ [ 27, 35) 0 0.0% 100.0%
+ [ 35, 44) 0 0.0% 100.0%
+ [ 44, 56) 0 0.0% 100.0%
+ [ 56, 71) 0 0.0% 100.0%
+ [ 71, 89) 0 0.0% 100.0%
+ [ 89, 111) 0 0.0% 100.0%
+ [111, inf) 0 0.0% 100.0%
+Benchmark__10KB-2 5000 2002215 ns/op 9.99 MB/s
+--- Histogram (unit: ms)
+ Count: 5000 Min: 1 Max: 12 Avg: 1.32
+ ------------------------------------------------------------
+ [ 1, 2) 4634 92.7% 92.7% #########
+ [ 2, 3) 230 4.6% 97.3%
+ [ 3, 4) 0 0.0% 97.3%
+ [ 4, 5) 0 0.0% 97.3%
+ [ 5, 6) 0 0.0% 97.3%
+ [ 6, 8) 0 0.0% 97.3%
+ [ 8, 10) 0 0.0% 97.3%
+ [ 10, 13) 136 2.7% 100.0%
+ [ 13, 16) 0 0.0% 100.0%
+ [ 16, 20) 0 0.0% 100.0%
+ [ 20, 24) 0 0.0% 100.0%
+ [ 24, inf) 0 0.0% 100.0%
+Benchmark_100KB 2000 4987528 ns/op 40.10 MB/s
+--- Histogram (unit: ms)
+ Count: 2000 Min: 3 Max: 28 Avg: 4.44
+ ------------------------------------------------------------
+ [ 3, 4) 1846 92.3% 92.3% #########
+ [ 4, 5) 3 0.2% 92.5%
+ [ 5, 6) 0 0.0% 92.5%
+ [ 6, 7) 0 0.0% 92.5%
+ [ 7, 9) 28 1.4% 93.9%
+ [ 9, 11) 1 0.1% 93.9%
+ [ 11, 14) 0 0.0% 93.9%
+ [ 14, 18) 0 0.0% 93.9%
+ [ 18, 23) 29 1.5% 95.4%
+ [ 23, 29) 93 4.7% 100.0%
+ [ 29, 37) 0 0.0% 100.0%
+ [ 37, 47) 0 0.0% 100.0%
+ [ 47, 60) 0 0.0% 100.0%
+ [ 60, 76) 0 0.0% 100.0%
+ [ 76, 96) 0 0.0% 100.0%
+ [ 96, 120) 0 0.0% 100.0%
+ [120, inf) 0 0.0% 100.0%
+Benchmark_100KB-2 2000 3358703 ns/op 59.55 MB/s
+--- Histogram (unit: ms)
+ Count: 2000 Min: 2 Max: 15 Avg: 2.78
+ ------------------------------------------------------------
+ [ 2, 3) 1755 87.8% 87.8% #########
+ [ 3, 4) 74 3.7% 91.5%
+ [ 4, 5) 53 2.7% 94.1%
+ [ 5, 6) 0 0.0% 94.1%
+ [ 6, 7) 0 0.0% 94.1%
+ [ 7, 9) 0 0.0% 94.1%
+ [ 9, 11) 0 0.0% 94.1%
+ [ 11, 14) 36 1.8% 95.9%
+ [ 14, 17) 82 4.1% 100.0%
+ [ 17, 21) 0 0.0% 100.0%
+ [ 21, 26) 0 0.0% 100.0%
+ [ 26, 32) 0 0.0% 100.0%
+ [ 32, 39) 0 0.0% 100.0%
+ [ 39, inf) 0 0.0% 100.0%
+
+Benchmark____1_chunk_____1B 3000 2147302 ns/op 0.00 MB/s
+--- Histogram (unit: ms)
+ Count: 3000 Min: 1 Max: 4 Avg: 1.43
+ ------------------------------------------------------------
+ [ 1, 2) 2049 68.3% 68.3% #######
+ [ 2, 3) 628 20.9% 89.2% ##
+ [ 3, 4) 321 10.7% 99.9% #
+ [ 4, inf) 2 0.1% 100.0%
+Benchmark____1_chunk_____1B-2 5000 1892882 ns/op 0.00 MB/s
+--- Histogram (unit: ms)
+ Count: 5000 Min: 1 Max: 4 Avg: 1.20
+ ------------------------------------------------------------
+ [ 1, 2) 4363 87.3% 87.3% #########
+ [ 2, 3) 286 5.7% 93.0% #
+ [ 3, 4) 350 7.0% 100.0% #
+ [ 4, inf) 1 0.0% 100.0%
+Benchmark____1_chunk____10B 3000 2225520 ns/op 0.01 MB/s
+--- Histogram (unit: ms)
+ Count: 3000 Min: 1 Max: 7 Avg: 1.44
+ ------------------------------------------------------------
+ [ 1, 2) 2609 87.0% 87.0% #########
+ [ 2, 3) 62 2.1% 89.0%
+ [ 3, 4) 75 2.5% 91.5%
+ [ 4, 5) 46 1.5% 93.1%
+ [ 5, 6) 90 3.0% 96.1%
+ [ 6, 7) 112 3.7% 99.8%
+ [ 7, inf) 6 0.2% 100.0%
+Benchmark____1_chunk____10B-2 5000 1921505 ns/op 0.01 MB/s
+--- Histogram (unit: ms)
+ Count: 5000 Min: 1 Max: 6 Avg: 1.21
+ ------------------------------------------------------------
+ [ 1, 2) 4576 91.5% 91.5% #########
+ [ 2, 3) 100 2.0% 93.5%
+ [ 3, 4) 27 0.5% 94.1%
+ [ 4, 5) 279 5.6% 99.6% #
+ [ 5, 6) 17 0.3% 100.0%
+ [ 6, inf) 1 0.0% 100.0%
+Benchmark____1_chunk___100B 3000 2282133 ns/op 0.09 MB/s
+--- Histogram (unit: ms)
+ Count: 3000 Min: 1 Max: 11 Avg: 1.47
+ ------------------------------------------------------------
+ [ 1, 2) 2705 90.2% 90.2% #########
+ [ 2, 3) 66 2.2% 92.4%
+ [ 3, 4) 13 0.4% 92.8%
+ [ 4, 5) 59 2.0% 94.8%
+ [ 5, 6) 0 0.0% 94.8%
+ [ 6, 8) 68 2.3% 97.0%
+ [ 8, 10) 51 1.7% 98.7%
+ [ 10, 12) 38 1.3% 100.0%
+ [ 12, 15) 0 0.0% 100.0%
+ [ 15, 18) 0 0.0% 100.0%
+ [ 18, inf) 0 0.0% 100.0%
+Benchmark____1_chunk___100B-2 5000 1953792 ns/op 0.10 MB/s
+--- Histogram (unit: ms)
+ Count: 5000 Min: 1 Max: 6 Avg: 1.25
+ ------------------------------------------------------------
+ [ 1, 2) 4543 90.9% 90.9% #########
+ [ 2, 3) 234 4.7% 95.5%
+ [ 3, 4) 0 0.0% 95.5%
+ [ 4, 5) 0 0.0% 95.5%
+ [ 5, 6) 91 1.8% 97.4%
+ [ 6, inf) 132 2.6% 100.0%
+Benchmark____1_chunk____1KB 3000 2417281 ns/op 0.83 MB/s
+--- Histogram (unit: ms)
+ Count: 3000 Min: 1 Max: 19 Avg: 1.63
+ ------------------------------------------------------------
+ [ 1, 2) 2522 84.1% 84.1% ########
+ [ 2, 3) 331 11.0% 95.1% #
+ [ 3, 4) 1 0.0% 95.1%
+ [ 4, 5) 1 0.0% 95.2%
+ [ 5, 7) 30 1.0% 96.2%
+ [ 7, 9) 2 0.1% 96.2%
+ [ 9, 12) 30 1.0% 97.2%
+ [ 12, 15) 49 1.6% 98.9%
+ [ 15, 19) 33 1.1% 100.0%
+ [ 19, 24) 1 0.0% 100.0%
+ [ 24, 30) 0 0.0% 100.0%
+ [ 30, 38) 0 0.0% 100.0%
+ [ 38, 48) 0 0.0% 100.0%
+ [ 48, 60) 0 0.0% 100.0%
+ [ 60, 74) 0 0.0% 100.0%
+ [ 74, 91) 0 0.0% 100.0%
+ [ 91, inf) 0 0.0% 100.0%
+Benchmark____1_chunk____1KB-2 5000 2015100 ns/op 0.99 MB/s
+--- Histogram (unit: ms)
+ Count: 5000 Min: 1 Max: 8 Avg: 1.30
+ ------------------------------------------------------------
+ [ 1, 2) 4453 89.1% 89.1% #########
+ [ 2, 3) 379 7.6% 96.6% #
+ [ 3, 4) 0 0.0% 96.6%
+ [ 4, 5) 0 0.0% 96.6%
+ [ 5, 6) 0 0.0% 96.6%
+ [ 6, 7) 0 0.0% 96.6%
+ [ 7, 9) 168 3.4% 100.0%
+ [ 9, inf) 0 0.0% 100.0%
+Benchmark____1_chunk___10KB 3000 2703920 ns/op 7.40 MB/s
+--- Histogram (unit: ms)
+ Count: 3000 Min: 2 Max: 20 Avg: 2.61
+ ------------------------------------------------------------
+ [ 2, 3) 2858 95.3% 95.3% ##########
+ [ 3, 4) 0 0.0% 95.3%
+ [ 4, 5) 0 0.0% 95.3%
+ [ 5, 6) 0 0.0% 95.3%
+ [ 6, 8) 34 1.1% 96.4%
+ [ 8, 10) 1 0.0% 96.4%
+ [ 10, 13) 0 0.0% 96.4%
+ [ 13, 16) 34 1.1% 97.6%
+ [ 16, 20) 49 1.6% 99.2%
+ [ 20, 25) 24 0.8% 100.0%
+ [ 25, 31) 0 0.0% 100.0%
+ [ 31, 39) 0 0.0% 100.0%
+ [ 39, 49) 0 0.0% 100.0%
+ [ 49, 61) 0 0.0% 100.0%
+ [ 61, 75) 0 0.0% 100.0%
+ [ 75, 92) 0 0.0% 100.0%
+ [ 92, inf) 0 0.0% 100.0%
+Benchmark____1_chunk___10KB-2 3000 2201032 ns/op 9.09 MB/s
+--- Histogram (unit: ms)
+ Count: 3000 Min: 1 Max: 11 Avg: 1.48
+ ------------------------------------------------------------
+ [ 1, 2) 2307 76.9% 76.9% ########
+ [ 2, 3) 593 19.8% 96.7% ##
+ [ 3, 4) 0 0.0% 96.7%
+ [ 4, 5) 0 0.0% 96.7%
+ [ 5, 6) 0 0.0% 96.7%
+ [ 6, 8) 0 0.0% 96.7%
+ [ 8, 10) 50 1.7% 98.3%
+ [ 10, 12) 50 1.7% 100.0%
+ [ 12, 15) 0 0.0% 100.0%
+ [ 15, 18) 0 0.0% 100.0%
+ [ 18, inf) 0 0.0% 100.0%
+Benchmark____1_chunk__100KB 2000 5233249 ns/op 38.22 MB/s
+--- Histogram (unit: ms)
+ Count: 2000 Min: 3 Max: 27 Avg: 4.53
+ ------------------------------------------------------------
+ [ 3, 4) 1654 82.7% 82.7% ########
+ [ 4, 5) 130 6.5% 89.2% #
+ [ 5, 6) 0 0.0% 89.2%
+ [ 6, 7) 0 0.0% 89.2%
+ [ 7, 9) 70 3.5% 92.7%
+ [ 9, 11) 2 0.1% 92.8%
+ [ 11, 14) 0 0.0% 92.8%
+ [ 14, 18) 0 0.0% 92.8%
+ [ 18, 23) 87 4.4% 97.2%
+ [ 23, 29) 57 2.9% 100.0%
+ [ 29, 37) 0 0.0% 100.0%
+ [ 37, 47) 0 0.0% 100.0%
+ [ 47, 59) 0 0.0% 100.0%
+ [ 59, 74) 0 0.0% 100.0%
+ [ 74, 93) 0 0.0% 100.0%
+ [ 93, 117) 0 0.0% 100.0%
+ [117, inf) 0 0.0% 100.0%
+Benchmark____1_chunk__100KB-2 2000 3569385 ns/op 56.03 MB/s
+--- Histogram (unit: ms)
+ Count: 2000 Min: 2 Max: 13 Avg: 2.82
+ ------------------------------------------------------------
+ [ 2, 3) 1669 83.5% 83.5% ########
+ [ 3, 4) 163 8.2% 91.6% #
+ [ 4, 5) 29 1.5% 93.1%
+ [ 5, 6) 0 0.0% 93.1%
+ [ 6, 7) 0 0.0% 93.1%
+ [ 7, 9) 0 0.0% 93.1%
+ [ 9, 11) 0 0.0% 93.1%
+ [ 11, 14) 139 7.0% 100.0% #
+ [ 14, 17) 0 0.0% 100.0%
+ [ 17, 21) 0 0.0% 100.0%
+ [ 21, 25) 0 0.0% 100.0%
+ [ 25, inf) 0 0.0% 100.0%
+Benchmark___10_chunk_____1B 2000 3741137 ns/op 0.01 MB/s
+--- Histogram (unit: ms)
+ Count: 2000 Min: 2 Max: 29 Avg: 3.15
+ ------------------------------------------------------------
+ [ 2, 3) 1180 59.0% 59.0% ######
+ [ 3, 4) 739 37.0% 96.0% ####
+ [ 4, 5) 0 0.0% 96.0%
+ [ 5, 6) 0 0.0% 96.0%
+ [ 6, 8) 0 0.0% 96.0%
+ [ 8, 11) 3 0.2% 96.1%
+ [ 11, 14) 17 0.9% 97.0%
+ [ 14, 18) 0 0.0% 97.0%
+ [ 18, 23) 20 1.0% 98.0%
+ [ 23, 30) 41 2.1% 100.0%
+ [ 30, 39) 0 0.0% 100.0%
+ [ 39, 50) 0 0.0% 100.0%
+ [ 50, 63) 0 0.0% 100.0%
+ [ 63, 80) 0 0.0% 100.0%
+ [ 80, 101) 0 0.0% 100.0%
+ [101, 128) 0 0.0% 100.0%
+ [128, inf) 0 0.0% 100.0%
+Benchmark___10_chunk_____1B-2 3000 2846887 ns/op 0.01 MB/s
+--- Histogram (unit: ms)
+ Count: 3000 Min: 2 Max: 18 Avg: 2.43
+ ------------------------------------------------------------
+ [ 2, 3) 2700 90.0% 90.0% #########
+ [ 3, 4) 211 7.0% 97.0% #
+ [ 4, 5) 1 0.0% 97.1%
+ [ 5, 6) 0 0.0% 97.1%
+ [ 6, 8) 0 0.0% 97.1%
+ [ 8, 10) 0 0.0% 97.1%
+ [ 10, 13) 25 0.8% 97.9%
+ [ 13, 16) 31 1.0% 98.9%
+ [ 16, 20) 32 1.1% 100.0%
+ [ 20, 25) 0 0.0% 100.0%
+ [ 25, 31) 0 0.0% 100.0%
+ [ 31, 38) 0 0.0% 100.0%
+ [ 38, 47) 0 0.0% 100.0%
+ [ 47, 58) 0 0.0% 100.0%
+ [ 58, 71) 0 0.0% 100.0%
+ [ 71, 87) 0 0.0% 100.0%
+ [ 87, inf) 0 0.0% 100.0%
+Benchmark___10_chunk____10B 2000 3793093 ns/op 0.05 MB/s
+--- Histogram (unit: ms)
+ Count: 2000 Min: 2 Max: 33 Avg: 3.19
+ ------------------------------------------------------------
+ [ 2, 3) 1187 59.4% 59.4% ######
+ [ 3, 4) 737 36.9% 96.2% ####
+ [ 4, 5) 0 0.0% 96.2%
+ [ 5, 6) 0 0.0% 96.2%
+ [ 6, 8) 0 0.0% 96.2%
+ [ 8, 11) 0 0.0% 96.2%
+ [ 11, 14) 20 1.0% 97.2%
+ [ 14, 18) 0 0.0% 97.2%
+ [ 18, 24) 20 1.0% 98.2%
+ [ 24, 31) 2 0.1% 98.3%
+ [ 31, 40) 34 1.7% 100.0%
+ [ 40, 52) 0 0.0% 100.0%
+ [ 52, 67) 0 0.0% 100.0%
+ [ 67, 86) 0 0.0% 100.0%
+ [ 86, 110) 0 0.0% 100.0%
+ [110, 140) 0 0.0% 100.0%
+ [140, inf) 0 0.0% 100.0%
+Benchmark___10_chunk____10B-2 3000 2683120 ns/op 0.07 MB/s
+--- Histogram (unit: ms)
+ Count: 3000 Min: 2 Max: 15 Avg: 2.39
+ ------------------------------------------------------------
+ [ 2, 3) 2713 90.4% 90.4% #########
+ [ 3, 4) 207 6.9% 97.3% #
+ [ 4, 5) 0 0.0% 97.3%
+ [ 5, 6) 0 0.0% 97.3%
+ [ 6, 7) 0 0.0% 97.3%
+ [ 7, 9) 0 0.0% 97.3%
+ [ 9, 11) 0 0.0% 97.3%
+ [ 11, 14) 7 0.2% 97.6%
+ [ 14, 17) 73 2.4% 100.0%
+ [ 17, 21) 0 0.0% 100.0%
+ [ 21, 26) 0 0.0% 100.0%
+ [ 26, 32) 0 0.0% 100.0%
+ [ 32, 39) 0 0.0% 100.0%
+ [ 39, inf) 0 0.0% 100.0%
+Benchmark___10_chunk___100B 2000 3847823 ns/op 0.52 MB/s
+--- Histogram (unit: ms)
+ Count: 2000 Min: 2 Max: 38 Avg: 3.35
+ ------------------------------------------------------------
+ [ 2, 3) 902 45.1% 45.1% #####
+ [ 3, 4) 1034 51.7% 96.8% #####
+ [ 4, 5) 0 0.0% 96.8%
+ [ 5, 7) 0 0.0% 96.8%
+ [ 7, 9) 0 0.0% 96.8%
+ [ 9, 12) 0 0.0% 96.8%
+ [ 12, 16) 16 0.8% 97.6%
+ [ 16, 21) 0 0.0% 97.6%
+ [ 21, 27) 16 0.8% 98.4%
+ [ 27, 35) 0 0.0% 98.4%
+ [ 35, 45) 32 1.6% 100.0%
+ [ 45, 58) 0 0.0% 100.0%
+ [ 58, 75) 0 0.0% 100.0%
+ [ 75, 97) 0 0.0% 100.0%
+ [ 97, 125) 0 0.0% 100.0%
+ [125, 161) 0 0.0% 100.0%
+ [161, inf) 0 0.0% 100.0%
+Benchmark___10_chunk___100B-2 3000 2917670 ns/op 0.69 MB/s
+--- Histogram (unit: ms)
+ Count: 3000 Min: 2 Max: 23 Avg: 2.47
+ ------------------------------------------------------------
+ [ 2, 3) 2687 89.6% 89.6% #########
+ [ 3, 4) 227 7.6% 97.1% #
+ [ 4, 5) 14 0.5% 97.6%
+ [ 5, 6) 0 0.0% 97.6%
+ [ 6, 8) 0 0.0% 97.6%
+ [ 8, 10) 0 0.0% 97.6%
+ [ 10, 13) 0 0.0% 97.6%
+ [ 13, 17) 30 1.0% 98.6%
+ [ 17, 22) 37 1.2% 99.8%
+ [ 22, 28) 5 0.2% 100.0%
+ [ 28, 35) 0 0.0% 100.0%
+ [ 35, 44) 0 0.0% 100.0%
+ [ 44, 55) 0 0.0% 100.0%
+ [ 55, 68) 0 0.0% 100.0%
+ [ 68, 85) 0 0.0% 100.0%
+ [ 85, 105) 0 0.0% 100.0%
+ [105, inf) 0 0.0% 100.0%
+Benchmark___10_chunk____1KB 2000 4485605 ns/op 4.46 MB/s
+--- Histogram (unit: ms)
+ Count: 2000 Min: 3 Max: 53 Avg: 4.14
+ ------------------------------------------------------------
+ [ 3, 4) 1903 95.2% 95.2% ##########
+ [ 4, 5) 3 0.2% 95.3%
+ [ 5, 6) 11 0.6% 95.9%
+ [ 6, 8) 12 0.6% 96.5%
+ [ 8, 10) 0 0.0% 96.5%
+ [ 10, 13) 0 0.0% 96.5%
+ [ 13, 17) 0 0.0% 96.5%
+ [ 17, 23) 22 1.1% 97.6%
+ [ 23, 31) 0 0.0% 97.6%
+ [ 31, 41) 22 1.1% 98.7%
+ [ 41, 54) 27 1.4% 100.0%
+ [ 54, 71) 0 0.0% 100.0%
+ [ 71, 93) 0 0.0% 100.0%
+ [ 93, 122) 0 0.0% 100.0%
+ [122, 160) 0 0.0% 100.0%
+ [160, 210) 0 0.0% 100.0%
+ [210, inf) 0 0.0% 100.0%
+Benchmark___10_chunk____1KB-2 2000 3174383 ns/op 6.30 MB/s
+--- Histogram (unit: ms)
+ Count: 2000 Min: 2 Max: 24 Avg: 2.56
+ ------------------------------------------------------------
+ [ 2, 3) 1733 86.7% 86.7% #########
+ [ 3, 4) 198 9.9% 96.6% #
+ [ 4, 5) 20 1.0% 97.6%
+ [ 5, 6) 0 0.0% 97.6%
+ [ 6, 8) 0 0.0% 97.6%
+ [ 8, 10) 0 0.0% 97.6%
+ [ 10, 13) 0 0.0% 97.6%
+ [ 13, 17) 1 0.1% 97.6%
+ [ 17, 22) 27 1.4% 99.0%
+ [ 22, 28) 21 1.1% 100.0%
+ [ 28, 35) 0 0.0% 100.0%
+ [ 35, 44) 0 0.0% 100.0%
+ [ 44, 55) 0 0.0% 100.0%
+ [ 55, 69) 0 0.0% 100.0%
+ [ 69, 86) 0 0.0% 100.0%
+ [ 86, 107) 0 0.0% 100.0%
+ [107, inf) 0 0.0% 100.0%
+Benchmark___10_chunk___10KB 1000 6269496 ns/op 31.90 MB/s
+--- Histogram (unit: ms)
+ Count: 1000 Min: 4 Max: 46 Avg: 5.53
+ ------------------------------------------------------------
+ [ 4, 5) 884 88.4% 88.4% #########
+ [ 5, 6) 70 7.0% 95.4% #
+ [ 6, 7) 0 0.0% 95.4%
+ [ 7, 9) 0 0.0% 95.4%
+ [ 9, 11) 0 0.0% 95.4%
+ [ 11, 14) 0 0.0% 95.4%
+ [ 14, 18) 7 0.7% 96.1%
+ [ 18, 23) 0 0.0% 96.1%
+ [ 23, 30) 0 0.0% 96.1%
+ [ 30, 39) 7 0.7% 96.8%
+ [ 39, 51) 32 3.2% 100.0%
+ [ 51, 66) 0 0.0% 100.0%
+ [ 66, 85) 0 0.0% 100.0%
+ [ 85, 110) 0 0.0% 100.0%
+ [110, 142) 0 0.0% 100.0%
+ [142, 184) 0 0.0% 100.0%
+ [184, inf) 0 0.0% 100.0%
+Benchmark___10_chunk___10KB-2 2000 4354054 ns/op 45.93 MB/s
+--- Histogram (unit: ms)
+ Count: 2000 Min: 2 Max: 26 Avg: 3.88
+ ------------------------------------------------------------
+ [ 2, 3) 3 0.2% 0.2%
+ [ 3, 4) 1758 87.9% 88.1% #########
+ [ 4, 5) 40 2.0% 90.1%
+ [ 5, 6) 120 6.0% 96.1% #
+ [ 6, 8) 1 0.1% 96.1%
+ [ 8, 10) 0 0.0% 96.1%
+ [ 10, 13) 0 0.0% 96.1%
+ [ 13, 17) 0 0.0% 96.1%
+ [ 17, 22) 30 1.5% 97.6%
+ [ 22, 28) 48 2.4% 100.0%
+ [ 28, 36) 0 0.0% 100.0%
+ [ 36, 46) 0 0.0% 100.0%
+ [ 46, 58) 0 0.0% 100.0%
+ [ 58, 73) 0 0.0% 100.0%
+ [ 73, 92) 0 0.0% 100.0%
+ [ 92, 116) 0 0.0% 100.0%
+ [116, inf) 0 0.0% 100.0%
+Benchmark___10_chunk__100KB 300 27690944 ns/op 72.23 MB/s
+--- Histogram (unit: ms)
+ Count: 300 Min: 21 Max: 58 Avg: 27.25
+ ------------------------------------------------------------
+ [ 21, 22) 209 69.7% 69.7% #######
+ [ 22, 23) 15 5.0% 74.7% #
+ [ 23, 24) 1 0.3% 75.0%
+ [ 24, 26) 0 0.0% 75.0%
+ [ 26, 28) 0 0.0% 75.0%
+ [ 28, 31) 22 7.3% 82.3% #
+ [ 31, 35) 0 0.0% 82.3%
+ [ 35, 40) 0 0.0% 82.3%
+ [ 40, 46) 0 0.0% 82.3%
+ [ 46, 54) 23 7.7% 90.0% #
+ [ 54, 65) 30 10.0% 100.0% #
+ [ 65, 79) 0 0.0% 100.0%
+ [ 79, 96) 0 0.0% 100.0%
+ [ 96, 118) 0 0.0% 100.0%
+ [118, 147) 0 0.0% 100.0%
+ [147, 183) 0 0.0% 100.0%
+ [183, inf) 0 0.0% 100.0%
+Benchmark___10_chunk__100KB-2 500 15686136 ns/op 127.50 MB/s
+--- Histogram (unit: ms)
+ Count: 500 Min: 11 Max: 37 Avg: 15.20
+ ------------------------------------------------------------
+ [ 11, 12) 147 29.4% 29.4% ###
+ [ 12, 13) 237 47.4% 76.8% #####
+ [ 13, 14) 11 2.2% 79.0%
+ [ 14, 15) 2 0.4% 79.4%
+ [ 15, 17) 14 2.8% 82.2%
+ [ 17, 19) 2 0.4% 82.6%
+ [ 19, 22) 0 0.0% 82.6%
+ [ 22, 26) 0 0.0% 82.6%
+ [ 26, 31) 49 9.8% 92.4% #
+ [ 31, 38) 38 7.6% 100.0% #
+ [ 38, 46) 0 0.0% 100.0%
+ [ 46, 56) 0 0.0% 100.0%
+ [ 56, 69) 0 0.0% 100.0%
+ [ 69, 85) 0 0.0% 100.0%
+ [ 85, 105) 0 0.0% 100.0%
+ [105, 130) 0 0.0% 100.0%
+ [130, inf) 0 0.0% 100.0%
+
+Benchmark__100_chunk_____1B 500 15105687 ns/op 0.01 MB/s
+--- Histogram (unit: ms)
+ Count: 500 Min: 14 Max: 16 Avg: 14.60
+ ------------------------------------------------------------
+ [ 14, 15) 222 44.4% 44.4% ####
+ [ 15, 16) 258 51.6% 96.0% #####
+ [ 16, inf) 20 4.0% 100.0%
+Benchmark__100_chunk_____1B-2 1000 10873485 ns/op 0.02 MB/s
+--- Histogram (unit: ms)
+ Count: 1000 Min: 8 Max: 13 Avg: 10.38
+ ------------------------------------------------------------
+ [ 8, 9) 5 0.5% 0.5%
+ [ 9, 10) 119 11.9% 12.4% #
+ [ 10, 11) 465 46.5% 58.9% #####
+ [ 11, 12) 316 31.6% 90.5% ###
+ [ 12, 13) 93 9.3% 99.8% #
+ [ 13, inf) 2 0.2% 100.0%
+Benchmark__100_chunk____10B 500 15092213 ns/op 0.13 MB/s
+--- Histogram (unit: ms)
+ Count: 500 Min: 13 Max: 17 Avg: 14.67
+ ------------------------------------------------------------
+ [ 13, 14) 1 0.2% 0.2%
+ [ 14, 15) 174 34.8% 35.0% ###
+ [ 15, 16) 317 63.4% 98.4% ######
+ [ 16, 17) 7 1.4% 99.8%
+ [ 17, inf) 1 0.2% 100.0%
+Benchmark__100_chunk____10B-2 1000 10896191 ns/op 0.18 MB/s
+--- Histogram (unit: ms)
+ Count: 1000 Min: 8 Max: 13 Avg: 10.39
+ ------------------------------------------------------------
+ [ 8, 9) 10 1.0% 1.0%
+ [ 9, 10) 113 11.3% 12.3% #
+ [ 10, 11) 387 38.7% 51.0% ####
+ [ 11, 12) 458 45.8% 96.8% #####
+ [ 12, 13) 31 3.1% 99.9%
+ [ 13, inf) 1 0.1% 100.0%
+Benchmark__100_chunk___100B 500 15641654 ns/op 1.28 MB/s
+--- Histogram (unit: ms)
+ Count: 500 Min: 13 Max: 17 Avg: 15.17
+ ------------------------------------------------------------
+ [ 13, 14) 1 0.2% 0.2%
+ [ 14, 15) 106 21.2% 21.4% ##
+ [ 15, 16) 208 41.6% 63.0% ####
+ [ 16, 17) 177 35.4% 98.4% ####
+ [ 17, inf) 8 1.6% 100.0%
+Benchmark__100_chunk___100B-2 1000 11037083 ns/op 1.81 MB/s
+--- Histogram (unit: ms)
+ Count: 1000 Min: 8 Max: 13 Avg: 10.53
+ ------------------------------------------------------------
+ [ 8, 9) 21 2.1% 2.1%
+ [ 9, 10) 175 17.5% 19.6% ##
+ [ 10, 11) 285 28.5% 48.1% ###
+ [ 11, 12) 298 29.8% 77.9% ###
+ [ 12, 13) 211 21.1% 99.0% ##
+ [ 13, inf) 10 1.0% 100.0%
+Benchmark__100_chunk____1KB 500 17686556 ns/op 11.31 MB/s
+--- Histogram (unit: ms)
+ Count: 500 Min: 15 Max: 21 Avg: 17.16
+ ------------------------------------------------------------
+ [ 15, 16) 30 6.0% 6.0% #
+ [ 16, 17) 125 25.0% 31.0% ###
+ [ 17, 18) 122 24.4% 55.4% ##
+ [ 18, 19) 191 38.2% 93.6% ####
+ [ 19, 20) 26 5.2% 98.8% #
+ [ 20, 21) 4 0.8% 99.6%
+ [ 21, inf) 2 0.4% 100.0%
+Benchmark__100_chunk____1KB-2 1000 11974933 ns/op 16.70 MB/s
+--- Histogram (unit: ms)
+ Count: 1000 Min: 8 Max: 16 Avg: 11.48
+ ------------------------------------------------------------
+ [ 8, 9) 6 0.6% 0.6%
+ [ 9, 10) 87 8.7% 9.3% #
+ [ 10, 11) 202 20.2% 29.5% ##
+ [ 11, 12) 246 24.6% 54.1% ##
+ [ 12, 13) 125 12.5% 66.6% #
+ [ 13, 15) 332 33.2% 99.8% ###
+ [ 15, 17) 2 0.2% 100.0%
+ [ 17, 19) 0 0.0% 100.0%
+ [ 19, inf) 0 0.0% 100.0%
+Benchmark__100_chunk___10KB 200 38603079 ns/op 51.81 MB/s
+--- Histogram (unit: ms)
+ Count: 200 Min: 35 Max: 41 Avg: 38.10
+ ------------------------------------------------------------
+ [ 35, 36) 5 2.5% 2.5%
+ [ 36, 37) 4 2.0% 4.5%
+ [ 37, 38) 21 10.5% 15.0% #
+ [ 38, 39) 123 61.5% 76.5% ######
+ [ 39, 40) 31 15.5% 92.0% ##
+ [ 40, 41) 15 7.5% 99.5% #
+ [ 41, inf) 1 0.5% 100.0%
+Benchmark__100_chunk___10KB-2 300 21864340 ns/op 91.47 MB/s
+--- Histogram (unit: ms)
+ Count: 300 Min: 18 Max: 24 Avg: 21.35
+ ------------------------------------------------------------
+ [ 18, 19) 6 2.0% 2.0%
+ [ 19, 20) 12 4.0% 6.0%
+ [ 20, 21) 36 12.0% 18.0% #
+ [ 21, 22) 94 31.3% 49.3% ###
+ [ 22, 23) 124 41.3% 90.7% ####
+ [ 23, 24) 24 8.0% 98.7% #
+ [ 24, inf) 4 1.3% 100.0%
+Benchmark__100_chunk__100KB 30 218748547 ns/op 91.43 MB/s
+--- Histogram (unit: ms)
+ Count: 30 Min: 214 Max: 225 Avg: 218.23
+ ------------------------------------------------------------
+ [214, 215) 4 13.3% 13.3% #
+ [215, 216) 4 13.3% 26.7% #
+ [216, 217) 3 10.0% 36.7% #
+ [217, 218) 3 10.0% 46.7% #
+ [218, 219) 3 10.0% 56.7% #
+ [219, 221) 5 16.7% 73.3% ##
+ [221, 223) 4 13.3% 86.7% #
+ [223, 226) 4 13.3% 100.0% #
+ [226, 229) 0 0.0% 100.0%
+ [229, 233) 0 0.0% 100.0%
+ [233, 237) 0 0.0% 100.0%
+ [237, inf) 0 0.0% 100.0%
+Benchmark__100_chunk__100KB-2 50 113819574 ns/op 175.72 MB/s
+--- Histogram (unit: ms)
+ Count: 50 Min: 108 Max: 118 Avg: 113.30
+ ------------------------------------------------------------
+ [108, 109) 1 2.0% 2.0%
+ [109, 110) 2 4.0% 6.0%
+ [110, 111) 1 2.0% 8.0%
+ [111, 112) 9 18.0% 26.0% ##
+ [112, 113) 8 16.0% 42.0% ##
+ [113, 115) 10 20.0% 62.0% ##
+ [115, 117) 16 32.0% 94.0% ###
+ [117, 119) 3 6.0% 100.0% #
+ [119, 122) 0 0.0% 100.0%
+ [122, 125) 0 0.0% 100.0%
+ [125, inf) 0 0.0% 100.0%
+Benchmark___1K_chunk_____1B 50 129084503 ns/op 0.02 MB/s
+--- Histogram (unit: ms)
+ Count: 50 Min: 123 Max: 135 Avg: 128.56
+ ------------------------------------------------------------
+ [123, 124) 1 2.0% 2.0%
+ [124, 125) 0 0.0% 2.0%
+ [125, 126) 1 2.0% 4.0%
+ [126, 127) 0 0.0% 4.0%
+ [127, 128) 9 18.0% 22.0% ##
+ [128, 130) 29 58.0% 80.0% ######
+ [130, 132) 6 12.0% 92.0% #
+ [132, 135) 3 6.0% 98.0% #
+ [135, 138) 1 2.0% 100.0%
+ [138, 142) 0 0.0% 100.0%
+ [142, 147) 0 0.0% 100.0%
+ [147, 153) 0 0.0% 100.0%
+ [153, inf) 0 0.0% 100.0%
+Benchmark___1K_chunk_____1B-2 100 85455855 ns/op 0.02 MB/s
+--- Histogram (unit: ms)
+ Count: 100 Min: 78 Max: 99 Avg: 84.98
+ ------------------------------------------------------------
+ [ 78, 79) 5 5.0% 5.0% #
+ [ 79, 80) 3 3.0% 8.0%
+ [ 80, 81) 8 8.0% 16.0% #
+ [ 81, 82) 10 10.0% 26.0% #
+ [ 82, 84) 12 12.0% 38.0% #
+ [ 84, 86) 19 19.0% 57.0% ##
+ [ 86, 89) 19 19.0% 76.0% ##
+ [ 89, 93) 21 21.0% 97.0% ##
+ [ 93, 98) 1 1.0% 98.0%
+ [ 98, 104) 2 2.0% 100.0%
+ [104, 111) 0 0.0% 100.0%
+ [111, 120) 0 0.0% 100.0%
+ [120, 131) 0 0.0% 100.0%
+ [131, 144) 0 0.0% 100.0%
+ [144, 161) 0 0.0% 100.0%
+ [161, 181) 0 0.0% 100.0%
+ [181, inf) 0 0.0% 100.0%
+Benchmark___1K_chunk____10B 50 132496755 ns/op 0.15 MB/s
+--- Histogram (unit: ms)
+ Count: 50 Min: 126 Max: 141 Avg: 131.96
+ ------------------------------------------------------------
+ [126, 127) 1 2.0% 2.0%
+ [127, 128) 0 0.0% 2.0%
+ [128, 129) 1 2.0% 4.0%
+ [129, 130) 3 6.0% 10.0% #
+ [130, 132) 21 42.0% 52.0% ####
+ [132, 134) 13 26.0% 78.0% ###
+ [134, 136) 5 10.0% 88.0% #
+ [136, 139) 3 6.0% 94.0% #
+ [139, 143) 3 6.0% 100.0% #
+ [143, 148) 0 0.0% 100.0%
+ [148, 154) 0 0.0% 100.0%
+ [154, 161) 0 0.0% 100.0%
+ [161, 169) 0 0.0% 100.0%
+ [169, 179) 0 0.0% 100.0%
+ [179, 191) 0 0.0% 100.0%
+ [191, inf) 0 0.0% 100.0%
+Benchmark___1K_chunk____10B-2 100 84457521 ns/op 0.24 MB/s
+--- Histogram (unit: ms)
+ Count: 100 Min: 76 Max: 99 Avg: 83.95
+ ------------------------------------------------------------
+ [ 76, 77) 1 1.0% 1.0%
+ [ 77, 78) 3 3.0% 4.0%
+ [ 78, 79) 6 6.0% 10.0% #
+ [ 79, 80) 6 6.0% 16.0% #
+ [ 80, 82) 11 11.0% 27.0% #
+ [ 82, 84) 23 23.0% 50.0% ##
+ [ 84, 87) 28 28.0% 78.0% ###
+ [ 87, 91) 13 13.0% 91.0% #
+ [ 91, 96) 8 8.0% 99.0% #
+ [ 96, 102) 1 1.0% 100.0%
+ [102, 110) 0 0.0% 100.0%
+ [110, 119) 0 0.0% 100.0%
+ [119, 131) 0 0.0% 100.0%
+ [131, 146) 0 0.0% 100.0%
+ [146, 164) 0 0.0% 100.0%
+ [164, 186) 0 0.0% 100.0%
+ [186, inf) 0 0.0% 100.0%
+Benchmark___1K_chunk___100B 50 134022599 ns/op 1.49 MB/s
+--- Histogram (unit: ms)
+ Count: 50 Min: 127 Max: 138 Avg: 133.44
+ ------------------------------------------------------------
+ [127, 128) 1 2.0% 2.0%
+ [128, 129) 0 0.0% 2.0%
+ [129, 130) 0 0.0% 2.0%
+ [130, 131) 1 2.0% 4.0%
+ [131, 132) 3 6.0% 10.0% #
+ [132, 134) 27 54.0% 64.0% #####
+ [134, 136) 11 22.0% 86.0% ##
+ [136, 139) 7 14.0% 100.0% #
+ [139, 142) 0 0.0% 100.0%
+ [142, 146) 0 0.0% 100.0%
+ [146, 150) 0 0.0% 100.0%
+ [150, inf) 0 0.0% 100.0%
+Benchmark___1K_chunk___100B-2 100 85330166 ns/op 2.34 MB/s
+--- Histogram (unit: ms)
+ Count: 100 Min: 76 Max: 96 Avg: 84.86
+ ------------------------------------------------------------
+ [ 76, 77) 1 1.0% 1.0%
+ [ 77, 78) 2 2.0% 3.0%
+ [ 78, 79) 9 9.0% 12.0% #
+ [ 79, 80) 5 5.0% 17.0% #
+ [ 80, 82) 8 8.0% 25.0% #
+ [ 82, 84) 10 10.0% 35.0% #
+ [ 84, 87) 29 29.0% 64.0% ###
+ [ 87, 91) 25 25.0% 89.0% ###
+ [ 91, 95) 9 9.0% 98.0% #
+ [ 95, 101) 2 2.0% 100.0%
+ [101, 108) 0 0.0% 100.0%
+ [108, 116) 0 0.0% 100.0%
+ [116, 126) 0 0.0% 100.0%
+ [126, 139) 0 0.0% 100.0%
+ [139, 155) 0 0.0% 100.0%
+ [155, 174) 0 0.0% 100.0%
+ [174, inf) 0 0.0% 100.0%
+Benchmark___1K_chunk____1KB 50 155794224 ns/op 12.84 MB/s
+--- Histogram (unit: ms)
+ Count: 50 Min: 148 Max: 164 Avg: 155.34
+ ------------------------------------------------------------
+ [148, 149) 1 2.0% 2.0%
+ [149, 150) 0 0.0% 2.0%
+ [150, 151) 0 0.0% 2.0%
+ [151, 152) 3 6.0% 8.0% #
+ [152, 154) 4 8.0% 16.0% #
+ [154, 156) 20 40.0% 56.0% ####
+ [156, 159) 17 34.0% 90.0% ###
+ [159, 162) 3 6.0% 96.0% #
+ [162, 166) 2 4.0% 100.0%
+ [166, 171) 0 0.0% 100.0%
+ [171, 177) 0 0.0% 100.0%
+ [177, 184) 0 0.0% 100.0%
+ [184, 193) 0 0.0% 100.0%
+ [193, 204) 0 0.0% 100.0%
+ [204, 217) 0 0.0% 100.0%
+ [217, 233) 0 0.0% 100.0%
+ [233, inf) 0 0.0% 100.0%
+Benchmark___1K_chunk____1KB-2 100 95134335 ns/op 21.02 MB/s
+--- Histogram (unit: ms)
+ Count: 100 Min: 81 Max: 105 Avg: 94.59
+ ------------------------------------------------------------
+ [ 81, 82) 1 1.0% 1.0%
+ [ 82, 83) 1 1.0% 2.0%
+ [ 83, 84) 0 0.0% 2.0%
+ [ 84, 85) 1 1.0% 3.0%
+ [ 85, 87) 3 3.0% 6.0%
+ [ 87, 89) 10 10.0% 16.0% #
+ [ 89, 92) 6 6.0% 22.0% #
+ [ 92, 96) 31 31.0% 53.0% ###
+ [ 96, 101) 39 39.0% 92.0% ####
+ [101, 107) 8 8.0% 100.0% #
+ [107, 115) 0 0.0% 100.0%
+ [115, 125) 0 0.0% 100.0%
+ [125, 137) 0 0.0% 100.0%
+ [137, 152) 0 0.0% 100.0%
+ [152, 171) 0 0.0% 100.0%
+ [171, 195) 0 0.0% 100.0%
+ [195, inf) 0 0.0% 100.0%
+Benchmark___1K_chunk___10KB 20 340093645 ns/op 58.81 MB/s
+--- Histogram (unit: ms)
+ Count: 20 Min: 331 Max: 352 Avg: 339.65
+ ------------------------------------------------------------
+ [331, 332) 1 5.0% 5.0% #
+ [332, 333) 2 10.0% 15.0% #
+ [333, 334) 0 0.0% 15.0%
+ [334, 335) 0 0.0% 15.0%
+ [335, 337) 2 10.0% 25.0% #
+ [337, 339) 4 20.0% 45.0% ##
+ [339, 342) 5 25.0% 70.0% ###
+ [342, 346) 3 15.0% 85.0% ##
+ [346, 351) 2 10.0% 95.0% #
+ [351, 357) 1 5.0% 100.0% #
+ [357, 364) 0 0.0% 100.0%
+ [364, 373) 0 0.0% 100.0%
+ [373, 384) 0 0.0% 100.0%
+ [384, 397) 0 0.0% 100.0%
+ [397, 414) 0 0.0% 100.0%
+ [414, 434) 0 0.0% 100.0%
+ [434, inf) 0 0.0% 100.0%
+Benchmark___1K_chunk___10KB-2 50 180545176 ns/op 110.78 MB/s
+--- Histogram (unit: ms)
+ Count: 50 Min: 171 Max: 190 Avg: 180.06
+ ------------------------------------------------------------
+ [171, 172) 1 2.0% 2.0%
+ [172, 173) 0 0.0% 2.0%
+ [173, 174) 1 2.0% 4.0%
+ [174, 175) 0 0.0% 4.0%
+ [175, 177) 7 14.0% 18.0% #
+ [177, 179) 8 16.0% 34.0% ##
+ [179, 182) 14 28.0% 62.0% ###
+ [182, 185) 16 32.0% 94.0% ###
+ [185, 189) 2 4.0% 98.0%
+ [189, 194) 1 2.0% 100.0%
+ [194, 201) 0 0.0% 100.0%
+ [201, 209) 0 0.0% 100.0%
+ [209, 219) 0 0.0% 100.0%
+ [219, 231) 0 0.0% 100.0%
+ [231, 246) 0 0.0% 100.0%
+ [246, 264) 0 0.0% 100.0%
+ [264, inf) 0 0.0% 100.0%
+Benchmark___1K_chunk__100KB 3 2241353636 ns/op 89.23 MB/s
+--- Histogram (unit: s)
+ Count: 3 Min: 2 Max: 2 Avg: 2.00
+ ------------------------------------------------------------
+ [ 2, inf) 3 100.0% 100.0% ##########
+Benchmark___1K_chunk__100KB-2 5 1152111781 ns/op 173.59 MB/s
+--- Histogram (unit: s)
+ Count: 5 Min: 1 Max: 1 Avg: 1.00
+ ------------------------------------------------------------
+ [ 1, inf) 5 100.0% 100.0% ##########
+
+Benchmark__per_chunk____1B 50000 124707 ns/op 0.02 MB/s
+Benchmark__per_chunk____1B-2 100000 78017 ns/op 0.03 MB/s
+Benchmark__per_chunk___10B 50000 122584 ns/op 0.16 MB/s
+Benchmark__per_chunk___10B-2 100000 75094 ns/op 0.27 MB/s
+Benchmark__per_chunk__100B 50000 124183 ns/op 1.61 MB/s
+Benchmark__per_chunk__100B-2 100000 74955 ns/op 2.67 MB/s
+Benchmark__per_chunk___1KB 50000 144432 ns/op 13.85 MB/s
+Benchmark__per_chunk___1KB-2 100000 79305 ns/op 25.22 MB/s
+Benchmark__per_chunk__10KB 20000 314733 ns/op 63.55 MB/s
+Benchmark__per_chunk__10KB-2 50000 163532 ns/op 122.30 MB/s
+Benchmark__per_chunk_100KB 3000 2015176 ns/op 99.25 MB/s
+Benchmark__per_chunk_100KB-2 10000 1048505 ns/op 190.75 MB/s
+
+Benchmark___10B_mux__100_chunks___10B 500 16408021 ns/op 0.00 MB/s
+--- Histogram (unit: ms)
+ Count: 500 Min: 7 Max: 18 Avg: 15.99
+ ------------------------------------------------------------
+ [ 7, 8) 1 0.2% 0.2%
+ [ 8, 9) 2 0.4% 0.6%
+ [ 9, 10) 2 0.4% 1.0%
+ [ 10, 11) 0 0.0% 1.0%
+ [ 11, 12) 1 0.2% 1.2%
+ [ 12, 14) 8 1.6% 2.8%
+ [ 14, 16) 34 6.8% 9.6% #
+ [ 16, 19) 452 90.4% 100.0% #########
+ [ 19, 22) 0 0.0% 100.0%
+ [ 22, 26) 0 0.0% 100.0%
+ [ 26, 30) 0 0.0% 100.0%
+ [ 30, inf) 0 0.0% 100.0%
+Benchmark___10B_mux__100_chunks___10B-2 2000 3878477 ns/op 0.01 MB/s
+--- Histogram (unit: ms)
+ Count: 2000 Min: 1 Max: 9 Avg: 3.39
+ ------------------------------------------------------------
+ [ 1, 2) 122 6.1% 6.1% #
+ [ 2, 3) 599 30.0% 36.1% ###
+ [ 3, 4) 439 22.0% 58.0% ##
+ [ 4, 5) 389 19.5% 77.5% ##
+ [ 5, 6) 235 11.8% 89.2% #
+ [ 6, 8) 194 9.7% 98.9% #
+ [ 8, 10) 22 1.1% 100.0%
+ [ 10, 12) 0 0.0% 100.0%
+ [ 12, inf) 0 0.0% 100.0%
+Benchmark___10B_mux__100_chunks__100B 500 17052264 ns/op 0.00 MB/s
+--- Histogram (unit: ms)
+ Count: 500 Min: 7 Max: 21 Avg: 16.57
+ ------------------------------------------------------------
+ [ 7, 8) 1 0.2% 0.2%
+ [ 8, 9) 0 0.0% 0.2%
+ [ 9, 10) 0 0.0% 0.2%
+ [ 10, 11) 1 0.2% 0.4%
+ [ 11, 13) 1 0.2% 0.6%
+ [ 13, 15) 53 10.6% 11.2% #
+ [ 15, 17) 152 30.4% 41.6% ###
+ [ 17, 20) 281 56.2% 97.8% ######
+ [ 20, 24) 11 2.2% 100.0%
+ [ 24, 28) 0 0.0% 100.0%
+ [ 28, 33) 0 0.0% 100.0%
+ [ 33, 39) 0 0.0% 100.0%
+ [ 39, 47) 0 0.0% 100.0%
+ [ 47, 56) 0 0.0% 100.0%
+ [ 56, inf) 0 0.0% 100.0%
+Benchmark___10B_mux__100_chunks__100B-2 2000 4022209 ns/op 0.00 MB/s
+--- Histogram (unit: ms)
+ Count: 2000 Min: 1 Max: 12 Avg: 3.53
+ ------------------------------------------------------------
+ [ 1, 2) 99 5.0% 5.0%
+ [ 2, 3) 733 36.6% 41.6% ####
+ [ 3, 4) 384 19.2% 60.8% ##
+ [ 4, 5) 296 14.8% 75.6% #
+ [ 5, 6) 169 8.5% 84.1% #
+ [ 6, 8) 207 10.4% 94.4% #
+ [ 8, 10) 94 4.7% 99.1%
+ [ 10, 13) 18 0.9% 100.0%
+ [ 13, 16) 0 0.0% 100.0%
+ [ 16, 20) 0 0.0% 100.0%
+ [ 20, 24) 0 0.0% 100.0%
+ [ 24, inf) 0 0.0% 100.0%
+Benchmark___10B_mux__100_chunks___1KB 500 19001451 ns/op 0.00 MB/s
+--- Histogram (unit: ms)
+ Count: 500 Min: 7 Max: 23 Avg: 18.44
+ ------------------------------------------------------------
+ [ 7, 8) 1 0.2% 0.2%
+ [ 8, 9) 0 0.0% 0.2%
+ [ 9, 10) 0 0.0% 0.2%
+ [ 10, 11) 0 0.0% 0.2%
+ [ 11, 13) 2 0.4% 0.6%
+ [ 13, 15) 3 0.6% 1.2%
+ [ 15, 18) 244 48.8% 50.0% #####
+ [ 18, 21) 64 12.8% 62.8% #
+ [ 21, 25) 186 37.2% 100.0% ####
+ [ 25, 30) 0 0.0% 100.0%
+ [ 30, 36) 0 0.0% 100.0%
+ [ 36, 43) 0 0.0% 100.0%
+ [ 43, 52) 0 0.0% 100.0%
+ [ 52, 63) 0 0.0% 100.0%
+ [ 63, 76) 0 0.0% 100.0%
+ [ 76, 92) 0 0.0% 100.0%
+ [ 92, inf) 0 0.0% 100.0%
+Benchmark___10B_mux__100_chunks___1KB-2 2000 4690985 ns/op 0.00 MB/s
+--- Histogram (unit: ms)
+ Count: 2000 Min: 1 Max: 14 Avg: 4.20
+ ------------------------------------------------------------
+ [ 1, 2) 66 3.3% 3.3%
+ [ 2, 3) 547 27.4% 30.7% ###
+ [ 3, 4) 441 22.1% 52.7% ##
+ [ 4, 5) 267 13.4% 66.0% #
+ [ 5, 6) 228 11.4% 77.5% #
+ [ 6, 8) 213 10.7% 88.1% #
+ [ 8, 10) 118 5.9% 94.0% #
+ [ 10, 13) 95 4.8% 98.8%
+ [ 13, 16) 25 1.2% 100.0%
+ [ 16, 20) 0 0.0% 100.0%
+ [ 20, 25) 0 0.0% 100.0%
+ [ 25, 31) 0 0.0% 100.0%
+ [ 31, 38) 0 0.0% 100.0%
+ [ 38, inf) 0 0.0% 100.0%
+Benchmark___10B_mux__100_chunks__10KB 200 39337476 ns/op 0.00 MB/s
+--- Histogram (unit: ms)
+ Count: 200 Min: 31 Max: 43 Avg: 38.81
+ ------------------------------------------------------------
+ [ 31, 32) 1 0.5% 0.5%
+ [ 32, 33) 0 0.0% 0.5%
+ [ 33, 34) 0 0.0% 0.5%
+ [ 34, 35) 28 14.0% 14.5% #
+ [ 35, 36) 18 9.0% 23.5% #
+ [ 36, 38) 43 21.5% 45.0% ##
+ [ 38, 40) 1 0.5% 45.5%
+ [ 40, 43) 77 38.5% 84.0% ####
+ [ 43, 46) 32 16.0% 100.0% ##
+ [ 46, 50) 0 0.0% 100.0%
+ [ 50, 55) 0 0.0% 100.0%
+ [ 55, 61) 0 0.0% 100.0%
+ [ 61, inf) 0 0.0% 100.0%
+Benchmark___10B_mux__100_chunks__10KB-2 500 12546220 ns/op 0.00 MB/s
+--- Histogram (unit: ms)
+ Count: 500 Min: 1 Max: 26 Avg: 12.06
+ ------------------------------------------------------------
+ [ 1, 2) 12 2.4% 2.4%
+ [ 2, 3) 72 14.4% 16.8% #
+ [ 3, 4) 32 6.4% 23.2% #
+ [ 4, 5) 22 4.4% 27.6%
+ [ 5, 7) 32 6.4% 34.0% #
+ [ 7, 9) 26 5.2% 39.2% #
+ [ 9, 12) 39 7.8% 47.0% #
+ [ 12, 16) 61 12.2% 59.2% #
+ [ 16, 21) 105 21.0% 80.2% ##
+ [ 21, 27) 99 19.8% 100.0% ##
+ [ 27, 35) 0 0.0% 100.0%
+ [ 35, 45) 0 0.0% 100.0%
+ [ 45, 58) 0 0.0% 100.0%
+ [ 58, 74) 0 0.0% 100.0%
+ [ 74, 94) 0 0.0% 100.0%
+ [ 94, 118) 0 0.0% 100.0%
+ [118, inf) 0 0.0% 100.0%
+Benchmark___10B_mux___1K_chunks___10B 100 90698047 ns/op 0.00 MB/s
+--- Histogram (unit: ms)
+ Count: 100 Min: 3 Max: 146 Avg: 90.14
+ ------------------------------------------------------------
+ [ 3, 4) 1 1.0% 1.0%
+ [ 4, 5) 0 0.0% 1.0%
+ [ 5, 6) 3 3.0% 4.0%
+ [ 6, 8) 1 1.0% 5.0%
+ [ 8, 11) 0 0.0% 5.0%
+ [ 11, 16) 0 0.0% 5.0%
+ [ 16, 23) 2 2.0% 7.0%
+ [ 23, 33) 0 0.0% 7.0%
+ [ 33, 47) 4 4.0% 11.0%
+ [ 47, 66) 16 16.0% 27.0% ##
+ [ 66, 93) 28 28.0% 55.0% ###
+ [ 93, 131) 30 30.0% 85.0% ###
+ [131, 183) 15 15.0% 100.0% ##
+ [183, 256) 0 0.0% 100.0%
+ [256, 358) 0 0.0% 100.0%
+ [358, 501) 0 0.0% 100.0%
+ [501, inf) 0 0.0% 100.0%
+Benchmark___10B_mux___1K_chunks___10B-2 2000 5197620 ns/op 0.00 MB/s
+--- Histogram (unit: ms)
+ Count: 2000 Min: 1 Max: 44 Avg: 4.71
+ ------------------------------------------------------------
+ [ 1, 2) 42 2.1% 2.1%
+ [ 2, 3) 914 45.7% 47.8% #####
+ [ 3, 4) 548 27.4% 75.2% ###
+ [ 4, 6) 191 9.6% 84.8% #
+ [ 6, 8) 48 2.4% 87.2%
+ [ 8, 11) 50 2.5% 89.7%
+ [ 11, 15) 76 3.8% 93.5%
+ [ 15, 20) 30 1.5% 95.0%
+ [ 20, 27) 46 2.3% 97.2%
+ [ 27, 36) 40 2.0% 99.2%
+ [ 36, 48) 15 0.8% 100.0%
+ [ 48, 63) 0 0.0% 100.0%
+ [ 63, 83) 0 0.0% 100.0%
+ [ 83, 109) 0 0.0% 100.0%
+ [109, 142) 0 0.0% 100.0%
+ [142, 185) 0 0.0% 100.0%
+ [185, inf) 0 0.0% 100.0%
+Benchmark___10B_mux___1K_chunks__100B 100 94502013 ns/op 0.00 MB/s
+--- Histogram (unit: ms)
+ Count: 100 Min: 4 Max: 157 Avg: 93.97
+ ------------------------------------------------------------
+ [ 4, 5) 1 1.0% 1.0%
+ [ 5, 6) 0 0.0% 1.0%
+ [ 6, 7) 0 0.0% 1.0%
+ [ 7, 9) 1 1.0% 2.0%
+ [ 9, 12) 0 0.0% 2.0%
+ [ 12, 17) 0 0.0% 2.0%
+ [ 17, 24) 0 0.0% 2.0%
+ [ 24, 34) 1 1.0% 3.0%
+ [ 34, 48) 3 3.0% 6.0%
+ [ 48, 68) 19 19.0% 25.0% ##
+ [ 68, 96) 27 27.0% 52.0% ###
+ [ 96, 136) 39 39.0% 91.0% ####
+ [136, 191) 9 9.0% 100.0% #
+ [191, 269) 0 0.0% 100.0%
+ [269, 378) 0 0.0% 100.0%
+ [378, 531) 0 0.0% 100.0%
+ [531, inf) 0 0.0% 100.0%
+Benchmark___10B_mux___1K_chunks__100B-2 2000 5453937 ns/op 0.00 MB/s
+--- Histogram (unit: ms)
+ Count: 2000 Min: 1 Max: 44 Avg: 4.96
+ ------------------------------------------------------------
+ [ 1, 2) 28 1.4% 1.4%
+ [ 2, 3) 870 43.5% 44.9% ####
+ [ 3, 4) 565 28.2% 73.2% ###
+ [ 4, 6) 245 12.2% 85.4% #
+ [ 6, 8) 41 2.1% 87.5%
+ [ 8, 11) 42 2.1% 89.6%
+ [ 11, 15) 24 1.2% 90.8%
+ [ 15, 20) 73 3.7% 94.4%
+ [ 20, 27) 53 2.7% 97.1%
+ [ 27, 36) 37 1.9% 98.9%
+ [ 36, 48) 22 1.1% 100.0%
+ [ 48, 63) 0 0.0% 100.0%
+ [ 63, 83) 0 0.0% 100.0%
+ [ 83, 109) 0 0.0% 100.0%
+ [109, 142) 0 0.0% 100.0%
+ [142, 185) 0 0.0% 100.0%
+ [185, inf) 0 0.0% 100.0%
+Benchmark___10B_mux___1K_chunks___1KB 100 107052234 ns/op 0.00 MB/s
+--- Histogram (unit: ms)
+ Count: 100 Min: 9 Max: 169 Avg: 106.52
+ ------------------------------------------------------------
+ [ 9, 10) 1 1.0% 1.0%
+ [ 10, 11) 0 0.0% 1.0%
+ [ 11, 12) 0 0.0% 1.0%
+ [ 12, 14) 0 0.0% 1.0%
+ [ 14, 17) 2 2.0% 3.0%
+ [ 17, 22) 0 0.0% 3.0%
+ [ 22, 29) 0 0.0% 3.0%
+ [ 29, 39) 1 1.0% 4.0%
+ [ 39, 53) 2 2.0% 6.0%
+ [ 53, 74) 14 14.0% 20.0% #
+ [ 74, 103) 38 38.0% 58.0% ####
+ [103, 144) 11 11.0% 69.0% #
+ [144, 201) 31 31.0% 100.0% ###
+ [201, 282) 0 0.0% 100.0%
+ [282, 396) 0 0.0% 100.0%
+ [396, 555) 0 0.0% 100.0%
+ [555, inf) 0 0.0% 100.0%
+Benchmark___10B_mux___1K_chunks___1KB-2 2000 6698998 ns/op 0.00 MB/s
+--- Histogram (unit: ms)
+ Count: 2000 Min: 1 Max: 54 Avg: 6.20
+ ------------------------------------------------------------
+ [ 1, 2) 7 0.4% 0.4%
+ [ 2, 3) 554 27.7% 28.1% ###
+ [ 3, 4) 655 32.8% 60.8% ###
+ [ 4, 6) 433 21.7% 82.5% ##
+ [ 6, 8) 59 3.0% 85.4%
+ [ 8, 11) 38 1.9% 87.3%
+ [ 11, 15) 41 2.1% 89.4%
+ [ 15, 21) 63 3.2% 92.5%
+ [ 21, 29) 45 2.2% 94.8%
+ [ 29, 39) 61 3.1% 97.8%
+ [ 39, 53) 40 2.0% 99.8%
+ [ 53, 71) 4 0.2% 100.0%
+ [ 71, 94) 0 0.0% 100.0%
+ [ 94, 125) 0 0.0% 100.0%
+ [125, 165) 0 0.0% 100.0%
+ [165, 218) 0 0.0% 100.0%
+ [218, inf) 0 0.0% 100.0%
+Benchmark___10B_mux___1K_chunks__10KB 100 72865483 ns/op 0.00 MB/s
+--- Histogram (unit: ms)
+ Count: 100 Min: 17 Max: 119 Avg: 72.31
+ ------------------------------------------------------------
+ [ 17, 18) 4 4.0% 4.0%
+ [ 18, 19) 2 2.0% 6.0%
+ [ 19, 20) 0 0.0% 6.0%
+ [ 20, 22) 1 1.0% 7.0%
+ [ 22, 25) 1 1.0% 8.0%
+ [ 25, 29) 0 0.0% 8.0%
+ [ 29, 35) 1 1.0% 9.0%
+ [ 35, 43) 8 8.0% 17.0% #
+ [ 43, 54) 7 7.0% 24.0% #
+ [ 54, 70) 17 17.0% 41.0% ##
+ [ 70, 91) 33 33.0% 74.0% ###
+ [ 91, 120) 26 26.0% 100.0% ###
+ [120, 160) 0 0.0% 100.0%
+ [160, 215) 0 0.0% 100.0%
+ [215, 289) 0 0.0% 100.0%
+ [289, 391) 0 0.0% 100.0%
+ [391, inf) 0 0.0% 100.0%
+Benchmark___10B_mux___1K_chunks__10KB-2 200 36049367 ns/op 0.00 MB/s
+--- Histogram (unit: ms)
+ Count: 200 Min: 2 Max: 77 Avg: 35.57
+ ------------------------------------------------------------
+ [ 2, 3) 9 4.5% 4.5%
+ [ 3, 4) 7 3.5% 8.0%
+ [ 4, 5) 3 1.5% 9.5%
+ [ 5, 7) 6 3.0% 12.5%
+ [ 7, 10) 6 3.0% 15.5%
+ [ 10, 14) 6 3.0% 18.5%
+ [ 14, 19) 9 4.5% 23.0%
+ [ 19, 26) 17 8.5% 31.5% #
+ [ 26, 36) 29 14.5% 46.0% #
+ [ 36, 49) 46 23.0% 69.0% ##
+ [ 49, 66) 54 27.0% 96.0% ###
+ [ 66, 89) 8 4.0% 100.0%
+ [ 89, 120) 0 0.0% 100.0%
+ [120, 162) 0 0.0% 100.0%
+ [162, 218) 0 0.0% 100.0%
+ [218, 292) 0 0.0% 100.0%
+ [292, inf) 0 0.0% 100.0%
+Benchmark___1KB_mux__100_chunks___10B 500 17557482 ns/op 0.11 MB/s
+--- Histogram (unit: ms)
+ Count: 500 Min: 11 Max: 35 Avg: 17.08
+ ------------------------------------------------------------
+ [ 11, 12) 4 0.8% 0.8%
+ [ 12, 13) 3 0.6% 1.4%
+ [ 13, 14) 0 0.0% 1.4%
+ [ 14, 15) 98 19.6% 21.0% ##
+ [ 15, 17) 317 63.4% 84.4% ######
+ [ 17, 19) 11 2.2% 86.6%
+ [ 19, 22) 2 0.4% 87.0%
+ [ 22, 26) 4 0.8% 87.8%
+ [ 26, 31) 6 1.2% 89.0%
+ [ 31, 37) 55 11.0% 100.0% #
+ [ 37, 45) 0 0.0% 100.0%
+ [ 45, 55) 0 0.0% 100.0%
+ [ 55, 67) 0 0.0% 100.0%
+ [ 67, 82) 0 0.0% 100.0%
+ [ 82, 101) 0 0.0% 100.0%
+ [101, 125) 0 0.0% 100.0%
+ [125, inf) 0 0.0% 100.0%
+Benchmark___1KB_mux__100_chunks___10B-2 2000 4004379 ns/op 0.50 MB/s
+--- Histogram (unit: ms)
+ Count: 2000 Min: 1 Max: 21 Avg: 3.51
+ ------------------------------------------------------------
+ [ 1, 2) 40 2.0% 2.0%
+ [ 2, 3) 894 44.7% 46.7% ####
+ [ 3, 4) 504 25.2% 71.9% ###
+ [ 4, 5) 259 13.0% 84.9% #
+ [ 5, 7) 124 6.2% 91.1% #
+ [ 7, 9) 35 1.8% 92.8%
+ [ 9, 12) 88 4.4% 97.2%
+ [ 12, 16) 32 1.6% 98.8%
+ [ 16, 20) 16 0.8% 99.6%
+ [ 20, 26) 8 0.4% 100.0%
+ [ 26, 33) 0 0.0% 100.0%
+ [ 33, 41) 0 0.0% 100.0%
+ [ 41, 51) 0 0.0% 100.0%
+ [ 51, 64) 0 0.0% 100.0%
+ [ 64, 80) 0 0.0% 100.0%
+ [ 80, 99) 0 0.0% 100.0%
+ [ 99, inf) 0 0.0% 100.0%
+Benchmark___1KB_mux__100_chunks__100B 500 18342199 ns/op 0.11 MB/s
+--- Histogram (unit: ms)
+ Count: 500 Min: 8 Max: 39 Avg: 17.82
+ ------------------------------------------------------------
+ [ 8, 9) 1 0.2% 0.2%
+ [ 9, 10) 0 0.0% 0.2%
+ [ 10, 11) 0 0.0% 0.2%
+ [ 11, 12) 0 0.0% 0.2%
+ [ 12, 14) 4 0.8% 1.0%
+ [ 14, 17) 411 82.2% 83.2% ########
+ [ 17, 20) 15 3.0% 86.2%
+ [ 20, 24) 3 0.6% 86.8%
+ [ 24, 30) 7 1.4% 88.2%
+ [ 30, 37) 41 8.2% 96.4% #
+ [ 37, 46) 18 3.6% 100.0%
+ [ 46, 58) 0 0.0% 100.0%
+ [ 58, 73) 0 0.0% 100.0%
+ [ 73, 92) 0 0.0% 100.0%
+ [ 92, 116) 0 0.0% 100.0%
+ [116, 146) 0 0.0% 100.0%
+ [146, inf) 0 0.0% 100.0%
+Benchmark___1KB_mux__100_chunks__100B-2 2000 4158739 ns/op 0.48 MB/s
+--- Histogram (unit: ms)
+ Count: 2000 Min: 1 Max: 24 Avg: 3.66
+ ------------------------------------------------------------
+ [ 1, 2) 27 1.4% 1.4%
+ [ 2, 3) 875 43.8% 45.1% ####
+ [ 3, 4) 522 26.1% 71.2% ###
+ [ 4, 5) 257 12.9% 84.1% #
+ [ 5, 7) 153 7.7% 91.7% #
+ [ 7, 9) 19 1.0% 92.7%
+ [ 9, 12) 51 2.6% 95.2%
+ [ 12, 16) 58 2.9% 98.1%
+ [ 16, 21) 20 1.0% 99.1%
+ [ 21, 27) 18 0.9% 100.0%
+ [ 27, 35) 0 0.0% 100.0%
+ [ 35, 44) 0 0.0% 100.0%
+ [ 44, 56) 0 0.0% 100.0%
+ [ 56, 71) 0 0.0% 100.0%
+ [ 71, 89) 0 0.0% 100.0%
+ [ 89, 111) 0 0.0% 100.0%
+ [111, inf) 0 0.0% 100.0%
+Benchmark___1KB_mux__100_chunks___1KB 300 20721971 ns/op 0.10 MB/s
+--- Histogram (unit: ms)
+ Count: 300 Min: 13 Max: 42 Avg: 20.25
+ ------------------------------------------------------------
+ [ 13, 14) 1 0.3% 0.3%
+ [ 14, 15) 0 0.0% 0.3%
+ [ 15, 16) 0 0.0% 0.3%
+ [ 16, 17) 21 7.0% 7.3% #
+ [ 17, 19) 213 71.0% 78.3% #######
+ [ 19, 22) 15 5.0% 83.3% #
+ [ 22, 25) 3 1.0% 84.3%
+ [ 25, 29) 7 2.3% 86.7%
+ [ 29, 35) 8 2.7% 89.3%
+ [ 35, 42) 31 10.3% 99.7% #
+ [ 42, 51) 1 0.3% 100.0%
+ [ 51, 62) 0 0.0% 100.0%
+ [ 62, 76) 0 0.0% 100.0%
+ [ 76, 94) 0 0.0% 100.0%
+ [ 94, 117) 0 0.0% 100.0%
+ [117, 145) 0 0.0% 100.0%
+ [145, inf) 0 0.0% 100.0%
+Benchmark___1KB_mux__100_chunks___1KB-2 2000 4859684 ns/op 0.41 MB/s
+--- Histogram (unit: ms)
+ Count: 2000 Min: 1 Max: 27 Avg: 4.37
+ ------------------------------------------------------------
+ [ 1, 2) 16 0.8% 0.8%
+ [ 2, 3) 597 29.9% 30.7% ###
+ [ 3, 4) 522 26.1% 56.8% ###
+ [ 4, 5) 321 16.1% 72.8% ##
+ [ 5, 7) 331 16.6% 89.4% ##
+ [ 7, 9) 57 2.9% 92.2%
+ [ 9, 12) 30 1.5% 93.7%
+ [ 12, 16) 65 3.2% 97.0%
+ [ 16, 21) 25 1.2% 98.2%
+ [ 21, 28) 36 1.8% 100.0%
+ [ 28, 36) 0 0.0% 100.0%
+ [ 36, 46) 0 0.0% 100.0%
+ [ 46, 59) 0 0.0% 100.0%
+ [ 59, 75) 0 0.0% 100.0%
+ [ 75, 95) 0 0.0% 100.0%
+ [ 95, 120) 0 0.0% 100.0%
+ [120, inf) 0 0.0% 100.0%
+Benchmark___1KB_mux__100_chunks__10KB 200 41306487 ns/op 0.05 MB/s
+--- Histogram (unit: ms)
+ Count: 200 Min: 14 Max: 62 Avg: 40.84
+ ------------------------------------------------------------
+ [ 14, 15) 1 0.5% 0.5%
+ [ 15, 16) 0 0.0% 0.5%
+ [ 16, 17) 1 0.5% 1.0%
+ [ 17, 19) 0 0.0% 1.0%
+ [ 19, 21) 0 0.0% 1.0%
+ [ 21, 24) 1 0.5% 1.5%
+ [ 24, 28) 0 0.0% 1.5%
+ [ 28, 34) 7 3.5% 5.0%
+ [ 34, 41) 123 61.5% 66.5% ######
+ [ 41, 51) 20 10.0% 76.5% #
+ [ 51, 64) 47 23.5% 100.0% ##
+ [ 64, 81) 0 0.0% 100.0%
+ [ 81, 103) 0 0.0% 100.0%
+ [103, 131) 0 0.0% 100.0%
+ [131, 168) 0 0.0% 100.0%
+ [168, 215) 0 0.0% 100.0%
+ [215, inf) 0 0.0% 100.0%
+Benchmark___1KB_mux__100_chunks__10KB-2 500 13333632 ns/op 0.15 MB/s
+--- Histogram (unit: ms)
+ Count: 500 Min: 1 Max: 35 Avg: 12.88
+ ------------------------------------------------------------
+ [ 1, 2) 6 1.2% 1.2%
+ [ 2, 3) 85 17.0% 18.2% ##
+ [ 3, 4) 28 5.6% 23.8% #
+ [ 4, 6) 21 4.2% 28.0%
+ [ 6, 8) 24 4.8% 32.8%
+ [ 8, 11) 58 11.6% 44.4% #
+ [ 11, 15) 41 8.2% 52.6% #
+ [ 15, 20) 164 32.8% 85.4% ###
+ [ 20, 26) 11 2.2% 87.6%
+ [ 26, 34) 59 11.8% 99.4% #
+ [ 34, 44) 3 0.6% 100.0%
+ [ 44, 57) 0 0.0% 100.0%
+ [ 57, 73) 0 0.0% 100.0%
+ [ 73, 94) 0 0.0% 100.0%
+ [ 94, 120) 0 0.0% 100.0%
+ [120, 153) 0 0.0% 100.0%
+ [153, inf) 0 0.0% 100.0%
+Benchmark___1KB_mux___1K_chunks___10B 100 95489036 ns/op 0.02 MB/s
+--- Histogram (unit: ms)
+ Count: 100 Min: 6 Max: 149 Avg: 95.00
+ ------------------------------------------------------------
+ [ 6, 7) 1 1.0% 1.0%
+ [ 7, 8) 0 0.0% 1.0%
+ [ 8, 9) 0 0.0% 1.0%
+ [ 9, 11) 1 1.0% 2.0%
+ [ 11, 14) 0 0.0% 2.0%
+ [ 14, 19) 0 0.0% 2.0%
+ [ 19, 26) 0 0.0% 2.0%
+ [ 26, 36) 3 3.0% 5.0%
+ [ 36, 50) 8 8.0% 13.0% #
+ [ 50, 69) 10 10.0% 23.0% #
+ [ 69, 96) 21 21.0% 44.0% ##
+ [ 96, 134) 38 38.0% 82.0% ####
+ [134, 186) 18 18.0% 100.0% ##
+ [186, 259) 0 0.0% 100.0%
+ [259, 361) 0 0.0% 100.0%
+ [361, 504) 0 0.0% 100.0%
+ [504, inf) 0 0.0% 100.0%
+Benchmark___1KB_mux___1K_chunks___10B-2 2000 5187056 ns/op 0.39 MB/s
+--- Histogram (unit: ms)
+ Count: 2000 Min: 1 Max: 49 Avg: 4.70
+ ------------------------------------------------------------
+ [ 1, 2) 19 1.0% 1.0%
+ [ 2, 3) 848 42.4% 43.4% ####
+ [ 3, 4) 637 31.9% 75.2% ###
+ [ 4, 6) 247 12.4% 87.6% #
+ [ 6, 8) 26 1.3% 88.9%
+ [ 8, 11) 19 1.0% 89.8%
+ [ 11, 15) 44 2.2% 92.0%
+ [ 15, 21) 40 2.0% 94.0%
+ [ 21, 28) 93 4.7% 98.7%
+ [ 28, 38) 24 1.2% 99.9%
+ [ 38, 51) 3 0.2% 100.0%
+ [ 51, 68) 0 0.0% 100.0%
+ [ 68, 90) 0 0.0% 100.0%
+ [ 90, 118) 0 0.0% 100.0%
+ [118, 155) 0 0.0% 100.0%
+ [155, 202) 0 0.0% 100.0%
+ [202, inf) 0 0.0% 100.0%
+Benchmark___1KB_mux___1K_chunks__100B 100 96851485 ns/op 0.02 MB/s
+--- Histogram (unit: ms)
+ Count: 100 Min: 16 Max: 168 Avg: 96.35
+ ------------------------------------------------------------
+ [ 16, 17) 1 1.0% 1.0%
+ [ 17, 18) 1 1.0% 2.0%
+ [ 18, 19) 0 0.0% 2.0%
+ [ 19, 21) 0 0.0% 2.0%
+ [ 21, 24) 0 0.0% 2.0%
+ [ 24, 29) 2 2.0% 4.0%
+ [ 29, 36) 2 2.0% 6.0%
+ [ 36, 46) 2 2.0% 8.0%
+ [ 46, 60) 7 7.0% 15.0% #
+ [ 60, 80) 16 16.0% 31.0% ##
+ [ 80, 108) 29 29.0% 60.0% ###
+ [108, 147) 36 36.0% 96.0% ####
+ [147, 202) 4 4.0% 100.0%
+ [202, 279) 0 0.0% 100.0%
+ [279, 387) 0 0.0% 100.0%
+ [387, 538) 0 0.0% 100.0%
+ [538, inf) 0 0.0% 100.0%
+Benchmark___1KB_mux___1K_chunks__100B-2 2000 5385397 ns/op 0.37 MB/s
+--- Histogram (unit: ms)
+ Count: 2000 Min: 1 Max: 56 Avg: 4.88
+ ------------------------------------------------------------
+ [ 1, 2) 22 1.1% 1.1%
+ [ 2, 3) 822 41.1% 42.2% ####
+ [ 3, 4) 657 32.9% 75.0% ###
+ [ 4, 6) 262 13.1% 88.2% #
+ [ 6, 8) 33 1.7% 89.8%
+ [ 8, 11) 15 0.8% 90.6%
+ [ 11, 15) 16 0.8% 91.4%
+ [ 15, 21) 40 2.0% 93.4%
+ [ 21, 29) 93 4.7% 98.0%
+ [ 29, 40) 29 1.5% 99.5%
+ [ 40, 54) 10 0.5% 100.0%
+ [ 54, 72) 1 0.1% 100.0%
+ [ 72, 96) 0 0.0% 100.0%
+ [ 96, 128) 0 0.0% 100.0%
+ [128, 170) 0 0.0% 100.0%
+ [170, 225) 0 0.0% 100.0%
+ [225, inf) 0 0.0% 100.0%
+Benchmark___1KB_mux___1K_chunks___1KB 100 110514593 ns/op 0.02 MB/s
+--- Histogram (unit: ms)
+ Count: 100 Min: 6 Max: 174 Avg: 110.01
+ ------------------------------------------------------------
+ [ 6, 7) 1 1.0% 1.0%
+ [ 7, 8) 0 0.0% 1.0%
+ [ 8, 9) 0 0.0% 1.0%
+ [ 9, 11) 0 0.0% 1.0%
+ [ 11, 14) 1 1.0% 2.0%
+ [ 14, 19) 1 1.0% 3.0%
+ [ 19, 26) 1 1.0% 4.0%
+ [ 26, 36) 3 3.0% 7.0%
+ [ 36, 51) 3 3.0% 10.0%
+ [ 51, 72) 13 13.0% 23.0% #
+ [ 72, 102) 23 23.0% 46.0% ##
+ [102, 144) 19 19.0% 65.0% ##
+ [144, 204) 35 35.0% 100.0% ####
+ [204, 288) 0 0.0% 100.0%
+ [288, 407) 0 0.0% 100.0%
+ [407, 575) 0 0.0% 100.0%
+ [575, inf) 0 0.0% 100.0%
+Benchmark___1KB_mux___1K_chunks___1KB-2 1000 7283778 ns/op 0.27 MB/s
+--- Histogram (unit: ms)
+ Count: 1000 Min: 1 Max: 44 Avg: 6.78
+ ------------------------------------------------------------
+ [ 1, 2) 23 2.3% 2.3%
+ [ 2, 3) 268 26.8% 29.1% ###
+ [ 3, 4) 295 29.5% 58.6% ###
+ [ 4, 6) 204 20.4% 79.0% ##
+ [ 6, 8) 51 5.1% 84.1% #
+ [ 8, 11) 13 1.3% 85.4%
+ [ 11, 15) 9 0.9% 86.3%
+ [ 15, 20) 29 2.9% 89.2%
+ [ 20, 27) 12 1.2% 90.4%
+ [ 27, 36) 80 8.0% 98.4% #
+ [ 36, 48) 16 1.6% 100.0%
+ [ 48, 63) 0 0.0% 100.0%
+ [ 63, 83) 0 0.0% 100.0%
+ [ 83, 109) 0 0.0% 100.0%
+ [109, 142) 0 0.0% 100.0%
+ [142, 185) 0 0.0% 100.0%
+ [185, inf) 0 0.0% 100.0%
+Benchmark___1KB_mux___1K_chunks__10KB 100 76530881 ns/op 0.03 MB/s
+--- Histogram (unit: ms)
+ Count: 100 Min: 17 Max: 137 Avg: 75.99
+ ------------------------------------------------------------
+ [ 17, 18) 6 6.0% 6.0% #
+ [ 18, 19) 0 0.0% 6.0%
+ [ 19, 20) 1 1.0% 7.0%
+ [ 20, 22) 2 2.0% 9.0%
+ [ 22, 25) 0 0.0% 9.0%
+ [ 25, 29) 0 0.0% 9.0%
+ [ 29, 35) 0 0.0% 9.0%
+ [ 35, 44) 5 5.0% 14.0% #
+ [ 44, 56) 10 10.0% 24.0% #
+ [ 56, 73) 20 20.0% 44.0% ##
+ [ 73, 97) 27 27.0% 71.0% ###
+ [ 97, 130) 28 28.0% 99.0% ###
+ [130, 176) 1 1.0% 100.0%
+ [176, 239) 0 0.0% 100.0%
+ [239, 326) 0 0.0% 100.0%
+ [326, 445) 0 0.0% 100.0%
+ [445, inf) 0 0.0% 100.0%
+Benchmark___1KB_mux___1K_chunks__10KB-2 200 38503258 ns/op 0.05 MB/s
+--- Histogram (unit: ms)
+ Count: 200 Min: 2 Max: 80 Avg: 38.01
+ ------------------------------------------------------------
+ [ 2, 3) 10 5.0% 5.0% #
+ [ 3, 4) 1 0.5% 5.5%
+ [ 4, 5) 2 1.0% 6.5%
+ [ 5, 7) 4 2.0% 8.5%
+ [ 7, 10) 5 2.5% 11.0%
+ [ 10, 14) 12 6.0% 17.0% #
+ [ 14, 19) 4 2.0% 19.0%
+ [ 19, 26) 12 6.0% 25.0% #
+ [ 26, 36) 36 18.0% 43.0% ##
+ [ 36, 49) 54 27.0% 70.0% ###
+ [ 49, 67) 47 23.5% 93.5% ##
+ [ 67, 91) 13 6.5% 100.0% #
+ [ 91, 123) 0 0.0% 100.0%
+ [123, 166) 0 0.0% 100.0%
+ [166, 224) 0 0.0% 100.0%
+ [224, 302) 0 0.0% 100.0%
+ [302, inf) 0 0.0% 100.0%
diff --git a/profiles/internal/ipc/benchmark/benchmark.vdl b/profiles/internal/ipc/benchmark/benchmark.vdl
new file mode 100644
index 0000000..3e9f0ea
--- /dev/null
+++ b/profiles/internal/ipc/benchmark/benchmark.vdl
@@ -0,0 +1,14 @@
+// Package benchmark provides simple tools to measure the performance of the
+// IPC system.
+package benchmark
+
+import (
+ "v.io/v23/services/security/access"
+)
+
+type Benchmark interface {
+ // Echo returns the payload that it receives.
+ Echo(Payload []byte) ([]byte | error) {access.Read}
+ // EchoStream returns the payload that it receives via the stream.
+ EchoStream() stream<[]byte,[]byte> error {access.Read}
+}
diff --git a/profiles/internal/ipc/benchmark/benchmark.vdl.go b/profiles/internal/ipc/benchmark/benchmark.vdl.go
new file mode 100644
index 0000000..69c808b
--- /dev/null
+++ b/profiles/internal/ipc/benchmark/benchmark.vdl.go
@@ -0,0 +1,352 @@
+// This file was auto-generated by the veyron vdl tool.
+// Source: benchmark.vdl
+
+// Package benchmark provides simple tools to measure the performance of the
+// IPC system.
+package benchmark
+
+import (
+ // VDL system imports
+ "io"
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/vdl"
+
+ // VDL user imports
+ "v.io/v23/services/security/access"
+)
+
+// BenchmarkClientMethods is the client interface
+// containing Benchmark methods.
+type BenchmarkClientMethods interface {
+ // Echo returns the payload that it receives.
+ Echo(ctx *context.T, Payload []byte, opts ...ipc.CallOpt) ([]byte, error)
+ // EchoStream returns the payload that it receives via the stream.
+ EchoStream(*context.T, ...ipc.CallOpt) (BenchmarkEchoStreamClientCall, error)
+}
+
+// BenchmarkClientStub adds universal methods to BenchmarkClientMethods.
+type BenchmarkClientStub interface {
+ BenchmarkClientMethods
+ ipc.UniversalServiceMethods
+}
+
+// BenchmarkClient returns a client stub for Benchmark.
+func BenchmarkClient(name string, opts ...ipc.BindOpt) BenchmarkClientStub {
+ var client ipc.Client
+ for _, opt := range opts {
+ if clientOpt, ok := opt.(ipc.Client); ok {
+ client = clientOpt
+ }
+ }
+ return implBenchmarkClientStub{name, client}
+}
+
+type implBenchmarkClientStub struct {
+ name string
+ client ipc.Client
+}
+
+func (c implBenchmarkClientStub) c(ctx *context.T) ipc.Client {
+ if c.client != nil {
+ return c.client
+ }
+ return v23.GetClient(ctx)
+}
+
+func (c implBenchmarkClientStub) Echo(ctx *context.T, i0 []byte, opts ...ipc.CallOpt) (o0 []byte, err error) {
+ var call ipc.ClientCall
+ if call, err = c.c(ctx).StartCall(ctx, c.name, "Echo", []interface{}{i0}, opts...); err != nil {
+ return
+ }
+ err = call.Finish(&o0)
+ return
+}
+
+func (c implBenchmarkClientStub) EchoStream(ctx *context.T, opts ...ipc.CallOpt) (ocall BenchmarkEchoStreamClientCall, err error) {
+ var call ipc.ClientCall
+ if call, err = c.c(ctx).StartCall(ctx, c.name, "EchoStream", nil, opts...); err != nil {
+ return
+ }
+ ocall = &implBenchmarkEchoStreamClientCall{ClientCall: call}
+ return
+}
+
+// BenchmarkEchoStreamClientStream is the client stream for Benchmark.EchoStream.
+type BenchmarkEchoStreamClientStream interface {
+ // RecvStream returns the receiver side of the Benchmark.EchoStream client stream.
+ RecvStream() interface {
+ // Advance stages an item so that it may be retrieved via Value. Returns
+ // true iff there is an item to retrieve. Advance must be called before
+ // Value is called. May block if an item is not available.
+ Advance() bool
+ // Value returns the item that was staged by Advance. May panic if Advance
+ // returned false or was not called. Never blocks.
+ Value() []byte
+ // Err returns any error encountered by Advance. Never blocks.
+ Err() error
+ }
+ // SendStream returns the send side of the Benchmark.EchoStream client stream.
+ SendStream() interface {
+ // Send places the item onto the output stream. Returns errors
+ // encountered while sending, or if Send is called after Close or
+ // the stream has been canceled. Blocks if there is no buffer
+ // space; will unblock when buffer space is available or after
+ // the stream has been canceled.
+ Send(item []byte) error
+ // Close indicates to the server that no more items will be sent;
+ // server Recv calls will receive io.EOF after all sent items.
+ // This is an optional call - e.g. a client might call Close if it
+ // needs to continue receiving items from the server after it's
+ // done sending. Returns errors encountered while closing, or if
+ // Close is called after the stream has been canceled. Like Send,
+ // blocks if there is no buffer space available.
+ Close() error
+ }
+}
+
+// BenchmarkEchoStreamClientCall represents the call returned from Benchmark.EchoStream.
+type BenchmarkEchoStreamClientCall interface {
+ BenchmarkEchoStreamClientStream
+ // Finish performs the equivalent of SendStream().Close, then blocks until
+ // the server is done, and returns the positional return values for the call.
+ //
+ // Finish returns immediately if the call has been canceled; depending on the
+ // timing the output could either be an error signaling cancelation, or the
+ // valid positional return values from the server.
+ //
+ // Calling Finish is mandatory for releasing stream resources, unless the call
+ // has been canceled or any of the other methods return an error. Finish should
+ // be called at most once.
+ Finish() error
+}
+
+type implBenchmarkEchoStreamClientCall struct {
+ ipc.ClientCall
+ valRecv []byte
+ errRecv error
+}
+
+func (c *implBenchmarkEchoStreamClientCall) RecvStream() interface {
+ Advance() bool
+ Value() []byte
+ Err() error
+} {
+ return implBenchmarkEchoStreamCallRecv{c}
+}
+
+type implBenchmarkEchoStreamCallRecv struct {
+ c *implBenchmarkEchoStreamClientCall
+}
+
+func (c implBenchmarkEchoStreamCallRecv) Advance() bool {
+ c.c.errRecv = c.c.Recv(&c.c.valRecv)
+ return c.c.errRecv == nil
+}
+func (c implBenchmarkEchoStreamCallRecv) Value() []byte {
+ return c.c.valRecv
+}
+func (c implBenchmarkEchoStreamCallRecv) Err() error {
+ if c.c.errRecv == io.EOF {
+ return nil
+ }
+ return c.c.errRecv
+}
+func (c *implBenchmarkEchoStreamClientCall) SendStream() interface {
+ Send(item []byte) error
+ Close() error
+} {
+ return implBenchmarkEchoStreamCallSend{c}
+}
+
+type implBenchmarkEchoStreamCallSend struct {
+ c *implBenchmarkEchoStreamClientCall
+}
+
+func (c implBenchmarkEchoStreamCallSend) Send(item []byte) error {
+ return c.c.Send(item)
+}
+func (c implBenchmarkEchoStreamCallSend) Close() error {
+ return c.c.CloseSend()
+}
+func (c *implBenchmarkEchoStreamClientCall) Finish() (err error) {
+ err = c.ClientCall.Finish()
+ return
+}
+
+// BenchmarkServerMethods is the interface a server writer
+// implements for Benchmark.
+type BenchmarkServerMethods interface {
+ // Echo returns the payload that it receives.
+ Echo(ctx ipc.ServerCall, Payload []byte) ([]byte, error)
+ // EchoStream returns the payload that it receives via the stream.
+ EchoStream(BenchmarkEchoStreamContext) error
+}
+
+// BenchmarkServerStubMethods is the server interface containing
+// Benchmark methods, as expected by ipc.Server.
+// The only difference between this interface and BenchmarkServerMethods
+// is the streaming methods.
+type BenchmarkServerStubMethods interface {
+ // Echo returns the payload that it receives.
+ Echo(ctx ipc.ServerCall, Payload []byte) ([]byte, error)
+ // EchoStream returns the payload that it receives via the stream.
+ EchoStream(*BenchmarkEchoStreamContextStub) error
+}
+
+// BenchmarkServerStub adds universal methods to BenchmarkServerStubMethods.
+type BenchmarkServerStub interface {
+ BenchmarkServerStubMethods
+ // Describe the Benchmark interfaces.
+ Describe__() []ipc.InterfaceDesc
+}
+
+// BenchmarkServer returns a server stub for Benchmark.
+// It converts an implementation of BenchmarkServerMethods into
+// an object that may be used by ipc.Server.
+func BenchmarkServer(impl BenchmarkServerMethods) BenchmarkServerStub {
+ stub := implBenchmarkServerStub{
+ impl: impl,
+ }
+ // Initialize GlobState; always check the stub itself first, to handle the
+ // case where the user has the Glob method defined in their VDL source.
+ if gs := ipc.NewGlobState(stub); gs != nil {
+ stub.gs = gs
+ } else if gs := ipc.NewGlobState(impl); gs != nil {
+ stub.gs = gs
+ }
+ return stub
+}
+
+type implBenchmarkServerStub struct {
+ impl BenchmarkServerMethods
+ gs *ipc.GlobState
+}
+
+func (s implBenchmarkServerStub) Echo(ctx ipc.ServerCall, i0 []byte) ([]byte, error) {
+ return s.impl.Echo(ctx, i0)
+}
+
+func (s implBenchmarkServerStub) EchoStream(ctx *BenchmarkEchoStreamContextStub) error {
+ return s.impl.EchoStream(ctx)
+}
+
+func (s implBenchmarkServerStub) Globber() *ipc.GlobState {
+ return s.gs
+}
+
+func (s implBenchmarkServerStub) Describe__() []ipc.InterfaceDesc {
+ return []ipc.InterfaceDesc{BenchmarkDesc}
+}
+
+// BenchmarkDesc describes the Benchmark interface.
+var BenchmarkDesc ipc.InterfaceDesc = descBenchmark
+
+// descBenchmark hides the desc to keep godoc clean.
+var descBenchmark = ipc.InterfaceDesc{
+ Name: "Benchmark",
+ PkgPath: "v.io/x/ref/profiles/internal/ipc/benchmark",
+ Methods: []ipc.MethodDesc{
+ {
+ Name: "Echo",
+ Doc: "// Echo returns the payload that it receives.",
+ InArgs: []ipc.ArgDesc{
+ {"Payload", ``}, // []byte
+ },
+ OutArgs: []ipc.ArgDesc{
+ {"", ``}, // []byte
+ },
+ Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Read"))},
+ },
+ {
+ Name: "EchoStream",
+ Doc: "// EchoStream returns the payload that it receives via the stream.",
+ Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Read"))},
+ },
+ },
+}
+
+// BenchmarkEchoStreamServerStream is the server stream for Benchmark.EchoStream.
+type BenchmarkEchoStreamServerStream interface {
+ // RecvStream returns the receiver side of the Benchmark.EchoStream server stream.
+ RecvStream() interface {
+ // Advance stages an item so that it may be retrieved via Value. Returns
+ // true iff there is an item to retrieve. Advance must be called before
+ // Value is called. May block if an item is not available.
+ Advance() bool
+ // Value returns the item that was staged by Advance. May panic if Advance
+ // returned false or was not called. Never blocks.
+ Value() []byte
+ // Err returns any error encountered by Advance. Never blocks.
+ Err() error
+ }
+ // SendStream returns the send side of the Benchmark.EchoStream server stream.
+ SendStream() interface {
+ // Send places the item onto the output stream. Returns errors encountered
+ // while sending. Blocks if there is no buffer space; will unblock when
+ // buffer space is available.
+ Send(item []byte) error
+ }
+}
+
+// BenchmarkEchoStreamContext represents the context passed to Benchmark.EchoStream.
+type BenchmarkEchoStreamContext interface {
+ ipc.ServerCall
+ BenchmarkEchoStreamServerStream
+}
+
+// BenchmarkEchoStreamContextStub is a wrapper that converts ipc.StreamServerCall into
+// a typesafe stub that implements BenchmarkEchoStreamContext.
+type BenchmarkEchoStreamContextStub struct {
+ ipc.StreamServerCall
+ valRecv []byte
+ errRecv error
+}
+
+// Init initializes BenchmarkEchoStreamContextStub from ipc.StreamServerCall.
+func (s *BenchmarkEchoStreamContextStub) Init(call ipc.StreamServerCall) {
+ s.StreamServerCall = call
+}
+
+// RecvStream returns the receiver side of the Benchmark.EchoStream server stream.
+func (s *BenchmarkEchoStreamContextStub) RecvStream() interface {
+ Advance() bool
+ Value() []byte
+ Err() error
+} {
+ return implBenchmarkEchoStreamContextRecv{s}
+}
+
+type implBenchmarkEchoStreamContextRecv struct {
+ s *BenchmarkEchoStreamContextStub
+}
+
+func (s implBenchmarkEchoStreamContextRecv) Advance() bool {
+ s.s.errRecv = s.s.Recv(&s.s.valRecv)
+ return s.s.errRecv == nil
+}
+func (s implBenchmarkEchoStreamContextRecv) Value() []byte {
+ return s.s.valRecv
+}
+func (s implBenchmarkEchoStreamContextRecv) Err() error {
+ if s.s.errRecv == io.EOF {
+ return nil
+ }
+ return s.s.errRecv
+}
+
+// SendStream returns the send side of the Benchmark.EchoStream server stream.
+func (s *BenchmarkEchoStreamContextStub) SendStream() interface {
+ Send(item []byte) error
+} {
+ return implBenchmarkEchoStreamContextSend{s}
+}
+
+type implBenchmarkEchoStreamContextSend struct {
+ s *BenchmarkEchoStreamContextStub
+}
+
+func (s implBenchmarkEchoStreamContextSend) Send(item []byte) error {
+ return s.s.Send(item)
+}
diff --git a/profiles/internal/ipc/benchmark/benchmark/main.go b/profiles/internal/ipc/benchmark/benchmark/main.go
new file mode 100644
index 0000000..71190f5
--- /dev/null
+++ b/profiles/internal/ipc/benchmark/benchmark/main.go
@@ -0,0 +1,55 @@
+// A simple command-line tool to run the benchmark client.
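+//
+// A sketch of a typical invocation (the server name "/localhost:8100" and the
+// flag values are illustrative only; the flags themselves are defined below):
+//
+//	benchmark --server=/localhost:8100 --iterations=100 --payload_size=1000
+//	benchmark --server=/localhost:8100 --iterations=100 --chunk_count=10 --payload_size=1000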
+package main
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ tbm "v.io/x/ref/lib/testutil/benchmark"
+ _ "v.io/x/ref/profiles"
+ "v.io/x/ref/profiles/internal/ipc/benchmark/internal"
+
+ "v.io/v23"
+ "v.io/x/lib/vlog"
+)
+
+var (
+ server = flag.String("server", "", "address of the server to connect to")
+
+ iterations = flag.Int("iterations", 100, "number of iterations to run")
+
+ chunkCnt = flag.Int("chunk_count", 0, "number of chunks to send per streaming RPC (if zero, use non-streaming RPC)")
+ payloadSize = flag.Int("payload_size", 0, "size of payload in bytes")
+ chunkCntMux = flag.Int("mux_chunk_count", 0, "number of chunks to send in background")
+ payloadSizeMux = flag.Int("mux_payload_size", 0, "size of payload to send in background")
+)
+
+func main() {
+ ctx, shutdown := v23.Init()
+ defer shutdown()
+
+ if *chunkCntMux > 0 && *payloadSizeMux > 0 {
+ dummyB := testing.B{}
+ _, stop := internal.StartEchoStream(&dummyB, ctx, *server, 0, *chunkCntMux, *payloadSizeMux, nil)
+ defer stop()
+		vlog.Infof("Started background streaming (chunk_count=%d, payload_size=%d)", *chunkCntMux, *payloadSizeMux)
+ }
+
+ dummyB := testing.B{}
+ stats := tbm.NewStats(16)
+
+ now := time.Now()
+ if *chunkCnt == 0 {
+ internal.CallEcho(&dummyB, ctx, *server, *iterations, *payloadSize, stats)
+ } else {
+ internal.CallEchoStream(&dummyB, ctx, *server, *iterations, *chunkCnt, *payloadSize, stats)
+ }
+ elapsed := time.Since(now)
+
+ fmt.Printf("iterations: %d chunk_count: %d payload_size: %d\n", *iterations, *chunkCnt, *payloadSize)
+ fmt.Printf("elapsed time: %v\n", elapsed)
+ stats.Print(os.Stdout)
+}
diff --git a/profiles/internal/ipc/benchmark/benchmark_test.go b/profiles/internal/ipc/benchmark/benchmark_test.go
new file mode 100644
index 0000000..f788349
--- /dev/null
+++ b/profiles/internal/ipc/benchmark/benchmark_test.go
@@ -0,0 +1,122 @@
+package benchmark_test
+
+import (
+ "os"
+ "testing"
+
+ "v.io/v23"
+ "v.io/v23/context"
+
+ "v.io/x/ref/lib/testutil"
+ "v.io/x/ref/lib/testutil/benchmark"
+ "v.io/x/ref/profiles/internal/ipc/benchmark/internal"
+ _ "v.io/x/ref/profiles/static"
+)
+
+var (
+ serverAddr string
+ ctx *context.T
+)
+
+// Benchmarks for non-streaming RPC.
+func runEcho(b *testing.B, payloadSize int) {
+ internal.CallEcho(b, ctx, serverAddr, b.N, payloadSize, benchmark.AddStats(b, 16))
+}
+
+func Benchmark____1B(b *testing.B) { runEcho(b, 1) }
+func Benchmark___10B(b *testing.B) { runEcho(b, 10) }
+func Benchmark__100B(b *testing.B) { runEcho(b, 100) }
+func Benchmark___1KB(b *testing.B) { runEcho(b, 1000) }
+func Benchmark__10KB(b *testing.B) { runEcho(b, 10000) }
+func Benchmark_100KB(b *testing.B) { runEcho(b, 100000) }
+
+// Benchmarks for streaming RPC.
+func runEchoStream(b *testing.B, chunkCnt, payloadSize int) {
+ internal.CallEchoStream(b, ctx, serverAddr, b.N, chunkCnt, payloadSize, benchmark.AddStats(b, 16))
+}
+
+func Benchmark____1_chunk_____1B(b *testing.B) { runEchoStream(b, 1, 1) }
+func Benchmark____1_chunk____10B(b *testing.B) { runEchoStream(b, 1, 10) }
+func Benchmark____1_chunk___100B(b *testing.B) { runEchoStream(b, 1, 100) }
+func Benchmark____1_chunk____1KB(b *testing.B) { runEchoStream(b, 1, 1000) }
+func Benchmark____1_chunk___10KB(b *testing.B) { runEchoStream(b, 1, 10000) }
+func Benchmark____1_chunk__100KB(b *testing.B) { runEchoStream(b, 1, 100000) }
+func Benchmark___10_chunk_____1B(b *testing.B) { runEchoStream(b, 10, 1) }
+func Benchmark___10_chunk____10B(b *testing.B) { runEchoStream(b, 10, 10) }
+func Benchmark___10_chunk___100B(b *testing.B) { runEchoStream(b, 10, 100) }
+func Benchmark___10_chunk____1KB(b *testing.B) { runEchoStream(b, 10, 1000) }
+func Benchmark___10_chunk___10KB(b *testing.B) { runEchoStream(b, 10, 10000) }
+func Benchmark___10_chunk__100KB(b *testing.B) { runEchoStream(b, 10, 100000) }
+func Benchmark__100_chunk_____1B(b *testing.B) { runEchoStream(b, 100, 1) }
+func Benchmark__100_chunk____10B(b *testing.B) { runEchoStream(b, 100, 10) }
+func Benchmark__100_chunk___100B(b *testing.B) { runEchoStream(b, 100, 100) }
+func Benchmark__100_chunk____1KB(b *testing.B) { runEchoStream(b, 100, 1000) }
+func Benchmark__100_chunk___10KB(b *testing.B) { runEchoStream(b, 100, 10000) }
+func Benchmark__100_chunk__100KB(b *testing.B) { runEchoStream(b, 100, 100000) }
+func Benchmark___1K_chunk_____1B(b *testing.B) { runEchoStream(b, 1000, 1) }
+func Benchmark___1K_chunk____10B(b *testing.B) { runEchoStream(b, 1000, 10) }
+func Benchmark___1K_chunk___100B(b *testing.B) { runEchoStream(b, 1000, 100) }
+func Benchmark___1K_chunk____1KB(b *testing.B) { runEchoStream(b, 1000, 1000) }
+func Benchmark___1K_chunk___10KB(b *testing.B) { runEchoStream(b, 1000, 10000) }
+func Benchmark___1K_chunk__100KB(b *testing.B) { runEchoStream(b, 1000, 100000) }
+
+// Benchmarks for per-chunk throughput in streaming RPC.
+func runPerChunk(b *testing.B, payloadSize int) {
+ internal.CallEchoStream(b, ctx, serverAddr, 1, b.N, payloadSize, benchmark.NewStats(1))
+}
+
+func Benchmark__per_chunk____1B(b *testing.B) { runPerChunk(b, 1) }
+func Benchmark__per_chunk___10B(b *testing.B) { runPerChunk(b, 10) }
+func Benchmark__per_chunk__100B(b *testing.B) { runPerChunk(b, 100) }
+func Benchmark__per_chunk___1KB(b *testing.B) { runPerChunk(b, 1000) }
+func Benchmark__per_chunk__10KB(b *testing.B) { runPerChunk(b, 10000) }
+func Benchmark__per_chunk_100KB(b *testing.B) { runPerChunk(b, 100000) }
+
+// Benchmarks for non-streaming RPC while running streaming RPC in the background.
+func runMux(b *testing.B, payloadSize, chunkCntB, payloadSizeB int) {
+ _, stop := internal.StartEchoStream(&testing.B{}, ctx, serverAddr, 0, chunkCntB, payloadSizeB, benchmark.NewStats(1))
+ internal.CallEcho(b, ctx, serverAddr, b.N, payloadSize, benchmark.AddStats(b, 16))
+ stop()
+}
+
+func Benchmark___10B_mux__100_chunks___10B(b *testing.B) { runMux(b, 10, 100, 10) }
+func Benchmark___10B_mux__100_chunks__100B(b *testing.B) { runMux(b, 10, 100, 100) }
+func Benchmark___10B_mux__100_chunks___1KB(b *testing.B) { runMux(b, 10, 100, 1000) }
+func Benchmark___10B_mux__100_chunks__10KB(b *testing.B) { runMux(b, 10, 100, 10000) }
+func Benchmark___10B_mux___1K_chunks___10B(b *testing.B) { runMux(b, 10, 1000, 10) }
+func Benchmark___10B_mux___1K_chunks__100B(b *testing.B) { runMux(b, 10, 1000, 100) }
+func Benchmark___10B_mux___1K_chunks___1KB(b *testing.B) { runMux(b, 10, 1000, 1000) }
+func Benchmark___10B_mux___1K_chunks__10KB(b *testing.B) { runMux(b, 10, 1000, 10000) }
+func Benchmark___1KB_mux__100_chunks___10B(b *testing.B) { runMux(b, 1000, 100, 10) }
+func Benchmark___1KB_mux__100_chunks__100B(b *testing.B) { runMux(b, 1000, 100, 100) }
+func Benchmark___1KB_mux__100_chunks___1KB(b *testing.B) { runMux(b, 1000, 100, 1000) }
+func Benchmark___1KB_mux__100_chunks__10KB(b *testing.B) { runMux(b, 1000, 100, 10000) }
+func Benchmark___1KB_mux___1K_chunks___10B(b *testing.B) { runMux(b, 1000, 1000, 10) }
+func Benchmark___1KB_mux___1K_chunks__100B(b *testing.B) { runMux(b, 1000, 1000, 100) }
+func Benchmark___1KB_mux___1K_chunks___1KB(b *testing.B) { runMux(b, 1000, 1000, 1000) }
+func Benchmark___1KB_mux___1K_chunks__10KB(b *testing.B) { runMux(b, 1000, 1000, 10000) }
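+
+// Reading the benchmark names above: Benchmark___1KB_mux__100_chunks__10KB, for
+// example, measures 1KB Echo calls while a background stream of 100 chunks of
+// 10KB each is running (runMux(b, 1000, 100, 10000)).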
+
+// A single empty test to avoid:
+// testing: warning: no tests to run
+// from showing up when running benchmarks in this package via "go test"
+func TestNoOp(t *testing.T) {}
+
+func TestMain(m *testing.M) {
+ // We do not use defer here since this program will exit at the end of
+ // this function through os.Exit().
+ var shutdown v23.Shutdown
+ ctx, shutdown = testutil.InitForTest()
+
+ var serverStop func()
+ serverAddr, serverStop = internal.StartServer(ctx, v23.GetListenSpec(ctx))
+
+ // Create a VC to exclude the VC setup time from the benchmark.
+ internal.CallEcho(&testing.B{}, ctx, serverAddr, 1, 0, benchmark.NewStats(1))
+
+ r := benchmark.RunTestMain(m)
+
+ serverStop()
+ shutdown()
+
+ os.Exit(r)
+}
diff --git a/profiles/internal/ipc/benchmark/benchmarkd/main.go b/profiles/internal/ipc/benchmark/benchmarkd/main.go
new file mode 100644
index 0000000..b68e17b
--- /dev/null
+++ b/profiles/internal/ipc/benchmark/benchmarkd/main.go
@@ -0,0 +1,21 @@
+// A simple command-line tool to run the benchmark server.
+package main
+
+import (
+ "v.io/v23"
+ "v.io/x/lib/vlog"
+
+ "v.io/x/ref/lib/signals"
+ "v.io/x/ref/profiles/internal/ipc/benchmark/internal"
+ _ "v.io/x/ref/profiles/roaming"
+)
+
+func main() {
+ ctx, shutdown := v23.Init()
+ defer shutdown()
+
+ addr, stop := internal.StartServer(ctx, v23.GetListenSpec(ctx))
+ vlog.Infof("Listening on %s", addr)
+ defer stop()
+ <-signals.ShutdownOnSignals(ctx)
+}
diff --git a/profiles/internal/ipc/benchmark/glob/README.txt b/profiles/internal/ipc/benchmark/glob/README.txt
new file mode 100644
index 0000000..0103cd2
--- /dev/null
+++ b/profiles/internal/ipc/benchmark/glob/README.txt
@@ -0,0 +1,120 @@
+Glob Benchmarks
+
+The benchmarks in this directory attempt to provide some guidance for the amount
+of buffering to use with the channels returned by Glob__ and GlobChildren__.
+
+The first set of benchmarks (BenchmarkChanN) shows the relationship between the
+buffer size and the latency of a very simple channel with one writer and one
+reader doing nothing else.
+
+The second set of benchmarks (BenchmarkGlobN) does the same thing but with a
+Glob__ server and a Glob client. The third set (BenchmarkGlobChildrenN) uses
+GlobChildren__.
+
+As of 2014-12-03, the conclusion is that the queue size has very little impact
+on performance.
+
+The BenchmarkChanN set shows that increasing the queue size improves latency for
+the very simple case, but not for Glob__ or GlobChildren__.
+
+An interesting observation is that all of the benchmarks get slower as the
+number of CPUs increases.
+
+Below are the test results for 1, 2, and 4 CPUs on an HP Z420 workstation with
+2 × 6 CPU cores (Intel(R) Xeon(R) CPU E5-1650 v2 @ 3.50GHz).
+
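+For reference, the core of the simplest benchmark (RunBenchmarkChan in
+glob_test.go) is just one goroutine writing into a buffered channel while the
+benchmark goroutine drains it, roughly:
+
+    ch := make(chan string, bufferSize)
+    go func() {
+        for i := 0; i < b.N; i++ {
+            ch <- fmt.Sprintf("%09d", i)
+        }
+        close(ch)
+    }()
+    for range ch {
+    }
+
+The Glob__ and GlobChildren__ benchmarks wrap the same producer/consumer shape
+in a full RPC, where the per-item cost appears to be dominated by the RPC
+machinery rather than by the channel itself.
+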
+$ ./glob.test -test.bench=. -test.benchtime=5s -test.cpu=1
+BenchmarkChan0 20000000 464 ns/op
+BenchmarkChan1 20000000 585 ns/op
+BenchmarkChan2 20000000 484 ns/op
+BenchmarkChan4 20000000 425 ns/op
+BenchmarkChan8 50000000 396 ns/op
+BenchmarkChan16 50000000 381 ns/op
+BenchmarkChan32 50000000 371 ns/op
+BenchmarkChan64 50000000 365 ns/op
+BenchmarkChan128 50000000 363 ns/op
+BenchmarkChan256 50000000 362 ns/op
+BenchmarkGlob0 500000 35029 ns/op
+BenchmarkGlob1 500000 63536 ns/op
+BenchmarkGlob2 500000 34753 ns/op
+BenchmarkGlob4 500000 26379 ns/op
+BenchmarkGlob8 500000 19293 ns/op
+BenchmarkGlob16 1000000 18149 ns/op
+BenchmarkGlob32 500000 52364 ns/op
+BenchmarkGlob64 500000 83879 ns/op
+BenchmarkGlob128 100000 88448 ns/op
+BenchmarkGlob256 100000 57922 ns/op
+BenchmarkGlobChildren0 100000 118448 ns/op
+BenchmarkGlobChildren1 100000 123274 ns/op
+BenchmarkGlobChildren2 100000 116110 ns/op
+BenchmarkGlobChildren4 100000 134175 ns/op
+BenchmarkGlobChildren8 100000 118776 ns/op
+BenchmarkGlobChildren16 100000 123191 ns/op
+BenchmarkGlobChildren32 100000 132195 ns/op
+BenchmarkGlobChildren64 100000 126004 ns/op
+BenchmarkGlobChildren128 100000 135072 ns/op
+BenchmarkGlobChildren256 100000 127399 ns/op
+
+$ ./glob.test -test.bench=. -test.benchtime=5s -test.cpu=2
+BenchmarkChan0-2 5000000 1595 ns/op
+BenchmarkChan1-2 5000000 1649 ns/op
+BenchmarkChan2-2 10000000 1245 ns/op
+BenchmarkChan4-2 10000000 1299 ns/op
+BenchmarkChan8-2 10000000 982 ns/op
+BenchmarkChan16-2 10000000 929 ns/op
+BenchmarkChan32-2 10000000 916 ns/op
+BenchmarkChan64-2 10000000 903 ns/op
+BenchmarkChan128-2 10000000 907 ns/op
+BenchmarkChan256-2 10000000 914 ns/op
+BenchmarkGlob0-2 500000 61455 ns/op
+BenchmarkGlob1-2 500000 46890 ns/op
+BenchmarkGlob2-2 200000 56462 ns/op
+BenchmarkGlob4-2 500000 22783 ns/op
+BenchmarkGlob8-2 200000 64783 ns/op
+BenchmarkGlob16-2 1000000 68119 ns/op
+BenchmarkGlob32-2 200000 78611 ns/op
+BenchmarkGlob64-2 500000 82180 ns/op
+BenchmarkGlob128-2 1000000 81548 ns/op
+BenchmarkGlob256-2 100000 88278 ns/op
+BenchmarkGlobChildren0-2 100000 83188 ns/op
+BenchmarkGlobChildren1-2 100000 81751 ns/op
+BenchmarkGlobChildren2-2 100000 81896 ns/op
+BenchmarkGlobChildren4-2 100000 81857 ns/op
+BenchmarkGlobChildren8-2 100000 81531 ns/op
+BenchmarkGlobChildren16-2 100000 89915 ns/op
+BenchmarkGlobChildren32-2 100000 81112 ns/op
+BenchmarkGlobChildren64-2 100000 80997 ns/op
+BenchmarkGlobChildren128-2 100000 81350 ns/op
+BenchmarkGlobChildren256-2 100000 81344 ns/op
+
+$ ./glob.test -test.bench=. -test.benchtime=5s -test.cpu=4
+BenchmarkChan0-4 5000000 2012 ns/op
+BenchmarkChan1-4 5000000 3149 ns/op
+BenchmarkChan2-4 5000000 1839 ns/op
+BenchmarkChan4-4 10000000 957 ns/op
+BenchmarkChan8-4 20000000 660 ns/op
+BenchmarkChan16-4 20000000 523 ns/op
+BenchmarkChan32-4 20000000 507 ns/op
+BenchmarkChan64-4 20000000 509 ns/op
+BenchmarkChan128-4 20000000 507 ns/op
+BenchmarkChan256-4 20000000 511 ns/op
+BenchmarkGlob0-4 100000 103269 ns/op
+BenchmarkGlob1-4 100000 101222 ns/op
+BenchmarkGlob2-4 100000 102049 ns/op
+BenchmarkGlob4-4 100000 102763 ns/op
+BenchmarkGlob8-4 100000 101939 ns/op
+BenchmarkGlob16-4 100000 102989 ns/op
+BenchmarkGlob32-4 100000 103898 ns/op
+BenchmarkGlob64-4 100000 102838 ns/op
+BenchmarkGlob128-4 100000 101532 ns/op
+BenchmarkGlob256-4 100000 101059 ns/op
+BenchmarkGlobChildren0-4 100000 106617 ns/op
+BenchmarkGlobChildren1-4 100000 102576 ns/op
+BenchmarkGlobChildren2-4 100000 106313 ns/op
+BenchmarkGlobChildren4-4 100000 102774 ns/op
+BenchmarkGlobChildren8-4 100000 102886 ns/op
+BenchmarkGlobChildren16-4 100000 106771 ns/op
+BenchmarkGlobChildren32-4 100000 103309 ns/op
+BenchmarkGlobChildren64-4 100000 105112 ns/op
+BenchmarkGlobChildren128-4 100000 102295 ns/op
+BenchmarkGlobChildren256-4 100000 102951 ns/op
diff --git a/profiles/internal/ipc/benchmark/glob/doc.go b/profiles/internal/ipc/benchmark/glob/doc.go
new file mode 100644
index 0000000..62e4116
--- /dev/null
+++ b/profiles/internal/ipc/benchmark/glob/doc.go
@@ -0,0 +1,4 @@
+package glob
+
+// This file exists only to prevent build failures caused by having a
+// test-only package.
diff --git a/profiles/internal/ipc/benchmark/glob/glob_test.go b/profiles/internal/ipc/benchmark/glob/glob_test.go
new file mode 100644
index 0000000..1889c0c
--- /dev/null
+++ b/profiles/internal/ipc/benchmark/glob/glob_test.go
@@ -0,0 +1,254 @@
+package glob_test
+
+import (
+ "fmt"
+ "testing"
+
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/naming"
+ "v.io/v23/security"
+
+ "v.io/x/ref/lib/testutil"
+ _ "v.io/x/ref/profiles"
+)
+
+func TestNothing(t *testing.T) {
+}
+
+func RunBenchmarkChan(b *testing.B, bufferSize int) {
+ ch := make(chan string, bufferSize)
+ go func() {
+ for i := 0; i < b.N; i++ {
+ ch <- fmt.Sprintf("%09d", i)
+ }
+ close(ch)
+ }()
+	for range ch {
+ continue
+ }
+}
+
+func BenchmarkChan0(b *testing.B) {
+ RunBenchmarkChan(b, 0)
+}
+
+func BenchmarkChan1(b *testing.B) {
+ RunBenchmarkChan(b, 1)
+}
+
+func BenchmarkChan2(b *testing.B) {
+ RunBenchmarkChan(b, 2)
+}
+
+func BenchmarkChan4(b *testing.B) {
+ RunBenchmarkChan(b, 4)
+}
+
+func BenchmarkChan8(b *testing.B) {
+ RunBenchmarkChan(b, 8)
+}
+
+func BenchmarkChan16(b *testing.B) {
+ RunBenchmarkChan(b, 16)
+}
+
+func BenchmarkChan32(b *testing.B) {
+ RunBenchmarkChan(b, 32)
+}
+
+func BenchmarkChan64(b *testing.B) {
+ RunBenchmarkChan(b, 64)
+}
+
+func BenchmarkChan128(b *testing.B) {
+ RunBenchmarkChan(b, 128)
+}
+
+func BenchmarkChan256(b *testing.B) {
+ RunBenchmarkChan(b, 256)
+}
+
+type disp struct {
+ obj interface{}
+}
+
+func (d *disp) Lookup(suffix string) (interface{}, security.Authorizer, error) {
+ return d.obj, nil, nil
+}
+
+func startServer(b *testing.B, ctx *context.T, obj interface{}) (string, func(), error) {
+ server, err := v23.NewServer(ctx)
+ if err != nil {
+ return "", nil, fmt.Errorf("failed to start server: %v", err)
+ }
+ endpoints, err := server.Listen(v23.GetListenSpec(ctx))
+ if err != nil {
+ return "", nil, fmt.Errorf("failed to listen: %v", err)
+ }
+ if err := server.ServeDispatcher("", &disp{obj}); err != nil {
+ return "", nil, err
+ }
+ return endpoints[0].Name(), func() { server.Stop() }, nil
+}
+
+type globObject struct {
+ b *testing.B
+ bufferSize int
+}
+
+func (o *globObject) Glob__(ctx ipc.ServerCall, pattern string) (<-chan naming.VDLGlobReply, error) {
+ if pattern != "*" {
+ panic("this benchmark only works with pattern='*'")
+ }
+ ch := make(chan naming.VDLGlobReply, o.bufferSize)
+ go func() {
+ for i := 0; i < o.b.N; i++ {
+ name := fmt.Sprintf("%09d", i)
+ ch <- naming.VDLGlobReplyEntry{naming.VDLMountEntry{Name: name}}
+ }
+ close(ch)
+ }()
+ return ch, nil
+}
+
+type globChildrenObject struct {
+ b *testing.B
+ bufferSize int
+}
+
+func (o *globChildrenObject) GlobChildren__(ctx ipc.ServerCall) (<-chan string, error) {
+ if ctx.Suffix() != "" {
+ return nil, nil
+ }
+ ch := make(chan string, o.bufferSize)
+ go func() {
+ for i := 0; i < o.b.N; i++ {
+ ch <- fmt.Sprintf("%09d", i)
+ }
+ close(ch)
+ }()
+ return ch, nil
+}
+
+func globClient(b *testing.B, ctx *context.T, name string) (int, error) {
+ client := v23.GetClient(ctx)
+ call, err := client.StartCall(ctx, name, ipc.GlobMethod, []interface{}{"*"})
+ if err != nil {
+ return 0, err
+ }
+ var me naming.VDLMountEntry
+ b.ResetTimer()
+ count := 0
+ for {
+ if err := call.Recv(&me); err != nil {
+ break
+ }
+ count++
+ }
+ b.StopTimer()
+ if err := call.Finish(); err != nil {
+ return 0, err
+ }
+ return count, nil
+}
+
+func RunBenchmarkGlob(b *testing.B, obj interface{}) {
+ ctx, shutdown := testutil.InitForTest()
+ defer shutdown()
+
+ addr, stop, err := startServer(b, ctx, obj)
+ if err != nil {
+ b.Fatalf("startServer failed: %v", err)
+ }
+ defer stop()
+
+ count, err := globClient(b, ctx, addr)
+ if err != nil {
+ b.Fatalf("globClient failed: %v", err)
+ }
+ if count != b.N {
+ b.Fatalf("unexpected number of results: got %d, expected %d", count, b.N)
+ }
+}
+
+func BenchmarkGlob0(b *testing.B) {
+ RunBenchmarkGlob(b, &globObject{b, 0})
+}
+
+func BenchmarkGlob1(b *testing.B) {
+ RunBenchmarkGlob(b, &globObject{b, 1})
+}
+
+func BenchmarkGlob2(b *testing.B) {
+ RunBenchmarkGlob(b, &globObject{b, 2})
+}
+
+func BenchmarkGlob4(b *testing.B) {
+ RunBenchmarkGlob(b, &globObject{b, 4})
+}
+
+func BenchmarkGlob8(b *testing.B) {
+ RunBenchmarkGlob(b, &globObject{b, 8})
+}
+
+func BenchmarkGlob16(b *testing.B) {
+ RunBenchmarkGlob(b, &globObject{b, 16})
+}
+
+func BenchmarkGlob32(b *testing.B) {
+ RunBenchmarkGlob(b, &globObject{b, 32})
+}
+
+func BenchmarkGlob64(b *testing.B) {
+ RunBenchmarkGlob(b, &globObject{b, 64})
+}
+
+func BenchmarkGlob128(b *testing.B) {
+ RunBenchmarkGlob(b, &globObject{b, 128})
+}
+
+func BenchmarkGlob256(b *testing.B) {
+ RunBenchmarkGlob(b, &globObject{b, 256})
+}
+
+func BenchmarkGlobChildren0(b *testing.B) {
+ RunBenchmarkGlob(b, &globChildrenObject{b, 0})
+}
+
+func BenchmarkGlobChildren1(b *testing.B) {
+ RunBenchmarkGlob(b, &globChildrenObject{b, 1})
+}
+
+func BenchmarkGlobChildren2(b *testing.B) {
+ RunBenchmarkGlob(b, &globChildrenObject{b, 2})
+}
+
+func BenchmarkGlobChildren4(b *testing.B) {
+ RunBenchmarkGlob(b, &globChildrenObject{b, 4})
+}
+
+func BenchmarkGlobChildren8(b *testing.B) {
+ RunBenchmarkGlob(b, &globChildrenObject{b, 8})
+}
+
+func BenchmarkGlobChildren16(b *testing.B) {
+ RunBenchmarkGlob(b, &globChildrenObject{b, 16})
+}
+
+func BenchmarkGlobChildren32(b *testing.B) {
+ RunBenchmarkGlob(b, &globChildrenObject{b, 32})
+}
+
+func BenchmarkGlobChildren64(b *testing.B) {
+ RunBenchmarkGlob(b, &globChildrenObject{b, 64})
+}
+
+func BenchmarkGlobChildren128(b *testing.B) {
+ RunBenchmarkGlob(b, &globChildrenObject{b, 128})
+}
+
+func BenchmarkGlobChildren256(b *testing.B) {
+ RunBenchmarkGlob(b, &globChildrenObject{b, 256})
+}
diff --git a/profiles/internal/ipc/benchmark/internal/client.go b/profiles/internal/ipc/benchmark/internal/client.go
new file mode 100644
index 0000000..f8534b2
--- /dev/null
+++ b/profiles/internal/ipc/benchmark/internal/client.go
@@ -0,0 +1,148 @@
+package internal
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+ "time"
+
+ "v.io/v23/context"
+ "v.io/x/lib/vlog"
+
+ tbm "v.io/x/ref/lib/testutil/benchmark"
+ "v.io/x/ref/profiles/internal/ipc/benchmark"
+)
+
+// CallEcho calls the 'Echo' method 'iterations' times with the given payload size.
+func CallEcho(b *testing.B, ctx *context.T, address string, iterations, payloadSize int, stats *tbm.Stats) {
+ stub := benchmark.BenchmarkClient(address)
+ payload := make([]byte, payloadSize)
+ for i := range payload {
+ payload[i] = byte(i & 0xff)
+ }
+
+ b.SetBytes(int64(payloadSize) * 2) // 2 for round trip of each payload.
+ b.ResetTimer() // Exclude setup time from measurement.
+
+ for i := 0; i < iterations; i++ {
+ b.StartTimer()
+ start := time.Now()
+
+ r, err := stub.Echo(ctx, payload)
+
+ elapsed := time.Since(start)
+ b.StopTimer()
+
+ if err != nil {
+ vlog.Fatalf("Echo failed: %v", err)
+ }
+ if !bytes.Equal(r, payload) {
+ vlog.Fatalf("Echo returned %v, but expected %v", r, payload)
+ }
+
+ stats.Add(elapsed)
+ }
+}
+
+// CallEchoStream calls the 'EchoStream' method 'iterations' times. Each iteration
+// sends 'chunkCnt' chunks on the stream and receives the same number of chunks back.
+// Each chunk has the given payload size.
+func CallEchoStream(b *testing.B, ctx *context.T, address string, iterations, chunkCnt, payloadSize int, stats *tbm.Stats) {
+ done, _ := StartEchoStream(b, ctx, address, iterations, chunkCnt, payloadSize, stats)
+ <-done
+}
+
+// StartEchoStream starts calling the 'EchoStream' method 'iterations' times. It does
+// not block; it returns a channel that receives the number of completed iterations
+// when the streaming is done, along with a callback function to stop the streaming.
+// Each iteration sends 'chunkCnt' chunks on the stream and receives the same number
+// of chunks back. Each chunk has the given payload size. Zero 'iterations' means
+// unlimited.
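+//
+// A minimal sketch of typical use (mirroring how the benchmark command and
+// CallEchoStream above use it; ctx, addr and stats stand in for caller-supplied
+// values):
+//
+//	_, stop := StartEchoStream(&testing.B{}, ctx, addr, 0, chunkCnt, payloadSize, stats)
+//	defer stop() // stop the unlimited stream and wait for it to finish
+//	// ... run foreground calls, e.g. CallEcho, while the stream runs ...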
+func StartEchoStream(b *testing.B, ctx *context.T, address string, iterations, chunkCnt, payloadSize int, stats *tbm.Stats) (<-chan int, func()) {
+ stub := benchmark.BenchmarkClient(address)
+ payload := make([]byte, payloadSize)
+ for i := range payload {
+ payload[i] = byte(i & 0xff)
+ }
+
+ stop := make(chan struct{})
+ stopped := func() bool {
+ select {
+ case <-stop:
+ return true
+ default:
+ return false
+ }
+ }
+ done := make(chan int, 1)
+
+ if b.N > 0 {
+ // 2 for round trip of each payload.
+ b.SetBytes(int64((iterations*chunkCnt/b.N)*payloadSize) * 2)
+ }
+ b.ResetTimer() // Exclude setup time from measurement.
+
+ go func() {
+ defer close(done)
+
+ n := 0
+ for ; !stopped() && (iterations == 0 || n < iterations); n++ {
+ b.StartTimer()
+ start := time.Now()
+
+ stream, err := stub.EchoStream(ctx)
+ if err != nil {
+ vlog.Fatalf("EchoStream failed: %v", err)
+ }
+
+ rDone := make(chan error, 1)
+ go func() {
+ defer close(rDone)
+
+ rStream := stream.RecvStream()
+ i := 0
+ for ; rStream.Advance(); i++ {
+ r := rStream.Value()
+ if !bytes.Equal(r, payload) {
+ rDone <- fmt.Errorf("EchoStream returned %v, but expected %v", r, payload)
+ return
+ }
+ }
+ if i != chunkCnt {
+ rDone <- fmt.Errorf("EchoStream returned %d chunks, but expected %d", i, chunkCnt)
+ return
+ }
+ rDone <- rStream.Err()
+ }()
+
+ sStream := stream.SendStream()
+ for i := 0; i < chunkCnt; i++ {
+ if err = sStream.Send(payload); err != nil {
+ vlog.Fatalf("EchoStream Send failed: %v", err)
+ }
+ }
+ if err = sStream.Close(); err != nil {
+ vlog.Fatalf("EchoStream Close failed: %v", err)
+ }
+
+ if err = <-rDone; err != nil {
+ vlog.Fatalf("%v", err)
+ }
+
+ if err = stream.Finish(); err != nil {
+ vlog.Fatalf("Finish failed: %v", err)
+ }
+
+ elapsed := time.Since(start)
+ b.StopTimer()
+
+ stats.Add(elapsed)
+ }
+
+ done <- n
+ }()
+
+ return done, func() {
+ close(stop)
+ <-done
+ }
+}
diff --git a/profiles/internal/ipc/benchmark/internal/server.go b/profiles/internal/ipc/benchmark/internal/server.go
new file mode 100644
index 0000000..cc4731b
--- /dev/null
+++ b/profiles/internal/ipc/benchmark/internal/server.go
@@ -0,0 +1,54 @@
+package internal
+
+import (
+ "v.io/x/ref/security/flag"
+
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/x/lib/vlog"
+
+ "v.io/x/ref/profiles/internal/ipc/benchmark"
+)
+
+type impl struct {
+}
+
+func (i *impl) Echo(ctx ipc.ServerCall, payload []byte) ([]byte, error) {
+ return payload, nil
+}
+
+func (i *impl) EchoStream(ctx benchmark.BenchmarkEchoStreamContext) error {
+ rStream := ctx.RecvStream()
+ sStream := ctx.SendStream()
+ for rStream.Advance() {
+		if err := sStream.Send(rStream.Value()); err != nil {
+			return err
+		}
+ }
+ if err := rStream.Err(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// StartServer starts a server that implements the Benchmark service. The
+// server listens on the given listen spec, and returns the name of the
+// server's endpoint along with a callback function to stop the server.
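+//
+// A sketch of typical use (this is how benchmarkd/main.go uses it):
+//
+//	addr, stop := StartServer(ctx, v23.GetListenSpec(ctx))
+//	defer stop()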
+func StartServer(ctx *context.T, listenSpec ipc.ListenSpec) (string, func()) {
+ server, err := v23.NewServer(ctx)
+ if err != nil {
+ vlog.Fatalf("NewServer failed: %v", err)
+ }
+ eps, err := server.Listen(listenSpec)
+ if err != nil {
+ vlog.Fatalf("Listen failed: %v", err)
+ }
+
+ if err := server.Serve("", benchmark.BenchmarkServer(&impl{}), flag.NewAuthorizerOrDie()); err != nil {
+ vlog.Fatalf("Serve failed: %v", err)
+ }
+ return eps[0].Name(), func() {
+ if err := server.Stop(); err != nil {
+ vlog.Fatalf("Stop() failed: %v", err)
+ }
+ }
+}
diff --git a/profiles/internal/ipc/blessings_cache.go b/profiles/internal/ipc/blessings_cache.go
new file mode 100644
index 0000000..f0baa06
--- /dev/null
+++ b/profiles/internal/ipc/blessings_cache.go
@@ -0,0 +1,167 @@
+package ipc
+
+import (
+ "crypto/sha256"
+ "fmt"
+ "reflect"
+ "sync"
+
+ "v.io/v23/ipc"
+ "v.io/v23/security"
+ "v.io/x/ref/profiles/internal/ipc/stream"
+)
+
+// clientEncodeBlessings gets or inserts the blessings into the cache.
+func clientEncodeBlessings(cache stream.VCDataCache, blessings security.Blessings) ipc.BlessingsRequest {
+ blessingsCacheAny := cache.GetOrInsert(clientBlessingsCacheKey{}, newClientBlessingsCache)
+ blessingsCache := blessingsCacheAny.(*clientBlessingsCache)
+ return blessingsCache.getOrInsert(blessings)
+}
+
+// clientAckBlessings records that the server has updated its cache to include blessings.
+// This means that subsequent RPCs from the client with these blessings can send only a cache key.
+func clientAckBlessings(cache stream.VCDataCache, blessings security.Blessings) {
+ blessingsCacheAny := cache.GetOrInsert(clientBlessingsCacheKey{}, newClientBlessingsCache)
+ blessingsCache := blessingsCacheAny.(*clientBlessingsCache)
+ blessingsCache.acknowledge(blessings)
+}
+
+// serverDecodeBlessings inserts the key and blessings into the cache, or gets the blessings if
+// only the key is provided in req.
+func serverDecodeBlessings(cache stream.VCDataCache, req ipc.BlessingsRequest, stats *ipcStats) (security.Blessings, error) {
+ blessingsCacheAny := cache.GetOrInsert(serverBlessingsCacheKey{}, newServerBlessingsCache)
+ blessingsCache := blessingsCacheAny.(*serverBlessingsCache)
+ return blessingsCache.getOrInsert(req, stats)
+}
+
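+// Pieced together from the three functions above, the caching protocol is
+// roughly: the client assigns a cache key to a set of blessings and keeps
+// sending both the key and the blessings (clientEncodeBlessings) until the
+// server confirms receipt; the server stores the blessings under that key, or
+// looks them up when only the key arrives (serverDecodeBlessings); once the
+// confirmation is seen, the client marks the key acknowledged
+// (clientAckBlessings) and later requests carry only the key.
+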
+// IMPLEMENTATION DETAILS BELOW
+
+// clientBlessingsCache is a thread-safe map from blessings to cache id.
+type clientBlessingsCache struct {
+ sync.RWMutex
+ m map[[sha256.Size]byte]clientCacheValue
+ curId uint64
+}
+
+type clientCacheValue struct {
+ id uint64
+ // ack is set to true once the server has confirmed receipt of the cache id.
+ // Clients that insert into the cache when ack is false must send both the id
+ // and the blessings.
+ ack bool
+}
+
+// clientBlessingsCacheKey is the key used to retrieve the clientBlessingsCache from the VCDataCache.
+type clientBlessingsCacheKey struct{}
+
+func newClientBlessingsCache() interface{} {
+ return &clientBlessingsCache{m: make(map[[sha256.Size]byte]clientCacheValue)}
+}
+
+func getBlessingsHashKey(blessings security.Blessings) (key [sha256.Size]byte) {
+ h := sha256.New()
+ for _, chain := range security.MarshalBlessings(blessings).CertificateChains {
+ if len(chain) == 0 {
+ continue
+ }
+ cert := chain[len(chain)-1]
+ s := sha256.Sum256(cert.Signature.R)
+ h.Write(s[:])
+ s = sha256.Sum256(cert.Signature.S)
+ h.Write(s[:])
+ }
+ copy(key[:], h.Sum(nil))
+ return
+}
+
+func (c *clientBlessingsCache) getOrInsert(blessings security.Blessings) ipc.BlessingsRequest {
+ key := getBlessingsHashKey(blessings)
+ c.RLock()
+ val, exists := c.m[key]
+ c.RUnlock()
+ if exists {
+ return c.makeBlessingsRequest(val, blessings)
+ }
+ // If the val doesn't exist we must create a new id, update the cache, and send the id and blessings.
+ c.Lock()
+	// We must check that the val wasn't inserted while we were swapping the read lock for the write lock.
+ val, exists = c.m[key]
+ if !exists {
+ val = clientCacheValue{id: c.nextIdLocked()}
+ c.m[key] = val
+ }
+ c.Unlock()
+ return c.makeBlessingsRequest(val, blessings)
+}
+
+func (c *clientBlessingsCache) acknowledge(blessings security.Blessings) {
+ key := getBlessingsHashKey(blessings)
+ c.Lock()
+ val := c.m[key]
+ val.ack = true
+ c.m[key] = val
+ c.Unlock()
+}
+
+func (c *clientBlessingsCache) makeBlessingsRequest(val clientCacheValue, blessings security.Blessings) ipc.BlessingsRequest {
+ if val.ack {
+		// When the value is acknowledged, send only the key, since the server has confirmed that it knows the key.
+		return ipc.BlessingsRequest{Key: val.id}
+	}
+	// Otherwise we still need to send both the key and the blessings, but we must ensure that we send the same key.
+	return ipc.BlessingsRequest{Key: val.id, Blessings: &blessings}
+}
+
+// nextIdLocked creates a new id for inserting blessings. It must be called after acquiring a writer lock.
+func (c *clientBlessingsCache) nextIdLocked() uint64 {
+ c.curId++
+ return c.curId
+}
+
+// serverBlessingsCache is a thread-safe map from cache key to blessings.
+type serverBlessingsCache struct {
+ sync.RWMutex
+ m map[uint64]security.Blessings
+}
+
+// serverBlessingsCacheKey is the key used to retrieve the serverBlessingsCache from the VCDataCache.
+type serverBlessingsCacheKey struct{}
+
+func newServerBlessingsCache() interface{} {
+ return &serverBlessingsCache{m: make(map[uint64]security.Blessings)}
+}
+
+func (c *serverBlessingsCache) getOrInsert(req ipc.BlessingsRequest, stats *ipcStats) (security.Blessings, error) {
+ // In the case that the key sent is 0, we are running in VCSecurityNone
+ // and should return the zero value.
+ if req.Key == 0 {
+ return security.Blessings{}, nil
+ }
+ if req.Blessings == nil {
+ // Fastpath, lookup based on the key.
+ c.RLock()
+ cached, exists := c.m[req.Key]
+ c.RUnlock()
+ if !exists {
+ return security.Blessings{}, fmt.Errorf("ipc: key was not in the cache")
+ }
+ stats.recordBlessingCache(true)
+ return cached, nil
+ }
+ // Always count the slow path as a cache miss since we do not get the benefit of sending only the cache key.
+ stats.recordBlessingCache(false)
+ // Slowpath, might need to update the cache, or check that the received blessings are
+ // the same as what's in the cache.
+ recv := *req.Blessings
+ c.Lock()
+ defer c.Unlock()
+ if cached, exists := c.m[req.Key]; exists {
+ // TODO(suharshs): Replace this reflect.DeepEqual() with a less expensive check.
+ if !reflect.DeepEqual(cached, recv) {
+ return security.Blessings{}, fmt.Errorf("client sent invalid Blessings")
+ }
+ return cached, nil
+ }
+ c.m[req.Key] = recv
+ return recv, nil
+}
diff --git a/profiles/internal/ipc/cancel_test.go b/profiles/internal/ipc/cancel_test.go
new file mode 100644
index 0000000..45cc811
--- /dev/null
+++ b/profiles/internal/ipc/cancel_test.go
@@ -0,0 +1,123 @@
+package ipc
+
+import (
+ "testing"
+
+ "v.io/x/ref/profiles/internal/ipc/stream/manager"
+ tnaming "v.io/x/ref/profiles/internal/testing/mocks/naming"
+
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/naming"
+ "v.io/v23/naming/ns"
+ "v.io/v23/security"
+ "v.io/x/lib/vlog"
+ "v.io/x/ref/profiles/internal/ipc/stream"
+)
+
+type fakeAuthorizer int
+
+func (fakeAuthorizer) Authorize(security.Call) error {
+ return nil
+}
+
+type canceld struct {
+ sm stream.Manager
+ ns ns.Namespace
+ name string
+ child string
+ started chan struct{}
+ canceled chan struct{}
+ stop func() error
+}
+
+func (c *canceld) Run(ctx ipc.StreamServerCall) error {
+ close(c.started)
+
+ client, err := InternalNewClient(c.sm, c.ns)
+ if err != nil {
+ vlog.Error(err)
+ return err
+ }
+
+ if c.child != "" {
+ if _, err = client.StartCall(ctx.Context(), c.child, "Run", []interface{}{}); err != nil {
+ vlog.Error(err)
+ return err
+ }
+ }
+
+ vlog.Info(c.name, " waiting for cancellation")
+ <-ctx.Context().Done()
+ vlog.Info(c.name, " canceled")
+ close(c.canceled)
+ return nil
+}
+
+func makeCanceld(ns ns.Namespace, name, child string) (*canceld, error) {
+ sm := manager.InternalNew(naming.FixedRoutingID(0x111111111))
+ ctx := testContext()
+ s, err := testInternalNewServer(ctx, sm, ns)
+ if err != nil {
+ return nil, err
+ }
+ if _, err := s.Listen(listenSpec); err != nil {
+ return nil, err
+ }
+
+ c := &canceld{
+ sm: sm,
+ ns: ns,
+ name: name,
+ child: child,
+ started: make(chan struct{}, 0),
+ canceled: make(chan struct{}, 0),
+ stop: s.Stop,
+ }
+
+ if err := s.Serve(name, c, fakeAuthorizer(0)); err != nil {
+ return nil, err
+ }
+
+ return c, nil
+}
+
+// TestCancellationPropagation tests that cancellation propagates along an
+// RPC call chain without user intervention.
+func TestCancellationPropagation(t *testing.T) {
+ sm := manager.InternalNew(naming.FixedRoutingID(0x555555555))
+ ns := tnaming.NewSimpleNamespace()
+
+ client, err := InternalNewClient(sm, ns)
+ if err != nil {
+ t.Error(err)
+ }
+
+ c1, err := makeCanceld(ns, "c1", "c2")
+ if err != nil {
+ t.Fatal("Can't start server:", err)
+ }
+ defer c1.stop()
+
+ c2, err := makeCanceld(ns, "c2", "")
+ if err != nil {
+ t.Fatal("Can't start server:", err)
+ }
+ defer c2.stop()
+
+ ctx, cancel := context.WithCancel(testContext())
+ _, err = client.StartCall(ctx, "c1", "Run", []interface{}{})
+ if err != nil {
+ t.Fatal("can't call: ", err)
+ }
+
+ <-c1.started
+ <-c2.started
+
+ vlog.Info("cancelling initial call")
+ cancel()
+
+ vlog.Info("waiting for children to be canceled")
+ <-c1.canceled
+ <-c2.canceled
+}
diff --git a/profiles/internal/ipc/client.go b/profiles/internal/ipc/client.go
new file mode 100644
index 0000000..38bc2b5
--- /dev/null
+++ b/profiles/internal/ipc/client.go
@@ -0,0 +1,933 @@
+package ipc
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "math/rand"
+ "net"
+ "reflect"
+ "strings"
+ "sync"
+ "time"
+
+ "v.io/v23/context"
+ "v.io/v23/i18n"
+ "v.io/v23/ipc"
+ "v.io/v23/naming"
+ "v.io/v23/naming/ns"
+ "v.io/v23/security"
+ "v.io/v23/vdl"
+ vtime "v.io/v23/vdlroot/time"
+ "v.io/v23/verror"
+ "v.io/v23/vom"
+ "v.io/v23/vtrace"
+ "v.io/x/lib/vlog"
+ "v.io/x/ref/profiles/internal/ipc/stream"
+
+ "v.io/x/ref/profiles/internal/ipc/stream/vc"
+ "v.io/x/ref/profiles/internal/ipc/version"
+ inaming "v.io/x/ref/profiles/internal/naming"
+ ivtrace "v.io/x/ref/profiles/internal/vtrace"
+)
+
+const pkgPath = "v.io/x/ref/profiles/internal/ipc"
+
+// TODO(cnicolaou): for local errors, automatically assign a new 'id',
+// don't use pkgPath etc. Can then move them into being defined on each line
+// and not here.
+var (
+ // Local errs that are used to provide details to the public ones.
+ errClientCloseAlreadyCalled = verror.Register(pkgPath+".closeAlreadyCalled", verror.NoRetry,
+ "ipc.Client.Close has already been called")
+
+ errClientFinishAlreadyCalled = verror.Register(pkgPath+".finishAlreadyCalled", verror.NoRetry, "ipc.ClientCall.Finish has already been called")
+
+ errNonRootedName = verror.Register(pkgPath+".nonRootedName", verror.NoRetry, "{3} does not appear to contain an address")
+
+ errInvalidEndpoint = verror.Register(pkgPath+".invalidEndpoint", verror.RetryRefetch, "{3} is an invalid endpoint")
+
+	errIncompatibleEndpoint = verror.Register(pkgPath+".incompatibleEndpoint", verror.RetryRefetch, "{3} is an incompatible endpoint")
+
+ errNewServerAuthorizer = verror.Register(pkgPath+".newServerAuthorizer", verror.NoRetry, "failed to create server authorizer{:3}")
+
+ errNotTrusted = verror.Register(pkgPath+".notTrusted", verror.NoRetry, "name {3} not trusted using blessings {4}{:5}")
+
+ errAuthError = verror.Register(pkgPath+".authError", verror.RetryRefetch, "authentication error from server {3}{:4}")
+
+ errSystemRetry = verror.Register(pkgPath+".sysErrorRetryConnection", verror.RetryConnection, "{:3:}")
+
+ errVomEncoder = verror.Register(pkgPath+".vomEncoder", verror.NoRetry, "failed to create vom encoder {:3}")
+ errVomDecoder = verror.Register(pkgPath+".vomDecoder", verror.NoRetry, "failed to create vom decoder {:3}")
+
+ errRequestEncoding = verror.Register(pkgPath+".requestEncoding", verror.NoRetry, "failed to encode request {3}{:4}")
+
+ errDischargeEncoding = verror.Register(pkgPath+".dischargeEncoding", verror.NoRetry, "failed to encode discharges {:3}")
+
+ errBlessingEncoding = verror.Register(pkgPath+".blessingEncoding", verror.NoRetry, "failed to encode blessing {3}{:4}")
+
+ errArgEncoding = verror.Register(pkgPath+".argEncoding", verror.NoRetry, "failed to encode arg #{3}{:4:}")
+
+ errMismatchedResults = verror.Register(pkgPath+".mismatchedResults", verror.NoRetry, "got {3} results, but want {4}")
+
+ errResultDecoding = verror.Register(pkgPath+".resultDecoding", verror.NoRetry, "failed to decode result #{3}{:4}")
+
+ errResponseDecoding = verror.Register(pkgPath+".responseDecoding", verror.NoRetry, "failed to decode response{:3}")
+
+	errRemainingStreamResults = verror.Register(pkgPath+".remainingStreamResults", verror.NoRetry, "stream closed with remaining stream results")
+
+ errNoBlessingsForPeer = verror.Register(pkgPath+".noBlessingsForPeer", verror.NoRetry, "no blessings tagged for peer {3}{:4}")
+
+ errDefaultAuthDenied = verror.Register(pkgPath+".defaultAuthDenied", verror.NoRetry, "default authorization precludes talking to server with blessings{:3}")
+
+ errBlessingGrant = verror.Register(pkgPath+".blessingGrantFailed", verror.NoRetry, "failed to grant blessing to server with blessings {3}{:4}")
+
+ errBlessingAdd = verror.Register(pkgPath+".blessingAddFailed", verror.NoRetry, "failed to add blessing granted to server {3}{:4}")
+)
+
+type client struct {
+ streamMgr stream.Manager
+ ns ns.Namespace
+ vcOpts []stream.VCOpt // vc opts passed to dial
+ preferredProtocols []string
+
+	// We cache the IP networks on the device since it is not cheap to read the
+	// network interfaces via OS syscalls.
+	// TODO(jhahn): Add monitoring of network interface changes.
+ ipNets []*net.IPNet
+
+ // We support concurrent calls to StartCall and Close, so we must protect the
+ // vcMap. Everything else is initialized upon client construction, and safe
+ // to use concurrently.
+ vcMapMu sync.Mutex
+ vcMap map[vcMapKey]*vcInfo
+
+ dc vc.DischargeClient
+}
+
+var _ ipc.Client = (*client)(nil)
+
+type vcInfo struct {
+ vc stream.VC
+ remoteEP naming.Endpoint
+}
+
+type vcMapKey struct {
+ endpoint string
+ encrypted bool
+}
+
+func InternalNewClient(streamMgr stream.Manager, ns ns.Namespace, opts ...ipc.ClientOpt) (ipc.Client, error) {
+ c := &client{
+ streamMgr: streamMgr,
+ ns: ns,
+ ipNets: ipNetworks(),
+ vcMap: make(map[vcMapKey]*vcInfo),
+ }
+ c.dc = InternalNewDischargeClient(nil, c)
+ for _, opt := range opts {
+ // Collect all client opts that are also vc opts.
+ switch v := opt.(type) {
+ case stream.VCOpt:
+ c.vcOpts = append(c.vcOpts, v)
+ case PreferredProtocols:
+ c.preferredProtocols = v
+ }
+ }
+
+ return c, nil
+}
+
+func (c *client) createFlow(ctx *context.T, ep naming.Endpoint, vcOpts []stream.VCOpt) (stream.Flow, error) {
+ c.vcMapMu.Lock()
+ defer c.vcMapMu.Unlock()
+ if c.vcMap == nil {
+ return nil, verror.New(errClientCloseAlreadyCalled, ctx)
+ }
+ vcKey := vcMapKey{ep.String(), vcEncrypted(vcOpts)}
+ if vcinfo := c.vcMap[vcKey]; vcinfo != nil {
+ if flow, err := vcinfo.vc.Connect(); err == nil {
+ return flow, nil
+ }
+ // If the vc fails to establish a new flow, we assume it's
+ // broken, remove it from the map, and proceed to establishing
+ // a new vc.
+ // TODO(caprita): Should we distinguish errors due to vc being
+ // closed from other errors? If not, should we call vc.Close()
+ // before removing the vc from the map?
+ delete(c.vcMap, vcKey)
+ }
+ sm := c.streamMgr
+ c.vcMapMu.Unlock()
+ vc, err := sm.Dial(ep, vcOpts...)
+ c.vcMapMu.Lock()
+	if err != nil {
+		if strings.Contains(err.Error(), "authentication failed") {
+			return nil, verror.New(errAuthError, ctx, ep, err)
+		}
+		return nil, verror.New(errSystemRetry, ctx, err)
+	}
+ if c.vcMap == nil {
+ sm.ShutdownEndpoint(ep)
+ return nil, verror.New(errClientCloseAlreadyCalled, ctx)
+ }
+ if othervc, exists := c.vcMap[vcKey]; exists {
+ vc = othervc.vc
+ // TODO(ashankar,toddw): Figure out how to close up the VC that
+ // is discarded. vc.Close?
+ } else {
+ c.vcMap[vcKey] = &vcInfo{vc: vc, remoteEP: ep}
+ }
+ flow, err := vc.Connect()
+	if err != nil {
+		return nil, verror.New(errAuthError, ctx, ep, err)
+	}
+ return flow, nil
+}
+
+// A randomized exponential backoff. The randomness deters error convoys from forming.
+// TODO(cnicolaou): rationalize this and the backoff in ipc.Server. Note
+// that rand is not thread safe and may crash.
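+// For example (from the formula below): with n=3 the base delay falls between
+// 1.5^3 (~3.4s) and 2.0^3 (8s), before being capped at maxBackoff and trimmed
+// to fit within the remaining time before the deadline.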
+func backoff(n int, deadline time.Time) bool {
+ b := time.Duration(math.Pow(1.5+(rand.Float64()/2.0), float64(n)) * float64(time.Second))
+ if b > maxBackoff {
+ b = maxBackoff
+ }
+ r := deadline.Sub(time.Now())
+ if b > r {
+ // We need to leave a little time for the call to start or
+ // we'll just timeout in startCall before we actually do
+ // anything. If we just have a millisecond left, give up.
+ if r <= time.Millisecond {
+ return false
+ }
+ b = r - time.Millisecond
+ }
+ time.Sleep(b)
+ return true
+}
+
+func (c *client) StartCall(ctx *context.T, name, method string, args []interface{}, opts ...ipc.CallOpt) (ipc.ClientCall, error) {
+ defer vlog.LogCall()()
+ return c.startCall(ctx, name, method, args, opts)
+}
+
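+// mkDischargeImpetus assembles the DischargeImpetus that accompanies a
+// discharge request: the server's blessings (as patterns), the method being
+// invoked, and the call arguments converted to vdl values.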
+func mkDischargeImpetus(serverBlessings []string, method string, args []interface{}) (security.DischargeImpetus, error) {
+ var impetus security.DischargeImpetus
+ if len(serverBlessings) > 0 {
+ impetus.Server = make([]security.BlessingPattern, len(serverBlessings))
+ for i, b := range serverBlessings {
+ impetus.Server[i] = security.BlessingPattern(b)
+ }
+ }
+ impetus.Method = method
+ if len(args) > 0 {
+ impetus.Arguments = make([]*vdl.Value, len(args))
+ for i, a := range args {
+ vArg, err := vdl.ValueFromReflect(reflect.ValueOf(a))
+ if err != nil {
+ return security.DischargeImpetus{}, err
+ }
+ impetus.Arguments[i] = vArg
+ }
+ }
+ return impetus, nil
+}
+
+// startCall ensures StartCall always returns verror.E.
+func (c *client) startCall(ctx *context.T, name, method string, args []interface{}, opts []ipc.CallOpt) (ipc.ClientCall, error) {
+ if !ctx.Initialized() {
+ return nil, verror.ExplicitNew(verror.ErrBadArg, i18n.NoLangID, "ipc.Client", "StartCall")
+ }
+ if err := canCreateServerAuthorizer(opts); err != nil {
+ return nil, verror.New(errNewServerAuthorizer, ctx, err)
+ }
+ ctx, span := vtrace.SetNewSpan(ctx, fmt.Sprintf("<client>%q.%s", name, method))
+ ctx = verror.ContextWithComponentName(ctx, "ipc.Client")
+
+ // Context specified deadline.
+ deadline, hasDeadline := ctx.Deadline()
+ if !hasDeadline {
+ // Default deadline.
+ deadline = time.Now().Add(defaultCallTimeout)
+ }
+ if r, ok := getRetryTimeoutOpt(opts); ok {
+ // Caller specified deadline.
+ deadline = time.Now().Add(r)
+ }
+
+ var lastErr error
+ for retries := 0; ; retries++ {
+ if retries != 0 {
+ if !backoff(retries, deadline) {
+ break
+ }
+ }
+ call, action, err := c.tryCall(ctx, name, method, args, opts)
+ if err == nil {
+ return call, nil
+ }
+ lastErr = err
+ shouldRetry := true
+ switch {
+ case getNoRetryOpt(opts):
+ shouldRetry = false
+ case action != verror.RetryConnection && action != verror.RetryRefetch:
+ shouldRetry = false
+ case time.Now().After(deadline):
+ shouldRetry = false
+ case action == verror.RetryRefetch && getNoResolveOpt(opts):
+ // If we're skipping resolution and there are no servers for
+ // this call, retrying is not going to help: we can't come up
+ // with new servers if there is no resolution.
+ shouldRetry = false
+ }
+ if !shouldRetry {
+ span.Annotatef("Cannot retry after error: %s", err)
+ break
+ }
+ span.Annotatef("Retrying due to error: %s", err)
+ }
+ return nil, lastErr
+}
+
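+// serverStatus records the outcome of a single tryCreateFlow attempt; index
+// is the position of the server within the resolved mount entry, so that
+// responses can be filled in and processed in priority order.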
+type serverStatus struct {
+ index int
+ suffix string
+ flow stream.Flow
+ blessings []string // authorized server blessings
+ rejectedBlessings []security.RejectedBlessing // rejected server blessings
+ err error
+}
+
+// tryCreateFlow attempts to establish a Flow to "server" (which must be a
+// rooted name), over which a method invocation request could be sent.
+//
+// The server at the remote end of the flow is authorized using the provided
+// authorizer, both during creation of the VC underlying the flow and the
+// flow itself.
+// TODO(cnicolaou): implement real, configurable load balancing.
+func (c *client) tryCreateFlow(ctx *context.T, index int, name, server, method string, auth security.Authorizer, ch chan<- *serverStatus, vcOpts []stream.VCOpt) {
+ status := &serverStatus{index: index}
+ var span vtrace.Span
+ ctx, span = vtrace.SetNewSpan(ctx, "<client>tryCreateFlow")
+ span.Annotatef("address:%v", server)
+ defer func() {
+ ch <- status
+ span.Finish()
+ }()
+
+ address, suffix := naming.SplitAddressName(server)
+ if len(address) == 0 {
+ status.err = verror.New(errNonRootedName, ctx, server)
+ return
+ }
+ status.suffix = suffix
+
+ ep, err := inaming.NewEndpoint(address)
+ if err != nil {
+ status.err = verror.New(errInvalidEndpoint, ctx, address)
+ return
+ }
+ if err = version.CheckCompatibility(ep); err != nil {
+ status.err = verror.New(errIncompatibleEndpoint, ctx, ep)
+ return
+ }
+ if status.flow, status.err = c.createFlow(ctx, ep, append(vcOpts, &vc.ServerAuthorizer{Suffix: status.suffix, Method: method, Policy: auth})); status.err != nil {
+ vlog.VI(2).Infof("ipc: Failed to create Flow with %v: %v", server, status.err)
+ return
+ }
+
+ // Authorize the remote end of the flow using the provided authorizer.
+ if status.flow.LocalPrincipal() == nil {
+ // LocalPrincipal is nil which means we are operating under
+ // VCSecurityNone.
+ return
+ }
+
+ // TODO(ataly, bprosnitx, ashankar): Add 'ctx' to the security.Call created below
+ // otherwise any custom caveat validators (defined in ctx) cannot be used while validating
+ // caveats in this context. Also see: https://github.com/veyron/release-issues/issues/1230
+ secctx := security.NewCall(&security.CallParams{
+ LocalPrincipal: status.flow.LocalPrincipal(),
+ LocalBlessings: status.flow.LocalBlessings(),
+ RemoteBlessings: status.flow.RemoteBlessings(),
+ LocalEndpoint: status.flow.LocalEndpoint(),
+ RemoteEndpoint: status.flow.RemoteEndpoint(),
+ RemoteDischarges: status.flow.RemoteDischarges(),
+ Method: method,
+ Suffix: status.suffix})
+ if err := auth.Authorize(secctx); err != nil {
+ status.err = verror.New(verror.ErrNotTrusted, ctx, name, status.flow.RemoteBlessings(), err)
+ vlog.VI(2).Infof("ipc: Failed to authorize Flow created with server %v: %s", server, status.err)
+ status.flow.Close()
+ status.flow = nil
+ return
+ }
+ status.blessings, status.rejectedBlessings = status.flow.RemoteBlessings().ForCall(secctx)
+ return
+}
+
+// tryCall makes a single attempt at a call. It may connect to multiple servers
+// (all that serve "name"), but will invoke the method on at most one of them
+// (the server running on the most preferred protocol and network amongst all
+// the servers that were successfully connected to and authorized).
+func (c *client) tryCall(ctx *context.T, name, method string, args []interface{}, opts []ipc.CallOpt) (ipc.ClientCall, verror.ActionCode, error) {
+ var resolved *naming.MountEntry
+ var err error
+ if resolved, err = c.ns.Resolve(ctx, name, getResolveOpts(opts)...); err != nil {
+ vlog.Errorf("Resolve: %v", err)
+ // We always return NoServers as the error so that the caller knows
+ // that it's ok to retry the operation since the name may be registered
+ // in the near future.
+ if verror.Is(err, naming.ErrNoSuchName.ID) {
+ return nil, verror.RetryRefetch, verror.New(verror.ErrNoServers, ctx, name)
+ }
+ return nil, verror.NoRetry, verror.New(verror.ErrNoServers, ctx, name, err)
+ } else {
+ if len(resolved.Servers) == 0 {
+ return nil, verror.RetryRefetch, verror.New(verror.ErrNoServers, ctx, name)
+ }
+ // An empty set of protocols means all protocols...
+ if resolved.Servers, err = filterAndOrderServers(resolved.Servers, c.preferredProtocols, c.ipNets); err != nil {
+ return nil, verror.RetryRefetch, verror.New(verror.ErrNoServers, ctx, name, err)
+ }
+ }
+
+ // resolved.Servers is now ordered by the priority heuristic implemented
+ // in filterAndOrderServers.
+ //
+ // Try to connect to all servers in parallel. Provide sufficient
+ // buffering for all of the connections to finish instantaneously. This
+ // is important because we want to process the responses in priority
+ // order; that order is indicated by the order of entries in
+ // resolved.Servers. So, if two responses come in at the same 'instant',
+ // we prefer the first one in resolved.Servers.
+ attempts := len(resolved.Servers)
+
+ responses := make([]*serverStatus, attempts)
+ ch := make(chan *serverStatus, attempts)
+ vcOpts := append(getVCOpts(opts), c.vcOpts...)
+ for i, server := range resolved.Names() {
+ vcOptsCopy := make([]stream.VCOpt, len(vcOpts))
+ copy(vcOptsCopy, vcOpts)
+ go c.tryCreateFlow(ctx, i, name, server, method, newServerAuthorizer(ctx, bpatterns(resolved.Servers[i].BlessingPatterns), opts...), ch, vcOptsCopy)
+ }
+
+ var timeoutChan <-chan time.Time
+ if deadline, ok := ctx.Deadline(); ok {
+ timeoutChan = time.After(deadline.Sub(time.Now()))
+ }
+
+ for {
+ // Block for at least one new response from the server, or the timeout.
+ select {
+ case r := <-ch:
+ responses[r.index] = r
+ // Read as many more responses as we can without blocking.
+ LoopNonBlocking:
+ for {
+ select {
+ default:
+ break LoopNonBlocking
+ case r := <-ch:
+ responses[r.index] = r
+ }
+ }
+ case <-timeoutChan:
+ vlog.VI(2).Infof("ipc: timeout on connection to server %v ", name)
+ _, _, err := c.failedTryCall(ctx, name, method, responses, ch)
+ if !verror.Is(err, verror.ErrTimeout.ID) {
+ return nil, verror.NoRetry, verror.New(verror.ErrTimeout, ctx, err)
+ }
+ return nil, verror.NoRetry, err
+ }
+
+ dc := c.dc
+ if shouldNotFetchDischarges(opts) {
+ dc = nil
+ }
+ // Process new responses, in priority order.
+ numResponses := 0
+ for _, r := range responses {
+ if r != nil {
+ numResponses++
+ }
+ if r == nil || r.flow == nil {
+ continue
+ }
+
+ doneChan := ctx.Done()
+ r.flow.SetDeadline(doneChan)
+
+ fc, err := newFlowClient(ctx, r.flow, r.blessings, dc)
+ if err != nil {
+ return nil, verror.NoRetry, err.(error)
+ }
+
+ if err := fc.prepareBlessingsAndDischarges(method, args, r.rejectedBlessings, opts); err != nil {
+ r.err = verror.New(verror.ErrNotTrusted, ctx, name, r.flow.RemoteBlessings(), err)
+ vlog.VI(2).Infof("ipc: err: %s", r.err)
+ r.flow.Close()
+ r.flow = nil
+ continue
+ }
+
+ // This is the 'point of no return': once the RPC is started (fc.start
+ // below) we can't be sure whether it made it to the server, so this
+ // code never calls fc.start more than once in order to provide
+ // 'at-most-once' rpc semantics at this level. Retrying the network
+ // connections (i.e. creating flows) is fine since we can clean up that
+ // state if we abort a call (i.e. close the flow).
+ //
+ // We must ensure that all flows other than r.flow are closed.
+ //
+ // TODO(cnicolaou): all errors below are marked as NoRetry
+ // because we want to provide at-most-once rpc semantics so
+ // we only ever attempt an RPC once. In the future, we'll cache
+ // responses on the server and then we can retry in-process
+ // RPCs.
+ go cleanupTryCall(r, responses, ch)
+
+ if doneChan != nil {
+ go func() {
+ select {
+ case <-doneChan:
+ vtrace.GetSpan(fc.ctx).Annotate("Cancelled")
+ fc.flow.Cancel()
+ case <-fc.flow.Closed():
+ }
+ }()
+ }
+
+ deadline, _ := ctx.Deadline()
+ if verr := fc.start(r.suffix, method, args, deadline); verr != nil {
+ return nil, verror.NoRetry, verr
+ }
+ return fc, verror.NoRetry, nil
+ }
+ if numResponses == len(responses) {
+ return c.failedTryCall(ctx, name, method, responses, ch)
+ }
+ }
+}
+
+// cleanupTryCall ensures we've waited for every response from the tryCreateFlow
+// goroutines, and have closed the flow from each one except skip. This is a
+// blocking function; it should be called in its own goroutine.
+func cleanupTryCall(skip *serverStatus, responses []*serverStatus, ch chan *serverStatus) {
+ numPending := 0
+ for _, r := range responses {
+ switch {
+ case r == nil:
+ // The response hasn't arrived yet.
+ numPending++
+ case r == skip || r.flow == nil:
+ // Either we should skip this flow, or we've closed the flow for this
+ // response already; nothing more to do.
+ default:
+ // We received the response, but haven't closed the flow yet.
+ r.flow.Close()
+ }
+ }
+ // Now we just need to wait for the pending responses and close their flows.
+ for i := 0; i < numPending; i++ {
+ if r := <-ch; r.flow != nil {
+ r.flow.Close()
+ }
+ }
+}
+
+// failedTryCall performs asynchronous cleanup for tryCall, and returns an
+// appropriate error from the responses we've already received. If we get
+// here, all of the parallel calls in tryCall failed or we timed out.
+func (c *client) failedTryCall(ctx *context.T, name, method string, responses []*serverStatus, ch chan *serverStatus) (ipc.ClientCall, verror.ActionCode, error) {
+ go cleanupTryCall(nil, responses, ch)
+ c.ns.FlushCacheEntry(name)
+ noconn, untrusted := []string{}, []string{}
+ for _, r := range responses {
+ if r != nil && r.err != nil {
+ switch {
+ case verror.Is(r.err, verror.ErrNotTrusted.ID) || verror.Is(r.err, errAuthError.ID):
+ untrusted = append(untrusted, "("+r.err.Error()+") ")
+ default:
+ noconn = append(noconn, "("+r.err.Error()+") ")
+ }
+ }
+ }
+ // TODO(cnicolaou): we get system errors for things like dialing using
+ // the 'ws' protocol which can never succeed even if we retry the connection,
+ // hence we return RetryRefetch in all cases below. In the future, we'll
+ // pick out this error and then we can retry the connection also. This also
+ // plays into the 'at-most-once' rpc semantics change that's needed in order
+ // to retry an in-flight RPC.
+ switch {
+ case len(untrusted) > 0 && len(noconn) > 0:
+ return nil, verror.RetryRefetch, verror.New(verror.ErrNoServersAndAuth, ctx, append(noconn, untrusted...))
+ case len(noconn) > 0:
+ return nil, verror.RetryRefetch, verror.New(verror.ErrNoServers, ctx, noconn)
+ case len(untrusted) > 0:
+ return nil, verror.NoRetry, verror.New(verror.ErrNotTrusted, ctx, untrusted)
+ default:
+ return nil, verror.RetryRefetch, verror.New(verror.ErrTimeout, ctx)
+ }
+}
+
+// prepareBlessingsAndDischarges prepares blessings and discharges for
+// the call.
+//
+// This includes: (1) preparing blessings that must be granted to the
+// server, (2) preparing blessings that the client authenticates with,
+// and, (3) preparing any discharges for third-party caveats on the client's
+// blessings.
+func (fc *flowClient) prepareBlessingsAndDischarges(method string, args []interface{}, rejectedServerBlessings []security.RejectedBlessing, opts []ipc.CallOpt) error {
+ // LocalPrincipal is nil which means we are operating under
+ // VCSecurityNone.
+ if fc.flow.LocalPrincipal() == nil {
+ return nil
+ }
+
+ // Prepare blessings that must be granted to the server (using any
+ // ipc.Granter implementation in 'opts').
+ if err := fc.prepareGrantedBlessings(opts); err != nil {
+ return err
+ }
+
+ // Fetch blessings from the client's blessing store that are to be
+ // shared with the server.
+ if fc.blessings = fc.flow.LocalPrincipal().BlessingStore().ForPeer(fc.server...); fc.blessings.IsZero() {
+ // TODO(ataly, ashankar): We need not error out here and instead can just send the <nil> blessings
+ // to the server.
+ return verror.New(errNoBlessingsForPeer, fc.ctx, fc.server, rejectedServerBlessings)
+ }
+
+ // Fetch any discharges for third-party caveats on the client's blessings.
+ if !fc.blessings.IsZero() && fc.dc != nil {
+ impetus, err := mkDischargeImpetus(fc.server, method, args)
+ if err != nil {
+ // TODO(toddw): Fix up the internal error.
+ return verror.New(verror.ErrBadProtocol, fc.ctx, fmt.Errorf("couldn't make discharge impetus: %v", err))
+ }
+ fc.discharges = fc.dc.PrepareDischarges(fc.ctx, fc.blessings.ThirdPartyCaveats(), impetus)
+ }
+ return nil
+}
+
+func (fc *flowClient) prepareGrantedBlessings(opts []ipc.CallOpt) error {
+ for _, o := range opts {
+ switch v := o.(type) {
+ case ipc.Granter:
+ if b, err := v.Grant(fc.flow.RemoteBlessings()); err != nil {
+ return verror.New(errBlessingGrant, fc.ctx, fc.server, err)
+ } else if fc.grantedBlessings, err = security.UnionOfBlessings(fc.grantedBlessings, b); err != nil {
+ return verror.New(errBlessingAdd, fc.ctx, fc.server, err)
+ }
+ }
+ }
+ return nil
+}
+
+func (c *client) Close() {
+ defer vlog.LogCall()()
+ c.vcMapMu.Lock()
+ for _, v := range c.vcMap {
+ c.streamMgr.ShutdownEndpoint(v.remoteEP)
+ }
+ c.vcMap = nil
+ c.vcMapMu.Unlock()
+}
+
+// flowClient implements the RPC client-side protocol for a single RPC, over a
+// flow that's already connected to the server.
+type flowClient struct {
+ ctx *context.T // context to annotate with call details
+ dec *vom.Decoder // to decode responses and results from the server
+ enc *vom.Encoder // to encode requests and args to the server
+ server []string // Blessings bound to the server that authorize it to receive the IPC request from the client.
+ flow stream.Flow // the underlying flow
+ response ipc.Response // each decoded response message is kept here
+
+ discharges []security.Discharge // discharges used for this request
+ dc vc.DischargeClient // client-global discharge-client
+
+ blessings security.Blessings // the local blessings for the current RPC.
+ grantedBlessings security.Blessings // the blessings granted to the server.
+
+ sendClosedMu sync.Mutex
+ sendClosed bool // is the send side already closed? GUARDED_BY(sendClosedMu)
+ finished bool // has Finish() already been called?
+}
+
+var _ ipc.ClientCall = (*flowClient)(nil)
+var _ ipc.Stream = (*flowClient)(nil)
+
+func newFlowClient(ctx *context.T, flow stream.Flow, server []string, dc vc.DischargeClient) (*flowClient, error) {
+ fc := &flowClient{
+ ctx: ctx,
+ flow: flow,
+ server: server,
+ dc: dc,
+ }
+ var err error
+ if fc.enc, err = vom.NewEncoder(flow); err != nil {
+ berr := verror.New(verror.ErrBadProtocol, fc.ctx, verror.New(errVomEncoder, fc.ctx, err))
+ return nil, fc.close(berr)
+ }
+ if fc.dec, err = vom.NewDecoder(flow); err != nil {
+ berr := verror.New(verror.ErrBadProtocol, fc.ctx, verror.New(errVomDecoder, fc.ctx, err))
+ return nil, fc.close(berr)
+ }
+ return fc, nil
+}
+
+func (fc *flowClient) close(err error) error {
+ if cerr := fc.flow.Close(); cerr != nil && err == nil {
+ return verror.New(verror.ErrInternal, fc.ctx, err)
+ }
+ switch {
+ case verror.Is(err, verror.ErrBadProtocol.ID):
+ switch fc.ctx.Err() {
+ case context.DeadlineExceeded:
+ // TODO(cnicolaou,m3b): reintroduce 'append' when the new verror API is done.
+ //return verror.Append(verror.New(verror.ErrTimeout, fc.ctx), verr)
+ return verror.New(verror.ErrTimeout, fc.ctx, err.Error())
+ case context.Canceled:
+ // TODO(cnicolaou,m3b): reintroduce 'append' when the new verror API is done.
+ //return verror.Append(verror.New(verror.ErrCanceled, fc.ctx), verr)
+ return verror.New(verror.ErrCanceled, fc.ctx, err.Error())
+ }
+ case verror.Is(err, verror.ErrTimeout.ID):
+ // Canceled trumps timeout.
+ if fc.ctx.Err() == context.Canceled {
+ // TODO(cnicolaou,m3b): reintroduce 'append' when the new verror API is done.
+ return verror.New(verror.ErrCanceled, fc.ctx, err.Error())
+ }
+ }
+ return err
+}
+
+func (fc *flowClient) start(suffix, method string, args []interface{}, deadline time.Time) error {
+ // Encode the Blessings information for the client to authorize the flow.
+ var blessingsRequest ipc.BlessingsRequest
+ if fc.flow.LocalPrincipal() != nil {
+ blessingsRequest = clientEncodeBlessings(fc.flow.VCDataCache(), fc.blessings)
+ }
+
+ discharges := make([]security.WireDischarge, len(fc.discharges))
+ for i, d := range fc.discharges {
+ discharges[i] = security.MarshalDischarge(d)
+ }
+ req := ipc.Request{
+ Suffix: suffix,
+ Method: method,
+ NumPosArgs: uint64(len(args)),
+ Deadline: vtime.Deadline{deadline},
+ GrantedBlessings: fc.grantedBlessings,
+ Blessings: blessingsRequest,
+ Discharges: discharges,
+ TraceRequest: ivtrace.Request(fc.ctx),
+ }
+ if err := fc.enc.Encode(req); err != nil {
+ berr := verror.New(verror.ErrBadProtocol, fc.ctx, verror.New(errRequestEncoding, fc.ctx, fmt.Sprintf("%#v", req), err))
+ return fc.close(berr)
+ }
+
+ for ix, arg := range args {
+ if err := fc.enc.Encode(arg); err != nil {
+ berr := verror.New(verror.ErrBadProtocol, fc.ctx, verror.New(errArgEncoding, fc.ctx, ix, err))
+ return fc.close(berr)
+ }
+ }
+ return nil
+}
+
+func (fc *flowClient) Send(item interface{}) error {
+ defer vlog.LogCall()()
+ if fc.sendClosed {
+ return verror.New(verror.ErrAborted, fc.ctx)
+ }
+
+ // The empty request header indicates what follows is a streaming arg.
+ if err := fc.enc.Encode(ipc.Request{}); err != nil {
+ berr := verror.New(verror.ErrBadProtocol, fc.ctx, verror.New(errRequestEncoding, fc.ctx, ipc.Request{}, err))
+ return fc.close(berr)
+ }
+ if err := fc.enc.Encode(item); err != nil {
+ berr := verror.New(verror.ErrBadProtocol, fc.ctx, verror.New(errArgEncoding, fc.ctx, -1, err))
+ return fc.close(berr)
+ }
+ return nil
+}
+
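+// decodeNetError maps an error encountered while reading or decoding a
+// response onto a verror ID: temporary or timeout net.Errors become
+// ErrTimeout (or ErrCanceled if the context was canceled), and anything else
+// becomes ErrBadProtocol.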
+func decodeNetError(ctx *context.T, err error) verror.IDAction {
+ if neterr, ok := err.(net.Error); ok {
+ if neterr.Timeout() || neterr.Temporary() {
+ // If a read is canceled in the lower levels we see
+ // a timeout error - see readLocked in vc/reader.go
+ if ctx.Err() == context.Canceled {
+ return verror.ErrCanceled
+ }
+ return verror.ErrTimeout
+ }
+ }
+ return verror.ErrBadProtocol
+}
+
+func (fc *flowClient) Recv(itemptr interface{}) error {
+ defer vlog.LogCall()()
+ switch {
+ case fc.response.Error != nil:
+ // TODO(cnicolaou): this will become a verror.E when we convert the
+ // server.
+ return verror.New(verror.ErrBadProtocol, fc.ctx, fc.response.Error)
+ case fc.response.EndStreamResults:
+ return io.EOF
+ }
+
+ // Decode the response header and handle errors and EOF.
+ if err := fc.dec.Decode(&fc.response); err != nil {
+ berr := verror.New(decodeNetError(fc.ctx, err), fc.ctx, verror.New(errResponseDecoding, fc.ctx, err))
+ return fc.close(berr)
+ }
+ if fc.response.Error != nil {
+ // TODO(cnicolaou): this will become a verror.E when we convert the
+ // server.
+ return verror.New(verror.ErrBadProtocol, fc.ctx, fc.response.Error)
+ }
+ if fc.response.EndStreamResults {
+ // Return EOF to indicate to the caller that there are no more stream
+ // results. Any error sent by the server is kept in fc.response.Error, and
+ // returned to the user in Finish.
+ return io.EOF
+ }
+ // Decode the streaming result.
+ if err := fc.dec.Decode(itemptr); err != nil {
+ berr := verror.New(decodeNetError(fc.ctx, err), fc.ctx, verror.New(errResponseDecoding, fc.ctx, err))
+ // TODO(cnicolaou): should we be caching this?
+ fc.response.Error = berr
+ return fc.close(berr)
+ }
+ return nil
+}
+
+func (fc *flowClient) CloseSend() error {
+ defer vlog.LogCall()()
+ return fc.closeSend()
+}
+
+// closeSend ensures CloseSend always returns verror.E.
+func (fc *flowClient) closeSend() error {
+ fc.sendClosedMu.Lock()
+ defer fc.sendClosedMu.Unlock()
+ if fc.sendClosed {
+ return nil
+ }
+ if err := fc.enc.Encode(ipc.Request{EndStreamArgs: true}); err != nil {
+ // TODO(caprita): Indiscriminately closing the flow below causes
+ // a race as described in:
+ // https://docs.google.com/a/google.com/document/d/1C0kxfYhuOcStdV7tnLZELZpUhfQCZj47B0JrzbE29h8/edit
+ //
+ // There should be a finer grained way to fix this (for example,
+ // encoding errors should probably still result in closing the
+ // flow); on the flip side, there may exist other instances
+ // where we are closing the flow but should not.
+ //
+ // For now, commenting out the line below removes the flakiness
+ // from our existing unit tests, but this needs to be revisited
+ // and fixed correctly.
+ //
+ // return fc.close(verror.ErrBadProtocolf("ipc: end stream args encoding failed: %v", err))
+ }
+ fc.sendClosed = true
+ return nil
+}
+
+func (fc *flowClient) Finish(resultptrs ...interface{}) error {
+ defer vlog.LogCall()()
+ err := fc.finish(resultptrs...)
+ vtrace.GetSpan(fc.ctx).Finish()
+ return err
+}
+
+// finish ensures Finish always returns a verror.E.
+func (fc *flowClient) finish(resultptrs ...interface{}) error {
+ if fc.finished {
+ err := verror.New(errClientFinishAlreadyCalled, fc.ctx)
+ return fc.close(verror.New(verror.ErrBadState, fc.ctx, err))
+ }
+ fc.finished = true
+
+ // Call closeSend implicitly, if the user hasn't already called it. There are
+ // three cases:
+ // 1) Server is blocked on Recv waiting for the final request message.
+ // 2) Server has already finished processing, the final response message and
+ // out args are queued up on the client, and the flow is closed.
+ // 3) Between 1 and 2: the server isn't blocked on Recv, but the final
+ // response and args aren't queued up yet, and the flow isn't closed.
+ //
+ // We must call closeSend to handle case (1) and unblock the server; otherwise
+ // we'll deadlock with both client and server waiting for each other. We must
+ // ignore the error (if any) to handle case (2). In that case the flow is
+ // closed, meaning writes will fail and reads will succeed, and closeSend will
+ // always return an error. But this isn't a "real" error; the client should
+ // read the rest of the results and succeed.
+ _ = fc.closeSend()
+ // Decode the response header, if it hasn't already been decoded by Recv.
+ if fc.response.Error == nil && !fc.response.EndStreamResults {
+ if err := fc.dec.Decode(&fc.response); err != nil {
+ berr := verror.New(decodeNetError(fc.ctx, err), fc.ctx, verror.New(errResponseDecoding, fc.ctx, err))
+ return fc.close(berr)
+ }
+ // The response header must indicate the streaming results have ended.
+ if fc.response.Error == nil && !fc.response.EndStreamResults {
+ berr := verror.New(verror.ErrBadProtocol, fc.ctx, verror.New(errRemainingStreamResults, fc.ctx))
+ return fc.close(berr)
+ }
+ }
+ if fc.response.AckBlessings {
+ clientAckBlessings(fc.flow.VCDataCache(), fc.blessings)
+ }
+ // Incorporate any VTrace info that was returned.
+ ivtrace.Merge(fc.ctx, fc.response.TraceResponse)
+ if fc.response.Error != nil {
+ // TODO(cnicolaou): remove verror.ErrNoAccess with verror version
+ // when ipc.Server is converted.
+ if verror.Is(fc.response.Error, verror.ErrNoAccess.ID) && fc.dc != nil {
+ // In case the error was caused by a bad discharge, we do not want to get stuck
+ // with retrying again and again with this discharge. As there is no direct way
+ // to detect it, we conservatively flush all discharges we used from the cache.
+ // TODO(ataly,andreser): add verror.BadDischarge and handle it explicitly?
+ vlog.VI(3).Infof("Discarding %d discharges as RPC failed with %v", len(fc.discharges), fc.response.Error)
+ fc.dc.Invalidate(fc.discharges...)
+ }
+ return fc.close(verror.Convert(verror.ErrInternal, fc.ctx, fc.response.Error))
+ }
+ if got, want := fc.response.NumPosResults, uint64(len(resultptrs)); got != want {
+ berr := verror.New(verror.ErrBadProtocol, fc.ctx, verror.New(errMismatchedResults, fc.ctx, got, want))
+ return fc.close(berr)
+ }
+ for ix, r := range resultptrs {
+ if err := fc.dec.Decode(r); err != nil {
+ berr := verror.New(decodeNetError(fc.ctx, err), fc.ctx, verror.New(errResultDecoding, fc.ctx, ix, err))
+ return fc.close(berr)
+ }
+ }
+ return fc.close(nil)
+}
+
+func (fc *flowClient) RemoteBlessings() ([]string, security.Blessings) {
+ return fc.server, fc.flow.RemoteBlessings()
+}
+
+func bpatterns(patterns []string) []security.BlessingPattern {
+ if patterns == nil {
+ return nil
+ }
+ bpatterns := make([]security.BlessingPattern, len(patterns))
+ for i, p := range patterns {
+ bpatterns[i] = security.BlessingPattern(p)
+ }
+ return bpatterns
+}
diff --git a/profiles/internal/ipc/client_test.go b/profiles/internal/ipc/client_test.go
new file mode 100644
index 0000000..49455d1
--- /dev/null
+++ b/profiles/internal/ipc/client_test.go
@@ -0,0 +1,525 @@
+package ipc_test
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/naming"
+ "v.io/v23/options"
+ "v.io/v23/verror"
+ "v.io/x/lib/vlog"
+
+ "v.io/x/ref/lib/expect"
+ "v.io/x/ref/lib/flags/consts"
+ "v.io/x/ref/lib/modules"
+ "v.io/x/ref/lib/modules/core"
+ "v.io/x/ref/lib/testutil"
+ tsecurity "v.io/x/ref/lib/testutil/security"
+ _ "v.io/x/ref/profiles"
+ inaming "v.io/x/ref/profiles/internal/naming"
+)
+
+func init() {
+ modules.RegisterChild("ping", "<name>", childPing)
+}
+
+func newCtx() (*context.T, v23.Shutdown) {
+ ctx, shutdown := testutil.InitForTest()
+ v23.GetNamespace(ctx).CacheCtl(naming.DisableCache(true))
+ return ctx, shutdown
+}
+
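+// runMountTable starts a root mount table in a subprocess shell and points
+// the namespace roots of ctx at it; the returned function cleans up both the
+// shell and the mount table process.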
+func runMountTable(t *testing.T, ctx *context.T) (*modules.Shell, func()) {
+ sh, err := modules.NewShell(ctx, nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ root, err := sh.Start(core.RootMTCommand, nil)
+ if err != nil {
+ t.Fatalf("unexpected error for root mt: %s", err)
+ }
+ sh.Forget(root)
+
+ rootSession := expect.NewSession(t, root.Stdout(), time.Minute)
+ rootSession.ExpectVar("PID")
+ rootName := rootSession.ExpectVar("MT_NAME")
+
+ deferFn := func() {
+ if testing.Verbose() {
+ vlog.Infof("------ shell cleanup ------")
+ sh.Cleanup(os.Stderr, os.Stderr)
+ vlog.Infof("------ root shutdown ------")
+ root.Shutdown(os.Stderr, os.Stderr)
+ } else {
+ sh.Cleanup(nil, nil)
+ root.Shutdown(nil, nil)
+ }
+ }
+
+ if t.Failed() {
+ deferFn()
+ t.Fatalf("%s", rootSession.Error())
+ }
+ sh.SetVar(consts.NamespaceRootPrefix, rootName)
+ if err = v23.GetNamespace(ctx).SetRoots(rootName); err != nil {
+ t.Fatalf("unexpected error setting namespace roots: %s", err)
+ }
+
+ return sh, deferFn
+}
+
+func runClient(t *testing.T, sh *modules.Shell) error {
+ clt, err := sh.Start(core.EchoClientCommand, nil, "echoServer", "a message")
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ s := expect.NewSession(t, clt.Stdout(), 30*time.Second)
+ s.Expect("echoServer: a message")
+ if s.Failed() {
+ return s.Error()
+ }
+ return nil
+}
+
+func numServers(t *testing.T, ctx *context.T, name string) int {
+ me, err := v23.GetNamespace(ctx).Resolve(ctx, name)
+ if err != nil {
+ return 0
+ }
+ return len(me.Servers)
+}
+
+// TODO(cnicolaou): figure out how to test and see what the internals
+// of tryCall are doing - e.g. using stats counters.
+func TestMultipleEndpoints(t *testing.T) {
+ ctx, shutdown := newCtx()
+ defer shutdown()
+
+ sh, fn := runMountTable(t, ctx)
+ defer fn()
+ srv, err := sh.Start(core.EchoServerCommand, nil, "echoServer", "echoServer")
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ s := expect.NewSession(t, srv.Stdout(), time.Minute)
+ s.ExpectVar("PID")
+ s.ExpectVar("NAME")
+
+ // Verify that there is 1 entry for echoServer in the mount table.
+ if got, want := numServers(t, ctx, "echoServer"), 1; got != want {
+ t.Fatalf("got: %d, want: %d", got, want)
+ }
+
+ runClient(t, sh)
+
+ // Create a fake set of 100 entries in the mount table
+ for i := 0; i < 100; i++ {
+ // 203.0.113.0 is TEST-NET-3 from RFC5737
+ ep := naming.FormatEndpoint("tcp", fmt.Sprintf("203.0.113.%d:443", i))
+ n := naming.JoinAddressName(ep, "")
+ if err := v23.GetNamespace(ctx).Mount(ctx, "echoServer", n, time.Hour); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ }
+
+ // Verify that there are 101 entries for echoServer in the mount table.
+ if got, want := numServers(t, ctx, "echoServer"), 101; got != want {
+ t.Fatalf("got: %q, want: %q", got, want)
+ }
+
+ // TODO(cnicolaou): ok, so it works, but I'm not sure how
+ // long it should take or if the parallel connection code
+ // really works. Use counters to inspect it for example.
+ if err := runClient(t, sh); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ srv.CloseStdin()
+ srv.Shutdown(nil, nil)
+
+ // Verify that there are 100 entries for echoServer in the mount table.
+ if got, want := numServers(t, ctx, "echoServer"), 100; got != want {
+ t.Fatalf("got: %d, want: %d", got, want)
+ }
+}
+
+func TestTimeoutCall(t *testing.T) {
+ ctx, shutdown := newCtx()
+ defer shutdown()
+ ctx, _ = context.WithTimeout(ctx, 100*time.Millisecond)
+ name := naming.JoinAddressName(naming.FormatEndpoint("tcp", "203.0.113.10:443"), "")
+ client := v23.GetClient(ctx)
+ _, err := client.StartCall(ctx, name, "echo", []interface{}{"args don't matter"})
+ if !verror.Is(err, verror.ErrTimeout.ID) {
+ t.Fatalf("wrong error: %s", err)
+ }
+}
+
+func childPing(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
+ ctx, shutdown := testutil.InitForTest()
+ defer shutdown()
+ v23.GetNamespace(ctx).CacheCtl(naming.DisableCache(true))
+
+ name := args[0]
+ call, err := v23.GetClient(ctx).StartCall(ctx, name, "Ping", nil)
+ if err != nil {
+ return fmt.Errorf("unexpected error: %s", err)
+ }
+ got := ""
+ if err := call.Finish(&got); err != nil {
+ return fmt.Errorf("unexpected error: %s", err)
+ }
+ fmt.Fprintf(stdout, "RESULT=%s\n", got)
+ return nil
+}
+
+func initServer(t *testing.T, ctx *context.T) (string, func()) {
+ server, err := v23.NewServer(ctx)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ done := make(chan struct{})
+ deferFn := func() { close(done); server.Stop() }
+
+ eps, err := server.Listen(v23.GetListenSpec(ctx))
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ server.Serve("", &simple{done}, nil)
+ return eps[0].Name(), deferFn
+}
+
+func testForVerror(t *testing.T, err error, verr verror.IDAction) {
+ _, file, line, _ := runtime.Caller(1)
+ loc := fmt.Sprintf("%s:%d", filepath.Base(file), line)
+ if !verror.Is(err, verr.ID) {
+ if _, ok := err.(verror.E); !ok {
+ t.Fatalf("%s: err %v not a verror", loc, err)
+ }
+ stack := ""
+ if err != nil {
+ stack = verror.Stack(err).String()
+ }
+ t.Fatalf("%s: expecting one of: %v, got: %v: stack: %s", loc, verr, err, stack)
+ }
+}
+
+func TestTimeoutResponse(t *testing.T) {
+ ctx, shutdown := newCtx()
+ defer shutdown()
+ name, fn := initServer(t, ctx)
+ defer fn()
+ ctx, _ = context.WithTimeout(ctx, time.Millisecond)
+ call, err := v23.GetClient(ctx).StartCall(ctx, name, "Sleep", nil)
+ if err != nil {
+ testForVerror(t, err, verror.ErrTimeout)
+ return
+ }
+ err = call.Finish()
+ testForVerror(t, err, verror.ErrTimeout)
+}
+
+func TestArgsAndResponses(t *testing.T) {
+ ctx, shutdown := newCtx()
+ defer shutdown()
+ name, fn := initServer(t, ctx)
+ defer fn()
+
+ call, err := v23.GetClient(ctx).StartCall(ctx, name, "Sleep", []interface{}{"too many args"})
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ err = call.Finish()
+ testForVerror(t, err, verror.ErrBadProtocol)
+
+ call, err = v23.GetClient(ctx).StartCall(ctx, name, "Ping", nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ pong := ""
+ dummy := ""
+ err = call.Finish(&pong, &dummy)
+ testForVerror(t, err, verror.ErrBadProtocol)
+}
+
+func TestAccessDenied(t *testing.T) {
+ ctx, shutdown := testutil.InitForTest()
+ defer shutdown()
+
+ name, fn := initServer(t, ctx)
+ defer fn()
+
+ ctx1, err := v23.SetPrincipal(ctx, tsecurity.NewPrincipal("test-blessing"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ call, err := v23.GetClient(ctx1).StartCall(ctx1, name, "Sleep", nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ err = call.Finish()
+ testForVerror(t, err, verror.ErrNoAccess)
+}
+
+func TestCanceledBeforeFinish(t *testing.T) {
+ ctx, shutdown := newCtx()
+ defer shutdown()
+ name, fn := initServer(t, ctx)
+ defer fn()
+
+ ctx, cancel := context.WithCancel(ctx)
+ call, err := v23.GetClient(ctx).StartCall(ctx, name, "Sleep", nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ // Cancel before we call finish.
+ cancel()
+ err = call.Finish()
+ // TODO(cnicolaou): this should be Canceled only.
+ testForVerror(t, err, verror.ErrCanceled)
+}
+
+func TestCanceledDuringFinish(t *testing.T) {
+ ctx, shutdown := newCtx()
+ defer shutdown()
+ name, fn := initServer(t, ctx)
+ defer fn()
+
+ ctx, cancel := context.WithCancel(ctx)
+ call, err := v23.GetClient(ctx).StartCall(ctx, name, "Sleep", nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ // Cancel whilst the RPC is running.
+ go func() {
+ time.Sleep(100 * time.Millisecond)
+ cancel()
+ }()
+ err = call.Finish()
+ testForVerror(t, err, verror.ErrCanceled)
+}
+
+func TestRendezvous(t *testing.T) {
+ ctx, shutdown := newCtx()
+ defer shutdown()
+ sh, fn := runMountTable(t, ctx)
+ defer fn()
+
+ name := "echoServer"
+
+ // We start the client before we start the server; StartCall will re-resolve
+ // the name until it finds an entry or times out after an exponential
+ // backoff of some minutes.
+ startServer := func() {
+ time.Sleep(10 * time.Millisecond)
+ srv, _ := sh.Start(core.EchoServerCommand, nil, "message", name)
+ s := expect.NewSession(t, srv.Stdout(), time.Minute)
+ s.ExpectVar("PID")
+ s.ExpectVar("NAME")
+ }
+ go startServer()
+
+ call, err := v23.GetClient(ctx).StartCall(ctx, name, "Echo", []interface{}{"hello"})
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ response := ""
+ if err := call.Finish(&response); err != nil {
+ testForVerror(t, err, verror.ErrCanceled)
+ return
+ }
+ if got, want := response, "message: hello\n"; got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
+
+func TestCallback(t *testing.T) {
+ ctx, shutdown := newCtx()
+ defer shutdown()
+ sh, fn := runMountTable(t, ctx)
+ defer fn()
+
+ name, fn := initServer(t, ctx)
+ defer fn()
+
+ srv, err := sh.Start("ping", nil, name)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ s := expect.NewSession(t, srv.Stdout(), time.Minute)
+ if got, want := s.ExpectVar("RESULT"), "pong"; got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
+
+func TestStreamTimeout(t *testing.T) {
+ ctx, shutdown := newCtx()
+ defer shutdown()
+ name, fn := initServer(t, ctx)
+ defer fn()
+
+ want := 10
+ ctx, _ = context.WithTimeout(ctx, 300*time.Millisecond)
+ call, err := v23.GetClient(ctx).StartCall(ctx, name, "Source", []interface{}{want})
+ if err != nil {
+ if !verror.Is(err, verror.ErrTimeout.ID) {
+ t.Fatalf("verror should be a timeout not %s: stack %s",
+ err, verror.Stack(err))
+ }
+ return
+ }
+
+ for {
+ got := 0
+ err := call.Recv(&got)
+ if err == nil {
+ if got != want {
+ t.Fatalf("got %d, want %d", got, want)
+ }
+ want++
+ continue
+ }
+ // TODO(cnicolaou): this should be Timeout only.
+ testForVerror(t, err, verror.ErrTimeout)
+ break
+ }
+ err = call.Finish()
+ testForVerror(t, err, verror.ErrTimeout)
+}
+
+func TestStreamAbort(t *testing.T) {
+ ctx, shutdown := newCtx()
+ defer shutdown()
+ name, fn := initServer(t, ctx)
+ defer fn()
+
+ call, err := v23.GetClient(ctx).StartCall(ctx, name, "Sink", nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ want := 10
+ for i := 0; i <= want; i++ {
+ if err := call.Send(i); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ }
+ call.CloseSend()
+ err = call.Send(100)
+ testForVerror(t, err, verror.ErrAborted)
+
+ result := 0
+ err = call.Finish(&result)
+ if err != nil {
+ t.Errorf("unexpected error: %#v", err)
+ }
+ if got := result; got != want {
+ t.Errorf("got %d, want %d", got, want)
+ }
+}
+
+func TestNoServersAvailable(t *testing.T) {
+ ctx, shutdown := newCtx()
+ defer shutdown()
+ _, fn := runMountTable(t, ctx)
+ defer fn()
+ name := "noservers"
+ ctx, _ = context.WithTimeout(ctx, 1000*time.Millisecond)
+ call, err := v23.GetClient(ctx).StartCall(ctx, name, "Sleep", nil)
+ if err != nil {
+ testForVerror(t, err, verror.ErrNoServers)
+ return
+ }
+ err = call.Finish()
+ testForVerror(t, err, verror.ErrNoServers)
+}
+
+func TestNoMountTable(t *testing.T) {
+ ctx, shutdown := newCtx()
+ defer shutdown()
+ v23.GetNamespace(ctx).SetRoots()
+ name := "a_mount_table_entry"
+
+ // If there is no mount table, then we'll get a NoServers error message.
+ ctx, _ = context.WithTimeout(ctx, 300*time.Millisecond)
+ _, err := v23.GetClient(ctx).StartCall(ctx, name, "Sleep", nil)
+ testForVerror(t, err, verror.ErrNoServers)
+}
+
+// TestReconnect verifies that the client transparently re-establishes the
+// connection to the server if the server dies and comes back (on the same
+// endpoint).
+func TestReconnect(t *testing.T) {
+ ctx, shutdown := testutil.InitForTest()
+ defer shutdown()
+
+ sh, err := modules.NewShell(ctx, v23.GetPrincipal(ctx))
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ defer sh.Cleanup(os.Stderr, os.Stderr)
+ server, err := sh.Start(core.EchoServerCommand, nil, "--veyron.tcp.address=127.0.0.1:0", "mymessage", "")
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ session := expect.NewSession(t, server.Stdout(), time.Minute)
+ session.ReadLine()
+ serverName := session.ExpectVar("NAME")
+ serverEP, _ := naming.SplitAddressName(serverName)
+ ep, _ := inaming.NewEndpoint(serverEP)
+
+ makeCall := func(ctx *context.T, opts ...ipc.CallOpt) (string, error) {
+ ctx, _ = context.WithDeadline(ctx, time.Now().Add(10*time.Second))
+ call, err := v23.GetClient(ctx).StartCall(ctx, serverName, "Echo", []interface{}{"bratman"}, opts...)
+ if err != nil {
+ return "", fmt.Errorf("START: %s", err)
+ }
+ var result string
+ if err := call.Finish(&result); err != nil {
+ return "", err
+ }
+ return result, nil
+ }
+
+ expected := "mymessage: bratman\n"
+ if result, err := makeCall(ctx); err != nil || result != expected {
+ t.Errorf("Got (%q, %v) want (%q, nil)", result, err, expected)
+ }
+ // Kill the server, verify client can't talk to it anymore.
+ sh.SetWaitTimeout(time.Minute)
+ if err := server.Shutdown(os.Stderr, os.Stderr); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ if _, err := makeCall(ctx, options.NoRetry{}); err == nil || (!strings.HasPrefix(err.Error(), "START") && !strings.Contains(err.Error(), "EOF")) {
+ t.Fatalf(`Got (%v) want ("START: <err>" or "EOF") as server is down`, err)
+ }
+
+ // Resurrect the server with the same address, verify client
+ // re-establishes the connection. This is racy if another
+ // process grabs the port.
+ server, err = sh.Start(core.EchoServerCommand, nil, "--veyron.tcp.address="+ep.Address, "mymessage again", "")
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ session = expect.NewSession(t, server.Stdout(), time.Minute)
+ defer server.Shutdown(os.Stderr, os.Stderr)
+ expected = "mymessage again: bratman\n"
+ if result, err := makeCall(ctx); err != nil || result != expected {
+ t.Errorf("Got (%q, %v) want (%q, nil)", result, err, expected)
+ }
+}
+
+// TODO(cnicolaou): tests for:
+// -- Test for bad discharges error and correct invalidation, client.go:870..880
diff --git a/profiles/internal/ipc/consts.go b/profiles/internal/ipc/consts.go
new file mode 100644
index 0000000..279ca52
--- /dev/null
+++ b/profiles/internal/ipc/consts.go
@@ -0,0 +1,16 @@
+package ipc
+
+import "time"
+
+const (
+ // The publisher re-mounts on this period.
+ publishPeriod = time.Minute
+
+ // The server uses this timeout for incoming calls before the real timeout is known.
+ // The client uses this as the default max time for connecting to the server including
+ // name resolution.
+ defaultCallTimeout = time.Minute
+
+ // The client uses this as the maximum time between retry attempts when starting a call.
+ maxBackoff = time.Minute
+)
diff --git a/profiles/internal/ipc/debug_test.go b/profiles/internal/ipc/debug_test.go
new file mode 100644
index 0000000..71f21b7
--- /dev/null
+++ b/profiles/internal/ipc/debug_test.go
@@ -0,0 +1,134 @@
+package ipc
+
+import (
+ "io"
+ "reflect"
+ "sort"
+ "testing"
+
+ "v.io/v23/ipc"
+ "v.io/v23/naming"
+ "v.io/v23/options"
+ "v.io/x/lib/vlog"
+
+ "v.io/x/ref/lib/stats"
+ tsecurity "v.io/x/ref/lib/testutil/security"
+ "v.io/x/ref/profiles/internal/ipc/stream/manager"
+ "v.io/x/ref/profiles/internal/ipc/stream/vc"
+ tnaming "v.io/x/ref/profiles/internal/testing/mocks/naming"
+ "v.io/x/ref/services/mgmt/debug"
+)
+
+func TestDebugServer(t *testing.T) {
+ // Set up the client and server principals, with the client willing to share its
+ // blessing with the server.
+ var (
+ pclient = tsecurity.NewPrincipal("client")
+ pserver = tsecurity.NewPrincipal("server")
+ bclient = bless(pserver, pclient, "client") // server/client blessing.
+ )
+ pclient.AddToRoots(bclient) // Client recognizes "server" as a root of blessings.
+ pclient.BlessingStore().Set(bclient, "server") // Client presents bclient to server
+
+ debugDisp := debug.NewDispatcher(vlog.Log.LogDir, nil)
+
+ sm := manager.InternalNew(naming.FixedRoutingID(0x555555555))
+ defer sm.Shutdown()
+ ns := tnaming.NewSimpleNamespace()
+ ctx := testContext()
+ server, err := testInternalNewServer(ctx, sm, ns, ReservedNameDispatcher{debugDisp}, vc.LocalPrincipal{pserver})
+ if err != nil {
+ t.Fatalf("InternalNewServer failed: %v", err)
+ }
+ defer server.Stop()
+ eps, err := server.Listen(listenSpec)
+ if err != nil {
+ t.Fatalf("server.Listen failed: %v", err)
+ }
+ if err := server.Serve("", &testObject{}, nil); err != nil {
+ t.Fatalf("server.Serve failed: %v", err)
+ }
+ client, err := InternalNewClient(sm, ns, vc.LocalPrincipal{pclient})
+ if err != nil {
+ t.Fatalf("InternalNewClient failed: %v", err)
+ }
+ defer client.Close()
+ ep := eps[0]
+ // Call the Foo method on ""
+ {
+ call, err := client.StartCall(ctx, ep.Name(), "Foo", nil)
+ if err != nil {
+ t.Fatalf("client.StartCall failed: %v", err)
+ }
+ var value string
+ if err := call.Finish(&value); err != nil {
+ t.Fatalf("call.Finish failed: %v", err)
+ }
+ if want := "BAR"; value != want {
+ t.Errorf("unexpected value: Got %v, want %v", value, want)
+ }
+ }
+ // Call Value on __debug/stats/testing/foo
+ {
+ foo := stats.NewString("testing/foo")
+ foo.Set("The quick brown fox jumps over the lazy dog")
+ addr := naming.JoinAddressName(ep.String(), "__debug/stats/testing/foo")
+ call, err := client.StartCall(ctx, addr, "Value", nil, options.NoResolve{})
+ if err != nil {
+ t.Fatalf("client.StartCall failed: %v", err)
+ }
+ var value string
+ if err := call.Finish(&value); err != nil {
+ t.Fatalf("call.Finish failed: %v", err)
+ }
+ if want := foo.Value(); value != want {
+ t.Errorf("unexpected result: Got %v, want %v", value, want)
+ }
+ }
+
+ // Call Glob
+ testcases := []struct {
+ name, pattern string
+ expected []string
+ }{
+ {"", "*", []string{}},
+ {"", "__*", []string{"__debug"}},
+ {"", "__*/*", []string{"__debug/logs", "__debug/pprof", "__debug/stats", "__debug/vtrace"}},
+ {"__debug", "*", []string{"logs", "pprof", "stats", "vtrace"}},
+ }
+ for _, tc := range testcases {
+ addr := naming.JoinAddressName(ep.String(), tc.name)
+ call, err := client.StartCall(ctx, addr, ipc.GlobMethod, []interface{}{tc.pattern}, options.NoResolve{})
+ if err != nil {
+ t.Fatalf("client.StartCall failed for %q: %v", tc.name, err)
+ }
+ results := []string{}
+ for {
+ var gr naming.VDLGlobReply
+ if err := call.Recv(&gr); err != nil {
+ if err != io.EOF {
+ t.Fatalf("Recv failed for %q: %v. Results received thus far: %q", tc.name, err, results)
+ }
+ break
+ }
+ switch v := gr.(type) {
+ case naming.VDLGlobReplyEntry:
+ results = append(results, v.Value.Name)
+ }
+ }
+ if err := call.Finish(); err != nil {
+ t.Fatalf("call.Finish failed for %q: %v", tc.name, err)
+ }
+ sort.Strings(results)
+ if !reflect.DeepEqual(tc.expected, results) {
+ t.Errorf("unexpected results for %q. Got %v, want %v", tc.name, results, tc.expected)
+ }
+ }
+}
+
+type testObject struct {
+}
+
+func (o testObject) Foo(ipc.ServerCall) (string, error) {
+ return "BAR", nil
+}
diff --git a/profiles/internal/ipc/default_authorizer.go b/profiles/internal/ipc/default_authorizer.go
new file mode 100644
index 0000000..eb49874
--- /dev/null
+++ b/profiles/internal/ipc/default_authorizer.go
@@ -0,0 +1,34 @@
+package ipc
+
+import (
+ "v.io/v23/security"
+)
+
+// defaultAuthorizer implements a security.Authorizer with an authorization
+// policy that requires one end of the RPC to have a blessing that makes it a
+// delegate of the other.
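+// For example, a client with blessing "ali" may talk to a server presenting
+// the blessing "ali/friend" (and vice versa), since one is a delegate of the
+// other.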
+type defaultAuthorizer struct{}
+
+func (defaultAuthorizer) Authorize(ctx security.Call) error {
+ var (
+ localForCall, localErr = ctx.LocalBlessings().ForCall(ctx)
+ remote = ctx.RemoteBlessings()
+ remoteForCall, remoteErr = remote.ForCall(ctx)
+ )
+ // Authorize if any element in localForCall is a "delegate of" (i.e., has been
+ // blessed by) any element in remote, OR vice-versa.
+ for _, l := range localForCall {
+ if security.BlessingPattern(l).MatchedBy(remoteForCall...) {
+ // l is a delegate of an element in remote.
+ return nil
+ }
+ }
+ for _, r := range remoteForCall {
+ if security.BlessingPattern(r).MatchedBy(localForCall...) {
+ // r is a delegate of an element in localForCall.
+ return nil
+ }
+ }
+
+ return NewErrInvalidBlessings(nil, remoteForCall, remoteErr, localForCall, localErr)
+}
diff --git a/profiles/internal/ipc/default_authorizer_test.go b/profiles/internal/ipc/default_authorizer_test.go
new file mode 100644
index 0000000..c018f27
--- /dev/null
+++ b/profiles/internal/ipc/default_authorizer_test.go
@@ -0,0 +1,90 @@
+package ipc
+
+import (
+ "testing"
+
+ "v.io/v23"
+ "v.io/v23/security"
+ tsecurity "v.io/x/ref/lib/testutil/security"
+)
+
+func TestDefaultAuthorizer(t *testing.T) {
+ var (
+ pali = tsecurity.NewPrincipal()
+ pbob = tsecurity.NewPrincipal()
+ pche = tsecurity.NewPrincipal()
+
+ che, _ = pche.BlessSelf("che")
+ ali, _ = pali.BlessSelf("ali")
+ bob, _ = pbob.BlessSelf("bob")
+
+ // bless(ali, bob, "friend") will generate a blessing for ali, calling him "bob/friend".
+ bless = func(target, extend security.Blessings, extension string) security.Blessings {
+ var p security.Principal
+ switch extend.PublicKey() {
+ case ali.PublicKey():
+ p = pali
+ case bob.PublicKey():
+ p = pbob
+ case che.PublicKey():
+ p = pche
+ default:
+ panic(extend)
+ }
+ ret, err := p.Bless(target.PublicKey(), extend, extension, security.UnconstrainedUse())
+ if err != nil {
+ panic(err)
+ }
+ return ret
+ }
+
+ U = func(blessings ...security.Blessings) security.Blessings {
+ u, err := security.UnionOfBlessings(blessings...)
+ if err != nil {
+ panic(err)
+ }
+ return u
+ }
+
+ // Shorthands for getting blessings for Ali and Bob.
+ A = func(as security.Blessings, extension string) security.Blessings { return bless(ali, as, extension) }
+ B = func(as security.Blessings, extension string) security.Blessings { return bless(bob, as, extension) }
+
+ authorizer defaultAuthorizer
+ )
+ // Make ali, bob (the two ends) recognize all three blessings
+ for ip, p := range []security.Principal{pali, pbob} {
+ for _, b := range []security.Blessings{ali, bob, che} {
+ if err := p.AddToRoots(b); err != nil {
+ t.Fatalf("%d: %v - %v", ip, b, err)
+ }
+ }
+ }
+ // All tests are run as if "ali" is the local end and "bob" is the remote.
+ tests := []struct {
+ local, remote security.Blessings
+ authorized bool
+ }{
+ {ali, ali, true},
+ {ali, bob, false},
+ {ali, B(ali, "friend"), true}, // ali talking to ali/friend
+ {A(bob, "friend"), bob, true}, // bob/friend talking to bob
+ {A(che, "friend"), B(che, "family"), false}, // che/friend talking to che/family
+ {U(ali, A(bob, "friend"), A(che, "friend")),
+ U(bob, B(che, "family")),
+ true}, // {ali, bob/friend, che/friend} talking to {bob, che/family}
+ }
+ ctx, shutdown := v23.Init()
+ defer shutdown()
+ for _, test := range tests {
+ err := authorizer.Authorize(&mockSecurityContext{
+ p: pali,
+ l: test.local,
+ r: test.remote,
+ c: ctx,
+ })
+ if (err == nil) != test.authorized {
+ t.Errorf("Local:%v Remote:%v. Got %v", test.local, test.remote, err)
+ }
+ }
+}
diff --git a/profiles/internal/ipc/discharges.go b/profiles/internal/ipc/discharges.go
new file mode 100644
index 0000000..88f25f7
--- /dev/null
+++ b/profiles/internal/ipc/discharges.go
@@ -0,0 +1,245 @@
+package ipc
+
+import (
+ "sync"
+ "time"
+
+ "v.io/x/ref/profiles/internal/ipc/stream/vc"
+
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/security"
+ "v.io/v23/vdl"
+ "v.io/v23/vtrace"
+ "v.io/x/lib/vlog"
+)
+
+// NoDischarges specifies that the RPC call should not fetch discharges.
+type NoDischarges struct{}
+
+func (NoDischarges) IPCCallOpt() {}
+func (NoDischarges) NSResolveOpt() {}
+
+// dischargeClient implements vc.DischargeClient.
+type dischargeClient struct {
+ c ipc.Client
+ defaultCtx *context.T
+ cache dischargeCache
+}
+
+// TODO(suharshs): Should we make this configurable?
+// We make this shorter than the vc DischargeExpiryBuffer to ensure the discharges
+// are fetched when the VC needs them.
+const dischargeExpiryBuffer = vc.DischargeExpiryBuffer - (5 * time.Second)
+
+// InternalNewDischargeClient creates a vc.DischargeClient that will be used to
+// fetch discharges to support blessings presented to a remote process.
+//
+// defaultCtx is the context used when none (nil) is explicitly provided to the
+// PrepareDischarges call. This typically happens when fetching discharges on
+// behalf of a server accepting connections, i.e., before any notion of the
+// "context" of an API call has been established.
+func InternalNewDischargeClient(defaultCtx *context.T, client ipc.Client) vc.DischargeClient {
+ return &dischargeClient{
+ c: client,
+ defaultCtx: defaultCtx,
+ cache: dischargeCache{cache: make(map[string]security.Discharge)},
+ }
+}
+
+func (*dischargeClient) IPCStreamListenerOpt() {}
+func (*dischargeClient) IPCStreamVCOpt() {}
+
+// PrepareDischarges retrieves the caveat discharges required for using blessings
+// at server. The discharges are either found in the dischargeCache, in the call
+// options, or requested from the discharge issuer indicated on the caveat.
+// Note that requesting a discharge is an ipc call, so one copy of this
+// function must be able to successfully terminate while another is blocked.
+func (d *dischargeClient) PrepareDischarges(ctx *context.T, forcaveats []security.Caveat, impetus security.DischargeImpetus) (ret []security.Discharge) {
+ if len(forcaveats) == 0 {
+ return
+ }
+ // Make a copy since this copy will be mutated.
+ var caveats []security.Caveat
+ for _, cav := range forcaveats {
+ // It shouldn't happen, but in case there are non-third-party
+ // caveats, drop them.
+ if tp := cav.ThirdPartyDetails(); tp != nil {
+ caveats = append(caveats, cav)
+ }
+ }
+
+ // Gather discharges from cache.
+ // (Collect a set of pointers, where nil implies a missing discharge)
+ discharges := make([]*security.Discharge, len(caveats))
+ if d.cache.Discharges(caveats, discharges) > 0 {
+ // Fetch discharges for caveats for which no discharges were
+ // found in the cache.
+ if ctx == nil {
+ ctx = d.defaultCtx
+ }
+ if ctx != nil {
+ var span vtrace.Span
+ ctx, span = vtrace.SetNewSpan(ctx, "Fetching Discharges")
+ defer span.Finish()
+ }
+ d.fetchDischarges(ctx, caveats, impetus, discharges)
+ }
+ for _, d := range discharges {
+ if d != nil {
+ ret = append(ret, *d)
+ }
+ }
+ return
+}
+func (d *dischargeClient) Invalidate(discharges ...security.Discharge) {
+ d.cache.invalidate(discharges...)
+}
+
+// fetchDischarges fills out the 'out' slice by fetching discharges for the
+// caveats from the appropriate discharge services. Since there may be
+// dependencies in the caveats, fetchDischarges keeps retrying until either
+// all discharges can be fetched or no new discharges are fetched.
+// REQUIRES: len(caveats) == len(out)
+// REQUIRES: caveats[i].ThirdPartyDetails() != nil for 0 <= i < len(caveats)
+func (d *dischargeClient) fetchDischarges(ctx *context.T, caveats []security.Caveat, impetus security.DischargeImpetus, out []*security.Discharge) {
+ var wg sync.WaitGroup
+ for {
+ type fetched struct {
+ idx int
+ discharge *security.Discharge
+ }
+ discharges := make(chan fetched, len(caveats))
+ want := 0
+ for i := range caveats {
+ if out[i] != nil {
+ // Already fetched
+ continue
+ }
+ want++
+ wg.Add(1)
+ go func(i int, ctx *context.T, cav security.Caveat) {
+ defer wg.Done()
+ tp := cav.ThirdPartyDetails()
+ vlog.VI(3).Infof("Fetching discharge for %v", tp)
+ call, err := d.c.StartCall(ctx, tp.Location(), "Discharge", []interface{}{cav, filteredImpetus(tp.Requirements(), impetus)}, NoDischarges{})
+ if err != nil {
+ vlog.VI(3).Infof("Discharge fetch for %v failed: %v", tp, err)
+ return
+ }
+ var wire security.WireDischarge
+ if err := call.Finish(&wire); err != nil {
+ vlog.VI(3).Infof("Discharge fetch for %v failed: (%v)", cav, err)
+ return
+ }
+ d := security.NewDischarge(wire)
+ discharges <- fetched{i, &d}
+ }(i, ctx, caveats[i])
+ }
+ wg.Wait()
+ close(discharges)
+ var got int
+ for fetched := range discharges {
+ d.cache.Add(*fetched.discharge)
+ out[fetched.idx] = fetched.discharge
+ got++
+ }
+ if want > 0 {
+ vlog.VI(3).Infof("fetchDischarges: got %d of %d discharge(s) (total %d caveats)", got, want, len(caveats))
+ }
+ if got == 0 || got == want {
+ return
+ }
+ }
+}
+
+// dischargeCache is a concurrency-safe cache for third party caveat discharges.
+// TODO(suharshs,ataly,ashankar): This should be keyed by filtered impetus as well.
+type dischargeCache struct {
+ mu sync.RWMutex
+ cache map[string]security.Discharge // GUARDED_BY(mu)
+}
+
+// Add inserts the argument to the cache, possibly overwriting previous
+// discharges for the same caveat.
+func (dcc *dischargeCache) Add(discharges ...security.Discharge) {
+ dcc.mu.Lock()
+ for _, d := range discharges {
+ dcc.cache[d.ID()] = d
+ }
+ dcc.mu.Unlock()
+}
+
+// Discharges takes a slice of caveats and a slice of discharges of the same
+// length and fills in nil entries in the discharges slice with pointers to
+// cached discharges (if there are any), returning the number of entries left nil.
+//
+// REQUIRES: len(caveats) == len(out)
+// REQUIRES: caveats[i].ThirdPartyDetails() != nil, for all 0 <= i < len(caveats)
+func (dcc *dischargeCache) Discharges(caveats []security.Caveat, out []*security.Discharge) (remaining int) {
+ dcc.mu.Lock()
+ for i, d := range out {
+ if d != nil {
+ continue
+ }
+ if cached, exists := dcc.cache[caveats[i].ThirdPartyDetails().ID()]; exists {
+ out[i] = &cached
+ // If the discharge has expired, purge it from the cache.
+ if !isDischargeUsable(out[i]) {
+ out[i] = nil
+ delete(dcc.cache, cached.ID())
+ remaining++
+ }
+ } else {
+ remaining++
+ }
+ }
+ dcc.mu.Unlock()
+ return
+}
+
+// TODO(suharshs): Have PrepareDischarges try to fetch fresh discharges for the
+// discharges that are about to expire, but if they fail then return what is in the cache.
+func isDischargeUsable(dis *security.Discharge) bool {
+ expiry := dis.Expiry()
+ if expiry.IsZero() {
+ return true
+ }
+ return expiry.After(time.Now().Add(dischargeExpiryBuffer))
+}
+
+func (dcc *dischargeCache) invalidate(discharges ...security.Discharge) {
+ dcc.mu.Lock()
+ for _, d := range discharges {
+ // TODO(ashankar,ataly): The cached discharge might have been
+ // replaced by the time invalidate is called.
+ // Should we have an "Equals" function defined on "Discharge"
+ // and use that? (Could use reflect.DeepEqual as well, but
+ // that will likely be expensive)
+ // if cached := dcc.cache[d.ID()]; cached.Equals(d) {
+ // delete(dcc.cache, d.ID())
+ // }
+ delete(dcc.cache, d.ID())
+ }
+ dcc.mu.Unlock()
+}
+
+// filteredImpetus returns a copy of 'before' after removing any values that are not required as per 'r'.
+func filteredImpetus(r security.ThirdPartyRequirements, before security.DischargeImpetus) (after security.DischargeImpetus) {
+ if r.ReportServer && len(before.Server) > 0 {
+ after.Server = make([]security.BlessingPattern, len(before.Server))
+ for i := range before.Server {
+ after.Server[i] = before.Server[i]
+ }
+ }
+ if r.ReportMethod {
+ after.Method = before.Method
+ }
+ if r.ReportArguments && len(before.Arguments) > 0 {
+ after.Arguments = make([]*vdl.Value, len(before.Arguments))
+ for i := range before.Arguments {
+ after.Arguments[i] = vdl.CopyValue(before.Arguments[i])
+ }
+ }
+ return
+}
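Note: to make the cache-then-fetch flow in the file above easier to review, here is a minimal, self-contained sketch of the same pattern. All names in it (cachedDischarge, cache, fetch) are hypothetical stand-ins for illustration only; they are not part of the v.io API or of this change. Entries are looked up by caveat ID, entries inside the expiry buffer are purged on lookup, and only the misses are fetched and re-inserted.

// Simplified sketch of the discharge-cache pattern used above.
// All types here are hypothetical stand-ins, not the v.io/v23/security API.
package main

import (
	"fmt"
	"sync"
	"time"
)

type cachedDischarge struct {
	id     string
	expiry time.Time
}

type cache struct {
	mu      sync.Mutex
	entries map[string]cachedDischarge
	buffer  time.Duration // analogous to dischargeExpiryBuffer
}

// get returns the cached entry unless it is missing or about to expire,
// purging stale entries as a side effect (mirrors dischargeCache.Discharges).
func (c *cache) get(id string) (cachedDischarge, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	d, ok := c.entries[id]
	if !ok {
		return cachedDischarge{}, false
	}
	if !d.expiry.IsZero() && !d.expiry.After(time.Now().Add(c.buffer)) {
		delete(c.entries, id) // expired or about to expire: purge and report a miss
		return cachedDischarge{}, false
	}
	return d, true
}

func (c *cache) put(d cachedDischarge) {
	c.mu.Lock()
	c.entries[d.id] = d
	c.mu.Unlock()
}

func main() {
	c := &cache{entries: map[string]cachedDischarge{}, buffer: time.Minute}
	fetch := func(id string) cachedDischarge { // stand-in for fetchDischarges
		return cachedDischarge{id: id, expiry: time.Now().Add(time.Hour)}
	}
	for _, id := range []string{"tpcav1", "tpcav1", "tpcav2"} {
		if d, ok := c.get(id); ok {
			fmt.Println("cache hit:", d.id)
			continue
		}
		d := fetch(id)
		c.put(d)
		fmt.Println("fetched:", d.id)
	}
}

The real implementation above additionally keeps retrying fetchDischarges until no new discharges arrive, because discharging one caveat may unblock another.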
diff --git a/profiles/internal/ipc/errors.vdl b/profiles/internal/ipc/errors.vdl
new file mode 100644
index 0000000..e3b55e3
--- /dev/null
+++ b/profiles/internal/ipc/errors.vdl
@@ -0,0 +1,32 @@
+package ipc
+
+import "v.io/v23/security"
+
+error (
+ InvalidBlessings(remote []string, remoteErr []security.RejectedBlessing, local []string, localErr []security.RejectedBlessing) {
+ "en":"All valid blessings for this request: {remote} (rejected {remoteErr}) are disallowed by the policy {local} (rejected {localErr})",
+ }
+
+ // Internal errors.
+ badRequest(err error) {
+ "en": "failed to decode request: {err}",
+ }
+ badNumInputArgs(suffix, method string, numCalled, numWanted uint64) {
+ "en": "wrong number of input arguments for {suffix}.{method} (called with {numCalled} args, want {numWanted})",
+ }
+ badInputArg(suffix, method string, index uint64, err error) {
+ "en": "failed to decode request {suffix}.{method} arg #{index}: {err}",
+ }
+ badBlessings(err error) {
+ "en": "failed to decode blessings: {err}",
+ }
+ badBlessingsCache(err error) {
+ "en": "failed to find blessings in cache: {err}",
+ }
+ badDischarge(index uint64, err error) {
+ "en": "failed to decode discharge #{index}: {err}",
+ }
+ badAuth(suffix, method string, err error) {
+ "en": "not authorized to call {suffix}.{method}: {err}",
+ }
+)
diff --git a/profiles/internal/ipc/errors.vdl.go b/profiles/internal/ipc/errors.vdl.go
new file mode 100644
index 0000000..0488814
--- /dev/null
+++ b/profiles/internal/ipc/errors.vdl.go
@@ -0,0 +1,77 @@
+// This file was auto-generated by the veyron vdl tool.
+// Source: errors.vdl
+
+package ipc
+
+import (
+ // VDL system imports
+ "v.io/v23/context"
+ "v.io/v23/i18n"
+ "v.io/v23/verror"
+
+ // VDL user imports
+ "v.io/v23/security"
+)
+
+var (
+ ErrInvalidBlessings = verror.Register("v.io/x/ref/profiles/internal/ipc.InvalidBlessings", verror.NoRetry, "{1:}{2:} All valid blessings for this request: {3} (rejected {4}) are disallowed by the policy {5} (rejected {6})")
+ // Internal errors.
+ errBadRequest = verror.Register("v.io/x/ref/profiles/internal/ipc.badRequest", verror.NoRetry, "{1:}{2:} failed to decode request: {3}")
+ errBadNumInputArgs = verror.Register("v.io/x/ref/profiles/internal/ipc.badNumInputArgs", verror.NoRetry, "{1:}{2:} wrong number of input arguments for {3}.{4} (called with {5} args, want {6})")
+ errBadInputArg = verror.Register("v.io/x/ref/profiles/internal/ipc.badInputArg", verror.NoRetry, "{1:}{2:} failed to decode request {3}.{4} arg #{5}: {6}")
+ errBadBlessings = verror.Register("v.io/x/ref/profiles/internal/ipc.badBlessings", verror.NoRetry, "{1:}{2:} failed to decode blessings: {3}")
+ errBadBlessingsCache = verror.Register("v.io/x/ref/profiles/internal/ipc.badBlessingsCache", verror.NoRetry, "{1:}{2:} failed to find blessings in cache: {3}")
+ errBadDischarge = verror.Register("v.io/x/ref/profiles/internal/ipc.badDischarge", verror.NoRetry, "{1:}{2:} failed to decode discharge #{3}: {4}")
+ errBadAuth = verror.Register("v.io/x/ref/profiles/internal/ipc.badAuth", verror.NoRetry, "{1:}{2:} not authorized to call {3}.{4}: {5}")
+)
+
+func init() {
+ i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(ErrInvalidBlessings.ID), "{1:}{2:} All valid blessings for this request: {3} (rejected {4}) are disallowed by the policy {5} (rejected {6})")
+ i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(errBadRequest.ID), "{1:}{2:} failed to decode request: {3}")
+ i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(errBadNumInputArgs.ID), "{1:}{2:} wrong number of input arguments for {3}.{4} (called with {5} args, want {6})")
+ i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(errBadInputArg.ID), "{1:}{2:} failed to decode request {3}.{4} arg #{5}: {6}")
+ i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(errBadBlessings.ID), "{1:}{2:} failed to decode blessings: {3}")
+ i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(errBadBlessingsCache.ID), "{1:}{2:} failed to find blessings in cache: {3}")
+ i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(errBadDischarge.ID), "{1:}{2:} failed to decode discharge #{3}: {4}")
+ i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(errBadAuth.ID), "{1:}{2:} not authorized to call {3}.{4}: {5}")
+}
+
+// NewErrInvalidBlessings returns an error with the ErrInvalidBlessings ID.
+func NewErrInvalidBlessings(ctx *context.T, remote []string, remoteErr []security.RejectedBlessing, local []string, localErr []security.RejectedBlessing) error {
+ return verror.New(ErrInvalidBlessings, ctx, remote, remoteErr, local, localErr)
+}
+
+// newErrBadRequest returns an error with the errBadRequest ID.
+func newErrBadRequest(ctx *context.T, err error) error {
+ return verror.New(errBadRequest, ctx, err)
+}
+
+// newErrBadNumInputArgs returns an error with the errBadNumInputArgs ID.
+func newErrBadNumInputArgs(ctx *context.T, suffix string, method string, numCalled uint64, numWanted uint64) error {
+ return verror.New(errBadNumInputArgs, ctx, suffix, method, numCalled, numWanted)
+}
+
+// newErrBadInputArg returns an error with the errBadInputArg ID.
+func newErrBadInputArg(ctx *context.T, suffix string, method string, index uint64, err error) error {
+ return verror.New(errBadInputArg, ctx, suffix, method, index, err)
+}
+
+// newErrBadBlessings returns an error with the errBadBlessings ID.
+func newErrBadBlessings(ctx *context.T, err error) error {
+ return verror.New(errBadBlessings, ctx, err)
+}
+
+// newErrBadBlessingsCache returns an error with the errBadBlessingsCache ID.
+func newErrBadBlessingsCache(ctx *context.T, err error) error {
+ return verror.New(errBadBlessingsCache, ctx, err)
+}
+
+// newErrBadDischarge returns an error with the errBadDischarge ID.
+func newErrBadDischarge(ctx *context.T, index uint64, err error) error {
+ return verror.New(errBadDischarge, ctx, index, err)
+}
+
+// newErrBadAuth returns an error with the errBadAuth ID.
+func newErrBadAuth(ctx *context.T, suffix string, method string, err error) error {
+ return verror.New(errBadAuth, ctx, suffix, method, err)
+}
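Note: the generated constructors above are used uniformly throughout this package: the call site supplies a *context.T plus the failure details and gets back a verror with the registered ID. A hypothetical caller (not part of this change) might look like the following fragment of package ipc:

// checkNumArgs is a hypothetical helper, shown only to illustrate how the
// generated constructors above are meant to be called; it is not part of
// this change.
func checkNumArgs(ctx *context.T, suffix, method string, numCalled, numWanted uint64) error {
	if numCalled != numWanted {
		return newErrBadNumInputArgs(ctx, suffix, method, numCalled, numWanted)
	}
	return nil
}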
diff --git a/profiles/internal/ipc/full_test.go b/profiles/internal/ipc/full_test.go
new file mode 100644
index 0000000..e92a5fc
--- /dev/null
+++ b/profiles/internal/ipc/full_test.go
@@ -0,0 +1,1838 @@
+package ipc
+
+import (
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "sort"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/naming"
+ "v.io/v23/naming/ns"
+ "v.io/v23/options"
+ "v.io/v23/security"
+ "v.io/v23/services/security/access"
+ "v.io/v23/uniqueid"
+ "v.io/v23/vdl"
+ "v.io/v23/verror"
+ "v.io/v23/vtrace"
+ "v.io/x/lib/vlog"
+ "v.io/x/ref/profiles/internal/ipc/stream"
+
+ "v.io/x/ref/lib/flags"
+ "v.io/x/ref/lib/netstate"
+ "v.io/x/ref/lib/publisher"
+ "v.io/x/ref/lib/stats"
+ "v.io/x/ref/lib/testutil"
+ tsecurity "v.io/x/ref/lib/testutil/security"
+ _ "v.io/x/ref/profiles/internal/ipc/protocols/tcp"
+ imanager "v.io/x/ref/profiles/internal/ipc/stream/manager"
+ "v.io/x/ref/profiles/internal/ipc/stream/vc"
+ inaming "v.io/x/ref/profiles/internal/naming"
+ tnaming "v.io/x/ref/profiles/internal/testing/mocks/naming"
+ ivtrace "v.io/x/ref/profiles/internal/vtrace"
+)
+
+//go:generate v23 test generate
+
+var (
+ errMethod = verror.New(verror.ErrAborted, nil)
+ clock = new(fakeClock)
+ listenAddrs = ipc.ListenAddrs{{"tcp", "127.0.0.1:0"}}
+ listenWSAddrs = ipc.ListenAddrs{{"ws", "127.0.0.1:0"}, {"tcp", "127.0.0.1:0"}}
+ listenSpec = ipc.ListenSpec{Addrs: listenAddrs}
+ listenWSSpec = ipc.ListenSpec{Addrs: listenWSAddrs}
+)
+
+type fakeClock struct {
+ sync.Mutex
+ time int
+}
+
+func (c *fakeClock) Now() int {
+ c.Lock()
+ defer c.Unlock()
+ return c.time
+}
+
+func (c *fakeClock) Advance(steps uint) {
+ c.Lock()
+ c.time += int(steps)
+ c.Unlock()
+}
+
+func testContextWithoutDeadline() *context.T {
+ ctx, _ := context.RootContext()
+ ctx, err := ivtrace.Init(ctx, flags.VtraceFlags{})
+ if err != nil {
+ panic(err)
+ }
+ ctx, _ = vtrace.SetNewTrace(ctx)
+ return ctx
+}
+
+func testInternalNewServer(ctx *context.T, streamMgr stream.Manager, ns ns.Namespace, opts ...ipc.ServerOpt) (ipc.Server, error) {
+ client, err := InternalNewClient(streamMgr, ns)
+ if err != nil {
+ return nil, err
+ }
+ return InternalNewServer(ctx, streamMgr, ns, client, opts...)
+}
+
+type userType string
+
+type testServer struct{}
+
+func (*testServer) Closure(ctx ipc.ServerCall) error {
+ return nil
+}
+
+func (*testServer) Error(ctx ipc.ServerCall) error {
+ return errMethod
+}
+
+func (*testServer) Echo(ctx ipc.ServerCall, arg string) (string, error) {
+ return fmt.Sprintf("method:%q,suffix:%q,arg:%q", ctx.Method(), ctx.Suffix(), arg), nil
+}
+
+func (*testServer) EchoUser(ctx ipc.ServerCall, arg string, u userType) (string, userType, error) {
+ return fmt.Sprintf("method:%q,suffix:%q,arg:%q", ctx.Method(), ctx.Suffix(), arg), u, nil
+}
+
+func (*testServer) EchoBlessings(ctx ipc.ServerCall) (server, client string, _ error) {
+ local, _ := ctx.LocalBlessings().ForCall(ctx)
+ remote, _ := ctx.RemoteBlessings().ForCall(ctx)
+ return fmt.Sprintf("%v", local), fmt.Sprintf("%v", remote), nil
+}
+
+func (*testServer) EchoGrantedBlessings(ctx ipc.ServerCall, arg string) (result, blessing string, _ error) {
+ return arg, fmt.Sprintf("%v", ctx.GrantedBlessings()), nil
+}
+
+func (*testServer) EchoAndError(ctx ipc.ServerCall, arg string) (string, error) {
+ result := fmt.Sprintf("method:%q,suffix:%q,arg:%q", ctx.Method(), ctx.Suffix(), arg)
+ if arg == "error" {
+ return result, errMethod
+ }
+ return result, nil
+}
+
+func (*testServer) Stream(call ipc.StreamServerCall, arg string) (string, error) {
+ result := fmt.Sprintf("method:%q,suffix:%q,arg:%q", call.Method(), call.Suffix(), arg)
+ var u userType
+ var err error
+ for err = call.Recv(&u); err == nil; err = call.Recv(&u) {
+ result += " " + string(u)
+ if err := call.Send(u); err != nil {
+ return "", err
+ }
+ }
+ if err == io.EOF {
+ err = nil
+ }
+ return result, err
+}
+
+func (*testServer) Unauthorized(ipc.StreamServerCall) (string, error) {
+ return "UnauthorizedResult", nil
+}
+
+type testServerAuthorizer struct{}
+
+func (testServerAuthorizer) Authorize(c security.Call) error {
+ if c.Method() != "Unauthorized" {
+ return nil
+ }
+ return fmt.Errorf("testServerAuthorizer denied access")
+}
+
+type testServerDisp struct{ server interface{} }
+
+func (t testServerDisp) Lookup(suffix string) (interface{}, security.Authorizer, error) {
+ // If suffix is "nilAuth" we use default authorization; if it is "aclAuth" we
+ // use an ACL-based authorizer; otherwise we use the custom testServerAuthorizer.
+ var authorizer security.Authorizer
+ switch suffix {
+ case "discharger":
+ return &dischargeServer{}, testServerAuthorizer{}, nil
+ case "nilAuth":
+ authorizer = nil
+ case "aclAuth":
+ authorizer = &access.ACL{
+ In: []security.BlessingPattern{"client", "server"},
+ }
+ default:
+ authorizer = testServerAuthorizer{}
+ }
+ return t.server, authorizer, nil
+}
+
+type dischargeServer struct {
+ mu sync.Mutex
+ called bool
+}
+
+func (ds *dischargeServer) Discharge(ctx ipc.StreamServerCall, cav security.Caveat, _ security.DischargeImpetus) (security.WireDischarge, error) {
+ ds.mu.Lock()
+ ds.called = true
+ ds.mu.Unlock()
+ tp := cav.ThirdPartyDetails()
+ if tp == nil {
+ return nil, fmt.Errorf("discharger: %v does not represent a third-party caveat", cav)
+ }
+ if err := tp.Dischargeable(ctx); err != nil {
+ return nil, fmt.Errorf("third-party caveat %v cannot be discharged for this context: %v", cav, err)
+ }
+ // Add a fakeTimeCaveat to be able to control discharge expiration via 'clock'.
+ expiry, err := security.NewCaveat(fakeTimeCaveat, clock.Now())
+ if err != nil {
+ return nil, fmt.Errorf("failed to create an expiration on the discharge: %v", err)
+ }
+ d, err := ctx.LocalPrincipal().MintDischarge(cav, expiry)
+ if err != nil {
+ return nil, err
+ }
+ return security.MarshalDischarge(d), nil
+}
+
+func startServer(t *testing.T, principal security.Principal, sm stream.Manager, ns ns.Namespace, name string, disp ipc.Dispatcher, opts ...ipc.ServerOpt) (naming.Endpoint, ipc.Server) {
+ return startServerWS(t, principal, sm, ns, name, disp, noWebsocket, opts...)
+}
+
+func endpointsToStrings(eps []naming.Endpoint) []string {
+ r := make([]string, len(eps))
+ for i, e := range eps {
+ r[i] = e.String()
+ }
+ sort.Strings(r)
+ return r
+}
+
+func startServerWS(t *testing.T, principal security.Principal, sm stream.Manager, ns ns.Namespace, name string, disp ipc.Dispatcher, shouldUseWebsocket websocketMode, opts ...ipc.ServerOpt) (naming.Endpoint, ipc.Server) {
+ vlog.VI(1).Info("InternalNewServer")
+ opts = append(opts, vc.LocalPrincipal{principal})
+ ctx := testContext()
+ server, err := testInternalNewServer(ctx, sm, ns, opts...)
+ if err != nil {
+ t.Errorf("InternalNewServer failed: %v", err)
+ }
+ vlog.VI(1).Info("server.Listen")
+ spec := listenSpec
+ if shouldUseWebsocket {
+ spec = listenWSSpec
+ }
+ eps, err := server.Listen(spec)
+ if err != nil {
+ t.Errorf("server.Listen failed: %v", err)
+ }
+ vlog.VI(1).Info("server.Serve")
+ if err := server.ServeDispatcher(name, disp); err != nil {
+ t.Errorf("server.ServeDispatcher failed: %v", err)
+ }
+
+ status := server.Status()
+ if got, want := endpointsToStrings(status.Endpoints), endpointsToStrings(eps); !reflect.DeepEqual(got, want) {
+ t.Fatalf("got %v, want %v", got, want)
+ }
+ names := status.Mounts.Names()
+ if len(names) != 1 || names[0] != name {
+ t.Fatalf("unexpected names: %v", names)
+ }
+ return eps[0], server
+}
+
+func loc(d int) string {
+ _, file, line, _ := runtime.Caller(d + 1)
+ return fmt.Sprintf("%s:%d", filepath.Base(file), line)
+}
+
+func verifyMount(t *testing.T, ns ns.Namespace, name string) []string {
+ me, err := ns.Resolve(testContext(), name)
+ if err != nil {
+ t.Errorf("%s: %s not found in mounttable", loc(1), name)
+ return nil
+ }
+ return me.Names()
+}
+
+func verifyMountMissing(t *testing.T, ns ns.Namespace, name string) {
+ if me, err := ns.Resolve(testContext(), name); err == nil {
+ names := me.Names()
+ t.Errorf("%s: %s not supposed to be found in mounttable; got %d servers instead: %v (%+v)", loc(1), name, len(names), names, me)
+ }
+}
+
+func stopServer(t *testing.T, server ipc.Server, ns ns.Namespace, name string) {
+ vlog.VI(1).Info("server.Stop")
+ new_name := "should_appear_in_mt/server"
+ verifyMount(t, ns, name)
+
+ // publish a second name
+ if err := server.AddName(new_name); err != nil {
+ t.Errorf("server.Serve failed: %v", err)
+ }
+ verifyMount(t, ns, new_name)
+
+ if err := server.Stop(); err != nil {
+ t.Errorf("server.Stop failed: %v", err)
+ }
+
+ verifyMountMissing(t, ns, name)
+ verifyMountMissing(t, ns, new_name)
+
+ // Check that we can no longer serve after Stop.
+ err := server.AddName("name doesn't matter")
+ if err == nil || !verror.Is(err, verror.ErrBadState.ID) {
+ t.Errorf("either no error, or a wrong error was returned: %v", err)
+ }
+ vlog.VI(1).Info("server.Stop DONE")
+}
+
+// fakeWSName creates a name containing an endpoint address that forces
+// the use of websockets. It does so by resolving the original name
+// and choosing the 'ws' endpoint from the set of endpoints returned.
+// It must return a name since it'll be passed to StartCall.
+func fakeWSName(ns ns.Namespace, name string) (string, error) {
+ // Find the ws endpoint and use that.
+ me, err := ns.Resolve(testContext(), name)
+ if err != nil {
+ return "", err
+ }
+ names := me.Names()
+ for _, s := range names {
+ if strings.Index(s, "@ws@") != -1 {
+ return s, nil
+ }
+ }
+ return "", fmt.Errorf("No ws endpoint found %v", names)
+}
+
+type bundle struct {
+ client ipc.Client
+ server ipc.Server
+ ep naming.Endpoint
+ ns ns.Namespace
+ sm stream.Manager
+ name string
+}
+
+func (b bundle) cleanup(t *testing.T) {
+ if b.server != nil {
+ stopServer(t, b.server, b.ns, b.name)
+ }
+ if b.client != nil {
+ b.client.Close()
+ }
+}
+
+func createBundle(t *testing.T, client, server security.Principal, ts interface{}) (b bundle) {
+ return createBundleWS(t, client, server, ts, noWebsocket)
+}
+
+func createBundleWS(t *testing.T, client, server security.Principal, ts interface{}, shouldUseWebsocket websocketMode) (b bundle) {
+ b.sm = imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+ b.ns = tnaming.NewSimpleNamespace()
+ b.name = "mountpoint/server"
+ if server != nil {
+ b.ep, b.server = startServerWS(t, server, b.sm, b.ns, b.name, testServerDisp{ts}, shouldUseWebsocket)
+ }
+ if client != nil {
+ var err error
+ if b.client, err = InternalNewClient(b.sm, b.ns, vc.LocalPrincipal{client}); err != nil {
+ t.Fatalf("InternalNewClient failed: %v", err)
+ }
+ }
+ return
+}
+
+func matchesErrorPattern(err error, id verror.IDAction, pattern string) bool {
+ if len(pattern) > 0 && err != nil && strings.Index(err.Error(), pattern) < 0 {
+ return false
+ }
+ if err == nil && id.ID == "" {
+ return true
+ }
+ return verror.Is(err, id.ID)
+}
+
+func runServer(t *testing.T, ns ns.Namespace, name string, obj interface{}, opts ...ipc.ServerOpt) stream.Manager {
+ rid, err := naming.NewRoutingID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ sm := imanager.InternalNew(rid)
+ server, err := testInternalNewServer(testContext(), sm, ns, opts...)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := server.Listen(listenSpec); err != nil {
+ t.Fatal(err)
+ }
+ if err := server.Serve(name, obj, acceptAllAuthorizer{}); err != nil {
+ t.Fatal(err)
+ }
+ return sm
+}
+
+func TestMultipleCallsToServeAndName(t *testing.T) {
+ sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+ ns := tnaming.NewSimpleNamespace()
+ ctx := testContext()
+ server, err := testInternalNewServer(ctx, sm, ns, vc.LocalPrincipal{tsecurity.NewPrincipal()})
+ if err != nil {
+ t.Errorf("InternalNewServer failed: %v", err)
+ }
+ _, err = server.Listen(listenSpec)
+ if err != nil {
+ t.Errorf("server.Listen failed: %v", err)
+ }
+
+ disp := &testServerDisp{&testServer{}}
+ if err := server.ServeDispatcher("mountpoint/server", disp); err != nil {
+ t.Errorf("server.ServeDispatcher failed: %v", err)
+ }
+
+ n1 := "mountpoint/server"
+ n2 := "should_appear_in_mt/server"
+ n3 := "should_appear_in_mt/server"
+ n4 := "should_not_appear_in_mt/server"
+
+ verifyMount(t, ns, n1)
+
+ if server.ServeDispatcher(n2, disp) == nil {
+ t.Errorf("server.ServeDispatcher should have failed")
+ }
+
+ if err := server.Serve(n2, &testServer{}, nil); err == nil {
+ t.Errorf("server.Serve should have failed")
+ }
+
+ if err := server.AddName(n3); err != nil {
+ t.Errorf("server.AddName failed: %v", err)
+ }
+
+ if err := server.AddName(n3); err != nil {
+ t.Errorf("server.AddName failed: %v", err)
+ }
+ verifyMount(t, ns, n2)
+ verifyMount(t, ns, n3)
+
+ server.RemoveName(n1)
+ verifyMountMissing(t, ns, n1)
+
+ server.RemoveName("some random name")
+
+ if err := server.ServeDispatcher(n4, &testServerDisp{&testServer{}}); err == nil {
+ t.Errorf("server.ServeDispatcher should have failed")
+ }
+ verifyMountMissing(t, ns, n4)
+
+ if err := server.Stop(); err != nil {
+ t.Errorf("server.Stop failed: %v", err)
+ }
+
+ verifyMountMissing(t, ns, n1)
+ verifyMountMissing(t, ns, n2)
+ verifyMountMissing(t, ns, n3)
+}
+
+func TestRPCServerAuthorization(t *testing.T) {
+ const (
+ publicKeyErr = "not matched by server key"
+ forPeerErr = "no blessings tagged for peer"
+ nameErr = "do not match pattern"
+ allowedErr = "do not match any allowed server patterns"
+ )
+ var (
+ pprovider, pclient, pserver = tsecurity.NewPrincipal("root"), tsecurity.NewPrincipal(), tsecurity.NewPrincipal()
+ pdischarger = pprovider
+ now = time.Now()
+ noErrID verror.IDAction
+
+ // Third-party caveats on blessings presented by server.
+ cavTPValid = mkThirdPartyCaveat(pdischarger.PublicKey(), "mountpoint/dischargeserver", mkCaveat(security.ExpiryCaveat(now.Add(24*time.Hour))))
+ cavTPExpired = mkThirdPartyCaveat(pdischarger.PublicKey(), "mountpoint/dischargeserver", mkCaveat(security.ExpiryCaveat(now.Add(-1*time.Second))))
+
+ // Server blessings.
+ bServer = bless(pprovider, pserver, "server")
+ bServerExpired = bless(pprovider, pserver, "server", mkCaveat(security.ExpiryCaveat(time.Now().Add(-1*time.Second))))
+ bServerTPValid = bless(pprovider, pserver, "serverWithTPCaveats", cavTPValid)
+ bServerTPExpired = bless(pprovider, pserver, "serverWithTPCaveats", cavTPExpired)
+ bTwoBlessings, _ = security.UnionOfBlessings(bServer, bServerTPValid)
+
+ mgr = imanager.InternalNew(naming.FixedRoutingID(0x1111111))
+ ns = tnaming.NewSimpleNamespace()
+ tests = []struct {
+ server security.Blessings // blessings presented by the server to the client.
+ name string // name provided by the client to StartCall
+ opt ipc.CallOpt // option provided to StartCall.
+ errID verror.IDAction
+ err string
+ }{
+ // Client accepts talking to the server only if the
+ // server's blessings match the provided pattern
+ {bServer, "[...]mountpoint/server", nil, noErrID, ""},
+ {bServer, "[root/server]mountpoint/server", nil, noErrID, ""},
+ {bServer, "[root/otherserver]mountpoint/server", nil, verror.ErrNotTrusted, nameErr},
+ {bServer, "[otherroot/server]mountpoint/server", nil, verror.ErrNotTrusted, nameErr},
+
+ // and, if the server's blessing has third-party
+ // caveats then the server provides appropriate
+ // discharges.
+ {bServerTPValid, "[...]mountpoint/server", nil, noErrID, ""},
+ {bServerTPValid, "[root/serverWithTPCaveats]mountpoint/server", nil, noErrID, ""},
+ {bServerTPValid, "[root/otherserver]mountpoint/server", nil, verror.ErrNotTrusted, nameErr},
+ {bServerTPValid, "[otherroot/server]mountpoint/server", nil, verror.ErrNotTrusted, nameErr},
+
+ // Client does not talk to a server that presents
+ // expired blessings (because the blessing store is
+ // configured to only talk to root).
+ {bServerExpired, "[...]mountpoint/server", nil, verror.ErrNotTrusted, forPeerErr},
+
+ // Client does not talk to a server that fails to
+ // provide discharges for third-party caveats on the
+ // blessings presented by it.
+ {bServerTPExpired, "[...]mountpoint/server", nil, verror.ErrNotTrusted, forPeerErr},
+
+ // Testing the AllowedServersPolicy option.
+ {bServer, "[...]mountpoint/server", options.AllowedServersPolicy{"otherroot"}, verror.ErrNotTrusted, allowedErr},
+ {bServer, "[root/server]mountpoint/server", options.AllowedServersPolicy{"otherroot"}, verror.ErrNotTrusted, allowedErr},
+ {bServer, "[otherroot/server]mountpoint/server", options.AllowedServersPolicy{"root/server"}, verror.ErrNotTrusted, nameErr},
+ {bServer, "[root/server]mountpoint/server", options.AllowedServersPolicy{"root"}, noErrID, ""},
+
+ // Test the ServerPublicKey option.
+ {bServer, "[...]mountpoint/server", options.ServerPublicKey{bServer.PublicKey()}, noErrID, ""},
+ {bServer, "[...]mountpoint/server", options.ServerPublicKey{tsecurity.NewPrincipal("irrelevant").PublicKey()}, verror.ErrNotTrusted, publicKeyErr},
+ // Server presents two blessings: One that satisfies
+ // the pattern provided to StartCall and one that
+ // satisfies the AllowedServersPolicy, so the server is
+ // authorized.
+ {bTwoBlessings, "[root/serverWithTPCaveats]mountpoint/server", options.AllowedServersPolicy{"root/server"}, noErrID, ""},
+ }
+ )
+
+ _, server := startServer(t, pserver, mgr, ns, "mountpoint/server", testServerDisp{&testServer{}})
+ defer stopServer(t, server, ns, "mountpoint/server")
+
+ // Start the discharge server.
+ _, dischargeServer := startServer(t, pdischarger, mgr, ns, "mountpoint/dischargeserver", testutil.LeafDispatcher(&dischargeServer{}, &acceptAllAuthorizer{}))
+ defer stopServer(t, dischargeServer, ns, "mountpoint/dischargeserver")
+
+ // Make the client and server principals trust root certificates from
+ // pprovider
+ pclient.AddToRoots(pprovider.BlessingStore().Default())
+ pserver.AddToRoots(pprovider.BlessingStore().Default())
+ // Set a blessing that the client is willing to share with servers with
+ // blessings from pprovider.
+ pclient.BlessingStore().Set(bless(pprovider, pclient, "client"), "root")
+
+ for i, test := range tests {
+ name := fmt.Sprintf("(#%d: Name:%q, Server:%q, opt:%v)", i, test.name, test.server, test.opt)
+ if err := pserver.BlessingStore().SetDefault(test.server); err != nil {
+ t.Fatalf("SetDefault failed on server's BlessingStore: %v", err)
+ }
+ if _, err := pserver.BlessingStore().Set(test.server, "root"); err != nil {
+ t.Fatalf("Set failed on server's BlessingStore: %v", err)
+ }
+ // Recreate client in each test (so as to not re-use VCs to the server).
+ client, err := InternalNewClient(mgr, ns, vc.LocalPrincipal{pclient})
+ if err != nil {
+ t.Errorf("%s: failed to create client: %v", name, err)
+ continue
+ }
+ ctx, cancel := context.WithTimeout(testContextWithoutDeadline(), 10*time.Second)
+ call, err := client.StartCall(ctx, test.name, "Method", nil, test.opt)
+ if !matchesErrorPattern(err, test.errID, test.err) {
+ t.Errorf(`%s: client.StartCall: got error "%v", want to match "%v"`, name, err, test.err)
+ } else if call != nil {
+ blessings, proof := call.RemoteBlessings()
+ if proof.IsZero() {
+ t.Errorf("%s: Returned zero value for remote blessings", name)
+ }
+ // Currently all tests are configured so that the only
+ // blessings presented by the server that are
+ // recognized by the client match the pattern
+ // "root"
+ if len(blessings) < 1 || !security.BlessingPattern("root").MatchedBy(blessings...) {
+ t.Errorf("%s: Client sees server as %v, expected a single blessing matching root", name, blessings)
+ }
+ }
+ cancel()
+ client.Close()
+ }
+}
+
+func TestServerManInTheMiddleAttack(t *testing.T) {
+ // Test scenario: A server mounts itself, but then some other service
+ // somehow "takes over" the endpoint, thus trying to steal traffic.
+
+ // Start up the attacker's server.
+ attacker, err := testInternalNewServer(
+ testContext(),
+ imanager.InternalNew(naming.FixedRoutingID(0xaaaaaaaaaaaaaaaa)),
+ // (To prevent the attacker from legitimately mounting on the
+ // namespace that the client will use, provide it with a
+ // different namespace).
+ tnaming.NewSimpleNamespace(),
+ vc.LocalPrincipal{tsecurity.NewPrincipal("attacker")})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := attacker.Listen(listenSpec); err != nil {
+ t.Fatal(err)
+ }
+ if err := attacker.ServeDispatcher("mountpoint/server", testServerDisp{&testServer{}}); err != nil {
+ t.Fatal(err)
+ }
+ var ep naming.Endpoint
+ if status := attacker.Status(); len(status.Endpoints) < 1 {
+ t.Fatalf("Attacker server does not have an endpoint: %+v", status)
+ } else {
+ ep = status.Endpoints[0]
+ }
+
+ // The legitimate server would have mounted the same endpoint on the
+ // namespace.
+ ns := tnaming.NewSimpleNamespace()
+ if err := ns.Mount(testContext(), "mountpoint/server", ep.Name(), time.Hour, naming.MountedServerBlessingsOpt{"server"}); err != nil {
+ t.Fatal(err)
+ }
+
+ // The RPC call should fail because the blessings presented by the
+ // (attacker's) server are not consistent with the ones registered in
+ // the mounttable trusted by the client.
+ client, err := InternalNewClient(
+ imanager.InternalNew(naming.FixedRoutingID(0xcccccccccccccccc)),
+ ns,
+ vc.LocalPrincipal{tsecurity.NewPrincipal("client")})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer client.Close()
+ if _, err := client.StartCall(testContext(), "mountpoint/server", "Closure", nil); !verror.Is(err, verror.ErrNotTrusted.ID) {
+ t.Errorf("Got error %v (errorid=%v), want errorid=%v", err, verror.ErrorID(err), verror.ErrNotTrusted.ID)
+ }
+ // But the RPC should succeed if the client explicitly
+ // decided to skip server authorization.
+ if _, err := client.StartCall(testContext(), "mountpoint/server", "Closure", nil, options.SkipResolveAuthorization{}); err != nil {
+ t.Errorf("Unexpected error(%v) when skipping server authorization", err)
+ }
+}
+
+type websocketMode bool
+type closeSendMode bool
+
+const (
+ useWebsocket websocketMode = true
+ noWebsocket websocketMode = false
+
+ closeSend closeSendMode = true
+ noCloseSend closeSendMode = false
+)
+
+func TestRPC(t *testing.T) {
+ testRPC(t, closeSend, noWebsocket)
+}
+
+func TestRPCWithWebsocket(t *testing.T) {
+ testRPC(t, closeSend, useWebsocket)
+}
+
+// TestRPCCloseSendOnFinish tests that Finish informs the server that no more
+// inputs will be sent by the client if CloseSend has not already done so.
+func TestRPCCloseSendOnFinish(t *testing.T) {
+ testRPC(t, noCloseSend, noWebsocket)
+}
+
+func TestRPCCloseSendOnFinishWithWebsocket(t *testing.T) {
+ testRPC(t, noCloseSend, useWebsocket)
+}
+
+func testRPC(t *testing.T, shouldCloseSend closeSendMode, shouldUseWebsocket websocketMode) {
+ type v []interface{}
+ type testcase struct {
+ name string
+ method string
+ args v
+ streamArgs v
+ startErr error
+ results v
+ finishErr error
+ }
+ var (
+ tests = []testcase{
+ {"mountpoint/server/suffix", "Closure", nil, nil, nil, nil, nil},
+ {"mountpoint/server/suffix", "Error", nil, nil, nil, nil, errMethod},
+
+ {"mountpoint/server/suffix", "Echo", v{"foo"}, nil, nil, v{`method:"Echo",suffix:"suffix",arg:"foo"`}, nil},
+ {"mountpoint/server/suffix/abc", "Echo", v{"bar"}, nil, nil, v{`method:"Echo",suffix:"suffix/abc",arg:"bar"`}, nil},
+
+ {"mountpoint/server/suffix", "EchoUser", v{"foo", userType("bar")}, nil, nil, v{`method:"EchoUser",suffix:"suffix",arg:"foo"`, userType("bar")}, nil},
+ {"mountpoint/server/suffix/abc", "EchoUser", v{"baz", userType("bla")}, nil, nil, v{`method:"EchoUser",suffix:"suffix/abc",arg:"baz"`, userType("bla")}, nil},
+ {"mountpoint/server/suffix", "Stream", v{"foo"}, v{userType("bar"), userType("baz")}, nil, v{`method:"Stream",suffix:"suffix",arg:"foo" bar baz`}, nil},
+ {"mountpoint/server/suffix/abc", "Stream", v{"123"}, v{userType("456"), userType("789")}, nil, v{`method:"Stream",suffix:"suffix/abc",arg:"123" 456 789`}, nil},
+ {"mountpoint/server/suffix", "EchoBlessings", nil, nil, nil, v{"[server]", "[client]"}, nil},
+ {"mountpoint/server/suffix", "EchoAndError", v{"bugs bunny"}, nil, nil, v{`method:"EchoAndError",suffix:"suffix",arg:"bugs bunny"`}, nil},
+ {"mountpoint/server/suffix", "EchoAndError", v{"error"}, nil, nil, nil, errMethod},
+ }
+ name = func(t testcase) string {
+ return fmt.Sprintf("%s.%s(%v)", t.name, t.method, t.args)
+ }
+
+ pserver = tsecurity.NewPrincipal("server")
+ pclient = tsecurity.NewPrincipal("client")
+
+ b = createBundleWS(t, pclient, pserver, &testServer{}, shouldUseWebsocket)
+ )
+ defer b.cleanup(t)
+ // The server needs to recognize the client's root certificate.
+ pserver.AddToRoots(pclient.BlessingStore().Default())
+ for _, test := range tests {
+ vlog.VI(1).Infof("%s client.StartCall", name(test))
+ vname := test.name
+ if shouldUseWebsocket {
+ var err error
+ vname, err = fakeWSName(b.ns, vname)
+ if err != nil && err != test.startErr {
+ t.Errorf(`%s ns.Resolve got error "%v", want "%v"`, name(test), err, test.startErr)
+ continue
+ }
+ }
+ call, err := b.client.StartCall(testContext(), vname, test.method, test.args)
+ if err != test.startErr {
+ t.Errorf(`%s client.StartCall got error "%v", want "%v"`, name(test), err, test.startErr)
+ continue
+ }
+ for _, sarg := range test.streamArgs {
+ vlog.VI(1).Infof("%s client.Send(%v)", name(test), sarg)
+ if err := call.Send(sarg); err != nil {
+ t.Errorf(`%s call.Send(%v) got unexpected error "%v"`, name(test), sarg, err)
+ }
+ var u userType
+ if err := call.Recv(&u); err != nil {
+ t.Errorf(`%s call.Recv(%v) got unexpected error "%v"`, name(test), sarg, err)
+ }
+ if !reflect.DeepEqual(u, sarg) {
+ t.Errorf("%s call.Recv got value %v, want %v", name(test), u, sarg)
+ }
+ }
+ if shouldCloseSend {
+ vlog.VI(1).Infof("%s call.CloseSend", name(test))
+ // When the method does not involve streaming
+ // arguments, the server gets all the arguments in
+ // StartCall and then sends a response without
+ // (unnecessarily) waiting for a CloseSend message from
+ // the client. If the server responds before the
+ // CloseSend call is made at the client, the CloseSend
+ // call will fail. Thus, only check for errors on
+ // CloseSend if there are streaming arguments to begin
+ // with (i.e., only if the server is expected to wait
+ // for the CloseSend notification).
+ if err := call.CloseSend(); err != nil && len(test.streamArgs) > 0 {
+ t.Errorf(`%s call.CloseSend got unexpected error "%v"`, name(test), err)
+ }
+ }
+ vlog.VI(1).Infof("%s client.Finish", name(test))
+ results := makeResultPtrs(test.results)
+ err = call.Finish(results...)
+ if got, want := err, test.finishErr; (got == nil) != (want == nil) {
+ t.Errorf(`%s call.Finish got error "%v", want "%v"`, name(test), got, want)
+ } else if want != nil && !verror.Is(got, verror.ErrorID(want)) {
+ t.Errorf(`%s call.Finish got error "%v", want "%v"`, name(test), got, want)
+ }
+ checkResultPtrs(t, name(test), results, test.results)
+ }
+}
+
+func TestMultipleFinish(t *testing.T) {
+ type v []interface{}
+ b := createBundle(t, tsecurity.NewPrincipal("client"), tsecurity.NewPrincipal("server"), &testServer{})
+ defer b.cleanup(t)
+ call, err := b.client.StartCall(testContext(), "mountpoint/server/suffix", "Echo", v{"foo"})
+ if err != nil {
+ t.Fatalf(`client.StartCall got error "%v"`, err)
+ }
+ var results string
+ err = call.Finish(&results)
+ if err != nil {
+ t.Fatalf(`call.Finish got error "%v"`, err)
+ }
+ // Calling Finish a second time should result in a useful error.
+ if err = call.Finish(&results); !matchesErrorPattern(err, verror.ErrBadState, "Finish has already been called") {
+ t.Fatalf(`got "%v", want "%v"`, err, verror.ErrBadState)
+ }
+}
+
+// granter implements ipc.Granter, returning a fixed (security.Blessings, error) pair.
+type granter struct {
+ ipc.CallOpt
+ b security.Blessings
+ err error
+}
+
+func (g granter) Grant(id security.Blessings) (security.Blessings, error) { return g.b, g.err }
+
+func TestGranter(t *testing.T) {
+ var (
+ pclient = tsecurity.NewPrincipal("client")
+ pserver = tsecurity.NewPrincipal("server")
+ b = createBundle(t, pclient, pserver, &testServer{})
+ )
+ defer b.cleanup(t)
+
+ tests := []struct {
+ granter ipc.Granter
+ startErrID, finishErrID verror.IDAction
+ blessing, starterr, finisherr string
+ }{
+ {blessing: ""},
+ {granter: granter{b: bless(pclient, pserver, "blessed")}, blessing: "client/blessed"},
+ {granter: granter{err: errors.New("hell no")}, startErrID: verror.ErrNotTrusted, starterr: "hell no"},
+ {granter: granter{b: pclient.BlessingStore().Default()}, finishErrID: verror.ErrNoAccess, finisherr: "blessing granted not bound to this server"},
+ }
+ for i, test := range tests {
+ call, err := b.client.StartCall(testContext(), "mountpoint/server/suffix", "EchoGrantedBlessings", []interface{}{"argument"}, test.granter)
+ if !matchesErrorPattern(err, test.startErrID, test.starterr) {
+ t.Errorf("%d: %+v: StartCall returned error %v", i, test, err)
+ }
+ if err != nil {
+ continue
+ }
+ var result, blessing string
+ if err = call.Finish(&result, &blessing); !matchesErrorPattern(err, test.finishErrID, test.finisherr) {
+ t.Errorf("%+v: Finish returned error %v", test, err)
+ }
+ if err != nil {
+ continue
+ }
+ if result != "argument" || blessing != test.blessing {
+ t.Errorf("%+v: Got (%q, %q)", test, result, blessing)
+ }
+ }
+}
+
+func mkThirdPartyCaveat(discharger security.PublicKey, location string, c security.Caveat) security.Caveat {
+ tpc, err := security.NewPublicKeyCaveat(discharger, location, security.ThirdPartyRequirements{}, c)
+ if err != nil {
+ panic(err)
+ }
+ return tpc
+}
+
+// dischargeTestServer implements the discharge service. Always fails to
+// issue a discharge, but records the impetus and traceid of the RPC call.
+type dischargeTestServer struct {
+ p security.Principal
+ impetus []security.DischargeImpetus
+ traceid []uniqueid.Id
+}
+
+func (s *dischargeTestServer) Discharge(ctx ipc.ServerCall, cav security.Caveat, impetus security.DischargeImpetus) (security.WireDischarge, error) {
+ s.impetus = append(s.impetus, impetus)
+ s.traceid = append(s.traceid, vtrace.GetSpan(ctx.Context()).Trace())
+ return nil, fmt.Errorf("discharges not issued")
+}
+
+func (s *dischargeTestServer) Release() ([]security.DischargeImpetus, []uniqueid.Id) {
+ impetus, traceid := s.impetus, s.traceid
+ s.impetus, s.traceid = nil, nil
+ return impetus, traceid
+}
+
+func TestDischargeImpetusAndContextPropagation(t *testing.T) {
+ var (
+ pserver = tsecurity.NewPrincipal("server")
+ pdischarger = tsecurity.NewPrincipal("discharger")
+ pclient = tsecurity.NewPrincipal("client")
+ sm = imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+ ns = tnaming.NewSimpleNamespace()
+
+ mkClient = func(req security.ThirdPartyRequirements) vc.LocalPrincipal {
+ // Set up the client so that it shares with the server a blessing carrying a third-party caveat.
+ cav, err := security.NewPublicKeyCaveat(pdischarger.PublicKey(), "mountpoint/discharger", req, security.UnconstrainedUse())
+ if err != nil {
+ t.Fatalf("Failed to create ThirdPartyCaveat(%+v): %v", req, err)
+ }
+ b, err := pclient.BlessSelf("client_for_server", cav)
+ if err != nil {
+ t.Fatalf("BlessSelf failed: %v", err)
+ }
+ pclient.BlessingStore().Set(b, "server")
+ return vc.LocalPrincipal{pclient}
+ }
+ )
+ // Initialize the client principal.
+ // It trusts both the application server and the discharger.
+ pclient.AddToRoots(pserver.BlessingStore().Default())
+ pclient.AddToRoots(pdischarger.BlessingStore().Default())
+ // Share a blessing without any third-party caveats with the discharger.
+ // It could share the same blessing as generated by mkClient above, but
+ // that could lead to confusing debugging (since the client would then try
+ // to fetch a discharge just to talk to the discharge service).
+ if b, err := pclient.BlessSelf("client_for_discharger"); err != nil {
+ t.Fatalf("BlessSelf failed: %v", err)
+ } else {
+ pclient.BlessingStore().Set(b, "discharger")
+ }
+
+ // Set up the discharge server.
+ var tester dischargeTestServer
+ ctx := testContext()
+ dischargeServer, err := testInternalNewServer(ctx, sm, ns, vc.LocalPrincipal{pdischarger})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer dischargeServer.Stop()
+ if _, err := dischargeServer.Listen(listenSpec); err != nil {
+ t.Fatal(err)
+ }
+ if err := dischargeServer.Serve("mountpoint/discharger", &tester, &testServerAuthorizer{}); err != nil {
+ t.Fatal(err)
+ }
+
+ // Set up the application server.
+ appServer, err := testInternalNewServer(ctx, sm, ns, vc.LocalPrincipal{pserver})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer appServer.Stop()
+ eps, err := appServer.Listen(listenSpec)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // TODO(bjornick,cnicolaou,ashankar): This is a hack to work around the
+ // fact that a single Listen on the "tcp" protocol followed by a call
+ // to Serve(<name>, ...) transparently creates two endpoints (one for
+ // tcp, one for websockets) and maps both to <name> via a mount.
+ // Because all endpoints to a name are tried in parallel, this
+ // transparency makes this test hard to follow (many discharge fetch
+ // attempts are made - one for VIF authentication, one for VC
+ // authentication and one for the actual RPC - and having them be made
+ // to two different endpoints in parallel leads to a lot of
+ // non-determinism). The last plan of record known by the author of
+ // this comment was to stop this sly creation of two endpoints and
+ // require that they be done explicitly. When that happens, this hack
+ // can go away, but till then, this workaround allows the test to be
+ // more predictable by ensuring there is only one VIF/VC/Flow to the
+ // server.
+ object := naming.JoinAddressName(eps[0].String(), "object") // instead of "mountpoint/object"
+ if err := appServer.Serve("mountpoint/object", &testServer{}, &testServerAuthorizer{}); err != nil {
+ t.Fatal(err)
+ }
+ tests := []struct {
+ Requirements security.ThirdPartyRequirements
+ Impetus security.DischargeImpetus
+ }{
+ { // No requirements, no impetus
+ Requirements: security.ThirdPartyRequirements{},
+ Impetus: security.DischargeImpetus{},
+ },
+ { // Require everything
+ Requirements: security.ThirdPartyRequirements{ReportServer: true, ReportMethod: true, ReportArguments: true},
+ Impetus: security.DischargeImpetus{Server: []security.BlessingPattern{"server"}, Method: "Method", Arguments: []*vdl.Value{vdl.StringValue("argument")}},
+ },
+ { // Require only the method name
+ Requirements: security.ThirdPartyRequirements{ReportMethod: true},
+ Impetus: security.DischargeImpetus{Method: "Method"},
+ },
+ }
+
+ for _, test := range tests {
+ pclient := mkClient(test.Requirements)
+ client, err := InternalNewClient(sm, ns, pclient)
+ if err != nil {
+ t.Fatalf("InternalNewClient(%+v) failed: %v", test.Requirements, err)
+ }
+ defer client.Close()
+ ctx := testContext()
+ tid := vtrace.GetSpan(ctx).Trace()
+ // StartCall should fetch the discharge; there is no need to finish the RPC since this test does not care about its result.
+ if _, err := client.StartCall(ctx, object, "Method", []interface{}{"argument"}); err != nil {
+ t.Errorf("StartCall(%+v) failed: %v", test.Requirements, err)
+ continue
+ }
+ impetus, traceid := tester.Release()
+ // There should have been exactly 1 attempt to fetch discharges when making
+ // the RPC to the remote object.
+ if len(impetus) != 1 || len(traceid) != 1 {
+ t.Errorf("Test %+v: Got (%d, %d) (#impetus, #traceid), wanted exactly one", test.Requirements, len(impetus), len(traceid))
+ continue
+ }
+ // VC creation does not have any "impetus"; it is established without
+ // knowledge of the context of the RPC. So ignore that.
+ //
+ // TODO(ashankar): Should the impetus of the RPC that initiated the
+ // VIF/VC creation be propagated?
+ if got, want := impetus[len(impetus)-1], test.Impetus; !reflect.DeepEqual(got, want) {
+ t.Errorf("Test %+v: Got impetus %v, want %v", test.Requirements, got, want)
+ }
+ // But the context used for all of this should be the same
+ // (thereby allowing debug traces to link VIF/VC creation with
+ // the RPC that initiated them).
+ for idx, got := range traceid {
+ if !reflect.DeepEqual(got, tid) {
+ t.Errorf("Test %+v: %d - Got trace id %q, want %q", test.Requirements, idx, hex.EncodeToString(got[:]), hex.EncodeToString(tid[:]))
+ }
+ }
+ }
+}
+
+func TestRPCClientAuthorization(t *testing.T) {
+ type v []interface{}
+ var (
+ // Principals
+ pclient, pserver = tsecurity.NewPrincipal("client"), tsecurity.NewPrincipal("server")
+ pdischarger = pserver
+
+ now = time.Now()
+
+ serverName = "mountpoint/server"
+ dischargeServerName = "mountpoint/dischargeserver"
+
+ // Caveats on blessings to the client: First-party caveats
+ cavOnlyEcho = mkCaveat(security.MethodCaveat("Echo"))
+ cavExpired = mkCaveat(security.ExpiryCaveat(now.Add(-1 * time.Second)))
+ // Caveats on blessings to the client: Third-party caveats
+ cavTPValid = mkThirdPartyCaveat(pdischarger.PublicKey(), "mountpoint/server/discharger", mkCaveat(security.ExpiryCaveat(now.Add(24*time.Hour))))
+ cavTPExpired = mkThirdPartyCaveat(pdischarger.PublicKey(), "mountpoint/server/discharger", mkCaveat(security.ExpiryCaveat(now.Add(-1*time.Second))))
+
+ // Client blessings that will be tested.
+ bServerClientOnlyEcho = bless(pserver, pclient, "onlyecho", cavOnlyEcho)
+ bServerClientExpired = bless(pserver, pclient, "expired", cavExpired)
+ bServerClientTPValid = bless(pserver, pclient, "dischargeable_third_party_caveat", cavTPValid)
+ bServerClientTPExpired = bless(pserver, pclient, "expired_third_party_caveat", cavTPExpired)
+ bClient = pclient.BlessingStore().Default()
+ bRandom, _ = pclient.BlessSelf("random")
+
+ mgr = imanager.InternalNew(naming.FixedRoutingID(0x1111111))
+ ns = tnaming.NewSimpleNamespace()
+ tests = []struct {
+ blessings security.Blessings // Blessings used by the client
+ name string // object name on which the method is invoked
+ method string
+ args v
+ results v
+ authorized bool // Whether or not the RPC should be authorized by the server.
+ }{
+ // There are three different authorization policies (security.Authorizer implementations)
+ // used by the server, depending on the suffix (see testServerDisp.Lookup):
+ // - nilAuth suffix: the default authorization policy (only delegates of or delegators of the server can call RPCs)
+ // - aclAuth suffix: the ACL only allows blessings matching the patterns "server" or "client"
+ // - other suffixes: testServerAuthorizer allows any principal to call any method except "Unauthorized"
+
+ // Expired blessings should fail nilAuth and aclAuth (which care about names), but should succeed on
+ // other suffixes (which allow all blessings), unless calling the Unauthorized method.
+ {bServerClientExpired, "mountpoint/server/nilAuth", "Echo", v{"foo"}, v{""}, false},
+ {bServerClientExpired, "mountpoint/server/aclAuth", "Echo", v{"foo"}, v{""}, false},
+ {bServerClientExpired, "mountpoint/server/suffix", "Echo", v{"foo"}, v{""}, true},
+ {bServerClientExpired, "mountpoint/server/suffix", "Unauthorized", nil, v{""}, false},
+
+ // Same for blessings that should fail to obtain a discharge for the third party caveat.
+ {bServerClientTPExpired, "mountpoint/server/nilAuth", "Echo", v{"foo"}, v{""}, false},
+ {bServerClientTPExpired, "mountpoint/server/aclAuth", "Echo", v{"foo"}, v{""}, false},
+ {bServerClientTPExpired, "mountpoint/server/suffix", "Echo", v{"foo"}, v{""}, true},
+ {bServerClientTPExpired, "mountpoint/server/suffix", "Unauthorized", nil, v{""}, false},
+
+ // The "server/client" blessing (with MethodCaveat("Echo")) should satisfy all authorization policies
+ // when "Echo" is called.
+ {bServerClientOnlyEcho, "mountpoint/server/nilAuth", "Echo", v{"foo"}, v{""}, true},
+ {bServerClientOnlyEcho, "mountpoint/server/aclAuth", "Echo", v{"foo"}, v{""}, true},
+ {bServerClientOnlyEcho, "mountpoint/server/suffix", "Echo", v{"foo"}, v{""}, true},
+
+ // The "server/client" blessing (with MethodCaveat("Echo")) should satisfy no authorization policy
+ // when any other method is invoked, except for the testServerAuthorizer policy (which will
+ // not recognize the blessing "server/onlyecho", but it would authorize anyone anyway).
+ {bServerClientOnlyEcho, "mountpoint/server/nilAuth", "Closure", nil, nil, false},
+ {bServerClientOnlyEcho, "mountpoint/server/aclAuth", "Closure", nil, nil, false},
+ {bServerClientOnlyEcho, "mountpoint/server/suffix", "Closure", nil, nil, true},
+
+ // The "client" blessing doesn't satisfy the default authorization policy, but does satisfy
+ // the ACL and the testServerAuthorizer policy.
+ {bClient, "mountpoint/server/nilAuth", "Echo", v{"foo"}, v{""}, false},
+ {bClient, "mountpoint/server/aclAuth", "Echo", v{"foo"}, v{""}, true},
+ {bClient, "mountpoint/server/suffix", "Echo", v{"foo"}, v{""}, true},
+ {bClient, "mountpoint/server/suffix", "Unauthorized", nil, v{""}, false},
+
+ // The "random" blessing does not satisfy either the default policy or the ACL, but does
+ // satisfy testServerAuthorizer.
+ {bRandom, "mountpoint/server/nilAuth", "Echo", v{"foo"}, v{""}, false},
+ {bRandom, "mountpoint/server/aclAuth", "Echo", v{"foo"}, v{""}, false},
+ {bRandom, "mountpoint/server/suffix", "Echo", v{"foo"}, v{""}, true},
+ {bRandom, "mountpoint/server/suffix", "Unauthorized", nil, v{""}, false},
+
+ // The "server/dischargeable_third_party_caveat" blessing satisfies all policies.
+ // (the discharges should be fetched).
+ {bServerClientTPValid, "mountpoint/server/nilAuth", "Echo", v{"foo"}, v{""}, true},
+ {bServerClientTPValid, "mountpoint/server/aclAuth", "Echo", v{"foo"}, v{""}, true},
+ {bServerClientTPValid, "mountpoint/server/suffix", "Echo", v{"foo"}, v{""}, true},
+ {bServerClientTPValid, "mountpoint/server/suffix", "Unauthorized", nil, v{""}, false},
+ }
+ )
+ // Start the main server.
+ _, server := startServer(t, pserver, mgr, ns, serverName, testServerDisp{&testServer{}})
+ defer stopServer(t, server, ns, serverName)
+
+ // Start the discharge server.
+ _, dischargeServer := startServer(t, pdischarger, mgr, ns, dischargeServerName, testutil.LeafDispatcher(&dischargeServer{}, &acceptAllAuthorizer{}))
+ defer stopServer(t, dischargeServer, ns, dischargeServerName)
+
+ // The server should recognize the client principal as an authority on "client" and "random" blessings.
+ pserver.AddToRoots(bClient)
+ pserver.AddToRoots(bRandom)
+ // And the client needs to recognize the server's and discharger's blessings to decide which of its
+ // own blessings to share.
+ pclient.AddToRoots(pserver.BlessingStore().Default())
+ // tsecurity.NewPrincipal sets up a principal that shares blessings with all servers; undo that here.
+ pclient.BlessingStore().Set(security.Blessings{}, security.AllPrincipals)
+
+ for _, test := range tests {
+ name := fmt.Sprintf("%q.%s(%v) by %v", test.name, test.method, test.args, test.blessings)
+ client, err := InternalNewClient(mgr, ns, vc.LocalPrincipal{pclient})
+ if err != nil {
+ t.Fatalf("InternalNewClient failed: %v", err)
+ }
+ defer client.Close()
+
+ pclient.BlessingStore().Set(test.blessings, "server")
+ call, err := client.StartCall(testContext(), test.name, test.method, test.args)
+ if err != nil {
+ t.Errorf(`%s client.StartCall got unexpected error: "%v"`, name, err)
+ continue
+ }
+
+ results := makeResultPtrs(test.results)
+ err = call.Finish(results...)
+ if err != nil && test.authorized {
+ t.Errorf(`%s call.Finish got error: "%v", wanted the RPC to succeed`, name, err)
+ } else if err == nil && !test.authorized {
+ t.Errorf("%s call.Finish succeeded, expected authorization failure", name)
+ } else if !test.authorized && !verror.Is(err, verror.ErrNoAccess.ID) {
+ t.Errorf("%s. call.Finish returned error %v(%v), wanted %v", name, verror.ErrorID(verror.Convert(verror.ErrNoAccess, nil, err)), err, verror.ErrNoAccess)
+ }
+ }
+}
+
+func TestDischargePurgeFromCache(t *testing.T) {
+ var (
+ pserver = tsecurity.NewPrincipal("server")
+ pdischarger = pserver // In general, the discharger can be a separate principal. In this test, it happens to be the server.
+ pclient = tsecurity.NewPrincipal("client")
+ // Client is blessed with a third-party caveat. The discharger service issues discharges with a fakeTimeCaveat.
+ // This blessing is presented to "server".
+ bclient = bless(pserver, pclient, "client", mkThirdPartyCaveat(pdischarger.PublicKey(), "mountpoint/server/discharger", security.UnconstrainedUse()))
+ )
+ // Set up the client to recognize the server's blessing and present bclient to it.
+ pclient.AddToRoots(pserver.BlessingStore().Default())
+ pclient.BlessingStore().Set(bclient, "server")
+
+ b := createBundle(t, nil, pserver, &testServer{})
+ defer b.cleanup(t)
+
+ var err error
+ if b.client, err = InternalNewClient(b.sm, b.ns, vc.LocalPrincipal{pclient}); err != nil {
+ t.Fatalf("InternalNewClient failed: %v", err)
+ }
+ call := func() error {
+ call, err := b.client.StartCall(testContext(), "mountpoint/server/aclAuth", "Echo", []interface{}{"batman"})
+ if err != nil {
+ return err //fmt.Errorf("client.StartCall failed: %v", err)
+ }
+ var got string
+ if err := call.Finish(&got); err != nil {
+ return err //fmt.Errorf("client.Finish failed: %v", err)
+ }
+ if want := `method:"Echo",suffix:"aclAuth",arg:"batman"`; got != want {
+ return verror.Convert(verror.ErrBadArg, nil, fmt.Errorf("Got [%v] want [%v]", got, want))
+ }
+ return nil
+ }
+
+ // First call should succeed
+ if err := call(); err != nil {
+ t.Fatal(err)
+ }
+ // Advance virtual clock, which will invalidate the discharge
+ clock.Advance(1)
+ if err, want := call(), "not authorized"; !matchesErrorPattern(err, verror.ErrNoAccess, want) {
+ t.Errorf("Got error [%v] wanted to match pattern %q", err, want)
+ }
+ // But retrying will succeed since the discharge should be purged from cache and refreshed
+ if err := call(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+type cancelTestServer struct {
+ started chan struct{}
+ cancelled chan struct{}
+ t *testing.T
+}
+
+func newCancelTestServer(t *testing.T) *cancelTestServer {
+ return &cancelTestServer{
+ started: make(chan struct{}),
+ cancelled: make(chan struct{}),
+ t: t,
+ }
+}
+
+func (s *cancelTestServer) CancelStreamReader(call ipc.StreamServerCall) error {
+ close(s.started)
+ var b []byte
+ if err := call.Recv(&b); err != io.EOF {
+ s.t.Errorf("Got error %v, want io.EOF", err)
+ }
+ <-call.Context().Done()
+ close(s.cancelled)
+ return nil
+}
+
+// CancelStreamIgnorer doesn't read from its input stream, so all of its
+// buffers fill. The intention is to show that the call's context is cancelled
+// even when the stream is stalled.
+func (s *cancelTestServer) CancelStreamIgnorer(call ipc.StreamServerCall) error {
+ close(s.started)
+ <-call.Context().Done()
+ close(s.cancelled)
+ return nil
+}
+
+func waitForCancel(t *testing.T, ts *cancelTestServer, cancel context.CancelFunc) {
+ <-ts.started
+ cancel()
+ <-ts.cancelled
+}
+
+// TestCancel tests cancellation while the server is reading from a stream.
+func TestCancel(t *testing.T) {
+ ts := newCancelTestServer(t)
+ b := createBundle(t, tsecurity.NewPrincipal("client"), tsecurity.NewPrincipal("server"), ts)
+ defer b.cleanup(t)
+
+ ctx, cancel := context.WithCancel(testContext())
+ _, err := b.client.StartCall(ctx, "mountpoint/server/suffix", "CancelStreamReader", []interface{}{})
+ if err != nil {
+ t.Fatalf("Start call failed: %v", err)
+ }
+ waitForCancel(t, ts, cancel)
+}
+
+// TestCancelWithFullBuffers tests that the cancel message gets through even if
+// the writer has filled the buffers and the server is not reading.
+func TestCancelWithFullBuffers(t *testing.T) {
+ ts := newCancelTestServer(t)
+ b := createBundle(t, tsecurity.NewPrincipal("client"), tsecurity.NewPrincipal("server"), ts)
+ defer b.cleanup(t)
+
+ ctx, cancel := context.WithCancel(testContext())
+ call, err := b.client.StartCall(ctx, "mountpoint/server/suffix", "CancelStreamIgnorer", []interface{}{})
+ if err != nil {
+ t.Fatalf("Start call failed: %v", err)
+ }
+ // Fill up all the write buffers to ensure that cancelling works even when the stream
+ // is blocked.
+ call.Send(make([]byte, vc.MaxSharedBytes))
+ call.Send(make([]byte, vc.DefaultBytesBufferedPerFlow))
+
+ waitForCancel(t, ts, cancel)
+}
+
+type streamRecvInGoroutineServer struct{ c chan error }
+
+func (s *streamRecvInGoroutineServer) RecvInGoroutine(call ipc.StreamServerCall) error {
+ // Spawn a goroutine to read streaming data from the client.
+ go func() {
+ var i interface{}
+ for {
+ err := call.Recv(&i)
+ if err != nil {
+ s.c <- err
+ return
+ }
+ }
+ }()
+ // Imagine the server did some processing here and now that it is done,
+ // it does not care to see what else the client has to say.
+ return nil
+}
+
+func TestStreamReadTerminatedByServer(t *testing.T) {
+ s := &streamRecvInGoroutineServer{c: make(chan error, 1)}
+ b := createBundle(t, tsecurity.NewPrincipal("client"), tsecurity.NewPrincipal("server"), s)
+ defer b.cleanup(t)
+
+ call, err := b.client.StartCall(testContext(), "mountpoint/server/suffix", "RecvInGoroutine", []interface{}{})
+ if err != nil {
+ t.Fatalf("StartCall failed: %v", err)
+ }
+
+ c := make(chan error, 1)
+ go func() {
+ for i := 0; true; i++ {
+ if err := call.Send(i); err != nil {
+ c <- err
+ return
+ }
+ }
+ }()
+
+ // The goroutine at the server executing "Recv" should have terminated
+ // with EOF.
+ if err := <-s.c; err != io.EOF {
+ t.Errorf("Got %v at server, want io.EOF", err)
+ }
+ // The client Send should have failed since the RPC has been
+ // terminated.
+ if err := <-c; err == nil {
+ t.Errorf("Client Send should fail as the server should have closed the flow")
+ }
+}
+
+// TestConnectWithIncompatibleServers tests that clients ignore incompatible endpoints.
+func TestConnectWithIncompatibleServers(t *testing.T) {
+ b := createBundle(t, tsecurity.NewPrincipal("client"), tsecurity.NewPrincipal("server"), &testServer{})
+ defer b.cleanup(t)
+
+ // Publish some incompatible endpoints.
+ publisher := publisher.New(testContext(), b.ns, publishPeriod)
+ defer publisher.WaitForStop()
+ defer publisher.Stop()
+ publisher.AddName("incompatible")
+ publisher.AddServer("/@2@tcp@localhost:10000@@1000000@2000000@@", false)
+ publisher.AddServer("/@2@tcp@localhost:10001@@2000000@3000000@@", false)
+
+ ctx, _ := context.WithTimeout(testContext(), 100*time.Millisecond)
+
+ _, err := b.client.StartCall(ctx, "incompatible/suffix", "Echo", []interface{}{"foo"})
+ if !verror.Is(err, verror.ErrNoServers.ID) {
+ t.Errorf("Expected error %s, found: %v", verror.ErrNoServers, err)
+ }
+
+ // Now add a server with a compatible endpoint and try again.
+ publisher.AddServer("/"+b.ep.String(), false)
+ publisher.AddName("incompatible")
+
+ call, err := b.client.StartCall(testContext(), "incompatible/suffix", "Echo", []interface{}{"foo"})
+ if err != nil {
+ t.Fatal(err)
+ }
+ var result string
+ if err = call.Finish(&result); err != nil {
+ t.Errorf("Unexpected error finishing call %v", err)
+ }
+ expected := `method:"Echo",suffix:"suffix",arg:"foo"`
+ if result != expected {
+ t.Errorf("Wrong result returned. Got %s, wanted %s", result, expected)
+ }
+}
+
+func TestPreferredAddress(t *testing.T) {
+ sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+ defer sm.Shutdown()
+ ns := tnaming.NewSimpleNamespace()
+ pa := func(string, []ipc.Address) ([]ipc.Address, error) {
+ a := &net.IPAddr{}
+ a.IP = net.ParseIP("1.1.1.1")
+ return []ipc.Address{&netstate.AddrIfc{Addr: a}}, nil
+ }
+ ctx := testContext()
+ server, err := testInternalNewServer(ctx, sm, ns, vc.LocalPrincipal{tsecurity.NewPrincipal("server")})
+ if err != nil {
+ t.Errorf("InternalNewServer failed: %v", err)
+ }
+ defer server.Stop()
+
+ spec := ipc.ListenSpec{
+ Addrs: ipc.ListenAddrs{{"tcp", ":0"}},
+ AddressChooser: pa,
+ }
+ eps, err := server.Listen(spec)
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ iep := eps[0].(*inaming.Endpoint)
+ host, _, err := net.SplitHostPort(iep.Address)
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ if got, want := host, "1.1.1.1"; got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+ // Won't override the specified address.
+ eps, err = server.Listen(listenSpec)
+ iep = eps[0].(*inaming.Endpoint)
+ host, _, err = net.SplitHostPort(iep.Address)
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ if got, want := host, "127.0.0.1"; got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
+
+func TestPreferredAddressErrors(t *testing.T) {
+ sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+ defer sm.Shutdown()
+ ns := tnaming.NewSimpleNamespace()
+ paerr := func(_ string, a []ipc.Address) ([]ipc.Address, error) {
+ return nil, fmt.Errorf("oops")
+ }
+ ctx := testContext()
+ server, err := testInternalNewServer(ctx, sm, ns, vc.LocalPrincipal{tsecurity.NewPrincipal("server")})
+ if err != nil {
+ t.Errorf("InternalNewServer failed: %v", err)
+ }
+ defer server.Stop()
+ spec := ipc.ListenSpec{
+ Addrs: ipc.ListenAddrs{{"tcp", ":0"}},
+ AddressChooser: paerr,
+ }
+ eps, err := server.Listen(spec)
+ iep := eps[0].(*inaming.Endpoint)
+ host, _, err := net.SplitHostPort(iep.Address)
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ ip := net.ParseIP(host)
+ if ip == nil {
+ t.Fatalf("failed to parse IP address: %q", host)
+ }
+ if !ip.IsUnspecified() {
+ t.Errorf("IP: %q is not unspecified", ip)
+ }
+}
+
+func TestSecurityNone(t *testing.T) {
+ sm := imanager.InternalNew(naming.FixedRoutingID(0x66666666))
+ defer sm.Shutdown()
+ ns := tnaming.NewSimpleNamespace()
+ ctx := testContext()
+ server, err := testInternalNewServer(ctx, sm, ns, options.VCSecurityNone)
+ if err != nil {
+ t.Fatalf("InternalNewServer failed: %v", err)
+ }
+ if _, err = server.Listen(listenSpec); err != nil {
+ t.Fatalf("server.Listen failed: %v", err)
+ }
+ disp := &testServerDisp{&testServer{}}
+ if err := server.ServeDispatcher("mp/server", disp); err != nil {
+ t.Fatalf("server.Serve failed: %v", err)
+ }
+ client, err := InternalNewClient(sm, ns)
+ if err != nil {
+ t.Fatalf("InternalNewClient failed: %v", err)
+ }
+ // When using VCSecurityNone, all authorization checks should be skipped, so
+ // unauthorized methods should be callable.
+ call, err := client.StartCall(ctx, "mp/server", "Unauthorized", nil, options.VCSecurityNone)
+ if err != nil {
+ t.Fatalf("client.StartCall failed: %v", err)
+ }
+ var got string
+ if err := call.Finish(&got); err != nil {
+ t.Errorf("call.Finish failed: %v", err)
+ }
+ if want := "UnauthorizedResult"; got != want {
+ t.Errorf("got (%v), want (%v)", got, want)
+ }
+}
+
+func TestCallWithNilContext(t *testing.T) {
+ sm := imanager.InternalNew(naming.FixedRoutingID(0x66666666))
+ defer sm.Shutdown()
+ ns := tnaming.NewSimpleNamespace()
+ client, err := InternalNewClient(sm, ns)
+ if err != nil {
+ t.Fatalf("InternalNewClient failed: %v", err)
+ }
+ call, err := client.StartCall(nil, "foo", "bar", []interface{}{}, options.VCSecurityNone)
+ if call != nil {
+ t.Errorf("Expected nil interface got: %#v", call)
+ }
+ if !verror.Is(err, verror.ErrBadArg.ID) {
+ t.Errorf("Expected an BadArg error, got: %s", err.Error())
+ }
+}
+
+func TestServerBlessingsOpt(t *testing.T) {
+ var (
+ pserver = tsecurity.NewPrincipal("server")
+ pclient = tsecurity.NewPrincipal("client")
+ batman, _ = pserver.BlessSelf("batman")
+ )
+ // Make the client recognize all server blessings
+ if err := pclient.AddToRoots(batman); err != nil {
+ t.Fatal(err)
+ }
+ if err := pclient.AddToRoots(pserver.BlessingStore().Default()); err != nil {
+ t.Fatal(err)
+ }
+ // Start a server that uses the ServerBlessings option to configure itself
+ // to act as batman (as opposed to using the default blessing).
+ ns := tnaming.NewSimpleNamespace()
+
+ popt := vc.LocalPrincipal{pserver}
+ defer runServer(t, ns, "mountpoint/batman", &testServer{}, popt, options.ServerBlessings{batman}).Shutdown()
+ defer runServer(t, ns, "mountpoint/default", &testServer{}, popt).Shutdown()
+
+ // And finally, make an RPC and see that the client sees "batman"
+ runClient := func(server string) ([]string, error) {
+ smc := imanager.InternalNew(naming.FixedRoutingID(0xc))
+ defer smc.Shutdown()
+ client, err := InternalNewClient(
+ smc,
+ ns,
+ vc.LocalPrincipal{pclient})
+ if err != nil {
+ return nil, err
+ }
+ defer client.Close()
+ call, err := client.StartCall(testContext(), server, "Closure", nil)
+ if err != nil {
+ return nil, err
+ }
+ blessings, _ := call.RemoteBlessings()
+ return blessings, nil
+ }
+
+ // When talking to mountpoint/batman, should see "batman"
+ // When talking to mountpoint/default, should see "server"
+ if got, err := runClient("mountpoint/batman"); err != nil || len(got) != 1 || got[0] != "batman" {
+ t.Errorf("Got (%v, %v) wanted 'batman'", got, err)
+ }
+ if got, err := runClient("mountpoint/default"); err != nil || len(got) != 1 || got[0] != "server" {
+ t.Errorf("Got (%v, %v) wanted 'server'", got, err)
+ }
+}
+
+func TestNoDischargesOpt(t *testing.T) {
+ var (
+ pdischarger = tsecurity.NewPrincipal("discharger")
+ pserver = tsecurity.NewPrincipal("server")
+ pclient = tsecurity.NewPrincipal("client")
+ )
+ // Make the client recognize all server blessings
+ if err := pclient.AddToRoots(pserver.BlessingStore().Default()); err != nil {
+ t.Fatal(err)
+ }
+ if err := pclient.AddToRoots(pdischarger.BlessingStore().Default()); err != nil {
+ t.Fatal(err)
+ }
+
+ // Bless the client with a ThirdPartyCaveat.
+ tpcav := mkThirdPartyCaveat(pdischarger.PublicKey(), "mountpoint/discharger", mkCaveat(security.ExpiryCaveat(time.Now().Add(time.Hour))))
+ blessings, err := pserver.Bless(pclient.PublicKey(), pserver.BlessingStore().Default(), "tpcav", tpcav)
+ if err != nil {
+ t.Fatalf("failed to create Blessings: %v", err)
+ }
+ if _, err = pclient.BlessingStore().Set(blessings, "server"); err != nil {
+ t.Fatalf("failed to set blessings: %v", err)
+ }
+
+ ns := tnaming.NewSimpleNamespace()
+
+ // Set up the discharger and test server.
+ discharger := &dischargeServer{}
+ defer runServer(t, ns, "mountpoint/discharger", discharger, vc.LocalPrincipal{pdischarger}).Shutdown()
+ defer runServer(t, ns, "mountpoint/testServer", &testServer{}, vc.LocalPrincipal{pserver}).Shutdown()
+
+ runClient := func(noDischarges bool) {
+ rid, err := naming.NewRoutingID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ smc := imanager.InternalNew(rid)
+ defer smc.Shutdown()
+ client, err := InternalNewClient(smc, ns, vc.LocalPrincipal{pclient})
+ if err != nil {
+ t.Fatalf("failed to create client: %v", err)
+ }
+ defer client.Close()
+ var opts []ipc.CallOpt
+ if noDischarges {
+ opts = append(opts, NoDischarges{})
+ }
+ if _, err = client.StartCall(testContext(), "mountpoint/testServer", "Closure", nil, opts...); err != nil {
+ t.Fatalf("failed to StartCall: %v", err)
+ }
+ }
+
+ // Test that when the NoDischarges option is set, dischargeServer does not get called.
+ if runClient(true); discharger.called {
+ t.Errorf("did not expect discharger to be called")
+ }
+ discharger.called = false
+ // Test that when the NoDischarges option is not set, dischargeServer does get called.
+ if runClient(false); !discharger.called {
+ t.Errorf("expected discharger to be called")
+ }
+}
+
+func TestNoImplicitDischargeFetching(t *testing.T) {
+ // This test ensures that discharge clients only fetch discharges for the
+ // specified third-party caveats and not for their own.
+ var (
+ pdischarger1 = tsecurity.NewPrincipal("discharger1")
+ pdischarger2 = tsecurity.NewPrincipal("discharger2")
+ pdischargeClient = tsecurity.NewPrincipal("dischargeClient")
+ )
+
+ // Bless the client with a ThirdPartyCaveat from discharger1.
+ tpcav1 := mkThirdPartyCaveat(pdischarger1.PublicKey(), "mountpoint/discharger1", mkCaveat(security.ExpiryCaveat(time.Now().Add(time.Hour))))
+ blessings, err := pdischarger1.Bless(pdischargeClient.PublicKey(), pdischarger1.BlessingStore().Default(), "tpcav1", tpcav1)
+ if err != nil {
+ t.Fatalf("failed to create Blessings: %v", err)
+ }
+ if err = pdischargeClient.BlessingStore().SetDefault(blessings); err != nil {
+ t.Fatalf("failed to set blessings: %v", err)
+ }
+
+ ns := tnaming.NewSimpleNamespace()
+
+ // Set up the discharge servers.
+ discharger1 := &dischargeServer{}
+ discharger2 := &dischargeServer{}
+ defer runServer(t, ns, "mountpoint/discharger1", discharger1, vc.LocalPrincipal{pdischarger1}).Shutdown()
+ defer runServer(t, ns, "mountpoint/discharger2", discharger2, vc.LocalPrincipal{pdischarger2}).Shutdown()
+
+ rid, err := naming.NewRoutingID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ sm := imanager.InternalNew(rid)
+
+ c, err := InternalNewClient(sm, ns, vc.LocalPrincipal{pdischargeClient})
+ if err != nil {
+ t.Fatalf("failed to create client: %v", err)
+ }
+ dc := c.(*client).dc
+ tpcav2, err := security.NewPublicKeyCaveat(pdischarger2.PublicKey(), "mountpoint/discharger2", security.ThirdPartyRequirements{}, mkCaveat(security.ExpiryCaveat(time.Now().Add(time.Hour))))
+ if err != nil {
+ t.Error(err)
+ }
+ dc.PrepareDischarges(testContext(), []security.Caveat{tpcav2}, security.DischargeImpetus{})
+
+ // Ensure that discharger1 was not called and discharger2 was called.
+ if discharger1.called {
+ t.Errorf("discharge for caveat on discharge client should not have been fetched.")
+ }
+ if !discharger2.called {
+ t.Errorf("discharge for caveat passed to PrepareDischarges should have been fetched.")
+ }
+}
+
+// TestBlessingsCache tests that the VCCache is successfully used to cache
+// blessings across duplicate calls.
+func TestBlessingsCache(t *testing.T) {
+ var (
+ pserver = tsecurity.NewPrincipal("server")
+ pclient = tsecurity.NewPrincipal("client")
+ )
+ // Make the client recognize all server blessings
+ if err := pclient.AddToRoots(pserver.BlessingStore().Default()); err != nil {
+ t.Fatal(err)
+ }
+
+ ns := tnaming.NewSimpleNamespace()
+
+ serverSM := runServer(t, ns, "mountpoint/testServer", &testServer{}, vc.LocalPrincipal{pserver})
+ defer serverSM.Shutdown()
+ rid := serverSM.RoutingID()
+
+ newClient := func() ipc.Client {
+ rid, err := naming.NewRoutingID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ smc := imanager.InternalNew(rid)
+ defer smc.Shutdown()
+ client, err := InternalNewClient(smc, ns, vc.LocalPrincipal{pclient})
+ if err != nil {
+ t.Fatalf("failed to create client: %v", err)
+ }
+ return client
+ }
+
+ runClient := func(client ipc.Client) {
+ if call, err := client.StartCall(testContext(), "mountpoint/testServer", "Closure", nil); err != nil {
+ t.Fatalf("failed to StartCall: %v", err)
+ } else if err := call.Finish(); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ cachePrefix := naming.Join("ipc", "server", "routing-id", rid.String(), "security", "blessings", "cache")
+ cacheHits, err := stats.GetStatsObject(naming.Join(cachePrefix, "hits"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ cacheAttempts, err := stats.GetStatsObject(naming.Join(cachePrefix, "attempts"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Check that the blessings cache is not used on the first call.
+ clientA := newClient()
+ runClient(clientA)
+ if gotAttempts, gotHits := cacheAttempts.Value().(int64), cacheHits.Value().(int64); gotAttempts != 1 || gotHits != 0 {
+ t.Errorf("got cacheAttempts(%v), cacheHits(%v), expected cacheAttempts(1), cacheHits(0)", gotAttempts, gotHits)
+ }
+ // Check that the cache is hit on the second call with the same blessings.
+ runClient(clientA)
+ if gotAttempts, gotHits := cacheAttempts.Value().(int64), cacheHits.Value().(int64); gotAttempts != 2 || gotHits != 1 {
+ t.Errorf("got cacheAttempts(%v), cacheHits(%v), expected cacheAttempts(2), cacheHits(1)", gotAttempts, gotHits)
+ }
+ clientA.Close()
+ // Check that the cache is not used with a different client.
+ clientB := newClient()
+ runClient(clientB)
+ if gotAttempts, gotHits := cacheAttempts.Value().(int64), cacheHits.Value().(int64); gotAttempts != 3 || gotHits != 1 {
+ t.Errorf("got cacheAttempts(%v), cacheHits(%v), expected cacheAttempts(3), cacheHits(1)", gotAttempts, gotHits)
+ }
+ // clientB changes its blessings, the cache should not be used.
+ blessings, err := pserver.Bless(pclient.PublicKey(), pserver.BlessingStore().Default(), "cav", mkCaveat(security.ExpiryCaveat(time.Now().Add(time.Hour))))
+ if err != nil {
+ t.Fatalf("failed to create Blessings: %v", err)
+ }
+ if _, err = pclient.BlessingStore().Set(blessings, "server"); err != nil {
+ t.Fatalf("failed to set blessings: %v", err)
+ }
+ runClient(clientB)
+ if gotAttempts, gotHits := cacheAttempts.Value().(int64), cacheHits.Value().(int64); gotAttempts != 4 || gotHits != 1 {
+ t.Errorf("got cacheAttempts(%v), cacheHits(%v), expected cacheAttempts(4), cacheHits(1)", gotAttempts, gotHits)
+ }
+ clientB.Close()
+}
+
+var fakeTimeCaveat = security.CaveatDescriptor{
+ Id: uniqueid.Id{0x18, 0xba, 0x6f, 0x84, 0xd5, 0xec, 0xdb, 0x9b, 0xf2, 0x32, 0x19, 0x5b, 0x53, 0x92, 0x80, 0x0},
+ ParamType: vdl.TypeOf(int64(0)),
+}
+
+func TestServerPublicKeyOpt(t *testing.T) {
+ var (
+ pserver = tsecurity.NewPrincipal("server")
+ pother = tsecurity.NewPrincipal("other")
+ pclient = tsecurity.NewPrincipal("client")
+ )
+
+ ns := tnaming.NewSimpleNamespace()
+ mountName := "mountpoint/default"
+
+ // Start a server with pserver.
+ defer runServer(t, ns, mountName, &testServer{}, vc.LocalPrincipal{pserver}).Shutdown()
+
+ smc := imanager.InternalNew(naming.FixedRoutingID(0xc))
+ client, err := InternalNewClient(
+ smc,
+ ns,
+ vc.LocalPrincipal{pclient})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer smc.Shutdown()
+ defer client.Close()
+
+ // The call should succeed when the server presents the same public key as the opt...
+ if _, err = client.StartCall(testContext(), mountName, "Closure", nil, options.ServerPublicKey{pserver.PublicKey()}); err != nil {
+ t.Errorf("Expected call to succeed but got %v", err)
+ }
+ // ...but fail if they differ.
+ if _, err = client.StartCall(testContext(), mountName, "Closure", nil, options.ServerPublicKey{pother.PublicKey()}); !verror.Is(err, verror.ErrNotTrusted.ID) {
+ t.Errorf("got %v, want %v", verror.ErrorID(err), verror.ErrNotTrusted.ID)
+ }
+}
+
+type expiryDischarger struct {
+ called bool
+}
+
+func (ed *expiryDischarger) Discharge(ctx ipc.StreamServerCall, cav security.Caveat, _ security.DischargeImpetus) (security.WireDischarge, error) {
+ tp := cav.ThirdPartyDetails()
+ if tp == nil {
+ return nil, fmt.Errorf("discharger: %v does not represent a third-party caveat", cav)
+ }
+ if err := tp.Dischargeable(ctx); err != nil {
+ return nil, fmt.Errorf("third-party caveat %v cannot be discharged for this context: %v", cav, err)
+ }
+ expDur := 10 * time.Millisecond
+ if ed.called {
+ expDur = time.Second
+ }
+ expiry, err := security.ExpiryCaveat(time.Now().Add(expDur))
+ if err != nil {
+ return nil, fmt.Errorf("failed to create an expiration on the discharge: %v", err)
+ }
+ d, err := ctx.LocalPrincipal().MintDischarge(cav, expiry)
+ if err != nil {
+ return nil, err
+ }
+ ed.called = true
+ return security.MarshalDischarge(d), nil
+}
+
+func TestDischargeClientFetchExpiredDischarges(t *testing.T) {
+ var (
+ pdischarger = tsecurity.NewPrincipal("discharger")
+ )
+
+ // Create a ThirdPartyCaveat that will be discharged by the discharger.
+ tpcav := mkThirdPartyCaveat(pdischarger.PublicKey(), "mountpoint/discharger", mkCaveat(security.ExpiryCaveat(time.Now().Add(time.Hour))))
+
+ ns := tnaming.NewSimpleNamespace()
+ ctx := testContext()
+
+ // Set up the discharge server.
+ discharger := &expiryDischarger{}
+ defer runServer(t, ns, "mountpoint/discharger", discharger, vc.LocalPrincipal{pdischarger}).Shutdown()
+
+ // Create a discharge client.
+ rid, err := naming.NewRoutingID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ smc := imanager.InternalNew(rid)
+ defer smc.Shutdown()
+ client, err := InternalNewClient(smc, ns)
+ if err != nil {
+ t.Fatalf("failed to create client: %v", err)
+ }
+ defer client.Close()
+ dc := InternalNewDischargeClient(ctx, client)
+
+ // Fetch discharges for tpcav.
+ dis := dc.PrepareDischarges(nil, []security.Caveat{tpcav}, security.DischargeImpetus{})[0]
+ // The first discharge is minted with a short expiry, so wait for it to expire.
+ expiry := dis.Expiry()
+ select {
+ case <-time.After(expiry.Sub(time.Now())):
+ break
+ case <-time.After(time.Second):
+ t.Fatalf("discharge didn't expire within a second")
+ }
+ // Prepare discharges again to get fresh ones.
+ now := time.Now()
+ dis = dc.PrepareDischarges(nil, []security.Caveat{tpcav}, security.DischargeImpetus{})[0]
+ if expiry = dis.Expiry(); expiry.Before(now) {
+ t.Fatalf("discharge has expired %v, but should be fresh", dis)
+ }
+}
+
+func init() {
+ security.RegisterCaveatValidator(fakeTimeCaveat, func(_ security.Call, t int64) error {
+ if now := clock.Now(); now > int(t) {
+ return fmt.Errorf("fakeTimeCaveat expired: now=%d > then=%d", now, t)
+ }
+ return nil
+ })
+}
diff --git a/profiles/internal/ipc/glob_test.go b/profiles/internal/ipc/glob_test.go
new file mode 100644
index 0000000..57ce06f
--- /dev/null
+++ b/profiles/internal/ipc/glob_test.go
@@ -0,0 +1,282 @@
+package ipc_test
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/naming"
+ "v.io/v23/security"
+
+ "v.io/x/ref/lib/glob"
+ "v.io/x/ref/lib/testutil"
+ _ "v.io/x/ref/profiles"
+)
+
+func startServer(ctx *context.T, tree *node) (string, func(), error) {
+ server, err := v23.NewServer(ctx)
+ if err != nil {
+ return "", nil, fmt.Errorf("failed to start debug server: %v", err)
+ }
+ endpoints, err := server.Listen(v23.GetListenSpec(ctx))
+ if err != nil {
+ return "", nil, fmt.Errorf("failed to listen: %v", err)
+ }
+ if err := server.ServeDispatcher("", &disp{tree}); err != nil {
+ return "", nil, err
+ }
+ ep := endpoints[0].String()
+ return ep, func() { server.Stop() }, nil
+}
+
+func TestGlob(t *testing.T) {
+ ctx, shutdown := testutil.InitForTest()
+ defer shutdown()
+
+ namespace := []string{
+ "a/b/c1/d1",
+ "a/b/c1/d2",
+ "a/b/c2/d1",
+ "a/b/c2/d2",
+ "a/x/y/z",
+ "leaf",
+ }
+ tree := newNode()
+ for _, p := range namespace {
+ tree.find(strings.Split(p, "/"), true)
+ }
+
+ ep, stop, err := startServer(ctx, tree)
+ if err != nil {
+ t.Fatalf("startServer: %v", err)
+ }
+ defer stop()
+
+ testcases := []struct {
+ name, pattern string
+ expected []string
+ }{
+ {"", "...", []string{
+ "",
+ "a",
+ "a/b",
+ "a/b/c1",
+ "a/b/c1/d1",
+ "a/b/c1/d2",
+ "a/b/c2",
+ "a/b/c2/d1",
+ "a/b/c2/d2",
+ "a/x",
+ "a/x/y",
+ "a/x/y/z",
+ "leaf",
+ }},
+ {"a", "...", []string{
+ "",
+ "b",
+ "b/c1",
+ "b/c1/d1",
+ "b/c1/d2",
+ "b/c2",
+ "b/c2/d1",
+ "b/c2/d2",
+ "x",
+ "x/y",
+ "x/y/z",
+ }},
+ {"a/b", "...", []string{
+ "",
+ "c1",
+ "c1/d1",
+ "c1/d2",
+ "c2",
+ "c2/d1",
+ "c2/d2",
+ }},
+ {"a/b/c1", "...", []string{
+ "",
+ "d1",
+ "d2",
+ }},
+ {"a/b/c1/d1", "...", []string{
+ "",
+ }},
+ {"a/x", "...", []string{
+ "",
+ "y",
+ "y/z",
+ }},
+ {"a/x/y", "...", []string{
+ "",
+ "z",
+ }},
+ {"a/x/y/z", "...", []string{
+ "",
+ }},
+ {"", "", []string{""}},
+ {"", "*", []string{"a", "leaf"}},
+ {"a", "", []string{""}},
+ {"a", "*", []string{"b", "x"}},
+ {"a/b", "", []string{""}},
+ {"a/b", "*", []string{"c1", "c2"}},
+ {"a/b/c1", "", []string{""}},
+ {"a/b/c1", "*", []string{"d1", "d2"}},
+ {"a/b/c1/d1", "*", []string{}},
+ {"a/b/c1/d1", "", []string{""}},
+ {"a", "*/c?", []string{"b/c1", "b/c2"}},
+ {"a", "*/*", []string{"b/c1", "b/c2", "x/y"}},
+ {"a", "*/*/*", []string{"b/c1/d1", "b/c1/d2", "b/c2/d1", "b/c2/d2", "x/y/z"}},
+ {"a/x", "*/*", []string{"y/z"}},
+ {"bad", "", []string{}},
+ {"a/bad", "", []string{}},
+ {"a/b/bad", "", []string{}},
+ {"a/b/c1/bad", "", []string{}},
+ {"a/x/bad", "", []string{}},
+ {"a/x/y/bad", "", []string{}},
+ {"leaf", "", []string{""}},
+ // muah is an infinite space used to test the recursion limit.
+ {"muah", "*", []string{"ha"}},
+ {"muah", "*/*", []string{"ha/ha"}},
+ {"muah", "*/*/*/*/*/*/*/*/*/*/*/*", []string{"ha/ha/ha/ha/ha/ha/ha/ha/ha/ha/ha/ha"}},
+ {"muah", "...", []string{
+ "",
+ "ha",
+ "ha/ha",
+ "ha/ha/ha",
+ "ha/ha/ha/ha",
+ "ha/ha/ha/ha/ha",
+ "ha/ha/ha/ha/ha/ha",
+ "ha/ha/ha/ha/ha/ha/ha",
+ "ha/ha/ha/ha/ha/ha/ha/ha",
+ "ha/ha/ha/ha/ha/ha/ha/ha/ha",
+ "ha/ha/ha/ha/ha/ha/ha/ha/ha/ha",
+ }},
+ }
+ for _, tc := range testcases {
+ name := naming.JoinAddressName(ep, tc.name)
+ results, err := testutil.GlobName(ctx, name, tc.pattern)
+ if err != nil {
+ t.Errorf("unexpected Glob error for (%q, %q): %v", tc.name, tc.pattern, err)
+ continue
+ }
+ if !reflect.DeepEqual(results, tc.expected) {
+ t.Errorf("unexpected result for (%q, %q). Got %q, want %q", tc.name, tc.pattern, results, tc.expected)
+ }
+ }
+}
+
+type disp struct {
+ tree *node
+}
+
+func (d *disp) Lookup(suffix string) (interface{}, security.Authorizer, error) {
+ elems := strings.Split(suffix, "/")
+ if len(elems) != 0 && elems[0] == "muah" {
+ // Infinite space. Each node has one child named "ha".
+ return ipc.ChildrenGlobberInvoker("ha"), nil, nil
+
+ }
+ if len(elems) != 0 && elems[0] == "leaf" {
+ return leafObject{}, nil, nil
+ }
+ if len(elems) < 2 || (elems[0] == "a" && elems[1] == "x") {
+ return &vChildrenObject{d.tree, elems}, nil, nil
+ }
+ return &globObject{d.tree, elems}, nil, nil
+}
+
+type globObject struct {
+ n *node
+ suffix []string
+}
+
+func (o *globObject) Glob__(ctx ipc.ServerCall, pattern string) (<-chan naming.VDLGlobReply, error) {
+ g, err := glob.Parse(pattern)
+ if err != nil {
+ return nil, err
+ }
+ n := o.n.find(o.suffix, false)
+ if n == nil {
+ return nil, nil
+ }
+ ch := make(chan naming.VDLGlobReply)
+ go func() {
+ o.globLoop(ch, "", g, n)
+ close(ch)
+ }()
+ return ch, nil
+}
+
+func (o *globObject) globLoop(ch chan<- naming.VDLGlobReply, name string, g *glob.Glob, n *node) {
+ if g.Len() == 0 {
+ ch <- naming.VDLGlobReplyEntry{naming.VDLMountEntry{Name: name}}
+ }
+ if g.Finished() {
+ return
+ }
+ for leaf, child := range n.children {
+ if ok, _, left := g.MatchInitialSegment(leaf); ok {
+ o.globLoop(ch, naming.Join(name, leaf), left, child)
+ }
+ }
+}
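Illustrative sketch (an editorial aside, not part of the diff): the loop above consumes one pattern element per tree level via MatchInitialSegment, and the returned remainder is the pattern applied to the matched child's subtree. The snippet below reuses only calls already made in the surrounding code.

    g, err := glob.Parse("a/*") // two pattern elements: "a", then "*"
    if err == nil {
        if ok, _, left := g.MatchInitialSegment("a"); ok {
            // "a" matched the first element; "left" is the remaining pattern
            // that globLoop would apply to the children of "a".
            _ = left
        }
    }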
+
+type vChildrenObject struct {
+ n *node
+ suffix []string
+}
+
+func (o *vChildrenObject) GlobChildren__(ipc.ServerCall) (<-chan string, error) {
+ n := o.n.find(o.suffix, false)
+ if n == nil {
+ return nil, fmt.Errorf("object does not exist")
+ }
+ ch := make(chan string, len(n.children))
+ for child := range n.children {
+ ch <- child
+ }
+ close(ch)
+ return ch, nil
+}
+
+type node struct {
+ children map[string]*node
+}
+
+func newNode() *node {
+ return &node{make(map[string]*node)}
+}
+
+func (n *node) find(names []string, create bool) *node {
+ if len(names) == 1 && names[0] == "" {
+ return n
+ }
+ for {
+ if len(names) == 0 {
+ return n
+ }
+ if next, ok := n.children[names[0]]; ok {
+ n = next
+ names = names[1:]
+ continue
+ }
+ if create {
+ nn := newNode()
+ n.children[names[0]] = nn
+ n = nn
+ names = names[1:]
+ continue
+ }
+ return nil
+ }
+}
+
+type leafObject struct{}
+
+func (l leafObject) Func(call ipc.ServerCall) error {
+ return nil
+}
diff --git a/profiles/internal/ipc/options.go b/profiles/internal/ipc/options.go
new file mode 100644
index 0000000..197fc17
--- /dev/null
+++ b/profiles/internal/ipc/options.go
@@ -0,0 +1,99 @@
+package ipc
+
+import (
+ "time"
+
+ "v.io/x/ref/profiles/internal/ipc/stream"
+
+ "v.io/v23/ipc"
+ "v.io/v23/naming"
+ "v.io/v23/options"
+)
+
+// PreferredProtocols instructs the Runtime implementation to select
+// endpoints with the specified protocols when a Client makes a call,
+// trying them in the order listed.
+type PreferredProtocols []string
+
+func (PreferredProtocols) IPCClientOpt() {}
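Illustrative sketch (an editorial aside, not part of the diff): PreferredProtocols carries the IPCClientOpt marker method, so the expectation is that it satisfies the ipc.ClientOpt interface and can be bundled with other client options; the exact plumbing below is an assumption, not code from this change.

    // Hypothetical usage: collect PreferredProtocols alongside other client options.
    var clientOpts []ipc.ClientOpt
    clientOpts = append(clientOpts, PreferredProtocols{"ws", "tcp4"})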
+
+// PreferredServerResolveProtocols is used to sort and filter the endpoints
+// when resolving the proxy name from a mounttable.
+type PreferredServerResolveProtocols []string
+
+func (PreferredServerResolveProtocols) IPCServerOpt() {}
+
+// ReservedNameDispatcher specifies the dispatcher that controls access
+// to the framework-managed portion of the namespace.
+type ReservedNameDispatcher struct {
+ Dispatcher ipc.Dispatcher
+}
+
+func (ReservedNameDispatcher) IPCServerOpt() {}
+
+func getRetryTimeoutOpt(opts []ipc.CallOpt) (time.Duration, bool) {
+ for _, o := range opts {
+ if r, ok := o.(options.RetryTimeout); ok {
+ return time.Duration(r), true
+ }
+ }
+ return 0, false
+}
+
+func getNoResolveOpt(opts []ipc.CallOpt) bool {
+ for _, o := range opts {
+ if _, ok := o.(options.NoResolve); ok {
+ return true
+ }
+ }
+ return false
+}
+
+func shouldNotFetchDischarges(opts []ipc.CallOpt) bool {
+ for _, o := range opts {
+ if _, ok := o.(NoDischarges); ok {
+ return true
+ }
+ }
+ return false
+}
+
+func getNoRetryOpt(opts []ipc.CallOpt) bool {
+ for _, o := range opts {
+ if _, ok := o.(options.NoRetry); ok {
+ return true
+ }
+ }
+ return false
+}
+
+func getVCOpts(opts []ipc.CallOpt) (vcOpts []stream.VCOpt) {
+ for _, o := range opts {
+ if v, ok := o.(stream.VCOpt); ok {
+ vcOpts = append(vcOpts, v)
+ }
+ }
+ return
+}
+
+func getResolveOpts(opts []ipc.CallOpt) (resolveOpts []naming.ResolveOpt) {
+ for _, o := range opts {
+ if r, ok := o.(naming.ResolveOpt); ok {
+ resolveOpts = append(resolveOpts, r)
+ }
+ }
+ return
+}
+
+func vcEncrypted(vcOpts []stream.VCOpt) bool {
+ encrypted := true
+ for _, o := range vcOpts {
+ switch o {
+ case options.VCSecurityNone:
+ encrypted = false
+ case options.VCSecurityConfidential:
+ encrypted = true
+ }
+ }
+ return encrypted
+}
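Hedged illustration of the helper above (an editorial aside, not part of the diff): the switch gives the last security option in the slice precedence, so a hypothetical slice mixing both levels resolves to whichever appears last.

    // VCSecurityNone appears after VCSecurityConfidential, so vcEncrypted
    // reports false for this option slice.
    opts := []stream.VCOpt{options.VCSecurityConfidential, options.VCSecurityNone}
    encrypted := vcEncrypted(opts) // false
    _ = encrypted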
diff --git a/profiles/internal/ipc/protocols/tcp/init.go b/profiles/internal/ipc/protocols/tcp/init.go
new file mode 100644
index 0000000..92fb5b9
--- /dev/null
+++ b/profiles/internal/ipc/protocols/tcp/init.go
@@ -0,0 +1,62 @@
+package tcp
+
+import (
+ "net"
+ "time"
+
+ "v.io/x/ref/lib/tcputil"
+
+ "v.io/v23/ipc"
+ "v.io/x/lib/vlog"
+)
+
+func init() {
+ ipc.RegisterProtocol("tcp", tcpDial, tcpListen, "tcp4", "tcp6")
+ ipc.RegisterProtocol("tcp4", tcpDial, tcpListen)
+ ipc.RegisterProtocol("tcp6", tcpDial, tcpListen)
+}
+
+func tcpDial(network, address string, timeout time.Duration) (net.Conn, error) {
+ conn, err := net.DialTimeout(network, address, timeout)
+ if err != nil {
+ return nil, err
+ }
+ if err := tcputil.EnableTCPKeepAlive(conn); err != nil {
+ return nil, err
+ }
+ return conn, nil
+}
+
+// tcpListen returns a listener that sets KeepAlive on all accepted connections.
+func tcpListen(network, laddr string) (net.Listener, error) {
+ ln, err := net.Listen(network, laddr)
+ if err != nil {
+ return nil, err
+ }
+ return &tcpListener{ln}, nil
+}
+
+// tcpListener is a wrapper around net.Listener that sets KeepAlive on all
+// accepted connections.
+type tcpListener struct {
+ netLn net.Listener
+}
+
+func (ln *tcpListener) Accept() (net.Conn, error) {
+ conn, err := ln.netLn.Accept()
+ if err != nil {
+ return nil, err
+ }
+ if err := tcputil.EnableTCPKeepAlive(conn); err != nil {
+ vlog.Errorf("Failed to enable TCP keep alive: %v", err)
+ }
+ return conn, nil
+}
+
+func (ln *tcpListener) Close() error {
+ return ln.netLn.Close()
+}
+
+func (ln *tcpListener) Addr() net.Addr {
+ return ln.netLn.Addr()
+}
diff --git a/profiles/internal/ipc/protocols/ws/init.go b/profiles/internal/ipc/protocols/ws/init.go
new file mode 100644
index 0000000..fc01478
--- /dev/null
+++ b/profiles/internal/ipc/protocols/ws/init.go
@@ -0,0 +1,14 @@
+package websocket
+
+import (
+ "v.io/v23/ipc"
+
+ "v.io/x/ref/lib/websocket"
+)
+
+func init() {
+ // ws, ws4, ws6 represent websocket protocol instances.
+ ipc.RegisterProtocol("ws", websocket.Dial, websocket.Listener, "ws4", "ws6")
+ ipc.RegisterProtocol("ws4", websocket.Dial, websocket.Listener)
+ ipc.RegisterProtocol("ws6", websocket.Dial, websocket.Listener)
+}
diff --git a/profiles/internal/ipc/protocols/wsh/init.go b/profiles/internal/ipc/protocols/wsh/init.go
new file mode 100644
index 0000000..b7eb9b5
--- /dev/null
+++ b/profiles/internal/ipc/protocols/wsh/init.go
@@ -0,0 +1,15 @@
+// Package wsh registers the websocket 'hybrid' protocol.
+// We prefer to use tcp whenever we can to avoid the overhead of websockets.
+package wsh
+
+import (
+ "v.io/v23/ipc"
+
+ "v.io/x/ref/lib/websocket"
+)
+
+func init() {
+ ipc.RegisterProtocol("wsh", websocket.HybridDial, websocket.HybridListener, "tcp4", "tcp6", "ws4", "ws6")
+ ipc.RegisterProtocol("wsh4", websocket.HybridDial, websocket.HybridListener, "tcp4", "ws4")
+ ipc.RegisterProtocol("wsh6", websocket.HybridDial, websocket.HybridListener, "tcp6", "ws6")
+}
diff --git a/profiles/internal/ipc/protocols/wsh_nacl/init.go b/profiles/internal/ipc/protocols/wsh_nacl/init.go
new file mode 100644
index 0000000..82cfd48
--- /dev/null
+++ b/profiles/internal/ipc/protocols/wsh_nacl/init.go
@@ -0,0 +1,17 @@
+// Package wsh_nacl registers the websocket 'hybrid' protocol for nacl
+// architectures.
+package wsh_nacl
+
+import (
+ "v.io/v23/ipc"
+
+ "v.io/x/ref/lib/websocket"
+)
+
+func init() {
+ // We limit wsh to ws since in general nacl does not allow direct access
+ // to TCP/UDP networking.
+ ipc.RegisterProtocol("wsh", websocket.Dial, websocket.Listener, "ws4", "ws6")
+ ipc.RegisterProtocol("wsh4", websocket.Dial, websocket.Listener, "ws4")
+ ipc.RegisterProtocol("wsh6", websocket.Dial, websocket.Listener, "ws6")
+}
diff --git a/profiles/internal/ipc/proxy_test.go b/profiles/internal/ipc/proxy_test.go
new file mode 100644
index 0000000..a314004
--- /dev/null
+++ b/profiles/internal/ipc/proxy_test.go
@@ -0,0 +1,340 @@
+package ipc_test
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+ "time"
+
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/naming"
+ "v.io/v23/naming/ns"
+ "v.io/v23/options"
+ "v.io/v23/security"
+ "v.io/v23/verror"
+ "v.io/v23/vtrace"
+
+ "v.io/x/ref/lib/expect"
+ "v.io/x/ref/lib/flags"
+ "v.io/x/ref/lib/modules"
+ "v.io/x/ref/lib/modules/core"
+ tsecurity "v.io/x/ref/lib/testutil/security"
+ _ "v.io/x/ref/profiles"
+ iipc "v.io/x/ref/profiles/internal/ipc"
+ imanager "v.io/x/ref/profiles/internal/ipc/stream/manager"
+ "v.io/x/ref/profiles/internal/ipc/stream/vc"
+ inaming "v.io/x/ref/profiles/internal/naming"
+ tnaming "v.io/x/ref/profiles/internal/testing/mocks/naming"
+ ivtrace "v.io/x/ref/profiles/internal/vtrace"
+)
+
+func testContext() *context.T {
+ ctx, _ := context.WithTimeout(testContextWithoutDeadline(), 20*time.Second)
+ return ctx
+}
+
+func testContextWithoutDeadline() *context.T {
+ ctx, _ := context.RootContext()
+ ctx, err := ivtrace.Init(ctx, flags.VtraceFlags{})
+ if err != nil {
+ panic(err)
+ }
+ ctx, _ = vtrace.SetNewTrace(ctx)
+ return ctx
+}
+
+type testServer struct{}
+
+func (*testServer) Echo(ctx ipc.ServerCall, arg string) (string, error) {
+ return fmt.Sprintf("method:%q,suffix:%q,arg:%q", ctx.Method(), ctx.Suffix(), arg), nil
+}
+
+type testServerAuthorizer struct{}
+
+func (testServerAuthorizer) Authorize(c security.Call) error {
+ return nil
+}
+
+type testServerDisp struct{ server interface{} }
+
+func (t testServerDisp) Lookup(suffix string) (interface{}, security.Authorizer, error) {
+ return t.server, testServerAuthorizer{}, nil
+}
+
+type proxyHandle struct {
+ ns ns.Namespace
+ sh *modules.Shell
+ proxy modules.Handle
+ name string
+}
+
+func (h *proxyHandle) Start(t *testing.T, ctx *context.T, args ...string) error {
+ sh, err := modules.NewShell(nil, nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ h.sh = sh
+ p, err := sh.Start(core.ProxyServerCommand, nil, args...)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ h.proxy = p
+ s := expect.NewSession(t, p.Stdout(), time.Minute)
+ s.ReadLine()
+ h.name = s.ExpectVar("PROXY_NAME")
+ if len(h.name) == 0 {
+ t.Fatalf("failed to get PROXY_NAME from proxyd")
+ }
+ return h.ns.Mount(ctx, "proxy", h.name, time.Hour)
+}
+
+func (h *proxyHandle) Stop(ctx *context.T) error {
+ defer h.sh.Cleanup(os.Stderr, os.Stderr)
+ if err := h.proxy.Shutdown(os.Stderr, os.Stderr); err != nil {
+ return err
+ }
+ if len(h.name) == 0 {
+ return nil
+ }
+ return h.ns.Unmount(ctx, "proxy", h.name)
+}
+
+func TestProxyOnly(t *testing.T) {
+ listenSpec := ipc.ListenSpec{Proxy: "proxy"}
+ testProxy(t, listenSpec)
+}
+
+func TestProxy(t *testing.T) {
+ proxyListenSpec := ipc.ListenSpec{Addrs: ipc.ListenAddrs{{"tcp", "127.0.0.1:0"}}}
+ proxyListenSpec.Proxy = "proxy"
+ testProxy(t, proxyListenSpec)
+}
+
+func TestWSProxy(t *testing.T) {
+ proxyListenSpec := ipc.ListenSpec{Addrs: ipc.ListenAddrs{{"tcp", "127.0.0.1:0"}}}
+ proxyListenSpec.Proxy = "proxy"
+ // The proxy uses websockets only, but the server is using tcp.
+ testProxy(t, proxyListenSpec, "--veyron.tcp.protocol=ws")
+}
+
+func testProxy(t *testing.T, spec ipc.ListenSpec, args ...string) {
+ var (
+ pserver = tsecurity.NewPrincipal("server")
+ serverKey = pserver.PublicKey()
+ // We use different stream managers for the client and server
+ // to prevent VIF re-use (in other words, we want to test VIF
+ // creation from both the client and server end).
+ smserver = imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+ smclient = imanager.InternalNew(naming.FixedRoutingID(0x444444444))
+ ns = tnaming.NewSimpleNamespace()
+ )
+ defer smserver.Shutdown()
+ defer smclient.Shutdown()
+ client, err := iipc.InternalNewClient(smserver, ns, vc.LocalPrincipal{tsecurity.NewPrincipal("client")})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer client.Close()
+ ctx := testContext()
+ server, err := iipc.InternalNewServer(ctx, smserver, ns, nil, vc.LocalPrincipal{pserver})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer server.Stop()
+
+ // If no address is specified then we'll only 'listen' via
+ // the proxy.
+ hasLocalListener := len(spec.Addrs) > 0 && len(spec.Addrs[0].Address) != 0
+
+ name := "mountpoint/server/suffix"
+ makeCall := func(opts ...ipc.CallOpt) (string, error) {
+ ctx, _ := context.WithDeadline(testContext(), time.Now().Add(5*time.Second))
+ // Let's fail fast so that the tests don't take as long to run.
+ call, err := client.StartCall(ctx, name, "Echo", []interface{}{"batman"}, opts...)
+ if err != nil {
+ // The proxy is down so the call fails to resolve; prepend a
+ // well-known string to the error so that we can test for that.
+ return "", fmt.Errorf("RESOLVE: %s", err)
+ }
+ var result string
+ if err = call.Finish(&result); err != nil {
+ return "", err
+ }
+ return result, nil
+ }
+ proxy := &proxyHandle{ns: ns}
+ if err := proxy.Start(t, ctx, args...); err != nil {
+ t.Fatal(err)
+ }
+ defer proxy.Stop(ctx)
+ addrs := verifyMount(t, ctx, ns, spec.Proxy)
+ if len(addrs) != 1 {
+ t.Fatalf("failed to lookup proxy")
+ }
+
+ eps, err := server.Listen(spec)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := server.ServeDispatcher("mountpoint/server", testServerDisp{&testServer{}}); err != nil {
+ t.Fatal(err)
+ }
+
+ // Proxy connections are started asynchronously, so we need to wait.
+ waitForMountTable := func(ch chan int, expect int) {
+ then := time.Now().Add(time.Minute)
+ for {
+ me, err := ns.Resolve(ctx, name)
+ if err == nil && len(me.Servers) == expect {
+ ch <- 1
+ return
+ }
+ if time.Now().After(then) {
+ t.Fatalf("timed out")
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+ }
+ waitForServerStatus := func(ch chan int, proxy string) {
+ then := time.Now().Add(time.Minute)
+ for {
+ status := server.Status()
+ if len(status.Proxies) == 1 && status.Proxies[0].Proxy == proxy {
+ ch <- 2
+ return
+ }
+ if time.Now().After(then) {
+ t.Fatalf("timed out")
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+ }
+ proxyEP, _ := naming.SplitAddressName(addrs[0])
+ proxiedEP, err := inaming.NewEndpoint(proxyEP)
+ if err != nil {
+ t.Fatalf("unexpected error for %q: %s", proxyEP, err)
+ }
+ proxiedEP.RID = naming.FixedRoutingID(0x555555555)
+ expectedNames := []string{naming.JoinAddressName(proxiedEP.String(), "suffix")}
+ if hasLocalListener {
+ expectedNames = append(expectedNames, naming.JoinAddressName(eps[0].String(), "suffix"))
+ }
+
+ // Proxy connections are created asynchronously, so we wait for the
+ // expected number of endpoints to appear for the specified service name.
+ ch := make(chan int, 2)
+ go waitForMountTable(ch, len(expectedNames))
+ go waitForServerStatus(ch, spec.Proxy)
+ select {
+ case <-time.After(time.Minute):
+ t.Fatalf("timedout waiting for two entries in the mount table and server status")
+ case i := <-ch:
+ select {
+ case <-time.After(time.Minute):
+ t.Fatalf("timedout waiting for two entries in the mount table or server status")
+ case j := <-ch:
+ if !((i == 1 && j == 2) || (i == 2 && j == 1)) {
+ t.Fatalf("unexpected return values from waiters")
+ }
+ }
+ }
+
+ status := server.Status()
+ if got, want := status.Proxies[0].Endpoint, proxiedEP; !reflect.DeepEqual(got, want) {
+ t.Fatalf("got %q, want %q", got, want)
+ }
+
+ got := []string{}
+ for _, s := range verifyMount(t, ctx, ns, name) {
+ got = append(got, s)
+ }
+ sort.Strings(got)
+ sort.Strings(expectedNames)
+ if !reflect.DeepEqual(got, expectedNames) {
+ t.Errorf("got %v, want %v", got, expectedNames)
+ }
+
+ if hasLocalListener {
+ // Listen will publish both the local and the proxied endpoint with the
+ // mount table. Since we're trying to test the proxy, we remove the local
+ // endpoint from the mount table entry; both the tcp and the websocket
+ // addresses have to be removed.
+ sep := eps[0].String()
+ ns.Unmount(ctx, "mountpoint/server", sep)
+ }
+
+ addrs = verifyMount(t, ctx, ns, name)
+ if len(addrs) != 1 {
+ t.Fatalf("failed to lookup proxy: addrs %v", addrs)
+ }
+
+ // Proxied endpoint should be published and RPC should succeed (through proxy).
+ // Additionally, any server authorization options must only apply to the end server
+ // and not the proxy.
+ const expected = `method:"Echo",suffix:"suffix",arg:"batman"`
+ if result, err := makeCall(options.ServerPublicKey{serverKey}); result != expected || err != nil {
+ t.Fatalf("Got (%v, %v) want (%v, nil)", result, err, expected)
+ }
+
+ // Proxy dies, calls should fail and the name should be unmounted.
+ if err := proxy.Stop(ctx); err != nil {
+ t.Fatal(err)
+ }
+
+ if result, err := makeCall(options.NoRetry{}); err == nil || (!strings.HasPrefix(err.Error(), "RESOLVE") && !strings.Contains(err.Error(), "EOF")) {
+ t.Fatalf(`Got (%v, %v) want ("", "RESOLVE: <err>" or "EOF") as proxy is down`, result, err)
+ }
+
+ for {
+ if _, err := ns.Resolve(ctx, name); err != nil {
+ break
+ }
+ time.Sleep(10 * time.Millisecond)
+ }
+ verifyMountMissing(t, ctx, ns, name)
+
+ status = server.Status()
+ if len(status.Proxies) != 1 || status.Proxies[0].Proxy != spec.Proxy || !verror.Is(status.Proxies[0].Error, verror.ErrNoServers.ID) {
+ t.Fatalf("proxy status is incorrect: %v", status.Proxies)
+ }
+
+ // Proxy restarts, calls should eventually start succeeding.
+ if err := proxy.Start(t, ctx, args...); err != nil {
+ t.Fatal(err)
+ }
+
+ retries := 0
+ for {
+ if result, err := makeCall(); err == nil {
+ if result != expected {
+ t.Errorf("Got (%v, %v) want (%v, nil)", result, err, expected)
+ }
+ break
+ } else {
+ retries++
+ if retries > 10 {
+ t.Fatalf("Failed after 10 attempts: err: %s", err)
+ }
+ }
+ }
+}
+
+func verifyMount(t *testing.T, ctx *context.T, ns ns.Namespace, name string) []string {
+ me, err := ns.Resolve(ctx, name)
+ if err != nil {
+ t.Errorf("%s not found in mounttable", name)
+ return nil
+ }
+ return me.Names()
+}
+
+func verifyMountMissing(t *testing.T, ctx *context.T, ns ns.Namespace, name string) {
+ if me, err := ns.Resolve(ctx, name); err == nil {
+ names := me.Names()
+ t.Errorf("%s not supposed to be found in mounttable; got %d servers instead: %v", name, len(names), names)
+ }
+}
diff --git a/profiles/internal/ipc/reserved.go b/profiles/internal/ipc/reserved.go
new file mode 100644
index 0000000..ff3f63f
--- /dev/null
+++ b/profiles/internal/ipc/reserved.go
@@ -0,0 +1,361 @@
+package ipc
+
+import (
+ "strings"
+ "time"
+
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/naming"
+ "v.io/v23/security"
+ "v.io/v23/services/security/access"
+ "v.io/v23/vdl"
+ "v.io/v23/vdlroot/signature"
+ "v.io/x/lib/vlog"
+
+ "v.io/x/ref/lib/glob"
+)
+
+// reservedInvoker returns a special invoker for reserved methods. This invoker
+// has access to the internal dispatchers, which allows it to perform special
+// handling for methods like Glob and Signature.
+func reservedInvoker(dispNormal, dispReserved ipc.Dispatcher) ipc.Invoker {
+ methods := &reservedMethods{dispNormal: dispNormal, dispReserved: dispReserved}
+ invoker := ipc.ReflectInvokerOrDie(methods)
+ methods.selfInvoker = invoker
+ return invoker
+}
+
+// reservedMethods is a regular server implementation object, which is passed to
+// the regular ReflectInvoker in order to implement reserved methods. The
+// leading reserved "__" prefix is stripped before any methods are called.
+//
+// To add a new reserved method, simply add a method below, along with a
+// description of the method.
+type reservedMethods struct {
+ dispNormal ipc.Dispatcher
+ dispReserved ipc.Dispatcher
+ selfInvoker ipc.Invoker
+}
+
+func (r *reservedMethods) Describe__() []ipc.InterfaceDesc {
+ return []ipc.InterfaceDesc{{
+ Name: "__Reserved",
+ Doc: `Reserved methods implemented by the IPC framework. Each method name is prefixed with a double underscore "__".`,
+ Methods: []ipc.MethodDesc{
+ {
+ Name: "Glob",
+ Doc: "Glob returns all entries matching the pattern.",
+ InArgs: []ipc.ArgDesc{{Name: "pattern", Doc: ""}},
+ OutStream: ipc.ArgDesc{Doc: "Streams matching entries back to the client."},
+ },
+ {
+ Name: "MethodSignature",
+ Doc: "MethodSignature returns the signature for the given method.",
+ InArgs: []ipc.ArgDesc{{
+ Name: "method",
+ Doc: "Method name whose signature will be returned.",
+ }},
+ OutArgs: []ipc.ArgDesc{{
+ Doc: "Method signature for the given method.",
+ }},
+ },
+ {
+ Name: "Signature",
+ Doc: "Signature returns all interface signatures implemented by the object.",
+ OutArgs: []ipc.ArgDesc{{
+ Doc: "All interface signatures implemented by the object.",
+ }},
+ },
+ },
+ }}
+}
+
+func (r *reservedMethods) Signature(ctxOrig ipc.ServerCall) ([]signature.Interface, error) {
+ // Copy the original context to shield ourselves from changes the flowServer makes.
+ ctx := copyMutableContext(ctxOrig)
+ ctx.M.Method = "__Signature"
+ disp := r.dispNormal
+ if naming.IsReserved(ctx.Suffix()) {
+ disp = r.dispReserved
+ }
+ if disp == nil {
+ return nil, ipc.NewErrUnknownSuffix(ctx.Context(), ctx.Suffix())
+ }
+ obj, _, err := disp.Lookup(ctx.Suffix())
+ switch {
+ case err != nil:
+ return nil, err
+ case obj == nil:
+ return nil, ipc.NewErrUnknownSuffix(ctx.Context(), ctx.Suffix())
+ }
+ invoker, err := objectToInvoker(obj)
+ if err != nil {
+ return nil, err
+ }
+ sig, err := invoker.Signature(ctx)
+ if err != nil {
+ return nil, err
+ }
+ // Append the reserved methods. We wait until now to add the "__" prefix to
+ // each method, so that we can use the regular ReflectInvoker.Signature logic.
+ rsig, err := r.selfInvoker.Signature(ctx)
+ if err != nil {
+ return nil, err
+ }
+ for i := range rsig {
+ for j := range rsig[i].Methods {
+ rsig[i].Methods[j].Name = "__" + rsig[i].Methods[j].Name
+ }
+ }
+ return signature.CleanInterfaces(append(sig, rsig...)), nil
+}
+
+func (r *reservedMethods) MethodSignature(ctxOrig ipc.ServerCall, method string) (signature.Method, error) {
+ // Copy the original context to shield ourselves from changes the flowServer makes.
+ ctx := copyMutableContext(ctxOrig)
+ ctx.M.Method = method
+ // Reserved methods use our self invoker to describe our own methods.
+ if naming.IsReserved(method) {
+ ctx.M.Method = naming.StripReserved(method)
+ return r.selfInvoker.MethodSignature(ctx, ctx.Method())
+ }
+ disp := r.dispNormal
+ if naming.IsReserved(ctx.Suffix()) {
+ disp = r.dispReserved
+ }
+ if disp == nil {
+ return signature.Method{}, ipc.NewErrUnknownMethod(ctx.Context(), ctx.Method())
+ }
+ obj, _, err := disp.Lookup(ctx.Suffix())
+ switch {
+ case err != nil:
+ return signature.Method{}, err
+ case obj == nil:
+ return signature.Method{}, ipc.NewErrUnknownMethod(ctx.Context(), ctx.Method())
+ }
+ invoker, err := objectToInvoker(obj)
+ if err != nil {
+ return signature.Method{}, err
+ }
+ // TODO(toddw): Decide if we should hide the method signature if the
+ // caller doesn't have access to call it.
+ return invoker.MethodSignature(ctx, ctx.Method())
+}
+
+func (r *reservedMethods) Glob(ctx ipc.StreamServerCall, pattern string) error {
+ // Copy the original call to shield ourselves from changes the flowServer makes.
+ glob := globInternal{r.dispNormal, r.dispReserved, ctx.Suffix()}
+ return glob.Glob(copyMutableCall(ctx), pattern)
+}
+
+// globInternal handles ALL the Glob requests received by a server and
+// constructs a response from the state of internal server objects and the
+// service objects.
+//
+// Internal objects exist only at the root of the server and have a name that
+// starts with a double underscore ("__"). They are only visible in the Glob
+// response if the double underscore is explicitly part of the pattern, e.g.
+// "".Glob("__*/*"), or "".Glob("__debug/...").
+//
+// Service objects may choose to implement either AllGlobber or ChildrenGlobber.
+// AllGlobber is more flexible, but ChildrenGlobber is simpler to implement and
+// less prone to errors.
+//
+// If an object implements AllGlobber, it must be able to handle a recursive
+// pattern for the entire namespace below the receiver object, i.e. "a/b".Glob("...")
+// must return the names of all the objects under "a/b".
+//
+// If an object implements ChildrenGlobber, it provides a list of the receiver's
+// immediate child names, or a non-nil error if the receiver doesn't exist.
+//
+// globInternal constructs the Glob response by internally accessing the
+// AllGlobber or ChildrenGlobber interface of objects as many times as needed.
+//
+// Before accessing an object, globInternal ensures that the requester is
+// authorized to access it. Internal objects require access.Debug. Service
+// objects require access.Resolve.
+type globInternal struct {
+ dispNormal ipc.Dispatcher
+ dispReserved ipc.Dispatcher
+ receiver string
+}
+
+// The maximum depth of recursion in Glob. We only count recursion levels
+// associated with a recursive glob pattern, e.g. a pattern like "..." will be
+// allowed to recurse up to 10 levels, but "*/*/*/*/..." will go up to 14
+// levels.
+const maxRecursiveGlobDepth = 10
+
+func (i *globInternal) Glob(call *mutableCall, pattern string) error {
+ vlog.VI(3).Infof("ipc Glob: Incoming request: %q.Glob(%q)", i.receiver, pattern)
+ g, err := glob.Parse(pattern)
+ if err != nil {
+ return err
+ }
+ disp := i.dispNormal
+ call.M.Method = ipc.GlobMethod
+ call.M.MethodTags = []*vdl.Value{vdl.ValueOf(access.Resolve)}
+ if naming.IsReserved(i.receiver) || (i.receiver == "" && naming.IsReserved(pattern)) {
+ disp = i.dispReserved
+ call.M.MethodTags = []*vdl.Value{vdl.ValueOf(access.Debug)}
+ }
+ if disp == nil {
+ return ipc.NewErrGlobNotImplemented(call.Context(), i.receiver)
+ }
+
+ type gState struct {
+ name string
+ glob *glob.Glob
+ depth int
+ }
+ queue := []gState{gState{glob: g}}
+
+ for len(queue) != 0 {
+ select {
+ case <-call.Done():
+ // RPC timed out or was canceled.
+ return nil
+ default:
+ }
+ state := queue[0]
+ queue = queue[1:]
+
+ call.M.Suffix = naming.Join(i.receiver, state.name)
+ if state.depth > maxRecursiveGlobDepth {
+ vlog.Errorf("ipc Glob: exceeded recursion limit (%d): %q", maxRecursiveGlobDepth, call.Suffix())
+ continue
+ }
+ obj, auth, err := disp.Lookup(call.Suffix())
+ if err != nil {
+ vlog.VI(3).Infof("ipc Glob: Lookup failed for %q: %v", call.Suffix(), err)
+ continue
+ }
+ if obj == nil {
+ vlog.VI(3).Infof("ipc Glob: object not found for %q", call.Suffix())
+ continue
+ }
+
+ // Verify that the requester is authorized for the current object.
+ if err := authorize(call, auth); err != nil {
+ vlog.VI(3).Infof("ipc Glob: client is not authorized for %q: %v", call.Suffix(), err)
+ continue
+ }
+
+ // If the object implements both AllGlobber and ChildrenGlobber, we'll
+ // use AllGlobber.
+ invoker, err := objectToInvoker(obj)
+ if err != nil {
+ vlog.VI(3).Infof("ipc Glob: object for %q cannot be converted to invoker: %v", call.Suffix(), err)
+ continue
+ }
+ gs := invoker.Globber()
+ if gs == nil || (gs.AllGlobber == nil && gs.ChildrenGlobber == nil) {
+ if state.glob.Len() == 0 {
+ call.Send(naming.VDLGlobReplyEntry{naming.VDLMountEntry{Name: state.name}})
+ }
+ continue
+ }
+ if gs.AllGlobber != nil {
+ vlog.VI(3).Infof("ipc Glob: %q implements AllGlobber", call.Suffix())
+ ch, err := gs.AllGlobber.Glob__(call, state.glob.String())
+ if err != nil {
+ vlog.VI(3).Infof("ipc Glob: %q.Glob(%q) failed: %v", call.Suffix(), state.glob, err)
+ continue
+ }
+ if ch == nil {
+ continue
+ }
+ for gr := range ch {
+ switch v := gr.(type) {
+ case naming.VDLGlobReplyEntry:
+ v.Value.Name = naming.Join(state.name, v.Value.Name)
+ call.Send(v)
+ case naming.VDLGlobReplyError:
+ v.Value.Name = naming.Join(state.name, v.Value.Name)
+ call.Send(v)
+ }
+ }
+ continue
+ }
+ vlog.VI(3).Infof("ipc Glob: %q implements ChildrenGlobber", call.Suffix())
+ children, err := gs.ChildrenGlobber.GlobChildren__(call)
+ // The requested object doesn't exist.
+ if err != nil {
+ continue
+ }
+ // The glob pattern matches the current object.
+ if state.glob.Len() == 0 {
+ call.Send(naming.VDLGlobReplyEntry{naming.VDLMountEntry{Name: state.name}})
+ }
+ // The current object has no children.
+ if children == nil {
+ continue
+ }
+ depth := state.depth
+ // This is a recursive pattern. Make sure we don't recurse forever.
+ if state.glob.Len() == 0 {
+ depth++
+ }
+ for child := range children {
+ if len(child) == 0 || strings.Contains(child, "/") {
+ vlog.Errorf("ipc Glob: %q.GlobChildren__() sent an invalid child name: %q", call.Suffix(), child)
+ continue
+ }
+ if ok, _, left := state.glob.MatchInitialSegment(child); ok {
+ next := naming.Join(state.name, child)
+ queue = append(queue, gState{next, left, depth})
+ }
+ }
+ }
+ return nil
+}
+
+// copyMutableCall returns a new mutableCall copied from call. Changes to the
+// original call don't affect the mutable fields in the returned object.
+func copyMutableCall(call ipc.StreamServerCall) *mutableCall {
+ return &mutableCall{Stream: call, mutableContext: copyMutableContext(call)}
+}
+
+// copyMutableContext returns a new mutableContext copied from ctx. Changes to
+// the original ctx don't affect the mutable fields in the returned object.
+func copyMutableContext(ctx ipc.ServerCall) *mutableContext {
+ c := &mutableContext{T: ctx.Context()}
+ c.M.CallParams.Copy(ctx)
+ c.M.GrantedBlessings = ctx.GrantedBlessings()
+ c.M.Server = ctx.Server()
+ return c
+}
+
+// mutableCall provides a mutable implementation of ipc.StreamServerCall, useful for
+// our various special-cased reserved methods.
+type mutableCall struct {
+ ipc.Stream
+ *mutableContext
+}
+
+// mutableContext is like mutableCall but only provides the context portion.
+type mutableContext struct {
+ *context.T
+ M struct {
+ security.CallParams
+ GrantedBlessings security.Blessings
+ Server ipc.Server
+ }
+}
+
+func (c *mutableContext) Context() *context.T { return c.T }
+func (c *mutableContext) Timestamp() time.Time { return c.M.Timestamp }
+func (c *mutableContext) Method() string { return c.M.Method }
+func (c *mutableContext) MethodTags() []*vdl.Value { return c.M.MethodTags }
+func (c *mutableContext) Name() string { return c.M.Suffix }
+func (c *mutableContext) Suffix() string { return c.M.Suffix }
+func (c *mutableContext) LocalPrincipal() security.Principal { return c.M.LocalPrincipal }
+func (c *mutableContext) LocalBlessings() security.Blessings { return c.M.LocalBlessings }
+func (c *mutableContext) RemoteBlessings() security.Blessings { return c.M.RemoteBlessings }
+func (c *mutableContext) LocalEndpoint() naming.Endpoint { return c.M.LocalEndpoint }
+func (c *mutableContext) RemoteEndpoint() naming.Endpoint { return c.M.RemoteEndpoint }
+func (c *mutableContext) RemoteDischarges() map[string]security.Discharge { return c.M.RemoteDischarges }
+func (c *mutableContext) GrantedBlessings() security.Blessings { return c.M.GrantedBlessings }
+func (c *mutableContext) Server() ipc.Server { return c.M.Server }
+func (c *mutableContext) VanadiumContext() *context.T { return c.T }
diff --git a/profiles/internal/ipc/resolve_internal_test.go b/profiles/internal/ipc/resolve_internal_test.go
new file mode 100644
index 0000000..637e4c6
--- /dev/null
+++ b/profiles/internal/ipc/resolve_internal_test.go
@@ -0,0 +1,9 @@
+package ipc
+
+import (
+ "v.io/v23/ipc"
+)
+
+func InternalServerResolveToEndpoint(s ipc.Server, name string) (string, error) {
+ return s.(*server).resolveToEndpoint(name)
+}
diff --git a/profiles/internal/ipc/resolve_test.go b/profiles/internal/ipc/resolve_test.go
new file mode 100644
index 0000000..9c3dd8a
--- /dev/null
+++ b/profiles/internal/ipc/resolve_test.go
@@ -0,0 +1,82 @@
+package ipc_test
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "v.io/v23"
+ "v.io/v23/naming"
+
+ "v.io/x/ref/lib/expect"
+ "v.io/x/ref/lib/modules"
+ "v.io/x/ref/lib/modules/core"
+ "v.io/x/ref/lib/testutil"
+ iipc "v.io/x/ref/profiles/internal/ipc"
+ inaming "v.io/x/ref/profiles/internal/naming"
+)
+
+func startMT(t *testing.T, sh *modules.Shell) string {
+ h, err := sh.Start(core.RootMTCommand, nil, "--veyron.tcp.address=127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("unexpected error for root mt: %s", err)
+ }
+ s := expect.NewSession(t, h.Stdout(), time.Minute)
+ s.ExpectVar("PID")
+ return s.ExpectVar("MT_NAME")
+}
+
+func TestResolveToEndpoint(t *testing.T) {
+ sh, err := modules.NewShell(nil, nil)
+ if err != nil {
+ t.Fatalf("modules.NewShell failed: %s", err)
+ }
+ defer sh.Cleanup(nil, nil)
+ root := startMT(t, sh)
+
+ ctx, shutdown := testutil.InitForTest()
+ defer shutdown()
+
+ ns := v23.GetNamespace(ctx)
+ ns.SetRoots(root)
+
+ proxyEp, _ := inaming.NewEndpoint("proxy.v.io:123")
+ proxyEpStr := proxyEp.String()
+ proxyAddr := naming.JoinAddressName(proxyEpStr, "")
+ if err := ns.Mount(ctx, "proxy", proxyAddr, time.Hour); err != nil {
+ t.Fatalf("ns.Mount failed: %s", err)
+ }
+
+ server, err := v23.NewServer(ctx)
+ if err != nil {
+ t.Fatalf("runtime.NewServer failed: %s", err)
+ }
+
+ notfound := fmt.Errorf("not found")
+ testcases := []struct {
+ address string
+ result string
+ err error
+ }{
+ {"/proxy.v.io:123", proxyEpStr, nil},
+ {"proxy.v.io:123", "", notfound},
+ {"proxy", proxyEpStr, nil},
+ {naming.JoinAddressName(root, "proxy"), proxyEpStr, nil},
+ {proxyAddr, proxyEpStr, nil},
+ {proxyEpStr, "", notfound},
+ {"unknown", "", notfound},
+ }
+ for _, tc := range testcases {
+ result, err := iipc.InternalServerResolveToEndpoint(server, tc.address)
+ if (err == nil) != (tc.err == nil) {
+ t.Errorf("Unexpected err for %q. Got %v, expected %v", tc.address, err, tc.err)
+ }
+ if result != tc.result {
+ t.Errorf("Unexpected result for %q. Got %q, expected %q", tc.address, result, tc.result)
+ }
+ }
+ if t.Failed() {
+ t.Logf("proxyEpStr: %v", proxyEpStr)
+ t.Logf("proxyAddr: %v", proxyAddr)
+ }
+}
diff --git a/profiles/internal/ipc/results_store.go b/profiles/internal/ipc/results_store.go
new file mode 100644
index 0000000..d029306
--- /dev/null
+++ b/profiles/internal/ipc/results_store.go
@@ -0,0 +1,122 @@
+package ipc
+
+import (
+ "container/heap"
+ "sync"
+)
+
+const (
+ // TODO(cnicolaou): what are good initial values? Large servers want
+ // large values, most won't.
+ initialResults = 1000
+ initialOutOfOrderResults = 100
+)
+
+type results []interface{}
+
+// Implement heap.Interface to maintain an ordered min-heap of uint64s.
+type intHeap []uint64
+
+func (h intHeap) Len() int { return len(h) }
+func (h intHeap) Less(i, j int) bool { return h[i] < h[j] }
+func (h intHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
+
+func (h *intHeap) Push(x interface{}) {
+ // Push and Pop use pointer receivers because they modify the slice's length,
+ // not just its contents.
+ *h = append(*h, x.(uint64))
+}
+
+func (h *intHeap) Pop() interface{} {
+ old := *h
+ n := len(old)
+ x := old[n-1]
+ *h = old[0 : n-1]
+ return x
+}
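+
+// A small usage sketch: intHeap must only be manipulated through the heap
+// package so that the smallest key stays at index 0, which is what
+// removeEntriesTo relies on when trimming acknowledged entries.
+//
+//    var keys intHeap
+//    heap.Init(&keys)
+//    heap.Push(&keys, uint64(7))
+//    heap.Push(&keys, uint64(3))
+//    smallest := heap.Pop(&keys).(uint64) // smallest == 3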
+
+// resultsStore is used to store the results of previously completed RPCs
+// until the client indicates that it has received those results and hence
+// the server no longer needs to store them. Store entries are added
+// one at a time, but the client indicates that it has received entries up to
+// a given value and that all entries with keys lower than that can be deleted.
+// Retrieving values is complicated by the fact that requests may arrive
+// out of order and hence one RPC may have to wait for another to complete
+// in order to access its stored results. A separate map of channels is
+// used to implement this synchronization.
+// TODO(cnicolaou): Servers should protect themselves from badly behaved
+// clients by refusing to allocate beyond a certain number of results.
+type resultsStore struct {
+ sync.Mutex
+ store map[uint64]results
+ chans map[uint64]chan struct{}
+ keys intHeap
+ // TODO(cnicolaou): Should addEntry/waitForEntry return an error when
+ // the calls do not match the frontier?
+ frontier uint64 // results with index less than this have been removed.
+}
+
+func newStore() *resultsStore {
+ r := &resultsStore{
+ store: make(map[uint64]results, initialResults),
+ chans: make(map[uint64]chan struct{}, initialOutOfOrderResults),
+ }
+ heap.Init(&r.keys)
+ return r
+}
+
+func (rs *resultsStore) addEntry(key uint64, data results) {
+ rs.Lock()
+ if _, present := rs.store[key]; !present && rs.frontier <= key {
+ rs.store[key] = data
+ heap.Push(&rs.keys, key)
+ }
+ if ch, present := rs.chans[key]; present {
+ close(ch)
+ delete(rs.chans, key)
+ }
+ rs.Unlock()
+}
+
+func (rs *resultsStore) removeEntriesTo(to uint64) {
+ rs.Lock()
+ if rs.frontier > to {
+ rs.Unlock()
+ return
+ }
+ rs.frontier = to + 1
+ for rs.keys.Len() > 0 && to >= rs.keys[0] {
+ k := heap.Pop(&rs.keys).(uint64)
+ delete(rs.store, k)
+ if ch, present := rs.chans[k]; present {
+ close(ch)
+ delete(rs.chans, k)
+ }
+ }
+ rs.Unlock()
+}
+
+func (rs *resultsStore) waitForEntry(key uint64) results {
+ rs.Lock()
+ if r, present := rs.store[key]; present {
+ rs.Unlock()
+ return r
+ }
+ if key < rs.frontier {
+ rs.Unlock()
+ return nil
+ }
+ // entry is not present, need to wait for it.
+ ch, present := rs.chans[key]
+ if !present {
+ heap.Push(&rs.keys, key)
+ ch = make(chan struct{}, 1)
+ rs.chans[key] = ch
+ }
+ rs.Unlock()
+ <-ch
+ rs.Lock()
+ defer rs.Unlock()
+ delete(rs.chans, key) // Allow the channel to be GC'ed.
+ return rs.store[key] // This may be nil if the entry has been removed
+}
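+
+// A minimal sketch of the intended lifecycle for a single request id (the id
+// and payload are hypothetical):
+//
+//    rs := newStore()
+//    rs.addEntry(7, results{"out"}) // server finished processing request 7
+//    r := rs.waitForEntry(7)        // a duplicate of request 7 reads the stored results
+//    rs.removeEntriesTo(7)          // client acknowledged everything up to and including 7
+//    _ = r
+//
+// waitForEntry blocks if the entry has not been added yet, and returns nil if
+// the entry was already removed; results_store_test.go exercises both cases.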
diff --git a/profiles/internal/ipc/results_store_test.go b/profiles/internal/ipc/results_store_test.go
new file mode 100644
index 0000000..6af36b7
--- /dev/null
+++ b/profiles/internal/ipc/results_store_test.go
@@ -0,0 +1,140 @@
+package ipc
+
+import (
+ "sort"
+ "sync"
+ "testing"
+
+ "v.io/x/ref/lib/testutil"
+)
+
+func randomKeys() []uint64 {
+ n := (testutil.Rand.Intn(256*10) / 10) + 256
+ k := make([]uint64, n)
+ for i := 0; i < n; i++ {
+ k[i] = uint64(testutil.Rand.Int63())
+ }
+ return k
+}
+
+type keySlice []uint64
+
+func (p keySlice) Len() int { return len(p) }
+func (p keySlice) Less(i, j int) bool { return p[i] < p[j] }
+func (p keySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p keySlice) Sort() { sort.Sort(p) }
+
+func TestStoreRandom(t *testing.T) {
+ store := newStore()
+ keys := randomKeys()
+
+ for i := 0; i < len(keys); i++ {
+ r := []interface{}{i}
+ store.addEntry(keys[i], r)
+ }
+ if len(store.store) != len(keys) {
+ t.Errorf("num stored entries: got %d, want %d", len(store.store), len(keys))
+ }
+ for i := 0; i < len(keys); i++ {
+ // Each call to removeEntriesTo will remove an unknown number of entries
+ // depending on the original randomised values of the keys.
+ store.removeEntriesTo(keys[i])
+ }
+ if len(store.store) != 0 {
+ t.Errorf("store is not empty: %d", len(store.store))
+ }
+}
+
+func TestStoreOrdered(t *testing.T) {
+ store := newStore()
+ keys := randomKeys()
+
+ for i := 0; i < len(keys); i++ {
+ r := []interface{}{i}
+ store.addEntry(keys[i], r)
+ }
+ if len(store.store) != len(keys) {
+ t.Errorf("num stored entries: got %d, want %d", len(store.store), len(keys))
+ }
+
+ (keySlice(keys)).Sort()
+ l := len(keys)
+ for i := 0; i < len(keys); i++ {
+ store.removeEntriesTo(keys[i])
+ l--
+ if len(store.store) != l {
+ t.Errorf("failed to remove a single item(%d): %d != %d", keys[i], len(store.store), l)
+ }
+ }
+ if len(store.store) != 0 {
+ t.Errorf("store is not empty: %d", len(store.store))
+ }
+}
+
+func TestStoreWaitForEntry(t *testing.T) {
+ store := newStore()
+ store.addEntry(1, []interface{}{"1"})
+ r := store.waitForEntry(1)
+ if r[0].(string) != "1" {
+ t.Errorf("Got: %q, Want: %q", r[0], "1")
+ }
+ ch := make(chan string)
+ go func(ch chan string) {
+ r := store.waitForEntry(2)
+ ch <- r[0].(string)
+ }(ch)
+ store.addEntry(2, []interface{}{"2"})
+ if result := <-ch; result != "2" {
+ t.Errorf("Got: %q, Want: %q", r[0], "2")
+ }
+}
+
+func TestStoreWaitForEntryRandom(t *testing.T) {
+ store := newStore()
+ keys := randomKeys()
+ var wg sync.WaitGroup
+ for _, k := range keys {
+ wg.Add(1)
+ go func(t *testing.T, id uint64) {
+ r := store.waitForEntry(id)
+ if r[0].(uint64) != id {
+ t.Errorf("Got: %d, Want: %d", r[0].(uint64), id)
+ }
+ wg.Done()
+ }(t, k)
+ }
+ (keySlice(keys)).Sort()
+ for _, k := range keys {
+ store.addEntry(k, []interface{}{k})
+ }
+ wg.Wait()
+}
+
+func TestStoreWaitForRemovedEntry(t *testing.T) {
+ store := newStore()
+ keys := randomKeys()
+ var wg sync.WaitGroup
+ for _, k := range keys {
+ wg.Add(1)
+ go func(t *testing.T, id uint64) {
+ if r := store.waitForEntry(id); r != nil {
+ t.Errorf("Got %v, want nil", r)
+ }
+ wg.Done()
+ }(t, k)
+ }
+ (keySlice(keys)).Sort()
+ for _, k := range keys {
+ store.removeEntriesTo(k)
+ }
+ wg.Wait()
+}
+
+func TestStoreWaitForOldEntry(t *testing.T) {
+ store := newStore()
+ store.addEntry(1, []interface{}{"result"})
+ store.removeEntriesTo(1)
+ if got := store.waitForEntry(1); got != nil {
+ t.Errorf("Got %T=%v, want nil", got, got)
+ }
+}
diff --git a/profiles/internal/ipc/server.go b/profiles/internal/ipc/server.go
new file mode 100644
index 0000000..06af75b
--- /dev/null
+++ b/profiles/internal/ipc/server.go
@@ -0,0 +1,1289 @@
+package ipc
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "reflect"
+ "strings"
+ "sync"
+ "time"
+
+ "v.io/v23/config"
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/naming"
+ "v.io/v23/naming/ns"
+ "v.io/v23/options"
+ "v.io/v23/security"
+ "v.io/v23/services/security/access"
+ "v.io/v23/vdl"
+ "v.io/v23/verror"
+ "v.io/v23/vom"
+ "v.io/v23/vtrace"
+ "v.io/x/lib/vlog"
+ "v.io/x/ref/profiles/internal/ipc/stream"
+
+ "v.io/x/ref/lib/netstate"
+ "v.io/x/ref/lib/publisher"
+ "v.io/x/ref/lib/stats"
+ "v.io/x/ref/profiles/internal/ipc/stream/vc"
+ inaming "v.io/x/ref/profiles/internal/naming"
+ ivtrace "v.io/x/ref/profiles/internal/vtrace"
+
+ // TODO(cnicolaou): finish verror2 -> verror transition, in particular
+ // for communicating from server to client.
+)
+
+// state for each requested listen address
+type listenState struct {
+ protocol, address string
+ ln stream.Listener
+ lep naming.Endpoint
+ lnerr, eperr error
+ roaming bool
+ // We keep track of all of the endpoints, the port and a copy of
+ // the original listen endpoint for use with roaming network changes.
+ ieps []*inaming.Endpoint // list of currently active eps
+ port string // port to use for creating new eps
+ protoIEP inaming.Endpoint // endpoint to use as template for new eps (includes rid, versions etc)
+}
+
+// state for each requested proxy
+type proxyState struct {
+ endpoint naming.Endpoint
+ err error
+}
+
+type dhcpState struct {
+ name string
+ publisher *config.Publisher
+ stream *config.Stream
+ ch chan config.Setting // channel to receive dhcp settings over
+ err error // error status.
+ watchers map[chan<- ipc.NetworkChange]struct{}
+}
+
+type server struct {
+ sync.Mutex
+ // context used by the server to make internal RPCs, error messages etc.
+ ctx *context.T
+ cancel context.CancelFunc // function to cancel the above context.
+ state serverState // track state of the server.
+ streamMgr stream.Manager // stream manager to listen for new flows.
+ publisher publisher.Publisher // publisher to publish mounttable mounts.
+ listenerOpts []stream.ListenerOpt // listener opts for Listen.
+ dhcpState *dhcpState // dhcpState, nil if not using dhcp
+
+ // maps that contain state on listeners.
+ listenState map[*listenState]struct{}
+ listeners map[stream.Listener]struct{}
+
+ // state of proxies keyed by the name of the proxy
+ proxies map[string]proxyState
+
+ // all endpoints generated and returned by this server
+ endpoints []naming.Endpoint
+
+ disp ipc.Dispatcher // dispatcher to serve RPCs
+ dispReserved ipc.Dispatcher // dispatcher for reserved methods
+ active sync.WaitGroup // active goroutines we've spawned.
+ stoppedChan chan struct{} // closed when the server has been stopped.
+ preferredProtocols []string // protocols to use when resolving proxy name to endpoint.
+ // We cache the IP networks on the device since reading the network
+ // interfaces via OS syscalls is not cheap.
+ // TODO(jhahn): Add monitoring of network interface changes.
+ ipNets []*net.IPNet
+ ns ns.Namespace
+ servesMountTable bool
+
+ // TODO(cnicolaou): add roaming stats to ipcStats
+ stats *ipcStats // stats for this server.
+}
+
+type serverState int
+
+const (
+ initialized serverState = iota
+ listening
+ serving
+ publishing
+ stopping
+ stopped
+)
+
+// Simple state machine for the server implementation.
+type next map[serverState]bool
+type transitions map[serverState]next
+
+var (
+ states = transitions{
+ initialized: next{listening: true, stopping: true},
+ listening: next{listening: true, serving: true, stopping: true},
+ serving: next{publishing: true, stopping: true},
+ publishing: next{publishing: true, stopping: true},
+ stopping: next{},
+ stopped: next{},
+ }
+
+ externalStates = map[serverState]ipc.ServerState{
+ initialized: ipc.ServerInit,
+ listening: ipc.ServerActive,
+ serving: ipc.ServerActive,
+ publishing: ipc.ServerActive,
+ stopping: ipc.ServerStopping,
+ stopped: ipc.ServerStopped,
+ }
+)
+
+func (s *server) allowed(next serverState, method string) error {
+ if states[s.state][next] {
+ s.state = next
+ return nil
+ }
+ return verror.New(verror.ErrBadState, s.ctx, fmt.Sprintf("%s called out of order or more than once", method))
+}
+
+func (s *server) isStopState() bool {
+ return s.state == stopping || s.state == stopped
+}
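+
+// As a sketch of the legal call order enforced by allowed():
+//
+//    srv.Listen(spec)           // initialized -> listening (Listen may be repeated)
+//    srv.Serve(name, obj, auth) // listening   -> serving   (at most once)
+//    srv.AddName("other")       // serving     -> publishing (AddName/RemoveName may repeat)
+//    srv.Stop()                 // stops from any state; a second Stop is a no-op
+//
+// Anything else fails with verror.ErrBadState.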
+
+var _ ipc.Server = (*server)(nil)
+
+func InternalNewServer(ctx *context.T, streamMgr stream.Manager, ns ns.Namespace, client ipc.Client, opts ...ipc.ServerOpt) (ipc.Server, error) {
+ ctx, cancel := context.WithRootCancel(ctx)
+ ctx, _ = vtrace.SetNewSpan(ctx, "NewServer")
+ statsPrefix := naming.Join("ipc", "server", "routing-id", streamMgr.RoutingID().String())
+ s := &server{
+
+ ctx: ctx,
+ cancel: cancel,
+ streamMgr: streamMgr,
+ publisher: publisher.New(ctx, ns, publishPeriod),
+ listenState: make(map[*listenState]struct{}),
+ listeners: make(map[stream.Listener]struct{}),
+ proxies: make(map[string]proxyState),
+ stoppedChan: make(chan struct{}),
+ ipNets: ipNetworks(),
+ ns: ns,
+ stats: newIPCStats(statsPrefix),
+ }
+ var (
+ principal security.Principal
+ blessings security.Blessings
+ )
+ for _, opt := range opts {
+ switch opt := opt.(type) {
+ case stream.ListenerOpt:
+ // Collect all ServerOpts that are also ListenerOpts.
+ s.listenerOpts = append(s.listenerOpts, opt)
+ switch opt := opt.(type) {
+ case vc.LocalPrincipal:
+ principal = opt.Principal
+ case options.ServerBlessings:
+ blessings = opt.Blessings
+ }
+ case options.ServesMountTable:
+ s.servesMountTable = bool(opt)
+ case ReservedNameDispatcher:
+ s.dispReserved = opt.Dispatcher
+ case PreferredServerResolveProtocols:
+ s.preferredProtocols = []string(opt)
+ }
+ }
+ dc := InternalNewDischargeClient(ctx, client)
+ s.listenerOpts = append(s.listenerOpts, dc)
+ s.listenerOpts = append(s.listenerOpts, vc.DialContext{ctx})
+ blessingsStatsName := naming.Join(statsPrefix, "security", "blessings")
+ // TODO(caprita): revisit printing the blessings with %s, and
+ // instead expose them as a list.
+ stats.NewString(blessingsStatsName).Set(fmt.Sprintf("%s", blessings))
+ if principal != nil { // principal should have been passed in, but just in case.
+ stats.NewStringFunc(blessingsStatsName, func() string {
+ return fmt.Sprintf("%s (default)", principal.BlessingStore().Default())
+ })
+ }
+ return s, nil
+}
+
+func (s *server) Status() ipc.ServerStatus {
+ status := ipc.ServerStatus{}
+ defer vlog.LogCall()()
+ s.Lock()
+ defer s.Unlock()
+ status.State = externalStates[s.state]
+ status.ServesMountTable = s.servesMountTable
+ status.Mounts = s.publisher.Status()
+ status.Endpoints = []naming.Endpoint{}
+ for ls, _ := range s.listenState {
+ if ls.eperr != nil {
+ status.Errors = append(status.Errors, ls.eperr)
+ }
+ if ls.lnerr != nil {
+ status.Errors = append(status.Errors, ls.lnerr)
+ }
+ for _, iep := range ls.ieps {
+ status.Endpoints = append(status.Endpoints, iep)
+ }
+ }
+ status.Proxies = make([]ipc.ProxyStatus, 0, len(s.proxies))
+ for k, v := range s.proxies {
+ status.Proxies = append(status.Proxies, ipc.ProxyStatus{k, v.endpoint, v.err})
+ }
+ return status
+}
+
+func (s *server) WatchNetwork(ch chan<- ipc.NetworkChange) {
+ defer vlog.LogCall()()
+ s.Lock()
+ defer s.Unlock()
+ if s.dhcpState != nil {
+ s.dhcpState.watchers[ch] = struct{}{}
+ }
+}
+
+func (s *server) UnwatchNetwork(ch chan<- ipc.NetworkChange) {
+ defer vlog.LogCall()()
+ s.Lock()
+ defer s.Unlock()
+ if s.dhcpState != nil {
+ delete(s.dhcpState.watchers, ch)
+ }
+}
+
+// resolveToEndpoint resolves an object name or address to an endpoint.
+func (s *server) resolveToEndpoint(address string) (string, error) {
+ var resolved *naming.MountEntry
+ var err error
+ if s.ns != nil {
+ if resolved, err = s.ns.Resolve(s.ctx, address); err != nil {
+ return "", err
+ }
+ } else {
+ // Fake a namespace resolution
+ resolved = &naming.MountEntry{Servers: []naming.MountedServer{
+ {Server: address},
+ }}
+ }
+ // An empty set of protocols means all protocols...
+ if resolved.Servers, err = filterAndOrderServers(resolved.Servers, s.preferredProtocols, s.ipNets); err != nil {
+ return "", err
+ }
+ for _, n := range resolved.Names() {
+ address, suffix := naming.SplitAddressName(n)
+ if suffix != "" {
+ continue
+ }
+ if ep, err := inaming.NewEndpoint(address); err == nil {
+ return ep.String(), nil
+ }
+ }
+ return "", fmt.Errorf("unable to resolve %q to an endpoint", address)
+}
+
+// getPossibleAddrs returns an appropriate set of addresses that could be used
+// to contact the supplied protocol, host, port parameters using the supplied
+// chooser function. The returned bool reports whether the supplied host was
+// unspecified: true if the host was unspecified (and hence had to be chosen),
+// false if it was fully specified.
+func getPossibleAddrs(protocol, host, port string, chooser ipc.AddressChooser) ([]ipc.Address, bool, error) {
+
+ ip := net.ParseIP(host)
+ if ip == nil {
+ return nil, false, fmt.Errorf("failed to parse %q as an IP host", host)
+ }
+
+ addrFromIP := func(ip net.IP) ipc.Address {
+ return &netstate.AddrIfc{
+ Addr: &net.IPAddr{IP: ip},
+ }
+ }
+
+ if ip.IsUnspecified() {
+ if chooser != nil {
+ // Need to find a usable IP address since the call to listen
+ // didn't specify one.
+ if addrs, err := netstate.GetAccessibleIPs(); err == nil {
+ a, err := chooser(protocol, addrs)
+ if err == nil && len(a) > 0 {
+ return a, true, nil
+ }
+ }
+ }
+ // Either no chooser was supplied or it failed to pick a usable
+ // address, so return the address the underlying system has chosen.
+ return []ipc.Address{addrFromIP(ip)}, true, nil
+ }
+ return []ipc.Address{addrFromIP(ip)}, false, nil
+}
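+
+// A hedged sketch of an AddressChooser a profile might supply; the predicate
+// used here (netstate.IsAccessibleIP, also used by addAddresses below) is just
+// one plausible choice:
+//
+//    chooser := func(protocol string, candidates []ipc.Address) ([]ipc.Address, error) {
+//        var usable []ipc.Address
+//        for _, a := range candidates {
+//            if netstate.IsAccessibleIP(a) {
+//                usable = append(usable, a)
+//            }
+//        }
+//        return usable, nil
+//    }
+//
+// getPossibleAddrs only consults the chooser when the host in the listen
+// address is unspecified (e.g. 0.0.0.0 or ::).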
+
+// createEndpoints creates appropriate inaming.Endpoint instances for
+// all of the externally accessible network addresses that can be used
+// to reach this server.
+func (s *server) createEndpoints(lep naming.Endpoint, chooser ipc.AddressChooser) ([]*inaming.Endpoint, string, bool, error) {
+ iep, ok := lep.(*inaming.Endpoint)
+ if !ok {
+ return nil, "", false, fmt.Errorf("internal type conversion error for %T", lep)
+ }
+ if !strings.HasPrefix(iep.Protocol, "tcp") &&
+ !strings.HasPrefix(iep.Protocol, "ws") {
+ // If not tcp, ws, or wsh, just return the endpoint we were given.
+ return []*inaming.Endpoint{iep}, "", false, nil
+ }
+
+ host, port, err := net.SplitHostPort(iep.Address)
+ if err != nil {
+ return nil, "", false, err
+ }
+ addrs, unspecified, err := getPossibleAddrs(iep.Protocol, host, port, chooser)
+ if err != nil {
+ return nil, port, false, err
+ }
+ ieps := make([]*inaming.Endpoint, 0, len(addrs))
+ for _, addr := range addrs {
+ n, err := inaming.NewEndpoint(lep.String())
+ if err != nil {
+ return nil, port, false, err
+ }
+ n.IsMountTable = s.servesMountTable
+ n.Address = net.JoinHostPort(addr.Address().String(), port)
+ ieps = append(ieps, n)
+ }
+ return ieps, port, unspecified, nil
+}
+
+func (s *server) Listen(listenSpec ipc.ListenSpec) ([]naming.Endpoint, error) {
+ defer vlog.LogCall()()
+ useProxy := len(listenSpec.Proxy) > 0
+ if !useProxy && len(listenSpec.Addrs) == 0 {
+ return nil, verror.New(verror.ErrBadArg, s.ctx, "ListenSpec contains no proxy or addresses to listen on")
+ }
+
+ s.Lock()
+ defer s.Unlock()
+
+ if err := s.allowed(listening, "Listen"); err != nil {
+ return nil, err
+ }
+
+ // Start the proxy as early as possible, ignore duplicate requests
+ // for the same proxy.
+ if _, inuse := s.proxies[listenSpec.Proxy]; useProxy && !inuse {
+ // We have a goroutine for listening on proxy connections.
+ s.active.Add(1)
+ go func() {
+ s.proxyListenLoop(listenSpec.Proxy)
+ s.active.Done()
+ }()
+ }
+
+ roaming := false
+ lnState := make([]*listenState, 0, len(listenSpec.Addrs))
+ for _, addr := range listenSpec.Addrs {
+ if len(addr.Address) > 0 {
+ // Listen if we have a local address to listen on.
+ ls := &listenState{
+ protocol: addr.Protocol,
+ address: addr.Address,
+ }
+ ls.ln, ls.lep, ls.lnerr = s.streamMgr.Listen(addr.Protocol, addr.Address, s.listenerOpts...)
+ lnState = append(lnState, ls)
+ if ls.lnerr != nil {
+ continue
+ }
+ ls.ieps, ls.port, ls.roaming, ls.eperr = s.createEndpoints(ls.lep, listenSpec.AddressChooser)
+ if ls.roaming && ls.eperr == nil {
+ ls.protoIEP = *ls.lep.(*inaming.Endpoint)
+ roaming = true
+ }
+ }
+ }
+
+ found := false
+ for _, ls := range lnState {
+ if ls.ln != nil {
+ found = true
+ break
+ }
+ }
+ if !found && !useProxy {
+ return nil, verror.New(verror.ErrBadArg, s.ctx, "failed to create any listeners")
+ }
+
+ if roaming && s.dhcpState == nil && listenSpec.StreamPublisher != nil {
+ // Create a dhcp listener if we haven't already done so.
+ dhcp := &dhcpState{
+ name: listenSpec.StreamName,
+ publisher: listenSpec.StreamPublisher,
+ watchers: make(map[chan<- ipc.NetworkChange]struct{}),
+ }
+ s.dhcpState = dhcp
+ dhcp.ch = make(chan config.Setting, 10)
+ dhcp.stream, dhcp.err = dhcp.publisher.ForkStream(dhcp.name, dhcp.ch)
+ if dhcp.err == nil {
+ // We have a goroutine to listen for dhcp changes.
+ s.active.Add(1)
+ go func() {
+ s.dhcpLoop(dhcp.ch)
+ s.active.Done()
+ }()
+ }
+ }
+
+ eps := make([]naming.Endpoint, 0, 10)
+ for _, ls := range lnState {
+ s.listenState[ls] = struct{}{}
+ if ls.ln != nil {
+ // We have a goroutine per listener to accept new flows.
+ // Each flow is served from its own goroutine.
+ s.active.Add(1)
+ go func(ln stream.Listener, ep naming.Endpoint) {
+ s.listenLoop(ln, ep)
+ s.active.Done()
+ }(ls.ln, ls.lep)
+ }
+
+ for _, iep := range ls.ieps {
+ s.publisher.AddServer(iep.String(), s.servesMountTable)
+ eps = append(eps, iep)
+ }
+ }
+
+ return eps, nil
+}
+
+func (s *server) reconnectAndPublishProxy(proxy string) (*inaming.Endpoint, stream.Listener, error) {
+ resolved, err := s.resolveToEndpoint(proxy)
+ if err != nil {
+ return nil, nil, fmt.Errorf("Failed to resolve proxy %q (%v)", proxy, err)
+ }
+ ln, ep, err := s.streamMgr.Listen(inaming.Network, resolved, s.listenerOpts...)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to listen on %q: %s", resolved, err)
+ }
+ iep, ok := ep.(*inaming.Endpoint)
+ if !ok {
+ ln.Close()
+ return nil, nil, fmt.Errorf("internal type conversion error for %T", ep)
+ }
+ s.Lock()
+ s.proxies[proxy] = proxyState{iep, nil}
+ s.Unlock()
+ iep.IsMountTable = s.servesMountTable
+ s.publisher.AddServer(iep.String(), s.servesMountTable)
+ return iep, ln, nil
+}
+
+func (s *server) proxyListenLoop(proxy string) {
+ const (
+ min = 5 * time.Millisecond
+ max = 5 * time.Minute
+ )
+
+ iep, ln, err := s.reconnectAndPublishProxy(proxy)
+ if err != nil {
+ vlog.VI(1).Infof("Failed to connect to proxy: %s", err)
+ }
+ // The initial connection may have failed, but we enter the retry
+ // loop anyway so that we will continue to try and connect to the
+ // proxy.
+ s.Lock()
+ if s.isStopState() {
+ s.Unlock()
+ return
+ }
+ s.Unlock()
+
+ for {
+ if ln != nil && iep != nil {
+ err := s.listenLoop(ln, iep)
+ // The listener is done, so:
+ // (1) Unpublish its name
+ s.publisher.RemoveServer(iep.String())
+ s.Lock()
+ if err != nil {
+ s.proxies[proxy] = proxyState{iep, verror.New(verror.ErrNoServers, s.ctx, err)}
+ } else {
+ // err will be nil if we're stopping.
+ s.proxies[proxy] = proxyState{iep, nil}
+ s.Unlock()
+ return
+ }
+ s.Unlock()
+ }
+
+ s.Lock()
+ if s.isStopState() {
+ s.Unlock()
+ return
+ }
+ s.Unlock()
+
+ // (2) Reconnect to the proxy unless the server has been stopped
+ backoff := min
+ ln = nil
+ for {
+ select {
+ case <-time.After(backoff):
+ if backoff = backoff * 2; backoff > max {
+ backoff = max
+ }
+ case <-s.stoppedChan:
+ return
+ }
+ // (3) reconnect, publish new address
+ if iep, ln, err = s.reconnectAndPublishProxy(proxy); err != nil {
+ vlog.VI(1).Infof("Failed to reconnect to proxy %q: %s", proxy, err)
+ } else {
+ vlog.VI(1).Infof("Reconnected to proxy %q, %s", proxy, iep)
+ break
+ }
+ }
+ }
+}
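+
+// The retry schedule above is a simple capped exponential backoff: reconnect
+// attempts are spaced roughly as
+//
+//    5ms, 10ms, 20ms, ... doubling until the 5 minute cap, then every 5m
+//
+// and the loop exits immediately if the server is stopped (stoppedChan).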
+
+// addListener adds the supplied listener taking care to
+// check to see if we're already stopping. It returns true
+// if the listener was added.
+func (s *server) addListener(ln stream.Listener) bool {
+ s.Lock()
+ defer s.Unlock()
+ if s.isStopState() {
+ return false
+ }
+ s.listeners[ln] = struct{}{}
+ return true
+}
+
+// rmListener removes the supplied listener taking care to
+// check if we're already stopping. It returns true if the
+// listener was removed.
+func (s *server) rmListener(ln stream.Listener) bool {
+ s.Lock()
+ defer s.Unlock()
+ if s.isStopState() {
+ return false
+ }
+ delete(s.listeners, ln)
+ return true
+}
+
+func (s *server) listenLoop(ln stream.Listener, ep naming.Endpoint) error {
+ defer vlog.VI(1).Infof("ipc: Stopped listening on %s", ep)
+ var calls sync.WaitGroup
+
+ if !s.addListener(ln) {
+ // We're stopping.
+ return nil
+ }
+
+ defer func() {
+ calls.Wait()
+ s.rmListener(ln)
+ }()
+ for {
+ flow, err := ln.Accept()
+ if err != nil {
+ vlog.VI(10).Infof("ipc: Accept on %v failed: %v", ep, err)
+ return err
+ }
+ calls.Add(1)
+ go func(flow stream.Flow) {
+ defer calls.Done()
+ fs, err := newFlowServer(flow, s)
+ if err != nil {
+ vlog.Errorf("newFlowServer on %v failed: %v", ep, err)
+ return
+ }
+ if err := fs.serve(); err != nil {
+ // TODO(caprita): Logging errors here is too spammy. For example, "not
+ // authorized" errors shouldn't be logged as server errors.
+ // TODO(cnicolaou): revisit this when verror2 transition is
+ // done.
+ if err != io.EOF {
+ vlog.VI(2).Infof("Flow serve on %v failed: %v", ep, err)
+ }
+ }
+ }(flow)
+ }
+}
+
+func (s *server) dhcpLoop(ch chan config.Setting) {
+ defer vlog.VI(1).Infof("ipc: Stopped listen for dhcp changes")
+ vlog.VI(2).Infof("ipc: dhcp loop")
+ for setting := range ch {
+ if setting == nil {
+ return
+ }
+ switch v := setting.Value().(type) {
+ case []ipc.Address:
+ s.Lock()
+ if s.isStopState() {
+ s.Unlock()
+ return
+ }
+ var err error
+ var changed []naming.Endpoint
+ switch setting.Name() {
+ case ipc.NewAddrsSetting:
+ changed = s.addAddresses(v)
+ case ipc.RmAddrsSetting:
+ changed, err = s.removeAddresses(v)
+ }
+ change := ipc.NetworkChange{
+ Time: time.Now(),
+ State: externalStates[s.state],
+ Setting: setting,
+ Changed: changed,
+ Error: err,
+ }
+ vlog.VI(2).Infof("ipc: dhcp: change %v", change)
+ for ch, _ := range s.dhcpState.watchers {
+ select {
+ case ch <- change:
+ default:
+ }
+ }
+ s.Unlock()
+ default:
+ vlog.Errorf("ipc: dhcpLoop: unhandled setting type %T", v)
+ }
+ }
+}
+
+func getHost(address ipc.Address) string {
+ host, _, err := net.SplitHostPort(address.Address().String())
+ if err == nil {
+ return host
+ }
+ return address.Address().String()
+}
+
+// Remove all endpoints that have the same host address as the supplied
+// address parameter.
+func (s *server) removeAddresses(addresses []ipc.Address) ([]naming.Endpoint, error) {
+ var removed []naming.Endpoint
+ for _, address := range addresses {
+ host := getHost(address)
+ for ls, _ := range s.listenState {
+ if ls != nil && ls.roaming && len(ls.ieps) > 0 {
+ remaining := make([]*inaming.Endpoint, 0, len(ls.ieps))
+ for _, iep := range ls.ieps {
+ lnHost, _, err := net.SplitHostPort(iep.Address)
+ if err != nil {
+ lnHost = iep.Address
+ }
+ if lnHost == host {
+ vlog.VI(2).Infof("ipc: dhcp removing: %s", iep)
+ removed = append(removed, iep)
+ s.publisher.RemoveServer(iep.String())
+ continue
+ }
+ remaining = append(remaining, iep)
+ }
+ ls.ieps = remaining
+ }
+ }
+ }
+ return removed, nil
+}
+
+// Add new endpoints for the new address. There is no way to know with
+// 100% confidence which new endpoints to publish without shutting down
+// all network connections and reinitializing everything from scratch.
+// Instead, we find all roaming listeners with at least one endpoint
+// and create a new endpoint with the same port as the existing ones
+// but with the new address supplied to us by the dhcp code. As
+// an additional safeguard we reject the new address if it is not
+// externally accessible.
+// This places the onus on the dhcp/roaming code that sends us addresses
+// to ensure that those addresses are externally reachable.
+func (s *server) addAddresses(addresses []ipc.Address) []naming.Endpoint {
+ var added []naming.Endpoint
+ for _, address := range addresses {
+ if !netstate.IsAccessibleIP(address) {
+ return added
+ }
+ host := getHost(address)
+ for ls, _ := range s.listenState {
+ if ls != nil && ls.roaming {
+ niep := ls.protoIEP
+ niep.Address = net.JoinHostPort(host, ls.port)
+ ls.ieps = append(ls.ieps, &niep)
+ vlog.VI(2).Infof("ipc: dhcp adding: %s", niep)
+ s.publisher.AddServer(niep.String(), s.servesMountTable)
+ added = append(added, &niep)
+ }
+ }
+ }
+ return added
+}
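+
+// As a concrete sketch (the addresses are hypothetical): if a roaming listener
+// was created on port "8100" and dhcp later reports address 192.168.1.7, the
+// loop above effectively does
+//
+//    niep := ls.protoIEP
+//    niep.Address = net.JoinHostPort("192.168.1.7", "8100")
+//    s.publisher.AddServer(niep.String(), s.servesMountTable)
+//
+// and records the new endpoint in ls.ieps so that a later removeAddresses can
+// unpublish it.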
+
+type leafDispatcher struct {
+ invoker ipc.Invoker
+ auth security.Authorizer
+}
+
+func (d leafDispatcher) Lookup(suffix string) (interface{}, security.Authorizer, error) {
+ if suffix != "" {
+ return nil, nil, ipc.NewErrUnknownSuffix(nil, suffix)
+ }
+ return d.invoker, d.auth, nil
+}
+
+func (s *server) Serve(name string, obj interface{}, authorizer security.Authorizer) error {
+ defer vlog.LogCall()()
+ if obj == nil {
+ return verror.New(verror.ErrBadArg, s.ctx, "nil object")
+ }
+ invoker, err := objectToInvoker(obj)
+ if err != nil {
+ return verror.New(verror.ErrBadArg, s.ctx, fmt.Sprintf("bad object: %v", err))
+ }
+ return s.ServeDispatcher(name, &leafDispatcher{invoker, authorizer})
+}
+
+func (s *server) ServeDispatcher(name string, disp ipc.Dispatcher) error {
+ defer vlog.LogCall()()
+ if disp == nil {
+ return verror.New(verror.ErrBadArg, s.ctx, "nil dispatcher")
+ }
+ s.Lock()
+ defer s.Unlock()
+ if err := s.allowed(serving, "Serve or ServeDispatcher"); err != nil {
+ return err
+ }
+ vtrace.GetSpan(s.ctx).Annotate("Serving under name: " + name)
+ s.disp = disp
+ if len(name) > 0 {
+ s.publisher.AddName(name)
+ }
+ return nil
+}
+
+func (s *server) AddName(name string) error {
+ defer vlog.LogCall()()
+ if len(name) == 0 {
+ return verror.New(verror.ErrBadArg, s.ctx, "name is empty")
+ }
+ s.Lock()
+ defer s.Unlock()
+ if err := s.allowed(publishing, "AddName"); err != nil {
+ return err
+ }
+ vtrace.GetSpan(s.ctx).Annotate("Serving under name: " + name)
+ s.publisher.AddName(name)
+ return nil
+}
+
+func (s *server) RemoveName(name string) {
+ defer vlog.LogCall()()
+ s.Lock()
+ defer s.Unlock()
+ if err := s.allowed(publishing, "RemoveName"); err != nil {
+ return
+ }
+ vtrace.GetSpan(s.ctx).Annotate("Removed name: " + name)
+ s.publisher.RemoveName(name)
+}
+
+func (s *server) Stop() error {
+ defer vlog.LogCall()()
+ s.Lock()
+ if s.isStopState() {
+ s.Unlock()
+ return nil
+ }
+ s.state = stopping
+ close(s.stoppedChan)
+ s.Unlock()
+
+ // Delete the stats object.
+ s.stats.stop()
+
+ // Note: it's safe to Stop/WaitForStop on the publisher outside of the
+ // server lock, since the publisher is safe for concurrent access.
+
+ // Stop the publisher, which triggers unmounting of published names.
+ s.publisher.Stop()
+ // Wait for the publisher to be done unmounting before we can proceed to
+ // close the listeners (to minimize the number of mounted names pointing
+ // to endpoints that are no longer serving).
+ //
+ // TODO(caprita): See if it makes sense to fail fast on rejecting
+ // connections once listeners are closed, and to parallelize the publisher
+ // and listener shutdown.
+ s.publisher.WaitForStop()
+
+ s.Lock()
+
+ // Close all listeners. No new flows will be accepted, while in-flight
+ // flows will continue until they terminate naturally.
+ nListeners := len(s.listeners)
+ errCh := make(chan error, nListeners)
+
+ for ln, _ := range s.listeners {
+ go func(ln stream.Listener) {
+ errCh <- ln.Close()
+ }(ln)
+ }
+
+ drain := func(ch chan config.Setting) {
+ for {
+ select {
+ case v := <-ch:
+ if v == nil {
+ return
+ }
+ default:
+ close(ch)
+ return
+ }
+ }
+ }
+
+ if dhcp := s.dhcpState; dhcp != nil {
+ // TODO(cnicolaou,caprita): investigate not having to close and drain
+ // the channel here. It's a little awkward right now since we have to
+ // be careful to not close the channel in two places, i.e. here and
+ // from the publisher's Shutdown method.
+ if err := dhcp.publisher.CloseFork(dhcp.name, dhcp.ch); err == nil {
+ drain(dhcp.ch)
+ }
+ }
+
+ s.Unlock()
+
+ var firstErr error
+ for i := 0; i < nListeners; i++ {
+ if err := <-errCh; err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ // At this point, we are guaranteed that no new requests are going to be
+ // accepted.
+
+ // Wait for the publisher and active listener + flows to finish.
+ done := make(chan struct{}, 1)
+ go func() { s.active.Wait(); done <- struct{}{} }()
+
+ select {
+ case <-done:
+ case <-time.After(5 * time.Minute):
+ vlog.Errorf("Listener Close Error: %v", firstErr)
+ vlog.Errorf("Timedout waiting for goroutines to stop: listeners: %d", nListeners, len(s.listeners))
+ for ln, _ := range s.listeners {
+ vlog.Errorf("Listener: %p", ln)
+ }
+ for ls, _ := range s.listenState {
+ vlog.Errorf("ListenState: %v", ls)
+ }
+ <-done
+ }
+
+ s.Lock()
+ defer s.Unlock()
+ s.disp = nil
+ if firstErr != nil {
+ return verror.New(verror.ErrInternal, s.ctx, firstErr)
+ }
+ s.state = stopped
+ s.cancel()
+ return nil
+}
+
+// flowServer implements the RPC server-side protocol for a single RPC, over a
+// flow that's already connected to the client.
+type flowServer struct {
+ *context.T
+ server *server // ipc.Server that this flow server belongs to
+ disp ipc.Dispatcher // ipc.Dispatcher that will serve RPCs on this flow
+ dec *vom.Decoder // to decode requests and args from the client
+ enc *vom.Encoder // to encode responses and results to the client
+ flow stream.Flow // underlying flow
+
+ // Fields filled in during the server invocation.
+ clientBlessings security.Blessings
+ ackBlessings bool
+ grantedBlessings security.Blessings
+ method, suffix string
+ tags []*vdl.Value
+ discharges map[string]security.Discharge
+ starttime time.Time
+ endStreamArgs bool // are the stream args at EOF?
+ allowDebug bool // true if the caller is permitted to view debug information.
+}
+
+var _ ipc.Stream = (*flowServer)(nil)
+
+func newFlowServer(flow stream.Flow, server *server) (*flowServer, error) {
+ server.Lock()
+ disp := server.disp
+ server.Unlock()
+
+ fs := &flowServer{
+ T: server.ctx,
+ server: server,
+ disp: disp,
+ flow: flow,
+ discharges: make(map[string]security.Discharge),
+ }
+ var err error
+ if fs.dec, err = vom.NewDecoder(flow); err != nil {
+ flow.Close()
+ return nil, err
+ }
+ if fs.enc, err = vom.NewEncoder(flow); err != nil {
+ flow.Close()
+ return nil, err
+ }
+ return fs, nil
+}
+
+func (fs *flowServer) serve() error {
+ defer fs.flow.Close()
+
+ results, err := fs.processRequest()
+
+ vtrace.GetSpan(fs.T).Finish()
+
+ var traceResponse vtrace.Response
+ if fs.allowDebug {
+ traceResponse = ivtrace.Response(fs.T)
+ }
+
+ // Respond to the client with the response header and positional results.
+ response := ipc.Response{
+ Error: err,
+ EndStreamResults: true,
+ NumPosResults: uint64(len(results)),
+ TraceResponse: traceResponse,
+ AckBlessings: fs.ackBlessings,
+ }
+ if err := fs.enc.Encode(response); err != nil {
+ if err == io.EOF {
+ return err
+ }
+ return fmt.Errorf("ipc: response encoding failed: %v", err)
+ }
+ if response.Error != nil {
+ return response.Error
+ }
+ for ix, res := range results {
+ if err := fs.enc.Encode(res); err != nil {
+ if err == io.EOF {
+ return err
+ }
+ return fmt.Errorf("ipc: result #%d [%T=%v] encoding failed: %v", ix, res, res, err)
+ }
+ }
+ // TODO(ashankar): Should unread data from the flow be drained?
+ //
+ // Reason to do so:
+ // The common stream.Flow implementation (veyron/profiles/internal/ipc/stream/vc/reader.go)
+ // uses iobuf.Slices backed by an iobuf.Pool. If the stream is not drained, these
+ // slices will not be returned to the pool leading to possibly increased memory usage.
+ //
+ // Reason to not do so:
+ // Draining here will conflict with any Reads on the flow in a separate goroutine
+ // (for example, see TestStreamReadTerminatedByServer in full_test.go).
+ //
+ // For now, go with the reason to not do so as having unread data in the stream
+ // should be a rare case.
+ return nil
+}
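+
+// For reference, this is the shape of what serve writes to the flow for a
+// non-streaming call that produced two positional results (a hedged sketch;
+// the values are placeholders):
+//
+//    enc.Encode(ipc.Response{EndStreamResults: true, NumPosResults: 2, ...})
+//    enc.Encode(result0)
+//    enc.Encode(result1)
+//
+// Streaming items sent earlier via Send are each preceded by an empty
+// ipc.Response header (see Send below).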
+
+func (fs *flowServer) readIPCRequest() (*ipc.Request, error) {
+ // Set a default timeout before reading from the flow. Without this timeout,
+ // a client that sends no request or a partial request will retain the flow
+ // indefinitely (and lock up server resources).
+ initTimer := newTimer(defaultCallTimeout)
+ defer initTimer.Stop()
+ fs.flow.SetDeadline(initTimer.C)
+
+ // Decode the initial request.
+ var req ipc.Request
+ if err := fs.dec.Decode(&req); err != nil {
+ return nil, verror.New(verror.ErrBadProtocol, fs.T, newErrBadRequest(fs.T, err))
+ }
+ return &req, nil
+}
+
+func (fs *flowServer) processRequest() ([]interface{}, error) {
+ fs.starttime = time.Now()
+ req, err := fs.readIPCRequest()
+ if err != nil {
+ // We don't know what the ipc call was supposed to be, but we'll create
+ // a placeholder span so we can capture annotations.
+ fs.T, _ = vtrace.SetNewSpan(fs.T, fmt.Sprintf("\"%s\".UNKNOWN", fs.Name()))
+ return nil, err
+ }
+ fs.method = req.Method
+ fs.suffix = strings.TrimLeft(req.Suffix, "/")
+
+ // TODO(mattr): Currently this allows users to trigger trace collection
+ // on the server even if they will not be allowed to collect the
+ // results later. This might be considered a DOS vector.
+ spanName := fmt.Sprintf("\"%s\".%s", fs.Name(), fs.Method())
+ fs.T, _ = vtrace.SetContinuedTrace(fs.T, spanName, req.TraceRequest)
+
+ var cancel context.CancelFunc
+ if !req.Deadline.IsZero() {
+ fs.T, cancel = context.WithDeadline(fs.T, req.Deadline.Time)
+ } else {
+ fs.T, cancel = context.WithCancel(fs.T)
+ }
+ fs.flow.SetDeadline(fs.Done())
+ go fs.cancelContextOnClose(cancel)
+
+ // Initialize security: blessings, discharges, etc.
+ if err := fs.initSecurity(req); err != nil {
+ return nil, err
+ }
+ // Lookup the invoker.
+ invoker, auth, err := fs.lookup(fs.suffix, &fs.method)
+ if err != nil {
+ return nil, err
+ }
+ // Prepare invoker and decode args.
+ numArgs := int(req.NumPosArgs)
+ argptrs, tags, err := invoker.Prepare(fs.method, numArgs)
+ fs.tags = tags
+ if err != nil {
+ return nil, err
+ }
+ if called, want := req.NumPosArgs, uint64(len(argptrs)); called != want {
+ return nil, verror.New(verror.ErrBadProtocol, fs.T, newErrBadNumInputArgs(fs.T, fs.suffix, fs.method, called, want))
+ }
+ for ix, argptr := range argptrs {
+ if err := fs.dec.Decode(argptr); err != nil {
+ return nil, verror.New(verror.ErrBadProtocol, fs.T, newErrBadInputArg(fs.T, fs.suffix, fs.method, uint64(ix), err))
+ }
+ }
+ // Check application's authorization policy.
+ if err := authorize(fs, auth); err != nil {
+ return nil, err
+ }
+ // Check if the caller is permitted to view debug information.
+ // TODO(mattr): Is access.Debug the right thing to check?
+ fs.allowDebug = authorize(debugContext{fs}, auth) == nil
+ // Invoke the method.
+ results, err := invoker.Invoke(fs.method, fs, argptrs)
+ fs.server.stats.record(fs.method, time.Since(fs.starttime))
+ return results, err
+}
+
+func (fs *flowServer) cancelContextOnClose(cancel context.CancelFunc) {
+ // Ensure that the context gets cancelled if the flow is closed
+ // due to a network error, or client cancellation.
+ select {
+ case <-fs.flow.Closed():
+ // Here we remove the contexts channel as a deadline to the flow.
+ // We do this to ensure clients get a consistent error when they read/write
+ // after the flow is closed. Since the flow is already closed, it doesn't
+ // matter that the context is also cancelled.
+ fs.flow.SetDeadline(nil)
+ cancel()
+ case <-fs.Done():
+ }
+}
+
+// lookup returns the invoker and authorizer responsible for serving the given
+// name and method. The suffix is stripped of any leading slashes. If the
+// suffix is reserved, the server's reserved-name dispatcher is used to look up
+// the invoker; otherwise the server's regular dispatcher is used. The suffix
+// and method values may be modified to match the actual suffix and method to
+// use.
+func (fs *flowServer) lookup(suffix string, method *string) (ipc.Invoker, security.Authorizer, error) {
+ if naming.IsReserved(*method) {
+ // All reserved methods are trapped and handled here, by removing the
+ // reserved prefix and invoking them on reservedMethods. E.g. "__Glob"
+ // invokes reservedMethods.Glob.
+ *method = naming.StripReserved(*method)
+ return reservedInvoker(fs.disp, fs.server.dispReserved), &acceptAllAuthorizer{}, nil
+ }
+ disp := fs.disp
+ if naming.IsReserved(suffix) {
+ disp = fs.server.dispReserved
+ }
+ if disp != nil {
+ obj, auth, err := disp.Lookup(suffix)
+ switch {
+ case err != nil:
+ return nil, nil, err
+ case obj != nil:
+ invoker, err := objectToInvoker(obj)
+ if err != nil {
+ return nil, nil, verror.New(verror.ErrInternal, fs.T, "invalid received object", err)
+ }
+ return invoker, auth, nil
+ }
+ }
+ return nil, nil, ipc.NewErrUnknownSuffix(nil, suffix)
+}
+
+func objectToInvoker(obj interface{}) (ipc.Invoker, error) {
+ if obj == nil {
+ return nil, errors.New("nil object")
+ }
+ if invoker, ok := obj.(ipc.Invoker); ok {
+ return invoker, nil
+ }
+ return ipc.ReflectInvoker(obj)
+}
+
+func (fs *flowServer) initSecurity(req *ipc.Request) error {
+ // If additional credentials are provided, make them available in the context.
+ // Detect unusable blessings now, rather than discovering they are unusable
+ // on first use.
+ //
+ // TODO(ashankar,ataly): Potential confused deputy attack: The client provides
+ // the server's identity as the blessing. Figure out what we want to do about
+ // this - should servers be able to assume that a blessing is something that
+ // does not have the authorizations that the server's own identity has?
+ if b := req.GrantedBlessings; b.PublicKey() != nil && !reflect.DeepEqual(b.PublicKey(), fs.flow.LocalPrincipal().PublicKey()) {
+ return verror.New(verror.ErrNoAccess, fs.T, fmt.Sprintf("blessing granted not bound to this server(%v vs %v)", b.PublicKey(), fs.flow.LocalPrincipal().PublicKey()))
+ }
+ fs.grantedBlessings = req.GrantedBlessings
+ var err error
+ if fs.clientBlessings, err = serverDecodeBlessings(fs.flow.VCDataCache(), req.Blessings, fs.server.stats); err != nil {
+ // When the server can't access the blessings cache, the client is not following
+ // protocol, so the server closes the VCs corresponding to the client endpoint.
+ // TODO(suharshs,toddw): Figure out a way to only shutdown the current VC, instead
+ // of all VCs connected to the RemoteEndpoint.
+ fs.server.streamMgr.ShutdownEndpoint(fs.RemoteEndpoint())
+ return verror.New(verror.ErrBadProtocol, fs.T, newErrBadBlessingsCache(fs.T, err))
+ }
+ fs.ackBlessings = true
+
+ for _, d := range req.Discharges {
+ dis := security.NewDischarge(d)
+ fs.discharges[dis.ID()] = dis
+ }
+ return nil
+}
+
+type acceptAllAuthorizer struct{}
+
+func (acceptAllAuthorizer) Authorize(security.Call) error {
+ return nil
+}
+
+func authorize(ctx ipc.ServerCall, auth security.Authorizer) error {
+ if ctx.LocalPrincipal() == nil {
+ // LocalPrincipal is nil means that the server wanted to avoid
+ // authentication, and thus wanted to skip authorization as well.
+ return nil
+ }
+ if auth == nil {
+ auth = defaultAuthorizer{}
+ }
+ if err := auth.Authorize(ctx); err != nil {
+ // TODO(ataly, ashankar): For privacy reasons, should we hide the authorizer error?
+ return verror.New(verror.ErrNoAccess, ctx.Context(), newErrBadAuth(ctx.Context(), ctx.Suffix(), ctx.Method(), err))
+ }
+ return nil
+}
+
+// debugContext is a context which wraps another context but always returns
+// the debug tag.
+type debugContext struct {
+ ipc.ServerCall
+}
+
+func (debugContext) MethodTags() []*vdl.Value {
+ return []*vdl.Value{vdl.ValueOf(access.Debug)}
+}
+
+// Send implements the ipc.Stream method.
+func (fs *flowServer) Send(item interface{}) error {
+ defer vlog.LogCall()()
+ // The empty response header indicates what follows is a streaming result.
+ if err := fs.enc.Encode(ipc.Response{}); err != nil {
+ return err
+ }
+ return fs.enc.Encode(item)
+}
+
+// Recv implements the ipc.Stream method.
+func (fs *flowServer) Recv(itemptr interface{}) error {
+ defer vlog.LogCall()()
+ var req ipc.Request
+ if err := fs.dec.Decode(&req); err != nil {
+ return err
+ }
+ if req.EndStreamArgs {
+ fs.endStreamArgs = true
+ return io.EOF
+ }
+ return fs.dec.Decode(itemptr)
+}
+
+// Implementations of ipc.ServerCall methods.
+
+func (fs *flowServer) RemoteDischarges() map[string]security.Discharge {
+ //nologcall
+ return fs.discharges
+}
+func (fs *flowServer) Server() ipc.Server {
+ //nologcall
+ return fs.server
+}
+func (fs *flowServer) Timestamp() time.Time {
+ //nologcall
+ return fs.starttime
+}
+func (fs *flowServer) Method() string {
+ //nologcall
+ return fs.method
+}
+func (fs *flowServer) MethodTags() []*vdl.Value {
+ //nologcall
+ return fs.tags
+}
+func (fs *flowServer) Context() *context.T {
+ return fs.T
+}
+
+func (fs *flowServer) VanadiumContext() *context.T {
+ return fs.T
+}
+
+// TODO(cnicolaou): remove Name from ipc.ServerCall and all of
+// its implementations
+func (fs *flowServer) Name() string {
+ //nologcall
+ return fs.suffix
+}
+func (fs *flowServer) Suffix() string {
+ //nologcall
+ return fs.suffix
+}
+func (fs *flowServer) LocalPrincipal() security.Principal {
+ //nologcall
+ return fs.flow.LocalPrincipal()
+}
+func (fs *flowServer) LocalBlessings() security.Blessings {
+ //nologcall
+ return fs.flow.LocalBlessings()
+}
+func (fs *flowServer) RemoteBlessings() security.Blessings {
+ //nologcall
+ if !fs.clientBlessings.IsZero() {
+ return fs.clientBlessings
+ }
+ return fs.flow.RemoteBlessings()
+}
+func (fs *flowServer) GrantedBlessings() security.Blessings {
+ //nologcall
+ return fs.grantedBlessings
+}
+func (fs *flowServer) LocalEndpoint() naming.Endpoint {
+ //nologcall
+ return fs.flow.LocalEndpoint()
+}
+func (fs *flowServer) RemoteEndpoint() naming.Endpoint {
+ //nologcall
+ return fs.flow.RemoteEndpoint()
+}
diff --git a/profiles/internal/ipc/server_authorizer.go b/profiles/internal/ipc/server_authorizer.go
new file mode 100644
index 0000000..04efce6
--- /dev/null
+++ b/profiles/internal/ipc/server_authorizer.go
@@ -0,0 +1,116 @@
+package ipc
+
+import (
+ "errors"
+ "reflect"
+
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/options"
+ "v.io/v23/security"
+ "v.io/v23/verror"
+)
+
+// TODO(ribrdb): Flip this to true once everything is updated and also update
+// the server authorizer tests.
+const enableSecureServerAuth = false
+
+var (
+ errNoBlessings = verror.Register(pkgPath+".noBlessings", verror.NoRetry, "server has not presented any blessings")
+
+ errAuthNoPatternMatch = verror.Register(pkgPath+".authNoPatternMatch",
+ verror.NoRetry, "server blessings {3} do not match pattern {4}{:5}")
+
+ errAuthServerNotAllowed = verror.Register(pkgPath+".authServerNotAllowed",
+ verror.NoRetry, "server blessings {3} do not match any allowed server patterns {4}{:5}")
+
+ errAuthServerKeyNotAllowed = verror.Register(pkgPath+".authServerKeyNotAllowed",
+ verror.NoRetry, "remote public key {3} not matched by server key {4}")
+)
+
+// serverAuthorizer implements security.Authorizer.
+type serverAuthorizer struct {
+ patternsFromNameResolution []security.BlessingPattern
+ allowedServerPolicies [][]security.BlessingPattern
+ serverPublicKey security.PublicKey
+}
+
+// newServerAuthorizer returns a security.Authorizer for authorizing the server
+// during a flow. The authorization policy is based on enforcing any server
+// patterns obtained by resolving the server's name, and any server authorization
+// options supplied to the call that initiated the flow.
+//
+// This method assumes that canCreateServerAuthorizer(opts) is nil.
+func newServerAuthorizer(ctx *context.T, patternsFromNameResolution []security.BlessingPattern, opts ...ipc.CallOpt) security.Authorizer {
+ auth := &serverAuthorizer{
+ patternsFromNameResolution: patternsFromNameResolution,
+ }
+ for _, o := range opts {
+ // TODO(ataly, ashankar): Consider creating an authorizer for each of the
+ // options below and then take the intersection of the authorizers.
+ switch v := o.(type) {
+ case options.ServerPublicKey:
+ auth.serverPublicKey = v.PublicKey
+ case options.AllowedServersPolicy:
+ auth.allowedServerPolicies = append(auth.allowedServerPolicies, v)
+ case options.SkipResolveAuthorization:
+ auth.patternsFromNameResolution = []security.BlessingPattern{security.AllPrincipals}
+ }
+ }
+ return auth
+}
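+
+// A minimal usage sketch (the patterns and options are hypothetical); the
+// various combinations are exercised in server_authorizer_test.go:
+//
+//    auth := newServerAuthorizer(ctx,
+//        []security.BlessingPattern{"ali", "bob"}, // patterns from name resolution
+//        options.AllowedServersPolicy{"ali"})      // caller-supplied policy
+//    err := auth.Authorize(call)                   // call is the security.Call for the flow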
+
+func (a *serverAuthorizer) Authorize(ctx security.Call) error {
+ if ctx.RemoteBlessings().IsZero() {
+ return verror.New(errNoBlessings, ctx.Context())
+ }
+ serverBlessings, rejectedBlessings := ctx.RemoteBlessings().ForCall(ctx)
+
+ if !matchedBy(a.patternsFromNameResolution, serverBlessings) {
+ return verror.New(errAuthNoPatternMatch, ctx.Context(), serverBlessings, a.patternsFromNameResolution, rejectedBlessings)
+ } else if enableSecureServerAuth {
+ // No server patterns were obtained while resolving the name, authorize
+ // the server using the default authorization policy.
+ if err := (defaultAuthorizer{}).Authorize(ctx); err != nil {
+ return verror.New(errDefaultAuthDenied, ctx.Context(), serverBlessings)
+ }
+ }
+
+ for _, patterns := range a.allowedServerPolicies {
+ if !matchedBy(patterns, serverBlessings) {
+ return verror.New(errAuthServerNotAllowed, ctx.Context(), serverBlessings, patterns, rejectedBlessings)
+ }
+ }
+
+ if remoteKey, key := ctx.RemoteBlessings().PublicKey(), a.serverPublicKey; key != nil && !reflect.DeepEqual(remoteKey, key) {
+ return verror.New(errAuthServerKeyNotAllowed, ctx.Context(), remoteKey, key)
+ }
+
+ return nil
+}
+
+func matchedBy(patterns []security.BlessingPattern, blessings []string) bool {
+ if patterns == nil {
+ return true
+ }
+ for _, p := range patterns {
+ if p.MatchedBy(blessings...) {
+ return true
+ }
+ }
+ return false
+}
+
+func canCreateServerAuthorizer(opts []ipc.CallOpt) error {
+ var pkey security.PublicKey
+ for _, o := range opts {
+ switch v := o.(type) {
+ case options.ServerPublicKey:
+ if pkey != nil && !reflect.DeepEqual(pkey, v.PublicKey) {
+ return errors.New("multiple ServerPublicKey options supplied to call, at most one is allowed")
+ }
+ pkey = v.PublicKey
+ }
+ }
+ return nil
+}
diff --git a/profiles/internal/ipc/server_authorizer_test.go b/profiles/internal/ipc/server_authorizer_test.go
new file mode 100644
index 0000000..1440dbd
--- /dev/null
+++ b/profiles/internal/ipc/server_authorizer_test.go
@@ -0,0 +1,99 @@
+package ipc
+
+import (
+ "testing"
+
+ tsecurity "v.io/x/ref/lib/testutil/security"
+
+ "v.io/v23/options"
+ "v.io/v23/security"
+)
+
+func TestServerAuthorizer(t *testing.T) {
+ var (
+ pclient = tsecurity.NewPrincipal()
+ pserver = tsecurity.NewPrincipal()
+ pother = tsecurity.NewPrincipal()
+
+ ali, _ = pserver.BlessSelf("ali")
+ bob, _ = pserver.BlessSelf("bob")
+ che, _ = pserver.BlessSelf("che")
+ otherAli, _ = pother.BlessSelf("ali")
+ zero = security.Blessings{}
+
+ ctx = testContext()
+
+ U = func(blessings ...security.Blessings) security.Blessings {
+ u, err := security.UnionOfBlessings(blessings...)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return u
+ }
+ )
+ // Make client recognize ali, bob and otherAli blessings
+ for _, b := range []security.Blessings{ali, bob, otherAli} {
+ if err := pclient.AddToRoots(b); err != nil {
+ t.Fatal(err)
+ }
+ }
+ // All tests are run as if pclient is the client end and pserver is remote end.
+ tests := []struct {
+ auth security.Authorizer
+ authorizedServers []security.Blessings
+ unauthorizedServers []security.Blessings
+ }{
+ {
+ // All servers with a non-zero blessing are authorized
+ newServerAuthorizer(ctx, nil),
+ []security.Blessings{ali, otherAli, bob, che},
+ []security.Blessings{zero},
+ },
+ {
+ // Only ali, otherAli and bob are authorized
+ newServerAuthorizer(ctx, []security.BlessingPattern{"ali", "bob"}),
+ []security.Blessings{ali, otherAli, bob, U(ali, che), U(bob, che)},
+ []security.Blessings{che},
+ },
+ {
+ // Still only ali, otherAli and bob are authorized (che is not
+ // authorized since it is not recognized by the client)
+ newServerAuthorizer(ctx, []security.BlessingPattern{"ali", "bob", "che"}, nil),
+ []security.Blessings{ali, otherAli, bob, U(ali, che), U(bob, che)},
+ []security.Blessings{che},
+ },
+ {
+
+ // Only ali and otherAli are authorized (since there is an
+ // allowed-servers policy that does not allow "bob")
+ newServerAuthorizer(ctx, []security.BlessingPattern{"ali", "bob", "che"}, options.AllowedServersPolicy{"ali", "bob"}, options.AllowedServersPolicy{"ali"}),
+ []security.Blessings{ali, otherAli, U(ali, che), U(ali, bob)},
+ []security.Blessings{bob, che},
+ },
+ {
+ // Only otherAli is authorized (since only pother's public key is
+ // authorized)
+ newServerAuthorizer(ctx, nil, options.ServerPublicKey{pother.PublicKey()}),
+ []security.Blessings{otherAli},
+ []security.Blessings{ali, bob, che},
+ },
+ }
+ for _, test := range tests {
+ for _, s := range test.authorizedServers {
+ if err := test.auth.Authorize(&mockSecurityContext{
+ p: pclient,
+ r: s,
+ }); err != nil {
+ t.Errorf("serverAuthorizer: %#v failed to authorize server: %v", test.auth, s)
+ }
+ }
+ for _, s := range test.unauthorizedServers {
+ if err := test.auth.Authorize(&mockSecurityContext{
+ p: pclient,
+ r: s,
+ }); err == nil {
+ t.Errorf("serverAuthorizer: %#v authorized server: %v", test.auth, s)
+ }
+ }
+ }
+}
diff --git a/profiles/internal/ipc/server_test.go b/profiles/internal/ipc/server_test.go
new file mode 100644
index 0000000..07b33de
--- /dev/null
+++ b/profiles/internal/ipc/server_test.go
@@ -0,0 +1,626 @@
+package ipc
+
+import (
+ "net"
+ "reflect"
+ "sort"
+ "testing"
+ "time"
+
+ "v.io/v23/config"
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/naming"
+ "v.io/v23/security"
+ "v.io/v23/verror"
+ "v.io/x/lib/vlog"
+
+ "v.io/x/ref/lib/netstate"
+ tsecurity "v.io/x/ref/lib/testutil/security"
+ imanager "v.io/x/ref/profiles/internal/ipc/stream/manager"
+ "v.io/x/ref/profiles/internal/ipc/stream/vc"
+ inaming "v.io/x/ref/profiles/internal/naming"
+ tnaming "v.io/x/ref/profiles/internal/testing/mocks/naming"
+)
+
+type noMethodsType struct{ Field string }
+
+type fieldType struct {
+ unexported string
+}
+type noExportedFieldsType struct{}
+
+func (noExportedFieldsType) F(_ ipc.ServerCall, f fieldType) error { return nil }
+
+type badObjectDispatcher struct{}
+
+func (badObjectDispatcher) Lookup(suffix string) (interface{}, security.Authorizer, error) {
+ return noMethodsType{}, nil, nil
+}
+
+// TestBadObject ensures that Serve handles bad receiver objects gracefully (in
+// particular, it doesn't panic).
+func TestBadObject(t *testing.T) {
+ sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+ defer sm.Shutdown()
+ ns := tnaming.NewSimpleNamespace()
+ ctx := testContext()
+ server, err := testInternalNewServer(ctx, sm, ns)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer server.Stop()
+
+ if _, err := server.Listen(listenSpec); err != nil {
+ t.Fatalf("Listen failed: %v", err)
+ }
+ if err := server.Serve("", nil, nil); err == nil {
+ t.Fatal("should have failed")
+ }
+ if err := server.Serve("", new(noMethodsType), nil); err == nil {
+ t.Fatal("should have failed")
+ }
+ if err := server.Serve("", new(noExportedFieldsType), nil); err == nil {
+ t.Fatal("should have failed")
+ }
+ if err := server.ServeDispatcher("servername", badObjectDispatcher{}); err != nil {
+ t.Fatalf("ServeDispatcher failed: %v", err)
+ }
+ client, err := InternalNewClient(sm, ns)
+ if err != nil {
+ t.Fatalf("InternalNewClient failed: %v", err)
+ }
+ ctx, _ = context.WithDeadline(testContext(), time.Now().Add(10*time.Second))
+ call, err := client.StartCall(ctx, "servername", "SomeMethod", nil)
+ if err != nil {
+ t.Fatalf("StartCall failed: %v", err)
+ }
+ var result string
+ if err := call.Finish(&result); err == nil {
+ // TODO(caprita): Check the error type rather than
+ // merely ensuring the test doesn't panic.
+ t.Fatalf("should have failed")
+ }
+}
+
+func TestServerArgs(t *testing.T) {
+ sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+ defer sm.Shutdown()
+ ns := tnaming.NewSimpleNamespace()
+ server, err := InternalNewServer(testContext(), sm, ns, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer server.Stop()
+ _, err = server.Listen(ipc.ListenSpec{})
+ if !verror.Is(err, verror.ErrBadArg.ID) {
+ t.Fatalf("expected a BadArg error: got %v", err)
+ }
+ _, err = server.Listen(ipc.ListenSpec{Addrs: ipc.ListenAddrs{{"tcp", "*:0"}}})
+ if !verror.Is(err, verror.ErrBadArg.ID) {
+ t.Fatalf("expected a BadArg error: got %v", err)
+ }
+ _, err = server.Listen(ipc.ListenSpec{
+ Addrs: ipc.ListenAddrs{
+ {"tcp", "*:0"},
+ {"tcp", "127.0.0.1:0"},
+ }})
+ if verror.Is(err, verror.ErrBadArg.ID) {
+ t.Fatalf("expected a BadArg error: got %v", err)
+ }
+ status := server.Status()
+ if got, want := len(status.Errors), 1; got != want {
+ t.Fatalf("got %s, want %s", got, want)
+ }
+ _, err = server.Listen(ipc.ListenSpec{Addrs: ipc.ListenAddrs{{"tcp", "*:0"}}})
+ if !verror.Is(err, verror.ErrBadArg.ID) {
+ t.Fatalf("expected a BadArg error: got %v", err)
+ }
+ status = server.Status()
+ if got, want := len(status.Errors), 1; got != want {
+ t.Fatalf("got %s, want %s", got, want)
+ }
+}
+
+type statusServer struct{ ch chan struct{} }
+
+func (s *statusServer) Hang(ctx ipc.ServerCall) error {
+ <-s.ch
+ return nil
+}
+
+func TestServerStatus(t *testing.T) {
+ ctx := testContext()
+ sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+ defer sm.Shutdown()
+ ns := tnaming.NewSimpleNamespace()
+ principal := vc.LocalPrincipal{tsecurity.NewPrincipal("testServerStatus")}
+ server, err := testInternalNewServer(ctx, sm, ns, principal)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer server.Stop()
+
+ status := server.Status()
+ if got, want := status.State, ipc.ServerInit; got != want {
+ t.Fatalf("got %s, want %s", got, want)
+ }
+ server.Listen(ipc.ListenSpec{Addrs: ipc.ListenAddrs{{"tcp", "127.0.0.1:0"}}})
+ status = server.Status()
+ if got, want := status.State, ipc.ServerActive; got != want {
+ t.Fatalf("got %s, want %s", got, want)
+ }
+ serverChan := make(chan struct{})
+ err = server.Serve("test", &statusServer{serverChan}, nil)
+ if err != nil {
+ t.Fatalf(err.Error())
+ }
+ status = server.Status()
+ if got, want := status.State, ipc.ServerActive; got != want {
+ t.Fatalf("got %s, want %s", got, want)
+ }
+
+ progress := make(chan error)
+
+ client, err := InternalNewClient(sm, ns, principal)
+ makeCall := func(ctx *context.T) {
+ call, err := client.StartCall(ctx, "test", "Hang", nil)
+ progress <- err
+ progress <- call.Finish()
+ }
+ go makeCall(ctx)
+
+ // Wait for RPC to start
+ if err := <-progress; err != nil {
+ t.Fatalf(err.Error())
+ }
+
+ // Stop server asynchronously
+ go func() {
+ err = server.Stop()
+ if err != nil {
+ t.Fatalf(err.Error())
+ }
+ }()
+
+ // Server should enter 'ServerStopping' state.
+ then := time.Now()
+ for {
+ status = server.Status()
+ if got, want := status.State, ipc.ServerStopping; got != want {
+ if time.Now().Sub(then) > time.Minute {
+ t.Fatalf("got %s, want %s", got, want)
+ }
+ } else {
+ break
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+ // Server won't stop until the statusServer's hung method completes.
+ close(serverChan)
+ // Wait for RPC to finish
+ if err := <-progress; err != nil {
+ t.Fatalf(err.Error())
+ }
+
+ // Now that the RPC is done, the server should be able to stop.
+ then = time.Now()
+ for {
+ status = server.Status()
+ if got, want := status.State, ipc.ServerStopped; got != want {
+ if time.Now().Sub(then) > time.Minute {
+ t.Fatalf("got %s, want %s", got, want)
+ }
+ } else {
+ break
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+}
+
+func TestServerStates(t *testing.T) {
+ sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+ defer sm.Shutdown()
+ ns := tnaming.NewSimpleNamespace()
+ ctx := testContext()
+
+ expectBadState := func(err error) {
+ if !verror.Is(err, verror.ErrBadState.ID) {
+ t.Fatalf("%s: unexpected error: %v", loc(1), err)
+ }
+ }
+
+ expectNoError := func(err error) {
+ if err != nil {
+ t.Fatalf("%s: unexpected error: %v", loc(1), err)
+ }
+ }
+
+ server, err := testInternalNewServer(ctx, sm, ns)
+ expectNoError(err)
+ defer server.Stop()
+
+ expectState := func(s ipc.ServerState) {
+ if got, want := server.Status().State, s; got != want {
+ t.Fatalf("%s: got %s, want %s", loc(1), got, want)
+ }
+ }
+
+ expectState(ipc.ServerInit)
+
+ // Need to call Listen first.
+ err = server.Serve("", &testServer{}, nil)
+ expectBadState(err)
+ err = server.AddName("a")
+ expectBadState(err)
+
+ _, err = server.Listen(ipc.ListenSpec{Addrs: ipc.ListenAddrs{{"tcp", "127.0.0.1:0"}}})
+ expectNoError(err)
+
+ expectState(ipc.ServerActive)
+
+ err = server.Serve("", &testServer{}, nil)
+ expectNoError(err)
+
+ err = server.Serve("", &testServer{}, nil)
+ expectBadState(err)
+
+ expectState(ipc.ServerActive)
+
+ err = server.AddName("a")
+ expectNoError(err)
+
+ expectState(ipc.ServerActive)
+
+ server.RemoveName("a")
+
+ expectState(ipc.ServerActive)
+
+ err = server.Stop()
+ expectNoError(err)
+ err = server.Stop()
+ expectNoError(err)
+
+ err = server.AddName("a")
+ expectBadState(err)
+}
+
+func TestMountStatus(t *testing.T) {
+ sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+ defer sm.Shutdown()
+ ns := tnaming.NewSimpleNamespace()
+ server, err := testInternalNewServer(testContext(), sm, ns)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer server.Stop()
+
+ eps, err := server.Listen(ipc.ListenSpec{
+ Addrs: ipc.ListenAddrs{
+ {"tcp", "127.0.0.1:0"},
+ {"tcp", "127.0.0.1:0"},
+ }})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got, want := len(eps), 2; got != want {
+ t.Fatalf("got %d, want %d", got, want)
+ }
+ if err = server.Serve("foo", &testServer{}, nil); err != nil {
+ t.Fatal(err)
+ }
+ status := server.Status()
+ if got, want := len(status.Mounts), 2; got != want {
+ t.Fatalf("got %d, want %d", got, want)
+ }
+ servers := status.Mounts.Servers()
+ if got, want := len(servers), 2; got != want {
+ t.Fatalf("got %d, want %d", got, want)
+ }
+ if got, want := servers, endpointToStrings(eps); !reflect.DeepEqual(got, want) {
+ t.Fatalf("got %v, want %v", got, want)
+ }
+
+ // Add a second name and we should now see 4 mounts, 2 for each name.
+ if err := server.AddName("bar"); err != nil {
+ t.Fatal(err)
+ }
+ status = server.Status()
+ if got, want := len(status.Mounts), 4; got != want {
+ t.Fatalf("got %d, want %d", got, want)
+ }
+ servers = status.Mounts.Servers()
+ if got, want := len(servers), 2; got != want {
+ t.Fatalf("got %d, want %d", got, want)
+ }
+ if got, want := servers, endpointToStrings(eps); !reflect.DeepEqual(got, want) {
+ t.Fatalf("got %v, want %v", got, want)
+ }
+ names := status.Mounts.Names()
+ if got, want := len(names), 2; got != want {
+ t.Fatalf("got %d, want %d", got, want)
+ }
+ serversPerName := map[string][]string{}
+ for _, ms := range status.Mounts {
+ serversPerName[ms.Name] = append(serversPerName[ms.Name], ms.Server)
+ }
+ if got, want := len(serversPerName), 2; got != want {
+ t.Fatalf("got %d, want %d", got, want)
+ }
+ for _, name := range []string{"foo", "bar"} {
+ if got, want := len(serversPerName[name]), 2; got != want {
+ t.Fatalf("got %d, want %d", got, want)
+ }
+ }
+}
+
+func updateHost(ep naming.Endpoint, address string) naming.Endpoint {
+ niep := *(ep).(*inaming.Endpoint)
+ niep.Address = address
+ return &niep
+}
+
+func getIPAddrs(eps []naming.Endpoint) []ipc.Address {
+ hosts := map[string]struct{}{}
+ for _, ep := range eps {
+ iep := (ep).(*inaming.Endpoint)
+ h, _, _ := net.SplitHostPort(iep.Address)
+ if len(h) > 0 {
+ hosts[h] = struct{}{}
+ }
+ }
+ addrs := []ipc.Address{}
+ for h, _ := range hosts {
+ a := &netstate.AddrIfc{Addr: &net.IPAddr{IP: net.ParseIP(h)}}
+ addrs = append(addrs, a)
+ }
+ return addrs
+}
+
+func endpointToStrings(eps []naming.Endpoint) []string {
+ r := []string{}
+ for _, ep := range eps {
+ r = append(r, ep.String())
+ }
+ sort.Strings(r)
+ return r
+}
+
+func cmpEndpoints(got, want []naming.Endpoint) bool {
+ if len(got) != len(want) {
+ return false
+ }
+ return reflect.DeepEqual(endpointToStrings(got), endpointToStrings(want))
+}
+
+func getUniqPorts(eps []naming.Endpoint) []string {
+ ports := map[string]struct{}{}
+ for _, ep := range eps {
+ iep := ep.(*inaming.Endpoint)
+ _, p, _ := net.SplitHostPort(iep.Address)
+ ports[p] = struct{}{}
+ }
+ r := []string{}
+ for p, _ := range ports {
+ r = append(r, p)
+ }
+ return r
+}
+
+func TestRoaming(t *testing.T) {
+ sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+ defer sm.Shutdown()
+ ns := tnaming.NewSimpleNamespace()
+ server, err := testInternalNewServer(testContext(), sm, ns)
+ defer server.Stop()
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ publisher := config.NewPublisher()
+ roaming := make(chan config.Setting)
+ stop, err := publisher.CreateStream("TestRoaming", "TestRoaming", roaming)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() { publisher.Shutdown(); <-stop }()
+
+ ipv4And6 := func(network string, addrs []ipc.Address) ([]ipc.Address, error) {
+ accessible := netstate.AddrList(addrs)
+ ipv4 := accessible.Filter(netstate.IsUnicastIPv4)
+ ipv6 := accessible.Filter(netstate.IsUnicastIPv6)
+ return append(ipv4, ipv6...), nil
+ }
+ spec := ipc.ListenSpec{
+ Addrs: ipc.ListenAddrs{
+ {"tcp", "*:0"},
+ {"tcp", ":0"},
+ {"tcp", ":0"},
+ },
+ StreamName: "TestRoaming",
+ StreamPublisher: publisher,
+ AddressChooser: ipv4And6,
+ }
+
+ eps, err := server.Listen(spec)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(eps) == 0 {
+ t.Fatal(err)
+ }
+
+ if err = server.Serve("foo", &testServer{}, nil); err != nil {
+ t.Fatal(err)
+ }
+ if err = server.AddName("bar"); err != nil {
+ t.Fatal(err)
+ }
+
+ status := server.Status()
+ if got, want := status.Endpoints, eps; !cmpEndpoints(got, want) {
+ t.Fatalf("got %d, want %d", got, want)
+ }
+
+ if got, want := len(status.Mounts), len(eps)*2; got != want {
+ t.Fatalf("got %d, want %d", got, want)
+ }
+
+ n1 := &netstate.AddrIfc{Addr: &net.IPAddr{IP: net.ParseIP("1.1.1.1")}}
+ n2 := &netstate.AddrIfc{Addr: &net.IPAddr{IP: net.ParseIP("2.2.2.2")}}
+
+ watcher := make(chan ipc.NetworkChange, 10)
+ server.WatchNetwork(watcher)
+ defer close(watcher)
+
+ roaming <- ipc.NewAddAddrsSetting([]ipc.Address{n1, n2})
+
+ waitForChange := func() *ipc.NetworkChange {
+ vlog.Infof("Waiting on %p", watcher)
+ select {
+ case c := <-watcher:
+ return &c
+ case <-time.After(time.Minute):
+ t.Fatalf("timedout: %s", loc(1))
+ }
+ return nil
+ }
+
+ // We expect 4 changes, one for each IP per usable listen spec addr.
+ change := waitForChange()
+ if got, want := len(change.Changed), 4; got != want {
+ t.Fatalf("got %d, want %d", got, want)
+ }
+
+ nepsA := make([]naming.Endpoint, len(eps))
+ copy(nepsA, eps)
+ for _, p := range getUniqPorts(eps) {
+ nep1 := updateHost(eps[0], net.JoinHostPort("1.1.1.1", p))
+ nep2 := updateHost(eps[0], net.JoinHostPort("2.2.2.2", p))
+ nepsA = append(nepsA, []naming.Endpoint{nep1, nep2}...)
+ }
+
+ status = server.Status()
+ if got, want := status.Endpoints, nepsA; !cmpEndpoints(got, want) {
+ t.Fatalf("got %v, want %v [%d, %d]", got, want, len(got), len(want))
+ }
+
+ if got, want := len(status.Mounts), len(nepsA)*2; got != want {
+ t.Fatalf("got %d, want %d", got, want)
+ }
+ if got, want := len(status.Mounts.Servers()), len(nepsA); got != want {
+ t.Fatalf("got %d, want %d", got, want)
+ }
+
+ roaming <- ipc.NewRmAddrsSetting([]ipc.Address{n1})
+
+ // We expect 2 changes, one for each usable listen spec addr.
+ change = waitForChange()
+ if got, want := len(change.Changed), 2; got != want {
+ t.Fatalf("got %d, want %d", got, want)
+ }
+
+ nepsR := make([]naming.Endpoint, len(eps))
+ copy(nepsR, eps)
+ for _, p := range getUniqPorts(eps) {
+ nep2 := updateHost(eps[0], net.JoinHostPort("2.2.2.2", p))
+ nepsR = append(nepsR, nep2)
+ }
+
+ status = server.Status()
+ if got, want := status.Endpoints, nepsR; !cmpEndpoints(got, want) {
+ t.Fatalf("got %v, want %v [%d, %d]", got, want, len(got), len(want))
+ }
+
+ // Remove all addresses to mimic losing all connectivity.
+ roaming <- ipc.NewRmAddrsSetting(getIPAddrs(nepsR))
+
+ // We expect changes for all of the current endpoints
+ change = waitForChange()
+ if got, want := len(change.Changed), len(nepsR); got != want {
+ t.Fatalf("got %d, want %d", got, want)
+ }
+
+ status = server.Status()
+ if got, want := len(status.Mounts), 0; got != want {
+ t.Fatalf("got %d, want %d", got, want)
+ }
+
+ roaming <- ipc.NewAddAddrsSetting([]ipc.Address{n1})
+ // We expect 2 changes, one for each usable listen spec addr.
+ change = waitForChange()
+ if got, want := len(change.Changed), 2; got != want {
+ t.Fatalf("got %d, want %d", got, want)
+ }
+}
+
+func TestWatcherDeadlock(t *testing.T) {
+ sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+ defer sm.Shutdown()
+ ns := tnaming.NewSimpleNamespace()
+ server, err := testInternalNewServer(testContext(), sm, ns)
+ defer server.Stop()
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ publisher := config.NewPublisher()
+ roaming := make(chan config.Setting)
+ stop, err := publisher.CreateStream("TestWatcherDeadlock", "TestWatcherDeadlock", roaming)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() { publisher.Shutdown(); <-stop }()
+
+ spec := ipc.ListenSpec{
+ Addrs: ipc.ListenAddrs{
+ {"tcp", ":0"},
+ },
+ StreamName: "TestWatcherDeadlock",
+ StreamPublisher: publisher,
+ }
+ eps, err := server.Listen(spec)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err = server.Serve("foo", &testServer{}, nil); err != nil {
+ t.Fatal(err)
+ }
+
+ // Set a watcher that we never read from - the intent is to make sure
+ // that the listener still listens to changes even though there is no
+ // goroutine to read from the watcher channel.
+ watcher := make(chan ipc.NetworkChange, 0)
+ server.WatchNetwork(watcher)
+ defer close(watcher)
+
+ // Remove all addresses to mimic losing all connectivity.
+ roaming <- ipc.NewRmAddrsSetting(getIPAddrs(eps))
+
+ // Add in two new addresses
+ n1 := &netstate.AddrIfc{Addr: &net.IPAddr{IP: net.ParseIP("1.1.1.1")}}
+ n2 := &netstate.AddrIfc{Addr: &net.IPAddr{IP: net.ParseIP("2.2.2.2")}}
+ roaming <- ipc.NewAddAddrsSetting([]ipc.Address{n1, n2})
+
+ neps := make([]naming.Endpoint, 0, len(eps))
+ for _, p := range getUniqPorts(eps) {
+ nep1 := updateHost(eps[0], net.JoinHostPort("1.1.1.1", p))
+ nep2 := updateHost(eps[0], net.JoinHostPort("2.2.2.2", p))
+ neps = append(neps, []naming.Endpoint{nep1, nep2}...)
+ }
+ then := time.Now()
+ for {
+ status := server.Status()
+ if got, want := status.Endpoints, neps; cmpEndpoints(got, want) {
+ break
+ }
+ time.Sleep(100 * time.Millisecond)
+ if time.Now().Sub(then) > time.Minute {
+ t.Fatalf("timed out waiting for changes to take effect")
+ }
+ }
+}
diff --git a/profiles/internal/ipc/signature_test.go b/profiles/internal/ipc/signature_test.go
new file mode 100644
index 0000000..5d35fd6
--- /dev/null
+++ b/profiles/internal/ipc/signature_test.go
@@ -0,0 +1,156 @@
+package ipc_test
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/ipc/reserved"
+ "v.io/v23/naming"
+ "v.io/v23/vdl"
+ "v.io/v23/vdlroot/signature"
+
+ "v.io/x/ref/lib/testutil"
+ _ "v.io/x/ref/profiles"
+)
+
+func startSigServer(ctx *context.T, sig sigImpl) (string, func(), error) {
+ server, err := v23.NewServer(ctx)
+ if err != nil {
+ return "", nil, fmt.Errorf("failed to start sig server: %v", err)
+ }
+ eps, err := server.Listen(v23.GetListenSpec(ctx))
+ if err != nil {
+ return "", nil, fmt.Errorf("failed to listen: %v", err)
+ }
+ if err := server.Serve("", sig, nil); err != nil {
+ return "", nil, err
+ }
+ return eps[0].String(), func() { server.Stop() }, nil
+}
+
+type sigImpl struct{}
+
+func (sigImpl) NonStreaming0(ipc.ServerCall) error { panic("X") }
+func (sigImpl) NonStreaming1(_ ipc.ServerCall, _ string) (int64, error) { panic("X") }
+func (sigImpl) Streaming0(_ *streamStringBool) error { panic("X") }
+func (sigImpl) Streaming1(_ *streamStringBool, _ int64) (float64, error) { panic("X") }
+
+type streamStringBool struct{ ipc.StreamServerCall }
+
+func (*streamStringBool) Init(ipc.StreamServerCall) { panic("X") }
+func (*streamStringBool) RecvStream() interface {
+ Advance() bool
+ Value() string
+ Err() error
+} {
+ panic("X")
+}
+func (*streamStringBool) SendStream() interface {
+ Send(_ bool) error
+} {
+ panic("X")
+}
+
+func TestMethodSignature(t *testing.T) {
+ ctx, shutdown := testutil.InitForTest()
+ defer shutdown()
+ ep, stop, err := startSigServer(ctx, sigImpl{})
+ if err != nil {
+ t.Fatalf("startSigServer: %v", err)
+ }
+ defer stop()
+ name := naming.JoinAddressName(ep, "")
+
+ tests := []struct {
+ Method string
+ Want signature.Method
+ }{
+ {"NonStreaming0", signature.Method{
+ Name: "NonStreaming0",
+ }},
+ {"NonStreaming1", signature.Method{
+ Name: "NonStreaming1",
+ InArgs: []signature.Arg{{Type: vdl.StringType}},
+ OutArgs: []signature.Arg{{Type: vdl.Int64Type}},
+ }},
+ {"Streaming0", signature.Method{
+ Name: "Streaming0",
+ InStream: &signature.Arg{Type: vdl.StringType},
+ OutStream: &signature.Arg{Type: vdl.BoolType},
+ }},
+ {"Streaming1", signature.Method{
+ Name: "Streaming1",
+ InArgs: []signature.Arg{{Type: vdl.Int64Type}},
+ OutArgs: []signature.Arg{{Type: vdl.Float64Type}},
+ InStream: &signature.Arg{Type: vdl.StringType},
+ OutStream: &signature.Arg{Type: vdl.BoolType},
+ }},
+ }
+ for _, test := range tests {
+ sig, err := reserved.MethodSignature(ctx, name, test.Method)
+ if err != nil {
+ t.Errorf("call failed: %v", err)
+ }
+ if got, want := sig, test.Want; !reflect.DeepEqual(got, want) {
+ t.Errorf("%s got %#v, want %#v", test.Method, got, want)
+ }
+ }
+}
+
+func TestSignature(t *testing.T) {
+ ctx, shutdown := testutil.InitForTest()
+ defer shutdown()
+ ep, stop, err := startSigServer(ctx, sigImpl{})
+ if err != nil {
+ t.Fatalf("startSigServer: %v", err)
+ }
+ defer stop()
+ name := naming.JoinAddressName(ep, "")
+ sig, err := reserved.Signature(ctx, name)
+ if err != nil {
+ t.Errorf("call failed: %v", err)
+ }
+ if got, want := len(sig), 2; got != want {
+ t.Fatalf("got sig %#v len %d, want %d", sig, got, want)
+ }
+ // Check expected methods.
+ methods := signature.Interface{
+ Doc: "The empty interface contains methods not attached to any interface.",
+ Methods: []signature.Method{
+ {
+ Name: "NonStreaming0",
+ },
+ {
+ Name: "NonStreaming1",
+ InArgs: []signature.Arg{{Type: vdl.StringType}},
+ OutArgs: []signature.Arg{{Type: vdl.Int64Type}},
+ },
+ {
+ Name: "Streaming0",
+ InStream: &signature.Arg{Type: vdl.StringType},
+ OutStream: &signature.Arg{Type: vdl.BoolType},
+ },
+ {
+ Name: "Streaming1",
+ InArgs: []signature.Arg{{Type: vdl.Int64Type}},
+ OutArgs: []signature.Arg{{Type: vdl.Float64Type}},
+ InStream: &signature.Arg{Type: vdl.StringType},
+ OutStream: &signature.Arg{Type: vdl.BoolType},
+ },
+ },
+ }
+ if got, want := sig[0], methods; !reflect.DeepEqual(got, want) {
+ t.Errorf("got sig[0] %#v, want %#v", got, want)
+ }
+ // Check reserved methods.
+ if got, want := sig[1].Name, "__Reserved"; got != want {
+ t.Errorf("got sig[1].Name %q, want %q", got, want)
+ }
+ if got, want := signature.MethodNames(sig[1:2]), []string{"__Glob", "__MethodSignature", "__Signature"}; !reflect.DeepEqual(got, want) {
+ t.Fatalf("got sig[1] methods %v, want %v", got, want)
+ }
+}
diff --git a/profiles/internal/ipc/simple_test.go b/profiles/internal/ipc/simple_test.go
new file mode 100644
index 0000000..dff3463
--- /dev/null
+++ b/profiles/internal/ipc/simple_test.go
@@ -0,0 +1,122 @@
+package ipc_test
+
+import (
+ "io"
+ "testing"
+ "time"
+
+ "v.io/v23"
+ "v.io/v23/ipc"
+)
+
+type simple struct {
+ done <-chan struct{}
+}
+
+func (s *simple) Sleep(call ipc.ServerCall) error {
+ select {
+ case <-s.done:
+ case <-time.After(time.Hour):
+ }
+ return nil
+}
+
+func (s *simple) Ping(call ipc.ServerCall) (string, error) {
+ return "pong", nil
+}
+
+func (s *simple) Source(call ipc.StreamServerCall, start int) error {
+ i := start
+ backoff := 25 * time.Millisecond
+ for {
+ select {
+ case <-s.done:
+ return nil
+ case <-time.After(backoff):
+ call.Send(i)
+ i++
+ }
+ backoff *= 2
+ }
+}
+
+func (s *simple) Sink(call ipc.StreamServerCall) (int, error) {
+ i := 0
+ for {
+ if err := call.Recv(&i); err != nil {
+ if err == io.EOF {
+ return i, nil
+ }
+ return 0, err
+ }
+ }
+}
+
+func (s *simple) Inc(call ipc.StreamServerCall, inc int) (int, error) {
+ i := 0
+ for {
+ if err := call.Recv(&i); err != nil {
+ if err == io.EOF {
+ return i, nil
+ }
+ return 0, err
+ }
+ call.Send(i + inc)
+ }
+}
+
+func TestSimpleRPC(t *testing.T) {
+ ctx, shutdown := newCtx()
+ defer shutdown()
+ name, fn := initServer(t, ctx)
+ defer fn()
+
+ client := v23.GetClient(ctx)
+ call, err := client.StartCall(ctx, name, "Ping", nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ response := ""
+ if err := call.Finish(&response); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if got, want := response, "pong"; got != want {
+ t.Fatalf("got %q, want %q", got, want)
+ }
+}
+
+func TestSimpleStreaming(t *testing.T) {
+ ctx, shutdown := newCtx()
+ defer shutdown()
+ name, fn := initServer(t, ctx)
+ defer fn()
+
+ inc := 1
+ call, err := v23.GetClient(ctx).StartCall(ctx, name, "Inc", []interface{}{inc})
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ want := 10
+ for i := 0; i <= want; i++ {
+ if err := call.Send(i); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ got := -1
+ if err = call.Recv(&got); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if want := i + inc; got != want {
+ t.Fatalf("got %d, want %d")
+ }
+ }
+ call.CloseSend()
+ final := -1
+ err = call.Finish(&final)
+ if err != nil {
+ t.Errorf("unexpected error: %#v", err)
+ }
+ if got := final; got != want {
+ t.Fatalf("got %d, want %d", got, want)
+ }
+}
diff --git a/profiles/internal/ipc/sort_endpoints.go b/profiles/internal/ipc/sort_endpoints.go
new file mode 100644
index 0000000..61a3e5f
--- /dev/null
+++ b/profiles/internal/ipc/sort_endpoints.go
@@ -0,0 +1,221 @@
+package ipc
+
+import (
+ "fmt"
+ "net"
+ "sort"
+
+ "v.io/v23/naming"
+ "v.io/x/lib/vlog"
+
+ "v.io/x/ref/lib/netstate"
+ "v.io/x/ref/profiles/internal/ipc/version"
+ inaming "v.io/x/ref/profiles/internal/naming"
+)
+
+type errorAccumulator struct {
+ errs []error
+}
+
+func (e *errorAccumulator) add(err error) {
+ e.errs = append(e.errs, err)
+}
+
+func (e *errorAccumulator) failed() bool {
+ return len(e.errs) > 0
+}
+
+func (e *errorAccumulator) String() string {
+ r := ""
+ for _, err := range e.errs {
+ r += fmt.Sprintf("(%s)", err)
+ }
+ return r
+}
+
+func newErrorAccumulator() *errorAccumulator {
+ return &errorAccumulator{errs: make([]error, 0, 4)}
+}
+
+type serverLocality int
+
+const (
+ unknownNetwork serverLocality = iota
+ remoteNetwork
+ localNetwork
+)
+
+type sortableServer struct {
+ server naming.MountedServer
+ protocolRank int // larger values are preferred.
+ locality serverLocality // larger values are preferred.
+}
+
+func (s *sortableServer) String() string {
+ return fmt.Sprintf("%v", s.server)
+}
+
+type sortableServerList []sortableServer
+
+func (l sortableServerList) Len() int { return len(l) }
+func (l sortableServerList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l sortableServerList) Less(i, j int) bool {
+ if l[i].protocolRank == l[j].protocolRank {
+ return l[i].locality > l[j].locality
+ }
+ return l[i].protocolRank > l[j].protocolRank
+}
+
+func mkProtocolRankMap(list []string) map[string]int {
+ if len(list) == 0 {
+ return nil
+ }
+ m := make(map[string]int)
+ for idx, protocol := range list {
+ m[protocol] = len(list) - idx
+ }
+ return m
+}
+
+var defaultPreferredProtocolOrder = mkProtocolRankMap([]string{"unixfd", "wsh", "tcp4", "tcp", "*"})
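+
+// Illustrative note (added for exposition, not required by this change): for
+// the default list above, mkProtocolRankMap yields
+//
+//   unixfd=5, wsh=4, tcp4=3, tcp=2, "*"=1
+//
+// Higher ranks sort earlier in sortableServerList, so unixfd endpoints are
+// preferred over wsh ones, and so on.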
+
+// filterAndOrderServers returns the subset of servers that are compatible
+// with the current client, ordered by 'preference' according to the supplied
+// protocols and by 'locality' according to the supplied IP networks, as
+// follows:
+// - if the protocol parameter is non-empty, then only servers matching those
+// protocols are returned and the endpoints are ordered first by protocol
+// and then by locality within each protocol. For example, if tcp4 and unixfd
+// are requested then only servers matching tcp4 or unixfd are returned, with
+// the tcp4 ones preceding the unixfd ones.
+// - if the protocol parameter is empty, then the default protocol ordering
+// is used and, unlike the previous case, servers that don't support any of
+// the default protocols are also returned, after the ones that do.
+func filterAndOrderServers(servers []naming.MountedServer, protocols []string, ipnets []*net.IPNet) ([]naming.MountedServer, error) {
+ vlog.VI(3).Infof("filterAndOrderServers%v: %v", protocols, servers)
+ var (
+ errs = newErrorAccumulator()
+ list = make(sortableServerList, 0, len(servers))
+ protoRanks = mkProtocolRankMap(protocols)
+ )
+ if len(protoRanks) == 0 {
+ protoRanks = defaultPreferredProtocolOrder
+ }
+ for _, server := range servers {
+ name := server.Server
+ ep, err := name2endpoint(name)
+ if err != nil {
+ errs.add(fmt.Errorf("malformed endpoint %q: %v", name, err))
+ continue
+ }
+ if err = version.CheckCompatibility(ep); err != nil {
+ errs.add(fmt.Errorf("%q: %v", name, err))
+ continue
+ }
+ rank, err := protocol2rank(ep.Addr().Network(), protoRanks)
+ if err != nil {
+ errs.add(fmt.Errorf("%q: %v", name, err))
+ continue
+ }
+ list = append(list, sortableServer{
+ server: server,
+ protocolRank: rank,
+ locality: locality(ep, ipnets),
+ })
+ }
+ if len(list) == 0 {
+ return nil, fmt.Errorf("failed to find any compatible servers: %v", errs)
+ }
+ // TODO(ashankar): Don't have to use stable sorting, could
+ // just use sort.Sort. The only problem with that is the
+ // unittest.
+ sort.Stable(list)
+ // Convert to []naming.MountedServer
+ ret := make([]naming.MountedServer, len(list))
+ for idx, item := range list {
+ ret[idx] = item.server
+ }
+ return ret, nil
+}
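+
+// A minimal usage sketch (hypothetical caller, not part of this change; the
+// mount entry variable is assumed):
+//
+//   ordered, err := filterAndOrderServers(entry.Servers, []string{"tcp4", "tcp"}, ipNetworks())
+//   if err != nil {
+//       return err // no compatible servers
+//   }
+//   // ordered[0] is the most preferred server: tcp4 before tcp, and within
+//   // each protocol, servers on a local network before remote ones.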
+
+// name2endpoint returns the naming.Endpoint encoded in a name.
+func name2endpoint(name string) (naming.Endpoint, error) {
+ addr := name
+ if naming.Rooted(name) {
+ addr, _ = naming.SplitAddressName(name)
+ }
+ return inaming.NewEndpoint(addr)
+}
+
+// protocol2rank returns the "rank" of a protocol (given a map of ranks).
+// The higher the rank, the more preferable the protocol.
+func protocol2rank(protocol string, ranks map[string]int) (int, error) {
+ if r, ok := ranks[protocol]; ok {
+ return r, nil
+ }
+ // Special case: if "wsh" has a rank but "wsh4"/"wsh6" don't,
+ // then they get the same rank as "wsh". Similar for "tcp" and "ws".
+ //
+ // TODO(jhahn): We have similar protocol equivalency checks at a few places.
+ // Figure out a way for this mapping to be shared.
+ if p := protocol; p == "wsh4" || p == "wsh6" || p == "tcp4" || p == "tcp6" || p == "ws4" || p == "ws6" {
+ if r, ok := ranks[p[:len(p)-1]]; ok {
+ return r, nil
+ }
+ }
+ // "*" means that any protocol is acceptable.
+ if r, ok := ranks["*"]; ok {
+ return r, nil
+ }
+ // UnknownProtocol should be rare; it typically happens when
+ // the endpoint is described in <host>:<port> format instead of
+ // the full-fidelity description (@<version>@<protocol>@...).
+ if protocol == naming.UnknownProtocol {
+ return -1, nil
+ }
+ return 0, fmt.Errorf("undesired protocol %q", protocol)
+}
+
+// locality returns the serverLocality to use given an endpoint and the
+// set of IP networks configured on this machine.
+func locality(ep naming.Endpoint, ipnets []*net.IPNet) serverLocality {
+ if len(ipnets) < 1 {
+ return unknownNetwork // 0 IP networks, locality doesn't matter.
+ }
+ host, _, err := net.SplitHostPort(ep.Addr().String())
+ if err != nil {
+ host = ep.Addr().String()
+ }
+ ip := net.ParseIP(host)
+ if ip == nil {
+ // Not an IP address (possibly not an IP network).
+ return unknownNetwork
+ }
+ for _, ipnet := range ipnets {
+ if ipnet.Contains(ip) {
+ return localNetwork
+ }
+ }
+ return remoteNetwork
+}
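+
+// For example (an illustration based on the tests in this package, assuming
+// ipnets contains 127.0.0.0/8): an endpoint at "127.0.0.1:80" is
+// localNetwork, "74.125.69.139:80" is remoteNetwork, and a non-IP address
+// such as "example.com:80" is unknownNetwork.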
+
+// ipNetworks returns the IP networks on this machine.
+func ipNetworks() []*net.IPNet {
+ ifcs, err := netstate.GetAll()
+ if err != nil {
+ vlog.VI(5).Infof("netstate.GetAll failed: %v", err)
+ return nil
+ }
+ ret := make([]*net.IPNet, 0, len(ifcs))
+ for _, a := range ifcs {
+ _, ipnet, err := net.ParseCIDR(a.Address().String())
+ if err != nil {
+ vlog.VI(5).Infof("net.ParseCIDR(%q) failed: %v", a.Address(), err)
+ continue
+ }
+ ret = append(ret, ipnet)
+ }
+ return ret
+}
diff --git a/profiles/internal/ipc/sort_internal_test.go b/profiles/internal/ipc/sort_internal_test.go
new file mode 100644
index 0000000..cdf3bd8
--- /dev/null
+++ b/profiles/internal/ipc/sort_internal_test.go
@@ -0,0 +1,220 @@
+package ipc
+
+import (
+ "net"
+ "reflect"
+ "strings"
+ "testing"
+
+ "v.io/v23/ipc/version"
+ "v.io/v23/naming"
+)
+
+func servers2names(servers []naming.MountedServer) []string {
+ e := naming.MountEntry{Servers: servers}
+ return e.Names()
+}
+
+func TestIncompatible(t *testing.T) {
+ servers := []naming.MountedServer{}
+
+ _, err := filterAndOrderServers(servers, []string{"tcp"}, nil)
+ if err == nil || err.Error() != "failed to find any compatible servers: " {
+ t.Errorf("expected a different error: %v", err)
+ }
+
+ for _, a := range []string{"127.0.0.1", "127.0.0.2"} {
+ addr := naming.FormatEndpoint("tcp", a, version.IPCVersionRange{100, 200})
+ name := naming.JoinAddressName(addr, "")
+ servers = append(servers, naming.MountedServer{Server: name})
+ }
+
+ _, err = filterAndOrderServers(servers, []string{"tcp"}, nil)
+ if err == nil || (!strings.HasPrefix(err.Error(), "failed to find any compatible servers:") && !strings.Contains(err.Error(), "No compatible IPC versions available")) {
+ t.Errorf("expected a different error to: %v", err)
+ }
+
+ for _, a := range []string{"127.0.0.3", "127.0.0.4"} {
+ name := naming.JoinAddressName(naming.FormatEndpoint("tcp", a), "")
+ servers = append(servers, naming.MountedServer{Server: name})
+ }
+
+ _, err = filterAndOrderServers(servers, []string{"foobar"}, nil)
+ if err == nil || !strings.HasSuffix(err.Error(), "undesired protocol \"tcp\")") {
+ t.Errorf("expected a different error to: %v", err)
+ }
+
+}
+
+func TestOrderingByProtocol(t *testing.T) {
+ servers := []naming.MountedServer{}
+ _, ipnet, _ := net.ParseCIDR("127.0.0.0/8")
+ ipnets := []*net.IPNet{ipnet}
+
+ for _, a := range []string{"127.0.0.3", "127.0.0.4"} {
+ name := naming.JoinAddressName(naming.FormatEndpoint("tcp", a), "")
+ servers = append(servers, naming.MountedServer{Server: name})
+ }
+ for _, a := range []string{"127.0.0.1", "127.0.0.2"} {
+ name := naming.JoinAddressName(naming.FormatEndpoint("tcp4", a), "")
+ servers = append(servers, naming.MountedServer{Server: name})
+ }
+ for _, a := range []string{"127.0.0.10", "127.0.0.11"} {
+ name := naming.JoinAddressName(naming.FormatEndpoint("foobar", a), "")
+ servers = append(servers, naming.MountedServer{Server: name})
+ }
+ for _, a := range []string{"127.0.0.7", "127.0.0.8"} {
+ name := naming.JoinAddressName(naming.FormatEndpoint("tcp6", a), "")
+ servers = append(servers, naming.MountedServer{Server: name})
+ }
+ if _, err := filterAndOrderServers(servers, []string{"batman"}, ipnets); err == nil {
+ t.Fatalf("expected an error")
+ }
+
+ // Add a server with naming.UnknownProtocol. This typically happens
+ // when the endpoint is in <host>:<port> format. Currently, the sorting
+ // is setup to always allow UnknownProtocol, but put it in the end.
+ // May want to revisit this choice, but for now the test captures what
+ // the current state of the code intends.
+ servers = append(servers, naming.MountedServer{Server: "127.0.0.12:14141"})
+
+ // Just foobar and tcp4
+ want := []string{
+ "/@2@foobar@127.0.0.10@@@@@",
+ "/@2@foobar@127.0.0.11@@@@@",
+ "/@2@tcp4@127.0.0.1@@@@@",
+ "/@2@tcp4@127.0.0.2@@@@@",
+ "/127.0.0.12:14141",
+ }
+ result, err := filterAndOrderServers(servers, []string{"foobar", "tcp4"}, ipnets)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if got := servers2names(result); !reflect.DeepEqual(got, want) {
+ t.Errorf("got: %v, want %v", got, want)
+ }
+
+ // Everything, since we didn't specify a protocol, but ordered by
+ // the internal metric - see defaultPreferredProtocolOrder.
+ // The order will be the default preferred order for protocols, the
+ // original ordering within each protocol, with protocols that
+ // are not in the default ordering list at the end.
+ want = []string{
+ "/@2@tcp4@127.0.0.1@@@@@",
+ "/@2@tcp4@127.0.0.2@@@@@",
+ "/@2@tcp@127.0.0.3@@@@@",
+ "/@2@tcp@127.0.0.4@@@@@",
+ "/@2@tcp6@127.0.0.7@@@@@",
+ "/@2@tcp6@127.0.0.8@@@@@",
+ "/@2@foobar@127.0.0.10@@@@@",
+ "/@2@foobar@127.0.0.11@@@@@",
+ "/127.0.0.12:14141",
+ }
+ if result, err = filterAndOrderServers(servers, nil, ipnets); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if got := servers2names(result); !reflect.DeepEqual(got, want) {
+ t.Errorf("got: %v, want %v", got, want)
+ }
+
+ if result, err = filterAndOrderServers(servers, []string{}, ipnets); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if got := servers2names(result); !reflect.DeepEqual(got, want) {
+ t.Errorf("got: %v, want %v", got, want)
+ }
+
+ // Just "tcp" implies tcp4 and tcp6 as well.
+ want = []string{
+ "/@2@tcp@127.0.0.3@@@@@",
+ "/@2@tcp@127.0.0.4@@@@@",
+ "/@2@tcp4@127.0.0.1@@@@@",
+ "/@2@tcp4@127.0.0.2@@@@@",
+ "/@2@tcp6@127.0.0.7@@@@@",
+ "/@2@tcp6@127.0.0.8@@@@@",
+ "/127.0.0.12:14141",
+ }
+ if result, err = filterAndOrderServers(servers, []string{"tcp"}, ipnets); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if got := servers2names(result); !reflect.DeepEqual(got, want) {
+ t.Errorf("got: %v, want %v", got, want)
+ }
+
+ // Ask for all protocols, with no ordering, except for locality
+ want = []string{
+ "/@2@tcp@127.0.0.3@@@@@",
+ "/@2@tcp@127.0.0.1@@@@@",
+ "/@2@tcp@74.125.69.139@@@@@",
+ "/@2@tcp@192.168.1.10@@@@@",
+ "/@2@tcp@74.125.142.83@@@@@",
+ "/127.0.0.12:14141",
+ "/@2@foobar@127.0.0.10@@@@@",
+ "/@2@foobar@127.0.0.11@@@@@",
+ }
+ servers = []naming.MountedServer{}
+ // naming.UnknownProtocol
+ servers = append(servers, naming.MountedServer{Server: "127.0.0.12:14141"})
+ for _, a := range []string{"74.125.69.139", "127.0.0.3", "127.0.0.1", "192.168.1.10", "74.125.142.83"} {
+ name := naming.JoinAddressName(naming.FormatEndpoint("tcp", a), "")
+ servers = append(servers, naming.MountedServer{Server: name})
+ }
+ for _, a := range []string{"127.0.0.10", "127.0.0.11"} {
+ name := naming.JoinAddressName(naming.FormatEndpoint("foobar", a), "")
+ servers = append(servers, naming.MountedServer{Server: name})
+ }
+ if result, err = filterAndOrderServers(servers, []string{}, ipnets); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if got := servers2names(result); !reflect.DeepEqual(got, want) {
+ t.Errorf("got: %v, want %v", got, want)
+ }
+}
+
+func TestOrderingByLocality(t *testing.T) {
+ servers := []naming.MountedServer{}
+ _, ipnet, _ := net.ParseCIDR("127.0.0.0/8")
+ ipnets := []*net.IPNet{ipnet}
+
+ for _, a := range []string{"74.125.69.139", "127.0.0.3", "127.0.0.1", "192.168.1.10", "74.125.142.83"} {
+ name := naming.JoinAddressName(naming.FormatEndpoint("tcp", a), "")
+ servers = append(servers, naming.MountedServer{Server: name})
+ }
+ result, err := filterAndOrderServers(servers, []string{"tcp"}, ipnets)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ want := []string{
+ "/@2@tcp@127.0.0.3@@@@@",
+ "/@2@tcp@127.0.0.1@@@@@",
+ "/@2@tcp@74.125.69.139@@@@@",
+ "/@2@tcp@192.168.1.10@@@@@",
+ "/@2@tcp@74.125.142.83@@@@@",
+ }
+ if got := servers2names(result); !reflect.DeepEqual(got, want) {
+ t.Errorf("got: %v, want %v", got, want)
+ }
+ for _, a := range []string{"74.125.69.139", "127.0.0.3:123", "127.0.0.1", "192.168.1.10", "74.125.142.83"} {
+ name := naming.JoinAddressName(naming.FormatEndpoint("ws", a), "")
+ servers = append(servers, naming.MountedServer{Server: name})
+ }
+
+ if result, err = filterAndOrderServers(servers, []string{"ws", "tcp"}, ipnets); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ want = []string{
+ "/@2@ws@127.0.0.3:123@@@@@",
+ "/@2@ws@127.0.0.1@@@@@",
+ "/@2@ws@74.125.69.139@@@@@",
+ "/@2@ws@192.168.1.10@@@@@",
+ "/@2@ws@74.125.142.83@@@@@",
+ "/@2@tcp@127.0.0.3@@@@@",
+ "/@2@tcp@127.0.0.1@@@@@",
+ "/@2@tcp@74.125.69.139@@@@@",
+ "/@2@tcp@192.168.1.10@@@@@",
+ "/@2@tcp@74.125.142.83@@@@@",
+ }
+ if got := servers2names(result); !reflect.DeepEqual(got, want) {
+ t.Errorf("got: %v, want %v", got, want)
+ }
+}
diff --git a/profiles/internal/ipc/stats.go b/profiles/internal/ipc/stats.go
new file mode 100644
index 0000000..cbc6315
--- /dev/null
+++ b/profiles/internal/ipc/stats.go
@@ -0,0 +1,95 @@
+package ipc
+
+import (
+ "sync"
+ "time"
+
+ "v.io/x/ref/lib/stats"
+ "v.io/x/ref/lib/stats/counter"
+ "v.io/x/ref/lib/stats/histogram"
+
+ "v.io/v23/naming"
+)
+
+type ipcStats struct {
+ mu sync.RWMutex
+ prefix string
+ methods map[string]*perMethodStats
+ blessingsCacheStats *blessingsCacheStats
+}
+
+func newIPCStats(prefix string) *ipcStats {
+ return &ipcStats{
+ prefix: prefix,
+ methods: make(map[string]*perMethodStats),
+ blessingsCacheStats: newBlessingsCacheStats(prefix),
+ }
+}
+
+type perMethodStats struct {
+ latency *histogram.Histogram
+}
+
+func (s *ipcStats) stop() {
+ stats.Delete(s.prefix)
+}
+
+func (s *ipcStats) record(method string, latency time.Duration) {
+ // Try first with a read lock. This will succeed in the most common
+ // case. If it fails, try again with a write lock and create the stats
+ // objects if they are still not there.
+ s.mu.RLock()
+ m, ok := s.methods[method]
+ s.mu.RUnlock()
+ if !ok {
+ m = s.newPerMethodStats(method)
+ }
+ m.latency.Add(int64(latency / time.Millisecond))
+}
+
+func (s *ipcStats) recordBlessingCache(hit bool) {
+ s.blessingsCacheStats.incr(hit)
+}
+
+// newPerMethodStats creates a new perMethodStats object if one doesn't exist
+// already. It returns the newly created object, or the already existing one.
+func (s *ipcStats) newPerMethodStats(method string) *perMethodStats {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ m, ok := s.methods[method]
+ if !ok {
+ name := naming.Join(s.prefix, "methods", method, "latency-ms")
+ s.methods[method] = &perMethodStats{
+ latency: stats.NewHistogram(name, histogram.Options{
+ NumBuckets: 25,
+ GrowthFactor: 1,
+ SmallestBucketSize: 1,
+ MinValue: 0,
+ }),
+ }
+ m = s.methods[method]
+ }
+ return m
+}
+
+// blessingsCacheStats tracks blessings cache hits and the total calls
+// received, to determine how often the blessings cache is being used.
+type blessingsCacheStats struct {
+ callsReceived, cacheHits *counter.Counter
+}
+
+func newBlessingsCacheStats(prefix string) *blessingsCacheStats {
+ cachePrefix := naming.Join(prefix, "security", "blessings", "cache")
+ return &blessingsCacheStats{
+ callsReceived: stats.NewCounter(naming.Join(cachePrefix, "attempts")),
+ cacheHits: stats.NewCounter(naming.Join(cachePrefix, "hits")),
+ }
+}
+
+// Incr increments the cache attempt counter and the cache hit counter if hit is true.
+func (s *blessingsCacheStats) incr(hit bool) {
+ s.callsReceived.Incr(1)
+ if hit {
+ s.cacheHits.Incr(1)
+ }
+}
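+
+// A minimal usage sketch (hypothetical, for illustration only; the stats
+// prefix and method name are made up):
+//
+//   s := newIPCStats("ipc/server/routing-id/123")
+//   start := time.Now()
+//   // ... dispatch the method ...
+//   s.record("Echo", time.Since(start))
+//   s.recordBlessingCache(true) // record a blessings cache hit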
diff --git a/profiles/internal/ipc/stream/benchmark/RESULTS.txt b/profiles/internal/ipc/stream/benchmark/RESULTS.txt
new file mode 100644
index 0000000..15f55f9
--- /dev/null
+++ b/profiles/internal/ipc/stream/benchmark/RESULTS.txt
@@ -0,0 +1,82 @@
+Date: 01/30/2015
+Platform: Intel(R) Xeon(R) CPU E5-2689 0 @ 2.60GHz, 66114888KB Memory
+
+$ v23 go test -bench=. -cpu=1 -benchtime=5s \
+ v.io/x/ref/profiles/internal/ipc/stream/benchmark
+
+Benchmark_dial_VIF 500000 14292 ns/op
+--- Histogram (unit: s)
+ Count: 500000 Min: 4 Max: 16455 Avg: 13.58
+ ------------------------------------------------------------
+ [ 4, 5) 139232 27.8% 27.8% ###
+ [ 5, 6) 257818 51.6% 79.4% #####
+ [ 6, 9) 92644 18.5% 97.9% ##
+ [ 9, 15) 5963 1.2% 99.1%
+ [ 15, 28) 3162 0.6% 99.8%
+ [ 28, 53) 171 0.0% 99.8%
+ [ 53, 101) 67 0.0% 99.8%
+ [ 101, 193) 1 0.0% 99.8%
+ [ 193, 370) 0 0.0% 99.8%
+ [ 370, 708) 0 0.0% 99.8%
+ [ 708, 1354) 57 0.0% 99.8%
+ [ 1354, 2589) 152 0.0% 99.9%
+ [ 2589, 4949) 393 0.1% 99.9%
+ [ 4949, 9457) 322 0.1% 100.0%
+ [ 9457, 18069) 18 0.0% 100.0%
+ [18069, 34520) 0 0.0% 100.0%
+ [34520, inf) 0 0.0% 100.0%
+Benchmark_dial_VIF_TLS 500 12594281 ns/op
+--- Histogram (unit: ms)
+ Count: 500 Min: 12 Max: 14 Avg: 12.31
+ ------------------------------------------------------------
+ [ 12, 13) 352 70.4% 70.4% #######
+ [ 13, 14) 141 28.2% 98.6% ###
+ [ 14, inf) 7 1.4% 100.0%
+Benchmark_dial_VC_TLS 500 16116072 ns/op
+--- Histogram (unit: ms)
+ Count: 500 Min: 15 Max: 22 Avg: 15.53
+ ------------------------------------------------------------
+ [ 15, 16) 313 62.6% 62.6% ######
+ [ 16, 17) 121 24.2% 86.8% ##
+ [ 17, 18) 60 12.0% 98.8% #
+ [ 18, 19) 3 0.6% 99.4%
+ [ 19, 20) 2 0.4% 99.8%
+ [ 20, 21) 0 0.0% 99.8%
+ [ 21, 23) 1 0.2% 100.0%
+ [ 23, inf) 0 0.0% 100.0%
+Benchmark_throughput_TCP_1Conn 1000000 9197 ns/op 5566.89 MB/s
+Benchmark_throughput_TCP_2Conns 1000000 9083 ns/op 5636.56 MB/s
+Benchmark_throughput_TCP_4Conns 1000000 9855 ns/op 5194.81 MB/s
+Benchmark_throughput_TCP_8Conns 500000 12541 ns/op 4082.43 MB/s
+Benchmark_throughput_WS_1Conn 30000 206804 ns/op 247.58 MB/s
+Benchmark_throughput_WS_2Conns 30000 211842 ns/op 241.69 MB/s
+Benchmark_throughput_WS_4Conns 30000 209994 ns/op 243.82 MB/s
+Benchmark_throughput_WS_8Conns 30000 217110 ns/op 235.83 MB/s
+Benchmark_throughput_WSH_TCP_1Conn 1000000 9322 ns/op 5491.85 MB/s
+Benchmark_throughput_WSH_TCP_2Conns 1000000 9370 ns/op 5463.77 MB/s
+Benchmark_throughput_WSH_TCP_4Conns 1000000 9466 ns/op 5408.50 MB/s
+Benchmark_throughput_WSH_TCP_8Conns 500000 12526 ns/op 4087.22 MB/s
+Benchmark_throughput_WSH_WS_1Conn 30000 207833 ns/op 246.35 MB/s
+Benchmark_throughput_WSH_WS_2Conns 30000 208567 ns/op 245.48 MB/s
+Benchmark_throughput_WSH_WS_4Conns 30000 211562 ns/op 242.01 MB/s
+Benchmark_throughput_WSH_WS_8Conns 30000 216454 ns/op 236.54 MB/s
+Benchmark_throughput_Pipe_1Conn 500000 20169 ns/op 2538.54 MB/s
+Benchmark_throughput_Pipe_2Conns 500000 19935 ns/op 2568.29 MB/s
+Benchmark_throughput_Pipe_4Conns 300000 19893 ns/op 2573.76 MB/s
+Benchmark_throughput_Pipe_8Conns 1000000 20235 ns/op 2530.22 MB/s
+Benchmark_throughput_Flow_1VIF_1VC_1Flow 300000 28014 ns/op 1827.66 MB/s
+Benchmark_throughput_Flow_1VIF_1VC_2Flow 300000 27495 ns/op 1862.09 MB/s
+Benchmark_throughput_Flow_1VIF_1VC_8Flow 200000 35584 ns/op 1438.84 MB/s
+Benchmark_throughput_Flow_1VIF_2VC_2Flow 300000 27665 ns/op 1850.66 MB/s
+Benchmark_throughput_Flow_1VIF_2VC_8Flow 200000 34974 ns/op 1463.94 MB/s
+Benchmark_throughput_Flow_2VIF_4VC_8Flow 200000 37642 ns/op 1360.15 MB/s
+Benchmark_throughput_TLS_1Conn 20000 415149 ns/op 123.33 MB/s
+Benchmark_throughput_TLS_2Conns 20000 416008 ns/op 123.07 MB/s
+Benchmark_throughput_TLS_4Conns 20000 421083 ns/op 121.59 MB/s
+Benchmark_throughput_TLS_8Conns 20000 423079 ns/op 121.02 MB/s
+Benchmark_throughput_Flow_1VIF_1VC_1FlowTLS 20000 466212 ns/op 109.82 MB/s
+Benchmark_throughput_Flow_1VIF_1VC_2FlowTLS 20000 466104 ns/op 109.85 MB/s
+Benchmark_throughput_Flow_1VIF_1VC_8FlowTLS 20000 476604 ns/op 107.43 MB/s
+Benchmark_throughput_Flow_1VIF_2VC_2FlowTLS 20000 466818 ns/op 109.68 MB/s
+Benchmark_throughput_Flow_1VIF_2VC_8FlowTLS 20000 477094 ns/op 107.32 MB/s
+Benchmark_throughput_Flow_2VIF_4VC_8FlowTLS 20000 476370 ns/op 107.48 MB/s
diff --git a/profiles/internal/ipc/stream/benchmark/benchmark_test.go b/profiles/internal/ipc/stream/benchmark/benchmark_test.go
new file mode 100644
index 0000000..4b62a27
--- /dev/null
+++ b/profiles/internal/ipc/stream/benchmark/benchmark_test.go
@@ -0,0 +1,17 @@
+package benchmark
+
+import (
+ "os"
+ "testing"
+
+ "v.io/x/ref/lib/testutil/benchmark"
+)
+
+// A single empty test to avoid:
+// testing: warning: no tests to run
+// from showing up when running benchmarks in this package via "go test"
+func TestNoOp(t *testing.T) {}
+
+func TestMain(m *testing.M) {
+ os.Exit(benchmark.RunTestMain(m))
+}
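+
+// To reproduce the numbers in RESULTS.txt, the benchmarks in this package
+// were run as shown in the RESULTS.txt header:
+//
+//   v23 go test -bench=. -cpu=1 -benchtime=5s \
+//     v.io/x/ref/profiles/internal/ipc/stream/benchmark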
diff --git a/profiles/internal/ipc/stream/benchmark/dial_test.go b/profiles/internal/ipc/stream/benchmark/dial_test.go
new file mode 100644
index 0000000..a591009
--- /dev/null
+++ b/profiles/internal/ipc/stream/benchmark/dial_test.go
@@ -0,0 +1,10 @@
+package benchmark
+
+import "testing"
+
+func Benchmark_dial_VIF(b *testing.B) { benchmarkDialVIF(b, securityNone) }
+func Benchmark_dial_VIF_TLS(b *testing.B) { benchmarkDialVIF(b, securityTLS) }
+
+// Note: We don't benchmark non-TLS VC dial for now since it doesn't wait for
+// an ack from the server after sending "OpenVC".
+func Benchmark_dial_VC_TLS(b *testing.B) { benchmarkDialVC(b, securityTLS) }
diff --git a/profiles/internal/ipc/stream/benchmark/dial_vc.go b/profiles/internal/ipc/stream/benchmark/dial_vc.go
new file mode 100644
index 0000000..68cfdd5
--- /dev/null
+++ b/profiles/internal/ipc/stream/benchmark/dial_vc.go
@@ -0,0 +1,54 @@
+package benchmark
+
+import (
+ "testing"
+ "time"
+
+ "v.io/x/ref/lib/testutil/benchmark"
+ "v.io/x/ref/profiles/internal/ipc/stream/manager"
+ _ "v.io/x/ref/profiles/static"
+
+ "v.io/v23/naming"
+ "v.io/v23/options"
+)
+
+// benchmarkDialVC measures VC creation time over the underlying VIF.
+func benchmarkDialVC(b *testing.B, mode options.VCSecurityLevel) {
+ stats := benchmark.AddStats(b, 16)
+
+ server := manager.InternalNew(naming.FixedRoutingID(0x5))
+ client := manager.InternalNew(naming.FixedRoutingID(0xc))
+
+ _, ep, err := server.Listen("tcp", "127.0.0.1:0", mode)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ // Warmup to create the underlying VIF.
+ _, err = client.Dial(ep, mode)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ b.ResetTimer() // Exclude setup time from measurement.
+
+ for i := 0; i < b.N; i++ {
+ b.StartTimer()
+ start := time.Now()
+
+ _, err := client.Dial(ep, mode)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ duration := time.Since(start)
+ b.StopTimer()
+
+ stats.Add(duration)
+
+ client.ShutdownEndpoint(ep)
+ }
+
+ client.Shutdown()
+ server.Shutdown()
+}
diff --git a/profiles/internal/ipc/stream/benchmark/dial_vif.go b/profiles/internal/ipc/stream/benchmark/dial_vif.go
new file mode 100644
index 0000000..6bd7d7d
--- /dev/null
+++ b/profiles/internal/ipc/stream/benchmark/dial_vif.go
@@ -0,0 +1,46 @@
+package benchmark
+
+import (
+ "net"
+ "testing"
+ "time"
+
+ "v.io/x/ref/lib/testutil/benchmark"
+ "v.io/x/ref/profiles/internal/ipc/stream/vif"
+
+ "v.io/v23/naming"
+ "v.io/v23/options"
+)
+
+// benchmarkDialVIF measures VIF creation time over the underlying net connection.
+func benchmarkDialVIF(b *testing.B, mode options.VCSecurityLevel) {
+ stats := benchmark.AddStats(b, 16)
+
+ b.ResetTimer() // Exclude setup time from measurement.
+
+ for i := 0; i < b.N; i++ {
+ b.StopTimer()
+ nc, ns := net.Pipe()
+
+ server, err := vif.InternalNewAcceptedVIF(ns, naming.FixedRoutingID(0x5), nil, mode)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ b.StartTimer()
+ start := time.Now()
+
+ client, err := vif.InternalNewDialedVIF(nc, naming.FixedRoutingID(0xc), nil, mode)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ duration := time.Since(start)
+ b.StopTimer()
+
+ stats.Add(duration)
+
+ client.Close()
+ server.Close()
+ }
+}
diff --git a/profiles/internal/ipc/stream/benchmark/doc.go b/profiles/internal/ipc/stream/benchmark/doc.go
new file mode 100644
index 0000000..b7f2bd4
--- /dev/null
+++ b/profiles/internal/ipc/stream/benchmark/doc.go
@@ -0,0 +1,7 @@
+// Package benchmark implements benchmarks that compare the
+// v.io/x/ref/profiles/internal/ipc/stream implementation with raw TCP
+// connections and/or pipes.
+//
+// Sample usage:
+//   go test v.io/x/ref/profiles/internal/ipc/stream/benchmark -bench=.
+package benchmark
diff --git a/profiles/internal/ipc/stream/benchmark/throughput.go b/profiles/internal/ipc/stream/benchmark/throughput.go
new file mode 100644
index 0000000..97df8e3
--- /dev/null
+++ b/profiles/internal/ipc/stream/benchmark/throughput.go
@@ -0,0 +1,69 @@
+package benchmark
+
+import (
+ "crypto/rand"
+ "io"
+ "sync"
+ "testing"
+)
+
+const (
+ // Number of bytes to read/write
+ throughputBlockSize = 50 << 10 // 50 KB
+)
+
+type throughputTester struct {
+ b *testing.B
+ writers []io.WriteCloser
+ readers []io.ReadCloser
+
+ data []byte
+ pending sync.WaitGroup
+}
+
+func (t *throughputTester) Run() {
+ t.pending.Add(len(t.writers) + len(t.readers))
+ iters := t.b.N / len(t.writers)
+ t.data = make([]byte, throughputBlockSize)
+ if n, err := rand.Read(t.data); n != len(t.data) || err != nil {
+ t.b.Fatalf("Failed to fill write buffer with data: (%d, %v)", n, err)
+ }
+ t.b.ResetTimer()
+ for _, w := range t.writers {
+ go t.writeLoop(w, iters)
+ }
+ for _, r := range t.readers {
+ go t.readLoop(r)
+ }
+ t.pending.Wait()
+}
+
+func (t *throughputTester) writeLoop(w io.WriteCloser, N int) {
+ defer t.pending.Done()
+ defer w.Close()
+ size := len(t.data)
+ t.b.SetBytes(int64(size))
+ for i := 0; i < N; i++ {
+ if n, err := w.Write(t.data); err != nil || n != size {
+ t.b.Fatalf("Write error: %v", err)
+ return
+ }
+ }
+}
+
+func (t *throughputTester) readLoop(r io.ReadCloser) {
+ defer t.pending.Done()
+ defer r.Close()
+ var buf [throughputBlockSize]byte
+ total := 0
+ for {
+ n, err := r.Read(buf[:])
+ if err != nil {
+ if err != io.EOF {
+ t.b.Errorf("Read returned (%d, %v)", n, err)
+ }
+ break
+ }
+ total += n
+ }
+}
diff --git a/profiles/internal/ipc/stream/benchmark/throughput_flow.go b/profiles/internal/ipc/stream/benchmark/throughput_flow.go
new file mode 100644
index 0000000..e0191d0
--- /dev/null
+++ b/profiles/internal/ipc/stream/benchmark/throughput_flow.go
@@ -0,0 +1,108 @@
+package benchmark
+
+import (
+ "io"
+ "testing"
+
+ "v.io/x/ref/profiles/internal/ipc/stream/manager"
+
+ "v.io/v23/naming"
+ "v.io/v23/options"
+ "v.io/x/ref/profiles/internal/ipc/stream"
+)
+
+const (
+ // Shorthands
+ securityNone = options.VCSecurityNone
+ securityTLS = options.VCSecurityConfidential
+)
+
+type listener struct {
+ ln stream.Listener
+ ep naming.Endpoint
+}
+
+// createListeners returns N (stream.Listener, naming.Endpoint) pairs, such
+// that dialing each of the endpoints via stream.Manager.Dial will end up
+// creating a new VIF.
+func createListeners(mode options.VCSecurityLevel, m stream.Manager, N int) (servers []listener, err error) {
+ for i := 0; i < N; i++ {
+ var l listener
+ if l.ln, l.ep, err = m.Listen("tcp", "127.0.0.1:0", mode); err != nil {
+ return
+ }
+ servers = append(servers, l)
+ }
+ return
+}
+
+func benchmarkFlow(b *testing.B, mode options.VCSecurityLevel, nVIFs, nVCsPerVIF, nFlowsPerVC int) {
+ client := manager.InternalNew(naming.FixedRoutingID(0xcccccccc))
+ server := manager.InternalNew(naming.FixedRoutingID(0x55555555))
+
+ lns, err := createListeners(mode, server, nVIFs)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ nFlows := nVIFs * nVCsPerVIF * nFlowsPerVC
+ rchan := make(chan io.ReadCloser, nFlows)
+ wchan := make(chan io.WriteCloser, nFlows)
+
+ go func() {
+ defer close(wchan)
+ for i := 0; i < nVIFs; i++ {
+ ep := lns[i].ep
+ for j := 0; j < nVCsPerVIF; j++ {
+ vc, err := client.Dial(ep, mode)
+ if err != nil {
+ b.Error(err)
+ return
+ }
+ for k := 0; k < nFlowsPerVC; k++ {
+ flow, err := vc.Connect()
+ if err != nil {
+ b.Error(err)
+ return
+ }
+ // Flows are "Accepted" by the remote
+ // end only on the first Write.
+ if _, err := flow.Write([]byte("hello")); err != nil {
+ b.Error(err)
+ return
+ }
+ wchan <- flow
+ }
+ }
+ }
+ }()
+
+ go func() {
+ defer close(rchan)
+ for i := 0; i < nVIFs; i++ {
+ ln := lns[i].ln
+ nFlowsPerVIF := nVCsPerVIF * nFlowsPerVC
+ for j := 0; j < nFlowsPerVIF; j++ {
+ flow, err := ln.Accept()
+ if err != nil {
+ b.Error(err)
+ return
+ }
+ rchan <- flow
+ }
+ }
+ }()
+
+ var readers []io.ReadCloser
+ for r := range rchan {
+ readers = append(readers, r)
+ }
+ var writers []io.WriteCloser
+ for w := range wchan {
+ writers = append(writers, w)
+ }
+ if b.Failed() {
+ return
+ }
+ (&throughputTester{b: b, readers: readers, writers: writers}).Run()
+}
diff --git a/profiles/internal/ipc/stream/benchmark/throughput_pipe.go b/profiles/internal/ipc/stream/benchmark/throughput_pipe.go
new file mode 100644
index 0000000..f1df82a
--- /dev/null
+++ b/profiles/internal/ipc/stream/benchmark/throughput_pipe.go
@@ -0,0 +1,28 @@
+package benchmark
+
+import (
+ "io"
+ "os"
+ "testing"
+)
+
+// benchmarkPipe measures throughput over nPipes os.Pipe pairs, with one
+// goroutine writing and one reading on each pipe.
+func benchmarkPipe(b *testing.B, nPipes int) {
+ readers := make([]io.ReadCloser, nPipes)
+ writers := make([]io.WriteCloser, nPipes)
+ var err error
+ for i := 0; i < nPipes; i++ {
+ // Use os.Pipe and NOT net.Pipe.
+ // The latter (based on io.Pipe) doesn't really do any I/O
+ // on Write; it just manipulates pointers (the slice) and
+ // thus isn't useful for benchmarking since that
+ // implementation is excessively cache friendly.
+ readers[i], writers[i], err = os.Pipe()
+ if err != nil {
+ b.Fatalf("Failed to create pipe #%d: %v", i, err)
+ return
+ }
+ }
+ (&throughputTester{b: b, readers: readers, writers: writers}).Run()
+}
diff --git a/profiles/internal/ipc/stream/benchmark/throughput_tcp.go b/profiles/internal/ipc/stream/benchmark/throughput_tcp.go
new file mode 100644
index 0000000..181c9cb
--- /dev/null
+++ b/profiles/internal/ipc/stream/benchmark/throughput_tcp.go
@@ -0,0 +1,58 @@
+package benchmark
+
+import (
+ "io"
+ "net"
+ "testing"
+)
+
+// benchmarkTCP sets up nConns TCP connections and measures throughput.
+func benchmarkTCP(b *testing.B, nConns int) {
+ rchan := make(chan net.Conn, nConns)
+ wchan := make(chan net.Conn, nConns)
+ ln, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ b.Fatalf("net.Listen failed: %v", err)
+ return
+ }
+ defer ln.Close()
+ // One goroutine to dial nConns connections.
+ go func() {
+ for i := 0; i < nConns; i++ {
+ conn, err := net.Dial("tcp", ln.Addr().String())
+ if err != nil {
+ b.Fatalf("net.Dial(%q, %q) failed: %v", "tcp", ln.Addr(), err)
+ wchan <- nil
+ return
+ }
+ wchan <- conn
+ }
+ close(wchan)
+ }()
+ // One goroutine to accept nConns connections.
+ go func() {
+ for i := 0; i < nConns; i++ {
+ conn, err := ln.Accept()
+ if err != nil {
+ b.Fatalf("Accept failed: %v", err)
+ rchan <- nil
+ return
+ }
+ rchan <- conn
+ }
+ close(rchan)
+ }()
+
+ var readers []io.ReadCloser
+ var writers []io.WriteCloser
+ for r := range rchan {
+ readers = append(readers, r)
+ }
+ for w := range wchan {
+ writers = append(writers, w)
+ }
+ if b.Failed() {
+ return
+ }
+ (&throughputTester{b: b, readers: readers, writers: writers}).Run()
+}
diff --git a/profiles/internal/ipc/stream/benchmark/throughput_test.go b/profiles/internal/ipc/stream/benchmark/throughput_test.go
new file mode 100644
index 0000000..f97b6e9
--- /dev/null
+++ b/profiles/internal/ipc/stream/benchmark/throughput_test.go
@@ -0,0 +1,51 @@
+package benchmark
+
+import "testing"
+
+func Benchmark_throughput_TCP_1Conn(b *testing.B) { benchmarkTCP(b, 1) }
+func Benchmark_throughput_TCP_2Conns(b *testing.B) { benchmarkTCP(b, 2) }
+func Benchmark_throughput_TCP_4Conns(b *testing.B) { benchmarkTCP(b, 4) }
+func Benchmark_throughput_TCP_8Conns(b *testing.B) { benchmarkTCP(b, 8) }
+
+func Benchmark_throughput_WS_1Conn(b *testing.B) { benchmarkWS(b, 1) }
+func Benchmark_throughput_WS_2Conns(b *testing.B) { benchmarkWS(b, 2) }
+func Benchmark_throughput_WS_4Conns(b *testing.B) { benchmarkWS(b, 4) }
+func Benchmark_throughput_WS_8Conns(b *testing.B) { benchmarkWS(b, 8) }
+
+func Benchmark_throughput_WSH_TCP_1Conn(b *testing.B) { benchmarkWSH(b, "tcp", 1) }
+func Benchmark_throughput_WSH_TCP_2Conns(b *testing.B) { benchmarkWSH(b, "tcp", 2) }
+func Benchmark_throughput_WSH_TCP_4Conns(b *testing.B) { benchmarkWSH(b, "tcp", 4) }
+func Benchmark_throughput_WSH_TCP_8Conns(b *testing.B) { benchmarkWSH(b, "tcp", 8) }
+
+func Benchmark_throughput_WSH_WS_1Conn(b *testing.B) { benchmarkWSH(b, "ws", 1) }
+func Benchmark_throughput_WSH_WS_2Conns(b *testing.B) { benchmarkWSH(b, "ws", 2) }
+func Benchmark_throughput_WSH_WS_4Conns(b *testing.B) { benchmarkWSH(b, "ws", 4) }
+func Benchmark_throughput_WSH_WS_8Conns(b *testing.B) { benchmarkWSH(b, "ws", 8) }
+
+func Benchmark_throughput_Pipe_1Conn(b *testing.B) { benchmarkPipe(b, 1) }
+func Benchmark_throughput_Pipe_2Conns(b *testing.B) { benchmarkPipe(b, 2) }
+func Benchmark_throughput_Pipe_4Conns(b *testing.B) { benchmarkPipe(b, 4) }
+func Benchmark_throughput_Pipe_8Conns(b *testing.B) { benchmarkPipe(b, 8) }
+
+func Benchmark_throughput_Flow_1VIF_1VC_1Flow(b *testing.B) { benchmarkFlow(b, securityNone, 1, 1, 1) }
+func Benchmark_throughput_Flow_1VIF_1VC_2Flow(b *testing.B) { benchmarkFlow(b, securityNone, 1, 1, 2) }
+func Benchmark_throughput_Flow_1VIF_1VC_8Flow(b *testing.B) { benchmarkFlow(b, securityNone, 1, 1, 8) }
+
+func Benchmark_throughput_Flow_1VIF_2VC_2Flow(b *testing.B) { benchmarkFlow(b, securityNone, 1, 2, 1) }
+func Benchmark_throughput_Flow_1VIF_2VC_8Flow(b *testing.B) { benchmarkFlow(b, securityNone, 1, 2, 4) }
+
+func Benchmark_throughput_Flow_2VIF_4VC_8Flow(b *testing.B) { benchmarkFlow(b, securityNone, 2, 2, 2) }
+
+func Benchmark_throughput_TLS_1Conn(b *testing.B) { benchmarkTLS(b, 1) }
+func Benchmark_throughput_TLS_2Conns(b *testing.B) { benchmarkTLS(b, 2) }
+func Benchmark_throughput_TLS_4Conns(b *testing.B) { benchmarkTLS(b, 4) }
+func Benchmark_throughput_TLS_8Conns(b *testing.B) { benchmarkTLS(b, 8) }
+
+func Benchmark_throughput_Flow_1VIF_1VC_1FlowTLS(b *testing.B) { benchmarkFlow(b, securityTLS, 1, 1, 1) }
+func Benchmark_throughput_Flow_1VIF_1VC_2FlowTLS(b *testing.B) { benchmarkFlow(b, securityTLS, 1, 1, 2) }
+func Benchmark_throughput_Flow_1VIF_1VC_8FlowTLS(b *testing.B) { benchmarkFlow(b, securityTLS, 1, 1, 8) }
+
+func Benchmark_throughput_Flow_1VIF_2VC_2FlowTLS(b *testing.B) { benchmarkFlow(b, securityTLS, 1, 2, 1) }
+func Benchmark_throughput_Flow_1VIF_2VC_8FlowTLS(b *testing.B) { benchmarkFlow(b, securityTLS, 1, 2, 4) }
+
+func Benchmark_throughput_Flow_2VIF_4VC_8FlowTLS(b *testing.B) { benchmarkFlow(b, securityTLS, 2, 2, 2) }
diff --git a/profiles/internal/ipc/stream/benchmark/throughput_tls.go b/profiles/internal/ipc/stream/benchmark/throughput_tls.go
new file mode 100644
index 0000000..9750db5
--- /dev/null
+++ b/profiles/internal/ipc/stream/benchmark/throughput_tls.go
@@ -0,0 +1,64 @@
+package benchmark
+
+import (
+ "crypto/tls"
+ "io"
+ "net"
+ "testing"
+
+ "v.io/x/ref/profiles/internal/ipc/stream/crypto"
+)
+
+func benchmarkTLS(b *testing.B, nConns int) {
+ rchan := make(chan *tls.Conn, nConns)
+ wchan := make(chan *tls.Conn, nConns)
+ ln, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ b.Fatalf("net.Listen failed: %v", err)
+ return
+ }
+
+ defer ln.Close()
+ // One goroutine to dial nConns connections.
+ var tlsConfig tls.Config
+ tlsConfig.InsecureSkipVerify = true
+ go func() {
+ for i := 0; i < nConns; i++ {
+ conn, err := tls.Dial("tcp", ln.Addr().String(), &tlsConfig)
+ if err != nil {
+ b.Fatalf("tls.Dial(%q, %q) failed: %v", "tcp", ln.Addr(), err)
+ wchan <- nil
+ return
+ }
+ wchan <- conn
+ }
+ close(wchan)
+ }()
+ // One goroutine to accept nConns connections.
+ go func() {
+ for i := 0; i < nConns; i++ {
+ conn, err := ln.Accept()
+ if err != nil {
+ b.Fatalf("Accept failed: %v", err)
+ rchan <- nil
+ }
+ server := tls.Server(conn, crypto.ServerTLSConfig())
+ server.Handshake()
+ rchan <- server
+ }
+ close(rchan)
+ }()
+
+ var readers []io.ReadCloser
+ var writers []io.WriteCloser
+ for r := range rchan {
+ readers = append(readers, r)
+ }
+ for w := range wchan {
+ writers = append(writers, w)
+ }
+ if b.Failed() {
+ return
+ }
+ (&throughputTester{b: b, readers: readers, writers: writers}).Run()
+}
diff --git a/profiles/internal/ipc/stream/benchmark/throughput_ws.go b/profiles/internal/ipc/stream/benchmark/throughput_ws.go
new file mode 100644
index 0000000..551a0fb
--- /dev/null
+++ b/profiles/internal/ipc/stream/benchmark/throughput_ws.go
@@ -0,0 +1,60 @@
+package benchmark
+
+import (
+ "io"
+ "net"
+ "testing"
+
+ "v.io/x/ref/lib/websocket"
+)
+
+// benchmarkWS sets up nConns WS connections and measures throughput.
+func benchmarkWS(b *testing.B, nConns int) {
+ rchan := make(chan net.Conn, nConns)
+ wchan := make(chan net.Conn, nConns)
+ ln, err := websocket.Listener("ws", "127.0.0.1:0")
+ if err != nil {
+ b.Fatalf("websocket.Listener failed: %v", err)
+ return
+ }
+ defer ln.Close()
+ // One goroutine to dial nConns connections.
+ go func() {
+ for i := 0; i < nConns; i++ {
+ conn, err := websocket.Dial("ws", ln.Addr().String(), 0)
+ if err != nil {
+ b.Fatalf("websocket.Dial(%q, %q) failed: %v", "ws", ln.Addr(), err)
+ wchan <- nil
+ return
+ }
+ wchan <- conn
+ }
+ close(wchan)
+ }()
+ // One goroutine to accept nConns connections.
+ go func() {
+ for i := 0; i < nConns; i++ {
+ conn, err := ln.Accept()
+ if err != nil {
+ b.Fatalf("Accept failed: %v", err)
+ rchan <- nil
+ return
+ }
+ rchan <- conn
+ }
+ close(rchan)
+ }()
+
+ var readers []io.ReadCloser
+ var writers []io.WriteCloser
+ for r := range rchan {
+ readers = append(readers, r)
+ }
+ for w := range wchan {
+ writers = append(writers, w)
+ }
+ if b.Failed() {
+ return
+ }
+ (&throughputTester{b: b, readers: readers, writers: writers}).Run()
+}
diff --git a/profiles/internal/ipc/stream/benchmark/throughput_wsh.go b/profiles/internal/ipc/stream/benchmark/throughput_wsh.go
new file mode 100644
index 0000000..a7fd748
--- /dev/null
+++ b/profiles/internal/ipc/stream/benchmark/throughput_wsh.go
@@ -0,0 +1,75 @@
+package benchmark
+
+import (
+ "io"
+ "net"
+ "testing"
+
+ "v.io/x/ref/lib/websocket"
+)
+
+// benchmarkWSH sets up nConns WSH connections dialed over the given protocol and measures throughput.
+func benchmarkWSH(b *testing.B, protocol string, nConns int) {
+ rchan := make(chan net.Conn, nConns)
+ wchan := make(chan net.Conn, nConns)
+ ln, err := websocket.HybridListener("wsh", "127.0.0.1:0")
+ if err != nil {
+ b.Fatalf("websocket.HybridListener failed: %v", err)
+ return
+ }
+ defer ln.Close()
+ // One goroutine to dial nConns connections.
+ go func() {
+ for i := 0; i < nConns; i++ {
+ var conn net.Conn
+ var err error
+ switch protocol {
+ case "tcp":
+ conn, err = net.Dial("tcp", ln.Addr().String())
+ case "ws":
+ conn, err = websocket.Dial("ws", ln.Addr().String(), 0)
+ }
+ if err != nil {
+ b.Fatalf("Dial(%q, %q) failed: %v", protocol, ln.Addr(), err)
+ wchan <- nil
+ return
+ }
+ if protocol == "tcp" {
+ // Write a dummy byte since the wsh listener blocks forever waiting for the magic byte.
+ conn.Write([]byte("."))
+ }
+ wchan <- conn
+ }
+ close(wchan)
+ }()
+ // One goroutine to accept nConns connections.
+ go func() {
+ for i := 0; i < nConns; i++ {
+ conn, err := ln.Accept()
+ if err != nil {
+ b.Fatalf("Accept failed: %v", err)
+ rchan <- nil
+ return
+ }
+ if protocol == "tcp" {
+ // Read a dummy byte.
+ conn.Read(make([]byte, 1))
+ }
+ rchan <- conn
+ }
+ close(rchan)
+ }()
+
+ var readers []io.ReadCloser
+ var writers []io.WriteCloser
+ for r := range rchan {
+ readers = append(readers, r)
+ }
+ for w := range wchan {
+ writers = append(writers, w)
+ }
+ if b.Failed() {
+ return
+ }
+ (&throughputTester{b: b, readers: readers, writers: writers}).Run()
+}
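The dummy-byte dance above exists because a hybrid (wsh) listener has to sniff the first byte of a new connection to decide whether it is speaking raw TCP or WebSocket, and a silent TCP dialer would leave it waiting. A rough sketch of that classification idea follows; it is illustrative only, not the actual websocket.HybridListener logic, and the 'G' heuristic is an assumption based on WebSocket upgrades starting with an HTTP GET request:

package sketch

import (
	"bufio"
	"net"
)

// classifyConn peeks at the first byte of conn to guess the protocol.
// WebSocket connections begin with an HTTP request ("GET ..."), so a 'G'
// suggests ws; anything else is treated as raw TCP. The returned reader
// still contains the peeked byte.
func classifyConn(conn net.Conn) (proto string, r *bufio.Reader, err error) {
	r = bufio.NewReader(conn)
	first, err := r.Peek(1)
	if err != nil {
		return "", nil, err
	}
	if first[0] == 'G' {
		return "ws", r, nil
	}
	return "tcp", r, nil
}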
diff --git a/profiles/internal/ipc/stream/crypto/box.go b/profiles/internal/ipc/stream/crypto/box.go
new file mode 100644
index 0000000..39e1864
--- /dev/null
+++ b/profiles/internal/ipc/stream/crypto/box.go
@@ -0,0 +1,96 @@
+package crypto
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "net"
+
+ "golang.org/x/crypto/nacl/box"
+
+ "v.io/x/ref/profiles/internal/lib/iobuf"
+)
+
+type boxcrypter struct {
+ conn net.Conn
+ alloc *iobuf.Allocator
+ sharedKey [32]byte
+ sortedPubkeys []byte
+ writeNonce, readNonce uint64
+}
+
+// NewBoxCrypter uses Curve25519, XSalsa20 and Poly1305 to encrypt and
+// authenticate messages (as defined in http://nacl.cr.yp.to/box.html).
+// An ephemeral Diffie-Hellman key exchange is performed per invocation
+// of NewBoxCrypter; the data sent has forward security with connection
+// granularity. One round-trip is required before any data can be sent.
+// BoxCrypter does NOT do anything to verify the identity of the peer.
+func NewBoxCrypter(conn net.Conn, pool *iobuf.Pool) (Crypter, error) {
+ pk, sk, err := box.GenerateKey(rand.Reader)
+ if err != nil {
+ return nil, err
+ }
+ var theirPK [32]byte
+ errChan := make(chan error)
+ defer close(errChan)
+ go func() { _, err := conn.Write(pk[:]); errChan <- err }()
+ go func() { _, err := io.ReadFull(conn, theirPK[:]); errChan <- err }()
+ if err := <-errChan; err != nil {
+ return nil, err
+ }
+ if err := <-errChan; err != nil {
+ return nil, err
+ }
+ ret := &boxcrypter{conn: conn, alloc: iobuf.NewAllocator(pool, 0)}
+ box.Precompute(&ret.sharedKey, &theirPK, sk)
+ // Distinct messages between the same {sender, receiver} set are required
+ // to have distinct nonces. The end with the lexicographically smaller
+ // public key will be sending messages with 0, 2, 4... and the other will
+ // be using 1, 3, 5...
+ if bytes.Compare(pk[:], theirPK[:]) < 0 {
+ ret.writeNonce, ret.readNonce = 0, 1
+ ret.sortedPubkeys = append(pk[:], theirPK[:]...)
+ } else {
+ ret.writeNonce, ret.readNonce = 1, 0
+ ret.sortedPubkeys = append(theirPK[:], pk[:]...)
+ }
+ return ret, nil
+}
+
+func (c *boxcrypter) Encrypt(src *iobuf.Slice) (*iobuf.Slice, error) {
+ defer src.Release()
+ var nonce [24]byte
+ binary.LittleEndian.PutUint64(nonce[:], c.writeNonce)
+ c.writeNonce += 2
+ ret := c.alloc.Alloc(uint(len(src.Contents) + box.Overhead))
+ ret.Contents = box.SealAfterPrecomputation(ret.Contents[:0], src.Contents, &nonce, &c.sharedKey)
+ return ret, nil
+}
+
+func (c *boxcrypter) Decrypt(src *iobuf.Slice) (*iobuf.Slice, error) {
+ defer src.Release()
+ var nonce [24]byte
+ binary.LittleEndian.PutUint64(nonce[:], c.readNonce)
+ c.readNonce += 2
+ retLen := len(src.Contents) - box.Overhead
+ if retLen < 0 {
+ return nil, fmt.Errorf("ciphertext too short")
+ }
+ ret := c.alloc.Alloc(uint(retLen))
+ var ok bool
+ ret.Contents, ok = box.OpenAfterPrecomputation(ret.Contents[:0], src.Contents, &nonce, &c.sharedKey)
+ if !ok {
+ return nil, fmt.Errorf("message authentication failed")
+ }
+ return ret, nil
+}
+
+func (c *boxcrypter) ChannelBinding() []byte {
+ return c.sortedPubkeys
+}
+
+func (c *boxcrypter) String() string {
+ return fmt.Sprintf("%#v", *c)
+}
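The even/odd nonce split described in NewBoxCrypter's comments guarantees that the two directions never reuse a nonce under the shared key. A minimal sketch of that scheme with nacl/box, stripped of the iobuf plumbing (function and variable names here are illustrative):

package sketch

import (
	"bytes"
	"crypto/rand"
	"encoding/binary"

	"golang.org/x/crypto/nacl/box"
)

// sealWithCounter derives the 24-byte nonce from a 64-bit counter, the same
// way boxcrypter does, and seals msg with the precomputed shared key.
func sealWithCounter(msg []byte, counter uint64, key *[32]byte) []byte {
	var nonce [24]byte
	binary.LittleEndian.PutUint64(nonce[:], counter)
	return box.SealAfterPrecomputation(nil, msg, &nonce, key)
}

func nonceSplit() {
	pkA, skA, _ := box.GenerateKey(rand.Reader)
	pkB, skB, _ := box.GenerateKey(rand.Reader)
	var keyA, keyB [32]byte
	box.Precompute(&keyA, pkB, skA) // A's view of the shared key
	box.Precompute(&keyB, pkA, skB) // B's view; identical bytes
	// The end with the lexicographically smaller public key writes with
	// counters 0, 2, 4, ... and the other end with 1, 3, 5, ..., so the
	// two directions can never collide on a nonce.
	ctrA, ctrB := uint64(0), uint64(1)
	if bytes.Compare(pkA[:], pkB[:]) > 0 {
		ctrA, ctrB = 1, 0
	}
	_ = sealWithCounter([]byte("from A"), ctrA, &keyA)
	_ = sealWithCounter([]byte("from B"), ctrB, &keyB)
}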
diff --git a/profiles/internal/ipc/stream/crypto/box_cipher.go b/profiles/internal/ipc/stream/crypto/box_cipher.go
new file mode 100644
index 0000000..6174213
--- /dev/null
+++ b/profiles/internal/ipc/stream/crypto/box_cipher.go
@@ -0,0 +1,136 @@
+package crypto
+
+import (
+ "encoding/binary"
+ "errors"
+
+ "golang.org/x/crypto/nacl/box"
+ "golang.org/x/crypto/salsa20/salsa"
+)
+
+// cbox implements a ControlCipher using go.crypto/nacl/box.
+type cbox struct {
+ sharedKey [32]byte
+ enc cboxStream
+ dec cboxStream
+}
+
+// cboxStream implements one stream of encryption or decryption.
+type cboxStream struct {
+ counter uint64
+ nonce [24]byte
+ // buffer is a temporary used for in-place crypto.
+ buffer []byte
+}
+
+const (
+ cboxMACSize = box.Overhead
+)
+
+var (
+ errMessageTooShort = errors.New("control cipher: message is too short")
+)
+
+func (s *cboxStream) alloc(n int) []byte {
+ if len(s.buffer) < n {
+ s.buffer = make([]byte, n*2)
+ }
+ return s.buffer[:0]
+}
+
+func (s *cboxStream) currentNonce() *[24]byte {
+ return &s.nonce
+}
+
+func (s *cboxStream) advanceNonce() {
+ s.counter++
+ binary.LittleEndian.PutUint64(s.nonce[:], s.counter)
+}
+
+// setupXSalsa20 produces a sub-key and Salsa20 counter given a nonce and key.
+//
+// See, "Extending the Salsa20 nonce," by Daniel J. Bernsten, Department of
+// Computer Science, University of Illinois at Chicago, 2008.
+func setupXSalsa20(subKey *[32]byte, counter *[16]byte, nonce *[24]byte, key *[32]byte) {
+ // We use XSalsa20 for encryption so first we need to generate a
+ // key and nonce with HSalsa20.
+ var hNonce [16]byte
+ copy(hNonce[:], nonce[:])
+ salsa.HSalsa20(subKey, &hNonce, key, &salsa.Sigma)
+
+ // The final 8 bytes of the original nonce form the new nonce.
+ copy(counter[:], nonce[16:])
+}
+
+// NewControlCipherIPC6 returns a ControlCipher for IPC version 6.
+func NewControlCipherIPC6(peersPublicKey, privateKey *[32]byte, isServer bool) ControlCipher {
+ var c cbox
+ box.Precompute(&c.sharedKey, peersPublicKey, privateKey)
+ // The stream is full-duplex, and we want the directions to use different
+ // nonces, so we set bit 64 of the nonce (nonce[8] = 1) in the server-to-client
+ // stream and leave it cleared in the client-to-server stream. advanceNonce
+ // touches only the first 8 bytes, so this change is permanent for the
+ // duration of the stream.
+ if isServer {
+ c.enc.nonce[8] = 1
+ } else {
+ c.dec.nonce[8] = 1
+ }
+ return &c
+}
+
+// MACSize implements the ControlCipher method.
+func (c *cbox) MACSize() int {
+ return cboxMACSize
+}
+
+// Seal implements the ControlCipher method.
+func (c *cbox) Seal(data []byte) error {
+ n := len(data)
+ if n < cboxMACSize {
+ return errMessageTooShort
+ }
+ tmp := c.enc.alloc(n)
+ nonce := c.enc.currentNonce()
+ out := box.SealAfterPrecomputation(tmp, data[:n-cboxMACSize], nonce, &c.sharedKey)
+ c.enc.advanceNonce()
+ copy(data, out)
+ return nil
+}
+
+// Open implements the ControlCipher method.
+func (c *cbox) Open(data []byte) bool {
+ n := len(data)
+ if n < cboxMACSize {
+ return false
+ }
+ tmp := c.dec.alloc(n - cboxMACSize)
+ nonce := c.dec.currentNonce()
+ out, ok := box.OpenAfterPrecomputation(tmp, data, nonce, &c.sharedKey)
+ if !ok {
+ return false
+ }
+ c.dec.advanceNonce()
+ copy(data, out)
+ return true
+}
+
+// Encrypt implements the ControlCipher method.
+func (c *cbox) Encrypt(data []byte) {
+ var subKey [32]byte
+ var counter [16]byte
+ nonce := c.enc.currentNonce()
+ setupXSalsa20(&subKey, &counter, nonce, &c.sharedKey)
+ c.enc.advanceNonce()
+ salsa.XORKeyStream(data, data, &counter, &subKey)
+}
+
+// Decrypt implements the ControlCipher method.
+func (c *cbox) Decrypt(data []byte) {
+ var subKey [32]byte
+ var counter [16]byte
+ nonce := c.dec.currentNonce()
+ setupXSalsa20(&subKey, &counter, nonce, &c.sharedKey)
+ c.dec.advanceNonce()
+ salsa.XORKeyStream(data, data, &counter, &subKey)
+}
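Seal and Open above work in place on a frame whose trailing MACSize bytes are reserved for the MAC. A hedged usage sketch, mirroring the calling convention the framing code is expected to follow (these are internal packages, so the import is illustrative only):

package sketch

import (
	"crypto/rand"

	"golang.org/x/crypto/nacl/box"

	"v.io/x/ref/profiles/internal/ipc/stream/crypto"
)

// controlRoundTrip seals a payload on the server cipher and opens it on the
// client cipher, returning the recovered payload.
func controlRoundTrip(payload []byte) ([]byte, bool) {
	pubS, pvtS, _ := box.GenerateKey(rand.Reader)
	pubC, pvtC, _ := box.GenerateKey(rand.Reader)
	server := crypto.NewControlCipherIPC6(pubC, pvtS, true)
	client := crypto.NewControlCipherIPC6(pubS, pvtC, false)

	// The frame carries the payload followed by MACSize() bytes that Seal
	// overwrites with the MAC.
	frame := make([]byte, len(payload)+server.MACSize())
	copy(frame, payload)
	if err := server.Seal(frame); err != nil {
		return nil, false
	}
	if !client.Open(frame) {
		return nil, false
	}
	return frame[:len(payload)], true
}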
diff --git a/profiles/internal/ipc/stream/crypto/box_cipher_test.go b/profiles/internal/ipc/stream/crypto/box_cipher_test.go
new file mode 100644
index 0000000..64bb592
--- /dev/null
+++ b/profiles/internal/ipc/stream/crypto/box_cipher_test.go
@@ -0,0 +1,129 @@
+package crypto_test
+
+import (
+ "bytes"
+ "crypto/rand"
+ "testing"
+
+ "golang.org/x/crypto/nacl/box"
+
+ "v.io/x/ref/profiles/internal/ipc/stream/crypto"
+)
+
+// Add space for a MAC.
+func newMessage(s string) []byte {
+ b := make([]byte, len(s)+box.Overhead)
+ copy(b, []byte(s))
+ return b
+}
+
+func TestOpenSeal(t *testing.T) {
+ pub1, pvt1, err := box.GenerateKey(rand.Reader)
+ if err != nil {
+ t.Fatalf("can't generate key")
+ }
+ pub2, pvt2, err := box.GenerateKey(rand.Reader)
+ if err != nil {
+ t.Fatalf("can't generate key")
+ }
+ c1 := crypto.NewControlCipherIPC6(pub2, pvt1, true)
+ c2 := crypto.NewControlCipherIPC6(pub1, pvt2, false)
+
+ msg1 := newMessage("hello")
+ if err := c1.Seal(msg1); err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ msg2 := newMessage("world")
+ if err := c1.Seal(msg2); err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ msg3 := newMessage("hello")
+ if err := c1.Seal(msg3); err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ if bytes.Compare(msg1, msg3) == 0 {
+ t.Errorf("message should differ: %q, %q", msg1, msg3)
+ }
+
+ // Check that the client does not encrypt the same.
+ msg4 := newMessage("hello")
+ if err := c2.Seal(msg4); err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ if bytes.Compare(msg4, msg1) == 0 {
+ t.Errorf("messages should differ %q vs. %q", msg4, msg1)
+ }
+
+ // Corrupted message should not decrypt.
+ msg1[0] ^= 1
+ if ok := c2.Open(msg1); ok {
+ t.Errorf("expected error")
+ }
+
+ // Fix the message and try again.
+ msg1[0] ^= 1
+ if ok := c2.Open(msg1); !ok {
+ t.Errorf("Open failed")
+ }
+ if bytes.Compare(msg1[:5], []byte("hello")) != 0 {
+ t.Errorf("got %q, expected %q", msg1[:5], "hello")
+ }
+
+ // msg3 should not decrypt.
+ if ok := c2.Open(msg3); ok {
+ t.Errorf("expected error")
+ }
+
+ // Resume.
+ if ok := c2.Open(msg2); !ok {
+ t.Errorf("Open failed")
+ }
+ if bytes.Compare(msg2[:5], []byte("world")) != 0 {
+ t.Errorf("got %q, expected %q", msg2[:5], "world")
+ }
+ if ok := c2.Open(msg3); !ok {
+ t.Errorf("Open failed")
+ }
+ if bytes.Compare(msg3[:5], []byte("hello")) != 0 {
+ t.Errorf("got %q, expected %q", msg3[:5], "hello")
+ }
+}
+
+func TestXORKeyStream(t *testing.T) {
+ pub1, pvt1, err := box.GenerateKey(rand.Reader)
+ if err != nil {
+ t.Fatalf("can't generate key")
+ }
+ pub2, pvt2, err := box.GenerateKey(rand.Reader)
+ if err != nil {
+ t.Fatalf("can't generate key")
+ }
+ c1 := crypto.NewControlCipherIPC6(pub2, pvt1, true)
+ c2 := crypto.NewControlCipherIPC6(pub1, pvt2, false)
+
+ msg1 := []byte("hello")
+ msg2 := []byte("world")
+ msg3 := []byte("hello")
+ c1.Encrypt(msg1)
+ c1.Encrypt(msg2)
+ c1.Encrypt(msg3)
+ if bytes.Compare(msg1, msg3) == 0 {
+ t.Errorf("messages should differ: %q, %q", msg1, msg3)
+ }
+
+ c2.Decrypt(msg1)
+ c2.Decrypt(msg2)
+ c2.Decrypt(msg3)
+ s1 := string(msg1)
+ s2 := string(msg2)
+ s3 := string(msg3)
+ if s1 != "hello" {
+ t.Errorf("got %q, expected 'hello'", s1)
+ }
+ if s2 != "world" {
+ t.Errorf("got %q, expected 'world'", s2)
+ }
+ if s3 != "hello" {
+ t.Errorf("got %q, expected 'hello'", s3)
+ }
+}
diff --git a/profiles/internal/ipc/stream/crypto/control_cipher.go b/profiles/internal/ipc/stream/crypto/control_cipher.go
new file mode 100644
index 0000000..8258bcf
--- /dev/null
+++ b/profiles/internal/ipc/stream/crypto/control_cipher.go
@@ -0,0 +1,23 @@
+package crypto
+
+// ControlCipher provides the ciphers and MAC for control channel encryption.
+// Encryption and decryption are performed in place.
+type ControlCipher interface {
+ // MACSize returns the number of bytes in the MAC.
+ MACSize() int
+
+ // Seal replaces the message with an authenticated and encrypted version.
+ // The trailing MACSize bytes of the data are used for the MAC; they are
+ // discarded and overwritten.
+ Seal(data []byte) error
+
+ // Open authenticates and decrypts a box produced by Seal. The trailing
+ // MACSize bytes are not changed. Returns true on success.
+ Open(data []byte) bool
+
+ // Encrypt encrypts the data in place. No MAC is added.
+ Encrypt(data []byte)
+
+ // Decrypt decrypts the data in place. No MAC is verified.
+ Decrypt(data []byte)
+}
diff --git a/profiles/internal/ipc/stream/crypto/crypto.go b/profiles/internal/ipc/stream/crypto/crypto.go
new file mode 100644
index 0000000..14422c1
--- /dev/null
+++ b/profiles/internal/ipc/stream/crypto/crypto.go
@@ -0,0 +1,35 @@
+// Package crypto implements encryption and decryption interfaces intended for
+// securing communication over VCs.
+package crypto
+
+import "v.io/x/ref/profiles/internal/lib/iobuf"
+
+type Encrypter interface {
+ // Encrypt encrypts the provided plaintext data and returns the
+ // corresponding ciphertext slice (or nil if an error is returned).
+ //
+ // It always calls Release on plaintext and thus plaintext should not
+ // be used after calling Encrypt.
+ Encrypt(plaintext *iobuf.Slice) (ciphertext *iobuf.Slice, err error)
+}
+
+type Decrypter interface {
+ // Decrypt decrypts the provided ciphertext slice and returns the
+ // corresponding plaintext (or nil if an error is returned).
+ //
+ // It always calls Release on ciphertext and thus ciphertext should not
+ // be used after calling Decrypt.
+ Decrypt(ciphertext *iobuf.Slice) (plaintext *iobuf.Slice, err error)
+}
+
+type Crypter interface {
+ Encrypter
+ Decrypter
+ // ChannelBinding returns a byte slice that is unique for the
+ // particular crypter (and the parties between which it is operating).
+ // Having both parties assert out of band that they are indeed
+ // participating in a connection with that channel binding value is
+ // sufficient to authenticate the data received through the crypter.
+ ChannelBinding() []byte
+ String() string
+}
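Encrypt and Decrypt always Release their argument, so callers must treat the input slice as consumed. A minimal sketch of the calling pattern using the null Crypter defined later in this change (illustrative only, since these are internal packages):

package sketch

import (
	"fmt"

	"v.io/x/ref/profiles/internal/ipc/stream/crypto"
	"v.io/x/ref/profiles/internal/lib/iobuf"
)

func roundTrip(msg []byte) error {
	c := crypto.NewNullCrypter()
	plain := iobuf.NewSlice(msg)
	cipher, err := c.Encrypt(plain) // plain is released; do not touch it again
	if err != nil {
		return err
	}
	out, err := c.Decrypt(cipher) // cipher is released here as well
	if err != nil {
		return err
	}
	defer out.Release()
	fmt.Printf("recovered %d bytes\n", len(out.Contents))
	return nil
}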
diff --git a/profiles/internal/ipc/stream/crypto/crypto_test.go b/profiles/internal/ipc/stream/crypto/crypto_test.go
new file mode 100644
index 0000000..790ad9b
--- /dev/null
+++ b/profiles/internal/ipc/stream/crypto/crypto_test.go
@@ -0,0 +1,258 @@
+package crypto
+
+import (
+ "bytes"
+ "crypto/rand"
+ "net"
+ "testing"
+ "testing/quick"
+
+ "v.io/x/ref/profiles/internal/lib/iobuf"
+)
+
+func quickTest(t *testing.T, e Encrypter, d Decrypter) {
+ f := func(plaintext []byte) bool {
+ plainslice := iobuf.NewSlice(plaintext)
+ cipherslice, err := e.Encrypt(plainslice)
+ if err != nil {
+ t.Error(err)
+ return false
+ }
+ plainslice, err = d.Decrypt(cipherslice)
+ if err != nil {
+ t.Error(err)
+ return false
+ }
+ defer plainslice.Release()
+ return bytes.Equal(plainslice.Contents, plaintext)
+ }
+ if err := quick.Check(f, nil); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestNull(t *testing.T) {
+ crypter := NewNullCrypter()
+ quickTest(t, crypter, crypter)
+ crypter.String() // Only to test that String does not crash.
+}
+
+func testSimple(t *testing.T, c1, c2 Crypter) {
+ // Execute String just to check that it does not crash.
+ c1.String()
+ c2.String()
+ if t.Failed() {
+ return
+ }
+ quickTest(t, c1, c2)
+ quickTest(t, c2, c1)
+
+ // Log the byte overhead of encryption, just so that test output has a
+ // record.
+ var overhead [10]int
+ for i := 0; i < len(overhead); i++ {
+ size := 1 << uint(i)
+ slice, err := c1.Encrypt(iobuf.NewSlice(make([]byte, size)))
+ overhead[i] = slice.Size() - size
+ slice.Release()
+ if err != nil {
+ t.Fatalf("%d: %v", i, err)
+ }
+ }
+ t.Logf("Byte overhead of encryption: %v", overhead)
+}
+
+func TestTLS(t *testing.T) {
+ server, client := net.Pipe()
+ c1, c2 := tlsCrypters(t, server, client)
+ testSimple(t, c1, c2)
+}
+
+func TestBox(t *testing.T) {
+ server, client := net.Pipe()
+ c1, c2 := boxCrypters(t, server, client)
+ testSimple(t, c1, c2)
+}
+
+// testChannelBinding attempts to ensure that:
+// (a) ChannelBinding returns the same value for both ends of a Crypter
+// (b) ChannelBindings are unique
+// For (b), we simply test many times and check that no two instances have the same ChannelBinding value.
+// Yes, this test isn't exhaustive. If you have ideas, please share.
+func testChannelBinding(t *testing.T, factory func(testing.TB, net.Conn, net.Conn) (Crypter, Crypter)) {
+ values := make([][]byte, 100)
+ for i := 0; i < len(values); i++ {
+ conn1, conn2 := net.Pipe()
+ c1, c2 := factory(t, conn1, conn2)
+ if !bytes.Equal(c1.ChannelBinding(), c2.ChannelBinding()) {
+ t.Fatalf("Two ends of the crypter ended up with different channel bindings (iteration #%d)", i)
+ }
+ values[i] = c1.ChannelBinding()
+ }
+ for i := 0; i < len(values); i++ {
+ for j := i + 1; j < len(values); j++ {
+ if bytes.Equal(values[i], values[j]) {
+ t.Fatalf("Same ChannelBinding seen on multiple channels (%d and %d)", i, j)
+ }
+ }
+ }
+}
+
+func TestChannelBindingTLS(t *testing.T) { testChannelBinding(t, tlsCrypters) }
+func TestChannelBindingBox(t *testing.T) { testChannelBinding(t, boxCrypters) }
+
+func TestTLSNil(t *testing.T) {
+ conn1, conn2 := net.Pipe()
+ c1, c2 := tlsCrypters(t, conn1, conn2)
+ if t.Failed() {
+ return
+ }
+ cipher, err := c1.Encrypt(iobuf.NewSlice(nil))
+ if err != nil {
+ t.Fatal(err)
+ }
+ plain, err := c2.Decrypt(cipher)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if plain.Size() != 0 {
+ t.Fatalf("Decryption produced non-empty data (%d)", plain.Size())
+ }
+}
+
+func TestTLSFragmentedPlaintext(t *testing.T) {
+ // From RFC 5246, Section 6.2.1, the maximum length of a TLS record is
+ // 16K (it is represented by a uint16).
+ // http://tools.ietf.org/html/rfc5246#section-6.2.1
+ const dataLen = 16384 + 1
+ conn1, conn2 := net.Pipe()
+ enc, dec := tlsCrypters(t, conn1, conn2)
+ cipher, err := enc.Encrypt(iobuf.NewSlice(make([]byte, dataLen)))
+ if err != nil {
+ t.Fatal(err)
+ }
+ plain, err := dec.Decrypt(cipher)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(plain.Contents, make([]byte, dataLen)) {
+ t.Errorf("Got %d bytes, want %d bytes of zeroes", plain.Size(), dataLen)
+ }
+}
+
+type factory func(t testing.TB, server, client net.Conn) (Crypter, Crypter)
+
+func tlsCrypters(t testing.TB, serverConn, clientConn net.Conn) (Crypter, Crypter) {
+ crypters := make(chan Crypter)
+ go func() {
+ server, err := NewTLSServer(serverConn, serverConn.LocalAddr(), serverConn.RemoteAddr(), iobuf.NewPool(0))
+ if err != nil {
+ t.Fatal(err)
+ }
+ crypters <- server
+ }()
+
+ go func() {
+ client, err := NewTLSClient(clientConn, clientConn.LocalAddr(), clientConn.RemoteAddr(), TLSClientSessionCache{}, iobuf.NewPool(0))
+ if err != nil {
+ t.Fatal(err)
+ }
+ crypters <- client
+ }()
+ c1 := <-crypters
+ c2 := <-crypters
+ return c1, c2
+}
+
+func boxCrypters(t testing.TB, serverConn, clientConn net.Conn) (Crypter, Crypter) {
+ crypters := make(chan Crypter)
+ for _, conn := range []net.Conn{serverConn, clientConn} {
+ go func(conn net.Conn) {
+ crypter, err := NewBoxCrypter(conn, iobuf.NewPool(0))
+ if err != nil {
+ t.Fatal(err)
+ }
+ crypters <- crypter
+ }(conn)
+ }
+ return <-crypters, <-crypters
+}
+
+func benchmarkEncrypt(b *testing.B, crypters factory, size int) {
+ plaintext := make([]byte, size)
+ if _, err := rand.Read(plaintext); err != nil {
+ b.Fatal(err)
+ }
+ conn1, conn2 := net.Pipe()
+ defer conn1.Close()
+ defer conn2.Close()
+ e, _ := crypters(b, conn1, conn2)
+ b.SetBytes(int64(size))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ cipher, err := e.Encrypt(iobuf.NewSlice(plaintext))
+ if err != nil {
+ b.Fatal(err)
+ }
+ cipher.Release()
+ }
+}
+
+func BenchmarkTLSEncrypt_1B(b *testing.B) { benchmarkEncrypt(b, tlsCrypters, 1) }
+func BenchmarkTLSEncrypt_1K(b *testing.B) { benchmarkEncrypt(b, tlsCrypters, 1<<10) }
+func BenchmarkTLSEncrypt_10K(b *testing.B) { benchmarkEncrypt(b, tlsCrypters, 10<<10) }
+func BenchmarkTLSEncrypt_1M(b *testing.B) { benchmarkEncrypt(b, tlsCrypters, 1<<20) }
+func BenchmarkTLSEncrypt_5M(b *testing.B) { benchmarkEncrypt(b, tlsCrypters, 5<<20) }
+
+func BenchmarkBoxEncrypt_1B(b *testing.B) { benchmarkEncrypt(b, boxCrypters, 1) }
+func BenchmarkBoxEncrypt_1K(b *testing.B) { benchmarkEncrypt(b, boxCrypters, 1<<10) }
+func BenchmarkBoxEncrypt_10K(b *testing.B) { benchmarkEncrypt(b, boxCrypters, 10<<10) }
+func BenchmarkBoxEncrypt_1M(b *testing.B) { benchmarkEncrypt(b, boxCrypters, 1<<20) }
+func BenchmarkBoxEncrypt_5M(b *testing.B) { benchmarkEncrypt(b, boxCrypters, 5<<20) }
+
+func benchmarkRoundTrip(b *testing.B, crypters factory, size int) {
+ plaintext := make([]byte, size)
+ if _, err := rand.Read(plaintext); err != nil {
+ b.Fatal(err)
+ }
+ conn1, conn2 := net.Pipe()
+ defer conn1.Close()
+ defer conn2.Close()
+ e, d := crypters(b, conn1, conn2)
+ b.SetBytes(int64(size))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ cipherslice, err := e.Encrypt(iobuf.NewSlice(plaintext))
+ if err != nil {
+ b.Fatal(err)
+ }
+ plainslice, err := d.Decrypt(cipherslice)
+ if err != nil {
+ b.Fatal(err)
+ }
+ plainslice.Release()
+ }
+}
+func BenchmarkTLSRoundTrip_1B(b *testing.B) { benchmarkRoundTrip(b, tlsCrypters, 1) }
+func BenchmarkTLSRoundTrip_1K(b *testing.B) { benchmarkRoundTrip(b, tlsCrypters, 1<<10) }
+func BenchmarkTLSRoundTrip_10K(b *testing.B) { benchmarkRoundTrip(b, tlsCrypters, 10<<10) }
+func BenchmarkTLSRoundTrip_1M(b *testing.B) { benchmarkRoundTrip(b, tlsCrypters, 1<<20) }
+func BenchmarkTLSRoundTrip_5M(b *testing.B) { benchmarkRoundTrip(b, tlsCrypters, 5<<20) }
+
+func BenchmarkBoxRoundTrip_1B(b *testing.B) { benchmarkRoundTrip(b, boxCrypters, 1) }
+func BenchmarkBoxRoundTrip_1K(b *testing.B) { benchmarkRoundTrip(b, boxCrypters, 1<<10) }
+func BenchmarkBoxRoundTrip_10K(b *testing.B) { benchmarkRoundTrip(b, boxCrypters, 10<<10) }
+func BenchmarkBoxRoundTrip_1M(b *testing.B) { benchmarkRoundTrip(b, boxCrypters, 1<<20) }
+func BenchmarkBoxRoundTrip_5M(b *testing.B) { benchmarkRoundTrip(b, boxCrypters, 5<<20) }
+
+func benchmarkSetup(b *testing.B, crypters factory) {
+ for i := 0; i < b.N; i++ {
+ conn1, conn2 := net.Pipe()
+ crypters(b, conn1, conn2)
+ conn1.Close()
+ conn2.Close()
+ }
+}
+
+func BenchmarkTLSSetup(b *testing.B) { benchmarkSetup(b, tlsCrypters) }
+func BenchmarkBoxSetup(b *testing.B) { benchmarkSetup(b, boxCrypters) }
diff --git a/profiles/internal/ipc/stream/crypto/null.go b/profiles/internal/ipc/stream/crypto/null.go
new file mode 100644
index 0000000..4394d55
--- /dev/null
+++ b/profiles/internal/ipc/stream/crypto/null.go
@@ -0,0 +1,13 @@
+package crypto
+
+import "v.io/x/ref/profiles/internal/lib/iobuf"
+
+// NewNullCrypter returns a Crypter that does no encryption/decryption.
+func NewNullCrypter() Crypter { return null{} }
+
+type null struct{}
+
+func (null) Encrypt(src *iobuf.Slice) (*iobuf.Slice, error) { return src, nil }
+func (null) Decrypt(src *iobuf.Slice) (*iobuf.Slice, error) { return src, nil }
+func (null) String() string { return "Null" }
+func (null) ChannelBinding() []byte { return nil }
diff --git a/profiles/internal/ipc/stream/crypto/null_cipher.go b/profiles/internal/ipc/stream/crypto/null_cipher.go
new file mode 100644
index 0000000..9a39cc7
--- /dev/null
+++ b/profiles/internal/ipc/stream/crypto/null_cipher.go
@@ -0,0 +1,23 @@
+package crypto
+
+// NullControlCipher is a cipher that does nothing.
+type NullControlCipher struct{}
+
+func (NullControlCipher) MACSize() int { return 0 }
+func (NullControlCipher) Seal(data []byte) error { return nil }
+func (NullControlCipher) Open(data []byte) bool { return true }
+func (NullControlCipher) Encrypt(data []byte) {}
+func (NullControlCipher) Decrypt(data []byte) {}
+
+type disabledControlCipher struct {
+ NullControlCipher
+ macSize int
+}
+
+func (c *disabledControlCipher) MACSize() int { return c.macSize }
+
+// NewDisabledControlCipher returns a cipher that has the correct MACSize, but
+// encryption and decryption are disabled.
+func NewDisabledControlCipher(c ControlCipher) ControlCipher {
+ return &disabledControlCipher{macSize: c.MACSize()}
+}
diff --git a/profiles/internal/ipc/stream/crypto/tls.go b/profiles/internal/ipc/stream/crypto/tls.go
new file mode 100644
index 0000000..03a3925
--- /dev/null
+++ b/profiles/internal/ipc/stream/crypto/tls.go
@@ -0,0 +1,255 @@
+// +build go1.4
+
+package crypto
+
+import (
+ "bytes"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "sync"
+ "time"
+
+ "v.io/x/ref/profiles/internal/lib/iobuf"
+)
+
+var errDeadlinesNotSupported = errors.New("deadlines not supported")
+
+// TLSClientSessionCache specifies the ClientSessionCache used to resume TLS sessions.
+// It adapts tls.ClientSessionCache to the veyron/profiles/internal/ipc/stream.VCOpt interface.
+type TLSClientSessionCache struct{ tls.ClientSessionCache }
+
+func (TLSClientSessionCache) IPCStreamVCOpt() {}
+
+// NewTLSClientSessionCache creates a new session cache.
+// TODO(ashankar): Remove this once go1.4 is released and tlsfork can be released; at that
+// point use crypto/tls.NewLRUClientSessionCache directly.
+func NewTLSClientSessionCache() TLSClientSessionCache {
+ return TLSClientSessionCache{tls.NewLRUClientSessionCache(-1)}
+}
+
+// NewTLSClient returns a Crypter implementation that uses TLS, assuming
+// handshaker was initiated by a client.
+func NewTLSClient(handshaker io.ReadWriteCloser, local, remote net.Addr, sessionCache TLSClientSessionCache, pool *iobuf.Pool) (Crypter, error) {
+ var config tls.Config
+ // TLS + resumption + channel bindings is broken: <https://secure-resumption.com/#channelbindings>.
+ config.SessionTicketsDisabled = true
+ config.InsecureSkipVerify = true
+ config.ClientSessionCache = sessionCache.ClientSessionCache
+ return newTLSCrypter(handshaker, local, remote, &config, pool, false)
+}
+
+// NewTLSServer returns a Crypter implementation that uses TLS, assuming
+// handshaker was accepted by a server.
+func NewTLSServer(handshaker io.ReadWriteCloser, local, remote net.Addr, pool *iobuf.Pool) (Crypter, error) {
+ return newTLSCrypter(handshaker, local, remote, ServerTLSConfig(), pool, true)
+}
+
+type fakeConn struct {
+ handshakeConn io.ReadWriteCloser
+ out bytes.Buffer
+ in []byte
+ laddr, raddr net.Addr
+}
+
+func (c *fakeConn) Read(b []byte) (n int, err error) {
+ if c.handshakeConn != nil {
+ return c.handshakeConn.Read(b)
+ }
+ if len(c.in) == 0 {
+ return 0, tempError{}
+ }
+ n = copy(b, c.in)
+ c.in = c.in[n:]
+ return
+}
+
+func (c *fakeConn) Write(b []byte) (int, error) {
+ if c.handshakeConn != nil {
+ return c.handshakeConn.Write(b)
+ }
+ return c.out.Write(b)
+}
+
+func (*fakeConn) Close() error { return nil }
+func (c *fakeConn) LocalAddr() net.Addr { return c.laddr }
+func (c *fakeConn) RemoteAddr() net.Addr { return c.raddr }
+func (*fakeConn) SetDeadline(t time.Time) error { return errDeadlinesNotSupported }
+func (*fakeConn) SetReadDeadline(t time.Time) error { return errDeadlinesNotSupported }
+func (*fakeConn) SetWriteDeadline(t time.Time) error { return errDeadlinesNotSupported }
+
+// tempError implements net.Error and returns true for Temporary.
+// Providing this error in fakeConn.Read allows tls.Conn.Read to return with an
+// error without changing underlying state.
+type tempError struct{}
+
+func (tempError) Error() string { return "end of encrypted slice" }
+func (tempError) Timeout() bool { return false }
+func (tempError) Temporary() bool { return true }
+
+// tlsCrypter implements the Crypter interface using crypto/tls.
+//
+// crypto/tls provides a net.Conn, while the Crypter interface operates on
+// iobuf.Slice objects. In order to adapt to the Crypter interface, the
+// strategy is as follows:
+//
+// - netTLSCrypter wraps a net.Conn with an alternative implementation
+// (fakeConn) for the TLS handshake protocol.
+// - Once the TLS handshake is complete, fakeConn switches to a mode where all
+// Write calls add to a bytes.Buffer and all Read calls read from a
+// bytes.Buffer.
+// - Encrypt uses tls.Conn.Write, which in turn invokes fakeConn.Write and then
+// it extracts the contents of the underlying bytes.Buffer.
+// - Decrypt adds to the read buffer and then invokes tls.Conn.Read, which
+// in turn invokes fakeConn.Read, which reads from that buffer.
+type tlsCrypter struct {
+ mu sync.Mutex
+ alloc *iobuf.Allocator
+ tls *tls.Conn
+ fc *fakeConn
+}
+
+func newTLSCrypter(handshaker io.ReadWriteCloser, local, remote net.Addr, config *tls.Config, pool *iobuf.Pool, server bool) (Crypter, error) {
+ fc := &fakeConn{handshakeConn: handshaker, laddr: local, raddr: remote}
+ var t *tls.Conn
+ if server {
+ t = tls.Server(fc, config)
+ } else {
+ // The TLS handshake protocol ends with a message received by the client.
+ // handshaker should be closed only after the handshake protocol completes.
+ // So, the client closes the handshaker.
+ defer handshaker.Close()
+ t = tls.Client(fc, config)
+ }
+ if err := t.Handshake(); err != nil {
+ return nil, err
+ }
+ // Must have used Diffie-Hellman to exchange keys (so that the key
+ // selection is independent of any TLS certificates used).
+ // This helps ensure that identities exchanged during the veyron
+ // authentication protocol
+ // (http://goto.google.com/veyron:authentication) cannot be stolen and
+ // are bound to the session key established during the TLS handshake.
+ switch cs := t.ConnectionState().CipherSuite; cs {
+ case tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA:
+ case tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA:
+ case tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA:
+ case tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA:
+ case tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA:
+ case tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:
+ case tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
+ case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:
+ case tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
+ default:
+ t.Close()
+ return nil, fmt.Errorf("CipherSuite 0x%04x is not recognized. Must use one that uses Diffie-Hellman as the key exchange algorithm", cs)
+ }
+ fc.handshakeConn = nil
+ return &tlsCrypter{
+ alloc: iobuf.NewAllocator(pool, 0),
+ tls: t,
+ fc: fc,
+ }, nil
+}
+
+func (c *tlsCrypter) Encrypt(plaintext *iobuf.Slice) (*iobuf.Slice, error) {
+ defer plaintext.Release()
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ defer c.fc.out.Reset()
+ if _, err := c.tls.Write(plaintext.Contents); err != nil {
+ return nil, err
+ }
+ return c.alloc.Copy(c.fc.out.Bytes()), nil
+}
+
+func (c *tlsCrypter) Decrypt(ciphertext *iobuf.Slice) (*iobuf.Slice, error) {
+ defer ciphertext.Release()
+ if ciphertext.Size() == 0 {
+ return ciphertext, nil
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.fc.in = ciphertext.Contents
+ // Given the cipher suites used, len(plaintext) < len(ciphertext)
+ // (ciphertext includes TLS record headers). Allocating space for
+ // plaintext based on ciphertext.Size should suffice.
+ plaintext := c.alloc.Alloc(uint(ciphertext.Size()))
+ out := plaintext.Contents
+ for {
+ n, err := c.tls.Read(out)
+ if err != nil {
+ if _, exit := err.(tempError); exit {
+ break
+ }
+ plaintext.Release()
+ return nil, err
+ }
+ out = out[n:]
+ }
+ plaintext.Contents = plaintext.Contents[:plaintext.Size()-len(out)]
+ return plaintext, nil
+}
+
+func (c *tlsCrypter) String() string {
+ state := c.tls.ConnectionState()
+ return fmt.Sprintf("TLS CipherSuite:0x%04x Resumed:%v", state.CipherSuite, state.DidResume)
+}
+
+// ServerTLSConfig returns the tls.Config used by NewTLSServer.
+func ServerTLSConfig() *tls.Config {
+ c, err := tls.X509KeyPair([]byte(serverCert), []byte(serverKey))
+ if err != nil {
+ panic(err)
+ }
+ return &tls.Config{
+ // TLS + resumption + channel bindings is broken: <https://secure-resumption.com/#channelbindings>.
+ SessionTicketsDisabled: true,
+ Certificates: []tls.Certificate{c},
+ InsecureSkipVerify: true,
+ // RC4_128_SHA is 4-5X faster compared to the other cipher suites.
+ // There are concerns with its security (see http://en.wikipedia.org/wiki/RC4 and
+ // https://www.usenix.org/conference/usenixsecurity13/technical-sessions/paper/alFardan),
+ // so this decision will be revisited.
+ // TODO(ashankar,ataly): Figure out what cipher to use and how to
+ // have a speedy Go implementation of it.
+ CipherSuites: []uint16{tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA},
+ }
+}
+
+func (c *tlsCrypter) ChannelBinding() []byte {
+ return c.tls.ConnectionState().TLSUnique
+}
+
+// TODO(ashankar): Get rid of TLS certificates completely after implementing an
+// anonymous key-exchange mechanism. See F.1.1.1 in RFC 5246.
+//
+// PEM-encoded certificates and keys used in the tests.
+// One way to generate them is:
+// go run $GOROOT/src/pkg/crypto/tls/generate_cert.go --host=localhost --duration=87600h --ecdsa-curve=P256
+// (This generates a self-signed certificate valid for 10 years)
+// (The --ecdsa-curve flag has not yet been submitted back to the Go repository)
+// which will create cert.pem and key.pem files.
+const (
+ serverCert = `
+-----BEGIN CERTIFICATE-----
+MIIBbTCCAROgAwIBAgIQMD+Kzawjvhij1B/BmvHxLDAKBggqhkjOPQQDAjASMRAw
+DgYDVQQKEwdBY21lIENvMB4XDTE0MDcxODIzMTYxMloXDTI0MDcxNTIzMTYxMlow
+EjEQMA4GA1UEChMHQWNtZSBDbzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABLiz
+Ajsly1DS8NJF2KE195V83TgidfgGEB7nudscdKWH3+5uQHgCc+2BV/7AGGj3yePR
+ZZLzYD95goJ/a7eet/2jSzBJMA4GA1UdDwEB/wQEAwIAoDATBgNVHSUEDDAKBggr
+BgEFBQcDATAMBgNVHRMBAf8EAjAAMBQGA1UdEQQNMAuCCWxvY2FsaG9zdDAKBggq
+hkjOPQQDAgNIADBFAiAb4tBxggEpnKdxv66TBVFxAUn3EBWX25XlL1G2GF8RkAIh
+AOAwys3mvzM4Td/2kV9QNyQPZ9kLLQr9A9ryB0H3N9Yz
+-----END CERTIFICATE-----
+`
+ serverKey = `
+-----BEGIN ECDSA PRIVATE KEY-----
+MHcCAQEEIPLfwg+SVC2/xUcKq0bI9y2+SDEEdCeGuxuBz22BhAw1oAoGCCqGSM49
+AwEHoUQDQgAEuLMCOyXLUNLw0kXYoTX3lXzdOCJ1+AYQHue52xx0pYff7m5AeAJz
+7YFX/sAYaPfJ49FlkvNgP3mCgn9rt563/Q==
+-----END ECDSA PRIVATE KEY-----
+`
+)
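The tempError type above leans on the standard net.Error contract: a Read error with Temporary() == true is used as an in-band "no more buffered ciphertext" signal, so tls.Conn.Read returns without tearing anything down. A hedged sketch of the consuming side of that contract:

package sketch

import (
	"io"
	"net"
)

// drain reads from r until a non-temporary error occurs. A temporary
// net.Error is treated as "nothing more to read right now", mirroring how
// tlsCrypter.Decrypt stops its read loop on tempError.
func drain(r io.Reader, buf []byte) (int, error) {
	total := 0
	for {
		n, err := r.Read(buf)
		total += n
		if err == nil {
			continue
		}
		if ne, ok := err.(net.Error); ok && ne.Temporary() {
			return total, nil // no more data buffered right now
		}
		return total, err
	}
}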
diff --git a/profiles/internal/ipc/stream/doc.go b/profiles/internal/ipc/stream/doc.go
new file mode 100644
index 0000000..65f070b
--- /dev/null
+++ b/profiles/internal/ipc/stream/doc.go
@@ -0,0 +1,50 @@
+// Package stream implements authenticated byte streams to veyron endpoints.
+//
+// It is split into multiple sub-packages in an attempt to keep the code
+// healthier by limiting the dependencies between objects. Most users should not
+// need to use this package.
+//
+// Package contents and dependencies are as follows:
+//
+// * manager provides a factory for Manager objects.
+// It depends on the vif and proxy packages.
+// * vif implements a VIF type that wraps over a net.Conn and enables the
+// creation of VC objects over the underlying network connection.
+// It depends on the id, message and vc packages.
+// * message implements serialization and deserialization for messages
+// exchanged over a VIF.
+// It depends on the id package.
+// * vc provides types implementing VC and Flow.
+// It depends on the id and crypto packages.
+// * crypto provides types to secure communication over VCs.
+// It does not depend on any other package.
+// * id defines identifier types used by other packages.
+// It does not depend on any other package.
+package stream
+
+// A dump of some ideas/thoughts/TODOs arising from the first iteration of this
+// package. Big ticket items like proxying and TLS/authentication are obvious
+// and won't be missed. I just wanted to put some smaller items on record (in
+// no particular order).
+//
+// (1) Garbage collection of VIFs: Create a policy to close the underlying
+// network connection (and shutdown the VIF) when it is "inactive" (i.e., no VCs
+// have existed on it for a while).
+// (2) On the first write of a new flow, counters are stolen from a shared pool
+// (to avoid a round trip of a "create flow" message followed by a "here are
+// your counters" message). Currently, this happens on either end of the flow
+// (on both the remote and local process). This doesn't need to be the case,
+// the end that received the first message of the flow doesn't need to steal
+// on its first write.
+// (3) Should flow control counters be part of the Data message?
+// If so, maybe the flowQ should have a lower priority than that of Data
+// messages? At a higher level I'm thinking of ways to reduce the number
+// of messages sent per flow. Currently, just creating a flow results in
+// two messages - One where the initiator sends counters to the receiver
+// and one where the receiver does the same. The first write does not
+// block on receiving the counters because of the "steal from shared pool on
+// first write" scheme, but still, sounds like too much traffic.
+// (4) As an example of the above, consider the following code:
+// vc.Connect().Close()
+// This will result in 3 messages. But ideally it should involve 0.
+// (5) Encryption of control messages to protect from network sniffers.
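As a hedged, end-to-end sketch of the API this package tree implements (principals and other options are elided, the RoutingIDs come from the caller, and the imports are internal to this tree, so this is illustrative rather than a definitive usage example):

package sketch

import (
	"fmt"
	"io"

	"v.io/v23/naming"

	_ "v.io/x/ref/profiles/internal/ipc/protocols/tcp" // registers the "tcp" protocol
	"v.io/x/ref/profiles/internal/ipc/stream/manager"
)

// echoOnce shows the basic shape: create Managers for both ends, Listen on
// the server, Dial a VC from the client, open a Flow, and exchange bytes.
func echoOnce(serverRID, clientRID naming.RoutingID) error {
	server := manager.InternalNew(serverRID)
	client := manager.InternalNew(clientRID)
	defer server.Shutdown()
	defer client.Shutdown()

	ln, ep, err := server.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return err
	}
	defer ln.Close()
	go func() {
		flow, err := ln.Accept()
		if err != nil {
			return
		}
		buf := make([]byte, 5)
		io.ReadFull(flow, buf)
		flow.Write(buf) // echo back
		flow.Close()
	}()

	vc, err := client.Dial(ep)
	if err != nil {
		return err
	}
	flow, err := vc.Connect()
	if err != nil {
		return err
	}
	flow.Write([]byte("hello"))
	reply := make([]byte, 5)
	io.ReadFull(flow, reply)
	fmt.Printf("echoed %q\n", reply)
	return flow.Close()
}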
diff --git a/profiles/internal/ipc/stream/id/id.go b/profiles/internal/ipc/stream/id/id.go
new file mode 100644
index 0000000..fbd5611
--- /dev/null
+++ b/profiles/internal/ipc/stream/id/id.go
@@ -0,0 +1,8 @@
+// Package id provides types for identifying VCs and Flows over them.
+package id
+
+// VC identifies a VC over a VIF.
+type VC uint32
+
+// Flow identifies a Flow over a VC.
+type Flow uint32
diff --git a/profiles/internal/ipc/stream/manager/listener.go b/profiles/internal/ipc/stream/manager/listener.go
new file mode 100644
index 0000000..bf1e929
--- /dev/null
+++ b/profiles/internal/ipc/stream/manager/listener.go
@@ -0,0 +1,272 @@
+package manager
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "strings"
+ "sync"
+
+ "v.io/x/ref/lib/upcqueue"
+ "v.io/x/ref/profiles/internal/ipc/stream/vif"
+ inaming "v.io/x/ref/profiles/internal/naming"
+ "v.io/x/ref/profiles/proxy"
+
+ "v.io/v23/naming"
+ "v.io/v23/verror"
+ "v.io/v23/vom"
+ "v.io/x/lib/vlog"
+ "v.io/x/ref/profiles/internal/ipc/stream"
+)
+
+var errListenerIsClosed = errors.New("Listener has been Closed")
+
+// listener extends stream.Listener with a DebugString method.
+type listener interface {
+ stream.Listener
+ DebugString() string
+}
+
+// netListener implements the listener interface by accepting flows (and VCs)
+// over network connections accepted on an underlying net.Listener.
+type netListener struct {
+ q *upcqueue.T
+ netLn net.Listener
+ manager *manager
+ vifs *vif.Set
+
+ netLoop sync.WaitGroup
+ vifLoops sync.WaitGroup
+}
+
+var _ stream.Listener = (*netListener)(nil)
+
+// proxyListener implements the listener interface by connecting to a remote
+// proxy (typically used to "listen" across network domains).
+type proxyListener struct {
+ q *upcqueue.T
+ proxyEP naming.Endpoint
+ manager *manager
+ opts []stream.ListenerOpt
+}
+
+var _ stream.Listener = (*proxyListener)(nil)
+
+func newNetListener(m *manager, netLn net.Listener, opts []stream.ListenerOpt) listener {
+ ln := &netListener{
+ q: upcqueue.New(),
+ manager: m,
+ netLn: netLn,
+ vifs: vif.NewSet(),
+ }
+ ln.netLoop.Add(1)
+ go ln.netAcceptLoop(opts)
+ return ln
+}
+
+func (ln *netListener) netAcceptLoop(listenerOpts []stream.ListenerOpt) {
+ defer ln.netLoop.Done()
+ for {
+ conn, err := ln.netLn.Accept()
+ if err != nil {
+ vlog.VI(1).Infof("Exiting netAcceptLoop: net.Listener.Accept() failed on %v with %v", ln.netLn, err)
+ return
+ }
+ vlog.VI(1).Infof("New net.Conn accepted from %s (local address: %s)", conn.RemoteAddr(), conn.LocalAddr())
+ vf, err := vif.InternalNewAcceptedVIF(conn, ln.manager.rid, nil, listenerOpts...)
+ if err != nil {
+ vlog.Infof("Shutting down conn from %s (local address: %s) as a VIF could not be created: %v", conn.RemoteAddr(), conn.LocalAddr(), err)
+ conn.Close()
+ continue
+ }
+ ln.vifs.Insert(vf)
+ ln.manager.vifs.Insert(vf)
+ ln.vifLoops.Add(1)
+ go ln.vifLoop(vf)
+ }
+}
+
+func (ln *netListener) vifLoop(vf *vif.VIF) {
+ vifLoop(vf, ln.q)
+ ln.vifLoops.Done()
+}
+
+func (ln *netListener) Accept() (stream.Flow, error) {
+ item, err := ln.q.Get(nil)
+ switch {
+ case err == upcqueue.ErrQueueIsClosed:
+ return nil, errListenerIsClosed
+ case err != nil:
+ return nil, fmt.Errorf("Accept failed: %v", err)
+ default:
+ return item.(vif.ConnectorAndFlow).Flow, nil
+ }
+}
+
+func (ln *netListener) Close() error {
+ closeNetListener(ln.netLn)
+ ln.netLoop.Wait()
+ for _, vif := range ln.vifs.List() {
+ // NOTE(caprita): We do not actually Close down the vifs, as
+ // that would require knowing when all outstanding requests are
+ // finished. For now, do not worry about it, since we expect
+ // shut down to immediately precede process exit.
+ vif.StopAccepting()
+ }
+ ln.q.Shutdown()
+ ln.manager.removeListener(ln)
+ ln.vifLoops.Wait()
+ vlog.VI(3).Infof("Closed stream.Listener %s", ln)
+ return nil
+}
+
+func (ln *netListener) String() string {
+ return fmt.Sprintf("%T: (%v, %v)", ln, ln.netLn.Addr().Network(), ln.netLn.Addr())
+}
+
+func (ln *netListener) DebugString() string {
+ ret := []string{
+ fmt.Sprintf("stream.Listener: net.Listener on (%q, %q)", ln.netLn.Addr().Network(), ln.netLn.Addr()),
+ }
+ if vifs := ln.vifs.List(); len(vifs) > 0 {
+ ret = append(ret, fmt.Sprintf("===Accepted VIFs(%d)===", len(vifs)))
+ for ix, vif := range vifs {
+ ret = append(ret, fmt.Sprintf("%4d) %v", ix, vif))
+ }
+ }
+ return strings.Join(ret, "\n")
+}
+
+func newProxyListener(m *manager, ep naming.Endpoint, opts []stream.ListenerOpt) (listener, naming.Endpoint, error) {
+ ln := &proxyListener{
+ q: upcqueue.New(),
+ proxyEP: ep,
+ manager: m,
+ opts: opts,
+ }
+ vf, ep, err := ln.connect()
+ if err != nil {
+ return nil, nil, err
+ }
+ go vifLoop(vf, ln.q)
+ return ln, ep, nil
+}
+
+func (ln *proxyListener) connect() (*vif.VIF, naming.Endpoint, error) {
+ vlog.VI(1).Infof("Connecting to proxy at %v", ln.proxyEP)
+ // Requires dialing a VC to the proxy, need to extract options (like the principal)
+ // from ln.opts to do so.
+ var dialOpts []stream.VCOpt
+ for _, opt := range ln.opts {
+ if dopt, ok := opt.(stream.VCOpt); ok {
+ dialOpts = append(dialOpts, dopt)
+ }
+ }
+ // TODO(cnicolaou, ashankar): probably want to set a timeout here. (is this covered by opts?)
+ vf, err := ln.manager.FindOrDialVIF(ln.proxyEP, dialOpts...)
+ if err != nil {
+ return nil, nil, err
+ }
+ if err := vf.StartAccepting(ln.opts...); err != nil {
+ return nil, nil, fmt.Errorf("already connected to proxy and accepting connections? VIF: %v, StartAccepting error: %v", vf, err)
+ }
+ // Proxy protocol: See veyron/profiles/proxy/protocol.vdl
+ vc, err := vf.Dial(ln.proxyEP, dialOpts...)
+ if err != nil {
+ vf.StopAccepting()
+ if verror.ErrorID(err) == verror.ErrAborted.ID {
+ ln.manager.vifs.Delete(vf)
+ }
+ return nil, nil, fmt.Errorf("VC establishment with proxy failed: %v", err)
+ }
+ flow, err := vc.Connect()
+ if err != nil {
+ vf.StopAccepting()
+ return nil, nil, fmt.Errorf("unable to create liveness check flow to proxy: %v", err)
+ }
+ var request proxy.Request
+ var response proxy.Response
+ enc, err := vom.NewEncoder(flow)
+ if err != nil {
+ flow.Close()
+ vf.StopAccepting()
+ return nil, nil, fmt.Errorf("failed to create new Encoder: %v", err)
+ }
+ if err := enc.Encode(request); err != nil {
+ flow.Close()
+ vf.StopAccepting()
+ return nil, nil, fmt.Errorf("failed to encode request to proxy: %v", err)
+ }
+ dec, err := vom.NewDecoder(flow)
+ if err != nil {
+ flow.Close()
+ vf.StopAccepting()
+ return nil, nil, fmt.Errorf("failed to create new Decoder: %v", err)
+ }
+ if err := dec.Decode(&response); err != nil {
+ flow.Close()
+ vf.StopAccepting()
+ return nil, nil, fmt.Errorf("failed to decode response from proxy: %v", err)
+ }
+ if response.Error != nil {
+ flow.Close()
+ vf.StopAccepting()
+ return nil, nil, fmt.Errorf("proxy error: %v", response.Error)
+ }
+ ep, err := inaming.NewEndpoint(response.Endpoint)
+ if err != nil {
+ flow.Close()
+ vf.StopAccepting()
+ return nil, nil, fmt.Errorf("proxy returned invalid endpoint(%v): %v", response.Endpoint, err)
+ }
+ go func(vf *vif.VIF, flow stream.Flow, q *upcqueue.T) {
+ <-flow.Closed()
+ vf.StopAccepting()
+ q.Close()
+ }(vf, flow, ln.q)
+ return vf, ep, nil
+}
+
+func (ln *proxyListener) Accept() (stream.Flow, error) {
+ item, err := ln.q.Get(nil)
+ switch {
+ case err == upcqueue.ErrQueueIsClosed:
+ return nil, errListenerIsClosed
+ case err != nil:
+ return nil, fmt.Errorf("Accept failed: %v", err)
+ default:
+ return item.(vif.ConnectorAndFlow).Flow, nil
+ }
+}
+
+func (ln *proxyListener) Close() error {
+ ln.q.Shutdown()
+ ln.manager.removeListener(ln)
+ return nil
+}
+
+func (ln *proxyListener) String() string {
+ return ln.DebugString()
+}
+
+func (ln *proxyListener) DebugString() string {
+ return fmt.Sprintf("stream.Listener: PROXY:%v RoutingID:%v", ln.proxyEP, ln.manager.rid)
+}
+
+func vifLoop(vf *vif.VIF, q *upcqueue.T) {
+ for {
+ cAndf, err := vf.Accept()
+ switch {
+ case err != nil:
+ vlog.VI(2).Infof("Shutting down listener on VIF %v: %v", vf, err)
+ return
+ case cAndf.Flow == nil:
+ vlog.VI(1).Infof("New VC %v on VIF %v", cAndf.Connector, vf)
+ default:
+ if err := q.Put(cAndf); err != nil {
+ vlog.VI(1).Infof("Closing new flow on VC %v (VIF %v) as Put failed in vifLoop: %v", cAndf.Connector, vf, err)
+ cAndf.Flow.Close()
+ }
+ }
+ }
+}
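Both listener variants funnel accepted flows through an upcqueue and surface them via Accept. Reduced to its essentials, the producer/consumer shape looks like this (a sketch assuming the Put/Get/Shutdown semantics used above; Get's nil argument means block until an item arrives or the queue is shut down):

package sketch

import (
	"fmt"

	"v.io/x/ref/lib/upcqueue"
)

// queuePattern shows the shape used by the listeners above: an accept loop
// Puts items into an unbounded queue, consumers block in Get, and Shutdown
// unblocks pending Gets with ErrQueueIsClosed.
func queuePattern() {
	q := upcqueue.New()
	go func() {
		for i := 0; i < 3; i++ {
			if err := q.Put(i); err != nil {
				return // queue was shut down under us
			}
		}
		q.Shutdown()
	}()
	for {
		item, err := q.Get(nil)
		if err == upcqueue.ErrQueueIsClosed {
			return
		}
		fmt.Println("accepted:", item)
	}
}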
diff --git a/profiles/internal/ipc/stream/manager/manager.go b/profiles/internal/ipc/stream/manager/manager.go
new file mode 100644
index 0000000..515a2d8
--- /dev/null
+++ b/profiles/internal/ipc/stream/manager/manager.go
@@ -0,0 +1,280 @@
+// Package manager provides an implementation of the Manager interface defined in veyron/profiles/internal/ipc/stream.
+package manager
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "strings"
+ "sync"
+ "time"
+
+ "v.io/v23/ipc"
+ "v.io/v23/naming"
+ "v.io/v23/verror"
+ "v.io/x/lib/vlog"
+
+ "v.io/x/ref/lib/stats"
+ "v.io/x/ref/profiles/internal/ipc/stream"
+ "v.io/x/ref/profiles/internal/ipc/stream/crypto"
+ "v.io/x/ref/profiles/internal/ipc/stream/vif"
+ "v.io/x/ref/profiles/internal/ipc/version"
+ inaming "v.io/x/ref/profiles/internal/naming"
+)
+
+var errShutDown = errors.New("manager has been shut down")
+
+// InternalNew creates a new stream.Manager for managing streams where the local
+// process is identified by the provided RoutingID.
+//
+// As the name suggests, this method is intended for use only within packages
+// placed inside veyron/profiles/internal. Code outside the
+// veyron/profiles/internal/* packages should never call this method.
+func InternalNew(rid naming.RoutingID) stream.Manager {
+ m := &manager{
+ rid: rid,
+ vifs: vif.NewSet(),
+ sessionCache: crypto.NewTLSClientSessionCache(),
+ listeners: make(map[listener]bool),
+ statsName: naming.Join("ipc", "stream", "routing-id", rid.String(), "debug"),
+ }
+ stats.NewStringFunc(m.statsName, m.DebugString)
+ return m
+}
+
+type manager struct {
+ rid naming.RoutingID
+ vifs *vif.Set
+ sessionCache crypto.TLSClientSessionCache
+
+ muListeners sync.Mutex
+ listeners map[listener]bool // GUARDED_BY(muListeners)
+ shutdown bool // GUARDED_BY(muListeners)
+
+ statsName string
+}
+
+var _ stream.Manager = (*manager)(nil)
+
+type DialTimeout struct{ time.Duration }
+
+func (DialTimeout) IPCStreamVCOpt() {}
+func (DialTimeout) IPCClientOpt() {}
+
+func dial(network, address string, timeout time.Duration) (net.Conn, error) {
+ if d, _, _ := ipc.RegisteredProtocol(network); d != nil {
+ return d(network, address, timeout)
+ }
+ return nil, fmt.Errorf("unknown network %s", network)
+}
+
+// FindOrDialVIF returns the network connection (VIF) to the provided address
+// from the cache in the manager. If not already present in the cache, a new
+// connection will be created using net.Dial.
+func (m *manager) FindOrDialVIF(remote naming.Endpoint, opts ...stream.VCOpt) (*vif.VIF, error) {
+ // Extract options.
+ var timeout time.Duration
+ for _, o := range opts {
+ switch v := o.(type) {
+ case *DialTimeout:
+ timeout = v.Duration
+ }
+ }
+
+ addr := remote.Addr()
+ network, address := addr.Network(), addr.String()
+ if vf := m.vifs.Find(network, address); vf != nil {
+ return vf, nil
+ }
+ vlog.VI(1).Infof("(%q, %q) not in VIF cache. Dialing", network, address)
+ conn, err := dial(network, address, timeout)
+ if err != nil {
+ return nil, fmt.Errorf("net.Dial(%q, %q) failed: %v", network, address, err)
+ }
+ // (network, address) in the endpoint might not always match up
+ // with the key used in the vifs. For example:
+ // - conn, err := net.Dial("tcp", "www.google.com:80")
+ // fmt.Println(conn.RemoteAddr()) // Might yield the corresponding IP address
+ // - Similarly, an unspecified IP address (net.IP.IsUnspecified) like "[::]:80"
+ // might yield "[::1]:80" (loopback interface) in conn.RemoteAddr().
+ // Thus, look for VIFs with the resolved address as well.
+ if vf := m.vifs.Find(conn.RemoteAddr().Network(), conn.RemoteAddr().String()); vf != nil {
+ vlog.VI(1).Infof("(%q, %q) resolved to (%q, %q) which exists in the VIF cache. Closing newly Dialed connection", network, address, conn.RemoteAddr().Network(), conn.RemoteAddr())
+ conn.Close()
+ return vf, nil
+ }
+ vRange := version.SupportedRange
+ if ep, ok := remote.(*inaming.Endpoint); ok {
+ epRange := &version.Range{Min: ep.MinIPCVersion, Max: ep.MaxIPCVersion}
+ if r, err := vRange.Intersect(epRange); err == nil {
+ vRange = r
+ }
+ }
+ vf, err := vif.InternalNewDialedVIF(conn, m.rid, vRange, opts...)
+ if err != nil {
+ conn.Close()
+ return nil, fmt.Errorf("failed to create VIF: %v", err)
+ }
+ // TODO(ashankar): If two goroutines are simultaneously invoking
+ // manager.Dial, it is possible that two VIFs are inserted into m.vifs
+ // for the same remote network address. This is normally not a problem,
+ // but can be troublesome if the remote endpoint corresponds to a
+ // proxy, since the proxy requires a single network connection per
+ // routing id. Figure out a way to handle this cleanly. One option is
+ // to have only a single VIF per remote network address - have to think
+ // that through.
+ m.vifs.Insert(vf)
+ return vf, nil
+}
+
+func (m *manager) Dial(remote naming.Endpoint, opts ...stream.VCOpt) (stream.VC, error) {
+ // If vif.Dial fails because the cached network connection was broken, remove from
+ // the cache and try once more.
+ for retry := true; true; retry = false {
+ vf, err := m.FindOrDialVIF(remote, opts...)
+ if err != nil {
+ return nil, err
+ }
+ vc, err := vf.Dial(remote, append(opts, m.sessionCache)...)
+ if !retry || verror.ErrorID(err) != verror.ErrAborted.ID {
+ return vc, err
+ }
+ vf.Close()
+ m.vifs.Delete(vf)
+ vlog.VI(2).Infof("VIF %v is closed, removing from cache", vf)
+ }
+ return nil, verror.NewErrInternal(nil) // Not reached
+}
+
+func listen(protocol, address string) (net.Listener, error) {
+ if _, l, _ := ipc.RegisteredProtocol(protocol); l != nil {
+ return l(protocol, address)
+ }
+ return nil, fmt.Errorf("unknown network %s", protocol)
+}
+
+func (m *manager) Listen(protocol, address string, opts ...stream.ListenerOpt) (stream.Listener, naming.Endpoint, error) {
+ m.muListeners.Lock()
+ if m.shutdown {
+ m.muListeners.Unlock()
+ return nil, nil, errShutDown
+ }
+ m.muListeners.Unlock()
+
+ if protocol == inaming.Network {
+ // Act as if listening on the address of a remote proxy.
+ ep, err := inaming.NewEndpoint(address)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to parse endpoint %q: %v", address, err)
+ }
+ return m.remoteListen(ep, opts)
+ }
+ netln, err := listen(protocol, address)
+ if err != nil {
+ return nil, nil, fmt.Errorf("net.Listen(%q, %q) failed: %v", protocol, address, err)
+ }
+
+ m.muListeners.Lock()
+ if m.shutdown {
+ m.muListeners.Unlock()
+ closeNetListener(netln)
+ return nil, nil, errShutDown
+ }
+
+ ln := newNetListener(m, netln, opts)
+ m.listeners[ln] = true
+ m.muListeners.Unlock()
+ ep := version.Endpoint(protocol, netln.Addr().String(), m.rid)
+ return ln, ep, nil
+}
+
+func (m *manager) remoteListen(proxy naming.Endpoint, listenerOpts []stream.ListenerOpt) (stream.Listener, naming.Endpoint, error) {
+ ln, ep, err := newProxyListener(m, proxy, listenerOpts)
+ if err != nil {
+ return nil, nil, err
+ }
+ m.muListeners.Lock()
+ defer m.muListeners.Unlock()
+ if m.shutdown {
+ ln.Close()
+ return nil, nil, errShutDown
+ }
+ m.listeners[ln] = true
+ return ln, ep, nil
+}
+
+func (m *manager) ShutdownEndpoint(remote naming.Endpoint) {
+ vifs := m.vifs.List()
+ total := 0
+ for _, vf := range vifs {
+ total += vf.ShutdownVCs(remote)
+ }
+ vlog.VI(1).Infof("ShutdownEndpoint(%q) closed %d VCs", remote, total)
+}
+
+func closeNetListener(ln net.Listener) {
+ addr := ln.Addr()
+ err := ln.Close()
+ vlog.VI(1).Infof("Closed net.Listener on (%q, %q): %v", addr.Network(), addr, err)
+}
+
+func (m *manager) removeListener(ln listener) {
+ m.muListeners.Lock()
+ delete(m.listeners, ln)
+ m.muListeners.Unlock()
+}
+
+func (m *manager) Shutdown() {
+ stats.Delete(m.statsName)
+ m.muListeners.Lock()
+ if m.shutdown {
+ m.muListeners.Unlock()
+ return
+ }
+ m.shutdown = true
+ var wg sync.WaitGroup
+ wg.Add(len(m.listeners))
+ for ln := range m.listeners {
+ go func(ln stream.Listener) {
+ ln.Close()
+ wg.Done()
+ }(ln)
+ }
+ m.listeners = make(map[listener]bool)
+ m.muListeners.Unlock()
+ wg.Wait()
+
+ vifs := m.vifs.List()
+ for _, vf := range vifs {
+ vf.Close()
+ }
+}
+
+func (m *manager) RoutingID() naming.RoutingID {
+ return m.rid
+}
+
+func (m *manager) DebugString() string {
+ vifs := m.vifs.List()
+
+ m.muListeners.Lock()
+ defer m.muListeners.Unlock()
+
+ l := make([]string, 0)
+ l = append(l, fmt.Sprintf("Manager: RoutingID:%v #VIFs:%d #Listeners:%d Shutdown:%t", m.rid, len(vifs), len(m.listeners), m.shutdown))
+ if len(vifs) > 0 {
+ l = append(l, "============================VIFs================================================")
+ for ix, vif := range vifs {
+ l = append(l, fmt.Sprintf("%4d) %v", ix, vif.DebugString()))
+ l = append(l, "--------------------------------------------------------------------------------")
+ }
+ }
+ if len(m.listeners) > 0 {
+ l = append(l, "=======================================Listeners==================================================")
+ l = append(l, " (stream listeners, their local network listeners (missing for proxied listeners), and VIFS")
+ for ln, _ := range m.listeners {
+ l = append(l, ln.DebugString())
+ }
+ }
+ return strings.Join(l, "\n")
+}
diff --git a/profiles/internal/ipc/stream/manager/manager_test.go b/profiles/internal/ipc/stream/manager/manager_test.go
new file mode 100644
index 0000000..222387f
--- /dev/null
+++ b/profiles/internal/ipc/stream/manager/manager_test.go
@@ -0,0 +1,648 @@
+package manager
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "reflect"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+
+ "v.io/v23/ipc"
+ "v.io/v23/naming"
+ "v.io/v23/security"
+ "v.io/x/lib/vlog"
+ "v.io/x/ref/profiles/internal/ipc/stream"
+
+ "v.io/x/ref/lib/expect"
+ "v.io/x/ref/lib/modules"
+ "v.io/x/ref/lib/testutil"
+ tsecurity "v.io/x/ref/lib/testutil/security"
+ _ "v.io/x/ref/profiles/internal/ipc/protocols/tcp"
+ _ "v.io/x/ref/profiles/internal/ipc/protocols/ws"
+ "v.io/x/ref/profiles/internal/ipc/stream/vc"
+ "v.io/x/ref/profiles/internal/ipc/version"
+ inaming "v.io/x/ref/profiles/internal/naming"
+)
+
+func newPrincipal(defaultBlessing string) vc.LocalPrincipal {
+ return vc.LocalPrincipal{Principal: tsecurity.NewPrincipal(defaultBlessing)}
+}
+
+func init() {
+ modules.RegisterChild("runServer", "", runServer)
+}
+
+// We write our own TestMain here instead of relying on v23 test generate because
+// we need to set runtime.GOMAXPROCS.
+func TestMain(m *testing.M) {
+ testutil.Init()
+ // testutil.Init sets GOMAXPROCS to NumCPU. We want to force
+ // GOMAXPROCS to remain at 1, in order to trigger a particular race
+ // condition that occurs when closing the server; also, using 1 cpu
+ // introduces less variance in the behavior of the test.
+ runtime.GOMAXPROCS(1)
+ if modules.IsModulesProcess() {
+ if err := modules.Dispatch(); err != nil {
+ fmt.Fprintf(os.Stderr, "modules.Dispatch failed: %v\n", err)
+ os.Exit(1)
+ }
+ return
+ }
+ os.Exit(m.Run())
+}
+
+func testSimpleFlow(t *testing.T, protocol string) {
+ server := InternalNew(naming.FixedRoutingID(0x55555555))
+ client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+
+ ln, ep, err := server.Listen(protocol, "127.0.0.1:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data := "the dark knight rises"
+ var clientVC stream.VC
+ var clientF1 stream.Flow
+ go func() {
+ if clientVC, err = client.Dial(ep); err != nil {
+ t.Errorf("Dial(%q) failed: %v", ep, err)
+ return
+ }
+ if clientF1, err = clientVC.Connect(); err != nil {
+ t.Errorf("Connect() failed: %v", err)
+ return
+ }
+ if err := writeLine(clientF1, data); err != nil {
+ t.Error(err)
+ }
+ }()
+ serverF, err := ln.Accept()
+ if err != nil {
+ t.Fatalf("Accept failed: %v", err)
+ }
+ if got, err := readLine(serverF); got != data || err != nil {
+ t.Errorf("Got (%q, %v), want (%q, nil)", got, err, data)
+ }
+ // By this point, the goroutine has passed the write call (or exited
+ // early) since the read has gotten through. Check if the goroutine
+ // encountered any errors in creating the VC or flow and abort.
+ if t.Failed() {
+ return
+ }
+ defer clientF1.Close()
+
+ ln.Close()
+
+ // Writes on flows opened before the server listener was closed should
+ // still succeed.
+ data = "the dark knight goes to bed"
+ go func() {
+ if err := writeLine(clientF1, data); err != nil {
+ t.Error(err)
+ }
+ }()
+ if got, err := readLine(serverF); got != data || err != nil {
+ t.Errorf("Got (%q, %v), want (%q, nil)", got, err, data)
+ }
+
+ // Opening a new flow on an existing VC will succeed initially, but
+ // writes on the client end will eventually fail once the server has
+ // stopped listening.
+ //
+ // It will require a round-trip to the server to notice the failure,
+ // hence the client should write enough data to ensure that the Write
+ // call will not return before a round-trip.
+ //
+ // The length of the data is taken to exceed the queue buffer size
+ // (DefaultBytesBufferedPerFlow), the shared counters (MaxSharedBytes)
+ // and the per-flow counters (DefaultBytesBufferedPerFlow) that are
+ // given when the flow gets established.
+ //
+ // TODO(caprita): separate the constants for the queue buffer size and
+ // the default number of counters to avoid confusion.
+ lotsOfData := string(make([]byte, vc.DefaultBytesBufferedPerFlow*2+vc.MaxSharedBytes+1))
+ clientF2, err := clientVC.Connect()
+ if err != nil {
+ t.Fatalf("Connect() failed: %v", err)
+ }
+ defer clientF2.Close()
+ if err := writeLine(clientF2, lotsOfData); err == nil {
+ t.Errorf("Should not be able to Dial or Write after the Listener is closed")
+ }
+ // Opening a new VC should fail fast.
+ if _, err := client.Dial(ep); err == nil {
+ t.Errorf("Should not be able to Dial after listener is closed")
+ }
+}
+
+func TestSimpleFlow(t *testing.T) {
+ testSimpleFlow(t, "tcp")
+}
+
+func TestSimpleFlowWS(t *testing.T) {
+ testSimpleFlow(t, "ws")
+}
+
+func TestConnectionTimeout(t *testing.T) {
+ client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+
+ ch := make(chan error)
+ go func() {
+ // 203.0.113.0 is TEST-NET-3 from RFC5737
+ ep, _ := inaming.NewEndpoint(naming.FormatEndpoint("tcp", "203.0.113.10:80"))
+ _, err := client.Dial(ep, &DialTimeout{time.Second})
+ ch <- err
+ }()
+
+ select {
+ case err := <-ch:
+ if err == nil {
+ t.Fatalf("expected an error")
+ }
+ case <-time.After(time.Minute):
+ t.Fatalf("timedout")
+ }
+}
+
+func testAuthenticatedByDefault(t *testing.T, protocol string) {
+ var (
+ server = InternalNew(naming.FixedRoutingID(0x55555555))
+ client = InternalNew(naming.FixedRoutingID(0xcccccccc))
+
+ clientPrincipal = newPrincipal("client")
+ serverPrincipal = newPrincipal("server")
+ clientKey = clientPrincipal.Principal.PublicKey()
+ serverBlessings = serverPrincipal.Principal.BlessingStore().Default()
+ )
+ // VCSecurityLevel is intentionally not provided to Listen - to test
+ // default behavior.
+ ln, ep, err := server.Listen(protocol, "127.0.0.1:0", serverPrincipal)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ errs := make(chan error)
+
+ testAuth := func(tag string, flow stream.Flow, wantServer security.Blessings, wantClientKey security.PublicKey) {
+ // Since the client's blessing is expected to be self-signed we only test
+ // its public key
+ gotServer := flow.RemoteBlessings()
+ gotClientKey := flow.LocalBlessings().PublicKey()
+ if tag == "server" {
+ gotServer = flow.LocalBlessings()
+ gotClientKey = flow.RemoteBlessings().PublicKey()
+ }
+ if !reflect.DeepEqual(gotServer, wantServer) || !reflect.DeepEqual(gotClientKey, wantClientKey) {
+ errs <- fmt.Errorf("%s: Server: Got Blessings %q, want %q. Server: Got Blessings %q, want %q", tag, gotServer, wantServer, gotClientKey, wantClientKey)
+ return
+ }
+ errs <- nil
+ }
+
+ go func() {
+ flow, err := ln.Accept()
+ if err != nil {
+ errs <- err
+ return
+ }
+ defer flow.Close()
+ testAuth("server", flow, serverBlessings, clientKey)
+ }()
+
+ go func() {
+ // VCSecurityLevel is intentionally not provided to Dial - to
+ // test default behavior.
+ vc, err := client.Dial(ep, clientPrincipal)
+ if err != nil {
+ errs <- err
+ return
+ }
+ flow, err := vc.Connect()
+ if err != nil {
+ errs <- err
+ return
+ }
+ defer flow.Close()
+ testAuth("client", flow, serverBlessings, clientKey)
+ }()
+
+ if err := <-errs; err != nil {
+ t.Error(err)
+ }
+ if err := <-errs; err != nil {
+ t.Error(err)
+ }
+}
+
+func TestAuthenticatedByDefault(t *testing.T) {
+ testAuthenticatedByDefault(t, "tcp")
+}
+
+func TestAuthenticatedByDefaultWS(t *testing.T) {
+ testAuthenticatedByDefault(t, "ws")
+}
+
+func numListeners(m stream.Manager) int { return len(m.(*manager).listeners) }
+func debugString(m stream.Manager) string { return m.(*manager).DebugString() }
+func numVIFs(m stream.Manager) int { return len(m.(*manager).vifs.List()) }
+
+func TestListenEndpoints(t *testing.T) {
+ server := InternalNew(naming.FixedRoutingID(0xcafe))
+ ln1, ep1, err1 := server.Listen("tcp", "127.0.0.1:0")
+ ln2, ep2, err2 := server.Listen("tcp", "127.0.0.1:0")
+ // Since "127.0.0.1:0" was used as the network address, a random port will be
+ // assigned in each case. The endpoint should include that random port.
+ if err1 != nil {
+ t.Error(err1)
+ }
+ if err2 != nil {
+ t.Error(err2)
+ }
+ if ep1.String() == ep2.String() {
+ t.Errorf("Both listeners got the same endpoint: %q", ep1)
+ }
+ if n, expect := numListeners(server), 2; n != expect {
+ t.Errorf("expecting %d listeners, got %d for %s", n, expect, debugString(server))
+ }
+ ln1.Close()
+ if n, expect := numListeners(server), 1; n != expect {
+ t.Errorf("expecting %d listeners, got %d for %s", n, expect, debugString(server))
+ }
+ ln2.Close()
+ if n, expect := numListeners(server), 0; n != expect {
+ t.Errorf("expecting %d listeners, got %d for %s", n, expect, debugString(server))
+ }
+}
+
+func acceptLoop(ln stream.Listener) {
+ for {
+ f, err := ln.Accept()
+ if err != nil {
+ return
+ }
+ f.Close()
+ }
+}
+
+func TestCloseListener(t *testing.T) {
+ testCloseListener(t, "tcp")
+}
+
+func TestCloseListenerWS(t *testing.T) {
+ testCloseListener(t, "ws")
+}
+
+func testCloseListener(t *testing.T, protocol string) {
+ server := InternalNew(naming.FixedRoutingID(0x5e97e9))
+
+ ln, ep, err := server.Listen(protocol, "127.0.0.1:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Server will just listen for flows and close them.
+ go acceptLoop(ln)
+ client := InternalNew(naming.FixedRoutingID(0xc1e41))
+ if _, err = client.Dial(ep); err != nil {
+ t.Fatal(err)
+ }
+ ln.Close()
+ client = InternalNew(naming.FixedRoutingID(0xc1e42))
+ if _, err := client.Dial(ep); err == nil {
+ t.Errorf("client.Dial(%q) should have failed", ep)
+ }
+}
+
+func TestShutdown(t *testing.T) {
+ server := InternalNew(naming.FixedRoutingID(0x5e97e9))
+ ln, _, err := server.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Server will just listen for flows and close them.
+ go acceptLoop(ln)
+ if n, expect := numListeners(server), 1; n != expect {
+ t.Errorf("expecting %d listeners, got %d for %s", n, expect, debugString(server))
+ }
+ server.Shutdown()
+ if _, _, err := server.Listen("tcp", "127.0.0.1:0"); err == nil {
+ t.Error("server should have shut down")
+ }
+ if n, expect := numListeners(server), 0; n != expect {
+ t.Errorf("expecting %d listeners, got %d for %s", n, expect, debugString(server))
+ }
+}
+
+func TestShutdownEndpoint(t *testing.T) {
+ testShutdownEndpoint(t, "tcp")
+}
+
+func TestShutdownEndpointWS(t *testing.T) {
+ testShutdownEndpoint(t, "ws")
+}
+
+func testShutdownEndpoint(t *testing.T, protocol string) {
+ server := InternalNew(naming.FixedRoutingID(0x55555555))
+ client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+
+ ln, ep, err := server.Listen(protocol, "127.0.0.1:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Server will just listen for flows and close them.
+ go acceptLoop(ln)
+
+ vc, err := client.Dial(ep)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if f, err := vc.Connect(); f == nil || err != nil {
+ t.Errorf("vc.Connect failed: (%v, %v)", f, err)
+ }
+ client.ShutdownEndpoint(ep)
+ if f, err := vc.Connect(); f != nil || err == nil {
+ t.Errorf("vc.Connect unexpectedly succeeded: (%v, %v)", f, err)
+ }
+}
+
+/* TLS + resumption + channel bindings is broken: <https://secure-resumption.com/#channelbindings>.
+func TestSessionTicketCache(t *testing.T) {
+ server := InternalNew(naming.FixedRoutingID(0x55555555))
+ _, ep, err := server.Listen("tcp", "127.0.0.1:0", newPrincipal("server"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+ if _, err = client.Dial(ep, newPrincipal("TestSessionTicketCacheClient")); err != nil {
+ t.Fatalf("Dial(%q) failed: %v", ep, err)
+ }
+
+ if _, ok := client.(*manager).sessionCache.Get(ep.String()); !ok {
+ t.Fatalf("SessionTicket from TLS handshake not cached")
+ }
+}
+*/
+
+func testMultipleVCs(t *testing.T, protocol string) {
+ server := InternalNew(naming.FixedRoutingID(0x55555555))
+ client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+
+ const nVCs = 2
+ const data = "bugs bunny"
+
+ // Have the server read from each flow and write to rchan.
+ rchan := make(chan string)
+ ln, ep, err := server.Listen(protocol, "127.0.0.1:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ read := func(flow stream.Flow, c chan string) {
+ var buf bytes.Buffer
+ var tmp [1024]byte
+ for {
+ n, err := flow.Read(tmp[:])
+ buf.Write(tmp[:n])
+ if err == io.EOF {
+ c <- buf.String()
+ return
+ }
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ }
+ }
+ go func() {
+ for i := 0; i < nVCs; i++ {
+ flow, err := ln.Accept()
+ if err != nil {
+ t.Error(err)
+ rchan <- ""
+ continue
+ }
+ go read(flow, rchan)
+ }
+ }()
+
+ // Have the client establish nVCs and a flow on each.
+ var vcs [nVCs]stream.VC
+ for i := 0; i < nVCs; i++ {
+ var err error
+ vcs[i], err = client.Dial(ep)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ write := func(vc stream.VC) {
+ if err != nil {
+ ln.Close()
+ t.Error(err)
+ return
+ }
+ flow, err := vc.Connect()
+ if err != nil {
+ ln.Close()
+ t.Error(err)
+ return
+ }
+ defer flow.Close()
+ if _, err := flow.Write([]byte(data)); err != nil {
+ ln.Close()
+ t.Error(err)
+ return
+ }
+ }
+ for _, vc := range vcs {
+ go write(vc)
+ }
+ for i := 0; i < nVCs; i++ {
+ if got := <-rchan; got != data {
+ t.Errorf("Got %q want %q", got, data)
+ }
+ }
+}
+
+func TestMultipleVCs(t *testing.T) {
+ testMultipleVCs(t, "tcp")
+}
+
+func TestMultipleVCsWS(t *testing.T) {
+ testMultipleVCs(t, "ws")
+}
+
+func TestAddressResolution(t *testing.T) {
+ server := InternalNew(naming.FixedRoutingID(0x55555555))
+ client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+
+ // Using "tcp4" instead of "tcp" because the latter can end up with IPv6
+ // addresses and our Google Compute Engine integration test machines cannot
+ // resolve IPv6 addresses.
+ // As of April 2014, https://developers.google.com/compute/docs/networking
+ // said that IPv6 is not yet supported.
+ ln, ep, err := server.Listen("tcp4", "127.0.0.1:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ go acceptLoop(ln)
+
+ // We'd like an endpoint that contains an address that's different than the
+ // one used for the connection. In practice this is awkward to achieve since
+ // we don't want to listen on ":0" since that will annoy firewalls. Instead we
+ // listen on 127.0.0.1 and we fabricate an endpoint that doesn't contain
+ // 127.0.0.1 by using ":0" to create it. This leads to an endpoint such that
+ // the address encoded in the endpoint (e.g. "0.0.0.0:55324") is different
+ // from the address of the connection (e.g. "127.0.0.1:55324").
+ _, port, _ := net.SplitHostPort(ep.Addr().String())
+ nep := version.Endpoint(ep.Addr().Network(), net.JoinHostPort("", port), ep.RoutingID())
+
+ // Dial multiple VCs
+ for i := 0; i < 2; i++ {
+ if _, err = client.Dial(nep); err != nil {
+ t.Fatalf("Dial #%d failed: %v", i, err)
+ }
+ }
+ // They should all be on the same VIF.
+ if n := numVIFs(client); n != 1 {
+ t.Errorf("Client has %d VIFs, want 1\n%v", n, debugString(client))
+ }
+ // TODO(ashankar): While a VIF can be re-used to Dial from the server
+ // to the client, currently there is no way to have the client "listen"
+ // on the same VIF. It can listen on a VC for new flows, but it cannot
+ // listen on an established VIF for new VCs. Figure this out?
+}
+
+func TestServerRestartDuringClientLifetime(t *testing.T) {
+ testServerRestartDuringClientLifetime(t, "tcp")
+}
+
+func TestServerRestartDuringClientLifetimeWS(t *testing.T) {
+ testServerRestartDuringClientLifetime(t, "ws")
+}
+
+func testServerRestartDuringClientLifetime(t *testing.T, protocol string) {
+ client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+ sh, err := modules.NewShell(nil, nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ defer sh.Cleanup(nil, nil)
+ h, err := sh.Start("runServer", nil, protocol, "127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ s := expect.NewSession(t, h.Stdout(), time.Minute)
+ addr := s.ReadLine()
+
+ ep, err := inaming.NewEndpoint(naming.FormatEndpoint(protocol, addr))
+ if err != nil {
+ t.Fatalf("inaming.NewEndpoint(%q): %v", addr, err)
+ }
+ if _, err := client.Dial(ep); err != nil {
+ t.Fatal(err)
+ }
+ h.Shutdown(nil, os.Stderr)
+
+ // A new VC cannot be created since the server is dead
+ if _, err := client.Dial(ep); err == nil {
+ t.Fatal("Expected client.Dial to fail since server is dead")
+ }
+
+ h, err = sh.Start("runServer", nil, protocol, addr)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ s = expect.NewSession(t, h.Stdout(), time.Minute)
+ // Restarting the server, listening on the same address as before
+ if addr2 := s.ReadLine(); addr2 != addr || err != nil {
+ t.Fatalf("Got (%q, %v) want (%q, nil)", addr2, err, addr)
+ }
+ if _, err := client.Dial(ep); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func runServer(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
+ server := InternalNew(naming.FixedRoutingID(0x55555555))
+ _, ep, err := server.Listen(args[0], args[1])
+ if err != nil {
+ fmt.Fprintln(stderr, err)
+ return err
+ }
+ fmt.Fprintln(stdout, ep.Addr())
+ // Live forever (till the process is explicitly killed)
+ modules.WaitForEOF(stdin)
+ return nil
+}
+
+func readLine(f stream.Flow) (string, error) {
+ var result bytes.Buffer
+ var buf [5]byte
+ for {
+ n, err := f.Read(buf[:])
+ result.Write(buf[:n])
+ if err == io.EOF || buf[n-1] == '\n' {
+ return strings.TrimRight(result.String(), "\n"), nil
+ }
+ if err != nil {
+ return "", fmt.Errorf("Read returned (%d, %v)", n, err)
+ }
+ }
+}
+
+func writeLine(f stream.Flow, data string) error {
+ data = data + "\n"
+ vlog.VI(1).Infof("write sending %d bytes", len(data))
+ if n, err := f.Write([]byte(data)); err != nil {
+ return fmt.Errorf("Write returned (%d, %v)", n, err)
+ }
+ return nil
+}
+
+func TestRegistration(t *testing.T) {
+ server := InternalNew(naming.FixedRoutingID(0x55555555))
+ client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+
+ dialer := func(_, _ string, _ time.Duration) (net.Conn, error) {
+ return nil, fmt.Errorf("tn.Dial")
+ }
+ listener := func(_, _ string) (net.Listener, error) {
+ return nil, fmt.Errorf("tn.Listen")
+ }
+ ipc.RegisterProtocol("tn", dialer, listener)
+
+ _, _, err := server.Listen("tnx", "127.0.0.1:0")
+ if err == nil || !strings.Contains(err.Error(), "unknown network tnx") {
+ t.Fatal("expected error is missing (%v)", err)
+ }
+
+ _, _, err = server.Listen("tn", "127.0.0.1:0")
+ if err == nil || !strings.Contains(err.Error(), "tn.Listen") {
+ t.Fatal("expected error is missing (%v)", err)
+ }
+
+ // Need a functional listener to test Dial.
+ listener = func(_, addr string) (net.Listener, error) {
+ return net.Listen("tcp", addr)
+ }
+
+ if got, want := ipc.RegisterProtocol("tn", dialer, listener), true; got != want {
+ t.Errorf("got %t, want %t", got, want)
+ }
+
+ _, ep, err := server.Listen("tn", "127.0.0.1:0")
+ if err != nil {
+ t.Errorf("unexpected error %s", err)
+ }
+
+ _, err = client.Dial(ep)
+ if err == nil || !strings.Contains(err.Error(), "tn.Dial") {
+ t.Fatal("expected error is missing (%v)", err)
+ }
+}
diff --git a/profiles/internal/ipc/stream/message/coding.go b/profiles/internal/ipc/stream/message/coding.go
new file mode 100644
index 0000000..4e96e46
--- /dev/null
+++ b/profiles/internal/ipc/stream/message/coding.go
@@ -0,0 +1,195 @@
+package message
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+
+ "v.io/x/ref/profiles/internal/ipc/stream/id"
+)
+
+var errLargerThan3ByteUint = errors.New("integer too large to represent in 3 bytes")
+
+func write3ByteUint(dst []byte, n int) error {
+ if n >= (1<<24) || n < 0 {
+ return errLargerThan3ByteUint
+ }
+ dst[0] = byte((n & 0xff0000) >> 16)
+ dst[1] = byte((n & 0x00ff00) >> 8)
+ dst[2] = byte(n & 0x0000ff)
+ return nil
+}
+
+func read3ByteUint(src []byte) int {
+ return int(src[0])<<16 | int(src[1])<<8 | int(src[2])
+}
+
+func write4ByteUint(dst []byte, n uint32) {
+ dst[0] = byte((n & 0xff000000) >> 24)
+ dst[1] = byte((n & 0x00ff0000) >> 16)
+ dst[2] = byte((n & 0x0000ff00) >> 8)
+ dst[3] = byte(n & 0x000000ff)
+}
+
+func read4ByteUint(src []byte) uint32 {
+ return uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
+}
+
+func readInt(r io.Reader, ptr interface{}) error {
+ return binary.Read(r, binary.BigEndian, ptr)
+}
+
+func writeInt(w io.Writer, ptr interface{}) error {
+ return binary.Write(w, binary.BigEndian, ptr)
+}
+
+func readString(r io.Reader, s *string) error {
+ var size uint32
+ if err := readInt(r, &size); err != nil {
+ return err
+ }
+ bytes := make([]byte, size)
+ n, err := r.Read(bytes)
+ if err != nil {
+ return err
+ }
+ if n != int(size) {
+ return io.ErrUnexpectedEOF
+ }
+ *s = string(bytes)
+ return nil
+}
+
+func writeString(w io.Writer, s string) error {
+ size := uint32(len(s))
+ if err := writeInt(w, size); err != nil {
+ return err
+ }
+ n, err := w.Write([]byte(s))
+ if err != nil {
+ return err
+ }
+ if n != int(size) {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+
+// byteReader adapts an io.Reader to an io.ByteReader
+type byteReader struct{ io.Reader }
+
+func (b byteReader) ReadByte() (byte, error) {
+ var buf [1]byte
+ n, err := b.Reader.Read(buf[:])
+ switch {
+ case n == 1:
+ return buf[0], err
+ case err != nil:
+ return 0, err
+ default:
+ return 0, fmt.Errorf("read %d bytes, wanted to read 1", n)
+ }
+}
+
+func readCounters(r io.Reader) (Counters, error) {
+ var br io.ByteReader
+ var ok bool
+ if br, ok = r.(io.ByteReader); !ok {
+ br = byteReader{r}
+ }
+ size, err := binary.ReadUvarint(br)
+ if err != nil {
+ return nil, err
+ }
+ if size == 0 {
+ return nil, nil
+ }
+ c := Counters(make(map[CounterID]uint32, size))
+ for i := uint64(0); i < size; i++ {
+ vci, err := binary.ReadUvarint(br)
+ if err != nil {
+ return nil, err
+ }
+ fid, err := binary.ReadUvarint(br)
+ if err != nil {
+ return nil, err
+ }
+ bytes, err := binary.ReadUvarint(br)
+ if err != nil {
+ return nil, err
+ }
+ c.Add(id.VC(vci), id.Flow(fid), uint32(bytes))
+ }
+ return c, nil
+}
+
+func writeCounters(w io.Writer, c Counters) (err error) {
+ var vbuf [binary.MaxVarintLen64]byte
+ putUvarint := func(n uint64) {
+ if err == nil {
+ _, err = w.Write(vbuf[:binary.PutUvarint(vbuf[:], n)])
+ }
+ }
+ putUvarint(uint64(len(c)))
+ for cid, bytes := range c {
+ putUvarint(uint64(cid.VCI()))
+ putUvarint(uint64(cid.Flow()))
+ putUvarint(uint64(bytes))
+ }
+ return
+}
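+
+// As an illustrative sketch of the varint framing implemented by
+// readCounters/writeCounters above (values chosen only for the example):
+// Counters{MakeCounterID(1, 2): 100} is encoded as the four uvarint bytes
+//
+//   0x01 (entry count) 0x01 (VCI) 0x02 (Flow) 0x64 (bytes)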
+
+func readSetupOptions(r io.Reader) ([]SetupOption, error) {
+ var opts []SetupOption
+ for {
+ var code setupOptionCode
+ switch err := readInt(r, &code); err {
+ case io.EOF:
+ return opts, nil
+ case nil:
+ break
+ default:
+ return nil, err
+ }
+ var size uint16
+ if err := readInt(r, &size); err != nil {
+ return nil, err
+ }
+ l := &io.LimitedReader{R: r, N: int64(size)}
+ switch code {
+ case naclBoxPublicKey:
+ var opt NaclBox
+ if err := opt.read(l); err != nil {
+ return nil, err
+ }
+ opts = append(opts, &opt)
+ }
+ // Consume any data remaining.
+ readAndDiscardToError(l)
+ }
+}
+
+func writeSetupOptions(w io.Writer, options []SetupOption) error {
+ for _, opt := range options {
+ if err := writeInt(w, opt.code()); err != nil {
+ return err
+ }
+ if err := writeInt(w, opt.size()); err != nil {
+ return err
+ }
+ if err := opt.write(w); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func readAndDiscardToError(r io.Reader) {
+ var data [1024]byte
+ for {
+ if _, err := r.Read(data[:]); err != nil {
+ return
+ }
+ }
+}
diff --git a/profiles/internal/ipc/stream/message/control.go b/profiles/internal/ipc/stream/message/control.go
new file mode 100644
index 0000000..b63f56e
--- /dev/null
+++ b/profiles/internal/ipc/stream/message/control.go
@@ -0,0 +1,414 @@
+package message
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+
+ "v.io/v23/naming"
+ "v.io/x/ref/profiles/internal/ipc/stream/id"
+ "v.io/x/ref/profiles/internal/ipc/version"
+ inaming "v.io/x/ref/profiles/internal/naming"
+)
+
+// Control is the interface implemented by all control messages.
+type Control interface {
+ readFrom(r *bytes.Buffer) error
+ writeTo(w io.Writer) error
+}
+
+// OpenVC is a Control implementation requesting the creation of a new virtual
+// circuit.
+type OpenVC struct {
+ VCI id.VC
+ DstEndpoint naming.Endpoint
+ SrcEndpoint naming.Endpoint
+ Counters Counters
+}
+
+// CloseVC is a Control implementation notifying the closure of an established
+// virtual circuit, or failure to establish a virtual circuit.
+//
+// The Error string will be empty in case the close was the result of an
+// explicit close by the application (and not an error).
+type CloseVC struct {
+ VCI id.VC
+ Error string
+}
+
+// SetupVC is a Control implementation containing information to setup a new
+// virtual circuit. This message is expected to replace OpenVC and allow for
+// the two ends of a VC to establish a protocol version.
+type SetupVC struct {
+ VCI id.VC
+ Versions version.Range // Version range supported by the sender.
+ LocalEndpoint naming.Endpoint // Endpoint of the sender (as seen by the sender), can be nil.
+ RemoteEndpoint naming.Endpoint // Endpoint of the receiver (as seen by the sender), can be nil.
+ Counters Counters
+ Options []SetupOption
+}
+
+// AddReceiveBuffers is a Control implementation used by the sender of the
+// message to inform the other end of a virtual circuit that it is ready to
+// receive more bytes of data (specified per flow).
+type AddReceiveBuffers struct {
+ Counters Counters
+}
+
+// OpenFlow is a Control implementation notifying the receiver of the sender's
+// intent to create a new Flow. It also includes the number of bytes the sender
+// of this message is willing to read.
+type OpenFlow struct {
+ VCI id.VC
+ Flow id.Flow
+ InitialCounters uint32
+}
+
+// HopSetup is a control message used to negotiate VIF options on a
+// hop-by-hop basis.
+type HopSetup struct {
+ Versions version.Range
+ Options []SetupOption
+}
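+
+// For example, a dialer announcing its supported version range and NaclBox
+// public key might construct (illustrative values; minVer, maxVer and pk are
+// placeholders, not identifiers defined in this package):
+//
+//   msg := &HopSetup{
+//       Versions: version.Range{Min: minVer, Max: maxVer},
+//       Options:  []SetupOption{&NaclBox{PublicKey: pk}},
+//   }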
+
+// SetupOption is the base interface for optional HopSetup and SetupVC options.
+type SetupOption interface {
+ // code is the identifier for the option.
+ code() setupOptionCode
+
+ // size returns the number of bytes needed to represent the option.
+ size() uint16
+
+ // write the option to the writer.
+ write(w io.Writer) error
+
+ // read the option from the reader.
+ read(r io.Reader) error
+}
+
+// NaclBox is a SetupOption that specifies the public key for the NaclBox
+// encryption protocol.
+type NaclBox struct {
+ PublicKey [32]byte
+}
+
+// HopSetupStream is a byte stream used to negotiate VIF setup. During VIF setup,
+// each party sends a HopSetup message to the other party containing their version
+// and options. If the version requires further negotiation (such as for authentication),
+// the HopSetupStream is used for the negotiation.
+//
+// The protocol used on the stream is version-specific; it is not specified here. See
+// vif/auth.go for an example.
+type HopSetupStream struct {
+ Data []byte
+}
+
+// Setup option codes.
+type setupOptionCode uint16
+
+const (
+ naclBoxPublicKey setupOptionCode = 0
+)
+
+// Command enum.
+type command uint8
+
+const (
+ openVCCommand command = 0
+ closeVCCommand command = 1
+ addReceiveBuffersCommand command = 2
+ openFlowCommand command = 3
+ hopSetupCommand command = 4
+ hopSetupStreamCommand command = 5
+ setupVCCommand command = 6
+)
+
+func writeControl(w io.Writer, m Control) error {
+ var command command
+ switch m.(type) {
+ case *OpenVC:
+ command = openVCCommand
+ case *CloseVC:
+ command = closeVCCommand
+ case *AddReceiveBuffers:
+ command = addReceiveBuffersCommand
+ case *OpenFlow:
+ command = openFlowCommand
+ case *HopSetup:
+ command = hopSetupCommand
+ case *HopSetupStream:
+ command = hopSetupStreamCommand
+ case *SetupVC:
+ command = setupVCCommand
+ default:
+ return fmt.Errorf("unrecognized VC control message: %T", m)
+ }
+ var header [1]byte
+ header[0] = byte(command)
+ if n, err := w.Write(header[:]); n != len(header) || err != nil {
+ return fmt.Errorf("failed to write header. Got (%d, %v) want (%d, nil)", n, err, len(header))
+ }
+ if err := m.writeTo(w); err != nil {
+ return err
+ }
+ return nil
+}
+
+func readControl(r *bytes.Buffer) (Control, error) {
+ var header byte
+ var err error
+ if header, err = r.ReadByte(); err != nil {
+ return nil, fmt.Errorf("message too small, cannot read control message command (0, %v)", err)
+ }
+ command := command(header)
+ var m Control
+ switch command {
+ case openVCCommand:
+ m = new(OpenVC)
+ case closeVCCommand:
+ m = new(CloseVC)
+ case addReceiveBuffersCommand:
+ m = new(AddReceiveBuffers)
+ case openFlowCommand:
+ m = new(OpenFlow)
+ case hopSetupCommand:
+ m = new(HopSetup)
+ case hopSetupStreamCommand:
+ m = new(HopSetupStream)
+ case setupVCCommand:
+ m = new(SetupVC)
+ default:
+ return nil, fmt.Errorf("unrecognized VC control message command(%d)", command)
+ }
+ if err := m.readFrom(r); err != nil {
+ return nil, fmt.Errorf("failed to deserialize control message %d(%T): %v", command, m, err)
+ }
+ return m, nil
+}
+
+func (m *OpenVC) writeTo(w io.Writer) (err error) {
+ if err = writeInt(w, m.VCI); err != nil {
+ return
+ }
+ if err = writeString(w, m.DstEndpoint.String()); err != nil {
+ return
+ }
+ if err = writeString(w, m.SrcEndpoint.String()); err != nil {
+ return
+ }
+ if err = writeCounters(w, m.Counters); err != nil {
+ return
+ }
+ return nil
+}
+
+func (m *OpenVC) readFrom(r *bytes.Buffer) (err error) {
+ if err = readInt(r, &m.VCI); err != nil {
+ return
+ }
+ var ep string
+ if err = readString(r, &ep); err != nil {
+ return
+ }
+ if m.DstEndpoint, err = inaming.NewEndpoint(ep); err != nil {
+ return
+ }
+ if err = readString(r, &ep); err != nil {
+ return
+ }
+ if m.SrcEndpoint, err = inaming.NewEndpoint(ep); err != nil {
+ return
+ }
+ if m.Counters, err = readCounters(r); err != nil {
+ return
+ }
+ return nil
+}
+
+func (m *CloseVC) writeTo(w io.Writer) (err error) {
+ if err = writeInt(w, m.VCI); err != nil {
+ return
+ }
+ if err = writeString(w, m.Error); err != nil {
+ return
+ }
+ return
+}
+
+func (m *CloseVC) readFrom(r *bytes.Buffer) (err error) {
+ if err = readInt(r, &m.VCI); err != nil {
+ return
+ }
+ if err = readString(r, &m.Error); err != nil {
+ return
+ }
+ return
+}
+
+func (m *SetupVC) writeTo(w io.Writer) (err error) {
+ if err = writeInt(w, m.VCI); err != nil {
+ return
+ }
+ if err = writeInt(w, m.Versions.Min); err != nil {
+ return
+ }
+ if err = writeInt(w, m.Versions.Max); err != nil {
+ return
+ }
+ var localep string
+ if m.LocalEndpoint != nil {
+ localep = m.LocalEndpoint.String()
+ }
+ if err = writeString(w, localep); err != nil {
+ return
+ }
+ var remoteep string
+ if m.RemoteEndpoint != nil {
+ remoteep = m.RemoteEndpoint.String()
+ }
+ if err = writeString(w, remoteep); err != nil {
+ return
+ }
+ if err = writeCounters(w, m.Counters); err != nil {
+ return
+ }
+ if err = writeSetupOptions(w, m.Options); err != nil {
+ return
+ }
+ return
+}
+
+func (m *SetupVC) readFrom(r *bytes.Buffer) (err error) {
+ if err = readInt(r, &m.VCI); err != nil {
+ return
+ }
+ if err = readInt(r, &m.Versions.Min); err != nil {
+ return
+ }
+ if err = readInt(r, &m.Versions.Max); err != nil {
+ return
+ }
+ var ep string
+ if err = readString(r, &ep); err != nil {
+ return
+ }
+ if ep != "" {
+ if m.LocalEndpoint, err = inaming.NewEndpoint(ep); err != nil {
+ return
+ }
+ }
+ if err = readString(r, &ep); err != nil {
+ return
+ }
+ if ep != "" {
+ if m.RemoteEndpoint, err = inaming.NewEndpoint(ep); err != nil {
+ return
+ }
+ }
+ if m.Counters, err = readCounters(r); err != nil {
+ return
+ }
+ if m.Options, err = readSetupOptions(r); err != nil {
+ return
+ }
+ return
+}
+
+func (m *AddReceiveBuffers) writeTo(w io.Writer) error {
+ return writeCounters(w, m.Counters)
+}
+
+func (m *AddReceiveBuffers) readFrom(r *bytes.Buffer) (err error) {
+ m.Counters, err = readCounters(r)
+ return
+}
+
+func (m *OpenFlow) writeTo(w io.Writer) (err error) {
+ if err = writeInt(w, m.VCI); err != nil {
+ return
+ }
+ if err = writeInt(w, m.Flow); err != nil {
+ return
+ }
+ if err = writeInt(w, m.InitialCounters); err != nil {
+ return
+ }
+ return
+}
+
+func (m *OpenFlow) readFrom(r *bytes.Buffer) (err error) {
+ if err = readInt(r, &m.VCI); err != nil {
+ return
+ }
+ if err = readInt(r, &m.Flow); err != nil {
+ return
+ }
+ if err = readInt(r, &m.InitialCounters); err != nil {
+ return
+ }
+ return
+}
+
+func (m *HopSetup) writeTo(w io.Writer) (err error) {
+ if err = writeInt(w, m.Versions.Min); err != nil {
+ return
+ }
+ if err = writeInt(w, m.Versions.Max); err != nil {
+ return
+ }
+ if err = writeSetupOptions(w, m.Options); err != nil {
+ return
+ }
+ return
+}
+
+func (m *HopSetup) readFrom(r *bytes.Buffer) (err error) {
+ if err = readInt(r, &m.Versions.Min); err != nil {
+ return
+ }
+ if err = readInt(r, &m.Versions.Max); err != nil {
+ return
+ }
+ if m.Options, err = readSetupOptions(r); err != nil {
+ return
+ }
+ return
+}
+
+// NaclBox returns the first NaclBox option, or nil if there is none.
+func (m *HopSetup) NaclBox() *NaclBox {
+ for _, opt := range m.Options {
+ if b, ok := opt.(*NaclBox); ok {
+ return b
+ }
+ }
+ return nil
+}
+
+func (*NaclBox) code() setupOptionCode {
+ return naclBoxPublicKey
+}
+
+func (m *NaclBox) size() uint16 {
+ return uint16(len(m.PublicKey))
+}
+
+func (m *NaclBox) write(w io.Writer) error {
+ _, err := w.Write(m.PublicKey[:])
+ return err
+}
+
+func (m *NaclBox) read(r io.Reader) error {
+ _, err := io.ReadFull(r, m.PublicKey[:])
+ return err
+}
+
+func (m *HopSetupStream) writeTo(w io.Writer) error {
+ _, err := w.Write(m.Data)
+ return err
+}
+
+func (m *HopSetupStream) readFrom(r *bytes.Buffer) error {
+ m.Data = r.Bytes()
+ return nil
+}
diff --git a/profiles/internal/ipc/stream/message/counters.go b/profiles/internal/ipc/stream/message/counters.go
new file mode 100644
index 0000000..352710a
--- /dev/null
+++ b/profiles/internal/ipc/stream/message/counters.go
@@ -0,0 +1,53 @@
+package message
+
+import (
+ "fmt"
+
+ "v.io/x/ref/profiles/internal/ipc/stream/id"
+)
+
+// CounterID encapsulates the VCI and Flow used for flow control counter
+// accounting.
+type CounterID uint64
+
+// VCI returns the VCI encoded within the CounterID
+func (c *CounterID) VCI() id.VC { return id.VC(*c >> 32) }
+
+// Flow returns the Flow identifier encoded within the CounterID
+func (c *CounterID) Flow() id.Flow { return id.Flow(*c & 0xffffffff) }
+
+func (c *CounterID) String() string { return fmt.Sprintf("Flow:%d/VCI:%d", c.Flow(), c.VCI()) }
+
+// MakeCounterID creates a CounterID from the provided (vci, fid) pair.
+func MakeCounterID(vci id.VC, fid id.Flow) CounterID {
+ return CounterID(uint64(vci)<<32 | uint64(fid))
+}
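+
+// For illustration (values chosen only for the example): MakeCounterID(2, 3)
+// packs to 0x0000000200000003, from which VCI() recovers 2 (the high 32 bits)
+// and Flow() recovers 3 (the low 32 bits).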
+
+// Counters is a map from (VCI, Flow) to the number of bytes for that (VCI,
+// Flow) pair that the receiver is willing to read.
+//
+// Counters are not safe for concurrent access from multiple goroutines.
+//
+// When received in Control messages, clients can iterate over the map:
+// for cid, bytes := range counters {
+// fmt.Println("VCI=%d Flow=%d Bytes=%d", cid.VCI(), cid.Flow(), bytes)
+// }
+type Counters map[CounterID]uint32
+
+// NewCounters creates a new Counters object.
+func NewCounters() Counters { return Counters(make(map[CounterID]uint32)) }
+
+// Add should be called by the receiving end of a Flow to indicate that it is
+// ready to read 'bytes' more data for the flow identified by (vci, fid).
+func (c Counters) Add(vci id.VC, fid id.Flow, bytes uint32) {
+ c[MakeCounterID(vci, fid)] += bytes
+}
+
+func (c Counters) String() string {
+ ret := "map[ "
+ for cid, bytes := range c {
+ ret += fmt.Sprintf("%d@%d:%d ", cid.Flow(), cid.VCI(), bytes)
+ }
+ ret += "]"
+ return ret
+}
diff --git a/profiles/internal/ipc/stream/message/counters_test.go b/profiles/internal/ipc/stream/message/counters_test.go
new file mode 100644
index 0000000..8fb90c2
--- /dev/null
+++ b/profiles/internal/ipc/stream/message/counters_test.go
@@ -0,0 +1,61 @@
+package message
+
+import (
+ "testing"
+ "testing/quick"
+
+ "v.io/x/ref/profiles/internal/ipc/stream/id"
+)
+
+func TestCounterID(t *testing.T) {
+ tests := []struct {
+ vci id.VC
+ fid id.Flow
+ }{
+ {0, 0},
+ {1, 10},
+ {0xffeeddcc, 0xffaabbcc},
+ }
+ for _, test := range tests {
+ cid := MakeCounterID(test.vci, test.fid)
+ if g, w := cid.VCI(), test.vci; g != w {
+ t.Errorf("Got VCI %d want %d", g, w)
+ }
+ if g, w := cid.Flow(), test.fid; g != w {
+ t.Errorf("Got Flow %d want %d", g, w)
+ }
+ }
+}
+
+func TestCounterID_Random(t *testing.T) {
+ f := func(vci id.VC, fid id.Flow) bool {
+ cid := MakeCounterID(vci, fid)
+ return cid.VCI() == vci && cid.Flow() == fid
+ }
+ if err := quick.Check(f, nil); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestCounters(t *testing.T) {
+ f := func(vci id.VC, fid id.Flow, bytes []uint32) bool {
+ c := NewCounters()
+ var sum uint32
+ for _, bin := range bytes {
+ c.Add(vci, fid, bin)
+ if len(c) != 1 {
+ return false
+ }
+ sum += bin
+ for cid, bout := range c {
+ if cid.VCI() != vci || cid.Flow() != fid || bout != sum {
+ return false
+ }
+ }
+ }
+ return true
+ }
+ if err := quick.Check(f, nil); err != nil {
+ t.Error(err)
+ }
+}
diff --git a/profiles/internal/ipc/stream/message/data.go b/profiles/internal/ipc/stream/message/data.go
new file mode 100644
index 0000000..4de9e5b
--- /dev/null
+++ b/profiles/internal/ipc/stream/message/data.go
@@ -0,0 +1,41 @@
+package message
+
+import (
+ "fmt"
+
+ "v.io/x/ref/profiles/internal/ipc/stream/id"
+ "v.io/x/ref/profiles/internal/lib/iobuf"
+)
+
+// Data encapsulates an application data message.
+type Data struct {
+ VCI id.VC // Must be non-zero.
+ Flow id.Flow
+ flags uint8
+ Payload *iobuf.Slice
+}
+
+// Close returns true if the sender of the data message requested that the flow be closed.
+func (d *Data) Close() bool { return d.flags&0x1 == 1 }
+
+// SetClose sets the Close flag of the message.
+func (d *Data) SetClose() { d.flags |= 0x1 }
+
+// Release releases the Payload
+func (d *Data) Release() {
+ if d.Payload != nil {
+ d.Payload.Release()
+ d.Payload = nil
+ }
+}
+
+func (d *Data) PayloadSize() int {
+ if d.Payload == nil {
+ return 0
+ }
+ return d.Payload.Size()
+}
+
+func (d *Data) String() string {
+ return fmt.Sprintf("VCI:%d Flow:%d Flags:%02x Payload:(%d bytes)", d.VCI, d.Flow, d.flags, d.PayloadSize())
+}
diff --git a/profiles/internal/ipc/stream/message/message.go b/profiles/internal/ipc/stream/message/message.go
new file mode 100644
index 0000000..c8b2551
--- /dev/null
+++ b/profiles/internal/ipc/stream/message/message.go
@@ -0,0 +1,249 @@
+// Package message provides data structures and serialization/deserialization
+// methods for the messages exchanged by the implementation of the interfaces
+// defined in v.io/x/ref/profiles/internal/ipc/stream.
+package message
+
+// This file contains methods to read and write messages sent over the VIF.
+// Every message has the following format:
+//
+// +-----------------------------------------+
+// | Type (1 byte) | PayloadSize (3 bytes) |
+// +-----------------------------------------+
+// | Payload (PayloadSize bytes) |
+// +-----------------------------------------+
+//
+// Currently, there are 2 valid types:
+// 0 (controlType)
+// 1 (dataType)
+//
+// When Type == controlType, the message is:
+// +---------------------------------------------+
+// | 0 | PayloadSize (3 bytes) |
+// +---------------------------------------------+
+// | Cmd (1 byte) |
+// +---------------------------------------------+
+// | Data (PayloadSize - MACSize - 1 bytes) |
+// +---------------------------------------------+
+// | MAC (MACSize bytes) |
+// +---------------------------------------------+
+// Where Data is the serialized Control interface object.
+//
+// When Type == dataType, the message is:
+// +---------------------------------------------+
+// | 1 | PayloadSize (3 bytes) |
+// +---------------------------------------------+
+// | id.VCI (4 bytes) |
+// +---------------------------------------------+
+// | id.Flow (4 bytes) |
+// +---------------------------------------------+
+// | Flags (1 byte) |
+// +---------------------------------------------+
+// | MAC (MACSize bytes) |
+// +---------------------------------------------+
+// | Data (PayloadSize - 9 - MACSize bytes) |
+// +---------------------------------------------+
+// Where Data is the application data. The Data is encrypted separately; it is
+// not included in the MAC.
+//
+// A crypto.ControlCipher is used to encrypt the control data. The MACSize
+// comes from the ControlCipher. When used, the first word of the header,
+// containing the Type and PayloadSize, is encrypted with the cipher's Encrypt
+// method. The rest of the control data is encrypted with the cipher's Seal
+// method. This means that none of the data is observable by an adversary, but
+// the type and length are subject to corruption (the rest of the data is not).
+// This doesn't matter -- if the Type or PayloadSize is corrupted by an
+// adversary, the payload will be misread, and will fail to validate.
+//
+// We could potentially pass the Type and PayloadSize in the clear, but then the
+// framing would be observable, a (probably minor) information leak. There is
+// no reason to do so, so we encrypt everything.
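+//
+// As a rough sketch (illustrative only, not an additional part of the format
+// above): the unencrypted common header of a dataType message whose
+// PayloadSize is 300 bytes can be built with the helpers in coding.go:
+//
+//   var hdr [4]byte
+//   hdr[0] = dataType                  // Type = 1
+//   _ = write3ByteUint(hdr[1:4], 300)  // PayloadSize = 0x00 0x01 0x2C, big-endian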
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+
+ "v.io/x/lib/vlog"
+ "v.io/x/ref/profiles/internal/ipc/stream/crypto"
+ "v.io/x/ref/profiles/internal/ipc/stream/id"
+ "v.io/x/ref/profiles/internal/lib/iobuf"
+)
+
+const (
+ // Size (in bytes) of headers appended to application data payload in
+ // Data messages.
+ HeaderSizeBytes = commonHeaderSizeBytes + dataHeaderSizeBytes
+
+ commonHeaderSizeBytes = 4 // 1 byte type + 3 bytes payload length
+ dataHeaderSizeBytes = 9 // 4 byte id.VC + 4 byte id.Flow + 1 byte flags
+
+ // Make sure the first byte can't be ASCII to ensure that a VC
+ // header can never be confused with a web socket request.
+ // TODO(cnicolaou): remove the original controlType and dataType values
+ // when new binaries are pushed.
+ controlType = 0
+ controlTypeWS = 0x80
+ dataType = 1
+ dataTypeWS = 0x81
+)
+
+var (
+ emptyMessageErr = errors.New("message is empty")
+ corruptedMessageErr = errors.New("corrupted message")
+)
+
+// T is the interface implemented by all messages communicated over a VIF.
+type T interface {
+}
+
+// ReadFrom reads a message from the provided iobuf.Reader.
+//
+// Sample usage:
+// msg, err := message.ReadFrom(r)
+// switch m := msg.(type) {
+// case *Data:
+// notifyFlowOfReceivedData(m.VCI, m.Flow, m.Payload)
+// if m.Close() {
+// closeFlow(m.VCI, m.Flow)
+// }
+// case Control:
+// handleControlMessage(m)
+// }
+func ReadFrom(r *iobuf.Reader, c crypto.ControlCipher) (T, error) {
+ header, err := r.Read(commonHeaderSizeBytes)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read VC header: %v", err)
+ }
+ c.Decrypt(header.Contents)
+ msgType := header.Contents[0]
+ msgPayloadSize := read3ByteUint(header.Contents[1:4])
+ header.Release()
+ payload, err := r.Read(msgPayloadSize)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read payload of %d bytes for type %d: %v", msgPayloadSize, msgType, err)
+ }
+ macSize := c.MACSize()
+ switch msgType {
+ case controlType, controlTypeWS:
+ if !c.Open(payload.Contents) {
+ payload.Release()
+ return nil, corruptedMessageErr
+ }
+ m, err := readControl(bytes.NewBuffer(payload.Contents[:msgPayloadSize-macSize]))
+ payload.Release()
+ return m, err
+ case dataType, dataTypeWS:
+ if !c.Open(payload.Contents[0 : dataHeaderSizeBytes+macSize]) {
+ payload.Release()
+ return nil, corruptedMessageErr
+ }
+ m := &Data{
+ VCI: id.VC(read4ByteUint(payload.Contents[0:4])),
+ Flow: id.Flow(read4ByteUint(payload.Contents[4:8])),
+ flags: payload.Contents[8],
+ Payload: payload,
+ }
+ m.Payload.TruncateFront(uint(dataHeaderSizeBytes + macSize))
+ return m, nil
+ default:
+ payload.Release()
+ return nil, fmt.Errorf("unrecognized message type: %d", msgType)
+ }
+}
+
+// WriteTo serializes message and makes a single call to w.Write.
+// It is the inverse of ReadFrom.
+//
+// By writing the message in a single call to w.Write, confusion is avoided in
+// case multiple goroutines are calling Write on w simultaneously.
+//
+// If message is a Data message, the Payload contents will be Released
+// irrespective of the return value of this method.
+func WriteTo(w io.Writer, message T, c crypto.ControlCipher) error {
+ macSize := c.MACSize()
+ switch m := message.(type) {
+ case *Data:
+ payloadSize := m.PayloadSize() + dataHeaderSizeBytes + macSize
+ msg := mkHeaderSpace(m.Payload, uint(HeaderSizeBytes+macSize))
+ header := msg.Contents[0 : HeaderSizeBytes+macSize]
+ header[0] = dataType
+ if err := write3ByteUint(header[1:4], payloadSize); err != nil {
+ return err
+ }
+ write4ByteUint(header[4:8], uint32(m.VCI))
+ write4ByteUint(header[8:12], uint32(m.Flow))
+ header[12] = m.flags
+ EncryptMessage(msg.Contents, c)
+ _, err := w.Write(msg.Contents)
+ msg.Release()
+ return err
+ case Control:
+ var buf bytes.Buffer
+ // Prevent a few memory allocations by presizing the buffer to
+ // something that is large enough for typical control messages.
+ buf.Grow(256)
+ // Reserve space for the header
+ if err := extendBuffer(&buf, commonHeaderSizeBytes); err != nil {
+ return err
+ }
+ if err := writeControl(&buf, m); err != nil {
+ return err
+ }
+ if err := extendBuffer(&buf, macSize); err != nil {
+ return err
+ }
+ msg := buf.Bytes()
+ msg[0] = controlType
+ if err := write3ByteUint(msg[1:4], buf.Len()-commonHeaderSizeBytes); err != nil {
+ return err
+ }
+ EncryptMessage(msg, c)
+ _, err := w.Write(msg)
+ return err
+ default:
+ return fmt.Errorf("invalid message type %T", m)
+ }
+ return nil
+}
+
+// EncryptMessage encrypts the message's control data in place.
+func EncryptMessage(msg []byte, c crypto.ControlCipher) error {
+ if len(msg) == 0 {
+ return emptyMessageErr
+ }
+ n := len(msg)
+ switch msgType := msg[0]; msgType {
+ case controlType:
+ // skip
+ case dataType:
+ n = HeaderSizeBytes + c.MACSize()
+ default:
+ return fmt.Errorf("unrecognized message type: %d", msgType)
+ }
+ c.Encrypt(msg[0:commonHeaderSizeBytes])
+ c.Seal(msg[commonHeaderSizeBytes:n])
+ return nil
+}
+
+func mkHeaderSpace(slice *iobuf.Slice, space uint) *iobuf.Slice {
+ if slice == nil {
+ return iobuf.NewSlice(make([]byte, space))
+ }
+ if slice.ExpandFront(space) {
+ return slice
+ }
+ vlog.VI(10).Infof("Failed to expand slice by %d bytes. Copying", space)
+ contents := make([]byte, slice.Size()+int(space))
+ copy(contents[space:], slice.Contents)
+ slice.Release()
+ return iobuf.NewSlice(contents)
+}
+
+var emptyBytes [256]byte
+
+func extendBuffer(buf *bytes.Buffer, size int) error {
+ _, err := buf.Write(emptyBytes[:size])
+ return err
+}
diff --git a/profiles/internal/ipc/stream/message/message_test.go b/profiles/internal/ipc/stream/message/message_test.go
new file mode 100644
index 0000000..492ad97
--- /dev/null
+++ b/profiles/internal/ipc/stream/message/message_test.go
@@ -0,0 +1,209 @@
+package message
+
+import (
+ "bytes"
+ "encoding/binary"
+ "reflect"
+ "testing"
+
+ "v.io/v23/naming"
+ "v.io/x/ref/profiles/internal/ipc/version"
+ "v.io/x/ref/profiles/internal/lib/iobuf"
+)
+
+// testControlCipher is a super-simple cipher that XORs each byte of the
+// payload with 0xaa.
+type testControlCipher struct{}
+
+const testMACSize = 4
+
+func (*testControlCipher) MACSize() int {
+ return testMACSize
+}
+
+func testMAC(data []byte) []byte {
+ var h uint32
+ for _, b := range data {
+ h = (h << 1) ^ uint32(b)
+ }
+ var hash [4]byte
+ binary.BigEndian.PutUint32(hash[:], h)
+ return hash[:]
+}
+
+func (c *testControlCipher) Decrypt(data []byte) {
+ for i := range data {
+ data[i] ^= 0xaa
+ }
+}
+
+func (c *testControlCipher) Encrypt(data []byte) {
+ for i := range data {
+ data[i] ^= 0xaa
+ }
+}
+
+func (c *testControlCipher) Open(data []byte) bool {
+ mac := testMAC(data[:len(data)-testMACSize])
+ if !bytes.Equal(mac, data[len(data)-testMACSize:]) {
+ return false
+ }
+ c.Decrypt(data[:len(data)-testMACSize])
+ return true
+}
+
+func (c *testControlCipher) Seal(data []byte) error {
+ c.Encrypt(data[:len(data)-testMACSize])
+ mac := testMAC(data[:len(data)-testMACSize])
+ copy(data[len(data)-testMACSize:], mac)
+ return nil
+}
+
+func TestControl(t *testing.T) {
+ counters := NewCounters()
+ counters.Add(12, 13, 10240)
+ tests := []Control{
+ &OpenVC{VCI: 2,
+ DstEndpoint: version.Endpoint("tcp", "batman.com:1990", naming.FixedRoutingID(0xba7)),
+ SrcEndpoint: version.Endpoint("tcp", "google.com:80", naming.FixedRoutingID(0xba6)),
+ },
+ &OpenVC{
+ VCI: 4,
+ DstEndpoint: version.Endpoint("tcp", "batman.com:1990", naming.FixedRoutingID(0xba7)),
+ SrcEndpoint: version.Endpoint("tcp", "google.com:80", naming.FixedRoutingID(0xba6)),
+ Counters: counters,
+ },
+
+ &CloseVC{VCI: 1},
+ &CloseVC{VCI: 2, Error: "some error"},
+
+ &SetupVC{
+ VCI: 1,
+ Versions: version.Range{Min: 34, Max: 56},
+ LocalEndpoint: version.Endpoint("tcp", "batman.com:1990", naming.FixedRoutingID(0xba7)),
+ RemoteEndpoint: version.Endpoint("tcp", "bugsbunny.com:1940", naming.FixedRoutingID(0xbb)),
+ Counters: counters,
+ Options: []SetupOption{
+ &NaclBox{PublicKey: [32]byte{'h', 'e', 'l', 'l', 'o', 'w', 'o', 'r', 'l', 'd'}},
+ &NaclBox{PublicKey: [32]byte{7, 67, 31}},
+ },
+ },
+ // SetupVC without endpoints
+ &SetupVC{
+ VCI: 1,
+ Versions: version.Range{Min: 34, Max: 56},
+ Counters: counters,
+ Options: []SetupOption{
+ &NaclBox{PublicKey: [32]byte{'h', 'e', 'l', 'l', 'o', 'w', 'o', 'r', 'l', 'd'}},
+ &NaclBox{PublicKey: [32]byte{7, 67, 31}},
+ },
+ },
+
+ &AddReceiveBuffers{},
+ &AddReceiveBuffers{Counters: counters},
+
+ &OpenFlow{VCI: 1, Flow: 10, InitialCounters: 1 << 24},
+
+ &HopSetup{
+ Versions: version.Range{Min: 21, Max: 71},
+ Options: []SetupOption{
+ &NaclBox{PublicKey: [32]byte{'h', 'e', 'l', 'l', 'o', 'w', 'o', 'r', 'l', 'd'}},
+ &NaclBox{PublicKey: [32]byte{7, 67, 31}},
+ },
+ },
+
+ &HopSetupStream{Data: []byte("HelloWorld")},
+ }
+
+ var c testControlCipher
+ pool := iobuf.NewPool(0)
+ for i, msg := range tests {
+ var buf bytes.Buffer
+ if err := WriteTo(&buf, msg, &c); err != nil {
+ t.Errorf("WriteTo(%T) (test #%d) failed: %v", msg, i, err)
+ continue
+ }
+ reader := iobuf.NewReader(pool, &buf)
+ read, err := ReadFrom(reader, &c)
+ reader.Close()
+ if err != nil {
+ t.Errorf("ReadFrom failed (test #%d): %v", i, err)
+ continue
+ }
+ if !reflect.DeepEqual(msg, read) {
+ t.Errorf("Test #%d: Got %T = %+v, want %T = %+v", i, read, read, msg, msg)
+ }
+ }
+}
+
+func TestData(t *testing.T) {
+ tests := []struct {
+ Header Data
+ Payload string
+ }{
+ {Data{VCI: 10, Flow: 3}, "abcd"},
+ {Data{VCI: 10, Flow: 3, flags: 1}, "batman"},
+ }
+
+ var c testControlCipher
+ pool := iobuf.NewPool(0)
+ allocator := iobuf.NewAllocator(pool, HeaderSizeBytes+testMACSize)
+ for i, test := range tests {
+ var buf bytes.Buffer
+ msgW := test.Header
+ msgW.Payload = allocator.Copy([]byte(test.Payload))
+ if err := WriteTo(&buf, &msgW, &c); err != nil {
+ t.Errorf("WriteTo(%v) failed: %v", i, err)
+ continue
+ }
+ reader := iobuf.NewReader(pool, &buf)
+ read, err := ReadFrom(reader, &c)
+ if err != nil {
+ t.Errorf("ReadFrom(%v) failed: %v", i, err)
+ continue
+ }
+ msgR := read.(*Data)
+ // Must compare Payload and the rest of the message separately.
+ // reflect.DeepEqual(msgR, &msgW) will not cut it because the
+ // iobuf.Slice objects might not pass the DeepEqual test. That
+ // is fine, the important thing is for iobuf.Slice.Content to
+ // match.
+ if g, w := string(msgR.Payload.Contents), test.Payload; g != w {
+ t.Errorf("Mismatched payloads in test #%d. Got %q want %q", i, g, w)
+ }
+ msgR.Release()
+ if !reflect.DeepEqual(&test.Header, msgR) {
+ t.Errorf("Mismatched headers in test #%d. Got %+v want %+v", i, msgR, &test.Header)
+ }
+ }
+}
+
+func TestDataNoPayload(t *testing.T) {
+ tests := []Data{
+ {VCI: 10, Flow: 3},
+ {VCI: 11, Flow: 4, flags: 10},
+ }
+ var c testControlCipher
+ pool := iobuf.NewPool(0)
+ for _, test := range tests {
+ var buf bytes.Buffer
+ if err := WriteTo(&buf, &test, &c); err != nil {
+ t.Errorf("WriteTo(%v) failed: %v", test, err)
+ continue
+ }
+ read, err := ReadFrom(iobuf.NewReader(pool, &buf), &c)
+ if err != nil {
+ t.Errorf("ReadFrom(%v) failed: %v", test, err)
+ continue
+ }
+ msgR := read.(*Data)
+ if msgR.PayloadSize() != 0 {
+ t.Errorf("ReadFrom(WriteTo(%v)) returned payload of %d bytes", test, msgR.PayloadSize())
+ continue
+ }
+ msgR.Payload = nil
+ if !reflect.DeepEqual(&test, msgR) {
+ t.Errorf("Wrote %v, Read %v", test, read)
+ }
+ }
+}
diff --git a/profiles/internal/ipc/stream/model.go b/profiles/internal/ipc/stream/model.go
new file mode 100644
index 0000000..b98e3db
--- /dev/null
+++ b/profiles/internal/ipc/stream/model.go
@@ -0,0 +1,133 @@
+package stream
+
+import (
+ "io"
+
+ "v.io/v23/naming"
+ "v.io/v23/security"
+)
+
+// Flow is the interface for a flow-controlled channel multiplexed on a Virtual
+// Circuit (VC) (and its underlying network connections).
+//
+// This allows for a single level of multiplexing and flow-control over
+// multiple concurrent streams (that may be used for RPCs) over multiple
+// VCs over a single underlying network connection.
+type Flow interface {
+ io.ReadWriteCloser
+
+ // LocalEndpoint returns the local veyron Endpoint
+ LocalEndpoint() naming.Endpoint
+ // RemoteEndpoint returns the remote veyron Endpoint
+ RemoteEndpoint() naming.Endpoint
+ // LocalPrincipal returns the Principal at the local end of the flow that has authenticated with the remote end.
+ LocalPrincipal() security.Principal
+ // LocalBlessings returns the blessings presented by the local end of the flow during authentication.
+ LocalBlessings() security.Blessings
+ // RemoteBlessings returns the blessings presented by the remote end of the flow during authentication.
+ RemoteBlessings() security.Blessings
+ // RemoteDischarges returns the discharges presented by the remote end of the flow during authentication.
+ //
+ // The discharges are organized in a map keyed by the discharge-identifier.
+ RemoteDischarges() map[string]security.Discharge
+ // Cancel, like Close, closes the Flow but unlike Close discards any queued writes.
+ Cancel()
+ // IsClosed returns true if the flow has been closed or cancelled.
+ IsClosed() bool
+ // Closed returns a channel that remains open until the flow has been closed.
+ Closed() <-chan struct{}
+
+ // SetDeadline causes reads and writes to the flow to be
+ // cancelled when the given channel is closed.
+ SetDeadline(deadline <-chan struct{})
+
+ // VCDataCache returns the stream.VCDataCache object that allows information to be
+ // shared across the Flow's parent VC.
+ VCDataCache() VCDataCache
+}
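+
+// A minimal usage sketch (illustrative only; "flow" is assumed to come from
+// Connector.Connect or Listener.Accept and "buf" is any byte slice):
+//
+//   deadline := make(chan struct{})
+//   flow.SetDeadline(deadline)
+//   close(deadline)          // cancels pending Reads/Writes on the flow
+//   _, err := flow.Read(buf) // err is a net.Error with Timeout() == true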
+
+// VCDataCache is a thread-safe store that allows data to be shared across a VC,
+// with the intention of caching data that reappears over multiple flows.
+type VCDataCache interface {
+ // GetOrInsert returns the 'value' associated with 'key'. If an entry already exists in the
+ // cache with the 'key', the 'value' is returned, otherwise 'create' is called to create a new
+ // value N, the cache is updated, and N is returned. GetOrInsert may be called from
+ // multiple goroutines concurrently.
+ GetOrInsert(key interface{}, create func() interface{}) interface{}
+}
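+
+// A minimal GetOrInsert sketch (illustrative only; "cache" is assumed to come
+// from Flow.VCDataCache, and cachedThing/newCachedThing are hypothetical):
+//
+//   v := cache.GetOrInsert("key", func() interface{} {
+//       return newCachedThing() // runs only if "key" is not already present
+//   })
+//   thing := v.(*cachedThing)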
+
+// FlowOpt is the interface for all Flow options.
+type FlowOpt interface {
+ IPCStreamFlowOpt()
+}
+
+// Listener is the interface for accepting Flows created by a remote process.
+type Listener interface {
+ // Accept blocks until a new Flow has been initiated by a remote process.
+ // TODO(toddw): This should be:
+ // Accept() (Flow, Connector, error)
+ Accept() (Flow, error)
+
+ // Close prevents new Flows from being accepted on this Listener.
+ // Previously accepted Flows are not closed down.
+ Close() error
+}
+
+// ListenerOpt is the interface for all options that control the creation of a
+// Listener.
+type ListenerOpt interface {
+ IPCStreamListenerOpt()
+}
+
+// Connector is the interface for initiating Flows to a remote process over a
+// Virtual Circuit (VC).
+type Connector interface {
+ Connect(opts ...FlowOpt) (Flow, error)
+}
+
+// VC is the interface for creating authenticated and secure end-to-end
+// streams.
+//
+// VCs are multiplexed onto underlying network connections and can span
+// multiple hops. Authentication and encryption are end-to-end, even though
+// underlying network connections span a single hop.
+type VC interface {
+ Connector
+ Listen() (Listener, error)
+}
+
+// VCOpt is the interface for all VC options.
+type VCOpt interface {
+ IPCStreamVCOpt()
+}
+
+// Manager is the interface for managing the creation of VCs.
+type Manager interface {
+ // Listen creates a Listener that can be used to accept Flows initiated
+ // with the provided network address.
+ //
+ // For example:
+ // ln, ep, err := Listen("tcp", ":0")
+ // for {
+ // flow, err := ln.Accept()
+ // // process flow
+ // }
+ // can be used to accept Flows initiated by remote processes to the endpoint
+ // identified by the returned Endpoint.
+ Listen(protocol, address string, opts ...ListenerOpt) (Listener, naming.Endpoint, error)
+
+ // Dial creates a VC to the provided remote endpoint.
+ Dial(remote naming.Endpoint, opts ...VCOpt) (VC, error)
+
+ // ShutdownEndpoint closes all VCs (and Flows and Listeners over it)
+ // involving the provided remote endpoint.
+ ShutdownEndpoint(remote naming.Endpoint)
+
+ // Shutdown closes all VCs and Listeners (and Flows over them) and
+ // frees up internal data structures.
+ // The Manager is not usable after Shutdown has been called.
+ Shutdown()
+
+ // RoutingID returns the Routing ID associated with the VC.
+ RoutingID() naming.RoutingID
+}
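+
+// A matching dial-side sketch (illustrative only; "manager" is assumed to be a
+// concrete Manager and "ep" an Endpoint learned from the remote end):
+//
+//   vc, err := manager.Dial(ep)
+//   if err != nil { /* handle the error */ }
+//   flow, err := vc.Connect()
+//   // flow is an io.ReadWriteCloser carried over the VC.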
diff --git a/profiles/internal/ipc/stream/vc/auth.go b/profiles/internal/ipc/stream/vc/auth.go
new file mode 100644
index 0000000..d29edc2
--- /dev/null
+++ b/profiles/internal/ipc/stream/vc/auth.go
@@ -0,0 +1,193 @@
+package vc
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+
+ "v.io/x/ref/profiles/internal/ipc/stream/crypto"
+ "v.io/x/ref/profiles/internal/lib/iobuf"
+
+ "v.io/v23/ipc/version"
+ "v.io/v23/security"
+ "v.io/v23/vom"
+)
+
+var (
+ authServerContextTag = []byte("VCauthS\x00")
+ authClientContextTag = []byte("VCauthC\x00")
+)
+
+var (
+ errSameChannelPublicKey = errors.New("same public keys for both ends of the channel")
+ errChannelIDMismatch = errors.New("channel id does not match expectation")
+ errChecksumMismatch = errors.New("checksum mismatch")
+ errInvalidSignatureInMessage = errors.New("signature does not verify in authentication handshake message")
+ errNoCertificatesReceived = errors.New("no certificates received")
+ errSingleCertificateRequired = errors.New("exactly one X.509 certificate chain with exactly one certificate is required")
+)
+
+// AuthenticateAsServer executes the authentication protocol at the server and
+// returns the blessings used to authenticate the client.
+func AuthenticateAsServer(conn io.ReadWriteCloser, principal security.Principal, server security.Blessings, dc DischargeClient, crypter crypto.Crypter, v version.IPCVersion) (client security.Blessings, err error) {
+ if server.IsZero() {
+ return security.Blessings{}, errors.New("no blessings to present as a server")
+ }
+ var discharges []security.Discharge
+ if tpcavs := server.ThirdPartyCaveats(); len(tpcavs) > 0 && dc != nil {
+ discharges = dc.PrepareDischarges(nil, tpcavs, security.DischargeImpetus{})
+ }
+ if err = writeBlessings(conn, authServerContextTag, crypter, principal, server, discharges, v); err != nil {
+ return
+ }
+ if client, _, err = readBlessings(conn, authClientContextTag, crypter, v); err != nil {
+ return
+ }
+ return
+}
+
+// AuthenticateAsClient executes the authentication protocol at the client and
+// returns the blessings used to authenticate both ends.
+//
+// The client will only share its blessings if the server (who shares its blessings first)
+// is authorized as per the authorizer for this RPC.
+func AuthenticateAsClient(conn io.ReadWriteCloser, crypter crypto.Crypter, params security.CallParams, auth *ServerAuthorizer, v version.IPCVersion) (server, client security.Blessings, serverDischarges map[string]security.Discharge, err error) {
+ if server, serverDischarges, err = readBlessings(conn, authServerContextTag, crypter, v); err != nil {
+ return
+ }
+ // Authorize the server based on the provided authorizer.
+ if auth != nil {
+ params.RemoteBlessings = server
+ params.RemoteDischarges = serverDischarges
+ if err = auth.Authorize(params); err != nil {
+ return
+ }
+ }
+
+ // The client shares its blessings at RPC time (as the blessings may vary across
+ // RPCs). During VC handshake, the client simply sends a self-signed blessing
+ // in order to reveal its public key to the server.
+ principal := params.LocalPrincipal
+ client, err = principal.BlessSelf("vcauth")
+ if err != nil {
+ return security.Blessings{}, security.Blessings{}, nil, fmt.Errorf("failed to created self blessing: %v", err)
+ }
+ if err = writeBlessings(conn, authClientContextTag, crypter, principal, client, nil, v); err != nil {
+ return
+ }
+ return
+}
+
+func writeBlessings(w io.Writer, tag []byte, crypter crypto.Crypter, p security.Principal, b security.Blessings, discharges []security.Discharge, v version.IPCVersion) error {
+ signature, err := p.Sign(append(tag, crypter.ChannelBinding()...))
+ if err != nil {
+ return err
+ }
+ var buf bytes.Buffer
+ enc, err := vom.NewEncoder(&buf)
+ if err != nil {
+ return err
+ }
+ if err := enc.Encode(signature); err != nil {
+ return err
+ }
+ if err := enc.Encode(b); err != nil {
+ return err
+ }
+ if v >= version.IPCVersion7 {
+ if err := enc.Encode(marshalDischarges(discharges)); err != nil {
+ return err
+ }
+ } else if v >= version.IPCVersion5 {
+ if err := enc.Encode(discharges); err != nil {
+ return err
+ }
+ }
+ msg, err := crypter.Encrypt(iobuf.NewSlice(buf.Bytes()))
+ if err != nil {
+ return err
+ }
+ defer msg.Release()
+ enc, err = vom.NewEncoder(w)
+ if err != nil {
+ return err
+ }
+ return enc.Encode(msg.Contents)
+}
+
+func readBlessings(r io.Reader, tag []byte, crypter crypto.Crypter, v version.IPCVersion) (security.Blessings, map[string]security.Discharge, error) {
+ var msg []byte
+ var noBlessings security.Blessings
+ dec, err := vom.NewDecoder(r)
+ if err != nil {
+ return noBlessings, nil, fmt.Errorf("failed to create new decoder: %v", err)
+ }
+ if err := dec.Decode(&msg); err != nil {
+ return noBlessings, nil, fmt.Errorf("failed to read handshake message: %v", err)
+ }
+ buf, err := crypter.Decrypt(iobuf.NewSlice(msg))
+ if err != nil {
+ return noBlessings, nil, err
+ }
+ defer buf.Release()
+ dec, err = vom.NewDecoder(bytes.NewReader(buf.Contents))
+ if err != nil {
+ return noBlessings, nil, fmt.Errorf("failed to create new decoder: %v", err)
+ }
+
+ var (
+ blessings security.Blessings
+ sig security.Signature
+ )
+ if err = dec.Decode(&sig); err != nil {
+ return noBlessings, nil, err
+ }
+ if err = dec.Decode(&blessings); err != nil {
+ return noBlessings, nil, err
+ }
+ var discharges map[string]security.Discharge
+ if v >= version.IPCVersion7 {
+ var wired []security.WireDischarge
+ if err = dec.Decode(&wired); err != nil {
+ return noBlessings, nil, err
+ }
+ if len(wired) > 0 {
+ discharges = make(map[string]security.Discharge)
+ for _, d := range unmarshalDischarges(wired) {
+ discharges[d.ID()] = d
+ }
+ }
+ } else if v >= version.IPCVersion5 {
+ var list []security.Discharge
+ if err = dec.Decode(&list); err != nil {
+ return noBlessings, nil, err
+ }
+ if len(list) > 0 {
+ discharges = make(map[string]security.Discharge)
+ for _, d := range list {
+ discharges[d.ID()] = d
+ }
+ }
+ }
+ if !sig.Verify(blessings.PublicKey(), append(tag, crypter.ChannelBinding()...)) {
+ return noBlessings, nil, errInvalidSignatureInMessage
+ }
+ return blessings, discharges, nil
+}
+
+func marshalDischarges(discharges []security.Discharge) []security.WireDischarge {
+ wire := make([]security.WireDischarge, len(discharges))
+ for i, d := range discharges {
+ wire[i] = security.MarshalDischarge(d)
+ }
+ return wire
+}
+
+func unmarshalDischarges(wire []security.WireDischarge) []security.Discharge {
+ discharges := make([]security.Discharge, len(wire))
+ for i, w := range wire {
+ discharges[i] = security.NewDischarge(w)
+ }
+ return discharges
+}
diff --git a/profiles/internal/ipc/stream/vc/data_cache.go b/profiles/internal/ipc/stream/vc/data_cache.go
new file mode 100644
index 0000000..8b75ed0
--- /dev/null
+++ b/profiles/internal/ipc/stream/vc/data_cache.go
@@ -0,0 +1,40 @@
+package vc
+
+import (
+ "sync"
+)
+
+// dataCache is a thread-safe map for any two types.
+type dataCache struct {
+ sync.RWMutex
+ m map[interface{}]interface{}
+}
+
+func newDataCache() *dataCache {
+ return &dataCache{m: make(map[interface{}]interface{})}
+}
+
+// GetOrInsert first checks if the key exists in the cache with a reader lock.
+// If it doesn't exist, it acquires a writer lock, creates and stores the new
+// value using create, and returns that value.
+func (c *dataCache) GetOrInsert(key interface{}, create func() interface{}) interface{} {
+ // We use the read lock for the fastpath. This should be the more common case, so we rarely
+ // need a writer lock.
+ c.RLock()
+ value, exists := c.m[key]
+ c.RUnlock()
+ if exists {
+ return value
+ }
+ // We acquire the writer lock for the slowpath, and need to re-check if the key exists
+ // in the map, since another goroutine may have snuck in.
+ c.Lock()
+ defer c.Unlock()
+ value, exists = c.m[key]
+ if exists {
+ return value
+ }
+ value = create()
+ c.m[key] = value
+ return value
+}
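+
+// Behavior sketch (illustrative only): for a given key, create runs at most
+// once, since it is invoked only under the write lock after re-checking the map.
+//
+//   c := newDataCache()
+//   a := c.GetOrInsert("k", func() interface{} { return 1 }) // create runs; a == 1
+//   b := c.GetOrInsert("k", func() interface{} { return 2 }) // create skipped; b == 1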
diff --git a/profiles/internal/ipc/stream/vc/doc.go b/profiles/internal/ipc/stream/vc/doc.go
new file mode 100644
index 0000000..a306a66
--- /dev/null
+++ b/profiles/internal/ipc/stream/vc/doc.go
@@ -0,0 +1,2 @@
+// Package vc provides implementations of the VC and Flow interfaces in veyron/profiles/internal/ipc/stream.
+package vc
diff --git a/profiles/internal/ipc/stream/vc/flow.go b/profiles/internal/ipc/stream/vc/flow.go
new file mode 100644
index 0000000..e57ddc7
--- /dev/null
+++ b/profiles/internal/ipc/stream/vc/flow.go
@@ -0,0 +1,58 @@
+package vc
+
+import (
+ "v.io/v23/naming"
+ "v.io/v23/security"
+ "v.io/x/ref/profiles/internal/ipc/stream"
+)
+
+type flow struct {
+ authN // authentication information.
+ *reader
+ *writer
+ localEndpoint, remoteEndpoint naming.Endpoint
+ dataCache *dataCache
+}
+
+type authN interface {
+ LocalPrincipal() security.Principal
+ LocalBlessings() security.Blessings
+ RemoteBlessings() security.Blessings
+ RemoteDischarges() map[string]security.Discharge
+}
+
+func (f *flow) LocalEndpoint() naming.Endpoint { return f.localEndpoint }
+func (f *flow) RemoteEndpoint() naming.Endpoint { return f.remoteEndpoint }
+
+func (f *flow) Close() error {
+ f.reader.Close()
+ f.writer.Close()
+ return nil
+}
+
+// SetDeadline sets a deadline channel on the flow. Reads and writes
+// will be cancelled if the channel is closed.
+func (f *flow) SetDeadline(deadline <-chan struct{}) {
+ f.reader.SetDeadline(deadline)
+ f.writer.SetDeadline(deadline)
+}
+
+// Shutdown closes the flow and discards any queued up write buffers.
+// This is appropriate when the flow has been closed by the remote end.
+func (f *flow) Shutdown() {
+ f.reader.Close()
+ f.writer.shutdown(true)
+}
+
+// Cancel closes the flow and discards any queued up write buffers.
+// This is appropriate when the flow is being cancelled locally.
+func (f *flow) Cancel() {
+ f.reader.Close()
+ f.writer.shutdown(false)
+}
+
+// VCDataCache returns the stream.VCDataCache object that allows information to be
+// shared across the Flow's parent VC.
+func (f *flow) VCDataCache() stream.VCDataCache {
+ return f.dataCache
+}
diff --git a/profiles/internal/ipc/stream/vc/init.go b/profiles/internal/ipc/stream/vc/init.go
new file mode 100644
index 0000000..4fc3889
--- /dev/null
+++ b/profiles/internal/ipc/stream/vc/init.go
@@ -0,0 +1,63 @@
+package vc
+
+import (
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "fmt"
+
+ "v.io/v23/security"
+ "v.io/x/lib/vlog"
+)
+
+var AnonymousPrincipal security.Principal
+
+func init() {
+ key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ if err != nil {
+ vlog.Fatalf("could not create private key for anonymous principal: %v", err)
+ }
+ store := &anonymousBlessingStore{k: security.NewECDSAPublicKey(&key.PublicKey)}
+ if AnonymousPrincipal, err = security.CreatePrincipal(security.NewInMemoryECDSASigner(key), store, nil); err != nil {
+ vlog.Fatalf("could not create anonymous principal: %v", err)
+ }
+ if store.b, err = AnonymousPrincipal.BlessSelf("anonymous"); err != nil {
+ vlog.Fatalf("failed to generate the one blessing to be used by the anonymous principal: %v", err)
+ }
+}
+
+// TODO(ashankar,ataly): Figure out what to do with this!
+// (Most likely move the BlessingStore implementation from veyron/profiles/internal/rt to veyron/security
+// and use that?)
+type anonymousBlessingStore struct {
+ k security.PublicKey
+ b security.Blessings
+}
+
+func (s *anonymousBlessingStore) Set(security.Blessings, security.BlessingPattern) (security.Blessings, error) {
+ return security.Blessings{}, fmt.Errorf("cannot store blessings with an anonymous principal")
+}
+
+func (s *anonymousBlessingStore) ForPeer(...string) security.Blessings {
+ return s.b
+}
+
+func (s *anonymousBlessingStore) SetDefault(security.Blessings) error {
+ return fmt.Errorf("cannot change default blessing associated with the anonymous principal")
+}
+
+func (s *anonymousBlessingStore) Default() security.Blessings {
+ return s.b
+}
+
+func (s *anonymousBlessingStore) PublicKey() security.PublicKey {
+ return s.k
+}
+
+func (s *anonymousBlessingStore) PeerBlessings() map[security.BlessingPattern]security.Blessings {
+ return nil
+}
+
+func (anonymousBlessingStore) DebugString() string {
+ return "anonymous BlessingStore"
+}
diff --git a/profiles/internal/ipc/stream/vc/knobs.go b/profiles/internal/ipc/stream/vc/knobs.go
new file mode 100644
index 0000000..dc48082
--- /dev/null
+++ b/profiles/internal/ipc/stream/vc/knobs.go
@@ -0,0 +1,23 @@
+package vc
+
+const (
+ // Maximum size (in bytes) of application data to write out in a single message.
+ MaxPayloadSizeBytes = 1 << 16 // 64KB
+
+ // Number of bytes that a receiver is willing to buffer for a flow.
+ DefaultBytesBufferedPerFlow = 1 << 20 // 1MB
+
+ // Maximum number of bytes to steal from the shared pool of receive
+ // buffers for the first write of a new Flow.
+ MaxSharedBytes = 1 << 12 // 4KB
+
+ // Number of Flow IDs reserved for possible future use.
+ NumReservedFlows = 10
+
+ // Number of VC IDs reserved for special use.
+ NumReservedVCs = 10
+
+ // Special Flow ID used for information specific to the VC
+ // (and not any specific flow)
+ SharedFlowID = 0
+)
diff --git a/profiles/internal/ipc/stream/vc/listener.go b/profiles/internal/ipc/stream/vc/listener.go
new file mode 100644
index 0000000..42946b5
--- /dev/null
+++ b/profiles/internal/ipc/stream/vc/listener.go
@@ -0,0 +1,42 @@
+package vc
+
+import (
+ "errors"
+
+ "v.io/x/ref/lib/upcqueue"
+ "v.io/x/ref/profiles/internal/ipc/stream"
+)
+
+var errListenerClosed = errors.New("Listener has been closed")
+
+type listener struct {
+ q *upcqueue.T
+}
+
+var _ stream.Listener = (*listener)(nil)
+
+func newListener() *listener { return &listener{q: upcqueue.New()} }
+
+func (l *listener) Enqueue(f stream.Flow) error {
+ err := l.q.Put(f)
+ if err == upcqueue.ErrQueueIsClosed {
+ return errListenerClosed
+ }
+ return err
+}
+
+func (l *listener) Accept() (stream.Flow, error) {
+ item, err := l.q.Get(nil)
+ if err == upcqueue.ErrQueueIsClosed {
+ return nil, errListenerClosed
+ }
+ if err != nil {
+ return nil, err
+ }
+ return item.(stream.Flow), nil
+}
+
+func (l *listener) Close() error {
+ l.q.Close()
+ return nil
+}
diff --git a/profiles/internal/ipc/stream/vc/listener_test.go b/profiles/internal/ipc/stream/vc/listener_test.go
new file mode 100644
index 0000000..f240b3f
--- /dev/null
+++ b/profiles/internal/ipc/stream/vc/listener_test.go
@@ -0,0 +1,60 @@
+package vc
+
+import (
+ "testing"
+
+ "v.io/v23/naming"
+ "v.io/v23/security"
+ "v.io/x/ref/profiles/internal/ipc/stream"
+)
+
+type noopFlow struct{}
+
+// net.Conn methods
+func (*noopFlow) Read([]byte) (int, error) { return 0, nil }
+func (*noopFlow) Write([]byte) (int, error) { return 0, nil }
+func (*noopFlow) Close() error { return nil }
+func (*noopFlow) IsClosed() bool { return false }
+func (*noopFlow) Closed() <-chan struct{} { return nil }
+func (*noopFlow) Cancel() {}
+func (*noopFlow) LocalEndpoint() naming.Endpoint { return nil }
+func (*noopFlow) RemoteEndpoint() naming.Endpoint { return nil }
+
+// Other stream.Flow methods
+func (*noopFlow) LocalPrincipal() security.Principal { return nil }
+func (*noopFlow) LocalBlessings() security.Blessings { return security.Blessings{} }
+func (*noopFlow) RemoteBlessings() security.Blessings { return security.Blessings{} }
+func (*noopFlow) RemoteDischarges() map[string]security.Discharge { return nil }
+func (*noopFlow) SetDeadline(<-chan struct{}) {}
+func (*noopFlow) VCDataCache() stream.VCDataCache { return nil }
+
+func TestListener(t *testing.T) {
+ ln := newListener()
+ f1, f2 := &noopFlow{}, &noopFlow{}
+
+ if err := ln.Enqueue(f1); err != nil {
+ t.Error(err)
+ }
+ if err := ln.Enqueue(f2); err != nil {
+ t.Error(err)
+ }
+ if f, err := ln.Accept(); f != f1 || err != nil {
+ t.Errorf("Got (%p, %v) want (%p, nil)", f, err, f1)
+ }
+ if f, err := ln.Accept(); f != f2 || err != nil {
+ t.Errorf("Got (%p, %v) want (%p, nil)", f, err, f2)
+ }
+ if err := ln.Close(); err != nil {
+ t.Error(err)
+ }
+ // Close-ing multiple times is fine.
+ if err := ln.Close(); err != nil {
+ t.Error(err)
+ }
+ if err := ln.Enqueue(f1); err != errListenerClosed {
+ t.Error(err)
+ }
+ if f, err := ln.Accept(); f != nil || err != errListenerClosed {
+ t.Errorf("Accept returned (%p, %v) wanted (nil, %v)", f, err, errListenerClosed)
+ }
+}
diff --git a/profiles/internal/ipc/stream/vc/reader.go b/profiles/internal/ipc/stream/vc/reader.go
new file mode 100644
index 0000000..b24cd5a
--- /dev/null
+++ b/profiles/internal/ipc/stream/vc/reader.go
@@ -0,0 +1,108 @@
+package vc
+
+import (
+ "fmt"
+ "io"
+ "sync"
+ "sync/atomic"
+
+ vsync "v.io/x/ref/lib/sync"
+ "v.io/x/ref/lib/upcqueue"
+ "v.io/x/ref/profiles/internal/lib/iobuf"
+)
+
+// readHandler is the interface used by the reader to notify other components
+// of the number of bytes returned in Read calls.
+type readHandler interface {
+ HandleRead(bytes uint)
+}
+
+// reader implements the io.Reader and SetReadDeadline interfaces for a Flow,
+// backed by iobuf.Slice objects read from an upcqueue.
+type reader struct {
+ handler readHandler
+ src *upcqueue.T
+ mu sync.Mutex
+ buf *iobuf.Slice // GUARDED_BY(mu)
+ deadline <-chan struct{} // GUARDED_BY(mu)
+ totalBytes uint32
+}
+
+func newReader(h readHandler) *reader {
+ return &reader{handler: h, src: upcqueue.New()}
+}
+
+func (r *reader) Close() {
+ r.src.Close()
+}
+
+func (r *reader) Read(b []byte) (int, error) {
+ // net.Conn requires that all methods be invokable by multiple
+ // goroutines simultaneously. Read calls are serialized to ensure
+ // contiguous chunks of data are provided from each Read call.
+ r.mu.Lock()
+ n, err := r.readLocked(b)
+ r.mu.Unlock()
+ atomic.AddUint32(&r.totalBytes, uint32(n))
+ if n > 0 {
+ r.handler.HandleRead(uint(n))
+ }
+ return n, err
+}
+
+func (r *reader) readLocked(b []byte) (int, error) {
+ if r.buf == nil {
+ slice, err := r.src.Get(r.deadline)
+ if err != nil {
+ switch err {
+ case upcqueue.ErrQueueIsClosed:
+ return 0, io.EOF
+ case vsync.ErrCanceled:
+ // As per net.Conn.Read specification
+ return 0, timeoutError{}
+ default:
+ return 0, fmt.Errorf("upcqueue.Get failed: %v", err)
+ }
+ }
+ r.buf = slice.(*iobuf.Slice)
+ }
+ copied := 0
+ for r.buf.Size() <= len(b) {
+ n := copy(b, r.buf.Contents)
+ copied += n
+ b = b[n:]
+ r.buf.Release()
+ r.buf = nil
+
+ slice, err := r.src.TryGet()
+ if err != nil {
+ return copied, nil
+ }
+ r.buf = slice.(*iobuf.Slice)
+ }
+ n := copy(b, r.buf.Contents)
+ r.buf.TruncateFront(uint(n))
+ copied += n
+ return copied, nil
+}
+
+func (r *reader) SetDeadline(deadline <-chan struct{}) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ r.deadline = deadline
+}
+
+func (r *reader) BytesRead() uint32 {
+ return atomic.LoadUint32(&r.totalBytes)
+}
+
+func (r *reader) Put(slice *iobuf.Slice) error {
+ return r.src.Put(slice)
+}
+
+// timeoutError implements net.Error with Timeout returning true.
+type timeoutError struct{}
+
+func (t timeoutError) Error() string { return "deadline exceeded" }
+func (t timeoutError) Timeout() bool { return true }
+func (t timeoutError) Temporary() bool { return false }
diff --git a/profiles/internal/ipc/stream/vc/reader_test.go b/profiles/internal/ipc/stream/vc/reader_test.go
new file mode 100644
index 0000000..caa05c0
--- /dev/null
+++ b/profiles/internal/ipc/stream/vc/reader_test.go
@@ -0,0 +1,108 @@
+package vc
+
+import (
+ "io"
+ "net"
+ "reflect"
+ "testing"
+ "testing/quick"
+
+ "v.io/x/ref/profiles/internal/lib/iobuf"
+)
+
+type testReadHandler struct{ items []uint }
+
+func (t *testReadHandler) HandleRead(bytes uint) {
+ t.items = append(t.items, bytes)
+}
+
+func TestRead(t *testing.T) {
+ l := &testReadHandler{}
+ r := newReader(l)
+ input := []byte("abcdefghijklmnopqrstuvwxyzABCDE") // 31 bytes total
+ start := 0
+ // Produce data to read, adding elements to the underlying upcqueue
+ // with a geometric progression of 2.
+ for n := 1; start < len(input); n *= 2 {
+ if err := r.Put(iobuf.NewSlice(input[start : start+n])); err != nil {
+ t.Fatalf("Put(start=%d, n=%d) failed: %v", start, n, err)
+ }
+ start = start + n
+ }
+
+ var output [31]byte
+ start = 0
+ // Read with geometric progression of 1/2.
+ for n := 16; start < len(output); n /= 2 {
+ if m, err := r.Read(output[start : start+n]); err != nil || m != n {
+ t.Errorf("Read returned (%d, %v) want (%d, nil)", m, err, n)
+ }
+ if m := l.items[len(l.items)-1]; m != uint(n) {
+ t.Errorf("Read notified %d but should have notified %d bytes", m, n)
+ }
+ start = start + n
+ }
+ if got, want := string(output[:]), string(input); got != want {
+ t.Errorf("Got %q want %q", got, want)
+ }
+
+ r.Close()
+ if n, err := r.Read(output[:]); n != 0 || err != io.EOF {
+ t.Errorf("Got (%d, %v) want (0, nil)", n, err)
+ }
+}
+
+func TestReadRandom(t *testing.T) {
+ f := func(data [][]byte) bool {
+ r := newReader(&testReadHandler{})
+ // Use an empty slice (as opposed to a nil-slice) so that the
+ // reflect.DeepEqual call below succeeds when data is
+ // [][]byte{}.
+ written := make([]byte, 0)
+ for _, d := range data {
+ if err := r.Put(iobuf.NewSlice(d)); err != nil {
+ t.Error(err)
+ return false
+ }
+ written = append(written, d...)
+ }
+ read := make([]byte, len(written))
+ buf := read
+ r.Close()
+ for {
+ n, err := r.Read(buf)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Error(err)
+ return false
+ }
+ buf = buf[n:]
+ }
+ return reflect.DeepEqual(written, read) && int(r.BytesRead()) == len(written)
+ }
+ if err := quick.Check(f, nil); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestReadDeadline(t *testing.T) {
+ l := &testReadHandler{}
+ r := newReader(l)
+ defer r.Close()
+
+ deadline := make(chan struct{}, 0)
+ r.SetDeadline(deadline)
+ close(deadline)
+
+ var buf [1]byte
+ n, err := r.Read(buf[:])
+ neterr, ok := err.(net.Error)
+ if n != 0 || err == nil || !ok || !neterr.Timeout() {
+ t.Errorf("Expected read to fail with net.Error.Timeout, got (%d, %v)", n, err)
+ }
+ if len(l.items) != 0 {
+ t.Errorf("Expected no reads, but notified of reads: %v", l.items)
+ }
+}
diff --git a/profiles/internal/ipc/stream/vc/v23_internal_test.go b/profiles/internal/ipc/stream/vc/v23_internal_test.go
new file mode 100644
index 0000000..bfe8e63
--- /dev/null
+++ b/profiles/internal/ipc/stream/vc/v23_internal_test.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated via go generate.
+// DO NOT UPDATE MANUALLY
+package vc
+
+import "testing"
+import "os"
+
+import "v.io/x/ref/lib/testutil"
+
+func TestMain(m *testing.M) {
+ testutil.Init()
+ os.Exit(m.Run())
+}
diff --git a/profiles/internal/ipc/stream/vc/vc.go b/profiles/internal/ipc/stream/vc/vc.go
new file mode 100644
index 0000000..042044a
--- /dev/null
+++ b/profiles/internal/ipc/stream/vc/vc.go
@@ -0,0 +1,824 @@
+package vc
+
+// Logging guidelines:
+// Verbosity level 1 is for per-VC messages.
+// Verbosity level 2 is for per-Flow messages.
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ vsync "v.io/x/ref/lib/sync"
+ "v.io/x/ref/profiles/internal/ipc/stream/crypto"
+ "v.io/x/ref/profiles/internal/ipc/stream/id"
+ "v.io/x/ref/profiles/internal/lib/bqueue"
+ "v.io/x/ref/profiles/internal/lib/iobuf"
+
+ "v.io/v23/context"
+ "v.io/v23/ipc/version"
+ "v.io/v23/naming"
+ "v.io/v23/options"
+ "v.io/v23/security"
+ "v.io/v23/vom"
+ "v.io/x/lib/vlog"
+ "v.io/x/ref/profiles/internal/ipc/stream"
+)
+
+var (
+ errAlreadyListening = errors.New("Listen has already been called")
+ errDuplicateFlow = errors.New("duplicate OpenFlow message")
+ errUnrecognizedFlow = errors.New("unrecognized flow")
+)
+
+// TODO(suharshs): Should we make this configurable?
+const DischargeExpiryBuffer = 20 * time.Second
+
+// VC implements the stream.VC interface and exports additional methods to
+// manage Flows.
+//
+// stream.Flow objects created by this stream.VC implementation use a buffer
+// queue (veyron/profiles/internal/lib/bqueue) to provide flow control on Write
+// operations.
+type VC struct {
+ vci id.VC
+ localEP, remoteEP naming.Endpoint
+ localPrincipal security.Principal
+ localBlessings, remoteBlessings security.Blessings
+ remoteDischarges map[string]security.Discharge
+
+ pool *iobuf.Pool
+ reserveBytes uint
+ sharedCounters *vsync.Semaphore
+
+ mu sync.Mutex
+ flowMap map[id.Flow]*flow // nil iff the VC is closed.
+ acceptHandshakeDone chan struct{} // non-nil when HandshakeAcceptedVC begins the handshake, closed when handshake completes.
+ handshakeFID id.Flow // flow used for a TLS handshake to setup encryption.
+ authFID id.Flow // flow used by the authentication protocol.
+ nextConnectFID id.Flow
+ listener *listener // non-nil iff Listen has been called and the VC has not been closed.
+ crypter crypto.Crypter
+ closeReason string // reason why the VC was closed
+ closeCh chan struct{}
+ closed bool
+
+ helper Helper
+ version version.IPCVersion
+ dataCache *dataCache // dataCache contains information that can be shared between Flows from this VC.
+}
+
+// ServerAuthorizer encapsulates the policy used to authorize servers during VC
+// establishment.
+//
+// A client will first authorize a server before revealing any of its credentials
+// (public key, blessings etc.) to the server. Thus, if the authorization policy
+// calls for the server to be rejected, then the client will not have revealed
+// any of its credentials to the server.
+//
+// ServerAuthorizer in turn uses an authorization policy (security.Authorizer),
+// with the context matching the context of the RPC that caused the initiation
+// of the VC.
+type ServerAuthorizer struct {
+ Suffix, Method string
+ Policy security.Authorizer
+}
+
+func (a *ServerAuthorizer) IPCStreamVCOpt() {}
+func (a *ServerAuthorizer) Authorize(params security.CallParams) error {
+ params.Suffix = a.Suffix
+ params.Method = a.Method
+ return a.Policy.Authorize(security.NewCall(&params))
+}
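+
+// Illustrative construction (a sketch only; "policy" is any security.Authorizer,
+// "p" a security.Principal, and "dialedVC" a *VC being dialed):
+//
+//   auth := &ServerAuthorizer{Suffix: "a/b", Method: "Resolve", Policy: policy}
+//   err := dialedVC.HandshakeDialedVC(LocalPrincipal{p}, options.VCSecurityConfidential, auth)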
+
+var _ stream.VC = (*VC)(nil)
+
+// Helper is the interface for functionality required by the stream.VC
+// implementation in this package.
+type Helper interface {
+ // NotifyOfNewFlow notifies the remote end of a VC that the caller intends to
+ // establish a new flow to it and that the caller is ready to receive up to
+ // 'bytes' bytes of data from the remote end.
+ NotifyOfNewFlow(vci id.VC, fid id.Flow, bytes uint)
+
+ // AddReceiveBuffers notifies the remote end of a VC that the caller is ready
+ // to receive up to 'bytes' more bytes of data on the flow identified by fid
+ // over the VC identified by vci.
+ //
+ // Unlike NotifyOfNewFlow, this call does not let the remote end know of the
+ // intent to establish a new flow.
+ AddReceiveBuffers(vci id.VC, fid id.Flow, bytes uint)
+
+ // NewWriter creates a buffer queue for Write operations on the
+ // stream.Flow implementation.
+ NewWriter(vci id.VC, fid id.Flow) (bqueue.Writer, error)
+}
+
+// Params encapsulates the set of parameters needed to create a new VC.
+type Params struct {
+ VCI id.VC // Identifier of the VC
+ Dialed bool // True if the VC was initiated by the local process.
+ LocalEP naming.Endpoint // Endpoint of the local end of the VC.
+ RemoteEP naming.Endpoint // Endpoint of the remote end of the VC.
+ Pool *iobuf.Pool // Byte pool used for read and write buffer allocations.
+ ReserveBytes uint // Number of padding bytes to reserve for headers.
+ Helper Helper
+ Version version.IPCVersion
+}
+
+// LocalPrincipal wraps a security.Principal so that it can be provided
+// as an option to various methods in order to provide authentication information
+// when establishing virtual circuits.
+type LocalPrincipal struct{ security.Principal }
+
+func (LocalPrincipal) IPCStreamListenerOpt() {}
+func (LocalPrincipal) IPCStreamVCOpt() {}
+func (LocalPrincipal) IPCClientOpt() {}
+func (LocalPrincipal) IPCServerOpt() {}
+
+// DischargeClient is an interface for obtaining discharges for a set of third-party
+// caveats.
+//
+// TODO(ataly, ashankar): What should be the impetus for obtaining the discharges?
+type DischargeClient interface {
+ PrepareDischarges(ctx *context.T, forcaveats []security.Caveat, impetus security.DischargeImpetus) []security.Discharge
+ // Invalidate marks the provided discharges as invalid, and therefore unfit
+ // for being returned by a subsequent PrepareDischarges call.
+ Invalidate(discharges ...security.Discharge)
+ IPCStreamListenerOpt()
+}
+
+// DialContext establishes the context under which a VC Dial was initiated.
+type DialContext struct{ *context.T }
+
+func (DialContext) IPCStreamVCOpt() {}
+func (DialContext) IPCStreamListenerOpt() {}
+
+// InternalNew creates a new VC, which implements the stream.VC interface.
+//
+// As the name suggests, this method is intended for use only within packages
+// placed inside veyron/profiles/internal. Code outside the
+// veyron/profiles/internal/* packages should never call this method.
+func InternalNew(p Params) *VC {
+ fidOffset := 1
+ if p.Dialed {
+ fidOffset = 0
+ }
+ return &VC{
+ vci: p.VCI,
+ localEP: p.LocalEP,
+ remoteEP: p.RemoteEP,
+ pool: p.Pool,
+ reserveBytes: p.ReserveBytes,
+ sharedCounters: vsync.NewSemaphore(),
+ flowMap: make(map[id.Flow]*flow),
+ // Reserve flow IDs 0 thru NumReservedFlows for
+ // possible future use.
+ // Furthermore, flows created by Connect have an even
+ // id if the VC was initiated by the local process,
+ // and have an odd id if the VC was initiated by the
+ // remote process.
+ nextConnectFID: id.Flow(NumReservedFlows + fidOffset),
+ crypter: crypto.NewNullCrypter(),
+ helper: p.Helper,
+ version: p.Version,
+ dataCache: newDataCache(),
+ closeCh: make(chan struct{}),
+ }
+}
+
+// Connect implements the stream.Connector.Connect method.
+func (vc *VC) Connect(opts ...stream.FlowOpt) (stream.Flow, error) {
+ return vc.connectFID(vc.allocFID(), opts...)
+}
+
+func (vc *VC) connectFID(fid id.Flow, opts ...stream.FlowOpt) (stream.Flow, error) {
+ writer, err := vc.newWriter(fid)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create writer for Flow: %v", err)
+ }
+ f := &flow{
+ authN: vc,
+ reader: newReader(readHandlerImpl{vc, fid}),
+ writer: writer,
+ localEndpoint: vc.localEP,
+ remoteEndpoint: vc.remoteEP,
+ dataCache: vc.dataCache,
+ }
+ vc.mu.Lock()
+ if vc.flowMap != nil {
+ vc.flowMap[fid] = f
+ } else {
+ err = fmt.Errorf("Connect on closed VC(%q)", vc.closeReason)
+ }
+ vc.mu.Unlock()
+ if err != nil {
+ f.Shutdown()
+ return nil, err
+ }
+ // New flow created, inform remote end that data can be received on it.
+ vc.helper.NotifyOfNewFlow(vc.vci, fid, DefaultBytesBufferedPerFlow)
+ return f, nil
+}
+
+// Listen implements the stream.VC.Listen method.
+func (vc *VC) Listen() (stream.Listener, error) {
+ vc.mu.Lock()
+ defer vc.mu.Unlock()
+ if vc.listener != nil {
+ return nil, errAlreadyListening
+ }
+ vc.listener = newListener()
+ return vc.listener, nil
+}
+
+// RemoteAddr returns the remote endpoint for this VC.
+func (vc *VC) RemoteAddr() naming.Endpoint {
+ return vc.remoteEP
+}
+
+// LocalAddr returns the local endpoint for this VC.
+func (vc *VC) LocalAddr() naming.Endpoint {
+ return vc.localEP
+}
+
+// DispatchPayload makes payload.Contents available to Read operations on the
+// Flow identified by fid.
+//
+// Assumes ownership of payload, i.e., payload should not be used by the caller
+// after this method returns (irrespective of the return value).
+func (vc *VC) DispatchPayload(fid id.Flow, payload *iobuf.Slice) error {
+ if payload.Size() == 0 {
+ payload.Release()
+ return nil
+ }
+ vc.mu.Lock()
+ if vc.flowMap == nil {
+ vc.mu.Unlock()
+ payload.Release()
+ return fmt.Errorf("ignoring message for Flow %d on closed VC %d", fid, vc.VCI())
+ }
+ // TLS decryption is stateful, so even if the message will be discarded
+ // because of other checks further down in this method, go through with
+ // the decryption.
+ if fid != vc.handshakeFID && fid != vc.authFID {
+ vc.waitForHandshakeLocked()
+ var err error
+ if payload, err = vc.crypter.Decrypt(payload); err != nil {
+ vc.mu.Unlock()
+ return fmt.Errorf("failed to decrypt payload: %v", err)
+ }
+ }
+ if payload.Size() == 0 {
+ vc.mu.Unlock()
+ payload.Release()
+ return nil
+ }
+ f := vc.flowMap[fid]
+ if f == nil {
+ vc.mu.Unlock()
+ payload.Release()
+ return errUnrecognizedFlow
+ }
+ vc.mu.Unlock()
+ if err := f.reader.Put(payload); err != nil {
+ payload.Release()
+ return err
+ }
+ return nil
+}
+
+// AcceptFlow enqueues a new Flow for acceptance by the listener on the VC.
+// Returns an error if the VC is not accepting flows initiated by the remote
+// end.
+func (vc *VC) AcceptFlow(fid id.Flow) error {
+ vc.mu.Lock()
+ defer vc.mu.Unlock()
+ if vc.listener == nil {
+ return fmt.Errorf("no active listener on VC %d", vc.vci)
+ }
+ writer, err := vc.newWriter(fid)
+ if err != nil {
+ return fmt.Errorf("failed to create writer for new flow(%d): %v", fid, err)
+ }
+ f := &flow{
+ authN: vc,
+ reader: newReader(readHandlerImpl{vc, fid}),
+ writer: writer,
+ localEndpoint: vc.localEP,
+ remoteEndpoint: vc.remoteEP,
+ dataCache: vc.dataCache,
+ }
+ if err = vc.listener.Enqueue(f); err != nil {
+ f.Shutdown()
+ return fmt.Errorf("failed to enqueue flow at listener: %v", err)
+ }
+ if _, exists := vc.flowMap[fid]; exists {
+ return errDuplicateFlow
+ }
+ vc.flowMap[fid] = f
+ // New flow accepted, notify remote end that it can send over data.
+ // Do it in a goroutine in case the implementation of AddReceiveBuffers
+ // ends up attempting to lock vc.mu
+ go vc.helper.AddReceiveBuffers(vc.vci, fid, DefaultBytesBufferedPerFlow)
+ vlog.VI(2).Infof("Added flow %d@%d to listener", fid, vc.vci)
+ return nil
+}
+
+// ShutdownFlow closes the Flow identified by fid and discards any pending
+// writes.
+func (vc *VC) ShutdownFlow(fid id.Flow) {
+ vc.mu.Lock()
+ f := vc.flowMap[fid]
+ delete(vc.flowMap, fid)
+ vc.mu.Unlock()
+ if f != nil {
+ f.Shutdown()
+ }
+}
+
+// ReleaseCounters informs the Flow (identified by fid) that the remote end is
+// ready to receive up to 'bytes' more bytes of data.
+func (vc *VC) ReleaseCounters(fid id.Flow, bytes uint32) {
+ if fid == SharedFlowID {
+ vc.sharedCounters.IncN(uint(bytes))
+ return
+ }
+ var f *flow
+ vc.mu.Lock()
+ if vc.flowMap != nil {
+ f = vc.flowMap[fid]
+ }
+ vc.mu.Unlock()
+ if f == nil {
+ vlog.VI(2).Infof("Ignoring ReleaseCounters(%d, %d) on VCI %d as the flow does not exist", fid, bytes, vc.vci)
+ return
+ }
+ f.Release(int(bytes))
+}
+
+// Close closes the VC and all flows on it, allowing any pending writes in the
+// flow to drain.
+func (vc *VC) Close(reason string) error {
+ vlog.VI(1).Infof("Closing VC %v. Reason:%q", vc, reason)
+ vc.mu.Lock()
+ flows := vc.flowMap
+ if !vc.closed {
+ close(vc.closeCh)
+ vc.closed = true
+ }
+ vc.flowMap = nil
+ if vc.listener != nil {
+ vc.listener.Close()
+ vc.listener = nil
+ }
+ vc.closeReason = reason
+ vc.mu.Unlock()
+
+ vc.sharedCounters.Close()
+ for fid, flow := range flows {
+ vlog.VI(2).Infof("Closing flow %d on VC %v as VC is being closed(%q)", fid, vc, reason)
+ flow.Close()
+ }
+ return nil
+}
+
+// err prefers vc.closeReason over err.
+func (vc *VC) err(err error) error {
+ vc.mu.Lock()
+ defer vc.mu.Unlock()
+ if vc.closeReason != "" {
+ return errors.New(vc.closeReason)
+ }
+ return err
+}
+
+// HandshakeDialedVC completes initialization of the VC (setting up encryption,
+// authentication etc.) under the assumption that the VC was initiated by the
+// local process (i.e., the local process "Dial"ed to create the VC).
+func (vc *VC) HandshakeDialedVC(opts ...stream.VCOpt) error {
+ var (
+ principal security.Principal
+ tlsSessionCache crypto.TLSClientSessionCache
+ securityLevel options.VCSecurityLevel
+ auth *ServerAuthorizer
+ )
+ for _, o := range opts {
+ switch v := o.(type) {
+ case LocalPrincipal:
+ principal = v.Principal
+ case options.VCSecurityLevel:
+ securityLevel = v
+ case crypto.TLSClientSessionCache:
+ tlsSessionCache = v
+ case *ServerAuthorizer:
+ auth = v
+ }
+ }
+ switch securityLevel {
+ case options.VCSecurityConfidential:
+ if principal == nil {
+ principal = AnonymousPrincipal
+ }
+ case options.VCSecurityNone:
+ return nil
+ default:
+ return fmt.Errorf("unrecognized VC security level: %v", securityLevel)
+ }
+
+ // Establish TLS
+ handshakeFID := vc.allocFID()
+ handshakeConn, err := vc.connectFID(handshakeFID)
+ if err != nil {
+ return vc.err(fmt.Errorf("failed to create a Flow for setting up TLS: %v", err))
+ }
+ crypter, err := crypto.NewTLSClient(handshakeConn, handshakeConn.LocalEndpoint(), handshakeConn.RemoteEndpoint(), tlsSessionCache, vc.pool)
+ if err != nil {
+ return vc.err(fmt.Errorf("failed to setup TLS: %v", err))
+ }
+
+ // Authenticate (exchange identities)
+ // Unfortunately, handshakeConn cannot be used for the authentication protocol.
+ // This is because the Crypter implementation uses crypto/tls.Conn,
+ // which can consume data beyond the handshake message boundaries (call
+ // to readFromUntil in
+ // https://code.google.com/p/go/source/browse/src/pkg/crypto/tls/conn.go?spec=svn654b2703fcc466a29692068ab56efedd09fb3d05&r=654b2703fcc466a29692068ab56efedd09fb3d05#539).
+ // This is not a problem when tls.Conn is used as intended (to wrap over a stream), but
+ // becomes a problem when shoehorning a block encrypter (Crypter interface) over this
+ // stream API.
+ authFID := vc.allocFID()
+ authConn, err := vc.connectFID(authFID)
+ if err != nil {
+ return vc.err(fmt.Errorf("failed to create a Flow for authentication: %v", err))
+ }
+ params := security.CallParams{
+ LocalPrincipal: principal,
+ LocalEndpoint: vc.localEP,
+ RemoteEndpoint: vc.remoteEP,
+ }
+ rBlessings, lBlessings, rDischarges, err := AuthenticateAsClient(authConn, crypter, params, auth, vc.version)
+ if err != nil || len(rBlessings.ThirdPartyCaveats()) == 0 {
+ authConn.Close()
+ if err != nil {
+ return vc.err(fmt.Errorf("authentication failed: %v", err))
+ }
+ } else {
+ go vc.recvDischargesLoop(authConn)
+ }
+
+ vc.mu.Lock()
+ vc.handshakeFID = handshakeFID
+ vc.authFID = authFID
+ vc.crypter = crypter
+ vc.localPrincipal = principal
+ vc.localBlessings = lBlessings
+ vc.remoteBlessings = rBlessings
+ vc.remoteDischarges = rDischarges
+ vc.mu.Unlock()
+
+ vlog.VI(1).Infof("Client VC %v authenticated. RemoteBlessings:%v, LocalBlessings:%v", vc, rBlessings, lBlessings)
+ return nil
+}
+
+// HandshakeResult is sent by HandshakeAcceptedVC over the channel returned by it.
+type HandshakeResult struct {
+ Listener stream.Listener // Listener for accepting new Flows on the VC.
+ Error error // Error, if any, during the handshake.
+}
+
+// HandshakeAcceptedVC completes initialization of the VC (setting up
+// encryption, authentication etc.) under the assumption that the VC was
+// initiated by a remote process (and the local process wishes to "accept" it).
+//
+// Since the handshaking process might involve several round trips, the bulk of the work
+// is done asynchronously and the result of the handshake is written to the
+// channel returned by this method.
+func (vc *VC) HandshakeAcceptedVC(opts ...stream.ListenerOpt) <-chan HandshakeResult {
+ result := make(chan HandshakeResult, 1)
+ finish := func(ln stream.Listener, err error) chan HandshakeResult {
+ result <- HandshakeResult{ln, err}
+ return result
+ }
+ var (
+ principal security.Principal
+ securityLevel options.VCSecurityLevel
+ dischargeClient DischargeClient
+ lBlessings security.Blessings
+ )
+ for _, o := range opts {
+ switch v := o.(type) {
+ case DischargeClient:
+ dischargeClient = v
+ case LocalPrincipal:
+ principal = v.Principal
+ case options.VCSecurityLevel:
+ securityLevel = v
+ case options.ServerBlessings:
+ lBlessings = v.Blessings
+ }
+ }
+ // If the listener were set up asynchronously, there would be a race between
+ // the listener being set up and the caller of this method trying to dispatch
+ // messages, so it is set up synchronously.
+ ln, err := vc.Listen()
+ if err != nil {
+ return finish(nil, err)
+ }
+ vc.helper.AddReceiveBuffers(vc.VCI(), SharedFlowID, DefaultBytesBufferedPerFlow)
+ switch securityLevel {
+ case options.VCSecurityConfidential:
+ if principal == nil {
+ principal = AnonymousPrincipal
+ }
+ if lBlessings.IsZero() {
+ lBlessings = principal.BlessingStore().Default()
+ }
+ case options.VCSecurityNone:
+ return finish(ln, nil)
+ default:
+ ln.Close()
+ return finish(nil, fmt.Errorf("unrecognized VC security level: %v", securityLevel))
+ }
+ go func() {
+ sendErr := func(err error) {
+ ln.Close()
+ result <- HandshakeResult{nil, vc.err(err)}
+ }
+ // TODO(ashankar): There should be a timeout on this Accept
+ // call. Otherwise, a malicious (or incompetent) client can
+ // consume server resources by sending many OpenVC messages but
+ // not following up with the handshake protocol. Same holds for
+ // the identity exchange protocol.
+ handshakeConn, err := ln.Accept()
+ if err != nil {
+ sendErr(fmt.Errorf("TLS handshake Flow not accepted: %v", err))
+ return
+ }
+ vc.mu.Lock()
+ vc.acceptHandshakeDone = make(chan struct{})
+ vc.handshakeFID = vc.findFlowLocked(handshakeConn)
+ vc.mu.Unlock()
+
+ // Establish TLS
+ crypter, err := crypto.NewTLSServer(handshakeConn, handshakeConn.LocalEndpoint(), handshakeConn.RemoteEndpoint(), vc.pool)
+ if err != nil {
+ sendErr(fmt.Errorf("failed to setup TLS: %v", err))
+ return
+ }
+
+ // Authenticate (exchange identities)
+ authConn, err := ln.Accept()
+ if err != nil {
+ sendErr(fmt.Errorf("Authentication Flow not accepted: %v", err))
+ return
+ }
+ vc.mu.Lock()
+ vc.authFID = vc.findFlowLocked(authConn)
+ vc.mu.Unlock()
+ rBlessings, err := AuthenticateAsServer(authConn, principal, lBlessings, dischargeClient, crypter, vc.version)
+ if err != nil {
+ authConn.Close()
+ sendErr(fmt.Errorf("authentication failed: %v", err))
+ return
+ }
+
+ vc.mu.Lock()
+ vc.crypter = crypter
+ vc.localPrincipal = principal
+ vc.localBlessings = lBlessings
+ vc.remoteBlessings = rBlessings
+ close(vc.acceptHandshakeDone)
+ vc.acceptHandshakeDone = nil
+ vc.mu.Unlock()
+ vlog.VI(1).Infof("Server VC %v authenticated. RemoteBlessings:%v, LocalBlessings:%v", vc, rBlessings, lBlessings)
+ result <- HandshakeResult{ln, nil}
+
+ if len(lBlessings.ThirdPartyCaveats()) > 0 {
+ go vc.sendDischargesLoop(authConn, dischargeClient, lBlessings.ThirdPartyCaveats())
+ } else {
+ authConn.Close()
+ }
+ }()
+ return result
+}
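+
+// A typical way to consume the result (illustrative only; "opts" are the
+// stream.ListenerOpt values described above):
+//
+//   hr := <-vc.HandshakeAcceptedVC(opts...)
+//   if hr.Error != nil { /* handshake failed */ }
+//   ln := hr.Listener // accepts flows initiated by the dialing end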
+
+func (vc *VC) sendDischargesLoop(conn io.WriteCloser, dc DischargeClient, tpCavs []security.Caveat) {
+ defer conn.Close()
+ if dc == nil {
+ return
+ }
+ enc, err := vom.NewEncoder(conn)
+ if err != nil {
+ vlog.Errorf("failed to create new encoder(conn=%v): %v", conn, err)
+ return
+ }
+ discharges := dc.PrepareDischarges(nil, tpCavs, security.DischargeImpetus{})
+ for expiry := minExpiryTime(discharges, tpCavs); !expiry.IsZero(); expiry = minExpiryTime(discharges, tpCavs) {
+ select {
+ case <-time.After(fetchDuration(expiry)):
+ discharges = dc.PrepareDischarges(nil, tpCavs, security.DischargeImpetus{})
+ if err := enc.Encode(marshalDischarges(discharges)); err != nil {
+ vlog.Errorf("encoding discharge(%v) failed: %v", discharges, err)
+ return
+ }
+ case <-vc.closeCh:
+ vlog.VI(3).Infof("closing sendDischargesLoop")
+ return
+ }
+ }
+}
+
+func fetchDuration(expiry time.Time) time.Duration {
+ // Fetch the discharge earlier than the actual expiry to factor in for clock
+ // skew and RPC time.
+ return expiry.Sub(time.Now().Add(DischargeExpiryBuffer))
+}
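+
+// Worked example (illustrative): with DischargeExpiryBuffer of 20s, a discharge
+// expiring 60s from now is re-fetched after roughly 40s; an expiry already inside
+// the buffer yields a non-positive duration, so the re-fetch fires immediately.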
+
+func minExpiryTime(discharges []security.Discharge, tpCavs []security.Caveat) time.Time {
+ var min time.Time
+ // If some discharges were not fetched, retry again in a minute.
+ if len(discharges) < len(tpCavs) {
+ min = time.Now().Add(time.Minute)
+ }
+ for _, d := range discharges {
+ if exp := d.Expiry(); min.IsZero() || (!exp.IsZero() && exp.Before(min)) {
+ min = exp
+ }
+ }
+ return min
+}
+
+func (vc *VC) recvDischargesLoop(conn io.ReadCloser) {
+ defer conn.Close()
+ dec, err := vom.NewDecoder(conn)
+ if err != nil {
+ vlog.Errorf("failed to create new decoder: %v", err)
+ return
+ }
+
+ for {
+ var wire []security.WireDischarge
+ if err := dec.Decode(&wire); err != nil {
+ vlog.VI(3).Infof("decoding discharge failed: %v", err)
+ return
+ }
+ discharges := unmarshalDischarges(wire)
+ vc.mu.Lock()
+ for _, d := range discharges {
+ vc.remoteDischarges[d.ID()] = d
+ }
+ vc.mu.Unlock()
+ }
+}
+
+// Encrypt uses the VC's encryption scheme to encrypt the provided data payload.
+// Always takes ownership of plaintext.
+func (vc *VC) Encrypt(fid id.Flow, plaintext *iobuf.Slice) (cipherslice *iobuf.Slice, err error) {
+ if plaintext == nil {
+ return nil, nil
+ }
+ vc.mu.Lock()
+ if fid == vc.handshakeFID || fid == vc.authFID {
+ cipherslice = plaintext
+ } else {
+ cipherslice, err = vc.crypter.Encrypt(plaintext)
+ }
+ vc.mu.Unlock()
+ return
+}
+
+func (vc *VC) allocFID() id.Flow {
+ vc.mu.Lock()
+ ret := vc.nextConnectFID
+ vc.nextConnectFID += 2
+ vc.mu.Unlock()
+ return ret
+}
+
+func (vc *VC) newWriter(fid id.Flow) (*writer, error) {
+ bq, err := vc.helper.NewWriter(vc.vci, fid)
+ if err != nil {
+ return nil, err
+ }
+ alloc := iobuf.NewAllocator(vc.pool, vc.reserveBytes)
+ return newWriter(MaxPayloadSizeBytes, bq, alloc, vc.sharedCounters), nil
+}
+
+// findFlowLocked finds the flow id for the provided flow.
+// REQUIRES: vc.mu is held
+// Returns 0 if there is none.
+func (vc *VC) findFlowLocked(flow interface{}) id.Flow {
+ const invalidFlowID = 0
+ // This operation is rare and early enough (called when there are <= 2
+ // flows over the VC) that iterating over the map should be fine.
+ for fid, f := range vc.flowMap {
+ if f == flow {
+ return fid
+ }
+ }
+ return invalidFlowID
+}
+
+// VCI returns the identifier of this VC.
+func (vc *VC) VCI() id.VC { return vc.vci }
+
+// LocalPrincipal returns the principal that authenticated with the remote end of the VC.
+func (vc *VC) LocalPrincipal() security.Principal {
+ vc.mu.Lock()
+ defer vc.mu.Unlock()
+ vc.waitForHandshakeLocked()
+ return vc.localPrincipal
+}
+
+// LocalBlessings returns the blessings (bound to LocalPrincipal) presented to the
+// remote end of the VC during authentication.
+func (vc *VC) LocalBlessings() security.Blessings {
+ vc.mu.Lock()
+ defer vc.mu.Unlock()
+ vc.waitForHandshakeLocked()
+ return vc.localBlessings
+}
+
+// RemoteBlessings returns the blessings presented by the remote end of the VC during
+// authentication.
+func (vc *VC) RemoteBlessings() security.Blessings {
+ vc.mu.Lock()
+ defer vc.mu.Unlock()
+ vc.waitForHandshakeLocked()
+ return vc.remoteBlessings
+}
+
+// RemoteDischarges returns the discharges presented by the remote end of the VC during
+// authentication.
+func (vc *VC) RemoteDischarges() map[string]security.Discharge {
+ vc.mu.Lock()
+ defer vc.mu.Unlock()
+ vc.waitForHandshakeLocked()
+ if len(vc.remoteDischarges) == 0 {
+ return nil
+ }
+ // Copy the map to prevent racy reads.
+ ret := make(map[string]security.Discharge)
+ for k, v := range vc.remoteDischarges {
+ ret[k] = v
+ }
+ return ret
+}
+
+// waitForHandshakeLocked blocks until an in-progress handshake (encryption
+// setup and authentication) completes.
+// REQUIRES: vc.mu is held.
+func (vc *VC) waitForHandshakeLocked() {
+ if hsd := vc.acceptHandshakeDone; hsd != nil {
+ vc.mu.Unlock()
+ <-hsd
+ vc.mu.Lock()
+ }
+}
+
+func (vc *VC) String() string {
+ return fmt.Sprintf("VCI:%d (%v<->%v)", vc.vci, vc.localEP, vc.remoteEP)
+}
+
+// DebugString returns a string representation of the state of a VC.
+//
+// The format of the returned string is meant to be human-friendly and the
+// specific format should not be relied upon for automated processing.
+func (vc *VC) DebugString() string {
+ vc.mu.Lock()
+ l := make([]string, 0, len(vc.flowMap)+1)
+ l = append(l, fmt.Sprintf("VCI:%d -- Endpoints:(Local:%q Remote:%q) #Flows:%d NextConnectFID:%d",
+ vc.vci,
+ vc.localEP,
+ vc.remoteEP,
+ len(vc.flowMap),
+ vc.nextConnectFID))
+ if vc.crypter == nil {
+ l = append(l, "Handshake not completed yet")
+ } else {
+ l = append(l, "Encryption: "+vc.crypter.String())
+ if vc.localPrincipal != nil {
+ l = append(l, fmt.Sprintf("LocalPrincipal:%v LocalBlessings:%v RemoteBlessings:%v", vc.localPrincipal.PublicKey(), vc.localBlessings, vc.remoteBlessings))
+ }
+ }
+ for fid, f := range vc.flowMap {
+ l = append(l, fmt.Sprintf(" Flow:%3d BytesRead:%7d BytesWritten:%7d", fid, f.BytesRead(), f.BytesWritten()))
+ }
+ vc.mu.Unlock()
+ sort.Strings(l[1:])
+ return strings.Join(l, "\n")
+}
+
+// readHandlerImpl is an adapter for the readHandler interface required by
+// the reader type.
+type readHandlerImpl struct {
+ vc *VC
+ fid id.Flow
+}
+
+func (r readHandlerImpl) HandleRead(bytes uint) {
+ r.vc.helper.AddReceiveBuffers(r.vc.vci, r.fid, bytes)
+}
diff --git a/profiles/internal/ipc/stream/vc/vc_test.go b/profiles/internal/ipc/stream/vc/vc_test.go
new file mode 100644
index 0000000..b247d58
--- /dev/null
+++ b/profiles/internal/ipc/stream/vc/vc_test.go
@@ -0,0 +1,594 @@
+// Use a different package for the tests to ensure that only the exported API is used.
+
+package vc_test
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+ "testing"
+
+ "v.io/x/ref/lib/testutil"
+ tsecurity "v.io/x/ref/lib/testutil/security"
+ "v.io/x/ref/profiles/internal/ipc/stream/id"
+ "v.io/x/ref/profiles/internal/ipc/stream/vc"
+ "v.io/x/ref/profiles/internal/lib/bqueue"
+ "v.io/x/ref/profiles/internal/lib/bqueue/drrqueue"
+ "v.io/x/ref/profiles/internal/lib/iobuf"
+
+ "v.io/v23/context"
+ "v.io/v23/ipc/version"
+ "v.io/v23/naming"
+ "v.io/v23/options"
+ "v.io/v23/security"
+ "v.io/x/ref/profiles/internal/ipc/stream"
+)
+
+var (
+ clientEP = endpoint(naming.FixedRoutingID(0xcccccccccccccccc))
+ serverEP = endpoint(naming.FixedRoutingID(0x5555555555555555))
+)
+
+//go:generate v23 test generate
+
+const (
+ // Convenience alias to avoid conflicts between the package name "vc" and variables called "vc".
+ DefaultBytesBufferedPerFlow = vc.DefaultBytesBufferedPerFlow
+ // Shorthands
+ SecurityNone = options.VCSecurityNone
+ SecurityTLS = options.VCSecurityConfidential
+
+ LatestVersion = version.IPCVersion7
+)
+
+// testFlowEcho writes a random string of 'size' bytes on the flow and then
+// ensures that the same string is read back.
+func testFlowEcho(t *testing.T, flow stream.Flow, size int) {
+ defer flow.Close()
+ wrote := testutil.RandomBytes(size)
+ go func() {
+ buf := wrote
+ for len(buf) > 0 {
+ limit := 1 + testutil.Rand.Intn(len(buf)) // Random number in [1, n]
+ n, err := flow.Write(buf[:limit])
+ if n != limit || err != nil {
+ t.Errorf("Write returned (%d, %v) want (%d, nil)", n, err, limit)
+ }
+ buf = buf[limit:]
+ }
+ }()
+
+ total := 0
+ read := make([]byte, size)
+ buf := read
+ for total < size {
+ n, err := flow.Read(buf)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ total += n
+ buf = buf[n:]
+ }
+ if bytes.Compare(read, wrote) != 0 {
+ t.Errorf("Data read != data written")
+ }
+}
+
+func TestHandshake(t *testing.T) {
+ // When SecurityNone is used, the blessings should not be sent over the wire.
+ var (
+ client = tsecurity.NewPrincipal("client")
+ server = tsecurity.NewPrincipal("server")
+ )
+ h, vc, err := New(SecurityNone, LatestVersion, client, server, nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer h.Close()
+ flow, err := vc.Connect()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !flow.RemoteBlessings().IsZero() {
+ t.Errorf("Server sent blessing %v over insecure transport", flow.RemoteBlessings())
+ }
+ if !flow.LocalBlessings().IsZero() {
+ t.Errorf("Client sent blessing %v over insecure transport", flow.LocalBlessings())
+ }
+}
+
+func testFlowAuthN(flow stream.Flow, serverBlessings security.Blessings, serverDischarges map[string]security.Discharge, clientPublicKey security.PublicKey) error {
+ if got, want := flow.RemoteBlessings(), serverBlessings; !reflect.DeepEqual(got, want) {
+ return fmt.Errorf("Server shared blessings %v, want %v", got, want)
+ }
+ if got, want := flow.RemoteDischarges(), serverDischarges; !reflect.DeepEqual(got, want) {
+ return fmt.Errorf("Server shared discharges %#v, want %#v", got, want)
+ }
+ if got, want := flow.LocalBlessings().PublicKey(), clientPublicKey; !reflect.DeepEqual(got, want) {
+ return fmt.Errorf("Client shared %v, want %v", got, want)
+ }
+ return nil
+}
+
+// auth implements security.Authorizer.
+type auth struct {
+ localPrincipal security.Principal
+ remoteBlessings security.Blessings
+ remoteDischarges map[string]security.Discharge
+ suffix, method string
+ err error
+}
+
+// Authorize tests that the context passed to the authorizer is the expected one.
+func (a *auth) Authorize(ctx security.Call) error {
+ if a.err != nil {
+ return a.err
+ }
+ if got, want := ctx.LocalPrincipal(), a.localPrincipal; !reflect.DeepEqual(got, want) {
+ return fmt.Errorf("ctx.LocalPrincipal: got %v, want %v", got, want)
+ }
+ if got, want := ctx.RemoteBlessings(), a.remoteBlessings; !reflect.DeepEqual(got, want) {
+ return fmt.Errorf("ctx.RemoteBlessings: got %v, want %v", got, want)
+ }
+ if got, want := ctx.RemoteDischarges(), a.remoteDischarges; !reflect.DeepEqual(got, want) {
+ return fmt.Errorf("ctx.RemoteDischarges: got %v, want %v", got, want)
+ }
+ if got, want := ctx.LocalEndpoint(), clientEP; !reflect.DeepEqual(got, want) {
+ return fmt.Errorf("ctx.LocalEndpoint: got %v, want %v", got, want)
+ }
+ if got, want := ctx.RemoteEndpoint(), serverEP; !reflect.DeepEqual(got, want) {
+ return fmt.Errorf("ctx.RemoteEndpoint: got %v, want %v", got, want)
+ }
+	if got, want := ctx.Suffix(), a.suffix; got != want {
+		return fmt.Errorf("ctx.Suffix: got %v, want %v", got, want)
+	}
+	if got, want := ctx.Method(), a.method; got != want {
+		return fmt.Errorf("ctx.Method: got %v, want %v", got, want)
+	}
+ return nil
+}
+
+// mockDischargeClient implements vc.DischargeClient.
+type mockDischargeClient []security.Discharge
+
+func (m mockDischargeClient) PrepareDischarges(_ *context.T, forcaveats []security.Caveat, impetus security.DischargeImpetus) []security.Discharge {
+ return m
+}
+func (mockDischargeClient) Invalidate(...security.Discharge) {}
+func (mockDischargeClient) IPCStreamListenerOpt() {}
+func (mockDischargeClient) IPCStreamVCOpt() {}
+
+// Test that mockDischargeClient implements vc.DischargeClient.
+var _ vc.DischargeClient = (mockDischargeClient)(nil)
+
+func TestHandshakeTLS(t *testing.T) {
+	matchesError := func(got error, want string) error {
+		if (got == nil) && len(want) == 0 {
+			return nil
+		}
+		if got == nil || !strings.Contains(got.Error(), want) {
+			return fmt.Errorf("got error %q, wanted to match %q", got, want)
+		}
+		return nil
+	}
+ var (
+ root = tsecurity.NewIDProvider("root")
+ discharger = tsecurity.NewPrincipal("discharger")
+ client = tsecurity.NewPrincipal()
+ server = tsecurity.NewPrincipal()
+ )
+ tpcav, err := security.NewPublicKeyCaveat(discharger.PublicKey(), "irrelevant", security.ThirdPartyRequirements{}, security.UnconstrainedUse())
+ if err != nil {
+ t.Fatal(err)
+ }
+ dis, err := discharger.MintDischarge(tpcav, security.UnconstrainedUse())
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Root blesses the client
+ if err := root.Bless(client, "client"); err != nil {
+ t.Fatal(err)
+ }
+ // Root blesses the server with a third-party caveat
+ if err := root.Bless(server, "server", tpcav); err != nil {
+ t.Fatal(err)
+ }
+
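+	// Each test case dials a VC with the given discharge client and server
+	// authorizer, then checks either the expected dial error or the blessings
+	// and discharges visible on a flow over the resulting VC.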
+ testdata := []struct {
+ dischargeClient vc.DischargeClient
+ auth *vc.ServerAuthorizer
+ dialErr string
+ flowRemoteBlessings security.Blessings
+ flowRemoteDischarges map[string]security.Discharge
+ }{
+ {
+ flowRemoteBlessings: server.BlessingStore().Default(),
+ },
+ {
+ dischargeClient: mockDischargeClient([]security.Discharge{dis}),
+ flowRemoteBlessings: server.BlessingStore().Default(),
+ flowRemoteDischarges: map[string]security.Discharge{dis.ID(): dis},
+ },
+ {
+ dischargeClient: mockDischargeClient([]security.Discharge{dis}),
+ auth: &vc.ServerAuthorizer{
+ Suffix: "suffix",
+ Method: "method",
+ Policy: &auth{
+ localPrincipal: client,
+ remoteBlessings: server.BlessingStore().Default(),
+ remoteDischarges: map[string]security.Discharge{dis.ID(): dis},
+ suffix: "suffix",
+ method: "method",
+ },
+ },
+ flowRemoteBlessings: server.BlessingStore().Default(),
+ flowRemoteDischarges: map[string]security.Discharge{dis.ID(): dis},
+ },
+ {
+ dischargeClient: mockDischargeClient([]security.Discharge{dis}),
+ auth: &vc.ServerAuthorizer{
+ Suffix: "suffix",
+ Method: "method",
+ Policy: &auth{
+ err: errors.New("authorization error"),
+ },
+ },
+ dialErr: "authorization error",
+ },
+ }
+ for i, d := range testdata {
+ h, vc, err := New(SecurityTLS, LatestVersion, client, server, d.dischargeClient, d.auth)
+		if merr := matchesError(err, d.dialErr); merr != nil {
+			t.Errorf("Test #%d: HandshakeDialedVC with server authorizer %#v: %v", i, d.auth, merr)
+		}
+ if err != nil {
+ continue
+ }
+ flow, err := vc.Connect()
+ if err != nil {
+ h.Close()
+ t.Errorf("Unable to create flow: %v", err)
+ continue
+ }
+ if err := testFlowAuthN(flow, d.flowRemoteBlessings, d.flowRemoteDischarges, client.PublicKey()); err != nil {
+ h.Close()
+ t.Error(err)
+ continue
+ }
+ h.Close()
+ }
+}
+
+func testConnect_Small(t *testing.T, security options.VCSecurityLevel) {
+ h, vc, err := New(security, LatestVersion, tsecurity.NewPrincipal("client"), tsecurity.NewPrincipal("server"), nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer h.Close()
+ flow, err := vc.Connect()
+ if err != nil {
+ t.Fatal(err)
+ }
+ testFlowEcho(t, flow, 10)
+}
+func TestConnect_Small(t *testing.T) { testConnect_Small(t, SecurityNone) }
+func TestConnect_SmallTLS(t *testing.T) { testConnect_Small(t, SecurityTLS) }
+
+func testConnect(t *testing.T, security options.VCSecurityLevel) {
+ h, vc, err := New(security, LatestVersion, tsecurity.NewPrincipal("client"), tsecurity.NewPrincipal("server"), nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer h.Close()
+ flow, err := vc.Connect()
+ if err != nil {
+ t.Fatal(err)
+ }
+ testFlowEcho(t, flow, 10*DefaultBytesBufferedPerFlow)
+}
+func TestConnect(t *testing.T) { testConnect(t, SecurityNone) }
+func TestConnectTLS(t *testing.T) { testConnect(t, SecurityTLS) }
+
+func testConnect_Version7(t *testing.T, security options.VCSecurityLevel) {
+ h, vc, err := New(security, version.IPCVersion7, tsecurity.NewPrincipal("client"), tsecurity.NewPrincipal("server"), nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer h.Close()
+ flow, err := vc.Connect()
+ if err != nil {
+ t.Fatal(err)
+ }
+ testFlowEcho(t, flow, 10)
+}
+func TestConnect_Version7(t *testing.T) { testConnect_Version7(t, SecurityNone) }
+func TestConnect_Version7TLS(t *testing.T) { testConnect_Version7(t, SecurityTLS) }
+
+// testConcurrentFlows is a helper for testing concurrent operations on
+// multiple flows over the same VC. Such tests are most useful when run with
+// the race detector (go test -race ...).
+func testConcurrentFlows(t *testing.T, security options.VCSecurityLevel, flows, gomaxprocs int) {
+ mp := runtime.GOMAXPROCS(gomaxprocs)
+ defer runtime.GOMAXPROCS(mp)
+ h, vc, err := New(security, LatestVersion, tsecurity.NewPrincipal("client"), tsecurity.NewPrincipal("server"), nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer h.Close()
+
+ var wg sync.WaitGroup
+ wg.Add(flows)
+ for i := 0; i < flows; i++ {
+ go func(n int) {
+ defer wg.Done()
+ flow, err := vc.Connect()
+ if err != nil {
+ t.Error(err)
+ } else {
+ testFlowEcho(t, flow, (n+1)*DefaultBytesBufferedPerFlow)
+ }
+ }(i)
+ }
+ wg.Wait()
+}
+
+func TestConcurrentFlows_1(t *testing.T) { testConcurrentFlows(t, SecurityNone, 10, 1) }
+func TestConcurrentFlows_1TLS(t *testing.T) { testConcurrentFlows(t, SecurityTLS, 10, 1) }
+
+func TestConcurrentFlows_10(t *testing.T) { testConcurrentFlows(t, SecurityNone, 10, 10) }
+func TestConcurrentFlows_10TLS(t *testing.T) { testConcurrentFlows(t, SecurityTLS, 10, 10) }
+
+func testListen(t *testing.T, security options.VCSecurityLevel) {
+ data := "the dark knight"
+ h, vc, err := New(security, LatestVersion, tsecurity.NewPrincipal("client"), tsecurity.NewPrincipal("server"), nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer h.Close()
+ if err := h.VC.AcceptFlow(id.Flow(21)); err == nil {
+ t.Errorf("Expected AcceptFlow on a new flow to fail as Listen was not called")
+ }
+
+ ln, err := vc.Listen()
+ if err != nil {
+ t.Fatalf("vc.Listen failed: %v", err)
+ return
+ }
+ _, err = vc.Listen()
+ if err == nil {
+ t.Fatalf("Second call to vc.Listen should have failed")
+ return
+ }
+
+ if err := h.VC.AcceptFlow(id.Flow(23)); err != nil {
+ t.Fatal(err)
+ }
+ cipherdata, err := h.otherEnd.VC.Encrypt(id.Flow(23), iobuf.NewSlice([]byte(data)))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := h.VC.DispatchPayload(id.Flow(23), cipherdata); err != nil {
+ t.Fatal(err)
+ }
+ flow, err := ln.Accept()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := ln.Close(); err != nil {
+ t.Error(err)
+ }
+ flow.Close()
+ var buf [4096]byte
+ if n, err := flow.Read(buf[:]); n != len(data) || err != nil || string(buf[:n]) != data {
+ t.Errorf("Got (%d, %v) = %q, want (%d, nil) = %q", n, err, string(buf[:n]), len(data), data)
+ }
+ if n, err := flow.Read(buf[:]); n != 0 || err != io.EOF {
+ t.Errorf("Got (%d, %v) want (0, %v)", n, err, io.EOF)
+ }
+}
+func TestListen(t *testing.T) { testListen(t, SecurityNone) }
+func TestListenTLS(t *testing.T) { testListen(t, SecurityTLS) }
+
+func testNewFlowAfterClose(t *testing.T, security options.VCSecurityLevel) {
+ h, _, err := New(security, LatestVersion, tsecurity.NewPrincipal("client"), tsecurity.NewPrincipal("server"), nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer h.Close()
+ h.VC.Close("reason")
+ if err := h.VC.AcceptFlow(id.Flow(10)); err == nil {
+ t.Fatalf("New flows should not be accepted once the VC is closed")
+ }
+}
+func TestNewFlowAfterClose(t *testing.T) { testNewFlowAfterClose(t, SecurityNone) }
+func TestNewFlowAfterCloseTLS(t *testing.T) { testNewFlowAfterClose(t, SecurityTLS) }
+
+func testConnectAfterClose(t *testing.T, security options.VCSecurityLevel) {
+ h, vc, err := New(security, LatestVersion, tsecurity.NewPrincipal("client"), tsecurity.NewPrincipal("server"), nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer h.Close()
+ h.VC.Close("myerr")
+ if f, err := vc.Connect(); f != nil || err == nil || !strings.Contains(err.Error(), "myerr") {
+ t.Fatalf("Got (%v, %v), want (nil, %q)", f, err, "myerr")
+ }
+}
+func TestConnectAfterClose(t *testing.T) { testConnectAfterClose(t, SecurityNone) }
+func TestConnectAfterCloseTLS(t *testing.T) { testConnectAfterClose(t, SecurityTLS) }
+
+// helper implements vc.Helper and also sets up a single VC.
+type helper struct {
+ VC *vc.VC
+ bq bqueue.T
+
+ mu sync.Mutex
+ otherEnd *helper // GUARDED_BY(mu)
+}
+
+// New creates both ends of a VC but returns only the "client" end (i.e., the
+// one that initiated the VC). The "server" end (the one that "accepted" the VC)
+// listens for flows and simply echoes data read.
+func New(security options.VCSecurityLevel, v version.IPCVersion, client, server security.Principal, dischargeClient vc.DischargeClient, auth *vc.ServerAuthorizer) (*helper, stream.VC, error) {
+ clientH := &helper{bq: drrqueue.New(vc.MaxPayloadSizeBytes)}
+ serverH := &helper{bq: drrqueue.New(vc.MaxPayloadSizeBytes)}
+ clientH.otherEnd = serverH
+ serverH.otherEnd = clientH
+
+ vci := id.VC(1234)
+
+ clientParams := vc.Params{
+ VCI: vci,
+ Dialed: true,
+ LocalEP: clientEP,
+ RemoteEP: serverEP,
+ Pool: iobuf.NewPool(0),
+ Helper: clientH,
+ Version: v,
+ }
+ serverParams := vc.Params{
+ VCI: vci,
+ LocalEP: serverEP,
+ RemoteEP: clientEP,
+ Pool: iobuf.NewPool(0),
+ Helper: serverH,
+ Version: v,
+ }
+
+ clientH.VC = vc.InternalNew(clientParams)
+ serverH.VC = vc.InternalNew(serverParams)
+ clientH.AddReceiveBuffers(vci, vc.SharedFlowID, vc.DefaultBytesBufferedPerFlow)
+
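+	// Wire the two ends together: slices queued on each helper's bqueue are
+	// encrypted and dispatched into the peer's VC, standing in for a real
+	// network connection.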
+ go clientH.pipeLoop(serverH.VC)
+ go serverH.pipeLoop(clientH.VC)
+
+	lopts := []stream.ListenerOpt{vc.LocalPrincipal{Principal: server}, security}
+	vcopts := []stream.VCOpt{vc.LocalPrincipal{Principal: client}, security}
+
+ if dischargeClient != nil {
+ lopts = append(lopts, dischargeClient)
+ }
+ if auth != nil {
+ vcopts = append(vcopts, auth)
+ }
+
+ c := serverH.VC.HandshakeAcceptedVC(lopts...)
+ if err := clientH.VC.HandshakeDialedVC(vcopts...); err != nil {
+ go func() { <-c }()
+ return nil, nil, err
+ }
+ hr := <-c
+ if hr.Error != nil {
+ return nil, nil, hr.Error
+ }
+ go acceptLoop(hr.Listener)
+ return clientH, clientH.VC, nil
+}
+
+// pipeLoop forwards slices written to h.bq to dst.
+func (h *helper) pipeLoop(dst *vc.VC) {
+ for {
+ w, bufs, err := h.bq.Get(nil)
+ if err != nil {
+ return
+ }
+ fid := id.Flow(w.ID())
+ for _, b := range bufs {
+ cipher, err := h.VC.Encrypt(fid, b)
+ if err != nil {
+ panic(err)
+ }
+			if err := dst.DispatchPayload(fid, cipher); err != nil {
+				panic(err)
+			}
+ }
+ if w.IsDrained() {
+ h.VC.ShutdownFlow(fid)
+ dst.ShutdownFlow(fid)
+ }
+ }
+}
+
+func acceptLoop(ln stream.Listener) {
+ for {
+ f, err := ln.Accept()
+ if err != nil {
+ return
+ }
+ go echoLoop(f)
+ }
+}
+
+func echoLoop(flow stream.Flow) {
+ var buf [vc.DefaultBytesBufferedPerFlow * 20]byte
+ for {
+ n, err := flow.Read(buf[:])
+ if err == io.EOF {
+ return
+ }
+ if err == nil {
+ _, err = flow.Write(buf[:n])
+ }
+ if err != nil {
+ panic(err)
+ }
+ }
+}
+
+func (h *helper) NotifyOfNewFlow(vci id.VC, fid id.Flow, bytes uint) {
+ h.mu.Lock()
+ if h.otherEnd != nil {
+ if err := h.otherEnd.VC.AcceptFlow(fid); err != nil {
+ panic(err)
+ }
+ h.otherEnd.VC.ReleaseCounters(fid, uint32(bytes))
+ }
+ h.mu.Unlock()
+}
+
+func (h *helper) AddReceiveBuffers(vci id.VC, fid id.Flow, bytes uint) {
+ h.mu.Lock()
+ if h.otherEnd != nil {
+ h.otherEnd.VC.ReleaseCounters(fid, uint32(bytes))
+ }
+ h.mu.Unlock()
+}
+
+func (h *helper) NewWriter(vci id.VC, fid id.Flow) (bqueue.Writer, error) {
+ return h.bq.NewWriter(bqueue.ID(fid), 0, DefaultBytesBufferedPerFlow)
+}
+
+func (h *helper) Close() {
+ h.VC.Close("helper closed")
+ h.bq.Close()
+ h.mu.Lock()
+ otherEnd := h.otherEnd
+ h.otherEnd = nil
+ h.mu.Unlock()
+ if otherEnd != nil {
+ otherEnd.mu.Lock()
+ otherEnd.otherEnd = nil
+ otherEnd.mu.Unlock()
+ otherEnd.Close()
+ }
+}
+
+type endpoint naming.RoutingID
+
+func (e endpoint) Network() string { return "test" }
+func (e endpoint) VersionedString(int) string { return e.String() }
+func (e endpoint) String() string { return naming.RoutingID(e).String() }
+func (e endpoint) Name() string { return naming.JoinAddressName(e.String(), "") }
+func (e endpoint) RoutingID() naming.RoutingID { return naming.RoutingID(e) }
+func (e endpoint) Addr() net.Addr { return nil }
+func (e endpoint) ServesMountTable() bool { return false }
+func (e endpoint) BlessingNames() []string { return nil }
diff --git a/profiles/internal/ipc/stream/vc/writer.go b/profiles/internal/ipc/stream/vc/writer.go
new file mode 100644
index 0000000..d2a59e3
--- /dev/null
+++ b/profiles/internal/ipc/stream/vc/writer.go
@@ -0,0 +1,183 @@
+package vc
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+ "sync/atomic"
+
+ vsync "v.io/x/ref/lib/sync"
+ "v.io/x/ref/profiles/internal/lib/bqueue"
+ "v.io/x/ref/profiles/internal/lib/iobuf"
+)
+
+var errWriterClosed = errors.New("attempt to call Write on Flow that has been Closed")
+
+// writer implements the io.Writer and SetWriteDeadline interfaces for Flow.
+type writer struct {
+ MTU int // Maximum size (in bytes) of each slice Put into Sink.
+ Sink bqueue.Writer // Buffer queue writer where data from Write is sent as iobuf.Slice objects.
+ Alloc *iobuf.Allocator // Allocator for iobuf.Slice objects. GUARDED_BY(mu)
+ SharedCounters *vsync.Semaphore // Semaphore hosting counters shared by all flows over a VC.
+
+	mu sync.Mutex // Guards calls to Write.
+ wroteOnce bool // GUARDED_BY(mu)
+ isClosed bool // GUARDED_BY(mu)
+ closeError error // GUARDED_BY(mu)
+ closed chan struct{} // GUARDED_BY(mu)
+ deadline <-chan struct{} // GUARDED_BY(mu)
+
+ // Total number of bytes filled in by all Write calls on this writer.
+ // Atomic operations are used to manipulate it.
+ totalBytes uint32
+
+ // Accounting for counters borrowed from the shared pool.
+ muSharedCountersBorrowed sync.Mutex
+ sharedCountersBorrowed int // GUARDED_BY(muSharedCountersBorrowed)
+}
+
+func newWriter(mtu int, sink bqueue.Writer, alloc *iobuf.Allocator, counters *vsync.Semaphore) *writer {
+ return &writer{
+ MTU: mtu,
+ Sink: sink,
+ Alloc: alloc,
+ SharedCounters: counters,
+ closed: make(chan struct{}),
+ closeError: errWriterClosed,
+ }
+}
+
+// Shutdown closes the writer and discards any queued up write buffers, i.e.,
+// the bqueue.Get call will not see the buffers queued up at this writer.
+// If removeWriter is true the writer will also be removed entirely from the
+// bqueue, otherwise the now empty writer will eventually be returned by
+// bqueue.Get.
+func (w *writer) shutdown(removeWriter bool) {
+ w.Sink.Shutdown(removeWriter)
+ w.finishClose(true)
+}
+
+// Close closes the writer without discarding any queued up write buffers.
+func (w *writer) Close() {
+ w.Sink.Close()
+ w.finishClose(false)
+}
+
+func (w *writer) IsClosed() bool {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ return w.isClosed
+}
+
+func (w *writer) Closed() <-chan struct{} {
+ return w.closed
+}
+
+func (w *writer) finishClose(remoteShutdown bool) {
+ // IsClosed() and Closed() indicate that the writer is closed before
+ // finishClose() completes. This is safe because Alloc and shared counters
+ // are guarded, and are not accessed elsewhere after w.closed is closed.
+ w.mu.Lock()
+ // finishClose() is idempotent, but Go's builtin close is not.
+ if !w.isClosed {
+ w.isClosed = true
+ if remoteShutdown {
+ w.closeError = io.EOF
+ }
+ close(w.closed)
+ }
+
+ w.Alloc.Release()
+ w.mu.Unlock()
+
+ w.muSharedCountersBorrowed.Lock()
+ w.SharedCounters.IncN(uint(w.sharedCountersBorrowed))
+ w.sharedCountersBorrowed = 0
+ w.muSharedCountersBorrowed.Unlock()
+}
+
+// Write implements the Write call for a Flow.
+//
+// Flow control is achieved using receive buffers (aka counters), wherein the
+// receiving end sends out the number of bytes that it is willing to read. To
+// avoid an additional round-trip for the creation of new flows, the very first
+// write of a new flow borrows counters from a shared pool.
+func (w *writer) Write(b []byte) (int, error) {
+ written := 0
+ // net.Conn requires that multiple goroutines be able to invoke methods
+	// simultaneously.
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if w.isClosed {
+ return 0, w.closeError
+ }
+
+ for len(b) > 0 {
+ n := len(b)
+ if n > w.MTU {
+ n = w.MTU
+ }
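+		// The first write on the flow borrows from the shared counter pool (up
+		// to MaxSharedBytes) so that it need not wait for flow-specific
+		// counters from the receiver.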
+ if !w.wroteOnce && w.SharedCounters != nil {
+ w.wroteOnce = true
+ if n > MaxSharedBytes {
+ n = MaxSharedBytes
+ }
+ if err := w.SharedCounters.DecN(uint(n), w.deadline); err != nil {
+ if err == vsync.ErrCanceled {
+ return 0, timeoutError{}
+ }
+ return 0, fmt.Errorf("failed to get quota from receive buffers shared by all new flows on a VC: %v", err)
+ }
+ w.muSharedCountersBorrowed.Lock()
+ w.sharedCountersBorrowed = n
+ w.muSharedCountersBorrowed.Unlock()
+ w.Sink.Release(n)
+ }
+ slice := w.Alloc.Copy(b[:n])
+ if err := w.Sink.Put(slice, w.deadline); err != nil {
+ slice.Release()
+ atomic.AddUint32(&w.totalBytes, uint32(written))
+ switch err {
+ case bqueue.ErrCancelled, vsync.ErrCanceled:
+ return written, timeoutError{}
+ case bqueue.ErrWriterIsClosed:
+ return written, w.closeError
+ default:
+ return written, fmt.Errorf("bqueue.Writer.Put failed: %v", err)
+ }
+ }
+ written += n
+ b = b[n:]
+ }
+ atomic.AddUint32(&w.totalBytes, uint32(written))
+ return written, nil
+}
+
+func (w *writer) SetDeadline(deadline <-chan struct{}) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ w.deadline = deadline
+}
+
+// Release allows the next 'bytes' of data to be removed from the buffer queue
+// writer and passed to bqueue.Get.
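+// Counters borrowed from the shared pool are repaid first; only the
+// remainder is released to this flow's own buffer queue writer.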
+func (w *writer) Release(bytes int) {
+ w.muSharedCountersBorrowed.Lock()
+ switch {
+ case w.sharedCountersBorrowed == 0:
+ w.Sink.Release(bytes)
+ case w.sharedCountersBorrowed >= bytes:
+ w.SharedCounters.IncN(uint(bytes))
+ w.sharedCountersBorrowed -= bytes
+ default:
+ w.SharedCounters.IncN(uint(w.sharedCountersBorrowed))
+ w.Sink.Release(bytes - w.sharedCountersBorrowed)
+ w.sharedCountersBorrowed = 0
+ }
+ w.muSharedCountersBorrowed.Unlock()
+}
+
+func (w *writer) BytesWritten() uint32 {
+ return atomic.LoadUint32(&w.totalBytes)
+}
diff --git a/profiles/internal/ipc/stream/vc/writer_test.go b/profiles/internal/ipc/stream/vc/writer_test.go
new file mode 100644
index 0000000..345fcd4
--- /dev/null
+++ b/profiles/internal/ipc/stream/vc/writer_test.go
@@ -0,0 +1,216 @@
+package vc
+
+import (
+ "bytes"
+ "io"
+ "net"
+ "reflect"
+ "testing"
+
+ "v.io/x/ref/lib/sync"
+ "v.io/x/ref/profiles/internal/lib/bqueue"
+ "v.io/x/ref/profiles/internal/lib/bqueue/drrqueue"
+ "v.io/x/ref/profiles/internal/lib/iobuf"
+)
+
+// TestWrite is a very basic, easy to follow, but not very thorough test of the
+// writer. More thorough testing of flows (and implicitly the writer) is in
+// vc_test.go.
+func TestWrite(t *testing.T) {
+ bq := drrqueue.New(128)
+ defer bq.Close()
+
+ bw, err := bq.NewWriter(0, 0, 10)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ shared := sync.NewSemaphore()
+ shared.IncN(4)
+
+ w := newTestWriter(bw, shared)
+
+ if n, err := w.Write([]byte("abcd")); n != 4 || err != nil {
+ t.Errorf("Got (%d, %v) want (4, nil)", n, err)
+ }
+
+ // Should have used up 4 shared counters
+ if err := shared.TryDecN(1); err != sync.ErrTryAgain {
+ t.Errorf("Got %v want %v", err, sync.ErrTryAgain)
+ }
+
+ // Further Writes will block until some space has been released.
+ w.Release(10)
+ if n, err := w.Write([]byte("efghij")); n != 6 || err != nil {
+ t.Errorf("Got (%d, %v) want (5, nil)", n, err)
+ }
+ // And the release should have returned to the shared counters set
+ if err := shared.TryDecN(4); err != nil {
+ t.Errorf("Got %v want %v", err, nil)
+ }
+
+ // Further writes will block since all 10 bytes (provided to NewWriter)
+ // have been exhausted and Get hasn't been called on bq yet.
+ deadline := make(chan struct{}, 0)
+ w.SetDeadline(deadline)
+ close(deadline)
+
+ w.SetDeadline(deadline)
+ if n, err := w.Write([]byte("k")); n != 0 || !isTimeoutError(err) {
+ t.Errorf("Got (%d, %v) want (0, timeout error)", n, err)
+ }
+
+ w.Close()
+ if w.BytesWritten() != 10 {
+ t.Errorf("Got %d want %d", w.BytesWritten(), 10)
+ }
+
+	_, bufs, err := bq.Get(nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+ var read bytes.Buffer
+ for _, b := range bufs {
+ read.Write(b.Contents)
+ b.Release()
+ }
+ if g, w := read.String(), "abcdefghij"; g != w {
+ t.Errorf("Got %q want %q", g, w)
+ }
+}
+
+func TestCloseBeforeWrite(t *testing.T) {
+ bq := drrqueue.New(128)
+ defer bq.Close()
+
+ bw, err := bq.NewWriter(0, 0, 10)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ shared := sync.NewSemaphore()
+ shared.IncN(4)
+
+ w := newTestWriter(bw, shared)
+ w.Close()
+
+ if n, err := w.Write([]byte{1, 2}); n != 0 || err != errWriterClosed {
+ t.Errorf("Got (%v, %v) want (0, %v)", n, err, errWriterClosed)
+ }
+}
+
+func TestShutdownBeforeWrite(t *testing.T) {
+ bq := drrqueue.New(128)
+ defer bq.Close()
+
+ bw, err := bq.NewWriter(0, 0, 10)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ shared := sync.NewSemaphore()
+ shared.IncN(4)
+
+ w := newTestWriter(bw, shared)
+ w.shutdown(true)
+
+ if n, err := w.Write([]byte{1, 2}); n != 0 || err != io.EOF {
+ t.Errorf("Got (%v, %v) want (0, %v)", n, err, io.EOF)
+ }
+}
+
+func TestCloseDoesNotDiscardPendingWrites(t *testing.T) {
+ bq := drrqueue.New(128)
+ defer bq.Close()
+
+ bw, err := bq.NewWriter(0, 0, 10)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ shared := sync.NewSemaphore()
+ shared.IncN(2)
+
+ w := newTestWriter(bw, shared)
+ data := []byte{1, 2}
+ if n, err := w.Write(data); n != len(data) || err != nil {
+ t.Fatalf("Got (%d, %v) want (%d, nil)", n, err, len(data))
+ }
+ w.Close()
+
+ gbw, bufs, err := bq.Get(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if gbw != bw {
+ t.Fatalf("Got %p want %p", gbw, bw)
+ }
+ if len(bufs) != 1 {
+ t.Fatalf("Got %d bufs, want 1", len(bufs))
+ }
+ if !reflect.DeepEqual(bufs[0].Contents, data) {
+ t.Fatalf("Got %v want %v", bufs[0].Contents, data)
+ }
+ if !gbw.IsDrained() {
+ t.Fatal("Expected bqueue.Writer to be drained")
+ }
+}
+
+func TestWriterCloseIsIdempotent(t *testing.T) {
+ bq := drrqueue.New(128)
+ defer bq.Close()
+
+ bw, err := bq.NewWriter(0, 0, 10)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ shared := sync.NewSemaphore()
+ shared.IncN(1)
+ w := newTestWriter(bw, shared)
+ if n, err := w.Write([]byte{1}); n != 1 || err != nil {
+ t.Fatalf("Got (%d, %v) want (1, nil)", n, err)
+ }
+ // Should have used up the shared counter.
+ if err := shared.TryDec(); err != sync.ErrTryAgain {
+ t.Fatalf("Got %v want %v", err, sync.ErrTryAgain)
+ }
+ w.Close()
+ // The shared counter should have been returned
+ if err := shared.TryDec(); err != nil {
+ t.Fatalf("Got %v want nil", err)
+ }
+ // Closing again shouldn't affect the shared counters
+ w.Close()
+ if err := shared.TryDec(); err != sync.ErrTryAgain {
+ t.Fatalf("Got %v want %v", err, sync.ErrTryAgain)
+ }
+}
+
+func TestClosedChannel(t *testing.T) {
+ bq := drrqueue.New(128)
+ defer bq.Close()
+
+ bw, err := bq.NewWriter(0, 0, 10)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ shared := sync.NewSemaphore()
+ shared.IncN(4)
+
+ w := newTestWriter(bw, shared)
+ go w.Close()
+ <-w.Closed()
+
+ if n, err := w.Write([]byte{1, 2}); n != 0 || err != errWriterClosed {
+ t.Errorf("Got (%v, %v) want (0, %v)", n, err, errWriterClosed)
+ }
+}
+
+func newTestWriter(bqw bqueue.Writer, shared *sync.Semaphore) *writer {
+ alloc := iobuf.NewAllocator(iobuf.NewPool(0), 0)
+ return newWriter(16, bqw, alloc, shared)
+}
+
+func isTimeoutError(err error) bool {
+ neterr, ok := err.(net.Error)
+ return ok && neterr.Timeout()
+}
diff --git a/profiles/internal/ipc/stream/vif/auth.go b/profiles/internal/ipc/stream/vif/auth.go
new file mode 100644
index 0000000..1edf8e1
--- /dev/null
+++ b/profiles/internal/ipc/stream/vif/auth.go
@@ -0,0 +1,229 @@
+package vif
+
+import (
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+
+ "golang.org/x/crypto/nacl/box"
+
+ ipcversion "v.io/v23/ipc/version"
+ "v.io/v23/options"
+ "v.io/v23/security"
+ "v.io/x/ref/profiles/internal/ipc/stream"
+ "v.io/x/ref/profiles/internal/ipc/stream/crypto"
+ "v.io/x/ref/profiles/internal/ipc/stream/message"
+ "v.io/x/ref/profiles/internal/ipc/stream/vc"
+ "v.io/x/ref/profiles/internal/ipc/version"
+ "v.io/x/ref/profiles/internal/lib/iobuf"
+)
+
+var (
+ errUnsupportedEncryptVersion = errors.New("unsupported encryption version")
+ errVersionNegotiationFailed = errors.New("encryption version negotiation failed")
+ nullCipher crypto.NullControlCipher
+)
+
+// privateData includes secret data we need for encryption.
+type privateData struct {
+ naclBoxPrivateKey [32]byte
+}
+
+// AuthenticateAsClient sends a HopSetup message if possible. If so, it chooses
+// encryption based on the max supported version.
+//
+// The sequence is initiated by the client.
+//
+// - If the versions include IPCVersion6 or greater, the client sends a
+// HopSetup message to the server, containing the client's supported
+// versions, and the client's crypto options. The HopSetup message
+// is sent in the clear.
+//
+// - When the server receives the HopSetup message, it calls
+// AuthenticateAsServer, which constructs a response HopSetup containing
+// the server's version range, and any crypto options.
+//
+// - For IPCVersion6 and IPCVersion7, the client and server generate fresh
+// public/private key pairs, sending the public key to the peer as a crypto
+// option. The remainder of the communication is encrypted as
+// HopSetupStream messages using NewControlCipherIPC6, which is based on
+//   golang.org/x/crypto/nacl/box.
+//
+// - Once the encrypted HopSetupStream channel is set up, the client and
+// server authenticate using the vc.AuthenticateAs{Client,Server} protocol.
+//
+// Note that the HopSetup messages are sent in the clear, so they are subject to
+// modification by a man-in-the-middle, which can currently force a downgrade by
+// modifying the acceptable version ranges downward. This can be addressed by
+// including a hash of the HopSetup message in the encrypted stream. It is
+// likely that this will be addressed in subsequent protocol versions (or it may
+// not be addressed at all if IPCVersion6 becomes the only supported version).
+func AuthenticateAsClient(writer io.Writer, reader *iobuf.Reader, versions *version.Range, params security.CallParams, auth *vc.ServerAuthorizer) (crypto.ControlCipher, error) {
+ if versions == nil {
+ versions = version.SupportedRange
+ }
+ if params.LocalPrincipal == nil {
+ // If there is no principal, we do not support encryption/authentication.
+ var err error
+ versions, err = versions.Intersect(&version.Range{Min: 0, Max: ipcversion.IPCVersion5})
+ if err != nil {
+ return nil, err
+ }
+ }
+ if versions.Max < ipcversion.IPCVersion6 {
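+		// Versions below IPCVersion6 do not do the encrypted setup handshake;
+		// fall back to the null cipher.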
+ return nullCipher, nil
+ }
+
+ // The client has not yet sent its public data. Construct it and send it.
+ pvt, pub, err := makeHopSetup(versions)
+ if err != nil {
+ return nil, err
+ }
+ if err := message.WriteTo(writer, &pub, nullCipher); err != nil {
+ return nil, err
+ }
+
+ // Read the server's public data.
+ pmsg, err := message.ReadFrom(reader, nullCipher)
+ if err != nil {
+ return nil, err
+ }
+ ppub, ok := pmsg.(*message.HopSetup)
+ if !ok {
+ return nil, errVersionNegotiationFailed
+ }
+
+ // Choose the max version in the intersection.
+ vrange, err := pub.Versions.Intersect(&ppub.Versions)
+ if err != nil {
+ return nil, err
+ }
+ v := vrange.Max
+ if v < ipcversion.IPCVersion6 {
+ return nullCipher, nil
+ }
+
+ // Perform the authentication.
+ return authenticateAsClient(writer, reader, params, auth, &pvt, &pub, ppub, v)
+}
+
+func authenticateAsClient(writer io.Writer, reader *iobuf.Reader, params security.CallParams, auth *vc.ServerAuthorizer,
+ pvt *privateData, pub, ppub *message.HopSetup, version ipcversion.IPCVersion) (crypto.ControlCipher, error) {
+ if version < ipcversion.IPCVersion6 {
+ return nil, errUnsupportedEncryptVersion
+ }
+ pbox := ppub.NaclBox()
+ if pbox == nil {
+ return nil, errVersionNegotiationFailed
+ }
+ c := crypto.NewControlCipherIPC6(&pbox.PublicKey, &pvt.naclBoxPrivateKey, false)
+ sconn := newSetupConn(writer, reader, c)
+ // TODO(jyh): act upon the authentication results.
+ _, _, _, err := vc.AuthenticateAsClient(sconn, crypto.NewNullCrypter(), params, auth, version)
+ if err != nil {
+ return nil, fmt.Errorf("authentication failed: %v", err)
+ }
+ return c, nil
+}
+
+// AuthenticateAsServer handles a HopSetup message, choosing authentication
+// based on the max common version.
+//
+// See AuthenticateAsClient for a description of the negotiation.
+func AuthenticateAsServer(writer io.Writer, reader *iobuf.Reader, versions *version.Range, principal security.Principal, lBlessings security.Blessings,
+ dc vc.DischargeClient, ppub *message.HopSetup) (crypto.ControlCipher, error) {
+ var err error
+ if versions == nil {
+ versions = version.SupportedRange
+ }
+ if principal == nil {
+ // If there is no principal, we don't support encryption/authentication.
+ versions, err = versions.Intersect(&version.Range{Min: 0, Max: ipcversion.IPCVersion5})
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Create our public data and send it to the client.
+ pvt, pub, err := makeHopSetup(versions)
+ if err != nil {
+ return nil, err
+ }
+ if err := message.WriteTo(writer, &pub, nullCipher); err != nil {
+ return nil, err
+ }
+
+ // Choose the max version in common.
+ vrange, err := pub.Versions.Intersect(&ppub.Versions)
+ if err != nil {
+ return nil, err
+ }
+ v := vrange.Max
+ if v < ipcversion.IPCVersion6 {
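+		// The negotiated version predates the encrypted setup handshake;
+		// continue with the null cipher.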
+ return nullCipher, nil
+ }
+
+ // Perform authentication.
+ return authenticateAsServerIPC6(writer, reader, principal, lBlessings, dc, &pvt, &pub, ppub, v)
+}
+
+func authenticateAsServerIPC6(writer io.Writer, reader *iobuf.Reader, principal security.Principal, lBlessings security.Blessings, dc vc.DischargeClient,
+ pvt *privateData, pub, ppub *message.HopSetup, version ipcversion.IPCVersion) (crypto.ControlCipher, error) {
+ if version < ipcversion.IPCVersion6 {
+ return nil, errUnsupportedEncryptVersion
+ }
+ box := ppub.NaclBox()
+ if box == nil {
+ return nil, errVersionNegotiationFailed
+ }
+ c := crypto.NewControlCipherIPC6(&box.PublicKey, &pvt.naclBoxPrivateKey, true)
+ sconn := newSetupConn(writer, reader, c)
+ // TODO(jyh): act upon authentication results.
+ _, err := vc.AuthenticateAsServer(sconn, principal, lBlessings, dc, crypto.NewNullCrypter(), version)
+ if err != nil {
+ return nil, fmt.Errorf("authentication failed: %v", err)
+ }
+ return c, nil
+}
+
+// serverAuthOptions extracts the Principal from the options list.
+func serverAuthOptions(lopts []stream.ListenerOpt) (principal security.Principal, lBlessings security.Blessings, dischargeClient vc.DischargeClient, err error) {
+ var securityLevel options.VCSecurityLevel
+ for _, o := range lopts {
+ switch v := o.(type) {
+ case vc.DischargeClient:
+ dischargeClient = v
+ case vc.LocalPrincipal:
+ principal = v.Principal
+ case options.VCSecurityLevel:
+ securityLevel = v
+ case options.ServerBlessings:
+ lBlessings = v.Blessings
+ }
+ }
+ switch securityLevel {
+ case options.VCSecurityConfidential:
+ if principal == nil {
+ principal = vc.AnonymousPrincipal
+ }
+ if lBlessings.IsZero() {
+ lBlessings = principal.BlessingStore().Default()
+ }
+ case options.VCSecurityNone:
+ principal = nil
+ default:
+ err = fmt.Errorf("unrecognized VC security level: %v", securityLevel)
+ }
+ return
+}
+
+// makeHopSetup constructs the options that this process can support.
+func makeHopSetup(versions *version.Range) (pvt privateData, pub message.HopSetup, err error) {
+ pub.Versions = *versions
+ var pubKey, pvtKey *[32]byte
+	pubKey, pvtKey, err = box.GenerateKey(rand.Reader)
+	if err != nil {
+		return
+	}
+	pub.Options = append(pub.Options, &message.NaclBox{PublicKey: *pubKey})
+ pvt.naclBoxPrivateKey = *pvtKey
+ return
+}
diff --git a/profiles/internal/ipc/stream/vif/doc.go b/profiles/internal/ipc/stream/vif/doc.go
new file mode 100644
index 0000000..47ba9f6
--- /dev/null
+++ b/profiles/internal/ipc/stream/vif/doc.go
@@ -0,0 +1,4 @@
+// Package vif implements a virtual network interface that wraps over a
+// net.Conn and provides the ability to Dial and Listen for virtual circuits
+// (v.io/x/ref/profiles/internal/ipc/stream.VC).
+package vif
diff --git a/profiles/internal/ipc/stream/vif/set.go b/profiles/internal/ipc/stream/vif/set.go
new file mode 100644
index 0000000..0ff5c95
--- /dev/null
+++ b/profiles/internal/ipc/stream/vif/set.go
@@ -0,0 +1,119 @@
+package vif
+
+import (
+ "math/rand"
+ "net"
+ "runtime"
+ "sync"
+
+ "v.io/v23/ipc"
+)
+
+// Set implements a set of VIFs keyed by (network, address) of the underlying
+// connection. Multiple goroutines can invoke methods on the Set
+// simultaneously.
+type Set struct {
+ mu sync.RWMutex
+ set map[string][]*VIF
+}
+
+// NewSet returns a new Set of VIFs.
+func NewSet() *Set {
+ return &Set{set: make(map[string][]*VIF)}
+}
+
+// Find returns a VIF where the remote end of the underlying network connection
+// is identified by the provided (network, address). Returns nil if there is no
+// such VIF.
+//
+// If there are multiple VIFs established to the same remote network address,
+// Find will randomly return one of them.
+func (s *Set) Find(network, address string) *VIF {
+ if len(address) == 0 ||
+ (network == "pipe" && address == "pipe") ||
+ (runtime.GOOS == "linux" && network == "unix" && address == "@") { // autobind
+ // Some network connections (like those created with net.Pipe or Unix sockets)
+ // do not end up with distinct net.Addrs on distinct net.Conns. For those cases,
+ // avoid the cache collisions by disabling cache lookups for them.
+ return nil
+ }
+
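+	// A network may be registered under multiple compatible protocol names
+	// (e.g. "tcp" also covers "tcp4" and "tcp6"), so try all of them as keys.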
+ var keys []string
+ _, _, p := ipc.RegisteredProtocol(network)
+ for _, n := range p {
+ keys = append(keys, key(n, address))
+ }
+
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ for _, k := range keys {
+ vifs := s.set[k]
+ if len(vifs) > 0 {
+ return vifs[rand.Intn(len(vifs))]
+ }
+ }
+ return nil
+}
+
+// Insert adds a VIF to the set
+func (s *Set) Insert(vif *VIF) {
+ addr := vif.conn.RemoteAddr()
+ k := key(addr.Network(), addr.String())
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ vifs := s.set[k]
+ for _, v := range vifs {
+ if v == vif {
+ return
+ }
+ }
+ s.set[k] = append(vifs, vif)
+ vif.addSet(s)
+}
+
+// Delete removes a VIF from the set
+func (s *Set) Delete(vif *VIF) {
+ vif.removeSet(s)
+ addr := vif.conn.RemoteAddr()
+ k := key(addr.Network(), addr.String())
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ vifs := s.set[k]
+ for i, v := range vifs {
+ if v == vif {
+ if len(vifs) == 1 {
+ delete(s.set, k)
+ } else {
+ s.set[k] = append(vifs[:i], vifs[i+1:]...)
+ }
+ return
+ }
+ }
+}
+
+// List returns the elements in the set as a slice.
+func (s *Set) List() []*VIF {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ l := make([]*VIF, 0, len(s.set))
+ for _, vifs := range s.set {
+ l = append(l, vifs...)
+ }
+ return l
+}
+
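+// key returns the map key for a (network, address) pair. For "tcp" and "ws"
+// the network is qualified with "4" or "6" based on the IP version of the
+// address, so that "tcp" and the corresponding "tcp4"/"tcp6" forms map to the
+// same entry.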
+func key(network, address string) string {
+ if network == "tcp" || network == "ws" {
+ host, _, _ := net.SplitHostPort(address)
+ switch ip := net.ParseIP(host); {
+ case ip == nil:
+ // This may happen when address is a hostname. But we do not care
+ // about it, since vif cannot be found with a hostname anyway.
+ case ip.To4() != nil:
+ network += "4"
+ default:
+ network += "6"
+ }
+ }
+ return network + ":" + address
+}
diff --git a/profiles/internal/ipc/stream/vif/set_test.go b/profiles/internal/ipc/stream/vif/set_test.go
new file mode 100644
index 0000000..0a45f12
--- /dev/null
+++ b/profiles/internal/ipc/stream/vif/set_test.go
@@ -0,0 +1,319 @@
+package vif_test
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net"
+ "os"
+ "path"
+ "testing"
+ "time"
+
+ "v.io/v23/ipc"
+ "v.io/v23/naming"
+
+ _ "v.io/x/ref/profiles"
+ "v.io/x/ref/profiles/internal/ipc/stream/vif"
+)
+
+var supportsIPv6 bool
+
+func init() {
+ ipc.RegisterProtocol("unix", net.DialTimeout, net.Listen)
+
+ // Check whether the platform supports IPv6.
+	if ln, err := net.Listen("tcp6", "[::1]:0"); err == nil {
+		ln.Close()
+		supportsIPv6 = true
+	}
+}
+
+func newConn(network, address string) (net.Conn, net.Conn, error) {
+ dfunc, lfunc, _ := ipc.RegisteredProtocol(network)
+ ln, err := lfunc(network, address)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer ln.Close()
+
+ done := make(chan net.Conn)
+ go func() {
+ conn, err := ln.Accept()
+ if err != nil {
+ panic(err)
+ }
+ conn.Read(make([]byte, 1)) // Read a dummy byte.
+ done <- conn
+ }()
+
+ conn, err := dfunc(ln.Addr().Network(), ln.Addr().String(), 1*time.Second)
+ if err != nil {
+ return nil, nil, err
+ }
+ // Write a dummy byte since wsh listener waits for the magic bytes for ws.
+ conn.Write([]byte("."))
+ return conn, <-done, nil
+}
+
+func newVIF(c, s net.Conn) (*vif.VIF, *vif.VIF, error) {
+ done := make(chan *vif.VIF)
+ go func() {
+ vf, err := vif.InternalNewAcceptedVIF(s, naming.FixedRoutingID(0x5), nil)
+ if err != nil {
+ panic(err)
+ }
+ done <- vf
+ }()
+
+ vf, err := vif.InternalNewDialedVIF(c, naming.FixedRoutingID(0xc), nil, nil, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ return vf, <-done, nil
+}
+
+func diff(a, b []string) []string {
+ m := make(map[string]struct{})
+ for _, x := range b {
+ m[x] = struct{}{}
+ }
+ d := make([]string, 0, len(a))
+ for _, x := range a {
+ if _, ok := m[x]; !ok {
+ d = append(d, x)
+ }
+ }
+ return d
+}
+
+func TestSetBasic(t *testing.T) {
+ sockdir, err := ioutil.TempDir("", "TestSetBasic")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(sockdir)
+
+ all := ipc.RegisteredProtocols()
+ unknown := naming.UnknownProtocol
+ tests := []struct {
+ network, address string
+ compatibles []string
+ }{
+ {"tcp", "127.0.0.1:0", []string{"tcp", "tcp4", "wsh", "wsh4", unknown}},
+ {"tcp4", "127.0.0.1:0", []string{"tcp", "tcp4", "wsh", "wsh4", unknown}},
+ {"tcp", "[::1]:0", []string{"tcp", "tcp6", "wsh", "wsh6", unknown}},
+ {"tcp6", "[::1]:0", []string{"tcp", "tcp6", "wsh", "wsh6", unknown}},
+ {"ws", "127.0.0.1:0", []string{"ws", "ws4", "wsh", "wsh4", unknown}},
+ {"ws4", "127.0.0.1:0", []string{"ws", "ws4", "wsh", "wsh4", unknown}},
+ {"ws", "[::1]:0", []string{"ws", "ws6", "wsh", "wsh6", unknown}},
+ {"ws6", "[::1]:0", []string{"ws", "ws6", "wsh", "wsh6", unknown}},
+ // wsh dial always uses tcp.
+ {"wsh", "127.0.0.1:0", []string{"tcp", "tcp4", "wsh", "wsh4", unknown}},
+ {"wsh4", "127.0.0.1:0", []string{"tcp", "tcp4", "wsh", "wsh4", unknown}},
+ {"wsh", "[::1]:0", []string{"tcp", "tcp6", "wsh", "wsh6", unknown}},
+ {"wsh6", "[::1]:0", []string{"tcp", "tcp6", "wsh", "wsh6", unknown}},
+ {unknown, "127.0.0.1:0", []string{"tcp", "tcp4", "wsh", "wsh4", unknown}},
+ {unknown, "[::1]:0", []string{"tcp", "tcp6", "wsh", "wsh6", unknown}},
+ {"unix", path.Join(sockdir, "socket"), []string{"unix"}},
+ }
+
+ set := vif.NewSet()
+ for _, test := range tests {
+ if test.address == "[::1]:0" && !supportsIPv6 {
+ continue
+ }
+
+ name := fmt.Sprintf("(%q, %q)", test.network, test.address)
+
+ c, s, err := newConn(test.network, test.address)
+ if err != nil {
+ t.Fatal(err)
+ }
+ vf, _, err := newVIF(c, s)
+ if err != nil {
+ t.Fatal(err)
+ }
+ a := c.RemoteAddr()
+
+ set.Insert(vf)
+ for _, n := range test.compatibles {
+ if found := set.Find(n, a.String()); found == nil {
+ t.Fatalf("%s: Got nil, but want [%v] on Find(%q, %q))", name, vf, n, a)
+ }
+ }
+
+ for _, n := range diff(all, test.compatibles) {
+ if v := set.Find(n, a.String()); v != nil {
+ t.Fatalf("%s: Got [%v], but want nil on Find(%q, %q))", name, v, n, a)
+ }
+ }
+
+ set.Delete(vf)
+ for _, n := range all {
+ if v := set.Find(n, a.String()); v != nil {
+ t.Fatalf("%s: Got [%v], but want nil on Find(%q, %q))", name, v, n, a)
+ }
+ }
+ }
+}
+
+func TestSetWithPipes(t *testing.T) {
+ c1, s1 := net.Pipe()
+ c2, s2 := net.Pipe()
+ a1 := c1.RemoteAddr()
+ a2 := c2.RemoteAddr()
+ if a1.Network() != a2.Network() || a1.String() != a2.String() {
+ t.Fatalf("This test was intended for distinct connections that have duplicate RemoteAddrs. "+
+ "That does not seem to be the case with (%q, %q) and (%q, %q)",
+ a1.Network(), a1, a2.Network(), a2)
+ }
+
+ vf1, _, err := newVIF(c1, s1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ vf2, _, err := newVIF(c2, s2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ set := vif.NewSet()
+ set.Insert(vf1)
+ if v := set.Find(a1.Network(), a1.String()); v != nil {
+ t.Fatalf("Got [%v], but want nil on Find(%q, %q))", v, a1.Network(), a1)
+ }
+ if l := set.List(); len(l) != 1 || l[0] != vf1 {
+ t.Errorf("Unexpected list of VIFs: %v", l)
+ }
+
+ set.Insert(vf2)
+ if v := set.Find(a2.Network(), a2.String()); v != nil {
+ t.Fatalf("Got [%v], but want nil on Find(%q, %q))", v, a2.Network(), a2)
+ }
+ if l := set.List(); len(l) != 2 || l[0] != vf1 || l[1] != vf2 {
+ t.Errorf("Unexpected list of VIFs: %v", l)
+ }
+
+ set.Delete(vf1)
+ if l := set.List(); len(l) != 1 || l[0] != vf2 {
+ t.Errorf("Unexpected list of VIFs: %v", l)
+ }
+ set.Delete(vf2)
+ if l := set.List(); len(l) != 0 {
+ t.Errorf("Unexpected list of VIFs: %v", l)
+ }
+}
+
+func TestSetWithUnixSocket(t *testing.T) {
+ dir, err := ioutil.TempDir("", "TestSetWithUnixSocket")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ c1, s1, err := newConn("unix", path.Join(dir, "socket1"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ c2, s2, err := newConn("unix", path.Join(dir, "socket2"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // The client side address is always unix:@ regardless of socket name.
+ a1 := s1.RemoteAddr()
+ a2 := s2.RemoteAddr()
+ if a1.Network() != a2.Network() || a1.String() != a2.String() {
+ t.Fatalf("This test was intended for distinct connections that have duplicate RemoteAddrs. "+
+ "That does not seem to be the case with (%q, %q) and (%q, %q)",
+ a1.Network(), a1, a2.Network(), a2)
+ }
+
+ _, vf1, err := newVIF(c1, s1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, vf2, err := newVIF(c2, s2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ set := vif.NewSet()
+ set.Insert(vf1)
+ if v := set.Find(a1.Network(), a1.String()); v != nil {
+ t.Fatalf("Got [%v], but want nil on Find(%q, %q))", v, a1.Network(), a1)
+ }
+ if l := set.List(); len(l) != 1 || l[0] != vf1 {
+ t.Errorf("Unexpected list of VIFs: %v", l)
+ }
+
+ set.Insert(vf2)
+ if v := set.Find(a2.Network(), a2.String()); v != nil {
+ t.Fatalf("Got [%v], but want nil on Find(%q, %q))", v, a2.Network(), a2)
+ }
+ if l := set.List(); len(l) != 2 || l[0] != vf1 || l[1] != vf2 {
+ t.Errorf("Unexpected list of VIFs: %v", l)
+ }
+
+ set.Delete(vf1)
+ if l := set.List(); len(l) != 1 || l[0] != vf2 {
+ t.Errorf("Unexpected list of VIFs: %v", l)
+ }
+ set.Delete(vf2)
+ if l := set.List(); len(l) != 0 {
+ t.Errorf("Unexpected list of VIFs: %v", l)
+ }
+}
+
+func TestSetInsertDelete(t *testing.T) {
+ c1, s1 := net.Pipe()
+ c2, s2 := net.Pipe()
+ vf1, _, err := newVIF(c1, s1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ vf2, _, err := newVIF(c2, s2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ set1 := vif.NewSet()
+ set2 := vif.NewSet()
+
+ set1.Insert(vf1)
+ if l := set1.List(); len(l) != 1 || l[0] != vf1 {
+ t.Errorf("Unexpected list of VIFs: %v", l)
+ }
+ set1.Insert(vf2)
+ if l := set1.List(); len(l) != 2 || l[0] != vf1 || l[1] != vf2 {
+ t.Errorf("Unexpected list of VIFs: %v", l)
+ }
+
+ set2.Insert(vf1)
+ set2.Insert(vf2)
+
+ set1.Delete(vf1)
+ if l := set1.List(); len(l) != 1 || l[0] != vf2 {
+ t.Errorf("Unexpected list of VIFs: %v", l)
+ }
+ if l := set2.List(); len(l) != 2 || l[0] != vf1 || l[1] != vf2 {
+ t.Errorf("Unexpected list of VIFs: %v", l)
+ }
+
+ vf1.Close()
+ if l := set1.List(); len(l) != 1 || l[0] != vf2 {
+ t.Errorf("Unexpected list of VIFs: %v", l)
+ }
+ if l := set2.List(); len(l) != 1 || l[0] != vf2 {
+ t.Errorf("Unexpected list of VIFs: %v", l)
+ }
+
+ vf2.Close()
+ if l := set1.List(); len(l) != 0 {
+ t.Errorf("Unexpected list of VIFs: %v", l)
+ }
+ if l := set2.List(); len(l) != 0 {
+ t.Errorf("Unexpected list of VIFs: %v", l)
+ }
+}
diff --git a/profiles/internal/ipc/stream/vif/setup_conn.go b/profiles/internal/ipc/stream/vif/setup_conn.go
new file mode 100644
index 0000000..9fa9553
--- /dev/null
+++ b/profiles/internal/ipc/stream/vif/setup_conn.go
@@ -0,0 +1,64 @@
+package vif
+
+import (
+ "io"
+
+ "v.io/x/ref/profiles/internal/ipc/stream/crypto"
+ "v.io/x/ref/profiles/internal/ipc/stream/message"
+ "v.io/x/ref/profiles/internal/lib/iobuf"
+)
+
+// setupConn writes the data to the net.Conn using HopSetupStream messages.
+type setupConn struct {
+ writer io.Writer
+ reader *iobuf.Reader
+ cipher crypto.ControlCipher
+ rbuffer []byte // read buffer
+}
+
+var _ io.ReadWriteCloser = (*setupConn)(nil)
+
+const maxFrameSize = 8192
+
+func newSetupConn(writer io.Writer, reader *iobuf.Reader, c crypto.ControlCipher) *setupConn {
+ return &setupConn{writer: writer, reader: reader, cipher: c}
+}
+
+// Read implements the method from net.Conn.
+func (s *setupConn) Read(buf []byte) (int, error) {
+ for len(s.rbuffer) == 0 {
+ msg, err := message.ReadFrom(s.reader, s.cipher)
+ if err != nil {
+ return 0, err
+ }
+ emsg, ok := msg.(*message.HopSetupStream)
+ if !ok {
+ return 0, errVersionNegotiationFailed
+ }
+ s.rbuffer = emsg.Data
+ }
+ n := copy(buf, s.rbuffer)
+ s.rbuffer = s.rbuffer[n:]
+ return n, nil
+}
+
+// Write implements the method from net.Conn.
+func (s *setupConn) Write(buf []byte) (int, error) {
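+	// Frame the payload into HopSetupStream messages of at most maxFrameSize bytes each.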
+ amount := 0
+ for len(buf) > 0 {
+ n := len(buf)
+ if n > maxFrameSize {
+ n = maxFrameSize
+ }
+ emsg := message.HopSetupStream{Data: buf[:n]}
+ if err := message.WriteTo(s.writer, &emsg, s.cipher); err != nil {
+ return 0, err
+ }
+ buf = buf[n:]
+ amount += n
+ }
+ return amount, nil
+}
+
+// Close does nothing.
+func (s *setupConn) Close() error { return nil }
diff --git a/profiles/internal/ipc/stream/vif/setup_conn_test.go b/profiles/internal/ipc/stream/vif/setup_conn_test.go
new file mode 100644
index 0000000..19a7b99
--- /dev/null
+++ b/profiles/internal/ipc/stream/vif/setup_conn_test.go
@@ -0,0 +1,162 @@
+package vif
+
+import (
+ "bytes"
+ "encoding/binary"
+ "io"
+ "net"
+ "sync"
+ "testing"
+
+ "v.io/x/ref/profiles/internal/lib/iobuf"
+)
+
+const (
+ text = `Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.`
+)
+
+func min(i, j int) int {
+ if i < j {
+ return i
+ }
+ return j
+}
+
+// testControlCipher is a super-simple cipher that xor's each byte of the
+// payload with 0xaa.
+type testControlCipher struct{}
+
+const testMACSize = 4
+
+func (*testControlCipher) MACSize() int {
+ return testMACSize
+}
+
+func testMAC(data []byte) []byte {
+ var h uint32
+ for _, b := range data {
+ h = (h << 1) ^ uint32(b)
+ }
+ var hash [4]byte
+ binary.BigEndian.PutUint32(hash[:], h)
+ return hash[:]
+}
+
+func (c *testControlCipher) Decrypt(data []byte) {
+	for i := range data {
+ data[i] ^= 0xaa
+ }
+}
+
+func (c *testControlCipher) Encrypt(data []byte) {
+	for i := range data {
+ data[i] ^= 0xaa
+ }
+}
+
+func (c *testControlCipher) Open(data []byte) bool {
+ mac := testMAC(data[:len(data)-testMACSize])
+	if !bytes.Equal(mac, data[len(data)-testMACSize:]) {
+ return false
+ }
+ c.Decrypt(data[:len(data)-testMACSize])
+ return true
+}
+
+func (c *testControlCipher) Seal(data []byte) error {
+ c.Encrypt(data[:len(data)-testMACSize])
+ mac := testMAC(data[:len(data)-testMACSize])
+ copy(data[len(data)-testMACSize:], mac)
+ return nil
+}
+
+// shortConn performs at most 3 bytes of IO at a time.
+type shortConn struct {
+ io.ReadWriteCloser
+}
+
+func (s *shortConn) Read(data []byte) (int, error) {
+ if len(data) > 3 {
+ data = data[:3]
+ }
+ return s.ReadWriteCloser.Read(data)
+}
+
+func (s *shortConn) Write(data []byte) (int, error) {
+ n := len(data)
+ for i := 0; i < n; i += 3 {
+ j := min(n, i+3)
+ m, err := s.ReadWriteCloser.Write(data[i:j])
+ if err != nil {
+ return i + m, err
+ }
+ }
+ return n, nil
+}
+
+func TestConn(t *testing.T) {
+ p1, p2 := net.Pipe()
+ pool := iobuf.NewPool(0)
+ r1 := iobuf.NewReader(pool, p1)
+ r2 := iobuf.NewReader(pool, p2)
+ f1 := newSetupConn(p1, r1, &testControlCipher{})
+ f2 := newSetupConn(p2, r2, &testControlCipher{})
+ testConn(t, f1, f2)
+}
+
+func TestShortInnerConn(t *testing.T) {
+ p1, p2 := net.Pipe()
+ s1 := &shortConn{p1}
+ s2 := &shortConn{p2}
+ pool := iobuf.NewPool(0)
+ r1 := iobuf.NewReader(pool, s1)
+ r2 := iobuf.NewReader(pool, s2)
+ f1 := newSetupConn(s1, r1, &testControlCipher{})
+ f2 := newSetupConn(s2, r2, &testControlCipher{})
+ testConn(t, f1, f2)
+}
+
+func TestShortOuterConn(t *testing.T) {
+ p1, p2 := net.Pipe()
+ pool := iobuf.NewPool(0)
+ r1 := iobuf.NewReader(pool, p1)
+ r2 := iobuf.NewReader(pool, p2)
+ e1 := newSetupConn(p1, r1, &testControlCipher{})
+ e2 := newSetupConn(p2, r2, &testControlCipher{})
+ f1 := &shortConn{e1}
+ f2 := &shortConn{e2}
+ testConn(t, f1, f2)
+}
+
+// Write prefixes of the text onto the framed pipe and verify the frame content.
+func testConn(t *testing.T, f1, f2 io.ReadWriteCloser) {
+ // Reader loop.
+ var pending sync.WaitGroup
+ pending.Add(1)
+ go func() {
+ var buf [1024]byte
+ for i := 1; i != len(text); i++ {
+ n, err := io.ReadFull(f1, buf[:i])
+ if err != nil {
+ t.Errorf("bad read: %s", err)
+ }
+ if n != i {
+ t.Errorf("bad read: got %d bytes, expected %d bytes", n, i)
+ }
+ actual := string(buf[:n])
+ expected := string(text[:n])
+ if actual != expected {
+ t.Errorf("got %q, expected %q", actual, expected)
+ }
+ }
+ pending.Done()
+ }()
+
+ // Writer.
+ for i := 1; i != len(text); i++ {
+ if n, err := f2.Write([]byte(text[:i])); err != nil || n != i {
+ t.Errorf("bad write: i=%d n=%d err=%s", i, n, err)
+ }
+ }
+ pending.Wait()
+}
diff --git a/profiles/internal/ipc/stream/vif/v23_internal_test.go b/profiles/internal/ipc/stream/vif/v23_internal_test.go
new file mode 100644
index 0000000..380f691
--- /dev/null
+++ b/profiles/internal/ipc/stream/vif/v23_internal_test.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated via go generate.
+// DO NOT UPDATE MANUALLY
+package vif
+
+import "testing"
+import "os"
+
+import "v.io/x/ref/lib/testutil"
+
+func TestMain(m *testing.M) {
+ testutil.Init()
+ os.Exit(m.Run())
+}
diff --git a/profiles/internal/ipc/stream/vif/vcmap.go b/profiles/internal/ipc/stream/vif/vcmap.go
new file mode 100644
index 0000000..575484e
--- /dev/null
+++ b/profiles/internal/ipc/stream/vif/vcmap.go
@@ -0,0 +1,96 @@
+package vif
+
+import (
+ "sort"
+ "sync"
+
+ "v.io/x/ref/profiles/internal/ipc/stream/id"
+ "v.io/x/ref/profiles/internal/ipc/stream/vc"
+ "v.io/x/ref/profiles/internal/lib/pcqueue"
+)
+
+// vcMap implements a thread-safe map of vc.VC objects (vcInfo) keyed by their VCI.
+type vcMap struct {
+ mu sync.Mutex
+ m map[id.VC]vcInfo
+ frozen bool
+}
+
+// vcInfo represents per-VC information maintained by a VIF.
+type vcInfo struct {
+ VC *vc.VC
+ // Queues used to dispatch work to per-VC goroutines.
+ // RQ is where vif.readLoop can dispatch work to.
+ // WQ is where vif.writeLoop can dispatch work to.
+ RQ, WQ *pcqueue.T
+}
+
+func newVCMap() *vcMap { return &vcMap{m: make(map[id.VC]vcInfo)} }
+
+func (m *vcMap) Insert(c *vc.VC) (inserted bool, rq, wq *pcqueue.T) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ if m.frozen {
+ return false, nil, nil
+ }
+ if _, exists := m.m[c.VCI()]; exists {
+ return false, nil, nil
+ }
+ info := vcInfo{VC: c, RQ: pcqueue.New(100), WQ: pcqueue.New(100)}
+ m.m[c.VCI()] = info
+ return true, info.RQ, info.WQ
+}
+
+func (m *vcMap) Find(vci id.VC) (vc *vc.VC, rq, wq *pcqueue.T) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ info := m.m[vci]
+ return info.VC, info.RQ, info.WQ
+}
+
+func (m *vcMap) Delete(vci id.VC) {
+ m.mu.Lock()
+ if info, exists := m.m[vci]; exists {
+ info.RQ.Close()
+ info.WQ.Close()
+ delete(m.m, vci)
+ }
+ m.mu.Unlock()
+}
+
+func (m *vcMap) Size() int {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ return len(m.m)
+}
+
+// Freeze causes all subsequent Inserts to fail.
+// Returns a list of all the VCs that are in the map.
+func (m *vcMap) Freeze() []vcInfo {
+ m.mu.Lock()
+ m.frozen = true
+ l := make([]vcInfo, 0, len(m.m))
+ for _, info := range m.m {
+ l = append(l, info)
+ }
+ m.mu.Unlock()
+ return l
+}
+
+type vcSlice []*vc.VC
+
+func (s vcSlice) Len() int { return len(s) }
+func (s vcSlice) Less(i, j int) bool { return s[i].VCI() < s[j].VCI() }
+func (s vcSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// List returns the list of all VCs currently in the map, sorted by VCI
+func (m *vcMap) List() []*vc.VC {
+ m.mu.Lock()
+ l := make([]*vc.VC, 0, len(m.m))
+ for _, info := range m.m {
+ l = append(l, info.VC)
+ }
+ m.mu.Unlock()
+ sort.Sort(vcSlice(l))
+ return l
+}
diff --git a/profiles/internal/ipc/stream/vif/vcmap_test.go b/profiles/internal/ipc/stream/vif/vcmap_test.go
new file mode 100644
index 0000000..8f23e33
--- /dev/null
+++ b/profiles/internal/ipc/stream/vif/vcmap_test.go
@@ -0,0 +1,59 @@
+package vif
+
+import (
+ "reflect"
+ "testing"
+
+ "v.io/x/ref/profiles/internal/ipc/stream/vc"
+)
+
+func TestVCMap(t *testing.T) {
+ m := newVCMap()
+
+ vc12 := vc.InternalNew(vc.Params{VCI: 12})
+ vc34 := vc.InternalNew(vc.Params{VCI: 34})
+ vc45 := vc.InternalNew(vc.Params{VCI: 45})
+
+ if vc, _, _ := m.Find(12); vc != nil {
+ t.Errorf("Unexpected VC found: %+v", vc)
+ }
+ if ok, _, _ := m.Insert(vc34); !ok {
+ t.Errorf("Insert should have returned true on first insert")
+ }
+ if ok, _, _ := m.Insert(vc34); ok {
+ t.Errorf("Insert should have returned false on second insert")
+ }
+ if ok, _, _ := m.Insert(vc12); !ok {
+ t.Errorf("Insert should have returned true on first insert")
+ }
+ if ok, _, _ := m.Insert(vc45); !ok {
+ t.Errorf("Insert should have returned true on the first insert")
+ }
+ if g, w := m.List(), []*vc.VC{vc12, vc34, vc45}; !reflect.DeepEqual(g, w) {
+ t.Errorf("Did not get all VCs in expected order. Got %v, want %v", g, w)
+ }
+ m.Delete(vc34.VCI())
+ if g, w := m.List(), []*vc.VC{vc12, vc45}; !reflect.DeepEqual(g, w) {
+ t.Errorf("Did not get all VCs in expected order. Got %v, want %v", g, w)
+ }
+}
+
+func TestVCMapFreeze(t *testing.T) {
+ m := newVCMap()
+ vc1 := vc.InternalNew(vc.Params{VCI: 1})
+ vc2 := vc.InternalNew(vc.Params{VCI: 2})
+ if ok, _, _ := m.Insert(vc1); !ok {
+ t.Fatal("Should be able to insert the VC")
+ }
+ m.Freeze()
+ if ok, _, _ := m.Insert(vc2); ok {
+ t.Errorf("Should not be able to insert a VC after Freeze")
+ }
+ if vc, _, _ := m.Find(1); vc != vc1 {
+ t.Errorf("Got %v want %v", vc, vc1)
+ }
+ m.Delete(vc1.VCI())
+ if vc, _, _ := m.Find(1); vc != nil {
+ t.Errorf("Got %v want nil", vc)
+ }
+}
diff --git a/profiles/internal/ipc/stream/vif/vif.go b/profiles/internal/ipc/stream/vif/vif.go
new file mode 100644
index 0000000..aeb14ed
--- /dev/null
+++ b/profiles/internal/ipc/stream/vif/vif.go
@@ -0,0 +1,966 @@
+package vif
+
+// Logging guidelines:
+// vlog.VI(1) for per-net.Conn information
+// vlog.VI(2) for per-VC information
+// vlog.VI(3) for per-Flow information
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "sort"
+ "strings"
+ "sync"
+
+ "v.io/v23/context"
+ "v.io/v23/naming"
+ "v.io/v23/options"
+ "v.io/v23/security"
+ "v.io/v23/verror"
+ "v.io/v23/vtrace"
+ "v.io/x/lib/vlog"
+ vsync "v.io/x/ref/lib/sync"
+ "v.io/x/ref/lib/upcqueue"
+ "v.io/x/ref/profiles/internal/ipc/stream"
+ "v.io/x/ref/profiles/internal/ipc/stream/crypto"
+ "v.io/x/ref/profiles/internal/ipc/stream/id"
+ "v.io/x/ref/profiles/internal/ipc/stream/message"
+ "v.io/x/ref/profiles/internal/ipc/stream/vc"
+ "v.io/x/ref/profiles/internal/ipc/version"
+ "v.io/x/ref/profiles/internal/lib/bqueue"
+ "v.io/x/ref/profiles/internal/lib/bqueue/drrqueue"
+ "v.io/x/ref/profiles/internal/lib/iobuf"
+ "v.io/x/ref/profiles/internal/lib/pcqueue"
+)
+
+const pkgPath = "v.io/x/ref/profiles/internal/ipc/stream/vif"
+
+var (
+ errShuttingDown = verror.Register(pkgPath+".errShuttingDown", verror.NoRetry, "{1:}{2:} underlying network connection({3}) shutting down{:_}")
+)
+
+// VIF implements a "virtual interface" over an underlying network connection
+// (net.Conn). Just like multiple network connections can be established over a
+// single physical interface, multiple Virtual Circuits (VCs) can be
+// established over a single VIF.
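+//
+// Sample usage on the dialing side (a sketch; error handling elided):
+//   vf, _ := InternalNewDialedVIF(conn, rid, nil)
+//   vc, _ := vf.Dial(remoteEP)
+//   flow, _ := vc.Connect()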
+type VIF struct {
+ // All reads must be performed through reader, and not directly through conn.
+ conn net.Conn
+ pool *iobuf.Pool
+ reader *iobuf.Reader
+ localEP naming.Endpoint
+
+ // control channel encryption.
+ isSetup bool
+ // ctrlCipher is normally guarded by writeMu, however see the exception in
+ // readLoop.
+ ctrlCipher crypto.ControlCipher
+ writeMu sync.Mutex
+
+ vcMap *vcMap
+ wpending, rpending vsync.WaitGroup
+
+ muListen sync.Mutex
+ acceptor *upcqueue.T // GUARDED_BY(muListen)
+ listenerOpts []stream.ListenerOpt // GUARDED_BY(muListen)
+
+ muNextVCI sync.Mutex
+ nextVCI id.VC
+
+ outgoing bqueue.T
+ expressQ bqueue.Writer
+
+ flowQ bqueue.Writer
+ flowMu sync.Mutex
+ flowCounters message.Counters
+
+ stopQ bqueue.Writer
+
+ // The IPC version range supported by this VIF. In practice this is
+ // non-nil only in testing. nil is equivalent to using the versions
+ // actually supported by this IPC implementation (which is always
+ // what you want outside of tests).
+ versions *version.Range
+
+ isClosedMu sync.Mutex
+ isClosed bool // GUARDED_BY(isClosedMu)
+
+ // All sets that this VIF is in.
+ muSets sync.Mutex
+ sets []*Set // GUARDED_BY(muSets)
+
+ // These counters track the number of messages sent and received by
+ // this VIF.
+ muMsgCounters sync.Mutex
+ msgCounters map[string]int64
+}
+
+// ConnectorAndFlow represents a Flow and the Connector that can be used to
+// create another Flow over the same underlying VC.
+type ConnectorAndFlow struct {
+ Connector stream.Connector
+ Flow stream.Flow
+}
+
+// Separate out constants that are not exported so that godoc looks nicer for
+// the exported ones.
+const (
+ // Priorities of the buffered queues used for flow control of writes.
+ expressPriority bqueue.Priority = iota
+ flowPriority
+ normalPriority
+ stopPriority
+
+ // Convenience aliases so that the package name "vc" does not
+ // conflict with the variables named "vc".
+ defaultBytesBufferedPerFlow = vc.DefaultBytesBufferedPerFlow
+ sharedFlowID = vc.SharedFlowID
+)
+
+var (
+ errAlreadySetup = errors.New("VIF is already setup")
+)
+
+// InternalNewDialedVIF creates a new virtual interface over the provided
+// network connection, under the assumption that the conn object was created
+// using net.Dial.
+//
+// As the name suggests, this method is intended for use only within packages
+// placed inside v.io/x/ref/profiles/internal. Code outside the
+// v.io/x/ref/profiles/internal/* packages should never call this method.
+func InternalNewDialedVIF(conn net.Conn, rid naming.RoutingID, versions *version.Range, opts ...stream.VCOpt) (*VIF, error) {
+ ctx, principal, err := clientAuthOptions(opts)
+ if err != nil {
+ return nil, err
+ }
+ if ctx != nil {
+ var span vtrace.Span
+ ctx, span = vtrace.SetNewSpan(ctx, "InternalNewDialedVIF")
+ span.Annotatef("(%v, %v)", conn.RemoteAddr().Network(), conn.RemoteAddr())
+ defer span.Finish()
+ }
+ pool := iobuf.NewPool(0)
+ reader := iobuf.NewReader(pool, conn)
+ params := security.CallParams{LocalPrincipal: principal, LocalEndpoint: localEP(conn, rid, versions)}
+
+ // TODO(ataly, ashankar, suharshs): Figure out what authorization policy to use
+ // for authenticating the server during VIF establishment. Note that we cannot
+ // use the VC.ServerAuthorizer available in 'opts' as that applies to the end
+ // server and not the remote endpoint of the VIF.
+ c, err := AuthenticateAsClient(conn, reader, versions, params, nil)
+ if err != nil {
+ return nil, err
+ }
+ return internalNew(conn, pool, reader, rid, id.VC(vc.NumReservedVCs), versions, nil, nil, c)
+}
+
+// InternalNewAcceptedVIF creates a new virtual interface over the provided
+// network connection, under the assumption that the conn object was created
+// using an Accept call on a net.Listener object.
+//
+// The returned VIF is also set up for accepting new VCs and Flows with the
+// provided ListenerOpts.
+//
+// As the name suggests, this method is intended for use only within packages
+// placed inside v.io/x/ref/profiles/internal. Code outside the
+// v.io/x/ref/profiles/internal/* packages should never call this method.
+func InternalNewAcceptedVIF(conn net.Conn, rid naming.RoutingID, versions *version.Range, lopts ...stream.ListenerOpt) (*VIF, error) {
+ pool := iobuf.NewPool(0)
+ reader := iobuf.NewReader(pool, conn)
+ return internalNew(conn, pool, reader, rid, id.VC(vc.NumReservedVCs)+1, versions, upcqueue.New(), lopts, &crypto.NullControlCipher{})
+}
+
+func internalNew(conn net.Conn, pool *iobuf.Pool, reader *iobuf.Reader, rid naming.RoutingID, initialVCI id.VC, versions *version.Range, acceptor *upcqueue.T, listenerOpts []stream.ListenerOpt, c crypto.ControlCipher) (*VIF, error) {
+ var (
+ // Choose IDs that will not conflict with any other (VC, Flow)
+ // pairs. VCI 0 is never used by the application (it is
+ // reserved for control messages), so steal from the Flow space
+ // there.
+ expressID bqueue.ID = packIDs(0, 0)
+ flowID bqueue.ID = packIDs(0, 1)
+ stopID bqueue.ID = packIDs(0, 2)
+ )
+ outgoing := drrqueue.New(vc.MaxPayloadSizeBytes)
+
+ expressQ, err := outgoing.NewWriter(expressID, expressPriority, defaultBytesBufferedPerFlow)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create bqueue.Writer for express messages: %v", err)
+ }
+ expressQ.Release(-1) // Disable flow control
+
+ flowQ, err := outgoing.NewWriter(flowID, flowPriority, flowToken.Size())
+ if err != nil {
+ return nil, fmt.Errorf("failed to create bqueue.Writer for flow control counters: %v", err)
+ }
+ flowQ.Release(-1) // Disable flow control
+
+ stopQ, err := outgoing.NewWriter(stopID, stopPriority, 1)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create bqueue.Writer for stopping the write loop: %v", err)
+ }
+ stopQ.Release(-1) // Disable flow control
+
+ vif := &VIF{
+ conn: conn,
+ pool: pool,
+ reader: reader,
+ ctrlCipher: c,
+ vcMap: newVCMap(),
+ acceptor: acceptor,
+ listenerOpts: listenerOpts,
+ localEP: localEP(conn, rid, versions),
+ nextVCI: initialVCI,
+ outgoing: outgoing,
+ expressQ: expressQ,
+ flowQ: flowQ,
+ flowCounters: message.NewCounters(),
+ stopQ: stopQ,
+ versions: versions,
+ msgCounters: make(map[string]int64),
+ }
+ go vif.readLoop()
+ go vif.writeLoop()
+ return vif, nil
+}
+
+// Dial creates a new VC to the provided remote endpoint, authenticating the VC
+// with the local principal supplied via the options.
+func (vif *VIF) Dial(remoteEP naming.Endpoint, opts ...stream.VCOpt) (stream.VC, error) {
+ vc, err := vif.newVC(vif.allocVCI(), vif.localEP, remoteEP, true)
+ if err != nil {
+ return nil, err
+ }
+ counters := message.NewCounters()
+ counters.Add(vc.VCI(), sharedFlowID, defaultBytesBufferedPerFlow)
+ // TODO(ashankar,mattr): If remoteEP/localEP version ranges allow, then
+ // use message.SetupVC instead of message.OpenVC.
+ // Rough outline:
+ // (1) Switch to NaclBox for VC encryption (thus the VC handshake will
+ // no longer require the TLS flow and roundtrips for that).
+ // (2) Send an appropriate SetupVC message in response to a received
+ // SetupVC message.
+ // (3) Use the SetupVC received from the remote end to establish the
+ // exact protocol version to use.
+ err = vif.sendOnExpressQ(&message.OpenVC{
+ VCI: vc.VCI(),
+ DstEndpoint: remoteEP,
+ SrcEndpoint: vif.localEP,
+ Counters: counters})
+ if err != nil {
+ err = fmt.Errorf("vif.sendOnExpressQ(OpenVC) failed: %v", err)
+ vc.Close(err.Error())
+ return nil, err
+ }
+ if err := vc.HandshakeDialedVC(opts...); err != nil {
+ vif.vcMap.Delete(vc.VCI())
+ err = fmt.Errorf("VC handshake failed: %v", err)
+ vc.Close(err.Error())
+ return nil, err
+ }
+ return vc, nil
+}
+
+// addSet adds a set to the list of sets this VIF is in. This method is called
+// by Set.Insert().
+func (vif *VIF) addSet(s *Set) {
+ vif.muSets.Lock()
+ defer vif.muSets.Unlock()
+ vif.sets = append(vif.sets, s)
+}
+
+// removeSet removes a set from the list of sets this VIF is in. This method is
+// called by Set.Delete().
+func (vif *VIF) removeSet(s *Set) {
+ vif.muSets.Lock()
+ defer vif.muSets.Unlock()
+ for ix, vs := range vif.sets {
+ if vs == s {
+ vif.sets = append(vif.sets[:ix], vif.sets[ix+1:]...)
+ return
+ }
+ }
+}
+
+// Close closes all VCs (and thereby Flows) over the VIF and then closes the
+// underlying network connection after draining all pending writes on those
+// VCs.
+func (vif *VIF) Close() {
+ vif.isClosedMu.Lock()
+ if vif.isClosed {
+ vif.isClosedMu.Unlock()
+ return
+ }
+ vif.isClosed = true
+ vif.isClosedMu.Unlock()
+
+ vif.muSets.Lock()
+ sets := vif.sets
+ vif.sets = nil
+ vif.muSets.Unlock()
+ for _, s := range sets {
+ s.Delete(vif)
+ }
+
+ vlog.VI(1).Infof("Closing VIF %s", vif)
+ // Stop accepting new VCs.
+ vif.StopAccepting()
+ // Close local datastructures for all existing VCs.
+ vcs := vif.vcMap.Freeze()
+ for _, vc := range vcs {
+ vc.VC.Close("VIF is being closed")
+ }
+ // Wait for the vcWriteLoops to exit (after draining queued up messages).
+ vif.stopQ.Close()
+ vif.wpending.Wait()
+ // Close the underlying network connection.
+ // No need to send individual messages to close all pending VCs since
+ // the remote end should know to close all VCs when the VIF's
+ // connection breaks.
+ if err := vif.conn.Close(); err != nil {
+ vlog.VI(1).Infof("net.Conn.Close failed on VIF %s: %v", vif, err)
+ }
+}
+
+// StartAccepting begins accepting Flows (and VCs) initiated by the remote end
+// of a VIF. opts is used to setup the listener on newly established VCs.
+func (vif *VIF) StartAccepting(opts ...stream.ListenerOpt) error {
+ vif.muListen.Lock()
+ defer vif.muListen.Unlock()
+ if vif.acceptor != nil {
+ return fmt.Errorf("already accepting Flows on VIF %v", vif)
+ }
+ vif.acceptor = upcqueue.New()
+ vif.listenerOpts = opts
+ return nil
+}
+
+// StopAccepting prevents any Flows initiated by the remote end of a VIF from
+// being accepted and causes any existing and future calls to Accept to fail
+// immediately.
+func (vif *VIF) StopAccepting() {
+ vif.muListen.Lock()
+ defer vif.muListen.Unlock()
+ if vif.acceptor != nil {
+ vif.acceptor.Shutdown()
+ vif.acceptor = nil
+ vif.listenerOpts = nil
+ }
+}
+
+// Accept returns the (stream.Connector, stream.Flow) pair of a newly
+// established VC and/or Flow.
+//
+// Sample usage:
+// for {
+// cAndf, err := vif.Accept()
+// switch {
+// case err != nil:
+// fmt.Println("Accept error:", err)
+// return
+// case cAndf.Flow == nil:
+// fmt.Println("New VC established:", cAndf.Connector)
+// default:
+// fmt.Println("New flow established")
+// go handleFlow(cAndf.Flow)
+// }
+// }
+func (vif *VIF) Accept() (ConnectorAndFlow, error) {
+ vif.muListen.Lock()
+ acceptor := vif.acceptor
+ vif.muListen.Unlock()
+ if acceptor == nil {
+ return ConnectorAndFlow{}, fmt.Errorf("VCs not accepted on VIF %v", vif)
+ }
+ item, err := acceptor.Get(nil)
+ if err != nil {
+ return ConnectorAndFlow{}, fmt.Errorf("Accept failed: %v", err)
+ }
+ return item.(ConnectorAndFlow), nil
+}
+
+func (vif *VIF) String() string {
+ l := vif.conn.LocalAddr()
+ r := vif.conn.RemoteAddr()
+ return fmt.Sprintf("(%s, %s) <-> (%s, %s)", r.Network(), r, l.Network(), l)
+}
+
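+// readLoop reads messages off the underlying connection and dispatches them
+// via handleMessage until a read or handling error occurs, at which point the
+// VIF is closed.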
+func (vif *VIF) readLoop() {
+ defer vif.Close()
+ defer vif.stopVCDispatchLoops()
+ for {
+ // vif.ctrlCipher is guarded by vif.writeMu. However, the only mutation
+ // to it is in handleMessage, which runs in the same goroutine, so a
+ // lock is not required here.
+ msg, err := message.ReadFrom(vif.reader, vif.ctrlCipher)
+ if err != nil {
+ vlog.VI(1).Infof("Exiting readLoop of VIF %s because of read error: %v", vif, err)
+ return
+ }
+ vlog.VI(3).Infof("Received %T = [%v] on VIF %s", msg, msg, vif)
+ if err := vif.handleMessage(msg); err != nil {
+ vlog.VI(1).Infof("Exiting readLoop of VIF %s because of message error: %v", vif, err)
+ return
+ }
+ }
+}
+
+// handleMessage handles a single incoming message. Any error returned is
+// fatal, causing the VIF to close.
+func (vif *VIF) handleMessage(msg message.T) error {
+ vif.muMsgCounters.Lock()
+ vif.msgCounters[fmt.Sprintf("Recv(%T)", msg)]++
+ vif.muMsgCounters.Unlock()
+
+ switch m := msg.(type) {
+ case *message.Data:
+ _, rq, _ := vif.vcMap.Find(m.VCI)
+ if rq == nil {
+ vlog.VI(2).Infof("Ignoring message of %d bytes for unrecognized VCI %d on VIF %s", m.Payload.Size(), m.VCI, vif)
+ m.Release()
+ return nil
+ }
+ if err := rq.Put(m, nil); err != nil {
+ vlog.VI(2).Infof("Failed to put message(%v) on VC queue on VIF %v: %v", m, vif, err)
+ m.Release()
+ }
+ case *message.OpenVC:
+ vif.muListen.Lock()
+ closed := vif.acceptor == nil || vif.acceptor.IsClosed()
+ lopts := vif.listenerOpts
+ vif.muListen.Unlock()
+ if closed {
+ vlog.VI(2).Infof("Ignoring OpenVC message %+v as VIF %s does not accept VCs", m, vif)
+ vif.sendOnExpressQ(&message.CloseVC{
+ VCI: m.VCI,
+ Error: "VCs not accepted",
+ })
+ return nil
+ }
+ vc, err := vif.newVC(m.VCI, m.DstEndpoint, m.SrcEndpoint, false)
+ vif.distributeCounters(m.Counters)
+ if err != nil {
+ vif.sendOnExpressQ(&message.CloseVC{
+ VCI: m.VCI,
+ Error: err.Error(),
+ })
+ return nil
+ }
+ go vif.acceptFlowsLoop(vc, vc.HandshakeAcceptedVC(lopts...))
+ case *message.SetupVC:
+ // TODO(ashankar,mattr): Handle this! See comment about SetupVC
+ // in vif.Dial
+ vif.distributeCounters(m.Counters)
+ vif.sendOnExpressQ(&message.CloseVC{
+ VCI: m.VCI,
+ Error: "SetupVC handling not implemented yet",
+ })
+ vlog.VI(2).Infof("Received SetupVC message, but handling not yet implemented")
+ case *message.CloseVC:
+ if vc, _, _ := vif.vcMap.Find(m.VCI); vc != nil {
+ vif.vcMap.Delete(vc.VCI())
+ vlog.VI(2).Infof("CloseVC(%+v) on VIF %s", m, vif)
+ // TODO(cnicolaou): it would be nice to have a method on VC
+ // to indicate a 'remote close' rather than a 'local one'. This helps
+ // with error reporting since we expect reads/writes to occur
+ // after a remote close, but not after a local close.
+ vc.Close(fmt.Sprintf("remote end closed VC(%v)", m.Error))
+ return nil
+ }
+ vlog.VI(2).Infof("Ignoring CloseVC(%+v) for unrecognized VCI on VIF %s", m, vif)
+ case *message.AddReceiveBuffers:
+ vif.distributeCounters(m.Counters)
+ case *message.OpenFlow:
+ if vc, _, _ := vif.vcMap.Find(m.VCI); vc != nil {
+ if err := vc.AcceptFlow(m.Flow); err != nil {
+ vlog.VI(3).Infof("OpenFlow %+v on VIF %v failed:%v", m, vif, err)
+ cm := &message.Data{VCI: m.VCI, Flow: m.Flow}
+ cm.SetClose()
+ vif.sendOnExpressQ(cm)
+ return nil
+ }
+ vc.ReleaseCounters(m.Flow, m.InitialCounters)
+ return nil
+ }
+ vlog.VI(2).Infof("Ignoring OpenFlow(%+v) for unrecognized VCI on VIF %s", m, m, vif)
+ case *message.HopSetup:
+ // Configure the VIF. This takes over the conn during negotiation.
+ if vif.isSetup {
+ return errAlreadySetup
+ }
+ vif.muListen.Lock()
+ principal, lBlessings, dischargeClient, err := serverAuthOptions(vif.listenerOpts)
+ vif.muListen.Unlock()
+ if err != nil {
+ return errVersionNegotiationFailed
+ }
+ vif.writeMu.Lock()
+ c, err := AuthenticateAsServer(vif.conn, vif.reader, vif.versions, principal, lBlessings, dischargeClient, m)
+ if err != nil {
+ vif.writeMu.Unlock()
+ return err
+ }
+ vif.ctrlCipher = c
+ vif.writeMu.Unlock()
+ vif.isSetup = true
+ default:
+ vlog.Infof("Ignoring unrecognized message %T on VIF %s", m, vif)
+ }
+ return nil
+}
+
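+// vcDispatchLoop delivers Data messages queued for a VC to the appropriate
+// flow, shutting the flow down when a message marked close is seen. One such
+// goroutine runs per VC.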
+func (vif *VIF) vcDispatchLoop(vc *vc.VC, messages *pcqueue.T) {
+ defer vlog.VI(2).Infof("Exiting vcDispatchLoop(%v) on VIF %v", vc, vif)
+ defer vif.rpending.Done()
+ for {
+ qm, err := messages.Get(nil)
+ if err != nil {
+ return
+ }
+ m := qm.(*message.Data)
+ if err := vc.DispatchPayload(m.Flow, m.Payload); err != nil {
+ vlog.VI(2).Infof("Ignoring data message %v for on VIF %v: %v", m, vif, err)
+ }
+ if m.Close() {
+ vif.shutdownFlow(vc, m.Flow)
+ }
+ }
+}
+
+func (vif *VIF) stopVCDispatchLoops() {
+ vcs := vif.vcMap.Freeze()
+ for _, v := range vcs {
+ v.RQ.Close()
+ }
+ vif.rpending.Wait()
+}
+
+func (vif *VIF) acceptFlowsLoop(vc *vc.VC, c <-chan vc.HandshakeResult) {
+ hr := <-c
+ if hr.Error != nil {
+ vif.closeVCAndSendMsg(vc, hr.Error.Error())
+ return
+ }
+
+ vif.muListen.Lock()
+ acceptor := vif.acceptor
+ vif.muListen.Unlock()
+ if acceptor == nil {
+ vif.closeVCAndSendMsg(vc, "Flows no longer being accepted")
+ return
+ }
+
+ // Notify any listeners that a new VC has been established
+ if err := acceptor.Put(ConnectorAndFlow{vc, nil}); err != nil {
+ vif.closeVCAndSendMsg(vc, fmt.Sprintf("VC accept failed: %v", err))
+ return
+ }
+
+ vlog.VI(2).Infof("Running acceptFlowsLoop for VC %v on VIF %v", vc, vif)
+ for {
+ f, err := hr.Listener.Accept()
+ if err != nil {
+ vlog.VI(2).Infof("Accept failed on VC %v on VIF %v: %v", vc, vif, err)
+ return
+ }
+ if err := acceptor.Put(ConnectorAndFlow{vc, f}); err != nil {
+ vlog.VI(2).Infof("vif.acceptor.Put(%v, %T) on VIF %v failed: %v", vc, f, vif, err)
+ f.Close()
+ return
+ }
+ }
+}
+
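+// distributeCounters releases the flow-control counters received from the
+// remote end to the corresponding flows on their VCs.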
+func (vif *VIF) distributeCounters(counters message.Counters) {
+ for cid, bytes := range counters {
+ vc, _, _ := vif.vcMap.Find(cid.VCI())
+ if vc == nil {
+ vlog.VI(2).Infof("Ignoring counters for non-existent VCI %d on VIF %s", cid.VCI(), vif)
+ continue
+ }
+ vc.ReleaseCounters(cid.Flow(), bytes)
+ }
+}
+
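+// writeLoop drains the outgoing bqueue and writes express (control),
+// flow-counter and data messages onto the underlying connection, in priority
+// order, until the queue is closed.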
+func (vif *VIF) writeLoop() {
+ defer vif.outgoing.Close()
+ defer vif.stopVCWriteLoops()
+ for {
+ writer, bufs, err := vif.outgoing.Get(nil)
+ if err != nil {
+ vlog.VI(1).Infof("Exiting writeLoop of VIF %s because of bqueue.Get error: %v", vif, err)
+ return
+ }
+ vif.muMsgCounters.Lock()
+ vif.msgCounters[fmt.Sprintf("Send(%T)", writer)]++
+ vif.muMsgCounters.Unlock()
+ switch writer {
+ case vif.expressQ:
+ for _, b := range bufs {
+ if err := vif.writeSerializedMessage(b.Contents); err != nil {
+ vlog.VI(1).Infof("Exiting writeLoop of VIF %s because Control message write failed: %s", vif, err)
+ releaseBufs(bufs)
+ return
+ }
+ b.Release()
+ }
+ case vif.flowQ:
+ msg := &message.AddReceiveBuffers{}
+ // No need to call releaseBufs(bufs) as all bufs are
+ // the exact same value: flowToken.
+ vif.flowMu.Lock()
+ if len(vif.flowCounters) > 0 {
+ msg.Counters = vif.flowCounters
+ vif.flowCounters = message.NewCounters()
+ }
+ vif.flowMu.Unlock()
+ if len(msg.Counters) > 0 {
+ vlog.VI(3).Infof("Sending counters %v on VIF %s", msg.Counters, vif)
+ if err := vif.writeMessage(msg); err != nil {
+ vlog.VI(1).Infof("Exiting writeLoop of VIF %s because AddReceiveBuffers message write failed: %v", vif, err)
+ return
+ }
+ }
+ case vif.stopQ:
+ // Lowest-priority queue which will never have any
+ // buffers; Close is the only method called on it.
+ return
+ default:
+ vif.writeDataMessages(writer, bufs)
+ }
+ }
+}
+
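+// vcWriteLoop encrypts and writes Data messages queued for a single VC onto
+// the underlying connection. One such goroutine runs per VC.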
+func (vif *VIF) vcWriteLoop(vc *vc.VC, messages *pcqueue.T) {
+ defer vlog.VI(2).Infof("Exiting vcWriteLoop(%v) on VIF %v", vc, vif)
+ defer vif.wpending.Done()
+ for {
+ qm, err := messages.Get(nil)
+ if err != nil {
+ return
+ }
+ m := qm.(*message.Data)
+ m.Payload, err = vc.Encrypt(m.Flow, m.Payload)
+ if err != nil {
+ vlog.Infof("Encryption failed. Flow:%v VC:%v Error:%v", m.Flow, vc, err)
+ }
+ if m.Close() {
+ // The last bytes written on the flow will be sent out
+ // on vif.conn. Local datastructures for the flow can
+ // be cleaned up now.
+ vif.shutdownFlow(vc, m.Flow)
+ }
+ if err == nil {
+ err = vif.writeMessage(m)
+ }
+ if err != nil {
+ // TODO(caprita): Calling closeVCAndSendMsg below causes
+ // a race as described in:
+ // https://docs.google.com/a/google.com/document/d/1C0kxfYhuOcStdV7tnLZELZpUhfQCZj47B0JrzbE29h8/edit
+ //
+ // There should be a finer grained way to fix this, and
+ // there are likely other instances where we should not
+ // be closing the VC.
+ //
+ // For now, commenting out the line below removes the
+ // flakiness from our existing unit tests, but this
+ // needs to be revisited and fixed correctly.
+ //
+ // vif.closeVCAndSendMsg(vc, fmt.Sprintf("write failure: %v", err))
+
+ // Drain the queue and exit.
+ for {
+ qm, err := messages.Get(nil)
+ if err != nil {
+ return
+ }
+ qm.(*message.Data).Release()
+ }
+ }
+ }
+}
+
+func (vif *VIF) stopVCWriteLoops() {
+ vcs := vif.vcMap.Freeze()
+ for _, v := range vcs {
+ v.WQ.Close()
+ }
+}
+
+// sendOnExpressQ adds 'msg' to the expressQ (highest priority queue) of messages to write on the wire.
+func (vif *VIF) sendOnExpressQ(msg message.T) error {
+ vlog.VI(2).Infof("sendOnExpressQ(%T = %+v) on VIF %s", msg, msg, vif)
+ var buf bytes.Buffer
+ // Don't encrypt yet, because the message ordering isn't yet determined.
+ // Encryption is performed by vif.writeSerializedMessage() when the
+ // message is actually written to vif.conn.
+ vif.writeMu.Lock()
+ c := vif.ctrlCipher
+ vif.writeMu.Unlock()
+ if err := message.WriteTo(&buf, msg, crypto.NewDisabledControlCipher(c)); err != nil {
+ return err
+ }
+ return vif.expressQ.Put(iobuf.NewSlice(buf.Bytes()), nil)
+}
+
+// writeMessage writes the message to the channel. Writes must be serialized so
+// that the control channel can be encrypted, so we acquire the writeMu.
+func (vif *VIF) writeMessage(msg message.T) error {
+ vif.writeMu.Lock()
+ defer vif.writeMu.Unlock()
+ return message.WriteTo(vif.conn, msg, vif.ctrlCipher)
+}
+
+// Write writes the message to the channel, encrypting the control data. Writes
+// must be serialized so that the control channel can be encrypted, so we
+// acquire the writeMu.
+func (vif *VIF) writeSerializedMessage(msg []byte) error {
+ vif.writeMu.Lock()
+ defer vif.writeMu.Unlock()
+ if err := message.EncryptMessage(msg, vif.ctrlCipher); err != nil {
+ return err
+ }
+ if n, err := vif.conn.Write(msg); err != nil {
+ return fmt.Errorf("write failed: got (%d, %v) for %d byte message", n, err, len(msg))
+ }
+ return nil
+}
+
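+// writeDataMessages writes a batch of buffers drained from a flow's
+// bqueue.Writer onto the corresponding VC's write queue as Data messages,
+// marking the last message with a close flag if the writer has been drained.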
+func (vif *VIF) writeDataMessages(writer bqueue.Writer, bufs []*iobuf.Slice) {
+ vci, fid := unpackIDs(writer.ID())
+ // iobuf.Coalesce will coalesce buffers only if they are adjacent to
+ // each other. In the worst case, each buf will be non-adjacent to the
+ // others and the code below will end up with multiple small writes
+ // instead of a single big one.
+ // This may be worth revisiting if it becomes a performance issue.
+ bufs = iobuf.Coalesce(bufs, uint(vc.MaxPayloadSizeBytes))
+ _, _, wq := vif.vcMap.Find(vci)
+ if wq == nil {
+ // VC has been removed, stop sending messages
+ vlog.VI(2).Infof("VCI %d on VIF %s was shutdown, dropping %d messages that were pending a write", vci, vif, len(bufs))
+ releaseBufs(bufs)
+ return
+ }
+ last := len(bufs) - 1
+ drained := writer.IsDrained()
+ for i, b := range bufs {
+ d := &message.Data{VCI: vci, Flow: fid, Payload: b}
+ if drained && i == last {
+ d.SetClose()
+ }
+ if err := wq.Put(d, nil); err != nil {
+ releaseBufs(bufs[i:])
+ return
+ }
+ }
+ if len(bufs) == 0 && drained {
+ d := &message.Data{VCI: vci, Flow: fid}
+ d.SetClose()
+ if err := wq.Put(d, nil); err != nil {
+ d.Release()
+ }
+ }
+}
+
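+// allocVCI returns the next VC identifier to use for a VC initiated on this
+// side of the VIF. VCIs advance in steps of two so that the dialing and
+// accepting sides (which start from consecutive initial VCIs) never allocate
+// the same identifier.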
+func (vif *VIF) allocVCI() id.VC {
+ vif.muNextVCI.Lock()
+ ret := vif.nextVCI
+ vif.nextVCI += 2
+ vif.muNextVCI.Unlock()
+ return ret
+}
+
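+// newVC creates a VC with the given identifiers, registers it in the vcMap
+// and starts its dispatch and write goroutines. It fails if the VIF is
+// already shutting down.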
+func (vif *VIF) newVC(vci id.VC, localEP, remoteEP naming.Endpoint, dialed bool) (*vc.VC, error) {
+ version, err := version.CommonVersion(localEP, remoteEP)
+ if vif.versions != nil {
+ version, err = vif.versions.CommonVersion(localEP, remoteEP)
+ }
+ if err != nil {
+ return nil, err
+ }
+ vc := vc.InternalNew(vc.Params{
+ VCI: vci,
+ Dialed: dialed,
+ LocalEP: localEP,
+ RemoteEP: remoteEP,
+ Pool: vif.pool,
+ ReserveBytes: uint(message.HeaderSizeBytes + vif.ctrlCipher.MACSize()),
+ Helper: vcHelper{vif},
+ Version: version,
+ })
+ added, rq, wq := vif.vcMap.Insert(vc)
+ // Start vcWriteLoop
+ if added = added && vif.wpending.TryAdd(); added {
+ go vif.vcWriteLoop(vc, wq)
+ }
+ // Start vcDispatchLoop
+ if added = added && vif.rpending.TryAdd(); added {
+ go vif.vcDispatchLoop(vc, rq)
+ }
+ if !added {
+ if rq != nil {
+ rq.Close()
+ }
+ if wq != nil {
+ wq.Close()
+ }
+ vif.vcMap.Delete(vci)
+ vc.Close("underlying network connection shutting down")
+ // We embed an error inside verror.ErrAborted because other layers
+ // check for the "Aborted" error as a special case. Perhaps
+ // eventually we'll get rid of the Aborted layer.
+ return nil, verror.New(verror.ErrAborted, nil, verror.New(errShuttingDown, nil, vif))
+ }
+ return vc, nil
+}
+
+func (vif *VIF) closeVCAndSendMsg(vc *vc.VC, msg string) {
+ vlog.VI(2).Infof("Shutting down VCI %d on VIF %v due to: %v", vc.VCI(), vif, msg)
+ vif.vcMap.Delete(vc.VCI())
+ vc.Close(msg)
+ if err := vif.sendOnExpressQ(&message.CloseVC{
+ VCI: vc.VCI(),
+ Error: msg,
+ }); err != nil {
+ vlog.VI(2).Infof("sendOnExpressQ(CloseVC{VCI:%d,...}) on VIF %v failed: %v", vc.VCI(), vif, err)
+ }
+}
+
+// shutdownFlow clears out all the datastructures associated with fid.
+func (vif *VIF) shutdownFlow(vc *vc.VC, fid id.Flow) {
+ vc.ShutdownFlow(fid)
+ vif.flowMu.Lock()
+ delete(vif.flowCounters, message.MakeCounterID(vc.VCI(), fid))
+ vif.flowMu.Unlock()
+}
+
+// ShutdownVCs closes all VCs established to the provided remote endpoint.
+// Returns the number of VCs that were closed.
+func (vif *VIF) ShutdownVCs(remote naming.Endpoint) int {
+ vcs := vif.vcMap.List()
+ n := 0
+ for _, vc := range vcs {
+ if naming.Compare(vc.RemoteAddr().RoutingID(), remote.RoutingID()) {
+ vlog.VI(1).Infof("VCI %d on VIF %s being closed because of ShutdownVCs call", vc.VCI(), vif)
+ vif.closeVCAndSendMsg(vc, "")
+ n++
+ }
+ }
+ return n
+}
+
+// NumVCs returns the number of VCs established over this VIF.
+func (vif *VIF) NumVCs() int { return vif.vcMap.Size() }
+
+// DebugString returns a descriptive state of the VIF.
+//
+// The returned string is meant for consumption by humans. The specific format
+// should not be relied upon by any automated processing.
+func (vif *VIF) DebugString() string {
+ vcs := vif.vcMap.List()
+ l := make([]string, 0, len(vcs)+1)
+
+ vif.muNextVCI.Lock() // Needed for vif.nextVCI
+ l = append(l, fmt.Sprintf("VIF:[%s] -- #VCs:%d NextVCI:%d ControlChannelEncryption:%v IsClosed:%v", vif, len(vcs), vif.nextVCI, vif.isSetup, vif.isClosed))
+ vif.muNextVCI.Unlock()
+
+ for _, vc := range vcs {
+ l = append(l, vc.DebugString())
+ }
+
+ l = append(l, "Message Counters:")
+ ctrs := len(l)
+ vif.muMsgCounters.Lock()
+ for k, v := range vif.msgCounters {
+ l = append(l, fmt.Sprintf(" %-32s %10d", k, v))
+ }
+ vif.muMsgCounters.Unlock()
+ sort.Strings(l[ctrs:])
+ return strings.Join(l, "\n")
+}
+
+// Methods and type that implement vc.Helper
+//
+// We create a separate type for vc.Helper to hide the vc.Helper methods
+// from the exported method set of VIF.
+type vcHelper struct{ vif *VIF }
+
+func (h vcHelper) NotifyOfNewFlow(vci id.VC, fid id.Flow, bytes uint) {
+ h.vif.sendOnExpressQ(&message.OpenFlow{VCI: vci, Flow: fid, InitialCounters: uint32(bytes)})
+}
+
+func (h vcHelper) AddReceiveBuffers(vci id.VC, fid id.Flow, bytes uint) {
+ if bytes == 0 {
+ return
+ }
+ h.vif.flowMu.Lock()
+ h.vif.flowCounters.Add(vci, fid, uint32(bytes))
+ h.vif.flowMu.Unlock()
+ h.vif.flowQ.TryPut(flowToken)
+}
+
+func (h vcHelper) NewWriter(vci id.VC, fid id.Flow) (bqueue.Writer, error) {
+ return h.vif.outgoing.NewWriter(packIDs(vci, fid), normalPriority, defaultBytesBufferedPerFlow)
+}
+
+// The token added to vif.flowQ.
+var flowToken *iobuf.Slice
+
+func init() {
+ // flowToken must be non-empty otherwise bqueue.Writer.Put will ignore it.
+ flowToken = iobuf.NewSlice(make([]byte, 1))
+}
+
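+// packIDs combines a VC and Flow identifier into a single bqueue.ID (via
+// message.MakeCounterID) so that every (VC, Flow) pair gets its own writer in
+// the shared outgoing queue; unpackIDs reverses the mapping.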
+func packIDs(vci id.VC, fid id.Flow) bqueue.ID {
+ return bqueue.ID(message.MakeCounterID(vci, fid))
+}
+
+func unpackIDs(b bqueue.ID) (id.VC, id.Flow) {
+ cid := message.CounterID(b)
+ return cid.VCI(), cid.Flow()
+}
+
+func releaseBufs(bufs []*iobuf.Slice) {
+ for _, b := range bufs {
+ b.Release()
+ }
+}
+
+func localEP(conn net.Conn, rid naming.RoutingID, versions *version.Range) naming.Endpoint {
+ localAddr := conn.LocalAddr()
+ ep := version.Endpoint(localAddr.Network(), localAddr.String(), rid)
+ if versions != nil {
+ ep = versions.Endpoint(localAddr.Network(), localAddr.String(), rid)
+ }
+ return ep
+}
+
+// clientAuthOptions extracts the client authentication options from the options
+// list.
+func clientAuthOptions(lopts []stream.VCOpt) (ctx *context.T, principal security.Principal, err error) {
+ var securityLevel options.VCSecurityLevel
+ for _, o := range lopts {
+ switch v := o.(type) {
+ case vc.DialContext:
+ ctx = v.T
+ case vc.LocalPrincipal:
+ principal = v.Principal
+ case options.VCSecurityLevel:
+ securityLevel = v
+ }
+ }
+ switch securityLevel {
+ case options.VCSecurityConfidential:
+ if principal == nil {
+ principal = vc.AnonymousPrincipal
+ }
+ case options.VCSecurityNone:
+ principal = nil
+ default:
+ err = fmt.Errorf("unrecognized VC security level: %v", securityLevel)
+ }
+ return
+}
diff --git a/profiles/internal/ipc/stream/vif/vif_test.go b/profiles/internal/ipc/stream/vif/vif_test.go
new file mode 100644
index 0000000..132aa11
--- /dev/null
+++ b/profiles/internal/ipc/stream/vif/vif_test.go
@@ -0,0 +1,626 @@
+// Tests in a separate package to ensure that only the exported API is used in the tests.
+//
+// All tests are run with the default security level on VCs (VCSecurityConfidential).
+
+package vif_test
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net"
+ "reflect"
+ "runtime"
+ "sort"
+ "sync"
+ "testing"
+ "time"
+
+ "v.io/x/ref/lib/testutil"
+ tsecurity "v.io/x/ref/lib/testutil/security"
+ "v.io/x/ref/profiles/internal/ipc/stream/vc"
+ "v.io/x/ref/profiles/internal/ipc/stream/vif"
+ iversion "v.io/x/ref/profiles/internal/ipc/version"
+
+ "v.io/v23/ipc/version"
+ "v.io/v23/naming"
+ "v.io/x/ref/profiles/internal/ipc/stream"
+)
+
+//go:generate v23 test generate
+
+func newPrincipal(defaultBlessing string) vc.LocalPrincipal {
+ return vc.LocalPrincipal{tsecurity.NewPrincipal(defaultBlessing)}
+}
+
+func TestSingleFlowCreatedAtClient(t *testing.T) {
+ client, server := NewClientServer()
+ defer client.Close()
+
+ clientVC, _, err := createVC(client, server, makeEP(0x5))
+ if err != nil {
+ t.Fatal(err)
+ }
+ writer, err := clientVC.Connect()
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Test with an empty message to ensure that we correctly
+ // handle closing empty flows.
+ rwSingleFlow(t, writer, acceptFlowAtServer(server), "")
+ writer, err = clientVC.Connect()
+ if err != nil {
+ t.Fatal(err)
+ }
+ rwSingleFlow(t, writer, acceptFlowAtServer(server), "the dark knight")
+}
+
+func TestSingleFlowCreatedAtServer(t *testing.T) {
+ client, server := NewClientServer()
+ defer client.Close()
+
+ clientVC, serverConnector, err := createVC(client, server, makeEP(0x5))
+ if err != nil {
+ t.Fatal(err)
+ }
+ ln, err := clientVC.Listen()
+ if err != nil {
+ t.Fatal(err)
+ }
+ writer, err := serverConnector.Connect()
+ if err != nil {
+ t.Fatal(err)
+ }
+ reader, err := ln.Accept()
+ if err != nil {
+ t.Fatal(err)
+ }
+ rwSingleFlow(t, writer, reader, "the dark knight")
+ ln.Close()
+}
+
+func testMultipleVCsAndMultipleFlows(t *testing.T, gomaxprocs int) {
+ // This test dials multiple VCs from the client to the server.
+ // On each VC, it creates multiple flows, writes to them and verifies
+ // that the other process received what was written.
+
+ // Knobs configuring this test
+ //
+ // In case the test breaks, the knobs can be tuned down to isolate the problem.
+ // In normal circumstances, the knobs should be tuned up to stress test the code.
+ const (
+ nVCs = 6 // Number of VCs created by the client process Dialing.
+ nFlowsFromClientPerVC = 3 // Number of flows initiated by the client process, per VC
+ nFlowsFromServerPerVC = 4 // Number of flows initiated by the server process, per VC
+
+ // Maximum number of bytes to write and read per flow.
+ // The actual size is selected randomly.
+ maxBytesPerFlow = 512 << 10 // 512KB
+ )
+
+ mp := runtime.GOMAXPROCS(gomaxprocs)
+ defer runtime.GOMAXPROCS(mp)
+ client, server := NewClientServer()
+ defer client.Close()
+
+ // Create all the VCs
+ // clientVCs[i] is the VC at the client process
+ // serverConnectors[i] is the corresponding VC at the server process.
+ clientVCs, serverConnectors, err := createNVCs(client, server, 0, nVCs)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Create listeners for flows on the client VCs.
+ // Flows are implicitly being listened to at the server (available through server.Accept())
+ clientLNs, err := createListeners(clientVCs)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Create flows:
+ // Over each VC, create nFlowsFromClientPerVC initiated by the client
+ // and nFlowsFromServerPerVC initiated by the server.
+ nFlows := nVCs * (nFlowsFromClientPerVC + nFlowsFromServerPerVC)
+
+ // Fill in random strings that will be written over the Flows.
+ dataWritten := make([]string, nFlows)
+ for i := 0; i < nFlows; i++ {
+ dataWritten[i] = string(testutil.RandomBytes(maxBytesPerFlow))
+ }
+
+ // write writes data to flow in randomly sized chunks.
+ write := func(flow stream.Flow, data string) {
+ defer flow.Close()
+ buf := []byte(data)
+ // Split into a random number of Write calls.
+ for len(buf) > 0 {
+ size := 1 + testutil.Rand.Intn(len(buf)) // Random number in [1, len(buf)]
+ n, err := flow.Write(buf[:size])
+ if err != nil {
+ t.Errorf("Write failed: (%d, %v)", n, err)
+ return
+ }
+ buf = buf[size:]
+ }
+ }
+
+ dataReadChan := make(chan string, nFlows)
+ // read reads from a flow and writes out the data to dataReadChan
+ read := func(flow stream.Flow) {
+ var buf bytes.Buffer
+ var tmp [1024]byte
+ for {
+ n, err := flow.Read(tmp[:testutil.Rand.Intn(len(tmp))])
+ buf.Write(tmp[:n])
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Errorf("Read error: %v", err)
+ break
+ }
+ }
+ dataReadChan <- buf.String()
+ }
+
+ index := 0
+ for i := 0; i < len(clientVCs); i++ {
+ for j := 0; j < nFlowsFromClientPerVC; j++ {
+ // Flow initiated by client, read by server
+ writer, err := clientVCs[i].Connect()
+ if err != nil {
+ t.Errorf("clientVCs[%d], flow %d: %v", i, j, err)
+ continue
+ }
+ go write(writer, dataWritten[index])
+ go read(acceptFlowAtServer(server))
+ index++
+ }
+ }
+ for i := 0; i < len(serverConnectors); i++ {
+ for j := 0; j < nFlowsFromServerPerVC; j++ {
+ // Flow initiated by server, read by client
+ writer, err := serverConnectors[i].Connect()
+ if err != nil {
+ t.Errorf("serverConnectors[%d], flow %d: %v", i, j, err)
+ continue
+ }
+ go write(writer, dataWritten[index])
+ go read(acceptFlowAtClient(clientLNs[i]))
+ index++
+ }
+ }
+ if index != nFlows {
+ t.Errorf("Created %d flows, wanted %d", index, nFlows)
+ }
+
+ // Collect all data read and compare against the data written.
+ // Since flows might be accepted in arbitrary order, sort the data before comparing.
+ dataRead := make([]string, index)
+ for i := 0; i < index; i++ {
+ dataRead[i] = <-dataReadChan
+ }
+ sort.Strings(dataWritten)
+ sort.Strings(dataRead)
+ if !reflect.DeepEqual(dataRead, dataWritten) {
+ // Since the strings can be very large, only print out the first few diffs.
+ nDiffs := 0
+ for i := 0; i < len(dataRead); i++ {
+ if dataRead[i] != dataWritten[i] {
+ nDiffs++
+ t.Errorf("Diff %d out of %d items: Got %q want %q", nDiffs, i, atmostNbytes(dataRead[i], 20), atmostNbytes(dataWritten[i], 20))
+ }
+ }
+ if nDiffs > 0 {
+ t.Errorf("#Mismatches:%d #ReadSamples:%d #WriteSamples:", nDiffs, len(dataRead), len(dataWritten))
+ }
+ }
+}
+
+func TestMultipleVCsAndMultipleFlows_1(t *testing.T) {
+ // Test with a single goroutine since that is typically easier to debug
+ // in case of problems.
+ testMultipleVCsAndMultipleFlows(t, 1)
+}
+
+func TestMultipleVCsAndMultipleFlows_5(t *testing.T) {
+ // Test with multiple goroutines, particularly useful for checking
+ // races with
+ // go test -race
+ testMultipleVCsAndMultipleFlows(t, 5)
+}
+
+func TestClose(t *testing.T) {
+ client, server := NewClientServer()
+ vc, _, err := createVC(client, server, makeEP(0x5))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ clientFlow, err := vc.Connect()
+ if err != nil {
+ t.Fatal(err)
+ }
+ serverFlow := acceptFlowAtServer(server)
+
+ var message = []byte("bugs bunny")
+ go func() {
+ if n, err := clientFlow.Write(message); n != len(message) || err != nil {
+ t.Fatal("Wrote (%d, %v), want (%d, nil)", n, err, len(message))
+ }
+ client.Close()
+ }()
+
+ buf := make([]byte, 1024)
+ // client.Close should drain all pending writes first.
+ if n, err := serverFlow.Read(buf); n != len(message) || err != nil {
+ t.Fatalf("Got (%d, %v) = %q, want (%d, nil) = %q", n, err, buf[:n], len(message), message)
+ }
+ // subsequent reads should fail, since the VIF should be closed.
+ if n, err := serverFlow.Read(buf); n != 0 || err == nil {
+ t.Fatal("Got (%d, %v) = %q, want (0, nil)", n, err, buf[:n])
+ }
+ server.Close()
+}
+
+func TestShutdownVCs(t *testing.T) {
+ client, server := NewClientServer()
+ defer server.Close()
+ defer client.Close()
+
+ testN := func(N int) error {
+ c := client.NumVCs()
+ if c != N {
+ return fmt.Errorf("%d VCs on client VIF, expected %d", c, N)
+ }
+ return nil
+ }
+
+ if _, _, err := createVC(client, server, makeEP(0x5)); err != nil {
+ t.Fatal(err)
+ }
+ if err := testN(1); err != nil {
+ t.Error(err)
+ }
+ if _, _, err := createVC(client, server, makeEP(0x5)); err != nil {
+ t.Fatal(err)
+ }
+ if err := testN(2); err != nil {
+ t.Error(err)
+ }
+ if _, _, err := createVC(client, server, makeEP(0x7)); err != nil {
+ t.Fatal(err)
+ }
+ if err := testN(3); err != nil {
+ t.Error(err)
+ }
+ // Client does not have any VCs to the endpoint with routing id 0x9,
+ // so nothing should be closed
+ if n := client.ShutdownVCs(makeEP(0x9)); n != 0 {
+ t.Errorf("Expected 0 VCs to be closed, closed %d", n)
+ }
+ if err := testN(3); err != nil {
+ t.Error(err)
+ }
+ // But it does have VCs to 0x5
+ if n := client.ShutdownVCs(makeEP(0x5)); n != 2 {
+ t.Errorf("Expected 2 VCs to be closed, closed %d", n)
+ }
+ if err := testN(1); err != nil {
+ t.Error(err)
+ }
+ // And 0x7
+ if n := client.ShutdownVCs(makeEP(0x7)); n != 1 {
+ t.Errorf("Expected 2 VCs to be closed, closed %d", n)
+ }
+ if err := testN(0); err != nil {
+ t.Error(err)
+ }
+}
+
+type versionTestCase struct {
+ client, server, ep *iversion.Range
+ expectError bool
+ expectVIFError bool
+}
+
+func (tc *versionTestCase) Run(t *testing.T) {
+ client, server, err := NewVersionedClientServer(tc.client, tc.server)
+ if (err != nil) != tc.expectVIFError {
+ t.Errorf("Error mismatch. Wanted error: %v, got %v; client: %v, server: %v", tc.expectVIFError, err, tc.client, tc.server)
+ }
+ if err != nil {
+ return
+ }
+ defer client.Close()
+
+ ep := tc.ep.Endpoint("test", "addr", naming.FixedRoutingID(0x5))
+ clientVC, _, err := createVC(client, server, ep)
+ if (err != nil) != tc.expectError {
+ t.Errorf("Error mismatch. Wanted error: %v, got %v (client:%v, server:%v ep:%v)", tc.expectError, err, tc.client, tc.server, tc.ep)
+
+ }
+ if err != nil {
+ return
+ }
+
+ writer, err := clientVC.Connect()
+ if err != nil {
+ t.Errorf("Unexpected error on case %+v: %v", tc, err)
+ return
+ }
+
+ rwSingleFlow(t, writer, acceptFlowAtServer(server), "the dark knight")
+}
+
+// TestIncompatibleVersions tests many cases where the client and server
+// have compatible or incompatible supported version ranges. It ensures
+// that overlapping ranges work properly, but non-overlapping ranges generate
+// errors.
+func TestIncompatibleVersions(t *testing.T) {
+ unknown := &iversion.Range{version.UnknownIPCVersion, version.UnknownIPCVersion}
+ tests := []versionTestCase{
+ {&iversion.Range{1, 1}, &iversion.Range{1, 1}, &iversion.Range{1, 1}, false, false},
+ {&iversion.Range{1, 3}, &iversion.Range{3, 5}, &iversion.Range{3, 5}, false, false},
+ {&iversion.Range{1, 3}, &iversion.Range{3, 5}, unknown, false, false},
+
+ // No VIF error because the client does not initiate authentication.
+ {&iversion.Range{1, 3}, &iversion.Range{4, 5}, &iversion.Range{4, 5}, true, false},
+ {&iversion.Range{1, 3}, &iversion.Range{4, 5}, unknown, true, false},
+
+ // VIF error because the client asks for authentication, but the server
+ // doesn't understand it.
+ {&iversion.Range{6, 6}, &iversion.Range{1, 5}, unknown, true, true},
+ }
+
+ for _, tc := range tests {
+ tc.Run(t)
+ }
+}
+
+func TestNetworkFailure(t *testing.T) {
+ c1, c2 := pipe()
+ result := make(chan *vif.VIF)
+ go func() {
+ client, err := vif.InternalNewDialedVIF(c1, naming.FixedRoutingID(0xc), nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ result <- client
+ }()
+ server, err := vif.InternalNewAcceptedVIF(c2, naming.FixedRoutingID(0x5), nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ client := <-result
+ // If the network connection dies, Dial and Accept should fail.
+ c1.Close()
+ if _, err := client.Dial(makeEP(0x5)); err == nil {
+ t.Errorf("Expected client.Dial to fail")
+ }
+ if _, err := server.Accept(); err == nil {
+ t.Errorf("Expected server.Accept to fail")
+ }
+}
+
+func makeEP(rid uint64) naming.Endpoint {
+ return iversion.Endpoint("test", "addr", naming.FixedRoutingID(rid))
+}
+
+// pipeAddr provides a more descriptive String implementation than provided by net.Pipe.
+type pipeAddr struct{ name string }
+
+func (a pipeAddr) Network() string { return "pipe" }
+func (a pipeAddr) String() string { return a.name }
+
+// pipeConn provides a buffered net.Conn, with pipeAddr addressing.
+type pipeConn struct {
+ lock sync.Mutex
+ // w is guarded by lock, to prevent Close from racing with Write. This is a
+ // quick way to prevent the race, but it allows a Write to block the Close.
+ // This isn't a problem in the tests currently.
+ w chan<- []byte
+ r <-chan []byte
+ rdata []byte
+ laddr, raddr pipeAddr
+}
+
+func (c *pipeConn) Read(data []byte) (int, error) {
+ for len(c.rdata) == 0 {
+ d, ok := <-c.r
+ if !ok {
+ return 0, io.EOF
+ }
+ c.rdata = d
+ }
+ n := copy(data, c.rdata)
+ c.rdata = c.rdata[n:]
+ return n, nil
+}
+
+func (c *pipeConn) Write(data []byte) (int, error) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if c.w == nil {
+ return 0, io.EOF
+ }
+ d := make([]byte, len(data))
+ copy(d, data)
+ c.w <- d
+ return len(data), nil
+}
+
+func (c *pipeConn) Close() error {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if c.w == nil {
+ return io.EOF
+ }
+ close(c.w)
+ c.w = nil
+ return nil
+}
+
+func (c *pipeConn) LocalAddr() net.Addr { return c.laddr }
+func (c *pipeConn) RemoteAddr() net.Addr { return c.raddr }
+func (c *pipeConn) SetDeadline(t time.Time) error { return nil }
+func (c *pipeConn) SetReadDeadline(t time.Time) error { return nil }
+func (c *pipeConn) SetWriteDeadline(t time.Time) error { return nil }
+
+func pipe() (net.Conn, net.Conn) {
+ clientAddr := pipeAddr{"client"}
+ serverAddr := pipeAddr{"server"}
+ c1 := make(chan []byte, 10)
+ c2 := make(chan []byte, 10)
+ p1 := &pipeConn{w: c1, r: c2, laddr: clientAddr, raddr: serverAddr}
+ p2 := &pipeConn{w: c2, r: c1, laddr: serverAddr, raddr: clientAddr}
+ return p1, p2
+}
+
+func NewClientServer() (client, server *vif.VIF) {
+ var err error
+ client, server, err = NewVersionedClientServer(nil, nil)
+ if err != nil {
+ panic(err)
+ }
+ return
+}
+
+func NewVersionedClientServer(clientVersions, serverVersions *iversion.Range) (client, server *vif.VIF, verr error) {
+ c1, c2 := pipe()
+ var cerr error
+ cl := make(chan *vif.VIF)
+ go func() {
+ c, err := vif.InternalNewDialedVIF(c1, naming.FixedRoutingID(0xc), clientVersions, newPrincipal("client"))
+ if err != nil {
+ cerr = err
+ close(cl)
+ } else {
+ cl <- c
+ }
+ }()
+ s, err := vif.InternalNewAcceptedVIF(c2, naming.FixedRoutingID(0x5), serverVersions, newPrincipal("server"))
+ c, ok := <-cl
+ if err != nil {
+ verr = err
+ return
+ }
+ if !ok {
+ verr = cerr
+ return
+ }
+ server = s
+ client = c
+ return
+}
+
+// rwSingleFlow writes out data on writer and ensures that the reader sees the same string.
+func rwSingleFlow(t *testing.T, writer io.WriteCloser, reader io.Reader, data string) {
+ go func() {
+ if n, err := writer.Write([]byte(data)); n != len(data) || err != nil {
+ t.Errorf("Write failure. Got (%d, %v) want (%d, nil)", n, err, len(data))
+ }
+ writer.Close()
+ }()
+
+ var buf bytes.Buffer
+ var tmp [4096]byte
+ for {
+ n, err := reader.Read(tmp[:])
+ buf.Write(tmp[:n])
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Errorf("Read error: %v", err)
+ }
+ }
+ if buf.String() != data {
+ t.Errorf("Wrote %q but read %q", data, buf.String())
+ }
+}
+
+// createVC creates a VC by dialing from the client process to the server
+// process. It returns the VC at the client and the Connector at the server
+// (which the server can use to create flows over the VC).
+func createVC(client, server *vif.VIF, ep naming.Endpoint) (clientVC stream.VC, serverConnector stream.Connector, err error) {
+ vcChan := make(chan stream.VC)
+ scChan := make(chan stream.Connector)
+ errChan := make(chan error)
+ go func() {
+ vc, err := client.Dial(ep, newPrincipal("client"))
+ errChan <- err
+ vcChan <- vc
+ }()
+ go func() {
+ cAndf, err := server.Accept()
+ errChan <- err
+ if err == nil {
+ scChan <- cAndf.Connector
+ }
+ }()
+ if err = <-errChan; err != nil {
+ return
+ }
+ if err = <-errChan; err != nil {
+ return
+ }
+ clientVC = <-vcChan
+ serverConnector = <-scChan
+ return
+}
+
+func createNVCs(client, server *vif.VIF, startRID uint64, N int) (clientVCs []stream.VC, serverConnectors []stream.Connector, err error) {
+ var c stream.VC
+ var s stream.Connector
+ for i := 0; i < N; i++ {
+ c, s, err = createVC(client, server, makeEP(startRID+uint64(i)))
+ if err != nil {
+ return
+ }
+ clientVCs = append(clientVCs, c)
+ serverConnectors = append(serverConnectors, s)
+ }
+ return
+}
+
+func createListeners(vcs []stream.VC) ([]stream.Listener, error) {
+ var ret []stream.Listener
+ for _, vc := range vcs {
+ ln, err := vc.Listen()
+ if err != nil {
+ return nil, err
+ }
+ ret = append(ret, ln)
+ }
+ return ret, nil
+}
+
+func acceptFlowAtServer(vf *vif.VIF) stream.Flow {
+ for {
+ cAndf, err := vf.Accept()
+ if err != nil {
+ panic(err)
+ }
+ if cAndf.Flow != nil {
+ return cAndf.Flow
+ }
+ }
+}
+
+func acceptFlowAtClient(ln stream.Listener) stream.Flow {
+ f, err := ln.Accept()
+ if err != nil {
+ panic(err)
+ }
+ return f
+}
+
+func atmostNbytes(s string, n int) string {
+ if n > len(s) {
+ return s
+ }
+ b := []byte(s)
+ return string(b[:n/2]) + "..." + string(b[len(s)-n/2:])
+}
diff --git a/profiles/internal/ipc/stress/internal/client.go b/profiles/internal/ipc/stress/internal/client.go
new file mode 100644
index 0000000..b46b6d1
--- /dev/null
+++ b/profiles/internal/ipc/stress/internal/client.go
@@ -0,0 +1,102 @@
+package internal
+
+import (
+ "bytes"
+ crand "crypto/rand"
+ "fmt"
+ "math/rand"
+
+ "v.io/v23/context"
+ "v.io/x/lib/vlog"
+
+ "v.io/x/ref/profiles/internal/ipc/stress"
+)
+
+func newArg(maxPayloadSize int) (stress.Arg, error) {
+ var arg stress.Arg
+ arg.ABool = rand.Intn(2) == 0
+ arg.AInt64 = rand.Int63()
+ arg.AListOfBytes = make([]byte, rand.Intn(maxPayloadSize)+1)
+ _, err := crand.Read(arg.AListOfBytes)
+ return arg, err
+}
+
+// CallSum calls 'Sum' method with a randomly generated payload.
+func CallSum(ctx *context.T, server string, maxPayloadSize int) {
+ stub := stress.StressClient(server)
+ arg, err := newArg(maxPayloadSize)
+ if err != nil {
+ vlog.Fatalf("new arg failed: %v", err)
+ }
+
+ got, err := stub.Sum(ctx, arg)
+ if err != nil {
+ vlog.Fatalf("Sum failed: %v", err)
+ }
+
+ wanted, _ := doSum(arg)
+ if !bytes.Equal(got, wanted) {
+ vlog.Fatalf("Sum returned %v, but expected %v", got, wanted)
+ }
+}
+
+// CallSumStream calls 'SumStream' method. Each iteration sends up to
+// 'maxChunkCnt' chunks on the stream and receives the same number of
+// sums back.
+func CallSumStream(ctx *context.T, server string, maxChunkCnt, maxPayloadSize int) {
+ stub := stress.StressClient(server)
+ stream, err := stub.SumStream(ctx)
+ if err != nil {
+ vlog.Fatalf("Stream failed: %v", err)
+ }
+
+ chunkCnt := rand.Intn(maxChunkCnt) + 1
+ args := make([]stress.Arg, chunkCnt)
+ done := make(chan error, 1)
+ go func() {
+ defer close(done)
+
+ recvS := stream.RecvStream()
+ i := 0
+ for ; recvS.Advance(); i++ {
+ got := recvS.Value()
+ wanted, _ := doSum(args[i])
+ if !bytes.Equal(got, wanted) {
+ done <- fmt.Errorf("RecvStream returned %v, but expected %v", got, wanted)
+ return
+ }
+ }
+ switch err := recvS.Err(); {
+ case err != nil:
+ done <- err
+ case i != chunkCnt:
+ done <- fmt.Errorf("RecvStream returned %d chunks, but expected %d", i, chunkCnt)
+ default:
+ done <- nil
+ }
+ }()
+
+ sendS := stream.SendStream()
+ for i := 0; i < chunkCnt; i++ {
+ arg, err := newArg(maxPayloadSize)
+ if err != nil {
+ vlog.Fatalf("new arg failed: %v", err)
+ }
+ args[i] = arg
+
+ if err = sendS.Send(arg); err != nil {
+ vlog.Fatalf("SendStream failed to send: %v", err)
+ }
+ }
+ if err = sendS.Close(); err != nil {
+ vlog.Fatalf("SendStream failed to close: %v", err)
+ }
+
+ if err = <-done; err != nil {
+ vlog.Fatalf("%v", err)
+ }
+
+ if err = stream.Finish(); err != nil {
+ vlog.Fatalf("Stream failed to finish: %v", err)
+ }
+}
diff --git a/profiles/internal/ipc/stress/internal/server.go b/profiles/internal/ipc/stress/internal/server.go
new file mode 100644
index 0000000..1f1b93c
--- /dev/null
+++ b/profiles/internal/ipc/stress/internal/server.go
@@ -0,0 +1,92 @@
+package internal
+
+import (
+ "sync"
+
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/naming"
+ "v.io/v23/security"
+ "v.io/x/lib/vlog"
+
+ "v.io/x/ref/profiles/internal/ipc/stress"
+)
+
+type impl struct {
+ statsMu sync.Mutex
+ sumCount uint64 // GUARDED_BY(statsMu)
+ sumStreamCount uint64 // GUARDED_BY(statsMu)
+
+ stop chan struct{}
+}
+
+func (s *impl) Sum(ctx ipc.ServerCall, arg stress.Arg) ([]byte, error) {
+ defer s.incSumCount()
+ return doSum(arg)
+}
+
+func (s *impl) SumStream(ctx stress.StressSumStreamContext) error {
+ defer s.incSumStreamCount()
+ rStream := ctx.RecvStream()
+ sStream := ctx.SendStream()
+ for rStream.Advance() {
+ sum, err := doSum(rStream.Value())
+ if err != nil {
+ return err
+ }
+ if err := sStream.Send(sum); err != nil {
+ return err
+ }
+ }
+ if err := rStream.Err(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (s *impl) GetStats(ctx ipc.ServerCall) (stress.Stats, error) {
+ s.statsMu.Lock()
+ defer s.statsMu.Unlock()
+ return stress.Stats{s.sumCount, s.sumStreamCount}, nil
+}
+
+func (s *impl) Stop(ctx ipc.ServerCall) error {
+ s.stop <- struct{}{}
+ return nil
+}
+
+func (s *impl) incSumCount() {
+ s.statsMu.Lock()
+ defer s.statsMu.Unlock()
+ s.sumCount++
+}
+
+func (s *impl) incSumStreamCount() {
+ s.statsMu.Lock()
+ defer s.statsMu.Unlock()
+ s.sumStreamCount++
+}
+
+type allowEveryoneAuthorizer struct{}
+
+func (allowEveryoneAuthorizer) Authorize(security.Call) error { return nil }
+
+// StartServer starts a server that implements the Stress service, and returns
+// the server and its endpoint. It also returns a channel carrying stop
+// requests. After reading from the stop channel, the application should exit.
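+//
+// A typical caller does something like (sketch):
+//   server, ep, stop := StartServer(ctx, listenSpec)
+//   // ... publish ep and serve requests ...
+//   <-stop // a Stop() RPC was received; shut down and exit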
+func StartServer(ctx *context.T, listenSpec ipc.ListenSpec) (ipc.Server, naming.Endpoint, <-chan struct{}) {
+ server, err := v23.NewServer(ctx)
+ if err != nil {
+ vlog.Fatalf("NewServer failed: %v", err)
+ }
+ eps, err := server.Listen(listenSpec)
+ if err != nil {
+ vlog.Fatalf("Listen failed: %v", err)
+ }
+
+ s := impl{stop: make(chan struct{})}
+ if err := server.Serve("", stress.StressServer(&s), allowEveryoneAuthorizer{}); err != nil {
+ vlog.Fatalf("Serve failed: %v", err)
+ }
+
+ return server, eps[0], s.stop
+}
diff --git a/profiles/internal/ipc/stress/internal/util.go b/profiles/internal/ipc/stress/internal/util.go
new file mode 100644
index 0000000..a57dd74
--- /dev/null
+++ b/profiles/internal/ipc/stress/internal/util.go
@@ -0,0 +1,22 @@
+package internal
+
+import (
+ "crypto/md5"
+ "encoding/binary"
+
+ "v.io/x/ref/profiles/internal/ipc/stress"
+)
+
+// doSum returns the MD5 checksum of arg: AInt64 (included only when ABool is
+// set) followed by AListOfBytes.
+func doSum(arg stress.Arg) ([]byte, error) {
+ h := md5.New()
+ if arg.ABool {
+ if err := binary.Write(h, binary.LittleEndian, arg.AInt64); err != nil {
+ return nil, err
+ }
+ }
+ if _, err := h.Write(arg.AListOfBytes); err != nil {
+ return nil, err
+ }
+ return h.Sum(nil), nil
+}
diff --git a/profiles/internal/ipc/stress/stress.vdl b/profiles/internal/ipc/stress/stress.vdl
new file mode 100644
index 0000000..556caad
--- /dev/null
+++ b/profiles/internal/ipc/stress/stress.vdl
@@ -0,0 +1,30 @@
+package stress
+
+import (
+ "v.io/v23/services/security/access"
+)
+
+type Arg struct {
+ ABool bool
+ AInt64 int64
+ AListOfBytes []byte
+}
+
+type Stats struct {
+ SumCount uint64
+ SumStreamCount uint64
+}
+
+type Stress interface {
+ // Sum returns the checksum of the payload that it receives.
+ Sum(arg Arg) ([]byte | error) {access.Read}
+
+ // SumStream returns the checksum of each payload that it receives via the stream.
+ SumStream() stream<Arg,[]byte> error {access.Read}
+
+ // GetStats returns the stats on the calls that the server received.
+ GetStats() (Stats | error) {access.Read}
+
+ // Stop stops the server.
+ Stop() error {access.Admin}
+}
diff --git a/profiles/internal/ipc/stress/stress.vdl.go b/profiles/internal/ipc/stress/stress.vdl.go
new file mode 100644
index 0000000..90c38f3
--- /dev/null
+++ b/profiles/internal/ipc/stress/stress.vdl.go
@@ -0,0 +1,428 @@
+// This file was auto-generated by the veyron vdl tool.
+// Source: stress.vdl
+
+package stress
+
+import (
+ // VDL system imports
+ "io"
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/vdl"
+
+ // VDL user imports
+ "v.io/v23/services/security/access"
+)
+
+type Arg struct {
+ ABool bool
+ AInt64 int64
+ AListOfBytes []byte
+}
+
+func (Arg) __VDLReflect(struct {
+ Name string "v.io/x/ref/profiles/internal/ipc/stress.Arg"
+}) {
+}
+
+type Stats struct {
+ SumCount uint64
+ SumStreamCount uint64
+}
+
+func (Stats) __VDLReflect(struct {
+ Name string "v.io/x/ref/profiles/internal/ipc/stress.Stats"
+}) {
+}
+
+func init() {
+ vdl.Register((*Arg)(nil))
+ vdl.Register((*Stats)(nil))
+}
+
+// StressClientMethods is the client interface
+// containing Stress methods.
+type StressClientMethods interface {
+ // Sum returns the checksum of the payload that it receives.
+ Sum(ctx *context.T, arg Arg, opts ...ipc.CallOpt) ([]byte, error)
+ // SumStream returns the checksum of each payload that it receives via the stream.
+ SumStream(*context.T, ...ipc.CallOpt) (StressSumStreamClientCall, error)
+ // GetStats returns the stats on the calls that the server received.
+ GetStats(*context.T, ...ipc.CallOpt) (Stats, error)
+ // Stop stops the server.
+ Stop(*context.T, ...ipc.CallOpt) error
+}
+
+// StressClientStub adds universal methods to StressClientMethods.
+type StressClientStub interface {
+ StressClientMethods
+ ipc.UniversalServiceMethods
+}
+
+// StressClient returns a client stub for Stress.
+func StressClient(name string, opts ...ipc.BindOpt) StressClientStub {
+ var client ipc.Client
+ for _, opt := range opts {
+ if clientOpt, ok := opt.(ipc.Client); ok {
+ client = clientOpt
+ }
+ }
+ return implStressClientStub{name, client}
+}
+
+type implStressClientStub struct {
+ name string
+ client ipc.Client
+}
+
+func (c implStressClientStub) c(ctx *context.T) ipc.Client {
+ if c.client != nil {
+ return c.client
+ }
+ return v23.GetClient(ctx)
+}
+
+func (c implStressClientStub) Sum(ctx *context.T, i0 Arg, opts ...ipc.CallOpt) (o0 []byte, err error) {
+ var call ipc.ClientCall
+ if call, err = c.c(ctx).StartCall(ctx, c.name, "Sum", []interface{}{i0}, opts...); err != nil {
+ return
+ }
+ err = call.Finish(&o0)
+ return
+}
+
+func (c implStressClientStub) SumStream(ctx *context.T, opts ...ipc.CallOpt) (ocall StressSumStreamClientCall, err error) {
+ var call ipc.ClientCall
+ if call, err = c.c(ctx).StartCall(ctx, c.name, "SumStream", nil, opts...); err != nil {
+ return
+ }
+ ocall = &implStressSumStreamClientCall{ClientCall: call}
+ return
+}
+
+func (c implStressClientStub) GetStats(ctx *context.T, opts ...ipc.CallOpt) (o0 Stats, err error) {
+ var call ipc.ClientCall
+ if call, err = c.c(ctx).StartCall(ctx, c.name, "GetStats", nil, opts...); err != nil {
+ return
+ }
+ err = call.Finish(&o0)
+ return
+}
+
+func (c implStressClientStub) Stop(ctx *context.T, opts ...ipc.CallOpt) (err error) {
+ var call ipc.ClientCall
+ if call, err = c.c(ctx).StartCall(ctx, c.name, "Stop", nil, opts...); err != nil {
+ return
+ }
+ err = call.Finish()
+ return
+}
+
+// StressSumStreamClientStream is the client stream for Stress.SumStream.
+type StressSumStreamClientStream interface {
+ // RecvStream returns the receiver side of the Stress.SumStream client stream.
+ RecvStream() interface {
+ // Advance stages an item so that it may be retrieved via Value. Returns
+ // true iff there is an item to retrieve. Advance must be called before
+ // Value is called. May block if an item is not available.
+ Advance() bool
+ // Value returns the item that was staged by Advance. May panic if Advance
+ // returned false or was not called. Never blocks.
+ Value() []byte
+ // Err returns any error encountered by Advance. Never blocks.
+ Err() error
+ }
+ // SendStream returns the send side of the Stress.SumStream client stream.
+ SendStream() interface {
+ // Send places the item onto the output stream. Returns errors
+ // encountered while sending, or if Send is called after Close or
+ // the stream has been canceled. Blocks if there is no buffer
+ // space; will unblock when buffer space is available or after
+ // the stream has been canceled.
+ Send(item Arg) error
+ // Close indicates to the server that no more items will be sent;
+ // server Recv calls will receive io.EOF after all sent items.
+ // This is an optional call - e.g. a client might call Close if it
+ // needs to continue receiving items from the server after it's
+ // done sending. Returns errors encountered while closing, or if
+ // Close is called after the stream has been canceled. Like Send,
+ // blocks if there is no buffer space available.
+ Close() error
+ }
+}
+
+// StressSumStreamClientCall represents the call returned from Stress.SumStream.
+type StressSumStreamClientCall interface {
+ StressSumStreamClientStream
+ // Finish performs the equivalent of SendStream().Close, then blocks until
+ // the server is done, and returns the positional return values for the call.
+ //
+ // Finish returns immediately if the call has been canceled; depending on the
+ // timing the output could either be an error signaling cancelation, or the
+ // valid positional return values from the server.
+ //
+ // Calling Finish is mandatory for releasing stream resources, unless the call
+ // has been canceled or any of the other methods return an error. Finish should
+ // be called at most once.
+ Finish() error
+}
+
+type implStressSumStreamClientCall struct {
+ ipc.ClientCall
+ valRecv []byte
+ errRecv error
+}
+
+func (c *implStressSumStreamClientCall) RecvStream() interface {
+ Advance() bool
+ Value() []byte
+ Err() error
+} {
+ return implStressSumStreamCallRecv{c}
+}
+
+type implStressSumStreamCallRecv struct {
+ c *implStressSumStreamClientCall
+}
+
+func (c implStressSumStreamCallRecv) Advance() bool {
+ c.c.errRecv = c.c.Recv(&c.c.valRecv)
+ return c.c.errRecv == nil
+}
+func (c implStressSumStreamCallRecv) Value() []byte {
+ return c.c.valRecv
+}
+func (c implStressSumStreamCallRecv) Err() error {
+ if c.c.errRecv == io.EOF {
+ return nil
+ }
+ return c.c.errRecv
+}
+func (c *implStressSumStreamClientCall) SendStream() interface {
+ Send(item Arg) error
+ Close() error
+} {
+ return implStressSumStreamCallSend{c}
+}
+
+type implStressSumStreamCallSend struct {
+ c *implStressSumStreamClientCall
+}
+
+func (c implStressSumStreamCallSend) Send(item Arg) error {
+ return c.c.Send(item)
+}
+func (c implStressSumStreamCallSend) Close() error {
+ return c.c.CloseSend()
+}
+func (c *implStressSumStreamClientCall) Finish() (err error) {
+ err = c.ClientCall.Finish()
+ return
+}
+
+// StressServerMethods is the interface a server writer
+// implements for Stress.
+type StressServerMethods interface {
+ // Sum returns the checksum of the payload that it receives.
+ Sum(ctx ipc.ServerCall, arg Arg) ([]byte, error)
+ // SumStream returns the checksum of the payload that it receives via the stream.
+ SumStream(StressSumStreamContext) error
+ // GetStats returns the stats on the calls that the server received.
+ GetStats(ipc.ServerCall) (Stats, error)
+ // Stop stops the server.
+ Stop(ipc.ServerCall) error
+}
+
+// StressServerStubMethods is the server interface containing
+// Stress methods, as expected by ipc.Server.
+// The only difference between this interface and StressServerMethods
+// is the streaming methods.
+type StressServerStubMethods interface {
+ // Sum returns the checksum of the payload that it receives.
+ Sum(ctx ipc.ServerCall, arg Arg) ([]byte, error)
+ // SumStream returns the checksum of the payload that it receives via the stream.
+ SumStream(*StressSumStreamContextStub) error
+ // GetStats returns the stats on the calls that the server received.
+ GetStats(ipc.ServerCall) (Stats, error)
+ // Stop stops the server.
+ Stop(ipc.ServerCall) error
+}
+
+// StressServerStub adds universal methods to StressServerStubMethods.
+type StressServerStub interface {
+ StressServerStubMethods
+ // Describe the Stress interfaces.
+ Describe__() []ipc.InterfaceDesc
+}
+
+// StressServer returns a server stub for Stress.
+// It converts an implementation of StressServerMethods into
+// an object that may be used by ipc.Server.
+func StressServer(impl StressServerMethods) StressServerStub {
+ stub := implStressServerStub{
+ impl: impl,
+ }
+ // Initialize GlobState; always check the stub itself first, to handle the
+ // case where the user has the Glob method defined in their VDL source.
+ if gs := ipc.NewGlobState(stub); gs != nil {
+ stub.gs = gs
+ } else if gs := ipc.NewGlobState(impl); gs != nil {
+ stub.gs = gs
+ }
+ return stub
+}
+
+type implStressServerStub struct {
+ impl StressServerMethods
+ gs *ipc.GlobState
+}
+
+func (s implStressServerStub) Sum(ctx ipc.ServerCall, i0 Arg) ([]byte, error) {
+ return s.impl.Sum(ctx, i0)
+}
+
+func (s implStressServerStub) SumStream(ctx *StressSumStreamContextStub) error {
+ return s.impl.SumStream(ctx)
+}
+
+func (s implStressServerStub) GetStats(ctx ipc.ServerCall) (Stats, error) {
+ return s.impl.GetStats(ctx)
+}
+
+func (s implStressServerStub) Stop(ctx ipc.ServerCall) error {
+ return s.impl.Stop(ctx)
+}
+
+func (s implStressServerStub) Globber() *ipc.GlobState {
+ return s.gs
+}
+
+func (s implStressServerStub) Describe__() []ipc.InterfaceDesc {
+ return []ipc.InterfaceDesc{StressDesc}
+}
+
+// StressDesc describes the Stress interface.
+var StressDesc ipc.InterfaceDesc = descStress
+
+// descStress hides the desc to keep godoc clean.
+var descStress = ipc.InterfaceDesc{
+ Name: "Stress",
+ PkgPath: "v.io/x/ref/profiles/internal/ipc/stress",
+ Methods: []ipc.MethodDesc{
+ {
+ Name: "Sum",
+ Doc: "// Sum returns the checksum of the payload that it receives.",
+ InArgs: []ipc.ArgDesc{
+ {"arg", ``}, // Arg
+ },
+ OutArgs: []ipc.ArgDesc{
+ {"", ``}, // []byte
+ },
+ Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Read"))},
+ },
+ {
+ Name: "SumStream",
+ Doc: "// SumStream returns the checksum of the payload that it receives via the stream.",
+ Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Read"))},
+ },
+ {
+ Name: "GetStats",
+ Doc: "// GetStats returns the stats on the calls that the server received.",
+ OutArgs: []ipc.ArgDesc{
+ {"", ``}, // Stats
+ },
+ Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Read"))},
+ },
+ {
+ Name: "Stop",
+ Doc: "// Stop stops the server.",
+ Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Admin"))},
+ },
+ },
+}
+
+// StressSumStreamServerStream is the server stream for Stress.SumStream.
+type StressSumStreamServerStream interface {
+ // RecvStream returns the receiver side of the Stress.SumStream server stream.
+ RecvStream() interface {
+ // Advance stages an item so that it may be retrieved via Value. Returns
+ // true iff there is an item to retrieve. Advance must be called before
+ // Value is called. May block if an item is not available.
+ Advance() bool
+ // Value returns the item that was staged by Advance. May panic if Advance
+ // returned false or was not called. Never blocks.
+ Value() Arg
+ // Err returns any error encountered by Advance. Never blocks.
+ Err() error
+ }
+ // SendStream returns the send side of the Stress.SumStream server stream.
+ SendStream() interface {
+ // Send places the item onto the output stream. Returns errors encountered
+ // while sending. Blocks if there is no buffer space; will unblock when
+ // buffer space is available.
+ Send(item []byte) error
+ }
+}
+
+// StressSumStreamContext represents the context passed to Stress.SumStream.
+type StressSumStreamContext interface {
+ ipc.ServerCall
+ StressSumStreamServerStream
+}
+
+// StressSumStreamContextStub is a wrapper that converts ipc.StreamServerCall into
+// a typesafe stub that implements StressSumStreamContext.
+type StressSumStreamContextStub struct {
+ ipc.StreamServerCall
+ valRecv Arg
+ errRecv error
+}
+
+// Init initializes StressSumStreamContextStub from ipc.StreamServerCall.
+func (s *StressSumStreamContextStub) Init(call ipc.StreamServerCall) {
+ s.StreamServerCall = call
+}
+
+// RecvStream returns the receiver side of the Stress.SumStream server stream.
+func (s *StressSumStreamContextStub) RecvStream() interface {
+ Advance() bool
+ Value() Arg
+ Err() error
+} {
+ return implStressSumStreamContextRecv{s}
+}
+
+type implStressSumStreamContextRecv struct {
+ s *StressSumStreamContextStub
+}
+
+func (s implStressSumStreamContextRecv) Advance() bool {
+ s.s.valRecv = Arg{}
+ s.s.errRecv = s.s.Recv(&s.s.valRecv)
+ return s.s.errRecv == nil
+}
+func (s implStressSumStreamContextRecv) Value() Arg {
+ return s.s.valRecv
+}
+func (s implStressSumStreamContextRecv) Err() error {
+ if s.s.errRecv == io.EOF {
+ return nil
+ }
+ return s.s.errRecv
+}
+
+// SendStream returns the send side of the Stress.SumStream server stream.
+func (s *StressSumStreamContextStub) SendStream() interface {
+ Send(item []byte) error
+} {
+ return implStressSumStreamContextSend{s}
+}
+
+type implStressSumStreamContextSend struct {
+ s *StressSumStreamContextStub
+}
+
+func (s implStressSumStreamContextSend) Send(item []byte) error {
+ return s.s.Send(item)
+}
diff --git a/profiles/internal/ipc/stress/stress/main.go b/profiles/internal/ipc/stress/stress/main.go
new file mode 100644
index 0000000..f62ad08
--- /dev/null
+++ b/profiles/internal/ipc/stress/stress/main.go
@@ -0,0 +1,122 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "math/rand"
+ "runtime"
+ "strings"
+ "time"
+
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/x/lib/vlog"
+
+ "v.io/x/ref/profiles/internal/ipc/stress"
+ "v.io/x/ref/profiles/internal/ipc/stress/internal"
+ _ "v.io/x/ref/profiles/static"
+)
+
+var (
+ servers = flag.String("servers", "", "comma-separated list of the servers to connect to")
+ workers = flag.Int("workers", 1, "number of test workers to run; if zero, no test will be performed.")
+ duration = flag.Duration("duration", 1*time.Minute, "duration of the stress test to run")
+
+ maxChunkCnt = flag.Int("max_chunk_count", 100, "maximum number of chunks to send per streaming RPC")
+ maxPayloadSize = flag.Int("max_payload_size", 10000, "maximum size of payload in bytes")
+
+ serverStats = flag.Bool("server_stats", false, "If true, print out the server stats")
+ serverStop = flag.Bool("server_stop", false, "If true, shutdown the servers")
+)
+
+func init() {
+ rand.Seed(time.Now().UnixNano())
+}
+
+func runTest(ctx *context.T, servers []string) {
+ fmt.Printf("starting stress test against %d servers...\n", len(servers))
+ fmt.Printf("workers: %d, maxChunkCnt: %d, maxPayloadSize: %d\n", *workers, *maxChunkCnt, *maxPayloadSize)
+
+ now := time.Now()
+ done := make(chan stress.Stats, *workers)
+ for i := 0; i < *workers; i++ {
+ go func() {
+ var sumCount, sumStreamCount uint64
+ timeout := time.After(*duration)
+ done:
+ for {
+ server := servers[rand.Intn(len(servers))]
+ if rand.Intn(2) == 0 {
+ internal.CallSum(ctx, server, *maxPayloadSize)
+ sumCount++
+ } else {
+ internal.CallSumStream(ctx, server, *maxChunkCnt, *maxPayloadSize)
+ sumStreamCount++
+ }
+
+ select {
+ case <-timeout:
+ break done
+ default:
+ }
+ }
+ done <- stress.Stats{sumCount, sumStreamCount}
+ }()
+ }
+
+ var stats stress.Stats
+ for i := 0; i < *workers; i++ {
+ s := <-done
+ stats.SumCount += s.SumCount
+ stats.SumStreamCount += s.SumStreamCount
+ }
+ elapsed := time.Since(now)
+
+ fmt.Printf("done after %v\n", elapsed)
+ fmt.Printf("client stats: %+v, ", stats)
+ fmt.Printf("qps: %.2f\n", float64(stats.SumCount+stats.SumStreamCount)/elapsed.Seconds())
+}
+
+func printServerStats(ctx *context.T, servers []string) {
+ for _, server := range servers {
+ stats, err := stress.StressClient(server).GetStats(ctx)
+ if err != nil {
+ vlog.Fatalf("GetStats failed: %v", err)
+ }
+ fmt.Printf("server stats: %q:%+v\n", server, stats)
+ }
+}
+
+func stopServers(ctx *context.T, servers []string) {
+ for _, server := range servers {
+ if err := stress.StressClient(server).Stop(ctx); err != nil {
+ vlog.Fatalf("Stop failed: %v", err)
+ }
+ }
+}
+
+func main() {
+ runtime.GOMAXPROCS(runtime.NumCPU())
+
+ ctx, shutdown := v23.Init()
+ defer shutdown()
+
+ var addrs []string
+ for _, a := range strings.Split(*servers, ",") {
+ if a = strings.TrimSpace(a); a != "" {
+ addrs = append(addrs, a)
+ }
+ }
+ if len(addrs) == 0 {
+ vlog.Fatal("no server specified")
+ }
+
+ if *workers > 0 && *duration > 0 {
+ runTest(ctx, addrs)
+ }
+
+ if *serverStats {
+ printServerStats(ctx, addrs)
+ }
+ if *serverStop {
+ stopServers(ctx, addrs)
+ }
+}
diff --git a/profiles/internal/ipc/stress/stressd/main.go b/profiles/internal/ipc/stress/stressd/main.go
new file mode 100644
index 0000000..ea138d5
--- /dev/null
+++ b/profiles/internal/ipc/stress/stressd/main.go
@@ -0,0 +1,44 @@
+// A simple command-line tool to run the stress test server.
+package main
+
+import (
+ "flag"
+ "runtime"
+ "time"
+
+ "v.io/v23"
+ "v.io/x/lib/vlog"
+
+ "v.io/x/ref/lib/signals"
+ "v.io/x/ref/profiles/internal/ipc/stress/internal"
+ _ "v.io/x/ref/profiles/static"
+)
+
+var (
+ duration = flag.Duration("duration", 0, "duration of the stress test to run; if zero, there is no limit.")
+)
+
+func main() {
+ runtime.GOMAXPROCS(runtime.NumCPU())
+
+ ctx, shutdown := v23.Init()
+ defer shutdown()
+
+ server, ep, stop := internal.StartServer(ctx, v23.GetListenSpec(ctx))
+ vlog.Infof("listening on %s", ep.Name())
+
+ var timeout <-chan time.Time
+ if *duration > 0 {
+ timeout = time.After(*duration)
+ }
+ select {
+ case <-timeout:
+ case <-stop:
+ case <-signals.ShutdownOnSignals(ctx):
+ }
+
+ if err := server.Stop(); err != nil {
+ vlog.Fatalf("Stop() failed: %v", err)
+ }
+ vlog.Info("stopped.")
+}
diff --git a/profiles/internal/ipc/testutil_test.go b/profiles/internal/ipc/testutil_test.go
new file mode 100644
index 0000000..5bff981
--- /dev/null
+++ b/profiles/internal/ipc/testutil_test.go
@@ -0,0 +1,96 @@
+package ipc
+
+import (
+ "reflect"
+ "testing"
+ "time"
+
+ "v.io/v23/context"
+ "v.io/v23/naming"
+ "v.io/v23/security"
+ "v.io/v23/vdl"
+ "v.io/v23/verror"
+)
+
+func makeResultPtrs(ins []interface{}) []interface{} {
+ outs := make([]interface{}, len(ins))
+ for ix, in := range ins {
+ typ := reflect.TypeOf(in)
+ if typ == nil {
+ // Nil indicates interface{}.
+ var empty interface{}
+ typ = reflect.ValueOf(&empty).Elem().Type()
+ }
+ outs[ix] = reflect.New(typ).Interface()
+ }
+ return outs
+}
+
+func checkResultPtrs(t *testing.T, name string, gotptrs, want []interface{}) {
+ for ix, res := range gotptrs {
+ got := reflect.ValueOf(res).Elem().Interface()
+ want := want[ix]
+ switch g := got.(type) {
+ case verror.E:
+ w, ok := want.(verror.E)
+ // Don't use reflect.DeepEqual on verrors since they contain
+ // a list of stack PCs which will be different.
+ if !ok {
+ t.Errorf("%s result %d got type %T, want %T", name, ix, g, w)
+ }
+ if !verror.Is(g, w.ID) {
+ t.Errorf("%s result %d got %v, want %v", name, ix, g, w)
+ }
+ default:
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("%s result %d got %v, want %v", name, ix, got, want)
+ }
+ }
+
+ }
+}
+
+func mkCaveat(cav security.Caveat, err error) security.Caveat {
+ if err != nil {
+ panic(err)
+ }
+ return cav
+}
+
+func bless(blesser, blessed security.Principal, extension string, caveats ...security.Caveat) security.Blessings {
+ if len(caveats) == 0 {
+ caveats = append(caveats, security.UnconstrainedUse())
+ }
+ b, err := blesser.Bless(blessed.PublicKey(), blesser.BlessingStore().Default(), extension, caveats[0], caveats[1:]...)
+ if err != nil {
+ panic(err)
+ }
+ return b
+}
+
+// We need a special way to create contexts for tests: we can't create a
+// real runtime inside the runtime implementation, so we use a fake one
+// that panics if used. The runtime implementation should never use the
+// Runtime from a context.
+func testContext() *context.T {
+ ctx, _ := context.WithTimeout(testContextWithoutDeadline(), 20*time.Second)
+ return ctx
+}
+
+type mockSecurityContext struct {
+ p security.Principal
+ l, r security.Blessings
+ c *context.T
+}
+
+func (c *mockSecurityContext) Timestamp() (t time.Time) { return }
+func (c *mockSecurityContext) Method() string { return "" }
+func (c *mockSecurityContext) MethodTags() []*vdl.Value { return nil }
+func (c *mockSecurityContext) Suffix() string { return "" }
+func (c *mockSecurityContext) RemoteDischarges() map[string]security.Discharge { return nil }
+func (c *mockSecurityContext) LocalEndpoint() naming.Endpoint { return nil }
+func (c *mockSecurityContext) RemoteEndpoint() naming.Endpoint { return nil }
+func (c *mockSecurityContext) LocalPrincipal() security.Principal { return c.p }
+func (c *mockSecurityContext) LocalBlessings() security.Blessings { return c.l }
+func (c *mockSecurityContext) RemoteBlessings() security.Blessings { return c.r }
+func (c *mockSecurityContext) Context() *context.T { return c.c }
diff --git a/profiles/internal/ipc/timer.go b/profiles/internal/ipc/timer.go
new file mode 100644
index 0000000..fa148d4
--- /dev/null
+++ b/profiles/internal/ipc/timer.go
@@ -0,0 +1,32 @@
+package ipc
+
+import (
+ "time"
+)
+
+// timer is a replacement for time.Timer; the only difference is that
+// its channel has type chan struct{} and is closed when the timer expires,
+// which we need in some places.
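+//
+// A minimal usage sketch (the duration is illustrative):
+//
+// t := newTimer(100 * time.Millisecond)
+// select {
+// case <-t.C:
+// // The timer expired; t.C is closed, so any further receives also
+// // return immediately.
+// }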
+type timer struct {
+ base *time.Timer
+ C <-chan struct{}
+}
+
+func newTimer(d time.Duration) *timer {
+ c := make(chan struct{}, 0)
+ base := time.AfterFunc(d, func() {
+ close(c)
+ })
+ return &timer{
+ base: base,
+ C: c,
+ }
+}
+
+func (t *timer) Stop() bool {
+ return t.base.Stop()
+}
+
+func (t *timer) Reset(d time.Duration) bool {
+ return t.base.Reset(d)
+}
diff --git a/profiles/internal/ipc/timer_test.go b/profiles/internal/ipc/timer_test.go
new file mode 100644
index 0000000..e7345d6
--- /dev/null
+++ b/profiles/internal/ipc/timer_test.go
@@ -0,0 +1,31 @@
+package ipc
+
+import (
+ "testing"
+ "time"
+)
+
+func TestTimer(t *testing.T) {
+ test := newTimer(time.Millisecond)
+ if _, ok := <-test.C; ok {
+ t.Errorf("Expected the channel to be closed.")
+ }
+
+ // Test resetting.
+ test = newTimer(time.Hour)
+ if reset := test.Reset(time.Millisecond); !reset {
+ t.Errorf("Expected to successfully reset.")
+ }
+ if _, ok := <-test.C; ok {
+ t.Errorf("Expected the channel to be closed.")
+ }
+
+ // Test stop.
+ test = newTimer(100 * time.Millisecond)
+ test.Stop()
+ select {
+ case <-test.C:
+ t.Errorf("the test timer should have been stopped.")
+ case <-time.After(200 * time.Millisecond):
+ }
+}
diff --git a/profiles/internal/ipc/v23_test.go b/profiles/internal/ipc/v23_test.go
new file mode 100644
index 0000000..614873d
--- /dev/null
+++ b/profiles/internal/ipc/v23_test.go
@@ -0,0 +1,30 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated via go generate.
+// DO NOT UPDATE MANUALLY
+package ipc_test
+
+import "fmt"
+import "testing"
+import "os"
+
+import "v.io/x/ref/lib/modules"
+import "v.io/x/ref/lib/testutil"
+
+func init() {
+ modules.RegisterChild("childPing", ``, childPing)
+}
+
+func TestMain(m *testing.M) {
+ testutil.Init()
+ if modules.IsModulesProcess() {
+ if err := modules.Dispatch(); err != nil {
+ fmt.Fprintf(os.Stderr, "modules.Dispatch failed: %v\n", err)
+ os.Exit(1)
+ }
+ return
+ }
+ os.Exit(m.Run())
+}
diff --git a/profiles/internal/ipc/version/version.go b/profiles/internal/ipc/version/version.go
new file mode 100644
index 0000000..a45d633
--- /dev/null
+++ b/profiles/internal/ipc/version/version.go
@@ -0,0 +1,160 @@
+package version
+
+import (
+ "fmt"
+
+ inaming "v.io/x/ref/profiles/internal/naming"
+
+ "v.io/v23/ipc/version"
+ "v.io/v23/naming"
+)
+
+// Range represents a range of IPC versions.
+type Range struct {
+ Min, Max version.IPCVersion
+}
+
+var (
+ // SupportedRange represents the range of protocol versions supported by this
+ // implementation.
+ // Max should be incremented whenever we make a protocol
+ // change that's not both forward and backward compatible.
+ // Min should be incremented whenever we want to remove
+ // support for old protocol versions.
+ SupportedRange = &Range{Min: version.IPCVersion5, Max: version.IPCVersion7}
+
+ // Export the methods on SupportedRange.
+ Endpoint = SupportedRange.Endpoint
+ ProxiedEndpoint = SupportedRange.ProxiedEndpoint
+ CommonVersion = SupportedRange.CommonVersion
+ CheckCompatibility = SupportedRange.CheckCompatibility
+)
+
+var (
+ NoCompatibleVersionErr = fmt.Errorf("No compatible IPC version available")
+ UnknownVersionErr = fmt.Errorf("There was not enough information to determine a version.")
+)
+
+// IsVersionError returns true if err is a versioning related error.
+func IsVersionError(err error) bool {
+ return err == NoCompatibleVersionErr || err == UnknownVersionErr
+}
+
+// Endpoint returns an endpoint with the Min/MaxIPCVersion properly filled in
+// to match this implementation's supported protocol versions.
+func (r *Range) Endpoint(protocol, address string, rid naming.RoutingID) naming.Endpoint {
+ return &inaming.Endpoint{
+ Protocol: protocol,
+ Address: address,
+ RID: rid,
+ MinIPCVersion: r.Min,
+ MaxIPCVersion: r.Max,
+ }
+}
+
+// intersectRanges finds the intersection between ranges
+// supported by two endpoints. We assume that if one of the endpoints
+// has an UnknownVersion, it has the same extent as the other
+// endpoint. If both endpoints have Unknown for a
+// version number, an error is produced.
+// For example:
+// a == (2, 4) and b == (Unknown, Unknown), intersect(a,b) == (2, 4)
+// a == (2, Unknown) and b == (3, 4), intersect(a,b) == (3, 4)
+func intersectRanges(amin, amax, bmin, bmax version.IPCVersion) (min, max version.IPCVersion, err error) {
+ u := version.UnknownIPCVersion
+
+ min = amin
+ if min == u || (bmin != u && bmin > min) {
+ min = bmin
+ }
+ max = amax
+ if max == u || (bmax != u && bmax < max) {
+ max = bmax
+ }
+
+ if min == u || max == u {
+ err = UnknownVersionErr
+ } else if min > max {
+ err = NoCompatibleVersionErr
+ }
+ return
+}
+
+func intersectEndpoints(a, b *inaming.Endpoint) (min, max version.IPCVersion, err error) {
+ return intersectRanges(a.MinIPCVersion, a.MaxIPCVersion, b.MinIPCVersion, b.MaxIPCVersion)
+}
+
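+// Intersect returns the intersection of the two ranges; for example,
+// intersecting (1, 5) with (3, 7) yields (3, 5). It returns an error if the
+// ranges do not overlap or cannot be determined.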
+func (r1 *Range) Intersect(r2 *Range) (*Range, error) {
+ min, max, err := intersectRanges(r1.Min, r1.Max, r2.Min, r2.Max)
+ if err != nil {
+ return nil, err
+ }
+ r := &Range{Min: min, Max: max}
+ return r, nil
+}
+
+// ProxiedEndpoint returns an endpoint with the Min/MaxIPCVersion properly filled in
+// to match the intersection of capabilities of this process and the proxy.
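+// For example, if this process supports versions (2, 5) and the proxy's
+// endpoint advertises (4, 7), the returned endpoint advertises (4, 5).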
+func (r *Range) ProxiedEndpoint(rid naming.RoutingID, proxy naming.Endpoint) (naming.Endpoint, error) {
+ proxyEP, ok := proxy.(*inaming.Endpoint)
+ if !ok {
+ return nil, fmt.Errorf("unrecognized naming.Endpoint type %T", proxy)
+ }
+
+ ep := &inaming.Endpoint{
+ Protocol: proxyEP.Protocol,
+ Address: proxyEP.Address,
+ RID: rid,
+ MinIPCVersion: r.Min,
+ MaxIPCVersion: r.Max,
+ }
+
+ // This is the endpoint we are going to advertise. It should only claim to support versions in
+ // the intersection of those we support and those the proxy supports.
+ var err error
+ ep.MinIPCVersion, ep.MaxIPCVersion, err = intersectEndpoints(ep, proxyEP)
+ if err != nil {
+ return nil, fmt.Errorf("attempting to register with incompatible proxy: %s", proxy)
+ }
+ return ep, nil
+}
+
+// CommonVersion determines which version of the IPC protocol should be used
+// between two endpoints. Returns an error if the resulting version is incompatible
+// with this IPC implementation.
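+// For example, endpoints advertising (1, 5) and (3, 7) have common version 5,
+// provided 5 falls within this implementation's supported range.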
+func (r *Range) CommonVersion(a, b naming.Endpoint) (version.IPCVersion, error) {
+ aEP, ok := a.(*inaming.Endpoint)
+ if !ok {
+ return 0, fmt.Errorf("Unrecognized naming.Endpoint type: %T", a)
+ }
+ bEP, ok := b.(*inaming.Endpoint)
+ if !ok {
+ return 0, fmt.Errorf("Unrecognized naming.Endpoint type: %T", b)
+ }
+
+ _, max, err := intersectEndpoints(aEP, bEP)
+ if err != nil {
+ return 0, err
+ }
+
+ // We want to use the maximum common version of the protocol. We just
+ // need to make sure that it is supported by this IPC implementation.
+ if max < r.Min || max > r.Max {
+ return version.UnknownIPCVersion, NoCompatibleVersionErr
+ }
+ return max, nil
+}
+
+// CheckCompatibility returns an error if the given endpoint is incompatible
+// with this IPC implementation. It returns nil otherwise.
+func (r *Range) CheckCompatibility(remote naming.Endpoint) error {
+ remoteEP, ok := remote.(*inaming.Endpoint)
+ if !ok {
+ return fmt.Errorf("Unrecognized naming.Endpoint type: %T", remote)
+ }
+
+ _, _, err := intersectRanges(r.Min, r.Max,
+ remoteEP.MinIPCVersion, remoteEP.MaxIPCVersion)
+
+ return err
+}
diff --git a/profiles/internal/ipc/version/version_test.go b/profiles/internal/ipc/version/version_test.go
new file mode 100644
index 0000000..95222ec
--- /dev/null
+++ b/profiles/internal/ipc/version/version_test.go
@@ -0,0 +1,115 @@
+package version
+
+import (
+ "testing"
+
+ inaming "v.io/x/ref/profiles/internal/naming"
+
+ "v.io/v23/ipc/version"
+ "v.io/v23/naming"
+)
+
+func TestCommonVersion(t *testing.T) {
+ r := &Range{Min: 1, Max: 3}
+
+ type testCase struct {
+ localMin, localMax version.IPCVersion
+ remoteMin, remoteMax version.IPCVersion
+ expectedVer version.IPCVersion
+ expectedErr error
+ }
+ tests := []testCase{
+ {0, 0, 0, 0, 0, UnknownVersionErr},
+ {0, 1, 2, 3, 0, NoCompatibleVersionErr},
+ {2, 3, 0, 1, 0, NoCompatibleVersionErr},
+ {0, 5, 5, 6, 0, NoCompatibleVersionErr},
+ {0, 2, 2, 4, 2, nil},
+ {0, 2, 1, 3, 2, nil},
+ {1, 3, 1, 3, 3, nil},
+ {3, 3, 3, 3, 3, nil},
+ }
+ for _, tc := range tests {
+ local := &inaming.Endpoint{
+ MinIPCVersion: tc.localMin,
+ MaxIPCVersion: tc.localMax,
+ }
+ remote := &inaming.Endpoint{
+ MinIPCVersion: tc.remoteMin,
+ MaxIPCVersion: tc.remoteMax,
+ }
+ if ver, err := r.CommonVersion(local, remote); ver != tc.expectedVer || err != tc.expectedErr {
+ t.Errorf("Unexpected result for local: %v, remote: %v. Got (%d, %v) wanted (%d, %v)",
+ local, remote, ver, err, tc.expectedVer, tc.expectedErr)
+ }
+ }
+}
+
+func TestProxiedEndpoint(t *testing.T) {
+ type testCase struct {
+ supportMin, supportMax version.IPCVersion
+ proxyMin, proxyMax version.IPCVersion
+ outMin, outMax version.IPCVersion
+ expectError bool
+ }
+ tests := []testCase{
+ {1, 3, 1, 2, 1, 2, false},
+ {1, 3, 3, 5, 3, 3, false},
+ {1, 3, 0, 1, 1, 1, false},
+ {1, 3, 0, 1, 1, 1, false},
+ {0, 0, 0, 0, 0, 0, true},
+ {2, 5, 0, 1, 0, 0, true},
+ {2, 5, 6, 7, 0, 0, true},
+ }
+
+ rid := naming.FixedRoutingID(1)
+ for _, tc := range tests {
+ r := &Range{Min: tc.supportMin, Max: tc.supportMax}
+ proxy := &inaming.Endpoint{
+ MinIPCVersion: tc.proxyMin,
+ MaxIPCVersion: tc.proxyMax,
+ }
+ if out, err := r.ProxiedEndpoint(rid, proxy); err != nil {
+ if !tc.expectError {
+ t.Errorf("Unexpected error for case %+v: %v", tc, err)
+ }
+ } else {
+ if tc.expectError {
+ t.Errorf("Expected Error, but got result for test case %+v", tc)
+ continue
+ }
+ ep := out.(*inaming.Endpoint)
+ if ep.MinIPCVersion != tc.outMin || ep.MaxIPCVersion != tc.outMax {
+ t.Errorf("Unexpected range for case %+v. Got (%d, %d) want (%d, %d)",
+ tc, ep.MinIPCVersion, ep.MaxIPCVersion, tc.outMin, tc.outMax)
+ }
+ }
+ }
+}
+
+func TestCheckCompatibility(t *testing.T) {
+ type testCase struct {
+ supportMin, supportMax version.IPCVersion
+ remoteMin, remoteMax version.IPCVersion
+ expectedError error
+ }
+ tests := []testCase{
+ {0, 0, 0, 0, UnknownVersionErr},
+ {5, 10, 1, 4, NoCompatibleVersionErr},
+ {1, 4, 5, 10, NoCompatibleVersionErr},
+ {1, 10, 2, 9, nil},
+ {3, 8, 1, 4, nil},
+ {3, 8, 7, 9, nil},
+ }
+
+ for _, tc := range tests {
+ r := &Range{Min: tc.supportMin, Max: tc.supportMax}
+ remote := &inaming.Endpoint{
+ MinIPCVersion: tc.remoteMin,
+ MaxIPCVersion: tc.remoteMax,
+ }
+ if err := r.CheckCompatibility(remote); err != tc.expectedError {
+ t.Errorf("Unexpected error for case %+v: got %v, wanted %v",
+ tc, err, tc.expectedError)
+ }
+ }
+}
diff --git a/profiles/internal/lib/bqueue/bqueue.go b/profiles/internal/lib/bqueue/bqueue.go
new file mode 100644
index 0000000..caab5ce
--- /dev/null
+++ b/profiles/internal/lib/bqueue/bqueue.go
@@ -0,0 +1,128 @@
+// Package bqueue implements several kinds of buffer queues, as an N-writer,
+// 1-reader queue. By "buffer," we mean iobuf.Slice values. Each writer has a
+// separate bounded queue to which it writes buffers. The queue also supports
+// flow control.
+//
+// Initialization:
+//
+// // Create a new queue using one of the implementations
+// // (currently only bqueue/drrqueue).
+// q := drrqueue.New(quantum)
+//
+// Reader API:
+//
+// // Returns the next buffer in the queue, blocking until there is one
+// // available. Returns with an error if <q> is closed:
+// _, buf, err := q.Get()
+//
+// Writer API:
+//
+// // Allocate a new Writer with the id, priority, and space for N elements.
+// w := q.NewWriter(id, priority, N)
+//
+// // Add <buf> to the <w>. Blocks until there is space in the Writer.
+// // Aborts if <cancel> is closed or contains a value.
+// err := w.Put(buf, cancel)
+//
+// w.Release(N) // Make the next N buffers available to q.Get().
+//
+// The w.Release() method is used for rate limiting. Buffers can be added with
+// w.Put(), but they are not passed to q.Get() until they are released.
+package bqueue
+
+import (
+ "errors"
+
+ "v.io/x/ref/profiles/internal/lib/iobuf"
+)
+
+// Priority is an integer priority. Smaller is greater priority.
+//
+// For performance, priorities should be dense and start from 0. Some
+// implementations, like drrqueue, use space linear in the max priority.
+type Priority uint // TODO(jyh): Change the dense requirement if we need it.
+
+// ID is the type of Writer identifiers.
+type ID int64
+
+// FlushFunc is the type of flushing functions. See T.Get for more info.
+type FlushFunc func() error
+
+// T specifies a buffer queue. The NewWriter method is used to create new
+// writer queues, and the Get method returns the next buffer to be served.
+type T interface {
+ Close()
+ String() string
+
+ // Find returns the Writer with the specified ID. Returns nil if there is
+ // no such writer.
+ Find(id ID) Writer
+
+ // Get returns the next contents of the queue. Get returns a Writer and an
+ // array of elements dequeued from the Writer. The number of elements
+ // returned depends on the implementation (for example, drrqueue specifies a
+ // cap on how many bytes can be dequeued per Writer per round-robin cycle).
+ // In addition, multiple elements are returned so that iobuf.Coalesce() can
+ // be used to coalesce the contents.
+ //
+ // Get blocks until at least one element can be returned or the queue is
+ // closed. If non-nil, the <flush> function is called just before Get
+ // blocks.
+ //
+ // If a Writer is closed (the Writer's Close() method was called), then Get
+ // returns the Writer with empty contents. The caller should call
+ // writer.Shutdown() to remove the Writer and prevent it from being returned
+ // in subsequent calls.
+ //
+ // It is not safe to call Get() concurrently.
+ Get(flush FlushFunc) (Writer, []*iobuf.Slice, error)
+
+ // NewWriter allocates a new Writer.
+ NewWriter(id ID, p Priority, n int) (Writer, error)
+}
+
+// Writer represents a single writer queue. Writer queues are served
+// according to the policy defined by the container queue T.
+type Writer interface {
+ ID() ID
+
+ // Close closes the Writer, without discarding the contents. All Put
+ // operations currently running may, or may not, add their values to the
+ // Writer. All Put operations that happen-after the Close will fail.
+ Close()
+
+ // Shutdown closes the Writer as in Close and also discards the contents.
+ // If removeWriter is true the writer will be removed from the
+ // associated T's queue entirely, otherwise the now empty writer will
+ // remain and eventually be returned by a T.Get.
+ Shutdown(removeWriter bool)
+
+ // IsClosed returns true if the Writer is closed.
+ IsClosed() bool
+
+ // IsDrained returns true if the Writer is closed and has no data.
+ IsDrained() bool
+
+ // Put adds an element to the queue. Put blocks until there is space in
+ // the Writer. The element is not made available to T.Get until it is
+ // released with the Release method. Returns an error if the queue is
+ // closed or the operation is cancelled.
+ Put(buf *iobuf.Slice, cancel <-chan struct{}) error
+
+ // TryPut is like Put, but it is nonblocking.
+ TryPut(buf *iobuf.Slice) error
+
+ // Release allows the next <n> elements to be removed from the Writer and
+ // passed to Get. If <n> is negative, all messages are released and flow
+ // control is no longer used.
+ Release(n int) error
+}
+
+var (
+ ErrBQueueIsClosed = errors.New("bqueue: queue is closed")
+ ErrWriterAlreadyExists = errors.New("bqueue: writer already exists with this identifier")
+ ErrWriterIsClosed = errors.New("bqueue: writer is closed")
+ ErrCantToggleFlowControl = errors.New("bqueue: can't turn on flow control when it is off")
+ ErrCancelled = errors.New("bqueue: operation was canceled")
+ ErrTryAgain = errors.New("bqueue: writer is not ready, try again")
+)
diff --git a/profiles/internal/lib/bqueue/drrqueue/drrqueue.go b/profiles/internal/lib/bqueue/drrqueue/drrqueue.go
new file mode 100644
index 0000000..cf3b82f
--- /dev/null
+++ b/profiles/internal/lib/bqueue/drrqueue/drrqueue.go
@@ -0,0 +1,416 @@
+// Package drrqueue implements a deficit round-robin buffer queue.
+//
+// Efficient Fair Queueing Using Deficit Round-Robin
+// M. Shreedhar and George Varghese
+// IEEE/ACM Transactions on Networking, Vol. 4, No. 3, June 1996
+//
+// The queue is an N-writer, 1-reader queue. By "buffer," we mean iobuf.Slice
+// values.
+//
+// Writers have a priority that takes precedence over the deficit. Writers
+// with greater priority are served first. Deficits are not even updated for
+// lower priorities when higher priority Writers are being served.
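+//
+// A minimal usage sketch (the quantum, IDs, priorities, and byte counts are
+// illustrative):
+//
+// q := drrqueue.New(1 << 14) // each writer may send 16K bytes per cycle
+// w, _ := q.NewWriter(0, 0, 1<<16) // id 0, priority 0, 64K byte bound
+// w.Put(iobuf.NewSlice([]byte("hi")), nil)
+// w.Release(2) // make the two bytes visible to Get
+// _, bufs, _ := q.Get(nil) // reader side: returns w's released buffers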
+package drrqueue
+
+// LOCKING DISCIPLINE:
+//
+// Each Writer has a lock, and so does T. Locks are always taken in order:
+// Writer.mutex first, then T.mutex. Never take the locks in the opposite
+// order.
+
+import (
+ "fmt"
+ "io"
+ "sync"
+
+ "v.io/x/ref/lib/deque"
+ vsync "v.io/x/ref/lib/sync"
+ "v.io/x/ref/profiles/internal/lib/bqueue"
+ "v.io/x/ref/profiles/internal/lib/iobuf"
+)
+
+// T defines the type of round-robin buffer queues. The queue has multiple
+// input Writer queues that are served according to the deficit round-robin
+// policy.
+type T struct {
+ mutex sync.Mutex
+ cond *sync.Cond
+
+ // active contains an array of active Writers, indexed by Priority.
+ active [][]*writer
+
+ // writers contains all of the Writers.
+ writers map[bqueue.ID]*writer
+
+ // quantum is the amount of data that each Writer can send per round-robin cycle.
+ quantum int
+
+ isClosed bool
+}
+
+// Writer is a single bounded input queue supporting a Put operation.
+type writer struct {
+ id bqueue.ID
+ q *T
+ priority bqueue.Priority
+
+ // free contains the number of free bytes in the writer queue.
+ //
+ // INVARIANT: free + size == size of the queue (a constant). This can't be
+ // computed, because <free> is a semaphore, but it is true nonetheless.
+ free *vsync.Semaphore
+
+ // The following are all protected by the mutex.
+ mutex sync.Mutex
+ isClosed bool
+ contents deque.T
+ size int // Total number of bytes in the queue.
+ released int // Number of bytes that can be dequeued (negative for unlimited).
+
+ // The following are protected by q.mutex. mutex is not required.
+ isActive activeMode
+ deficit int
+}
+
+// activeMode has three states:
+// busy: The Writer is being updated by Get. It is not in the active list.
+// idle: The Writer is inactive. It is not in the active list.
+// active: The Writer is in the active list.
+type activeMode int
+
+const (
+ busy activeMode = iota
+ idle
+ active
+)
+
+// ID returns the numeric identifier for the queue.
+func (w *writer) ID() bqueue.ID {
+ return w.id
+}
+
+// Close closes the writer, without discarding the contents. All Put operations
+// currently running may, or may not, add their values to the queue. All Put
+// operations that happen-after the Close will fail.
+func (w *writer) Close() {
+ w.mutex.Lock()
+ w.isClosed = true
+ w.updateStateLocked(false, 0)
+ w.mutex.Unlock()
+ w.free.Close()
+}
+
+// Shutdown closes the writer as in Close and also discards the contents.
+// If removeWriter is true the writer will be removed from the
+// associated T's queue entirely, otherwise the now empty writer will
+// remain and eventually be returned by a T.Get.
+func (w *writer) Shutdown(removeWriter bool) {
+ w.mutex.Lock()
+
+ w.isClosed = true
+ if !removeWriter {
+ w.contents.Clear()
+ w.size = 0
+ w.updateStateLocked(false, 0)
+ }
+
+ w.mutex.Unlock()
+
+ if removeWriter {
+ w.q.removeWriter(w)
+ }
+ w.free.Close()
+}
+
+// IsClosed returns true iff the Writer is closed.
+func (w *writer) IsClosed() bool {
+ w.mutex.Lock()
+ defer w.mutex.Unlock()
+ return w.isClosed
+}
+
+// IsDrained returns true iff the Writer is closed and empty.
+func (w *writer) IsDrained() bool {
+ w.mutex.Lock()
+ defer w.mutex.Unlock()
+ return w.isClosed && w.size == 0
+}
+
+// Put adds an element to the queue. Put blocks until there is space in the
+// Writer. The element is not made available to T.Get until it is released with
+// the Release method. Returns an error if the queue is closed or the operation
+// is cancelled.
+func (w *writer) Put(buf *iobuf.Slice, cancel <-chan struct{}) error {
+ // Block until there is space in the Writer.
+ if err := w.free.DecN(uint(buf.Size()), cancel); err != nil {
+ return err
+ }
+ return w.putContents(buf)
+}
+
+// TryPut is like Put, but it is nonblocking.
+func (w *writer) TryPut(buf *iobuf.Slice) error {
+ if err := w.free.TryDecN(uint(buf.Size())); err != nil {
+ return err
+ }
+ return w.putContents(buf)
+}
+
+func (w *writer) putContents(buf *iobuf.Slice) error {
+ w.mutex.Lock()
+ defer w.mutex.Unlock()
+ if w.isClosed {
+ return bqueue.ErrWriterIsClosed
+ }
+ w.contents.PushBack(buf)
+ w.size += buf.Size()
+ w.updateStateLocked(false, 0)
+ return nil
+}
+
+// Release allows the next <bytes> to be removed from the queue and passed to
+// Get. If <bytes> is negative, all messages are released and flow control is
+// no longer used.
+func (w *writer) Release(bytes int) error {
+ w.mutex.Lock()
+ defer w.mutex.Unlock()
+ if w.released < 0 && bytes >= 0 {
+ return bqueue.ErrCantToggleFlowControl
+ }
+ if bytes < 0 {
+ w.released = -1
+ } else {
+ w.released += bytes
+ }
+ w.updateStateLocked(false, 0)
+ return nil
+}
+
+// getContents returns as much data as possible, up to the deficit, and then
+// updates the state.
+func (w *writer) getContents(deficit int) ([]*iobuf.Slice, bool) {
+ w.mutex.Lock()
+ defer w.mutex.Unlock()
+
+ // Collect the contents into bufs
+ if w.released >= 0 && deficit > w.released {
+ deficit = w.released
+ }
+
+ // The Writer is closed and drained.
+ if w.size == 0 && w.isClosed {
+ return nil, true
+ }
+
+ var consumed int
+ var bufs []*iobuf.Slice
+ for w.contents.Size() != 0 {
+ b := w.contents.Front().(*iobuf.Slice)
+ size := consumed + b.Size()
+ if size > deficit {
+ break
+ }
+ consumed = size
+ bufs = append(bufs, b)
+ w.contents.PopFront()
+ }
+
+ // Update counters by number of bytes consumed.
+ w.size -= consumed
+ // Decrement released, but only if it is nonnegative.
+ if w.released >= 0 {
+ w.released -= consumed
+ if w.released < 0 {
+ panic("released is negative")
+ }
+ }
+ w.updateStateLocked(true, consumed)
+
+ return bufs, bufs != nil
+}
+
+// updateStateLocked updates the ready state of the Writer.
+//
+// REQUIRES: w.mutex is locked.
+func (w *writer) updateStateLocked(overrideBusy bool, consumed int) {
+ w.free.IncN(uint(consumed))
+
+ // The w.isActive state does not depend on the deficit.
+ isActive := (w.size == 0 && w.isClosed) ||
+ (w.size != 0 && (w.released < 0 || w.contents.Front().(*iobuf.Slice).Size() <= w.released))
+ w.q.updateWriterState(w, overrideBusy, isActive, consumed)
+}
+
+// New returns a new T. Each writer is allowed to send quantum bytes per round-robin cycle.
+func New(quantum int) bqueue.T {
+ q := &T{writers: make(map[bqueue.ID]*writer), quantum: quantum}
+ q.cond = sync.NewCond(&q.mutex)
+ return q
+}
+
+// Close closes the queue.
+func (q *T) Close() {
+ q.mutex.Lock()
+ writers := q.writers
+ q.isClosed = true
+ q.writers = make(map[bqueue.ID]*writer)
+ for i := 0; i != len(q.active); i++ {
+ q.active[i] = nil
+ }
+ q.cond.Signal()
+ q.mutex.Unlock()
+
+ // Close the queues outside the q.mutex lock to preserve lock order.
+ for _, w := range writers {
+ w.Shutdown(true)
+ }
+}
+
+// NewWriter allocates a new Writer.
+func (q *T) NewWriter(id bqueue.ID, p bqueue.Priority, bytes int) (bqueue.Writer, error) {
+ w := &writer{
+ id: id,
+ priority: p,
+ q: q,
+ free: vsync.NewSemaphore(),
+ isActive: idle,
+ }
+ w.free.IncN(uint(bytes))
+
+ q.mutex.Lock()
+ defer q.mutex.Unlock()
+ if q.isClosed {
+ return nil, bqueue.ErrBQueueIsClosed
+ }
+ q.addPriorityLocked(p)
+ if _, ok := q.writers[w.id]; ok {
+ return nil, bqueue.ErrWriterAlreadyExists
+ }
+ q.writers[w.id] = w
+ return w, nil
+}
+
+// String provides a string representation of the queue.
+func (q *T) String() string {
+ q.mutex.Lock()
+ defer q.mutex.Unlock()
+ s := "q{"
+ for _, w := range q.writers {
+ s += fmt.Sprintf("Writer{id: %d, size: %d, released: %d}, ", w.id, w.size, w.released)
+ }
+ s += "}"
+ return s
+}
+
+// Find returns the queue with the specified ID.
+func (q *T) Find(id bqueue.ID) bqueue.Writer {
+ q.mutex.Lock()
+ defer q.mutex.Unlock()
+ w, ok := q.writers[id]
+ if !ok {
+ // Don't return w; that would return a non-nil Writer interface
+ // containing nil.
+ return nil
+ }
+ return w
+}
+
+// Get returns the next element from a queue. Get blocks until a buffer is
+// available or the queue is closed.
+func (q *T) Get(flush bqueue.FlushFunc) (bqueue.Writer, []*iobuf.Slice, error) {
+ for {
+ w, deficit, err := q.nextWriter(flush)
+ if w == nil {
+ return nil, nil, err
+ }
+ bufs, ok := w.getContents(deficit)
+ if ok {
+ return w, bufs, nil
+ }
+ }
+}
+
+// nextWriter walks through the pending buffers and returns the first active
+// Writer. The writer is removed from the active queue and made 'busy' so that
+// it will not be re-added to the active queue.
+func (q *T) nextWriter(flush bqueue.FlushFunc) (*writer, int, error) {
+ q.mutex.Lock()
+ defer q.mutex.Unlock()
+ for {
+ if q.isClosed {
+ return nil, 0, io.EOF
+ }
+ for p, writers := range q.active {
+ if len(writers) != 0 {
+ w := writers[0]
+ w.isActive = busy
+ w.deficit += q.quantum
+ q.active[p] = writers[1:]
+ return w, w.deficit, nil
+ }
+ }
+ if flush != nil {
+ flush()
+ }
+ q.cond.Wait()
+ }
+}
+
+// addPriorityLocked adds a ready queue with the specified priority level.
+//
+// REQUIRES: q.mutex is locked.
+func (q *T) addPriorityLocked(p bqueue.Priority) {
+ if int(p) >= len(q.active) {
+ newActive := make([][]*writer, int(p)+1)
+ copy(newActive, q.active)
+ q.active = newActive
+ }
+}
+
+// removeWriter removes the queue from the q.
+//
+// NOTE: does not require that w.mutex is locked.
+func (q *T) removeWriter(w *writer) {
+ q.mutex.Lock()
+ if w.isActive == active {
+ // Remove the writer from the active queue.
+ active := q.active[w.priority]
+ for i, w2 := range active {
+ if w2 == w {
+ copy(active[i:], active[i+1:])
+ q.active[w.priority] = active[:len(active)-1]
+ break
+ }
+ }
+ }
+ w.isActive = idle
+ delete(q.writers, w.id)
+ q.mutex.Unlock()
+}
+
+// updateWriterState updates the active state of the writer within the queue.
+//
+// REQUIRES: w.mutex is locked.
+func (q *T) updateWriterState(w *writer, overrideBusy bool, isActive bool, consumed int) {
+ q.mutex.Lock()
+ if isActive {
+ if w.isActive == idle || w.isActive == busy && overrideBusy {
+ q.active[w.priority] = append(q.active[w.priority], w)
+ w.isActive = active
+ w.deficit -= consumed
+ if w.deficit < 0 {
+ panic("deficit is negative")
+ }
+ q.cond.Signal()
+ }
+ } else {
+ if w.isActive == active {
+ panic("Writer is active when it should not be")
+ }
+ if overrideBusy {
+ w.isActive = idle
+ }
+ w.deficit = 0
+ }
+ q.mutex.Unlock()
+}
diff --git a/profiles/internal/lib/bqueue/drrqueue/drrqueue_test.go b/profiles/internal/lib/bqueue/drrqueue/drrqueue_test.go
new file mode 100644
index 0000000..d928a6b
--- /dev/null
+++ b/profiles/internal/lib/bqueue/drrqueue/drrqueue_test.go
@@ -0,0 +1,264 @@
+package drrqueue
+
+import (
+ "log"
+ "runtime"
+ "testing"
+
+ "v.io/x/ref/profiles/internal/lib/bqueue"
+ "v.io/x/ref/profiles/internal/lib/iobuf"
+)
+
+const (
+ testQuantum = 1 << 14 // 16K
+ iobufSize = 1 << 16 // 64K
+)
+
+// concat concatenates the buffers into a string.
+func concat(bufs []*iobuf.Slice) string {
+ buf := []byte{}
+ for _, b := range bufs {
+ buf = append(buf, b.Contents...)
+ b.Release()
+ }
+ return string(buf)
+}
+
+// mkbufs makes an iobuf.Slice from a string.
+func mkbufs(s string) *iobuf.Slice {
+ return iobuf.NewSlice([]byte(s))
+}
+
+// makeBuffer makes a byte buffer filled with the initial char.
+func makeBuffer(size int, c byte) string {
+ b := make([]byte, size)
+ for i := 0; i != size; i++ {
+ b[i] = c
+ }
+ return string(b)
+}
+
+// A "reader" copies data from the q to a string channel.
+func startReader(q bqueue.T) chan string {
+ c := make(chan string)
+ go func() {
+ for {
+ _, bufs, err := q.Get(nil)
+ if err != nil {
+ log.Printf("Reader: %s", err)
+ break
+ }
+ c <- concat(bufs)
+ }
+ c <- "DONE"
+ }()
+ return c
+}
+
+// expectGet compares the sequence returned from q.Get() against a sequence
+// of expected strings.
+func expectGet(t *testing.T, q bqueue.T, strings []string) {
+ _, file, line, _ := runtime.Caller(1)
+ for _, s1 := range strings {
+ _, buf, err := q.Get(nil)
+ if err != nil {
+ t.Errorf("%s(%d): Unexpected error: %v", file, line, err)
+ break
+ }
+ s2 := concat(buf)
+ if s2 != s1 {
+ t.Errorf("%s(%d): Expected %q, but received %q", file, line, s1, s2)
+ }
+ }
+}
+
+// TestSimple tests a Put/Release/Get sequence.
+func TestSimple(t *testing.T) {
+ q := New(testQuantum)
+ w, _ := q.NewWriter(0, 0, 5)
+ log.Printf("PutV")
+ w.Put(mkbufs("Hello"), nil)
+ log.Printf("Release")
+ w.Release(5)
+ log.Printf("Get")
+ w2, buf, err := q.Get(nil)
+ if err != nil {
+ t.Errorf("Unexpected error: %s", err)
+ }
+ s := concat(buf)
+ if s != "Hello" {
+ t.Errorf("Expected 'Hello', received %q", s)
+ }
+ if w2 != w {
+ t.Errorf("Writer mismatch")
+ }
+}
+
+func TestShutdownWithoutRemove(t *testing.T) {
+ q := New(testQuantum)
+ w1, _ := q.NewWriter(0, 0, 100)
+ w2, _ := q.NewWriter(1, 1, 100)
+
+ w1.Put(mkbufs("1_1"), nil)
+ w1.Put(mkbufs("1_2"), nil)
+ w2.Put(mkbufs("2_1"), nil)
+ w2.Put(mkbufs("2_2"), nil)
+
+ w1.Release(3)
+ w2.Release(3)
+
+ w, buf, err := q.Get(nil)
+ if s := concat(buf); err != nil || w.ID() != w1.ID() || s != "1_1" {
+ t.Errorf("Expected '1_1' from 0 with nil error, found %s from %d with %v", s, w.ID(), err)
+ }
+
+ w1.Shutdown(false)
+
+ w, buf, err = q.Get(nil)
+ if s := concat(buf); err != nil || w.ID() != w1.ID() || s != "" {
+ t.Errorf("Expected '' from 0 with nil error, found %s from %d with %v", s, w.ID(), err)
+ }
+
+ // Now we have to remove the writer from q.
+ w1.Shutdown(true)
+
+ w, buf, err = q.Get(nil)
+ if s := concat(buf); err != nil || w.ID() != w2.ID() || s != "2_1" {
+ t.Errorf("Expected '2_1' from 1 with nil error, found %s from %d with %v", s, w.ID(), err)
+ }
+}
+
+// TestRelease tests whether data is released in Release() order.
+func TestRelease(t *testing.T) {
+ q := New(testQuantum)
+ c := startReader(q)
+ w1, _ := q.NewWriter(0, 0, 10)
+ w2, _ := q.NewWriter(1, 0, 10)
+ w1.Put(mkbufs("A1"), nil)
+ w1.Put(mkbufs("A2"), nil)
+ w2.Put(mkbufs("B1"), nil)
+ w2.Put(mkbufs("B2"), nil)
+ select {
+ case s := <-c:
+ t.Errorf("Unexpected Get: %q", s)
+ default:
+ }
+
+ w2.Release(1)
+ select {
+ case s := <-c:
+ t.Errorf("Expected no release, but received %q", s)
+ default:
+ }
+
+ w2.Release(1)
+ s := <-c
+ if s != "B1" {
+ t.Errorf("Expected 'B1', but received %q", s)
+ }
+
+ w1.Release(4)
+ s = <-c
+ if s != "A1A2" {
+ t.Errorf("Expected 'A1A2', but received %q", s)
+ }
+
+ w1.Release(2)
+ select {
+ case s := <-c:
+ t.Errorf("Unexpected Get: %q", s)
+ default:
+ }
+
+ w1.Put(mkbufs("A3"), nil)
+ s = <-c
+ if s != "A3" {
+ t.Errorf("Expected 'A3', but received %q", s)
+ }
+
+ w2.Release(2)
+ s = <-c
+ if s != "B2" {
+ t.Errorf("Expected 'B2', but received %q", s)
+ }
+
+ select {
+ case s := <-c:
+ t.Errorf("Unexpected Get: %q", s)
+ default:
+ }
+
+ q.Close()
+ s = <-c
+ if s != "DONE" {
+ t.Errorf("Expected 'DONE', but received %q", s)
+ }
+}
+
+// TestPriority tests the priority.
+func TestPriority(t *testing.T) {
+ q := New(testQuantum)
+ w1, _ := q.NewWriter(0, 1, 100)
+ w2, _ := q.NewWriter(1, 0, 100)
+ w1.Release(100)
+ w2.Release(100)
+
+ w1.Put(mkbufs("a"), nil)
+ w1.Put(mkbufs("b"), nil)
+ w2.Put(mkbufs("c"), nil)
+ w2.Put(mkbufs("d"), nil)
+ expectGet(t, q, []string{"cd", "ab"})
+
+ w1.Put(mkbufs("a"), nil)
+ w1.Put(mkbufs("b"), nil)
+ w2.Put(mkbufs("c"), nil)
+ w2.Put(mkbufs("d"), nil)
+ expectGet(t, q, []string{"cd", "ab"})
+}
+
+// TestRoundRobin tests the round robin policy.
+func TestRoundRobin(t *testing.T) {
+ q := New(testQuantum)
+ w1, _ := q.NewWriter(0, 0, 100)
+ w2, _ := q.NewWriter(1, 0, 100)
+ w1.Release(100)
+ w2.Release(100)
+
+ w1.Put(mkbufs("a"), nil)
+ w1.Put(mkbufs("b"), nil)
+ w2.Put(mkbufs("c"), nil)
+ w2.Put(mkbufs("d"), nil)
+ expectGet(t, q, []string{"ab", "cd"})
+
+ w2.Put(mkbufs("a"), nil)
+ w1.Put(mkbufs("b"), nil)
+ w2.Put(mkbufs("c"), nil)
+ w1.Put(mkbufs("d"), nil)
+ w1.Put(mkbufs("e"), nil)
+ expectGet(t, q, []string{"ac", "bde"})
+}
+
+// TestDeficit tests the deficit counter.
+func TestDeficit(t *testing.T) {
+ q := New(testQuantum)
+ w1, _ := q.NewWriter(0, 0, testQuantum*10)
+ w2, _ := q.NewWriter(1, 0, testQuantum*10)
+ w1.Release(-1)
+ w2.Release(-1)
+
+ b1a := makeBuffer(2*testQuantum, '1')
+ b1b := makeBuffer(2*testQuantum, '2')
+ b2a := makeBuffer(testQuantum, '3')
+ b2b := makeBuffer(testQuantum, '4')
+ b2c := makeBuffer(testQuantum, '5')
+ b2d := makeBuffer(testQuantum, '6')
+ b2e := makeBuffer(testQuantum, '7')
+ w1.Put(mkbufs(b1a), nil)
+ w1.Put(mkbufs(b1b), nil)
+ w2.Put(mkbufs(b2a), nil)
+ w2.Put(mkbufs(b2b), nil)
+ w2.Put(mkbufs(b2c), nil)
+ w2.Put(mkbufs(b2d), nil)
+ w2.Put(mkbufs(b2e), nil)
+ expectGet(t, q, []string{b2a, b1a, b2b, b2c, b1b, b2d, b2e})
+}
diff --git a/profiles/internal/lib/dependency/dependency.go b/profiles/internal/lib/dependency/dependency.go
new file mode 100644
index 0000000..b777bef
--- /dev/null
+++ b/profiles/internal/lib/dependency/dependency.go
@@ -0,0 +1,123 @@
+// Package dependency keeps track of a dependency graph.
+// You add edges to the graph by specifying an object and the objects it depends on.
+// You can then call CloseAndWait when the object is finished to wait until
+// all the dependents are also finished.
+package dependency
+
+import (
+ "fmt"
+ "sync"
+)
+
+var NotFoundError = fmt.Errorf(
+ "Attempting to create an object whose dependency has already been terminated.")
+
+// Every object in a Graph depends on the all key. We can wait on this key
+// to know when all objects have been closed.
+type all struct{}
+
+type node struct {
+ dependents int
+ cond *sync.Cond
+ dependsOn []*node
+}
+
+// Graph keeps track of a number of objects and their dependents.
+// Typical usage looks like:
+//
+// g := NewGraph()
+//
+// // Instruct the graph that A depends on B and C.
+// if err := g.Depend(A, B, C); err != nil {
+// // Oops, B or C is already terminating, clean up A immediately.
+// }
+// // D depends on A (You should check the error as above).
+// g.Depend(D, A)
+// ...
+// // At some point we want to mark A as closed to new users and
+// // wait for all the objects that depend on it to finish
+// // (in this case D).
+// finish := g.CloseAndWait(A)
+// // Now we know D (and any other dependents) are finished, so we
+// // can clean up A.
+// A.CleanUp()
+// // Now notify the objects A depended on that they have one less
+// // dependent.
+// finish()
+type Graph struct {
+ mu sync.Mutex
+ nodes map[interface{}]*node
+}
+
+// NewGraph returns a new Graph ready to be used.
+func NewGraph() *Graph {
+ graph := &Graph{nodes: map[interface{}]*node{}}
+ graph.nodes[all{}] = &node{cond: sync.NewCond(&graph.mu)}
+ return graph
+}
+
+// Depend adds obj as a node in the dependency graph and notes its
+// dependencies on all the objects in 'on'. If any of the
+// dependencies are already closed (or are not in the graph at all)
+// then Depend returns NotFoundError and does not add any edges.
+func (g *Graph) Depend(obj interface{}, on ...interface{}) error {
+ g.mu.Lock()
+ defer g.mu.Unlock()
+
+ nodes := make([]*node, len(on)+1)
+ for i := range on {
+ if nodes[i] = g.nodes[on[i]]; nodes[i] == nil {
+ return NotFoundError
+ }
+ }
+ if alln := g.nodes[all{}]; alln == nil {
+ return NotFoundError
+ } else {
+ nodes[len(on)] = alln
+ }
+ for _, n := range nodes {
+ n.dependents++
+ }
+ if n := g.nodes[obj]; n != nil {
+ n.dependsOn = append(n.dependsOn, nodes...)
+ } else {
+ g.nodes[obj] = &node{
+ cond: sync.NewCond(&g.mu),
+ dependsOn: nodes,
+ }
+ }
+ return nil
+}
+
+// CloseAndWait closes an object to new dependents and waits for all
+// dependents to complete. When this function returns, you can safely
+// clean up obj knowing that no users remain. Once obj is finished
+// with the objects it depends on, you should call the returned function.
+func (g *Graph) CloseAndWait(obj interface{}) func() {
+ g.mu.Lock()
+ defer g.mu.Unlock()
+ n := g.nodes[obj]
+ if n == nil {
+ return func() {}
+ }
+ delete(g.nodes, obj)
+ for n.dependents > 0 {
+ n.cond.Wait()
+ }
+ return func() {
+ g.mu.Lock()
+ defer g.mu.Unlock()
+ for _, dn := range n.dependsOn {
+ if dn.dependents--; dn.dependents == 0 {
+ dn.cond.Broadcast()
+ }
+ }
+ }
+}
+
+// CloseAndWaitForAll closes the graph. No new objects or dependencies can be
+// added, and this function returns only after all existing objects have
+// called their returned finish functions.
+func (g *Graph) CloseAndWaitForAll() {
+ g.CloseAndWait(all{})
+}
diff --git a/profiles/internal/lib/dependency/dependency_test.go b/profiles/internal/lib/dependency/dependency_test.go
new file mode 100644
index 0000000..6ac6940
--- /dev/null
+++ b/profiles/internal/lib/dependency/dependency_test.go
@@ -0,0 +1,94 @@
+package dependency
+
+import (
+ "testing"
+ "time"
+)
+
+var nextId = 0
+
+type Dep struct {
+ deps []*Dep
+ stopped bool
+ id int
+}
+
+func NewDep(deps ...*Dep) *Dep {
+ d := &Dep{deps: deps, id: nextId}
+ nextId++
+ return d
+}
+
+func (d *Dep) Use(t *testing.T, by *Dep) {
+ if d.stopped {
+ t.Errorf("Object %d using %d after stop.", by.id, d.id)
+ }
+}
+
+func (d *Dep) Stop(t *testing.T) {
+ d.Use(t, d)
+ d.stopped = true
+ for _, dd := range d.deps {
+ dd.Use(t, d)
+ }
+}
+
+func TestGraph(t *testing.T) {
+ a := NewDep()
+ b, c := NewDep(a), NewDep(a)
+ d := NewDep(c)
+
+ g := NewGraph()
+ if err := g.Depend(a); err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ if err := g.Depend(b, a); err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ if err := g.Depend(c, a); err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ if err := g.Depend(d, c); err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ alldone := make(chan struct{})
+ go func() {
+ g.CloseAndWaitForAll()
+ close(alldone)
+ }()
+
+ // Close d, which is a leaf.
+ finish := g.CloseAndWait(d)
+ d.Stop(t)
+ finish()
+
+ // Set a to close and wait which should wait for b and c.
+ done := make(chan struct{})
+ go func() {
+ finish := g.CloseAndWait(a)
+ a.Stop(t)
+ finish()
+ close(done)
+ }()
+
+ // done and alldone shouldn't be finished yet.
+ select {
+ case <-time.After(time.Second):
+ case <-done:
+ t.Errorf("done is finished before its time")
+ case <-alldone:
+ t.Errorf("alldone is finished before its time")
+ }
+
+ // Now close b and c.
+ finish = g.CloseAndWait(b)
+ b.Stop(t)
+ finish()
+ finish = g.CloseAndWait(c)
+ c.Stop(t)
+ finish()
+
+ <-done
+ <-alldone
+}
diff --git a/profiles/internal/lib/iobuf/allocator.go b/profiles/internal/lib/iobuf/allocator.go
new file mode 100644
index 0000000..b26a4ef
--- /dev/null
+++ b/profiles/internal/lib/iobuf/allocator.go
@@ -0,0 +1,74 @@
+package iobuf
+
+import "v.io/x/lib/vlog"
+
+// Allocator is an allocator for Slices that tries to allocate
+// contiguously. That is, sequential allocations will tend to be contiguous,
+// which means that Coalesce() will usually be able to perform coalescing
+// (without copying the data).
+//
+//	calloc := iobuf.NewAllocator(...)
+//	slice1 := calloc.Alloc(10)
+//	slice2 := calloc.Alloc(20)
+//	slices := iobuf.Coalesce([]*iobuf.Slice{slice1, slice2}, 30)
+//	// slices should contain 1 element with length 30.
+type Allocator struct {
+ pool *Pool
+ index uint
+ reserve uint
+ iobuf *buf
+}
+
+// NewAllocator returns a new Slice allocator.
+//
+// <reserve> is the number of spare bytes to reserve at the beginning of each
+// contiguous iobuf. This can be used to reserve space for a header, for
+// example.
+func NewAllocator(pool *Pool, reserve uint) *Allocator {
+ return &Allocator{pool: pool, reserve: reserve, index: reserve}
+}
+
+// Release releases the allocator.
+func (a *Allocator) Release() {
+ if a.iobuf != nil {
+ a.iobuf.release()
+ a.iobuf = nil
+ }
+ a.pool = nil
+}
+
+// Alloc allocates a new Slice.
+func (a *Allocator) Alloc(bytes uint) *Slice {
+ if a.iobuf == nil {
+ if a.pool == nil {
+ vlog.Info("iobuf.Allocator has already been closed")
+ return nil
+ }
+ a.iobuf = a.pool.alloc(a.reserve + bytes)
+ }
+ if uint(len(a.iobuf.Contents))-a.index < bytes {
+ a.allocIOBUF(bytes)
+ }
+ base := a.index
+ free := base
+ if free == a.reserve {
+ free = 0
+ }
+ a.index += uint(bytes)
+ return a.iobuf.slice(free, base, a.index)
+}
+
+// Copy allocates a Slice and copies the buf into it.
+func (a *Allocator) Copy(buf []byte) *Slice {
+ slice := a.Alloc(uint(len(buf)))
+ copy(slice.Contents, buf)
+ return slice
+}
+
+// allocIOBUF replaces the current iobuf with a new one that has at least
+// <bytes> of storage.
+func (a *Allocator) allocIOBUF(bytes uint) {
+ a.iobuf.release()
+ a.iobuf = a.pool.alloc(bytes + a.reserve)
+ a.index = a.reserve
+}
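
A sketch of the header-reservation pattern the <reserve> parameter enables. It relies on Slice.ExpandFront, which is added in slice.go later in this change; the 8-byte header and the import are illustrative only (the package is internal).

package main

import (
	"fmt"

	"v.io/x/ref/profiles/internal/lib/iobuf"
)

func main() {
	// Reserve 8 bytes in front of every allocation for a framing header.
	pool := iobuf.NewPool(0) // 0 selects the default minimum size.
	alloc := iobuf.NewAllocator(pool, 8)

	payload := alloc.Copy([]byte("payload bytes"))

	// Grow the slice backwards into the reserved area and write the header
	// in place, without copying the payload.
	if payload.ExpandFront(8) {
		copy(payload.Contents[:8], []byte("HDRHDR..")) // stand-in 8-byte header
	}
	fmt.Printf("%q\n", payload.Contents)

	payload.Release()
	alloc.Release()
	pool.Close()
}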
diff --git a/profiles/internal/lib/iobuf/allocator_test.go b/profiles/internal/lib/iobuf/allocator_test.go
new file mode 100644
index 0000000..6bdce26
--- /dev/null
+++ b/profiles/internal/lib/iobuf/allocator_test.go
@@ -0,0 +1,55 @@
+package iobuf
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestAllocatorSmall(t *testing.T) {
+ pool := NewPool(iobufSize)
+ salloc := NewAllocator(pool, 0)
+ const count = 100
+ var slices [count]*Slice
+ for i := 0; i != count; i++ {
+ slices[i] = salloc.Copy([]byte(fmt.Sprintf("slice[%d]", i)))
+ }
+ for i := 0; i != count; i++ {
+ expectEq(t, fmt.Sprintf("slice[%d]", i), string(slices[i].Contents))
+ slices[i].Release()
+ }
+ salloc.Release()
+}
+
+func TestAllocatorLarge(t *testing.T) {
+ pool := NewPool(iobufSize)
+ salloc := NewAllocator(pool, 0)
+ const count = 100
+ var slices [count]*Slice
+ for i := 0; i != count; i++ {
+ slices[i] = salloc.Alloc(10000)
+ copy(slices[i].Contents, []byte(fmt.Sprintf("slice[%d]", i)))
+ }
+ for i := 0; i != count; i++ {
+ expected := fmt.Sprintf("slice[%d]", i)
+ expectEq(t, expected, string(slices[i].Contents[0:len(expected)]))
+ slices[i].Release()
+ }
+ salloc.Release()
+}
+
+// Check that the Allocator is unusable after it is closed.
+func TestAllocatorClose(t *testing.T) {
+ pool := NewPool(iobufSize)
+ alloc := NewAllocator(pool, 0)
+ slice := alloc.Alloc(10)
+ if slice == nil {
+ t.Fatalf("slice should not be nil")
+ }
+ slice.Release()
+ alloc.Release()
+ slice = alloc.Alloc(10)
+ if slice != nil {
+ t.Errorf("slice should be nil")
+ }
+ pool.Close()
+}
diff --git a/profiles/internal/lib/iobuf/iobuf.go b/profiles/internal/lib/iobuf/iobuf.go
new file mode 100644
index 0000000..ece8f49
--- /dev/null
+++ b/profiles/internal/lib/iobuf/iobuf.go
@@ -0,0 +1,141 @@
+// Package iobuf performs explicit memory management for data buffers used
+// to perform network IO. The intent is that it is more efficient to perform
+// manual allocation than to rely on the Go garbage collector to manage large
+// chunks of frequently recycled memory.
+//
+// In this model, a Pool is a collection of contiguous memory areas (of type
+// <buf>) used for memory allocation. The bufs are subdivided into slices of
+// type Slice.
+//
+// Pool: a source of memory areas.
+// Slice: a contiguous region of allocated memory.
+// Allocator: a Slice allocator.
+// Reader: an IO reader that reads into Slices.
+//
+// There is an analogy with sbrk/malloc: the Pool is the source of memory (like
+// sbrk), and the Allocator hands out small areas (like malloc). Allocations
+// are mostly sequential within a buf, allowing sequentially-allocated Slices to
+// be coalesced at some later point.
+//
+// For efficiency, Slice values hold reference counts to the underlying buf.
+// When all references to a buf are released, the buf is recycled into its Pool.
+// This does not happen automatically. The caller is responsible for calling
+// slice.Release() when finished using a slice.
+package iobuf
+
+import (
+ "sync"
+ "sync/atomic"
+
+ "v.io/x/lib/vlog"
+)
+
+// An iobuf is a storage space for memory read from the network. The data should
+// be read into the Contents field, then sliced up into Slice slices that
+// correspond to header, payload, etc.
+//
+// iobufs are reference counted. The count includes one reference for the iobuf
+// itself, plus one for each Slice.
+type buf struct {
+ refcount int32 // Use atomic operations.
+ Contents []byte
+ pool *Pool
+}
+
+// Pool manages a pool of iobufs. The size of the pool is not fixed;
+// it can grow without bound.
+//
+// The implementation here allocates a new iobuf whenever there is an allocation
+// request and the pool is empty. For iobufs to be recycled, explicit Release()
+// calls are required. However, if these Release() calls are missing, the
+// program will continue to function, recycling the buffers through the gc.
+// Therefore, if you forget Release() calls, you will be putting pressure on gc
+// to recycle the iobufs. You can examine the <allocated> field to check how
+// many iobufs have been allocated during the lifetime of the Pool.
+type Pool struct {
+ minSize uint
+ mutex sync.Mutex
+ freelist []*buf
+ allocated uint64 // Total number of iobufs allocated.
+}
+
+const defaultMinSize = 1 << 12
+
+// NewPool creates a new pool. The pool will allocate iobufs in multiples of minSize.
+// If minSize is zero, the default value (4K) will be used.
+func NewPool(minSize uint) *Pool {
+ if minSize == 0 {
+ minSize = defaultMinSize
+ }
+ return &Pool{minSize: minSize, freelist: []*buf{}}
+}
+
+// Close shuts down the Pool.
+func (pool *Pool) Close() {
+ pool.mutex.Lock()
+ pool.freelist = nil
+ pool.mutex.Unlock()
+}
+
+// alloc allocates a new iobuf. The returned iobuf has at least <size> bytes of free space.
+func (pool *Pool) alloc(size uint) *buf {
+ if size == 0 {
+ size = pool.minSize
+ } else if r := size % pool.minSize; r > 0 {
+ size += pool.minSize - r
+ }
+
+ pool.mutex.Lock()
+ defer pool.mutex.Unlock()
+ if pool.freelist == nil {
+ vlog.Info("iobuf.Pool is closed")
+ return nil
+ }
+
+ // Search for an iobuf that is large enough.
+ for i := len(pool.freelist) - 1; i >= 0; i-- {
+ iobuf := pool.freelist[i]
+ if uint(len(iobuf.Contents)) >= size {
+ pool.freelist[i] = pool.freelist[len(pool.freelist)-1]
+ pool.freelist = pool.freelist[:len(pool.freelist)-1]
+ atomic.AddInt32(&iobuf.refcount, 1)
+ return iobuf
+ }
+ }
+
+ // All the free buffers are too small. Allocate a fresh one.
+ pool.allocated++
+ iobuf := &buf{refcount: 1, Contents: make([]byte, size), pool: pool}
+ return iobuf
+}
+
+// release recycles an iobuf that has a zero refcount.
+func (pool *Pool) release(iobuf *buf) {
+ pool.mutex.Lock()
+ defer pool.mutex.Unlock()
+ // TODO(jyh): Ideally we would like to overwrite the iobuf so that if there
+ // are slices still referring to it (due to a double-Release), it will be
+ // more likely that the problem is noticed. Implement this if we define a
+ // "debug mode."
+ if pool.freelist != nil {
+ pool.freelist = append(pool.freelist, iobuf)
+ }
+}
+
+// release decrements the iobuf's refcount, recycling the iobuf when the count
+// reaches zero.
+func (iobuf *buf) release() {
+ refcount := atomic.AddInt32(&iobuf.refcount, -1)
+ if refcount < 0 {
+ vlog.Infof("Refcount is negative: %d. This is a bug in the program.", refcount)
+ }
+ if refcount == 0 {
+ iobuf.pool.release(iobuf)
+ }
+}
+
+// slice creates a Slice that refers to a slice of the iobuf contents.
+func (iobuf *buf) slice(free, base, bound uint) *Slice {
+ atomic.AddInt32(&iobuf.refcount, 1)
+ return &Slice{iobuf: iobuf, free: free, base: base, Contents: iobuf.Contents[base:bound]}
+}
diff --git a/profiles/internal/lib/iobuf/iobuf_test.go b/profiles/internal/lib/iobuf/iobuf_test.go
new file mode 100644
index 0000000..54553ef
--- /dev/null
+++ b/profiles/internal/lib/iobuf/iobuf_test.go
@@ -0,0 +1,110 @@
+package iobuf
+
+import (
+ "runtime"
+ "sync"
+ "testing"
+)
+
+const (
+ iobufSize = 1 << 16
+)
+
+func expectEq(t *testing.T, x, y interface{}) {
+ if x != y {
+ _, file, line, _ := runtime.Caller(1)
+ t.Errorf("%s(%d): expected %v, actual %v", file, line, x, y)
+ }
+}
+
+// Test basic reference counting.
+func TestFreelist(t *testing.T) {
+ pool := NewPool(iobufSize)
+ iobuf := pool.alloc(0)
+ expectEq(t, iobufSize, len(iobuf.Contents))
+ expectEq(t, uint64(1), pool.allocated)
+ expectEq(t, 0, len(pool.freelist))
+ iobuf.release()
+
+ expectEq(t, 1, len(pool.freelist))
+ iobuf = pool.alloc(0)
+ expectEq(t, 0, len(pool.freelist))
+ pool.alloc(0).release()
+ iobuf.release()
+ expectEq(t, 2, len(pool.freelist))
+}
+
+// Test slice reference counting.
+func TestRefcount(t *testing.T) {
+ pool := NewPool(iobufSize)
+ iobuf := pool.alloc(0)
+ slice1 := iobuf.slice(0, 0, 10)
+ slice2 := iobuf.slice(10, 10, 20)
+ iobuf.release()
+ expectEq(t, 0, len(pool.freelist))
+ slice1.Release()
+ expectEq(t, 0, len(pool.freelist))
+ slice2.Release()
+ expectEq(t, 1, len(pool.freelist))
+}
+
+// Check that the Pool is unusable after it is closed.
+func TestPoolClose(t *testing.T) {
+ pool := NewPool(iobufSize)
+ iobuf := pool.alloc(1024)
+ if iobuf == nil {
+ t.Fatalf("iobuf should not be nil")
+ }
+ iobuf.release()
+ pool.Close()
+ iobuf = pool.alloc(1024)
+ if iobuf != nil {
+ t.Errorf("iobuf should be nil")
+ }
+}
+
+func TestIOBUFConcurrency(t *testing.T) {
+ const threadCount = 100
+ pool := NewPool(iobufSize)
+
+ var pending sync.WaitGroup
+ pending.Add(threadCount)
+ for i := 0; i != threadCount; i++ {
+ go func() {
+ iobufThrasher(t, pool)
+ pending.Done()
+ }()
+ }
+ pending.Wait()
+}
+
+func iobufThrasher(t *testing.T, pool *Pool) {
+ const (
+ iobufCount = 100
+ sliceCount = 100
+ )
+ message := "Hello world"
+ for i := 0; i != iobufCount; i++ {
+ iobuf := pool.alloc(0)
+ var slices []*Slice
+ var base uint
+ for j := 0; j != sliceCount; j++ {
+ if base+uint(len(message)) > uint(len(iobuf.Contents)) {
+ iobuf.release()
+ iobuf = pool.alloc(0)
+ }
+ slice := iobuf.slice(base, base, base+uint(len(message)))
+ base += uint(len(message))
+ copy(slice.Contents, []byte(message))
+ slices = append(slices, slice)
+ }
+ iobuf.release()
+ for _, slice := range slices {
+ content := string(slice.Contents)
+ if content != message {
+ t.Errorf("Expected %q, got %q", message, content)
+ }
+ slice.Release()
+ }
+ }
+}
diff --git a/profiles/internal/lib/iobuf/reader.go b/profiles/internal/lib/iobuf/reader.go
new file mode 100644
index 0000000..4700822
--- /dev/null
+++ b/profiles/internal/lib/iobuf/reader.go
@@ -0,0 +1,67 @@
+package iobuf
+
+import (
+ "io"
+)
+
+// Reader wraps an io.Reader to provide a buffered Read() operation.
+type Reader struct {
+ pool *Pool
+ iobuf *buf
+ base, bound uint
+ conn io.Reader
+}
+
+// NewReader returns a new Reader that reads from conn, buffering through pool.
+func NewReader(pool *Pool, conn io.Reader) *Reader {
+ iobuf := pool.alloc(0)
+ return &Reader{pool: pool, iobuf: iobuf, conn: conn}
+}
+
+// Close closes the Reader. Do not call this concurrently with Read. Instead,
+// close r.conn, wait until Read() has completed, then perform the Close().
+func (r *Reader) Close() {
+ r.iobuf.release()
+ r.pool = nil
+ r.iobuf = nil
+}
+
+// fill ensures that the input contains at least <bytes> bytes. Returns an
+// error iff the input is short (even if some input was read).
+func (r *Reader) fill(bytes uint) error {
+ if r.bound-r.base >= bytes {
+ return nil
+ }
+
+ // If there is not enough space to read the data contiguously, allocate a
+ // new iobuf.
+ if uint(len(r.iobuf.Contents))-r.base < bytes {
+ iobuf := r.pool.alloc(bytes)
+ r.bound = uint(copy(iobuf.Contents, r.iobuf.Contents[r.base:r.bound]))
+ r.base = 0
+ r.iobuf.release()
+ r.iobuf = iobuf
+ }
+
+ // Read into the iobuf.
+ for r.bound-r.base < bytes {
+ amount, err := r.conn.Read(r.iobuf.Contents[r.bound:])
+ if amount == 0 && err != nil {
+ return err
+ }
+ r.bound += uint(amount)
+ }
+ return nil
+}
+
+// Read returns the next <n> bytes of input as a Slice. Returns an error if the
+// read was short, even if some input was read.
+func (r *Reader) Read(n int) (*Slice, error) {
+ bytes := uint(n)
+ if err := r.fill(bytes); err != nil {
+ return nil, err
+ }
+ slice := r.iobuf.slice(r.base, r.base, r.base+bytes)
+ r.base += bytes
+ return slice, nil
+}
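
A sketch of reading length-prefixed frames with the Reader. The 4-byte big-endian length prefix is an assumed framing (not something this package mandates), and the import is illustrative only since the package is internal.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"v.io/x/ref/profiles/internal/lib/iobuf"
)

func main() {
	// Assumed wire format: a 4-byte big-endian length, then the payload.
	var wire bytes.Buffer
	binary.Write(&wire, binary.BigEndian, uint32(5))
	wire.WriteString("hello")

	pool := iobuf.NewPool(0)
	r := iobuf.NewReader(pool, &wire)

	hdr, err := r.Read(4) // the length prefix arrives as a Slice
	if err != nil {
		panic(err)
	}
	n := binary.BigEndian.Uint32(hdr.Contents)
	hdr.Release()

	payload, err := r.Read(int(n))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", payload.Contents) // "hello"
	payload.Release()

	r.Close()
	pool.Close()
}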
diff --git a/profiles/internal/lib/iobuf/reader_test.go b/profiles/internal/lib/iobuf/reader_test.go
new file mode 100644
index 0000000..d7e2b43
--- /dev/null
+++ b/profiles/internal/lib/iobuf/reader_test.go
@@ -0,0 +1,62 @@
+package iobuf
+
+import (
+ "io"
+ "testing"
+)
+
+const (
+ maxReadSize = 10
+)
+
+type testReader struct {
+ off int
+ isClosed bool
+}
+
+func (r *testReader) Read(buf []byte) (int, error) {
+ if r.isClosed {
+ return 0, io.EOF
+ }
+ amount := len(buf)
+ for i := 0; i != amount; i++ {
+ buf[i] = charAt(r.off + i)
+ }
+ r.off += amount
+ return amount, nil
+}
+
+func TestReader(t *testing.T) {
+ pool := NewPool(iobufSize)
+ var tr testReader
+ r := NewReader(pool, &tr)
+
+ const amount = 4
+ const loopCount = 1000
+ for off := 0; off < loopCount*amount; off += amount {
+ s, err := r.Read(amount)
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ checkBuf(t, s.Contents, off)
+ s.Release()
+ }
+
+ tr.isClosed = true
+ for off := amount * loopCount; off != tr.off; off++ {
+ s, err := r.Read(1)
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ checkBuf(t, s.Contents, off)
+ s.Release()
+ }
+
+ _, err := r.Read(1)
+ if err == nil {
+ t.Errorf("Expected error")
+ }
+
+ r.Close()
+ pool.Close()
+}
diff --git a/profiles/internal/lib/iobuf/slice.go b/profiles/internal/lib/iobuf/slice.go
new file mode 100644
index 0000000..3bfc1f9
--- /dev/null
+++ b/profiles/internal/lib/iobuf/slice.go
@@ -0,0 +1,91 @@
+package iobuf
+
+// Slice refers to an iobuf and the byte slice for the actual data.
+type Slice struct {
+ iobuf *buf
+ free uint // Free area before base, if any.
+ base uint // Index into the underlying iobuf.
+ Contents []byte // iobuf.Contents[base:bound]
+}
+
+// Size returns the number of bytes in the Slice.
+func (slice *Slice) Size() int {
+ return len(slice.Contents)
+}
+
+// FreeEntirePrefix sets the free index to zero. Be careful when using this:
+// you must ensure that no Slices are still using the free region.
+func (slice *Slice) FreeEntirePrefix() {
+ slice.free = 0
+}
+
+// Release releases the slice, decrementing the reference count on the iobuf
+// and destroying the slice.
+func (slice *Slice) Release() {
+ if slice.iobuf != nil {
+ slice.iobuf.release()
+ slice.iobuf = nil
+ }
+ slice.Contents = nil
+}
+
+// ReleasePrevious releases the <prev> slice, extending the free prefix of the
+// target slice if possible.
+func (slice *Slice) ReleasePrevious(prev *Slice) {
+ if prev.iobuf == slice.iobuf && prev.base+uint(len(prev.Contents)) == slice.free {
+ slice.free = prev.free
+ }
+ prev.Release()
+}
+
+// TruncateFront removes <bytes> from the front of the Slice.
+func (slice *Slice) TruncateFront(bytes uint) {
+ if bytes > uint(len(slice.Contents)) {
+ bytes = uint(len(slice.Contents))
+ }
+ slice.base += bytes
+ slice.Contents = slice.Contents[bytes:]
+}
+
+// ExpandFront tries to expand the Slice by <bytes> before the front of the Slice.
+// Returns true if the Slice was expanded.
+func (slice *Slice) ExpandFront(bytes uint) bool {
+ if slice.free+bytes > slice.base || slice.iobuf == nil {
+ return false
+ }
+ bound := slice.base + uint(len(slice.Contents))
+ slice.base -= bytes
+ slice.Contents = slice.iobuf.Contents[slice.base:bound]
+ return true
+}
+
+// Coalesce coalesces a sequence of slices. If two slices are adjacent, they
+// are combined. Coalesce takes ownership of the slices; the caller takes
+// ownership of the result.
+func Coalesce(slices []*Slice, maxSize uint) []*Slice {
+ if len(slices) <= 1 {
+ return slices
+ }
+ var result []*Slice
+ c := slices[0]
+ for i := 1; i != len(slices); i++ {
+ s := slices[i]
+ if uint(len(c.Contents)+len(s.Contents)) <= maxSize &&
+ c.iobuf != nil && s.iobuf == c.iobuf &&
+ c.base+uint(len(c.Contents)) == s.base {
+ // The two slices are adjacent. Merge them.
+ c.Contents = c.iobuf.Contents[c.base : s.base+uint(len(s.Contents))]
+ s.Release()
+ } else {
+ result = append(result, c)
+ c = s
+ }
+ }
+ return append(result, c)
+}
+
+// NewSlice creates a Slice from a byte array. The value is not copied into an
+// iobuf, it continues to refer to the buffer that was passed in.
+func NewSlice(buf []byte) *Slice {
+ return &Slice{Contents: buf}
+}
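
A sketch tying the Allocator and Coalesce together: sequential allocations from one Allocator usually land in the same iobuf, so adjacent Slices merge without copying. The sizes and the import are illustrative only (the package is internal).

package main

import (
	"fmt"

	"v.io/x/ref/profiles/internal/lib/iobuf"
)

func main() {
	pool := iobuf.NewPool(0)
	alloc := iobuf.NewAllocator(pool, 0)

	a := alloc.Copy([]byte("header|"))
	b := alloc.Copy([]byte("payload"))

	// a and b come from the same iobuf and are adjacent, so Coalesce merges
	// them into a single Slice without copying (up to the size limit given).
	for _, s := range iobuf.Coalesce([]*iobuf.Slice{a, b}, 1<<16) {
		fmt.Printf("%q\n", s.Contents) // "header|payload"
		s.Release()
	}

	alloc.Release()
	pool.Close()
}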
diff --git a/profiles/internal/lib/iobuf/slice_test.go b/profiles/internal/lib/iobuf/slice_test.go
new file mode 100644
index 0000000..4b75b92
--- /dev/null
+++ b/profiles/internal/lib/iobuf/slice_test.go
@@ -0,0 +1,73 @@
+package iobuf
+
+import (
+ "testing"
+)
+
+func TestExpandFront(t *testing.T) {
+ pool := NewPool(iobufSize)
+ calloc := NewAllocator(pool, 8)
+ slice := calloc.Alloc(10)
+ if slice.Size() != 10 {
+ t.Errorf("Expected length 10, got %d", slice.Size())
+ }
+ copy(slice.Contents, []byte("0123456789"))
+ ok := slice.ExpandFront(8)
+ if !ok {
+ t.Errorf("Expected ExpandFront to succeed")
+ }
+ if slice.Size() != 18 {
+ t.Errorf("Expected length 18, got %d", slice.Size())
+ }
+ slice.TruncateFront(11)
+ if slice.Size() != 7 {
+		t.Errorf("Expected length 7, got %d", slice.Size())
+ }
+ ok = slice.ExpandFront(3)
+ if slice.Size() != 10 {
+ t.Errorf("Expected length 10, got %d", slice.Size())
+ }
+ if string(slice.Contents) != "0123456789" {
+ t.Errorf("Expected 0123456789, got %q", string(slice.Contents))
+ }
+ ok = slice.ExpandFront(10)
+ if ok {
+ t.Errorf("Expected expansion to fail")
+ }
+}
+
+func TestCoalesce(t *testing.T) {
+ pool := NewPool(iobufSize)
+ salloc := NewAllocator(pool, 0)
+ const count = 100
+ const blocksize = 1024
+ var slices [count]*Slice
+ for i := 0; i != count; i++ {
+ var block [blocksize]byte
+ for j := 0; j != blocksize; j++ {
+ block[j] = charAt(i*blocksize + j)
+ }
+ slices[i] = salloc.Copy(block[:])
+ }
+ coalesced := Coalesce(slices[:], blocksize*4)
+ expectEq(t, count/4, len(coalesced))
+
+ off := 0
+ for _, buf := range coalesced {
+ checkBuf(t, buf.Contents, off)
+ off += len(buf.Contents)
+ buf.Release()
+ }
+
+ salloc.Release()
+}
+
+func charAt(i int) byte {
+ return "0123456789abcedf"[i%16]
+}
+
+func checkBuf(t *testing.T, buf []byte, off int) {
+ for i, c := range buf {
+ expectEq(t, charAt(off+i), c)
+ }
+}
diff --git a/profiles/internal/lib/pcqueue/pcqueue.go b/profiles/internal/lib/pcqueue/pcqueue.go
new file mode 100644
index 0000000..26095de
--- /dev/null
+++ b/profiles/internal/lib/pcqueue/pcqueue.go
@@ -0,0 +1,151 @@
+// Package pcqueue implements a producer/consumer queue: a concurrent bounded
+// buffer supporting multiple concurrent producers and consumers, with
+// cancellation. The queue can be closed from either end, by the producer
+// and/or the consumer. Close retains queued values for readers; Shutdown
+// discards them. Subsequent operations on a closed queue return an error.
+//
+// Note: the main reason to use a producer/consumer queue instead of a channel
+// is to allow the consumer to close the queue. The queue can be used for
+// many-to-many communication with multiple producers and/or multiple
+// consumers, any of which are allowed to close the queue.
+package pcqueue
+
+import (
+ "errors"
+ "sync"
+)
+
+var (
+ ErrQueueIsClosed = errors.New("queue is closed")
+ ErrCancelled = errors.New("operation was canceled")
+ ErrTryAgain = errors.New("operation failed, try again")
+)
+
+// T is a producer/consumer queue. It fulfills the same purpose as a Go
+// channel; the main advantage is that the Put() operation does not panic, even
+// after the queue is closed. The main disadvantage is that a T can't
+// be used in a select operation.
+type T struct {
+ // The mutex R/W mode depends only on whether the immediate struct fields
+ // are being read or modified. It isn't related to whether the channel
+ // operations are mutating. For example, the Put() method takes a read lock
+ // because it reads the contents and isClosed fields. It mutates the
+ // contents channel, but that doesn't matter.
+ mutex sync.RWMutex
+ contents chan interface{} // GUARDED_BY(mutex)
+ isClosed bool // GUARDED_BY(mutex), true iff <closed> is closed.
+
+ closed chan struct{}
+}
+
+// New(size) returns a producer/consumer queue with maximum
+// <size> elements.
+func New(maxSize int) *T {
+ return &T{
+ contents: make(chan interface{}, maxSize),
+ closed: make(chan struct{})}
+}
+
+// Put(item, cancel) adds an item to the queue, or returns an error if the queue
+// is closed or the operation is cancelled. The <cancel> channel may be nil, in
+// which case the operation can't be cancelled.
+func (q *T) Put(item interface{}, cancel <-chan struct{}) error {
+ contents := q.putChannel()
+ select {
+ case contents <- item:
+ case <-q.closed:
+ return ErrQueueIsClosed
+ case <-cancel:
+ return ErrCancelled
+ }
+ return nil
+}
+
+// Get(cancel) returns the next item from the queue, or an error if
+// the queue is closed or the operation is cancelled.
+func (q *T) Get(cancel <-chan struct{}) (interface{}, error) {
+ contents := q.getChannel()
+ select {
+ case v := <-contents:
+ return v, nil
+ case <-q.closed:
+ return q.drain()
+ case <-cancel:
+ return nil, ErrCancelled
+ }
+}
+
+// TryPut attempts to add an item to the queue. If the queue is full,
+// ErrTryAgain is returned immediately, without blocking. If the queue is
+// closed, ErrQueueIsClosed is returned.
+func (q *T) TryPut(item interface{}) error {
+ contents := q.putChannel()
+ select {
+ case contents <- item:
+ return nil
+ default:
+ }
+ q.mutex.RLock()
+ defer q.mutex.RUnlock()
+ if q.isClosed {
+ return ErrQueueIsClosed
+ }
+ return ErrTryAgain
+}
+
+// Close() closes the queue, without discarding the contents. All Put*() operations
+// currently running may, or may not, add their values to the queue. All Put*()
+// operations that happen-after the Close() will fail.
+func (q *T) Close() {
+ q.mutex.Lock()
+ if !q.isClosed {
+ q.isClosed = true
+ close(q.closed)
+ }
+ q.mutex.Unlock()
+}
+
+// Shutdown() closes the queue and discards all contents. Any concurrent Get()
+// and Put() operations might exchange values, but all operations that
+// happen-after the Shutdown() will fail.
+func (q *T) Shutdown() {
+ q.mutex.Lock()
+ if !q.isClosed {
+ q.isClosed = true
+ close(q.closed)
+ }
+ q.contents = nil
+ q.mutex.Unlock()
+}
+
+// putChannel() returns a channel for inserting new values. Returns nil if
+// the queue has been closed.
+func (q *T) putChannel() chan interface{} {
+ q.mutex.RLock()
+ defer q.mutex.RUnlock()
+ if q.isClosed {
+ return nil
+ }
+ return q.contents
+}
+
+// getChannel() returns the <contents> channel.
+func (q *T) getChannel() chan interface{} {
+ q.mutex.RLock()
+ defer q.mutex.RUnlock()
+ return q.contents
+}
+
+// drain() returns the next queued element, if any. Once the queue is empty,
+// the contents channel is released and ErrQueueIsClosed is returned.
+func (q *T) drain() (interface{}, error) {
+ q.mutex.Lock()
+ defer q.mutex.Unlock()
+ select {
+ case v := <-q.contents:
+ return v, nil
+ default:
+ q.contents = nil
+ return nil, ErrQueueIsClosed
+ }
+}
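
A sketch of the many-producer, consumer-closes pattern the package comment describes. The queue size and producer counts are arbitrary, and the import is illustrative only (the package is internal).

package main

import (
	"fmt"
	"sync"

	"v.io/x/ref/profiles/internal/lib/pcqueue"
)

func main() {
	q := pcqueue.New(16)
	var producers sync.WaitGroup

	for p := 0; p < 3; p++ {
		producers.Add(1)
		go func(p int) {
			defer producers.Done()
			for i := 0; i < 5; i++ {
				// Unlike sending on a closed channel, Put returns
				// ErrQueueIsClosed instead of panicking.
				if err := q.Put(p*100+i, nil); err != nil {
					return
				}
			}
		}(p)
	}

	// A single consumer owns the decision to close the queue.
	for n := 0; n < 15; n++ {
		item, err := q.Get(nil)
		if err != nil {
			break
		}
		fmt.Println(item.(int))
	}
	q.Close()
	producers.Wait()
}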
diff --git a/profiles/internal/lib/pcqueue/pcqueue_test.go b/profiles/internal/lib/pcqueue/pcqueue_test.go
new file mode 100644
index 0000000..e41ec89
--- /dev/null
+++ b/profiles/internal/lib/pcqueue/pcqueue_test.go
@@ -0,0 +1,481 @@
+package pcqueue
+
+import (
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "v.io/x/lib/vlog"
+)
+
+//go:generate v23 test generate
+
+const (
+ queueSize = 10
+ elementCount = 100
+ writerCount = 10
+ readerCount = 10
+)
+
+// Test that Put() blocks on an unbuffered queue until a matching Get().
+func TestSimplePut(t *testing.T) {
+ queue := New(0)
+ done := make(chan struct{}, 1)
+ go func() {
+ queue.Put(1, nil)
+ done <- struct{}{}
+ }()
+
+ select {
+ case <-done:
+ t.Errorf("Unexpected completion")
+ default:
+ }
+
+ item, err := queue.Get(nil)
+ if err != nil {
+ t.Errorf("Get: %v", err)
+ }
+
+ if item.(int) != 1 {
+ t.Errorf("Expected 1, actual=%v", item)
+ }
+
+ <-done
+}
+
+// Test that Get() blocks on an unbuffered queue until a matching Put().
+func TestSimpleGet(t *testing.T) {
+ queue := New(0)
+ done := make(chan struct{}, 1)
+ go func() {
+ item, err := queue.Get(nil)
+ if err != nil {
+			t.Errorf("Get: %v", err)
+ }
+ if item.(int) != 1 {
+ t.Errorf("Expected 1, actual=%v", item)
+ }
+ done <- struct{}{}
+ }()
+
+ select {
+ case <-done:
+ t.Errorf("Unexpected completion")
+ default:
+ }
+
+ queue.Put(1, nil)
+ <-done
+}
+
+// Test normal queue operation with a single producer and single consumer.
+func TestSequential(t *testing.T) {
+ queue := New(queueSize)
+ done := make(chan struct{}, 1)
+ cancel := make(chan struct{})
+
+ // Check that the queue elements are sequentially increasing ints.
+ vlog.VI(1).Infof("Start consumer")
+ go func() {
+ for i := 0; i != elementCount; i++ {
+ item, err := queue.Get(cancel)
+ if err != nil {
+ t.Errorf("Get: %v", err)
+ }
+ if item == nil {
+ break
+ }
+ j := item.(int)
+ if j != i {
+ t.Errorf("Expected %d, actual %d", i, j)
+ }
+ }
+ done <- struct{}{}
+ }()
+
+ // Generate the sequential ints.
+ vlog.VI(1).Infof("Put values")
+ for i := 0; i != elementCount; i++ {
+ queue.Put(i, nil)
+ }
+
+ // Wait for the consumer.
+ vlog.VI(1).Infof("Waiting for consumer")
+ <-done
+
+	// Any subsequent read should block until cancelled.
+ vlog.VI(1).Infof("Start consumer")
+ go func() {
+ _, err := queue.Get(cancel)
+ if err != ErrCancelled {
+			t.Errorf("Expected cancellation: %v", err)
+ }
+ vlog.VI(1).Infof("Consumer done")
+ done <- struct{}{}
+ }()
+
+ vlog.VI(1).Infof("Sleep a little")
+ time.Sleep(100 * time.Millisecond)
+ select {
+ case <-done:
+ t.Errorf("Unexpected completion")
+ default:
+ }
+
+ vlog.VI(1).Infof("Cancel")
+ close(cancel)
+
+ vlog.VI(1).Infof("Wait for consumer")
+ <-done
+}
+
+// Test cancellation of Put() when there is no consumer.
+func TestSequentialPutCancel(t *testing.T) {
+ queue := New(queueSize)
+ done := make(chan struct{}, 1)
+ cancel := make(chan struct{})
+
+ vlog.VI(1).Infof("Put values")
+ for i := 0; i != queueSize; i++ {
+ err := queue.Put(i, nil)
+ if err != nil {
+ t.Errorf("Put: %v", err)
+ }
+ }
+
+ vlog.VI(1).Infof("Start producer")
+ go func() {
+ err := queue.Put(0, cancel)
+ if err != ErrCancelled {
+ t.Errorf("Put: expected cancellation: %v", err)
+ }
+ done <- struct{}{}
+ }()
+
+ vlog.VI(1).Infof("Sleep a little")
+ time.Sleep(100 * time.Millisecond)
+ select {
+ case <-done:
+ t.Errorf("Unexpected completion")
+ default:
+ }
+
+ vlog.VI(1).Infof("Cancel")
+ close(cancel)
+
+ vlog.VI(1).Infof("Wait for producer")
+ <-done
+}
+
+// Test that Get() returns an error when the queue is closed.
+func TestSequentialClose(t *testing.T) {
+ queue := New(queueSize)
+ err := queue.Put(0, nil)
+ if err != nil {
+ t.Errorf("Put: %v", err)
+ }
+ queue.Close()
+
+ // Check that Get() returns the element.
+ item, err := queue.Get(nil)
+ if err != nil {
+ t.Errorf("Get: %v", err)
+ }
+ if item.(int) != 0 {
+ t.Errorf("Unexpected value: %v", item)
+ }
+
+ // Check that Get() returns an error.
+ _, err = queue.Get(nil)
+ if err != ErrQueueIsClosed {
+ t.Errorf("Expected queue to be closed: %v", err)
+ }
+
+ // Check that Put() returns an error.
+ err = queue.Put(0, nil)
+ if err != ErrQueueIsClosed {
+ t.Errorf("Expected queue to be closed: %v", err)
+ }
+}
+
+// Test that Put() calls racing with Close() may or may not add their values.
+func TestConcurrentClose(t *testing.T) {
+ queue := New(0)
+ pending := &sync.WaitGroup{}
+ pending.Add(2 * writerCount)
+ for i := 0; i != writerCount; i++ {
+ go func() {
+ err := queue.Put(1, nil)
+ if err != nil {
+ vlog.VI(1).Infof("Put: %v", err)
+ }
+ pending.Done()
+ }()
+ }
+ time.Sleep(100 * time.Millisecond)
+ queue.Close()
+ for i := 0; i != writerCount; i++ {
+ go func() {
+ err := queue.Put(2, nil)
+ if err == nil {
+ t.Errorf("Expected error")
+ }
+ pending.Done()
+ }()
+ }
+
+ readers := 0
+ for {
+ item, err := queue.Get(nil)
+ if err != nil {
+ break
+ }
+ if item.(int) != 1 {
+ t.Errorf("Expected 1, actual=%v", item)
+ }
+ readers++
+ }
+ vlog.VI(1).Infof("%d operations completed", readers)
+ if readers > writerCount {
+ t.Errorf("Too many readers")
+ }
+ pending.Wait()
+}
+
+// Test that Get() returns an error when the queue is shut down.
+func TestSequentialShutdown(t *testing.T) {
+ queue := New(queueSize)
+ err := queue.Put(0, nil)
+ if err != nil {
+ t.Errorf("Put: %v", err)
+ }
+ queue.Shutdown()
+
+ // Check that Get() returns an error.
+ _, err = queue.Get(nil)
+ if err != ErrQueueIsClosed {
+ t.Errorf("Expected queue to be closed: %v", err)
+ }
+
+ // Check that Put() returns an error.
+ err = queue.Put(0, nil)
+ if err != ErrQueueIsClosed {
+ t.Errorf("Expected queue to be closed: %v", err)
+ }
+}
+
+// Test with concurrent producers, but a single consumer.
+func TestConcurrentPutNoTimeouts(t *testing.T) {
+ queue := New(queueSize)
+ pending := &sync.WaitGroup{}
+
+ // Generate the sequential ints.
+ for i := 0; i != writerCount; i++ {
+ pending.Add(1)
+ go func() {
+ for j := 0; j != elementCount; j++ {
+ queue.Put(j, nil)
+ }
+ pending.Done()
+ }()
+ }
+
+ // Sum up the results and compare.
+ sum := 0
+ for i := 0; i != writerCount*elementCount; i++ {
+ item, err := queue.Get(nil)
+ if err != nil {
+ t.Errorf("Get: %v", err)
+ }
+ if item == nil {
+ break
+ }
+ sum += item.(int)
+ }
+ expected := writerCount * elementCount * (elementCount - 1) / 2
+ if sum != expected {
+ t.Errorf("Expected sum %d, received %d", expected, sum)
+ }
+
+ pending.Wait()
+}
+
+// Test with concurrent consumers and concurrent producers.
+func TestConcurrentGet(t *testing.T) {
+ queue := New(queueSize)
+ done := make(chan struct{})
+ pending := &sync.WaitGroup{}
+ pending.Add(readerCount + writerCount)
+ cancel := make(chan struct{})
+
+ // Sum up the results and compare.
+ sum := uint32(0)
+ count := uint32(0)
+ vlog.VI(1).Infof("Start consumers")
+ for i := 0; i != readerCount; i++ {
+ pid := i
+ go func() {
+ for {
+ c := atomic.LoadUint32(&count)
+ if c == writerCount*elementCount {
+ break
+ }
+
+				// Cancellation is required for termination.
+ item, err := queue.Get(cancel)
+ if err != nil {
+ continue
+ }
+ atomic.AddUint32(&sum, uint32(item.(int)))
+ atomic.AddUint32(&count, 1)
+ }
+ vlog.VI(1).Infof("Consumer %d done", pid)
+ pending.Done()
+ }()
+ }
+
+ // Generate the sequential ints.
+ vlog.VI(1).Infof("Start producers")
+ for i := 0; i != writerCount; i++ {
+ pid := i
+ go func() {
+ for j := 0; j != elementCount; j++ {
+ err := queue.Put(j, nil)
+ if err != nil {
+ t.Errorf("Put: %v", err)
+ }
+ }
+ vlog.VI(1).Infof("Producer %d done", pid)
+ pending.Done()
+ }()
+ }
+
+ vlog.VI(1).Infof("Start termination checker")
+ go func() {
+ pending.Wait()
+ done <- struct{}{}
+ }()
+
+ vlog.VI(1).Infof("Wait for processes")
+ stop := false
+ for !stop {
+ time.Sleep(100 * time.Millisecond)
+ select {
+ case <-done:
+ stop = true
+ default:
+ cancel <- struct{}{}
+ }
+ }
+
+ vlog.VI(1).Infof("Checking the sum")
+ expected := writerCount * elementCount * (elementCount - 1) / 2
+ s := atomic.LoadUint32(&sum)
+ if s != uint32(expected) {
+ t.Errorf("Expected sum %d, received %d", expected, sum)
+ }
+}
+
+func TestSimpleTryPut(t *testing.T) {
+ q := New(1)
+ if err := q.TryPut(1); err != nil {
+ t.Errorf("TryPut(1) got error: %q", err)
+ }
+
+ if err := q.TryPut(2); err != ErrTryAgain {
+ t.Errorf("TryPut(2) got error: %q; want: %q", err, ErrTryAgain)
+ }
+
+ if item, err := q.Get(nil); err != nil {
+ t.Errorf("Get() got error: %q", err)
+ } else if item.(int) != 1 {
+ t.Errorf("Get() = %v; want: %v", item, 1)
+ }
+
+ q.Close()
+ if err := q.TryPut(3); err != ErrQueueIsClosed {
+ t.Errorf("TryPut(3) got error: %q; want: %q", err, ErrQueueIsClosed)
+ }
+}
+
+func TestSequentialTryPut(t *testing.T) {
+ q := New(queueSize)
+ const numIter = 5
+ for i := 0; i < numIter; i++ {
+ // All succeed.
+ for j := i * queueSize; j < (i+1)*queueSize; j++ {
+ if err := q.TryPut(j); err != nil {
+ t.Errorf("TryPut(%v) returned error: %q", j, err)
+ }
+ }
+ // All fail.
+ for j := (i + 1) * queueSize; j < (i+2)*queueSize; j++ {
+ if err := q.TryPut(j); err != ErrTryAgain {
+ t.Errorf("TryPut(%v) returned error %q; want %q", j, err, ErrTryAgain)
+ }
+ }
+ // Empty the queue.
+ for j := i * queueSize; j < (i+1)*queueSize; j++ {
+ item, err := q.Get(nil)
+ if err != nil {
+ t.Errorf("Get() returned error: %q", err)
+ } else if item.(int) != j {
+ t.Errorf("Get() = %v; want %v", item.(int), j)
+ }
+ }
+ }
+
+ q.Close()
+ for i := numIter * queueSize; i < (numIter+1)*queueSize; i++ {
+ if err := q.TryPut(i); err != ErrQueueIsClosed {
+ t.Errorf("TryPut(%v) returned error %q; want %q", i, err, ErrQueueIsClosed)
+ }
+ }
+}
+
+func TestConcurrentTryPut(t *testing.T) {
+ q := New(queueSize)
+ pending := &sync.WaitGroup{}
+ for i := 0; i != writerCount; i++ {
+ pending.Add(1)
+		go func(i int) {
+ for j := 0; j != elementCount; j++ {
+ // TryPut(j) until we succeed.
+ for {
+ err := q.TryPut(j)
+ if err == nil {
+ break
+ }
+ if err == ErrTryAgain {
+ time.Sleep(1 * time.Millisecond)
+ } else {
+ t.Errorf("%v: TryPut(%v) returned error %q; want %q", i, j, err, ErrTryAgain)
+ }
+ }
+ }
+ pending.Done()
+		}(i)
+ }
+
+ // Sum up the results and compare.
+ sum := 0
+ for i := 0; i != writerCount*elementCount; i++ {
+ item, err := q.Get(nil)
+ if err != nil {
+ t.Errorf("Get() returned error: %q", err)
+ continue
+ }
+ if item == nil {
+ continue
+ }
+ sum += item.(int)
+ }
+
+ if expected := writerCount * elementCount * (elementCount - 1) / 2; sum != expected {
+		t.Errorf("got sum %v, want %v", sum, expected)
+ }
+
+ pending.Wait()
+}
diff --git a/profiles/internal/lib/pcqueue/v23_internal_test.go b/profiles/internal/lib/pcqueue/v23_internal_test.go
new file mode 100644
index 0000000..15b84d8
--- /dev/null
+++ b/profiles/internal/lib/pcqueue/v23_internal_test.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated via go generate.
+// DO NOT UPDATE MANUALLY
+package pcqueue
+
+import "testing"
+import "os"
+
+import "v.io/x/ref/lib/testutil"
+
+func TestMain(m *testing.M) {
+ testutil.Init()
+ os.Exit(m.Run())
+}
diff --git a/profiles/internal/lib/reflectutil/all_test.go b/profiles/internal/lib/reflectutil/all_test.go
new file mode 100644
index 0000000..eebf63e
--- /dev/null
+++ b/profiles/internal/lib/reflectutil/all_test.go
@@ -0,0 +1,363 @@
+package reflectutil
+
+import (
+ "reflect"
+ "testing"
+ "unsafe"
+)
+
+type (
+ Struct struct {
+ A uint
+ B string
+ }
+
+ Recurse struct {
+ U uint
+ R *Recurse
+ }
+
+ RecurseA struct {
+ Ua uint
+ B *RecurseB
+ }
+ RecurseB struct {
+ Ub uint
+ A *RecurseA
+ }
+
+ abIntPtr struct {
+ A, B *int
+ }
+)
+
+var (
+ recurseCycle *Recurse = &Recurse{}
+ recurseABCycle *RecurseA = &RecurseA{}
+
+ intPtr1a *int = new(int)
+ intPtr1b *int = new(int)
+
+ iface interface{}
+)
+
+func init() {
+ recurseCycle.U = 5
+ recurseCycle.R = recurseCycle
+
+ recurseABCycle.Ua = 5
+ recurseABCycle.B = &RecurseB{6, recurseABCycle}
+
+ *intPtr1a = 1
+ *intPtr1b = 1
+}
+
+func TestDeepEqual(t *testing.T) {
+ tests := []struct {
+ a, b interface{}
+ expect bool
+ }{
+ {true, true, true},
+ {1, 1, true},
+ {-1, -1, true},
+ {1.1, 1.1, true},
+ {"abc", "abc", true},
+ {1 + 1i, 1 + 1i, true},
+ {[2]uint{1, 1}, [2]uint{1, 1}, true},
+ {[]uint{1, 1}, []uint{1, 1}, true},
+ {map[uint]string{1: "1", 2: "2"}, map[uint]string{1: "1", 2: "2"}, true},
+ {Struct{1, "a"}, Struct{1, "a"}, true},
+ {recurseCycle, recurseCycle, true},
+ {recurseABCycle, recurseABCycle, true},
+ {abIntPtr{intPtr1a, intPtr1a}, abIntPtr{intPtr1a, intPtr1a}, true},
+ {abIntPtr{intPtr1a, intPtr1b}, abIntPtr{intPtr1a, intPtr1b}, true},
+
+ {true, false, false},
+ {1, 2, false},
+ {-1, -2, false},
+ {1.1, 2.2, false},
+ {"abc", "def", false},
+ {1 + 1i, 2 + 2i, false},
+ {[2]uint{1, 1}, [2]uint{2, 2}, false},
+ {[]uint{1, 1}, []uint{2, 2}, false},
+ {map[uint]string{1: "1", 2: "2"}, map[uint]string{3: "3", 4: "4"}, false},
+ {Struct{1, "a"}, Struct{1, "b"}, false},
+ {recurseCycle, &Recurse{5, &Recurse{5, nil}}, false},
+ {recurseABCycle, &RecurseA{5, &RecurseB{6, nil}}, false},
+ {abIntPtr{intPtr1a, intPtr1a}, abIntPtr{intPtr1a, intPtr1b}, false},
+ {abIntPtr{intPtr1a, intPtr1b}, abIntPtr{intPtr1a, intPtr1a}, false},
+ }
+ for _, test := range tests {
+ actual := DeepEqual(test.a, test.b, &DeepEqualOpts{})
+ if actual != test.expect {
+ t.Errorf("DeepEqual(%#v, %#v) != %v", test.a, test.b, test.expect)
+ }
+ }
+}
+
+func TestAreComparable(t *testing.T) {
+ tests := []struct {
+ a, b interface{}
+ expect bool
+ }{
+ {true, true, true},
+ {"", "", true},
+ {uint(0), uint(0), true},
+ {uint8(0), uint8(0), true},
+ {uint16(0), uint16(0), true},
+ {uint32(0), uint32(0), true},
+ {uint64(0), uint64(0), true},
+ {uintptr(0), uintptr(0), true},
+ {int(0), int(0), true},
+ {int8(0), int8(0), true},
+ {int16(0), int16(0), true},
+ {int32(0), int32(0), true},
+ {int64(0), int64(0), true},
+ {float32(0), float32(0), true},
+ {float64(0), float64(0), true},
+ {complex64(0), complex64(0), true},
+ {complex128(0), complex128(0), true},
+ {[2]uint{1, 1}, [2]uint{1, 1}, true},
+ {[]uint{1, 1}, []uint{1, 1}, true},
+ {Struct{1, "a"}, Struct{1, "a"}, true},
+ {(*int)(nil), (*int)(nil), true},
+ {recurseCycle, recurseCycle, true},
+ {recurseABCycle, recurseABCycle, true},
+ {abIntPtr{intPtr1a, intPtr1a}, abIntPtr{intPtr1a, intPtr1a}, true},
+ {abIntPtr{intPtr1a, intPtr1b}, abIntPtr{intPtr1a, intPtr1b}, true},
+
+ {map[uint]string{1: "1"}, map[uint]string{1: "1"}, false},
+ {&iface, &iface, false},
+ {make(chan int), make(chan int), false},
+ {TestAreComparable, TestAreComparable, false},
+ {unsafe.Pointer(nil), unsafe.Pointer(nil), false},
+ }
+ for _, test := range tests {
+ actual := AreComparable(test.a, test.b)
+ if actual != test.expect {
+ t.Errorf("AreComparable(%#v, %#v) != %v", test.a, test.b, test.expect)
+ }
+ }
+}
+
+func TestLess(t *testing.T) {
+ for _, test := range compareTests {
+ actual := Less(test.a, test.b)
+ expect := false
+ if test.expect == -1 {
+ expect = true // For eq and gt we expect Less to return false.
+ }
+ if actual != expect {
+ t.Errorf("Less(%#v, %#v) != %v", test.a, test.b, expect)
+ }
+ }
+}
+
+func TestCompare(t *testing.T) {
+ for _, test := range compareTests {
+ actual := Compare(test.a, test.b)
+ if actual != test.expect {
+ t.Errorf("Compare(%#v, %#v) != %v", test.a, test.b, test.expect)
+ }
+ }
+}
+
+var compareTests = []struct {
+ a, b interface{}
+ expect int
+}{
+ {false, true, -1},
+ {false, false, 0},
+ {true, false, +1},
+ {true, true, 0},
+
+ {"", "aa", -1},
+ {"a", "aa", -1},
+ {"aa", "ab", -1},
+ {"aa", "b", -1},
+ {"", "", 0},
+ {"aa", "", +1},
+ {"aa", "a", +1},
+ {"ab", "aa", +1},
+ {"b", "aa", +1},
+
+ {uint(0), uint(1), -1},
+ {uint(0), uint(0), 0},
+ {uint(1), uint(0), +1},
+ {uint(1), uint(1), 0},
+
+ {int(-1), int(+1), -1},
+ {int(-1), int(-1), 0},
+ {int(+1), int(-1), +1},
+ {int(+1), int(+1), 0},
+
+ {float32(-1.1), float32(+1.1), -1},
+ {float32(-1.1), float32(-1.1), 0},
+ {float32(+1.1), float32(-1.1), +1},
+ {float32(+1.1), float32(+1.1), 0},
+
+ {complex64(1 + 1i), complex64(1 + 2i), -1},
+ {complex64(1 + 2i), complex64(2 + 1i), -1},
+ {complex64(1 + 2i), complex64(2 + 2i), -1},
+ {complex64(1 + 2i), complex64(2 + 3i), -1},
+ {complex64(1 + 1i), complex64(1 + 1i), 0},
+ {complex64(1 + 2i), complex64(1 + 1i), +1},
+ {complex64(2 + 1i), complex64(1 + 2i), +1},
+ {complex64(2 + 2i), complex64(1 + 2i), +1},
+ {complex64(2 + 3i), complex64(1 + 2i), +1},
+
+ {[2]int{1, 1}, [2]int{1, 2}, -1},
+ {[2]int{1, 2}, [2]int{2, 1}, -1},
+ {[2]int{1, 2}, [2]int{2, 2}, -1},
+ {[2]int{1, 2}, [2]int{2, 3}, -1},
+ {[2]int{1, 1}, [2]int{1, 1}, 0},
+ {[2]int{1, 2}, [2]int{1, 1}, +1},
+ {[2]int{2, 1}, [2]int{1, 2}, +1},
+ {[2]int{2, 2}, [2]int{1, 2}, +1},
+ {[2]int{2, 3}, [2]int{1, 2}, +1},
+
+ {[]int{}, []int{1, 1}, -1},
+ {[]int{1}, []int{1, 1}, -1},
+ {[]int{1, 1}, []int{}, +1},
+ {[]int{1, 1}, []int{1}, +1},
+ {[]int{1, 1}, []int{1, 2}, -1},
+ {[]int{1, 2}, []int{2, 1}, -1},
+ {[]int{1, 2}, []int{2, 2}, -1},
+ {[]int{1, 2}, []int{2, 3}, -1},
+ {[]int{1, 1}, []int{1, 1}, 0},
+ {[]int{1, 2}, []int{1, 1}, +1},
+ {[]int{2, 1}, []int{1, 2}, +1},
+ {[]int{2, 2}, []int{1, 2}, +1},
+ {[]int{2, 3}, []int{1, 2}, +1},
+
+ {Struct{1, "a"}, Struct{1, "b"}, -1},
+ {Struct{1, "b"}, Struct{2, "a"}, -1},
+ {Struct{1, "b"}, Struct{2, "b"}, -1},
+ {Struct{1, "b"}, Struct{2, "c"}, -1},
+ {Struct{1, "a"}, Struct{1, "a"}, 0},
+ {Struct{1, "b"}, Struct{1, "a"}, +1},
+ {Struct{2, "a"}, Struct{1, "b"}, +1},
+ {Struct{2, "b"}, Struct{1, "b"}, +1},
+ {Struct{2, "c"}, Struct{1, "b"}, +1},
+
+ {(*Struct)(nil), &Struct{1, "a"}, -1},
+ {&Struct{1, "a"}, &Struct{1, "b"}, -1},
+ {&Struct{1, "b"}, &Struct{2, "a"}, -1},
+ {&Struct{1, "b"}, &Struct{2, "b"}, -1},
+ {&Struct{1, "b"}, &Struct{2, "c"}, -1},
+ {(*Struct)(nil), (*Struct)(nil), 0},
+ {&Struct{1, "a"}, (*Struct)(nil), +1},
+ {&Struct{1, "a"}, &Struct{1, "a"}, 0},
+ {&Struct{1, "b"}, &Struct{1, "a"}, +1},
+ {&Struct{2, "a"}, &Struct{1, "b"}, +1},
+ {&Struct{2, "b"}, &Struct{1, "b"}, +1},
+ {&Struct{2, "c"}, &Struct{1, "b"}, +1},
+}
+
+type v []interface{}
+
+func toRVS(values v) (rvs []reflect.Value) {
+ for _, val := range values {
+ rvs = append(rvs, reflect.ValueOf(val))
+ }
+ return
+}
+
+func fromRVS(rvs []reflect.Value) (values v) {
+ for _, rv := range rvs {
+ values = append(values, rv.Interface())
+ }
+ return
+}
+
+func TestTrySortValues(t *testing.T) {
+ tests := []struct {
+ values v
+ expect v
+ }{
+ {
+ v{true, false},
+ v{false, true},
+ },
+ {
+ v{"c", "b", "a"},
+ v{"a", "b", "c"},
+ },
+ {
+ v{3, 1, 2},
+ v{1, 2, 3},
+ },
+ {
+ v{3.3, 1.1, 2.2},
+ v{1.1, 2.2, 3.3},
+ },
+ {
+ v{3 + 3i, 1 + 1i, 2 + 2i},
+ v{1 + 1i, 2 + 2i, 3 + 3i},
+ },
+ {
+ v{[1]int{3}, [1]int{1}, [1]int{2}},
+ v{[1]int{1}, [1]int{2}, [1]int{3}},
+ },
+ {
+ v{[]int{3}, []int{}, []int{2, 2}},
+ v{[]int{}, []int{2, 2}, []int{3}},
+ },
+ {
+ v{Struct{3, "c"}, Struct{1, "a"}, Struct{2, "b"}},
+ v{Struct{1, "a"}, Struct{2, "b"}, Struct{3, "c"}},
+ },
+ {
+ v{&Struct{3, "c"}, (*Struct)(nil), &Struct{2, "b"}},
+ v{(*Struct)(nil), &Struct{2, "b"}, &Struct{3, "c"}},
+ },
+ }
+ for _, test := range tests {
+ actual := fromRVS(TrySortValues(toRVS(test.values)))
+ if !reflect.DeepEqual(actual, test.expect) {
+ t.Errorf("TrySortValues(%v) got %v, want %v", test.values, actual, test.expect)
+ }
+ }
+}
+
+func TestOptionSliceEqNilEmpty(t *testing.T) {
+ tests := []struct {
+ first interface{}
+ second interface{}
+ resultWithoutOption bool
+ resultWithOption bool
+ }{
+ {
+ []int{}, []int{}, true, true,
+ },
+ {
+ []int(nil), []int(nil), true, true,
+ },
+ {
+ []int{}, []int(nil), false, true,
+ },
+ {
+ []([]int){([]int)(nil)}, []([]int){[]int{}}, false, true,
+ },
+ }
+
+ for _, nilEqOpt := range []bool{true, false} {
+ for _, test := range tests {
+ options := &DeepEqualOpts{
+ SliceEqNilEmpty: nilEqOpt,
+ }
+
+ result := DeepEqual(test.first, test.second, options)
+
+ if nilEqOpt {
+ if result != test.resultWithOption {
+ t.Errorf("Unexpected result with SliceEqNilEmpty option: inputs %#v and %#v. Got %v, expected: %v", test.first, test.second, result, test.resultWithOption)
+ }
+ } else {
+ if result != test.resultWithoutOption {
+ t.Errorf("Unexpected result without SliceEqNilEmpty option: inputs %#v and %#v. Got %v, expected: %v", test.first, test.second, result, test.resultWithoutOption)
+ }
+ }
+ }
+ }
+}
diff --git a/profiles/internal/lib/reflectutil/deepequal.go b/profiles/internal/lib/reflectutil/deepequal.go
new file mode 100644
index 0000000..242f888
--- /dev/null
+++ b/profiles/internal/lib/reflectutil/deepequal.go
@@ -0,0 +1,152 @@
+package reflectutil
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// DeepEqual is similar to reflect.DeepEqual, except that it also
+// considers the sharing structure for pointers. When reflect.DeepEqual
+// encounters pointers it just compares the dereferenced values; we also keep
+// track of the pointers themselves and require that if a pointer appears
+// multiple places in a, it appears in the same places in b.
+func DeepEqual(a, b interface{}, options *DeepEqualOpts) bool {
+ return deepEqual(reflect.ValueOf(a), reflect.ValueOf(b), &orderInfo{}, options)
+}
+
+// TODO(bprosnitz) Implement the debuggable deep equal option.
+// TODO(bprosnitz) Add an option to turn pointer sharing on/off
+
+// DeepEqualOpts represents the options configuration for DeepEqual.
+type DeepEqualOpts struct {
+ SliceEqNilEmpty bool
+}
+
+// orderInfo tracks pointer ordering information. As we encounter new pointers
+// in our a and b values we maintain their ordering information in the slices.
+// We use slices rather than maps for efficiency; we typically have a small
+// number of pointers and sequential lookup is fast.
+type orderInfo struct {
+ orderA, orderB []uintptr
+}
+
+func lookupPtr(items []uintptr, target uintptr) (int, bool) { // (index, seen)
+ for index, item := range items {
+ if item == target {
+ return index, true
+ }
+ }
+ return -1, false
+}
+
+// sharingEqual returns equal=true iff the sharing structure between a and b is
+// the same, and returns seen=true iff we've seen either a or b before.
+func (info *orderInfo) sharingEqual(a, b uintptr) (bool, bool) { // (equal, seen)
+ indexA, seenA := lookupPtr(info.orderA, a)
+ indexB, seenB := lookupPtr(info.orderB, b)
+ if seenA || seenB {
+ return seenA == seenB && indexA == indexB, seenA || seenB
+ }
+	// Neither pointer has been seen - add to our order slices and return.
+ info.orderA = append(info.orderA, a)
+ info.orderB = append(info.orderB, b)
+ return true, false
+}
+
+func deepEqual(a, b reflect.Value, info *orderInfo, options *DeepEqualOpts) bool {
+ // We only consider sharing via explicit pointers, and ignore sharing via
+ // slices, maps or pointers to internal data.
+ if !a.IsValid() || !b.IsValid() {
+ return a.IsValid() == b.IsValid()
+ }
+ if a.Type() != b.Type() {
+ return false
+ }
+ switch a.Kind() {
+ case reflect.Ptr:
+ if a.IsNil() || b.IsNil() {
+ return a.IsNil() == b.IsNil()
+ }
+ equal, seen := info.sharingEqual(a.Pointer(), b.Pointer())
+ if !equal {
+ return false // a and b are not equal if their sharing isn't equal.
+ }
+ if seen {
+ // Skip the deepEqual call if we've already seen the pointers and they're
+ // equal, otherwise we'll have an infinite loop for cyclic values.
+ return true
+ }
+ return deepEqual(a.Elem(), b.Elem(), info, options)
+ case reflect.Array:
+ if a.Len() != b.Len() {
+ return false
+ }
+ for ix := 0; ix < a.Len(); ix++ {
+ if !deepEqual(a.Index(ix), b.Index(ix), info, options) {
+ return false
+ }
+ }
+ return true
+ case reflect.Slice:
+ if !options.SliceEqNilEmpty {
+ if a.IsNil() || b.IsNil() {
+ return a.IsNil() == b.IsNil()
+ }
+ }
+ if a.Len() != b.Len() {
+ return false
+ }
+ for ix := 0; ix < a.Len(); ix++ {
+ if !deepEqual(a.Index(ix), b.Index(ix), info, options) {
+ return false
+ }
+ }
+ return true
+ case reflect.Map:
+ if a.IsNil() || b.IsNil() {
+ return a.IsNil() == b.IsNil()
+ }
+ if a.Len() != b.Len() {
+ return false
+ }
+ for _, key := range a.MapKeys() {
+ if !deepEqual(a.MapIndex(key), b.MapIndex(key), info, options) {
+ return false
+ }
+ }
+ return true
+ case reflect.Struct:
+ for fx := 0; fx < a.NumField(); fx++ {
+ if !deepEqual(a.Field(fx), b.Field(fx), info, options) {
+ return false
+ }
+ }
+ return true
+ case reflect.Interface:
+ if a.IsNil() || b.IsNil() {
+ return a.IsNil() == b.IsNil()
+ }
+ return deepEqual(a.Elem(), b.Elem(), info, options)
+
+ // Ideally we would add a default clause here that would just return
+ // a.Interface() == b.Interface(), but that panics if we're dealing with
+ // unexported fields. Instead we check each primitive type.
+
+ case reflect.Bool:
+ return a.Bool() == b.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return a.Int() == b.Int()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return a.Uint() == b.Uint()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() == b.Float()
+ case reflect.Complex64, reflect.Complex128:
+ return a.Complex() == b.Complex()
+ case reflect.String:
+ return a.String() == b.String()
+ case reflect.UnsafePointer:
+ return a.Pointer() == b.Pointer()
+ default:
+ panic(fmt.Errorf("SharingDeepEqual unhandled kind %v type %q", a.Kind(), a.Type()))
+ }
+}
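
A sketch of the sharing semantics that distinguish this DeepEqual from reflect.DeepEqual. The pair type is local to the example; the import is illustrative only since the package is internal.

package main

import (
	"fmt"

	"v.io/x/ref/profiles/internal/lib/reflectutil"
)

type pair struct {
	A, B *int
}

func main() {
	one, anotherOne := new(int), new(int)
	*one, *anotherOne = 1, 1

	shared := pair{one, one}          // A and B share one pointer.
	unshared := pair{one, anotherOne} // Equal values, different pointers.

	// reflect.DeepEqual would call these equal; this DeepEqual does not,
	// because the pointer sharing structure differs.
	opts := &reflectutil.DeepEqualOpts{}
	fmt.Println(reflectutil.DeepEqual(shared, shared, opts))   // true
	fmt.Println(reflectutil.DeepEqual(shared, unshared, opts)) // false
}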
diff --git a/profiles/internal/lib/reflectutil/doc.go b/profiles/internal/lib/reflectutil/doc.go
new file mode 100644
index 0000000..86a8bdb
--- /dev/null
+++ b/profiles/internal/lib/reflectutil/doc.go
@@ -0,0 +1,2 @@
+// Package reflectutil provides reflection-based utilities.
+package reflectutil
diff --git a/profiles/internal/lib/reflectutil/sort.go b/profiles/internal/lib/reflectutil/sort.go
new file mode 100644
index 0000000..b3631d1
--- /dev/null
+++ b/profiles/internal/lib/reflectutil/sort.go
@@ -0,0 +1,311 @@
+package reflectutil
+
+import (
+ "reflect"
+ "sort"
+)
+
+// AreComparable is a helper to call AreComparableTypes.
+func AreComparable(a, b interface{}) bool {
+ return AreComparableTypes(reflect.TypeOf(a), reflect.TypeOf(b))
+}
+
+// AreComparableTypes returns true iff a and b are comparable types: bools,
+// strings and numbers, and composites using arrays, slices, structs or
+// pointers.
+func AreComparableTypes(a, b reflect.Type) bool {
+ return areComparable(a, b, make(map[reflect.Type]bool))
+}
+
+func areComparable(a, b reflect.Type, seen map[reflect.Type]bool) bool {
+ if a.Kind() != b.Kind() {
+ if isUint(a) && isUint(b) || isInt(a) && isInt(b) || isFloat(a) && isFloat(b) || isComplex(a) && isComplex(b) {
+ return true // Special-case for comparable numbers.
+ }
+ return false // Different kinds are incomparable.
+ }
+
+ // Deal with cyclic types.
+ if seen[a] {
+ return true
+ }
+ seen[a] = true
+
+ switch a.Kind() {
+ case reflect.Bool, reflect.String,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+ return true
+ case reflect.Array, reflect.Slice, reflect.Ptr:
+ return areComparable(a.Elem(), b.Elem(), seen)
+ case reflect.Struct:
+ if a.NumField() != b.NumField() {
+ return false
+ }
+ for fx := 0; fx < a.NumField(); fx++ {
+ af := a.Field(fx)
+ bf := b.Field(fx)
+ if af.Name != bf.Name || af.PkgPath != bf.PkgPath {
+ return false
+ }
+ if !areComparable(af.Type, bf.Type, seen) {
+ return false
+ }
+ }
+ return true
+ default:
+ // Unhandled: Map, Interface, Chan, Func, UnsafePointer
+ return false
+ }
+}
+
+func isUint(rt reflect.Type) bool {
+ switch rt.Kind() {
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return true
+ }
+ return false
+}
+
+func isInt(rt reflect.Type) bool {
+ switch rt.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return true
+ }
+ return false
+}
+
+func isFloat(rt reflect.Type) bool {
+ switch rt.Kind() {
+ case reflect.Float32, reflect.Float64:
+ return true
+ }
+ return false
+}
+
+func isComplex(rt reflect.Type) bool {
+ switch rt.Kind() {
+ case reflect.Complex64, reflect.Complex128:
+ return true
+ }
+ return false
+}
+
+// Less is a helper to call LessValues.
+func Less(a, b interface{}) bool {
+ return LessValues(reflect.ValueOf(a), reflect.ValueOf(b))
+}
+
+// LessValues returns true iff a and b are comparable and a < b. If a and b are
+// incomparable an arbitrary value is returned. Cyclic values are not handled;
+// if a and b are cyclic and equal, this will infinite loop. Arrays, slices and
+// structs use lexicographic ordering, and complex numbers compare real before
+// imaginary.
+func LessValues(a, b reflect.Value) bool {
+ if a.Kind() != b.Kind() {
+ return false // Different kinds are incomparable.
+ }
+ switch a.Kind() {
+ case reflect.Bool:
+ return lessBool(a.Bool(), b.Bool())
+ case reflect.String:
+ return a.String() < b.String()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return a.Int() < b.Int()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.Complex64, reflect.Complex128:
+ return lessComplex(a.Complex(), b.Complex())
+ case reflect.Array:
+ return compareArray(a, b) == -1
+ case reflect.Slice:
+ return compareSlice(a, b) == -1
+ case reflect.Struct:
+ return compareStruct(a, b) == -1
+ case reflect.Ptr:
+ if a.IsNil() || b.IsNil() {
+ return a.IsNil() && !b.IsNil() // nil is less than non-nil.
+ }
+ return LessValues(a.Elem(), b.Elem())
+ default:
+ return false
+ }
+}
+
+func lessBool(a, b bool) bool {
+ return !a && b // false < true
+}
+
+func lessComplex(a, b complex128) bool {
+ // Compare lexicographically, real part before imaginary part.
+ if real(a) == real(b) {
+ return imag(a) < imag(b)
+ }
+ return real(a) < real(b)
+}
+
+// Compare is a helper to call CompareValues.
+func Compare(a, b interface{}) int {
+ return CompareValues(reflect.ValueOf(a), reflect.ValueOf(b))
+}
+
+// CompareValues returns an integer comparing two values. If a and b are
+// comparable, the result is 0 if a == b, -1 if a < b and +1 if a > b. If a and
+// b are incomparable an arbitrary value is returned. Arrays, slices and
+// structs use lexicographic ordering, and complex numbers compare real before
+// imaginary.
+func CompareValues(a, b reflect.Value) int {
+ if a.Kind() != b.Kind() {
+ return 0 // Different kinds are incomparable.
+ }
+ switch a.Kind() {
+ case reflect.Array:
+ return compareArray(a, b)
+ case reflect.Slice:
+ return compareSlice(a, b)
+ case reflect.Struct:
+ return compareStruct(a, b)
+ case reflect.Ptr:
+ if a.IsNil() || b.IsNil() {
+ if a.IsNil() && !b.IsNil() {
+ return -1
+ }
+ if !a.IsNil() && b.IsNil() {
+ return +1
+ }
+ return 0
+ }
+ return CompareValues(a.Elem(), b.Elem())
+ }
+ if LessValues(a, b) {
+ return -1 // a < b
+ }
+ if LessValues(b, a) {
+ return +1 // a > b
+ }
+ return 0 // a == b, or incomparable.
+}
+
+func compareArray(a, b reflect.Value) int {
+ // Return lexicographic ordering of the array elements.
+ for ix := 0; ix < a.Len(); ix++ {
+ if c := CompareValues(a.Index(ix), b.Index(ix)); c != 0 {
+ return c
+ }
+ }
+ return 0
+}
+
+func compareSlice(a, b reflect.Value) int {
+ // Return lexicographic ordering of the slice elements.
+ for ix := 0; ix < a.Len() && ix < b.Len(); ix++ {
+ if c := CompareValues(a.Index(ix), b.Index(ix)); c != 0 {
+ return c
+ }
+ }
+ // Equal prefixes, shorter comes before longer.
+ if a.Len() < b.Len() {
+ return -1
+ }
+ if a.Len() > b.Len() {
+ return +1
+ }
+ return 0
+}
+
+func compareStruct(a, b reflect.Value) int {
+ // Return lexicographic ordering of the struct fields.
+ for ix := 0; ix < a.NumField(); ix++ {
+ if c := CompareValues(a.Field(ix), b.Field(ix)); c != 0 {
+ return c
+ }
+ }
+ return 0
+}
+
+// TrySortValues sorts a slice of reflect.Value if the value kind is supported.
+// Supported kinds are bools, strings and numbers, and composites using arrays,
+// slices, structs or pointers. Arrays, slices and structs use lexicographic
+// ordering, and complex numbers compare real before imaginary. If the values
+// in the slice aren't comparable or supported, the resulting ordering is
+// arbitrary.
+func TrySortValues(v []reflect.Value) []reflect.Value {
+ if len(v) <= 1 {
+ return v
+ }
+ switch v[0].Kind() {
+ case reflect.Bool:
+ sort.Sort(rvBools{v})
+ case reflect.String:
+ sort.Sort(rvStrings{v})
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ sort.Sort(rvUints{v})
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ sort.Sort(rvInts{v})
+ case reflect.Float32, reflect.Float64:
+ sort.Sort(rvFloats{v})
+ case reflect.Complex64, reflect.Complex128:
+ sort.Sort(rvComplexes{v})
+ case reflect.Array:
+ sort.Sort(rvArrays{v})
+ case reflect.Slice:
+ sort.Sort(rvSlices{v})
+ case reflect.Struct:
+ sort.Sort(rvStructs{v})
+ case reflect.Ptr:
+ sort.Sort(rvPtrs{v})
+ }
+ return v
+}
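+
+// A minimal usage sketch (the values here are only illustrative):
+//
+//	vals := []reflect.Value{reflect.ValueOf(3), reflect.ValueOf(1), reflect.ValueOf(2)}
+//	vals = TrySortValues(vals)
+//	// vals now wraps 1, 2, 3 in ascending order.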
+
+// Sorting helpers, heavily inspired by similar code in text/template.
+
+type rvs []reflect.Value
+
+func (x rvs) Len() int { return len(x) }
+func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+type rvBools struct{ rvs }
+type rvStrings struct{ rvs }
+type rvUints struct{ rvs }
+type rvInts struct{ rvs }
+type rvFloats struct{ rvs }
+type rvComplexes struct{ rvs }
+type rvArrays struct{ rvs }
+type rvSlices struct{ rvs }
+type rvStructs struct{ rvs }
+type rvPtrs struct{ rvs }
+
+func (x rvBools) Less(i, j int) bool {
+ return lessBool(x.rvs[i].Bool(), x.rvs[j].Bool())
+}
+func (x rvStrings) Less(i, j int) bool {
+ return x.rvs[i].String() < x.rvs[j].String()
+}
+func (x rvUints) Less(i, j int) bool {
+ return x.rvs[i].Uint() < x.rvs[j].Uint()
+}
+func (x rvInts) Less(i, j int) bool {
+ return x.rvs[i].Int() < x.rvs[j].Int()
+}
+func (x rvFloats) Less(i, j int) bool {
+ return x.rvs[i].Float() < x.rvs[j].Float()
+}
+func (x rvComplexes) Less(i, j int) bool {
+ return lessComplex(x.rvs[i].Complex(), x.rvs[j].Complex())
+}
+func (x rvArrays) Less(i, j int) bool {
+ return compareArray(x.rvs[i], x.rvs[j]) == -1
+}
+func (x rvSlices) Less(i, j int) bool {
+ return compareSlice(x.rvs[i], x.rvs[j]) == -1
+}
+func (x rvStructs) Less(i, j int) bool {
+ return compareStruct(x.rvs[i], x.rvs[j]) == -1
+}
+func (x rvPtrs) Less(i, j int) bool {
+ return LessValues(x.rvs[i], x.rvs[j])
+}
diff --git a/profiles/internal/naming/doc.go b/profiles/internal/naming/doc.go
new file mode 100644
index 0000000..ce72738
--- /dev/null
+++ b/profiles/internal/naming/doc.go
@@ -0,0 +1,2 @@
+// Package naming provides an implementation of the interfaces in veyron2/naming.
+package naming
diff --git a/profiles/internal/naming/endpoint.go b/profiles/internal/naming/endpoint.go
new file mode 100644
index 0000000..1300684
--- /dev/null
+++ b/profiles/internal/naming/endpoint.go
@@ -0,0 +1,280 @@
+package naming
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "v.io/v23/ipc/version"
+ "v.io/v23/naming"
+)
+
+const (
+ separator = "@"
+ suffix = "@@"
+ blessingsSeparator = ","
+)
+
+var (
+ errInvalidEndpointString = errors.New("invalid endpoint string")
+ hostportEP = regexp.MustCompile("^(?:(.*)@)?([^@]+)$")
+)
+
+// Network is the string returned by naming.Endpoint.Network implementations
+// defined in this package.
+const Network = "veyron"
+
+// Endpoint is a naming.Endpoint implementation used to convey RPC information.
+type Endpoint struct {
+ Protocol string
+ Address string
+ RID naming.RoutingID
+ MinIPCVersion version.IPCVersion
+ MaxIPCVersion version.IPCVersion
+ Blessings []string
+ IsMountTable bool
+}
+
+// NewEndpoint creates a new endpoint from a string as per naming.NewEndpoint.
+func NewEndpoint(input string) (*Endpoint, error) {
+ ep := new(Endpoint)
+
+ // We have to guess this is a mount table if we don't know.
+ ep.IsMountTable = true
+
+ // If the endpoint does not end in a @, it must be in [blessing@]host:port format.
+ if parts := hostportEP.FindStringSubmatch(input); len(parts) > 0 {
+ hostport := parts[len(parts)-1]
+ var blessing string
+ if len(parts) > 2 {
+ blessing = parts[1]
+ }
+ err := ep.parseHostPort(blessing, hostport)
+ return ep, err
+ }
+ // Trim the prefix and suffix and parse the rest.
+ input = strings.TrimPrefix(strings.TrimSuffix(input, suffix), separator)
+ parts := strings.Split(input, separator)
+ version, err := strconv.ParseUint(parts[0], 10, 16)
+ if err != nil {
+ return nil, fmt.Errorf("invalid version: %v", err)
+ }
+
+ switch version {
+ case 1:
+ err = ep.parseV1(parts)
+ case 2:
+ err = ep.parseV2(parts)
+ case 3:
+ err = ep.parseV3(parts)
+ case 4:
+ err = ep.parseV4(parts)
+ default:
+ err = errInvalidEndpointString
+ }
+ return ep, err
+}
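+
+// For illustration, NewEndpoint accepts both the bare host:port shorthand and
+// the full @-separated form (the addresses below are made up):
+//
+//	ep, _ := NewEndpoint("ns.example.com:8101") // host:port, UnknownProtocol
+//	ep, _ = NewEndpoint("@3@tcp@127.0.0.1:8101@00000000000000000000000000000000@@@m@@")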
+
+func (ep *Endpoint) parseHostPort(blessing, hostport string) error {
+ // Could be in host:port format.
+ if _, _, err := net.SplitHostPort(hostport); err != nil {
+ return errInvalidEndpointString
+ }
+ ep.Protocol = naming.UnknownProtocol
+ ep.Address = hostport
+ ep.RID = naming.NullRoutingID
+ if len(blessing) > 0 {
+ ep.Blessings = []string{blessing}
+ }
+ return nil
+}
+
+func (ep *Endpoint) parseV1(parts []string) error {
+ if len(parts) != 4 {
+ return errInvalidEndpointString
+ }
+
+ ep.Protocol = parts[1]
+ if len(ep.Protocol) == 0 {
+ ep.Protocol = naming.UnknownProtocol
+ }
+
+ ep.Address = parts[2]
+ if len(ep.Address) == 0 {
+ ep.Address = net.JoinHostPort("", "0")
+ }
+
+ if err := ep.RID.FromString(parts[3]); err != nil {
+ return fmt.Errorf("invalid routing id: %v", err)
+ }
+
+ return nil
+}
+
+func parseIPCVersion(input string) (version.IPCVersion, error) {
+ if input == "" {
+ return version.UnknownIPCVersion, nil
+ }
+ v, err := strconv.ParseUint(input, 10, 32)
+ if err != nil {
+ err = fmt.Errorf("invalid IPC version: %s, %v", input, err)
+ }
+ return version.IPCVersion(v), err
+}
+
+func printIPCVersion(v version.IPCVersion) string {
+ if v == version.UnknownIPCVersion {
+ return ""
+ }
+ return strconv.FormatUint(uint64(v), 10)
+}
+
+func parseMountTableFlag(input string) (bool, error) {
+ switch len(input) {
+ case 0:
+ return true, nil
+ case 1:
+ switch f := input[0]; f {
+ case 'm':
+ return true, nil
+ case 's':
+ return false, nil
+ default:
+ return false, fmt.Errorf("%c is not one of 'm' or 's'", f)
+ }
+ }
+ return false, fmt.Errorf("flag is too long")
+}
+
+func (ep *Endpoint) parseV2(parts []string) error {
+ var err error
+ if len(parts) != 6 {
+ return errInvalidEndpointString
+ }
+ if err = ep.parseV1(parts[:4]); err != nil {
+ return err
+ }
+ if ep.MinIPCVersion, err = parseIPCVersion(parts[4]); err != nil {
+ return fmt.Errorf("invalid IPC version: %v", err)
+ }
+ if ep.MaxIPCVersion, err = parseIPCVersion(parts[5]); err != nil {
+ return fmt.Errorf("invalid IPC version: %v", err)
+ }
+ return nil
+}
+
+func (ep *Endpoint) parseV3(parts []string) error {
+ var err error
+ if len(parts) != 7 {
+ return errInvalidEndpointString
+ }
+ if err = ep.parseV2(parts[:6]); err != nil {
+ return err
+ }
+ if ep.IsMountTable, err = parseMountTableFlag(parts[6]); err != nil {
+ return fmt.Errorf("invalid mount table flag: %v", err)
+ }
+ return nil
+}
+
+func (ep *Endpoint) parseV4(parts []string) error {
+ if len(parts) < 7 {
+ return errInvalidEndpointString
+ }
+ if err := ep.parseV3(parts[:7]); err != nil {
+ return err
+ }
+ // Join the remaining and re-split.
+ if str := strings.Join(parts[7:], separator); len(str) > 0 {
+ ep.Blessings = strings.Split(str, blessingsSeparator)
+ }
+ return nil
+}
+
+func (ep *Endpoint) RoutingID() naming.RoutingID {
+ //nologcall
+ return ep.RID
+}
+func (ep *Endpoint) Network() string {
+ //nologcall
+ return Network
+}
+
+var defaultVersion = 3 // TODO(ashankar): Change to 4?
+
+func (ep *Endpoint) VersionedString(version int) string {
+ switch version {
+ default:
+ return ep.VersionedString(defaultVersion)
+ case 1:
+ return fmt.Sprintf("@1@%s@%s@@", ep.Protocol, ep.Address)
+ case 2:
+ return fmt.Sprintf("@2@%s@%s@%s@%s@%s@@",
+ ep.Protocol, ep.Address, ep.RID,
+ printIPCVersion(ep.MinIPCVersion), printIPCVersion(ep.MaxIPCVersion))
+ case 3:
+ mt := "s"
+ if ep.IsMountTable {
+ mt = "m"
+ }
+ return fmt.Sprintf("@3@%s@%s@%s@%s@%s@%s@@",
+ ep.Protocol, ep.Address, ep.RID,
+ printIPCVersion(ep.MinIPCVersion), printIPCVersion(ep.MaxIPCVersion),
+ mt)
+ case 4:
+ mt := "s"
+ blessings := strings.Join(ep.Blessings, blessingsSeparator)
+ if ep.IsMountTable {
+ mt = "m"
+ }
+ return fmt.Sprintf("@4@%s@%s@%s@%s@%s@%s@%s@@",
+ ep.Protocol, ep.Address, ep.RID,
+ printIPCVersion(ep.MinIPCVersion), printIPCVersion(ep.MaxIPCVersion),
+ mt, blessings)
+ }
+}
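+
+// For example, a version-3 string for a mount table endpoint with the null
+// routing id and no IPC version range (address illustrative) has the shape:
+//
+//	@3@tcp@127.0.0.1:8101@00000000000000000000000000000000@@@m@@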
+
+func (ep *Endpoint) String() string {
+ //nologcall
+ // Use version 4 if blessings are present, otherwise there is a loss of information.
+ v := defaultVersion
+ if len(ep.Blessings) > 0 && v < 4 {
+ v = 4
+ }
+ return ep.VersionedString(v)
+}
+
+func (ep *Endpoint) Name() string {
+ //nologcall
+ return naming.JoinAddressName(ep.String(), "")
+}
+
+func (ep *Endpoint) Addr() net.Addr {
+ //nologcall
+ return &addr{network: ep.Protocol, address: ep.Address}
+}
+
+func (ep *Endpoint) ServesMountTable() bool {
+ //nologcall
+ return ep.IsMountTable
+}
+
+func (ep *Endpoint) BlessingNames() []string {
+ //nologcall
+ return ep.Blessings
+}
+
+type addr struct {
+ network, address string
+}
+
+func (a *addr) Network() string {
+ return a.network
+}
+
+func (a *addr) String() string {
+ return a.address
+}
diff --git a/profiles/internal/naming/endpoint_test.go b/profiles/internal/naming/endpoint_test.go
new file mode 100644
index 0000000..de13f54
--- /dev/null
+++ b/profiles/internal/naming/endpoint_test.go
@@ -0,0 +1,275 @@
+package naming
+
+import (
+ "net"
+ "reflect"
+ "testing"
+
+ "v.io/v23/ipc/version"
+ "v.io/v23/naming"
+)
+
+func TestEndpoint(t *testing.T) {
+ defver := defaultVersion
+ defer func() {
+ defaultVersion = defver
+ }()
+ v1 := &Endpoint{
+ Protocol: naming.UnknownProtocol,
+ Address: "batman.com:1234",
+ RID: naming.FixedRoutingID(0xdabbad00),
+ IsMountTable: true,
+ }
+ v2 := &Endpoint{
+ Protocol: naming.UnknownProtocol,
+ Address: "batman.com:2345",
+ RID: naming.FixedRoutingID(0xdabbad00),
+ MinIPCVersion: 1,
+ MaxIPCVersion: 10,
+ IsMountTable: true,
+ }
+ v2hp := &Endpoint{
+ Protocol: naming.UnknownProtocol,
+ Address: "batman.com:2345",
+ RID: naming.FixedRoutingID(0x0),
+ MinIPCVersion: 2,
+ MaxIPCVersion: 3,
+ IsMountTable: true,
+ }
+ v3s := &Endpoint{
+ Protocol: naming.UnknownProtocol,
+ Address: "batman.com:2345",
+ RID: naming.FixedRoutingID(0x0),
+ MinIPCVersion: 2,
+ MaxIPCVersion: 3,
+ IsMountTable: false,
+ }
+ v3m := &Endpoint{
+ Protocol: naming.UnknownProtocol,
+ Address: "batman.com:2345",
+ RID: naming.FixedRoutingID(0xdabbad00),
+ MinIPCVersion: 2,
+ MaxIPCVersion: 3,
+ IsMountTable: true,
+ }
+ v3tcp := &Endpoint{
+ Protocol: "tcp",
+ Address: "batman.com:2345",
+ RID: naming.FixedRoutingID(0x0),
+ MinIPCVersion: 2,
+ MaxIPCVersion: 3,
+ IsMountTable: false,
+ }
+ v3ws6 := &Endpoint{
+ Protocol: "ws6",
+ Address: "batman.com:2345",
+ RID: naming.FixedRoutingID(0x0),
+ MinIPCVersion: 2,
+ MaxIPCVersion: 3,
+ IsMountTable: false,
+ }
+ v4 := &Endpoint{
+ Protocol: "tcp",
+ Address: "batman.com:2345",
+ RID: naming.FixedRoutingID(0xba77),
+ MinIPCVersion: 4,
+ MaxIPCVersion: 5,
+ IsMountTable: true,
+ Blessings: []string{"dev.v.io/foo@bar.com", "dev.v.io/bar@bar.com/delegate"},
+ }
+ v4b := &Endpoint{
+ Protocol: "tcp",
+ Address: "batman.com:2345",
+ RID: naming.FixedRoutingID(0xba77),
+ MinIPCVersion: 4,
+ MaxIPCVersion: 5,
+ IsMountTable: true,
+ // Blessings that look similar to other parts of the endpoint.
+ Blessings: []string{"@@", "@s", "@m"},
+ }
+
+ testcasesA := []struct {
+ endpoint naming.Endpoint
+ address string
+ }{
+ {v1, "batman.com:1234"},
+ {v2, "batman.com:2345"},
+ {v2hp, "batman.com:2345"},
+ }
+ for _, test := range testcasesA {
+ addr := test.endpoint.Addr()
+ if addr.String() != test.address {
+ t.Errorf("unexpected address %q, not %q", addr.String(), test.address)
+ }
+ }
+
+ // Test v3 & v4 endpoints.
+ testcasesC := []struct {
+ Endpoint naming.Endpoint
+ String string
+ Version int
+ }{
+ {v3s, "@3@@batman.com:2345@00000000000000000000000000000000@2@3@s@@", 3},
+ {v3m, "@3@@batman.com:2345@000000000000000000000000dabbad00@2@3@m@@", 3},
+ {v3tcp, "@3@tcp@batman.com:2345@00000000000000000000000000000000@2@3@s@@", 3},
+ {v3ws6, "@3@ws6@batman.com:2345@00000000000000000000000000000000@2@3@s@@", 3},
+ {v3s, "@4@@batman.com:2345@00000000000000000000000000000000@2@3@s@@@", 4},
+ {v4, "@4@tcp@batman.com:2345@0000000000000000000000000000ba77@4@5@m@dev.v.io/foo@bar.com,dev.v.io/bar@bar.com/delegate@@", 4},
+ {v4b, "@4@tcp@batman.com:2345@0000000000000000000000000000ba77@4@5@m@@@,@s,@m@@", 4},
+ }
+
+ for _, test := range testcasesC {
+ if got, want := test.Endpoint.VersionedString(test.Version), test.String; got != want {
+ t.Errorf("Got %q want %q for endpoint (v%d): %#v", got, want, test.Version, test.Endpoint)
+ }
+ ep, err := NewEndpoint(test.String)
+ if err != nil {
+ t.Errorf("Endpoint(%q) failed with %v", test.String, err)
+ continue
+ }
+ if !reflect.DeepEqual(ep, test.Endpoint) {
+ t.Errorf("Got endpoint %#v, want %#v for string %q", ep, test.Endpoint, test.String)
+ }
+ }
+
+ // Make sure we can continue to parse and create v2 endpoints.
+ defaultVersion = 2
+ testcasesB := []struct {
+ Endpoint naming.Endpoint
+ String string
+ Input string
+ min, max version.IPCVersion
+ servesMT bool
+ }{
+ {v1, "@2@@batman.com:1234@000000000000000000000000dabbad00@@@@", "", version.UnknownIPCVersion, version.UnknownIPCVersion, true},
+ {v2, "@2@@batman.com:2345@000000000000000000000000dabbad00@1@10@@", "", 1, 10, true},
+ {v2hp, "@2@@batman.com:2345@00000000000000000000000000000000@2@3@@", "batman.com:2345", 2, 3, true},
+ }
+
+ for _, test := range testcasesB {
+ if got, want := test.Endpoint.String(), test.String; got != want {
+ t.Errorf("Got %q want %q for endpoint %T = %#v", got, want, test.Endpoint, test.Endpoint)
+ }
+ str := test.Input
+ var ep naming.Endpoint
+ var err error
+ if str == "" {
+ str = test.String
+ ep, err = NewEndpoint(str)
+ } else {
+ ep, err = NewEndpoint(naming.FormatEndpoint(naming.UnknownProtocol, str,
+ version.IPCVersionRange{test.min, test.max},
+ naming.ServesMountTableOpt(test.servesMT)))
+ }
+ if err != nil {
+ t.Errorf("Endpoint(%q) failed with %v", str, err)
+ continue
+ }
+ if !reflect.DeepEqual(ep, test.Endpoint) {
+ t.Errorf("Got endpoint %T = %#v, want %T = %#v for string %q", ep, ep, test.Endpoint, test.Endpoint, str)
+ }
+ }
+}
+
+type endpointTest struct {
+ input, output string
+ err error
+}
+
+func TestEndpointDefaults(t *testing.T) {
+ testcases := []endpointTest{
+ {"@1@tcp@batman@@@", "@3@tcp@batman@00000000000000000000000000000000@@@m@@", nil},
+ {"@2@tcp@robin@@@@@", "@3@tcp@robin@00000000000000000000000000000000@@@m@@", nil},
+ {"@1@@@@@", "@3@@:0@00000000000000000000000000000000@@@m@@", nil},
+ {"@2@@@@@@@", "@3@@:0@00000000000000000000000000000000@@@m@@", nil},
+ {"@1@tcp@batman:12@@@", "@3@tcp@batman:12@00000000000000000000000000000000@@@m@@", nil},
+ {"@2@tcp@foo:12@@9@@@", "@3@tcp@foo:12@00000000000000000000000000000000@9@@m@@", nil},
+ {"@2@tcp@foo:12@@@4@@", "@3@tcp@foo:12@00000000000000000000000000000000@@4@m@@", nil},
+ {"@2@tcp@foo:12@@2@4@@", "@3@tcp@foo:12@00000000000000000000000000000000@2@4@m@@", nil},
+ {"@3@@host:11@@@@m@@", "@3@@host:11@00000000000000000000000000000000@@@m@@", nil},
+ {"@3@@host:12@@@@@@", "@3@@host:12@00000000000000000000000000000000@@@m@@", nil},
+ }
+ runEndpointTests(t, testcases)
+}
+
+func runEndpointTests(t *testing.T, testcases []endpointTest) {
+ for _, test := range testcases {
+ ep, err := NewEndpoint(test.input)
+ if err == nil && test.err == nil && ep.String() != test.output {
+ t.Errorf("NewEndpoint(%q): unexpected endpoint string %q != %q",
+ test.input, ep.String(), test.output)
+ continue
+ }
+ switch {
+ case test.err == err: // do nothing
+ case test.err == nil && err != nil:
+ t.Errorf("NewEndpoint(%q): unexpected error %q", test.input, err)
+ case test.err != nil && err == nil:
+ t.Errorf("NewEndpoint(%q): missing error %q", test.input, test.err)
+ case err.Error() != test.err.Error():
+ t.Errorf("NewEndpoint(%q): unexpected error %q != %q", test.input, err, test.err)
+ }
+ }
+}
+
+func TestHostPortEndpoint(t *testing.T) {
+ defver := defaultVersion
+ defer func() {
+ defaultVersion = defver
+ }()
+ defaultVersion = 4
+ testcases := []endpointTest{
+ {"localhost:10", "@4@@localhost:10@00000000000000000000000000000000@@@m@@@", nil},
+ {"localhost:", "@4@@localhost:@00000000000000000000000000000000@@@m@@@", nil},
+ {"localhost", "", errInvalidEndpointString},
+ {"dev.v.io/service/mounttabled@ns.dev.v.io:8101", "@4@@ns.dev.v.io:8101@00000000000000000000000000000000@@@m@dev.v.io/service/mounttabled@@", nil},
+ {"dev.v.io/users/foo@bar.com@ns.dev.v.io:8101", "@4@@ns.dev.v.io:8101@00000000000000000000000000000000@@@m@dev.v.io/users/foo@bar.com@@", nil},
+ {"@1@tcp@ns.dev.v.io:8101", "@4@@ns.dev.v.io:8101@00000000000000000000000000000000@@@m@@1@tcp@@", nil},
+ }
+ runEndpointTests(t, testcases)
+}
+
+func TestParseHostPort(t *testing.T) {
+ var min, max version.IPCVersion = 1, 2
+ dns := &Endpoint{
+ Protocol: "tcp",
+ Address: "batman.com:4444",
+ MinIPCVersion: min,
+ MaxIPCVersion: max,
+ IsMountTable: true,
+ }
+ ipv4 := &Endpoint{
+ Protocol: "tcp",
+ Address: "192.168.1.1:4444",
+ MinIPCVersion: min,
+ MaxIPCVersion: max,
+ IsMountTable: true,
+ }
+ ipv6 := &Endpoint{
+ Protocol: "tcp",
+ Address: "[01:02::]:4444",
+ MinIPCVersion: min,
+ MaxIPCVersion: max,
+ IsMountTable: true,
+ }
+ testcases := []struct {
+ Endpoint naming.Endpoint
+ Host, Port string
+ }{
+ {dns, "batman.com", "4444"},
+ {ipv4, "192.168.1.1", "4444"},
+ {ipv6, "01:02::", "4444"},
+ }
+
+ for _, test := range testcases {
+ addr := net.JoinHostPort(test.Host, test.Port)
+ epString := naming.FormatEndpoint("tcp", addr, version.IPCVersionRange{min, max})
+ if ep, err := NewEndpoint(epString); err != nil {
+ t.Errorf("NewEndpoint(%q) failed with %v", addr, err)
+ } else {
+ if !reflect.DeepEqual(test.Endpoint, ep) {
+ t.Errorf("Got endpoint %T = %#v, want %T = %#v for string %q", ep, ep, test.Endpoint, test.Endpoint, addr)
+ }
+ }
+ }
+}
diff --git a/profiles/internal/naming/namespace/acl.go b/profiles/internal/naming/namespace/acl.go
new file mode 100644
index 0000000..59c6950
--- /dev/null
+++ b/profiles/internal/naming/namespace/acl.go
@@ -0,0 +1,58 @@
+package namespace
+
+import (
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/options"
+ "v.io/v23/services/security/access"
+ "v.io/x/lib/vlog"
+)
+
+// setACLInMountTable sets the ACL in a single server.
+func setACLInMountTable(ctx *context.T, client ipc.Client, name string, acl access.TaggedACLMap, etag, id string) (s status) {
+ s.id = id
+ ctx, _ = context.WithTimeout(ctx, callTimeout)
+ call, err := client.StartCall(ctx, name, "SetACL", []interface{}{acl, etag}, options.NoResolve{})
+ s.err = err
+ if err != nil {
+ return
+ }
+ s.err = call.Finish()
+ return
+}
+
+func (ns *namespace) SetACL(ctx *context.T, name string, acl access.TaggedACLMap, etag string) error {
+ defer vlog.LogCall()()
+ client := v23.GetClient(ctx)
+
+ // Apply to all mount tables implementing the name.
+ f := func(ctx *context.T, mt, id string) status {
+ return setACLInMountTable(ctx, client, mt, acl, etag, id)
+ }
+ err := ns.dispatch(ctx, name, f)
+ vlog.VI(1).Infof("SetACL(%s, %v, %s) -> %v", name, acl, etag, err)
+ return err
+}
+
+// GetACL gets an ACL from a mount table.
+func (ns *namespace) GetACL(ctx *context.T, name string) (acl access.TaggedACLMap, etag string, err error) {
+ defer vlog.LogCall()()
+ client := v23.GetClient(ctx)
+
+ // Resolve to all the mount tables implementing name.
+ me, rerr := ns.ResolveToMountTable(ctx, name)
+ if rerr != nil {
+ err = rerr
+ return
+ }
+ mts := me.Names()
+
+ call, serr := ns.parallelStartCall(ctx, client, mts, "GetACL", []interface{}{})
+ if serr != nil {
+ err = serr
+ return
+ }
+ err = call.Finish(&acl, &etag)
+ return
+}
diff --git a/profiles/internal/naming/namespace/acl_test.go b/profiles/internal/naming/namespace/acl_test.go
new file mode 100644
index 0000000..796668e
--- /dev/null
+++ b/profiles/internal/naming/namespace/acl_test.go
@@ -0,0 +1,138 @@
+package namespace_test
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/naming"
+ "v.io/v23/security"
+ "v.io/v23/services/security/access"
+
+ "v.io/x/ref/lib/testutil"
+ tsecurity "v.io/x/ref/lib/testutil/security"
+ _ "v.io/x/ref/profiles"
+ service "v.io/x/ref/services/mounttable/lib"
+)
+
+func init() {
+ testutil.Init()
+}
+
+func initTest() (rootCtx *context.T, aliceCtx *context.T, bobCtx *context.T, shutdown v23.Shutdown) {
+ ctx, shutdown := testutil.InitForTest()
+ var err error
+ if rootCtx, err = v23.SetPrincipal(ctx, tsecurity.NewPrincipal("root")); err != nil {
+ panic("failed to set root principal")
+ }
+ if aliceCtx, err = v23.SetPrincipal(ctx, tsecurity.NewPrincipal("alice")); err != nil {
+ panic("failed to set alice principal")
+ }
+ if bobCtx, err = v23.SetPrincipal(ctx, tsecurity.NewPrincipal("bob")); err != nil {
+ panic("failed to set bob principal")
+ }
+ for _, r := range []*context.T{rootCtx, aliceCtx, bobCtx} {
+ // A hack to set the namespace roots to a value that won't work.
+ v23.GetNamespace(r).SetRoots()
+ // And have all principals recognize each other's blessings.
+ p1 := v23.GetPrincipal(r)
+ for _, other := range []*context.T{rootCtx, aliceCtx, bobCtx} {
+ // tsecurity.NewPrincipal has already set up each
+ // principal to use the same blessing for both server
+ // and client activities.
+ if err := p1.AddToRoots(v23.GetPrincipal(other).BlessingStore().Default()); err != nil {
+ panic(err)
+ }
+ }
+ }
+ return rootCtx, aliceCtx, bobCtx, shutdown
+}
+
+// Create a new mounttable service.
+func newMT(t *testing.T, ctx *context.T) (func(), string) {
+ estr, stopFunc, err := service.StartServers(ctx, v23.GetListenSpec(ctx), "", "", "")
+ if err != nil {
+ t.Fatalf("service.StartServers: %s", err)
+ }
+ return stopFunc, estr
+}
+
+func TestACLs(t *testing.T) {
+ // Create three different personalities.
+ // TODO(p): Use the multiple personalities to test ACL functionality.
+ rootCtx, _, _, shutdown := initTest()
+ defer shutdown()
+
+ // Create root mounttable.
+ stop, rmtAddr := newMT(t, rootCtx)
+ fmt.Printf("rmt at %s\n", rmtAddr)
+ defer stop()
+ ns := v23.GetNamespace(rootCtx)
+ ns.SetRoots("/" + rmtAddr)
+
+ // Create two parallel mount tables.
+ stop1, mt1Addr := newMT(t, rootCtx)
+ fmt.Printf("mt1 at %s\n", mt1Addr)
+ defer stop1()
+ stop2, mt2Addr := newMT(t, rootCtx)
+ fmt.Printf("mt2 at %s\n", mt2Addr)
+ defer stop2()
+
+ // Mount them into the root.
+ if err := ns.Mount(rootCtx, "a/b/c", mt1Addr, 0, naming.ServesMountTableOpt(true)); err != nil {
+ t.Fatalf("Failed to Mount %s onto a/b/c: %s", "/"+mt1Addr, err)
+ }
+ if err := ns.Mount(rootCtx, "a/b/c", mt2Addr, 0, naming.ServesMountTableOpt(true)); err != nil {
+ t.Fatalf("Failed to Mount %s onto a/b/c: %s", "/"+mt2Addr, err)
+ }
+
+ // Set/Get the mount point's ACL.
+ acl, etag, err := ns.GetACL(rootCtx, "a/b/c")
+ if err != nil {
+ t.Fatalf("GetACL a/b/c: %s", err)
+ }
+ acl = access.TaggedACLMap{"Read": access.ACL{In: []security.BlessingPattern{security.AllPrincipals}}}
+ if err := ns.SetACL(rootCtx, "a/b/c", acl, etag); err != nil {
+ t.Fatalf("SetACL a/b/c: %s", err)
+ }
+ nacl, _, err := ns.GetACL(rootCtx, "a/b/c")
+ if err != nil {
+ t.Fatalf("GetACL a/b/c: %s", err)
+ }
+ if !reflect.DeepEqual(acl, nacl) {
+ t.Fatalf("want %v, got %v", acl, nacl)
+ }
+
+ // Now Set/Get the parallel mount point's ACL.
+ etag = "" // Parallel setacl with any other value is dangerous
+ acl = access.TaggedACLMap{"Read": access.ACL{In: []security.BlessingPattern{security.AllPrincipals}},
+ "Admin": access.ACL{In: []security.BlessingPattern{security.AllPrincipals}}}
+ if err := ns.SetACL(rootCtx, "a/b/c/d/e", acl, etag); err != nil {
+ t.Fatalf("SetACL a/b/c/d/e: %s", err)
+ }
+ nacl, _, err = ns.GetACL(rootCtx, "a/b/c/d/e")
+ if err != nil {
+ t.Fatalf("GetACL a/b/c/d/e: %s", err)
+ }
+ if !reflect.DeepEqual(acl, nacl) {
+ t.Fatalf("want %v, got %v", acl, nacl)
+ }
+
+ // Get from each server individually to make sure both are set.
+ nacl, _, err = ns.GetACL(rootCtx, naming.Join(mt1Addr, "d/e"))
+ if err != nil {
+ t.Fatalf("GetACL a/b/c/d/e: %s", err)
+ }
+ if !reflect.DeepEqual(acl, nacl) {
+ t.Fatalf("want %v, got %v", acl, nacl)
+ }
+ nacl, _, err = ns.GetACL(rootCtx, naming.Join(mt2Addr, "d/e"))
+ if err != nil {
+ t.Fatalf("GetACL a/b/c/d/e: %s", err)
+ }
+ if !reflect.DeepEqual(acl, nacl) {
+ t.Fatalf("want %v, got %v", acl, nacl)
+ }
+}
diff --git a/profiles/internal/naming/namespace/all_test.go b/profiles/internal/naming/namespace/all_test.go
new file mode 100644
index 0000000..932dd0b
--- /dev/null
+++ b/profiles/internal/naming/namespace/all_test.go
@@ -0,0 +1,736 @@
+package namespace_test
+
+import (
+ "reflect"
+ "runtime"
+ "runtime/debug"
+ "sync"
+ "testing"
+ "time"
+
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/naming"
+ "v.io/v23/naming/ns"
+ "v.io/v23/options"
+ "v.io/v23/security"
+ "v.io/v23/verror"
+ "v.io/x/lib/vlog"
+
+ "v.io/x/ref/lib/testutil"
+ tsecurity "v.io/x/ref/lib/testutil/security"
+ _ "v.io/x/ref/profiles"
+ "v.io/x/ref/profiles/internal/naming/namespace"
+ vsecurity "v.io/x/ref/security"
+ service "v.io/x/ref/services/mounttable/lib"
+)
+
+//go:generate v23 test generate
+
+func createContexts(t *testing.T) (sc, c *context.T, cleanup func()) {
+ ctx, shutdown := testutil.InitForTest()
+ var (
+ err error
+ psc = tsecurity.NewPrincipal("sc")
+ pc = tsecurity.NewPrincipal("c")
+ )
+ // Setup the principals so that they recognize each other.
+ if err := psc.AddToRoots(pc.BlessingStore().Default()); err != nil {
+ t.Fatal(err)
+ }
+ if err := pc.AddToRoots(psc.BlessingStore().Default()); err != nil {
+ t.Fatal(err)
+ }
+ if sc, err = v23.SetPrincipal(ctx, psc); err != nil {
+ t.Fatal(err)
+ }
+ if c, err = v23.SetPrincipal(ctx, pc); err != nil {
+ t.Fatal(err)
+ }
+ return sc, c, shutdown
+}
+
+func boom(t *testing.T, f string, v ...interface{}) {
+ t.Logf(f, v...)
+ t.Fatal(string(debug.Stack()))
+}
+
+// N squared but who cares, this is a little test.
+// Ignores dups.
+func contains(container, contained []string) bool {
+L:
+ for _, d := range contained {
+ for _, r := range container {
+ if r == d {
+ continue L
+ }
+ }
+ return false
+ }
+ return true
+}
+
+func compare(t *testing.T, caller, name string, got, want []string) {
+ // Compare ignoring dups.
+ if !contains(got, want) || !contains(want, got) {
+ boom(t, "%s: %q: got %v, want %v", caller, name, got, want)
+ }
+}
+
+func doGlob(t *testing.T, ctx *context.T, ns ns.Namespace, pattern string, limit int) []string {
+ var replies []string
+ rc, err := ns.Glob(ctx, pattern)
+ if err != nil {
+ boom(t, "Glob(%s): %s", pattern, err)
+ }
+ for s := range rc {
+ switch v := s.(type) {
+ case *naming.MountEntry:
+ replies = append(replies, v.Name)
+ if limit > 0 && len(replies) > limit {
+ boom(t, "Glob returns too many results, perhaps not limiting recursion")
+ }
+ }
+ }
+ return replies
+}
+
+type testServer struct {
+ suffix string
+}
+
+func (testServer) KnockKnock(ctx ipc.ServerCall) (string, error) {
+ return "Who's there?", nil
+}
+
+// testServer has the following namespace:
+// "" -> {level1} -> {level2}
+func (t *testServer) GlobChildren__(ipc.ServerCall) (<-chan string, error) {
+ ch := make(chan string, 1)
+ switch t.suffix {
+ case "":
+ ch <- "level1"
+ case "level1":
+ ch <- "level2"
+ default:
+ return nil, nil
+ }
+ close(ch)
+ return ch, nil
+}
+
+type allowEveryoneAuthorizer struct{}
+
+func (allowEveryoneAuthorizer) Authorize(security.Call) error { return nil }
+
+type dispatcher struct{}
+
+func (d *dispatcher) Lookup(suffix string) (interface{}, security.Authorizer, error) {
+ return &testServer{suffix}, allowEveryoneAuthorizer{}, nil
+}
+
+func knockKnock(t *testing.T, ctx *context.T, name string) {
+ client := v23.GetClient(ctx)
+ call, err := client.StartCall(ctx, name, "KnockKnock", nil)
+ if err != nil {
+ boom(t, "StartCall failed: %s", err)
+ }
+ var result string
+ if err := call.Finish(&result); err != nil {
+ boom(t, "Finish returned an error: %s", err)
+ }
+ if result != "Who's there?" {
+ boom(t, "Wrong result: %v", result)
+ }
+}
+
+func doResolveTest(t *testing.T, fname string, f func(*context.T, string, ...naming.ResolveOpt) (*naming.MountEntry, error), ctx *context.T, name string, want []string, opts ...naming.ResolveOpt) {
+ me, err := f(ctx, name, opts...)
+ if err != nil {
+ boom(t, "Failed to %s %s: %s", fname, name, err)
+ }
+ compare(t, fname, name, me.Names(), want)
+}
+
+func testResolveToMountTable(t *testing.T, ctx *context.T, ns ns.Namespace, name string, want ...string) {
+ doResolveTest(t, "ResolveToMountTable", ns.ResolveToMountTable, ctx, name, want)
+}
+
+func testResolveToMountTableWithPattern(t *testing.T, ctx *context.T, ns ns.Namespace, name string, pattern naming.ResolveOpt, want ...string) {
+ doResolveTest(t, "ResolveToMountTable", ns.ResolveToMountTable, ctx, name, want, pattern)
+}
+
+func testResolve(t *testing.T, ctx *context.T, ns ns.Namespace, name string, want ...string) {
+ doResolveTest(t, "Resolve", ns.Resolve, ctx, name, want)
+}
+
+func testResolveWithPattern(t *testing.T, ctx *context.T, ns ns.Namespace, name string, pattern naming.ResolveOpt, want ...string) {
+ doResolveTest(t, "Resolve", ns.Resolve, ctx, name, want, pattern)
+}
+
+type serverEntry struct {
+ mountPoint string
+ server ipc.Server
+ endpoint naming.Endpoint
+ name string
+}
+
+func runServer(t *testing.T, ctx *context.T, disp ipc.Dispatcher, mountPoint string) *serverEntry {
+ return run(t, ctx, disp, mountPoint, false)
+}
+
+func runMT(t *testing.T, ctx *context.T, mountPoint string) *serverEntry {
+ mtd, err := service.NewMountTableDispatcher("")
+ if err != nil {
+ boom(t, "NewMountTableDispatcher returned error: %v", err)
+ }
+ return run(t, ctx, mtd, mountPoint, true)
+}
+
+func run(t *testing.T, ctx *context.T, disp ipc.Dispatcher, mountPoint string, mt bool) *serverEntry {
+ s, err := v23.NewServer(ctx, options.ServesMountTable(mt))
+ if err != nil {
+ boom(t, "r.NewServer: %s", err)
+ }
+ // Add a mount table server.
+ // Start serving on a loopback address.
+ eps, err := s.Listen(v23.GetListenSpec(ctx))
+ if err != nil {
+ boom(t, "Failed to Listen: %s", err)
+ }
+ if err := s.ServeDispatcher(mountPoint, disp); err != nil {
+ boom(t, "Failed to serve mount table at %s: %s", mountPoint, err)
+ }
+ return &serverEntry{mountPoint: mountPoint, server: s, endpoint: eps[0], name: eps[0].Name()}
+}
+
+const (
+ mt1MP = "mt1"
+ mt2MP = "mt2"
+ mt3MP = "mt3"
+ mt4MP = "mt4"
+ mt5MP = "mt5"
+ j1MP = "joke1"
+ j2MP = "joke2"
+ j3MP = "joke3"
+
+ ttl = 100 * time.Second
+)
+
+// runMountTables creates a root mounttable with some mount tables mounted
+// in it: mt{1,2,3,4,5}.
+func runMountTables(t *testing.T, ctx *context.T) (*serverEntry, map[string]*serverEntry) {
+ root := runMT(t, ctx, "")
+ v23.GetNamespace(ctx).SetRoots(root.name)
+ t.Logf("mountTable %q -> %s", root.mountPoint, root.endpoint)
+
+ mps := make(map[string]*serverEntry)
+ for _, mp := range []string{mt1MP, mt2MP, mt3MP, mt4MP, mt5MP} {
+ m := runMT(t, ctx, mp)
+ t.Logf("mountTable %q -> %s", mp, m.endpoint)
+ mps[mp] = m
+ }
+ return root, mps
+}
+
+// createNamespace creates a hierarchy of mounttables and servers
+// as follows:
+// /mt1, /mt2, /mt3, /mt4, /mt5, /joke1, /joke2, /joke3.
+// That is, mt1 is a mount table mounted in the root mount table, and
+// joke1 is a server mounted in the root mount table.
+func createNamespace(t *testing.T, ctx *context.T) (*serverEntry, map[string]*serverEntry, map[string]*serverEntry, func()) {
+ root, mts := runMountTables(t, ctx)
+ jokes := make(map[string]*serverEntry)
+ // Let's run some non-mount table services.
+ for _, j := range []string{j1MP, j2MP, j3MP} {
+ disp := &dispatcher{}
+ jokes[j] = runServer(t, ctx, disp, j)
+ }
+ return root, mts, jokes, func() {
+ for _, s := range jokes {
+ s.server.Stop()
+ }
+ for _, s := range mts {
+ s.server.Stop()
+ }
+ root.server.Stop()
+ }
+}
+
+// runNestedMountTables creates some nested mount tables in the hierarchy
+// created by createNamespace as follows:
+// /mt4/foo, /mt4/foo/bar and /mt4/baz where foo, bar and baz are mount tables.
+func runNestedMountTables(t *testing.T, ctx *context.T, mts map[string]*serverEntry) {
+ ns := v23.GetNamespace(ctx)
+ // Set up some nested mounts and verify resolution.
+ for _, m := range []string{"mt4/foo", "mt4/foo/bar"} {
+ mts[m] = runMT(t, ctx, m)
+ }
+
+ // Use a global name for a mount, rather than a relative one.
+ // We directly mount baz into the mt4/foo mount table.
+ globalMP := naming.JoinAddressName(mts["mt4/foo"].name, "baz")
+ mts["baz"] = runMT(t, ctx, "baz")
+ if err := ns.Mount(ctx, globalMP, mts["baz"].name, ttl); err != nil {
+ boom(t, "Failed to Mount %s: %s", globalMP, err)
+ }
+}
+
+// TestNamespaceCommon tests common use of the Namespace library
+// against a root mount table and some mount tables mounted on it.
+func TestNamespaceCommon(t *testing.T) {
+ _, c, cleanup := createContexts(t)
+ defer cleanup()
+
+ root, mts, jokes, stopper := createNamespace(t, c)
+ defer stopper()
+ ns := v23.GetNamespace(c)
+
+ // All of the initial mounts are served by the root mounttable
+ // and hence ResolveToMountTable should return the root mounttable
+ // as the address portion of the terminal name for those mounttables.
+ testResolveToMountTable(t, c, ns, "", root.name)
+ for _, m := range []string{mt2MP, mt3MP, mt5MP} {
+ rootMT := naming.Join(root.name, m)
+ // All of these mount tables are hosted by the root mount table
+ testResolveToMountTable(t, c, ns, m, rootMT)
+
+ // The server registered for each mount point is a mount table
+ testResolve(t, c, ns, m, mts[m].name)
+
+ // ResolveToMountTable will walk through to the sub MountTables
+ mtbar := naming.Join(m, "bar")
+ subMT := naming.Join(mts[m].name, "bar")
+ testResolveToMountTable(t, c, ns, mtbar, subMT)
+ }
+
+ for _, j := range []string{j1MP, j2MP, j3MP} {
+ testResolve(t, c, ns, j, jokes[j].name)
+ }
+}
+
+// TestNamespaceDetails tests more detailed use of the Namespace library.
+func TestNamespaceDetails(t *testing.T) {
+ sc, c, cleanup := createContexts(t)
+ defer cleanup()
+
+ root, mts, _, stopper := createNamespace(t, sc)
+ defer stopper()
+
+ ns := v23.GetNamespace(c)
+ ns.SetRoots(root.name)
+
+ // /mt2 is not an endpoint. Thus, the example below will fail.
+ mt3Server := mts[mt3MP].name
+ mt2a := "/mt2/a"
+ if err := ns.Mount(c, mt2a, mt3Server, ttl); verror.Is(err, naming.ErrNoSuchName.ID) {
+ boom(t, "Successfully mounted %s - expected an err %v, not %v", mt2a, naming.ErrNoSuchName, err)
+ }
+
+ // Mount using the relative name.
+ // This means we walk through mt2 if it already exists and mount within
+ // the lower-level mount table; if the name doesn't exist we'll create
+ // a new name for it.
+ mt2a = "mt2/a"
+ if err := ns.Mount(c, mt2a, mt3Server, ttl); err != nil {
+ boom(t, "Failed to Mount %s: %s", mt2a, err)
+ }
+
+ mt2mt := naming.Join(mts[mt2MP].name, "a")
+ // The mt2/a is served by the mt2 mount table
+ testResolveToMountTable(t, c, ns, mt2a, mt2mt)
+ // The server for mt2a is mt3server from the second mount above.
+ testResolve(t, c, ns, mt2a, mt3Server)
+
+ // Add two more mounts. The // should be stripped off of the
+ // second.
+ for _, mp := range []struct{ name, server string }{
+ {"mt2", mts[mt4MP].name},
+ {"mt2//", mts[mt5MP].name},
+ } {
+ if err := ns.Mount(c, mp.name, mp.server, ttl, naming.ServesMountTableOpt(true)); err != nil {
+ boom(t, "Failed to Mount %s: %s", mp.name, err)
+ }
+ }
+
+ names := []string{naming.JoinAddressName(mts[mt4MP].name, "a"),
+ naming.JoinAddressName(mts[mt5MP].name, "a")}
+ names = append(names, naming.JoinAddressName(mts[mt2MP].name, "a"))
+ // We now have 3 mount tables prepared to serve mt2/a
+ testResolveToMountTable(t, c, ns, "mt2/a", names...)
+ names = []string{mts[mt4MP].name, mts[mt5MP].name}
+ names = append(names, mts[mt2MP].name)
+ testResolve(t, c, ns, "mt2", names...)
+}
+
+// TestNestedMounts tests some more deeply nested mounts
+func TestNestedMounts(t *testing.T) {
+ sc, c, cleanup := createContexts(t)
+ defer cleanup()
+
+ root, mts, _, stopper := createNamespace(t, sc)
+ runNestedMountTables(t, sc, mts)
+ defer stopper()
+
+ ns := v23.GetNamespace(c)
+ ns.SetRoots(root.name)
+
+ // Set up some nested mounts and verify resolution.
+ for _, m := range []string{"mt4/foo", "mt4/foo/bar"} {
+ testResolve(t, c, ns, m, mts[m].name)
+ }
+
+ testResolveToMountTable(t, c, ns, "mt4/foo",
+ naming.JoinAddressName(mts[mt4MP].name, "foo"))
+ testResolveToMountTable(t, c, ns, "mt4/foo/bar",
+ naming.JoinAddressName(mts["mt4/foo"].name, "bar"))
+ testResolveToMountTable(t, c, ns, "mt4/foo/baz",
+ naming.JoinAddressName(mts["mt4/foo"].name, "baz"))
+}
+
+// TestServers tests invoking RPCs on simple servers
+func TestServers(t *testing.T) {
+ sc, c, cleanup := createContexts(t)
+ defer cleanup()
+
+ root, mts, jokes, stopper := createNamespace(t, sc)
+ defer stopper()
+ ns := v23.GetNamespace(c)
+ ns.SetRoots(root.name)
+
+ // Let's run some non-mount table services
+ for _, j := range []string{j1MP, j2MP, j3MP} {
+ testResolve(t, c, ns, j, jokes[j].name)
+ knockKnock(t, c, j)
+ globalName := naming.JoinAddressName(mts["mt4"].name, j)
+ disp := &dispatcher{}
+ gj := "g_" + j
+ jokes[gj] = runServer(t, c, disp, globalName)
+ testResolve(t, c, ns, "mt4/"+j, jokes[gj].name)
+ knockKnock(t, c, "mt4/"+j)
+ testResolveToMountTable(t, c, ns, "mt4/"+j, globalName)
+ testResolveToMountTable(t, c, ns, "mt4/"+j+"/garbage", globalName+"/garbage")
+ }
+}
+
+// TestGlob tests some glob patterns.
+func TestGlob(t *testing.T) {
+ sc, c, cleanup := createContexts(t)
+ defer cleanup()
+
+ root, mts, _, stopper := createNamespace(t, sc)
+ runNestedMountTables(t, sc, mts)
+ defer stopper()
+ ns := v23.GetNamespace(c)
+ ns.SetRoots(root.name)
+
+ tln := []string{"baz", "mt1", "mt2", "mt3", "mt4", "mt5", "joke1", "joke2", "joke3"}
+ barbaz := []string{"mt4/foo/bar", "mt4/foo/baz"}
+ level12 := []string{"joke1/level1", "joke1/level1/level2", "joke2/level1", "joke2/level1/level2", "joke3/level1", "joke3/level1/level2"}
+ foo := append([]string{"mt4/foo"}, barbaz...)
+ foo = append(foo, level12...)
+ // Try various globs.
+ globTests := []struct {
+ pattern string
+ expected []string
+ }{
+ {"*", tln},
+ {"x", []string{}},
+ {"m*", []string{"mt1", "mt2", "mt3", "mt4", "mt5"}},
+ {"mt[2,3]", []string{"mt2", "mt3"}},
+ {"*z", []string{"baz"}},
+ {"joke1/*", []string{"joke1/level1"}},
+ {"j?ke1/level1/*", []string{"joke1/level1/level2"}},
+ {"joke1/level1/*", []string{"joke1/level1/level2"}},
+ {"joke1/level1/level2/...", []string{"joke1/level1/level2"}},
+ {"...", append(append(tln, foo...), "")},
+ {"*/...", append(tln, foo...)},
+ {"*/foo/*", barbaz},
+ {"*/*/*z", []string{"mt4/foo/baz"}},
+ {"*/f??/*z", []string{"mt4/foo/baz"}},
+ {"mt4/foo/baz", []string{"mt4/foo/baz"}},
+ }
+ for _, test := range globTests {
+ out := doGlob(t, c, ns, test.pattern, 0)
+ compare(t, "Glob", test.pattern, out, test.expected)
+ // Do the same with a full rooted name.
+ out = doGlob(t, c, ns, naming.JoinAddressName(root.name, test.pattern), 0)
+ var expectedWithRoot []string
+ for _, s := range test.expected {
+ expectedWithRoot = append(expectedWithRoot, naming.JoinAddressName(root.name, s))
+ }
+ compare(t, "Glob", test.pattern, out, expectedWithRoot)
+ }
+}
+
+type GlobbableServer struct {
+ callCount int
+ mu sync.Mutex
+}
+
+func (g *GlobbableServer) Glob__(ipc.ServerCall, string) (<-chan naming.VDLGlobReply, error) {
+ g.mu.Lock()
+ defer g.mu.Unlock()
+ g.callCount++
+ return nil, nil
+}
+
+func (g *GlobbableServer) GetAndResetCount() int {
+ g.mu.Lock()
+ defer g.mu.Unlock()
+ cnt := g.callCount
+ g.callCount = 0
+
+ return cnt
+}
+
+// TestGlobEarlyStop tests that Glob doesn't query terminal servers with finished patterns.
+func TestGlobEarlyStop(t *testing.T) {
+ sc, c, cleanup := createContexts(t)
+ defer cleanup()
+
+ root, mts, _, stopper := createNamespace(t, sc)
+ runNestedMountTables(t, sc, mts)
+ defer stopper()
+
+ globServer := &GlobbableServer{}
+ name := naming.JoinAddressName(mts["mt4/foo/bar"].name, "glob")
+ runningGlobServer := runServer(t, c, testutil.LeafDispatcher(globServer, nil), name)
+ defer runningGlobServer.server.Stop()
+
+ ns := v23.GetNamespace(c)
+ ns.SetRoots(root.name)
+
+ tests := []struct {
+ pattern string
+ expectedCalls int
+ expected []string
+ }{
+ {"mt4/foo/bar/glob", 0, []string{"mt4/foo/bar/glob"}},
+ {"mt4/foo/bar/glob/...", 1, []string{"mt4/foo/bar/glob"}},
+ {"mt4/foo/bar/glob/*", 1, nil},
+ {"mt4/foo/bar/***", 0, []string{"mt4/foo/bar", "mt4/foo/bar/glob"}},
+ {"mt4/foo/bar/...", 1, []string{"mt4/foo/bar", "mt4/foo/bar/glob"}},
+ {"mt4/foo/bar/*", 0, []string{"mt4/foo/bar/glob"}},
+ {"mt4/***/bar/***", 0, []string{"mt4/foo/bar", "mt4/foo/bar/glob"}},
+ {"mt4/*/bar/***", 0, []string{"mt4/foo/bar", "mt4/foo/bar/glob"}},
+ }
+ // Test allowing the tests to descend into leaves.
+ for _, test := range tests {
+ out := doGlob(t, c, ns, test.pattern, 0)
+ compare(t, "Glob", test.pattern, out, test.expected)
+ if calls := globServer.GetAndResetCount(); calls != test.expectedCalls {
+ boom(t, "Wrong number of Glob calls to terminal server got: %d want: %d.", calls, test.expectedCalls)
+ }
+ }
+}
+
+func TestCycles(t *testing.T) {
+ sc, c, cleanup := createContexts(t)
+ defer cleanup()
+
+ root, _, _, stopper := createNamespace(t, sc)
+ defer stopper()
+ ns := v23.GetNamespace(c)
+ ns.SetRoots(root.name)
+
+ c1 := runMT(t, c, "c1")
+ c2 := runMT(t, c, "c2")
+ c3 := runMT(t, c, "c3")
+ defer c1.server.Stop()
+ defer c2.server.Stop()
+ defer c3.server.Stop()
+
+ m := "c1/c2"
+ if err := ns.Mount(c, m, c1.name, ttl, naming.ServesMountTableOpt(true)); err != nil {
+ boom(t, "Failed to Mount %s: %s", "c1/c2", err)
+ }
+
+ m = "c1/c2/c3"
+ if err := ns.Mount(c, m, c3.name, ttl, naming.ServesMountTableOpt(true)); err != nil {
+ boom(t, "Failed to Mount %s: %s", m, err)
+ }
+
+ m = "c1/c3/c4"
+ if err := ns.Mount(c, m, c1.name, ttl, naming.ServesMountTableOpt(true)); err != nil {
+ boom(t, "Failed to Mount %s: %s", m, err)
+ }
+
+ // Since c1 was mounted with the Serve call, it will have both the tcp and ws endpoints.
+ testResolve(t, c, ns, "c1", c1.name)
+ testResolve(t, c, ns, "c1/c2", c1.name)
+ testResolve(t, c, ns, "c1/c3", c3.name)
+ testResolve(t, c, ns, "c1/c3/c4", c1.name)
+ testResolve(t, c, ns, "c1/c3/c4/c3/c4", c1.name)
+ cycle := "c3/c4"
+ for i := 0; i < 40; i++ {
+ cycle += "/c3/c4"
+ }
+ if _, err := ns.Resolve(c, "c1/"+cycle); !verror.Is(err, naming.ErrResolutionDepthExceeded.ID) {
+ boom(t, "Failed to detect cycle")
+ }
+
+ // Perform the glob with a response length limit.
+ doGlob(t, c, ns, "c1/...", 1000)
+}
+
+// TestGoroutineLeaks tests for leaking goroutines - we have many:-(
+func TestGoroutineLeaks(t *testing.T) {
+ t.Skip()
+ sc, _, cleanup := createContexts(t)
+ defer cleanup()
+
+ _, _, _, stopper := createNamespace(t, sc)
+ defer func() {
+ vlog.Infof("%d goroutines:", runtime.NumGoroutine())
+ }()
+ defer stopper()
+ defer func() {
+ vlog.Infof("%d goroutines:", runtime.NumGoroutine())
+ }()
+ //panic("this will show up lots of goroutine+channel leaks!!!!")
+}
+
+func TestBadRoots(t *testing.T) {
+ if _, err := namespace.New(); err != nil {
+ t.Errorf("namespace.New should not have failed with no roots")
+ }
+ if _, err := namespace.New("not a rooted name"); err == nil {
+ t.Errorf("namespace.New should have failed with an unrooted name")
+ }
+}
+
+func bless(blesser, delegate security.Principal, extension string) {
+ b, err := blesser.Bless(delegate.PublicKey(), blesser.BlessingStore().Default(), extension, security.UnconstrainedUse())
+ if err != nil {
+ panic(err)
+ }
+ delegate.BlessingStore().SetDefault(b)
+}
+
+func TestRootBlessing(t *testing.T) {
+ c, cc, cleanup := createContexts(t)
+ defer cleanup()
+
+ proot, err := vsecurity.NewPrincipal()
+ if err != nil {
+ panic(err)
+ }
+ b, err := proot.BlessSelf("root")
+ if err != nil {
+ panic(err)
+ }
+ proot.BlessingStore().SetDefault(b)
+
+ sprincipal := v23.GetPrincipal(c)
+ cprincipal := v23.GetPrincipal(cc)
+ bless(proot, sprincipal, "server")
+ bless(proot, cprincipal, "client")
+ cprincipal.AddToRoots(proot.BlessingStore().Default())
+ sprincipal.AddToRoots(proot.BlessingStore().Default())
+
+ root, mts, _, stopper := createNamespace(t, c)
+ defer stopper()
+ ns := v23.GetNamespace(c)
+
+ name := naming.Join(root.name, mt2MP)
+ // First check with a non-matching blessing pattern.
+ _, err = ns.Resolve(c, name, naming.RootBlessingPatternOpt("root/foobar"))
+ if !verror.Is(err, verror.ErrNotTrusted.ID) {
+ t.Errorf("Resolve expected NotTrusted error, got %v", err)
+ }
+ _, err = ns.ResolveToMountTable(c, name, naming.RootBlessingPatternOpt("root/foobar"))
+ if !verror.Is(err, verror.ErrNotTrusted.ID) {
+ t.Errorf("ResolveToMountTable expected NotTrusted error, got %v", err)
+ }
+
+ // Now check a matching pattern.
+ testResolveWithPattern(t, c, ns, name, naming.RootBlessingPatternOpt("root/server"), mts[mt2MP].name)
+ testResolveToMountTableWithPattern(t, c, ns, name, naming.RootBlessingPatternOpt("root/server"), name)
+
+ // After successful lookup it should be cached, so the pattern doesn't matter.
+ testResolveWithPattern(t, c, ns, name, naming.RootBlessingPatternOpt("root/foobar"), mts[mt2MP].name)
+}
+
+func TestAuthenticationDuringResolve(t *testing.T) {
+ ctx, shutdown := v23.Init()
+ defer shutdown()
+
+ var (
+ rootMtCtx, _ = v23.SetPrincipal(ctx, tsecurity.NewPrincipal()) // root mounttable
+ mtCtx, _ = v23.SetPrincipal(ctx, tsecurity.NewPrincipal()) // intermediate mounttable
+ serverCtx, _ = v23.SetPrincipal(ctx, tsecurity.NewPrincipal()) // end server
+ clientCtx, _ = v23.SetPrincipal(ctx, tsecurity.NewPrincipal()) // client process (doing Resolves).
+ idp = tsecurity.NewIDProvider("idp") // identity provider
+ ep1 = naming.FormatEndpoint("tcp", "127.0.0.1:14141")
+
+ resolve = func(name string, opts ...naming.ResolveOpt) (*naming.MountEntry, error) {
+ return v23.GetNamespace(clientCtx).Resolve(clientCtx, name, opts...)
+ }
+
+ mount = func(name, server string, ttl time.Duration, opts ...naming.MountOpt) error {
+ return v23.GetNamespace(serverCtx).Mount(serverCtx, name, server, ttl, opts...)
+ }
+ )
+ // Setup default blessings for the processes.
+ idp.Bless(v23.GetPrincipal(rootMtCtx), "rootmt")
+ idp.Bless(v23.GetPrincipal(serverCtx), "server")
+ idp.Bless(v23.GetPrincipal(mtCtx), "childmt")
+ idp.Bless(v23.GetPrincipal(clientCtx), "client")
+
+ // Setup the namespace root for all the "processes".
+ rootmt := runMT(t, rootMtCtx, "")
+ for _, ctx := range []*context.T{mtCtx, serverCtx, clientCtx} {
+ v23.GetNamespace(ctx).SetRoots(rootmt.name)
+ }
+ // Disable caching in the client so that any Mount calls by the server
+ // are noticed immediately.
+ v23.GetNamespace(clientCtx).CacheCtl(naming.DisableCache(true))
+
+ // Mounting a server without an explicitly specified MountedServerBlessingsOpt
+ // will automatically fill in the Default blessings.
+ if err := mount("server", ep1, time.Minute); err != nil {
+ t.Error(err)
+ } else if e, err := resolve("server"); err != nil {
+ t.Error(err)
+ } else if len(e.Servers) != 1 {
+ t.Errorf("Got %v, wanted a single server", e.Servers)
+ } else if s := e.Servers[0]; s.Server != ep1 || len(s.BlessingPatterns) != 1 || s.BlessingPatterns[0] != "idp/server" {
+ t.Errorf("Got (%q, %v) want (%q, [%q])", s.Server, s.BlessingPatterns, ep1, "idp/server")
+ } else if e, err = resolve("[otherpattern]server"); err != nil {
+ // When resolving with the "[<pattern>]<OA>" syntax, <pattern> wins.
+ t.Error(err)
+ } else if s = e.Servers[0]; s.Server != ep1 || len(s.BlessingPatterns) != 1 || s.BlessingPatterns[0] != "otherpattern" {
+ t.Errorf("Got (%q, %v) want (%q, [%q])", s.Server, s.BlessingPatterns, ep1, "otherpattern")
+ }
+ // If an option is explicitly specified, it should be respected.
+ if err := mount("server", ep1, time.Minute, naming.ReplaceMountOpt(true), naming.MountedServerBlessingsOpt{"b1", "b2"}); err != nil {
+ t.Error(err)
+ } else if e, err := resolve("server"); err != nil {
+ t.Error(err)
+ } else if len(e.Servers) != 1 {
+ t.Errorf("Got %v, wanted a single server", e.Servers)
+ } else if s, pats := e.Servers[0], []string{"b1", "b2"}; s.Server != ep1 || !reflect.DeepEqual(s.BlessingPatterns, pats) {
+ t.Errorf("Got (%q, %v) want (%q, %v)", s.Server, s.BlessingPatterns, ep1, pats)
+ }
+
+ // Intermediate mounttables should be authenticated.
+ mt := runMT(t, mtCtx, "mt")
+ // Mount a server on "mt".
+ if err := mount("mt/server", ep1, time.Minute, naming.ReplaceMountOpt(true)); err != nil {
+ t.Error(err)
+ }
+ // Imagine that the network address of "mt" has been taken over by an attacker. However, this attacker cannot
+ // mess with the mount entry for "mt". This would result in "mt" and its mount entry (in the global mounttable)
+ // having inconsistent blessings. Simulate this by explicitly changing the mount entry for "mt".
+ if err := v23.GetNamespace(mtCtx).Mount(mtCtx, "mt", mt.name, time.Minute, naming.ServesMountTableOpt(true), naming.MountedServerBlessingsOpt{"realmounttable"}, naming.ReplaceMountOpt(true)); err != nil {
+ t.Error(err)
+ }
+
+ if e, err := resolve("mt/server", options.SkipResolveAuthorization{}); err != nil {
+ t.Errorf("Resolve should succeed when skipping server authorization. Got (%v, %v)", e, err)
+ } else if e, err := resolve("mt/server"); !verror.Is(err, verror.ErrNotTrusted.ID) {
+ t.Errorf("Resolve should have failed with %q because an attacker has taken over the intermediate mounttable. Got (%+v, errorid=%q:%v)", verror.ErrNotTrusted.ID, e, verror.ErrorID(err), err)
+ }
+}
diff --git a/profiles/internal/naming/namespace/cache.go b/profiles/internal/naming/namespace/cache.go
new file mode 100644
index 0000000..15cbd7f
--- /dev/null
+++ b/profiles/internal/naming/namespace/cache.go
@@ -0,0 +1,163 @@
+package namespace
+
+import (
+ "math/rand"
+ "strings"
+ "sync"
+ "time"
+
+ "v.io/v23/naming"
+ "v.io/v23/verror"
+ "v.io/x/lib/vlog"
+)
+
+// maxCacheEntries is the max number of cache entries to keep. It exists only so that we
+// can avoid edge cases blowing up the cache.
+const maxCacheEntries = 4000
+
+// cacheHisteresisSize is the size the cache is pruned back to when it fills up.
+const cacheHisteresisSize = (3 * maxCacheEntries) / 4
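+
+// With the values above, the cache is trimmed from 4000 entries back down to
+// 3000 whenever it fills up.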
+
+// cache is a generic interface to the resolution cache.
+type cache interface {
+ remember(prefix string, entry *naming.MountEntry)
+ forget(names []string)
+ lookup(name string) (naming.MountEntry, error)
+}
+
+// ttlCache is an instance of cache that obeys ttl from the mount points.
+type ttlCache struct {
+ sync.Mutex
+ entries map[string]naming.MountEntry
+}
+
+// newTTLCache creates an empty ttlCache.
+func newTTLCache() cache {
+ return &ttlCache{entries: make(map[string]naming.MountEntry)}
+}
+
+func isStale(now time.Time, e naming.MountEntry) bool {
+ for _, s := range e.Servers {
+ if s.Expires.Before(now) {
+ return true
+ }
+ }
+ return false
+}
+
+// randomDrop randomly removes one cache entry. Assumes we've already locked the cache.
+func (c *ttlCache) randomDrop() {
+ n := rand.Intn(len(c.entries))
+ for k := range c.entries {
+ if n == 0 {
+ delete(c.entries, k)
+ break
+ }
+ n--
+ }
+}
+
+// cleaner reduces the number of entries. Assumes we've already locked the cache.
+func (c *ttlCache) cleaner() {
+ // First dump any stale entries.
+ now := time.Now()
+ for k, v := range c.entries {
+ if len(c.entries) < cacheHisteresisSize {
+ return
+ }
+ if isStale(now, v) {
+ delete(c.entries, k)
+ }
+ }
+
+ // If we haven't gotten low enough, dump randomly.
+ for len(c.entries) >= cacheHisteresisSize {
+ c.randomDrop()
+ }
+}
+
+// remember caches the entry's servers under prefix with the entry's suffix (Name) stripped off.
+func (c *ttlCache) remember(prefix string, entry *naming.MountEntry) {
+ // Remove suffix. We only care about the name that gets us
+ // to the mounttable from the last mounttable.
+ prefix = naming.Clean(prefix)
+ entry.Name = naming.Clean(entry.Name)
+ prefix = naming.TrimSuffix(prefix, entry.Name)
+ // Copy the entry.
+ var ce naming.MountEntry
+ for _, s := range entry.Servers {
+ ce.Servers = append(ce.Servers, s)
+ }
+ ce.SetServesMountTable(entry.ServesMountTable())
+ c.Lock()
+ // Enforce an upper limit on the cache size.
+ if len(c.entries) >= maxCacheEntries {
+ if _, ok := c.entries[prefix]; !ok {
+ c.cleaner()
+ }
+ }
+ c.entries[prefix] = ce
+ c.Unlock()
+}
+
+// forget removes cache entries whose key begins with an element of names.
+func (c *ttlCache) forget(names []string) {
+ c.Lock()
+ defer c.Unlock()
+ for key := range c.entries {
+ for _, n := range names {
+ n = naming.Clean(n)
+ if strings.HasPrefix(key, n) {
+ delete(c.entries, key)
+ break
+ }
+ }
+ }
+}
+
+// lookup searches the cache for the longest cached prefix of name and returns the
+// associated entry with its Name set to the remaining suffix. If any of the associated
+// servers has expired, an error is returned since returning a stale entry would reduce
+// availability.
+func (c *ttlCache) lookup(name string) (naming.MountEntry, error) {
+ name = naming.Clean(name)
+ c.Lock()
+ defer c.Unlock()
+ now := time.Now()
+ for prefix, suffix := name, ""; len(prefix) > 0; prefix, suffix = backup(prefix, suffix) {
+ e, ok := c.entries[prefix]
+ if !ok {
+ continue
+ }
+ if isStale(now, e) {
+ return e, verror.New(naming.ErrNoSuchName, nil, name)
+ }
+ vlog.VI(2).Infof("namespace cache %s -> %v %s", name, e.Servers, e.Name)
+ e.Name = suffix
+ return e, nil
+ }
+ return naming.MountEntry{}, verror.New(naming.ErrNoSuchName, nil, name)
+}
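+
+// For example, after remember("/h1//a/b/c/d", e) where e.Name is "c/d", the
+// entry is cached under the prefix "/h1/a/b", and lookup("/h1/a/b/x/y")
+// returns it with Name set to "x/y".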
+
+// backup moves the last element of the prefix to the suffix.
+func backup(prefix, suffix string) (string, string) {
+ for i := len(prefix) - 1; i > 0; i-- {
+ if prefix[i] != '/' {
+ continue
+ }
+ suffix = naming.Join(prefix[i+1:], suffix)
+ prefix = prefix[:i]
+ return prefix, suffix
+ }
+ return "", naming.Join(prefix, suffix)
+}
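+
+// For example, backup("a/b/c", "") returns ("a/b", "c"), and calling backup
+// again on that result returns ("a", "b/c").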
+
+// nullCache is an instance of cache that does nothing.
+type nullCache int
+
+func newNullCache() cache { return nullCache(1) }
+func (nullCache) remember(prefix string, entry *naming.MountEntry) {}
+func (nullCache) forget(names []string) {}
+func (nullCache) lookup(name string) (e naming.MountEntry, err error) {
+ return e, verror.New(naming.ErrNoSuchName, nil, name)
+}
diff --git a/profiles/internal/naming/namespace/cache_test.go b/profiles/internal/naming/namespace/cache_test.go
new file mode 100644
index 0000000..7534854
--- /dev/null
+++ b/profiles/internal/naming/namespace/cache_test.go
@@ -0,0 +1,188 @@
+package namespace
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "v.io/v23/naming"
+)
+
+func compatible(server string, servers []naming.MountedServer) bool {
+ if len(servers) == 0 {
+ return server == ""
+ }
+ return servers[0].Server == server
+}
+
+func future(secs uint32) time.Time {
+ return time.Now().Add(time.Duration(secs) * time.Second)
+}
+
+// TestCache tests the cache directly rather than via the namespace methods.
+func TestCache(t *testing.T) {
+ preload := []struct {
+ name string
+ suffix string
+ server string
+ }{
+ {"/h1//a/b/c/d", "c/d", "/h2"},
+ {"/h2//c/d", "d", "/h3"},
+ {"/h3//d", "", "/h4:1234"},
+ }
+ c := newTTLCache()
+ for _, p := range preload {
+ e := &naming.MountEntry{Name: p.suffix, Servers: []naming.MountedServer{naming.MountedServer{Server: p.server, Expires: future(30)}}}
+ c.remember(p.name, e)
+ }
+
+ tests := []struct {
+ name string
+ suffix string
+ server string
+ succeed bool
+ }{
+ {"/h1//a/b/c/d", "c/d", "/h2", true},
+ {"/h2//c/d", "d", "/h3", true},
+ {"/h3//d", "", "/h4:1234", true},
+ {"/notintcache", "", "", false},
+ {"/h1//a/b/f//g", "f/g", "/h2", true},
+ {"/h3//d//e", "e", "/h4:1234", true},
+ }
+ for _, p := range tests {
+ e, err := c.lookup(p.name)
+ if (err == nil) != p.succeed {
+ t.Errorf("%s: lookup failed", p.name)
+ }
+ if e.Name != p.suffix || !compatible(p.server, e.Servers) {
+ t.Errorf("%s: got %v, %s not %s, %s", p.name, e.Name, e.Servers, p.suffix, p.server)
+ }
+ }
+}
+
+func TestCacheLimit(t *testing.T) {
+ c := newTTLCache().(*ttlCache)
+ e := &naming.MountEntry{Servers: []naming.MountedServer{naming.MountedServer{Server: "the rain in spain", Expires: future(3000)}}}
+ for i := 0; i < maxCacheEntries; i++ {
+ c.remember(fmt.Sprintf("%d", i), e)
+ if len(c.entries) > maxCacheEntries {
+ t.Errorf("unexpected cache size: got %d not %d", len(c.entries), maxCacheEntries)
+ }
+ }
+ // Adding one more element should reduce us to 3/4 full.
+ c.remember(fmt.Sprintf("%d", maxCacheEntries), e)
+ if len(c.entries) != cacheHisteresisSize {
+ t.Errorf("cache shrunk wrong amount: got %d not %d", len(c.entries), cacheHisteresisSize)
+ }
+}
+
+func TestCacheTTL(t *testing.T) {
+ before := time.Now()
+ c := newTTLCache().(*ttlCache)
+ // Fill cache.
+ e := &naming.MountEntry{Servers: []naming.MountedServer{naming.MountedServer{Server: "the rain in spain", Expires: future(3000)}}}
+ for i := 0; i < maxCacheEntries; i++ {
+ c.remember(fmt.Sprintf("%d", i), e)
+ }
+ // Time out half the entries.
+ i := len(c.entries) / 2
+ for k := range c.entries {
+ c.entries[k].Servers[0].Expires = before
+ if i == 0 {
+ break
+ }
+ i--
+ }
+ // Add an entry and make sure we now have room.
+ c.remember(fmt.Sprintf("%d", maxCacheEntries+2), e)
+ if len(c.entries) > cacheHisteresisSize {
+ t.Errorf("entries did not timeout: got %d not %d", len(c.entries), cacheHisteresisSize)
+ }
+}
+
+func TestFlushCacheEntry(t *testing.T) {
+ preload := []struct {
+ name string
+ server string
+ }{
+ {"/h1//a/b", "/h2//c"},
+ {"/h2//c", "/h3"},
+ {"/h3//d", "/h4:1234"},
+ }
+ ns, _ := New()
+ c := ns.resolutionCache.(*ttlCache)
+ for _, p := range preload {
+ e := &naming.MountEntry{Servers: []naming.MountedServer{naming.MountedServer{Server: "p.server", Expires: future(3000)}}}
+ c.remember(p.name, e)
+ }
+ toflush := "/h1/xyzzy"
+ if ns.FlushCacheEntry(toflush) {
+ t.Errorf("%s should not have caused anything to flush", toflush)
+ }
+ toflush = "/h1/a/b/d/e"
+ if !ns.FlushCacheEntry(toflush) {
+ t.Errorf("%s should have caused something to flush", toflush)
+ }
+ name := preload[2].name
+ if _, err := c.lookup(name); err != nil {
+ t.Errorf("%s should not have been flushed", name)
+ }
+ if len(c.entries) != 2 {
+ t.Errorf("%s flushed too many entries", toflush)
+ }
+ toflush = preload[1].name
+ if !ns.FlushCacheEntry(toflush) {
+ t.Errorf("%s should have caused something to flush", toflush)
+ }
+ if _, ok := c.entries[toflush]; ok {
+ t.Errorf("%s should have been flushed", name)
+ }
+ if len(c.entries) != 1 {
+ t.Errorf("%s flushed too many entries", toflush)
+ }
+}
+
+func disabled(ctls []naming.CacheCtl) bool {
+ for _, c := range ctls {
+ if v, ok := c.(naming.DisableCache); ok && bool(v) {
+ return true
+ }
+ }
+ return false
+}
+
+func TestCacheDisableEnable(t *testing.T) {
+ ns, _ := New()
+
+ // Default should be working resolution cache.
+ name := "/h1//a"
+ serverName := "/h2//"
+ c := ns.resolutionCache.(*ttlCache)
+ e := &naming.MountEntry{Servers: []naming.MountedServer{naming.MountedServer{Server: serverName, Expires: future(3000)}}}
+ c.remember(name, e)
+ if ne, err := c.lookup(name); err != nil || ne.Servers[0].Server != serverName {
+ t.Errorf("should have found the server in the cache")
+ }
+
+ // Turn off the resolution cache.
+ ctls := ns.CacheCtl(naming.DisableCache(true))
+ if !disabled(ctls) {
+ t.Errorf("caching not disabled")
+ }
+ nc := ns.resolutionCache.(nullCache)
+ nc.remember(name, e)
+ if _, err := nc.lookup(name); err == nil {
+ t.Errorf("should not have found the server in the cache")
+ }
+
+ // Turn on the resolution cache.
+ ctls = ns.CacheCtl(naming.DisableCache(false))
+ if disabled(ctls) {
+ t.Errorf("caching disabled")
+ }
+ c = ns.resolutionCache.(*ttlCache)
+ c.remember(name, e)
+ if ne, err := c.lookup(name); err != nil || ne.Servers[0].Server != serverName {
+ t.Errorf("should have found the server in the cache")
+ }
+}
diff --git a/profiles/internal/naming/namespace/glob.go b/profiles/internal/naming/namespace/glob.go
new file mode 100644
index 0000000..cdb70ef
--- /dev/null
+++ b/profiles/internal/naming/namespace/glob.go
@@ -0,0 +1,232 @@
+package namespace
+
+import (
+ "io"
+ "strings"
+
+ "v.io/x/ref/lib/glob"
+
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/naming"
+ "v.io/v23/verror"
+ "v.io/x/lib/vlog"
+)
+
+// task is a sub-glob that has to be performed against a mount table. Tasks are
+// done in parallel to speed up the glob.
+type task struct {
+ pattern *glob.Glob // pattern to match
+ er *naming.GlobError // error for that particular point in the name space
+ me *naming.MountEntry // server to match at
+ error error // any error performing this task
+ depth int // number of mount tables traversed recursively
+}
+
+// globAtServer performs a Glob on the servers at a mount point. It cycles through the set of
+// servers until it finds one that replies.
+func (ns *namespace) globAtServer(ctx *context.T, t *task, replies chan *task) {
+ defer func() {
+ if t.error == nil {
+ replies <- nil
+ } else {
+ replies <- t
+ }
+ }()
+ client := v23.GetClient(ctx)
+ pstr := t.pattern.String()
+ vlog.VI(2).Infof("globAtServer(%v, %v)", *t.me, pstr)
+
+ servers := []string{}
+ for _, s := range t.me.Servers {
+ servers = append(servers, naming.JoinAddressName(s.Server, ""))
+ }
+
+ // If there are no servers to call, this isn't a mount point. No sense
+ // trying to call servers that aren't there.
+ if len(servers) == 0 {
+ t.error = nil
+ return
+ }
+ call, err := ns.parallelStartCall(ctx, client, servers, ipc.GlobMethod, []interface{}{pstr})
+ if err != nil {
+ t.error = err
+ return
+ }
+
+	// At this point we're committed to the server that answered the call
+ // first. Cycle through all replies from that server.
+ for {
+ // If the mount table returns an error, we're done. Send the task to the channel
+ // including the error. This terminates the task.
+ var gr naming.VDLGlobReply
+ err := call.Recv(&gr)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.error = err
+ return
+ }
+
+ var x *task
+ switch v := gr.(type) {
+ case naming.VDLGlobReplyEntry:
+			// Convert to the ever so slightly different naming.MountEntry
+			// representation and add it to the list.
+ x = &task{
+ me: &naming.MountEntry{
+ Name: naming.Join(t.me.Name, v.Value.Name),
+ Servers: convertServers(v.Value.Servers),
+ },
+ depth: t.depth + 1,
+ }
+ x.me.SetServesMountTable(v.Value.MT)
+ case naming.VDLGlobReplyError:
+ // Pass on the error.
+ x = &task{
+ er: &v.Value,
+ depth: t.depth + 1,
+ }
+ }
+
+ // x.depth is the number of servers we've walked through since we've gone
+ // recursive (i.e. with pattern length of 0). Limit the depth of globs.
+ // TODO(p): return an error?
+ if t.pattern.Len() == 0 {
+ if x.depth > ns.maxRecursiveGlobDepth {
+ continue
+ }
+ }
+ replies <- x
+ }
+ t.error = call.Finish()
+ return
+}
+
+// depth returns the directory depth of a given name. It is used to pick off the unsatisfied part of the pattern.
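+// For example, depth("") == 0, depth("foo") == 1 and depth("foo/bar") == 2.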
+func depth(name string) int {
+ name = strings.Trim(naming.Clean(name), "/")
+ if name == "" {
+ return 0
+ }
+ return strings.Count(name, "/") + 1
+}
+
+// globLoop fires off a goroutine for each server and reads back the replies.
+func (ns *namespace) globLoop(ctx *context.T, e *naming.MountEntry, prefix string, pattern *glob.Glob, reply chan interface{}) {
+ defer close(reply)
+
+ // Provide enough buffers to avoid too much switching between the readers and the writers.
+ // This size is just a guess.
+ replies := make(chan *task, 100)
+ defer close(replies)
+
+ // Push the first task into the channel to start the ball rolling. This task has the
+ // root of the search and the full pattern. It will be the first task fired off in the for
+ // loop that follows.
+ replies <- &task{me: e, pattern: pattern}
+ inFlight := 0
+
+ // Perform a parallel search of the name graph. Each task will send what it learns
+ // on the replies channel. If the reply is a mount point and the pattern is not completely
+ // fulfilled, a new task will be fired off to handle it.
+ for {
+ select {
+ case t := <-replies:
+ // A nil reply represents a successfully terminated task.
+ // If no tasks are running, return.
+ if t == nil {
+ if inFlight--; inFlight <= 0 {
+ return
+ }
+ continue
+ }
+
+ // We want to output this entry if there was a real error other than
+ // "not a mount table".
+ //
+ // An error reply is also a terminated task.
+ // If no tasks are running, return.
+ if t.error != nil {
+ if !notAnMT(t.error) {
+ x := naming.GlobError{Name: naming.Join(prefix, t.me.Name), Error: t.error}
+ reply <- &x
+ }
+ if inFlight--; inFlight <= 0 {
+ return
+ }
+ continue
+ }
+
+ // If this is just an error from the mount table, pass it on.
+ if t.er != nil {
+ x := *t.er
+ x.Name = naming.Join(prefix, x.Name)
+ reply <- &x
+ continue
+ }
+
+ // Get the pattern elements below the current path.
+ suffix := pattern.Split(depth(t.me.Name))
+
+ // If we've satisfied the request and this isn't the root,
+ // reply to the caller.
+ if suffix.Len() == 0 && t.depth != 0 {
+ x := *t.me
+ x.Name = naming.Join(prefix, x.Name)
+ reply <- &x
+ }
+
+ // If the pattern is finished (so we're only querying about the root on the
+ // remote server) and the server is not another MT, then we needn't send the
+ // query on since we know the server will not supply a new address for the
+ // current name.
+ if suffix.Finished() {
+ if !t.me.ServesMountTable() {
+ continue
+ }
+ }
+
+ // If this is restricted recursive and not a mount table, don't descend into it.
+ if suffix.Restricted() && suffix.Len() == 0 && !t.me.ServesMountTable() {
+ continue
+ }
+
+ // Perform a glob at the next server.
+ inFlight++
+ t.pattern = suffix
+ go ns.globAtServer(ctx, t, replies)
+ }
+ }
+}
+
+// Glob implements naming.Namespace.Glob.
+func (ns *namespace) Glob(ctx *context.T, pattern string) (chan interface{}, error) {
+ defer vlog.LogCall()()
+ // Root the pattern. If we have no servers to query, give up.
+ // TODO(ashankar): Should not ignore the pattern on the end server?
+ e, _, patternWasRooted := ns.rootMountEntry(pattern)
+ if len(e.Servers) == 0 {
+ return nil, verror.New(naming.ErrNoMountTable, ctx)
+ }
+
+ // If the name doesn't parse, give up.
+ g, err := glob.Parse(e.Name)
+ if err != nil {
+ return nil, err
+ }
+
+ // If pattern was already rooted, make sure we tack that root
+ // onto all returned names. Otherwise, just return the relative
+ // name.
+ var prefix string
+ if patternWasRooted {
+ prefix = e.Servers[0].Server
+ }
+ e.Name = ""
+ reply := make(chan interface{}, 100)
+ go ns.globLoop(ctx, e, prefix, g, reply)
+ return reply, nil
+}
diff --git a/profiles/internal/naming/namespace/glob_test.go b/profiles/internal/naming/namespace/glob_test.go
new file mode 100644
index 0000000..7d6ff2c
--- /dev/null
+++ b/profiles/internal/naming/namespace/glob_test.go
@@ -0,0 +1,70 @@
+package namespace
+
+import (
+ "testing"
+
+ "v.io/v23/security"
+)
+
+func TestDepth(t *testing.T) {
+ cases := []struct {
+ name string
+ depth int
+ }{
+ {"", 0},
+ {"foo", 1},
+ {"foo/", 1},
+ {"foo/bar", 2},
+ {"foo//bar", 2},
+ {"/foo/bar", 2},
+ {"//", 0},
+ {"//foo//bar", 2},
+ {"/foo/bar//baz//baf/", 4},
+ }
+ for _, c := range cases {
+ if got, want := depth(c.name), c.depth; want != got {
+ t.Errorf("%q: unexpected depth: %d not %d", c.name, got, want)
+ }
+ }
+}
+
+func TestSplitObjectName(t *testing.T) {
+ const notset = ""
+ cases := []struct {
+ input string
+ mt, server security.BlessingPattern
+ name string
+ }{
+ {"[foo/bar]", notset, "foo/bar", ""},
+ {"[x/y]/", "x/y", notset, "/"},
+ {"[foo]a", notset, "foo", "a"},
+ {"[foo]/a", "foo", notset, "/a"},
+ {"[foo]/a/[bar]", "foo", "bar", "/a"},
+ {"a/b", notset, notset, "a/b"},
+ {"[foo]a/b", notset, "foo", "a/b"},
+ {"/a/b", notset, notset, "/a/b"},
+ {"[foo]/a/b", "foo", notset, "/a/b"},
+ {"/a/[bar]b", notset, "bar", "/a/b"},
+ {"[foo]/a/[bar]b", "foo", "bar", "/a/b"},
+ {"/a/b[foo]", notset, notset, "/a/b[foo]"},
+ {"/a/b/[foo]c", notset, notset, "/a/b/[foo]c"},
+ {"/[01:02::]:444", notset, notset, "/[01:02::]:444"},
+ {"[foo]/[01:02::]:444", "foo", notset, "/[01:02::]:444"},
+ {"/[01:02::]:444/foo", notset, notset, "/[01:02::]:444/foo"},
+ {"[a]/[01:02::]:444/foo", "a", notset, "/[01:02::]:444/foo"},
+ {"/[01:02::]:444/[b]foo", notset, "b", "/[01:02::]:444/foo"},
+ {"[c]/[01:02::]:444/[d]foo", "c", "d", "/[01:02::]:444/foo"},
+ }
+ for _, c := range cases {
+ mt, server, name := splitObjectName(c.input)
+ if mt != c.mt {
+ t.Errorf("%q: unexpected mt pattern: %q not %q", c.input, mt, c.mt)
+ }
+ if server != c.server {
+ t.Errorf("%q: unexpected server pattern: %q not %q", c.input, server, c.server)
+ }
+ if name != c.name {
+ t.Errorf("%q: unexpected name: %q not %q", c.input, name, c.name)
+ }
+ }
+}
diff --git a/profiles/internal/naming/namespace/mount.go b/profiles/internal/naming/namespace/mount.go
new file mode 100644
index 0000000..029004f
--- /dev/null
+++ b/profiles/internal/naming/namespace/mount.go
@@ -0,0 +1,105 @@
+package namespace
+
+import (
+ "fmt"
+ "time"
+
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/naming"
+ "v.io/v23/options"
+ "v.io/v23/security"
+ "v.io/x/lib/vlog"
+)
+
+// mountIntoMountTable mounts a single server into a single mount table.
+func mountIntoMountTable(ctx *context.T, client ipc.Client, name, server string, patterns []security.BlessingPattern, ttl time.Duration, flags naming.MountFlag, id string) (s status) {
+ s.id = id
+ ctx, _ = context.WithTimeout(ctx, callTimeout)
+ call, err := client.StartCall(ctx, name, "MountX", []interface{}{server, patterns, uint32(ttl.Seconds()), flags}, options.NoResolve{})
+ s.err = err
+ if err != nil {
+ return
+ }
+ s.err = call.Finish()
+ return
+}
+
+// unmountFromMountTable removes a single mounted server from a single mount table.
+func unmountFromMountTable(ctx *context.T, client ipc.Client, name, server string, id string) (s status) {
+ s.id = id
+ ctx, _ = context.WithTimeout(ctx, callTimeout)
+ call, err := client.StartCall(ctx, name, "Unmount", []interface{}{server}, options.NoResolve{})
+ s.err = err
+ if err != nil {
+ return
+ }
+ s.err = call.Finish()
+ return
+}
+
+func (ns *namespace) Mount(ctx *context.T, name, server string, ttl time.Duration, opts ...naming.MountOpt) error {
+ defer vlog.LogCall()()
+
+ var flags naming.MountFlag
+ var patterns []string
+ for _, o := range opts {
+		// NB: we use a switch since we'll be adding more options.
+ switch v := o.(type) {
+ case naming.ReplaceMountOpt:
+ if v {
+ flags |= naming.MountFlag(naming.Replace)
+ }
+ case naming.ServesMountTableOpt:
+ if v {
+ flags |= naming.MountFlag(naming.MT)
+ }
+ case naming.MountedServerBlessingsOpt:
+ patterns = []string(v)
+ }
+ }
+ if len(patterns) == 0 {
+ // No patterns explicitly provided. Take the conservative
+ // approach that the server being mounted is run by this local
+ // process.
+ p := v23.GetPrincipal(ctx)
+ b := p.BlessingStore().Default()
+ if b.IsZero() {
+ return fmt.Errorf("must provide a MountedServerBlessingsOpt")
+ }
+		for str := range p.BlessingsInfo(b) {
+ patterns = append(patterns, str)
+ }
+ vlog.VI(2).Infof("Mount(%s, %s): No MountedServerBlessingsOpt provided using %v", name, server, patterns)
+ }
+
+ client := v23.GetClient(ctx)
+ // Mount the server in all the returned mount tables.
+ f := func(ctx *context.T, mt, id string) status {
+ return mountIntoMountTable(ctx, client, mt, server, str2pattern(patterns), ttl, flags, id)
+ }
+ err := ns.dispatch(ctx, name, f)
+ vlog.VI(1).Infof("Mount(%s, %q, %v) -> %v", name, server, patterns, err)
+ return err
+}
+
+func (ns *namespace) Unmount(ctx *context.T, name, server string) error {
+ defer vlog.LogCall()()
+ // Unmount the server from all the mount tables.
+ client := v23.GetClient(ctx)
+ f := func(ctx *context.T, mt, id string) status {
+ return unmountFromMountTable(ctx, client, mt, server, id)
+ }
+ err := ns.dispatch(ctx, name, f)
+ vlog.VI(1).Infof("Unmount(%s, %s) -> %v", name, server, err)
+ return err
+}
+
+func str2pattern(strs []string) (ret []security.BlessingPattern) {
+ ret = make([]security.BlessingPattern, len(strs))
+ for i, s := range strs {
+ ret[i] = security.BlessingPattern(s)
+ }
+ return
+}
diff --git a/profiles/internal/naming/namespace/namespace.go b/profiles/internal/naming/namespace/namespace.go
new file mode 100644
index 0000000..a4c081e
--- /dev/null
+++ b/profiles/internal/naming/namespace/namespace.go
@@ -0,0 +1,254 @@
+package namespace
+
+import (
+ "regexp"
+ "sync"
+ "time"
+
+ inaming "v.io/x/ref/profiles/internal/naming"
+
+ "v.io/v23/naming"
+ "v.io/v23/security"
+ "v.io/v23/verror"
+ "v.io/x/lib/vlog"
+)
+
+const defaultMaxResolveDepth = 32
+const defaultMaxRecursiveGlobDepth = 10
+
+var serverPatternRegexp = regexp.MustCompile("^\\[([^\\]]+)\\](.*)")
+
+const pkgPath = "v.io/x/ref/profiles/internal/naming/namespace"
+
+var (
+ errNotRootedName = verror.Register(pkgPath+".errNotRootedName", verror.NoRetry, "{1:}{2:} At least one root is not a rooted name{:_}")
+)
+
+// namespace is an implementation of naming.Namespace.
+type namespace struct {
+ sync.RWMutex
+
+ // the default root servers for resolutions in this namespace.
+ roots []string
+
+ // depth limits
+ maxResolveDepth int
+ maxRecursiveGlobDepth int
+
+ // cache for name resolutions
+ resolutionCache cache
+}
+
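+// rooted returns true iff every name in the list is rooted, i.e. begins with
+// "/" followed by a server address.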
+func rooted(names []string) bool {
+ for _, n := range names {
+ if a, _ := naming.SplitAddressName(n); len(a) == 0 {
+ return false
+ }
+ }
+ return true
+}
+
+func badRoots(roots []string) error {
+ return verror.New(errNotRootedName, nil, roots)
+}
+
+// Create a new namespace.
+func New(roots ...string) (*namespace, error) {
+ if !rooted(roots) {
+ return nil, badRoots(roots)
+ }
+ // A namespace with no roots can still be used for lookups of rooted names.
+ return &namespace{
+ roots: roots,
+ maxResolveDepth: defaultMaxResolveDepth,
+ maxRecursiveGlobDepth: defaultMaxRecursiveGlobDepth,
+ resolutionCache: newTTLCache(),
+ }, nil
+}
+
+// SetRoots implements naming.Namespace.SetRoots
+func (ns *namespace) SetRoots(roots ...string) error {
+ defer vlog.LogCall()()
+ // Allow roots to be cleared with a call of SetRoots()
+ if len(roots) > 0 && !rooted(roots) {
+ return badRoots(roots)
+ }
+ ns.Lock()
+ defer ns.Unlock()
+ // TODO(cnicolaou): filter out duplicate values.
+ ns.roots = roots
+ return nil
+}
+
+// SetDepthLimits overrides the default limits.
+func (ns *namespace) SetDepthLimits(resolve, glob int) {
+ if resolve >= 0 {
+ ns.maxResolveDepth = resolve
+ }
+ if glob >= 0 {
+ ns.maxRecursiveGlobDepth = glob
+ }
+}
+
+// Roots implements naming.Namespace.Roots
+func (ns *namespace) Roots() []string {
+ //nologcall
+ ns.RLock()
+ defer ns.RUnlock()
+ roots := make([]string, len(ns.roots))
+ for i, r := range ns.roots {
+ roots[i] = r
+ }
+ return roots
+}
+
+// rootName 'roots' a name: if name is not a rooted name, it prepends the root
+// mounttable's OA.
+func (ns *namespace) rootName(name string) []string {
+ name = naming.Clean(name)
+ if address, _ := naming.SplitAddressName(name); len(address) == 0 {
+ var ret []string
+ ns.RLock()
+ defer ns.RUnlock()
+ for _, r := range ns.roots {
+ ret = append(ret, naming.Join(r, name))
+ }
+ return ret
+ }
+ return []string{name}
+}
+
+// rootMountEntry 'roots' a name, creating a mount entry for it.
+//
+// Returns:
+// (1) MountEntry
+// (2) The BlessingPattern that the end servers are expected to match
+// (empty string if no such pattern).
+// (3) Whether "name" is a rooted name or not (if not, the namespace roots
+// configured in "ns" will be used).
+func (ns *namespace) rootMountEntry(name string, opts ...naming.ResolveOpt) (*naming.MountEntry, security.BlessingPattern, bool) {
+ name = naming.Clean(name)
+ _, objPattern, name := splitObjectName(name)
+ mtPattern := getRootPattern(opts)
+ e := new(naming.MountEntry)
+ expiration := time.Now().Add(time.Hour) // plenty of time for a call
+ address, suffix := naming.SplitAddressName(name)
+ if len(address) == 0 {
+ e.SetServesMountTable(true)
+ e.Name = name
+ ns.RLock()
+ defer ns.RUnlock()
+ for _, r := range ns.roots {
+ // TODO(ashankar): Configured namespace roots should also include the pattern?
+ server := naming.MountedServer{Server: r, Expires: expiration}
+ if len(mtPattern) > 0 {
+ server.BlessingPatterns = []string{mtPattern}
+ }
+ e.Servers = append(e.Servers, server)
+ }
+ return e, objPattern, false
+ }
+ servesMT := true
+ if ep, err := inaming.NewEndpoint(address); err == nil {
+ servesMT = ep.ServesMountTable()
+ }
+ e.SetServesMountTable(servesMT)
+ e.Name = suffix
+ server := naming.MountedServer{Server: naming.JoinAddressName(address, ""), Expires: expiration}
+ if servesMT && len(mtPattern) > 0 {
+ server.BlessingPatterns = []string{string(mtPattern)}
+ }
+ e.Servers = []naming.MountedServer{server}
+ return e, objPattern, true
+}
+
+// notAnMT returns true if the error indicates this isn't a mounttable server.
+func notAnMT(err error) bool {
+ switch verror.ErrorID(err) {
+ case verror.ErrBadArg.ID:
+ // This should cover "ipc: wrong number of in-args".
+ return true
+ case verror.ErrNoExist.ID:
+ // This should cover "ipc: unknown method", "ipc: dispatcher not
+ // found", and dispatcher Lookup not found errors.
+ return true
+ case verror.ErrBadProtocol.ID:
+ // This covers "ipc: response decoding failed: EOF".
+ return true
+ }
+ return false
+}
+
+// all operations against the mount table service use this fixed timeout for the
+// time being.
+const callTimeout = 30 * time.Second
+
+// CacheCtl implements naming.Namespace.CacheCtl
+func (ns *namespace) CacheCtl(ctls ...naming.CacheCtl) []naming.CacheCtl {
+ defer vlog.LogCall()()
+ for _, c := range ctls {
+ switch v := c.(type) {
+ case naming.DisableCache:
+ ns.Lock()
+ if _, isDisabled := ns.resolutionCache.(nullCache); isDisabled {
+ if !v {
+ ns.resolutionCache = newTTLCache()
+ }
+ } else {
+ if v {
+ ns.resolutionCache = newNullCache()
+ }
+ }
+ ns.Unlock()
+ }
+ }
+ ns.RLock()
+ defer ns.RUnlock()
+ if _, isDisabled := ns.resolutionCache.(nullCache); isDisabled {
+ return []naming.CacheCtl{naming.DisableCache(true)}
+ }
+ return nil
+}
+
+// TODO(ribrdb,ashankar): This is exported only for the mock namespace to share
+// functionality. Refactor this sharing and do not use this function outside
+// the one place it is being used to implement a mock namespace.
+func InternalSplitObjectName(name string) (p security.BlessingPattern, n string) {
+ _, p, n = splitObjectName(name)
+ return
+}
+
+// TODO(ashankar,ribrdb): Get rid of "mtPattern"?
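+// splitObjectName strips the leading blessing patterns (enclosed in square
+// brackets) from a name. A pattern in front of a rooted name applies to the
+// mount table, while a pattern immediately before the suffix applies to the
+// end server; e.g. "[mt]/addr/[srv]suffix" yields mtPattern "mt",
+// serverPattern "srv" and objectName "/addr/suffix".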
+func splitObjectName(name string) (mtPattern, serverPattern security.BlessingPattern, objectName string) {
+ objectName = name
+ match := serverPatternRegexp.FindSubmatch([]byte(name))
+ if match != nil {
+ objectName = string(match[2])
+ if naming.Rooted(objectName) {
+ mtPattern = security.BlessingPattern(match[1])
+ } else {
+ serverPattern = security.BlessingPattern(match[1])
+ return
+ }
+ }
+ if !naming.Rooted(objectName) {
+ return
+ }
+
+ address, relative := naming.SplitAddressName(objectName)
+ match = serverPatternRegexp.FindSubmatch([]byte(relative))
+ if match != nil {
+ serverPattern = security.BlessingPattern(match[1])
+ objectName = naming.JoinAddressName(address, string(match[2]))
+ }
+ return
+}
+
+func getRootPattern(opts []naming.ResolveOpt) string {
+ for _, opt := range opts {
+ if pattern, ok := opt.(naming.RootBlessingPatternOpt); ok {
+ return string(pattern)
+ }
+ }
+ return ""
+}
diff --git a/profiles/internal/naming/namespace/parallelstartcall.go b/profiles/internal/naming/namespace/parallelstartcall.go
new file mode 100644
index 0000000..115a028
--- /dev/null
+++ b/profiles/internal/naming/namespace/parallelstartcall.go
@@ -0,0 +1,118 @@
+package namespace
+
+import (
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/naming"
+ "v.io/v23/options"
+ "v.io/v23/verror"
+ inaming "v.io/x/ref/profiles/internal/naming"
+)
+
+type startStatus struct {
+ index int
+ err error
+ call ipc.ClientCall
+}
+
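+// tryStartCall makes a single StartCall attempt against target and reports
+// the outcome (tagged with the server's index) on c.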
+func tryStartCall(ctx *context.T, client ipc.Client, target, method string, args []interface{}, c chan startStatus, index int) {
+ call, err := client.StartCall(ctx, target, method, args, options.NoResolve{})
+ c <- startStatus{index: index, err: err, call: call}
+}
+
+// parallelStartCall returns the first succeeding StartCall.
+func (ns *namespace) parallelStartCall(ctx *context.T, client ipc.Client, servers []string, method string, args []interface{}) (ipc.ClientCall, error) {
+ if len(servers) == 0 {
+ return nil, verror.New(verror.ErrNoExist, ctx, "no servers to resolve query")
+ }
+
+ // StartCall to each of the servers.
+ c := make(chan startStatus, len(servers))
+ cancelFuncs := make([]context.CancelFunc, len(servers))
+ for index, server := range servers {
+ callCtx, cancel := context.WithTimeout(ctx, callTimeout)
+ cancelFuncs[index] = cancel
+ go tryStartCall(callCtx, client, server, method, args, c, index)
+ }
+
+ // First positive response wins. Cancel the rest. The cancellation
+ // will prevent any RPCs from starting or progressing. We do not close
+ // the channel since some go routines may still be in flight and want to
+ // write status to it. The channel will be garbage collected when all
+ // references to it disappear.
+ var final startStatus
+ for range servers {
+ final = <-c
+ if final.err == nil {
+ cancelFuncs[final.index] = nil
+ break
+ }
+ }
+ // Cancel the rest.
+ for _, cancel := range cancelFuncs {
+ if cancel != nil {
+ cancel()
+ }
+ }
+ return final.call, final.err
+}
+
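+// status is the outcome of applying an operation to a single mount table,
+// keyed by the mount table's routing id (or its address if no id is known).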
+type status struct {
+ id string
+ err error
+}
+
+// nameToRID converts a name to a routing ID string. If a routing ID can't be obtained,
+// it just returns the name.
+func nameToRID(name string) string {
+ address, _ := naming.SplitAddressName(name)
+ if ep, err := inaming.NewEndpoint(address); err == nil {
+ return ep.RID.String()
+ }
+ return name
+}
+
+// collectStati collects n status messages from channel c and returns an error if, for
+// any id, there is no successful reply.
+func collectStati(c chan status, n int) error {
+ // Make a map indexed by the routing id (or address if routing id not found) of
+ // each mount table. A mount table may be reachable via multiple addresses but
+ // each address should have the same routing id. We should only return an error
+ // if any of the ids had no successful mounts.
+ statusByID := make(map[string]error)
+ // Get the status of each request.
+ for i := 0; i < n; i++ {
+ s := <-c
+ if _, ok := statusByID[s.id]; !ok || s.err == nil {
+ statusByID[s.id] = s.err
+ }
+ }
+ // Return any error.
+ for _, s := range statusByID {
+ if s != nil {
+ return s
+ }
+ }
+ return nil
+}
+
+// dispatch executes f in parallel for each mount table implementing mTName.
+func (ns *namespace) dispatch(ctx *context.T, mTName string, f func(*context.T, string, string) status, opts ...naming.ResolveOpt) error {
+ // Resolve to all the mount tables implementing name.
+ me, err := ns.ResolveToMountTable(ctx, mTName, opts...)
+ if err != nil {
+ return err
+ }
+ mts := me.Names()
+ // Apply f to each of the returned mount tables.
+ c := make(chan status, len(mts))
+ for _, mt := range mts {
+ go func(mt string) {
+ c <- f(ctx, mt, nameToRID(mt))
+ }(mt)
+ }
+ finalerr := collectStati(c, len(mts))
+ // Forget any previous cached information about these names.
+ ns.resolutionCache.forget(mts)
+ return finalerr
+}
diff --git a/profiles/internal/naming/namespace/resolve.go b/profiles/internal/naming/namespace/resolve.go
new file mode 100644
index 0000000..2b93d2a
--- /dev/null
+++ b/profiles/internal/naming/namespace/resolve.go
@@ -0,0 +1,236 @@
+package namespace
+
+import (
+ "errors"
+ "runtime"
+
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/naming"
+ "v.io/v23/options"
+ "v.io/v23/security"
+ "v.io/v23/verror"
+ "v.io/x/lib/vlog"
+)
+
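+// resolveAgainstMountTable issues a ResolveStep against each server in e until
+// one answers, consulting and updating the resolution cache along the way.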
+func (ns *namespace) resolveAgainstMountTable(ctx *context.T, client ipc.Client, e *naming.MountEntry, opts ...ipc.CallOpt) (*naming.MountEntry, error) {
+ // Try each server till one answers.
+ finalErr := errors.New("no servers to resolve query")
+ skipServerAuth := skipServerAuthorization(opts)
+ opts = append(opts, options.NoResolve{})
+ for _, s := range e.Servers {
+ name := naming.JoinAddressName(s.Server, e.Name)
+ // First check the cache.
+ if ne, err := ns.resolutionCache.lookup(name); err == nil {
+ vlog.VI(2).Infof("resolveAMT %s from cache -> %v", name, convertServersToStrings(ne.Servers, ne.Name))
+ return &ne, nil
+ }
+ // Not in cache, call the real server.
+ if !skipServerAuth {
+ opts = setAllowedServers(opts, s.BlessingPatterns)
+ }
+ callCtx, _ := context.WithTimeout(ctx, callTimeout)
+ call, err := client.StartCall(callCtx, name, "ResolveStep", nil, opts...)
+ if err != nil {
+ finalErr = err
+ vlog.VI(2).Infof("ResolveStep.StartCall %s failed: %s", name, err)
+ continue
+ }
+ var entry naming.VDLMountEntry
+ if err := call.Finish(&entry); err != nil {
+ // If any replica says the name doesn't exist, return that fact.
+ if verror.Is(err, naming.ErrNoSuchName.ID) || verror.Is(err, naming.ErrNoSuchNameRoot.ID) {
+ return nil, err
+ }
+ // Keep track of the final error and continue with next server.
+ finalErr = err
+ vlog.VI(2).Infof("ResolveStep %s failed: %s", name, err)
+ continue
+ }
+ // Add result to cache.
+ ne := convertMountEntry(&entry)
+ ns.resolutionCache.remember(name, ne)
+ vlog.VI(2).Infof("resolveAMT %s -> %v", name, *ne)
+ return ne, nil
+ }
+ return nil, finalErr
+}
+
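+// terminal returns true if the entry contains no more suffix to resolve.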
+func terminal(e *naming.MountEntry) bool {
+ return len(e.Name) == 0
+}
+
+// Resolve implements v.io/v23/naming.Namespace.
+func (ns *namespace) Resolve(ctx *context.T, name string, opts ...naming.ResolveOpt) (*naming.MountEntry, error) {
+ defer vlog.LogCall()()
+ e, objPattern, _ := ns.rootMountEntry(name, opts...)
+ if vlog.V(2) {
+ _, file, line, _ := runtime.Caller(1)
+ vlog.Infof("Resolve(%s) called from %s:%d", name, file, line)
+ vlog.Infof("Resolve(%s) -> rootMountEntry %v", name, *e)
+ }
+ if skipResolve(opts) {
+ setBlessingPatterns(e, objPattern)
+ return e, nil
+ }
+ if len(e.Servers) == 0 {
+ return nil, verror.New(naming.ErrNoSuchName, ctx, name)
+ }
+ client := v23.GetClient(ctx)
+ callOpts := getCallOpts(opts)
+
+ // Iterate walking through mount table servers.
+ for remaining := ns.maxResolveDepth; remaining > 0; remaining-- {
+ vlog.VI(2).Infof("Resolve(%s) loop %v", name, *e)
+ if !e.ServesMountTable() || terminal(e) {
+ setBlessingPatterns(e, objPattern)
+ vlog.VI(1).Infof("Resolve(%s) -> %v", name, *e)
+ return e, nil
+ }
+ var err error
+ curr := e
+ if e, err = ns.resolveAgainstMountTable(ctx, client, curr, callOpts...); err != nil {
+ // Lots of reasons why another error can happen. We are trying
+ // to single out "this isn't a mount table".
+ if notAnMT(err) {
+ setBlessingPatterns(curr, objPattern)
+ vlog.VI(1).Infof("Resolve(%s) -> %v", name, curr)
+ return curr, nil
+ }
+ if verror.Is(err, naming.ErrNoSuchNameRoot.ID) {
+ err = verror.New(naming.ErrNoSuchName, ctx, name)
+ }
+ vlog.VI(1).Infof("Resolve(%s) -> (%s: %v)", err, name, curr)
+ return nil, err
+ }
+ }
+ return nil, verror.New(naming.ErrResolutionDepthExceeded, ctx)
+}
+
+// ResolveToMountTable implements v.io/v23/naming.Namespace.
+func (ns *namespace) ResolveToMountTable(ctx *context.T, name string, opts ...naming.ResolveOpt) (*naming.MountEntry, error) {
+ defer vlog.LogCall()()
+ e, _, _ := ns.rootMountEntry(name, opts...)
+ if vlog.V(2) {
+ _, file, line, _ := runtime.Caller(1)
+ vlog.Infof("ResolveToMountTable(%s) called from %s:%d", name, file, line)
+ vlog.Infof("ResolveToMountTable(%s) -> rootNames %v", name, e)
+ }
+ if len(e.Servers) == 0 {
+ return nil, verror.New(naming.ErrNoMountTable, ctx)
+ }
+ callOpts := getCallOpts(opts)
+ client := v23.GetClient(ctx)
+ last := e
+ for remaining := ns.maxResolveDepth; remaining > 0; remaining-- {
+ vlog.VI(2).Infof("ResolveToMountTable(%s) loop %v", name, e)
+ var err error
+ curr := e
+ // If the next name to resolve doesn't point to a mount table, we're done.
+ if !e.ServesMountTable() || terminal(e) {
+ vlog.VI(1).Infof("ResolveToMountTable(%s) -> %v", name, last)
+ return last, nil
+ }
+ if e, err = ns.resolveAgainstMountTable(ctx, client, e, callOpts...); err != nil {
+ if verror.Is(err, naming.ErrNoSuchNameRoot.ID) {
+ vlog.VI(1).Infof("ResolveToMountTable(%s) -> %v (NoSuchRoot: %v)", name, last, curr)
+ return last, nil
+ }
+ if verror.Is(err, naming.ErrNoSuchName.ID) {
+ vlog.VI(1).Infof("ResolveToMountTable(%s) -> %v (NoSuchName: %v)", name, curr, curr)
+ return curr, nil
+ }
+ // Lots of reasons why another error can happen. We are trying
+ // to single out "this isn't a mount table".
+ if notAnMT(err) {
+ vlog.VI(1).Infof("ResolveToMountTable(%s) -> %v", name, last)
+ return last, nil
+ }
+ // TODO(caprita): If the server is unreachable for
+ // example, we may still want to return its parent
+ // mounttable rather than an error.
+ vlog.VI(1).Infof("ResolveToMountTable(%s) -> %v", name, err)
+ return nil, err
+ }
+ last = curr
+ }
+ return nil, verror.New(naming.ErrResolutionDepthExceeded, ctx)
+}
+
+// FlushCacheEntry flushes the most specific entry found for name. It returns
+// true if anything was actually flushed.
+func (ns *namespace) FlushCacheEntry(name string) bool {
+ defer vlog.LogCall()()
+ flushed := false
+ for _, n := range ns.rootName(name) {
+		// Walk the cache as we would in a resolution. Unlike a resolution, we have to follow
+		// all branches, since we want to flush every entry at which we might end up, whereas a
+		// resolution stops with the first branch that works.
+ if e, err := ns.resolutionCache.lookup(n); err == nil {
+ // Recurse.
+ for _, s := range e.Servers {
+ flushed = flushed || ns.FlushCacheEntry(naming.Join(s.Server, e.Name))
+ }
+ if !flushed {
+ // Forget the entry we just used.
+ ns.resolutionCache.forget([]string{naming.TrimSuffix(n, e.Name)})
+ flushed = true
+ }
+ }
+ }
+ return flushed
+}
+
+func skipResolve(opts []naming.ResolveOpt) bool {
+ for _, o := range opts {
+ if _, ok := o.(options.NoResolve); ok {
+ return true
+ }
+ }
+ return false
+}
+
+func getCallOpts(opts []naming.ResolveOpt) []ipc.CallOpt {
+ var out []ipc.CallOpt
+ for _, o := range opts {
+ if co, ok := o.(ipc.CallOpt); ok {
+ out = append(out, co)
+ }
+ }
+ return out
+}
+
+// setBlessingPatterns overrides e.Servers.BlessingPatterns with p if p is
+// non-empty. This will typically be the case for end servers (i.e., not
+// mounttables) where the client explicitly specified a blessing pattern and
+// thus explicitly chose to ignore the patterns from the MountEntry.
+func setBlessingPatterns(e *naming.MountEntry, p security.BlessingPattern) {
+ if len(p) == 0 {
+ return
+ }
+ slice := []string{string(p)}
+	for idx := range e.Servers {
+ e.Servers[idx].BlessingPatterns = slice
+ }
+}
+
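+// setAllowedServers appends an AllowedServersPolicy option built from
+// patterns; if patterns is empty, opts is returned unchanged.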
+func setAllowedServers(opts []ipc.CallOpt, patterns []string) []ipc.CallOpt {
+ if len(patterns) == 0 {
+ return opts
+ }
+ p := make(options.AllowedServersPolicy, len(patterns))
+ for i, v := range patterns {
+ p[i] = security.BlessingPattern(v)
+ }
+ return append(opts, p)
+}
+
+func skipServerAuthorization(opts []ipc.CallOpt) bool {
+ for _, o := range opts {
+ if _, ok := o.(options.SkipResolveAuthorization); ok {
+ return true
+ }
+ }
+ return false
+}
diff --git a/profiles/internal/naming/namespace/stub.go b/profiles/internal/naming/namespace/stub.go
new file mode 100644
index 0000000..50b4f09
--- /dev/null
+++ b/profiles/internal/naming/namespace/stub.go
@@ -0,0 +1,39 @@
+package namespace
+
+import (
+ "time"
+
+ "v.io/v23/naming"
+)
+
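+// convertServersToStrings joins each server address with suffix to produce a
+// list of names.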
+func convertServersToStrings(servers []naming.MountedServer, suffix string) (ret []string) {
+ for _, s := range servers {
+ ret = append(ret, naming.Join(s.Server, suffix))
+ }
+ return
+}
+
+func convertStringsToServers(servers []string) (ret []naming.MountedServer) {
+ for _, s := range servers {
+ ret = append(ret, naming.MountedServer{Server: s})
+ }
+ return
+}
+
+func convertServers(servers []naming.VDLMountedServer) []naming.MountedServer {
+ var reply []naming.MountedServer
+ for _, s := range servers {
+ if s.TTL == 0 {
+ s.TTL = 32000000 // > 1 year
+ }
+ expires := time.Now().Add(time.Duration(s.TTL) * time.Second)
+ reply = append(reply, naming.MountedServer{Server: s.Server, BlessingPatterns: s.BlessingPatterns, Expires: expires})
+ }
+ return reply
+}
+
+func convertMountEntry(e *naming.VDLMountEntry) *naming.MountEntry {
+ v := &naming.MountEntry{Name: e.Name, Servers: convertServers(e.Servers)}
+ v.SetServesMountTable(e.MT)
+ return v
+}
diff --git a/profiles/internal/naming/namespace/v23_internal_test.go b/profiles/internal/naming/namespace/v23_internal_test.go
new file mode 100644
index 0000000..314919a
--- /dev/null
+++ b/profiles/internal/naming/namespace/v23_internal_test.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated via go generate.
+// DO NOT UPDATE MANUALLY
+package namespace
+
+import "testing"
+import "os"
+
+import "v.io/x/ref/lib/testutil"
+
+func TestMain(m *testing.M) {
+ testutil.Init()
+ os.Exit(m.Run())
+}
diff --git a/profiles/internal/rt/ipc_test.go b/profiles/internal/rt/ipc_test.go
new file mode 100644
index 0000000..b13339f
--- /dev/null
+++ b/profiles/internal/rt/ipc_test.go
@@ -0,0 +1,333 @@
+package rt_test
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+ "testing"
+ "time"
+
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/naming"
+ "v.io/v23/options"
+ "v.io/v23/security"
+
+ "v.io/v23/verror"
+ "v.io/x/ref/lib/testutil"
+ tsecurity "v.io/x/ref/lib/testutil/security"
+ _ "v.io/x/ref/profiles"
+)
+
+//go:generate v23 test generate
+
+type testService struct{}
+
+func (testService) EchoBlessings(call ipc.ServerCall) ([]string, error) {
+ b, _ := call.RemoteBlessings().ForCall(call)
+ return b, nil
+}
+
+func (testService) Foo(ipc.ServerCall) error {
+ return nil
+}
+
+type dischargeService struct {
+ called int
+ mu sync.Mutex
+}
+
+func (ds *dischargeService) Discharge(ctx ipc.StreamServerCall, cav security.Caveat, _ security.DischargeImpetus) (security.WireDischarge, error) {
+ tp := cav.ThirdPartyDetails()
+ if tp == nil {
+ return nil, fmt.Errorf("discharger: not a third party caveat (%v)", cav)
+ }
+ if err := tp.Dischargeable(ctx); err != nil {
+ return nil, fmt.Errorf("third-party caveat %v cannot be discharged for this context: %v", tp, err)
+ }
+	// If it's the first time being called, add an expiry caveat and a MethodCaveat for "EchoBlessings".
+ // Otherwise, just add a MethodCaveat for "Foo".
+ ds.mu.Lock()
+ called := ds.called
+ ds.mu.Unlock()
+ caveats := []security.Caveat{mkCaveat(security.MethodCaveat("Foo"))}
+ if called == 0 {
+ caveats = []security.Caveat{
+ mkCaveat(security.MethodCaveat("EchoBlessings")),
+ mkCaveat(security.ExpiryCaveat(time.Now().Add(time.Second))),
+ }
+ }
+
+ d, err := ctx.LocalPrincipal().MintDischarge(cav, caveats[0], caveats[1:]...)
+ if err != nil {
+ return nil, err
+ }
+ return security.MarshalDischarge(d), nil
+}
+
+func newCtx(rootCtx *context.T) *context.T {
+ ctx, err := v23.SetPrincipal(rootCtx, tsecurity.NewPrincipal("defaultBlessings"))
+ if err != nil {
+ panic(err)
+ }
+ return ctx
+}
+
+func union(blessings ...security.Blessings) security.Blessings {
+ var ret security.Blessings
+ var err error
+ for _, b := range blessings {
+ if ret, err = security.UnionOfBlessings(ret, b); err != nil {
+ panic(err)
+ }
+ }
+ return ret
+}
+
+func mkCaveat(cav security.Caveat, err error) security.Caveat {
+ if err != nil {
+ panic(err)
+ }
+ return cav
+}
+
+func mkBlessings(blessings security.Blessings, err error) security.Blessings {
+ if err != nil {
+ panic(err)
+ }
+ return blessings
+}
+
+func mkThirdPartyCaveat(discharger security.PublicKey, location string, caveats ...security.Caveat) security.Caveat {
+ if len(caveats) == 0 {
+ caveats = []security.Caveat{security.UnconstrainedUse()}
+ }
+ tpc, err := security.NewPublicKeyCaveat(discharger, location, security.ThirdPartyRequirements{}, caveats[0], caveats[1:]...)
+ if err != nil {
+ panic(err)
+ }
+ return tpc
+}
+
+func startServer(ctx *context.T, s interface{}) (ipc.Server, string, error) {
+ server, err := v23.NewServer(ctx)
+ if err != nil {
+ return nil, "", err
+ }
+ endpoints, err := server.Listen(v23.GetListenSpec(ctx))
+ if err != nil {
+ return nil, "", err
+ }
+ serverObjectName := naming.JoinAddressName(endpoints[0].String(), "")
+ if err := server.Serve("", s, allowEveryone{}); err != nil {
+ return nil, "", err
+ }
+ return server, serverObjectName, nil
+}
+
+func TestClientServerBlessings(t *testing.T) {
+ ctx, shutdown := testutil.InitForTest()
+ defer shutdown()
+
+ var (
+ rootAlpha, rootBeta, rootUnrecognized = tsecurity.NewIDProvider("alpha"), tsecurity.NewIDProvider("beta"), tsecurity.NewIDProvider("unrecognized")
+ clientCtx, serverCtx = newCtx(ctx), newCtx(ctx)
+ pclient = v23.GetPrincipal(clientCtx)
+ pserver = v23.GetPrincipal(serverCtx)
+
+ // A bunch of blessings
+ alphaClient = mkBlessings(rootAlpha.NewBlessings(pclient, "client"))
+ betaClient = mkBlessings(rootBeta.NewBlessings(pclient, "client"))
+ unrecognizedClient = mkBlessings(rootUnrecognized.NewBlessings(pclient, "client"))
+
+ alphaServer = mkBlessings(rootAlpha.NewBlessings(pserver, "server"))
+ betaServer = mkBlessings(rootBeta.NewBlessings(pserver, "server"))
+ unrecognizedServer = mkBlessings(rootUnrecognized.NewBlessings(pserver, "server"))
+ )
+	// Set up the client's blessing store
+ pclient.BlessingStore().Set(alphaClient, "alpha/server")
+ pclient.BlessingStore().Set(betaClient, "beta")
+ pclient.BlessingStore().Set(unrecognizedClient, security.AllPrincipals)
+
+ tests := []struct {
+ server security.Blessings // Blessings presented by the server.
+
+ // Expected output
+ wantServer []string // Client's view of the server's blessings
+		wantClient []string // Server's view of the client's blessings
+ }{
+ {
+ server: unrecognizedServer,
+ wantServer: nil,
+ wantClient: nil,
+ },
+ {
+ server: alphaServer,
+ wantServer: []string{"alpha/server"},
+ wantClient: []string{"alpha/client"},
+ },
+ {
+ server: union(alphaServer, betaServer),
+ wantServer: []string{"alpha/server", "beta/server"},
+ wantClient: []string{"alpha/client", "beta/client"},
+ },
+ }
+
+ // Have the client and server both trust both the root principals.
+ for _, ctx := range []*context.T{clientCtx, serverCtx} {
+ for _, b := range []security.Blessings{alphaClient, betaClient} {
+ p := v23.GetPrincipal(ctx)
+ if err := p.AddToRoots(b); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+ // Let it rip!
+ for _, test := range tests {
+ if err := pserver.BlessingStore().SetDefault(test.server); err != nil {
+ t.Errorf("pserver.SetDefault(%v) failed: %v", test.server, err)
+ continue
+ }
+ server, serverObjectName, err := startServer(serverCtx, testService{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ ctx, client, err := v23.SetNewClient(clientCtx)
+ if err != nil {
+ panic(err)
+ }
+
+ var gotClient []string
+ if call, err := client.StartCall(ctx, serverObjectName, "EchoBlessings", nil); err != nil {
+ t.Errorf("client.StartCall failed: %v", err)
+ } else if err = call.Finish(&gotClient); err != nil {
+ t.Errorf("call.Finish failed: %v", err)
+ } else if !reflect.DeepEqual(gotClient, test.wantClient) {
+ t.Errorf("%v: Got %v, want %v for client blessings", test.server, gotClient, test.wantServer)
+ } else if gotServer, _ := call.RemoteBlessings(); !reflect.DeepEqual(gotServer, test.wantServer) {
+ t.Errorf("%v: Got %v, want %v for server blessings", test.server, gotServer, test.wantClient)
+ }
+
+ server.Stop()
+ client.Close()
+ }
+}
+
+func TestServerDischarges(t *testing.T) {
+ ctx, shutdown := testutil.InitForTest()
+ defer shutdown()
+
+ var (
+ dischargerCtx, clientCtx, serverCtx = newCtx(ctx), newCtx(ctx), newCtx(ctx)
+ pdischarger = v23.GetPrincipal(dischargerCtx)
+ pclient = v23.GetPrincipal(clientCtx)
+ pserver = v23.GetPrincipal(serverCtx)
+ root = tsecurity.NewIDProvider("root")
+ )
+
+	// Set up the server's and discharger's blessing store and blessing roots, and
+ // start the server and discharger.
+ if err := root.Bless(pdischarger, "discharger"); err != nil {
+ t.Fatal(err)
+ }
+ ds := &dischargeService{}
+ dischargeServer, dischargeServerName, err := startServer(dischargerCtx, ds)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer dischargeServer.Stop()
+ if err := root.Bless(pserver, "server", mkThirdPartyCaveat(pdischarger.PublicKey(), dischargeServerName)); err != nil {
+ t.Fatal(err)
+ }
+ server, serverName, err := startServer(serverCtx, &testService{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer server.Stop()
+
+	// Set up the client's blessing store so that it can talk to the server.
+ rootClient := mkBlessings(root.NewBlessings(pclient, "client"))
+ if _, err := pclient.BlessingStore().Set(security.Blessings{}, security.AllPrincipals); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := pclient.BlessingStore().Set(rootClient, "root/server"); err != nil {
+ t.Fatal(err)
+ }
+ if err := pclient.AddToRoots(rootClient); err != nil {
+ t.Fatal(err)
+ }
+
+ // Test that the client and server can communicate with the expected set of blessings
+ // when server provides appropriate discharges.
+ wantClient := []string{"root/client"}
+ wantServer := []string{"root/server"}
+ var gotClient []string
+ // This opt ensures that if the Blessings do not match the pattern, StartCall will fail.
+ allowedServers := options.AllowedServersPolicy{"root/server"}
+
+ // Create a new client.
+ clientCtx, client, err := v23.SetNewClient(clientCtx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ makeCall := func() error {
+ if call, err := client.StartCall(clientCtx, serverName, "EchoBlessings", nil, allowedServers); err != nil {
+ return err
+ } else if err = call.Finish(&gotClient); err != nil {
+ return fmt.Errorf("call.Finish failed: %v", err)
+ } else if !reflect.DeepEqual(gotClient, wantClient) {
+ return fmt.Errorf("Got %v, want %v for client blessings", gotClient, wantClient)
+ } else if gotServer, _ := call.RemoteBlessings(); !reflect.DeepEqual(gotServer, wantServer) {
+ return fmt.Errorf("Got %v, want %v for server blessings", gotServer, wantServer)
+ }
+ return nil
+ }
+
+ if err := makeCall(); err != nil {
+ t.Error(err)
+ }
+ ds.mu.Lock()
+ ds.called++
+ ds.mu.Unlock()
+	// makeCall should eventually fail: the discharge will expire, and the refreshed
+	// discharge no longer allows calls to "EchoBlessings".
+ start := time.Now()
+ for {
+ if time.Since(start) > time.Second {
+ t.Fatalf("Discharge no refreshed in 1 second")
+ }
+ if err := makeCall(); err == nil {
+ time.Sleep(10 * time.Millisecond)
+ continue
+ } else if !verror.Is(err, verror.ErrNotTrusted.ID) {
+ t.Fatalf("got error %v, expected %v", err, verror.ErrNotTrusted.ID)
+ }
+ break
+ }
+
+ // Discharge should now be refreshed and calls to "Foo" should succeed.
+ if _, err := client.StartCall(clientCtx, serverName, "Foo", nil, allowedServers); err != nil {
+ t.Errorf("client.StartCall should have succeeded: %v", err)
+ }
+
+	// Test that the client fails to talk to a server that does not present appropriate discharges.
+	// Set up a new client so that there are no cached VCs.
+ clientCtx, client, err = v23.SetNewClient(clientCtx)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ rootServerInvalidTPCaveat := mkBlessings(root.NewBlessings(pserver, "server", mkThirdPartyCaveat(pdischarger.PublicKey(), dischargeServerName, mkCaveat(security.ExpiryCaveat(time.Now().Add(-1*time.Second))))))
+ if err := pserver.BlessingStore().SetDefault(rootServerInvalidTPCaveat); err != nil {
+ t.Fatal(err)
+ }
+ if call, err := client.StartCall(clientCtx, serverName, "EchoBlessings", nil); verror.Is(err, verror.ErrNoAccess.ID) {
+ remoteBlessings, _ := call.RemoteBlessings()
+ t.Errorf("client.StartCall passed unexpectedly with remote end authenticated as: %v", remoteBlessings)
+ }
+}
+
+type allowEveryone struct{}
+
+func (allowEveryone) Authorize(security.Call) error { return nil }
diff --git a/profiles/internal/rt/mgmt.go b/profiles/internal/rt/mgmt.go
new file mode 100644
index 0000000..34f1b4d
--- /dev/null
+++ b/profiles/internal/rt/mgmt.go
@@ -0,0 +1,91 @@
+package rt
+
+import (
+ "fmt"
+ "time"
+
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/mgmt"
+ "v.io/v23/naming"
+ "v.io/v23/options"
+
+ "v.io/x/ref/lib/exec"
+)
+
+func (rt *Runtime) initMgmt(ctx *context.T) error {
+ handle, err := exec.GetChildHandle()
+ if err == exec.ErrNoVersion {
+ // Do not initialize the mgmt runtime if the process has not
+ // been started through the veyron exec library by a device
+ // manager.
+ return nil
+ } else if err != nil {
+ return err
+ }
+
+ parentName, err := handle.Config.Get(mgmt.ParentNameConfigKey)
+ if err != nil {
+ return nil
+ }
+ listenSpec, err := getListenSpec(handle)
+ if err != nil {
+ return err
+ }
+ var serverOpts []ipc.ServerOpt
+ parentPeerPattern, err := handle.Config.Get(mgmt.ParentBlessingConfigKey)
+ if err == nil && parentPeerPattern != "" {
+ // Grab the blessing from our blessing store that the parent
+ // told us to use so they can talk to us.
+ serverBlessing := rt.GetPrincipal(ctx).BlessingStore().ForPeer(parentPeerPattern)
+ serverOpts = append(serverOpts, options.ServerBlessings{serverBlessing})
+ }
+ server, err := rt.NewServer(ctx, serverOpts...)
+ if err != nil {
+ return err
+ }
+ eps, err := server.Listen(*listenSpec)
+ if err != nil {
+ return err
+ }
+ if err := server.Serve("", v23.GetAppCycle(ctx).Remote(), nil); err != nil {
+ server.Stop()
+ return err
+ }
+ err = rt.callbackToParent(ctx, parentName, naming.JoinAddressName(eps[0].String(), ""))
+ if err != nil {
+ server.Stop()
+ return err
+ }
+
+ return handle.SetReady()
+}
+
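+// getListenSpec builds a ListenSpec from the protocol and address that the
+// parent passed down via the exec config.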
+func getListenSpec(handle *exec.ChildHandle) (*ipc.ListenSpec, error) {
+ protocol, err := handle.Config.Get(mgmt.ProtocolConfigKey)
+ if err != nil {
+ return nil, err
+ }
+ if protocol == "" {
+ return nil, fmt.Errorf("%v is not set", mgmt.ProtocolConfigKey)
+ }
+
+ address, err := handle.Config.Get(mgmt.AddressConfigKey)
+ if err != nil {
+ return nil, err
+ }
+ if address == "" {
+ return nil, fmt.Errorf("%v is not set", mgmt.AddressConfigKey)
+ }
+ return &ipc.ListenSpec{Addrs: ipc.ListenAddrs{{protocol, address}}}, nil
+}
+
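+// callbackToParent tells the parent process the name of our app cycle server
+// by invoking Set(AppCycleManagerConfigKey, myName) on its config service.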
+func (rt *Runtime) callbackToParent(ctx *context.T, parentName, myName string) error {
+ ctx, _ = context.WithTimeout(ctx, time.Minute)
+ call, err := rt.GetClient(ctx).StartCall(ctx, parentName, "Set", []interface{}{mgmt.AppCycleManagerConfigKey, myName})
+ if err != nil {
+ return err
+ }
+ return call.Finish()
+}
diff --git a/profiles/internal/rt/mgmt_test.go b/profiles/internal/rt/mgmt_test.go
new file mode 100644
index 0000000..168dbaf
--- /dev/null
+++ b/profiles/internal/rt/mgmt_test.go
@@ -0,0 +1,377 @@
+package rt_test
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/mgmt"
+ "v.io/v23/naming"
+ "v.io/v23/services/mgmt/appcycle"
+
+ "v.io/x/ref/lib/expect"
+ "v.io/x/ref/lib/modules"
+ "v.io/x/ref/lib/testutil"
+ _ "v.io/x/ref/profiles"
+ vflag "v.io/x/ref/security/flag"
+ "v.io/x/ref/services/mgmt/device"
+)
+
+//go:generate v23 test generate
+
+const (
+ noWaitersCmd = "noWaiters"
+ forceStopCmd = "forceStop"
+ appCmd = "app"
+)
+
+// TestBasic verifies that the basic plumbing works: LocalStop calls result in
+// stop messages being sent on the channel passed to WaitForStop.
+func TestBasic(t *testing.T) {
+ ctx, shutdown := testutil.InitForTest()
+ defer shutdown()
+
+ m := v23.GetAppCycle(ctx)
+ ch := make(chan string, 1)
+ m.WaitForStop(ch)
+ for i := 0; i < 10; i++ {
+ m.Stop()
+ if want, got := v23.LocalStop, <-ch; want != got {
+ t.Errorf("WaitForStop want %q got %q", want, got)
+ }
+ select {
+ case s := <-ch:
+ t.Errorf("channel expected to be empty, got %q instead", s)
+ default:
+ }
+ }
+}
+
+// TestMultipleWaiters verifies that the plumbing works with more than one
+// registered wait channel.
+func TestMultipleWaiters(t *testing.T) {
+ ctx, shutdown := testutil.InitForTest()
+ defer shutdown()
+
+ m := v23.GetAppCycle(ctx)
+ ch1 := make(chan string, 1)
+ m.WaitForStop(ch1)
+ ch2 := make(chan string, 1)
+ m.WaitForStop(ch2)
+ for i := 0; i < 10; i++ {
+ m.Stop()
+ if want, got := v23.LocalStop, <-ch1; want != got {
+ t.Errorf("WaitForStop want %q got %q", want, got)
+ }
+ if want, got := v23.LocalStop, <-ch2; want != got {
+ t.Errorf("WaitForStop want %q got %q", want, got)
+ }
+ }
+}
+
+// TestMultipleStops verifies that LocalStop does not block even if the wait
+// channel is not being drained: once the channel's buffer fills up, future
+// Stops become no-ops.
+func TestMultipleStops(t *testing.T) {
+ ctx, shutdown := testutil.InitForTest()
+ defer shutdown()
+
+ m := v23.GetAppCycle(ctx)
+ ch := make(chan string, 1)
+ m.WaitForStop(ch)
+ for i := 0; i < 10; i++ {
+ m.Stop()
+ }
+ if want, got := v23.LocalStop, <-ch; want != got {
+ t.Errorf("WaitForStop want %q got %q", want, got)
+ }
+ select {
+ case s := <-ch:
+ t.Errorf("channel expected to be empty, got %q instead", s)
+ default:
+ }
+}
+
+func noWaiters(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
+ ctx, shutdown := testutil.InitForTest()
+ defer shutdown()
+
+ m := v23.GetAppCycle(ctx)
+ fmt.Fprintf(stdout, "ready\n")
+ modules.WaitForEOF(stdin)
+ m.Stop()
+ os.Exit(42) // This should not be reached.
+ return nil
+}
+
+// TestNoWaiters verifies that the child process exits in the absence of any
+// wait channel being registered with its runtime.
+func TestNoWaiters(t *testing.T) {
+ sh, err := modules.NewShell(nil, nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ defer sh.Cleanup(os.Stderr, os.Stderr)
+ h, err := sh.Start(noWaitersCmd, nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ expect.NewSession(t, h.Stdout(), time.Minute).Expect("ready")
+ want := fmt.Sprintf("exit status %d", v23.UnhandledStopExitCode)
+ if err = h.Shutdown(os.Stderr, os.Stderr); err == nil || err.Error() != want {
+ t.Errorf("got %v, want %s", err, want)
+ }
+}
+
+func forceStop(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
+ ctx, shutdown := testutil.InitForTest()
+ defer shutdown()
+
+ m := v23.GetAppCycle(ctx)
+ fmt.Fprintf(stdout, "ready\n")
+ modules.WaitForEOF(stdin)
+ m.WaitForStop(make(chan string, 1))
+ m.ForceStop()
+ os.Exit(42) // This should not be reached.
+ return nil
+}
+
+// TestForceStop verifies that ForceStop causes the child process to exit
+// immediately.
+func TestForceStop(t *testing.T) {
+ sh, err := modules.NewShell(nil, nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ defer sh.Cleanup(os.Stderr, os.Stderr)
+ h, err := sh.Start(forceStopCmd, nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ s := expect.NewSession(t, h.Stdout(), time.Minute)
+ s.Expect("ready")
+ err = h.Shutdown(os.Stderr, os.Stderr)
+ want := fmt.Sprintf("exit status %d", v23.UnhandledStopExitCode)
+ if err == nil || err.Error() != want {
+ t.Errorf("got %v, want %s", err, want)
+ }
+}
+
+func checkProgress(t *testing.T, ch <-chan v23.Task, progress, goal int32) {
+ if want, got := (v23.Task{progress, goal}), <-ch; !reflect.DeepEqual(want, got) {
+ t.Errorf("Unexpected progress: want %+v, got %+v", want, got)
+ }
+}
+
+func checkNoProgress(t *testing.T, ch <-chan v23.Task) {
+ select {
+ case p := <-ch:
+ t.Errorf("channel expected to be empty, got %+v instead", p)
+ default:
+ }
+}
+
+// TestProgress verifies that the ticker update/track logic works for a single
+// tracker.
+func TestProgress(t *testing.T) {
+ ctx, shutdown := testutil.InitForTest()
+
+ m := v23.GetAppCycle(ctx)
+ m.AdvanceGoal(50)
+ ch := make(chan v23.Task, 1)
+ m.TrackTask(ch)
+ checkNoProgress(t, ch)
+ m.AdvanceProgress(10)
+ checkProgress(t, ch, 10, 50)
+ checkNoProgress(t, ch)
+ m.AdvanceProgress(5)
+ checkProgress(t, ch, 15, 50)
+ m.AdvanceGoal(50)
+ checkProgress(t, ch, 15, 100)
+ m.AdvanceProgress(1)
+ checkProgress(t, ch, 16, 100)
+ m.AdvanceGoal(10)
+ checkProgress(t, ch, 16, 110)
+ m.AdvanceProgress(-13)
+ checkNoProgress(t, ch)
+ m.AdvanceGoal(0)
+ checkNoProgress(t, ch)
+ shutdown()
+ if _, ok := <-ch; ok {
+ t.Errorf("Expected channel to be closed")
+ }
+}
+
+// TestProgressMultipleTrackers verifies that the ticker update/track logic
+// works for more than one tracker. It also ensures that the runtime doesn't
+// block when the tracker channels are full.
+func TestProgressMultipleTrackers(t *testing.T) {
+ ctx, shutdown := testutil.InitForTest()
+
+ m := v23.GetAppCycle(ctx)
+ // ch1 is 1-buffered, ch2 is 2-buffered.
+ ch1, ch2 := make(chan v23.Task, 1), make(chan v23.Task, 2)
+ m.TrackTask(ch1)
+ m.TrackTask(ch2)
+ checkNoProgress(t, ch1)
+ checkNoProgress(t, ch2)
+ m.AdvanceProgress(1)
+ checkProgress(t, ch1, 1, 0)
+ checkNoProgress(t, ch1)
+ checkProgress(t, ch2, 1, 0)
+ checkNoProgress(t, ch2)
+ for i := 0; i < 10; i++ {
+ m.AdvanceProgress(1)
+ }
+ checkProgress(t, ch1, 2, 0)
+ checkNoProgress(t, ch1)
+ checkProgress(t, ch2, 2, 0)
+ checkProgress(t, ch2, 3, 0)
+ checkNoProgress(t, ch2)
+ m.AdvanceGoal(4)
+ checkProgress(t, ch1, 11, 4)
+ checkProgress(t, ch2, 11, 4)
+ shutdown()
+ if _, ok := <-ch1; ok {
+ t.Errorf("Expected channel to be closed")
+ }
+ if _, ok := <-ch2; ok {
+ t.Errorf("Expected channel to be closed")
+ }
+}
+
+func app(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
+ ctx, shutdown := testutil.InitForTest()
+ defer shutdown()
+
+ m := v23.GetAppCycle(ctx)
+ ch := make(chan string, 1)
+ m.WaitForStop(ch)
+ fmt.Fprintf(stdout, "Got %s\n", <-ch)
+ m.AdvanceGoal(10)
+ fmt.Fprintf(stdout, "Doing some work\n")
+ m.AdvanceProgress(2)
+ fmt.Fprintf(stdout, "Doing some more work\n")
+ m.AdvanceProgress(5)
+ return nil
+}
+
+type configServer struct {
+ ch chan<- string
+}
+
+func (c *configServer) Set(_ ipc.ServerCall, key, value string) error {
+ if key != mgmt.AppCycleManagerConfigKey {
+ return fmt.Errorf("Unexpected key: %v", key)
+ }
+ c.ch <- value
+ return nil
+}
+
+func createConfigServer(t *testing.T, ctx *context.T) (ipc.Server, string, <-chan string) {
+ server, err := v23.NewServer(ctx)
+ if err != nil {
+ t.Fatalf("Got error: %v", err)
+ }
+ ch := make(chan string)
+ var eps []naming.Endpoint
+ if eps, err = server.Listen(v23.GetListenSpec(ctx)); err != nil {
+ t.Fatalf("Got error: %v", err)
+ }
+ if err := server.Serve("", device.ConfigServer(&configServer{ch}), vflag.NewAuthorizerOrDie()); err != nil {
+ t.Fatalf("Got error: %v", err)
+ }
+ return server, eps[0].Name(), ch
+}
+
+func setupRemoteAppCycleMgr(t *testing.T) (*context.T, modules.Handle, appcycle.AppCycleClientMethods, func()) {
+ ctx, shutdown := testutil.InitForTest()
+
+ configServer, configServiceName, ch := createConfigServer(t, ctx)
+ sh, err := modules.NewShell(ctx, v23.GetPrincipal(ctx))
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ sh.SetConfigKey(mgmt.ParentNameConfigKey, configServiceName)
+ sh.SetConfigKey(mgmt.ProtocolConfigKey, "tcp")
+ sh.SetConfigKey(mgmt.AddressConfigKey, "127.0.0.1:0")
+ h, err := sh.Start("app", nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ appCycleName := ""
+ select {
+ case appCycleName = <-ch:
+ case <-time.After(time.Minute):
+ t.Errorf("timeout")
+ }
+ appCycle := appcycle.AppCycleClient(appCycleName)
+ return ctx, h, appCycle, func() {
+ configServer.Stop()
+ sh.Cleanup(os.Stderr, os.Stderr)
+ shutdown()
+ }
+}
+
+// TestRemoteForceStop verifies that the child process exits when sending it
+// a remote ForceStop rpc.
+func TestRemoteForceStop(t *testing.T) {
+ ctx, h, appCycle, cleanup := setupRemoteAppCycleMgr(t)
+ defer cleanup()
+ if err := appCycle.ForceStop(ctx); err == nil || !strings.Contains(err.Error(), "EOF") {
+ t.Fatalf("Expected EOF error, got %v instead", err)
+ }
+ s := expect.NewSession(t, h.Stdout(), time.Minute)
+ s.ExpectEOF()
+ err := h.Shutdown(os.Stderr, os.Stderr)
+ want := fmt.Sprintf("exit status %d", v23.ForceStopExitCode)
+ if err == nil || err.Error() != want {
+ t.Errorf("got %v, want %s", err, want)
+ }
+}
+
+// TestRemoteStop verifies that the child shuts down cleanly when sending it
+// a remote Stop rpc.
+func TestRemoteStop(t *testing.T) {
+ ctx, h, appCycle, cleanup := setupRemoteAppCycleMgr(t)
+ defer cleanup()
+ stream, err := appCycle.Stop(ctx)
+ if err != nil {
+ t.Fatalf("Got error: %v", err)
+ }
+ rStream := stream.RecvStream()
+ expectTask := func(progress, goal int32) {
+ if !rStream.Advance() {
+ t.Fatalf("unexpected streaming error: %q", rStream.Err())
+ }
+ task := rStream.Value()
+ if task.Progress != progress || task.Goal != goal {
+ t.Errorf("Got (%d, %d), want (%d, %d)", task.Progress, task.Goal, progress, goal)
+ }
+ }
+ expectTask(0, 10)
+ expectTask(2, 10)
+ expectTask(7, 10)
+ if rStream.Advance() || rStream.Err() != nil {
+ t.Errorf("Expected EOF, got (%v, %v) instead", rStream.Value(), rStream.Err())
+ }
+ if err := stream.Finish(); err != nil {
+ t.Errorf("Got error %v", err)
+ }
+ s := expect.NewSession(t, h.Stdout(), time.Minute)
+ s.Expect(fmt.Sprintf("Got %s", v23.RemoteStop))
+ s.Expect("Doing some work")
+ s.Expect("Doing some more work")
+ s.ExpectEOF()
+ if err := h.Shutdown(os.Stderr, os.Stderr); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+}
diff --git a/profiles/internal/rt/rt_test.go b/profiles/internal/rt/rt_test.go
new file mode 100644
index 0000000..d564369
--- /dev/null
+++ b/profiles/internal/rt/rt_test.go
@@ -0,0 +1,274 @@
+package rt_test
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "regexp"
+ "testing"
+ "time"
+
+ "v.io/v23"
+ "v.io/v23/security"
+ "v.io/x/lib/vlog"
+
+ "v.io/x/ref/lib/expect"
+ "v.io/x/ref/lib/flags/consts"
+ "v.io/x/ref/lib/modules"
+ "v.io/x/ref/lib/testutil"
+ vsecurity "v.io/x/ref/security"
+)
+
+//go:generate v23 test generate
+
+func TestInit(t *testing.T) {
+ testutil.UnsetPrincipalEnvVars()
+ ctx, shutdown := v23.Init()
+ defer shutdown()
+
+ l := vlog.Log
+ fmt.Println(l)
+ args := fmt.Sprintf("%s", l)
+ expected := regexp.MustCompile("name=veyron logdirs=\\[/tmp\\] logtostderr=(true|false) alsologtostderr=(false|true) max_stack_buf_size=4292608 v=[0-9] stderrthreshold=2 vmodule= log_backtrace_at=:0")
+ if !expected.MatchString(args) {
+ t.Errorf("unexpected default args: %s", args)
+ }
+ p := v23.GetPrincipal(ctx)
+ if p == nil {
+ t.Fatalf("A new principal should have been created")
+ }
+ if p.BlessingStore() == nil {
+ t.Fatalf("The principal must have a BlessingStore")
+ }
+ if p.BlessingStore().Default().IsZero() {
+ t.Errorf("Principal().BlessingStore().Default() should not be the zero value")
+ }
+ if p.BlessingStore().ForPeer().IsZero() {
+ t.Errorf("Principal().BlessingStore().ForPeer() should not be the zero value")
+ }
+}
+
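+// child is a child process that reports its logging configuration on stdout
+// and then waits for EOF on stdin.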
+func child(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
+ _, shutdown := testutil.InitForTest()
+ defer shutdown()
+
+ logger := vlog.Log
+ vlog.Infof("%s\n", logger)
+ fmt.Fprintf(stdout, "%s\n", logger)
+ modules.WaitForEOF(stdin)
+ fmt.Fprintf(stdout, "done\n")
+ return nil
+}
+
+func TestInitArgs(t *testing.T) {
+ sh, err := modules.NewShell(nil, nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ defer sh.Cleanup(os.Stderr, os.Stderr)
+ h, err := sh.Start("child", nil, "--logtostderr=true", "--vmodule=*=3", "--", "foobar")
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ s := expect.NewSession(t, h.Stdout(), time.Minute)
+ s.Expect(fmt.Sprintf("name=veyron "+
+ "logdirs=[%s] "+
+ "logtostderr=true "+
+ "alsologtostderr=true "+
+ "max_stack_buf_size=4292608 "+
+ "v=0 "+
+ "stderrthreshold=2 "+
+ "vmodule=*=3 "+
+ "log_backtrace_at=:0",
+ os.TempDir()))
+ h.CloseStdin()
+ s.Expect("done")
+ s.ExpectEOF()
+ h.Shutdown(os.Stderr, os.Stderr)
+}
+
+func validatePrincipal(p security.Principal) error {
+ if p == nil {
+ return fmt.Errorf("nil principal")
+ }
+ ctx := security.NewCall(&security.CallParams{LocalPrincipal: p})
+ blessings, rejected := p.BlessingStore().Default().ForCall(ctx)
+ if n := len(blessings); n != 1 {
+ return fmt.Errorf("rt.Principal().BlessingStore().Default() return blessings:%v (rejected:%v), want exactly one recognized blessing", blessings, rejected)
+ }
+ return nil
+}
+
+func defaultBlessing(p security.Principal) string {
+ b, _ := p.BlessingStore().Default().ForCall(security.NewCall(&security.CallParams{LocalPrincipal: p}))
+ return b[0]
+}
+
+func tmpDir(t *testing.T) string {
+ dir, err := ioutil.TempDir("", "rt_test_dir")
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ return dir
+}
+
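+// principal is a child process that validates its runtime principal and
+// reports its default blessing.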
+func principal(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
+ ctx, shutdown := testutil.InitForTest()
+ defer shutdown()
+
+ p := v23.GetPrincipal(ctx)
+ if err := validatePrincipal(p); err != nil {
+ return err
+ }
+ fmt.Fprintf(stdout, "DEFAULT_BLESSING=%s\n", defaultBlessing(p))
+ return nil
+}
+
+// runner runs a principal as a subprocess and reports back with its
+// own security info and its child's.
+func runner(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
+ ctx, shutdown := testutil.InitForTest()
+ defer shutdown()
+
+ p := v23.GetPrincipal(ctx)
+ if err := validatePrincipal(p); err != nil {
+ return err
+ }
+ fmt.Fprintf(stdout, "RUNNER_DEFAULT_BLESSING=%v\n", defaultBlessing(p))
+ sh, err := modules.NewShell(ctx, p)
+ if err != nil {
+ return err
+ }
+ if _, err := sh.Start("principal", nil, args...); err != nil {
+ return err
+ }
+ // Cleanup copies the output of sh to these Writers.
+ sh.Cleanup(stdout, stderr)
+ return nil
+}
+
+func createCredentialsInDir(t *testing.T, dir string, blessing string) {
+ principal, err := vsecurity.CreatePersistentPrincipal(dir, nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if err := vsecurity.InitDefaultBlessings(principal, blessing); err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+}
+
+func TestPrincipalInheritance(t *testing.T) {
+ sh, err := modules.NewShell(nil, nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ defer func() {
+ sh.Cleanup(os.Stdout, os.Stderr)
+ }()
+
+ // Test that the child inherits the parent's credentials correctly.
+ // The running test process may or may not have a credentials directory set
+ // up, so we have to use a 'runner' process to ensure the correct setup.
+ cdir := tmpDir(t)
+ defer os.RemoveAll(cdir)
+
+ createCredentialsInDir(t, cdir, "test")
+
+ // directory supplied by the environment.
+ credEnv := []string{consts.VeyronCredentials + "=" + cdir}
+
+ h, err := sh.Start("runner", credEnv)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ s := expect.NewSession(t, h.Stdout(), time.Minute)
+ runnerBlessing := s.ExpectVar("RUNNER_DEFAULT_BLESSING")
+ principalBlessing := s.ExpectVar("DEFAULT_BLESSING")
+ if err := s.Error(); err != nil {
+ t.Fatalf("failed to read input from children: %s", err)
+ }
+ h.Shutdown(os.Stdout, os.Stderr)
+
+ wantRunnerBlessing := "test"
+ wantPrincipalBlessing := "test/child"
+ if runnerBlessing != wantRunnerBlessing || principalBlessing != wantPrincipalBlessing {
+ t.Fatalf("unexpected default blessing: got runner %s, principal %s, want runner %s, principal %s", runnerBlessing, principalBlessing, wantRunnerBlessing, wantPrincipalBlessing)
+ }
+
+}
+
+func TestPrincipalInit(t *testing.T) {
+ // collect starts a child "principal" process and returns its reported default blessing.
+ collect := func(sh *modules.Shell, env []string, args ...string) string {
+ h, err := sh.Start("principal", env, args...)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ s := expect.NewSession(t, h.Stdout(), time.Minute)
+ s.SetVerbosity(testing.Verbose())
+ return s.ExpectVar("DEFAULT_BLESSING")
+ }
+
+ // A credentials directory may or may not already be specified.
+ // Either way, we want to use our own, so we set the original aside
+ // and restore it when we're done.
+ origCredentialsDir := os.Getenv(consts.VeyronCredentials)
+ defer os.Setenv(consts.VeyronCredentials, origCredentialsDir)
+ if err := os.Setenv(consts.VeyronCredentials, ""); err != nil {
+ t.Fatal(err)
+ }
+
+ // We create two shells -- one initializing the principal for a child process
+ // via a credentials directory and the other via an agent.
+ sh, err := modules.NewShell(nil, nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ defer sh.Cleanup(os.Stderr, os.Stderr)
+
+ ctx, shutdown := testutil.InitForTest()
+ defer shutdown()
+
+ agentSh, err := modules.NewShell(ctx, v23.GetPrincipal(ctx))
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ defer agentSh.Cleanup(os.Stderr, os.Stderr)
+
+ // Test that with VEYRON_CREDENTIALS unset the runtime's Principal
+ // is correctly initialized for both shells.
+ if len(collect(sh, nil)) == 0 {
+ t.Fatalf("Without agent: child returned an empty default blessings set")
+ }
+ if got, want := collect(agentSh, nil), testutil.TestBlessing+security.ChainSeparator+"child"; got != want {
+ t.Fatalf("With agent: got %q, want %q", got, want)
+ }
+
+ // Test that credentials specified via the VEYRON_CREDENTIALS environment variable take
+ // precedence over an agent.
+ cdir1 := tmpDir(t)
+ defer os.RemoveAll(cdir1)
+ createCredentialsInDir(t, cdir1, "test_env")
+ credEnv := []string{consts.VeyronCredentials + "=" + cdir1}
+
+ if got, want := collect(sh, credEnv), "test_env"; got != want {
+ t.Errorf("Without agent: got default blessings: %q, want %q", got, want)
+ }
+ if got, want := collect(agentSh, credEnv), "test_env"; got != want {
+ t.Errorf("With agent: got default blessings: %q, want %q", got, want)
+ }
+
+ // Test that credentials specified via the command line take precedence over the
+ // VEYRON_CREDENTIALS environment variable and also the agent.
+ cdir2 := tmpDir(t)
+ defer os.RemoveAll(cdir2)
+ createCredentialsInDir(t, cdir2, "test_cmd")
+
+ if got, want := collect(sh, credEnv, "--veyron.credentials="+cdir2), "test_cmd"; got != want {
+ t.Errorf("Without agent: got %q, want %q", got, want)
+ }
+ if got, want := collect(agentSh, credEnv, "--veyron.credentials="+cdir2), "test_cmd"; got != want {
+ t.Errorf("With agent: got %q, want %q", got, want)
+ }
+}
diff --git a/profiles/internal/rt/runtime.go b/profiles/internal/rt/runtime.go
new file mode 100644
index 0000000..7496438
--- /dev/null
+++ b/profiles/internal/rt/runtime.go
@@ -0,0 +1,418 @@
+package rt
+
+import (
+ "fmt"
+ "os"
+ "os/signal"
+ "path/filepath"
+ "strings"
+ "syscall"
+ "time"
+
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/i18n"
+ "v.io/v23/ipc"
+ "v.io/v23/naming"
+ ns "v.io/v23/naming/ns"
+ "v.io/v23/options"
+ "v.io/v23/security"
+ "v.io/v23/verror"
+ "v.io/v23/vtrace"
+ "v.io/x/lib/vlog"
+
+ "v.io/x/ref/lib/flags"
+ "v.io/x/ref/lib/flags/buildinfo"
+ "v.io/x/ref/lib/stats"
+ _ "v.io/x/ref/lib/stats/sysstats"
+ iipc "v.io/x/ref/profiles/internal/ipc"
+ "v.io/x/ref/profiles/internal/ipc/stream"
+ imanager "v.io/x/ref/profiles/internal/ipc/stream/manager"
+ "v.io/x/ref/profiles/internal/ipc/stream/vc"
+ "v.io/x/ref/profiles/internal/lib/dependency"
+ inaming "v.io/x/ref/profiles/internal/naming"
+ "v.io/x/ref/profiles/internal/naming/namespace"
+ ivtrace "v.io/x/ref/profiles/internal/vtrace"
+)
+
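+// contextKey is an unexported type for the keys this package stores in a
+// context.T; using a private type prevents collisions with keys defined in
+// other packages.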
+type contextKey int
+
+const (
+ streamManagerKey = contextKey(iota)
+ clientKey
+ namespaceKey
+ principalKey
+ reservedNameKey
+ profileKey
+ appCycleKey
+ listenSpecKey
+ protocolsKey
+ backgroundKey
+)
+
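+// vtraceDependency is a marker value in the runtime's dependency graph;
+// clients and servers depend on it so that they are shut down before the
+// collected traces are written out.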
+type vtraceDependency struct{}
+
+// Runtime implements the v23.Runtime interface.
+// Please see the interface definition for documentation of the
+// individual methods.
+type Runtime struct {
+ deps *dependency.Graph
+}
+
+type reservedNameDispatcher struct {
+ dispatcher ipc.Dispatcher
+ opts []ipc.ServerOpt
+}
+
+func Init(ctx *context.T, appCycle v23.AppCycle, protocols []string, listenSpec *ipc.ListenSpec, flags flags.RuntimeFlags,
+ reservedDispatcher ipc.Dispatcher, dispatcherOpts ...ipc.ServerOpt) (*Runtime, *context.T, v23.Shutdown, error) {
+ r := &Runtime{deps: dependency.NewGraph()}
+
+ err := vlog.ConfigureLibraryLoggerFromFlags()
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ // TODO(caprita): Only print this out for servers?
+ vlog.Infof("Binary info: %s", buildinfo.Info())
+
+ // Setup the initial trace.
+ ctx, err = ivtrace.Init(ctx, flags.Vtrace)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ ctx, _ = vtrace.SetNewTrace(ctx)
+ r.addChild(ctx, vtraceDependency{}, func() {
+ vtrace.FormatTraces(os.Stderr, vtrace.GetStore(ctx).TraceRecords(), nil)
+ })
+
+ if reservedDispatcher != nil {
+ ctx = context.WithValue(ctx, reservedNameKey, &reservedNameDispatcher{reservedDispatcher, dispatcherOpts})
+ }
+
+ if appCycle != nil {
+ ctx = context.WithValue(ctx, appCycleKey, appCycle)
+ }
+
+ if len(protocols) > 0 {
+ ctx = context.WithValue(ctx, protocolsKey, protocols)
+ }
+
+ if listenSpec != nil {
+ ctx = context.WithValue(ctx, listenSpecKey, listenSpec)
+ }
+
+ // Setup i18n.
+ ctx = i18n.ContextWithLangID(ctx, i18n.LangIDFromEnv())
+ if len(flags.I18nCatalogue) != 0 {
+ cat := i18n.Cat()
+ for _, filename := range strings.Split(flags.I18nCatalogue, ",") {
+ err := cat.MergeFromFile(filename)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "%s: i18n: error reading i18n catalogue file %q: %s\n", os.Args[0], filename, err)
+ }
+ }
+ }
+
+ // Setup the program name.
+ ctx = verror.ContextWithComponentName(ctx, filepath.Base(os.Args[0]))
+
+ // Enable signal handling.
+ r.initSignalHandling(ctx)
+
+ // Set the initial namespace.
+ ctx, _, err = r.setNewNamespace(ctx, flags.NamespaceRoots...)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ // Set the initial stream manager.
+ ctx, err = r.setNewStreamManager(ctx)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ // The client we create here is incomplete (has a nil principal) and only works
+ // because the agent uses anonymous unix sockets and VCSecurityNone.
+ // After security is initialized we attach a real client.
+ _, client, err := r.SetNewClient(ctx)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ // Initialize security.
+ principal, err := initSecurity(ctx, flags.Credentials, client)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ ctx = r.setPrincipal(ctx, principal)
+
+ // Set up secure client.
+ ctx, _, err = r.SetNewClient(ctx)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ ctx = r.SetBackgroundContext(ctx)
+
+ return r, ctx, r.shutdown, nil
+}
+
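+// addChild registers 'me' in the runtime's dependency graph with the given
+// dependencies and arranges for stop to be called once the context is
+// cancelled and everything depending on 'me' has shut down.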
+func (r *Runtime) addChild(ctx *context.T, me interface{}, stop func(), dependsOn ...interface{}) error {
+ if err := r.deps.Depend(me, dependsOn...); err != nil {
+ stop()
+ return err
+ } else if done := ctx.Done(); done != nil {
+ go func() {
+ <-done
+ finish := r.deps.CloseAndWait(me)
+ stop()
+ finish()
+ }()
+ }
+ return nil
+}
+
+func (r *Runtime) Init(ctx *context.T) error {
+ return r.initMgmt(ctx)
+}
+
+func (r *Runtime) shutdown() {
+ r.deps.CloseAndWaitForAll()
+ vlog.FlushLog()
+}
+
+func (r *Runtime) initSignalHandling(ctx *context.T) {
+ // TODO(caprita): Given that our device manager implementation is to
+ // kill all child apps when the device manager dies, we should
+ // enable SIGHUP on apps by default.
+
+ // Automatically handle SIGHUP to prevent applications started as
+ // daemons from being killed. The developer can choose to still listen
+ // on SIGHUP and take a different action if desired.
+ signals := make(chan os.Signal, 1)
+ signal.Notify(signals, syscall.SIGHUP)
+ go func() {
+ for {
+ sig, ok := <-signals
+ if !ok {
+ break
+ }
+ vlog.Infof("Received signal %v", sig)
+ }
+ }()
+ r.addChild(ctx, signals, func() {
+ signal.Stop(signals)
+ close(signals)
+ })
+}
+
+func (*Runtime) NewEndpoint(ep string) (naming.Endpoint, error) {
+ return inaming.NewEndpoint(ep)
+}
+
+func (r *Runtime) NewServer(ctx *context.T, opts ...ipc.ServerOpt) (ipc.Server, error) {
+ // Create a new RoutingID (and StreamManager) for each server.
+ sm, err := newStreamManager()
+ if err != nil {
+ return nil, fmt.Errorf("failed to create ipc/stream/Manager: %v", err)
+ }
+
+ ns, _ := ctx.Value(namespaceKey).(ns.Namespace)
+ principal, _ := ctx.Value(principalKey).(security.Principal)
+ client, _ := ctx.Value(clientKey).(ipc.Client)
+
+ otherOpts := append([]ipc.ServerOpt{}, opts...)
+ otherOpts = append(otherOpts, vc.LocalPrincipal{principal})
+ if reserved, ok := ctx.Value(reservedNameKey).(*reservedNameDispatcher); ok {
+ otherOpts = append(otherOpts, iipc.ReservedNameDispatcher{reserved.dispatcher})
+ otherOpts = append(otherOpts, reserved.opts...)
+ }
+ if protocols, ok := ctx.Value(protocolsKey).([]string); ok {
+ otherOpts = append(otherOpts, iipc.PreferredServerResolveProtocols(protocols))
+ }
+
+ if !hasServerBlessingsOpt(opts) && principal != nil {
+ otherOpts = append(otherOpts, options.ServerBlessings{principal.BlessingStore().Default()})
+ }
+ server, err := iipc.InternalNewServer(ctx, sm, ns, r.GetClient(ctx), otherOpts...)
+ if err != nil {
+ return nil, err
+ }
+ stop := func() {
+ if err := server.Stop(); err != nil {
+ vlog.Errorf("A server could not be stopped: %v", err)
+ }
+ sm.Shutdown()
+ }
+ if err = r.addChild(ctx, server, stop, client, vtraceDependency{}); err != nil {
+ return nil, err
+ }
+ return server, nil
+}
+
+func hasServerBlessingsOpt(opts []ipc.ServerOpt) bool {
+ for _, o := range opts {
+ if _, ok := o.(options.ServerBlessings); ok {
+ return true
+ }
+ }
+ return false
+}
+
+func newStreamManager() (stream.Manager, error) {
+ rid, err := naming.NewRoutingID()
+ if err != nil {
+ return nil, err
+ }
+ sm := imanager.InternalNew(rid)
+ return sm, nil
+}
+
+func (r *Runtime) setNewStreamManager(ctx *context.T) (*context.T, error) {
+ sm, err := newStreamManager()
+ if err != nil {
+ return nil, err
+ }
+ newctx := context.WithValue(ctx, streamManagerKey, sm)
+ if err = r.addChild(ctx, sm, sm.Shutdown); err != nil {
+ return ctx, err
+ }
+ return newctx, err
+}
+
+func (r *Runtime) SetNewStreamManager(ctx *context.T) (*context.T, error) {
+ newctx, err := r.setNewStreamManager(ctx)
+ if err != nil {
+ return ctx, err
+ }
+
+ // Create a new client since it depends on the stream manager.
+ newctx, _, err = r.SetNewClient(newctx)
+ if err != nil {
+ return ctx, err
+ }
+ return newctx, nil
+}
+
+func (*Runtime) setPrincipal(ctx *context.T, principal security.Principal) *context.T {
+ // We uniquely identify a principal with "security/principal/<publicKey>".
+ principalName := "security/principal/" + principal.PublicKey().String()
+ stats.NewStringFunc(principalName+"/blessingstore", principal.BlessingStore().DebugString)
+ stats.NewStringFunc(principalName+"/blessingroots", principal.Roots().DebugString)
+ return context.WithValue(ctx, principalKey, principal)
+}
+
+func (r *Runtime) SetPrincipal(ctx *context.T, principal security.Principal) (*context.T, error) {
+ var err error
+ newctx := ctx
+
+ newctx = r.setPrincipal(ctx, principal)
+
+ if newctx, err = r.setNewStreamManager(newctx); err != nil {
+ return ctx, err
+ }
+ if newctx, _, err = r.setNewNamespace(newctx, r.GetNamespace(ctx).Roots()...); err != nil {
+ return ctx, err
+ }
+ if newctx, _, err = r.SetNewClient(newctx); err != nil {
+ return ctx, err
+ }
+
+ return newctx, nil
+}
+
+func (*Runtime) GetPrincipal(ctx *context.T) security.Principal {
+ p, _ := ctx.Value(principalKey).(security.Principal)
+ return p
+}
+
+func (r *Runtime) SetNewClient(ctx *context.T, opts ...ipc.ClientOpt) (*context.T, ipc.Client, error) {
+ otherOpts := append([]ipc.ClientOpt{}, opts...)
+
+ sm, _ := ctx.Value(streamManagerKey).(stream.Manager)
+ ns, _ := ctx.Value(namespaceKey).(ns.Namespace)
+ p, _ := ctx.Value(principalKey).(security.Principal)
+ otherOpts = append(otherOpts, vc.LocalPrincipal{p}, &imanager.DialTimeout{5 * time.Minute})
+
+ if protocols, ok := ctx.Value(protocolsKey).([]string); ok {
+ otherOpts = append(otherOpts, iipc.PreferredProtocols(protocols))
+ }
+
+ client, err := iipc.InternalNewClient(sm, ns, otherOpts...)
+ if err != nil {
+ return ctx, nil, err
+ }
+ newctx := context.WithValue(ctx, clientKey, client)
+ if err = r.addChild(ctx, client, client.Close, sm, vtraceDependency{}); err != nil {
+ return ctx, nil, err
+ }
+ return newctx, client, err
+}
+
+func (*Runtime) GetClient(ctx *context.T) ipc.Client {
+ cl, _ := ctx.Value(clientKey).(ipc.Client)
+ return cl
+}
+
+func (r *Runtime) setNewNamespace(ctx *context.T, roots ...string) (*context.T, ns.Namespace, error) {
+ ns, err := namespace.New(roots...)
+
+ if oldNS := r.GetNamespace(ctx); oldNS != nil {
+ ns.CacheCtl(oldNS.CacheCtl()...)
+ }
+
+ if err == nil {
+ ctx = context.WithValue(ctx, namespaceKey, ns)
+ }
+ return ctx, ns, err
+}
+
+func (r *Runtime) SetNewNamespace(ctx *context.T, roots ...string) (*context.T, ns.Namespace, error) {
+ newctx, ns, err := r.setNewNamespace(ctx, roots...)
+ if err != nil {
+ return ctx, nil, err
+ }
+
+ // Replace the client since it depends on the namespace.
+ newctx, _, err = r.SetNewClient(newctx)
+ if err != nil {
+ return ctx, nil, err
+ }
+
+ return newctx, ns, err
+}
+
+func (*Runtime) GetNamespace(ctx *context.T) ns.Namespace {
+ ns, _ := ctx.Value(namespaceKey).(ns.Namespace)
+ return ns
+}
+
+func (*Runtime) GetAppCycle(ctx *context.T) v23.AppCycle {
+ appCycle, _ := ctx.Value(appCycleKey).(v23.AppCycle)
+ return appCycle
+}
+
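+// GetListenSpec returns the ListenSpec supplied at Init time; it assumes one
+// was provided and will panic otherwise.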
+func (*Runtime) GetListenSpec(ctx *context.T) ipc.ListenSpec {
+ listenSpec, _ := ctx.Value(listenSpecKey).(*ipc.ListenSpec)
+ return *listenSpec
+}
+
+func (*Runtime) SetBackgroundContext(ctx *context.T) *context.T {
+ // Note we add an extra context with a nil value here.
+ // This prevents users from travelling back through the
+ // chain of background contexts.
+ ctx = context.WithValue(ctx, backgroundKey, nil)
+ return context.WithValue(ctx, backgroundKey, ctx)
+}
+
+func (*Runtime) GetBackgroundContext(ctx *context.T) *context.T {
+ bctx, _ := ctx.Value(backgroundKey).(*context.T)
+ if bctx == nil {
+ // There should always be a background context. If we don't find
+ // it, that means that the user passed us the background context
+ // in hopes of following the chain. Instead we just give them
+ // back what they sent in, which is correct.
+ return ctx
+ }
+ return bctx
+}
diff --git a/profiles/internal/rt/runtime_test.go b/profiles/internal/rt/runtime_test.go
new file mode 100644
index 0000000..79569c2
--- /dev/null
+++ b/profiles/internal/rt/runtime_test.go
@@ -0,0 +1,132 @@
+package rt_test
+
+import (
+ "testing"
+
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/naming"
+
+ "v.io/x/ref/lib/flags"
+ tsecurity "v.io/x/ref/lib/testutil/security"
+ "v.io/x/ref/profiles/internal/rt"
+ "v.io/x/ref/security"
+)
+
+// InitForTest creates a context for use in a test.
+func InitForTest(t *testing.T) (*rt.Runtime, *context.T, v23.Shutdown) {
+ ctx, cancel := context.RootContext()
+ r, ctx, shutdown, err := rt.Init(ctx, nil, nil, nil, flags.RuntimeFlags{}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if ctx, err = r.SetPrincipal(ctx, tsecurity.NewPrincipal("test-blessing")); err != nil {
+ t.Fatal(err)
+ }
+ return r, ctx, func() {
+ cancel()
+ shutdown()
+ }
+}
+
+func TestNewServer(t *testing.T) {
+ r, ctx, shutdown := InitForTest(t)
+ defer shutdown()
+
+ if s, err := r.NewServer(ctx); err != nil || s == nil {
+ t.Fatalf("Could not create server: %v", err)
+ }
+}
+
+func TestPrincipal(t *testing.T) {
+ r, ctx, shutdown := InitForTest(t)
+ defer shutdown()
+
+ p2, err := security.NewPrincipal()
+ if err != nil {
+ t.Fatalf("Could not create new principal %v", err)
+ }
+ c2, err := r.SetPrincipal(ctx, p2)
+ if err != nil {
+ t.Fatalf("Could not attach principal: %v", err)
+ }
+ if !c2.Initialized() {
+ t.Fatal("Got uninitialized context.")
+ }
+ if p2 != r.GetPrincipal(c2) {
+ t.Fatal("The new principal should be attached to the context, but it isn't")
+ }
+}
+
+func TestClient(t *testing.T) {
+ r, ctx, shutdown := InitForTest(t)
+ defer shutdown()
+
+ orig := r.GetClient(ctx)
+
+ c2, client, err := r.SetNewClient(ctx)
+ if err != nil || client == nil {
+ t.Fatalf("Could not create client: %v", err)
+ }
+ if !c2.Initialized() {
+ t.Fatal("Got uninitialized context.")
+ }
+ if client == orig {
+ t.Fatal("Should have replaced the client but didn't")
+ }
+ if client != r.GetClient(c2) {
+ t.Fatal("The new client should be attached to the context, but it isn't")
+ }
+}
+
+func TestNamespace(t *testing.T) {
+ r, ctx, shutdown := InitForTest(t)
+ defer shutdown()
+
+ orig := r.GetNamespace(ctx)
+ orig.CacheCtl(naming.DisableCache(true))
+
+ newroots := []string{"/newroot1", "/newroot2"}
+ c2, ns, err := r.SetNewNamespace(ctx, newroots...)
+ if err != nil || ns == nil {
+ t.Fatalf("Could not create namespace: %v", err)
+ }
+ if !c2.Initialized() {
+ t.Fatal("Got uninitialized context.")
+ }
+ if ns == orig {
+ t.Fatal("Should have replaced the namespace but didn't")
+ }
+ if ns != r.GetNamespace(c2) {
+ t.Fatal("The new namespace should be attached to the context, but it isn't")
+ }
+ newrootmap := map[string]bool{"/newroot1": true, "/newroot2": true}
+ for _, root := range ns.Roots() {
+ if !newrootmap[root] {
+ t.Errorf("root %s found in ns, but we expected: %v", root, newroots)
+ }
+ }
+ opts := ns.CacheCtl()
+ if len(opts) != 1 {
+ t.Fatalf("Expected one option for cache control, got %v", opts)
+ }
+ if disable, ok := opts[0].(naming.DisableCache); !ok || !bool(disable) {
+ t.Errorf("expected a disable(true) message got %#v", opts[0])
+ }
+}
+
+func TestBackgroundContext(t *testing.T) {
+ r, ctx, shutdown := InitForTest(t)
+ defer shutdown()
+
+ bgctx := r.GetBackgroundContext(ctx)
+
+ if bgctx == ctx {
+ t.Error("The background context should not be the same as the context")
+ }
+
+ bgctx2 := r.GetBackgroundContext(bgctx)
+ if bgctx != bgctx2 {
+ t.Error("Calling GetBackgroundContext a second time should return the same context.")
+ }
+}
diff --git a/profiles/internal/rt/security.go b/profiles/internal/rt/security.go
new file mode 100644
index 0000000..3459f68
--- /dev/null
+++ b/profiles/internal/rt/security.go
@@ -0,0 +1,116 @@
+package rt
+
+import (
+ "fmt"
+ "os"
+ "os/user"
+ "strconv"
+ "syscall"
+
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/mgmt"
+ "v.io/v23/security"
+
+ "v.io/x/ref/lib/exec"
+ vsecurity "v.io/x/ref/security"
+ "v.io/x/ref/security/agent"
+)
+
+func initSecurity(ctx *context.T, credentials string, client ipc.Client) (security.Principal, error) {
+ principal, err := setupPrincipal(ctx, credentials, client)
+ if err != nil {
+ return nil, err
+ }
+
+ return principal, nil
+}
+
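+// setupPrincipal determines the process principal: a principal already in the
+// context takes precedence, then a credentials directory, then an agent
+// connection, and finally a freshly created in-memory principal.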
+func setupPrincipal(ctx *context.T, credentials string, client ipc.Client) (security.Principal, error) {
+ var err error
+ var principal security.Principal
+ if principal, _ = ctx.Value(principalKey).(security.Principal); principal != nil {
+ return principal, nil
+ }
+ if len(credentials) > 0 {
+ // Close the agent FD if one was also provided.
+ if fd, err := agentFD(); err == nil && fd >= 0 {
+ syscall.Close(fd)
+ }
+ // TODO(ataly, ashankar): If multiple runtimes are getting
+ // initialized at the same time from the same VEYRON_CREDENTIALS
+ // we will need some kind of locking for the credential files.
+ if principal, err = vsecurity.LoadPersistentPrincipal(credentials, nil); err != nil {
+ if os.IsNotExist(err) {
+ if principal, err = vsecurity.CreatePersistentPrincipal(credentials, nil); err != nil {
+ return principal, err
+ }
+ return principal, vsecurity.InitDefaultBlessings(principal, defaultBlessingName())
+ }
+ return nil, err
+ }
+ return principal, nil
+ }
+ if fd, err := agentFD(); err != nil {
+ return nil, err
+ } else if fd >= 0 {
+ return connectToAgent(ctx, fd, client)
+ }
+ if principal, err = vsecurity.NewPrincipal(); err != nil {
+ return principal, err
+ }
+ return principal, vsecurity.InitDefaultBlessings(principal, defaultBlessingName())
+}
+
+// agentFD returns a non-negative file descriptor to be used to communicate with
+// the security agent if the current process has been configured to use the
+// agent.
+func agentFD() (int, error) {
+ handle, err := exec.GetChildHandle()
+ if err != nil && err != exec.ErrNoVersion {
+ return -1, err
+ }
+ var fd string
+ if handle != nil {
+ // We were started by a parent (presumably, device manager).
+ fd, _ = handle.Config.Get(mgmt.SecurityAgentFDConfigKey)
+ } else {
+ fd = os.Getenv(agent.FdVarName)
+ }
+ if fd == "" {
+ return -1, nil
+ }
+ ifd, err := strconv.Atoi(fd)
+ if err == nil {
+ // Don't let children accidentally inherit the agent connection.
+ syscall.CloseOnExec(ifd)
+ }
+ return ifd, err
+}
+
+func defaultBlessingName() string {
+ var name string
+ if user, _ := user.Current(); user != nil && len(user.Username) > 0 {
+ name = user.Username
+ } else {
+ name = "anonymous"
+ }
+ if host, _ := os.Hostname(); len(host) > 0 {
+ name = name + "@" + host
+ }
+ return fmt.Sprintf("%s-%d", name, os.Getpid())
+}
+
+func connectToAgent(ctx *context.T, fd int, client ipc.Client) (security.Principal, error) {
+ // Dup the fd, so we can create multiple runtimes.
+ syscall.ForkLock.Lock()
+ newfd, err := syscall.Dup(fd)
+ if err == nil {
+ syscall.CloseOnExec(newfd)
+ }
+ syscall.ForkLock.Unlock()
+ if err != nil {
+ return nil, err
+ }
+ return agent.NewAgentPrincipal(ctx, newfd, client)
+}
diff --git a/profiles/internal/rt/shutdown_servers_test.go b/profiles/internal/rt/shutdown_servers_test.go
new file mode 100644
index 0000000..4970251
--- /dev/null
+++ b/profiles/internal/rt/shutdown_servers_test.go
@@ -0,0 +1,271 @@
+package rt_test
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/x/lib/vlog"
+
+ "v.io/x/ref/lib/modules"
+ "v.io/x/ref/lib/signals"
+ "v.io/x/ref/lib/testutil"
+ _ "v.io/x/ref/profiles"
+)
+
+func init() {
+ modules.RegisterChild("simpleServerProgram", "", simpleServerProgram)
+ modules.RegisterChild("complexServerProgram", "", complexServerProgram)
+}
+
+type dummy struct{}
+
+func (*dummy) Echo(ipc.ServerCall) error { return nil }
+
+// makeServer sets up a simple dummy server.
+func makeServer(ctx *context.T) ipc.Server {
+ server, err := v23.NewServer(ctx)
+ if err != nil {
+ vlog.Fatalf("r.NewServer error: %s", err)
+ }
+ if _, err := server.Listen(v23.GetListenSpec(ctx)); err != nil {
+ vlog.Fatalf("server.Listen error: %s", err)
+ }
+ if err := server.Serve("", &dummy{}, nil); err != nil {
+ vlog.Fatalf("server.Serve error: %s", err)
+ }
+ return server
+}
+
+// remoteCmdLoop listens on stdin and interprets commands sent over stdin (from
+// the parent process).
+func remoteCmdLoop(ctx *context.T, stdin io.Reader) func() {
+ done := make(chan struct{})
+ go func() {
+ scanner := bufio.NewScanner(stdin)
+ for scanner.Scan() {
+ switch scanner.Text() {
+ case "stop":
+ v23.GetAppCycle(ctx).Stop()
+ case "forcestop":
+ fmt.Println("straight exit")
+ v23.GetAppCycle(ctx).ForceStop()
+ case "close":
+ close(done)
+ return
+ }
+ }
+ }()
+ return func() { <-done }
+}
+
+// complexServerProgram demonstrates the recommended way to write a more
+// complex server application (with several servers, a mix of interruptible
+// and blocking cleanup, and parallel and sequential cleanup execution).
+// For a more typical server, see simpleServerProgram.
+func complexServerProgram(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
+ // Initialize the runtime. This is boilerplate.
+ ctx, shutdown := testutil.InitForTest()
+ // shutdown is optional, but it's a good idea to clean up, especially
+ // since it takes care of flushing the logs before exiting.
+ defer shutdown()
+
+ // This is part of the test setup -- we need a way to accept
+ // commands from the parent process to simulate Stop and
+ // RemoteStop commands that would normally be issued from
+ // application code.
+ defer remoteCmdLoop(ctx, stdin)()
+
+ // Create a couple servers, and start serving.
+ server1 := makeServer(ctx)
+ server2 := makeServer(ctx)
+
+ // This is how to wait for a shutdown. In this example, a shutdown
+ // comes from a signal or a stop command.
+ var done sync.WaitGroup
+ done.Add(1)
+
+ // This is how to configure signal handling to allow clean shutdown.
+ sigChan := make(chan os.Signal, 2)
+ signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGINT)
+
+ // This is how to configure handling of stop commands to allow clean
+ // shutdown.
+ stopChan := make(chan string, 2)
+ v23.GetAppCycle(ctx).WaitForStop(stopChan)
+
+ // Blocking is used to prevent the process from exiting upon receiving a
+ // second signal or stop command while critical cleanup code is
+ // executing.
+ var blocking sync.WaitGroup
+ blockingCh := make(chan struct{})
+
+ // This is how to wait for a signal or stop command and initiate the
+ // clean shutdown steps.
+ go func() {
+ // First signal received.
+ select {
+ case sig := <-sigChan:
+ // If the developer wants to take different actions
+ // depending on the type of signal, they can do it here.
+ fmt.Fprintln(stdout, "Received signal", sig)
+ case stop := <-stopChan:
+ fmt.Fprintln(stdout, "Stop", stop)
+ }
+ // This commences the cleanup stage.
+ done.Done()
+ // Wait for a second signal or stop command, and force an exit,
+ // but only once all blocking cleanup code (if any) has
+ // completed.
+ select {
+ case <-sigChan:
+ case <-stopChan:
+ }
+ <-blockingCh
+ os.Exit(signals.DoubleStopExitCode)
+ }()
+
+ // This communicates to the parent test driver process in our unit test
+ // that this server is ready and waiting on signals or stop commands.
+ // It's purely an artifact of our test setup.
+ fmt.Fprintln(stdout, "Ready")
+
+ // Wait for shutdown.
+ done.Wait()
+
+ // Stop the servers. In this example we stop them in goroutines to
+ // parallelize the wait, but if there was a dependency between the
+ // servers, the developer can simply stop them sequentially.
+ var waitServerStop sync.WaitGroup
+ waitServerStop.Add(2)
+ go func() {
+ server1.Stop()
+ waitServerStop.Done()
+ }()
+ go func() {
+ server2.Stop()
+ waitServerStop.Done()
+ }()
+ waitServerStop.Wait()
+
+ // This is where all cleanup code should go. By placing it at the end,
+ // we make its purpose and order of execution clear.
+
+ // This is an example of how to mix parallel and sequential cleanup
+ // steps. Most real-world servers will likely be simpler, with either
+ // just sequential or just parallel cleanup stages.
+
+ // parallelCleanup is used to wait for all goroutines executing cleanup
+ // code in parallel to finish.
+ var parallelCleanup sync.WaitGroup
+
+ // Simulate four parallel cleanup steps, two blocking and two
+ // interruptible.
+ parallelCleanup.Add(1)
+ blocking.Add(1)
+ go func() {
+ fmt.Fprintln(stdout, "Parallel blocking cleanup1")
+ blocking.Done()
+ parallelCleanup.Done()
+ }()
+
+ parallelCleanup.Add(1)
+ blocking.Add(1)
+ go func() {
+ fmt.Fprintln(stdout, "Parallel blocking cleanup2")
+ blocking.Done()
+ parallelCleanup.Done()
+ }()
+
+ parallelCleanup.Add(1)
+ go func() {
+ fmt.Fprintln(stdout, "Parallel interruptible cleanup1")
+ parallelCleanup.Done()
+ }()
+
+ parallelCleanup.Add(1)
+ go func() {
+ fmt.Fprintln(stdout, "Parallel interruptible cleanup2")
+ parallelCleanup.Done()
+ }()
+
+ // Simulate two sequential cleanup steps, one blocking and one
+ // interruptible.
+ fmt.Fprintln(stdout, "Sequential blocking cleanup")
+ blocking.Wait()
+ close(blockingCh)
+
+ fmt.Fprintln(stdout, "Sequential interruptible cleanup")
+
+ parallelCleanup.Wait()
+ return nil
+}
+
+// simpleServerProgram demonstrates the recommended way to write a typical
+// simple server application (with one server and a clean shutdown triggered by
+// a signal or a stop command). For an example of something more involved, see
+// complexServerProgram.
+func simpleServerProgram(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
+ // Initialize the runtime. This is boilerplate.
+ ctx, shutdown := testutil.InitForTest()
+ // Calling shutdown is optional, but it's a good idea to clean up, especially
+ // since it takes care of flushing the logs before exiting.
+ //
+ // We use defer to ensure this is the last thing in the program (to
+ // avoid shutting down the runtime while it may still be in use), and to
+ // allow it to execute even if a panic occurs down the road.
+ defer shutdown()
+
+ // This is part of the test setup -- we need a way to accept
+ // commands from the parent process to simulate Stop and
+ // RemoteStop commands that would normally be issued from
+ // application code.
+ defer remoteCmdLoop(ctx, stdin)()
+
+ // Create a server, and start serving.
+ server := makeServer(ctx)
+
+ // This is how to wait for a shutdown. In this example, a shutdown
+ // comes from a signal or a stop command.
+ //
+ // Note, if the developer wants to exit immediately upon receiving a
+ // signal or stop command, they can skip this, in which case the default
+ // behavior is for the process to exit.
+ waiter := signals.ShutdownOnSignals(ctx)
+
+ // This communicates to the parent test driver process in our unit test
+ // that this server is ready and waiting on signals or stop commands.
+ // It's purely an artifact of our test setup.
+ fmt.Fprintln(stdout, "Ready")
+
+ // Use defer for anything that should still execute even if a panic
+ // occurs.
+ defer fmt.Fprintln(stdout, "Deferred cleanup")
+
+ // Wait for shutdown.
+ sig := <-waiter
+ // The developer could take different actions depending on the type of
+ // signal.
+ fmt.Fprintln(stdout, "Received signal", sig)
+
+ // Cleanup code starts here. Alternatively, these steps could be
+ // invoked through defer, but we list them here to make the order of
+ // operations obvious.
+
+ // Stop the server.
+ server.Stop()
+
+ // Note, this will not execute in cases of forced shutdown
+ // (e.g. SIGKILL), when the process calls os.Exit (e.g. via log.Fatal),
+ // or when a panic occurs.
+ fmt.Fprintln(stdout, "Interruptible cleanup")
+
+ return nil
+}
diff --git a/profiles/internal/rt/shutdown_test.go b/profiles/internal/rt/shutdown_test.go
new file mode 100644
index 0000000..120b0f5
--- /dev/null
+++ b/profiles/internal/rt/shutdown_test.go
@@ -0,0 +1,236 @@
+package rt_test
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "syscall"
+ "testing"
+ "time"
+
+ "v.io/v23"
+
+ "v.io/x/ref/lib/expect"
+ "v.io/x/ref/lib/modules"
+ "v.io/x/ref/lib/signals"
+)
+
+//go:generate v23 test generate
+
+var cstderr io.Writer
+
+func init() {
+ if testing.Verbose() {
+ cstderr = os.Stderr
+ }
+}
+
+func newShell(t *testing.T) *modules.Shell {
+ sh, err := modules.NewShell(nil, nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ return sh
+}
+
+// TestSimpleServerSignal verifies that sending a signal to the simple server
+// causes it to exit cleanly.
+func TestSimpleServerSignal(t *testing.T) {
+ sh := newShell(t)
+ defer sh.Cleanup(os.Stdout, cstderr)
+ h, _ := sh.Start("simpleServerProgram", nil)
+ s := expect.NewSession(t, h.Stdout(), time.Minute)
+ s.Expect("Ready")
+ syscall.Kill(h.Pid(), syscall.SIGINT)
+ s.Expect("Received signal interrupt")
+ s.Expect("Interruptible cleanup")
+ s.Expect("Deferred cleanup")
+ fmt.Fprintln(h.Stdin(), "close")
+ s.ExpectEOF()
+}
+
+// TestSimpleServerLocalStop verifies that sending a local stop command to the
+// simple server causes it to exit cleanly.
+func TestSimpleServerLocalStop(t *testing.T) {
+ sh := newShell(t)
+ defer sh.Cleanup(os.Stdout, cstderr)
+ h, _ := sh.Start("simpleServerProgram", nil)
+ s := expect.NewSession(t, h.Stdout(), time.Minute)
+ s.Expect("Ready")
+ fmt.Fprintln(h.Stdin(), "stop")
+ s.Expect(fmt.Sprintf("Received signal %s", v23.LocalStop))
+ s.Expect("Interruptible cleanup")
+ s.Expect("Deferred cleanup")
+ fmt.Fprintln(h.Stdin(), "close")
+ s.ExpectEOF()
+}
+
+// TestSimpleServerDoubleSignal verifies that sending a succession of two
+// signals to the simple server causes it to initiate the cleanup sequence on
+// the first signal and then exit immediately on the second signal.
+func TestSimpleServerDoubleSignal(t *testing.T) {
+ sh := newShell(t)
+ defer sh.Cleanup(os.Stdout, cstderr)
+ h, _ := sh.Start("simpleServerProgram", nil)
+ s := expect.NewSession(t, h.Stdout(), time.Minute)
+ s.Expect("Ready")
+ syscall.Kill(h.Pid(), syscall.SIGINT)
+ s.Expect("Received signal interrupt")
+ syscall.Kill(h.Pid(), syscall.SIGINT)
+ err := h.Shutdown(os.Stdout, cstderr)
+ if err == nil {
+ t.Fatalf("expected an error")
+ }
+ if got, want := err.Error(), fmt.Sprintf("exit status %d", signals.DoubleStopExitCode); got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
+
+// TestSimpleServerLocalForceStop verifies that sending a local ForceStop
+// command to the simple server causes it to exit immediately.
+func TestSimpleServerLocalForceStop(t *testing.T) {
+ sh := newShell(t)
+ defer sh.Cleanup(os.Stdout, cstderr)
+ h, _ := sh.Start("simpleServerProgram", nil)
+ s := expect.NewSession(t, h.Stdout(), time.Minute)
+ s.Expect("Ready")
+ fmt.Fprintln(h.Stdin(), "forcestop")
+ s.Expect("straight exit")
+ err := h.Shutdown(os.Stdout, cstderr)
+ if err == nil {
+ t.Fatalf("expected an error")
+ }
+ if got, want := err.Error(), fmt.Sprintf("exit status %d", v23.ForceStopExitCode); got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
+
+// TestSimpleServerKill demonstrates that a SIGKILL still forces the server
+// to exit regardless of our signal handling.
+func TestSimpleServerKill(t *testing.T) {
+ sh := newShell(t)
+ defer sh.Cleanup(os.Stdout, cstderr)
+ h, _ := sh.Start("simpleServerProgram", nil)
+ s := expect.NewSession(t, h.Stdout(), time.Minute)
+ s.Expect("Ready")
+ syscall.Kill(h.Pid(), syscall.SIGKILL)
+ err := h.Shutdown(os.Stdout, cstderr)
+ if err == nil {
+ t.Fatalf("expected an error")
+ }
+ if got, want := err.Error(), "signal: killed"; got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
+
+// TestComplexServerSignal verifies that sending a signal to the complex server
+// initiates the cleanup sequence in that server (we observe the printouts
+// corresponding to all the simulated sequential/parallel and
+// blocking/interruptible shutdown steps), and then exits cleanly.
+func TestComplexServerSignal(t *testing.T) {
+ sh := newShell(t)
+ defer sh.Cleanup(os.Stdout, cstderr)
+ h, _ := sh.Start("complexServerProgram", nil)
+ s := expect.NewSession(t, h.Stdout(), time.Minute)
+ s.Expect("Ready")
+ syscall.Kill(h.Pid(), syscall.SIGINT)
+ s.Expect("Received signal interrupt")
+ s.ExpectSetRE("Sequential blocking cleanup",
+ "Sequential interruptible cleanup",
+ "Parallel blocking cleanup1",
+ "Parallel blocking cleanup2",
+ "Parallel interruptible cleanup1",
+ "Parallel interruptible cleanup2")
+ fmt.Fprintln(h.Stdin(), "close")
+ s.ExpectEOF()
+}
+
+// TestComplexServerLocalStop verifies that sending a local stop command to the
+// complex server initiates the cleanup sequence in that server (we observe the
+// printouts corresponding to all the simulated sequential/parallel and
+// blocking/interruptible shutdown steps), and then exits cleanly.
+func TestComplexServerLocalStop(t *testing.T) {
+ sh := newShell(t)
+ defer sh.Cleanup(os.Stdout, cstderr)
+ h, _ := sh.Start("complexServerProgram", nil)
+ s := expect.NewSession(t, h.Stdout(), time.Minute)
+ s.Expect("Ready")
+
+ fmt.Fprintln(h.Stdin(), "stop")
+ s.Expect(fmt.Sprintf("Stop %s", v23.LocalStop))
+ s.ExpectSetRE(
+ "Sequential blocking cleanup",
+ "Sequential interruptible cleanup",
+ "Parallel blocking cleanup1",
+ "Parallel blocking cleanup2",
+ "Parallel interruptible cleanup1",
+ "Parallel interruptible cleanup2",
+ )
+ fmt.Fprintln(h.Stdin(), "close")
+ s.ExpectEOF()
+}
+
+// TestComplexServerDoubleSignal verifies that sending a succession of two
+// signals to the complex server has the expected effect: the first signal
+// initiates the cleanup steps and the second signal kills the process, but only
+// after the blocking shutdown steps were allowed to complete (as observed by
+// the corresponding printouts from the server). Note that we have no
+// expectations on whether or not the interruptible shutdown steps execute.
+func TestComplexServerDoubleSignal(t *testing.T) {
+ sh := newShell(t)
+ defer sh.Cleanup(os.Stdout, cstderr)
+ h, _ := sh.Start("complexServerProgram", nil)
+ s := expect.NewSession(t, h.Stdout(), time.Minute)
+ s.Expect("Ready")
+ syscall.Kill(h.Pid(), syscall.SIGINT)
+ s.Expect("Received signal interrupt")
+ syscall.Kill(h.Pid(), syscall.SIGINT)
+ s.ExpectSetEventuallyRE(
+ "Sequential blocking cleanup",
+ "Parallel blocking cleanup1",
+ "Parallel blocking cleanup2")
+ err := h.Shutdown(os.Stdout, cstderr)
+ if err == nil {
+ t.Fatalf("expected an error")
+ }
+ if got, want := err.Error(), fmt.Sprintf("exit status %d", signals.DoubleStopExitCode); got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
+
+// TestComplexServerLocalForceStop verifies that sending a local ForceStop
+// command to the complex server forces it to exit immediately.
+func TestComplexServerLocalForceStop(t *testing.T) {
+ sh := newShell(t)
+ defer sh.Cleanup(os.Stdout, cstderr)
+ h, _ := sh.Start("complexServerProgram", nil)
+ s := expect.NewSession(t, h.Stdout(), time.Minute)
+ s.Expect("Ready")
+ fmt.Fprintln(h.Stdin(), "forcestop")
+ s.Expect("straight exit")
+ err := h.Shutdown(os.Stdout, cstderr)
+ if err == nil {
+ t.Fatalf("expected an error")
+ }
+ if got, want := err.Error(), fmt.Sprintf("exit status %d", v23.ForceStopExitCode); got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
+
+// TestComplexServerKill demonstrates that a SIGKILL still forces the server to
+// exit regardless of our signal handling.
+func TestComplexServerKill(t *testing.T) {
+ sh := newShell(t)
+ defer sh.Cleanup(os.Stdout, cstderr)
+ h, _ := sh.Start("complexServerProgram", nil)
+ s := expect.NewSession(t, h.Stdout(), time.Minute)
+ s.Expect("Ready")
+ syscall.Kill(h.Pid(), syscall.SIGKILL)
+ err := h.Shutdown(os.Stdout, cstderr)
+ if err == nil {
+ t.Fatalf("expected an error")
+ }
+ if got, want := err.Error(), "signal: killed"; got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
diff --git a/profiles/internal/rt/signal_test.go b/profiles/internal/rt/signal_test.go
new file mode 100644
index 0000000..291977d
--- /dev/null
+++ b/profiles/internal/rt/signal_test.go
@@ -0,0 +1,86 @@
+package rt_test
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "syscall"
+ "testing"
+ "time"
+
+ "v.io/x/ref/lib/expect"
+ "v.io/x/ref/lib/modules"
+ "v.io/x/ref/lib/testutil"
+ _ "v.io/x/ref/profiles"
+)
+
+func init() {
+ modules.RegisterChild("withRuntime", "", withRuntime)
+ modules.RegisterChild("withoutRuntime", "", withoutRuntime)
+}
+
+func simpleEchoProgram(stdin io.Reader, stdout io.Writer) {
+ fmt.Fprintf(stdout, "ready\n")
+ scanner := bufio.NewScanner(stdin)
+ if scanner.Scan() {
+ fmt.Fprintf(stdout, "%s\n", scanner.Text())
+ }
+ modules.WaitForEOF(stdin)
+}
+
+func withRuntime(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
+ _, shutdown := testutil.InitForTest()
+ defer shutdown()
+
+ simpleEchoProgram(stdin, stdout)
+ return nil
+}
+
+func withoutRuntime(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
+ simpleEchoProgram(stdin, stdout)
+ return nil
+}
+
+func TestWithRuntime(t *testing.T) {
+ sh, err := modules.NewShell(nil, nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ defer sh.Cleanup(os.Stderr, os.Stderr)
+ h, err := sh.Start("withRuntime", nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ defer h.Shutdown(os.Stderr, os.Stderr)
+ s := expect.NewSession(t, h.Stdout(), time.Minute)
+ s.Expect("ready")
+ syscall.Kill(h.Pid(), syscall.SIGHUP)
+ h.Stdin().Write([]byte("foo\n"))
+ s.Expect("foo")
+ h.CloseStdin()
+ s.ExpectEOF()
+}
+
+func TestWithoutRuntime(t *testing.T) {
+ sh, err := modules.NewShell(nil, nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ defer sh.Cleanup(os.Stderr, os.Stderr)
+ h, err := sh.Start("withoutRuntime", nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ defer h.Shutdown(os.Stderr, os.Stderr)
+ s := expect.NewSession(t, h.Stdout(), time.Minute)
+ s.Expect("ready")
+ syscall.Kill(h.Pid(), syscall.SIGHUP)
+ s.ExpectEOF()
+ err = h.Shutdown(os.Stderr, os.Stderr)
+ want := "exit status 2"
+ if err == nil || err.Error() != want {
+ t.Errorf("got %s, want %s", err, want)
+ }
+}
diff --git a/profiles/internal/rt/v23_test.go b/profiles/internal/rt/v23_test.go
new file mode 100644
index 0000000..4ca1965
--- /dev/null
+++ b/profiles/internal/rt/v23_test.go
@@ -0,0 +1,46 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated via go generate.
+// DO NOT UPDATE MANUALLY
+package rt_test
+
+import "fmt"
+import "testing"
+import "os"
+
+import "v.io/x/ref/lib/modules"
+import "v.io/x/ref/lib/testutil"
+
+func init() {
+ modules.RegisterChild("noWaiters", ``, noWaiters)
+ modules.RegisterChild("forceStop", ``, forceStop)
+ modules.RegisterChild("app", ``, app)
+ modules.RegisterChild("child", ``, child)
+ modules.RegisterChild("principal", ``, principal)
+ modules.RegisterChild("runner", `Runner runs a principal as a subprocess and reports back with its
+own security info and it's childs.`, runner)
+ modules.RegisterChild("complexServerProgram", `complexServerProgram demonstrates the recommended way to write a more
+complex server application (with several servers, a mix of interruptible
+and blocking cleanup, and parallel and sequential cleanup execution).
+For a more typical server, see simpleServerProgram.`, complexServerProgram)
+ modules.RegisterChild("simpleServerProgram", `simpleServerProgram demonstrates the recommended way to write a typical
+simple server application (with one server and a clean shutdown triggered by
+a signal or a stop command). For an example of something more involved, see
+complexServerProgram.`, simpleServerProgram)
+ modules.RegisterChild("withRuntime", ``, withRuntime)
+ modules.RegisterChild("withoutRuntime", ``, withoutRuntime)
+}
+
+func TestMain(m *testing.M) {
+ testutil.Init()
+ if modules.IsModulesProcess() {
+ if err := modules.Dispatch(); err != nil {
+ fmt.Fprintf(os.Stderr, "modules.Dispatch failed: %v\n", err)
+ os.Exit(1)
+ }
+ return
+ }
+ os.Exit(m.Run())
+}
diff --git a/profiles/internal/testing/concurrency/choice.go b/profiles/internal/testing/concurrency/choice.go
new file mode 100644
index 0000000..4ef8066
--- /dev/null
+++ b/profiles/internal/testing/concurrency/choice.go
@@ -0,0 +1,19 @@
+package concurrency
+
+// choice enumerates the program transitions to choose from and
+// identifies which transition is to be taken next.
+type choice struct {
+ // next records the thread identifier for the thread that was
+ // selected to be scheduled next.
+ next TID
+ // transitions records the transitions for all the threads that
+ // could have been scheduled next.
+ transitions map[TID]*transition
+}
+
+// newChoice is the choice factory.
+func newChoice() *choice {
+ return &choice{
+ transitions: make(map[TID]*transition),
+ }
+}
diff --git a/profiles/internal/testing/concurrency/clock.go b/profiles/internal/testing/concurrency/clock.go
new file mode 100644
index 0000000..1880c87
--- /dev/null
+++ b/profiles/internal/testing/concurrency/clock.go
@@ -0,0 +1,50 @@
+package concurrency
+
+import (
+ "reflect"
+)
+
+// clock is a type for the vector clock, which is used to keep track
+// of logical time in a concurrent program.
+type clock map[TID]int
+
+// newClock is the clock factory.
+func newClock() clock {
+ return make(clock)
+}
+
+// clone produces a copy of the clock.
+func (c clock) clone() clock {
+ clone := newClock()
+ for k, v := range c {
+ clone[k] = v
+ }
+ return clone
+}
+
+// equals checks if this clock identifies a logical time identical to
+// the logical time identified by the given clock.
+func (c clock) equals(other clock) bool {
+ return reflect.DeepEqual(c, other)
+}
+
+// happensBefore checks if this clock identifies a logical time that
+// happened before (in the sense of Lamport's happens-before relation)
+// the logical time identified by the given clock.
+func (c clock) happensBefore(other clock) bool {
+ for k, v := range c {
+ if value, found := other[k]; !found || v > value {
+ return false
+ }
+ }
+ return true
+}
+
+// merge merges the value of the given clock with this clock.
+func (c clock) merge(other clock) {
+ for key, value := range other {
+ if v, found := c[key]; !found || v < value {
+ c[key] = value
+ }
+ }
+}
diff --git a/profiles/internal/testing/concurrency/clock_test.go b/profiles/internal/testing/concurrency/clock_test.go
new file mode 100644
index 0000000..0166971
--- /dev/null
+++ b/profiles/internal/testing/concurrency/clock_test.go
@@ -0,0 +1,77 @@
+package concurrency
+
+import (
+ "testing"
+
+ "v.io/x/ref/lib/testutil"
+)
+
+//go:generate v23 test generate
+
+// TestClone checks the clone() method of a clock.
+func TestClone(t *testing.T) {
+ c1 := newClock()
+ c1[0] = testutil.Rand.Intn(100)
+ c2 := c1.clone()
+ c1[0]++
+ if c2[0] != c1[0]-1 {
+ t.Errorf("Unexpected clock value: expected %v, got %v", c1[0]-1, c2[0])
+ }
+}
+
+// TestEquality checks the equals() method of a clock.
+func TestEquality(t *testing.T) {
+ c1, c2 := newClock(), newClock()
+ for i := TID(0); i < TID(10); i++ {
+ c1[i] = testutil.Rand.Intn(100)
+ c2[i] = c1[i]
+ }
+ if !c1.equals(c2) {
+ t.Errorf("Unexpected inequality between %v and %v", c1, c2)
+ }
+}
+
+// TestHappensBefore checks the happensBefore() method of a clock.
+func TestHappensBefore(t *testing.T) {
+ c1, c2, c3 := newClock(), newClock(), newClock()
+ for i := TID(0); i < TID(10); i++ {
+ c1[i] = testutil.Rand.Intn(100)
+ if i%2 == 0 {
+ c2[i] = c1[i] + 1
+ c3[i] = c1[i] + 1
+ } else {
+ c2[i] = c1[i]
+ c3[i] = c2[i] - 1
+ }
+ }
+ if !c1.happensBefore(c1) {
+ t.Errorf("Unexpected outcome of %v.happensBefore(%v): expected %v, got %v", c1, c1, true, false)
+ }
+ if !c1.happensBefore(c2) {
+ t.Errorf("Unexpected outcome of %v.happensBefore(%v): expected %v, got %v", c1, c2, true, false)
+ }
+ if c2.happensBefore(c1) {
+ t.Errorf("Unexpected outcome of %v.happensBefore(%v): expected %v, got %v", c2, c1, false, true)
+ }
+ if c1.happensBefore(c3) {
+ t.Errorf("Unexpected outcome of %v.happensBefore(%v): expected %v, got %v", c1, c3, false, true)
+ }
+ if c3.happensBefore(c1) {
+ t.Errorf("Unexpected outcome of %v.happensBefore(%v): expected %v, got %v", c3, c1, false, true)
+ }
+}
+
+// TestMerge checks the merge() method of a clock.
+func TestMerge(t *testing.T) {
+ c1, c2 := newClock(), newClock()
+ for i := TID(0); i < TID(10); i++ {
+ c1[i] = testutil.Rand.Intn(100)
+ c2[i] = testutil.Rand.Intn(100)
+ }
+ c1.merge(c2)
+ for i := TID(0); i < TID(10); i++ {
+ if c1[i] < c2[i] {
+ t.Errorf("Unexpected order between %v and %v: expected '>=', got '<'", c1[i], c2[i])
+ }
+ }
+}
diff --git a/profiles/internal/testing/concurrency/context.go b/profiles/internal/testing/concurrency/context.go
new file mode 100644
index 0000000..4e5a1b7
--- /dev/null
+++ b/profiles/internal/testing/concurrency/context.go
@@ -0,0 +1,22 @@
+package concurrency
+
+import (
+ "sync"
+)
+
+// context stores the abstract state of resources used in an execution
+// of a concurrent program.
+type context struct {
+ // mutexes stores the abstract state of mutexes.
+ mutexes map[*sync.Mutex]*fakeMutex
+ // rwMutexes stores the abstract state of read-write mutexes.
+ rwMutexes map[*sync.RWMutex]*fakeRWMutex
+}
+
+// newContext is the context factory.
+func newContext() *context {
+ return &context{
+ mutexes: make(map[*sync.Mutex]*fakeMutex),
+ rwMutexes: make(map[*sync.RWMutex]*fakeRWMutex),
+ }
+}
diff --git a/profiles/internal/testing/concurrency/doc.go b/profiles/internal/testing/concurrency/doc.go
new file mode 100644
index 0000000..bfb9f05
--- /dev/null
+++ b/profiles/internal/testing/concurrency/doc.go
@@ -0,0 +1,34 @@
+// Package concurrency implements a framework for systematic testing
+// of concurrent veyron Go programs. The framework implements the
+// ideas described in "Systematic and Scalable Testing of Concurrent
+// Programs":
+//
+// http://repository.cmu.edu/cgi/viewcontent.cgi?article=1291&context=dissertations
+//
+// Abstractly, the systematic testing framework divides execution of
+// concurrent threads into coarse-grained transitions, by interposing
+// on events of interest (e.g. thread creation, mutex acquisition, or
+// channel communication).
+//
+// The interposed events suspend the calling thread and a centralized
+// user-level scheduler is used to serialize the concurrent execution
+// by allowing only one concurrent transition to execute at any given
+// time. In addition to controlling the scheduling, this centralized
+// scheduler keeps track of the alternative scheduling choices. This
+// information is then used to explore a different sequence of
+// transitions the next time the test body is executed.
+//
+// The framework is initialized through the Init(setup, body, cleanup)
+// function which specifies the test setup, body, and cleanup
+// respectively. To start a systematic exploration, one invokes one of
+// the following functions: Explore(), ExploreN(n), or
+// ExploreFor(d). These functions repeatedly execute the test
+// described through Init(), systematically enumerating the different
+// ways in which different executions sequence concurrent transitions
+// of the test. Finally, each systematic exploration should end by
+// invoking the Finish() function.
+//
+// See mutex_test.go for an example on how to use this framework to
+// test concurrent access to mutexes.
+
+package concurrency
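
To make the API described above concrete, here is a minimal usage sketch (illustrative only, not part of this change). It relies on the instrumented sync wrapper added later in this change; the test and variable names are made up for the example:

package example_test

import (
	"testing"

	"v.io/x/ref/profiles/internal/testing/concurrency"
	"v.io/x/ref/profiles/internal/testing/concurrency/sync"
)

var mu sync.Mutex

// body spawns one child thread; both threads contend for mu. Every
// thread ends with concurrency.Exit() and children are created through
// concurrency.Start() so that the scheduler can interpose on them.
func body() {
	defer concurrency.Exit()
	concurrency.Start(func() {
		defer concurrency.Exit()
		mu.Lock()
		mu.Unlock()
	})
	mu.Lock()
	mu.Unlock()
}

func TestExample(t *testing.T) {
	tester := concurrency.Init(func() {}, body, func() {})
	defer concurrency.Finish()
	if _, err := tester.Explore(); err != nil {
		t.Fatal(err)
	}
}
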
diff --git a/profiles/internal/testing/concurrency/execution.go b/profiles/internal/testing/concurrency/execution.go
new file mode 100644
index 0000000..696a481
--- /dev/null
+++ b/profiles/internal/testing/concurrency/execution.go
@@ -0,0 +1,145 @@
+package concurrency
+
+import (
+ "fmt"
+ "sort"
+)
+
+// execution represents an execution of the test.
+type execution struct {
+ // strategy describes the initial sequence of scheduling decisions
+ // to make.
+ strategy []TID
+ // nsteps records the number of scheduling decisions made.
+ nsteps int
+ // nthreads records the number of threads in the system.
+ nthreads int
+ // nrequests records the number of currently pending requests.
+ nrequests int
+ // requests is a channel on which scheduling requests are received.
+ requests chan request
+ // done is a channel that the request handlers can use to wake up
+ // the main scheduling loop.
+ done chan struct{}
+ // nextTID is a function that can be used to generate unique thread
+ // identifiers.
+ nextTID func() TID
+ // activeTID records the identifier of the currently active thread.
+ activeTID TID
+ // ctx stores the abstract state of resources used by the
+ // execution.
+ ctx *context
+ // threads records the abstract state of threads active in the
+ // execution.
+ threads map[TID]*thread
+}
+
+// newExecution is the execution factory.
+func newExecution(strategy []TID) *execution {
+ execution := &execution{
+ strategy: strategy,
+ nthreads: 1,
+ requests: make(chan request),
+ nextTID: TIDGenerator(),
+ ctx: newContext(),
+ threads: make(map[TID]*thread),
+ }
+ clock := newClock()
+ clock[0] = 0
+ execution.threads[0] = newThread(0, clock)
+ return execution
+}
+
+// Run executes the body of the test, exploring a sequence of
+// scheduling decisions, and returns a vector of the scheduling
+// decisions it made as well as the alternative scheduling decisions
+// it could have made instead.
+func (e *execution) Run(testBody func()) []*choice {
+ go testBody()
+ choices := make([]*choice, 0)
+ // Keep processing scheduling requests as long as there are threads
+ // left in the system.
+ for e.nthreads != 0 {
+ // Keep receiving scheduling requests until all threads are
+ // blocked on a decision.
+ for e.nrequests != e.nthreads {
+ request, ok := <-e.requests
+ if !ok {
+ panic("Command channel closed unexpectedly.")
+ }
+ e.nrequests++
+ request.process(e)
+ }
+ choice := e.generateChoice()
+ choices = append(choices, choice)
+ e.activeTID = choice.next
+ e.schedule(choice.next)
+ }
+ return choices
+}
+
+// findThread uses the given thread identifier to find a thread among
+// the known threads.
+func (e *execution) findThread(tid TID) *thread {
+ thread, ok := e.threads[tid]
+ if !ok {
+ panic(fmt.Sprintf("Could not find thread %v.", tid))
+ }
+ return thread
+}
+
+// generateChoice describes the scheduling choices available at the
+// current abstract program state.
+func (e *execution) generateChoice() *choice {
+ c := newChoice()
+ enabled := make([]TID, 0)
+ for tid, thread := range e.threads {
+ t := &transition{
+ tid: tid,
+ clock: thread.clock.clone(),
+ enabled: thread.enabled(e.ctx),
+ kind: thread.kind(),
+ readSet: thread.readSet(),
+ writeSet: thread.writeSet(),
+ }
+ c.transitions[tid] = t
+ if t.enabled {
+ enabled = append(enabled, tid)
+ }
+ }
+ if len(c.transitions) == 0 {
+ panic("Encountered a deadlock.")
+ }
+ if e.nsteps < len(e.strategy) {
+ // Follow the scheduling strategy.
+ c.next = e.strategy[e.nsteps]
+ } else {
+ // Schedule an enabled thread using a deterministic round-robin
+ // scheduler.
+ sort.Sort(IncreasingTID(enabled))
+ index := 0
+ for ; index < len(enabled) && enabled[index] <= e.activeTID; index++ {
+ }
+ if index == len(enabled) {
+ index = 0
+ }
+ c.next = enabled[index]
+ }
+ return c
+}
+
+// schedule advances the execution of the given thread.
+func (e *execution) schedule(tid TID) {
+ e.nrequests--
+ e.nsteps++
+ thread, ok := e.threads[tid]
+ if !ok {
+ panic(fmt.Sprintf("Could not find thread %v.\n", tid))
+ }
+ if !thread.enabled(e.ctx) {
+ panic(fmt.Sprintf("Thread %v is about to be scheduled and is not enabled.", tid))
+ }
+ e.done = make(chan struct{})
+ close(thread.ready)
+ <-e.done
+}
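
The round-robin selection at the end of generateChoice can be read in isolation; the following stand-alone helper (illustrative only, not part of this change, assumed to sit inside the concurrency package) mirrors that loop:

// nextRoundRobin returns the smallest enabled identifier that is larger
// than the currently active one, wrapping to the front when none exists.
// The enabled slice must be sorted in increasing order, as in generateChoice.
func nextRoundRobin(enabled []TID, active TID) TID {
	index := 0
	for ; index < len(enabled) && enabled[index] <= active; index++ {
	}
	if index == len(enabled) {
		index = 0
	}
	return enabled[index]
}

// For example, nextRoundRobin([]TID{0, 2, 5}, 2) returns 5, and
// nextRoundRobin([]TID{0, 2, 5}, 5) wraps around and returns 0.
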
diff --git a/profiles/internal/testing/concurrency/fake.go b/profiles/internal/testing/concurrency/fake.go
new file mode 100644
index 0000000..0c47b97
--- /dev/null
+++ b/profiles/internal/testing/concurrency/fake.go
@@ -0,0 +1,127 @@
+package concurrency
+
+// mutexState is a type to represent different states of a mutex.
+type mutexState uint64
+
+// enumeration of different mutex states.
+const (
+ mutexFree mutexState = iota
+ mutexLocked
+)
+
+// fakeMutex is an abstract representation of a mutex.
+type fakeMutex struct {
+ // clock records the logical time of the last access to the mutex.
+ clock clock
+ // state records the state of the mutex.
+ state mutexState
+}
+
+// newFakeMutex is the fakeMutex factory.
+func newFakeMutex(clock clock) *fakeMutex {
+ return &fakeMutex{clock: clock.clone()}
+}
+
+// free checks if the mutex is free.
+func (m *fakeMutex) free() bool {
+ return m.state == mutexFree
+}
+
+// locked checks if the mutex is locked.
+func (m *fakeMutex) locked() bool {
+ return m.state == mutexLocked
+}
+
+// lock models the action of locking the mutex.
+func (m *fakeMutex) lock() {
+ if m.state != mutexFree {
+ panic("Locking a mutex that is already locked.")
+ }
+ m.state = mutexLocked
+}
+
+// unlock models the action of unlocking the mutex.
+func (m *fakeMutex) unlock() {
+ if m.state != mutexLocked {
+ panic("Unlocking a mutex that is not locked.")
+ }
+ m.state = mutexFree
+}
+
+// rwMutexState is a type to represent different states of a read-write mutex.
+type rwMutexState uint64
+
+// enumeration of different rwMutex states.
+const (
+ rwMutexFree rwMutexState = iota
+ rwMutexShared
+ rwMutexExclusive
+)
+
+// fakeRWMutex is an abstract representation of a read-write mutex.
+type fakeRWMutex struct {
+ // clock records the logical time of the last access to the
+ // read-write mutex.
+ clock clock
+ // state records the state of the read-write mutex.
+ state rwMutexState
+ // nreaders records the number of readers.
+ nreaders int
+}
+
+// newFakeRWMutex is the fakeRWMutex factory.
+func newFakeRWMutex(clock clock) *fakeRWMutex {
+ return &fakeRWMutex{clock: clock.clone()}
+}
+
+// exclusive checks if the read-write mutex is exclusive.
+func (rw *fakeRWMutex) exclusive() bool {
+ return rw.state == rwMutexExclusive
+}
+
+// free checks if the read-write mutex is free.
+func (rw *fakeRWMutex) free() bool {
+ return rw.state == rwMutexFree
+}
+
+// shared checks if the read-write mutex is shared.
+func (rw *fakeRWMutex) shared() bool {
+ return rw.state == rwMutexShared
+}
+
+// lock models the action of read-locking or write-locking the
+// read-write mutex.
+func (rw *fakeRWMutex) lock(read bool) {
+ if read {
+ if rw.state == rwMutexExclusive {
+ panic("Read-locking a read-write mutex that is write-locked.")
+ }
+ if rw.state == rwMutexFree {
+ rw.state = rwMutexShared
+ }
+ rw.nreaders++
+ } else {
+ if rw.state != rwMutexFree {
+ panic("Write-locking a read-write mutex that is not free.")
+ }
+ rw.state = rwMutexExclusive
+ }
+}
+
+// unlock models the action of unlocking the read-write mutex.
+func (rw *fakeRWMutex) unlock(read bool) {
+ if read {
+ if rw.state != rwMutexShared {
+ panic("Read-unlocking a read-write mutex that is not read-locked.")
+ }
+ rw.nreaders--
+ if rw.nreaders == 0 {
+ rw.state = rwMutexFree
+ }
+ } else {
+ if rw.state != rwMutexExclusive {
+ panic("Write-unlocking a read-write mutex that is not write-locked.")
+ }
+ rw.state = rwMutexFree
+ }
+}
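
The state machine modeled by fakeRWMutex can be summarized with a short sketch (illustrative only, not part of this change, assumed to sit inside the concurrency package): read-locks move the state from free to shared and are counted, the state only returns to free once the last reader unlocks, and write-locks require the free state.

func exampleRWMutexStates() {
	rw := newFakeRWMutex(newClock())
	rw.lock(true)   // first reader: free -> shared, nreaders == 1
	rw.lock(true)   // second reader: stays shared, nreaders == 2
	rw.unlock(true) // nreaders == 1, still shared
	rw.unlock(true) // last reader releases: shared -> free
	rw.lock(false)  // writer: free -> exclusive
	rw.unlock(false)
}
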
diff --git a/profiles/internal/testing/concurrency/mutex_test.go b/profiles/internal/testing/concurrency/mutex_test.go
new file mode 100644
index 0000000..9900834
--- /dev/null
+++ b/profiles/internal/testing/concurrency/mutex_test.go
@@ -0,0 +1,161 @@
+// concurrency_test is a simple test of the framework for systematic
+// testing of concurrency.
+package concurrency_test
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "testing"
+ "time"
+
+ "v.io/x/lib/vlog"
+ "v.io/x/ref/profiles/internal/testing/concurrency"
+ "v.io/x/ref/profiles/internal/testing/concurrency/sync"
+)
+
+var m sync.Mutex
+
+// createMutexSet returns a set of thread identifiers that matches the
+// logic of mutexThreadClosure.
+func createMutexSet(n int) map[int]bool {
+ locks := make(map[int]bool)
+ for i := 1; i <= n; i++ {
+ locks[i] = true
+ }
+ return locks
+}
+
+// generateMutexOutputs generates all legal outputs of sequencing
+// calls to m.Lock() and m.Unlock(). The input identifies the threads
+// that wish to invoke these functions.
+func generateMutexOutputs(locks map[int]bool) []string {
+ if length(locks) == 0 {
+ return []string{""}
+ }
+ result := make([]string, 0)
+ for lock, ok := range locks {
+ if ok {
+ locks[lock] = false
+ for _, s := range generateMutexOutputs(locks) {
+ result = append(result, fmt.Sprintf("%d:Lock()%d:Unlock()%s", lock, lock, s))
+ }
+ locks[lock] = true
+ }
+ }
+ return result
+}
+
+// mutexThreadClosure folds the input arguments inside of the function body
+// as the testing framework only supports functions with no arguments.
+func mutexThreadClosure(t *testing.T, n, max int, out *os.File) func() {
+ return func() {
+ defer concurrency.Exit()
+ if n < max {
+ child := mutexThreadClosure(t, n+1, max, out)
+ concurrency.Start(child)
+ }
+ m.Lock()
+ fmt.Fprintf(out, "%d:Lock()", n)
+ m.Unlock()
+ fmt.Fprintf(out, "%d:Unlock()", n)
+ }
+}
+
+// TestMutex runs mutexThreadClosure() without systematically testing
+// concurrency.
+func TestMutex(t *testing.T) {
+ for n := 2; n < 6; n++ {
+ thread := mutexThreadClosure(t, 1, n, nil)
+ thread()
+ }
+}
+
+// TestMutexExplore runs mutexThreadClosure() using the framework for systematic
+// testing of concurrency, checking that the exploration explores the
+// correct number of interleavings.
+func TestMutexExplore(t *testing.T) {
+ for n := 2; n < 6; n++ {
+ out, err := ioutil.TempFile("", "")
+ if err != nil {
+ t.Fatalf("TempFile() failed: %v", err)
+ }
+ defer os.Remove(out.Name())
+ defer out.Close()
+ body := mutexThreadClosure(t, 1, n, out)
+ tester := concurrency.Init(setup, body, cleanupClosure(out))
+ defer concurrency.Finish()
+ niterations, err := tester.Explore()
+ if err != nil {
+ t.Fatalf("Unexpected error encountered: %v", err)
+ }
+ outputs := processOutput(t, out)
+ expectedOutputs := generateMutexOutputs(createMutexSet(n))
+ checkExpectedOutputs(t, outputs, expectedOutputs)
+ checkUnexpectedOutputs(t, outputs, expectedOutputs)
+ vlog.VI(1).Infof("Explored %v iterations.", niterations)
+ }
+}
+
+// TestMutexExploreN runs mutexThreadClosure() using the framework for
+// systematic testing of concurrency, checking that the exploration
+// explores at most the given number of interleavings.
+func TestMutexExploreN(t *testing.T) {
+ for n := 2; n < 6; n++ {
+ out, err := ioutil.TempFile("", "")
+ if err != nil {
+ t.Fatalf("TempFile() failed: %v", err)
+ }
+ defer os.Remove(out.Name())
+ defer out.Close()
+ body := mutexThreadClosure(t, 1, n, out)
+ tester := concurrency.Init(setup, body, cleanupClosure(out))
+ defer concurrency.Finish()
+ stopAfter := 100
+ niterations, err := tester.ExploreN(stopAfter)
+ if err != nil {
+ t.Fatalf("Unexpected error encountered: %v", err)
+ }
+ outputs := processOutput(t, out)
+ expectedOutputs := generateMutexOutputs(createMutexSet(n))
+ checkUnexpectedOutputs(t, outputs, expectedOutputs)
+ if niterations < stopAfter {
+ checkExpectedOutputs(t, outputs, expectedOutputs)
+ }
+ if niterations > stopAfter {
+ t.Fatalf("Unexpected number of iterations: expected at most %v, got %v", stopAfter, niterations)
+ }
+ vlog.VI(1).Infof("Explored %v iterations.", niterations)
+ }
+}
+
+// TestMutexExploreFor runs mutexThreadClosure() using the framework
+// for systematic testing of concurrency, checking that the
+// exploration respects the given "soft" deadline.
+func TestMutexExploreFor(t *testing.T) {
+ for n := 2; n < 6; n++ {
+ out, err := ioutil.TempFile("", "")
+ if err != nil {
+ t.Fatalf("TempFile() failed: %v", err)
+ }
+ defer os.Remove(out.Name())
+ defer out.Close()
+ body := mutexThreadClosure(t, 1, n, out)
+ tester := concurrency.Init(setup, body, cleanupClosure(out))
+ defer concurrency.Finish()
+ start := time.Now()
+ deadline := 10 * time.Millisecond
+ niterations, err := tester.ExploreFor(deadline)
+ end := time.Now()
+ if err != nil {
+ t.Fatalf("Unexpected error encountered: %v", err)
+ }
+ outputs := processOutput(t, out)
+ expectedOutputs := generateMutexOutputs(createMutexSet(n))
+ checkUnexpectedOutputs(t, outputs, expectedOutputs)
+ if start.Add(deadline).After(end) {
+ checkExpectedOutputs(t, outputs, expectedOutputs)
+ }
+ vlog.VI(1).Infof("Explored %v iterations.", niterations)
+ }
+}
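
As a sanity check on the expected-output generation above: for two threads the only legal serializations are the two orders in which each thread's Lock()/Unlock() pair completes before the other's, and that is what generateMutexOutputs produces. The helper below is illustrative only and not part of this change:

// exampleExpectedOutputs shows the generated interleavings for two threads.
func exampleExpectedOutputs() []string {
	return generateMutexOutputs(createMutexSet(2))
	// => "1:Lock()1:Unlock()2:Lock()2:Unlock()" and
	//    "2:Lock()2:Unlock()1:Lock()1:Unlock()" (slice order may vary)
}
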
diff --git a/profiles/internal/testing/concurrency/request.go b/profiles/internal/testing/concurrency/request.go
new file mode 100644
index 0000000..d6450a6
--- /dev/null
+++ b/profiles/internal/testing/concurrency/request.go
@@ -0,0 +1,390 @@
+package concurrency
+
+import (
+ "sync"
+)
+
+// request is an interface to describe a scheduling request.
+type request interface {
+ // enabled determines whether the given program transition can be
+ // executed without blocking in the given context.
+ enabled(ctx *context) bool
+ // execute models the effect of advancing the execution of the
+ // calling thread.
+ execute(ready chan struct{}, e *execution)
+ // kind returns the kind of the program transition of the calling
+ // thread.
+ kind() transitionKind
+ // process handles initial processing of an incoming
+ // scheduling request, making sure the calling thread is suspended
+ // until the user-level scheduler decides to advance its execution.
+ process(e *execution)
+ // readSet records the identifiers of the abstract resources read by
+ // the program transition of the calling thread.
+ readSet() resourceSet
+ // writeSet records the identifiers of the abstract resources
+ // written by the program transition of the calling thread.
+ writeSet() resourceSet
+}
+
+type defaultRequest struct {
+ request
+ done chan struct{}
+}
+
+func (r defaultRequest) enabled(ctx *context) bool {
+ return true
+}
+
+func (r defaultRequest) execute(ready chan struct{}, e *execution) {
+ <-ready
+ close(r.done)
+ close(e.done)
+}
+
+func (r defaultRequest) process(e *execution) {
+ thread := e.findThread(e.activeTID)
+ thread.clock[e.activeTID]++
+ ready := make(chan struct{})
+ thread.ready = ready
+ thread.request = r
+ go r.execute(ready, e)
+}
+
+func (r defaultRequest) kind() transitionKind {
+ return tNil
+}
+
+func (r defaultRequest) readSet() resourceSet {
+ return newResourceSet()
+}
+
+func (r defaultRequest) writeSet() resourceSet {
+ return newResourceSet()
+}
+
+// goRequest is to be called before creating a new goroutine through "go
+// fn(tid)" to obtain a thread identifier to supply to the goroutine
+// that is about to be created. This request is a part of the
+// implementation of the Start() function provided by this package.
+type goRequest struct {
+ defaultRequest
+ reply chan TID
+}
+
+func (r goRequest) process(e *execution) {
+ e.nthreads++
+ tid := e.nextTID()
+ thread := e.findThread(e.activeTID)
+ newThread := newThread(tid, thread.clock)
+ newThread.clock[tid] = 0
+ e.threads[tid] = newThread
+ r.reply <- tid
+ e.nrequests--
+ close(r.done)
+}
+
+// goParentRequest is to be called right after a new goroutine is created
+// through "go fn(tid)" to prevent the race between the parent and the
+// child thread. This request is a part of the implementation of the
+// Start() function provided by this package.
+type goParentRequest struct {
+ defaultRequest
+}
+
+func (r goParentRequest) kind() transitionKind {
+ return tGoParent
+}
+
+func (r goParentRequest) process(e *execution) {
+ thread := e.findThread(e.activeTID)
+ thread.clock[e.activeTID]++
+ ready := make(chan struct{})
+ thread.ready = ready
+ thread.request = r
+ go r.execute(ready, e)
+}
+
+// goChildRequest is to be called as the first thing inside of a new
+// goroutine to prevent the race between the parent and the child
+// thread. This request is a part of the implementation of the Start()
+// function provided by this package.
+type goChildRequest struct {
+ defaultRequest
+ tid TID
+}
+
+func (r goChildRequest) kind() transitionKind {
+ return tGoChild
+}
+
+func (r goChildRequest) process(e *execution) {
+ thread := e.findThread(r.tid)
+ thread.clock[r.tid]++
+ ready := make(chan struct{})
+ thread.ready = ready
+ thread.request = r
+ go r.execute(ready, e)
+}
+
+// goExitRequest is to be called as the last thing inside of the body
+// of a test and any goroutine that the test spawns to inform the
+// testing framework about the termination of a thread. This request
+// implements the Exit() function provided by this package.
+type goExitRequest struct {
+ defaultRequest
+}
+
+func (r goExitRequest) execute(ready chan struct{}, e *execution) {
+ <-ready
+ e.nthreads--
+ delete(e.threads, e.activeTID)
+ close(r.done)
+ close(e.done)
+}
+
+func (r goExitRequest) kind() transitionKind {
+ return tGoExit
+}
+
+func (r goExitRequest) process(e *execution) {
+ thread := e.findThread(e.activeTID)
+ thread.clock[e.activeTID]++
+ ready := make(chan struct{})
+ thread.ready = ready
+ thread.request = r
+ go r.execute(ready, e)
+}
+
+// mutexLockRequest is to be called to schedule a mutex lock. This request
+// implements the MutexLock() function provided by this package.
+type mutexLockRequest struct {
+ defaultRequest
+ mutex *sync.Mutex
+}
+
+func (r mutexLockRequest) enabled(ctx *context) bool {
+ m, ok := ctx.mutexes[r.mutex]
+ return !ok || m.free()
+}
+
+func (r mutexLockRequest) execute(ready chan struct{}, e *execution) {
+ <-ready
+ thread := e.findThread(e.activeTID)
+ m, ok := e.ctx.mutexes[r.mutex]
+ if !ok {
+ m = newFakeMutex(thread.clock)
+ e.ctx.mutexes[r.mutex] = m
+ }
+ thread.clock.merge(m.clock)
+ m.clock.merge(thread.clock)
+ m.lock()
+ close(r.done)
+ close(e.done)
+}
+
+func (r mutexLockRequest) kind() transitionKind {
+ return tMutexLock
+}
+
+func (r mutexLockRequest) process(e *execution) {
+ thread := e.findThread(e.activeTID)
+ thread.clock[e.activeTID]++
+ ready := make(chan struct{})
+ thread.ready = ready
+ thread.request = r
+ go r.execute(ready, e)
+}
+
+func (r mutexLockRequest) readSet() resourceSet {
+ set := newResourceSet()
+ set[r.mutex] = struct{}{}
+ return set
+}
+
+func (r mutexLockRequest) writeSet() resourceSet {
+ set := newResourceSet()
+ set[r.mutex] = struct{}{}
+ return set
+}
+
+// mutexUnlockRequest is to be called to schedule a mutex unlock. This
+// request implements the MutexUnlock() function provided by this
+// package.
+type mutexUnlockRequest struct {
+ defaultRequest
+ mutex *sync.Mutex
+}
+
+func (r mutexUnlockRequest) enabled(ctx *context) bool {
+ m, ok := ctx.mutexes[r.mutex]
+ if !ok {
+ panic("Mutex does not exist.")
+ }
+ return m.locked()
+}
+
+func (r mutexUnlockRequest) execute(ready chan struct{}, e *execution) {
+ <-ready
+ m, ok := e.ctx.mutexes[r.mutex]
+ if !ok {
+ panic("Mutex not found.")
+ }
+ thread := e.findThread(e.activeTID)
+ thread.clock.merge(m.clock)
+ m.clock.merge(thread.clock)
+ m.unlock()
+ close(r.done)
+ close(e.done)
+}
+
+func (r mutexUnlockRequest) kind() transitionKind {
+ return tMutexUnlock
+}
+
+func (r mutexUnlockRequest) process(e *execution) {
+ thread := e.findThread(e.activeTID)
+ thread.clock[e.activeTID]++
+ ready := make(chan struct{})
+ thread.ready = ready
+ thread.request = r
+ go r.execute(ready, e)
+}
+
+func (r mutexUnlockRequest) readSet() resourceSet {
+ set := newResourceSet()
+ set[r.mutex] = struct{}{}
+ return set
+}
+
+func (r mutexUnlockRequest) writeSet() resourceSet {
+ set := newResourceSet()
+ set[r.mutex] = struct{}{}
+ return set
+}
+
+// rwMutexLockRequest is to be called to schedule a read-write mutex
+// lock. This request implements the RWMutexLock() function provided
+// by this package.
+type rwMutexLockRequest struct {
+ defaultRequest
+ read bool
+ rwMutex *sync.RWMutex
+}
+
+func (r rwMutexLockRequest) enabled(ctx *context) bool {
+ rw, ok := ctx.rwMutexes[r.rwMutex]
+ if r.read {
+ return !ok || rw.free() || rw.shared()
+ } else {
+ return !ok || rw.free()
+ }
+}
+
+func (r rwMutexLockRequest) execute(ready chan struct{}, e *execution) {
+ <-ready
+ thread := e.findThread(e.activeTID)
+ rw, ok := e.ctx.rwMutexes[r.rwMutex]
+ if !ok {
+ rw = newFakeRWMutex(thread.clock)
+ e.ctx.rwMutexes[r.rwMutex] = rw
+ }
+ thread.clock.merge(rw.clock)
+ rw.clock.merge(thread.clock)
+ rw.lock(r.read)
+ close(r.done)
+ close(e.done)
+}
+
+func (r rwMutexLockRequest) kind() transitionKind {
+ if r.read {
+ return tRWMutexRLock
+ } else {
+ return tRWMutexLock
+ }
+}
+
+func (r rwMutexLockRequest) process(e *execution) {
+ thread := e.findThread(e.activeTID)
+ thread.clock[e.activeTID]++
+ ready := make(chan struct{})
+ thread.ready = ready
+ thread.request = r
+ go r.execute(ready, e)
+}
+
+func (r rwMutexLockRequest) readSet() resourceSet {
+ set := newResourceSet()
+ set[r.rwMutex] = struct{}{}
+ return set
+}
+
+func (r rwMutexLockRequest) writeSet() resourceSet {
+ set := newResourceSet()
+ set[r.rwMutex] = struct{}{}
+ return set
+}
+
+// rwMutexUnlockRequest is to be called to schedule a read-write mutex
+// unlock. This request implements the RWMutexUnlock() function
+// provided by this package.
+type rwMutexUnlockRequest struct {
+ defaultRequest
+ read bool
+ rwMutex *sync.RWMutex
+}
+
+func (r rwMutexUnlockRequest) enabled(ctx *context) bool {
+ rw, ok := ctx.rwMutexes[r.rwMutex]
+ if !ok {
+ panic("Read-write mutex does not exist.")
+ }
+ if r.read {
+ return rw.shared()
+ } else {
+ return rw.exclusive()
+ }
+}
+
+func (r rwMutexUnlockRequest) execute(ready chan struct{}, e *execution) {
+ <-ready
+ rw, ok := e.ctx.rwMutexes[r.rwMutex]
+ if !ok {
+ panic("Read-write mutex not found.")
+ }
+ thread := e.findThread(e.activeTID)
+ thread.clock.merge(rw.clock)
+ rw.clock.merge(thread.clock)
+ rw.unlock(r.read)
+ close(r.done)
+ close(e.done)
+}
+
+func (r rwMutexUnlockRequest) kind() transitionKind {
+ if r.read {
+ return tRWMutexRUnlock
+ } else {
+ return tRWMutexUnlock
+ }
+}
+
+func (r rwMutexUnlockRequest) process(e *execution) {
+ thread := e.findThread(e.activeTID)
+ thread.clock[e.activeTID]++
+ ready := make(chan struct{})
+ thread.ready = ready
+ thread.request = r
+ go r.execute(ready, e)
+}
+
+func (r rwMutexUnlockRequest) readSet() resourceSet {
+ set := newResourceSet()
+ set[r.rwMutex] = struct{}{}
+ return set
+}
+
+func (r rwMutexUnlockRequest) writeSet() resourceSet {
+ set := newResourceSet()
+ set[r.rwMutex] = struct{}{}
+ return set
+}
diff --git a/profiles/internal/testing/concurrency/resource.go b/profiles/internal/testing/concurrency/resource.go
new file mode 100644
index 0000000..fc00e69
--- /dev/null
+++ b/profiles/internal/testing/concurrency/resource.go
@@ -0,0 +1,12 @@
+package concurrency
+
+// resourceKey represents an identifier of an abstract resource.
+type resourceKey interface{}
+
+// resourceSet represents a set of abstract resources.
+type resourceSet map[resourceKey]struct{}
+
+// newResourceSet is the resourceSet factory.
+func newResourceSet() resourceSet {
+ return make(resourceSet)
+}
diff --git a/profiles/internal/testing/concurrency/rwmutex_test.go b/profiles/internal/testing/concurrency/rwmutex_test.go
new file mode 100644
index 0000000..41d56cb
--- /dev/null
+++ b/profiles/internal/testing/concurrency/rwmutex_test.go
@@ -0,0 +1,246 @@
+// concurrency_test is a simple test of the framework for systematic
+// testing of concurrency.
+package concurrency_test
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "testing"
+ "time"
+
+ "v.io/x/lib/vlog"
+ "v.io/x/ref/profiles/internal/testing/concurrency"
+ "v.io/x/ref/profiles/internal/testing/concurrency/sync"
+)
+
+var rw sync.RWMutex
+
+// createRWMutexSets returns sets of thread identifiers that match the
+// logic of rwMutexThreadClosure.
+func createRWMutexSets(n int) (map[int]bool, map[int]bool, map[int]bool, map[int]bool, map[int]bool) {
+ locks := make(map[int]bool)
+ rlocks := make(map[int]bool)
+ runlocks := make(map[int]bool)
+ llocks := make(map[int]bool)
+ lunlocks := make(map[int]bool)
+ for i := 1; i <= n; i++ {
+ switch i % 3 {
+ case 0:
+ locks[i] = true
+ case 1:
+ rlocks[i] = true
+ runlocks[i] = true
+ case 2:
+ llocks[i] = true
+ lunlocks[i] = true
+ }
+ }
+ return locks, rlocks, runlocks, llocks, lunlocks
+}
+
+// generateRWMutexOutputs generates all legal outputs of sequencing calls to
+// rw.Lock(), rw.Unlock(), rw.RLock(), rw.RUnlock(),
+// rw.RLocker().Lock(), and rw.RLocker().Unlock(). The inputs identify
+// the threads that wish to invoke these functions.
+func generateRWMutexOutputs(locks, rlocks, runlocks, llocks, lunlocks map[int]bool) []string {
+ if length(locks) == 0 && length(rlocks) == 0 && length(runlocks) == 0 && length(llocks) == 0 && length(lunlocks) == 0 {
+ // Base case.
+ return []string{""}
+ }
+ result := make([]string, 0)
+ if length(rlocks) == length(runlocks) && length(llocks) == length(lunlocks) {
+ // rw.Lock() + rw.Unlock() can happen next if the previous calls
+ // to rw.RLock(), rw.RUnlock(), rw.RLocker().Lock(), and
+ // rw.RLocker().Unlock() are balanced.
+ for lock, ok := range locks {
+ if ok {
+ locks[lock] = false
+ for _, s := range generateRWMutexOutputs(locks, rlocks, runlocks, llocks, lunlocks) {
+ result = append(result, fmt.Sprintf("%d:Lock()%d:Unlock()%s", lock, lock, s))
+ }
+ locks[lock] = true
+ }
+ }
+ }
+ for rlock, ok := range rlocks {
+ if ok {
+ // rw.RLock() can happen next any time.
+ rlocks[rlock] = false
+ for _, s := range generateRWMutexOutputs(locks, rlocks, runlocks, llocks, lunlocks) {
+ result = append(result, fmt.Sprintf("%d:RLock()%s", rlock, s))
+ }
+ rlocks[rlock] = true
+ }
+ }
+ for runlock, ok := range runlocks {
+ if ok {
+ if ok := rlocks[runlock]; !ok {
+ // rw.RUnlock() can happen next as long as the same thread
+ // already invoked rw.RLock().
+ runlocks[runlock] = false
+ for _, s := range generateRWMutexOutputs(locks, rlocks, runlocks, llocks, lunlocks) {
+ result = append(result, fmt.Sprintf("%d:RUnlock()%s", runlock, s))
+ }
+ runlocks[runlock] = true
+ }
+ }
+ }
+ for llock, ok := range llocks {
+ if ok {
+ // rw.RLocker().Lock() can happen next any time.
+ llocks[llock] = false
+ for _, s := range generateRWMutexOutputs(locks, rlocks, runlocks, llocks, lunlocks) {
+ result = append(result, fmt.Sprintf("%d:RLocker().Lock()%s", llock, s))
+ }
+ llocks[llock] = true
+ }
+ }
+ for lunlock, ok := range lunlocks {
+ if ok {
+ if ok := llocks[lunlock]; !ok {
+ // rw.RLocker().Unlock() can happen next as long as the same thread
+ // already invoked rw.RLocker().Lock().
+ lunlocks[lunlock] = false
+ for _, s := range generateRWMutexOutputs(locks, rlocks, runlocks, llocks, lunlocks) {
+ result = append(result, fmt.Sprintf("%d:RLocker().Unlock()%s", lunlock, s))
+ }
+ lunlocks[lunlock] = true
+ }
+ }
+ }
+ return result
+}
+
+// rwMutexThreadClosure folds the input arguments inside of the
+// function body as the testing framework only supports functions with
+// no arguments.
+func rwMutexThreadClosure(t *testing.T, n, max int, out *os.File) func() {
+ return func() {
+ defer concurrency.Exit()
+ if n < max {
+ child := rwMutexThreadClosure(t, n+1, max, out)
+ concurrency.Start(child)
+ }
+ switch n % 3 {
+ case 0:
+ rw.Lock()
+ fmt.Fprintf(out, "%d:Lock()", n)
+ case 1:
+ rw.RLock()
+ fmt.Fprintf(out, "%d:RLock()", n)
+ case 2:
+ rw.RLocker().Lock()
+ fmt.Fprintf(out, "%d:RLocker().Lock()", n)
+ }
+ switch n % 3 {
+ case 0:
+ rw.Unlock()
+ fmt.Fprintf(out, "%d:Unlock()", n)
+ case 1:
+ rw.RUnlock()
+ fmt.Fprintf(out, "%d:RUnlock()", n)
+ case 2:
+ rw.RLocker().Unlock()
+ fmt.Fprintf(out, "%d:RLocker().Unlock()", n)
+ }
+ }
+}
+
+// TestRWMutex runs rwMutexThreadClosure() without systematically
+// testing concurrency.
+func TestRWMutex(t *testing.T) {
+ for n := 2; n < 5; n++ {
+ thread := rwMutexThreadClosure(t, 1, n, nil)
+ thread()
+ }
+}
+
+// TestRWMutexExplore runs rwMutexThreadClosure() using the framework
+// for systematic testing of concurrency, checking that the
+// exploration explores the correct number of interleavings.
+func TestRWMutexExplore(t *testing.T) {
+ for n := 2; n < 5; n++ {
+ out, err := ioutil.TempFile("", "")
+ if err != nil {
+ t.Fatalf("TempFile() failed: %v", err)
+ }
+ defer os.Remove(out.Name())
+ defer out.Close()
+ body := rwMutexThreadClosure(t, 1, n, out)
+ tester := concurrency.Init(setup, body, cleanupClosure(out))
+ defer concurrency.Finish()
+ niterations, err := tester.Explore()
+ if err != nil {
+ t.Fatalf("Unexpected error encountered: %v", err)
+ }
+ outputs := processOutput(t, out)
+ expectedOutputs := generateRWMutexOutputs(createRWMutexSets(n))
+ checkExpectedOutputs(t, outputs, expectedOutputs)
+ checkUnexpectedOutputs(t, outputs, expectedOutputs)
+ vlog.VI(1).Infof("Explored %v iterations.", niterations)
+ }
+}
+
+// TestRWMutexExploreN runs rwMutexThreadClosure() using the framework
+// for systematic testing of concurrency, checking that the
+// exploration explores at most the given number of interleavings.
+func TestRWMutexExploreN(t *testing.T) {
+ stopAfter := 100
+ for n := 2; n < 5; n++ {
+ out, err := ioutil.TempFile("", "")
+ if err != nil {
+ t.Fatalf("TempFile() failed: %v", err)
+ }
+ defer os.Remove(out.Name())
+ defer out.Close()
+ body := rwMutexThreadClosure(t, 1, n, out)
+ tester := concurrency.Init(setup, body, cleanupClosure(out))
+ defer concurrency.Finish()
+ niterations, err := tester.ExploreN(stopAfter)
+ if err != nil {
+ t.Fatalf("Unexpected error encountered: %v", err)
+ }
+ outputs := processOutput(t, out)
+ expectedOutputs := generateRWMutexOutputs(createRWMutexSets(n))
+ checkUnexpectedOutputs(t, outputs, expectedOutputs)
+ if niterations < stopAfter {
+ checkExpectedOutputs(t, outputs, expectedOutputs)
+ }
+ if niterations > stopAfter {
+ t.Fatalf("Unexpected number of iterations: expected at most %v, got %v", stopAfter, niterations)
+ }
+ vlog.VI(1).Infof("Explored %v iterations.", niterations)
+ }
+}
+
+// TestRWMutexExploreFor runs rwMutexThreadClosure() using the
+// framework for systematic testing of concurrency, checking that the
+// exploration respects the given "soft" deadline.
+func TestRWMutexExploreFor(t *testing.T) {
+ deadline := 10 * time.Millisecond
+ for n := 2; n < 5; n++ {
+ out, err := ioutil.TempFile("", "")
+ if err != nil {
+ t.Fatalf("TempFile() failed: %v", err)
+ }
+ defer os.Remove(out.Name())
+ defer out.Close()
+ body := rwMutexThreadClosure(t, 1, n, out)
+ tester := concurrency.Init(setup, body, cleanupClosure(out))
+ defer concurrency.Finish()
+ start := time.Now()
+ niterations, err := tester.ExploreFor(deadline)
+ if err != nil {
+ t.Fatalf("Unexpected error encountered: %v", err)
+ }
+ end := time.Now()
+ outputs := processOutput(t, out)
+ expectedOutputs := generateRWMutexOutputs(createRWMutexSets(n))
+ checkUnexpectedOutputs(t, outputs, expectedOutputs)
+ if start.Add(deadline).After(end) {
+ checkExpectedOutputs(t, outputs, expectedOutputs)
+ }
+ vlog.VI(1).Infof("Explored %v iterations.", niterations)
+ }
+}
diff --git a/profiles/internal/testing/concurrency/stack.go b/profiles/internal/testing/concurrency/stack.go
new file mode 100644
index 0000000..42faef3
--- /dev/null
+++ b/profiles/internal/testing/concurrency/stack.go
@@ -0,0 +1,60 @@
+package concurrency
+
+import (
+ "errors"
+ "sort"
+)
+
+// stack is an implementation of the stack data structure.
+type stack struct {
+ contents []*state
+}
+
+// IncreasingDepth is used to sort states in the increasing order of
+// their depth.
+type IncreasingDepth []*state
+
+// SORT INTERFACE IMPLEMENTATION
+
+func (states IncreasingDepth) Len() int {
+ return len(states)
+}
+func (states IncreasingDepth) Less(i, j int) bool {
+ return states[i].depth < states[j].depth
+}
+func (states IncreasingDepth) Swap(i, j int) {
+ states[i], states[j] = states[j], states[i]
+}
+
+// Empty checks if the stack is empty.
+func (s *stack) Empty() bool {
+ return len(s.contents) == 0
+}
+
+// Length returns the length of the stack.
+func (s *stack) Length() int {
+ return len(s.contents)
+}
+
+// Pop removes and returns the top element of the stack. If the stack
+// is empty, an error is returned.
+func (s *stack) Pop() (*state, error) {
+ l := len(s.contents)
+ if l > 0 {
+ x := s.contents[l-1]
+ s.contents = s.contents[:l-1]
+ return x, nil
+ }
+ return nil, errors.New("Stack is empty.")
+}
+
+// Push adds a new element to the top of the stack.
+func (s *stack) Push(value *state) {
+ s.contents = append(s.contents, value)
+}
+
+// Sort sorts the elements of the stack in the increasing order of
+// their depth, so that the deepest state ends up at the top of the stack.
+func (s *stack) Sort() {
+ sort.Sort(IncreasingDepth(s.contents))
+}
diff --git a/profiles/internal/testing/concurrency/state.go b/profiles/internal/testing/concurrency/state.go
new file mode 100644
index 0000000..f4969a8
--- /dev/null
+++ b/profiles/internal/testing/concurrency/state.go
@@ -0,0 +1,242 @@
+package concurrency
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+)
+
+// state records a state of the exploration of the space of all
+// possible program states a concurrent test can encounter. The states
+// of the exploration are organized using a tree data structure
+// referred to as the "execution tree". Nodes of the execution tree
+// represent different abstract program states and edges represent the
+// different scheduling decisions that can be made at those states. A
+// path from the root to a leaf thus uniquely represents an execution
+// as a serialization of the scheduling decisions along the execution.
+type state struct {
+ // depth identifies the depth of this node in the execution tree.
+ depth int
+ // parent records the pointer to the parent abstract state.
+ parent *state
+ // children maps threads to the abstract states that correspond to
+ // executing the next transitions of that thread.
+ children map[TID]*state
+ // explored records whether the subtree rooted at this state has
+ // been fully explored.
+ explored bool
+ // seeded records whether the state has been used to seed an
+ // exploration strategy.
+ seeded bool
+ // visited records whether the state has been visited.
+ visited bool
+ // tid records the thread identifier of the thread whose transition
+ // led to this state.
+ tid TID
+ // kind records the type of the transition that led to this state.
+ kind transitionKind
+ // clock records the logical time of this state.
+ clock clock
+ // enabled records whether the transition that leads to this state
+ // is enabled.
+ enabled bool
+ // readSet records the identifiers of the abstract resources read by
+ // the transition that led to this state.
+ readSet resourceSet
+ // writeSet records the identifiers of the abstract resources
+ // written by the transition that led to this state.
+ writeSet resourceSet
+}
+
+// newState is the state factory.
+func newState(parent *state, tid TID) *state {
+ depth := 0
+ if parent != nil {
+ depth = parent.depth + 1
+ }
+ return &state{
+ depth: depth,
+ children: make(map[TID]*state),
+ parent: parent,
+ tid: tid,
+ readSet: newResourceSet(),
+ writeSet: newResourceSet(),
+ clock: newClock(),
+ }
+}
+
+// addBranch adds a branch described through the given sequence of
+// choices to the execution tree rooted at this state.
+func (s *state) addBranch(branch []*choice, seeds *stack) error {
+ if len(branch) == 0 {
+ s.explored = true
+ return nil
+ }
+ choice := branch[0]
+ if len(s.children) != 0 {
+ // Check for the absence of divergence.
+ if err := s.checkDivergence(choice.transitions); err != nil {
+ return err
+ }
+ } else {
+ // Add new children.
+ for _, transition := range choice.transitions {
+ child := newState(s, transition.tid)
+ child.clock = transition.clock
+ child.enabled = transition.enabled
+ child.kind = transition.kind
+ child.readSet = transition.readSet
+ child.writeSet = transition.writeSet
+ s.children[child.tid] = child
+ }
+ }
+ next, found := s.children[choice.next]
+ if !found {
+ return errors.New(fmt.Sprintf("invalid choice (no transition for thread %d).", choice.next))
+ }
+ next.visited = true
+ branch = branch[1:]
+ if err := next.addBranch(branch, seeds); err != nil {
+ return err
+ }
+ s.collectSeeds(choice.next, seeds)
+ s.compactTree()
+ return nil
+}
+
+// approach identifies and enforces exploration of transition(s)
+// originating from this state that near execution of the transition
+// leading to the given state.
+func (s *state) approach(other *state, seeds *stack) {
+ candidates := make([]TID, 0)
+ for tid, child := range s.children {
+ if child.enabled && child.happensBefore(other) {
+ candidates = append(candidates, tid)
+ }
+ }
+ if len(candidates) == 0 {
+ for _, child := range s.children {
+ if child.enabled && !child.seeded && !child.visited {
+ seeds.Push(child)
+ child.seeded = true
+ }
+ }
+ } else {
+ for _, tid := range candidates {
+ child := s.children[tid]
+ if child.seeded || child.visited {
+ return
+ }
+ }
+ tid := candidates[0]
+ child := s.children[tid]
+ seeds.Push(child)
+ child.seeded = true
+ }
+}
+
+// checkDivergence checks whether the given transitions match the
+// children of this state, reporting any divergence as an error.
+func (s *state) checkDivergence(transitions map[TID]*transition) error {
+ if len(s.children) != len(transitions) {
+ return errors.New(fmt.Sprintf("divergence encountered (expected %v, got %v)", len(s.children), len(transitions)))
+ }
+ for tid, t := range transitions {
+ child, found := s.children[tid]
+ if !found {
+ return errors.New(fmt.Sprintf("divergence encountered (no transition for thread %d)", tid))
+ }
+ if child.enabled != t.enabled {
+ return errors.New(fmt.Sprintf("divergence encountered (expected %v, got %v)", child.enabled, t.enabled))
+ }
+ if !child.clock.equals(t.clock) {
+ return errors.New(fmt.Sprintf("divergence encountered (expected %v, got %v)", child.clock, t.clock))
+ }
+ if !reflect.DeepEqual(child.readSet, t.readSet) {
+ return errors.New(fmt.Sprintf("divergence encountered (expected %v, got %v)", child.readSet, t.readSet))
+ }
+ if !reflect.DeepEqual(child.writeSet, t.writeSet) {
+ return errors.New(fmt.Sprintf("divergence encountered (expected %v, got %v)", child.writeSet, t.writeSet))
+ }
+ }
+ return nil
+}
+
+// collectSeeds uses dynamic partial order reduction
+// (http://users.soe.ucsc.edu/~cormac/papers/popl05.pdf) to identify
+// which alternative scheduling choices should be explored.
+func (s *state) collectSeeds(next TID, seeds *stack) {
+ for tid, child := range s.children {
+ if tid != next && !s.mayInterfereWith(child) && !s.happensBefore(child) {
+ continue
+ }
+ for handle := s; handle.parent != nil; handle = handle.parent {
+ if handle.mayInterfereWith(child) && !handle.happensBefore(child) {
+ handle.parent.approach(child, seeds)
+ }
+ }
+ }
+}
+
+// compactTree updates the exploration status of this state and
+// deallocates its children map if all of its children are explored.
+func (s *state) compactTree() {
+ for _, child := range s.children {
+ if (child.seeded || child.visited) && !child.explored {
+ return
+ }
+ }
+ s.children = nil
+ s.explored = true
+}
+
+// generateStrategy generates a sequence of scheduling decisions that
+// will steer an execution towards this state of the execution tree.
+func (s *state) generateStrategy() []TID {
+ if s.parent == nil {
+ return make([]TID, 0)
+ }
+ return append(s.parent.generateStrategy(), s.tid)
+}
+
+// happensBefore checks if the logical time of the transition that
+// leads to this state happened before (in the sense of Lamport's
+// happens-before relation) the logical time of the transition that
+// leads to the given state.
+func (s *state) happensBefore(other *state) bool {
+ return s.clock.happensBefore(other.clock)
+}
+
+// mayInterfereWith checks if the execution of the transition that
+// leads to this state may interfere with the execution of the
+// transition that leads to the given state.
+func (s *state) mayInterfereWith(other *state) bool {
+ if (s.kind == tMutexLock && other.kind == tMutexUnlock) ||
+ (s.kind == tMutexUnlock && other.kind == tMutexLock) {
+ return false
+ }
+ if (s.kind == tRWMutexLock && other.kind == tRWMutexUnlock) ||
+ (s.kind == tRWMutexUnlock && other.kind == tRWMutexLock) {
+ return false
+ }
+ if (s.kind == tRWMutexRLock && other.kind == tRWMutexUnlock) ||
+ (s.kind == tRWMutexRUnlock && other.kind == tRWMutexLock) {
+ return false
+ }
+ for k := range s.readSet {
+ if _, found := other.writeSet[k]; found {
+ return true
+ }
+ }
+ for k := range s.writeSet {
+ if _, found := other.readSet[k]; found {
+ return true
+ }
+ }
+ for k := range s.writeSet {
+ if _, found := other.writeSet[k]; found {
+ return true
+ }
+ }
+ return false
+}
diff --git a/profiles/internal/testing/concurrency/state_test.go b/profiles/internal/testing/concurrency/state_test.go
new file mode 100644
index 0000000..acc6315
--- /dev/null
+++ b/profiles/internal/testing/concurrency/state_test.go
@@ -0,0 +1,218 @@
+package concurrency
+
+import (
+ "testing"
+)
+
+// createBranch creates a branch of a simple execution tree used for
+// testing the state implementation. This branch emulates an execution
+// in which two threads compete for a mutex.
+func createBranch(i, j TID) []*choice {
+ choices := make([]*choice, 0)
+ set := resourceSet{"mutex": struct{}{}}
+
+ {
+ c := newChoice()
+ c.next = i
+ ti := &transition{
+ tid: i,
+ clock: newClock(),
+ enabled: true,
+ kind: tMutexLock,
+ readSet: set,
+ writeSet: set,
+ }
+ ti.clock[i] = 1
+ c.transitions[i] = ti
+ tj := &transition{
+ tid: j,
+ clock: newClock(),
+ enabled: true,
+ kind: tMutexLock,
+ readSet: set,
+ writeSet: set,
+ }
+ tj.clock[j] = 1
+ c.transitions[j] = tj
+ choices = append(choices, c)
+ }
+
+ {
+ c := newChoice()
+ c.next = i
+ ti := &transition{
+ tid: i,
+ clock: newClock(),
+ enabled: true,
+ kind: tMutexUnlock,
+ readSet: set,
+ writeSet: set,
+ }
+ ti.clock[i] = 2
+ c.transitions[i] = ti
+ tj := &transition{
+ tid: j,
+ clock: newClock(),
+ enabled: false,
+ kind: tMutexLock,
+ readSet: set,
+ writeSet: set,
+ }
+ tj.clock[j] = 1
+ c.transitions[j] = tj
+ choices = append(choices, c)
+ }
+
+ {
+ c := newChoice()
+ c.next = j
+ tj := &transition{
+ tid: j,
+ clock: newClock(),
+ enabled: true,
+ kind: tMutexLock,
+ readSet: set,
+ writeSet: set,
+ }
+ tj.clock[j] = 1
+ c.transitions[j] = tj
+ choices = append(choices, c)
+ }
+
+ {
+ c := newChoice()
+ c.next = j
+ tj := &transition{
+ tid: j,
+ clock: newClock(),
+ enabled: true,
+ kind: tMutexUnlock,
+ readSet: set,
+ writeSet: set,
+ }
+ tj.clock[i] = 2
+ tj.clock[j] = 2
+ c.transitions[j] = tj
+ choices = append(choices, c)
+ }
+
+ return choices
+}
+
+// TestCommon checks common operation of the state implementation.
+func TestCommon(t *testing.T) {
+ leftBranch := createBranch(0, 1)
+ rightBranch := createBranch(1, 0)
+ root := newState(nil, 0)
+ seeds := &stack{}
+ if err := root.addBranch(leftBranch, seeds); err != nil {
+ t.Fatalf("addBranch() failed: %v", err)
+ }
+ // Check a new exploration seed has been identified.
+ if seeds.Length() != 1 {
+ t.Fatalf("Unexpected number of seeds: expected %v, got %v", 1, seeds.Length())
+ }
+ if err := root.addBranch(rightBranch, seeds); err != nil {
+ t.Fatalf("addBranch() failed: %v", err)
+ }
+ // Check no new exploration seeds have been identified.
+ if seeds.Length() != 1 {
+ t.Fatalf("Unexpected number of seeds: expected %v, got %v", 1, seeds.Length())
+ }
+ // Check exploration status has been correctly updated.
+ if !root.explored {
+ t.Fatalf("Unexpected exploration status: expected %v, got %v", true, root.explored)
+ }
+ // Check compaction of explored children subtrees.
+ if len(root.children) != 0 {
+ t.Fatalf("Unexpected number of children: expected %v, got %v", 0, len(root.children))
+ }
+}
+
+// TestDivergence checks the various types of execution divergence.
+func TestDivergence(t *testing.T) {
+ // Emulate a missing transition.
+ {
+ leftBranch := createBranch(0, 1)
+ rightBranch := createBranch(1, 0)
+ root := newState(nil, 0)
+ seeds := &stack{}
+ if err := root.addBranch(leftBranch, seeds); err != nil {
+ t.Fatalf("addBranch() failed: %v", err)
+ }
+ delete(rightBranch[0].transitions, 0)
+ if err := root.addBranch(rightBranch, seeds); err == nil {
+ t.Fatalf("addBranch() did not fail")
+ }
+ }
+ // Emulate an extra transition.
+ {
+ leftBranch := createBranch(0, 1)
+ rightBranch := createBranch(1, 0)
+ root := newState(nil, 0)
+ seeds := &stack{}
+ if err := root.addBranch(leftBranch, seeds); err != nil {
+ t.Fatalf("addBranch() failed: %v", err)
+ }
+ rightBranch[0].transitions[2] = &transition{}
+ if err := root.addBranch(rightBranch, seeds); err == nil {
+ t.Fatalf("addBranch() did not fail")
+ }
+ }
+ // Emulate divergent transition enabledness.
+ {
+ leftBranch := createBranch(0, 1)
+ rightBranch := createBranch(1, 0)
+ root := newState(nil, 0)
+ seeds := &stack{}
+ if err := root.addBranch(leftBranch, seeds); err != nil {
+ t.Fatalf("addBranch() failed: %v", err)
+ }
+ rightBranch[0].transitions[0].enabled = false
+ if err := root.addBranch(rightBranch, seeds); err == nil {
+ t.Fatalf("addBranch() did not fail")
+ }
+ }
+ // Emulate divergent transition clock.
+ {
+ leftBranch := createBranch(0, 1)
+ rightBranch := createBranch(1, 0)
+ root := newState(nil, 0)
+ seeds := &stack{}
+ if err := root.addBranch(leftBranch, seeds); err != nil {
+ t.Fatalf("addBranch() failed: %v", err)
+ }
+ rightBranch[0].transitions[1].clock[0]++
+ if err := root.addBranch(rightBranch, seeds); err == nil {
+ t.Fatalf("addBranch() did not fail")
+ }
+ }
+ // Emulate divergent transition read set.
+ {
+ leftBranch := createBranch(0, 1)
+ rightBranch := createBranch(1, 0)
+ root := newState(nil, 0)
+ seeds := &stack{}
+ if err := root.addBranch(leftBranch, seeds); err != nil {
+ t.Fatalf("addBranch() failed: %v", err)
+ }
+ delete(rightBranch[0].transitions[1].readSet, "mutex")
+ if err := root.addBranch(rightBranch, seeds); err == nil {
+ t.Fatalf("addBranch() did not fail")
+ }
+ }
+ // Emulate divergent transition write set.
+ {
+ leftBranch := createBranch(0, 1)
+ rightBranch := createBranch(1, 0)
+ root := newState(nil, 0)
+ seeds := &stack{}
+ if err := root.addBranch(leftBranch, seeds); err != nil {
+ t.Fatalf("addBranch() failed: %v", err)
+ }
+ delete(rightBranch[0].transitions[1].writeSet, "mutex")
+ if err := root.addBranch(rightBranch, seeds); err == nil {
+ t.Fatalf("addBranch() did not fail")
+ }
+ }
+}
diff --git a/profiles/internal/testing/concurrency/sync/sync.go b/profiles/internal/testing/concurrency/sync/sync.go
new file mode 100644
index 0000000..ec8b0c8
--- /dev/null
+++ b/profiles/internal/testing/concurrency/sync/sync.go
@@ -0,0 +1,86 @@
+package sync
+
+import (
+ "sync"
+
+ "v.io/x/ref/profiles/internal/testing/concurrency"
+)
+
+// Mutex is a wrapper around the Go implementation of Mutex.
+type Mutex struct {
+ m sync.Mutex
+}
+
+// MUTEX INTERFACE IMPLEMENTATION
+
+func (m *Mutex) Lock() {
+ if t := concurrency.T(); t != nil {
+ t.MutexLock(&m.m)
+ } else {
+ m.m.Lock()
+ }
+}
+
+func (m *Mutex) Unlock() {
+ if t := concurrency.T(); t != nil {
+ t.MutexUnlock(&m.m)
+ } else {
+ m.m.Unlock()
+ }
+}
+
+// RWMutex is a wrapper around the Go implementation of RWMutex.
+type RWMutex struct {
+ m sync.RWMutex
+}
+
+// RWMUTEX INTERFACE IMPLEMENTATION
+
+func (m *RWMutex) Lock() {
+ if t := concurrency.T(); t != nil {
+ t.RWMutexLock(&m.m)
+ } else {
+ m.m.Lock()
+ }
+}
+
+func (m *RWMutex) RLock() {
+ if t := concurrency.T(); t != nil {
+ t.RWMutexRLock(&m.m)
+ } else {
+ m.m.RLock()
+ }
+}
+
+func (m *RWMutex) RLocker() sync.Locker {
+ if t := concurrency.T(); t != nil {
+ return (*rlocker)(m)
+ } else {
+ return m.m.RLocker()
+ }
+}
+
+func (m *RWMutex) RUnlock() {
+ if t := concurrency.T(); t != nil {
+ t.RWMutexRUnlock(&m.m)
+ } else {
+ m.m.RUnlock()
+ }
+}
+func (m *RWMutex) Unlock() {
+ if t := concurrency.T(); t != nil {
+ t.RWMutexUnlock(&m.m)
+ } else {
+ m.m.Unlock()
+ }
+}
+
+type rlocker RWMutex
+
+func (r *rlocker) Lock() {
+ (*RWMutex)(r).RLock()
+}
+
+func (r *rlocker) Unlock() {
+ (*RWMutex)(r).RUnlock()
+}
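
The intent of this wrapper is that code under systematic test only swaps its sync import; when no exploration is active (concurrency.T() returns nil) every call falls through to the standard library. A minimal sketch of a caller, illustrative only and not part of this change:

package example

import (
	"v.io/x/ref/profiles/internal/testing/concurrency/sync"
)

var mu sync.Mutex

// criticalSection is guarded by the wrapper mutex: during a systematic
// exploration Lock/Unlock are routed through the Tester, otherwise they
// behave exactly like the standard library sync.Mutex.
func criticalSection(body func()) {
	mu.Lock()
	defer mu.Unlock()
	body()
}
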
diff --git a/profiles/internal/testing/concurrency/tester.go b/profiles/internal/testing/concurrency/tester.go
new file mode 100644
index 0000000..97e74ea
--- /dev/null
+++ b/profiles/internal/testing/concurrency/tester.go
@@ -0,0 +1,234 @@
+package concurrency
+
+import (
+ "sync"
+ "time"
+)
+
+// globalT stores a pointer to the global instance of the tester.
+var (
+ globalT *Tester = nil
+)
+
+// T returns the global instance of the tester.
+func T() *Tester {
+ return globalT
+}
+
+// Init sets up a new instance of the tester.
+func Init(setup, body, cleanup func()) *Tester {
+ tree := newState(nil, 0)
+ tree.visited = true
+ seeds := &stack{}
+ seeds.Push(tree)
+ tree.seeded = true
+ globalT = &Tester{
+ tree: tree,
+ seeds: seeds,
+ setup: setup,
+ body: body,
+ cleanup: cleanup,
+ }
+ return globalT
+}
+
+// Finish destroys the existing instance of the tester.
+func Finish() {
+ globalT = nil
+}
+
+// Tester represents an instance of the systematic test.
+type Tester struct {
+ // enabled records whether the tester is to be used or not.
+ enabled bool
+ // execution represents the currently explored execution.
+ execution *execution
+ // tree represents the current state of the exploration of the space
+ // of all possible interleavings of concurrent transitions.
+ tree *state
+ // seeds records the collection of scheduling alternatives to be
+ // explored in the future.
+ seeds *stack
+ // setup is a function that is executed before an instance of the
+ // test is started. It is assumed to always produce the same initial
+ // state.
+ setup func()
+ // body is a function that implements the body of the test.
+ body func()
+ // cleanup is a function that is executed after an instance of the
+ // test terminates.
+ cleanup func()
+}
+
+// Explore explores the space of possible test schedules until the
+// state space is fully exhausted.
+func (t *Tester) Explore() (int, error) {
+ return t.explore(0, 0)
+}
+
+// ExploreFor explores the space of possible test schedules until the
+// state space is fully exhausted or the given duration elapses,
+// whichever occurs first.
+func (t *Tester) ExploreFor(d time.Duration) (int, error) {
+ return t.explore(0, d)
+}
+
+// ExploreN explores the space of possible test schedules until the
+// state space is fully exhausted or the given number of schedules is
+// explored, whichever occurs first.
+func (t *Tester) ExploreN(n int) (int, error) {
+ return t.explore(n, 0)
+}
+
+// MutexLock implements the logic related to modeling and scheduling
+// an execution of "m.Lock()".
+func (t *Tester) MutexLock(m *sync.Mutex) {
+ if t.enabled {
+ done := make(chan struct{})
+ request := mutexLockRequest{defaultRequest{done: done}, m}
+ t.execution.requests <- request
+ <-done
+ } else {
+ m.Lock()
+ }
+}
+
+// MutexUnlock implements the logic related to modeling and scheduling
+// an execution of "m.Unlock()".
+func (t *Tester) MutexUnlock(m *sync.Mutex) {
+ if t.enabled {
+ done := make(chan struct{})
+ request := mutexUnlockRequest{defaultRequest{done: done}, m}
+ t.execution.requests <- request
+ <-done
+ } else {
+ m.Unlock()
+ }
+}
+
+// RWMutexLock implements the logic related to modeling and scheduling
+// an execution of "rw.Lock()".
+func (t *Tester) RWMutexLock(rw *sync.RWMutex) {
+ if t.enabled {
+ done := make(chan struct{})
+ request := rwMutexLockRequest{defaultRequest{done: done}, false, rw}
+ t.execution.requests <- request
+ <-done
+ } else {
+ rw.Lock()
+ }
+}
+
+// RWMutexRLock implements the logic related to modeling and
+// scheduling an execution of "rw.RLock()".
+func (t *Tester) RWMutexRLock(rw *sync.RWMutex) {
+ if t.enabled {
+ done := make(chan struct{})
+ request := rwMutexLockRequest{defaultRequest{done: done}, true, rw}
+ t.execution.requests <- request
+ <-done
+ } else {
+ rw.RLock()
+ }
+}
+
+// RWMutexRUnlock implements the logic related to modeling and
+// scheduling an execution of "rw.RUnlock()".
+func (t *Tester) RWMutexRUnlock(rw *sync.RWMutex) {
+ if t.enabled {
+ done := make(chan struct{})
+ request := rwMutexUnlockRequest{defaultRequest{done: done}, true, rw}
+ t.execution.requests <- request
+ <-done
+ } else {
+ rw.RUnlock()
+ }
+}
+
+// RWMutexUnlock implements the logic related to modeling and
+// scheduling an execution of "rw.Unlock()".
+func (t *Tester) RWMutexUnlock(rw *sync.RWMutex) {
+ if t.enabled {
+ done := make(chan struct{})
+ request := rwMutexUnlockRequest{defaultRequest{done: done}, false, rw}
+ t.execution.requests <- request
+ <-done
+ } else {
+ rw.Unlock()
+ }
+}
+
+// Start implements the logic related to modeling and scheduling an
+// execution of "go fn()".
+func Start(fn func()) {
+ t := globalT
+ if t != nil && t.enabled {
+ done1 := make(chan struct{})
+ reply := make(chan TID)
+ request1 := goRequest{defaultRequest{done: done1}, reply}
+ t.execution.requests <- request1
+ tid := <-reply
+ <-done1
+ go t.startHelper(tid, fn)
+ done2 := make(chan struct{})
+ request2 := goParentRequest{defaultRequest{done: done2}}
+ t.execution.requests <- request2
+ <-done2
+ } else {
+ fn()
+ }
+}
+
+// Exit implements the logic related to modeling and scheduling thread
+// termination.
+func Exit() {
+ t := globalT
+ if t != nil && t.enabled {
+ done := make(chan struct{})
+ request := goExitRequest{defaultRequest{done: done}}
+ t.execution.requests <- request
+ <-done
+ }
+}
+
+// startHelper is a wrapper used by the implementation of Start() to
+// make sure the child thread is registered with the correct
+// identifier.
+func (t *Tester) startHelper(tid TID, fn func()) {
+ done := make(chan struct{})
+ request := goChildRequest{defaultRequest{done: done}, tid}
+ t.execution.requests <- request
+ <-done
+ fn()
+}
+
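+// explore repeatedly replays the test body under schedules derived from the
+// pending seeds until the seed stack is exhausted, n iterations have run
+// (if n > 0), or the duration d has elapsed (if d > 0). It returns the number
+// of schedules explored.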
+func (t *Tester) explore(n int, d time.Duration) (int, error) {
+ niterations := 0
+ start := time.Now()
+ for !t.seeds.Empty() &&
+ (n == 0 || niterations < n) &&
+ (d == 0 || time.Since(start) < d) {
+ t.setup()
+ seed, err := t.seeds.Pop()
+ if err != nil {
+ panic("Corrupted stack.\n")
+ }
+ strategy := seed.generateStrategy()
+ t.execution = newExecution(strategy)
+ t.enabled = true
+ if err := t.tree.addBranch(t.execution.Run(t.body), t.seeds); err != nil {
+ t.enabled = false
+ return niterations, err
+ }
+ t.enabled = false
+ // Sort the seeds because dynamic partial order reduction might
+ // have added elements that violate the depth-first ordering of
+ // seeds. The depth-first ordering is used for space-efficient
+ // (O(d) where d is the depth of the execution tree) exploration
+ // of the execution tree.
+ t.seeds.Sort()
+ t.cleanup()
+ niterations++
+ }
+ return niterations, nil
+}
diff --git a/profiles/internal/testing/concurrency/thread.go b/profiles/internal/testing/concurrency/thread.go
new file mode 100644
index 0000000..ce162c5
--- /dev/null
+++ b/profiles/internal/testing/concurrency/thread.go
@@ -0,0 +1,89 @@
+package concurrency
+
+import (
+ "fmt"
+)
+
+// TID is the thread identifier type.
+type TID int
+
+// IncreasingTID is used to sort thread identifiers in increasing order.
+type IncreasingTID []TID
+
+// sort.Interface implementation for IncreasingTID.
+
+func (tids IncreasingTID) Len() int {
+ return len(tids)
+}
+func (tids IncreasingTID) Less(i, j int) bool {
+ return tids[i] < tids[j]
+}
+func (tids IncreasingTID) Swap(i, j int) {
+ tids[i], tids[j] = tids[j], tids[i]
+}
+
+// TIDGenerator is used for generating unique thread identifiers.
+func TIDGenerator() func() TID {
+ n := 0
+ return func() TID {
+ n++
+ return TID(n)
+ }
+}
+
+// thread records the abstract state of a thread during an execution
+// of the test.
+type thread struct {
+ // tid is the thread identifier.
+ tid TID
+ // clock is a vector clock that keeps track of the logical time of
+ // this thread.
+ clock clock
+ // ready is a channel that can be used to schedule execution of the
+ // thread.
+ ready chan struct{}
+ // request holds the current scheduling request of the thread.
+ request request
+}
+
+// newThread is the thread factory.
+func newThread(tid TID, clock clock) *thread {
+ return &thread{
+ tid: tid,
+ clock: clock.clone(),
+ }
+}
+
+// enabled checks if the thread can be scheduled given the current
+// execution context.
+func (t *thread) enabled(ctx *context) bool {
+ if t.request == nil {
+ panic(fmt.Sprintf("Thread %v has no request.", t.tid))
+ }
+ return t.request.enabled(ctx)
+}
+
+// kind returns the kind of the thread transition.
+func (t *thread) kind() transitionKind {
+ if t.request == nil {
+ panic(fmt.Sprintf("Thread %v has no request.", t.tid))
+ }
+ return t.request.kind()
+}
+
+// readSet returns the set of abstract resources read by the thread.
+func (t *thread) readSet() resourceSet {
+ if t.request == nil {
+ panic(fmt.Sprintf("Thread %v has no request.", t.tid))
+ }
+ return t.request.readSet()
+}
+
+// writeSet returns the set of abstract resources written by the
+// thread.
+func (t *thread) writeSet() resourceSet {
+ if t.request == nil {
+ panic(fmt.Sprintf("Thread %v has no request.", t.tid))
+ }
+ return t.request.writeSet()
+}
diff --git a/profiles/internal/testing/concurrency/transition.go b/profiles/internal/testing/concurrency/transition.go
new file mode 100644
index 0000000..547837a
--- /dev/null
+++ b/profiles/internal/testing/concurrency/transition.go
@@ -0,0 +1,37 @@
+package concurrency
+
+// transitionKind identifies the kind of transition.
+type transitionKind int
+
+const (
+ tNil transitionKind = iota
+ tGoParent
+ tGoChild
+ tGoExit
+ tMutexLock
+ tMutexUnlock
+ tRWMutexLock
+ tRWMutexRLock
+ tRWMutexRUnlock
+ tRWMutexUnlock
+)
+
+// transition records information about the abstract program
+// transition of a thread.
+type transition struct {
+ // tid identifies the thread this transition belongs to.
+ tid TID
+ // clock records the logical time at the beginning of this
+ // transition as perceived by the thread this transition belongs to.
+ clock map[TID]int
+ // kind records the kind of this transition.
+ kind transitionKind
+ // enabled identifies whether this transition is enabled.
+ enabled bool
+ // readSet identifies the set of abstract resources read by this
+ // transition.
+ readSet resourceSet
+ // writeSet identifies the set of abstract resources written by this
+ // transition.
+ writeSet resourceSet
+}
diff --git a/profiles/internal/testing/concurrency/util_test.go b/profiles/internal/testing/concurrency/util_test.go
new file mode 100644
index 0000000..97f025d
--- /dev/null
+++ b/profiles/internal/testing/concurrency/util_test.go
@@ -0,0 +1,85 @@
+package concurrency_test
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "testing"
+)
+
+// checkExpectedOutputs checks that all expected outputs are
+// generated.
+func checkExpectedOutputs(t *testing.T, outputs, expectedOutputs []string) {
+ for _, expected := range expectedOutputs {
+ found := false
+ for _, output := range outputs {
+ if output == expected {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Fatalf("Expected output %v never generated", expected)
+ }
+ }
+}
+
+// checkUnexpectedOutputs checks that no unexpected outputs are
+// generated.
+func checkUnexpectedOutputs(t *testing.T, outputs, expectedOutputs []string) {
+ for _, output := range outputs {
+ found := false
+ for _, expected := range expectedOutputs {
+ if output == expected {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Fatalf("Unexpected output %v generated", output)
+ }
+ }
+}
+
+// cleanupClosure returns a function that is used as the cleanup
+// function of systematic tests.
+func cleanupClosure(out *os.File) func() {
+ return func() {
+ fmt.Fprintf(out, "\n")
+ }
+}
+
+// length computes the number of keys in the given set that hold the
+// value 'true'.
+func length(s map[int]bool) int {
+ n := 0
+ for _, ok := range s {
+ if ok {
+ n++
+ }
+ }
+ return n
+}
+
+// processOutput processes the output file, returning a slice of all
+// output lines generated by a test.
+func processOutput(t *testing.T, f *os.File) []string {
+ buffer, err := ioutil.ReadFile(f.Name())
+ if err != nil {
+ t.Fatalf("ReadFile() failed: %v", err)
+ }
+ scanner := bufio.NewScanner(bytes.NewReader(buffer))
+ result := make([]string, 0)
+ for scanner.Scan() {
+ result = append(result, scanner.Text())
+ }
+ if err := scanner.Err(); err != nil {
+ t.Fatalf("Scanning output file failed: %v", err)
+ }
+ return result
+}
+
+// setup is used as the setup function of systematic tests.
+func setup() {}
diff --git a/profiles/internal/testing/concurrency/v23_internal_test.go b/profiles/internal/testing/concurrency/v23_internal_test.go
new file mode 100644
index 0000000..55d3f56
--- /dev/null
+++ b/profiles/internal/testing/concurrency/v23_internal_test.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated via go generate.
+// DO NOT UPDATE MANUALLY
+package concurrency
+
+import "testing"
+import "os"
+
+import "v.io/x/ref/lib/testutil"
+
+func TestMain(m *testing.M) {
+ testutil.Init()
+ os.Exit(m.Run())
+}
diff --git a/profiles/internal/testing/mocks/ipc/simple_client.go b/profiles/internal/testing/mocks/ipc/simple_client.go
new file mode 100644
index 0000000..521dfde
--- /dev/null
+++ b/profiles/internal/testing/mocks/ipc/simple_client.go
@@ -0,0 +1,135 @@
+package ipc
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/security"
+ "v.io/v23/vdl"
+ "v.io/v23/vom"
+ "v.io/x/lib/vlog"
+)
+
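+// ClientWithTimesCalled is an ipc.Client that additionally reports how many
+// times each method has been invoked through StartCall.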
+type ClientWithTimesCalled interface {
+ ipc.Client
+ TimesCalled(method string) int
+}
+
+// NewSimpleClient creates a new mocked ipc client where the given map of
+// method names to outputs is used to evaluate method calls.
+// It also adds testing features such as a counter for the number of times
+// each method is called.
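+//
+// An illustrative sketch of typical use (the method name "echo" here is
+// hypothetical):
+//
+//   client := NewSimpleClient(map[string][]interface{}{
+//       "echo": []interface{}{"hello"},
+//   })
+//   call, _ := client.StartCall(ctx, "name/obj", "echo", []interface{}{})
+//   var out string
+//   call.Finish(&out) // out == "hello"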
+func NewSimpleClient(methodsResults map[string][]interface{}) ClientWithTimesCalled {
+ return &simpleMockClient{
+ results: methodsResults,
+ timesCalled: make(map[string]int),
+ }
+}
+
+// simpleMockClient implements ipc.Client
+type simpleMockClient struct {
+ // Protects timesCalled
+ sync.Mutex
+
+ // results is a map of method names to results
+ results map[string][]interface{}
+ // timesCalled counts the number of times StartCall has been called for each method name.
+ timesCalled map[string]int
+}
+
+// TimesCalled returns the number of times the given method has been called.
+func (c *simpleMockClient) TimesCalled(method string) int {
+ c.Lock()
+ defer c.Unlock()
+ return c.timesCalled[method]
+}
+
+// StartCall implements ipc.Client.
+func (c *simpleMockClient) StartCall(ctx *context.T, name, method string, args []interface{}, opts ...ipc.CallOpt) (ipc.ClientCall, error) {
+ defer vlog.LogCall()()
+ results, ok := c.results[method]
+ if !ok {
+ return nil, errors.New(fmt.Sprintf("method %s not found", method))
+ }
+
+ // Copy the results so that they can be modified without affecting the original.
+ // This must be done via vom encode and decode rather than a direct deep copy because (among other reasons)
+ // reflect-based deep copy on vdl.Type objects will fail due to their private fields. This is not a problem with vom
+ // as it manually creates the type objects. It is also more realistic to use the same mechanism as the real calls.
+ vomBytes, err := vom.Encode(results)
+ if err != nil {
+ panic(fmt.Sprintf("Error copying value with vom (failed on encode): %v", err))
+ }
+ var copiedResults []interface{}
+ if err := vom.Decode(vomBytes, &copiedResults); err != nil {
+ panic(fmt.Sprintf("Error copying value with vom (failed on decode): %v", err))
+ }
+
+ clientCall := mockCall{
+ results: copiedResults,
+ }
+
+ c.Lock()
+ c.timesCalled[method]++
+ c.Unlock()
+
+ return &clientCall, nil
+}
+
+// Close implements ipc.Client
+func (*simpleMockClient) Close() {
+ defer vlog.LogCall()()
+}
+
+// mockCall implements ipc.ClientCall
+type mockCall struct {
+ mockStream
+ results []interface{}
+}
+
+// Cancel implements ipc.ClientCall
+func (*mockCall) Cancel() {
+ defer vlog.LogCall()()
+}
+
+// CloseSend implements ipc.ClientCall
+func (*mockCall) CloseSend() error {
+ defer vlog.LogCall()()
+ return nil
+}
+
+// Finish implements ipc.ClientCall
+func (mc *mockCall) Finish(resultptrs ...interface{}) error {
+ defer vlog.LogCall()()
+ if got, want := len(resultptrs), len(mc.results); got != want {
+ return errors.New(fmt.Sprintf("wrong number of output results; expected resultptrs of size %d but got %d", want, got))
+ }
+ for ax, res := range resultptrs {
+ if mc.results[ax] != nil {
+ if err := vdl.Convert(res, mc.results[ax]); err != nil {
+ panic(fmt.Sprintf("Error converting out argument %#v: %v", mc.results[ax], err))
+ }
+ }
+ }
+ return nil
+}
+
+// RemoteBlessings implements ipc.ClientCall
+func (*mockCall) RemoteBlessings() ([]string, security.Blessings) {
+ return []string{}, security.Blessings{}
+}
+
+// mockStream implements ipc.Stream.
+type mockStream struct{}
+
+// Send implements ipc.Stream.
+func (*mockStream) Send(interface{}) error {
+ defer vlog.LogCall()()
+ return nil
+}
+
+// Recv implements ipc.Stream.
+func (*mockStream) Recv(interface{}) error {
+ defer vlog.LogCall()()
+ return nil
+}
diff --git a/profiles/internal/testing/mocks/ipc/simple_client_test.go b/profiles/internal/testing/mocks/ipc/simple_client_test.go
new file mode 100644
index 0000000..0e9bd15
--- /dev/null
+++ b/profiles/internal/testing/mocks/ipc/simple_client_test.go
@@ -0,0 +1,139 @@
+package ipc
+
+import (
+ "testing"
+
+ "v.io/v23/context"
+)
+
+func testContext() *context.T {
+ ctx, _ := context.RootContext()
+ return ctx
+}
+
+func TestSuccessfulCalls(t *testing.T) {
+
+ method1ExpectedResult := []interface{}{"one", 2}
+ method2ExpectedResult := []interface{}{"one"}
+ method3ExpectedResult := []interface{}{nil}
+
+ client := NewSimpleClient(map[string][]interface{}{
+ "method1": method1ExpectedResult,
+ "method2": method2ExpectedResult,
+ "method3": method3ExpectedResult,
+ })
+
+ ctx := testContext()
+
+ // method1
+ method1Call, err := client.StartCall(ctx, "name/obj", "method1", []interface{}{})
+ if err != nil {
+ t.Errorf("StartCall: did not expect an error return")
+ return
+ }
+ var resultOne string
+ var resultTwo int64
+ method1Call.Finish(&resultOne, &resultTwo)
+ if resultOne != "one" {
+ t.Errorf(`FinishCall: first result was "%v", want "one"`, resultOne)
+ return
+ }
+ if resultTwo != 2 {
+ t.Errorf(`FinishCall: second result was "%v", want 2`, resultTwo)
+ return
+ }
+
+ // method2
+ method2Call, err := client.StartCall(ctx, "name/obj", "method2", []interface{}{})
+ if err != nil {
+ t.Errorf(`StartCall: did not expect an error return`)
+ return
+ }
+ method2Call.Finish(&resultOne)
+ if resultOne != "one" {
+ t.Errorf(`FinishCall: result "%v", want "one"`, resultOne)
+ return
+ }
+
+ // method3
+ var result interface{}
+ method3Call, err := client.StartCall(ctx, "name/obj", "method3", []interface{}{})
+ if err != nil {
+ t.Errorf(`StartCall: did not expect an error return`)
+ return
+ }
+ method3Call.Finish(&result)
+ if result != nil {
+ t.Errorf(`FinishCall: result "%v", want nil`, result)
+ return
+ }
+}
+
+type sampleStruct struct {
+ Name string
+}
+
+func TestStructResult(t *testing.T) {
+ client := NewSimpleClient(map[string][]interface{}{
+ "foo": []interface{}{
+ sampleStruct{Name: "bar"},
+ },
+ })
+ ctx := testContext()
+ call, _ := client.StartCall(ctx, "name/obj", "foo", []interface{}{})
+ var result sampleStruct
+ call.Finish(&result)
+ if result.Name != "bar" {
+ t.Errorf(`FinishCall: second result was "%v", want "bar"`, result.Name)
+ return
+ }
+}
+
+func TestErrorCall(t *testing.T) {
+ client := NewSimpleClient(map[string][]interface{}{
+ "bar": []interface{}{},
+ })
+ ctx := testContext()
+ _, err := client.StartCall(ctx, "name/obj", "wrongMethodName", []interface{}{})
+ if err == nil {
+ t.Errorf(`StartCall: should have returned an error on invalid method name`)
+ return
+ }
+}
+
+func TestNumberOfCalls(t *testing.T) {
+ client := NewSimpleClient(map[string][]interface{}{
+ "method1": []interface{}{},
+ "method2": []interface{}{},
+ })
+
+ errMsg := "Expected method to be called %d times but it was called %d"
+ ctx := testContext()
+
+ // method 1
+ if n := client.TimesCalled("method1"); n != 0 {
+ t.Errorf(errMsg, 0, n)
+ return
+ }
+ client.StartCall(ctx, "name/of/object", "method1", []interface{}{})
+ if n := client.TimesCalled("method1"); n != 1 {
+ t.Errorf(errMsg, 1, n)
+ return
+ }
+ client.StartCall(ctx, "name/of/object", "method1", []interface{}{})
+ if n := client.TimesCalled("method1"); n != 2 {
+ t.Errorf(errMsg, 2, n)
+ return
+ }
+
+ // method 2
+ if n := client.TimesCalled("method2"); n != 0 {
+ t.Errorf(errMsg, 0, n)
+ return
+ }
+ client.StartCall(ctx, "name/of/object", "method2", []interface{}{})
+ if n := client.TimesCalled("method2"); n != 1 {
+ t.Errorf(errMsg, 1, n)
+ return
+ }
+}
diff --git a/profiles/internal/testing/mocks/naming/namespace.go b/profiles/internal/testing/mocks/naming/namespace.go
new file mode 100644
index 0000000..37a3e8c
--- /dev/null
+++ b/profiles/internal/testing/mocks/naming/namespace.go
@@ -0,0 +1,175 @@
+package naming
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "v.io/v23/context"
+ "v.io/v23/naming"
+ "v.io/v23/naming/ns"
+ "v.io/v23/services/security/access"
+ "v.io/v23/verror"
+ "v.io/x/lib/vlog"
+
+ vnamespace "v.io/x/ref/profiles/internal/naming/namespace"
+)
+
+// NewSimpleNamespace returns a simple implementation of a Namespace
+// server for use in tests. In particular, it ignores TTLs and does not
+// allow fully overlapping mount names.
+func NewSimpleNamespace() ns.Namespace {
+ ns, err := vnamespace.New()
+ if err != nil {
+ panic(err)
+ }
+ return &namespace{mounts: make(map[string]*naming.MountEntry), ns: ns}
+}
+
+// namespace is a simple partial implementation of ns.Namespace.
+type namespace struct {
+ sync.Mutex
+ mounts map[string]*naming.MountEntry
+ ns ns.Namespace
+}
+
+func (ns *namespace) Mount(ctx *context.T, name, server string, _ time.Duration, opts ...naming.MountOpt) error {
+ defer vlog.LogCall()()
+ // TODO(ashankar,p): There is a bunch of processing in the real
+ // namespace that is missing from this mock implementation and some of
+ // it is duplicated here. Figure out a way to share more code with the
+ // real implementation?
+ var blessingpatterns []string
+ for _, o := range opts {
+ if v, ok := o.(naming.MountedServerBlessingsOpt); ok {
+ blessingpatterns = v
+ }
+ }
+ ns.Lock()
+ defer ns.Unlock()
+ for n := range ns.mounts {
+ if n != name && (strings.HasPrefix(name, n) || strings.HasPrefix(n, name)) {
+ return fmt.Errorf("simple mount table does not allow names that are a prefix of each other")
+ }
+ }
+ e := ns.mounts[name]
+ if e == nil {
+ e = &naming.MountEntry{}
+ ns.mounts[name] = e
+ }
+ s := naming.MountedServer{
+ Server: server,
+ BlessingPatterns: blessingpatterns,
+ }
+ e.Servers = append(e.Servers, s)
+ return nil
+}
+
+func (ns *namespace) Unmount(ctx *context.T, name, server string) error {
+ defer vlog.LogCall()()
+ ns.Lock()
+ defer ns.Unlock()
+ e := ns.mounts[name]
+ if e == nil {
+ return nil
+ }
+ if len(server) == 0 {
+ delete(ns.mounts, name)
+ return nil
+ }
+ var keep []naming.MountedServer
+ for _, s := range e.Servers {
+ if s.Server != server {
+ keep = append(keep, s)
+ }
+ }
+ if len(keep) == 0 {
+ delete(ns.mounts, name)
+ return nil
+ }
+ e.Servers = keep
+ return nil
+}
+
+func (ns *namespace) Resolve(ctx *context.T, name string, opts ...naming.ResolveOpt) (*naming.MountEntry, error) {
+ defer vlog.LogCall()()
+ p, n := vnamespace.InternalSplitObjectName(name)
+ var blessingpatterns []string
+ if len(p) > 0 {
+ blessingpatterns = []string{string(p)}
+ }
+ name = n
+ if address, suffix := naming.SplitAddressName(name); len(address) > 0 {
+ return &naming.MountEntry{
+ Name: suffix,
+ Servers: []naming.MountedServer{
+ {Server: address, BlessingPatterns: blessingpatterns},
+ },
+ }, nil
+ }
+ ns.Lock()
+ defer ns.Unlock()
+ for prefix, e := range ns.mounts {
+ if strings.HasPrefix(name, prefix) {
+ ret := *e
+ ret.Name = strings.TrimLeft(strings.TrimPrefix(name, prefix), "/")
+ if len(blessingpatterns) > 0 {
+ // Replace the blessing patterns with p.
+ for idx := range ret.Servers {
+ ret.Servers[idx].BlessingPatterns = blessingpatterns
+ }
+ }
+ return &ret, nil
+ }
+ }
+ return nil, verror.New(naming.ErrNoSuchName, ctx, fmt.Sprintf("Resolve name %q not found in %v", name, ns.mounts))
+}
+
+func (ns *namespace) ResolveToMountTable(ctx *context.T, name string, opts ...naming.ResolveOpt) (*naming.MountEntry, error) {
+ defer vlog.LogCall()()
+ // TODO(mattr): Implement this method for tests that might need it.
+ panic("ResolveToMountTable not implemented")
+ return nil, nil
+}
+
+func (ns *namespace) FlushCacheEntry(name string) bool {
+ defer vlog.LogCall()()
+ return false
+}
+
+func (ns *namespace) CacheCtl(ctls ...naming.CacheCtl) []naming.CacheCtl {
+ defer vlog.LogCall()()
+ return nil
+}
+
+func (ns *namespace) Glob(ctx *context.T, pattern string) (chan interface{}, error) {
+ defer vlog.LogCall()()
+ // TODO(mattr): Implement this method for tests that might need it.
+ panic("Glob not implemented")
+ return nil, nil
+}
+
+func (ns *namespace) SetRoots(...string) error {
+ defer vlog.LogCall()()
+ panic("Calling SetRoots on a mock namespace. This is not supported.")
+ return nil
+}
+
+func (ns *namespace) Roots() []string {
+ defer vlog.LogCall()()
+ panic("Calling Roots on a mock namespace. This is not supported.")
+ return nil
+}
+
+func (ns *namespace) GetACL(ctx *context.T, name string) (acl access.TaggedACLMap, etag string, err error) {
+ defer vlog.LogCall()()
+ panic("Calling GetACL on a mock namespace. This is not supported.")
+ return nil, "", nil
+}
+
+func (ns *namespace) SetACL(ctx *context.T, name string, acl access.TaggedACLMap, etag string) error {
+ defer vlog.LogCall()()
+ panic("Calling SetACL on a mock namespace. This is not supported.")
+ return nil
+}
diff --git a/profiles/internal/vtrace/store.go b/profiles/internal/vtrace/store.go
new file mode 100644
index 0000000..c733ed0
--- /dev/null
+++ b/profiles/internal/vtrace/store.go
@@ -0,0 +1,280 @@
+package vtrace
+
+import (
+ "math/rand"
+ "regexp"
+ "sync"
+ "time"
+
+ "v.io/v23/context"
+ "v.io/v23/uniqueid"
+ "v.io/v23/vtrace"
+
+ "v.io/x/ref/lib/flags"
+)
+
+// Store implements a store for traces. The idea is to keep all the
+// information we have about some subset of traces that pass through
+// the server. For now we just implement an LRU cache, so the least
+// recently started/finished/annotated traces expire after some
+// maximum trace count is reached.
+// TODO(mattr): LRU is the wrong policy in the long term, we should
+// try to keep some diverse set of traces and allow users to
+// specifically tell us to capture a specific trace. LRU will work OK
+// for many testing scenarios and low volume applications.
+type Store struct {
+ opts flags.VtraceFlags
+ collectRegexp *regexp.Regexp
+
+ // traces and head together implement a linked-hash-map.
+ // head points to the head and tail of the doubly-linked-list
+ // of recently used items (the tail is the LRU traceStore).
+ // TODO(mattr): Use rwmutex.
+ mu sync.Mutex
+ traces map[uniqueid.Id]*traceStore // GUARDED_BY(mu)
+ head *traceStore // GUARDED_BY(mu)
+}
+
+// NewStore creates a new store according to the passed in opts.
+func NewStore(opts flags.VtraceFlags) (*Store, error) {
+ head := &traceStore{}
+ head.next, head.prev = head, head
+
+ var collectRegexp *regexp.Regexp
+ if opts.CollectRegexp != "" {
+ var err error
+ if collectRegexp, err = regexp.Compile(opts.CollectRegexp); err != nil {
+ return nil, err
+ }
+ }
+
+ return &Store{
+ opts: opts,
+ collectRegexp: collectRegexp,
+ traces: make(map[uniqueid.Id]*traceStore),
+ head: head,
+ }, nil
+}
+
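+// ForceCollect marks the trace with the given id for collection, creating an
+// entry for it in the store if one does not already exist.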
+func (s *Store) ForceCollect(id uniqueid.Id) {
+ s.mu.Lock()
+ s.forceCollectLocked(id)
+ s.mu.Unlock()
+}
+
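+// forceCollectLocked returns the traceStore for id, creating it (and trimming
+// the least recently used traces beyond the configured cache size) if needed.
+// The caller must hold s.mu.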
+func (s *Store) forceCollectLocked(id uniqueid.Id) *traceStore {
+ ts := s.traces[id]
+ if ts == nil {
+ ts = newTraceStore(id)
+ s.traces[id] = ts
+ ts.moveAfter(s.head)
+ // Trim elements beyond our size limit.
+ for len(s.traces) > s.opts.CacheSize {
+ el := s.head.prev
+ el.removeFromList()
+ delete(s.traces, el.id)
+ }
+ }
+ return ts
+}
+
+// merge merges a vtrace.Response into the store.
+func (s *Store) merge(t vtrace.Response) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ var ts *traceStore
+ if t.Flags&vtrace.CollectInMemory != 0 {
+ ts = s.forceCollectLocked(t.Trace.ID)
+ } else {
+ ts = s.traces[t.Trace.ID]
+ }
+ if ts != nil {
+ ts.merge(t.Trace.Spans)
+ }
+}
+
+// annotate stores an annotation for the trace if it is being collected.
+func (s *Store) annotate(span *span, msg string) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ ts := s.traces[span.trace]
+ if ts == nil {
+ if s.collectRegexp != nil && s.collectRegexp.MatchString(msg) {
+ ts = s.forceCollectLocked(span.trace)
+ }
+ }
+
+ if ts != nil {
+ ts.annotate(span, msg)
+ ts.moveAfter(s.head)
+ }
+}
+
+// start stores data about a starting span if the trace is being collected.
+func (s *Store) start(span *span) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ ts := s.traces[span.trace]
+ if ts == nil {
+ sr := s.opts.SampleRate
+ if span.trace == span.parent && sr > 0.0 && (sr >= 1.0 || rand.Float64() < sr) {
+ // If this is a root span, we may automatically sample it for collection.
+ ts = s.forceCollectLocked(span.trace)
+ } else if s.collectRegexp != nil && s.collectRegexp.MatchString(span.name) {
+ // If this span matches collectRegexp, then force collect its trace.
+ ts = s.forceCollectLocked(span.trace)
+ }
+ }
+ if ts != nil {
+ ts.start(span)
+ ts.moveAfter(s.head)
+ }
+}
+
+// finish stores data about a finished span if the trace is being collected.
+func (s *Store) finish(span *span) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if ts := s.traces[span.trace]; ts != nil {
+ ts.finish(span)
+ ts.moveAfter(s.head)
+ }
+}
+
+// flags returns the trace flags for the given trace: CollectInMemory if the
+// trace is being collected, Empty otherwise.
+func (s *Store) flags(id uniqueid.Id) vtrace.TraceFlags {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if ts := s.traces[id]; ts != nil {
+ return vtrace.CollectInMemory
+ }
+ return vtrace.Empty
+}
+
+// TraceRecords returns TraceRecords for all traces saved in the store.
+func (s *Store) TraceRecords() []vtrace.TraceRecord {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ out := make([]vtrace.TraceRecord, len(s.traces))
+ i := 0
+ for _, ts := range s.traces {
+ ts.traceRecord(&out[i])
+ i++
+ }
+ return out
+}
+
+// TraceRecord returns a TraceRecord for a given ID. An empty record is
+// returned if the given id is not present.
+func (s *Store) TraceRecord(id uniqueid.Id) *vtrace.TraceRecord {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ out := &vtrace.TraceRecord{}
+ ts := s.traces[id]
+ if ts != nil {
+ ts.traceRecord(out)
+ }
+ return out
+}
+
+type traceStore struct {
+ id uniqueid.Id
+ spans map[uniqueid.Id]*vtrace.SpanRecord
+ prev, next *traceStore
+}
+
+func newTraceStore(id uniqueid.Id) *traceStore {
+ return &traceStore{
+ id: id,
+ spans: make(map[uniqueid.Id]*vtrace.SpanRecord),
+ }
+}
+
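+// record returns the SpanRecord for s, creating and registering a new record
+// if one does not already exist.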
+func (ts *traceStore) record(s *span) *vtrace.SpanRecord {
+ record, ok := ts.spans[s.id]
+ if !ok {
+ record = &vtrace.SpanRecord{
+ ID: s.id,
+ Parent: s.parent,
+ Name: s.name,
+ Start: s.start,
+ }
+ ts.spans[s.id] = record
+ }
+ return record
+}
+
+func (ts *traceStore) annotate(s *span, msg string) {
+ record := ts.record(s)
+ record.Annotations = append(record.Annotations, vtrace.Annotation{
+ When: time.Now(),
+ Message: msg,
+ })
+}
+
+func (ts *traceStore) start(s *span) {
+ ts.record(s)
+}
+
+func (ts *traceStore) finish(s *span) {
+ ts.record(s).End = time.Now()
+}
+
+func (ts *traceStore) merge(spans []vtrace.SpanRecord) {
+ // TODO(mattr): We need to carefully merge here to correct for
+ // clock skew and ordering. We should estimate the clock skew
+ // by assuming that children of parent need to start after parent
+ // and end before now.
+ for _, span := range spans {
+ if ts.spans[span.ID] == nil {
+ ts.spans[span.ID] = copySpanRecord(&span)
+ }
+ }
+}
+
+func (ts *traceStore) removeFromList() {
+ if ts.prev != nil {
+ ts.prev.next = ts.next
+ }
+ if ts.next != nil {
+ ts.next.prev = ts.prev
+ }
+ ts.next = nil
+ ts.prev = nil
+}
+
+func (ts *traceStore) moveAfter(prev *traceStore) {
+ ts.removeFromList()
+ ts.prev = prev
+ ts.next = prev.next
+ prev.next.prev = ts
+ prev.next = ts
+}
+
+func copySpanRecord(in *vtrace.SpanRecord) *vtrace.SpanRecord {
+ return &vtrace.SpanRecord{
+ ID: in.ID,
+ Parent: in.Parent,
+ Name: in.Name,
+ Start: in.Start,
+ End: in.End,
+ Annotations: append([]vtrace.Annotation{}, in.Annotations...),
+ }
+}
+
+func (ts *traceStore) traceRecord(out *vtrace.TraceRecord) {
+ spans := make([]vtrace.SpanRecord, 0, len(ts.spans))
+ for _, span := range ts.spans {
+ spans = append(spans, *copySpanRecord(span))
+ }
+ out.ID = ts.id
+ out.Spans = spans
+}
+
+// Merge merges a vtrace.Response into the current store.
+func Merge(ctx *context.T, t vtrace.Response) {
+ getStore(ctx).merge(t)
+}
diff --git a/profiles/internal/vtrace/store_test.go b/profiles/internal/vtrace/store_test.go
new file mode 100644
index 0000000..d9eb1fb
--- /dev/null
+++ b/profiles/internal/vtrace/store_test.go
@@ -0,0 +1,124 @@
+package vtrace
+
+import (
+ "encoding/binary"
+ "reflect"
+ "sort"
+ "testing"
+
+ "v.io/v23/uniqueid"
+ "v.io/v23/vtrace"
+
+ "v.io/x/ref/lib/flags"
+)
+
+var nextid = uint64(1)
+
+func id() uniqueid.Id {
+ var out uniqueid.Id
+ binary.BigEndian.PutUint64(out[8:], nextid)
+ nextid++
+ return out
+}
+
+func makeTraces(n int, st *Store) []uniqueid.Id {
+ traces := make([]uniqueid.Id, n)
+ for i := range traces {
+ curid := id()
+ traces[i] = curid
+ st.ForceCollect(curid)
+ }
+ return traces
+}
+
+func recordids(records ...vtrace.TraceRecord) map[uniqueid.Id]bool {
+ out := make(map[uniqueid.Id]bool)
+ for _, trace := range records {
+ out[trace.ID] = true
+ }
+ return out
+}
+
+func traceids(traces ...uniqueid.Id) map[uniqueid.Id]bool {
+ out := make(map[uniqueid.Id]bool)
+ for _, trace := range traces {
+ out[trace] = true
+ }
+ return out
+}
+
+func pretty(in map[uniqueid.Id]bool) []int {
+ out := make([]int, 0, len(in))
+ for k := range in {
+ out = append(out, int(k[15]))
+ }
+ sort.Ints(out)
+ return out
+}
+
+func compare(t *testing.T, want map[uniqueid.Id]bool, records []vtrace.TraceRecord) {
+ got := recordids(records...)
+ if !reflect.DeepEqual(want, got) {
+ t.Errorf("Got wrong traces. Got %v, want %v.", pretty(got), pretty(want))
+ }
+}
+
+func TestTrimming(t *testing.T) {
+ st, err := NewStore(flags.VtraceFlags{CacheSize: 5})
+ if err != nil {
+ t.Fatalf("Could not create store: %v", err)
+ }
+ traces := makeTraces(10, st)
+
+ compare(t, traceids(traces[5:]...), st.TraceRecords())
+
+ traces = append(traces, id(), id(), id())
+
+ // Starting a span on an existing trace brings it to the front of the queue
+ // and prevents it from being removed when a new trace begins.
+ st.start(&span{trace: traces[5], id: id()})
+ st.ForceCollect(traces[10])
+ compare(t, traceids(traces[10], traces[5], traces[7], traces[8], traces[9]), st.TraceRecords())
+
+ // Finishing a span on one of the traces should bring it back into the stored set.
+ st.finish(&span{trace: traces[7], id: id()})
+ st.ForceCollect(traces[11])
+ compare(t, traceids(traces[10], traces[11], traces[5], traces[7], traces[9]), st.TraceRecords())
+
+ // Annotating a span on one of the traces should bring it back into the stored set.
+ st.annotate(&span{trace: traces[9], id: id()}, "hello")
+ st.ForceCollect(traces[12])
+ compare(t, traceids(traces[10], traces[11], traces[12], traces[7], traces[9]), st.TraceRecords())
+}
+
+func TestRegexp(t *testing.T) {
+ traces := []uniqueid.Id{id(), id(), id()}
+
+ type testcase struct {
+ pattern string
+ results []uniqueid.Id
+ }
+ tests := []testcase{
+ {".*", traces},
+ {"foo.*", traces},
+ {".*bar", traces[1:2]},
+ {".*bang", traces[2:3]},
+ }
+
+ for _, test := range tests {
+ st, err := NewStore(flags.VtraceFlags{
+ CacheSize: 10,
+ CollectRegexp: test.pattern,
+ })
+ if err != nil {
+ t.Fatalf("Could not create store: %v", err)
+ }
+
+ newSpan(traces[0], "foo", traces[0], st)
+ newSpan(traces[1], "foobar", traces[1], st)
+ sp := newSpan(traces[2], "baz", traces[2], st)
+ sp.Annotate("foobang")
+
+ compare(t, traceids(test.results...), st.TraceRecords())
+ }
+}
diff --git a/profiles/internal/vtrace/vtrace.go b/profiles/internal/vtrace/vtrace.go
new file mode 100644
index 0000000..9aca9d7
--- /dev/null
+++ b/profiles/internal/vtrace/vtrace.go
@@ -0,0 +1,175 @@
+// Package vtrace implements the Trace and Span interfaces in v.io/v23/vtrace.
+// We also provide internal utilities for migrating trace information across
+// IPC calls.
+package vtrace
+
+import (
+ "fmt"
+ "time"
+
+ "v.io/v23/context"
+ "v.io/v23/uniqueid"
+ "v.io/v23/vtrace"
+ "v.io/x/lib/vlog"
+
+ "v.io/x/ref/lib/flags"
+)
+
+// A span represents an annotated period of time.
+type span struct {
+ id uniqueid.Id
+ parent uniqueid.Id
+ name string
+ trace uniqueid.Id
+ start time.Time
+ store *Store
+}
+
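+// newSpan creates a span with a freshly generated id, records its start time,
+// and registers it with the given store.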
+func newSpan(parent uniqueid.Id, name string, trace uniqueid.Id, store *Store) *span {
+ id, err := uniqueid.Random()
+ if err != nil {
+ vlog.Errorf("vtrace: Couldn't generate Span ID, debug data may be lost: %v", err)
+ }
+ s := &span{
+ id: id,
+ parent: parent,
+ name: name,
+ trace: trace,
+ start: time.Now(),
+ store: store,
+ }
+ store.start(s)
+ return s
+}
+
+func (s *span) ID() uniqueid.Id { return s.id }
+func (s *span) Parent() uniqueid.Id { return s.parent }
+func (s *span) Name() string { return s.name }
+func (s *span) Trace() uniqueid.Id { return s.trace }
+func (s *span) Annotate(msg string) {
+ s.store.annotate(s, msg)
+}
+func (s *span) Annotatef(format string, a ...interface{}) {
+ s.store.annotate(s, fmt.Sprintf(format, a...))
+}
+func (s *span) Finish() {
+ s.store.finish(s)
+}
+func (s *span) flags() vtrace.TraceFlags {
+ return s.store.flags(s.trace)
+}
+
+// Request generates a vtrace.Request from the active Span.
+func Request(ctx *context.T) vtrace.Request {
+ if span := getSpan(ctx); span != nil {
+ return vtrace.Request{
+ SpanID: span.id,
+ TraceID: span.trace,
+ Flags: span.flags(),
+ }
+ }
+ return vtrace.Request{}
+}
+
+// Response captures the vtrace.Response for the active Span.
+func Response(ctx *context.T) vtrace.Response {
+ if span := getSpan(ctx); span != nil {
+ return vtrace.Response{
+ Flags: span.flags(),
+ Trace: *span.store.TraceRecord(span.trace),
+ }
+ }
+ return vtrace.Response{}
+}
+
+type contextKey int
+
+const (
+ storeKey = contextKey(iota)
+ spanKey
+)
+
+// manager allows you to create new traces and spans and access the
+// vtrace store.
+type manager struct{}
+
+// SetNewTrace creates a new vtrace context that is not the child of any
+// other span. This is useful when starting operations that are
+// disconnected from the activity ctx is performing. For example,
+// this might be used to start background tasks.
+func (m manager) SetNewTrace(ctx *context.T) (*context.T, vtrace.Span) {
+ id, err := uniqueid.Random()
+ if err != nil {
+ vlog.Errorf("vtrace: Couldn't generate Trace ID, debug data may be lost: %v", err)
+ }
+ s := newSpan(id, "", id, getStore(ctx))
+
+ return context.WithValue(ctx, spanKey, s), s
+}
+
+// SetContinuedTrace creates a span that represents a continuation of
+// a trace from a remote server. name is the name of the new span and
+// req contains the parameters needed to connect this span with its
+// trace.
+func (m manager) SetContinuedTrace(ctx *context.T, name string, req vtrace.Request) (*context.T, vtrace.Span) {
+ st := getStore(ctx)
+ if req.Flags&vtrace.CollectInMemory != 0 {
+ st.ForceCollect(req.TraceID)
+ }
+ newSpan := newSpan(req.SpanID, name, req.TraceID, st)
+ return context.WithValue(ctx, spanKey, newSpan), newSpan
+}
+
+// SetNewSpan derives a context with a new Span that can be used to
+// trace and annotate operations across process boundaries.
+func (m manager) SetNewSpan(ctx *context.T, name string) (*context.T, vtrace.Span) {
+ if curSpan := getSpan(ctx); curSpan != nil {
+ if curSpan.store == nil {
+ panic("nil store")
+ }
+ s := newSpan(curSpan.ID(), name, curSpan.trace, curSpan.store)
+ return context.WithValue(ctx, spanKey, s), s
+ }
+
+ vlog.Error("vtrace: Creating a new child span from context with no existing span.")
+ return m.SetNewTrace(ctx)
+}
+
+// GetSpan returns the currently active span.
+func (m manager) GetSpan(ctx *context.T) vtrace.Span {
+ if span := getSpan(ctx); span != nil {
+ return span
+ }
+ return nil
+}
+
+// GetStore returns the current vtrace.Store.
+func (m manager) GetStore(ctx *context.T) vtrace.Store {
+ if store := getStore(ctx); store != nil {
+ return store
+ }
+ return nil
+}
+
+// getSpan returns the internal span type.
+func getSpan(ctx *context.T) *span {
+ span, _ := ctx.Value(spanKey).(*span)
+ return span
+}
+
+// getStore returns the *Store attached to the context.
+func getStore(ctx *context.T) *Store {
+ store, _ := ctx.Value(storeKey).(*Store)
+ return store
+}
+
+// Init initializes vtrace and attaches some state to the context.
+// This should be called by the runtime's initialization function.
+func Init(ctx *context.T, opts flags.VtraceFlags) (*context.T, error) {
+ nctx := vtrace.WithManager(ctx, manager{})
+ store, err := NewStore(opts)
+ if err != nil {
+ return ctx, err
+ }
+ return context.WithValue(nctx, storeKey, store), nil
+}
diff --git a/profiles/internal/vtrace/vtrace_test.go b/profiles/internal/vtrace/vtrace_test.go
new file mode 100644
index 0000000..4ca879c
--- /dev/null
+++ b/profiles/internal/vtrace/vtrace_test.go
@@ -0,0 +1,270 @@
+package vtrace_test
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+
+ "v.io/v23"
+ "v.io/v23/context"
+ "v.io/v23/ipc"
+ "v.io/v23/naming"
+ "v.io/v23/naming/ns"
+ "v.io/v23/security"
+ "v.io/v23/vtrace"
+ "v.io/x/lib/vlog"
+
+ "v.io/x/ref/lib/testutil"
+ _ "v.io/x/ref/profiles"
+ iipc "v.io/x/ref/profiles/internal/ipc"
+ "v.io/x/ref/profiles/internal/ipc/stream"
+ "v.io/x/ref/profiles/internal/ipc/stream/manager"
+ tnaming "v.io/x/ref/profiles/internal/testing/mocks/naming"
+)
+
+func TestNewFromContext(t *testing.T) {
+ c0, shutdown := testutil.InitForTest()
+ defer shutdown()
+ c1, s1 := vtrace.SetNewSpan(c0, "s1")
+ c2, s2 := vtrace.SetNewSpan(c1, "s2")
+ c3, s3 := vtrace.SetNewSpan(c2, "s3")
+ expected := map[*context.T]vtrace.Span{
+ c1: s1,
+ c2: s2,
+ c3: s3,
+ }
+ for ctx, expectedSpan := range expected {
+ if s := vtrace.GetSpan(ctx); s != expectedSpan {
+ t.Errorf("Wrong span for ctx %v. Got %v, want %v", c0, s, expectedSpan)
+ }
+ }
+}
+
+type fakeAuthorizer int
+
+func (fakeAuthorizer) Authorize(security.Call) error {
+ return nil
+}
+
+type testServer struct {
+ sm stream.Manager
+ ns ns.Namespace
+ name string
+ child string
+ stop func() error
+ forceCollect bool
+}
+
+func (c *testServer) Run(ctx ipc.ServerCall) error {
+ if c.forceCollect {
+ vtrace.ForceCollect(ctx.Context())
+ }
+
+ client, err := iipc.InternalNewClient(c.sm, c.ns)
+ if err != nil {
+ vlog.Error(err)
+ return err
+ }
+
+ vtrace.GetSpan(ctx.Context()).Annotate(c.name + "-begin")
+
+ if c.child != "" {
+ var call ipc.ClientCall
+ if call, err = client.StartCall(ctx.Context(), c.child, "Run", []interface{}{}); err != nil {
+ vlog.Error(err)
+ return err
+ }
+ if err := call.Finish(); err != nil {
+ vlog.Error(err)
+ return err
+ }
+ }
+ vtrace.GetSpan(ctx.Context()).Annotate(c.name + "-end")
+
+ return nil
+}
+
+func makeTestServer(ctx *context.T, ns ns.Namespace, name, child string, forceCollect bool) (*testServer, error) {
+ sm := manager.InternalNew(naming.FixedRoutingID(0x111111111))
+ client, err := iipc.InternalNewClient(sm, ns)
+ if err != nil {
+ return nil, err
+ }
+ s, err := iipc.InternalNewServer(ctx, sm, ns, client)
+ if err != nil {
+ return nil, err
+ }
+
+ if _, err := s.Listen(v23.GetListenSpec(ctx)); err != nil {
+ return nil, err
+ }
+
+ c := &testServer{
+ sm: sm,
+ ns: ns,
+ name: name,
+ child: child,
+ stop: s.Stop,
+ forceCollect: forceCollect,
+ }
+
+ if err := s.Serve(name, c, fakeAuthorizer(0)); err != nil {
+ return nil, err
+ }
+
+ return c, nil
+}
+
+func summary(span *vtrace.SpanRecord) string {
+ summary := span.Name
+ if len(span.Annotations) > 0 {
+ msgs := []string{}
+ for _, annotation := range span.Annotations {
+ msgs = append(msgs, annotation.Message)
+ }
+ summary += ": " + strings.Join(msgs, ", ")
+ }
+ return summary
+}
+
+func traceString(trace *vtrace.TraceRecord) string {
+ var b bytes.Buffer
+ vtrace.FormatTrace(&b, trace, nil)
+ return b.String()
+}
+
+func expectSequence(t *testing.T, trace vtrace.TraceRecord, expectedSpans []string) {
+ // It's okay to have additional spans - someone may have inserted
+ // additional spans for more debugging.
+ if got, want := len(trace.Spans), len(expectedSpans); got < want {
+ t.Errorf("Found %d spans, want %d", got, want)
+ }
+
+ spans := map[string]*vtrace.SpanRecord{}
+ summaries := []string{}
+ for i := range trace.Spans {
+ span := &trace.Spans[i]
+
+ // All spans should have a start.
+ if span.Start.IsZero() {
+ t.Errorf("span missing start: %x, %s", span.ID[12:], traceString(&trace))
+ }
+ // All spans except the root should have a valid end.
+ // TODO(mattr): For now I'm also skipping connectFlow and
+ // vc.HandshakeDialedVC spans because the ws endpoints are
+ // currently non-deterministic in terms of whether they fail
+ // before the test ends or not. In the future it will be
+ // configurable whether we listen on ws or not and then we should
+ // adjust the test to not listen and remove this check.
+ if span.Name != "" &&
+ span.Name != "<client>connectFlow" &&
+ span.Name != "vc.HandshakeDialedVC" {
+ if span.End.IsZero() {
+ t.Errorf("span missing end: %x, %s", span.ID[12:], traceString(&trace))
+ } else if !span.Start.Before(span.End) {
+ t.Errorf("span end should be after start: %x, %s", span.ID[12:], traceString(&trace))
+ }
+ }
+
+ summary := summary(span)
+ summaries = append(summaries, summary)
+ spans[summary] = span
+ }
+
+ for i := range expectedSpans {
+ child, ok := spans[expectedSpans[i]]
+ if !ok {
+ t.Errorf("expected span %s not found in %#v", expectedSpans[i], summaries)
+ continue
+ }
+ if i == 0 {
+ continue
+ }
+ parent, ok := spans[expectedSpans[i-1]]
+ if !ok {
+ t.Errorf("expected span %s not found in %#v", expectedSpans[i-1], summaries)
+ continue
+ }
+ if child.Parent != parent.ID {
+ t.Errorf("%v should be a child of %v, but it's not.", child, parent)
+ }
+ }
+}
+
+func runCallChain(t *testing.T, ctx *context.T, force1, force2 bool) {
+ sm := manager.InternalNew(naming.FixedRoutingID(0x555555555))
+ ns := tnaming.NewSimpleNamespace()
+
+ client, err := iipc.InternalNewClient(sm, ns)
+ if err != nil {
+ t.Error(err)
+ }
+ ctx1, _ := vtrace.SetNewTrace(ctx)
+ c1, err := makeTestServer(ctx1, ns, "c1", "c2", force1)
+ if err != nil {
+ t.Fatal("Can't start server:", err)
+ }
+ defer c1.stop()
+
+ ctx2, _ := vtrace.SetNewTrace(ctx)
+ c2, err := makeTestServer(ctx2, ns, "c2", "", force2)
+ if err != nil {
+ t.Fatal("Can't start server:", err)
+ }
+ defer c2.stop()
+
+ call, err := client.StartCall(ctx, "c1", "Run", []interface{}{})
+ if err != nil {
+ t.Fatal("can't call: ", err)
+ }
+ if err := call.Finish(); err != nil {
+ t.Error(err)
+ }
+}
+
+// TestTraceAcrossRPCs tests that trace information propagates along an
+// RPC call chain without user intervention.
+func TestTraceAcrossRPCs(t *testing.T) {
+ ctx, shutdown := testutil.InitForTest()
+ defer shutdown()
+ ctx, span := vtrace.SetNewSpan(ctx, "")
+ vtrace.ForceCollect(ctx)
+ span.Annotate("c0-begin")
+
+ runCallChain(t, ctx, false, false)
+
+ span.Annotate("c0-end")
+
+ expectedSpans := []string{
+ ": c0-begin, c0-end",
+ "<client>\"c1\".Run",
+ "\"\".Run: c1-begin, c1-end",
+ "<client>\"c2\".Run",
+ "\"\".Run: c2-begin, c2-end",
+ }
+ record := vtrace.GetStore(ctx).TraceRecord(span.Trace())
+ expectSequence(t, *record, expectedSpans)
+}
+
+// TestTraceAcrossRPCsLateForce tests that trace information propagates along
+// an RPC call chain when tracing is initiated by someone deep in the call chain.
+func TestTraceAcrossRPCsLateForce(t *testing.T) {
+ ctx, shutdown := testutil.InitForTest()
+ defer shutdown()
+ ctx, span := vtrace.SetNewSpan(ctx, "")
+ span.Annotate("c0-begin")
+
+ runCallChain(t, ctx, false, true)
+
+ span.Annotate("c0-end")
+
+ expectedSpans := []string{
+ ": c0-end",
+ "<client>\"c1\".Run",
+ "\"\".Run: c1-end",
+ "<client>\"c2\".Run",
+ "\"\".Run: c2-begin, c2-end",
+ }
+ record := vtrace.GetStore(ctx).TraceRecord(span.Trace())
+ expectSequence(t, *record, expectedSpans)
+}
diff --git a/profiles/proxy/debug.go b/profiles/proxy/debug.go
new file mode 100644
index 0000000..deeb53d
--- /dev/null
+++ b/profiles/proxy/debug.go
@@ -0,0 +1,37 @@
+package proxy
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// DebugString dumps out the routing table at the proxy in text format.
+// The format is meant for debugging purposes and may change without notice.
+func (p *Proxy) DebugString() string {
+ var buf bytes.Buffer
+ servers := p.servers.List()
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+ fmt.Fprintf(&buf, "Proxy with endpoint: %q. #Processes:%d #Servers:%d\n", p.Endpoint(), len(p.processes), len(servers))
+ fmt.Fprintf(&buf, "=========\n")
+ fmt.Fprintf(&buf, "PROCESSES\n")
+ fmt.Fprintf(&buf, "=========\n")
+ index := 1
+ for process := range p.processes {
+ fmt.Fprintf(&buf, "(%d) - %v", index, process)
+ index++
+ process.mu.RLock()
+ fmt.Fprintf(&buf, " NextVCI:%d #Severs:%d\n", process.nextVCI, len(process.servers))
+ for vci, d := range process.routingTable {
+ fmt.Fprintf(&buf, " VCI %4d --> VCI %4d @ %s\n", vci, d.VCI, d.Process)
+ }
+ process.mu.RUnlock()
+ }
+ fmt.Fprintf(&buf, "=======\n")
+ fmt.Fprintf(&buf, "SERVERS\n")
+ fmt.Fprintf(&buf, "=======\n")
+ for ix, is := range servers {
+ fmt.Fprintf(&buf, "(%d) %v\n", ix+1, is)
+ }
+ return buf.String()
+}
diff --git a/profiles/proxy/doc.go b/profiles/proxy/doc.go
new file mode 100644
index 0000000..30967ec
--- /dev/null
+++ b/profiles/proxy/doc.go
@@ -0,0 +1,46 @@
+// Package proxy implements a proxy for the stream layer.
+//
+// Each process in veyron is uniquely identified by a routing id
+// (naming.RoutingID). A proxy routes messages
+// (v.io/x/ref/profiles/internal/ipc/stream/message) it receives on a network connection
+// (net.Conn) to the network connection on which the destination process
+// (identified by the routing id) is listening.
+//
+// Processes behind a NAT can use the proxy to export their services outside
+// the NAT.
+// Sample usage:
+// var proxyEP naming.Endpoint // Endpoint of the proxy server
+// var manager stream.Manager // Manager used to create and listen for VCs and Flows.
+// ln, ep, err := manager.Listen(proxyEP.Network(), proxyEP.String())
+// // Now ln.Accept() will return Flows initiated by remote processes through the proxy.
+//
+// The proxy implemented in this package operates as follows:
+// - When an OpenVC message is received at the proxy, the RoutingID(R)
+// of the source endpoint is associated with the net.Conn the message
+// was received on.
+// - This association is used to route messages destined for R to the
+// corresponding net.Conn
+// - Servers can "listen" on the proxy's address by establishing a VC to the
+// proxy. Once the VC is established, messages received at the proxy destined
+// for the RoutingID of the server are forwarded to the net.Conn between the
+// server and the proxy.
+//
+// For example, consider the following three processes:
+// - Proxy(P) with routing id Rp
+// - A server (S) wishing to listen on the proxy's address with routing id Rs
+// - A client (C) wishing to connect to S through the proxy with routing id Rc.
+//
+// Here is a valid sequence of events that makes that possible:
+// (1) S establishes a VC with P over a net.Conn c1
+// As a result, P knows that any messages intended for Rs should be
+// forwarded on c1
+// (2) C connects to P over a net.Conn c2 and attempts to establish a VC with S
+// using an OpenVC message.
+// The source endpoint of this message contains the routing id Rc while the
+// destination endpoint contains the routing id Rs.
+// (3) The proxy sees this message and:
+// (a) Forwards the message over c1 (since Rs is mapped to c1)
+// (b) Updates its routing table so that messages intended for Rc are forwarded over c2
+// (4) Any messages from S intended for the client received on c1 are forwarded
+// by the proxy over c2.
+package proxy
diff --git a/profiles/proxy/protocol.vdl b/profiles/proxy/protocol.vdl
new file mode 100644
index 0000000..d4ac8cb
--- /dev/null
+++ b/profiles/proxy/protocol.vdl
@@ -0,0 +1,25 @@
+package proxy
+
+// The proxy protocol is:
+// (1) Server establishes a VC to the proxy to register its routing id and authenticate.
+// (2) The server opens a flow and sends a "Request" message and waits for a "Response"
+// message.
+// (3) This flow is then kept alive with no more data read/written.
+// Closure of this flow indicates that proxying has (or should be) stopped.
+// (4) The proxy immediately closes any other flows on the VC.
+
+// Request is the message sent by a server to request that the proxy route
+// traffic intended for the server's RoutingID to the network connection
+// between the server and the proxy.
+type Request struct {
+}
+
+// Response is sent by the proxy to the server after processing Request.
+type Response struct {
+ // Error is a description of why the proxy refused to proxy the server.
+ // A nil error indicates that the proxy will route traffic to the server.
+ Error error
+ // Endpoint is the string representation of an endpoint that can be
+ // used to communicate with the server through the proxy.
+ Endpoint string
+}
diff --git a/profiles/proxy/protocol.vdl.go b/profiles/proxy/protocol.vdl.go
new file mode 100644
index 0000000..3b9cebc
--- /dev/null
+++ b/profiles/proxy/protocol.vdl.go
@@ -0,0 +1,40 @@
+// This file was auto-generated by the veyron vdl tool.
+// Source: protocol.vdl
+
+package proxy
+
+import (
+ // VDL system imports
+ "v.io/v23/vdl"
+)
+
+// Request is the message sent by a server to request that the proxy route
+// traffic intended for the server's RoutingID to the network connection
+// between the server and the proxy.
+type Request struct {
+}
+
+func (Request) __VDLReflect(struct {
+ Name string "v.io/x/ref/profiles/proxy.Request"
+}) {
+}
+
+// Response is sent by the proxy to the server after processing Request.
+type Response struct {
+ // Error is a description of why the proxy refused to proxy the server.
+ // A nil error indicates that the proxy will route traffic to the server.
+ Error error
+ // Endpoint is the string representation of an endpoint that can be
+ // used to communicate with the server through the proxy.
+ Endpoint string
+}
+
+func (Response) __VDLReflect(struct {
+ Name string "v.io/x/ref/profiles/proxy.Response"
+}) {
+}
+
+func init() {
+ vdl.Register((*Request)(nil))
+ vdl.Register((*Response)(nil))
+}
diff --git a/profiles/proxy/proxy.go b/profiles/proxy/proxy.go
new file mode 100644
index 0000000..79078a6
--- /dev/null
+++ b/profiles/proxy/proxy.go
@@ -0,0 +1,710 @@
+package proxy
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "sync"
+
+ "v.io/v23/ipc"
+ "v.io/v23/naming"
+ "v.io/v23/security"
+ "v.io/v23/verror"
+ "v.io/v23/vom"
+ "v.io/x/lib/vlog"
+
+ "v.io/x/ref/lib/upcqueue"
+ "v.io/x/ref/profiles/internal/ipc/stream/crypto"
+ "v.io/x/ref/profiles/internal/ipc/stream/id"
+ "v.io/x/ref/profiles/internal/ipc/stream/message"
+ "v.io/x/ref/profiles/internal/ipc/stream/vc"
+ "v.io/x/ref/profiles/internal/ipc/stream/vif"
+ "v.io/x/ref/profiles/internal/ipc/version"
+ "v.io/x/ref/profiles/internal/lib/bqueue"
+ "v.io/x/ref/profiles/internal/lib/bqueue/drrqueue"
+ "v.io/x/ref/profiles/internal/lib/iobuf"
+
+ "v.io/x/ref/lib/stats"
+)
+
+const pkgPath = "v.io/x/ref/profiles/proxy"
+
+var (
+ errNoRoutingTableEntry = errors.New("routing table has no entry for the VC")
+ errProcessVanished = errors.New("remote process vanished")
+ errDuplicateOpenVC = errors.New("duplicate OpenVC request")
+
+ errNoDecoder = verror.Register(pkgPath+".errNoDecoder", verror.NoRetry, "{1:}{2:} proxy: failed to create Decoder{:_}")
+ errNoRequest = verror.Register(pkgPath+".errNoRequest", verror.NoRetry, "{1:}{2:} proxy: unable to read Request{:_}")
+)
+
+// Proxy routes virtual circuit (VC) traffic between multiple underlying
+// network connections.
+type Proxy struct {
+ ln net.Listener
+ rid naming.RoutingID
+ principal security.Principal
+ mu sync.RWMutex
+ servers *servermap
+ processes map[*process]struct{}
+ pubAddress string
+ statsName string
+}
+
+// process encapsulates the physical network connection and the routing table
+// associated with the process at the other end of the network connection.
+type process struct {
+ proxy *Proxy
+ conn net.Conn
+ pool *iobuf.Pool
+ reader *iobuf.Reader
+ isSetup bool
+ ctrlCipher crypto.ControlCipher
+ queue *upcqueue.T
+ mu sync.RWMutex
+ routingTable map[id.VC]*destination
+ nextVCI id.VC
+ servers map[id.VC]*vc.VC // servers wishing to be proxied create a VC that terminates at the proxy
+ bq bqueue.T // Flow control for messages sent on behalf of servers.
+}
+
+// destination is an entry in the routing table of a process.
+type destination struct {
+ VCI id.VC
+ Process *process
+}
+
+// server encapsulates information stored about a server exporting itself via the proxy.
+type server struct {
+ Process *process
+ VC *vc.VC
+}
+
+func (s *server) RoutingID() naming.RoutingID { return s.VC.RemoteAddr().RoutingID() }
+
+func (s *server) Close(err error) {
+ if vc := s.Process.RemoveServerVC(s.VC.VCI()); vc != nil {
+ if err != nil {
+ vc.Close(err.Error())
+ } else {
+ vc.Close("server closed by proxy")
+ }
+ s.Process.SendCloseVC(s.VC.VCI(), err)
+ }
+}
+
+func (s *server) String() string {
+ return fmt.Sprintf("RoutingID %v on process %v (VCI:%v Blessings:%v)", s.RoutingID(), s.Process, s.VC.VCI(), s.VC.RemoteBlessings())
+}
+
+// servermap is a concurrent-access safe map from the RoutingID of a server exporting itself
+// through the proxy to the underlying network connection that the server is found on.
+type servermap struct {
+ mu sync.Mutex
+ m map[naming.RoutingID]*server
+}
+
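+// Add registers a server with the map, failing if a server with the same
+// routing id is already being proxied.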
+func (m *servermap) Add(server *server) error {
+ key := server.RoutingID()
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ if m.m[key] != nil {
+ return fmt.Errorf("server with routing id %v is already being proxied", key)
+ }
+ m.m[key] = server
+ proxyLog().Infof("Started proxying server: %v", server)
+ return nil
+}
+
+func (m *servermap) Remove(server *server) {
+ key := server.RoutingID()
+ m.mu.Lock()
+ if m.m[key] != nil {
+ delete(m.m, key)
+ proxyLog().Infof("Stopped proxying server: %v", server)
+ }
+ m.mu.Unlock()
+}
+
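+// Process returns the process over which the server with the given routing id
+// is being proxied, or nil if no such server is registered.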
+func (m *servermap) Process(rid naming.RoutingID) *process {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ if s := m.m[rid]; s != nil {
+ return s.Process
+ }
+ return nil
+}
+
+func (m *servermap) List() []string {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ ret := make([]string, 0, len(m.m))
+ for _, s := range m.m {
+ ret = append(ret, s.String())
+ }
+ return ret
+}
+
+// New creates a new Proxy that listens for network connections on the provided
+// (network, address) pair and routes VC traffic between accepted connections.
+func New(rid naming.RoutingID, principal security.Principal, network, address, pubAddress string) (*Proxy, error) {
+ _, listenFn, _ := ipc.RegisteredProtocol(network)
+ if listenFn == nil {
+ return nil, fmt.Errorf("unknown network %s", network)
+ }
+ ln, err := listenFn(network, address)
+ if err != nil {
+ return nil, fmt.Errorf("net.Listen(%q, %q) failed: %v", network, address, err)
+ }
+ if len(pubAddress) == 0 {
+ pubAddress = ln.Addr().String()
+ }
+ proxy := &Proxy{
+ ln: ln,
+ rid: rid,
+ servers: &servermap{m: make(map[naming.RoutingID]*server)},
+ processes: make(map[*process]struct{}),
+ pubAddress: pubAddress,
+ principal: principal,
+ statsName: naming.Join("ipc", "proxy", "routing-id", rid.String(), "debug"),
+ }
+ stats.NewStringFunc(proxy.statsName, proxy.DebugString)
+
+ go proxy.listenLoop()
+ return proxy, nil
+}
+
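+// listenLoop accepts network connections on the proxy's listener and hands
+// each one off to acceptProcess until the listener is closed.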
+func (p *Proxy) listenLoop() {
+ proxyLog().Infof("Proxy listening on (%q, %q): %v", p.ln.Addr().Network(), p.ln.Addr(), p.Endpoint())
+ for {
+ conn, err := p.ln.Accept()
+ if err != nil {
+ proxyLog().Infof("Exiting listenLoop of proxy %q: %v", p.Endpoint(), err)
+ return
+ }
+ go p.acceptProcess(conn)
+ }
+}
+
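+// acceptProcess sets up the bookkeeping for a newly accepted network
+// connection and starts the goroutines that service it.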
+func (p *Proxy) acceptProcess(conn net.Conn) {
+ pool := iobuf.NewPool(0)
+ process := &process{
+ proxy: p,
+ conn: conn,
+ pool: pool,
+ reader: iobuf.NewReader(pool, conn),
+ ctrlCipher: &crypto.NullControlCipher{},
+ queue: upcqueue.New(),
+ routingTable: make(map[id.VC]*destination),
+ servers: make(map[id.VC]*vc.VC),
+ bq: drrqueue.New(vc.MaxPayloadSizeBytes),
+ }
+
+ p.mu.Lock()
+ p.processes[process] = struct{}{}
+ p.mu.Unlock()
+
+ go process.serverVCsLoop()
+ go process.writeLoop()
+ go process.readLoop()
+
+ processLog().Infof("Started process %v", process)
+}
+
+func (p *Proxy) removeProcess(process *process) {
+ p.mu.Lock()
+ delete(p.processes, process)
+ p.mu.Unlock()
+}
+
+func (p *Proxy) runServer(server *server, c <-chan vc.HandshakeResult) {
+ hr := <-c
+ if hr.Error != nil {
+ server.Close(hr.Error)
+ return
+ }
+ // See comments in protocol.vdl for the protocol between servers and the proxy.
+ conn, err := hr.Listener.Accept()
+ if err != nil {
+ server.Close(errors.New("failed to accept health check flow"))
+ return
+ }
+ defer server.Close(nil)
+ server.Process.InitVCI(server.VC.VCI())
+ var request Request
+ var response Response
+ dec, err := vom.NewDecoder(conn)
+ if err != nil {
+ response.Error = verror.New(errNoDecoder, nil, err)
+ } else if err := dec.Decode(&request); err != nil {
+ response.Error = verror.New(errNoRequest, nil, err)
+ } else if err := p.servers.Add(server); err != nil {
+ response.Error = verror.Convert(verror.ErrUnknown, nil, err)
+ } else {
+ defer p.servers.Remove(server)
+ ep, err := version.ProxiedEndpoint(server.VC.RemoteAddr().RoutingID(), p.Endpoint())
+ if err != nil {
+ response.Error = verror.Convert(verror.ErrInternal, nil, err)
+ }
+ if ep != nil {
+ response.Endpoint = ep.String()
+ }
+ }
+ enc, err := vom.NewEncoder(conn)
+ if err != nil {
+ proxyLog().Infof("Failed to create Encoder for server %v: %v", server, err)
+ server.Close(err)
+ return
+ }
+ if err := enc.Encode(response); err != nil {
+ proxyLog().Infof("Failed to encode response %#v for server %v", response, server)
+ server.Close(err)
+ return
+ }
+ // Reject all other flows
+ go func() {
+ for {
+ flow, err := hr.Listener.Accept()
+ if err != nil {
+ return
+ }
+ flow.Close()
+ }
+ }()
+ // Wait for this flow to be closed.
+ <-conn.Closed()
+}
+
+func (p *Proxy) routeCounters(process *process, counters message.Counters) {
+ // Since each VC can be routed to a different process, split up the
+ // Counters into one message per VC.
+	// Ideally, this would be split into one message per process (rather
+	// than per flow); that optimization is left as an exercise for the
+	// interested reader.
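+	//
+	// For example, if the incoming counters carry entries for two VCs that
+	// are routed to different processes, each entry is re-keyed to the
+	// destination VCI and forwarded in its own AddReceiveBuffers message.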
+ for cid, bytes := range counters {
+ srcVCI := cid.VCI()
+ if vc := process.ServerVC(srcVCI); vc != nil {
+ vc.ReleaseCounters(cid.Flow(), bytes)
+ continue
+ }
+ if d := process.Route(srcVCI); d != nil {
+ c := message.NewCounters()
+ c.Add(d.VCI, cid.Flow(), bytes)
+ if err := d.Process.queue.Put(&message.AddReceiveBuffers{Counters: c}); err != nil {
+ process.RemoveRoute(srcVCI)
+ process.SendCloseVC(srcVCI, fmt.Errorf("proxy failed to forward receive buffers: %v", err))
+ }
+ }
+ }
+}
+
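+// startRoutingVC installs symmetric routing table entries so that messages
+// arriving on srcVCI at srcProcess are forwarded to dstVCI at dstProcess and
+// vice versa.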
+func startRoutingVC(srcVCI, dstVCI id.VC, srcProcess, dstProcess *process) {
+ dstProcess.AddRoute(dstVCI, &destination{VCI: srcVCI, Process: srcProcess})
+ srcProcess.AddRoute(srcVCI, &destination{VCI: dstVCI, Process: dstProcess})
+ vcLog().Infof("Routing (VCI %d @ [%s]) <-> (VCI %d @ [%s])", srcVCI, srcProcess, dstVCI, dstProcess)
+}
+
+// Endpoint returns the endpoint of the proxy service. By Dialing a VC to this
+// endpoint, processes can have their services exported through the proxy.
+func (p *Proxy) Endpoint() naming.Endpoint {
+ ep := version.Endpoint(p.ln.Addr().Network(), p.pubAddress, p.rid)
+ return ep
+}
+
+// Shutdown stops the proxy service, closing all network connections.
+func (p *Proxy) Shutdown() {
+ stats.Delete(p.statsName)
+ p.ln.Close()
+ p.mu.Lock()
+ processes := p.processes
+ p.processes = nil
+ p.mu.Unlock()
+	for process := range processes {
+ process.Close()
+ }
+}
+
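+// serverVCsLoop drains the buffer queue of data written by VCs that terminate
+// at the proxy and hands it to queueDataMessages, which encrypts it and
+// enqueues the resulting Data messages for the write loop.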
+func (p *process) serverVCsLoop() {
+ for {
+ w, bufs, err := p.bq.Get(nil)
+ if err != nil {
+ return
+ }
+ vci, fid := unpackIDs(w.ID())
+ if vc := p.ServerVC(vci); vc != nil {
+ queueDataMessages(bufs, vc, fid, p.queue)
+ if len(bufs) == 0 {
+ m := &message.Data{VCI: vci, Flow: fid}
+ m.SetClose()
+ p.queue.Put(m)
+ w.Shutdown(true)
+ }
+ continue
+ }
+ releaseBufs(0, bufs)
+ }
+}
+
+func releaseBufs(start int, bufs []*iobuf.Slice) {
+ for _, buf := range bufs[start:] {
+ buf.Release()
+ }
+}
+
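+// queueDataMessages encrypts each buffer for the given flow and enqueues the
+// resulting Data messages on q; if encryption or enqueueing fails, the
+// remaining buffers are released.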
+func queueDataMessages(bufs []*iobuf.Slice, vc *vc.VC, fid id.Flow, q *upcqueue.T) {
+ for ix, b := range bufs {
+ m := &message.Data{VCI: vc.VCI(), Flow: fid}
+ var err error
+ if m.Payload, err = vc.Encrypt(fid, b); err != nil {
+ msgLog().Infof("vc.Encrypt failed. VC:%v Flow:%v Error:%v", vc, fid, err)
+ releaseBufs(ix+1, bufs)
+ return
+ }
+ if err = q.Put(m); err != nil {
+ msgLog().Infof("Failed to enqueue data message %v: %v", m, err)
+ m.Release()
+ releaseBufs(ix+1, bufs)
+ return
+ }
+ }
+}
+
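+// writeLoop drains the process' outgoing message queue and writes each
+// message to the underlying network connection using the control cipher
+// negotiated for this process.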
+func (p *process) writeLoop() {
+ defer processLog().Infof("Exited writeLoop for %v", p)
+ defer p.Close()
+
+ for {
+ item, err := p.queue.Get(nil)
+ if err != nil {
+ if err != upcqueue.ErrQueueIsClosed {
+ processLog().Infof("upcqueue.Get failed on %v: %v", p, err)
+ }
+ return
+ }
+ if err = message.WriteTo(p.conn, item.(message.T), p.ctrlCipher); err != nil {
+ processLog().Infof("message.WriteTo on %v failed: %v", p, err)
+ return
+ }
+ }
+}
+
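+// readLoop reads messages from the network connection and either dispatches
+// them to a VC that terminates at the proxy or forwards them to the process
+// that the routing table says they are destined for.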
+func (p *process) readLoop() {
+ defer processLog().Infof("Exited readLoop for %v", p)
+ defer p.Close()
+
+ for {
+ msg, err := message.ReadFrom(p.reader, p.ctrlCipher)
+ if err != nil {
+ processLog().Infof("Read on %v failed: %v", p, err)
+ return
+ }
+ msgLog().Infof("Received msg: %T = %v", msg, msg)
+ switch m := msg.(type) {
+ case *message.Data:
+ if vc := p.ServerVC(m.VCI); vc != nil {
+ if err := vc.DispatchPayload(m.Flow, m.Payload); err != nil {
+ processLog().Infof("Ignoring data message %v from process %v: %v", m, p, err)
+ }
+ if m.Close() {
+ vc.ShutdownFlow(m.Flow)
+ }
+ break
+ }
+ srcVCI := m.VCI
+ if d := p.Route(srcVCI); d != nil {
+ m.VCI = d.VCI
+ if err := d.Process.queue.Put(m); err != nil {
+ m.Release()
+ p.RemoveRoute(srcVCI)
+ p.SendCloseVC(srcVCI, fmt.Errorf("proxy failed to forward data message: %v", err))
+ }
+ break
+ }
+ p.SendCloseVC(srcVCI, errNoRoutingTableEntry)
+ case *message.OpenFlow:
+ if vc := p.ServerVC(m.VCI); vc != nil {
+ if err := vc.AcceptFlow(m.Flow); err != nil {
+ processLog().Infof("OpenFlow %+v on process %v failed: %v", m, p, err)
+ cm := &message.Data{VCI: m.VCI, Flow: m.Flow}
+ cm.SetClose()
+ p.queue.Put(cm)
+ }
+ vc.ReleaseCounters(m.Flow, m.InitialCounters)
+ break
+ }
+ srcVCI := m.VCI
+ if d := p.Route(srcVCI); d != nil {
+ m.VCI = d.VCI
+ if err := d.Process.queue.Put(m); err != nil {
+ p.RemoveRoute(srcVCI)
+ p.SendCloseVC(srcVCI, fmt.Errorf("proxy failed to forward open flow message: %v", err))
+ }
+ break
+ }
+ p.SendCloseVC(srcVCI, errNoRoutingTableEntry)
+ case *message.CloseVC:
+ if vc := p.RemoveServerVC(m.VCI); vc != nil {
+ vc.Close(m.Error)
+ break
+ }
+ srcVCI := m.VCI
+ if d := p.Route(srcVCI); d != nil {
+ m.VCI = d.VCI
+ d.Process.queue.Put(m)
+ d.Process.RemoveRoute(d.VCI)
+ }
+ p.RemoveRoute(srcVCI)
+ case *message.AddReceiveBuffers:
+ p.proxy.routeCounters(p, m.Counters)
+ case *message.SetupVC:
+ dstrid := m.RemoteEndpoint.RoutingID()
+ if naming.Compare(dstrid, p.proxy.rid) || naming.Compare(dstrid, naming.NullRoutingID) {
+ // VC that terminates at the proxy.
+ // TODO(ashankar,mattr): Implement this!
+ p.SendCloseVC(m.VCI, fmt.Errorf("proxy support for SetupVC not implemented yet"))
+ p.proxy.routeCounters(p, m.Counters)
+ break
+ }
+ dstprocess := p.proxy.servers.Process(dstrid)
+ if dstprocess == nil {
+ p.SendCloseVC(m.VCI, fmt.Errorf("no server with routing id %v is being proxied", dstrid))
+ p.proxy.routeCounters(p, m.Counters)
+ break
+ }
+ srcVCI := m.VCI
+ d := p.Route(srcVCI)
+ if d == nil {
+				// SetupVC involves two messages: one sent by
+				// the initiator and one by the acceptor. The
+				// routing table gets set up on the first
+				// message, so if there is no routing table
+				// entry yet, set one up here.
+ // If d != nil, then this SetupVC message is
+ // likely the one sent by the acceptor.
+ dstVCI := dstprocess.AllocVCI()
+ startRoutingVC(srcVCI, dstVCI, p, dstprocess)
+ if d = p.Route(srcVCI); d == nil {
+ p.SendCloseVC(srcVCI, fmt.Errorf("server with routing id %v vanished", dstrid))
+ p.proxy.routeCounters(p, m.Counters)
+ break
+ }
+ }
+ // Forward the SetupVC message.
+ // Typically, a SetupVC message is accompanied with
+ // Counters for the new VC. Keep that in the forwarded
+ // message and route the remaining counters separately.
+ counters := m.Counters
+ m.Counters = message.NewCounters()
+ dstVCI := d.VCI
+ for cid, bytes := range counters {
+ if cid.VCI() == srcVCI {
+ m.Counters.Add(dstVCI, cid.Flow(), bytes)
+ delete(counters, cid)
+ }
+ }
+ m.VCI = dstVCI
+ dstprocess.queue.Put(m)
+ p.proxy.routeCounters(p, counters)
+
+ case *message.OpenVC:
+ dstrid := m.DstEndpoint.RoutingID()
+ if naming.Compare(dstrid, p.proxy.rid) || naming.Compare(dstrid, naming.NullRoutingID) {
+ // VC that terminates at the proxy.
+ // See protocol.vdl for details on the protocol between the server and the proxy.
+ vcObj := p.NewServerVC(m)
+			// Route counters after creating the VC so that counters
+			// destined for this VC are not lost.
+ p.proxy.routeCounters(p, m.Counters)
+ if vcObj != nil {
+ server := &server{Process: p, VC: vcObj}
+ go p.proxy.runServer(server, vcObj.HandshakeAcceptedVC(vc.LocalPrincipal{p.proxy.principal}))
+ }
+ break
+ }
+ dstprocess := p.proxy.servers.Process(dstrid)
+ if dstprocess == nil {
+ p.SendCloseVC(m.VCI, fmt.Errorf("no server with routing id %v is being proxied", dstrid))
+ p.proxy.routeCounters(p, m.Counters)
+ break
+ }
+ srcVCI := m.VCI
+ dstVCI := dstprocess.AllocVCI()
+ startRoutingVC(srcVCI, dstVCI, p, dstprocess)
+ // Forward the OpenVC message.
+ // Typically, an OpenVC message is accompanied with Counters for the new VC.
+ // Keep that in the forwarded message and route the remaining counters separately.
+ counters := m.Counters
+ m.Counters = message.NewCounters()
+ for cid, bytes := range counters {
+ if cid.VCI() == srcVCI {
+ m.Counters.Add(dstVCI, cid.Flow(), bytes)
+ delete(counters, cid)
+ }
+ }
+ m.VCI = dstVCI
+ dstprocess.queue.Put(m)
+ p.proxy.routeCounters(p, counters)
+ case *message.HopSetup:
+ // Set up the hop. This takes over the process during negotiation.
+ if p.isSetup {
+ // Already performed authentication. We don't do it again.
+ processLog().Infof("Process %v is already setup", p)
+ return
+ }
+ var blessings security.Blessings
+ if p.proxy.principal != nil {
+ blessings = p.proxy.principal.BlessingStore().Default()
+ }
+ c, err := vif.AuthenticateAsServer(p.conn, p.reader, nil, p.proxy.principal, blessings, nil, m)
+ if err != nil {
+ processLog().Infof("Process %v failed to authenticate: %s", p, err)
+ return
+ }
+ p.ctrlCipher = c
+ p.isSetup = true
+ default:
+ processLog().Infof("Closing %v because of unrecognized message %T", p, m)
+ return
+ }
+ }
+}
+
+func (p *process) String() string {
+ r := p.conn.RemoteAddr()
+ return fmt.Sprintf("(%s, %s)", r.Network(), r)
+}
+func (p *process) Route(vci id.VC) *destination {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+ return p.routingTable[vci]
+}
+func (p *process) AddRoute(vci id.VC, d *destination) {
+ p.mu.Lock()
+ p.routingTable[vci] = d
+ p.mu.Unlock()
+}
+func (p *process) InitVCI(vci id.VC) {
+ p.mu.Lock()
+ if p.nextVCI <= vci {
+ p.nextVCI = vci + 1
+ }
+ p.mu.Unlock()
+}
+func (p *process) AllocVCI() id.VC {
+ p.mu.Lock()
+ ret := p.nextVCI
+ p.nextVCI += 2
+ p.mu.Unlock()
+ return ret
+}
+func (p *process) RemoveRoute(vci id.VC) {
+ p.mu.Lock()
+ delete(p.routingTable, vci)
+ p.mu.Unlock()
+}
+func (p *process) SendCloseVC(vci id.VC, err error) {
+ var estr string
+ if err != nil {
+ estr = err.Error()
+ }
+ p.queue.Put(&message.CloseVC{VCI: vci, Error: estr})
+}
+
+func (p *process) Close() {
+ p.mu.Lock()
+ if p.routingTable == nil {
+ p.mu.Unlock()
+ return
+ }
+ rt := p.routingTable
+ p.routingTable = nil
+ for _, vc := range p.servers {
+ vc.Close("net.Conn is closing")
+ }
+ p.mu.Unlock()
+ for _, d := range rt {
+ d.Process.SendCloseVC(d.VCI, errProcessVanished)
+ }
+ p.bq.Close()
+ p.queue.Close()
+ p.conn.Close()
+
+ p.proxy.removeProcess(p)
+}
+
+func (p *process) ServerVC(vci id.VC) *vc.VC {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ return p.servers[vci]
+}
+
+func (p *process) NewServerVC(m *message.OpenVC) *vc.VC {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if vc := p.servers[m.VCI]; vc != nil {
+ vc.Close("duplicate OpenVC request")
+ return nil
+ }
+ version, err := version.CommonVersion(m.DstEndpoint, m.SrcEndpoint)
+ if err != nil {
+ p.SendCloseVC(m.VCI, fmt.Errorf("incompatible IPC protocol versions: %v", err))
+ return nil
+ }
+ vc := vc.InternalNew(vc.Params{
+ VCI: m.VCI,
+ LocalEP: m.DstEndpoint,
+ RemoteEP: m.SrcEndpoint,
+ Pool: p.pool,
+ ReserveBytes: message.HeaderSizeBytes,
+ Helper: p,
+ Version: version,
+ })
+ p.servers[m.VCI] = vc
+ proxyLog().Infof("Registered VC %v from server on process %v", vc, p)
+ return vc
+}
+
+func (p *process) RemoveServerVC(vci id.VC) *vc.VC {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if vc := p.servers[vci]; vc != nil {
+ delete(p.servers, vci)
+ proxyLog().Infof("Unregistered server VC %v from process %v", vc, p)
+ return vc
+ }
+ return nil
+}
+
+// Make process implement vc.Helper
+func (p *process) NotifyOfNewFlow(vci id.VC, fid id.Flow, bytes uint) {
+ msg := &message.OpenFlow{VCI: vci, Flow: fid, InitialCounters: uint32(bytes)}
+ if err := p.queue.Put(msg); err != nil {
+ processLog().Infof("Failed to send OpenFlow(%+v) on process %v: %v", msg, p, err)
+ }
+}
+
+func (p *process) AddReceiveBuffers(vci id.VC, fid id.Flow, bytes uint) {
+ if bytes == 0 {
+ return
+ }
+ msg := &message.AddReceiveBuffers{Counters: message.NewCounters()}
+ msg.Counters.Add(vci, fid, uint32(bytes))
+ if err := p.queue.Put(msg); err != nil {
+ processLog().Infof("Failed to send AddReceiveBuffers(%+v) on process %v: %v", msg, p, err)
+ }
+}
+
+func (p *process) NewWriter(vci id.VC, fid id.Flow) (bqueue.Writer, error) {
+ return p.bq.NewWriter(packIDs(vci, fid), 0, vc.DefaultBytesBufferedPerFlow)
+}
+
+// Convenience functions to assist with the logging convention.
+func proxyLog() vlog.InfoLog { return vlog.VI(1) }
+func processLog() vlog.InfoLog { return vlog.VI(2) }
+func vcLog() vlog.InfoLog { return vlog.VI(3) }
+func msgLog() vlog.InfoLog { return vlog.VI(4) }
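+// packIDs and unpackIDs map a (VC, Flow) pair to and from a bqueue.ID by
+// reusing the message.CounterID encoding, giving each flow its own bqueue
+// writer identity.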
+func packIDs(vci id.VC, fid id.Flow) bqueue.ID {
+ return bqueue.ID(message.MakeCounterID(vci, fid))
+}
+func unpackIDs(b bqueue.ID) (id.VC, id.Flow) {
+ cid := message.CounterID(b)
+ return cid.VCI(), cid.Flow()
+}
diff --git a/profiles/proxy/proxy_test.go b/profiles/proxy/proxy_test.go
new file mode 100644
index 0000000..454fad3
--- /dev/null
+++ b/profiles/proxy/proxy_test.go
@@ -0,0 +1,276 @@
+package proxy_test
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "testing"
+
+ "v.io/v23/naming"
+ "v.io/x/ref/profiles/internal/ipc/stream"
+
+ tsecurity "v.io/x/ref/lib/testutil/security"
+ _ "v.io/x/ref/profiles"
+ "v.io/x/ref/profiles/internal/ipc/stream/manager"
+ "v.io/x/ref/profiles/internal/ipc/stream/vc"
+ "v.io/x/ref/profiles/proxy"
+)
+
+//go:generate v23 test generate
+
+func TestProxy(t *testing.T) {
+ proxy, err := proxy.New(naming.FixedRoutingID(0xbbbbbbbbbbbbbbbb), nil, "tcp", "127.0.0.1:0", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer proxy.Shutdown()
+
+ // Create the stream.Manager for the server.
+ server1 := manager.InternalNew(naming.FixedRoutingID(0x1111111111111111))
+ defer server1.Shutdown()
+ // Setup a stream.Listener that will accept VCs and Flows routed
+ // through the proxy.
+ ln1, ep1, err := server1.Listen(proxy.Endpoint().Network(), proxy.Endpoint().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ln1.Close()
+
+ // Create the stream.Manager for a second server.
+ server2 := manager.InternalNew(naming.FixedRoutingID(0x2222222222222222))
+ defer server2.Shutdown()
+ // Setup a stream.Listener that will accept VCs and Flows routed
+ // through the proxy.
+ ln2, ep2, err := server2.Listen(proxy.Endpoint().Network(), proxy.Endpoint().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ln2.Close()
+
+ // Create the stream.Manager for a client.
+ client := manager.InternalNew(naming.FixedRoutingID(0xcccccccccccccccc))
+ defer client.Shutdown()
+
+ cases := []struct {
+ client stream.Manager
+ ln stream.Listener
+ ep naming.Endpoint
+ }{
+ {client, ln1, ep1}, // client writing to server1
+ {server1, ln2, ep2}, // server1 writing to server2
+ {server1, ln1, ep1}, // server1 writing to itself
+ }
+
+ const written = "the dough rises"
+ for i, c := range cases {
+ name := fmt.Sprintf("case #%d(write to %v):", i, c.ep)
+ // Accept a single flow and write out what is read to readChan
+ readChan := make(chan string)
+ go readFlow(t, c.ln, readChan)
+ if err := writeFlow(c.client, c.ep, written); err != nil {
+ t.Errorf("%s: %v", name, err)
+ continue
+ }
+ // Validate that the data read is the same as the data written.
+ if read := <-readChan; read != written {
+ t.Errorf("case #%d: Read %q, wrote %q", i, read, written)
+ }
+ }
+}
+
+func TestDuplicateRoutingID(t *testing.T) {
+ proxy, err := proxy.New(naming.FixedRoutingID(0xbbbbbbbbbbbbbbbb), nil, "tcp", "127.0.0.1:0", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer proxy.Shutdown()
+
+ // Create the stream.Manager for server1 and server2, both with the same routing ID
+ serverRID := naming.FixedRoutingID(0x5555555555555555)
+ server1 := manager.InternalNew(serverRID)
+ server2 := manager.InternalNew(serverRID)
+ defer server1.Shutdown()
+ defer server2.Shutdown()
+
+ // First server to claim serverRID should win.
+ ln1, ep1, err := server1.Listen(proxy.Endpoint().Network(), proxy.Endpoint().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ln1.Close()
+
+ ln2, ep2, err := server2.Listen(proxy.Endpoint().Network(), proxy.Endpoint().String())
+ if pattern := "routing id 00000000000000005555555555555555 is already being proxied"; err == nil || !strings.Contains(err.Error(), pattern) {
+ t.Errorf("Got (%v, %v, %v) want error \"...%v\" (ep1:%v)", ln2, ep2, err, pattern, ep1)
+ }
+}
+
+func TestProxyAuthentication(t *testing.T) {
+ pproxy := tsecurity.NewPrincipal("proxy")
+ proxy, err := proxy.New(naming.FixedRoutingID(0xbbbbbbbbbbbbbbbb), pproxy, "tcp", "127.0.0.1:0", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer proxy.Shutdown()
+
+ other := manager.InternalNew(naming.FixedRoutingID(0xcccccccccccccccc))
+ defer other.Shutdown()
+
+ vc, err := other.Dial(proxy.Endpoint())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ flow, err := vc.Connect()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got, want := flow.RemoteBlessings(), pproxy.BlessingStore().Default(); !reflect.DeepEqual(got, want) {
+ t.Errorf("Proxy authenticated as [%v], want [%v]", got, want)
+ }
+}
+
+func TestServerBlessings(t *testing.T) {
+ proxy, err := proxy.New(naming.FixedRoutingID(0xbbbbbbbbbbbbbbbb), nil, "tcp", "127.0.0.1:0", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ server := manager.InternalNew(naming.FixedRoutingID(0x5555555555555555))
+ defer server.Shutdown()
+ pserver := tsecurity.NewPrincipal("server")
+ ln, ep, err := server.Listen(proxy.Endpoint().Network(), proxy.Endpoint().String(), vc.LocalPrincipal{pserver})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ln.Close()
+ go func() {
+ for {
+ if _, err := ln.Accept(); err != nil {
+ return
+ }
+ }
+ }()
+
+ client := manager.InternalNew(naming.FixedRoutingID(0xcccccccccccccccc))
+ defer client.Shutdown()
+ vc, err := client.Dial(ep)
+ if err != nil {
+ t.Fatal(err)
+ }
+ flow, err := vc.Connect()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got, want := flow.RemoteBlessings(), pserver.BlessingStore().Default(); !reflect.DeepEqual(got, want) {
+ t.Errorf("Got [%v] want [%v]", got, want)
+ }
+}
+
+func TestHostPort(t *testing.T) {
+ proxy, err := proxy.New(naming.FixedRoutingID(0xbbbbbbbbbbbbbbbb), nil, "tcp", "127.0.0.1:0", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer proxy.Shutdown()
+ server := manager.InternalNew(naming.FixedRoutingID(0x5555555555555555))
+ defer server.Shutdown()
+ addr := proxy.Endpoint().Addr().String()
+ port := addr[strings.LastIndex(addr, ":"):]
+ ln, _, err := server.Listen("veyron", "127.0.0.1"+port)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ln.Close()
+}
+
+func TestClientBecomesServer(t *testing.T) {
+ proxy, err := proxy.New(naming.FixedRoutingID(0xbbbbbbbbbbbbbbbb), nil, "tcp", "127.0.0.1:0", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ server := manager.InternalNew(naming.FixedRoutingID(0x5555555555555555))
+ client1 := manager.InternalNew(naming.FixedRoutingID(0x1111111111111111))
+ client2 := manager.InternalNew(naming.FixedRoutingID(0x2222222222222222))
+ defer proxy.Shutdown()
+ defer server.Shutdown()
+ defer client1.Shutdown()
+ defer client2.Shutdown()
+
+ lnS, epS, err := server.Listen(proxy.Endpoint().Network(), proxy.Endpoint().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer lnS.Close()
+ rchan := make(chan string)
+
+ // client1 must connect to the proxy to speak to the server.
+ // Keep a VC and Flow open to the server, to ensure that the proxy
+ // maintains routing information (at some point, inactive VIFs
+ // should be garbage collected, so this ensures that the VIF
+	// is "active").
+ if vc, err := client1.Dial(epS); err != nil {
+ t.Fatal(err)
+ } else if flow, err := vc.Connect(); err != nil {
+ t.Fatal(err)
+ } else {
+ defer flow.Close()
+ }
+
+ // Now client1 becomes a server
+ lnC, epC, err := client1.Listen(proxy.Endpoint().Network(), proxy.Endpoint().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer lnC.Close()
+ // client2 should be able to talk to client1 through the proxy
+ rchan = make(chan string)
+ go readFlow(t, lnC, rchan)
+ if err := writeFlow(client2, epC, "daffy duck"); err != nil {
+		t.Fatalf("client2 failed to chat with client1: %v", err)
+ }
+ if got, want := <-rchan, "daffy duck"; got != want {
+		t.Fatalf("client2->client1 got %q want %q", got, want)
+ }
+}
+
+func writeFlow(mgr stream.Manager, ep naming.Endpoint, data string) error {
+ vc, err := mgr.Dial(ep)
+ if err != nil {
+ return fmt.Errorf("manager.Dial(%v) failed: %v", ep, err)
+ }
+ flow, err := vc.Connect()
+ if err != nil {
+ return fmt.Errorf("vc.Connect failed: %v", err)
+ }
+ defer flow.Close()
+ if _, err := flow.Write([]byte(data)); err != nil {
+ return fmt.Errorf("flow.Write failed: %v", err)
+ }
+ return nil
+}
+
+func readFlow(t *testing.T, ln stream.Listener, read chan<- string) {
+ defer close(read)
+ flow, err := ln.Accept()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ var tmp [1024]byte
+ var buf bytes.Buffer
+ for {
+ n, err := flow.Read(tmp[:])
+ if err == io.EOF {
+ read <- buf.String()
+ return
+ }
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ buf.Write(tmp[:n])
+ }
+}
diff --git a/profiles/proxy/v23_internal_test.go b/profiles/proxy/v23_internal_test.go
new file mode 100644
index 0000000..cea70bf
--- /dev/null
+++ b/profiles/proxy/v23_internal_test.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated via go generate.
+// DO NOT UPDATE MANUALLY
+package proxy
+
+import "testing"
+import "os"
+
+import "v.io/x/ref/lib/testutil"
+
+func TestMain(m *testing.M) {
+ testutil.Init()
+ os.Exit(m.Run())
+}
diff --git a/profiles/roaming/roaminginit.go b/profiles/roaming/roaminginit.go
index bc280fb..e982740 100644
--- a/profiles/roaming/roaminginit.go
+++ b/profiles/roaming/roaminginit.go
@@ -25,10 +25,10 @@
"v.io/x/ref/lib/netstate"
"v.io/x/ref/lib/websocket"
"v.io/x/ref/profiles/internal"
- _ "v.io/x/ref/runtimes/google/ipc/protocols/tcp"
- _ "v.io/x/ref/runtimes/google/ipc/protocols/ws"
- _ "v.io/x/ref/runtimes/google/ipc/protocols/wsh"
- grt "v.io/x/ref/runtimes/google/rt"
+ _ "v.io/x/ref/profiles/internal/ipc/protocols/tcp"
+ _ "v.io/x/ref/profiles/internal/ipc/protocols/ws"
+ _ "v.io/x/ref/profiles/internal/ipc/protocols/wsh"
+ grt "v.io/x/ref/profiles/internal/rt"
"v.io/x/ref/services/mgmt/debug"
// TODO(cnicolaou,ashankar): move this into flags.
diff --git a/profiles/static/staticinit.go b/profiles/static/staticinit.go
index d28e3be..d330b28 100644
--- a/profiles/static/staticinit.go
+++ b/profiles/static/staticinit.go
@@ -13,10 +13,10 @@
"v.io/x/ref/lib/netstate"
"v.io/x/ref/lib/websocket"
"v.io/x/ref/profiles/internal"
- _ "v.io/x/ref/runtimes/google/ipc/protocols/tcp"
- _ "v.io/x/ref/runtimes/google/ipc/protocols/ws"
- _ "v.io/x/ref/runtimes/google/ipc/protocols/wsh"
- grt "v.io/x/ref/runtimes/google/rt"
+ _ "v.io/x/ref/profiles/internal/ipc/protocols/tcp"
+ _ "v.io/x/ref/profiles/internal/ipc/protocols/ws"
+ _ "v.io/x/ref/profiles/internal/ipc/protocols/wsh"
+ grt "v.io/x/ref/profiles/internal/rt"
"v.io/x/ref/services/mgmt/debug"
// TODO(cnicolaou,ashankar): move this into flags.