ref: Change "profiles" directory to "runtime"

As per vanadium/issues#470

MultiPart: 4/10

Change-Id: I3ac47c1d9c514f7bbe1c80507c2b3db7fcd9f6d4
diff --git a/runtime/internal/rpc/benchmark/README.md b/runtime/internal/rpc/benchmark/README.md
new file mode 100644
index 0000000..b44ce9f
--- /dev/null
+++ b/runtime/internal/rpc/benchmark/README.md
@@ -0,0 +1,128 @@
+This directory contains code used to measure the performance of the Vanadium RPC stack.
+
+---
+
+These benchmarks use Go's testing package. Each benchmark involves one server
+and one client. The server has two very simple methods that echo the data
+received from the client back to the client.
+
+* `client ---- Echo(payload) ----> server`
+* `client <--- return payload ---- server`
+
+There are two versions of the Echo method:
+
+* `Echo(payload []byte) ([]byte, error)`
+* `EchoStream() stream<[]byte,[]byte> error`
+
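+A minimal sketch of a server-side implementation of these two methods, written
+against the generated `BenchmarkServerMethods` interface from
+`benchmark.vdl.go` in this directory (the `impl` receiver type is
+hypothetical):
+
+```
+func (i *impl) Echo(_ *context.T, _ rpc.ServerCall, payload []byte) ([]byte, error) {
+	// Return the payload unchanged.
+	return payload, nil
+}
+
+func (i *impl) EchoStream(_ *context.T, call BenchmarkEchoStreamServerCall) error {
+	rs, ss := call.RecvStream(), call.SendStream()
+	for rs.Advance() {
+		// Echo each received chunk back to the client.
+		if err := ss.Send(rs.Value()); err != nil {
+			return err
+		}
+	}
+	return rs.Err()
+}
+```
+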
+# Microbenchmarks
+## `benchmark_test.go`
+
+The first set of benchmarks uses the non-streaming version of Echo with varying
+payload sizes. The second set uses the streaming version with a varying number
+of chunks and payload sizes. The third set measures performance with multiple
+clients hosted in the same process.
+
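+Each non-streaming iteration amounts to a single call through the generated
+client stub. A minimal sketch (`serverAddr` and `payloadSize` are assumed
+inputs):
+
+```
+// One Echo round trip: send payloadSize bytes and receive them back.
+payload := make([]byte, payloadSize)
+got, err := benchmark.BenchmarkClient(serverAddr).Echo(ctx, payload)
+```
+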
+These tests create a VC before the benchmark begins, so the VC creation
+overhead is excluded from the measurements.
+
+```
+$ v23 go test -bench=. -timeout=1h -cpu=1 -benchtime=5s v.io/x/ref/runtime/internal/rpc/benchmark
+PASS
+Benchmark____1B     1000           8301357 ns/op           0.00 MB/s
+--- Histogram (unit: ms)
+        Count: 1000  Min: 7  Max: 17  Avg: 7.89
+        ------------------------------------------------------------
+        [  7,   8)   505   50.5%   50.5%  #####
+        [  8,   9)   389   38.9%   89.4%  ####
+        [  9,  10)    38    3.8%   93.2%
+        [ 10,  11)    12    1.2%   94.4%
+        [ 11,  12)     4    0.4%   94.8%
+        [ 12,  14)    19    1.9%   96.7%
+        [ 14,  16)    23    2.3%   99.0%
+        [ 16,  18)    10    1.0%  100.0%
+        [ 18,  21)     0    0.0%  100.0%
+        [ 21,  24)     0    0.0%  100.0%
+        [ 24, inf)     0    0.0%  100.0%
+Benchmark___10B     1000           8587341 ns/op           0.00 MB/s
+...
+```
+
+`RESULTS.txt` has the full benchmark results. In each histogram, the columns
+are the latency bucket, the sample count, the percentage of samples in that
+bucket, the cumulative percentage, and a proportional bar.
+
+## `main/main.go`
+
+`main/main.go` is a simple command-line tool that runs the main benchmarks,
+measuring RPC setup time, latency, and throughput.
+
+```
+$ v23 go run main/main.go
+RPC Connection  33.48 ms/rpc
+RPC (echo 1000B)  1.31 ms/rpc (763.05 qps)
+RPC Streaming (echo 1000B)  0.11 ms/rpc
+RPC Streaming Throughput (echo 1MB) 313.91 MB/s
+```
+
+# Client/Server
+## `{benchmark,benchmarkd}/main.go`
+
+`benchmarkd/main.go` and `benchmark/main.go` are simple command-line tools that
+run the benchmark server and client as separate processes. Unlike the
+benchmarks above, this setup includes the startup cost of name resolution, VC
+creation, etc. in the first RPC.
+
+```
+$ v23 go run benchmarkd/main.go \
+  -v23.tcp.address=localhost:8888 -v23.permissions.literal='{"Read": {"In": ["..."]}}'
+```
+
+(In a different shell)
+
+```
+$ v23 go run benchmark/main.go \
+  -server=/localhost:8888 -iterations=100 -chunk_count=0 -payload_size=10
+iterations: 100  chunk_count: 0  payload_size: 10
+elapsed time: 1.369034277s
+Histogram (unit: ms)
+Count: 100  Min: 7  Max: 94  Avg: 13.17
+------------------------------------------------------------
+[  7,   8)    1    1.0%    1.0%
+[  8,   9)    4    4.0%    5.0%
+[  9,  10)   17   17.0%   22.0%  ##
+[ 10,  12)   24   24.0%   46.0%  ##
+[ 12,  15)   24   24.0%   70.0%  ##
+[ 15,  19)   28   28.0%   98.0%  ###
+[ 19,  24)    1    1.0%   99.0%
+[ 24,  32)    0    0.0%   99.0%
+[ 32,  42)    0    0.0%   99.0%
+[ 42,  56)    0    0.0%   99.0%
+[ 56,  75)    0    0.0%   99.0%
+[ 75, 101)    1    1.0%  100.0%
+[101, 136)    0    0.0%  100.0%
+[136, 183)    0    0.0%  100.0%
+[183, 247)    0    0.0%  100.0%
+[247, 334)    0    0.0%  100.0%
+[334, inf)    0    0.0%  100.0%
+```
+
+# Raspberry Pi
+
+On a Raspberry Pi, everything is much slower. The same tests show the following
+results:
+
+```
+$ ./main
+RPC Connection  1765.47 ms/rpc
+RPC (echo 1000B)  78.61 ms/rpc (12.72 qps)
+RPC Streaming (echo 1000B)  23.85 ms/rpc
+RPC Streaming Throughput (echo 1MB) 0.92 MB/s
+```
+
+On a Raspberry Pi 2,
+
+```
+$ ./main
+RPC Connection  847.41 ms/rpc
+RPC (echo 1000B)  16.47 ms/rpc (60.71 qps)
+RPC Streaming (echo 1000B)  3.33 ms/rpc
+RPC Streaming Throughput (echo 1MB) 2.31 MB/s
+```
diff --git a/runtime/internal/rpc/benchmark/RESULTS.txt b/runtime/internal/rpc/benchmark/RESULTS.txt
new file mode 100644
index 0000000..6460076
--- /dev/null
+++ b/runtime/internal/rpc/benchmark/RESULTS.txt
@@ -0,0 +1,864 @@
+* 'Benchmark___NNB' shows the average time to execute a simple Echo RPC with a payload
+  of NN bytes.
+* 'Benchmark___CC_chunk____NNB' shows the average time to execute a streaming RPC with
+  a payload of CC chunks of NN bytes.
+* 'Benchmark__per_chunk___NNB' shows the average time to send one chunk of NN bytes.
+* 'Benchmark___NNB_mux___CC_chunks___MMB' shows the average time to execute a simple
+  Echo RPC with a payload of NN bytes while streaming payloads of CC chunks of MM bytes
+  continuously in the same process. For example, 'Benchmark___10B_mux__100_chunks___10B'
+  measures a 10-byte Echo RPC while 100-chunk streams of 10-byte payloads run in the
+  background.
+
+================================================================================
+Date: 04/16/2015
+Platform: Intel(R) Xeon(R) CPU E5-2689 0 @ 2.60GHz,  66114888KB Memory
+
+Benchmark____1B-2	   10000	    805392 ns/op	   0.00 MB/s
+--- Histogram (unit: µs)
+	Count: 10000  Min: 693  Max: 2503  Avg: 804.62
+	------------------------------------------------------------
+	[ 693,  694)      2    0.0%    0.0%  
+	[ 694,  695)      1    0.0%    0.0%  
+	[ 695,  697)      2    0.0%    0.1%  
+	[ 697,  701)      2    0.0%    0.1%  
+	[ 701,  708)     25    0.2%    0.3%  
+	[ 708,  720)     91    0.9%    1.2%  
+	[ 720,  740)   1444   14.4%   15.7%  #
+	[ 740,  773)   3540   35.4%   51.1%  ####
+	[ 773,  827)   3820   38.2%   89.3%  ####
+	[ 827,  917)    534    5.3%   94.6%  #
+	[ 917, 1065)    135    1.4%   96.0%  
+	[1065, 1309)     43    0.4%   96.4%  
+	[1309, 1712)    307    3.1%   99.5%  
+	[1712, 2377)     53    0.5%  100.0%  
+	[2377, 3474)      1    0.0%  100.0%  
+	[3474, 5283)      0    0.0%  100.0%  
+	[5283,  inf)      0    0.0%  100.0%  
+Benchmark___10B-2	   10000	    808029 ns/op	   0.02 MB/s
+--- Histogram (unit: µs)
+	Count: 10000  Min: 695  Max: 2678  Avg: 807.25
+	------------------------------------------------------------
+	[ 695,  696)      2    0.0%    0.0%  
+	[ 696,  697)      1    0.0%    0.0%  
+	[ 697,  699)      3    0.0%    0.1%  
+	[ 699,  703)      5    0.1%    0.1%  
+	[ 703,  710)     13    0.1%    0.2%  
+	[ 710,  722)    101    1.0%    1.2%  
+	[ 722,  742)    616    6.2%    7.4%  #
+	[ 742,  776)   4487   44.9%   52.3%  ####
+	[ 776,  833)   4001   40.0%   92.3%  ####
+	[ 833,  928)    397    4.0%   96.3%  
+	[ 928, 1085)    138    1.4%   97.6%  
+	[1085, 1346)      3    0.0%   97.7%  
+	[1346, 1780)     43    0.4%   98.1%  
+	[1780, 2500)    186    1.9%  100.0%  
+	[2500, 3695)      4    0.0%  100.0%  
+	[3695, 5677)      0    0.0%  100.0%  
+	[5677,  inf)      0    0.0%  100.0%  
+Benchmark__100B-2	   10000	    821234 ns/op	   0.24 MB/s
+--- Histogram (unit: µs)
+	Count: 10000  Min: 703  Max: 3921  Avg: 820.44
+	------------------------------------------------------------
+	[ 703,  704)      1    0.0%    0.0%  
+	[ 704,  705)      1    0.0%    0.0%  
+	[ 705,  707)      0    0.0%    0.0%  
+	[ 707,  712)      3    0.0%    0.1%  
+	[ 712,  720)      7    0.1%    0.1%  
+	[ 720,  734)     46    0.5%    0.6%  
+	[ 734,  759)    995   10.0%   10.5%  #
+	[ 759,  802)   6544   65.4%   76.0%  #######
+	[ 802,  876)   1829   18.3%   94.3%  ##
+	[ 876, 1003)    391    3.9%   98.2%  
+	[1003, 1220)     15    0.1%   98.3%  
+	[1220, 1593)      2    0.0%   98.3%  
+	[1593, 2232)     16    0.2%   98.5%  
+	[2232, 3328)    148    1.5%  100.0%  
+	[3328, 5206)      2    0.0%  100.0%  
+	[5206, 8424)      0    0.0%  100.0%  
+	[8424,  inf)      0    0.0%  100.0%  
+Benchmark___1KB-2	   10000	    836132 ns/op	   2.39 MB/s
+--- Histogram (unit: µs)
+	Count: 10000  Min: 735  Max: 4663  Avg: 835.34
+	------------------------------------------------------------
+	[ 735,  736)      1    0.0%    0.0%  
+	[ 736,  737)      0    0.0%    0.0%  
+	[ 737,  740)      5    0.1%    0.1%  
+	[ 740,  745)     20    0.2%    0.3%  
+	[ 745,  754)    155    1.6%    1.8%  
+	[ 754,  769)   1175   11.8%   13.6%  #
+	[ 769,  796)   3923   39.2%   52.8%  ####
+	[ 796,  843)   3499   35.0%   87.8%  ###
+	[ 843,  925)    762    7.6%   95.4%  #
+	[ 925, 1068)    304    3.0%   98.4%  
+	[1068, 1316)     14    0.1%   98.6%  
+	[1316, 1748)      0    0.0%   98.6%  
+	[1748, 2498)      0    0.0%   98.6%  
+	[2498, 3801)    140    1.4%  100.0%  
+	[3801, 6063)      2    0.0%  100.0%  
+	[6063, 9991)      0    0.0%  100.0%  
+	[9991,  inf)      0    0.0%  100.0%  
+Benchmark__10KB-2	   10000	    922035 ns/op	  21.69 MB/s
+--- Histogram (unit: µs)
+	Count: 10000  Min: 773  Max: 18022  Avg: 921.25
+	------------------------------------------------------------
+	[  773,   774)      1    0.0%    0.0%  
+	[  774,   775)      1    0.0%    0.0%  
+	[  775,   778)      2    0.0%    0.0%  
+	[  778,   785)      4    0.0%    0.1%  
+	[  785,   798)     45    0.5%    0.5%  
+	[  798,   823)   1654   16.5%   17.1%  ##
+	[  823,   872)   6089   60.9%   78.0%  ######
+	[  872,   966)   1494   14.9%   92.9%  #
+	[  966,  1147)    486    4.9%   97.8%  
+	[ 1147,  1495)      4    0.0%   97.8%  
+	[ 1495,  2162)      0    0.0%   97.8%  
+	[ 2162,  3441)     18    0.2%   98.0%  
+	[ 3441,  5892)    201    2.0%  100.0%  
+	[ 5892, 10589)      0    0.0%  100.0%  
+	[10589, 19590)      1    0.0%  100.0%  
+	[19590, 36838)      0    0.0%  100.0%  
+	[36838,   inf)      0    0.0%  100.0%  
+Benchmark_100KB-2	    5000	   2039340 ns/op	  98.07 MB/s
+--- Histogram (unit: ms)
+	Count: 5000  Min: 1  Max: 10  Avg: 1.44
+	------------------------------------------------------------
+	[  1,   2)  4496   89.9%   89.9%  #########
+	[  2,   3)    66    1.3%   91.2%  
+	[  3,   4)     0    0.0%   91.2%  
+	[  4,   5)   119    2.4%   93.6%  
+	[  5,   6)   162    3.2%   96.9%  
+	[  6,   8)    26    0.5%   97.4%  
+	[  8,  10)   123    2.5%   99.8%  
+	[ 10,  12)     8    0.2%  100.0%  
+	[ 12,  15)     0    0.0%  100.0%  
+	[ 15, inf)     0    0.0%  100.0%  
+
+Benchmark____1_chunk_____1B-2	   10000	   1008412 ns/op	   0.00 MB/s
+--- Histogram (unit: µs)
+	Count: 10000  Min: 871  Max: 2447  Avg: 1007.61
+	------------------------------------------------------------
+	[ 871,  872)      1    0.0%    0.0%  
+	[ 872,  873)      0    0.0%    0.0%  
+	[ 873,  875)      0    0.0%    0.0%  
+	[ 875,  879)      1    0.0%    0.0%  
+	[ 879,  886)      1    0.0%    0.0%  
+	[ 886,  897)     13    0.1%    0.2%  
+	[ 897,  916)    130    1.3%    1.5%  
+	[ 916,  947)   3640   36.4%   37.9%  ####
+	[ 947,  997)   4612   46.1%   84.0%  #####
+	[ 997, 1079)    739    7.4%   91.4%  #
+	[1079, 1214)    304    3.0%   94.4%  
+	[1214, 1435)     37    0.4%   94.8%  
+	[1435, 1796)    291    2.9%   97.7%  
+	[1796, 2386)    230    2.3%  100.0%  
+	[2386, 3350)      1    0.0%  100.0%  
+	[3350, 4925)      0    0.0%  100.0%  
+	[4925,  inf)      0    0.0%  100.0%  
+Benchmark____1_chunk____10B-2	   10000	   1132272 ns/op	   0.02 MB/s
+--- Histogram (unit: µs)
+	Count: 10000  Min: 907  Max: 4069  Avg: 1131.39
+	------------------------------------------------------------
+	[ 907,  908)      1    0.0%    0.0%  
+	[ 908,  909)      0    0.0%    0.0%  
+	[ 909,  911)      1    0.0%    0.0%  
+	[ 911,  916)      5    0.1%    0.1%  
+	[ 916,  924)     69    0.7%    0.8%  
+	[ 924,  938)    516    5.2%    5.9%  #
+	[ 938,  963)   1376   13.8%   19.7%  #
+	[ 963, 1005)   1147   11.5%   31.2%  #
+	[1005, 1078)    791    7.9%   39.1%  #
+	[1078, 1203)   4877   48.8%   87.8%  #####
+	[1203, 1418)    884    8.8%   96.7%  #
+	[1418, 1786)     19    0.2%   96.9%  
+	[1786, 2416)    122    1.2%   98.1%  
+	[2416, 3495)    172    1.7%   99.8%  
+	[3495, 5342)     20    0.2%  100.0%  
+	[5342, 8503)      0    0.0%  100.0%  
+	[8503,  inf)      0    0.0%  100.0%  
+Benchmark____1_chunk___100B-2	   10000	   1183806 ns/op	   0.17 MB/s
+--- Histogram (unit: µs)
+	Count: 10000  Min: 909  Max: 6281  Avg: 1182.87
+	------------------------------------------------------------
+	[  909,   910)      1    0.0%    0.0%  
+	[  910,   911)      0    0.0%    0.0%  
+	[  911,   914)      0    0.0%    0.0%  
+	[  914,   919)      2    0.0%    0.0%  
+	[  919,   928)     30    0.3%    0.3%  
+	[  928,   945)    177    1.8%    2.1%  
+	[  945,   976)    422    4.2%    6.3%  
+	[  976,  1031)    390    3.9%   10.2%  
+	[ 1031,  1128)   3324   33.2%   43.5%  ###
+	[ 1128,  1301)   5231   52.3%   95.8%  #####
+	[ 1301,  1607)    198    2.0%   97.8%  
+	[ 1607,  2150)      0    0.0%   97.8%  
+	[ 2150,  3114)     96    1.0%   98.7%  
+	[ 3114,  4823)    111    1.1%   99.8%  
+	[ 4823,  7853)     18    0.2%  100.0%  
+	[ 7853, 13224)      0    0.0%  100.0%  
+	[13224,   inf)      0    0.0%  100.0%  
+Benchmark____1_chunk____1KB-2	   10000	   1176691 ns/op	   1.70 MB/s
+--- Histogram (unit: µs)
+	Count: 10000  Min: 901  Max: 8877  Avg: 1175.76
+	------------------------------------------------------------
+	[  901,   902)      1    0.0%    0.0%  
+	[  902,   903)      0    0.0%    0.0%  
+	[  903,   906)      0    0.0%    0.0%  
+	[  906,   912)      0    0.0%    0.0%  
+	[  912,   922)      0    0.0%    0.0%  
+	[  922,   941)     59    0.6%    0.6%  
+	[  941,   977)    562    5.6%    6.2%  #
+	[  977,  1043)    795    8.0%   14.2%  #
+	[ 1043,  1163)   5802   58.0%   72.2%  ######
+	[ 1163,  1382)   2543   25.4%   97.6%  ###
+	[ 1382,  1781)     54    0.5%   98.2%  
+	[ 1781,  2507)      0    0.0%   98.2%  
+	[ 2507,  3829)    123    1.2%   99.4%  
+	[ 3829,  6236)     56    0.6%  100.0%  
+	[ 6236, 10617)      5    0.1%  100.0%  
+	[10617, 18592)      0    0.0%  100.0%  
+	[18592,   inf)      0    0.0%  100.0%  
+Benchmark____1_chunk___10KB-2	    5000	   1209775 ns/op	  16.53 MB/s
+--- Histogram (unit: µs)
+	Count: 5000  Min: 931  Max: 6999  Avg: 1208.87
+	------------------------------------------------------------
+	[  931,   932)     1    0.0%    0.0%  
+	[  932,   933)     0    0.0%    0.0%  
+	[  933,   936)     1    0.0%    0.0%  
+	[  936,   941)     3    0.1%    0.1%  
+	[  941,   951)    23    0.5%    0.6%  
+	[  951,   969)   114    2.3%    2.8%  
+	[  969,  1001)   302    6.0%    8.9%  #
+	[ 1001,  1059)   872   17.4%   26.3%  ##
+	[ 1059,  1163)  2295   45.9%   72.2%  #####
+	[ 1163,  1349)  1104   22.1%   94.3%  ##
+	[ 1349,  1681)   158    3.2%   97.5%  
+	[ 1681,  2275)     0    0.0%   97.5%  
+	[ 2275,  3337)     0    0.0%   97.5%  
+	[ 3337,  5236)    79    1.6%   99.0%  
+	[ 5236,  8631)    48    1.0%  100.0%  
+	[ 8631, 14699)     0    0.0%  100.0%  
+	[14699,   inf)     0    0.0%  100.0%  
+Benchmark____1_chunk__100KB-2	    3000	   2075895 ns/op	  96.34 MB/s
+--- Histogram (unit: ms)
+	Count: 3000  Min: 1  Max: 7  Avg: 1.40
+	------------------------------------------------------------
+	[  1,   2)  2569   85.6%   85.6%  #########
+	[  2,   3)   155    5.2%   90.8%  #
+	[  3,   4)     0    0.0%   90.8%  
+	[  4,   5)   141    4.7%   95.5%  
+	[  5,   6)    74    2.5%   98.0%  
+	[  6,   7)    38    1.3%   99.2%  
+	[  7, inf)    23    0.8%  100.0%  
+Benchmark___10_chunk_____1B-2	    3000	   2076926 ns/op	   0.01 MB/s
+--- Histogram (unit: ms)
+	Count: 3000  Min: 1  Max: 5  Avg: 1.26
+	------------------------------------------------------------
+	[  1,   2)  2489   83.0%   83.0%  ########
+	[  2,   3)   416   13.9%   96.8%  #
+	[  3,   4)     0    0.0%   96.8%  
+	[  4,   5)     6    0.2%   97.0%  
+	[  5, inf)    89    3.0%  100.0%  
+Benchmark___10_chunk____10B-2	    3000	   2229984 ns/op	   0.09 MB/s
+--- Histogram (unit: ms)
+	Count: 3000  Min: 1  Max: 10  Avg: 1.75
+	------------------------------------------------------------
+	[  1,   2)  1141   38.0%   38.0%  ####
+	[  2,   3)  1758   58.6%   96.6%  ######
+	[  3,   4)    15    0.5%   97.1%  
+	[  4,   5)     0    0.0%   97.1%  
+	[  5,   6)    33    1.1%   98.2%  
+	[  6,   8)    30    1.0%   99.2%  
+	[  8,  10)    14    0.5%   99.7%  
+	[ 10,  12)     9    0.3%  100.0%  
+	[ 12,  15)     0    0.0%  100.0%  
+	[ 15, inf)     0    0.0%  100.0%  
+Benchmark___10_chunk___100B-2	    3000	   2348741 ns/op	   0.85 MB/s
+--- Histogram (unit: ms)
+	Count: 3000  Min: 1  Max: 12  Avg: 2.07
+	------------------------------------------------------------
+	[  1,   2)   278    9.3%    9.3%  #
+	[  2,   3)  2613   87.1%   96.4%  #########
+	[  3,   4)    29    1.0%   97.3%  
+	[  4,   5)     0    0.0%   97.3%  
+	[  5,   6)     0    0.0%   97.3%  
+	[  6,   8)    44    1.5%   98.8%  
+	[  8,  10)     9    0.3%   99.1%  
+	[ 10,  13)    27    0.9%  100.0%  
+	[ 13,  16)     0    0.0%  100.0%  
+	[ 16,  20)     0    0.0%  100.0%  
+	[ 20,  24)     0    0.0%  100.0%  
+	[ 24, inf)     0    0.0%  100.0%  
+Benchmark___10_chunk____1KB-2	    3000	   2426600 ns/op	   8.24 MB/s
+--- Histogram (unit: ms)
+	Count: 3000  Min: 1  Max: 11  Avg: 2.15
+	------------------------------------------------------------
+	[  1,   2)   119    4.0%    4.0%  
+	[  2,   3)  2730   91.0%   95.0%  #########
+	[  3,   4)    59    2.0%   96.9%  
+	[  4,   5)     0    0.0%   96.9%  
+	[  5,   6)     1    0.0%   97.0%  
+	[  6,   8)    55    1.8%   98.8%  
+	[  8,  10)    10    0.3%   99.1%  
+	[ 10,  12)    26    0.9%  100.0%  
+	[ 12,  15)     0    0.0%  100.0%  
+	[ 15,  18)     0    0.0%  100.0%  
+	[ 18, inf)     0    0.0%  100.0%  
+Benchmark___10_chunk___10KB-2	    2000	   3151285 ns/op	  63.47 MB/s
+--- Histogram (unit: ms)
+	Count: 2000  Min: 2  Max: 12  Avg: 2.51
+	------------------------------------------------------------
+	[  2,   3)  1760   88.0%   88.0%  #########
+	[  3,   4)    89    4.5%   92.5%  
+	[  4,   5)    29    1.5%   93.9%  
+	[  5,   6)     0    0.0%   93.9%  
+	[  6,   7)     6    0.3%   94.2%  
+	[  7,   9)    45    2.2%   96.5%  
+	[  9,  11)    29    1.5%   97.9%  
+	[ 11,  13)    42    2.1%  100.0%  
+	[ 13,  16)     0    0.0%  100.0%  
+	[ 16,  19)     0    0.0%  100.0%  
+	[ 19, inf)     0    0.0%  100.0%  
+Benchmark___10_chunk__100KB-2	    1000	   9469459 ns/op	 211.21 MB/s
+--- Histogram (unit: ms)
+	Count: 1000  Min: 6  Max: 14  Avg: 8.96
+	------------------------------------------------------------
+	[  6,   7)     1    0.1%    0.1%  
+	[  7,   8)   626   62.6%   62.7%  ######
+	[  8,   9)    52    5.2%   67.9%  #
+	[  9,  10)    13    1.3%   69.2%  
+	[ 10,  11)     2    0.2%   69.4%  
+	[ 11,  13)    58    5.8%   75.2%  #
+	[ 13,  15)   248   24.8%  100.0%  ##
+	[ 15,  17)     0    0.0%  100.0%  
+	[ 17, inf)     0    0.0%  100.0%  
+
+Benchmark__100_chunk_____1B-2	     500	  12427739 ns/op	   0.02 MB/s
+--- Histogram (unit: ms)
+	Count: 500  Min: 11  Max: 14  Avg: 11.96
+	------------------------------------------------------------
+	[ 11,  12)  124   24.8%   24.8%  ##
+	[ 12,  13)  279   55.8%   80.6%  ######
+	[ 13,  14)   88   17.6%   98.2%  ##
+	[ 14, inf)    9    1.8%  100.0%  
+Benchmark__100_chunk____10B-2	     500	  12206256 ns/op	   0.16 MB/s
+--- Histogram (unit: ms)
+	Count: 500  Min: 11  Max: 14  Avg: 11.73
+	------------------------------------------------------------
+	[ 11,  12)  189   37.8%   37.8%  ####
+	[ 12,  13)  261   52.2%   90.0%  #####
+	[ 13,  14)   48    9.6%   99.6%  #
+	[ 14, inf)    2    0.4%  100.0%  
+Benchmark__100_chunk___100B-2	     500	  12241029 ns/op	   1.63 MB/s
+--- Histogram (unit: ms)
+	Count: 500  Min: 11  Max: 14  Avg: 11.75
+	------------------------------------------------------------
+	[ 11,  12)  184   36.8%   36.8%  ####
+	[ 12,  13)  265   53.0%   89.8%  #####
+	[ 13,  14)   45    9.0%   98.8%  #
+	[ 14, inf)    6    1.2%  100.0%  
+Benchmark__100_chunk____1KB-2	     500	  13043623 ns/op	  15.33 MB/s
+--- Histogram (unit: ms)
+	Count: 500  Min: 11  Max: 33  Avg: 12.53
+	------------------------------------------------------------
+	[ 11,  12)    3    0.6%    0.6%  
+	[ 12,  13)  284   56.8%   57.4%  ######
+	[ 13,  14)  176   35.2%   92.6%  ####
+	[ 14,  15)   35    7.0%   99.6%  #
+	[ 15,  17)    1    0.2%   99.8%  
+	[ 17,  19)    0    0.0%   99.8%  
+	[ 19,  22)    0    0.0%   99.8%  
+	[ 22,  26)    0    0.0%   99.8%  
+	[ 26,  31)    0    0.0%   99.8%  
+	[ 31,  37)    1    0.2%  100.0%  
+	[ 37,  44)    0    0.0%  100.0%  
+	[ 44,  53)    0    0.0%  100.0%  
+	[ 53,  64)    0    0.0%  100.0%  
+	[ 64,  78)    0    0.0%  100.0%  
+	[ 78,  95)    0    0.0%  100.0%  
+	[ 95, 116)    0    0.0%  100.0%  
+	[116, inf)    0    0.0%  100.0%  
+Benchmark__100_chunk___10KB-2	     500	  18162047 ns/op	 110.12 MB/s
+--- Histogram (unit: ms)
+	Count: 500  Min: 16  Max: 21  Avg: 17.64
+	------------------------------------------------------------
+	[ 16,  17)   79   15.8%   15.8%  ##
+	[ 17,  18)  153   30.6%   46.4%  ###
+	[ 18,  19)  165   33.0%   79.4%  ###
+	[ 19,  20)   78   15.6%   95.0%  ##
+	[ 20,  21)   24    4.8%   99.8%  
+	[ 21, inf)    1    0.2%  100.0%  
+Benchmark__100_chunk__100KB-2	     100	  68538838 ns/op	 291.81 MB/s
+--- Histogram (unit: ms)
+	Count: 100  Min: 65  Max: 71  Avg: 68.06
+	------------------------------------------------------------
+	[ 65,  66)    6    6.0%    6.0%  #
+	[ 66,  67)    7    7.0%   13.0%  #
+	[ 67,  68)   15   15.0%   28.0%  ##
+	[ 68,  69)   35   35.0%   63.0%  ####
+	[ 69,  70)   26   26.0%   89.0%  ###
+	[ 70,  71)    6    6.0%   95.0%  #
+	[ 71, inf)    5    5.0%  100.0%  #
+Benchmark___1K_chunk_____1B-2	     100	 108491530 ns/op	   0.02 MB/s
+--- Histogram (unit: ms)
+	Count: 100  Min: 98  Max: 122  Avg: 107.97
+	------------------------------------------------------------
+	[ 98,  99)    1    1.0%    1.0%  
+	[ 99, 100)    1    1.0%    2.0%  
+	[100, 101)    2    2.0%    4.0%  
+	[101, 102)    4    4.0%    8.0%  
+	[102, 104)    6    6.0%   14.0%  #
+	[104, 106)   12   12.0%   26.0%  #
+	[106, 109)   28   28.0%   54.0%  ###
+	[109, 113)   32   32.0%   86.0%  ###
+	[113, 118)   13   13.0%   99.0%  #
+	[118, 124)    1    1.0%  100.0%  
+	[124, 132)    0    0.0%  100.0%  
+	[132, 142)    0    0.0%  100.0%  
+	[142, 154)    0    0.0%  100.0%  
+	[154, 169)    0    0.0%  100.0%  
+	[169, 188)    0    0.0%  100.0%  
+	[188, 212)    0    0.0%  100.0%  
+	[212, inf)    0    0.0%  100.0%  
+Benchmark___1K_chunk____10B-2	     100	 107576447 ns/op	   0.19 MB/s
+--- Histogram (unit: ms)
+	Count: 100  Min: 97  Max: 119  Avg: 107.04
+	------------------------------------------------------------
+	[ 97,  98)    1    1.0%    1.0%  
+	[ 98,  99)    1    1.0%    2.0%  
+	[ 99, 100)    4    4.0%    6.0%  
+	[100, 101)    1    1.0%    7.0%  
+	[101, 103)    6    6.0%   13.0%  #
+	[103, 105)   15   15.0%   28.0%  ##
+	[105, 108)   28   28.0%   56.0%  ###
+	[108, 112)   29   29.0%   85.0%  ###
+	[112, 117)   13   13.0%   98.0%  #
+	[117, 123)    2    2.0%  100.0%  
+	[123, 130)    0    0.0%  100.0%  
+	[130, 139)    0    0.0%  100.0%  
+	[139, 150)    0    0.0%  100.0%  
+	[150, 164)    0    0.0%  100.0%  
+	[164, 181)    0    0.0%  100.0%  
+	[181, 202)    0    0.0%  100.0%  
+	[202, inf)    0    0.0%  100.0%  
+Benchmark___1K_chunk___100B-2	     100	 108458019 ns/op	   1.84 MB/s
+--- Histogram (unit: ms)
+	Count: 100  Min: 98  Max: 117  Avg: 107.93
+	------------------------------------------------------------
+	[ 98,  99)    1    1.0%    1.0%  
+	[ 99, 100)    1    1.0%    2.0%  
+	[100, 101)    2    2.0%    4.0%  
+	[101, 102)    3    3.0%    7.0%  
+	[102, 104)    9    9.0%   16.0%  #
+	[104, 106)    8    8.0%   24.0%  #
+	[106, 109)   28   28.0%   52.0%  ###
+	[109, 112)   34   34.0%   86.0%  ###
+	[112, 116)   12   12.0%   98.0%  #
+	[116, 121)    2    2.0%  100.0%  
+	[121, 128)    0    0.0%  100.0%  
+	[128, 136)    0    0.0%  100.0%  
+	[136, 146)    0    0.0%  100.0%  
+	[146, 158)    0    0.0%  100.0%  
+	[158, 173)    0    0.0%  100.0%  
+	[173, 191)    0    0.0%  100.0%  
+	[191, inf)    0    0.0%  100.0%  
+Benchmark___1K_chunk____1KB-2	     100	 118334262 ns/op	  16.90 MB/s
+--- Histogram (unit: ms)
+	Count: 100  Min: 105  Max: 129  Avg: 117.82
+	------------------------------------------------------------
+	[105, 106)    2    2.0%    2.0%  
+	[106, 107)    3    3.0%    5.0%  
+	[107, 108)    3    3.0%    8.0%  
+	[108, 109)    3    3.0%   11.0%  
+	[109, 111)    3    3.0%   14.0%  
+	[111, 113)    2    2.0%   16.0%  
+	[113, 116)   10   10.0%   26.0%  #
+	[116, 120)   25   25.0%   51.0%  ###
+	[120, 125)   43   43.0%   94.0%  ####
+	[125, 131)    6    6.0%  100.0%  #
+	[131, 139)    0    0.0%  100.0%  
+	[139, 149)    0    0.0%  100.0%  
+	[149, 161)    0    0.0%  100.0%  
+	[161, 176)    0    0.0%  100.0%  
+	[176, 195)    0    0.0%  100.0%  
+	[195, 219)    0    0.0%  100.0%  
+	[219, inf)    0    0.0%  100.0%  
+Benchmark___1K_chunk___10KB-2	      50	 150361259 ns/op	 133.01 MB/s
+--- Histogram (unit: ms)
+	Count: 50  Min: 144  Max: 156  Avg: 149.92
+	------------------------------------------------------------
+	[144, 145)   3    6.0%    6.0%  #
+	[145, 146)   3    6.0%   12.0%  #
+	[146, 147)   4    8.0%   20.0%  #
+	[147, 148)   4    8.0%   28.0%  #
+	[148, 149)   4    8.0%   36.0%  #
+	[149, 151)   8   16.0%   52.0%  ##
+	[151, 153)  11   22.0%   74.0%  ##
+	[153, 156)  12   24.0%   98.0%  ##
+	[156, 159)   1    2.0%  100.0%  
+	[159, 163)   0    0.0%  100.0%  
+	[163, 168)   0    0.0%  100.0%  
+	[168, 174)   0    0.0%  100.0%  
+	[174, inf)   0    0.0%  100.0%  
+Benchmark___1K_chunk__100KB-2	      10	 691013740 ns/op	 289.43 MB/s
+--- Histogram (unit: ms)
+	Count: 10  Min: 683  Max: 699  Avg: 690.40
+	------------------------------------------------------------
+	[683, 684)   1   10.0%   10.0%  #
+	[684, 685)   0    0.0%   10.0%  
+	[685, 686)   1   10.0%   20.0%  #
+	[686, 687)   0    0.0%   20.0%  
+	[687, 689)   2   20.0%   40.0%  ##
+	[689, 691)   1   10.0%   50.0%  #
+	[691, 694)   3   30.0%   80.0%  ###
+	[694, 697)   1   10.0%   90.0%  #
+	[697, 701)   1   10.0%  100.0%  #
+	[701, 706)   0    0.0%  100.0%  
+	[706, 712)   0    0.0%  100.0%  
+	[712, 719)   0    0.0%  100.0%  
+	[719, 728)   0    0.0%  100.0%  
+	[728, 739)   0    0.0%  100.0%  
+	[739, 752)   0    0.0%  100.0%  
+	[752, 768)   0    0.0%  100.0%  
+	[768, inf)   0    0.0%  100.0%  
+
+Benchmark__per_chunk____1B-2	  100000	    113488 ns/op	   0.02 MB/s
+Benchmark__per_chunk___10B-2	  100000	    110432 ns/op	   0.18 MB/s
+Benchmark__per_chunk__100B-2	  100000	    109446 ns/op	   1.83 MB/s
+Benchmark__per_chunk___1KB-2	  100000	    118905 ns/op	  16.82 MB/s
+Benchmark__per_chunk__10KB-2	   50000	    165005 ns/op	 121.21 MB/s
+Benchmark__per_chunk_100KB-2	   10000	    672988 ns/op	 297.18 MB/s
+
+Benchmark___10B_mux__100_chunks___10B-2	    3000	   2096423 ns/op	   0.01 MB/s
+--- Histogram (unit: µs)
+	Count: 3000  Min: 853  Max: 7497  Avg: 2095.56
+	------------------------------------------------------------
+	[  853,   854)     1    0.0%    0.0%  
+	[  854,   855)     0    0.0%    0.0%  
+	[  855,   858)     0    0.0%    0.0%  
+	[  858,   863)     0    0.0%    0.0%  
+	[  863,   873)     0    0.0%    0.0%  
+	[  873,   891)     0    0.0%    0.0%  
+	[  891,   924)     3    0.1%    0.1%  
+	[  924,   984)    24    0.8%    0.9%  
+	[  984,  1093)   119    4.0%    4.9%  
+	[ 1093,  1289)   580   19.3%   24.2%  ##
+	[ 1289,  1642)  1010   33.7%   57.9%  ###
+	[ 1642,  2277)   488   16.3%   74.2%  ##
+	[ 2277,  3419)   271    9.0%   83.2%  #
+	[ 3419,  5473)   423   14.1%   97.3%  #
+	[ 5473,  9167)    81    2.7%  100.0%  
+	[ 9167, 15811)     0    0.0%  100.0%  
+	[15811,   inf)     0    0.0%  100.0%  
+Benchmark___10B_mux__100_chunks__100B-2	    3000	   2238813 ns/op	   0.01 MB/s
+--- Histogram (unit: µs)
+	Count: 3000  Min: 808  Max: 9123  Avg: 2237.91
+	------------------------------------------------------------
+	[  808,   809)     1    0.0%    0.0%  
+	[  809,   810)     0    0.0%    0.0%  
+	[  810,   813)     0    0.0%    0.0%  
+	[  813,   819)     0    0.0%    0.0%  
+	[  819,   830)     0    0.0%    0.0%  
+	[  830,   850)     0    0.0%    0.0%  
+	[  850,   886)     5    0.2%    0.2%  
+	[  886,   953)     9    0.3%    0.5%  
+	[  953,  1076)    67    2.2%    2.7%  
+	[ 1076,  1300)   493   16.4%   19.2%  ##
+	[ 1300,  1710)  1176   39.2%   58.4%  ####
+	[ 1710,  2459)   507   16.9%   75.3%  ##
+	[ 2459,  3826)   206    6.9%   82.1%  #
+	[ 3826,  6321)   467   15.6%   97.7%  ##
+	[ 6321, 10876)    69    2.3%  100.0%  
+	[10876, 19190)     0    0.0%  100.0%  
+	[19190,   inf)     0    0.0%  100.0%  
+Benchmark___10B_mux__100_chunks___1KB-2	    3000	   2578794 ns/op	   0.01 MB/s
+--- Histogram (unit: µs)
+	Count: 3000  Min: 856  Max: 10981  Avg: 2577.84
+	------------------------------------------------------------
+	[  856,   857)     1    0.0%    0.0%  
+	[  857,   858)     0    0.0%    0.0%  
+	[  858,   861)     0    0.0%    0.0%  
+	[  861,   867)     0    0.0%    0.0%  
+	[  867,   878)     0    0.0%    0.0%  
+	[  878,   899)     1    0.0%    0.1%  
+	[  899,   939)     1    0.0%    0.1%  
+	[  939,  1012)    13    0.4%    0.5%  
+	[ 1012,  1148)    69    2.3%    2.8%  
+	[ 1148,  1401)   392   13.1%   15.9%  #
+	[ 1401,  1869)  1141   38.0%   53.9%  ####
+	[ 1869,  2734)   603   20.1%   74.0%  ##
+	[ 2734,  4334)   228    7.6%   81.6%  #
+	[ 4334,  7294)   466   15.5%   97.2%  ##
+	[ 7294, 12768)    85    2.8%  100.0%  
+	[12768, 22893)     0    0.0%  100.0%  
+	[22893,   inf)     0    0.0%  100.0%  
+Benchmark___10B_mux__100_chunks__10KB-2	    2000	   5713209 ns/op	   0.00 MB/s
+--- Histogram (unit: µs)
+	Count: 2000  Min: 881  Max: 19620  Avg: 5711.99
+	------------------------------------------------------------
+	[  881,   882)     1    0.1%    0.1%  
+	[  882,   883)     0    0.0%    0.1%  
+	[  883,   886)     0    0.0%    0.1%  
+	[  886,   893)     0    0.0%    0.1%  
+	[  893,   906)     0    0.0%    0.1%  
+	[  906,   932)     2    0.1%    0.2%  
+	[  932,   983)     7    0.4%    0.5%  
+	[  983,  1081)    40    2.0%    2.5%  
+	[ 1081,  1271)   128    6.4%    8.9%  #
+	[ 1271,  1637)   217   10.9%   19.8%  #
+	[ 1637,  2342)   197    9.9%   29.6%  #
+	[ 2342,  3701)   393   19.7%   49.2%  ##
+	[ 3701,  6320)   314   15.7%   65.0%  ##
+	[ 6320, 11367)   391   19.6%   84.5%  ##
+	[11367, 21092)   310   15.5%  100.0%  ##
+	[21092, 39830)     0    0.0%  100.0%  
+	[39830,   inf)     0    0.0%  100.0%  
+Benchmark___10B_mux___1K_chunks___10B-2	    3000	   2563937 ns/op	   0.01 MB/s
+--- Histogram (unit: µs)
+	Count: 3000  Min: 963  Max: 42356  Avg: 2562.97
+	------------------------------------------------------------
+	[  963,   964)     2    0.1%    0.1%  
+	[  964,   966)     1    0.0%    0.1%  
+	[  966,   970)     2    0.1%    0.2%  
+	[  970,   978)     0    0.0%    0.2%  
+	[  978,   995)     2    0.1%    0.2%  
+	[  995,  1029)     8    0.3%    0.5%  
+	[ 1029,  1099)    61    2.0%    2.5%  
+	[ 1099,  1241)   302   10.1%   12.6%  #
+	[ 1241,  1530)  1053   35.1%   47.7%  ####
+	[ 1530,  2119)  1182   39.4%   87.1%  ####
+	[ 2119,  3315)   121    4.0%   91.1%  
+	[ 3315,  5745)    71    2.4%   93.5%  
+	[ 5745, 10682)   116    3.9%   97.4%  
+	[10682, 20712)    21    0.7%   98.1%  
+	[20712, 41088)    54    1.8%   99.9%  
+	[41088, 82480)     4    0.1%  100.0%  
+	[82480,   inf)     0    0.0%  100.0%  
+Benchmark___10B_mux___1K_chunks__100B-2	    3000	   2657054 ns/op	   0.01 MB/s
+--- Histogram (unit: µs)
+	Count: 3000  Min: 931  Max: 46417  Avg: 2656.13
+	------------------------------------------------------------
+	[  931,   932)     1    0.0%    0.0%  
+	[  932,   934)     0    0.0%    0.0%  
+	[  934,   938)     0    0.0%    0.0%  
+	[  938,   946)     0    0.0%    0.0%  
+	[  946,   963)     0    0.0%    0.0%  
+	[  963,   998)     7    0.2%    0.3%  
+	[  998,  1070)    40    1.3%    1.6%  
+	[ 1070,  1219)   247    8.2%    9.8%  #
+	[ 1219,  1523)  1090   36.3%   46.2%  ####
+	[ 1523,  2146)  1210   40.3%   86.5%  ####
+	[ 2146,  3420)   129    4.3%   90.8%  
+	[ 3420,  6024)    74    2.5%   93.3%  
+	[ 6024, 11348)   130    4.3%   97.6%  
+	[11348, 22232)    14    0.5%   98.1%  
+	[22232, 44483)    57    1.9%  100.0%  
+	[44483, 89969)     1    0.0%  100.0%  
+	[89969,   inf)     0    0.0%  100.0%  
+Benchmark___10B_mux___1K_chunks___1KB-2	    2000	   2880796 ns/op	   0.01 MB/s
+--- Histogram (unit: µs)
+	Count: 2000  Min: 996  Max: 48649  Avg: 2879.79
+	------------------------------------------------------------
+	[  996,   997)     1    0.1%    0.1%  
+	[  997,   999)     0    0.0%    0.1%  
+	[  999,  1003)     0    0.0%    0.1%  
+	[ 1003,  1011)     0    0.0%    0.1%  
+	[ 1011,  1028)     0    0.0%    0.1%  
+	[ 1028,  1064)     0    0.0%    0.1%  
+	[ 1064,  1138)    17    0.9%    0.9%  
+	[ 1138,  1290)   126    6.3%    7.2%  #
+	[ 1290,  1602)   685   34.2%   41.5%  ###
+	[ 1602,  2242)   850   42.5%   84.0%  ####
+	[ 2242,  3556)   145    7.2%   91.2%  #
+	[ 3556,  6251)    42    2.1%   93.3%  
+	[ 6251, 11777)    84    4.2%   97.5%  
+	[11777, 23109)    10    0.5%   98.0%  
+	[23109, 46348)    37    1.9%   99.9%  
+	[46348, 94001)     3    0.2%  100.0%  
+	[94001,   inf)     0    0.0%  100.0%  
+Benchmark___10B_mux___1K_chunks__10KB-2	     300	  20013539 ns/op	   0.00 MB/s
+--- Histogram (unit: ms)
+	Count: 300  Min: 1  Max: 68  Avg: 19.55
+	------------------------------------------------------------
+	[  1,   2)   47   15.7%   15.7%  ##
+	[  2,   3)   20    6.7%   22.3%  #
+	[  3,   4)   17    5.7%   28.0%  #
+	[  4,   6)   11    3.7%   31.7%  
+	[  6,   9)   12    4.0%   35.7%  
+	[  9,  13)   22    7.3%   43.0%  #
+	[ 13,  18)   20    6.7%   49.7%  #
+	[ 18,  25)   34   11.3%   61.0%  #
+	[ 25,  34)   52   17.3%   78.3%  ##
+	[ 34,  46)   43   14.3%   92.7%  #
+	[ 46,  62)   21    7.0%   99.7%  #
+	[ 62,  83)    1    0.3%  100.0%  
+	[ 83, 111)    0    0.0%  100.0%  
+	[111, 149)    0    0.0%  100.0%  
+	[149, 199)    0    0.0%  100.0%  
+	[199, 265)    0    0.0%  100.0%  
+	[265, inf)    0    0.0%  100.0%  
+Benchmark___1KB_mux__100_chunks___10B-2	    3000	   2184759 ns/op	   0.92 MB/s
+--- Histogram (unit: µs)
+	Count: 3000  Min: 818  Max: 14154  Avg: 2183.87
+	------------------------------------------------------------
+	[  818,   819)     1    0.0%    0.0%  
+	[  819,   820)     0    0.0%    0.0%  
+	[  820,   823)     0    0.0%    0.0%  
+	[  823,   829)     0    0.0%    0.0%  
+	[  829,   841)     1    0.0%    0.1%  
+	[  841,   864)     0    0.0%    0.1%  
+	[  864,   908)     3    0.1%    0.2%  
+	[  908,   992)    16    0.5%    0.7%  
+	[  992,  1150)   179    6.0%    6.7%  #
+	[ 1150,  1448)   869   29.0%   35.6%  ###
+	[ 1448,  2010)  1256   41.9%   77.5%  ####
+	[ 2010,  3069)   149    5.0%   82.5%  
+	[ 3069,  5064)   380   12.7%   95.1%  #
+	[ 5064,  8822)    91    3.0%   98.2%  
+	[ 8822, 15901)    55    1.8%  100.0%  
+	[15901, 29236)     0    0.0%  100.0%  
+	[29236,   inf)     0    0.0%  100.0%  
+Benchmark___1KB_mux__100_chunks__100B-2	    3000	   2260787 ns/op	   0.88 MB/s
+--- Histogram (unit: µs)
+	Count: 3000  Min: 919  Max: 16047  Avg: 2259.90
+	------------------------------------------------------------
+	[  919,   920)     1    0.0%    0.0%  
+	[  920,   921)     0    0.0%    0.0%  
+	[  921,   924)     0    0.0%    0.0%  
+	[  924,   930)     0    0.0%    0.0%  
+	[  930,   943)     1    0.0%    0.1%  
+	[  943,   967)     3    0.1%    0.2%  
+	[  967,  1013)    10    0.3%    0.5%  
+	[ 1013,  1102)    62    2.1%    2.6%  
+	[ 1102,  1271)   287    9.6%   12.1%  #
+	[ 1271,  1593)  1026   34.2%   46.3%  ###
+	[ 1593,  2204)   980   32.7%   79.0%  ###
+	[ 2204,  3365)    98    3.3%   82.3%  
+	[ 3365,  5572)   411   13.7%   96.0%  #
+	[ 5572,  9764)    80    2.7%   98.6%  
+	[ 9764, 17727)    41    1.4%  100.0%  
+	[17727, 32854)     0    0.0%  100.0%  
+	[32854,   inf)     0    0.0%  100.0%  
+Benchmark___1KB_mux__100_chunks___1KB-2	    3000	   2484297 ns/op	   0.81 MB/s
+--- Histogram (unit: µs)
+	Count: 3000  Min: 909  Max: 17797  Avg: 2483.36
+	------------------------------------------------------------
+	[  909,   910)     1    0.0%    0.0%  
+	[  910,   911)     0    0.0%    0.0%  
+	[  911,   914)     0    0.0%    0.0%  
+	[  914,   921)     0    0.0%    0.0%  
+	[  921,   934)     0    0.0%    0.0%  
+	[  934,   959)     2    0.1%    0.1%  
+	[  959,  1008)    17    0.6%    0.7%  
+	[ 1008,  1101)    42    1.4%    2.1%  
+	[ 1101,  1280)   219    7.3%    9.4%  #
+	[ 1280,  1623)   904   30.1%   39.5%  ###
+	[ 1623,  2281)  1155   38.5%   78.0%  ####
+	[ 2281,  3540)   131    4.4%   82.4%  
+	[ 3540,  5950)   396   13.2%   95.6%  #
+	[ 5950, 10562)    64    2.1%   97.7%  
+	[10562, 19387)    69    2.3%  100.0%  
+	[19387, 36275)     0    0.0%  100.0%  
+	[36275,   inf)     0    0.0%  100.0%  
+Benchmark___1KB_mux__100_chunks__10KB-2	    1000	   5790870 ns/op	   0.35 MB/s
+--- Histogram (unit: µs)
+	Count: 1000  Min: 948  Max: 23009  Avg: 5789.90
+	------------------------------------------------------------
+	[  948,   949)     1    0.1%    0.1%  
+	[  949,   950)     0    0.0%    0.1%  
+	[  950,   953)     0    0.0%    0.1%  
+	[  953,   960)     2    0.2%    0.3%  
+	[  960,   974)     0    0.0%    0.3%  
+	[  974,  1002)     2    0.2%    0.5%  
+	[ 1002,  1056)     5    0.5%    1.0%  
+	[ 1056,  1162)    23    2.3%    3.3%  
+	[ 1162,  1369)    49    4.9%    8.2%  
+	[ 1369,  1772)    70    7.0%   15.2%  #
+	[ 1772,  2558)   132   13.2%   28.4%  #
+	[ 2558,  4090)   236   23.6%   52.0%  ##
+	[ 4090,  7074)   123   12.3%   64.3%  #
+	[ 7074, 12888)   290   29.0%   93.3%  ###
+	[12888, 24213)    67    6.7%  100.0%  #
+	[24213, 46274)     0    0.0%  100.0%  
+	[46274,   inf)     0    0.0%  100.0%  
+Benchmark___1KB_mux___1K_chunks___10B-2	    3000	   2732019 ns/op	   0.73 MB/s
+--- Histogram (unit: µs)
+	Count: 3000  Min: 960  Max: 49925  Avg: 2731.11
+	------------------------------------------------------------
+	[  960,   961)     1    0.0%    0.0%  
+	[  961,   963)     0    0.0%    0.0%  
+	[  963,   967)     1    0.0%    0.1%  
+	[  967,   975)     1    0.0%    0.1%  
+	[  975,   992)     2    0.1%    0.2%  
+	[  992,  1028)    11    0.4%    0.5%  
+	[ 1028,  1103)    38    1.3%    1.8%  
+	[ 1103,  1257)   295    9.8%   11.6%  #
+	[ 1257,  1574)  1111   37.0%   48.7%  ####
+	[ 1574,  2225)  1209   40.3%   89.0%  ####
+	[ 2225,  3563)    84    2.8%   91.8%  
+	[ 3563,  6312)    54    1.8%   93.6%  
+	[ 6312, 11960)    61    2.0%   95.6%  
+	[11960, 23562)    76    2.5%   98.1%  
+	[23562, 47397)    53    1.8%   99.9%  
+	[47397, 96362)     3    0.1%  100.0%  
+	[96362,   inf)     0    0.0%  100.0%  
+Benchmark___1KB_mux___1K_chunks__100B-2	    2000	   2642355 ns/op	   0.76 MB/s
+--- Histogram (unit: µs)
+	Count: 2000  Min: 964  Max: 47935  Avg: 2641.43
+	------------------------------------------------------------
+	[  964,   965)     1    0.1%    0.1%  
+	[  965,   967)     0    0.0%    0.1%  
+	[  967,   971)     0    0.0%    0.1%  
+	[  971,   979)     0    0.0%    0.1%  
+	[  979,   996)     1    0.1%    0.1%  
+	[  996,  1032)     3    0.2%    0.2%  
+	[ 1032,  1105)    22    1.1%    1.4%  
+	[ 1105,  1256)   214   10.7%   12.1%  #
+	[ 1256,  1566)   733   36.6%   48.7%  ####
+	[ 1566,  2201)   818   40.9%   89.6%  ####
+	[ 2201,  3502)    73    3.7%   93.2%  
+	[ 3502,  6168)    21    1.1%   94.3%  
+	[ 6168, 11631)    35    1.8%   96.1%  
+	[11631, 22823)    42    2.1%   98.2%  
+	[22823, 45751)    35    1.8%   99.9%  
+	[45751, 92722)     2    0.1%  100.0%  
+	[92722,   inf)     0    0.0%  100.0%  
+Benchmark___1KB_mux___1K_chunks___1KB-2	    2000	   3128442 ns/op	   0.64 MB/s
+--- Histogram (unit: ms)
+	Count: 2000  Min: 1  Max: 54  Avg: 2.58
+	------------------------------------------------------------
+	[  1,   2)  1494   74.7%   74.7%  #######
+	[  2,   3)   330   16.5%   91.2%  ##
+	[  3,   4)    18    0.9%   92.1%  
+	[  4,   6)    19    1.0%   93.1%  
+	[  6,   8)    12    0.6%   93.7%  
+	[  8,  11)    17    0.9%   94.5%  
+	[ 11,  15)    33    1.7%   96.2%  
+	[ 15,  21)    30    1.5%   97.7%  
+	[ 21,  29)     9    0.5%   98.1%  
+	[ 29,  39)    17    0.9%   99.0%  
+	[ 39,  53)    20    1.0%  100.0%  
+	[ 53,  71)     1    0.1%  100.0%  
+	[ 71,  94)     0    0.0%  100.0%  
+	[ 94, 125)     0    0.0%  100.0%  
+	[125, 165)     0    0.0%  100.0%  
+	[165, 218)     0    0.0%  100.0%  
+	[218, inf)     0    0.0%  100.0%  
+Benchmark___1KB_mux___1K_chunks__10KB-2	     300	  21164935 ns/op	   0.09 MB/s
+--- Histogram (unit: µs)
+	Count: 300  Min: 973  Max: 67388  Avg: 21163.92
+	------------------------------------------------------------
+	[   973,    974)    1    0.3%    0.3%  
+	[   974,    976)    0    0.0%    0.3%  
+	[   976,    980)    1    0.3%    0.7%  
+	[   980,    989)    0    0.0%    0.7%  
+	[   989,   1008)    0    0.0%    0.7%  
+	[  1008,   1048)    0    0.0%    0.7%  
+	[  1048,   1132)    2    0.7%    1.3%  
+	[  1132,   1309)    7    2.3%    3.7%  
+	[  1309,   1682)   14    4.7%    8.3%  
+	[  1682,   2464)   15    5.0%   13.3%  #
+	[  2464,   4104)   28    9.3%   22.7%  #
+	[  4104,   7542)   25    8.3%   31.0%  #
+	[  7542,  14749)   37   12.3%   43.3%  #
+	[ 14749,  29860)   72   24.0%   67.3%  ##
+	[ 29860,  61539)   95   31.7%   99.0%  ###
+	[ 61539, 127953)    3    1.0%  100.0%  
+	[127953,    inf)    0    0.0%  100.0%  
diff --git a/runtime/internal/rpc/benchmark/benchmark.vdl b/runtime/internal/rpc/benchmark/benchmark.vdl
new file mode 100644
index 0000000..c3aacaa
--- /dev/null
+++ b/runtime/internal/rpc/benchmark/benchmark.vdl
@@ -0,0 +1,18 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// package benchmark provides simple tools to measure the performance of the
+// IPC system.
+package benchmark
+
+import (
+	"v.io/v23/security/access"
+)
+
+type Benchmark interface {
+	// Echo returns the payload that it receives.
+	Echo(Payload []byte) ([]byte | error) {access.Read}
+	// EchoStream returns the payload that it receives via the stream.
+	EchoStream() stream<[]byte,[]byte> error {access.Read}
+}
diff --git a/runtime/internal/rpc/benchmark/benchmark.vdl.go b/runtime/internal/rpc/benchmark/benchmark.vdl.go
new file mode 100644
index 0000000..2a63c41
--- /dev/null
+++ b/runtime/internal/rpc/benchmark/benchmark.vdl.go
@@ -0,0 +1,338 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated by the vanadium vdl tool.
+// Source: benchmark.vdl
+
+// package benchmark provides simple tools to measure the performance of the
+// IPC system.
+package benchmark
+
+import (
+	// VDL system imports
+	"io"
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/rpc"
+	"v.io/v23/vdl"
+
+	// VDL user imports
+	"v.io/v23/security/access"
+)
+
+// BenchmarkClientMethods is the client interface
+// containing Benchmark methods.
+type BenchmarkClientMethods interface {
+	// Echo returns the payload that it receives.
+	Echo(ctx *context.T, Payload []byte, opts ...rpc.CallOpt) ([]byte, error)
+	// EchoStream returns the payload that it receives via the stream.
+	EchoStream(*context.T, ...rpc.CallOpt) (BenchmarkEchoStreamClientCall, error)
+}
+
+// BenchmarkClientStub adds universal methods to BenchmarkClientMethods.
+type BenchmarkClientStub interface {
+	BenchmarkClientMethods
+	rpc.UniversalServiceMethods
+}
+
+// BenchmarkClient returns a client stub for Benchmark.
+func BenchmarkClient(name string) BenchmarkClientStub {
+	return implBenchmarkClientStub{name}
+}
+
+type implBenchmarkClientStub struct {
+	name string
+}
+
+func (c implBenchmarkClientStub) Echo(ctx *context.T, i0 []byte, opts ...rpc.CallOpt) (o0 []byte, err error) {
+	err = v23.GetClient(ctx).Call(ctx, c.name, "Echo", []interface{}{i0}, []interface{}{&o0}, opts...)
+	return
+}
+
+func (c implBenchmarkClientStub) EchoStream(ctx *context.T, opts ...rpc.CallOpt) (ocall BenchmarkEchoStreamClientCall, err error) {
+	var call rpc.ClientCall
+	if call, err = v23.GetClient(ctx).StartCall(ctx, c.name, "EchoStream", nil, opts...); err != nil {
+		return
+	}
+	ocall = &implBenchmarkEchoStreamClientCall{ClientCall: call}
+	return
+}
+
+// BenchmarkEchoStreamClientStream is the client stream for Benchmark.EchoStream.
+type BenchmarkEchoStreamClientStream interface {
+	// RecvStream returns the receiver side of the Benchmark.EchoStream client stream.
+	RecvStream() interface {
+		// Advance stages an item so that it may be retrieved via Value.  Returns
+		// true iff there is an item to retrieve.  Advance must be called before
+		// Value is called.  May block if an item is not available.
+		Advance() bool
+		// Value returns the item that was staged by Advance.  May panic if Advance
+		// returned false or was not called.  Never blocks.
+		Value() []byte
+		// Err returns any error encountered by Advance.  Never blocks.
+		Err() error
+	}
+	// SendStream returns the send side of the Benchmark.EchoStream client stream.
+	SendStream() interface {
+		// Send places the item onto the output stream.  Returns errors
+		// encountered while sending, or if Send is called after Close or
+		// the stream has been canceled.  Blocks if there is no buffer
+		// space; will unblock when buffer space is available or after
+		// the stream has been canceled.
+		Send(item []byte) error
+		// Close indicates to the server that no more items will be sent;
+		// server Recv calls will receive io.EOF after all sent items.
+		// This is an optional call - e.g. a client might call Close if it
+		// needs to continue receiving items from the server after it's
+		// done sending.  Returns errors encountered while closing, or if
+		// Close is called after the stream has been canceled.  Like Send,
+		// blocks if there is no buffer space available.
+		Close() error
+	}
+}
+
+// BenchmarkEchoStreamClientCall represents the call returned from Benchmark.EchoStream.
+type BenchmarkEchoStreamClientCall interface {
+	BenchmarkEchoStreamClientStream
+	// Finish performs the equivalent of SendStream().Close, then blocks until
+	// the server is done, and returns the positional return values for the call.
+	//
+	// Finish returns immediately if the call has been canceled; depending on the
+	// timing the output could either be an error signaling cancelation, or the
+	// valid positional return values from the server.
+	//
+	// Calling Finish is mandatory for releasing stream resources, unless the call
+	// has been canceled or any of the other methods return an error.  Finish should
+	// be called at most once.
+	Finish() error
+}
+
+type implBenchmarkEchoStreamClientCall struct {
+	rpc.ClientCall
+	valRecv []byte
+	errRecv error
+}
+
+func (c *implBenchmarkEchoStreamClientCall) RecvStream() interface {
+	Advance() bool
+	Value() []byte
+	Err() error
+} {
+	return implBenchmarkEchoStreamClientCallRecv{c}
+}
+
+type implBenchmarkEchoStreamClientCallRecv struct {
+	c *implBenchmarkEchoStreamClientCall
+}
+
+func (c implBenchmarkEchoStreamClientCallRecv) Advance() bool {
+	c.c.errRecv = c.c.Recv(&c.c.valRecv)
+	return c.c.errRecv == nil
+}
+func (c implBenchmarkEchoStreamClientCallRecv) Value() []byte {
+	return c.c.valRecv
+}
+func (c implBenchmarkEchoStreamClientCallRecv) Err() error {
+	if c.c.errRecv == io.EOF {
+		return nil
+	}
+	return c.c.errRecv
+}
+func (c *implBenchmarkEchoStreamClientCall) SendStream() interface {
+	Send(item []byte) error
+	Close() error
+} {
+	return implBenchmarkEchoStreamClientCallSend{c}
+}
+
+type implBenchmarkEchoStreamClientCallSend struct {
+	c *implBenchmarkEchoStreamClientCall
+}
+
+func (c implBenchmarkEchoStreamClientCallSend) Send(item []byte) error {
+	return c.c.Send(item)
+}
+func (c implBenchmarkEchoStreamClientCallSend) Close() error {
+	return c.c.CloseSend()
+}
+func (c *implBenchmarkEchoStreamClientCall) Finish() (err error) {
+	err = c.ClientCall.Finish()
+	return
+}
+
+// BenchmarkServerMethods is the interface a server writer
+// implements for Benchmark.
+type BenchmarkServerMethods interface {
+	// Echo returns the payload that it receives.
+	Echo(ctx *context.T, call rpc.ServerCall, Payload []byte) ([]byte, error)
+	// EchoStream returns the payload that it receives via the stream.
+	EchoStream(*context.T, BenchmarkEchoStreamServerCall) error
+}
+
+// BenchmarkServerStubMethods is the server interface containing
+// Benchmark methods, as expected by rpc.Server.
+// The only difference between this interface and BenchmarkServerMethods
+// is the streaming methods.
+type BenchmarkServerStubMethods interface {
+	// Echo returns the payload that it receives.
+	Echo(ctx *context.T, call rpc.ServerCall, Payload []byte) ([]byte, error)
+	// EchoStream returns the payload that it receives via the stream.
+	EchoStream(*context.T, *BenchmarkEchoStreamServerCallStub) error
+}
+
+// BenchmarkServerStub adds universal methods to BenchmarkServerStubMethods.
+type BenchmarkServerStub interface {
+	BenchmarkServerStubMethods
+	// Describe the Benchmark interfaces.
+	Describe__() []rpc.InterfaceDesc
+}
+
+// BenchmarkServer returns a server stub for Benchmark.
+// It converts an implementation of BenchmarkServerMethods into
+// an object that may be used by rpc.Server.
+func BenchmarkServer(impl BenchmarkServerMethods) BenchmarkServerStub {
+	stub := implBenchmarkServerStub{
+		impl: impl,
+	}
+	// Initialize GlobState; always check the stub itself first, to handle the
+	// case where the user has the Glob method defined in their VDL source.
+	if gs := rpc.NewGlobState(stub); gs != nil {
+		stub.gs = gs
+	} else if gs := rpc.NewGlobState(impl); gs != nil {
+		stub.gs = gs
+	}
+	return stub
+}
+
+type implBenchmarkServerStub struct {
+	impl BenchmarkServerMethods
+	gs   *rpc.GlobState
+}
+
+func (s implBenchmarkServerStub) Echo(ctx *context.T, call rpc.ServerCall, i0 []byte) ([]byte, error) {
+	return s.impl.Echo(ctx, call, i0)
+}
+
+func (s implBenchmarkServerStub) EchoStream(ctx *context.T, call *BenchmarkEchoStreamServerCallStub) error {
+	return s.impl.EchoStream(ctx, call)
+}
+
+func (s implBenchmarkServerStub) Globber() *rpc.GlobState {
+	return s.gs
+}
+
+func (s implBenchmarkServerStub) Describe__() []rpc.InterfaceDesc {
+	return []rpc.InterfaceDesc{BenchmarkDesc}
+}
+
+// BenchmarkDesc describes the Benchmark interface.
+var BenchmarkDesc rpc.InterfaceDesc = descBenchmark
+
+// descBenchmark hides the desc to keep godoc clean.
+var descBenchmark = rpc.InterfaceDesc{
+	Name:    "Benchmark",
+	PkgPath: "v.io/x/ref/runtime/internal/rpc/benchmark",
+	Methods: []rpc.MethodDesc{
+		{
+			Name: "Echo",
+			Doc:  "// Echo returns the payload that it receives.",
+			InArgs: []rpc.ArgDesc{
+				{"Payload", ``}, // []byte
+			},
+			OutArgs: []rpc.ArgDesc{
+				{"", ``}, // []byte
+			},
+			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Read"))},
+		},
+		{
+			Name: "EchoStream",
+			Doc:  "// EchoStream returns the payload that it receives via the stream.",
+			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Read"))},
+		},
+	},
+}
+
+// BenchmarkEchoStreamServerStream is the server stream for Benchmark.EchoStream.
+type BenchmarkEchoStreamServerStream interface {
+	// RecvStream returns the receiver side of the Benchmark.EchoStream server stream.
+	RecvStream() interface {
+		// Advance stages an item so that it may be retrieved via Value.  Returns
+		// true iff there is an item to retrieve.  Advance must be called before
+		// Value is called.  May block if an item is not available.
+		Advance() bool
+		// Value returns the item that was staged by Advance.  May panic if Advance
+		// returned false or was not called.  Never blocks.
+		Value() []byte
+		// Err returns any error encountered by Advance.  Never blocks.
+		Err() error
+	}
+	// SendStream returns the send side of the Benchmark.EchoStream server stream.
+	SendStream() interface {
+		// Send places the item onto the output stream.  Returns errors encountered
+		// while sending.  Blocks if there is no buffer space; will unblock when
+		// buffer space is available.
+		Send(item []byte) error
+	}
+}
+
+// BenchmarkEchoStreamServerCall represents the context passed to Benchmark.EchoStream.
+type BenchmarkEchoStreamServerCall interface {
+	rpc.ServerCall
+	BenchmarkEchoStreamServerStream
+}
+
+// BenchmarkEchoStreamServerCallStub is a wrapper that converts rpc.StreamServerCall into
+// a typesafe stub that implements BenchmarkEchoStreamServerCall.
+type BenchmarkEchoStreamServerCallStub struct {
+	rpc.StreamServerCall
+	valRecv []byte
+	errRecv error
+}
+
+// Init initializes BenchmarkEchoStreamServerCallStub from rpc.StreamServerCall.
+func (s *BenchmarkEchoStreamServerCallStub) Init(call rpc.StreamServerCall) {
+	s.StreamServerCall = call
+}
+
+// RecvStream returns the receiver side of the Benchmark.EchoStream server stream.
+func (s *BenchmarkEchoStreamServerCallStub) RecvStream() interface {
+	Advance() bool
+	Value() []byte
+	Err() error
+} {
+	return implBenchmarkEchoStreamServerCallRecv{s}
+}
+
+type implBenchmarkEchoStreamServerCallRecv struct {
+	s *BenchmarkEchoStreamServerCallStub
+}
+
+func (s implBenchmarkEchoStreamServerCallRecv) Advance() bool {
+	s.s.errRecv = s.s.Recv(&s.s.valRecv)
+	return s.s.errRecv == nil
+}
+func (s implBenchmarkEchoStreamServerCallRecv) Value() []byte {
+	return s.s.valRecv
+}
+func (s implBenchmarkEchoStreamServerCallRecv) Err() error {
+	if s.s.errRecv == io.EOF {
+		return nil
+	}
+	return s.s.errRecv
+}
+
+// SendStream returns the send side of the Benchmark.EchoStream server stream.
+func (s *BenchmarkEchoStreamServerCallStub) SendStream() interface {
+	Send(item []byte) error
+} {
+	return implBenchmarkEchoStreamServerCallSend{s}
+}
+
+type implBenchmarkEchoStreamServerCallSend struct {
+	s *BenchmarkEchoStreamServerCallStub
+}
+
+func (s implBenchmarkEchoStreamServerCallSend) Send(item []byte) error {
+	return s.s.Send(item)
+}
diff --git a/runtime/internal/rpc/benchmark/benchmark/main.go b/runtime/internal/rpc/benchmark/benchmark/main.go
new file mode 100644
index 0000000..1afe697
--- /dev/null
+++ b/runtime/internal/rpc/benchmark/benchmark/main.go
@@ -0,0 +1,59 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// A simple command-line tool to run the benchmark client.
+package main
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"testing"
+	"time"
+
+	_ "v.io/x/ref/runtime/factories/generic"
+	"v.io/x/ref/runtime/internal/rpc/benchmark/internal"
+	tbm "v.io/x/ref/test/benchmark"
+
+	"v.io/v23"
+	"v.io/x/lib/vlog"
+)
+
+var (
+	server = flag.String("server", "", "address of the server to connect to")
+
+	iterations = flag.Int("iterations", 100, "number of iterations to run")
+
+	chunkCnt       = flag.Int("chunk_count", 0, "number of chunks to send per streaming RPC (if zero, use non-streaming RPC)")
+	payloadSize    = flag.Int("payload_size", 0, "size of payload in bytes")
+	chunkCntMux    = flag.Int("mux_chunk_count", 0, "number of chunks to send in background")
+	payloadSizeMux = flag.Int("mux_payload_size", 0, "size of payload to send in background")
+)
+
+func main() {
+	ctx, shutdown := v23.Init()
+	defer shutdown()
+
+	if *chunkCntMux > 0 && *payloadSizeMux > 0 {
+		dummyB := testing.B{}
+		_, stop := internal.StartEchoStream(&dummyB, ctx, *server, 0, *chunkCntMux, *payloadSizeMux, nil)
+		defer stop()
+		vlog.Infof("Started background streaming (chunk_count=%d, payload_size=%d)", *chunkCntMux, *payloadSizeMux)
+	}
+
+	dummyB := testing.B{}
+	stats := tbm.NewStats(16)
+
+	now := time.Now()
+	if *chunkCnt == 0 {
+		internal.CallEcho(&dummyB, ctx, *server, *iterations, *payloadSize, stats)
+	} else {
+		internal.CallEchoStream(&dummyB, ctx, *server, *iterations, *chunkCnt, *payloadSize, stats)
+	}
+	elapsed := time.Since(now)
+
+	fmt.Printf("iterations: %d  chunk_count: %d  payload_size: %d\n", *iterations, *chunkCnt, *payloadSize)
+	fmt.Printf("elapsed time: %v\n", elapsed)
+	stats.Print(os.Stdout)
+}
diff --git a/runtime/internal/rpc/benchmark/benchmark_test.go b/runtime/internal/rpc/benchmark/benchmark_test.go
new file mode 100644
index 0000000..fe41273
--- /dev/null
+++ b/runtime/internal/rpc/benchmark/benchmark_test.go
@@ -0,0 +1,128 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package benchmark_test
+
+import (
+	"os"
+	"testing"
+
+	"v.io/v23"
+	"v.io/v23/context"
+
+	_ "v.io/x/ref/runtime/factories/static"
+	"v.io/x/ref/runtime/internal/rpc/benchmark/internal"
+	"v.io/x/ref/test"
+	"v.io/x/ref/test/benchmark"
+)
+
+var (
+	serverAddr string
+	ctx        *context.T
+)
+
+// Benchmarks for non-streaming RPC.
+func runEcho(b *testing.B, payloadSize int) {
+	internal.CallEcho(b, ctx, serverAddr, b.N, payloadSize, benchmark.AddStats(b, 16))
+}
+
+func Benchmark____1B(b *testing.B) { runEcho(b, 1) }
+func Benchmark___10B(b *testing.B) { runEcho(b, 10) }
+func Benchmark__100B(b *testing.B) { runEcho(b, 100) }
+func Benchmark___1KB(b *testing.B) { runEcho(b, 1000) }
+func Benchmark__10KB(b *testing.B) { runEcho(b, 10000) }
+func Benchmark_100KB(b *testing.B) { runEcho(b, 100000) }
+
+// Benchmarks for streaming RPC.
+func runEchoStream(b *testing.B, chunkCnt, payloadSize int) {
+	internal.CallEchoStream(b, ctx, serverAddr, b.N, chunkCnt, payloadSize, benchmark.AddStats(b, 16))
+}
+
+func Benchmark____1_chunk_____1B(b *testing.B) { runEchoStream(b, 1, 1) }
+func Benchmark____1_chunk____10B(b *testing.B) { runEchoStream(b, 1, 10) }
+func Benchmark____1_chunk___100B(b *testing.B) { runEchoStream(b, 1, 100) }
+func Benchmark____1_chunk____1KB(b *testing.B) { runEchoStream(b, 1, 1000) }
+func Benchmark____1_chunk___10KB(b *testing.B) { runEchoStream(b, 1, 10000) }
+func Benchmark____1_chunk__100KB(b *testing.B) { runEchoStream(b, 1, 100000) }
+func Benchmark___10_chunk_____1B(b *testing.B) { runEchoStream(b, 10, 1) }
+func Benchmark___10_chunk____10B(b *testing.B) { runEchoStream(b, 10, 10) }
+func Benchmark___10_chunk___100B(b *testing.B) { runEchoStream(b, 10, 100) }
+func Benchmark___10_chunk____1KB(b *testing.B) { runEchoStream(b, 10, 1000) }
+func Benchmark___10_chunk___10KB(b *testing.B) { runEchoStream(b, 10, 10000) }
+func Benchmark___10_chunk__100KB(b *testing.B) { runEchoStream(b, 10, 100000) }
+func Benchmark__100_chunk_____1B(b *testing.B) { runEchoStream(b, 100, 1) }
+func Benchmark__100_chunk____10B(b *testing.B) { runEchoStream(b, 100, 10) }
+func Benchmark__100_chunk___100B(b *testing.B) { runEchoStream(b, 100, 100) }
+func Benchmark__100_chunk____1KB(b *testing.B) { runEchoStream(b, 100, 1000) }
+func Benchmark__100_chunk___10KB(b *testing.B) { runEchoStream(b, 100, 10000) }
+func Benchmark__100_chunk__100KB(b *testing.B) { runEchoStream(b, 100, 100000) }
+func Benchmark___1K_chunk_____1B(b *testing.B) { runEchoStream(b, 1000, 1) }
+func Benchmark___1K_chunk____10B(b *testing.B) { runEchoStream(b, 1000, 10) }
+func Benchmark___1K_chunk___100B(b *testing.B) { runEchoStream(b, 1000, 100) }
+func Benchmark___1K_chunk____1KB(b *testing.B) { runEchoStream(b, 1000, 1000) }
+func Benchmark___1K_chunk___10KB(b *testing.B) { runEchoStream(b, 1000, 10000) }
+func Benchmark___1K_chunk__100KB(b *testing.B) { runEchoStream(b, 1000, 100000) }
+
+// Benchmarks for per-chunk throughput in streaming RPC.
+func runPerChunk(b *testing.B, payloadSize int) {
+	internal.CallEchoStream(b, ctx, serverAddr, 1, b.N, payloadSize, benchmark.NewStats(1))
+}
+
+func Benchmark__per_chunk____1B(b *testing.B) { runPerChunk(b, 1) }
+func Benchmark__per_chunk___10B(b *testing.B) { runPerChunk(b, 10) }
+func Benchmark__per_chunk__100B(b *testing.B) { runPerChunk(b, 100) }
+func Benchmark__per_chunk___1KB(b *testing.B) { runPerChunk(b, 1000) }
+func Benchmark__per_chunk__10KB(b *testing.B) { runPerChunk(b, 10000) }
+func Benchmark__per_chunk_100KB(b *testing.B) { runPerChunk(b, 100000) }
+
+// Benchmarks for non-streaming RPC while running streaming RPC in background.
+func runMux(b *testing.B, payloadSize, chunkCntB, payloadSizeB int) {
+	_, stop := internal.StartEchoStream(&testing.B{}, ctx, serverAddr, 0, chunkCntB, payloadSizeB, benchmark.NewStats(1))
+	internal.CallEcho(b, ctx, serverAddr, b.N, payloadSize, benchmark.AddStats(b, 16))
+	stop()
+}
+
+func Benchmark___10B_mux__100_chunks___10B(b *testing.B) { runMux(b, 10, 100, 10) }
+func Benchmark___10B_mux__100_chunks__100B(b *testing.B) { runMux(b, 10, 100, 100) }
+func Benchmark___10B_mux__100_chunks___1KB(b *testing.B) { runMux(b, 10, 100, 1000) }
+func Benchmark___10B_mux__100_chunks__10KB(b *testing.B) { runMux(b, 10, 100, 10000) }
+func Benchmark___10B_mux___1K_chunks___10B(b *testing.B) { runMux(b, 10, 1000, 10) }
+func Benchmark___10B_mux___1K_chunks__100B(b *testing.B) { runMux(b, 10, 1000, 100) }
+func Benchmark___10B_mux___1K_chunks___1KB(b *testing.B) { runMux(b, 10, 1000, 1000) }
+func Benchmark___10B_mux___1K_chunks__10KB(b *testing.B) { runMux(b, 10, 1000, 10000) }
+func Benchmark___1KB_mux__100_chunks___10B(b *testing.B) { runMux(b, 1000, 100, 10) }
+func Benchmark___1KB_mux__100_chunks__100B(b *testing.B) { runMux(b, 1000, 100, 100) }
+func Benchmark___1KB_mux__100_chunks___1KB(b *testing.B) { runMux(b, 1000, 100, 1000) }
+func Benchmark___1KB_mux__100_chunks__10KB(b *testing.B) { runMux(b, 1000, 100, 10000) }
+func Benchmark___1KB_mux___1K_chunks___10B(b *testing.B) { runMux(b, 1000, 1000, 10) }
+func Benchmark___1KB_mux___1K_chunks__100B(b *testing.B) { runMux(b, 1000, 1000, 100) }
+func Benchmark___1KB_mux___1K_chunks___1KB(b *testing.B) { runMux(b, 1000, 1000, 1000) }
+func Benchmark___1KB_mux___1K_chunks__10KB(b *testing.B) { runMux(b, 1000, 1000, 10000) }
+
+// A single empty test to avoid:
+// testing: warning: no tests to run
+// from showing up when running benchmarks in this package via "go test"
+func TestNoOp(t *testing.T) {}
+
+func TestMain(m *testing.M) {
+	test.Init()
+	// We do not use defer here since this program will exit at the end of
+	// this function through os.Exit().
+	var shutdown v23.Shutdown
+	ctx, shutdown = test.InitForTest()
+
+	serverEP, serverStop := internal.StartServer(ctx, v23.GetListenSpec(ctx))
+	serverAddr = serverEP.Name()
+
+	// Create a VC to exclude the VC setup time from the benchmark.
+	internal.CallEcho(&testing.B{}, ctx, serverAddr, 1, 0, benchmark.NewStats(1))
+
+	r := benchmark.RunTestMain(m)
+
+	serverStop()
+	shutdown()
+
+	os.Exit(r)
+}
diff --git a/runtime/internal/rpc/benchmark/benchmarkd/main.go b/runtime/internal/rpc/benchmark/benchmarkd/main.go
new file mode 100644
index 0000000..a9ce539
--- /dev/null
+++ b/runtime/internal/rpc/benchmark/benchmarkd/main.go
@@ -0,0 +1,25 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// A simple command-line tool to run the benchmark server.
+package main
+
+import (
+	"v.io/v23"
+	"v.io/x/lib/vlog"
+
+	"v.io/x/ref/lib/signals"
+	_ "v.io/x/ref/runtime/factories/roaming"
+	"v.io/x/ref/runtime/internal/rpc/benchmark/internal"
+)
+
+func main() {
+	ctx, shutdown := v23.Init()
+	defer shutdown()
+
+	ep, stop := internal.StartServer(ctx, v23.GetListenSpec(ctx))
+	vlog.Infof("Listening on %s", ep.Name())
+	defer stop()
+	<-signals.ShutdownOnSignals(ctx)
+}
diff --git a/runtime/internal/rpc/benchmark/glob/README.txt b/runtime/internal/rpc/benchmark/glob/README.txt
new file mode 100644
index 0000000..0103cd2
--- /dev/null
+++ b/runtime/internal/rpc/benchmark/glob/README.txt
@@ -0,0 +1,120 @@
+Glob Benchmarks
+
+The benchmarks in this directory attempt to provide some guidance for the amount
+of buffering to use with the channels returned by Glob__ and GlobChildren__.
+
+The first set of benchmarks (BenchmarkChanN) shows the relationship between the
+buffer size and the latency of a very simple channel with one writer and one
+reader doing nothing else.
+
+The second set of benchmarks (BenchmarkGlobN) does the same thing but with a
+Glob__ server and a Glob client. The third set (BenchmarkGlobChildrenN) uses
+GlobChildren__.
+
+As of 2014-12-03, the conclusion is that the queue size has very little impact
+on performance.
+
+The BenchmarkChanN set shows that increasing the queue size improves latency for
+the very simple case, but not for Glob__ or GlobChildren__.
+
+An interesting observation is that all the benchmarks get slower as the number
+of CPUs increases.
+
+Below are the test results for 1, 2, and 4 CPUs on an HP Z420 workstation with
+2 × 6 CPU cores (Intel(R) Xeon(R) CPU E5-1650 v2 @ 3.50GHz).
+
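+The test binary can be built with the standard -c flag of "go test" (assuming
+the v23 tool is set up as for the other benchmarks in this repository):
+
+$ v23 go test -c v.io/x/ref/runtime/internal/rpc/benchmark/glob
+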
+$ ./glob.test -test.bench=. -test.benchtime=5s -test.cpu=1
+BenchmarkChan0	20000000	       464 ns/op
+BenchmarkChan1	20000000	       585 ns/op
+BenchmarkChan2	20000000	       484 ns/op
+BenchmarkChan4	20000000	       425 ns/op
+BenchmarkChan8	50000000	       396 ns/op
+BenchmarkChan16	50000000	       381 ns/op
+BenchmarkChan32	50000000	       371 ns/op
+BenchmarkChan64	50000000	       365 ns/op
+BenchmarkChan128	50000000	       363 ns/op
+BenchmarkChan256	50000000	       362 ns/op
+BenchmarkGlob0	  500000	     35029 ns/op
+BenchmarkGlob1	  500000	     63536 ns/op
+BenchmarkGlob2	  500000	     34753 ns/op
+BenchmarkGlob4	  500000	     26379 ns/op
+BenchmarkGlob8	  500000	     19293 ns/op
+BenchmarkGlob16	 1000000	     18149 ns/op
+BenchmarkGlob32	  500000	     52364 ns/op
+BenchmarkGlob64	  500000	     83879 ns/op
+BenchmarkGlob128	  100000	     88448 ns/op
+BenchmarkGlob256	  100000	     57922 ns/op
+BenchmarkGlobChildren0	  100000	    118448 ns/op
+BenchmarkGlobChildren1	  100000	    123274 ns/op
+BenchmarkGlobChildren2	  100000	    116110 ns/op
+BenchmarkGlobChildren4	  100000	    134175 ns/op
+BenchmarkGlobChildren8	  100000	    118776 ns/op
+BenchmarkGlobChildren16	  100000	    123191 ns/op
+BenchmarkGlobChildren32	  100000	    132195 ns/op
+BenchmarkGlobChildren64	  100000	    126004 ns/op
+BenchmarkGlobChildren128	  100000	    135072 ns/op
+BenchmarkGlobChildren256	  100000	    127399 ns/op
+
+$ ./glob.test -test.bench=. -test.benchtime=5s -test.cpu=2
+BenchmarkChan0-2	 5000000	      1595 ns/op
+BenchmarkChan1-2	 5000000	      1649 ns/op
+BenchmarkChan2-2	10000000	      1245 ns/op
+BenchmarkChan4-2	10000000	      1299 ns/op
+BenchmarkChan8-2	10000000	       982 ns/op
+BenchmarkChan16-2	10000000	       929 ns/op
+BenchmarkChan32-2	10000000	       916 ns/op
+BenchmarkChan64-2	10000000	       903 ns/op
+BenchmarkChan128-2	10000000	       907 ns/op
+BenchmarkChan256-2	10000000	       914 ns/op
+BenchmarkGlob0-2	  500000	     61455 ns/op
+BenchmarkGlob1-2	  500000	     46890 ns/op
+BenchmarkGlob2-2	  200000	     56462 ns/op
+BenchmarkGlob4-2	  500000	     22783 ns/op
+BenchmarkGlob8-2	  200000	     64783 ns/op
+BenchmarkGlob16-2	 1000000	     68119 ns/op
+BenchmarkGlob32-2	  200000	     78611 ns/op
+BenchmarkGlob64-2	  500000	     82180 ns/op
+BenchmarkGlob128-2	 1000000	     81548 ns/op
+BenchmarkGlob256-2	  100000	     88278 ns/op
+BenchmarkGlobChildren0-2	  100000	     83188 ns/op
+BenchmarkGlobChildren1-2	  100000	     81751 ns/op
+BenchmarkGlobChildren2-2	  100000	     81896 ns/op
+BenchmarkGlobChildren4-2	  100000	     81857 ns/op
+BenchmarkGlobChildren8-2	  100000	     81531 ns/op
+BenchmarkGlobChildren16-2	  100000	     89915 ns/op
+BenchmarkGlobChildren32-2	  100000	     81112 ns/op
+BenchmarkGlobChildren64-2	  100000	     80997 ns/op
+BenchmarkGlobChildren128-2	  100000	     81350 ns/op
+BenchmarkGlobChildren256-2	  100000	     81344 ns/op
+
+$ ./glob.test -test.bench=. -test.benchtime=5s -test.cpu=4
+BenchmarkChan0-4	 5000000	      2012 ns/op
+BenchmarkChan1-4	 5000000	      3149 ns/op
+BenchmarkChan2-4	 5000000	      1839 ns/op
+BenchmarkChan4-4	10000000	       957 ns/op
+BenchmarkChan8-4	20000000	       660 ns/op
+BenchmarkChan16-4	20000000	       523 ns/op
+BenchmarkChan32-4	20000000	       507 ns/op
+BenchmarkChan64-4	20000000	       509 ns/op
+BenchmarkChan128-4	20000000	       507 ns/op
+BenchmarkChan256-4	20000000	       511 ns/op
+BenchmarkGlob0-4	  100000	    103269 ns/op
+BenchmarkGlob1-4	  100000	    101222 ns/op
+BenchmarkGlob2-4	  100000	    102049 ns/op
+BenchmarkGlob4-4	  100000	    102763 ns/op
+BenchmarkGlob8-4	  100000	    101939 ns/op
+BenchmarkGlob16-4	  100000	    102989 ns/op
+BenchmarkGlob32-4	  100000	    103898 ns/op
+BenchmarkGlob64-4	  100000	    102838 ns/op
+BenchmarkGlob128-4	  100000	    101532 ns/op
+BenchmarkGlob256-4	  100000	    101059 ns/op
+BenchmarkGlobChildren0-4	  100000	    106617 ns/op
+BenchmarkGlobChildren1-4	  100000	    102576 ns/op
+BenchmarkGlobChildren2-4	  100000	    106313 ns/op
+BenchmarkGlobChildren4-4	  100000	    102774 ns/op
+BenchmarkGlobChildren8-4	  100000	    102886 ns/op
+BenchmarkGlobChildren16-4	  100000	    106771 ns/op
+BenchmarkGlobChildren32-4	  100000	    103309 ns/op
+BenchmarkGlobChildren64-4	  100000	    105112 ns/op
+BenchmarkGlobChildren128-4	  100000	    102295 ns/op
+BenchmarkGlobChildren256-4	  100000	    102951 ns/op
diff --git a/runtime/internal/rpc/benchmark/glob/doc.go b/runtime/internal/rpc/benchmark/glob/doc.go
new file mode 100644
index 0000000..ce0aad5
--- /dev/null
+++ b/runtime/internal/rpc/benchmark/glob/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package glob
+
+// This file exists only to prevent build failures from having a test-only
+// package.
diff --git a/runtime/internal/rpc/benchmark/glob/glob_test.go b/runtime/internal/rpc/benchmark/glob/glob_test.go
new file mode 100644
index 0000000..44454b9
--- /dev/null
+++ b/runtime/internal/rpc/benchmark/glob/glob_test.go
@@ -0,0 +1,258 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package glob_test
+
+import (
+	"fmt"
+	"testing"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+
+	_ "v.io/x/ref/runtime/factories/generic"
+	"v.io/x/ref/test"
+)
+
+func TestNothing(t *testing.T) {
+}
+
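+// RunBenchmarkChan measures the time to pass b.N short strings through a
+// channel with the given buffer size, using one writer and one reader.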
+func RunBenchmarkChan(b *testing.B, bufferSize int) {
+	ch := make(chan string, bufferSize)
+	go func() {
+		for i := 0; i < b.N; i++ {
+			ch <- fmt.Sprintf("%09d", i)
+		}
+		close(ch)
+	}()
+	for range ch {
+	}
+}
+
+func BenchmarkChan0(b *testing.B) {
+	RunBenchmarkChan(b, 0)
+}
+
+func BenchmarkChan1(b *testing.B) {
+	RunBenchmarkChan(b, 1)
+}
+
+func BenchmarkChan2(b *testing.B) {
+	RunBenchmarkChan(b, 2)
+}
+
+func BenchmarkChan4(b *testing.B) {
+	RunBenchmarkChan(b, 4)
+}
+
+func BenchmarkChan8(b *testing.B) {
+	RunBenchmarkChan(b, 8)
+}
+
+func BenchmarkChan16(b *testing.B) {
+	RunBenchmarkChan(b, 16)
+}
+
+func BenchmarkChan32(b *testing.B) {
+	RunBenchmarkChan(b, 32)
+}
+
+func BenchmarkChan64(b *testing.B) {
+	RunBenchmarkChan(b, 64)
+}
+
+func BenchmarkChan128(b *testing.B) {
+	RunBenchmarkChan(b, 128)
+}
+
+func BenchmarkChan256(b *testing.B) {
+	RunBenchmarkChan(b, 256)
+}
+
+type disp struct {
+	obj interface{}
+}
+
+func (d *disp) Lookup(suffix string) (interface{}, security.Authorizer, error) {
+	return d.obj, nil, nil
+}
+
+func startServer(b *testing.B, ctx *context.T, obj interface{}) (string, func(), error) {
+	server, err := v23.NewServer(ctx)
+	if err != nil {
+		return "", nil, fmt.Errorf("failed to start server: %v", err)
+	}
+	endpoints, err := server.Listen(v23.GetListenSpec(ctx))
+	if err != nil {
+		return "", nil, fmt.Errorf("failed to listen: %v", err)
+	}
+	if err := server.ServeDispatcher("", &disp{obj}); err != nil {
+		return "", nil, err
+	}
+	return endpoints[0].Name(), func() { server.Stop() }, nil
+}
+
+type globObject struct {
+	b          *testing.B
+	bufferSize int
+}
+
+func (o *globObject) Glob__(_ *context.T, _ rpc.ServerCall, pattern string) (<-chan naming.GlobReply, error) {
+	if pattern != "*" {
+		panic("this benchmark only works with pattern='*'")
+	}
+	ch := make(chan naming.GlobReply, o.bufferSize)
+	go func() {
+		for i := 0; i < o.b.N; i++ {
+			name := fmt.Sprintf("%09d", i)
+			ch <- naming.GlobReplyEntry{Value: naming.MountEntry{Name: name}}
+		}
+		close(ch)
+	}()
+	return ch, nil
+}
+
+type globChildrenObject struct {
+	b          *testing.B
+	bufferSize int
+}
+
+func (o *globChildrenObject) GlobChildren__(_ *context.T, call rpc.ServerCall) (<-chan string, error) {
+	if call.Suffix() != "" {
+		return nil, nil
+	}
+	ch := make(chan string, o.bufferSize)
+	go func() {
+		for i := 0; i < o.b.N; i++ {
+			ch <- fmt.Sprintf("%09d", i)
+		}
+		close(ch)
+	}()
+	return ch, nil
+}
+
+func globClient(b *testing.B, ctx *context.T, name string) (int, error) {
+	client := v23.GetClient(ctx)
+	call, err := client.StartCall(ctx, name, rpc.GlobMethod, []interface{}{"*"})
+	if err != nil {
+		return 0, err
+	}
+	var me naming.MountEntry
+	b.ResetTimer()
+	count := 0
+	for {
+		if err := call.Recv(&me); err != nil {
+			break
+		}
+		count++
+	}
+	b.StopTimer()
+	if err := call.Finish(); err != nil {
+		return 0, err
+	}
+	return count, nil
+}
+
+func RunBenchmarkGlob(b *testing.B, obj interface{}) {
+	ctx, shutdown := test.InitForTest()
+	defer shutdown()
+
+	addr, stop, err := startServer(b, ctx, obj)
+	if err != nil {
+		b.Fatalf("startServer failed: %v", err)
+	}
+	defer stop()
+
+	count, err := globClient(b, ctx, addr)
+	if err != nil {
+		b.Fatalf("globClient failed: %v", err)
+	}
+	if count != b.N {
+		b.Fatalf("unexpected number of results: got %d, expected %d", count, b.N)
+	}
+}
+
+func BenchmarkGlob0(b *testing.B) {
+	RunBenchmarkGlob(b, &globObject{b, 0})
+}
+
+func BenchmarkGlob1(b *testing.B) {
+	RunBenchmarkGlob(b, &globObject{b, 1})
+}
+
+func BenchmarkGlob2(b *testing.B) {
+	RunBenchmarkGlob(b, &globObject{b, 2})
+}
+
+func BenchmarkGlob4(b *testing.B) {
+	RunBenchmarkGlob(b, &globObject{b, 4})
+}
+
+func BenchmarkGlob8(b *testing.B) {
+	RunBenchmarkGlob(b, &globObject{b, 8})
+}
+
+func BenchmarkGlob16(b *testing.B) {
+	RunBenchmarkGlob(b, &globObject{b, 16})
+}
+
+func BenchmarkGlob32(b *testing.B) {
+	RunBenchmarkGlob(b, &globObject{b, 32})
+}
+
+func BenchmarkGlob64(b *testing.B) {
+	RunBenchmarkGlob(b, &globObject{b, 64})
+}
+
+func BenchmarkGlob128(b *testing.B) {
+	RunBenchmarkGlob(b, &globObject{b, 128})
+}
+
+func BenchmarkGlob256(b *testing.B) {
+	RunBenchmarkGlob(b, &globObject{b, 256})
+}
+
+func BenchmarkGlobChildren0(b *testing.B) {
+	RunBenchmarkGlob(b, &globChildrenObject{b, 0})
+}
+
+func BenchmarkGlobChildren1(b *testing.B) {
+	RunBenchmarkGlob(b, &globChildrenObject{b, 1})
+}
+
+func BenchmarkGlobChildren2(b *testing.B) {
+	RunBenchmarkGlob(b, &globChildrenObject{b, 2})
+}
+
+func BenchmarkGlobChildren4(b *testing.B) {
+	RunBenchmarkGlob(b, &globChildrenObject{b, 4})
+}
+
+func BenchmarkGlobChildren8(b *testing.B) {
+	RunBenchmarkGlob(b, &globChildrenObject{b, 8})
+}
+
+func BenchmarkGlobChildren16(b *testing.B) {
+	RunBenchmarkGlob(b, &globChildrenObject{b, 16})
+}
+
+func BenchmarkGlobChildren32(b *testing.B) {
+	RunBenchmarkGlob(b, &globChildrenObject{b, 32})
+}
+
+func BenchmarkGlobChildren64(b *testing.B) {
+	RunBenchmarkGlob(b, &globChildrenObject{b, 64})
+}
+
+func BenchmarkGlobChildren128(b *testing.B) {
+	RunBenchmarkGlob(b, &globChildrenObject{b, 128})
+}
+
+func BenchmarkGlobChildren256(b *testing.B) {
+	RunBenchmarkGlob(b, &globChildrenObject{b, 256})
+}
diff --git a/runtime/internal/rpc/benchmark/internal/client.go b/runtime/internal/rpc/benchmark/internal/client.go
new file mode 100644
index 0000000..95a76e0
--- /dev/null
+++ b/runtime/internal/rpc/benchmark/internal/client.go
@@ -0,0 +1,152 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+	"bytes"
+	"fmt"
+	"testing"
+	"time"
+
+	"v.io/v23/context"
+
+	"v.io/x/lib/vlog"
+	"v.io/x/ref/runtime/internal/rpc/benchmark"
+	tbm "v.io/x/ref/test/benchmark"
+)
+
+// CallEcho calls the 'Echo' method 'iterations' times with the given payload size.
+func CallEcho(b *testing.B, ctx *context.T, address string, iterations, payloadSize int, stats *tbm.Stats) {
+	stub := benchmark.BenchmarkClient(address)
+	payload := make([]byte, payloadSize)
+	for i := range payload {
+		payload[i] = byte(i & 0xff)
+	}
+
+	b.SetBytes(int64(payloadSize) * 2) // 2 for round trip of each payload.
+	b.ResetTimer()                     // Exclude setup time from measurement.
+
+	for i := 0; i < iterations; i++ {
+		b.StartTimer()
+		start := time.Now()
+
+		r, err := stub.Echo(ctx, payload)
+
+		elapsed := time.Since(start)
+		b.StopTimer()
+
+		if err != nil {
+			vlog.Fatalf("Echo failed: %v", err)
+		}
+		if !bytes.Equal(r, payload) {
+			vlog.Fatalf("Echo returned %v, but expected %v", r, payload)
+		}
+
+		stats.Add(elapsed)
+	}
+}
+
+// CallEchoStream calls the 'EchoStream' method 'iterations' times. Each iteration sends
+// 'chunkCnt' chunks on the stream and receives the same number of chunks back. Each
+// chunk carries a payload of the given size.
+func CallEchoStream(b *testing.B, ctx *context.T, address string, iterations, chunkCnt, payloadSize int, stats *tbm.Stats) {
+	done, _ := StartEchoStream(b, ctx, address, iterations, chunkCnt, payloadSize, stats)
+	<-done
+}
+
+// StartEchoStream starts calling the 'EchoStream' method 'iterations' times. It does
+// not block; it returns a channel that receives the number of completed iterations
+// when streaming finishes, along with a callback function that stops the streaming.
+// Each iteration sends 'chunkCnt' chunks on the stream and receives that number of
+// chunks back. Each chunk carries a payload of the given size. Zero 'iterations'
+// means unlimited.
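+//
+// A typical background-load use (cf. benchmark/main.go), sketched:
+//
+//	_, stop := StartEchoStream(&testing.B{}, ctx, addr, 0, chunkCnt, payloadSize, stats)
+//	defer stop() // halts the unlimited stream and waits for it to finish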
+func StartEchoStream(b *testing.B, ctx *context.T, address string, iterations, chunkCnt, payloadSize int, stats *tbm.Stats) (<-chan int, func()) {
+	stub := benchmark.BenchmarkClient(address)
+	payload := make([]byte, payloadSize)
+	for i := range payload {
+		payload[i] = byte(i & 0xff)
+	}
+
+	stop := make(chan struct{})
+	stopped := func() bool {
+		select {
+		case <-stop:
+			return true
+		default:
+			return false
+		}
+	}
+	done := make(chan int, 1)
+
+	if b.N > 0 {
+		// 2 for round trip of each payload.
+		b.SetBytes(int64((iterations*chunkCnt/b.N)*payloadSize) * 2)
+	}
+	b.ResetTimer() // Exclude setup time from measurement.
+
+	go func() {
+		defer close(done)
+
+		n := 0
+		for ; !stopped() && (iterations == 0 || n < iterations); n++ {
+			b.StartTimer()
+			start := time.Now()
+
+			stream, err := stub.EchoStream(ctx)
+			if err != nil {
+				vlog.Fatalf("EchoStream failed: %v", err)
+			}
+
+			rDone := make(chan error, 1)
+			go func() {
+				defer close(rDone)
+
+				rStream := stream.RecvStream()
+				i := 0
+				for ; rStream.Advance(); i++ {
+					r := rStream.Value()
+					if !bytes.Equal(r, payload) {
+						rDone <- fmt.Errorf("EchoStream returned %v, but expected %v", r, payload)
+						return
+					}
+				}
+				if i != chunkCnt {
+					rDone <- fmt.Errorf("EchoStream returned %d chunks, but expected %d", i, chunkCnt)
+					return
+				}
+				rDone <- rStream.Err()
+			}()
+
+			sStream := stream.SendStream()
+			for i := 0; i < chunkCnt; i++ {
+				if err = sStream.Send(payload); err != nil {
+					vlog.Fatalf("EchoStream Send failed: %v", err)
+				}
+			}
+			if err = sStream.Close(); err != nil {
+				vlog.Fatalf("EchoStream Close failed: %v", err)
+			}
+
+			if err = <-rDone; err != nil {
+				vlog.Fatalf("%v", err)
+			}
+
+			if err = stream.Finish(); err != nil {
+				vlog.Fatalf("Finish failed: %v", err)
+			}
+
+			elapsed := time.Since(start)
+			b.StopTimer()
+
+			stats.Add(elapsed)
+		}
+
+		done <- n
+	}()
+
+	return done, func() {
+		close(stop)
+		<-done
+	}
+}
diff --git a/runtime/internal/rpc/benchmark/internal/server.go b/runtime/internal/rpc/benchmark/internal/server.go
new file mode 100644
index 0000000..ec3c0f5
--- /dev/null
+++ b/runtime/internal/rpc/benchmark/internal/server.go
@@ -0,0 +1,61 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/rpc"
+
+	"v.io/x/lib/vlog"
+	"v.io/x/ref/lib/security/securityflag"
+	"v.io/x/ref/runtime/internal/rpc/benchmark"
+)
+
+type impl struct {
+}
+
+func (i *impl) Echo(_ *context.T, _ rpc.ServerCall, payload []byte) ([]byte, error) {
+	return payload, nil
+}
+
+func (i *impl) EchoStream(_ *context.T, call benchmark.BenchmarkEchoStreamServerCall) error {
+	rStream := call.RecvStream()
+	sStream := call.SendStream()
+	for rStream.Advance() {
+		if err := sStream.Send(rStream.Value()); err != nil {
+			return err
+		}
+	}
+	return rStream.Err()
+}
+
+// StartServer starts a server that implements the Benchmark service. The
+// server listens on the given protocol and address, and returns the endpoint
+// of the server and a callback function to stop the server.
+func StartServer(ctx *context.T, listenSpec rpc.ListenSpec) (naming.Endpoint, func()) {
+	server, err := v23.NewServer(ctx)
+	if err != nil {
+		vlog.Fatalf("NewServer failed: %v", err)
+	}
+	eps, err := server.Listen(listenSpec)
+	if err != nil {
+		vlog.Fatalf("Listen failed: %v", err)
+	}
+	if len(eps) == 0 {
+		vlog.Fatal("No local address to listen on")
+	}
+
+	if err := server.Serve("", benchmark.BenchmarkServer(&impl{}), securityflag.NewAuthorizerOrDie()); err != nil {
+		vlog.Fatalf("Serve failed: %v", err)
+	}
+	return eps[0], func() {
+		if err := server.Stop(); err != nil {
+			vlog.Fatalf("Stop() failed: %v", err)
+		}
+	}
+}
diff --git a/runtime/internal/rpc/benchmark/main/main.go b/runtime/internal/rpc/benchmark/main/main.go
new file mode 100644
index 0000000..47ae752
--- /dev/null
+++ b/runtime/internal/rpc/benchmark/main/main.go
@@ -0,0 +1,132 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"flag"
+	"fmt"
+	"runtime"
+	"testing"
+	"time"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/naming"
+
+	"v.io/x/lib/vlog"
+	_ "v.io/x/ref/runtime/factories/static"
+	"v.io/x/ref/runtime/internal/rpc/benchmark/internal"
+	"v.io/x/ref/runtime/internal/rpc/stream/manager"
+	"v.io/x/ref/test"
+	"v.io/x/ref/test/benchmark"
+	"v.io/x/ref/test/testutil"
+)
+
+const (
+	payloadSize = 1000
+	chunkCnt    = 1000
+
+	bulkPayloadSize = 1000000
+
+	numCPUs          = 2
+	defaultBenchTime = 5 * time.Second
+)
+
+var (
+	serverEP naming.Endpoint
+	ctx      *context.T
+)
+
+// Benchmark for measuring RPC connection time, including authentication.
+//
+// rpc.Client doesn't export an interface for closing connections, so we
+// use the stream manager directly here.
+func benchmarkRPCConnection(b *testing.B) {
+	mp := runtime.GOMAXPROCS(numCPUs)
+	defer runtime.GOMAXPROCS(mp)
+
+	principal := testutil.NewPrincipal("test")
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		client := manager.InternalNew(naming.FixedRoutingID(0xc))
+
+		b.StartTimer()
+
+		_, err := client.Dial(serverEP, principal)
+		if err != nil {
+			vlog.Fatalf("Dial failed: %v", err)
+		}
+
+		b.StopTimer()
+
+		client.Shutdown()
+	}
+}
+
+// Benchmark for non-streaming RPC.
+func benchmarkRPC(b *testing.B) {
+	mp := runtime.GOMAXPROCS(numCPUs)
+	defer runtime.GOMAXPROCS(mp)
+	internal.CallEcho(b, ctx, serverEP.Name(), b.N, payloadSize, benchmark.NewStats(1))
+}
+
+// Benchmark for streaming RPC.
+func benchmarkStreamingRPC(b *testing.B) {
+	mp := runtime.GOMAXPROCS(numCPUs)
+	defer runtime.GOMAXPROCS(mp)
+	internal.CallEchoStream(b, ctx, serverEP.Name(), b.N, chunkCnt, payloadSize, benchmark.NewStats(1))
+}
+
+// Benchmark for measuring throughput in streaming RPC.
+func benchmarkStreamingRPCThroughput(b *testing.B) {
+	mp := runtime.GOMAXPROCS(numCPUs)
+	defer runtime.GOMAXPROCS(mp)
+	internal.CallEchoStream(b, ctx, serverEP.Name(), 1, b.N, bulkPayloadSize, benchmark.NewStats(1))
+}
+
+func msPerRPC(r testing.BenchmarkResult) float64 {
+	return r.T.Seconds() / float64(r.N) * 1000
+}
+
+func rpcPerSec(r testing.BenchmarkResult) float64 {
+	return float64(r.N) / r.T.Seconds()
+}
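+
+// mbPerSec reports throughput in MB/s; r.Bytes is the per-iteration byte
+// count set via b.SetBytes, so the total transferred is r.Bytes * r.N.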
+func mbPerSec(r testing.BenchmarkResult) float64 {
+	return (float64(r.Bytes) * float64(r.N) / 1e6) / r.T.Seconds()
+}
+
+func runBenchmarks() {
+	r := testing.Benchmark(benchmarkRPCConnection)
+	fmt.Printf("RPC Connection\t%.2f ms/rpc\n", msPerRPC(r))
+
+	// Create a connection to exclude the setup time from the following benchmarks.
+	internal.CallEcho(&testing.B{}, ctx, serverEP.Name(), 1, 0, benchmark.NewStats(1))
+
+	r = testing.Benchmark(benchmarkRPC)
+	fmt.Printf("RPC (echo %vB)\t%.2f ms/rpc (%.2f qps)\n", payloadSize, msPerRPC(r), rpcPerSec(r))
+
+	r = testing.Benchmark(benchmarkStreamingRPC)
+	fmt.Printf("RPC Streaming (echo %vB)\t%.2f ms/rpc\n", payloadSize, msPerRPC(r)/chunkCnt)
+
+	r = testing.Benchmark(benchmarkStreamingRPCThroughput)
+	fmt.Printf("RPC Streaming Throughput (echo %vMB)\t%.2f MB/s\n", bulkPayloadSize/1e6, mbPerSec(r))
+}
+
+func main() {
+	// Set the default benchmark time.
+	flag.Set("test.benchtime", defaultBenchTime.String())
+	test.Init()
+
+	var shutdown v23.Shutdown
+	ctx, shutdown = test.InitForTest()
+	defer shutdown()
+
+	var serverStop func()
+	serverEP, serverStop = internal.StartServer(ctx, v23.GetListenSpec(ctx))
+	defer serverStop()
+
+	runBenchmarks()
+}
diff --git a/runtime/internal/rpc/blessings_cache.go b/runtime/internal/rpc/blessings_cache.go
new file mode 100644
index 0000000..a6e9d92
--- /dev/null
+++ b/runtime/internal/rpc/blessings_cache.go
@@ -0,0 +1,179 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+	"crypto/sha256"
+	"sync"
+
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+
+	"v.io/x/ref/runtime/internal/rpc/stream"
+)
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errMissingBlessingsKey    = reg(".blessingsKey", "key {3} was not in blessings cache")
+	errInvalidClientBlessings = reg("invalidClientBlessings", "client sent invalid Blessings")
+)
+
+// clientEncodeBlessings gets or inserts the blessings into the cache.
+func clientEncodeBlessings(cache stream.VCDataCache, blessings security.Blessings) rpc.BlessingsRequest {
+	blessingsCacheAny := cache.GetOrInsert(clientBlessingsCacheKey{}, newClientBlessingsCache)
+	blessingsCache := blessingsCacheAny.(*clientBlessingsCache)
+	return blessingsCache.getOrInsert(blessings)
+}
+
+// clientAckBlessings records that the server has updated its cache to include blessings.
+// This means that subsequent RPCs from the client with these blessings can send only the cache key.
+func clientAckBlessings(cache stream.VCDataCache, blessings security.Blessings) {
+	blessingsCacheAny := cache.GetOrInsert(clientBlessingsCacheKey{}, newClientBlessingsCache)
+	blessingsCache := blessingsCacheAny.(*clientBlessingsCache)
+	blessingsCache.acknowledge(blessings)
+}
+
+// serverDecodeBlessings inserts the key and blessings into the cache, or fetches the cached
+// blessings if only the key is provided in req.
+func serverDecodeBlessings(cache stream.VCDataCache, req rpc.BlessingsRequest, stats *rpcStats) (security.Blessings, error) {
+	blessingsCacheAny := cache.GetOrInsert(serverBlessingsCacheKey{}, newServerBlessingsCache)
+	blessingsCache := blessingsCacheAny.(*serverBlessingsCache)
+	return blessingsCache.getOrInsert(req, stats)
+}
+
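+// A typical exchange, sketched from the functions above:
+//   1. First RPC: the client sends {Key: n, Blessings: &b}; the server caches b under n.
+//   2. Once the server has confirmed receipt, the client records it via clientAckBlessings.
+//   3. Subsequent RPCs send only {Key: n}, sparing the cost of re-encoding the blessings.
+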
+// IMPLEMENTATION DETAILS BELOW
+
+// clientBlessingsCache is a thread-safe map from blessings to cache id.
+type clientBlessingsCache struct {
+	sync.RWMutex
+	m     map[[sha256.Size]byte]clientCacheValue
+	curId uint64
+}
+
+type clientCacheValue struct {
+	id uint64
+	// ack is set to true once the server has confirmed receipt of the cache id.
+	// Clients that insert into the cache when ack is false must send both the id
+	// and the blessings.
+	ack bool
+}
+
+// clientBlessingsCacheKey is the key used to retrieve the clientBlessingsCache from the VCDataCache.
+type clientBlessingsCacheKey struct{}
+
+func newClientBlessingsCache() interface{} {
+	return &clientBlessingsCache{m: make(map[[sha256.Size]byte]clientCacheValue)}
+}
+
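+// getBlessingsHashKey computes a SHA-256 digest over the signatures of the
+// final certificate in each chain, which serves as a compact fingerprint of
+// the blessings for cache lookups.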
+func getBlessingsHashKey(blessings security.Blessings) (key [sha256.Size]byte) {
+	h := sha256.New()
+	for _, chain := range security.MarshalBlessings(blessings).CertificateChains {
+		if len(chain) == 0 {
+			continue
+		}
+		cert := chain[len(chain)-1]
+		s := sha256.Sum256(cert.Signature.R)
+		h.Write(s[:])
+		s = sha256.Sum256(cert.Signature.S)
+		h.Write(s[:])
+	}
+	copy(key[:], h.Sum(nil))
+	return
+}
+
+func (c *clientBlessingsCache) getOrInsert(blessings security.Blessings) rpc.BlessingsRequest {
+	key := getBlessingsHashKey(blessings)
+	c.RLock()
+	val, exists := c.m[key]
+	c.RUnlock()
+	if exists {
+		return c.makeBlessingsRequest(val, blessings)
+	}
+	// If the val doesn't exist we must create a new id, update the cache, and send the id and blessings.
+	c.Lock()
+	// We must check that the val wasn't inserted while we were upgrading from the read lock to the write lock.
+	val, exists = c.m[key]
+	if !exists {
+		val = clientCacheValue{id: c.nextIdLocked()}
+		c.m[key] = val
+	}
+	c.Unlock()
+	return c.makeBlessingsRequest(val, blessings)
+}
+
+func (c *clientBlessingsCache) acknowledge(blessings security.Blessings) {
+	key := getBlessingsHashKey(blessings)
+	c.Lock()
+	val := c.m[key]
+	val.ack = true
+	c.m[key] = val
+	c.Unlock()
+}
+
+func (c *clientBlessingsCache) makeBlessingsRequest(val clientCacheValue, blessings security.Blessings) rpc.BlessingsRequest {
+	if val.ack {
+		// When the value is acknowledged, send only the key, since the server has confirmed that it knows the key.
+		return rpc.BlessingsRequest{Key: val.id}
+	}
+	// Otherwise we still need to send both the key and the blessings, but we must ensure that we send the same key.
+	return rpc.BlessingsRequest{Key: val.id, Blessings: &blessings}
+}
+
+// nextIdLocked creates a new id for inserting blessings. It must be called after acquiring a writer lock.
+func (c *clientBlessingsCache) nextIdLocked() uint64 {
+	c.curId++
+	return c.curId
+}
+
+// serverBlessingsCache is a thread-safe map from cache key to blessings.
+type serverBlessingsCache struct {
+	sync.RWMutex
+	m map[uint64]security.Blessings
+}
+
+// serverBlessingsCacheKey is the key used to retrieve the serverBlessingsCache from the VCDataCache.
+type serverBlessingsCacheKey struct{}
+
+func newServerBlessingsCache() interface{} {
+	return &serverBlessingsCache{m: make(map[uint64]security.Blessings)}
+}
+
+func (c *serverBlessingsCache) getOrInsert(req rpc.BlessingsRequest, stats *rpcStats) (security.Blessings, error) {
+	// In the case that the key sent is 0, we are running in SecurityNone
+	// and should return the zero value.
+	if req.Key == 0 {
+		return security.Blessings{}, nil
+	}
+	if req.Blessings == nil {
+		// Fastpath, lookup based on the key.
+		c.RLock()
+		cached, exists := c.m[req.Key]
+		c.RUnlock()
+		if !exists {
+			return security.Blessings{}, verror.New(errMissingBlessingsKey, nil, req.Key)
+		}
+		stats.recordBlessingCache(true)
+		return cached, nil
+	}
+	// Always count the slow path as a cache miss since we do not get the benefit of sending only the cache key.
+	stats.recordBlessingCache(false)
+	// Slowpath: we might need to update the cache, or check that the received blessings
+	// match what's in the cache.
+	recv := *req.Blessings
+	c.Lock()
+	defer c.Unlock()
+	if cached, exists := c.m[req.Key]; exists {
+		if !cached.Equivalent(recv) {
+			return security.Blessings{}, verror.New(errInvalidClientBlessings, nil)
+		}
+		return cached, nil
+	}
+	c.m[req.Key] = recv
+	return recv, nil
+}
diff --git a/runtime/internal/rpc/cancel_test.go b/runtime/internal/rpc/cancel_test.go
new file mode 100644
index 0000000..716a456
--- /dev/null
+++ b/runtime/internal/rpc/cancel_test.go
@@ -0,0 +1,126 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+	"testing"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/namespace"
+	"v.io/v23/naming"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+	"v.io/x/lib/vlog"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/manager"
+	tnaming "v.io/x/ref/runtime/internal/testing/mocks/naming"
+)
+
+type canceld struct {
+	sm       stream.Manager
+	ns       namespace.T
+	name     string
+	child    string
+	started  chan struct{}
+	canceled chan struct{}
+	stop     func() error
+}
+
+func (c *canceld) Run(ctx *context.T, _ rpc.StreamServerCall) error {
+	close(c.started)
+
+	client, err := InternalNewClient(c.sm, c.ns)
+	if err != nil {
+		vlog.Error(err)
+		return err
+	}
+
+	if c.child != "" {
+		if _, err = client.StartCall(ctx, c.child, "Run", []interface{}{}); err != nil {
+			vlog.Error(err)
+			return err
+		}
+	}
+
+	vlog.Info(c.name, " waiting for cancellation")
+	<-ctx.Done()
+	vlog.Info(c.name, " canceled")
+	close(c.canceled)
+	return nil
+}
+
+func makeCanceld(ctx *context.T, ns namespace.T, name, child string) (*canceld, error) {
+	sm := manager.InternalNew(naming.FixedRoutingID(0x111111111))
+	s, err := testInternalNewServer(ctx, sm, ns, v23.GetPrincipal(ctx))
+	if err != nil {
+		return nil, err
+	}
+	if _, err := s.Listen(listenSpec); err != nil {
+		return nil, err
+	}
+
+	c := &canceld{
+		sm:       sm,
+		ns:       ns,
+		name:     name,
+		child:    child,
+		started:  make(chan struct{}),
+		canceled: make(chan struct{}),
+		stop:     s.Stop,
+	}
+
+	if err := s.Serve(name, c, security.AllowEveryone()); err != nil {
+		return nil, err
+	}
+
+	return c, nil
+}
+
+// TestCancellationPropagation tests that cancellation propagates along an
+// RPC call chain without user intervention.
+func TestCancellationPropagation(t *testing.T) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	var (
+		sm               = manager.InternalNew(naming.FixedRoutingID(0x555555555))
+		ns               = tnaming.NewSimpleNamespace()
+		pclient, pserver = newClientServerPrincipals()
+		serverCtx, _     = v23.WithPrincipal(ctx, pserver)
+		clientCtx, _     = v23.WithPrincipal(ctx, pclient)
+	)
+	client, err := InternalNewClient(sm, ns)
+	if err != nil {
+		t.Error(err)
+	}
+
+	c1, err := makeCanceld(serverCtx, ns, "c1", "c2")
+	if err != nil {
+		t.Fatal("Can't start server:", err)
+	}
+	defer c1.stop()
+
+	c2, err := makeCanceld(serverCtx, ns, "c2", "")
+	if err != nil {
+		t.Fatal("Can't start server:", err)
+	}
+	defer c2.stop()
+
+	clientCtx, cancel := context.WithCancel(clientCtx)
+	_, err = client.StartCall(clientCtx, "c1", "Run", []interface{}{})
+	if err != nil {
+		t.Fatal("can't call: ", err)
+	}
+
+	<-c1.started
+	<-c2.started
+
+	vlog.Info("cancelling initial call")
+	cancel()
+
+	vlog.Info("waiting for children to be canceled")
+	<-c1.canceled
+	<-c2.canceled
+}
diff --git a/runtime/internal/rpc/client.go b/runtime/internal/rpc/client.go
new file mode 100644
index 0000000..ff1ee51
--- /dev/null
+++ b/runtime/internal/rpc/client.go
@@ -0,0 +1,1055 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+	"fmt"
+	"io"
+	"math/rand"
+	"net"
+	"reflect"
+	"sync"
+	"time"
+
+	"v.io/x/lib/vlog"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/i18n"
+	"v.io/v23/namespace"
+	"v.io/v23/naming"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+	"v.io/v23/vdl"
+	vtime "v.io/v23/vdlroot/time"
+	"v.io/v23/verror"
+	"v.io/v23/vom"
+	"v.io/v23/vtrace"
+
+	inaming "v.io/x/ref/runtime/internal/naming"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+)
+
+const pkgPath = "v.io/x/ref/runtime/internal/rpc"
+
+func reg(id, msg string) verror.IDAction {
+	// Note: the error action is never used and is instead computed
+	// at a higher level. The errors here are purely for informational
+	// purposes.
+	return verror.Register(verror.ID(pkgPath+id), verror.NoRetry, msg)
+}
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errClientCloseAlreadyCalled  = reg(".errCloseAlreadyCalled", "rpc.Client.Close has already been called")
+	errClientFinishAlreadyCalled = reg(".errFinishAlreadyCalled", "rpc.ClientCall.Finish has already been called")
+	errNonRootedName             = reg(".errNonRootedName", "{3} does not appear to contain an address")
+	errInvalidEndpoint           = reg(".errInvalidEndpoint", "failed to parse endpoint")
+	errIncompatibleEndpoint      = reg(".errIncompatibleEndpoint", "incompatible endpoint")
+	errRequestEncoding           = reg(".errRequestEncoding", "failed to encode request {3}{:4}")
+	errDischargeEncoding         = reg(".errDischargeEncoding", "failed to encode discharges {:3}")
+	errBlessingEncoding          = reg(".errBlessingEncoding", "failed to encode blessing {3}{:4}")
+	errArgEncoding               = reg(".errArgEncoding", "failed to encode arg #{3}{:4:}")
+	errMismatchedResults         = reg(".errMismatchedResults", "got {3} results, but want {4}")
+	errResultDecoding            = reg(".errResultDecoding", "failed to decode result #{3}{:4}")
+	errResponseDecoding          = reg(".errResponseDecoding", "failed to decode response{:3}")
+	errRemainingStreamResults    = reg(".errRemaingStreamResults", "stream closed with remaining stream results")
+	errNoBlessingsForPeer        = reg(".errNoBlessingsForPeer", "no blessings tagged for peer {3}{:4}")
+	errBlessingGrant             = reg(".errBlessingGrant", "failed to grant blessing to server with blessings{:3}")
+	errBlessingAdd               = reg(".errBlessingAdd", "failed to add blessing granted to server{:3}")
+	errServerAuthorizeFailed     = reg(".errServerAuthorizedFailed", "failed to authorize flow with remote blessings{:3} {:4}")
+
+	errPrepareBlessingsAndDischarges = reg(".prepareBlessingsAndDischarges", "failed to prepare blessings and discharges: remote blessings{:3} {:4}")
+
+	errDischargeImpetus = reg(".errDischargeImpetus", "couldn't make discharge impetus{:3}")
+	errNoPrincipal      = reg(".errNoPrincipal", "principal required for secure connections")
+)
+
+type client struct {
+	streamMgr          stream.Manager
+	ns                 namespace.T
+	vcOpts             []stream.VCOpt // vc opts passed to dial
+	preferredProtocols []string
+
+	// We cache the IP networks on the device since it is not that cheap to read
+	// network interfaces through OS syscalls.
+	// TODO(jhahn): Add monitoring of network interface changes.
+	ipNets []*net.IPNet
+
+	vcCache *vc.VCCache
+
+	dc vc.DischargeClient
+}
+
+var _ rpc.Client = (*client)(nil)
+
+func InternalNewClient(streamMgr stream.Manager, ns namespace.T, opts ...rpc.ClientOpt) (rpc.Client, error) {
+	c := &client{
+		streamMgr: streamMgr,
+		ns:        ns,
+		ipNets:    ipNetworks(),
+		vcCache:   vc.NewVCCache(),
+	}
+	c.dc = InternalNewDischargeClient(nil, c, 0)
+	for _, opt := range opts {
+		// Collect all client opts that are also vc opts.
+		switch v := opt.(type) {
+		case stream.VCOpt:
+			c.vcOpts = append(c.vcOpts, v)
+		case PreferredProtocols:
+			c.preferredProtocols = v
+		}
+	}
+
+	return c, nil
+}
+
+func (c *client) createFlow(ctx *context.T, principal security.Principal, ep naming.Endpoint, vcOpts []stream.VCOpt) (stream.Flow, *verror.SubErr) {
+	suberr := func(err error) *verror.SubErr {
+		return &verror.SubErr{Err: err, Options: verror.Print}
+	}
+
+	found, err := c.vcCache.ReservedFind(ep, principal)
+	if err != nil {
+		return nil, suberr(verror.New(errClientCloseAlreadyCalled, ctx))
+	}
+	defer c.vcCache.Unreserve(ep, principal)
+	if found != nil {
+		// We are serializing the creation of all flows per VC. This is okay
+		// because if one flow creation is to block, it is likely that all others
+		// for that VC would block as well.
+		if flow, err := found.Connect(); err == nil {
+			return flow, nil
+		}
+		// If the vc fails to establish a new flow, we assume it's
+		// broken, remove it from the cache, and proceed to establish
+		// a new vc.
+		//
+		// TODO(suharshs): The decision to redial once when dialing the vc
+		// in the cache fails is a bit inconsistent with the behavior when a newly
+		// dialed vc.Connect fails. We should revisit this.
+		//
+		// TODO(caprita): Should we distinguish errors due to vc being
+		// closed from other errors?  If not, should we call vc.Close()
+		// before removing the vc from the cache?
+		if err := c.vcCache.Delete(found); err != nil {
+			return nil, suberr(verror.New(errClientCloseAlreadyCalled, ctx))
+		}
+	}
+
+	sm := c.streamMgr
+	v, err := sm.Dial(ep, principal, vcOpts...)
+	if err != nil {
+		return nil, suberr(err)
+	}
+
+	flow, err := v.Connect()
+	if err != nil {
+		return nil, suberr(err)
+	}
+
+	if err := c.vcCache.Insert(v.(*vc.VC)); err != nil {
+		sm.ShutdownEndpoint(ep)
+		return nil, suberr(verror.New(errClientCloseAlreadyCalled, ctx))
+	}
+
+	return flow, nil
+}
+
+// backoff implements a randomized exponential backoff. The randomness deters
+// error convoys from forming. The first retry should use n=0, the second n=1, and so on.
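+// For example, n=0 sleeps for 100-200ms and n=3 for 800-1600ms, capped at
+// maxBackoff and truncated so that a little time remains before the deadline.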
+func backoff(n uint, deadline time.Time) bool {
+	// This is ((100 to 200) * 2^n) ms.
+	b := time.Duration((100+rand.Intn(100))<<n) * time.Millisecond
+	if b > maxBackoff {
+		b = maxBackoff
+	}
+	r := deadline.Sub(time.Now())
+	if b > r {
+		// We need to leave a little time for the call to start or
+		// we'll just timeout in startCall before we actually do
+		// anything.  If we just have a millisecond left, give up.
+		if r <= time.Millisecond {
+			return false
+		}
+		b = r - time.Millisecond
+	}
+	time.Sleep(b)
+	return true
+}
+
+func (c *client) StartCall(ctx *context.T, name, method string, args []interface{}, opts ...rpc.CallOpt) (rpc.ClientCall, error) {
+	defer vlog.LogCallf("ctx=,name=%.10s...,method=%.10s...,args=,opts...=%v", name, method, opts)("") // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	return c.startCall(ctx, name, method, args, opts)
+}
+
+func (c *client) Call(ctx *context.T, name, method string, inArgs, outArgs []interface{}, opts ...rpc.CallOpt) error {
+	defer vlog.LogCallf("ctx=,name=%.10s...,method=%.10s...,inArgs=,outArgs=,opts...=%v", name, method, opts)("") // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	deadline := getDeadline(ctx, opts)
+
+	var lastErr error
+	for retries := uint(0); ; retries++ {
+		call, err := c.startCall(ctx, name, method, inArgs, opts)
+		if err != nil {
+			return err
+		}
+		err = call.Finish(outArgs...)
+		if err == nil {
+			return nil
+		}
+		lastErr = err
+		// We only retry if RetryBackoff is returned by the application, because
+		// RetryConnection and RetryRefetch require actions by the client before
+		// retrying.
+		if !shouldRetryBackoff(verror.Action(lastErr), deadline, opts) {
+			vlog.VI(4).Infof("Cannot retry after error: %s", lastErr)
+			break
+		}
+		if !backoff(retries, deadline) {
+			break
+		}
+		vlog.VI(4).Infof("Retrying due to error: %s", lastErr)
+	}
+	return lastErr
+}
+
+func getDeadline(ctx *context.T, opts []rpc.CallOpt) time.Time {
+	// Context specified deadline.
+	deadline, hasDeadline := ctx.Deadline()
+	if !hasDeadline {
+		// Default deadline.
+		deadline = time.Now().Add(defaultCallTimeout)
+	}
+	if r, ok := getRetryTimeoutOpt(opts); ok {
+		// Caller specified deadline.
+		deadline = time.Now().Add(r)
+	}
+	return deadline
+}
+
+func shouldRetryBackoff(action verror.ActionCode, deadline time.Time, opts []rpc.CallOpt) bool {
+	switch {
+	case noRetry(opts):
+		return false
+	case action != verror.RetryBackoff:
+		return false
+	case time.Now().After(deadline):
+		return false
+	}
+	return true
+}
+
+func shouldRetry(action verror.ActionCode, requireResolve bool, deadline time.Time, opts []rpc.CallOpt) bool {
+	switch {
+	case noRetry(opts):
+		return false
+	case action != verror.RetryConnection && action != verror.RetryRefetch:
+		return false
+	case time.Now().After(deadline):
+		return false
+	case requireResolve && getNoNamespaceOpt(opts):
+		// If we're skipping resolution and there are no servers for
+		// this call, retrying is not going to help; we can't come up
+		// with new servers without resolution.
+		return false
+	}
+	return true
+}
+
+func mkDischargeImpetus(serverBlessings []string, method string, args []interface{}) (security.DischargeImpetus, error) {
+	var impetus security.DischargeImpetus
+	if len(serverBlessings) > 0 {
+		impetus.Server = make([]security.BlessingPattern, len(serverBlessings))
+		for i, b := range serverBlessings {
+			impetus.Server[i] = security.BlessingPattern(b)
+		}
+	}
+	impetus.Method = method
+	if len(args) > 0 {
+		impetus.Arguments = make([]*vdl.Value, len(args))
+		for i, a := range args {
+			vArg, err := vdl.ValueFromReflect(reflect.ValueOf(a))
+			if err != nil {
+				return security.DischargeImpetus{}, err
+			}
+			impetus.Arguments[i] = vArg
+		}
+	}
+	return impetus, nil
+}
+
+// startCall ensures StartCall always returns verror.E.
+func (c *client) startCall(ctx *context.T, name, method string, args []interface{}, opts []rpc.CallOpt) (rpc.ClientCall, error) {
+	if !ctx.Initialized() {
+		return nil, verror.ExplicitNew(verror.ErrBadArg, i18n.LangID("en-us"), "<rpc.Client>", "StartCall", "context not initialized")
+	}
+	ctx, span := vtrace.WithNewSpan(ctx, fmt.Sprintf("<rpc.Client>%q.%s", name, method))
+	if err := canCreateServerAuthorizer(ctx, opts); err != nil {
+		return nil, verror.New(verror.ErrBadArg, ctx, err)
+	}
+
+	deadline := getDeadline(ctx, opts)
+
+	var lastErr error
+	for retries := uint(0); ; retries++ {
+		call, action, requireResolve, err := c.tryCall(ctx, name, method, args, opts)
+		if err == nil {
+			return call, nil
+		}
+		lastErr = err
+		if !shouldRetry(action, requireResolve, deadline, opts) {
+			span.Annotatef("Cannot retry after error: %s", err)
+			break
+		}
+		if !backoff(retries, deadline) {
+			break
+		}
+		span.Annotatef("Retrying due to error: %s", err)
+	}
+	return nil, lastErr
+}
+
+type serverStatus struct {
+	index             int
+	server, suffix    string
+	flow              stream.Flow
+	blessings         []string                    // authorized server blessings
+	rejectedBlessings []security.RejectedBlessing // rejected server blessings
+	serverErr         *verror.SubErr
+}
+
+func suberrName(server, name, method string) string {
+	// In the case the client directly dialed an endpoint we want to avoid printing
+	// the endpoint twice.
+	if server == name {
+		return fmt.Sprintf("%s.%s", server, method)
+	}
+	return fmt.Sprintf("%s:%s.%s", server, name, method)
+}
+
+// tryCreateFlow attempts to establish a Flow to "server" (which must be a
+// rooted name), over which a method invocation request could be sent.
+//
+// The server at the remote end of the flow is authorized using the provided
+// authorizer, both during creation of the VC underlying the flow and during
+// creation of the flow itself.
+// TODO(cnicolaou): implement real, configurable load balancing.
+func (c *client) tryCreateFlow(ctx *context.T, principal security.Principal, index int, name, server, method string, auth security.Authorizer, ch chan<- *serverStatus, vcOpts []stream.VCOpt) {
+	status := &serverStatus{index: index, server: server}
+	var span vtrace.Span
+	ctx, span = vtrace.WithNewSpan(ctx, "<client>tryCreateFlow")
+	span.Annotatef("address:%v", server)
+	defer func() {
+		ch <- status
+		span.Finish()
+	}()
+
+	suberr := func(err error) *verror.SubErr {
+		return &verror.SubErr{
+			Name:    suberrName(server, name, method),
+			Err:     err,
+			Options: verror.Print,
+		}
+	}
+
+	address, suffix := naming.SplitAddressName(server)
+	if len(address) == 0 {
+		status.serverErr = suberr(verror.New(errNonRootedName, ctx, server))
+		return
+	}
+	status.suffix = suffix
+
+	ep, err := inaming.NewEndpoint(address)
+	if err != nil {
+		status.serverErr = suberr(verror.New(errInvalidEndpoint, ctx))
+		return
+	}
+	if status.flow, status.serverErr = c.createFlow(ctx, principal, ep, append(vcOpts, &vc.ServerAuthorizer{Suffix: status.suffix, Method: method, Policy: auth})); status.serverErr != nil {
+		status.serverErr.Name = suberrName(server, name, method)
+		vlog.VI(2).Infof("rpc: Failed to create Flow with %v: %v", server, status.serverErr.Err)
+		return
+	}
+
+	// Authorize the remote end of the flow using the provided authorizer.
+	if status.flow.LocalPrincipal() == nil {
+		// LocalPrincipal is nil which means we are operating under
+		// SecurityNone.
+		return
+	}
+
+	seccall := security.NewCall(&security.CallParams{
+		LocalPrincipal:   status.flow.LocalPrincipal(),
+		LocalBlessings:   status.flow.LocalBlessings(),
+		RemoteBlessings:  status.flow.RemoteBlessings(),
+		LocalEndpoint:    status.flow.LocalEndpoint(),
+		RemoteEndpoint:   status.flow.RemoteEndpoint(),
+		RemoteDischarges: status.flow.RemoteDischarges(),
+		Method:           method,
+		Suffix:           status.suffix,
+	})
+	if err := auth.Authorize(ctx, seccall); err != nil {
+		// We will test for errServerAuthorizeFailed in failedTryCall and report
+		// verror.ErrNotTrusted
+		status.serverErr = suberr(verror.New(errServerAuthorizeFailed, ctx, status.flow.RemoteBlessings(), err))
+		vlog.VI(2).Infof("rpc: Failed to authorize Flow created with server %v: %s", server, status.serverErr.Err)
+		status.flow.Close()
+		status.flow = nil
+		return
+	}
+	status.blessings, status.rejectedBlessings = security.RemoteBlessingNames(ctx, seccall)
+	return
+}
+
+// tryCall makes a single attempt at a call. It may connect to multiple servers
+// (all that serve "name"), but will invoke the method on at most one of them
+// (the server running on the most preferred protocol and network amongst all
+// the servers that were successfully connected to and authorized).
+// If requireResolve is true on return, then we shouldn't bother retrying unless
+// the name can be re-resolved.
+func (c *client) tryCall(ctx *context.T, name, method string, args []interface{}, opts []rpc.CallOpt) (call rpc.ClientCall, action verror.ActionCode, requireResolve bool, err error) {
+	var resolved *naming.MountEntry
+	var blessingPattern security.BlessingPattern
+	blessingPattern, name = security.SplitPatternName(name)
+	if resolved, err = c.ns.Resolve(ctx, name, getNamespaceOpts(opts)...); err != nil {
+		// We always return NoServers as the error so that the caller knows
+		// that it's ok to retry the operation, since the name may be registered
+		// in the near future.
+		switch {
+		case verror.ErrorID(err) == naming.ErrNoSuchName.ID:
+			return nil, verror.RetryRefetch, false, verror.New(verror.ErrNoServers, ctx, name)
+		case verror.ErrorID(err) == verror.ErrNoServers.ID:
+			// Avoid wrapping errors unnecessarily.
+			return nil, verror.NoRetry, false, err
+		default:
+			return nil, verror.NoRetry, false, verror.New(verror.ErrNoServers, ctx, name, err)
+		}
+	} else {
+		if len(resolved.Servers) == 0 {
+			// This should never happen.
+			return nil, verror.NoRetry, true, verror.New(verror.ErrInternal, ctx, name)
+		}
+		// An empty set of protocols means all protocols...
+		if resolved.Servers, err = filterAndOrderServers(resolved.Servers, c.preferredProtocols, c.ipNets); err != nil {
+			return nil, verror.RetryRefetch, true, verror.New(verror.ErrNoServers, ctx, name, err)
+		}
+	}
+
+	// We need to ensure calls to v23 factory methods do not occur during runtime
+	// initialization. Currently, the agent, which uses SecurityNone, is the only caller
+	// during runtime initialization. We would like to set the principal in the context
+	// to nil if we are running in SecurityNone, but this always results in a panic since
+	// the agent client would trigger the call v23.WithPrincipal during runtime
+	// initialization. So, we gate the call to v23.GetPrincipal instead since the agent
+	// client will have callEncrypted == false.
+	// Potential solutions to this are:
+	// (1) Create a separate client for the agent so that this code doesn't have to
+	//     account for its use during runtime initialization.
+	// (2) Have a ctx.IsRuntimeInitialized() method that we can additionally predicate
+	//     on here.
+	var principal security.Principal
+	if callEncrypted(opts) {
+		if principal = v23.GetPrincipal(ctx); principal == nil {
+			return nil, verror.NoRetry, false, verror.New(errNoPrincipal, ctx)
+		}
+	}
+
+	// servers is now ordered by the priority heuristic implemented in
+	// filterAndOrderServers.
+	//
+	// Try to connect to all servers in parallel.  Provide sufficient
+	// buffering for all of the connections to finish instantaneously. This
+	// is important because we want to process the responses in priority
+	// order; that order is indicated by the order of entries in servers.
+	// So, if two responses come in at the same 'instant', we prefer the
+	// first in resolved.Servers.
+	attempts := len(resolved.Servers)
+
+	responses := make([]*serverStatus, attempts)
+	ch := make(chan *serverStatus, attempts)
+	vcOpts := append(getVCOpts(opts), c.vcOpts...)
+	authorizer := newServerAuthorizer(blessingPattern, opts...)
+	for i, server := range resolved.Names() {
+		// Create a copy of vcOpts for each call to tryCreateFlow
+		// to avoid concurrent tryCreateFlows from stepping on each
+		// other while manipulating their copy of the options.
+		vcOptsCopy := make([]stream.VCOpt, len(vcOpts))
+		copy(vcOptsCopy, vcOpts)
+		go c.tryCreateFlow(ctx, principal, i, name, server, method, authorizer, ch, vcOptsCopy)
+	}
+
+	var timeoutChan <-chan time.Time
+	if deadline, ok := ctx.Deadline(); ok {
+		timeoutChan = time.After(deadline.Sub(time.Now()))
+	}
+
+	for {
+		// Block for at least one new response from the server, or the timeout.
+		select {
+		case r := <-ch:
+			responses[r.index] = r
+			// Read as many more responses as we can without blocking.
+		LoopNonBlocking:
+			for {
+				select {
+				default:
+					break LoopNonBlocking
+				case r := <-ch:
+					responses[r.index] = r
+				}
+			}
+		case <-timeoutChan:
+			vlog.VI(2).Infof("rpc: timeout on connection to server %v ", name)
+			_, _, _, err := c.failedTryCall(ctx, name, method, responses, ch)
+			if verror.ErrorID(err) != verror.ErrTimeout.ID {
+				return nil, verror.NoRetry, false, verror.New(verror.ErrTimeout, ctx, err)
+			}
+			return nil, verror.NoRetry, false, err
+		}
+
+		dc := c.dc
+		if shouldNotFetchDischarges(opts) {
+			dc = nil
+		}
+		// Process new responses, in priority order.
+		numResponses := 0
+		for _, r := range responses {
+			if r != nil {
+				numResponses++
+			}
+			if r == nil || r.flow == nil {
+				continue
+			}
+
+			doneChan := ctx.Done()
+			r.flow.SetDeadline(doneChan)
+			fc, err := newFlowClient(ctx, r.flow, r.blessings, dc)
+			if err != nil {
+				return nil, verror.NoRetry, false, err
+			}
+
+			if err := fc.prepareBlessingsAndDischarges(ctx, method, r.suffix, args, r.rejectedBlessings, opts); err != nil {
+				r.serverErr = &verror.SubErr{
+					Name:    suberrName(r.server, name, method),
+					Options: verror.Print,
+					Err:     verror.New(verror.ErrNotTrusted, nil, verror.New(errPrepareBlessingsAndDischarges, ctx, r.flow.RemoteBlessings(), err)),
+				}
+				vlog.VI(2).Infof("rpc: err: %s", r.serverErr)
+				r.flow.Close()
+				r.flow = nil
+				continue
+			}
+
+			// This is the 'point of no return'; once the RPC is started (fc.start
+			// below) we can't be sure whether it makes it to the server or not, so
+			// this code will never call fc.start more than once, ensuring that we
+			// provide 'at-most-once' rpc semantics at this level. Retrying the
+			// network connections (i.e. creating flows) is fine since we can clean
+			// up that state if we abort a call (i.e. close the flow).
+			//
+			// We must ensure that all flows other than r.flow are closed.
+			//
+			// TODO(cnicolaou): all errors below are marked as NoRetry
+			// because we want to provide at-most-once rpc semantics so
+			// we only ever attempt an RPC once. In the future, we'll cache
+			// responses on the server and then we can retry in-flight
+			// RPCs.
+			go cleanupTryCall(r, responses, ch)
+
+			if doneChan != nil {
+				go func() {
+					select {
+					case <-doneChan:
+						vtrace.GetSpan(fc.ctx).Annotate("Canceled")
+						fc.flow.Cancel()
+					case <-fc.flow.Closed():
+					}
+				}()
+			}
+
+			deadline, _ := ctx.Deadline()
+			if verr := fc.start(r.suffix, method, args, deadline); verr != nil {
+				return nil, verror.NoRetry, false, verr
+			}
+			return fc, verror.NoRetry, false, nil
+		}
+		if numResponses == len(responses) {
+			return c.failedTryCall(ctx, name, method, responses, ch)
+		}
+	}
+}
+
+// cleanupTryCall ensures we've waited for every response from the tryCreateFlow
+// goroutines, and have closed the flow from each one except skip.  This is a
+// blocking function; it should be called in its own goroutine.
+func cleanupTryCall(skip *serverStatus, responses []*serverStatus, ch chan *serverStatus) {
+	numPending := 0
+	for _, r := range responses {
+		switch {
+		case r == nil:
+			// The response hasn't arrived yet.
+			numPending++
+		case r == skip || r.flow == nil:
+			// Either we should skip this flow, or we've closed the flow for this
+			// response already; nothing more to do.
+		default:
+			// We received the response, but haven't closed the flow yet.
+			r.flow.Close()
+		}
+	}
+	// Now we just need to wait for the pending responses and close their flows.
+	for i := 0; i < numPending; i++ {
+		if r := <-ch; r.flow != nil {
+			r.flow.Close()
+		}
+	}
+}
+
+// failedTryCall performs asynchronous cleanup for tryCall, and returns an
+// appropriate error from the responses we've already received.  All parallel
+// calls in tryCall failed or we timed out if we get here.
+func (c *client) failedTryCall(ctx *context.T, name, method string, responses []*serverStatus, ch chan *serverStatus) (rpc.ClientCall, verror.ActionCode, bool, error) {
+	go cleanupTryCall(nil, responses, ch)
+	c.ns.FlushCacheEntry(name)
+	suberrs := []verror.SubErr{}
+	topLevelError := verror.ErrNoServers
+	topLevelAction := verror.RetryRefetch
+	onlyErrNetwork := true
+	for _, r := range responses {
+		if r != nil && r.serverErr != nil && r.serverErr.Err != nil {
+			switch verror.ErrorID(r.serverErr.Err) {
+			case stream.ErrNotTrusted.ID, verror.ErrNotTrusted.ID, errServerAuthorizeFailed.ID:
+				topLevelError = verror.ErrNotTrusted
+				topLevelAction = verror.NoRetry
+				onlyErrNetwork = false
+			case stream.ErrAborted.ID, stream.ErrNetwork.ID:
+				// do nothing
+			default:
+				onlyErrNetwork = false
+			}
+			suberrs = append(suberrs, *r.serverErr)
+		}
+	}
+
+	if onlyErrNetwork {
+		// If we only encountered network errors, then report ErrBadProtocol.
+		topLevelError = verror.ErrBadProtocol
+	}
+
+	// TODO(cnicolaou): we get system errors for things like dialing using
+	// the 'ws' protocol, which can never succeed even if we retry the
+	// connection; hence we return RetryRefetch below except for the case
+	// where the servers are not trusted, in which case there's no point in
+	// retrying at all.
+	// TODO(cnicolaou): implementing at-most-once rpc semantics in the future
+	// will require thinking through all of the cases where the RPC can
+	// be retried by the client whilst it's actually being executed on the
+	// server.
+	return nil, topLevelAction, false, verror.AddSubErrs(verror.New(topLevelError, ctx), ctx, suberrs...)
+}
+
+// prepareBlessingsAndDischarges prepares blessings and discharges for
+// the call.
+//
+// This includes: (1) preparing blessings that must be granted to the
+// server, (2) preparing blessings that the client authenticates with,
+// and, (3) preparing any discharges for third-party caveats on the client's
+// blessings.
+func (fc *flowClient) prepareBlessingsAndDischarges(ctx *context.T, method, suffix string, args []interface{}, rejectedServerBlessings []security.RejectedBlessing, opts []rpc.CallOpt) error {
+	// LocalPrincipal is nil which means we are operating under
+	// SecurityNone.
+	if fc.flow.LocalPrincipal() == nil {
+		return nil
+	}
+
+	// Fetch blessings from the client's blessing store that are to be
+	// shared with the server.
+	if fc.blessings = fc.flow.LocalPrincipal().BlessingStore().ForPeer(fc.server...); fc.blessings.IsZero() {
+		// TODO(ataly, ashankar): We need not error out here and instead can just send the <nil> blessings
+		// to the server.
+		return verror.New(errNoBlessingsForPeer, fc.ctx, fc.server, rejectedServerBlessings)
+	}
+
+	// Fetch any discharges for third-party caveats on the client's blessings.
+	if !fc.blessings.IsZero() && fc.dc != nil {
+		impetus, err := mkDischargeImpetus(fc.server, method, args)
+		if err != nil {
+			return verror.New(verror.ErrBadProtocol, fc.ctx, verror.New(errDischargeImpetus, nil, err))
+		}
+		fc.discharges = fc.dc.PrepareDischarges(fc.ctx, fc.blessings.ThirdPartyCaveats(), impetus)
+	}
+
+	// Prepare blessings that must be granted to the server (using any
+	// rpc.Granter implementation in 'opts').
+	//
+	// NOTE(ataly, suharshs): Before invoking the granter, we set the parameters
+	// of the current call. The user can now retrieve the principal via
+	// v23.GetPrincipal(ctx), or via call.LocalPrincipal(). While in theory the
+	// two principals can be different, the flow.LocalPrincipal == nil check at
+	// the beginning of this method ensures that the two are the same and non-nil
+	// at this point in the code.
+	ldischargeMap := make(map[string]security.Discharge)
+	for _, d := range fc.discharges {
+		ldischargeMap[d.ID()] = d
+	}
+	seccall := security.NewCall(&security.CallParams{
+		LocalPrincipal:   fc.flow.LocalPrincipal(),
+		LocalBlessings:   fc.blessings,
+		RemoteBlessings:  fc.flow.RemoteBlessings(),
+		LocalEndpoint:    fc.flow.LocalEndpoint(),
+		RemoteEndpoint:   fc.flow.RemoteEndpoint(),
+		LocalDischarges:  ldischargeMap,
+		RemoteDischarges: fc.flow.RemoteDischarges(),
+		Method:           method,
+		Suffix:           suffix,
+	})
+	if err := fc.prepareGrantedBlessings(ctx, seccall, opts); err != nil {
+		return err
+	}
+	return nil
+}
+
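+// prepareGrantedBlessings invokes any rpc.Granter options in opts, unioning
+// the blessings they produce into fc.grantedBlessings. As an illustrative
+// sketch, a caller could supply a granter such as (the granter type is
+// hypothetical; any type implementing rpc.Granter works):
+//
+//	type granter struct{ rpc.CallOpt }
+//
+//	func (granter) Grant(ctx *context.T, call security.Call) (security.Blessings, error) {
+//		p := call.LocalPrincipal()
+//		return p.Bless(call.RemoteBlessings().PublicKey(),
+//			p.BlessingStore().Default(), "friend", security.UnconstrainedUse())
+//	}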
+func (fc *flowClient) prepareGrantedBlessings(ctx *context.T, call security.Call, opts []rpc.CallOpt) error {
+	for _, o := range opts {
+		switch v := o.(type) {
+		case rpc.Granter:
+			if b, err := v.Grant(ctx, call); err != nil {
+				return verror.New(errBlessingGrant, fc.ctx, err)
+			} else if fc.grantedBlessings, err = security.UnionOfBlessings(fc.grantedBlessings, b); err != nil {
+				return verror.New(errBlessingAdd, fc.ctx, err)
+			}
+		}
+	}
+	return nil
+}
+
+func (c *client) Close() {
+	defer vlog.LogCall()() // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	for _, v := range c.vcCache.Close() {
+		c.streamMgr.ShutdownEndpoint(v.RemoteEndpoint())
+	}
+}
+
+// flowClient implements the RPC client-side protocol for a single RPC, over a
+// flow that's already connected to the server.
+type flowClient struct {
+	ctx      *context.T   // context to annotate with call details
+	dec      *vom.Decoder // to decode responses and results from the server
+	enc      *vom.Encoder // to encode requests and args to the server
+	server   []string     // Blessings bound to the server that authorize it to receive the RPC request from the client.
+	flow     stream.Flow  // the underlying flow
+	response rpc.Response // each decoded response message is kept here
+
+	discharges []security.Discharge // discharges used for this request
+	dc         vc.DischargeClient   // client-global discharge-client
+
+	blessings        security.Blessings // the local blessings for the current RPC.
+	grantedBlessings security.Blessings // the blessings granted to the server.
+
+	sendClosedMu sync.Mutex
+	sendClosed   bool // is the send side already closed? GUARDED_BY(sendClosedMu)
+	finished     bool // has Finish() already been called?
+}
+
+var _ rpc.ClientCall = (*flowClient)(nil)
+var _ rpc.Stream = (*flowClient)(nil)
+
+func newFlowClient(ctx *context.T, flow stream.Flow, server []string, dc vc.DischargeClient) (*flowClient, error) {
+	fc := &flowClient{
+		ctx:    ctx,
+		flow:   flow,
+		server: server,
+		dc:     dc,
+	}
+	typeenc := flow.VCDataCache().Get(vc.TypeEncoderKey{})
+	if typeenc == nil {
+		fc.enc = vom.NewEncoder(flow)
+		fc.dec = vom.NewDecoder(flow)
+	} else {
+		fc.enc = vom.NewEncoderWithTypeEncoder(flow, typeenc.(*vom.TypeEncoder))
+		typedec := flow.VCDataCache().Get(vc.TypeDecoderKey{})
+		fc.dec = vom.NewDecoderWithTypeDecoder(flow, typedec.(*vom.TypeDecoder))
+	}
+	return fc, nil
+}
+
+// close determines the appropriate error to return; in particular,
+// if a timeout or cancelation has occurred then any error
+// is turned into a timeout or cancelation as appropriate.
+// Cancelation takes precedence over timeout, and both take precedence
+// over the original error. This is needed because
+// a timeout can lead to any number of other errors due to the underlying
+// network connection being shutdown abruptly.
+func (fc *flowClient) close(err error) error {
+	subErr := verror.SubErr{Err: err, Options: verror.Print}
+	subErr.Name = "remote=" + fc.flow.RemoteEndpoint().String()
+	if cerr := fc.flow.Close(); cerr != nil && err == nil {
+		return verror.New(verror.ErrInternal, fc.ctx, subErr)
+	}
+	if err == nil {
+		return nil
+	}
+	switch verror.ErrorID(err) {
+	case verror.ErrCanceled.ID:
+		return err
+	case verror.ErrTimeout.ID:
+		// Canceled trumps timeout.
+		if fc.ctx.Err() == context.Canceled {
+			return verror.AddSubErrs(verror.New(verror.ErrCanceled, fc.ctx), fc.ctx, subErr)
+		}
+		return err
+	default:
+		switch fc.ctx.Err() {
+		case context.DeadlineExceeded:
+			timeout := verror.New(verror.ErrTimeout, fc.ctx)
+			err := verror.AddSubErrs(timeout, fc.ctx, subErr)
+			return err
+		case context.Canceled:
+			canceled := verror.New(verror.ErrCanceled, fc.ctx)
+			err := verror.AddSubErrs(canceled, fc.ctx, subErr)
+			return err
+		}
+	}
+	switch verror.ErrorID(err) {
+	case errRequestEncoding.ID, errArgEncoding.ID, errResponseDecoding.ID:
+		return verror.New(verror.ErrBadProtocol, fc.ctx, err)
+	}
+	return err
+}
+
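+// start sends the request header for the call, followed by the positional
+// args; informally, the wire sequence it produces is:
+//
+//	rpc.Request{Suffix, Method, NumPosArgs, ...}  // VOM-encoded header
+//	args[0], ..., args[NumPosArgs-1]              // VOM-encoded positional args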
+func (fc *flowClient) start(suffix, method string, args []interface{}, deadline time.Time) error {
+	// Encode the Blessings information for the client to authorize the flow.
+	var blessingsRequest rpc.BlessingsRequest
+	if fc.flow.LocalPrincipal() != nil {
+		blessingsRequest = clientEncodeBlessings(fc.flow.VCDataCache(), fc.blessings)
+	}
+	req := rpc.Request{
+		Suffix:           suffix,
+		Method:           method,
+		NumPosArgs:       uint64(len(args)),
+		Deadline:         vtime.Deadline{deadline},
+		GrantedBlessings: fc.grantedBlessings,
+		Blessings:        blessingsRequest,
+		Discharges:       fc.discharges,
+		TraceRequest:     vtrace.GetRequest(fc.ctx),
+		Language:         string(i18n.GetLangID(fc.ctx)),
+	}
+	if err := fc.enc.Encode(req); err != nil {
+		berr := verror.New(verror.ErrBadProtocol, fc.ctx, verror.New(errRequestEncoding, fc.ctx, fmt.Sprintf("%#v", req), err))
+		return fc.close(berr)
+	}
+	for ix, arg := range args {
+		if err := fc.enc.Encode(arg); err != nil {
+			berr := verror.New(errArgEncoding, fc.ctx, ix, err)
+			return fc.close(berr)
+		}
+	}
+	return nil
+}
+
+func (fc *flowClient) Send(item interface{}) error {
+	defer vlog.LogCallf("item=")("") // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	if fc.sendClosed {
+		return verror.New(verror.ErrAborted, fc.ctx)
+	}
+
+	// The empty request header indicates what follows is a streaming arg.
+	if err := fc.enc.Encode(rpc.Request{}); err != nil {
+		berr := verror.New(errRequestEncoding, fc.ctx, rpc.Request{}, err)
+		return fc.close(berr)
+	}
+	if err := fc.enc.Encode(item); err != nil {
+		berr := verror.New(errArgEncoding, fc.ctx, -1, err)
+		return fc.close(berr)
+	}
+	return nil
+}
+
+// decodeNetError tests for a net.Error from the lower stream code and
+// translates it into an appropriate error to be returned by the higher level
+// RPC API calls. It also tests for the net.Error being a stream.NetError
+// and, if so, uses the error it stores rather than the stream.NetError itself
+// as its return value. This allows the stack trace of the original
+// error to be chained to that of any verror created with it as a first parameter.
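+//
+// Typical use, as in Recv and Finish below:
+//
+//	id, verr := decodeNetError(fc.ctx, err)
+//	berr := verror.New(id, fc.ctx, verror.New(errResponseDecoding, fc.ctx, verr))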
+func decodeNetError(ctx *context.T, err error) (verror.IDAction, error) {
+	if neterr, ok := err.(net.Error); ok {
+		if streamNeterr, ok := err.(*stream.NetError); ok {
+			err = streamNeterr.Err() // return the error stored in the stream.NetError
+		}
+		if neterr.Timeout() || neterr.Temporary() {
+			// If a read is canceled in the lower levels we see
+			// a timeout error - see readLocked in vc/reader.go
+			if ctx.Err() == context.Canceled {
+				return verror.ErrCanceled, err
+			}
+			return verror.ErrTimeout, err
+		}
+	}
+	if id := verror.ErrorID(err); id != verror.ErrUnknown.ID {
+		return verror.IDAction{id, verror.Action(err)}, err
+	}
+	return verror.ErrBadProtocol, err
+}
+
+func (fc *flowClient) Recv(itemptr interface{}) error {
+	defer vlog.LogCallf("itemptr=")("") // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	switch {
+	case fc.response.Error != nil:
+		return verror.New(verror.ErrBadProtocol, fc.ctx, fc.response.Error)
+	case fc.response.EndStreamResults:
+		return io.EOF
+	}
+
+	// Decode the response header and handle errors and EOF.
+	if err := fc.dec.Decode(&fc.response); err != nil {
+		id, verr := decodeNetError(fc.ctx, err)
+		berr := verror.New(id, fc.ctx, verror.New(errResponseDecoding, fc.ctx, verr))
+		return fc.close(berr)
+	}
+	if fc.response.Error != nil {
+		return verror.New(verror.ErrBadProtocol, fc.ctx, fc.response.Error)
+	}
+	if fc.response.EndStreamResults {
+		// Return EOF to indicate to the caller that there are no more stream
+		// results.  Any error sent by the server is kept in fc.response.Error, and
+		// returned to the user in Finish.
+		return io.EOF
+	}
+	// Decode the streaming result.
+	if err := fc.dec.Decode(itemptr); err != nil {
+		id, verr := decodeNetError(fc.ctx, err)
+		berr := verror.New(id, fc.ctx, verror.New(errResponseDecoding, fc.ctx, verr))
+		// TODO(cnicolaou): should we be caching this?
+		fc.response.Error = berr
+		return fc.close(berr)
+	}
+	return nil
+}
+
+func (fc *flowClient) CloseSend() error {
+	defer vlog.LogCall()() // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	return fc.closeSend()
+}
+
+// closeSend ensures CloseSend always returns verror.E.
+func (fc *flowClient) closeSend() error {
+	fc.sendClosedMu.Lock()
+	defer fc.sendClosedMu.Unlock()
+	if fc.sendClosed {
+		return nil
+	}
+	if err := fc.enc.Encode(rpc.Request{EndStreamArgs: true}); err != nil {
+		// TODO(caprita): Indiscriminately closing the flow below causes
+		// a race as described in:
+		// https://docs.google.com/a/google.com/document/d/1C0kxfYhuOcStdV7tnLZELZpUhfQCZj47B0JrzbE29h8/edit
+		//
+		// There should be a finer grained way to fix this (for example,
+		// encoding errors should probably still result in closing the
+		// flow); on the flip side, there may exist other instances
+		// where we are closing the flow but should not.
+		//
+		// For now, commenting out the line below removes the flakiness
+		// from our existing unit tests, but this needs to be revisited
+		// and fixed correctly.
+		//
+		//   return fc.close(verror.ErrBadProtocolf("rpc: end stream args encoding failed: %v", err))
+	}
+	fc.sendClosed = true
+	return nil
+}
+
+func (fc *flowClient) Finish(resultptrs ...interface{}) error {
+	defer vlog.LogCallf("resultptrs...=%v", resultptrs)("") // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	err := fc.finish(resultptrs...)
+	vtrace.GetSpan(fc.ctx).Finish()
+	return err
+}
+
+// finish ensures Finish always returns a verror.E.
+func (fc *flowClient) finish(resultptrs ...interface{}) error {
+	if fc.finished {
+		err := verror.New(errClientFinishAlreadyCalled, fc.ctx)
+		return fc.close(verror.New(verror.ErrBadState, fc.ctx, err))
+	}
+	fc.finished = true
+
+	// Call closeSend implicitly, if the user hasn't already called it.  There are
+	// three cases:
+	// 1) Server is blocked on Recv waiting for the final request message.
+	// 2) Server has already finished processing, the final response message and
+	//    out args are queued up on the client, and the flow is closed.
+	// 3) Between 1 and 2: the server isn't blocked on Recv, but the final
+	//    response and args aren't queued up yet, and the flow isn't closed.
+	//
+	// We must call closeSend to handle case (1) and unblock the server; otherwise
+	// we'll deadlock with both client and server waiting for each other.  We must
+	// ignore the error (if any) to handle case (2).  In that case the flow is
+	// closed, meaning writes will fail and reads will succeed, and closeSend will
+	// always return an error.  But this isn't a "real" error; the client should
+	// read the rest of the results and succeed.
+	_ = fc.closeSend()
+	// Decode the response header, if it hasn't already been decoded by Recv.
+	if fc.response.Error == nil && !fc.response.EndStreamResults {
+		if err := fc.dec.Decode(&fc.response); err != nil {
+			id, verr := decodeNetError(fc.ctx, err)
+			berr := verror.New(id, fc.ctx, verror.New(errResponseDecoding, fc.ctx, verr))
+			return fc.close(berr)
+		}
+		// The response header must indicate the streaming results have ended.
+		if fc.response.Error == nil && !fc.response.EndStreamResults {
+			berr := verror.New(errRemainingStreamResults, fc.ctx)
+			return fc.close(berr)
+		}
+	}
+	if fc.response.AckBlessings {
+		clientAckBlessings(fc.flow.VCDataCache(), fc.blessings)
+	}
+	// Incorporate any VTrace info that was returned.
+	vtrace.GetStore(fc.ctx).Merge(fc.response.TraceResponse)
+	if fc.response.Error != nil {
+		id := verror.ErrorID(fc.response.Error)
+		if id == verror.ErrNoAccess.ID && fc.dc != nil {
+			// In case the error was caused by a bad discharge, we do not want to get stuck
+			// with retrying again and again with this discharge. As there is no direct way
+			// to detect it, we conservatively flush all discharges we used from the cache.
+			// TODO(ataly,andreser): add verror.BadDischarge and handle it explicitly?
+			vlog.VI(3).Infof("Discarding %d discharges as RPC failed with %v", len(fc.discharges), fc.response.Error)
+			fc.dc.Invalidate(fc.discharges...)
+		}
+		if id == errBadNumInputArgs.ID || id == errBadInputArg.ID {
+			return fc.close(verror.New(verror.ErrBadProtocol, fc.ctx, fc.response.Error))
+		}
+		return fc.close(verror.Convert(verror.ErrInternal, fc.ctx, fc.response.Error))
+	}
+	if got, want := fc.response.NumPosResults, uint64(len(resultptrs)); got != want {
+		berr := verror.New(verror.ErrBadProtocol, fc.ctx, verror.New(errMismatchedResults, fc.ctx, got, want))
+		return fc.close(berr)
+	}
+	for ix, r := range resultptrs {
+		if err := fc.dec.Decode(r); err != nil {
+			id, verr := decodeNetError(fc.ctx, err)
+			berr := verror.New(id, fc.ctx, verror.New(errResultDecoding, fc.ctx, ix, verr))
+			return fc.close(berr)
+		}
+	}
+	return fc.close(nil)
+}
+
+func (fc *flowClient) RemoteBlessings() ([]string, security.Blessings) {
+	defer vlog.LogCall()() // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	return fc.server, fc.flow.RemoteBlessings()
+}
+
+func bpatterns(patterns []string) []security.BlessingPattern {
+	if patterns == nil {
+		return nil
+	}
+	bpatterns := make([]security.BlessingPattern, len(patterns))
+	for i, p := range patterns {
+		bpatterns[i] = security.BlessingPattern(p)
+	}
+	return bpatterns
+}
diff --git a/runtime/internal/rpc/consts.go b/runtime/internal/rpc/consts.go
new file mode 100644
index 0000000..b194861
--- /dev/null
+++ b/runtime/internal/rpc/consts.go
@@ -0,0 +1,20 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import "time"
+
+const (
+	// The publisher re-mounts on this period.
+	publishPeriod = time.Minute
+
+	// The server uses this timeout for incoming calls before the real timeout is known.
+	// The client uses this as the default max time for connecting to the server including
+	// name resolution.
+	defaultCallTimeout = time.Minute
+
+	// The client uses this as the maximum time between retry attempts when starting a call.
+	maxBackoff = time.Minute
+)
diff --git a/runtime/internal/rpc/debug_test.go b/runtime/internal/rpc/debug_test.go
new file mode 100644
index 0000000..6d9445c
--- /dev/null
+++ b/runtime/internal/rpc/debug_test.go
@@ -0,0 +1,132 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+	"io"
+	"reflect"
+	"sort"
+	"testing"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/options"
+	"v.io/v23/rpc"
+	"v.io/x/lib/vlog"
+	"v.io/x/ref/lib/stats"
+	"v.io/x/ref/runtime/internal/rpc/stream/manager"
+	tnaming "v.io/x/ref/runtime/internal/testing/mocks/naming"
+	"v.io/x/ref/services/debug/debuglib"
+	"v.io/x/ref/test/testutil"
+)
+
+func TestDebugServer(t *testing.T) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	// Set up the client and server principals, with the client willing to share its
+	// blessing with the server.
+	var (
+		pclient = testutil.NewPrincipal("client")
+		pserver = testutil.NewPrincipal("server")
+		bclient = bless(pserver, pclient, "client") // server/client blessing.
+	)
+	pclient.AddToRoots(bclient)                    // Client recognizes "server" as a root of blessings.
+	pclient.BlessingStore().Set(bclient, "server") // Client presents bclient to server
+
+	debugDisp := debuglib.NewDispatcher(vlog.Log.LogDir, nil)
+
+	sm := manager.InternalNew(naming.FixedRoutingID(0x555555555))
+	defer sm.Shutdown()
+	ns := tnaming.NewSimpleNamespace()
+	server, err := testInternalNewServer(ctx, sm, ns, pserver, ReservedNameDispatcher{debugDisp})
+	if err != nil {
+		t.Fatalf("InternalNewServer failed: %v", err)
+	}
+	defer server.Stop()
+	eps, err := server.Listen(listenSpec)
+	if err != nil {
+		t.Fatalf("server.Listen failed: %v", err)
+	}
+	if err := server.Serve("", &testObject{}, nil); err != nil {
+		t.Fatalf("server.Serve failed: %v", err)
+	}
+	ctx, _ = v23.WithPrincipal(ctx, pclient)
+	client, err := InternalNewClient(sm, ns)
+	if err != nil {
+		t.Fatalf("InternalNewClient failed: %v", err)
+	}
+	defer client.Close()
+	ep := eps[0]
+	// Call the Foo method on ""
+	{
+		var value string
+		if err := client.Call(ctx, ep.Name(), "Foo", nil, []interface{}{&value}); err != nil {
+			t.Fatalf("client.Call failed: %v", err)
+		}
+		if want := "BAR"; value != want {
+			t.Errorf("unexpected value: Got %v, want %v", value, want)
+		}
+	}
+	// Call Value on __debug/stats/testing/foo
+	{
+		foo := stats.NewString("testing/foo")
+		foo.Set("The quick brown fox jumps over the lazy dog")
+		addr := naming.JoinAddressName(ep.String(), "__debug/stats/testing/foo")
+		var value string
+		if err := client.Call(ctx, addr, "Value", nil, []interface{}{&value}, options.NoResolve{}); err != nil {
+			t.Fatalf("client.Call failed: %v", err)
+		}
+		if want := foo.Value(); value != want {
+			t.Errorf("unexpected result: Got %v, want %v", value, want)
+		}
+	}
+
+	// Call Glob
+	testcases := []struct {
+		name, pattern string
+		expected      []string
+	}{
+		{"", "*", []string{}},
+		{"", "__*", []string{"__debug"}},
+		{"", "__*/*", []string{"__debug/logs", "__debug/pprof", "__debug/stats", "__debug/vtrace"}},
+		{"__debug", "*", []string{"logs", "pprof", "stats", "vtrace"}},
+	}
+	for _, tc := range testcases {
+		addr := naming.JoinAddressName(ep.String(), tc.name)
+		call, err := client.StartCall(ctx, addr, rpc.GlobMethod, []interface{}{tc.pattern}, options.NoResolve{})
+		if err != nil {
+			t.Fatalf("client.StartCall failed for %q: %v", tc.name, err)
+		}
+		results := []string{}
+		for {
+			var gr naming.GlobReply
+			if err := call.Recv(&gr); err != nil {
+				if err != io.EOF {
+					t.Fatalf("Recv failed for %q: %v. Results received thus far: %q", tc.name, err, results)
+				}
+				break
+			}
+			switch v := gr.(type) {
+			case naming.GlobReplyEntry:
+				results = append(results, v.Value.Name)
+			}
+		}
+		if err := call.Finish(); err != nil {
+			t.Fatalf("call.Finish failed for %q: %v", tc.name, err)
+		}
+		sort.Strings(results)
+		if !reflect.DeepEqual(tc.expected, results) {
+			t.Errorf("unexpected results for %q. Got %v, want %v", tc.name, results, tc.expected)
+		}
+	}
+}
+
+type testObject struct {
+}
+
+func (o testObject) Foo(*context.T, rpc.ServerCall) (string, error) {
+	return "BAR", nil
+}
diff --git a/runtime/internal/rpc/discharges.go b/runtime/internal/rpc/discharges.go
new file mode 100644
index 0000000..c67c530
--- /dev/null
+++ b/runtime/internal/rpc/discharges.go
@@ -0,0 +1,294 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+
+	"v.io/v23/context"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+	"v.io/v23/vdl"
+	"v.io/v23/vtrace"
+	"v.io/x/lib/vlog"
+)
+
+// NoDischarges specifies that the RPC call should not fetch discharges.
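+// For example, the discharge client below passes NoDischarges when calling a
+// discharger, so that the discharge-fetching RPC does not itself attempt to
+// fetch discharges (args and results elided):
+//
+//	d.c.Call(ctx, tp.Location(), "Discharge", args, results, NoDischarges{})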
+type NoDischarges struct{}
+
+func (NoDischarges) RPCCallOpt() {
+	defer vlog.LogCall()() // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+}
+
+func (NoDischarges) NSOpt() {
+	defer vlog.LogCall()() // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+}
+
+// discharger implements vc.DischargeClient.
+type dischargeClient struct {
+	c                     rpc.Client
+	defaultCtx            *context.T
+	cache                 dischargeCache
+	dischargeExpiryBuffer time.Duration
+}
+
+// InternalNewDischargeClient creates a vc.DischargeClient that will be used to
+// fetch discharges to support blessings presented to a remote process.
+//
+// defaultCtx is the context used when none (nil) is explicitly provided to the
+// PrepareDischarges call. This typically happens when fetching discharges on
+// behalf of a server accepting connections, i.e., before any notion of the
+// "context" of an API call has been established.
+// dischargeExpiryBuffer specifies how long before a discharge expires it
+// should be refreshed; an attempt is made to refresh each discharge
+// dischargeExpiryBuffer before its expiration.
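+//
+// An illustrative construction (the one-minute buffer is hypothetical):
+//
+//	dc := InternalNewDischargeClient(ctx, client, time.Minute)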
+func InternalNewDischargeClient(defaultCtx *context.T, client rpc.Client, dischargeExpiryBuffer time.Duration) vc.DischargeClient {
+	return &dischargeClient{
+		c:          client,
+		defaultCtx: defaultCtx,
+		cache: dischargeCache{
+			cache:    make(map[dischargeCacheKey]security.Discharge),
+			idToKeys: make(map[string][]dischargeCacheKey),
+		},
+		dischargeExpiryBuffer: dischargeExpiryBuffer,
+	}
+}
+
+func (*dischargeClient) RPCStreamListenerOpt() {}
+func (*dischargeClient) RPCStreamVCOpt()       {}
+
+// PrepareDischarges retrieves the caveat discharges required for using blessings
+// at server. The discharges are either found in the dischargeCache, in the call
+// options, or requested from the discharge issuer indicated on the caveat.
+// Note that requesting a discharge is an rpc call, so one copy of this
+// function must be able to successfully terminate while another is blocked.
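+//
+// A typical use, mirroring prepareBlessingsAndDischarges in client.go:
+//
+//	discharges := dc.PrepareDischarges(ctx, blessings.ThirdPartyCaveats(), impetus)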
+func (d *dischargeClient) PrepareDischarges(ctx *context.T, forcaveats []security.Caveat, impetus security.DischargeImpetus) (ret []security.Discharge) {
+	if len(forcaveats) == 0 {
+		return
+	}
+	// Make a copy since this copy will be mutated.
+	var caveats []security.Caveat
+	var filteredImpetuses []security.DischargeImpetus
+	for _, cav := range forcaveats {
+		// It shouldn't happen, but in case there are non-third-party
+		// caveats, drop them.
+		if tp := cav.ThirdPartyDetails(); tp != nil {
+			caveats = append(caveats, cav)
+			filteredImpetuses = append(filteredImpetuses, filteredImpetus(tp.Requirements(), impetus))
+		}
+	}
+
+	// Gather discharges from cache.
+	// (Collect a set of pointers, where nil implies a missing discharge)
+	discharges := make([]*security.Discharge, len(caveats))
+	if d.cache.Discharges(caveats, filteredImpetuses, discharges) > 0 {
+		// Fetch discharges for caveats for which no discharges were
+		// found in the cache.
+		if ctx == nil {
+			ctx = d.defaultCtx
+		}
+		if ctx != nil {
+			var span vtrace.Span
+			ctx, span = vtrace.WithNewSpan(ctx, "Fetching Discharges")
+			defer span.Finish()
+		}
+		d.fetchDischarges(ctx, caveats, filteredImpetuses, discharges)
+	}
+	for _, d := range discharges {
+		if d != nil {
+			ret = append(ret, *d)
+		}
+	}
+	return
+}
+
+func (d *dischargeClient) Invalidate(discharges ...security.Discharge) {
+	d.cache.invalidate(discharges...)
+}
+
+// fetchDischarges fills in the out slice by fetching discharges for caveats
+// from the appropriate discharge service. Since there may be dependencies in
+// the caveats, fetchDischarges keeps retrying until either all discharges can
+// be fetched or no new discharges are fetched.
+// REQUIRES: len(caveats) == len(out)
+// REQUIRES: caveats[i].ThirdPartyDetails() != nil for 0 <= i < len(caveats)
+func (d *dischargeClient) fetchDischarges(ctx *context.T, caveats []security.Caveat, impetuses []security.DischargeImpetus, out []*security.Discharge) {
+	var wg sync.WaitGroup
+	for {
+		type fetched struct {
+			idx       int
+			discharge *security.Discharge
+			impetus   security.DischargeImpetus
+		}
+		discharges := make(chan fetched, len(caveats))
+		want := 0
+		for i := range caveats {
+			if !d.shouldFetchDischarge(out[i]) {
+				continue
+			}
+			want++
+			wg.Add(1)
+			go func(i int, ctx *context.T, cav security.Caveat) {
+				defer wg.Done()
+				tp := cav.ThirdPartyDetails()
+				var dis security.Discharge
+				vlog.VI(3).Infof("Fetching discharge for %v", tp)
+				if err := d.c.Call(ctx, tp.Location(), "Discharge", []interface{}{cav, impetuses[i]}, []interface{}{&dis}, NoDischarges{}); err != nil {
+					vlog.VI(3).Infof("Discharge fetch for %v failed: %v", tp, err)
+					return
+				}
+				discharges <- fetched{i, &dis, impetuses[i]}
+			}(i, ctx, caveats[i])
+		}
+		wg.Wait()
+		close(discharges)
+		var got int
+		for fetched := range discharges {
+			d.cache.Add(*fetched.discharge, fetched.impetus)
+			out[fetched.idx] = fetched.discharge
+			got++
+		}
+		if want > 0 {
+			vlog.VI(3).Infof("fetchDischarges: got %d of %d discharge(s) (total %d caveats)", got, want, len(caveats))
+		}
+		if got == 0 || got == want {
+			return
+		}
+	}
+}
+
+func (d *dischargeClient) shouldFetchDischarge(dis *security.Discharge) bool {
+	if dis == nil {
+		return true
+	}
+	expiry := dis.Expiry()
+	if expiry.IsZero() {
+		return false
+	}
+	return expiry.Before(time.Now().Add(d.dischargeExpiryBuffer))
+}
+
+// dischargeCache is a concurrency-safe cache for third party caveat discharges.
+type dischargeCache struct {
+	mu       sync.RWMutex
+	cache    map[dischargeCacheKey]security.Discharge // GUARDED_BY(mu)
+	idToKeys map[string][]dischargeCacheKey           // GUARDED_BY(mu)
+}
+
+type dischargeCacheKey struct {
+	id, method, serverPatterns string
+}
+
+func (dcc *dischargeCache) cacheKey(id string, impetus security.DischargeImpetus) dischargeCacheKey {
+	// We currently do not cache on impetus.Arguments because it seems there
+	// is no universal way to generate a key from them.
+	// Add sorted BlessingPatterns to the key.
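+	// For example, an impetus with Method "foo" and Server patterns
+	// ["b", "a"] yields the key {id, "foo", "a,b"}; sorting ensures that
+	// logically-equal impetuses map to the same key.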
+	var bps []string
+	for _, bp := range impetus.Server {
+		bps = append(bps, string(bp))
+	}
+	sort.Strings(bps)
+	return dischargeCacheKey{
+		id:             id,
+		method:         impetus.Method,
+		serverPatterns: strings.Join(bps, ","), // "," is restricted in blessingPatterns.
+	}
+}
+
+// Add inserts the discharge into the cache, overwriting any previous
+// discharge cached for the same caveat and impetus.
+func (dcc *dischargeCache) Add(d security.Discharge, filteredImpetus security.DischargeImpetus) {
+	// Only add to the cache if the caveat did not require arguments.
+	if len(filteredImpetus.Arguments) > 0 {
+		return
+	}
+	id := d.ID()
+	dcc.mu.Lock()
+	dcc.cache[dcc.cacheKey(id, filteredImpetus)] = d
+	if _, ok := dcc.idToKeys[id]; !ok {
+		dcc.idToKeys[id] = []dischargeCacheKey{}
+	}
+	dcc.idToKeys[id] = append(dcc.idToKeys[id], dcc.cacheKey(id, filteredImpetus))
+	dcc.mu.Unlock()
+}
+
+// Discharges takes a slice of caveats, a slice of filtered Discharge impetuses
+// corresponding to the caveats, and a slice of discharges of the same length and
+// fills in nil entries in the discharges slice with pointers to cached discharges
+// (if there are any).
+//
+// REQUIRES: len(caveats) == len(impetuses) == len(out)
+// REQUIRES: caveats[i].ThirdPartyDetails() != nil, for all 0 <= i < len(caveats)
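+//
+// On return, entries of out that are still nil had no (unexpired) cached
+// discharge; the return value is the number of such entries.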
+func (dcc *dischargeCache) Discharges(caveats []security.Caveat, impetuses []security.DischargeImpetus, out []*security.Discharge) (remaining int) {
+	dcc.mu.Lock()
+	for i, d := range out {
+		if d != nil {
+			continue
+		}
+		id := caveats[i].ThirdPartyDetails().ID()
+		key := dcc.cacheKey(id, impetuses[i])
+		if cached, exists := dcc.cache[key]; exists {
+			out[i] = &cached
+			// If the discharge has expired, purge it from the cache.
+			if hasDischargeExpired(out[i]) {
+				out[i] = nil
+				delete(dcc.cache, key)
+				remaining++
+			}
+		} else {
+			remaining++
+		}
+	}
+	dcc.mu.Unlock()
+	return
+}
+
+func hasDischargeExpired(dis *security.Discharge) bool {
+	expiry := dis.Expiry()
+	if expiry.IsZero() {
+		return false
+	}
+	return expiry.Before(time.Now())
+}
+
+func (dcc *dischargeCache) invalidate(discharges ...security.Discharge) {
+	dcc.mu.Lock()
+	for _, d := range discharges {
+		if keys, ok := dcc.idToKeys[d.ID()]; ok {
+			var newKeys []dischargeCacheKey
+			for _, k := range keys {
+				if cached := dcc.cache[k]; cached.Equivalent(d) {
+					delete(dcc.cache, k)
+				} else {
+					newKeys = append(newKeys, k)
+				}
+			}
+			dcc.idToKeys[d.ID()] = newKeys
+		}
+	}
+	dcc.mu.Unlock()
+}
+
+// filteredImpetus returns a copy of 'before' after removing any values that are not required as per 'r'.
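+// For example, if r sets only ReportMethod, the returned impetus carries just
+// before.Method; the Server patterns and Arguments are dropped.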
+func filteredImpetus(r security.ThirdPartyRequirements, before security.DischargeImpetus) (after security.DischargeImpetus) {
+	if r.ReportServer && len(before.Server) > 0 {
+		after.Server = make([]security.BlessingPattern, len(before.Server))
+		for i := range before.Server {
+			after.Server[i] = before.Server[i]
+		}
+	}
+	if r.ReportMethod {
+		after.Method = before.Method
+	}
+	if r.ReportArguments && len(before.Arguments) > 0 {
+		after.Arguments = make([]*vdl.Value, len(before.Arguments))
+		for i := range before.Arguments {
+			after.Arguments[i] = vdl.CopyValue(before.Arguments[i])
+		}
+	}
+	return
+}
diff --git a/runtime/internal/rpc/discharges_test.go b/runtime/internal/rpc/discharges_test.go
new file mode 100644
index 0000000..7b67b1b
--- /dev/null
+++ b/runtime/internal/rpc/discharges_test.go
@@ -0,0 +1,105 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+	"testing"
+	"time"
+
+	"v.io/v23/security"
+	"v.io/v23/vdl"
+	"v.io/x/ref/test/testutil"
+)
+
+func TestDischargeClientCache(t *testing.T) {
+	dcc := &dischargeCache{
+		cache:    make(map[dischargeCacheKey]security.Discharge),
+		idToKeys: make(map[string][]dischargeCacheKey),
+	}
+
+	var (
+		discharger = testutil.NewPrincipal("discharger")
+		expiredCav = mkCaveat(security.NewPublicKeyCaveat(discharger.PublicKey(), "moline", security.ThirdPartyRequirements{}, security.UnconstrainedUse()))
+		argsCav    = mkCaveat(security.NewPublicKeyCaveat(discharger.PublicKey(), "moline", security.ThirdPartyRequirements{}, security.UnconstrainedUse()))
+		methodCav  = mkCaveat(security.NewPublicKeyCaveat(discharger.PublicKey(), "moline", security.ThirdPartyRequirements{}, security.UnconstrainedUse()))
+		serverCav  = mkCaveat(security.NewPublicKeyCaveat(discharger.PublicKey(), "moline", security.ThirdPartyRequirements{}, security.UnconstrainedUse()))
+
+		dExpired = mkDischarge(discharger.MintDischarge(expiredCav, mkCaveat(security.NewExpiryCaveat(time.Now().Add(-1*time.Minute)))))
+		dArgs    = mkDischarge(discharger.MintDischarge(argsCav, security.UnconstrainedUse()))
+		dMethod  = mkDischarge(discharger.MintDischarge(methodCav, security.UnconstrainedUse()))
+		dServer  = mkDischarge(discharger.MintDischarge(serverCav, security.UnconstrainedUse()))
+
+		emptyImp       = security.DischargeImpetus{}
+		argsImp        = security.DischargeImpetus{Arguments: []*vdl.Value{&vdl.Value{}}}
+		methodImp      = security.DischargeImpetus{Method: "foo"}
+		otherMethodImp = security.DischargeImpetus{Method: "bar"}
+		serverImp      = security.DischargeImpetus{Server: []security.BlessingPattern{security.BlessingPattern("fooserver")}}
+		otherServerImp = security.DischargeImpetus{Server: []security.BlessingPattern{security.BlessingPattern("barserver")}}
+	)
+
+	// Discharges for different cavs should not be cached.
+	d := mkDischarge(discharger.MintDischarge(argsCav, security.UnconstrainedUse()))
+	dcc.Add(d, emptyImp)
+	outdis := make([]*security.Discharge, 1)
+	if remaining := dcc.Discharges([]security.Caveat{methodCav}, []security.DischargeImpetus{emptyImp}, outdis); remaining == 0 {
+		t.Errorf("Discharge for different caveat should not have been in cache")
+	}
+	dcc.invalidate(d)
+
+	// Add some discharges into the cache.
+	dcc.Add(dArgs, argsImp)
+	dcc.Add(dMethod, methodImp)
+	dcc.Add(dServer, serverImp)
+	dcc.Add(dExpired, emptyImp)
+
+	testCases := []struct {
+		caveat          security.Caveat           // caveat that we are fetching discharges for.
+		queryImpetus    security.DischargeImpetus // Impetus used to query the cache.
+		cachedDischarge *security.Discharge       // Discharge that we expect to be returned from the cache, nil if the discharge should not be cached.
+	}{
+		// Expired discharges should not be returned by the cache.
+		{expiredCav, emptyImp, nil},
+
+		// Discharges with Impetuses that have Arguments should not be cached.
+		{argsCav, argsImp, nil},
+
+		{methodCav, methodImp, &dMethod},
+		{methodCav, otherMethodImp, nil},
+		{methodCav, emptyImp, nil},
+
+		{serverCav, serverImp, &dServer},
+		{serverCav, otherServerImp, nil},
+		{serverCav, emptyImp, nil},
+	}
+
+	for i, test := range testCases {
+		out := make([]*security.Discharge, 1)
+		remaining := dcc.Discharges([]security.Caveat{test.caveat}, []security.DischargeImpetus{test.queryImpetus}, out)
+		if test.cachedDischarge != nil {
+			got := "nil"
+			if remaining == 0 {
+				got = out[0].ID()
+			}
+			if got != test.cachedDischarge.ID() {
+				t.Errorf("#%d: got discharge %v, want %v, queried with %v", i, got, test.cachedDischarge.ID(), test.queryImpetus)
+			}
+		} else if remaining == 0 {
+			t.Errorf("#%d: discharge %v should not have been in cache, queried with %v", i, out[0].ID(), test.queryImpetus)
+		}
+	}
+	if t.Failed() {
+		t.Logf("dArgs.ID():    %v", dArgs.ID())
+		t.Logf("dMethod.ID():  %v", dMethod.ID())
+		t.Logf("dServer.ID():  %v", dServer.ID())
+		t.Logf("dExpired.ID(): %v", dExpired.ID())
+	}
+}
+
+func mkDischarge(d security.Discharge, err error) security.Discharge {
+	if err != nil {
+		panic(err)
+	}
+	return d
+}
diff --git a/runtime/internal/rpc/errors.vdl b/runtime/internal/rpc/errors.vdl
new file mode 100644
index 0000000..8f24707
--- /dev/null
+++ b/runtime/internal/rpc/errors.vdl
@@ -0,0 +1,30 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+error (
+	// Internal errors.
+	badRequest(err error) {
+		"en": "failed to decode request: {err}",
+	}
+	badNumInputArgs(suffix, method string, numCalled, numWanted uint64) {
+		"en": "wrong number of input arguments for {suffix}.{method} (called with {numCalled} args, want {numWanted})",
+	}
+	badInputArg(suffix, method string, index uint64, err error) {
+		"en": "failed to decode request {suffix}.{method} arg #{index}: {err}",
+	}
+	badBlessings(err error) {
+		"en": "failed to decode blessings: {err}",
+	}
+	badBlessingsCache(err error) {
+		"en": "failed to find blessings in cache: {err}",
+	}
+	badDischarge(index uint64, err error) {
+		"en": "failed to decode discharge #{index}: {err}",
+	}
+	badAuth(suffix, method string, err error) {
+		"en": "not authorized to call {suffix}.{method}: {err}",
+	}
+)
diff --git a/runtime/internal/rpc/errors.vdl.go b/runtime/internal/rpc/errors.vdl.go
new file mode 100644
index 0000000..2c543d7
--- /dev/null
+++ b/runtime/internal/rpc/errors.vdl.go
@@ -0,0 +1,71 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated by the vanadium vdl tool.
+// Source: errors.vdl
+
+package rpc
+
+import (
+	// VDL system imports
+	"v.io/v23/context"
+	"v.io/v23/i18n"
+	"v.io/v23/verror"
+)
+
+var (
+	// Internal errors.
+	errBadRequest        = verror.Register("v.io/x/ref/runtime/internal/rpc.badRequest", verror.NoRetry, "{1:}{2:} failed to decode request: {3}")
+	errBadNumInputArgs   = verror.Register("v.io/x/ref/runtime/internal/rpc.badNumInputArgs", verror.NoRetry, "{1:}{2:} wrong number of input arguments for {3}.{4} (called with {5} args, want {6})")
+	errBadInputArg       = verror.Register("v.io/x/ref/runtime/internal/rpc.badInputArg", verror.NoRetry, "{1:}{2:} failed to decode request {3}.{4} arg #{5}: {6}")
+	errBadBlessings      = verror.Register("v.io/x/ref/runtime/internal/rpc.badBlessings", verror.NoRetry, "{1:}{2:} failed to decode blessings: {3}")
+	errBadBlessingsCache = verror.Register("v.io/x/ref/runtime/internal/rpc.badBlessingsCache", verror.NoRetry, "{1:}{2:} failed to find blessings in cache: {3}")
+	errBadDischarge      = verror.Register("v.io/x/ref/runtime/internal/rpc.badDischarge", verror.NoRetry, "{1:}{2:} failed to decode discharge #{3}: {4}")
+	errBadAuth           = verror.Register("v.io/x/ref/runtime/internal/rpc.badAuth", verror.NoRetry, "{1:}{2:} not authorized to call {3}.{4}: {5}")
+)
+
+func init() {
+	i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(errBadRequest.ID), "{1:}{2:} failed to decode request: {3}")
+	i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(errBadNumInputArgs.ID), "{1:}{2:} wrong number of input arguments for {3}.{4} (called with {5} args, want {6})")
+	i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(errBadInputArg.ID), "{1:}{2:} failed to decode request {3}.{4} arg #{5}: {6}")
+	i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(errBadBlessings.ID), "{1:}{2:} failed to decode blessings: {3}")
+	i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(errBadBlessingsCache.ID), "{1:}{2:} failed to find blessings in cache: {3}")
+	i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(errBadDischarge.ID), "{1:}{2:} failed to decode discharge #{3}: {4}")
+	i18n.Cat().SetWithBase(i18n.LangID("en"), i18n.MsgID(errBadAuth.ID), "{1:}{2:} not authorized to call {3}.{4}: {5}")
+}
+
+// newErrBadRequest returns an error with the errBadRequest ID.
+func newErrBadRequest(ctx *context.T, err error) error {
+	return verror.New(errBadRequest, ctx, err)
+}
+
+// newErrBadNumInputArgs returns an error with the errBadNumInputArgs ID.
+func newErrBadNumInputArgs(ctx *context.T, suffix string, method string, numCalled uint64, numWanted uint64) error {
+	return verror.New(errBadNumInputArgs, ctx, suffix, method, numCalled, numWanted)
+}
+
+// newErrBadInputArg returns an error with the errBadInputArg ID.
+func newErrBadInputArg(ctx *context.T, suffix string, method string, index uint64, err error) error {
+	return verror.New(errBadInputArg, ctx, suffix, method, index, err)
+}
+
+// newErrBadBlessings returns an error with the errBadBlessings ID.
+func newErrBadBlessings(ctx *context.T, err error) error {
+	return verror.New(errBadBlessings, ctx, err)
+}
+
+// newErrBadBlessingsCache returns an error with the errBadBlessingsCache ID.
+func newErrBadBlessingsCache(ctx *context.T, err error) error {
+	return verror.New(errBadBlessingsCache, ctx, err)
+}
+
+// newErrBadDischarge returns an error with the errBadDischarge ID.
+func newErrBadDischarge(ctx *context.T, index uint64, err error) error {
+	return verror.New(errBadDischarge, ctx, index, err)
+}
+
+// newErrBadAuth returns an error with the errBadAuth ID.
+func newErrBadAuth(ctx *context.T, suffix string, method string, err error) error {
+	return verror.New(errBadAuth, ctx, suffix, method, err)
+}
diff --git a/runtime/internal/rpc/full_test.go b/runtime/internal/rpc/full_test.go
new file mode 100644
index 0000000..1fd3ca8
--- /dev/null
+++ b/runtime/internal/rpc/full_test.go
@@ -0,0 +1,2087 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"sort"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"v.io/x/lib/netstate"
+	"v.io/x/lib/pubsub"
+	"v.io/x/lib/vlog"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/i18n"
+	"v.io/v23/namespace"
+	"v.io/v23/naming"
+	"v.io/v23/options"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+	"v.io/v23/security/access"
+	"v.io/v23/uniqueid"
+	"v.io/v23/vdl"
+	"v.io/v23/verror"
+	"v.io/v23/vtrace"
+
+	"v.io/x/ref/lib/stats"
+	"v.io/x/ref/runtime/internal/lib/publisher"
+	"v.io/x/ref/runtime/internal/lib/websocket"
+	inaming "v.io/x/ref/runtime/internal/naming"
+	_ "v.io/x/ref/runtime/internal/rpc/protocols/tcp"
+	_ "v.io/x/ref/runtime/internal/rpc/protocols/ws"
+	_ "v.io/x/ref/runtime/internal/rpc/protocols/wsh"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	imanager "v.io/x/ref/runtime/internal/rpc/stream/manager"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+	tnaming "v.io/x/ref/runtime/internal/testing/mocks/naming"
+	"v.io/x/ref/test/testutil"
+)
+
+//go:generate v23 test generate
+
+var (
+	errMethod     = verror.New(verror.ErrAborted, nil)
+	clock         = new(fakeClock)
+	listenAddrs   = rpc.ListenAddrs{{"tcp", "127.0.0.1:0"}}
+	listenWSAddrs = rpc.ListenAddrs{{"ws", "127.0.0.1:0"}, {"tcp", "127.0.0.1:0"}}
+	listenSpec    = rpc.ListenSpec{Addrs: listenAddrs}
+	listenWSSpec  = rpc.ListenSpec{Addrs: listenWSAddrs}
+)
+
+type fakeClock struct {
+	sync.Mutex
+	time int64
+}
+
+func (c *fakeClock) Now() int64 {
+	c.Lock()
+	defer c.Unlock()
+	return c.time
+}
+
+func (c *fakeClock) Advance(steps uint) {
+	c.Lock()
+	c.time += int64(steps)
+	c.Unlock()
+}
+
+func testInternalNewServerWithPubsub(ctx *context.T, streamMgr stream.Manager, ns namespace.T, settingsPublisher *pubsub.Publisher, settingsStreamName string, principal security.Principal, opts ...rpc.ServerOpt) (rpc.Server, error) {
+	client, err := InternalNewClient(streamMgr, ns)
+	if err != nil {
+		return nil, err
+	}
+	return InternalNewServer(ctx, streamMgr, ns, settingsPublisher, settingsStreamName, client, principal, opts...)
+}
+
+func testInternalNewServer(ctx *context.T, streamMgr stream.Manager, ns namespace.T, principal security.Principal, opts ...rpc.ServerOpt) (rpc.Server, error) {
+	return testInternalNewServerWithPubsub(ctx, streamMgr, ns, nil, "", principal, opts...)
+}
+
+type userType string
+
+type testServer struct{}
+
+func (*testServer) Closure(*context.T, rpc.ServerCall) error {
+	return nil
+}
+
+func (*testServer) Error(*context.T, rpc.ServerCall) error {
+	return errMethod
+}
+
+func (*testServer) Echo(_ *context.T, call rpc.ServerCall, arg string) (string, error) {
+	return fmt.Sprintf("method:%q,suffix:%q,arg:%q", "Echo", call.Suffix(), arg), nil
+}
+
+func (*testServer) EchoUser(_ *context.T, call rpc.ServerCall, arg string, u userType) (string, userType, error) {
+	return fmt.Sprintf("method:%q,suffix:%q,arg:%q", "EchoUser", call.Suffix(), arg), u, nil
+}
+
+func (*testServer) EchoLang(ctx *context.T, call rpc.ServerCall) (string, error) {
+	return string(i18n.GetLangID(ctx)), nil
+}
+
+func (*testServer) EchoBlessings(ctx *context.T, call rpc.ServerCall) (server, client string, _ error) {
+	local := security.LocalBlessingNames(ctx, call.Security())
+	remote, _ := security.RemoteBlessingNames(ctx, call.Security())
+	return fmt.Sprintf("%v", local), fmt.Sprintf("%v", remote), nil
+}
+
+func (*testServer) EchoGrantedBlessings(_ *context.T, call rpc.ServerCall, arg string) (result, blessing string, _ error) {
+	return arg, fmt.Sprintf("%v", call.GrantedBlessings()), nil
+}
+
+func (*testServer) EchoAndError(_ *context.T, call rpc.ServerCall, arg string) (string, error) {
+	result := fmt.Sprintf("method:%q,suffix:%q,arg:%q", "EchoAndError", call.Suffix(), arg)
+	if arg == "error" {
+		return result, errMethod
+	}
+	return result, nil
+}
+
+func (*testServer) Stream(_ *context.T, call rpc.StreamServerCall, arg string) (string, error) {
+	result := fmt.Sprintf("method:%q,suffix:%q,arg:%q", "Stream", call.Suffix(), arg)
+	var u userType
+	var err error
+	for err = call.Recv(&u); err == nil; err = call.Recv(&u) {
+		result += " " + string(u)
+		if err := call.Send(u); err != nil {
+			return "", err
+		}
+	}
+	if err == io.EOF {
+		err = nil
+	}
+	return result, err
+}
+
+func (*testServer) Unauthorized(*context.T, rpc.StreamServerCall) (string, error) {
+	return "UnauthorizedResult", nil
+}
+
+type testServerAuthorizer struct{}
+
+func (testServerAuthorizer) Authorize(ctx *context.T, call security.Call) error {
+	// Verify that the Call object seen by the authorizer
+	// has the necessary fields.
+	lb := call.LocalBlessings()
+	if lb.IsZero() {
+		return fmt.Errorf("testServerAuthorizer: Call object %v has no LocalBlessings", call)
+	}
+	if tpcavs := lb.ThirdPartyCaveats(); len(tpcavs) > 0 && call.LocalDischarges() == nil {
+		return fmt.Errorf("testServerAuthorizer: Call object %v has no LocalDischarges even when LocalBlessings have third-party caveats", call)
+	}
+	if call.LocalPrincipal() == nil {
+		return fmt.Errorf("testServerAuthorizer: Call object %v has no LocalPrincipal", call)
+	}
+	if call.Method() == "" {
+		return fmt.Errorf("testServerAuthorizer: Call object %v has no Method", call)
+	}
+	if call.LocalEndpoint() == nil {
+		return fmt.Errorf("testServerAuthorizer: Call object %v has no LocalEndpoint", call)
+	}
+	if call.RemoteEndpoint() == nil {
+		return fmt.Errorf("testServerAuthorizer: Call object %v has no RemoteEndpoint", call)
+	}
+
+	// Do not authorize the method "Unauthorized".
+	if call.Method() == "Unauthorized" {
+		return fmt.Errorf("testServerAuthorizer denied access")
+	}
+	return nil
+}
+
+type testServerDisp struct{ server interface{} }
+
+func (t testServerDisp) Lookup(suffix string) (interface{}, security.Authorizer, error) {
+	// If suffix is "nilAuth" we use default authorization, if it is "aclAuth" we
+	// use an AccessList-based authorizer, and otherwise we use the custom testServerAuthorizer.
+	var authorizer security.Authorizer
+	switch suffix {
+	case "discharger":
+		return &dischargeServer{}, testServerAuthorizer{}, nil
+	case "nilAuth":
+		authorizer = nil
+	case "aclAuth":
+		authorizer = &access.AccessList{
+			In: []security.BlessingPattern{"client", "server"},
+		}
+	default:
+		authorizer = testServerAuthorizer{}
+	}
+	return t.server, authorizer, nil
+}
+
+type dischargeServer struct {
+	mu     sync.Mutex
+	called bool
+}
+
+func (ds *dischargeServer) Discharge(ctx *context.T, call rpc.StreamServerCall, cav security.Caveat, _ security.DischargeImpetus) (security.Discharge, error) {
+	ds.mu.Lock()
+	ds.called = true
+	ds.mu.Unlock()
+	tp := cav.ThirdPartyDetails()
+	if tp == nil {
+		return security.Discharge{}, fmt.Errorf("discharger: %v does not represent a third-party caveat", cav)
+	}
+	if err := tp.Dischargeable(ctx, call.Security()); err != nil {
+		return security.Discharge{}, fmt.Errorf("third-party caveat %v cannot be discharged for this context: %v", cav, err)
+	}
+	// Add a fakeTimeCaveat to be able to control discharge expiration via 'clock'.
+	expiry, err := security.NewCaveat(fakeTimeCaveat, clock.Now())
+	if err != nil {
+		return security.Discharge{}, fmt.Errorf("failed to create an expiration on the discharge: %v", err)
+	}
+	return call.Security().LocalPrincipal().MintDischarge(cav, expiry)
+}
+
+func startServer(t *testing.T, ctx *context.T, principal security.Principal, sm stream.Manager, ns namespace.T, name string, disp rpc.Dispatcher, opts ...rpc.ServerOpt) (naming.Endpoint, rpc.Server) {
+	return startServerWS(t, ctx, principal, sm, ns, name, disp, noWebsocket, opts...)
+}
+
+func endpointsToStrings(eps []naming.Endpoint) []string {
+	r := make([]string, len(eps))
+	for i, e := range eps {
+		r[i] = e.String()
+	}
+	sort.Strings(r)
+	return r
+}
+
+func startServerWS(t *testing.T, ctx *context.T, principal security.Principal, sm stream.Manager, ns namespace.T, name string, disp rpc.Dispatcher, shouldUseWebsocket websocketMode, opts ...rpc.ServerOpt) (naming.Endpoint, rpc.Server) {
+	vlog.VI(1).Info("InternalNewServer")
+	ctx, _ = v23.WithPrincipal(ctx, principal)
+	server, err := testInternalNewServer(ctx, sm, ns, principal, opts...)
+	if err != nil {
+		t.Errorf("InternalNewServer failed: %v", err)
+	}
+	vlog.VI(1).Info("server.Listen")
+	spec := listenSpec
+	if shouldUseWebsocket {
+		spec = listenWSSpec
+	}
+	eps, err := server.Listen(spec)
+	if err != nil {
+		t.Errorf("server.Listen failed: %v", err)
+	}
+	vlog.VI(1).Info("server.Serve")
+	if err := server.ServeDispatcher(name, disp); err != nil {
+		t.Errorf("server.ServeDispatcher failed: %v", err)
+	}
+
+	status := server.Status()
+	if got, want := endpointsToStrings(status.Endpoints), endpointsToStrings(eps); !reflect.DeepEqual(got, want) {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+	names := status.Mounts.Names()
+	if len(names) != 1 || names[0] != name {
+		t.Fatalf("unexpected names: %v", names)
+	}
+	return eps[0], server
+}
+
+func loc(d int) string {
+	_, file, line, _ := runtime.Caller(d + 1)
+	return fmt.Sprintf("%s:%d", filepath.Base(file), line)
+}
+
+func verifyMount(t *testing.T, ctx *context.T, ns namespace.T, name string) []string {
+	me, err := ns.Resolve(ctx, name)
+	if err != nil {
+		t.Errorf("%s: %s not found in mounttable", loc(1), name)
+		return nil
+	}
+	return me.Names()
+}
+
+func verifyMountMissing(t *testing.T, ctx *context.T, ns namespace.T, name string) {
+	if me, err := ns.Resolve(ctx, name); err == nil {
+		names := me.Names()
+		t.Errorf("%s: %s not supposed to be found in mounttable; got %d servers instead: %v (%+v)", loc(1), name, len(names), names, me)
+	}
+}
+
+func stopServer(t *testing.T, ctx *context.T, server rpc.Server, ns namespace.T, name string) {
+	vlog.VI(1).Info("server.Stop")
+	newName := "should_appear_in_mt/server"
+	verifyMount(t, ctx, ns, name)
+
+	// Publish a second name.
+	if err := server.AddName(newName); err != nil {
+		t.Errorf("server.AddName failed: %v", err)
+	}
+	verifyMount(t, ctx, ns, newName)
+
+	if err := server.Stop(); err != nil {
+		t.Errorf("server.Stop failed: %v", err)
+	}
+
+	verifyMountMissing(t, ctx, ns, name)
+	verifyMountMissing(t, ctx, ns, newName)
+
+	// Check that we can no longer serve after Stop.
+	err := server.AddName("name doesn't matter")
+	if err == nil || verror.ErrorID(err) != verror.ErrBadState.ID {
+		t.Errorf("either no error, or a wrong error was returned: %v", err)
+	}
+	vlog.VI(1).Info("server.Stop DONE")
+}
+
+// fakeWSName creates a name containing an endpoint address that forces
+// the use of websockets. It does so by resolving the original name
+// and choosing the 'ws' endpoint from the set of endpoints returned.
+// It must return a name since it'll be passed to StartCall.
+func fakeWSName(ctx *context.T, ns namespace.T, name string) (string, error) {
+	// Find the ws endpoint and use that.
+	me, err := ns.Resolve(ctx, name)
+	if err != nil {
+		return "", err
+	}
+	names := me.Names()
+	for _, s := range names {
+		if strings.Contains(s, "@ws@") {
+			return s, nil
+		}
+	}
+	return "", fmt.Errorf("No ws endpoint found %v", names)
+}
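+
+// A minimal usage sketch, mirroring testRPC below (b is a bundle created by
+// createBundleWS with websockets enabled):
+//
+//	wsName, err := fakeWSName(ctx, b.ns, "mountpoint/server/suffix")
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	call, err := b.client.StartCall(ctx, wsName, "Echo", []interface{}{"foo"})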
+
+type bundle struct {
+	client rpc.Client
+	server rpc.Server
+	ep     naming.Endpoint
+	ns     namespace.T
+	sm     stream.Manager
+	name   string
+}
+
+func (b bundle) cleanup(t *testing.T, ctx *context.T) {
+	if b.server != nil {
+		stopServer(t, ctx, b.server, b.ns, b.name)
+	}
+	if b.client != nil {
+		b.client.Close()
+	}
+}
+
+func createBundle(t *testing.T, ctx *context.T, server security.Principal, ts interface{}) (b bundle) {
+	return createBundleWS(t, ctx, server, ts, noWebsocket)
+}
+
+func createBundleWS(t *testing.T, ctx *context.T, server security.Principal, ts interface{}, shouldUseWebsocket websocketMode) (b bundle) {
+	b.sm = imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+	b.ns = tnaming.NewSimpleNamespace()
+	b.name = "mountpoint/server"
+	if server != nil {
+		b.ep, b.server = startServerWS(t, ctx, server, b.sm, b.ns, b.name, testServerDisp{ts}, shouldUseWebsocket)
+	}
+	var err error
+	if b.client, err = InternalNewClient(b.sm, b.ns); err != nil {
+		t.Fatalf("InternalNewClient failed: %v", err)
+	}
+	return
+}
+
+func matchesErrorPattern(err error, id verror.IDAction, pattern string) bool {
+	if len(pattern) > 0 && err != nil && !strings.Contains(err.Error(), pattern) {
+		return false
+	}
+	if err == nil && id.ID == "" {
+		return true
+	}
+	return verror.ErrorID(err) == id.ID
+}
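+
+// A sketch of how the tests below use matchesErrorPattern: both the verror ID
+// and a fragment of the error message are checked, e.g.
+//
+//	if err, want := call(), "not authorized"; !matchesErrorPattern(err, verror.ErrNoAccess, want) {
+//		t.Errorf("Got error [%v] wanted to match pattern %q", err, want)
+//	}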
+
+func runServer(t *testing.T, ctx *context.T, ns namespace.T, principal security.Principal, name string, obj interface{}, opts ...rpc.ServerOpt) stream.Manager {
+	rid, err := naming.NewRoutingID()
+	if err != nil {
+		t.Fatal(err)
+	}
+	sm := imanager.InternalNew(rid)
+	server, err := testInternalNewServer(ctx, sm, ns, principal, opts...)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := server.Listen(listenSpec); err != nil {
+		t.Fatal(err)
+	}
+	if err := server.Serve(name, obj, security.AllowEveryone()); err != nil {
+		t.Fatal(err)
+	}
+	return sm
+}
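+
+// A typical invocation (see TestServerBlessingsOpt and TestNoDischargesOpt
+// below) runs a server for the duration of a test and shuts down its stream
+// manager on exit:
+//
+//	defer runServer(t, ctx, ns, pserver, "mountpoint/testServer", &testServer{}).Shutdown()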
+
+func TestMultipleCallsToServeAndName(t *testing.T) {
+	sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+	ns := tnaming.NewSimpleNamespace()
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	server, err := testInternalNewServer(ctx, sm, ns, testutil.NewPrincipal("server"))
+	if err != nil {
+		t.Errorf("InternalNewServer failed: %v", err)
+	}
+	_, err = server.Listen(listenSpec)
+	if err != nil {
+		t.Errorf("server.Listen failed: %v", err)
+	}
+
+	disp := &testServerDisp{&testServer{}}
+	if err := server.ServeDispatcher("mountpoint/server", disp); err != nil {
+		t.Errorf("server.ServeDispatcher failed: %v", err)
+	}
+
+	n1 := "mountpoint/server"
+	n2 := "should_appear_in_mt/server"
+	n3 := "should_appear_in_mt/server"
+	n4 := "should_not_appear_in_mt/server"
+
+	verifyMount(t, ctx, ns, n1)
+
+	if err := server.ServeDispatcher(n2, disp); err == nil {
+		t.Errorf("server.ServeDispatcher should have failed")
+	}
+
+	if err := server.Serve(n2, &testServer{}, nil); err == nil {
+		t.Errorf("server.Serve should have failed")
+	}
+
+	if err := server.AddName(n3); err != nil {
+		t.Errorf("server.AddName failed: %v", err)
+	}
+
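+	// Adding the same name a second time should also succeed (AddName is
+	// expected to be idempotent here).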
+	if err := server.AddName(n3); err != nil {
+		t.Errorf("server.AddName failed: %v", err)
+	}
+	verifyMount(t, ctx, ns, n2)
+	verifyMount(t, ctx, ns, n3)
+
+	server.RemoveName(n1)
+	verifyMountMissing(t, ctx, ns, n1)
+
+	server.RemoveName("some randome name")
+
+	if err := server.ServeDispatcher(n4, &testServerDisp{&testServer{}}); err == nil {
+		t.Errorf("server.ServeDispatcher should have failed")
+	}
+	verifyMountMissing(t, ctx, ns, n4)
+
+	if err := server.Stop(); err != nil {
+		t.Errorf("server.Stop failed: %v", err)
+	}
+
+	verifyMountMissing(t, ctx, ns, n1)
+	verifyMountMissing(t, ctx, ns, n2)
+	verifyMountMissing(t, ctx, ns, n3)
+}
+
+func TestRPCServerAuthorization(t *testing.T) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+
+	const (
+		publicKeyErr        = "not matched by server key"
+		missingDischargeErr = "missing discharge"
+		expiryErr           = "is after expiry"
+		allowedErr          = "do not match any allowed server patterns"
+	)
+	type O []rpc.CallOpt // shorthand
+	var (
+		pprovider, pclient, pserver = testutil.NewPrincipal("root"), testutil.NewPrincipal(), testutil.NewPrincipal()
+		pdischarger                 = pprovider
+		now                         = time.Now()
+		noErrID                     verror.IDAction
+
+		// Third-party caveats on blessings presented by server.
+		cavTPValid   = mkThirdPartyCaveat(pdischarger.PublicKey(), "mountpoint/dischargeserver", mkCaveat(security.NewExpiryCaveat(now.Add(24*time.Hour))))
+		cavTPExpired = mkThirdPartyCaveat(pdischarger.PublicKey(), "mountpoint/dischargeserver", mkCaveat(security.NewExpiryCaveat(now.Add(-1*time.Second))))
+
+		// Server blessings.
+		bServer          = bless(pprovider, pserver, "server")
+		bServerExpired   = bless(pprovider, pserver, "expiredserver", mkCaveat(security.NewExpiryCaveat(time.Now().Add(-1*time.Second))))
+		bServerTPValid   = bless(pprovider, pserver, "serverWithTPCaveats", cavTPValid)
+		bServerTPExpired = bless(pprovider, pserver, "serverWithExpiredTPCaveats", cavTPExpired)
+		bOther           = bless(pprovider, pserver, "other")
+		bTwoBlessings, _ = security.UnionOfBlessings(bServer, bOther)
+
+		mgr   = imanager.InternalNew(naming.FixedRoutingID(0x1111111))
+		ns    = tnaming.NewSimpleNamespace()
+		tests = []struct {
+			server security.Blessings // blessings presented by the server to the client.
+			name   string             // name provided by the client to StartCall
+			opts   O                  // options provided to StartCall.
+			errID  verror.IDAction
+			err    string
+		}{
+			// Client accepts talking to the server only if the
+			// server presents valid blessings (and discharges)
+			// consistent with the ones published in the endpoint.
+			{bServer, "mountpoint/server", nil, noErrID, ""},
+			{bServerTPValid, "mountpoint/server", nil, noErrID, ""},
+
+			// Client will not talk to a server that presents
+			// expired blessings or is missing discharges.
+			{bServerExpired, "mountpoint/server", nil, verror.ErrNotTrusted, expiryErr},
+			{bServerTPExpired, "mountpoint/server", nil, verror.ErrNotTrusted, missingDischargeErr},
+
+			// Testing the AllowedServersPolicy option.
+			{bServer, "mountpoint/server", O{options.AllowedServersPolicy{"otherroot"}}, verror.ErrNotTrusted, allowedErr},
+			{bServer, "mountpoint/server", O{options.AllowedServersPolicy{"root"}}, noErrID, ""},
+			{bTwoBlessings, "mountpoint/server", O{options.AllowedServersPolicy{"root/other"}}, noErrID, ""},
+
+			// Test the ServerPublicKey option.
+			{bOther, "mountpoint/server", O{options.SkipServerEndpointAuthorization{}, options.ServerPublicKey{bOther.PublicKey()}}, noErrID, ""},
+			{bOther, "mountpoint/server", O{options.SkipServerEndpointAuthorization{}, options.ServerPublicKey{testutil.NewPrincipal("irrelevant").PublicKey()}}, verror.ErrNotTrusted, publicKeyErr},
+
+			// Test the "paranoid" names, where the pattern is provided in the name.
+			{bServer, "__(root/server)/mountpoint/server", nil, noErrID, ""},
+			{bServer, "__(root/other)/mountpoint/server", nil, verror.ErrNotTrusted, allowedErr},
+			{bTwoBlessings, "__(root/server)/mountpoint/server", O{options.AllowedServersPolicy{"root/other"}}, noErrID, ""},
+		}
+	)
+	// Start the discharge server.
+	_, dischargeServer := startServer(t, ctx, pdischarger, mgr, ns, "mountpoint/dischargeserver", testutil.LeafDispatcher(&dischargeServer{}, security.AllowEveryone()))
+	defer stopServer(t, ctx, dischargeServer, ns, "mountpoint/dischargeserver")
+
+	// Make the client and server principals trust root certificates from
+	// pprovider.
+	pclient.AddToRoots(pprovider.BlessingStore().Default())
+	pserver.AddToRoots(pprovider.BlessingStore().Default())
+	// Set a blessing that the client is willing to share with servers
+	// (that are blessed by pprovider).
+	pclient.BlessingStore().Set(bless(pprovider, pclient, "client"), "root")
+
+	clientCtx, _ := v23.WithPrincipal(ctx, pclient)
+	client, err := InternalNewClient(mgr, ns)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer client.Close()
+
+	var server rpc.Server
+	stop := func() {
+		if server != nil {
+			stopServer(t, ctx, server, ns, "mountpoint/server")
+		}
+	}
+	defer stop()
+	for i, test := range tests {
+		stop() // Stop any server started in the previous test.
+		name := fmt.Sprintf("(#%d: Name:%q, Server:%q, opts:%v)", i, test.name, test.server, test.opts)
+		if err := pserver.BlessingStore().SetDefault(test.server); err != nil {
+			t.Fatalf("SetDefault failed on server's BlessingStore: %v", err)
+		}
+		if _, err := pserver.BlessingStore().Set(test.server, "root"); err != nil {
+			t.Fatalf("Set failed on server's BlessingStore: %v", err)
+		}
+		_, server = startServer(t, ctx, pserver, mgr, ns, "mountpoint/server", testServerDisp{&testServer{}})
+		clientCtx, cancel := context.WithCancel(clientCtx)
+		call, err := client.StartCall(clientCtx, test.name, "Method", nil, test.opts...)
+		if !matchesErrorPattern(err, test.errID, test.err) {
+			t.Errorf(`%s: client.StartCall: got error "%v", want to match "%v"`, name, err, test.err)
+		} else if call != nil {
+			blessings, proof := call.RemoteBlessings()
+			if proof.IsZero() {
+				t.Errorf("%s: Returned zero value for remote blessings", name)
+			}
+			// Currently all tests are configured so that the only
+			// blessings presented by the server that are
+			// recognized by the client match the pattern
+			// "root".
+			if len(blessings) < 1 || !security.BlessingPattern("root").MatchedBy(blessings...) {
+				t.Errorf("%s: Client sees server as %v, expected a single blessing matching root", name, blessings)
+			}
+		}
+		cancel()
+	}
+}
+
+func TestServerManInTheMiddleAttack(t *testing.T) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	// Test scenario: A server mounts itself, but then some other service
+	// somehow "takes over" the network endpoint (a naughty router
+	// perhaps), thus trying to steal traffic.
+	var (
+		pclient   = testutil.NewPrincipal("client")
+		pserver   = testutil.NewPrincipal("server")
+		pattacker = testutil.NewPrincipal("attacker")
+	)
+	// Client recognizes both the server's and the attacker's blessings.
+	// (Though it doesn't need to do the latter for the purposes of this
+	// test.)
+	pclient.AddToRoots(pserver.BlessingStore().Default())
+	pclient.AddToRoots(pattacker.BlessingStore().Default())
+
+	// Start up the attacker's server.
+	attacker, err := testInternalNewServer(
+		ctx,
+		imanager.InternalNew(naming.FixedRoutingID(0xaaaaaaaaaaaaaaaa)),
+		// (To prevent the attacker from legitimately mounting on the
+		// namespace that the client will use, provide it with a
+		// different namespace).
+		tnaming.NewSimpleNamespace(),
+		pattacker)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := attacker.Listen(listenSpec); err != nil {
+		t.Fatal(err)
+	}
+	if err := attacker.ServeDispatcher("mountpoint/server", testServerDisp{&testServer{}}); err != nil {
+		t.Fatal(err)
+	}
+	var ep naming.Endpoint
+	if status := attacker.Status(); len(status.Endpoints) < 1 {
+		t.Fatalf("Attacker server does not have an endpoint: %+v", status)
+	} else {
+		ep = status.Endpoints[0]
+	}
+
+	// The legitimate server would have mounted the same endpoint on the
+	// namespace, but with different blessings.
+	ns := tnaming.NewSimpleNamespace()
+	ep.(*inaming.Endpoint).Blessings = []string{"server"}
+	if err := ns.Mount(ctx, "mountpoint/server", ep.Name(), time.Hour); err != nil {
+		t.Fatal(err)
+	}
+
+	// The RPC call should fail because the blessings presented by the
+	// (attacker's) server are not consistent with the ones registered in
+	// the mounttable trusted by the client.
+	client, err := InternalNewClient(
+		imanager.InternalNew(naming.FixedRoutingID(0xcccccccccccccccc)),
+		ns)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer client.Close()
+	ctx, _ = v23.WithPrincipal(ctx, pclient)
+	if _, err := client.StartCall(ctx, "mountpoint/server", "Closure", nil); verror.ErrorID(err) != verror.ErrNotTrusted.ID {
+		t.Errorf("Got error %v (errorid=%v), want errorid=%v", err, verror.ErrorID(err), verror.ErrNotTrusted.ID)
+	}
+	// But the RPC should succeed if the client explicitly
+	// decided to skip server authorization.
+	if _, err := client.StartCall(ctx, "mountpoint/server", "Closure", nil, options.SkipServerEndpointAuthorization{}); err != nil {
+		t.Errorf("Unexpected error(%v) when skipping server authorization", err)
+	}
+}
+
+type websocketMode bool
+type closeSendMode bool
+
+const (
+	useWebsocket websocketMode = true
+	noWebsocket  websocketMode = false
+
+	closeSend   closeSendMode = true
+	noCloseSend closeSendMode = false
+)
+
+func TestRPC(t *testing.T) {
+	testRPC(t, closeSend, noWebsocket)
+}
+
+func TestRPCWithWebsocket(t *testing.T) {
+	testRPC(t, closeSend, useWebsocket)
+}
+
+// TestRPCCloseSendOnFinish tests that Finish informs the server that no more
+// inputs will be sent by the client if CloseSend has not already done so.
+func TestRPCCloseSendOnFinish(t *testing.T) {
+	testRPC(t, noCloseSend, noWebsocket)
+}
+
+func TestRPCCloseSendOnFinishWithWebsocket(t *testing.T) {
+	testRPC(t, noCloseSend, useWebsocket)
+}
+
+func testRPC(t *testing.T, shouldCloseSend closeSendMode, shouldUseWebsocket websocketMode) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	type v []interface{}
+	type testcase struct {
+		name       string
+		method     string
+		args       v
+		streamArgs v
+		startErr   error
+		results    v
+		finishErr  error
+	}
+	var (
+		tests = []testcase{
+			{"mountpoint/server/suffix", "Closure", nil, nil, nil, nil, nil},
+			{"mountpoint/server/suffix", "Error", nil, nil, nil, nil, errMethod},
+
+			{"mountpoint/server/suffix", "Echo", v{"foo"}, nil, nil, v{`method:"Echo",suffix:"suffix",arg:"foo"`}, nil},
+			{"mountpoint/server/suffix/abc", "Echo", v{"bar"}, nil, nil, v{`method:"Echo",suffix:"suffix/abc",arg:"bar"`}, nil},
+
+			{"mountpoint/server/suffix", "EchoUser", v{"foo", userType("bar")}, nil, nil, v{`method:"EchoUser",suffix:"suffix",arg:"foo"`, userType("bar")}, nil},
+			{"mountpoint/server/suffix/abc", "EchoUser", v{"baz", userType("bla")}, nil, nil, v{`method:"EchoUser",suffix:"suffix/abc",arg:"baz"`, userType("bla")}, nil},
+			{"mountpoint/server/suffix", "Stream", v{"foo"}, v{userType("bar"), userType("baz")}, nil, v{`method:"Stream",suffix:"suffix",arg:"foo" bar baz`}, nil},
+			{"mountpoint/server/suffix/abc", "Stream", v{"123"}, v{userType("456"), userType("789")}, nil, v{`method:"Stream",suffix:"suffix/abc",arg:"123" 456 789`}, nil},
+			{"mountpoint/server/suffix", "EchoBlessings", nil, nil, nil, v{"[server]", "[client]"}, nil},
+			{"mountpoint/server/suffix", "EchoAndError", v{"bugs bunny"}, nil, nil, v{`method:"EchoAndError",suffix:"suffix",arg:"bugs bunny"`}, nil},
+			{"mountpoint/server/suffix", "EchoAndError", v{"error"}, nil, nil, nil, errMethod},
+			{"mountpoint/server/suffix", "EchoLang", nil, nil, nil, v{"foolang"}, nil},
+		}
+		name = func(t testcase) string {
+			return fmt.Sprintf("%s.%s(%v)", t.name, t.method, t.args)
+		}
+
+		pclient, pserver = newClientServerPrincipals()
+		b                = createBundleWS(t, ctx, pserver, &testServer{}, shouldUseWebsocket)
+	)
+	defer b.cleanup(t, ctx)
+	ctx, _ = v23.WithPrincipal(ctx, pclient)
+	ctx = i18n.WithLangID(ctx, "foolang")
+	for _, test := range tests {
+		vlog.VI(1).Infof("%s client.StartCall", name(test))
+		vname := test.name
+		if shouldUseWebsocket {
+			var err error
+			vname, err = fakeWSName(ctx, b.ns, vname)
+			if err != nil && err != test.startErr {
+				t.Errorf(`%s ns.Resolve got error "%v", want "%v"`, name(test), err, test.startErr)
+				continue
+			}
+		}
+		call, err := b.client.StartCall(ctx, vname, test.method, test.args)
+		if err != test.startErr {
+			t.Errorf(`%s client.StartCall got error "%v", want "%v"`, name(test), err, test.startErr)
+			continue
+		}
+		for _, sarg := range test.streamArgs {
+			vlog.VI(1).Infof("%s client.Send(%v)", name(test), sarg)
+			if err := call.Send(sarg); err != nil {
+				t.Errorf(`%s call.Send(%v) got unexpected error "%v"`, name(test), sarg, err)
+			}
+			var u userType
+			if err := call.Recv(&u); err != nil {
+				t.Errorf(`%s call.Recv(%v) got unexpected error "%v"`, name(test), sarg, err)
+			}
+			if !reflect.DeepEqual(u, sarg) {
+				t.Errorf("%s call.Recv got value %v, want %v", name(test), u, sarg)
+			}
+		}
+		if shouldCloseSend {
+			vlog.VI(1).Infof("%s call.CloseSend", name(test))
+			// When the method does not involve streaming
+			// arguments, the server gets all the arguments in
+			// StartCall and then sends a response without
+			// (unnecessarily) waiting for a CloseSend message from
+			// the client.  If the server responds before the
+			// CloseSend call is made at the client, the CloseSend
+			// call will fail.  Thus, only check for errors on
+			// CloseSend if there are streaming arguments to begin
+			// with (i.e., only if the server is expected to wait
+			// for the CloseSend notification).
+			if err := call.CloseSend(); err != nil && len(test.streamArgs) > 0 {
+				t.Errorf(`%s call.CloseSend got unexpected error "%v"`, name(test), err)
+			}
+		}
+		vlog.VI(1).Infof("%s client.Finish", name(test))
+		results := makeResultPtrs(test.results)
+		err = call.Finish(results...)
+		if got, want := err, test.finishErr; (got == nil) != (want == nil) {
+			t.Errorf(`%s call.Finish got error "%v", want "%v"`, name(test), got, want)
+		} else if want != nil && verror.ErrorID(got) != verror.ErrorID(want) {
+			t.Errorf(`%s call.Finish got error "%v", want "%v"`, name(test), got, want)
+		}
+		checkResultPtrs(t, name(test), results, test.results)
+	}
+}
+
+func TestMultipleFinish(t *testing.T) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	type v []interface{}
+	var (
+		pclient, pserver = newClientServerPrincipals()
+		b                = createBundle(t, ctx, pserver, &testServer{})
+	)
+	defer b.cleanup(t, ctx)
+	ctx, _ = v23.WithPrincipal(ctx, pclient)
+	call, err := b.client.StartCall(ctx, "mountpoint/server/suffix", "Echo", v{"foo"})
+	if err != nil {
+		t.Fatalf(`client.StartCall got error "%v"`, err)
+	}
+	var results string
+	err = call.Finish(&results)
+	if err != nil {
+		t.Fatalf(`call.Finish got error "%v"`, err)
+	}
+	// Calling Finish a second time should result in a useful error.
+	if err = call.Finish(&results); !matchesErrorPattern(err, verror.ErrBadState, "Finish has already been called") {
+		t.Fatalf(`got "%v", want "%v"`, err, verror.ErrBadState)
+	}
+}
+
+// granter implements rpc.Granter.
+//
+// It returns the specified (security.Blessings, error) pair if either the
+// blessing or the error is specified. Otherwise it returns a blessing
+// derived from the local blessings of the current call.
+type granter struct {
+	rpc.CallOpt
+	b   security.Blessings
+	err error
+}
+
+func (g granter) Grant(ctx *context.T, call security.Call) (security.Blessings, error) {
+	if !g.b.IsZero() || g.err != nil {
+		return g.b, g.err
+	}
+	return call.LocalPrincipal().Bless(call.RemoteBlessings().PublicKey(), call.LocalBlessings(), "blessed", security.UnconstrainedUse())
+}
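+
+// A minimal sketch of how TestGranter below passes a granter to a call: since
+// granter embeds rpc.CallOpt, it can be handed directly to StartCall:
+//
+//	call, err := b.client.StartCall(ctx, "mountpoint/server/suffix",
+//		"EchoGrantedBlessings", []interface{}{"argument"}, granter{})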
+
+func TestGranter(t *testing.T) {
+	var (
+		pclient, pserver = newClientServerPrincipals()
+		ctx, shutdown    = initForTest()
+		b                = createBundle(t, ctx, pserver, &testServer{})
+	)
+	defer shutdown()
+	defer b.cleanup(t, ctx)
+
+	ctx, _ = v23.WithPrincipal(ctx, pclient)
+	tests := []struct {
+		granter                       rpc.Granter
+		startErrID, finishErrID       verror.IDAction
+		blessing, starterr, finisherr string
+	}{
+		{blessing: ""},
+		{granter: granter{b: bless(pclient, pserver, "blessed")}, blessing: "client/blessed"},
+		{granter: granter{err: errors.New("hell no")}, startErrID: verror.ErrNotTrusted, starterr: "hell no"},
+		{granter: granter{}, blessing: "client/blessed"},
+		{granter: granter{b: pclient.BlessingStore().Default()}, finishErrID: verror.ErrNoAccess, finisherr: "blessing granted not bound to this server"},
+	}
+	for i, test := range tests {
+		call, err := b.client.StartCall(ctx, "mountpoint/server/suffix", "EchoGrantedBlessings", []interface{}{"argument"}, test.granter)
+		if !matchesErrorPattern(err, test.startErrID, test.starterr) {
+			t.Errorf("%d: %+v: StartCall returned error %v", i, test, err)
+		}
+		if err != nil {
+			continue
+		}
+		var result, blessing string
+		if err = call.Finish(&result, &blessing); !matchesErrorPattern(err, test.finishErrID, test.finisherr) {
+			t.Errorf("%+v: Finish returned error %v", test, err)
+		}
+		if err != nil {
+			continue
+		}
+		if result != "argument" || blessing != test.blessing {
+			t.Errorf("%+v: Got (%q, %q)", test, result, blessing)
+		}
+	}
+}
+
+// dischargeTestServer implements the discharge service. It always fails to
+// issue a discharge, but records the impetus and traceid of the RPC call.
+type dischargeTestServer struct {
+	p       security.Principal
+	impetus []security.DischargeImpetus
+	traceid []uniqueid.Id
+}
+
+func (s *dischargeTestServer) Discharge(ctx *context.T, _ rpc.ServerCall, cav security.Caveat, impetus security.DischargeImpetus) (security.Discharge, error) {
+	s.impetus = append(s.impetus, impetus)
+	s.traceid = append(s.traceid, vtrace.GetSpan(ctx).Trace())
+	return security.Discharge{}, fmt.Errorf("discharges not issued")
+}
+
+func (s *dischargeTestServer) Release() ([]security.DischargeImpetus, []uniqueid.Id) {
+	impetus, traceid := s.impetus, s.traceid
+	s.impetus, s.traceid = nil, nil
+	return impetus, traceid
+}
+
+func TestDischargeImpetusAndContextPropagation(t *testing.T) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	var (
+		pserver     = testutil.NewPrincipal("server")
+		pdischarger = testutil.NewPrincipal("discharger")
+		pclient     = testutil.NewPrincipal("client")
+		sm          = imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+		ns          = tnaming.NewSimpleNamespace()
+
+		// Set up the client so that it shares a blessing carrying a third-party caveat with the server.
+		setClientBlessings = func(req security.ThirdPartyRequirements) security.Principal {
+			cav, err := security.NewPublicKeyCaveat(pdischarger.PublicKey(), "mountpoint/discharger", req, security.UnconstrainedUse())
+			if err != nil {
+				t.Fatalf("Failed to create ThirdPartyCaveat(%+v): %v", req, err)
+			}
+			b, err := pclient.BlessSelf("client_for_server", cav)
+			if err != nil {
+				t.Fatalf("BlessSelf failed: %v", err)
+			}
+			pclient.BlessingStore().Set(b, "server")
+			return pclient
+		}
+	)
+	// Initialize the client principal.
+	// It trusts both the application server and the discharger.
+	pclient.AddToRoots(pserver.BlessingStore().Default())
+	pclient.AddToRoots(pdischarger.BlessingStore().Default())
+
+	// Set up the discharge server.
+	var tester dischargeTestServer
+	dischargeServer, err := testInternalNewServer(ctx, sm, ns, pdischarger)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer dischargeServer.Stop()
+	if _, err := dischargeServer.Listen(listenSpec); err != nil {
+		t.Fatal(err)
+	}
+	if err := dischargeServer.Serve("mountpoint/discharger", &tester, &testServerAuthorizer{}); err != nil {
+		t.Fatal(err)
+	}
+
+	// Set up the application server.
+	appServer, err := testInternalNewServer(ctx, sm, ns, pserver)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer appServer.Stop()
+	eps, err := appServer.Listen(listenSpec)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// TODO(bjornick,cnicolaou,ashankar): This is a hack to work around the
+	// fact that a single Listen on the "tcp" protocol followed by a call
+	// to Serve(<name>, ...) transparently creates two endpoints (one for
+	// tcp, one for websockets) and maps both to <name> via a mount.
+	// Because all endpoints to a name are tried in parallel, this
+	// transparency makes this test hard to follow (many discharge fetch
+	// attempts are made - one for VIF authentication, one for VC
+	// authentication and one for the actual RPC - and having them be made
+	// to two different endpoints in parallel leads to a lot of
+	// non-determinism). The last plan of record known by the author of
+	// this comment was to stop this sly creation of two endpoints and
+	// require that they be done explicitly. When that happens, this hack
+	// can go away, but till then, this workaround allows the test to be
+	// more predictable by ensuring there is only one VIF/VC/Flow to the
+	// server.
+	object := naming.JoinAddressName(eps[0].String(), "object") // instead of "mountpoint/object"
+	if err := appServer.Serve("mountpoint/object", &testServer{}, &testServerAuthorizer{}); err != nil {
+		t.Fatal(err)
+	}
+	tests := []struct {
+		Requirements security.ThirdPartyRequirements
+		Impetus      security.DischargeImpetus
+	}{
+		{ // No requirements, no impetus
+			Requirements: security.ThirdPartyRequirements{},
+			Impetus:      security.DischargeImpetus{},
+		},
+		{ // Require everything
+			Requirements: security.ThirdPartyRequirements{ReportServer: true, ReportMethod: true, ReportArguments: true},
+			Impetus:      security.DischargeImpetus{Server: []security.BlessingPattern{"server"}, Method: "Method", Arguments: []*vdl.Value{vdl.StringValue("argument")}},
+		},
+		{ // Require only the method name
+			Requirements: security.ThirdPartyRequirements{ReportMethod: true},
+			Impetus:      security.DischargeImpetus{Method: "Method"},
+		},
+	}
+
+	for _, test := range tests {
+		pclient := setClientBlessings(test.Requirements)
+		ctx, _ = v23.WithPrincipal(ctx, pclient)
+		client, err := InternalNewClient(sm, ns)
+		if err != nil {
+			t.Fatalf("InternalNewClient(%+v) failed: %v", test.Requirements, err)
+		}
+		defer client.Close()
+		tid := vtrace.GetSpan(ctx).Trace()
+		// StartCall should fetch the discharge; there is no need to finish the RPC for this test.
+		if _, err := client.StartCall(ctx, object, "Method", []interface{}{"argument"}); err != nil {
+			t.Errorf("StartCall(%+v) failed: %v", test.Requirements, err)
+			continue
+		}
+		impetus, traceid := tester.Release()
+		// There should have been exactly 1 attempt to fetch discharges when making
+		// the RPC to the remote object.
+		if len(impetus) != 1 || len(traceid) != 1 {
+			t.Errorf("Test %+v: Got (%d, %d) (#impetus, #traceid), wanted exactly one", test.Requirements, len(impetus), len(traceid))
+			continue
+		}
+		// VC creation does not have any "impetus"; it is established without
+		// knowledge of the context of the RPC. So ignore that.
+		//
+		// TODO(ashankar): Should the impetus of the RPC that initiated the
+		// VIF/VC creation be propagated?
+		if got, want := impetus[len(impetus)-1], test.Impetus; !reflect.DeepEqual(got, want) {
+			t.Errorf("Test %+v: Got impetus %v, want %v", test.Requirements, got, want)
+		}
+		// But the context used for all of this should be the same
+		// (thereby allowing debug traces to link VIF/VC creation with
+		// the RPC that initiated them).
+		for idx, got := range traceid {
+			if !reflect.DeepEqual(got, tid) {
+				t.Errorf("Test %+v: %d - Got trace id %q, want %q", test.Requirements, idx, hex.EncodeToString(got[:]), hex.EncodeToString(tid[:]))
+			}
+		}
+	}
+}
+
+func TestRPCClientAuthorization(t *testing.T) {
+	type v []interface{}
+	var (
+		// Principals
+		pclient, pserver = testutil.NewPrincipal("client"), testutil.NewPrincipal("server")
+		pdischarger      = testutil.NewPrincipal("discharger")
+
+		now = time.Now()
+
+		serverName          = "mountpoint/server"
+		dischargeServerName = "mountpoint/dischargeserver"
+
+		// Caveats on blessings to the client: First-party caveats
+		cavOnlyEcho = mkCaveat(security.NewMethodCaveat("Echo"))
+		cavExpired  = mkCaveat(security.NewExpiryCaveat(now.Add(-1 * time.Second)))
+		// Caveats on blessings to the client: Third-party caveats
+		cavTPValid   = mkThirdPartyCaveat(pdischarger.PublicKey(), dischargeServerName, mkCaveat(security.NewExpiryCaveat(now.Add(24*time.Hour))))
+		cavTPExpired = mkThirdPartyCaveat(pdischarger.PublicKey(), dischargeServerName, mkCaveat(security.NewExpiryCaveat(now.Add(-1*time.Second))))
+
+		// Client blessings that will be tested.
+		bServerClientOnlyEcho  = bless(pserver, pclient, "onlyecho", cavOnlyEcho)
+		bServerClientExpired   = bless(pserver, pclient, "expired", cavExpired)
+		bServerClientTPValid   = bless(pserver, pclient, "dischargeable_third_party_caveat", cavTPValid)
+		bServerClientTPExpired = bless(pserver, pclient, "expired_third_party_caveat", cavTPExpired)
+		bClient                = pclient.BlessingStore().Default()
+		bRandom, _             = pclient.BlessSelf("random")
+
+		mgr   = imanager.InternalNew(naming.FixedRoutingID(0x1111111))
+		ns    = tnaming.NewSimpleNamespace()
+		tests = []struct {
+			blessings  security.Blessings // Blessings used by the client
+			name       string             // object name on which the method is invoked
+			method     string
+			args       v
+			results    v
+			authorized bool // Whether or not the RPC should be authorized by the server.
+		}{
+			// There are three different authorization policies (security.Authorizer implementations)
+			// used by the server, depending on the suffix (see testServerDisp.Lookup):
+			// - nilAuth suffix: the default authorization policy (only delegates of or delegators of the server can call RPCs)
+			// - aclAuth suffix: the AccessList only allows blessings matching the patterns "server" or "client"
+			// - other suffixes: testServerAuthorizer allows any principal to call any method except "Unauthorized"
+
+			// Expired blessings should fail nilAuth and aclAuth (which care about names), but should succeed on
+			// other suffixes (which allow all blessings), unless calling the Unauthorized method.
+			{bServerClientExpired, "mountpoint/server/nilAuth", "Echo", v{"foo"}, v{""}, false},
+			{bServerClientExpired, "mountpoint/server/aclAuth", "Echo", v{"foo"}, v{""}, false},
+			{bServerClientExpired, "mountpoint/server/suffix", "Echo", v{"foo"}, v{""}, true},
+			{bServerClientExpired, "mountpoint/server/suffix", "Unauthorized", nil, v{""}, false},
+
+			// Same for blessings that should fail to obtain a discharge for the third party caveat.
+			{bServerClientTPExpired, "mountpoint/server/nilAuth", "Echo", v{"foo"}, v{""}, false},
+			{bServerClientTPExpired, "mountpoint/server/aclAuth", "Echo", v{"foo"}, v{""}, false},
+			{bServerClientTPExpired, "mountpoint/server/suffix", "Echo", v{"foo"}, v{""}, true},
+			{bServerClientTPExpired, "mountpoint/server/suffix", "Unauthorized", nil, v{""}, false},
+
+			// The "server/client" blessing (with MethodCaveat("Echo")) should satisfy all authorization policies
+			// when "Echo" is called.
+			{bServerClientOnlyEcho, "mountpoint/server/nilAuth", "Echo", v{"foo"}, v{""}, true},
+			{bServerClientOnlyEcho, "mountpoint/server/aclAuth", "Echo", v{"foo"}, v{""}, true},
+			{bServerClientOnlyEcho, "mountpoint/server/suffix", "Echo", v{"foo"}, v{""}, true},
+
+			// The "server/client" blessing (with MethodCaveat("Echo")) should satisfy no authorization policy
+			// when any other method is invoked, except for the testServerAuthorizer policy (which will
+			// not recognize the blessing "server/onlyecho", but it would authorize anyone anyway).
+			{bServerClientOnlyEcho, "mountpoint/server/nilAuth", "Closure", nil, nil, false},
+			{bServerClientOnlyEcho, "mountpoint/server/aclAuth", "Closure", nil, nil, false},
+			{bServerClientOnlyEcho, "mountpoint/server/suffix", "Closure", nil, nil, true},
+
+			// The "client" blessing doesn't satisfy the default authorization policy, but does satisfy
+			// the AccessList and the testServerAuthorizer policy.
+			{bClient, "mountpoint/server/nilAuth", "Echo", v{"foo"}, v{""}, false},
+			{bClient, "mountpoint/server/aclAuth", "Echo", v{"foo"}, v{""}, true},
+			{bClient, "mountpoint/server/suffix", "Echo", v{"foo"}, v{""}, true},
+			{bClient, "mountpoint/server/suffix", "Unauthorized", nil, v{""}, false},
+
+			// The "random" blessing does not satisfy either the default policy or the AccessList, but does
+			// satisfy testServerAuthorizer.
+			{bRandom, "mountpoint/server/nilAuth", "Echo", v{"foo"}, v{""}, false},
+			{bRandom, "mountpoint/server/aclAuth", "Echo", v{"foo"}, v{""}, false},
+			{bRandom, "mountpoint/server/suffix", "Echo", v{"foo"}, v{""}, true},
+			{bRandom, "mountpoint/server/suffix", "Unauthorized", nil, v{""}, false},
+
+			// The "server/dischargeable_third_party_caveat" blessing satisfies all policies.
+			// (the discharges should be fetched).
+			{bServerClientTPValid, "mountpoint/server/nilAuth", "Echo", v{"foo"}, v{""}, true},
+			{bServerClientTPValid, "mountpoint/server/aclAuth", "Echo", v{"foo"}, v{""}, true},
+			{bServerClientTPValid, "mountpoint/server/suffix", "Echo", v{"foo"}, v{""}, true},
+			{bServerClientTPValid, "mountpoint/server/suffix", "Unauthorized", nil, v{""}, false},
+		}
+	)
+
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	// Start the main server.
+	_, server := startServer(t, ctx, pserver, mgr, ns, serverName, testServerDisp{&testServer{}})
+	defer stopServer(t, ctx, server, ns, serverName)
+
+	// Start the discharge server.
+	_, dischargeServer := startServer(t, ctx, pdischarger, mgr, ns, dischargeServerName, testutil.LeafDispatcher(&dischargeServer{}, security.AllowEveryone()))
+	defer stopServer(t, ctx, dischargeServer, ns, dischargeServerName)
+
+	// The server should recognize the client principal as an authority on "client" and "random" blessings.
+	pserver.AddToRoots(bClient)
+	pserver.AddToRoots(bRandom)
+	// And the client needs to recognize the server's and discharger's blessings to decide which of its
+	// own blessings to share.
+	pclient.AddToRoots(pserver.BlessingStore().Default())
+	pclient.AddToRoots(pdischarger.BlessingStore().Default())
+	// Set a blessing on the client's blessing store to be presented to the discharge server.
+	pclient.BlessingStore().Set(pclient.BlessingStore().Default(), "discharger")
+	// testutil.NewPrincipal sets up a principal that shares blessings with all servers; undo that.
+	pclient.BlessingStore().Set(security.Blessings{}, security.AllPrincipals)
+
+	for i, test := range tests {
+		name := fmt.Sprintf("#%d: %q.%s(%v) by %v", i, test.name, test.method, test.args, test.blessings)
+		client, err := InternalNewClient(mgr, ns)
+		if err != nil {
+			t.Fatalf("InternalNewClient failed: %v", err)
+		}
+		defer client.Close()
+
+		pclient.BlessingStore().Set(test.blessings, "server")
+		ctx, _ := v23.WithPrincipal(ctx, pclient)
+		err = client.Call(ctx, test.name, test.method, test.args, makeResultPtrs(test.results))
+		if err != nil && test.authorized {
+			t.Errorf(`%s client.Call got error: "%v", wanted the RPC to succeed`, name, err)
+		} else if err == nil && !test.authorized {
+			t.Errorf("%s call.Finish succeeded, expected authorization failure", name)
+		} else if !test.authorized && verror.ErrorID(err) != verror.ErrNoAccess.ID {
+			t.Errorf("%s. call.Finish returned error %v(%v), wanted %v", name, verror.ErrorID(verror.Convert(verror.ErrNoAccess, nil, err)), err, verror.ErrNoAccess)
+		}
+	}
+}
+
+// singleBlessingStore implements security.BlessingStore. It is a
+// BlessingStore that marks the last blessing that was set on it as
+// shareable with any peer. It does not care about the public key that
+// the blessing being set is bound to.
+type singleBlessingStore struct {
+	b security.Blessings
+}
+
+func (s *singleBlessingStore) Set(b security.Blessings, _ security.BlessingPattern) (security.Blessings, error) {
+	s.b = b
+	return security.Blessings{}, nil
+}
+func (s *singleBlessingStore) ForPeer(...string) security.Blessings {
+	return s.b
+}
+func (*singleBlessingStore) SetDefault(b security.Blessings) error {
+	return nil
+}
+func (*singleBlessingStore) Default() security.Blessings {
+	return security.Blessings{}
+}
+func (*singleBlessingStore) PublicKey() security.PublicKey {
+	return nil
+}
+func (*singleBlessingStore) DebugString() string {
+	return ""
+}
+func (*singleBlessingStore) PeerBlessings() map[security.BlessingPattern]security.Blessings {
+	return nil
+}
+
+// singleBlessingPrincipal implements security.Principal. It is a wrapper over
+// a security.Principal that intercepts all invocations on the
+// principal's BlessingStore and serves them via a singleBlessingStore.
+type singleBlessingPrincipal struct {
+	security.Principal
+	b singleBlessingStore
+}
+
+func (p *singleBlessingPrincipal) BlessingStore() security.BlessingStore {
+	return &p.b
+}
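+
+// A minimal sketch, as used by TestRPCClientBlessingsPublicKey below: wrap a
+// test principal so that whatever blessing was last Set is shared with every
+// peer, regardless of the pattern supplied:
+//
+//	pclient := &singleBlessingPrincipal{Principal: testutil.NewPrincipal("client")}
+//	pclient.BlessingStore().Set(bclient, "root") // bclient is now shared with all peers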
+
+func TestRPCClientBlessingsPublicKey(t *testing.T) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	var (
+		pprovider, pserver = testutil.NewPrincipal("root"), testutil.NewPrincipal("server")
+		pclient            = &singleBlessingPrincipal{Principal: testutil.NewPrincipal("client")}
+
+		bserver = bless(pprovider, pserver, "server")
+		bclient = bless(pprovider, pclient, "client")
+		bvictim = bless(pprovider, testutil.NewPrincipal("victim"), "victim")
+	)
+	// Make the client and server trust blessings from pprovider.
+	pclient.AddToRoots(pprovider.BlessingStore().Default())
+	pserver.AddToRoots(pprovider.BlessingStore().Default())
+
+	// Make the server present bserver to all clients and start the server.
+	pserver.BlessingStore().SetDefault(bserver)
+	b := createBundle(t, ctx, pserver, &testServer{})
+	defer b.cleanup(t, ctx)
+
+	ctx, _ = v23.WithPrincipal(ctx, pclient)
+	tests := []struct {
+		blessings security.Blessings
+		errID     verror.IDAction
+		err       string
+	}{
+		{blessings: bclient},
+		// The server disallows clients from authenticating with blessings not
+		// bound to the client principal's public key.
+		{blessings: bvictim, errID: verror.ErrNoAccess, err: "bound to a different public key"},
+		{blessings: bserver, errID: verror.ErrNoAccess, err: "bound to a different public key"},
+	}
+	for i, test := range tests {
+		name := fmt.Sprintf("%d: Client RPCing with blessings %v", i, test.blessings)
+		pclient.BlessingStore().Set(test.blessings, "root")
+		if err := b.client.Call(ctx, "mountpoint/server/suffix", "Closure", nil, nil); !matchesErrorPattern(err, test.errID, test.err) {
+			t.Errorf("%v: client.Call returned error %v", name, err)
+			continue
+		}
+	}
+}
+
+func TestServerLocalBlessings(t *testing.T) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	var (
+		pprovider, pclient, pserver = testutil.NewPrincipal("root"), testutil.NewPrincipal("client"), testutil.NewPrincipal("server")
+		pdischarger                 = pprovider
+
+		mgr = imanager.InternalNew(naming.FixedRoutingID(0x1111111))
+		ns  = tnaming.NewSimpleNamespace()
+
+		tpCav = mkThirdPartyCaveat(pdischarger.PublicKey(), "mountpoint/dischargeserver", mkCaveat(security.NewExpiryCaveat(time.Now().Add(time.Hour))))
+
+		bserver = bless(pprovider, pserver, "server", tpCav)
+		bclient = bless(pprovider, pclient, "client")
+	)
+	// Make the client and server principals trust root certificates from
+	// pprovider.
+	pclient.AddToRoots(pprovider.BlessingStore().Default())
+	pserver.AddToRoots(pprovider.BlessingStore().Default())
+
+	// Make the server present bserver to all clients.
+	pserver.BlessingStore().SetDefault(bserver)
+
+	// Start the server and the discharger.
+	_, server := startServer(t, ctx, pserver, mgr, ns, "mountpoint/server", testServerDisp{&testServer{}})
+	defer stopServer(t, ctx, server, ns, "mountpoint/server")
+
+	_, dischargeServer := startServer(t, ctx, pdischarger, mgr, ns, "mountpoint/dischargeserver", testutil.LeafDispatcher(&dischargeServer{}, security.AllowEveryone()))
+	defer stopServer(t, ctx, dischargeServer, ns, "mountpoint/dischargeserver")
+
+	// Make the client present bclient to all servers that are blessed
+	// by pprovider.
+	pclient.BlessingStore().Set(bclient, "root")
+	client, err := InternalNewClient(mgr, ns)
+	if err != nil {
+		t.Fatalf("InternalNewClient failed: %v", err)
+	}
+	defer client.Close()
+
+	ctx, _ = v23.WithPrincipal(ctx, pclient)
+	var gotServer, gotClient string
+	if err := client.Call(ctx, "mountpoint/server/suffix", "EchoBlessings", nil, []interface{}{&gotServer, &gotClient}); err != nil {
+		t.Fatalf("Finish failed: %v", err)
+	}
+	if wantServer, wantClient := "[root/server]", "[root/client]"; gotServer != wantServer || gotClient != wantClient {
+		t.Fatalf("EchoBlessings: got %v, %v want %v, %v", gotServer, gotClient, wantServer, wantClient)
+	}
+}
+
+func TestDischargePurgeFromCache(t *testing.T) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+
+	var (
+		pserver     = testutil.NewPrincipal("server")
+		pdischarger = pserver // In general, the discharger can be a separate principal. In this test, it happens to be the server.
+		pclient     = testutil.NewPrincipal("client")
+		// Client is blessed with a third-party caveat. The discharger service issues discharges with a fakeTimeCaveat.
+		// This blessing is presented to "server".
+		bclient = bless(pserver, pclient, "client", mkThirdPartyCaveat(pdischarger.PublicKey(), "mountpoint/server/discharger", security.UnconstrainedUse()))
+
+		b = createBundle(t, ctx, pserver, &testServer{})
+	)
+	defer b.cleanup(t, ctx)
+	// Set up the client to recognize the server's blessing and present bclient to it.
+	pclient.AddToRoots(pserver.BlessingStore().Default())
+	pclient.BlessingStore().Set(bclient, "server")
+
+	var err error
+	if b.client, err = InternalNewClient(b.sm, b.ns); err != nil {
+		t.Fatalf("InternalNewClient failed: %v", err)
+	}
+	ctx, _ = v23.WithPrincipal(ctx, pclient)
+	call := func() error {
+		var got string
+		if err := b.client.Call(ctx, "mountpoint/server/aclAuth", "Echo", []interface{}{"batman"}, []interface{}{&got}); err != nil {
+			return err
+		}
+		if want := `method:"Echo",suffix:"aclAuth",arg:"batman"`; got != want {
+			return verror.Convert(verror.ErrBadArg, nil, fmt.Errorf("Got [%v] want [%v]", got, want))
+		}
+		return nil
+	}
+
+	// First call should succeed
+	if err := call(); err != nil {
+		t.Fatal(err)
+	}
+	// Advance virtual clock, which will invalidate the discharge
+	clock.Advance(1)
+	if err, want := call(), "not authorized"; !matchesErrorPattern(err, verror.ErrNoAccess, want) {
+		t.Errorf("Got error [%v] wanted to match pattern %q", err, want)
+	}
+	// But retrying will succeed since the discharge should be purged from cache and refreshed
+	if err := call(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+type cancelTestServer struct {
+	started   chan struct{}
+	cancelled chan struct{}
+	t         *testing.T
+}
+
+func newCancelTestServer(t *testing.T) *cancelTestServer {
+	return &cancelTestServer{
+		started:   make(chan struct{}),
+		cancelled: make(chan struct{}),
+		t:         t,
+	}
+}
+
+func (s *cancelTestServer) CancelStreamReader(ctx *context.T, call rpc.StreamServerCall) error {
+	close(s.started)
+	var b []byte
+	if err := call.Recv(&b); err != io.EOF {
+		s.t.Errorf("Got error %v, want io.EOF", err)
+	}
+	<-ctx.Done()
+	close(s.cancelled)
+	return nil
+}
+
+// CancelStreamIgnorer doesn't read from its input stream, so all its
+// buffers fill.  The intention is to show that ctx.Done() is closed
+// even when the stream is stalled.
+func (s *cancelTestServer) CancelStreamIgnorer(ctx *context.T, _ rpc.StreamServerCall) error {
+	close(s.started)
+	<-ctx.Done()
+	close(s.cancelled)
+	return nil
+}
+
+func waitForCancel(t *testing.T, ts *cancelTestServer, cancel context.CancelFunc) {
+	<-ts.started
+	cancel()
+	<-ts.cancelled
+}
+
+// TestCancel tests cancellation while the server is reading from a stream.
+func TestCancel(t *testing.T) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	var (
+		ts               = newCancelTestServer(t)
+		pclient, pserver = newClientServerPrincipals()
+		b                = createBundle(t, ctx, pserver, ts)
+	)
+	defer b.cleanup(t, ctx)
+
+	ctx, _ = v23.WithPrincipal(ctx, pclient)
+	ctx, cancel := context.WithCancel(ctx)
+	_, err := b.client.StartCall(ctx, "mountpoint/server/suffix", "CancelStreamReader", []interface{}{})
+	if err != nil {
+		t.Fatalf("Start call failed: %v", err)
+	}
+	waitForCancel(t, ts, cancel)
+}
+
+// TestCancelWithFullBuffers tests that the cancel message gets through even if
+// the writer has filled the buffers and the server is not reading.
+func TestCancelWithFullBuffers(t *testing.T) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	var (
+		ts               = newCancelTestServer(t)
+		pclient, pserver = newClientServerPrincipals()
+		b                = createBundle(t, ctx, pserver, ts)
+	)
+	defer b.cleanup(t, ctx)
+
+	ctx, _ = v23.WithPrincipal(ctx, pclient)
+	ctx, cancel := context.WithCancel(ctx)
+	call, err := b.client.StartCall(ctx, "mountpoint/server/suffix", "CancelStreamIgnorer", []interface{}{})
+	if err != nil {
+		t.Fatalf("Start call failed: %v", err)
+	}
+	// Fill up all the write buffers to ensure that cancelling works even when the stream
+	// is blocked.
+	call.Send(make([]byte, vc.MaxSharedBytes))
+	call.Send(make([]byte, vc.DefaultBytesBufferedPerFlow))
+
+	waitForCancel(t, ts, cancel)
+}
+
+type streamRecvInGoroutineServer struct{ c chan error }
+
+func (s *streamRecvInGoroutineServer) RecvInGoroutine(_ *context.T, call rpc.StreamServerCall) error {
+	// Spawn a goroutine to read streaming data from the client.
+	go func() {
+		var i interface{}
+		for {
+			err := call.Recv(&i)
+			if err != nil {
+				s.c <- err
+				return
+			}
+		}
+	}()
+	// Imagine the server did some processing here and now that it is done,
+	// it does not care to see what else the client has to say.
+	return nil
+}
+
+func TestStreamReadTerminatedByServer(t *testing.T) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	var (
+		pclient, pserver = newClientServerPrincipals()
+		s                = &streamRecvInGoroutineServer{c: make(chan error, 1)}
+		b                = createBundle(t, ctx, pserver, s)
+	)
+	defer b.cleanup(t, ctx)
+
+	ctx, _ = v23.WithPrincipal(ctx, pclient)
+	call, err := b.client.StartCall(ctx, "mountpoint/server/suffix", "RecvInGoroutine", []interface{}{})
+	if err != nil {
+		t.Fatalf("StartCall failed: %v", err)
+	}
+
+	c := make(chan error, 1)
+	go func() {
+		for i := 0; ; i++ {
+			if err := call.Send(i); err != nil {
+				c <- err
+				return
+			}
+		}
+	}()
+
+	// The goroutine at the server executing "Recv" should have terminated
+	// with EOF.
+	if err := <-s.c; err != io.EOF {
+		t.Errorf("Got %v at server, want io.EOF", err)
+	}
+	// The client Send should have failed since the RPC has been
+	// terminated.
+	if err := <-c; err == nil {
+		t.Errorf("Client Send should fail as the server should have closed the flow")
+	}
+}
+
+// TestConnectWithIncompatibleServers tests that clients ignore incompatible endpoints.
+func TestConnectWithIncompatibleServers(t *testing.T) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	var (
+		pclient, pserver = newClientServerPrincipals()
+		b                = createBundle(t, ctx, pserver, &testServer{})
+	)
+	defer b.cleanup(t, ctx)
+
+	// Publish some incompatible endpoints.
+	pub := publisher.New(ctx, b.ns, publishPeriod)
+	defer pub.WaitForStop()
+	defer pub.Stop()
+	pub.AddName("incompatible", false, false)
+	pub.AddServer("/@2@tcp@localhost:10000@@1000000@2000000@@")
+	pub.AddServer("/@2@tcp@localhost:10001@@2000000@3000000@@")
+
+	ctx, _ = v23.WithPrincipal(ctx, pclient)
+	_, err := b.client.StartCall(ctx, "incompatible/suffix", "Echo", []interface{}{"foo"}, options.NoRetry{})
+	if verror.ErrorID(err) != verror.ErrNoServers.ID {
+		t.Errorf("Expected error %v, found: %v", verror.ErrNoServers, err)
+	}
+
+	// Now add a server with a compatible endpoint and try again.
+	publisher.AddServer("/" + b.ep.String())
+	publisher.AddName("incompatible", false, false)
+
+	call, err := b.client.StartCall(ctx, "incompatible/suffix", "Echo", []interface{}{"foo"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	var result string
+	if err = call.Finish(&result); err != nil {
+		t.Errorf("Unexpected error finishing call %v", err)
+	}
+	expected := `method:"Echo",suffix:"suffix",arg:"foo"`
+	if result != expected {
+		t.Errorf("Wrong result returned.  Got %s, wanted %s", result, expected)
+	}
+}
+
+func TestPreferredAddress(t *testing.T) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+	defer sm.Shutdown()
+	ns := tnaming.NewSimpleNamespace()
+	pa := func(string, []net.Addr) ([]net.Addr, error) {
+		return []net.Addr{netstate.NewNetAddr("tcp", "1.1.1.1")}, nil
+	}
+	server, err := testInternalNewServer(ctx, sm, ns, testutil.NewPrincipal("server"))
+	if err != nil {
+		t.Errorf("InternalNewServer failed: %v", err)
+	}
+	defer server.Stop()
+
+	spec := rpc.ListenSpec{
+		Addrs:          rpc.ListenAddrs{{"tcp", ":0"}},
+		AddressChooser: pa,
+	}
+	eps, err := server.Listen(spec)
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	iep := eps[0].(*inaming.Endpoint)
+	host, _, err := net.SplitHostPort(iep.Address)
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	if got, want := host, "1.1.1.1"; got != want {
+		t.Errorf("got %q, want %q", got, want)
+	}
+	// Won't override the specified address.
+	eps, err = server.Listen(listenSpec)
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	iep = eps[0].(*inaming.Endpoint)
+	host, _, err = net.SplitHostPort(iep.Address)
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	if got, want := host, "127.0.0.1"; got != want {
+		t.Errorf("got %q, want %q", got, want)
+	}
+}
+
+func TestPreferredAddressErrors(t *testing.T) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+	defer sm.Shutdown()
+	ns := tnaming.NewSimpleNamespace()
+	paerr := func(string, []net.Addr) ([]net.Addr, error) {
+		return nil, fmt.Errorf("oops")
+	}
+	server, err := testInternalNewServer(ctx, sm, ns, testutil.NewPrincipal("server"))
+	if err != nil {
+		t.Errorf("InternalNewServer failed: %v", err)
+	}
+	defer server.Stop()
+	spec := rpc.ListenSpec{
+		Addrs:          rpc.ListenAddrs{{"tcp", ":0"}},
+		AddressChooser: paerr,
+	}
+	// Listen should produce no endpoints since the AddressChooser returns an
+	// error; the error is surfaced via server.Status() below.
+	eps, _ := server.Listen(spec)
+
+	if got, want := len(eps), 0; got != want {
+		t.Errorf("got %q, want %q", got, want)
+	}
+	status := server.Status()
+	if got, want := len(status.Errors), 1; got != want {
+		t.Errorf("got %q, want %q", got, want)
+	}
+	if got, want := status.Errors[0].Error(), "oops"; got != want {
+		t.Errorf("got %q, want %q", got, want)
+	}
+}
+
+func TestSecurityNone(t *testing.T) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	sm := imanager.InternalNew(naming.FixedRoutingID(0x66666666))
+	defer sm.Shutdown()
+	ns := tnaming.NewSimpleNamespace()
+	server, err := testInternalNewServer(ctx, sm, ns, nil, options.SecurityNone)
+	if err != nil {
+		t.Fatalf("InternalNewServer failed: %v", err)
+	}
+	if _, err = server.Listen(listenSpec); err != nil {
+		t.Fatalf("server.Listen failed: %v", err)
+	}
+	disp := &testServerDisp{&testServer{}}
+	if err := server.ServeDispatcher("mp/server", disp); err != nil {
+		t.Fatalf("server.Serve failed: %v", err)
+	}
+	client, err := InternalNewClient(sm, ns)
+	if err != nil {
+		t.Fatalf("InternalNewClient failed: %v", err)
+	}
+	// When using SecurityNone, all authorization checks should be skipped, so
+	// unauthorized methods should be callable.
+	var got string
+	if err := client.Call(ctx, "mp/server", "Unauthorized", nil, []interface{}{&got}, options.SecurityNone); err != nil {
+		t.Fatalf("client.Call failed: %v", err)
+	}
+	if want := "UnauthorizedResult"; got != want {
+		t.Errorf("got (%v), want (%v)", got, want)
+	}
+}
+
+func TestNoPrincipal(t *testing.T) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	sm := imanager.InternalNew(naming.FixedRoutingID(0x66666666))
+	defer sm.Shutdown()
+	ns := tnaming.NewSimpleNamespace()
+	server, err := testInternalNewServer(ctx, sm, ns, testutil.NewPrincipal("server"))
+	if err != nil {
+		t.Fatalf("InternalNewServer failed: %v", err)
+	}
+	if _, err = server.Listen(listenSpec); err != nil {
+		t.Fatalf("server.Listen failed: %v", err)
+	}
+	disp := &testServerDisp{&testServer{}}
+	if err := server.ServeDispatcher("mp/server", disp); err != nil {
+		t.Fatalf("server.Serve failed: %v", err)
+	}
+	client, err := InternalNewClient(sm, ns)
+	if err != nil {
+		t.Fatalf("InternalNewClient failed: %v", err)
+	}
+
+	// A call should fail if the principal in the ctx is nil and SecurityNone is not specified.
+	ctx, err = v23.WithPrincipal(ctx, nil)
+	if err != nil {
+		t.Fatalf("failed to set principal: %v", err)
+	}
+	_, err = client.StartCall(ctx, "mp/server", "Echo", []interface{}{"foo"})
+	if err == nil || verror.ErrorID(err) != errNoPrincipal.ID {
+		t.Fatalf("Expected errNoPrincipal, got %v", err)
+	}
+}
+
+func TestCallWithNilContext(t *testing.T) {
+	sm := imanager.InternalNew(naming.FixedRoutingID(0x66666666))
+	defer sm.Shutdown()
+	ns := tnaming.NewSimpleNamespace()
+	client, err := InternalNewClient(sm, ns)
+	if err != nil {
+		t.Fatalf("InternalNewClient failed: %v", err)
+	}
+	call, err := client.StartCall(nil, "foo", "bar", []interface{}{}, options.SecurityNone)
+	if call != nil {
+		t.Errorf("Expected nil interface got: %#v", call)
+	}
+	if verror.ErrorID(err) != verror.ErrBadArg.ID {
+		t.Errorf("Expected a BadArg error, got: %s", err.Error())
+	}
+}
+
+func TestServerBlessingsOpt(t *testing.T) {
+	var (
+		pserver   = testutil.NewPrincipal("server")
+		pclient   = testutil.NewPrincipal("client")
+		batman, _ = pserver.BlessSelf("batman")
+	)
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	// Client and server recognize the server's blessings.
+	for _, p := range []security.Principal{pserver, pclient} {
+		if err := p.AddToRoots(pserver.BlessingStore().Default()); err != nil {
+			t.Fatal(err)
+		}
+		if err := p.AddToRoots(batman); err != nil {
+			t.Fatal(err)
+		}
+	}
+	// Start a server that uses the ServerBlessings option to configure itself
+	// to act as batman (as opposed to using the default blessing).
+	ns := tnaming.NewSimpleNamespace()
+
+	defer runServer(t, ctx, ns, pserver, "mountpoint/batman", &testServer{}, options.ServerBlessings{batman}).Shutdown()
+	defer runServer(t, ctx, ns, pserver, "mountpoint/default", &testServer{}).Shutdown()
+
+	// And finally, make an RPC and see that the client sees "batman"
+	runClient := func(server string) ([]string, error) {
+		smc := imanager.InternalNew(naming.FixedRoutingID(0xc))
+		defer smc.Shutdown()
+		client, err := InternalNewClient(
+			smc,
+			ns)
+		if err != nil {
+			return nil, err
+		}
+		defer client.Close()
+		ctx, _ = v23.WithPrincipal(ctx, pclient)
+		call, err := client.StartCall(ctx, server, "Closure", nil)
+		if err != nil {
+			return nil, err
+		}
+		blessings, _ := call.RemoteBlessings()
+		return blessings, nil
+	}
+
+	// When talking to mountpoint/batman, should see "batman"
+	// When talking to mountpoint/default, should see "server"
+	if got, err := runClient("mountpoint/batman"); err != nil || len(got) != 1 || got[0] != "batman" {
+		t.Errorf("Got (%v, %v) wanted 'batman'", got, err)
+	}
+	if got, err := runClient("mountpoint/default"); err != nil || len(got) != 1 || got[0] != "server" {
+		t.Errorf("Got (%v, %v) wanted 'server'", got, err)
+	}
+}
+
+func TestNoDischargesOpt(t *testing.T) {
+	var (
+		pdischarger = testutil.NewPrincipal("discharger")
+		pserver     = testutil.NewPrincipal("server")
+		pclient     = testutil.NewPrincipal("client")
+	)
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	// Make the client recognize all server blessings
+	if err := pclient.AddToRoots(pserver.BlessingStore().Default()); err != nil {
+		t.Fatal(err)
+	}
+	if err := pclient.AddToRoots(pdischarger.BlessingStore().Default()); err != nil {
+		t.Fatal(err)
+	}
+
+	// Bless the client with a ThirdPartyCaveat.
+	tpcav := mkThirdPartyCaveat(pdischarger.PublicKey(), "mountpoint/discharger", mkCaveat(security.NewExpiryCaveat(time.Now().Add(time.Hour))))
+	blessings, err := pserver.Bless(pclient.PublicKey(), pserver.BlessingStore().Default(), "tpcav", tpcav)
+	if err != nil {
+		t.Fatalf("failed to create Blessings: %v", err)
+	}
+	if _, err = pclient.BlessingStore().Set(blessings, "server"); err != nil {
+		t.Fatalf("failed to set blessings: %v", err)
+	}
+
+	ns := tnaming.NewSimpleNamespace()
+
+	// Set up the discharger and test server.
+	discharger := &dischargeServer{}
+	defer runServer(t, ctx, ns, pdischarger, "mountpoint/discharger", discharger).Shutdown()
+	defer runServer(t, ctx, ns, pserver, "mountpoint/testServer", &testServer{}).Shutdown()
+
+	runClient := func(noDischarges bool) {
+		rid, err := naming.NewRoutingID()
+		if err != nil {
+			t.Fatal(err)
+		}
+		smc := imanager.InternalNew(rid)
+		defer smc.Shutdown()
+		client, err := InternalNewClient(smc, ns)
+		if err != nil {
+			t.Fatalf("failed to create client: %v", err)
+		}
+		defer client.Close()
+		var opts []rpc.CallOpt
+		if noDischarges {
+			opts = append(opts, NoDischarges{})
+		}
+		ctx, _ = v23.WithPrincipal(ctx, pclient)
+		if _, err = client.StartCall(ctx, "mountpoint/testServer", "Closure", nil, opts...); err != nil {
+			t.Fatalf("failed to StartCall: %v", err)
+		}
+	}
+
+	// Test that when the NoDischarges option is set, dischargeServer does not get called.
+	if runClient(true); discharger.called {
+		t.Errorf("did not expect discharger to be called")
+	}
+	discharger.called = false
+	// Test that when the NoDischarges option is not set, dischargeServer does get called.
+	if runClient(false); !discharger.called {
+		t.Errorf("expected discharger to be called")
+	}
+}
+
+func TestNoImplicitDischargeFetching(t *testing.T) {
+	// This test ensures that the discharge client fetches discharges only for the specified third-party caveats, not for its own.
+	var (
+		pdischarger1     = testutil.NewPrincipal("discharger1")
+		pdischarger2     = testutil.NewPrincipal("discharger2")
+		pdischargeClient = testutil.NewPrincipal("dischargeClient")
+	)
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	// Bless the client with a ThirdPartyCaveat from discharger1.
+	tpcav1 := mkThirdPartyCaveat(pdischarger1.PublicKey(), "mountpoint/discharger1", mkCaveat(security.NewExpiryCaveat(time.Now().Add(time.Hour))))
+	blessings, err := pdischarger1.Bless(pdischargeClient.PublicKey(), pdischarger1.BlessingStore().Default(), "tpcav1", tpcav1)
+	if err != nil {
+		t.Fatalf("failed to create Blessings: %v", err)
+	}
+	if err = pdischargeClient.BlessingStore().SetDefault(blessings); err != nil {
+		t.Fatalf("failed to set blessings: %v", err)
+	}
+	// The client will only talk to the discharge services if it recognizes them.
+	pdischargeClient.AddToRoots(pdischarger1.BlessingStore().Default())
+	pdischargeClient.AddToRoots(pdischarger2.BlessingStore().Default())
+
+	ns := tnaming.NewSimpleNamespace()
+
+	// Set up the discharge servers.
+	discharger1 := &dischargeServer{}
+	discharger2 := &dischargeServer{}
+	defer runServer(t, ctx, ns, pdischarger1, "mountpoint/discharger1", discharger1).Shutdown()
+	defer runServer(t, ctx, ns, pdischarger2, "mountpoint/discharger2", discharger2).Shutdown()
+
+	rid, err := naming.NewRoutingID()
+	if err != nil {
+		t.Fatal(err)
+	}
+	sm := imanager.InternalNew(rid)
+
+	c, err := InternalNewClient(sm, ns)
+	if err != nil {
+		t.Fatalf("failed to create client: %v", err)
+	}
+	dc := c.(*client).dc
+	tpcav2, err := security.NewPublicKeyCaveat(pdischarger2.PublicKey(), "mountpoint/discharger2", security.ThirdPartyRequirements{}, mkCaveat(security.NewExpiryCaveat(time.Now().Add(time.Hour))))
+	if err != nil {
+		t.Error(err)
+	}
+	ctx, _ = v23.WithPrincipal(ctx, pdischargeClient)
+	dc.PrepareDischarges(ctx, []security.Caveat{tpcav2}, security.DischargeImpetus{})
+
+	// Ensure that discharger1 was not called and discharger2 was called.
+	if discharger1.called {
+		t.Errorf("discharge for caveat on discharge client should not have been fetched.")
+	}
+	if !discharger2.called {
+		t.Errorf("discharge for caveat passed to PrepareDischarges should have been fetched.")
+	}
+}
+
+// TestBlessingsCache tests that the VCCache is successfully used to cache
+// the blessings presented across duplicate calls.
+func TestBlessingsCache(t *testing.T) {
+	var (
+		pserver = testutil.NewPrincipal("server")
+		pclient = testutil.NewPrincipal("client")
+	)
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	// Make the client recognize all server blessings
+	if err := pclient.AddToRoots(pserver.BlessingStore().Default()); err != nil {
+		t.Fatal(err)
+	}
+
+	ns := tnaming.NewSimpleNamespace()
+
+	serverSM := runServer(t, ctx, ns, pserver, "mountpoint/testServer", &testServer{})
+	defer serverSM.Shutdown()
+	rid := serverSM.RoutingID()
+
+	ctx, _ = v23.WithPrincipal(ctx, pclient)
+
+	newClient := func() rpc.Client {
+		rid, err := naming.NewRoutingID()
+		if err != nil {
+			t.Fatal(err)
+		}
+		smc := imanager.InternalNew(rid)
+		defer smc.Shutdown()
+		client, err := InternalNewClient(smc, ns)
+		if err != nil {
+			t.Fatalf("failed to create client: %v", err)
+		}
+		return client
+	}
+
+	runClient := func(client rpc.Client) {
+		if err := client.Call(ctx, "mountpoint/testServer", "Closure", nil, nil); err != nil {
+			t.Fatalf("failed to Call: %v", err)
+		}
+	}
+
+	cachePrefix := naming.Join("rpc", "server", "routing-id", rid.String(), "security", "blessings", "cache")
+	cacheHits, err := stats.GetStatsObject(naming.Join(cachePrefix, "hits"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	cacheAttempts, err := stats.GetStatsObject(naming.Join(cachePrefix, "attempts"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Check that the blessings cache is not used on the first call.
+	clientA := newClient()
+	runClient(clientA)
+	if gotAttempts, gotHits := cacheAttempts.Value().(int64), cacheHits.Value().(int64); gotAttempts != 1 || gotHits != 0 {
+		t.Errorf("got cacheAttempts(%v), cacheHits(%v), expected cacheAttempts(1), cacheHits(0)", gotAttempts, gotHits)
+	}
+	// Check that the cache is hit on the second call with the same blessings.
+	runClient(clientA)
+	if gotAttempts, gotHits := cacheAttempts.Value().(int64), cacheHits.Value().(int64); gotAttempts != 2 || gotHits != 1 {
+		t.Errorf("got cacheAttempts(%v), cacheHits(%v), expected cacheAttempts(2), cacheHits(1)", gotAttempts, gotHits)
+	}
+	clientA.Close()
+	// Check that the cache is not used with a different client.
+	clientB := newClient()
+	runClient(clientB)
+	if gotAttempts, gotHits := cacheAttempts.Value().(int64), cacheHits.Value().(int64); gotAttempts != 3 || gotHits != 1 {
+		t.Errorf("got cacheAttempts(%v), cacheHits(%v), expected cacheAttempts(3), cacheHits(1)", gotAttempts, gotHits)
+	}
+	// clientB changes its blessings; the cache should not be used.
+	blessings, err := pserver.Bless(pclient.PublicKey(), pserver.BlessingStore().Default(), "cav", mkCaveat(security.NewExpiryCaveat(time.Now().Add(time.Hour))))
+	if err != nil {
+		t.Fatalf("failed to create Blessings: %v", err)
+	}
+	if _, err = pclient.BlessingStore().Set(blessings, "server"); err != nil {
+		t.Fatalf("failed to set blessings: %v", err)
+	}
+	runClient(clientB)
+	if gotAttempts, gotHits := cacheAttempts.Value().(int64), cacheHits.Value().(int64); gotAttempts != 4 || gotHits != 1 {
+		t.Errorf("got cacheAttempts(%v), cacheHits(%v), expected cacheAttempts(4), cacheHits(1)", gotAttempts, gotHits)
+	}
+	clientB.Close()
+}
+
+var fakeTimeCaveat = security.CaveatDescriptor{
+	Id:        uniqueid.Id{0x18, 0xba, 0x6f, 0x84, 0xd5, 0xec, 0xdb, 0x9b, 0xf2, 0x32, 0x19, 0x5b, 0x53, 0x92, 0x80, 0x0},
+	ParamType: vdl.TypeOf(int64(0)),
+}
+
+func TestServerPublicKeyOpt(t *testing.T) {
+	var (
+		pserver = testutil.NewPrincipal("server")
+		pother  = testutil.NewPrincipal("other")
+		pclient = testutil.NewPrincipal("client")
+	)
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	ns := tnaming.NewSimpleNamespace()
+	mountName := "mountpoint/default"
+
+	// Start a server with pserver.
+	defer runServer(t, ctx, ns, pserver, mountName, &testServer{}).Shutdown()
+
+	smc := imanager.InternalNew(naming.FixedRoutingID(0xc))
+	client, err := InternalNewClient(smc, ns)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer smc.Shutdown()
+	defer client.Close()
+
+	ctx, _ = v23.WithPrincipal(ctx, pclient)
+	// The call should succeed when the server presents the same public key as the opt...
+	if _, err = client.StartCall(ctx, mountName, "Closure", nil, options.SkipServerEndpointAuthorization{}, options.ServerPublicKey{pserver.PublicKey()}); err != nil {
+		t.Errorf("Expected call to succeed but got %v", err)
+	}
+	// ...but fail if they differ.
+	if _, err = client.StartCall(ctx, mountName, "Closure", nil, options.SkipServerEndpointAuthorization{}, options.ServerPublicKey{pother.PublicKey()}); verror.ErrorID(err) != verror.ErrNotTrusted.ID {
+		t.Errorf("got %v, want %v", verror.ErrorID(err), verror.ErrNotTrusted.ID)
+	}
+}
+
+type expiryDischarger struct {
+	called bool
+}
+
+func (ed *expiryDischarger) Discharge(ctx *context.T, call rpc.StreamServerCall, cav security.Caveat, _ security.DischargeImpetus) (security.Discharge, error) {
+	tp := cav.ThirdPartyDetails()
+	if tp == nil {
+		return security.Discharge{}, fmt.Errorf("discharger: %v does not represent a third-party caveat", cav)
+	}
+	if err := tp.Dischargeable(ctx, call.Security()); err != nil {
+		return security.Discharge{}, fmt.Errorf("third-party caveat %v cannot be discharged for this context: %v", cav, err)
+	}
+	expDur := 10 * time.Millisecond
+	if ed.called {
+		expDur = time.Second
+	}
+	expiry, err := security.NewExpiryCaveat(time.Now().Add(expDur))
+	if err != nil {
+		return security.Discharge{}, fmt.Errorf("failed to create an expiration on the discharge: %v", err)
+	}
+	d, err := call.Security().LocalPrincipal().MintDischarge(cav, expiry)
+	if err != nil {
+		return security.Discharge{}, err
+	}
+	ed.called = true
+	return d, nil
+}
+
+func TestDischargeClientFetchExpiredDischarges(t *testing.T) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	var (
+		pclient, pdischarger = newClientServerPrincipals()
+		tpcav                = mkThirdPartyCaveat(pdischarger.PublicKey(), "mountpoint/discharger", mkCaveat(security.NewExpiryCaveat(time.Now().Add(time.Hour))))
+		ns                   = tnaming.NewSimpleNamespace()
+		discharger           = &expiryDischarger{}
+	)
+
+	// Set up the discharge server.
+	defer runServer(t, ctx, ns, pdischarger, "mountpoint/discharger", discharger).Shutdown()
+
+	// Create a discharge client.
+	rid, err := naming.NewRoutingID()
+	if err != nil {
+		t.Fatal(err)
+	}
+	smc := imanager.InternalNew(rid)
+	defer smc.Shutdown()
+	client, err := InternalNewClient(smc, ns)
+	if err != nil {
+		t.Fatalf("failed to create client: %v", err)
+	}
+	defer client.Close()
+	ctx, _ = v23.WithPrincipal(ctx, pclient)
+	dc := InternalNewDischargeClient(ctx, client, 0)
+
+	// Fetch discharges for tpcav.
+	dis := dc.PrepareDischarges(ctx, []security.Caveat{tpcav}, security.DischargeImpetus{})[0]
+	// The discharge should have a short expiry: the discharger mints its
+	// first discharge with a 10ms expiration. Wait for it to expire.
+	expiry := dis.Expiry()
+	select {
+	case <-time.After(expiry.Sub(time.Now())):
+		break
+	case <-time.After(time.Second):
+		t.Fatalf("discharge didn't expire within a second")
+	}
+	// Prepare discharges again to fetch fresh ones.
+	now := time.Now()
+	dis = dc.PrepareDischarges(ctx, []security.Caveat{tpcav}, security.DischargeImpetus{})[0]
+	if expiry = dis.Expiry(); expiry.Before(now) {
+		t.Fatalf("discharge has expired %v, but should be fresh", dis)
+	}
+}
+
+// newClientServerPrincipals creates a pair of principals and sets them up to
+// recognize each other's default blessings.
+//
+// If the client does not recognize the blessings presented by the server,
+// it will not even send the request.
+//
+// If the server does not recognize the blessings presented by the client,
+// it is likely to deny access (unless the server authorizes all principals).
+func newClientServerPrincipals() (client, server security.Principal) {
+	client = testutil.NewPrincipal("client")
+	server = testutil.NewPrincipal("server")
+	client.AddToRoots(server.BlessingStore().Default())
+	server.AddToRoots(client.BlessingStore().Default())
+	return
+}
+
+func init() {
+	rpc.RegisterUnknownProtocol("wsh", websocket.HybridDial, websocket.HybridResolve, websocket.HybridListener)
+	security.RegisterCaveatValidator(fakeTimeCaveat, func(_ *context.T, _ security.Call, t int64) error {
+		if now := clock.Now(); now > t {
+			return fmt.Errorf("fakeTimeCaveat expired: now=%d > then=%d", now, t)
+		}
+		return nil
+	})
+}
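+
+// As an illustration of the validator registered above: a blessing carrying
+// fakeTimeCaveat with parameter t=100 validates while the test clock reports
+// a time <= 100, and fails with "fakeTimeCaveat expired" once clock.Now()
+// advances past it.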
diff --git a/runtime/internal/rpc/init.go b/runtime/internal/rpc/init.go
new file mode 100644
index 0000000..6ab1428
--- /dev/null
+++ b/runtime/internal/rpc/init.go
@@ -0,0 +1,14 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+	"math/rand"
+	"time"
+)
+
+func init() {
+	rand.Seed(time.Now().UnixNano())
+}
diff --git a/runtime/internal/rpc/options.go b/runtime/internal/rpc/options.go
new file mode 100644
index 0000000..16004aa
--- /dev/null
+++ b/runtime/internal/rpc/options.go
@@ -0,0 +1,110 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+	"time"
+
+	"v.io/v23/naming"
+	"v.io/v23/options"
+	"v.io/v23/rpc"
+
+	"v.io/x/lib/vlog"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+)
+
+// PreferredProtocols instructs the Runtime implementation to select
+// endpoints with the specified protocols when a Client makes a call
+// and to order them in the specified order.
+type PreferredProtocols []string
+
+func (PreferredProtocols) RPCClientOpt() {
+	defer vlog.LogCall()() // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+}
+
+// PreferredServerResolveProtocols is used to sort and filter the endpoints
+// when resolving the proxy name from a mounttable.
+type PreferredServerResolveProtocols []string
+
+func (PreferredServerResolveProtocols) RPCServerOpt() {
+	defer vlog.LogCall()() // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+}
+
+// ReservedNameDispatcher specifies the dispatcher that controls access
+// to the framework-managed portion of the namespace.
+type ReservedNameDispatcher struct {
+	Dispatcher rpc.Dispatcher
+}
+
+func (ReservedNameDispatcher) RPCServerOpt() {
+	defer vlog.LogCall()() // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+}
+
+func getRetryTimeoutOpt(opts []rpc.CallOpt) (time.Duration, bool) {
+	for _, o := range opts {
+		if r, ok := o.(options.RetryTimeout); ok {
+			return time.Duration(r), true
+		}
+	}
+	return 0, false
+}
+
+func getNoNamespaceOpt(opts []rpc.CallOpt) bool {
+	for _, o := range opts {
+		if _, ok := o.(options.NoResolve); ok {
+			return true
+		}
+	}
+	return false
+}
+
+func shouldNotFetchDischarges(opts []rpc.CallOpt) bool {
+	for _, o := range opts {
+		if _, ok := o.(NoDischarges); ok {
+			return true
+		}
+	}
+	return false
+}
+
+func noRetry(opts []rpc.CallOpt) bool {
+	for _, o := range opts {
+		if _, ok := o.(options.NoRetry); ok {
+			return true
+		}
+	}
+	return false
+}
+
+func getVCOpts(opts []rpc.CallOpt) (vcOpts []stream.VCOpt) {
+	for _, o := range opts {
+		if v, ok := o.(stream.VCOpt); ok {
+			vcOpts = append(vcOpts, v)
+		}
+	}
+	return
+}
+
+func getNamespaceOpts(opts []rpc.CallOpt) (resolveOpts []naming.NamespaceOpt) {
+	for _, o := range opts {
+		if r, ok := o.(naming.NamespaceOpt); ok {
+			resolveOpts = append(resolveOpts, r)
+		}
+	}
+	return
+}
+
+func callEncrypted(opts []rpc.CallOpt) bool {
+	encrypted := true
+	for _, o := range opts {
+		switch o {
+		case options.SecurityNone:
+			encrypted = false
+		case options.SecurityConfidential:
+			encrypted = true
+		}
+	}
+	return encrypted
+}
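+
+// For example, callEncrypted(nil) returns true since calls are encrypted by
+// default, callEncrypted([]rpc.CallOpt{options.SecurityNone}) returns false,
+// and if both security options appear in the slice the last one wins.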
diff --git a/runtime/internal/rpc/protocols/tcp/init.go b/runtime/internal/rpc/protocols/tcp/init.go
new file mode 100644
index 0000000..23ec14d
--- /dev/null
+++ b/runtime/internal/rpc/protocols/tcp/init.go
@@ -0,0 +1,76 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tcp
+
+import (
+	"net"
+	"time"
+
+	"v.io/x/lib/vlog"
+
+	"v.io/v23/rpc"
+
+	"v.io/x/ref/runtime/internal/lib/tcputil"
+)
+
+func init() {
+	rpc.RegisterProtocol("tcp", tcpDial, tcpResolve, tcpListen, "tcp4", "tcp6")
+	rpc.RegisterProtocol("tcp4", tcpDial, tcpResolve, tcpListen)
+	rpc.RegisterProtocol("tcp6", tcpDial, tcpResolve, tcpListen)
+}
+
+func tcpDial(network, address string, timeout time.Duration) (net.Conn, error) {
+	conn, err := net.DialTimeout(network, address, timeout)
+	if err != nil {
+		return nil, err
+	}
+	if err := tcputil.EnableTCPKeepAlive(conn); err != nil {
+		return nil, err
+	}
+	return conn, nil
+}
+
+// tcpResolve performs a DNS resolution on the provided network and address.
+func tcpResolve(network, address string) (string, string, error) {
+	tcpAddr, err := net.ResolveTCPAddr(network, address)
+	if err != nil {
+		return "", "", err
+	}
+	return tcpAddr.Network(), tcpAddr.String(), nil
+}
+
+// tcpListen returns a listener that sets KeepAlive on all accepted connections.
+func tcpListen(network, laddr string) (net.Listener, error) {
+	ln, err := net.Listen(network, laddr)
+	if err != nil {
+		return nil, err
+	}
+	return &tcpListener{ln}, nil
+}
+
+// tcpListener is a wrapper around net.Listener that sets KeepAlive on all
+// accepted connections.
+type tcpListener struct {
+	netLn net.Listener
+}
+
+func (ln *tcpListener) Accept() (net.Conn, error) {
+	conn, err := ln.netLn.Accept()
+	if err != nil {
+		return nil, err
+	}
+	if err := tcputil.EnableTCPKeepAlive(conn); err != nil {
+		vlog.Errorf("Failed to enable TCP keep alive: %v", err)
+	}
+	return conn, nil
+}
+
+func (ln *tcpListener) Close() error {
+	return ln.netLn.Close()
+}
+
+func (ln *tcpListener) Addr() net.Addr {
+	return ln.netLn.Addr()
+}
diff --git a/runtime/internal/rpc/protocols/ws/init.go b/runtime/internal/rpc/protocols/ws/init.go
new file mode 100644
index 0000000..5aac575
--- /dev/null
+++ b/runtime/internal/rpc/protocols/ws/init.go
@@ -0,0 +1,18 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"v.io/v23/rpc"
+
+	"v.io/x/ref/runtime/internal/lib/websocket"
+)
+
+func init() {
+	// ws, ws4, ws6 represent websocket protocol instances.
+	rpc.RegisterProtocol("ws", websocket.Dial, websocket.Resolve, websocket.Listener, "ws4", "ws6")
+	rpc.RegisterProtocol("ws4", websocket.Dial, websocket.Resolve, websocket.Listener)
+	rpc.RegisterProtocol("ws6", websocket.Dial, websocket.Resolve, websocket.Listener)
+}
diff --git a/runtime/internal/rpc/protocols/wsh/init.go b/runtime/internal/rpc/protocols/wsh/init.go
new file mode 100644
index 0000000..26cc680
--- /dev/null
+++ b/runtime/internal/rpc/protocols/wsh/init.go
@@ -0,0 +1,19 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package wsh registers the websocket 'hybrid' protocol.
+// We prefer to use tcp whenever we can to avoid the overhead of websockets.
+package wsh
+
+import (
+	"v.io/v23/rpc"
+
+	"v.io/x/ref/runtime/internal/lib/websocket"
+)
+
+func init() {
+	rpc.RegisterProtocol("wsh", websocket.HybridDial, websocket.HybridResolve, websocket.HybridListener, "tcp4", "tcp6", "ws4", "ws6")
+	rpc.RegisterProtocol("wsh4", websocket.HybridDial, websocket.HybridResolve, websocket.HybridListener, "tcp4", "ws4")
+	rpc.RegisterProtocol("wsh6", websocket.HybridDial, websocket.HybridResolve, websocket.HybridListener, "tcp6", "ws6")
+}
diff --git a/runtime/internal/rpc/protocols/wsh_nacl/init.go b/runtime/internal/rpc/protocols/wsh_nacl/init.go
new file mode 100644
index 0000000..276a567
--- /dev/null
+++ b/runtime/internal/rpc/protocols/wsh_nacl/init.go
@@ -0,0 +1,21 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package wsh_nacl registers the websocket 'hybrid' protocol for nacl
+// architectures.
+package wsh_nacl
+
+import (
+	"v.io/v23/rpc"
+
+	"v.io/x/ref/runtime/internal/lib/websocket"
+)
+
+func init() {
+	// We limit wsh to ws since in general nacl does not allow direct access
+	// to TCP/UDP networking.
+	rpc.RegisterProtocol("wsh", websocket.Dial, websocket.Resolve, websocket.Listener, "ws4", "ws6")
+	rpc.RegisterProtocol("wsh4", websocket.Dial, websocket.Resolve, websocket.Listener, "ws4")
+	rpc.RegisterProtocol("wsh6", websocket.Dial, websocket.Resolve, websocket.Listener, "ws6")
+}
diff --git a/runtime/internal/rpc/pubsub.go b/runtime/internal/rpc/pubsub.go
new file mode 100644
index 0000000..7bfe46c
--- /dev/null
+++ b/runtime/internal/rpc/pubsub.go
@@ -0,0 +1,30 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+	"net"
+
+	"v.io/x/lib/pubsub"
+)
+
+// NewAddAddrsSetting creates the Setting to be sent to Listen to inform
+// it of new addresses that have become available since the last change.
+func NewAddAddrsSetting(a []net.Addr) pubsub.Setting {
+	return pubsub.NewAny(NewAddrsSetting, NewAddrsSettingDesc, a)
+}
+
+// NewRmAddrsSetting creates the Setting to be sent to Listen to inform
+// it of addresses that are no longer available.
+func NewRmAddrsSetting(a []net.Addr) pubsub.Setting {
+	return pubsub.NewAny(RmAddrsSetting, RmAddrsSettingDesc, a)
+}
+
+const (
+	NewAddrsSetting     = "NewAddrs"
+	NewAddrsSettingDesc = "New Addresses discovered since last change"
+	RmAddrsSetting      = "RmAddrs"
+	RmAddrsSettingDesc  = "Addresses that have been removed since last change"
+)
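+
+// For example, a roaming-aware runtime might publish
+// NewAddAddrsSetting(addrs) on the settings stream when DHCP assigns new
+// addresses to an interface, and NewRmAddrsSetting(addrs) when those
+// addresses are withdrawn, so that Listen can update its endpoints.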
diff --git a/runtime/internal/rpc/reserved.go b/runtime/internal/rpc/reserved.go
new file mode 100644
index 0000000..03a63fc
--- /dev/null
+++ b/runtime/internal/rpc/reserved.go
@@ -0,0 +1,388 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+	"strings"
+
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/rpc"
+	"v.io/v23/rpc/reserved"
+	"v.io/v23/security"
+	"v.io/v23/security/access"
+	"v.io/v23/vdl"
+	"v.io/v23/vdlroot/signature"
+	"v.io/v23/verror"
+	"v.io/x/lib/vlog"
+
+	"v.io/x/ref/lib/glob"
+)
+
+// reservedInvoker returns a special invoker for reserved methods.  This invoker
+// has access to the internal dispatchers, which allows it to perform special
+// handling for methods like Glob and Signature.
+func reservedInvoker(dispNormal, dispReserved rpc.Dispatcher) rpc.Invoker {
+	methods := &reservedMethods{dispNormal: dispNormal, dispReserved: dispReserved}
+	invoker := rpc.ReflectInvokerOrDie(methods)
+	methods.selfInvoker = invoker
+	return invoker
+}
+
+// reservedMethods is a regular server implementation object, which is passed to
+// the regular ReflectInvoker in order to implement reserved methods.  The
+// leading reserved "__" prefix is stripped before any methods are called.
+//
+// To add a new reserved method, simply add a method below, along with a
+// description of the method.
+type reservedMethods struct {
+	dispNormal   rpc.Dispatcher
+	dispReserved rpc.Dispatcher
+	selfInvoker  rpc.Invoker
+}
+
+func (r *reservedMethods) Describe__() []rpc.InterfaceDesc {
+	defer vlog.LogCall()() // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	return []rpc.InterfaceDesc{{
+		Name: "__Reserved",
+		Doc:  `Reserved methods implemented by the RPC framework.  Each method name is prefixed with a double underscore "__".`,
+		Methods: []rpc.MethodDesc{
+			{
+				Name:      "Glob",
+				Doc:       "Glob returns all entries matching the pattern.",
+				InArgs:    []rpc.ArgDesc{{Name: "pattern", Doc: ""}},
+				OutStream: rpc.ArgDesc{Doc: "Streams matching entries back to the client."},
+			},
+			{
+				Name: "MethodSignature",
+				Doc:  "MethodSignature returns the signature for the given method.",
+				InArgs: []rpc.ArgDesc{{
+					Name: "method",
+					Doc:  "Method name whose signature will be returned.",
+				}},
+				OutArgs: []rpc.ArgDesc{{
+					Doc: "Method signature for the given method.",
+				}},
+			},
+			{
+				Name: "Signature",
+				Doc:  "Signature returns all interface signatures implemented by the object.",
+				OutArgs: []rpc.ArgDesc{{
+					Doc: "All interface signatures implemented by the object.",
+				}},
+			},
+		},
+	}}
+}
+
+func (r *reservedMethods) Signature(ctx *context.T, call rpc.ServerCall) ([]signature.Interface, error) {
+	suffix := call.Suffix()
+	disp := r.dispNormal
+	if naming.IsReserved(suffix) {
+		disp = r.dispReserved
+	}
+	if disp == nil {
+		return nil, verror.New(verror.ErrUnknownSuffix, ctx, suffix)
+	}
+	obj, _, err := disp.Lookup(suffix)
+	switch {
+	case err != nil:
+		return nil, err
+	case obj == nil:
+		return nil, verror.New(verror.ErrUnknownSuffix, ctx, suffix)
+	}
+	invoker, err := objectToInvoker(obj)
+	if err != nil {
+		return nil, err
+	}
+	sig, err := invoker.Signature(ctx, call)
+	if err != nil {
+		return nil, err
+	}
+	// Append the reserved methods.  We wait until now to add the "__" prefix to
+	// each method, so that we can use the regular ReflectInvoker.Signature logic.
+	rsig, err := r.selfInvoker.Signature(ctx, call)
+	if err != nil {
+		return nil, err
+	}
+	for i := range rsig {
+		for j := range rsig[i].Methods {
+			rsig[i].Methods[j].Name = "__" + rsig[i].Methods[j].Name
+		}
+	}
+	return signature.CleanInterfaces(append(sig, rsig...)), nil
+}
+
+func (r *reservedMethods) MethodSignature(ctx *context.T, call rpc.ServerCall, method string) (signature.Method, error) {
+	// Reserved methods use our self invoker to describe our own methods.
+	if naming.IsReserved(method) {
+		return r.selfInvoker.MethodSignature(ctx, call, naming.StripReserved(method))
+	}
+
+	suffix := call.Suffix()
+	disp := r.dispNormal
+	if naming.IsReserved(suffix) {
+		disp = r.dispReserved
+	}
+	if disp == nil {
+		return signature.Method{}, verror.New(verror.ErrUnknownMethod, ctx, rpc.ReservedMethodSignature)
+	}
+	obj, _, err := disp.Lookup(suffix)
+	switch {
+	case err != nil:
+		return signature.Method{}, err
+	case obj == nil:
+		return signature.Method{}, verror.New(verror.ErrUnknownMethod, ctx, rpc.ReservedMethodSignature)
+	}
+	invoker, err := objectToInvoker(obj)
+	if err != nil {
+		return signature.Method{}, err
+	}
+	// TODO(toddw): Decide if we should hide the method signature if the
+	// caller doesn't have access to call it.
+	return invoker.MethodSignature(ctx, call, method)
+}
+
+func (r *reservedMethods) Glob(ctx *context.T, call rpc.StreamServerCall, pattern string) error {
+	// Copy the original call to shield ourselves from changes the flowServer makes.
+	glob := globInternal{r.dispNormal, r.dispReserved, call.Suffix()}
+	return glob.Glob(ctx, call, pattern)
+}
+
+// globInternal handles ALL the Glob requests received by a server and
+// constructs a response from the state of internal server objects and the
+// service objects.
+//
+// Internal objects exist only at the root of the server and have a name that
+// starts with a double underscore ("__"). They are only visible in the Glob
+// response if the double underscore is explicitly part of the pattern, e.g.
+// "".Glob("__*/*"), or "".Glob("__debug/...").
+//
+// Service objects may choose to implement either AllGlobber or ChildrenGlobber.
+// AllGlobber is more flexible, but ChildrenGlobber is simpler to implement and
+// less prone to errors.
+//
+// If an object implements AllGlobber, it must be able to handle recursive
+// patterns for the entire namespace below the receiver object, i.e.
+// "a/b".Glob("...") must return the names of all the objects under "a/b".
+//
+// If an object implements ChildrenGlobber, it must provide the names of the
+// receiver's immediate children, or a non-nil error if the receiver doesn't
+// exist.
+//
+// globInternal constructs the Glob response by internally accessing the
+// AllGlobber or ChildrenGlobber interface of objects as many times as needed.
+//
+// Before accessing an object, globInternal ensures that the requester is
+// authorized to access it. Internal objects require access.Debug. Service
+// objects require access.Resolve.
+type globInternal struct {
+	dispNormal   rpc.Dispatcher
+	dispReserved rpc.Dispatcher
+	receiver     string
+}
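+
+// As an illustration of the ChildrenGlobber contract described above, a
+// minimal hypothetical implementation might stream its fixed children and
+// close the channel (the receiver type and names are invented for this
+// sketch; the authoritative interface is defined in v.io/v23/rpc):
+//
+//	func (o *fixedNode) GlobChildren__(ctx *context.T, call rpc.ServerCall) (<-chan string, error) {
+//		ch := make(chan string, 2)
+//		ch <- "child1" // immediate children only; names must not contain "/"
+//		ch <- "child2"
+//		close(ch)
+//		return ch, nil
+//	}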
+
+// The maximum depth of recursion in Glob. We only count recursion levels
+// associated with a recursive glob pattern, e.g. a pattern like "..." will be
+// allowed to recurse up to 10 levels, but "*/*/*/*/..." will go up to 14
+// levels.
+const maxRecursiveGlobDepth = 10
+
+func (i *globInternal) Glob(ctx *context.T, call rpc.StreamServerCall, pattern string) error {
+	vlog.VI(3).Infof("rpc Glob: Incoming request: %q.Glob(%q)", i.receiver, pattern)
+	g, err := glob.Parse(pattern)
+	if err != nil {
+		return err
+	}
+	disp := i.dispNormal
+	tags := []*vdl.Value{vdl.ValueOf(access.Resolve)}
+	if naming.IsReserved(i.receiver) || (i.receiver == "" && naming.IsReserved(pattern)) {
+		disp = i.dispReserved
+		tags = []*vdl.Value{vdl.ValueOf(access.Debug)}
+	}
+	if disp == nil {
+		return reserved.NewErrGlobNotImplemented(ctx)
+	}
+	call = callWithMethodTags(ctx, call, tags)
+
+	type gState struct {
+		name  string
+		glob  *glob.Glob
+		depth int
+	}
+	queue := []gState{{glob: g}}
+
+	someMatchesOmitted := false
+	for len(queue) != 0 {
+		select {
+		case <-ctx.Done():
+			// RPC timed out or was canceled.
+			return nil
+		default:
+		}
+		state := queue[0]
+		queue = queue[1:]
+
+		subcall := callWithSuffix(ctx, call, naming.Join(i.receiver, state.name))
+		suffix := subcall.Suffix()
+		if state.depth > maxRecursiveGlobDepth {
+			vlog.Errorf("rpc Glob: exceeded recursion limit (%d): %q", maxRecursiveGlobDepth, suffix)
+			call.Send(naming.GlobReplyError{
+				naming.GlobError{Name: state.name, Error: reserved.NewErrGlobMaxRecursionReached(ctx)},
+			})
+			continue
+		}
+		obj, auth, err := disp.Lookup(suffix)
+		if err != nil {
+			vlog.VI(3).Infof("rpc Glob: Lookup failed for %q: %v", suffix, err)
+			call.Send(naming.GlobReplyError{
+				naming.GlobError{Name: state.name, Error: verror.Convert(verror.ErrNoExist, ctx, err)},
+			})
+			continue
+		}
+		if obj == nil {
+			vlog.VI(3).Infof("rpc Glob: object not found for %q", suffix)
+			call.Send(naming.GlobReplyError{
+				naming.GlobError{Name: state.name, Error: verror.New(verror.ErrNoExist, ctx, "nil object")},
+			})
+			continue
+		}
+
+		// Verify that the requester is authorized for the current object.
+		if err := authorize(ctx, call.Security(), auth); err != nil {
+			someMatchesOmitted = true
+			vlog.VI(3).Infof("rpc Glob: client is not authorized for %q: %v", suffix, err)
+			continue
+		}
+
+		// If the object implements both AllGlobber and ChildrenGlobber, we'll
+		// use AllGlobber.
+		invoker, err := objectToInvoker(obj)
+		if err != nil {
+			vlog.VI(3).Infof("rpc Glob: object for %q cannot be converted to invoker: %v", suffix, err)
+			call.Send(naming.GlobReplyError{
+				naming.GlobError{Name: state.name, Error: verror.Convert(verror.ErrInternal, ctx, err)},
+			})
+			continue
+		}
+		gs := invoker.Globber()
+		if gs == nil || (gs.AllGlobber == nil && gs.ChildrenGlobber == nil) {
+			if state.glob.Len() == 0 {
+				subcall.Send(naming.GlobReplyEntry{naming.MountEntry{Name: state.name, IsLeaf: true}})
+			} else {
+				subcall.Send(naming.GlobReplyError{
+					naming.GlobError{Name: state.name, Error: reserved.NewErrGlobNotImplemented(ctx)},
+				})
+			}
+			continue
+		}
+		if gs.AllGlobber != nil {
+			vlog.VI(3).Infof("rpc Glob: %q implements AllGlobber", suffix)
+			ch, err := gs.AllGlobber.Glob__(ctx, subcall, state.glob.String())
+			if err != nil {
+				vlog.VI(3).Infof("rpc Glob: %q.Glob(%q) failed: %v", suffix, state.glob, err)
+				subcall.Send(naming.GlobReplyError{naming.GlobError{Name: state.name, Error: verror.Convert(verror.ErrInternal, ctx, err)}})
+				continue
+			}
+			if ch == nil {
+				continue
+			}
+			for gr := range ch {
+				switch v := gr.(type) {
+				case naming.GlobReplyEntry:
+					v.Value.Name = naming.Join(state.name, v.Value.Name)
+					subcall.Send(v)
+				case naming.GlobReplyError:
+					v.Value.Name = naming.Join(state.name, v.Value.Name)
+					subcall.Send(v)
+				}
+			}
+			continue
+		}
+		vlog.VI(3).Infof("rpc Glob: %q implements ChildrenGlobber", suffix)
+		children, err := gs.ChildrenGlobber.GlobChildren__(ctx, subcall)
+		// The requested object doesn't exist.
+		if err != nil {
+			subcall.Send(naming.GlobReplyError{naming.GlobError{Name: state.name, Error: verror.Convert(verror.ErrInternal, ctx, err)}})
+			continue
+		}
+		// The glob pattern matches the current object.
+		if state.glob.Len() == 0 {
+			subcall.Send(naming.GlobReplyEntry{naming.MountEntry{Name: state.name}})
+		}
+		// The current object has no children.
+		if children == nil {
+			continue
+		}
+		depth := state.depth
+		// This is a recursive pattern. Make sure we don't recurse forever.
+		if state.glob.Len() == 0 {
+			depth++
+		}
+		for child := range children {
+			if len(child) == 0 || strings.Contains(child, "/") {
+				vlog.Errorf("rpc Glob: %q.GlobChildren__() sent an invalid child name: %q", suffix, child)
+				continue
+			}
+			if ok, _, left := state.glob.MatchInitialSegment(child); ok {
+				next := naming.Join(state.name, child)
+				queue = append(queue, gState{next, left, depth})
+			}
+		}
+	}
+	if someMatchesOmitted {
+		call.Send(naming.GlobReplyError{naming.GlobError{Error: reserved.NewErrGlobMatchesOmitted(ctx)}})
+	}
+	return nil
+}
+
+// derivedServerCall allows us to derive calls with slightly different properties,
+// useful for our various special-cased reserved methods.
+type derivedServerCall struct {
+	rpc.StreamServerCall
+	suffix   string
+	security security.Call
+}
+
+func callWithSuffix(ctx *context.T, src rpc.StreamServerCall, suffix string) rpc.StreamServerCall {
+	sec := securityCallWithSuffix(src.Security(), suffix)
+	return &derivedServerCall{src, suffix, sec}
+}
+
+func callWithMethodTags(ctx *context.T, src rpc.StreamServerCall, tags []*vdl.Value) rpc.StreamServerCall {
+	sec := securityCallWithMethodTags(src.Security(), tags)
+	return &derivedServerCall{src, src.Suffix(), sec}
+}
+
+func (c *derivedServerCall) Suffix() string {
+	defer vlog.LogCall()() // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	return c.suffix
+}
+func (c *derivedServerCall) Security() security.Call {
+	defer vlog.LogCall()() // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	return c.security
+}
+
+type derivedSecurityCall struct {
+	security.Call
+	suffix     string
+	methodTags []*vdl.Value
+}
+
+func securityCallWithSuffix(src security.Call, suffix string) security.Call {
+	return &derivedSecurityCall{src, suffix, src.MethodTags()}
+}
+
+func securityCallWithMethodTags(src security.Call, tags []*vdl.Value) security.Call {
+	return &derivedSecurityCall{src, src.Suffix(), tags}
+}
+
+func (c *derivedSecurityCall) Suffix() string {
+	defer vlog.LogCall()() // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	return c.suffix
+}
+func (c *derivedSecurityCall) MethodTags() []*vdl.Value {
+	defer vlog.LogCall()() // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	return c.methodTags
+}
diff --git a/runtime/internal/rpc/resolve_internal_test.go b/runtime/internal/rpc/resolve_internal_test.go
new file mode 100644
index 0000000..7236ceb
--- /dev/null
+++ b/runtime/internal/rpc/resolve_internal_test.go
@@ -0,0 +1,13 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+	"v.io/v23/rpc"
+)
+
+func InternalServerResolveToEndpoint(s rpc.Server, name string) (string, error) {
+	return s.(*server).resolveToEndpoint(name)
+}
diff --git a/runtime/internal/rpc/resolve_test.go b/runtime/internal/rpc/resolve_test.go
new file mode 100644
index 0000000..73ace16
--- /dev/null
+++ b/runtime/internal/rpc/resolve_test.go
@@ -0,0 +1,162 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc_test
+
+import (
+	"flag"
+	"fmt"
+	"io"
+	"os"
+	"testing"
+	"time"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/options"
+	"v.io/v23/rpc"
+
+	"v.io/x/ref/lib/flags"
+	"v.io/x/ref/runtime/factories/fake"
+	"v.io/x/ref/runtime/internal"
+	"v.io/x/ref/runtime/internal/lib/appcycle"
+	inaming "v.io/x/ref/runtime/internal/naming"
+	irpc "v.io/x/ref/runtime/internal/rpc"
+	grt "v.io/x/ref/runtime/internal/rt"
+	"v.io/x/ref/services/mounttable/mounttablelib"
+	"v.io/x/ref/test/expect"
+	"v.io/x/ref/test/modules"
+)
+
+var commonFlags *flags.Flags
+
+func init() {
+	commonFlags = flags.CreateAndRegister(flag.CommandLine, flags.Runtime)
+	if err := internal.ParseFlags(commonFlags); err != nil {
+		panic(err)
+	}
+}
+
+func setupRuntime() {
+	ac := appcycle.New()
+
+	listenSpec := rpc.ListenSpec{Addrs: rpc.ListenAddrs{{"tcp", "127.0.0.1:0"}}}
+
+	rootctx, rootcancel := context.RootContext()
+	ctx, cancel := context.WithCancel(rootctx)
+	runtime, ctx, sd, err := grt.Init(ctx,
+		ac,
+		nil,
+		&listenSpec,
+		nil,
+		"",
+		commonFlags.RuntimeFlags(),
+		nil)
+	if err != nil {
+		panic(err)
+	}
+	shutdown := func() {
+		ac.Shutdown()
+		cancel()
+		sd()
+		rootcancel()
+	}
+	fake.InjectRuntime(runtime, ctx, shutdown)
+}
+
+func rootMountTable(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
+	setupRuntime()
+	ctx, shutdown := v23.Init()
+	defer shutdown()
+
+	lspec := v23.GetListenSpec(ctx)
+	server, err := v23.NewServer(ctx, options.ServesMountTable(true))
+	if err != nil {
+		return fmt.Errorf("root failed: %v", err)
+	}
+	mp := ""
+	mt, err := mounttablelib.NewMountTableDispatcher("", "", "mounttable")
+	if err != nil {
+		return fmt.Errorf("mounttablelib.NewMountTableDispatcher failed: %s", err)
+	}
+	eps, err := server.Listen(lspec)
+	if err != nil {
+		return fmt.Errorf("server.Listen failed: %s", err)
+	}
+	if err := server.ServeDispatcher(mp, mt); err != nil {
+		return fmt.Errorf("root failed: %s", err)
+	}
+	fmt.Fprintf(stdout, "PID=%d\n", os.Getpid())
+	for _, ep := range eps {
+		fmt.Fprintf(stdout, "MT_NAME=%s\n", ep.Name())
+	}
+	modules.WaitForEOF(stdin)
+	return nil
+}
+
+func startMT(t *testing.T, sh *modules.Shell) string {
+	h, err := sh.Start("rootMountTable", nil)
+	if err != nil {
+		t.Fatalf("unexpected error for root mt: %s", err)
+	}
+	s := expect.NewSession(t, h.Stdout(), time.Minute)
+	s.ExpectVar("PID")
+	return s.ExpectVar("MT_NAME")
+}
+
+func TestResolveToEndpoint(t *testing.T) {
+	setupRuntime()
+	ctx, shutdown := v23.Init()
+	defer shutdown()
+	sh, err := modules.NewShell(ctx, nil, testing.Verbose(), t)
+	if err != nil {
+		t.Fatalf("modules.NewShell failed: %s", err)
+	}
+	defer sh.Cleanup(nil, nil)
+	root := startMT(t, sh)
+
+	ns := v23.GetNamespace(ctx)
+	ns.SetRoots(root)
+
+	proxyEp, _ := inaming.NewEndpoint("proxy.v.io:123")
+	proxyEpStr := proxyEp.String()
+	proxyAddr := naming.JoinAddressName(proxyEpStr, "")
+	if err := ns.Mount(ctx, "proxy", proxyAddr, time.Hour); err != nil {
+		t.Fatalf("ns.Mount failed: %s", err)
+	}
+
+	server, err := v23.NewServer(ctx)
+	if err != nil {
+		t.Fatalf("runtime.NewServer failed: %s", err)
+	}
+
+	notfound := fmt.Errorf("not found")
+	testcases := []struct {
+		address string
+		result  string
+		err     error
+	}{
+		{"/proxy.v.io:123", proxyEpStr, nil},
+		{"proxy.v.io:123", "", notfound},
+		{"proxy", proxyEpStr, nil},
+		{naming.JoinAddressName(root, "proxy"), proxyEpStr, nil},
+		{proxyAddr, proxyEpStr, nil},
+		{proxyEpStr, "", notfound},
+		{"unknown", "", notfound},
+	}
+	for _, tc := range testcases {
+		result, err := irpc.InternalServerResolveToEndpoint(server, tc.address)
+		if (err == nil) != (tc.err == nil) {
+			t.Errorf("Unexpected err for %q. Got %v, expected %v", tc.address, err, tc.err)
+		}
+		if result != tc.result {
+			t.Errorf("Unexpected result for %q. Got %q, expected %q", tc.address, result, tc.result)
+		}
+	}
+	if t.Failed() {
+		t.Logf("proxyEpStr: %v", proxyEpStr)
+		t.Logf("proxyAddr: %v", proxyAddr)
+	}
+}
diff --git a/runtime/internal/rpc/results_store.go b/runtime/internal/rpc/results_store.go
new file mode 100644
index 0000000..1ecabd3
--- /dev/null
+++ b/runtime/internal/rpc/results_store.go
@@ -0,0 +1,126 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+	"container/heap"
+	"sync"
+)
+
+const (
+	// TODO(cnicolaou): what are good initial values? Large servers want
+	// large values, most won't.
+	initialResults           = 1000
+	initialOutOfOrderResults = 100
+)
+
+type results []interface{}
+
+// Implement heap.Interface to maintain an ordered min-heap of uint64s.
+type intHeap []uint64
+
+func (h intHeap) Len() int           { return len(h) }
+func (h intHeap) Less(i, j int) bool { return h[i] < h[j] }
+func (h intHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
+
+func (h *intHeap) Push(x interface{}) {
+	// Push and Pop use pointer receivers because they modify the slice's length,
+	// not just its contents.
+	*h = append(*h, x.(uint64))
+}
+
+func (h *intHeap) Pop() interface{} {
+	old := *h
+	n := len(old)
+	x := old[n-1]
+	*h = old[0 : n-1]
+	return x
+}
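+
+// For example, after heap.Init(&h), pushing 3, 1, 2 with heap.Push(&h, ...)
+// and draining with heap.Pop(&h) yields 1, 2, 3: intHeap is a min-heap, and
+// resultsStore below relies on this to delete entries in ascending key order.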
+
+// resultsStore is used to store the results of previously exited RPCs
+// until the client indicates that it has received those results and hence
+// the server no longer needs to store them. Store entries are added
+// one at a time, but the client indicates that it has received entries up to
+// a given value and that all entries with keys lower than that can be deleted.
+// Retrieving values is complicated by the fact that requests may arrive
+// out of order and hence one RPC may have to wait for another to complete
+// in order to access its stored results. A separate map of channels is
+// used to implement this synchronization.
+// TODO(cnicolaou): Servers should protect themselves from badly behaved
+// clients by refusing to allocate beyond a certain number of results.
+type resultsStore struct {
+	sync.Mutex
+	store map[uint64]results
+	chans map[uint64]chan struct{}
+	keys  intHeap
+	// TODO(cnicolaou): Should addEntry/waitForEntry return an error when
+	// the calls do not match the frontier?
+	frontier uint64 // results with index less than this have been removed.
+}
+
+func newStore() *resultsStore {
+	r := &resultsStore{
+		store: make(map[uint64]results, initialResults),
+		chans: make(map[uint64]chan struct{}, initialOutOfOrderResults),
+	}
+	heap.Init(&r.keys)
+	return r
+}
+
+func (rs *resultsStore) addEntry(key uint64, data results) {
+	rs.Lock()
+	if _, present := rs.store[key]; !present && rs.frontier <= key {
+		rs.store[key] = data
+		heap.Push(&rs.keys, key)
+	}
+	if ch, present := rs.chans[key]; present {
+		close(ch)
+		delete(rs.chans, key)
+	}
+	rs.Unlock()
+}
+
+func (rs *resultsStore) removeEntriesTo(to uint64) {
+	rs.Lock()
+	if rs.frontier > to {
+		rs.Unlock()
+		return
+	}
+	rs.frontier = to + 1
+	for rs.keys.Len() > 0 && to >= rs.keys[0] {
+		k := heap.Pop(&rs.keys).(uint64)
+		delete(rs.store, k)
+		if ch, present := rs.chans[k]; present {
+			close(ch)
+			delete(rs.chans, k)
+		}
+	}
+	rs.Unlock()
+}
+
+func (rs *resultsStore) waitForEntry(key uint64) results {
+	rs.Lock()
+	if r, present := rs.store[key]; present {
+		rs.Unlock()
+		return r
+	}
+	if key < rs.frontier {
+		rs.Unlock()
+		return nil
+	}
+	// entry is not present, need to wait for it.
+	ch, present := rs.chans[key]
+	if !present {
+		heap.Push(&rs.keys, key)
+		ch = make(chan struct{}, 1)
+		rs.chans[key] = ch
+	}
+	rs.Unlock()
+	<-ch
+	rs.Lock()
+	defer rs.Unlock()
+	delete(rs.chans, key) // Allow the channel to be GC'ed.
+	return rs.store[key]  // This may be nil if the entry has been removed
+}
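+
+// An illustrative sequence tying the store operations together:
+//
+//	rs := newStore()
+//	go func() { rs.addEntry(7, results{"ok"}) }()
+//	r := rs.waitForEntry(7)  // blocks until addEntry(7, ...) runs
+//	rs.removeEntriesTo(7)    // client has acknowledged keys <= 7
+//	_ = rs.waitForEntry(7)   // returns nil: 7 is now below the frontier
+//	_ = r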
diff --git a/runtime/internal/rpc/results_store_test.go b/runtime/internal/rpc/results_store_test.go
new file mode 100644
index 0000000..b0ee8eb
--- /dev/null
+++ b/runtime/internal/rpc/results_store_test.go
@@ -0,0 +1,144 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+	"sort"
+	"sync"
+	"testing"
+
+	"v.io/x/ref/test/testutil"
+)
+
+func randomKeys() []uint64 {
+	n := (testutil.Intn(256*10) / 10) + 256
+	k := make([]uint64, n)
+	for i := 0; i < n; i++ {
+		k[i] = uint64(testutil.Int63())
+	}
+	return k
+}
+
+type keySlice []uint64
+
+func (p keySlice) Len() int           { return len(p) }
+func (p keySlice) Less(i, j int) bool { return p[i] < p[j] }
+func (p keySlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+func (p keySlice) Sort()              { sort.Sort(p) }
+
+func TestStoreRandom(t *testing.T) {
+	store := newStore()
+	keys := randomKeys()
+
+	for i := 0; i < len(keys); i++ {
+		r := []interface{}{i}
+		store.addEntry(keys[i], r)
+	}
+	if len(store.store) != len(keys) {
+		t.Errorf("num stored entries: got %d, want %d", len(store.store), len(keys))
+	}
+	for i := 0; i < len(keys); i++ {
+		// Each call to removeEntriesTo will remove an unknown number of entries
+		// depending on the original randomized values of the keys.
+		store.removeEntriesTo(keys[i])
+	}
+	if len(store.store) != 0 {
+		t.Errorf("store is not empty: %d", len(store.store))
+	}
+}
+
+func TestStoreOrdered(t *testing.T) {
+	store := newStore()
+	keys := randomKeys()
+
+	for i := 0; i < len(keys); i++ {
+		r := []interface{}{i}
+		store.addEntry(keys[i], r)
+	}
+	if len(store.store) != len(keys) {
+		t.Errorf("num stored entries: got %d, want %d", len(store.store), len(keys))
+	}
+
+	(keySlice(keys)).Sort()
+	l := len(keys)
+	for i := 0; i < len(keys); i++ {
+		store.removeEntriesTo(keys[i])
+		l--
+		if len(store.store) != l {
+			t.Errorf("failed to remove a single item(%d): %d != %d", keys[i], len(store.store), l)
+		}
+	}
+	if len(store.store) != 0 {
+		t.Errorf("store is not empty: %d", len(store.store))
+	}
+}
+
+func TestStoreWaitForEntry(t *testing.T) {
+	store := newStore()
+	store.addEntry(1, []interface{}{"1"})
+	r := store.waitForEntry(1)
+	if r[0].(string) != "1" {
+		t.Errorf("Got: %q, Want: %q", r[0], "1")
+	}
+	ch := make(chan string)
+	go func(ch chan string) {
+		r := store.waitForEntry(2)
+		ch <- r[0].(string)
+	}(ch)
+	store.addEntry(2, []interface{}{"2"})
+	if result := <-ch; result != "2" {
+		t.Errorf("Got: %q, Want: %q", r[0], "2")
+	}
+}
+
+func TestStoreWaitForEntryRandom(t *testing.T) {
+	store := newStore()
+	keys := randomKeys()
+	var wg sync.WaitGroup
+	for _, k := range keys {
+		wg.Add(1)
+		go func(t *testing.T, id uint64) {
+			r := store.waitForEntry(id)
+			if r[0].(uint64) != id {
+				t.Errorf("Got: %d, Want: %d", r[0].(uint64), id)
+			}
+			wg.Done()
+		}(t, k)
+	}
+	(keySlice(keys)).Sort()
+	for _, k := range keys {
+		store.addEntry(k, []interface{}{k})
+	}
+	wg.Wait()
+}
+
+func TestStoreWaitForRemovedEntry(t *testing.T) {
+	store := newStore()
+	keys := randomKeys()
+	var wg sync.WaitGroup
+	for _, k := range keys {
+		wg.Add(1)
+		go func(t *testing.T, id uint64) {
+			if r := store.waitForEntry(id); r != nil {
+				t.Errorf("Got %v, want nil", r)
+			}
+			wg.Done()
+		}(t, k)
+	}
+	(keySlice(keys)).Sort()
+	for _, k := range keys {
+		store.removeEntriesTo(k)
+	}
+	wg.Wait()
+}
+
+func TestStoreWaitForOldEntry(t *testing.T) {
+	store := newStore()
+	store.addEntry(1, []interface{}{"result"})
+	store.removeEntriesTo(1)
+	if got := store.waitForEntry(1); got != nil {
+		t.Errorf("Got %T=%v, want nil", got, got)
+	}
+}
diff --git a/runtime/internal/rpc/server.go b/runtime/internal/rpc/server.go
new file mode 100644
index 0000000..3505ef5
--- /dev/null
+++ b/runtime/internal/rpc/server.go
@@ -0,0 +1,1392 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"reflect"
+	"strings"
+	"sync"
+	"time"
+
+	"v.io/x/lib/netstate"
+	"v.io/x/lib/pubsub"
+	"v.io/x/lib/vlog"
+
+	"v.io/v23/context"
+	"v.io/v23/i18n"
+	"v.io/v23/namespace"
+	"v.io/v23/naming"
+	"v.io/v23/options"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+	"v.io/v23/security/access"
+	"v.io/v23/vdl"
+	"v.io/v23/verror"
+	"v.io/v23/vom"
+	"v.io/v23/vtrace"
+
+	"v.io/x/ref/lib/stats"
+	"v.io/x/ref/runtime/internal/lib/publisher"
+	inaming "v.io/x/ref/runtime/internal/naming"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/manager"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+)
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errResponseEncoding          = reg(".errResponseEncoding", "failed to encode RPC response {3} <-> {4}{:5}")
+	errResultEncoding            = reg(".errResultEncoding", "failed to encode result #{3} [{4}]{:5}")
+	errFailedToResolveToEndpoint = reg(".errFailedToResolveToEndpoint", "failed to resolve {3} to an endpoint")
+	errFailedToResolveProxy      = reg(".errFailedToResolveProxy", "failed to resolve proxy {3}{:4}")
+	errFailedToListenForProxy    = reg(".errFailedToListenForProxy", "failed to listen on {3}{:4}")
+	errInternalTypeConversion    = reg(".errInternalTypeConversion", "failed to convert {3} to v.io/x/ref/runtime/internal/naming.Endpoint")
+	errFailedToParseIP           = reg(".errFailedToParseIP", "failed to parse {3} as an IP host")
+	errUnexpectedSuffix          = reg(".errUnexpectedSuffix", "suffix {3} was not expected because either the server has the option IsLeaf set to true or it served an object and not a dispatcher")
+)
+
+// state for each requested listen address
+type listenState struct {
+	protocol, address string
+	ln                stream.Listener
+	lep               naming.Endpoint
+	lnerr, eperr      error
+	roaming           bool
+	// We keep track of all of the endpoints, the port and a copy of
+	// the original listen endpoint for use with roaming network changes.
+	ieps     []*inaming.Endpoint // list of currently active eps
+	port     string              // port to use for creating new eps
+	protoIEP inaming.Endpoint    // endpoint to use as template for new eps (includes rid, versions etc)
+}
+
+// state for each requested proxy
+type proxyState struct {
+	endpoint naming.Endpoint
+	err      error
+}
+
+type dhcpState struct {
+	name      string
+	publisher *pubsub.Publisher
+	stream    *pubsub.Stream
+	ch        chan pubsub.Setting // channel to receive dhcp settings over
+	err       error               // error status.
+	watchers  map[chan<- rpc.NetworkChange]struct{}
+}
+
+type server struct {
+	sync.Mutex
+	// context used by the server to make internal RPCs, error messages etc.
+	ctx               *context.T
+	cancel            context.CancelFunc   // function to cancel the above context.
+	state             serverState          // track state of the server.
+	streamMgr         stream.Manager       // stream manager to listen for new flows.
+	publisher         publisher.Publisher  // publisher to publish mounttable mounts.
+	dc                vc.DischargeClient   // fetches discharges of blessings
+	listenerOpts      []stream.ListenerOpt // listener opts for Listen.
+	settingsPublisher *pubsub.Publisher    // pubsub publisher for dhcp
+	settingsName      string               // pubsub stream name for dhcp
+	dhcpState         *dhcpState           // dhcpState, nil if not using dhcp
+	principal         security.Principal
+	blessings         security.Blessings
+
+	// maps that contain state on listeners.
+	listenState map[*listenState]struct{}
+	listeners   map[stream.Listener]struct{}
+
+	// state of proxies keyed by the name of the proxy
+	proxies map[string]proxyState
+
+	disp               rpc.Dispatcher // dispatcher to serve RPCs
+	dispReserved       rpc.Dispatcher // dispatcher for reserved methods
+	active             sync.WaitGroup // active goroutines we've spawned.
+	stoppedChan        chan struct{}  // closed when the server has been stopped.
+	preferredProtocols []string       // protocols to use when resolving proxy name to endpoint.
+	// We cache the IP networks on the device since it is not that cheap to read
+	// network interfaces through os syscall.
+	// TODO(jhahn): Add monitoring the network interface changes.
+	ipNets           []*net.IPNet
+	ns               namespace.T
+	servesMountTable bool
+	isLeaf           bool
+
+	// TODO(cnicolaou): add roaming stats to rpcStats
+	stats *rpcStats // stats for this server.
+}
+
+type serverState int
+
+const (
+	initialized serverState = iota
+	listening
+	serving
+	publishing
+	stopping
+	stopped
+)
+
+// Simple state machine for the server implementation.
+type next map[serverState]bool
+type transitions map[serverState]next
+
+var (
+	states = transitions{
+		initialized: next{listening: true, stopping: true},
+		listening:   next{listening: true, serving: true, stopping: true},
+		serving:     next{publishing: true, stopping: true},
+		publishing:  next{publishing: true, stopping: true},
+		stopping:    next{},
+		stopped:     next{},
+	}
+
+	externalStates = map[serverState]rpc.ServerState{
+		initialized: rpc.ServerInit,
+		listening:   rpc.ServerActive,
+		serving:     rpc.ServerActive,
+		publishing:  rpc.ServerActive,
+		stopping:    rpc.ServerStopping,
+		stopped:     rpc.ServerStopped,
+	}
+)
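+
+// For example, states[initialized][listening] is true, so Listen may be
+// called on a freshly created server, while states[serving][listening] is
+// false, so calling Listen after serving has begun is rejected by the
+// allowed transition check below.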
+
+func (s *server) allowed(next serverState, method string) error {
+	if states[s.state][next] {
+		s.state = next
+		return nil
+	}
+	return verror.New(verror.ErrBadState, s.ctx, fmt.Sprintf("%s called out of order or more than once", method))
+}
+
+func (s *server) isStopState() bool {
+	return s.state == stopping || s.state == stopped
+}
+
+var _ rpc.Server = (*server)(nil)
+
+func InternalNewServer(
+	ctx *context.T,
+	streamMgr stream.Manager,
+	ns namespace.T,
+	settingsPublisher *pubsub.Publisher,
+	settingsName string,
+	client rpc.Client,
+	principal security.Principal,
+	opts ...rpc.ServerOpt) (rpc.Server, error) {
+	ctx, cancel := context.WithRootCancel(ctx)
+	ctx, _ = vtrace.WithNewSpan(ctx, "NewServer")
+	statsPrefix := naming.Join("rpc", "server", "routing-id", streamMgr.RoutingID().String())
+	s := &server{
+		ctx:               ctx,
+		cancel:            cancel,
+		streamMgr:         streamMgr,
+		principal:         principal,
+		publisher:         publisher.New(ctx, ns, publishPeriod),
+		listenState:       make(map[*listenState]struct{}),
+		listeners:         make(map[stream.Listener]struct{}),
+		proxies:           make(map[string]proxyState),
+		stoppedChan:       make(chan struct{}),
+		ipNets:            ipNetworks(),
+		ns:                ns,
+		stats:             newRPCStats(statsPrefix),
+		settingsPublisher: settingsPublisher,
+		settingsName:      settingsName,
+	}
+	var (
+		dischargeExpiryBuffer = vc.DefaultServerDischargeExpiryBuffer
+		securityLevel         options.SecurityLevel
+	)
+	for _, opt := range opts {
+		switch opt := opt.(type) {
+		case stream.ListenerOpt:
+			// Collect all ServerOpts that are also ListenerOpts.
+			s.listenerOpts = append(s.listenerOpts, opt)
+			switch opt := opt.(type) {
+			case vc.DischargeExpiryBuffer:
+				dischargeExpiryBuffer = time.Duration(opt)
+			}
+		case options.ServerBlessings:
+			s.blessings = opt.Blessings
+		case options.ServesMountTable:
+			s.servesMountTable = bool(opt)
+		case options.IsLeaf:
+			s.isLeaf = bool(opt)
+		case ReservedNameDispatcher:
+			s.dispReserved = opt.Dispatcher
+		case PreferredServerResolveProtocols:
+			s.preferredProtocols = []string(opt)
+		case options.SecurityLevel:
+			securityLevel = opt
+		}
+	}
+	if s.blessings.IsZero() && principal != nil {
+		s.blessings = principal.BlessingStore().Default()
+	}
+	if securityLevel == options.SecurityNone {
+		s.principal = nil
+		s.blessings = security.Blessings{}
+		s.dispReserved = nil
+	}
+	// Make the discharge client's expiry buffer shorter than the VC's so that
+	// we have fetched the discharges by the time the VC asks for them.
+	s.dc = InternalNewDischargeClient(ctx, client, dischargeExpiryBuffer-(5*time.Second))
+	s.listenerOpts = append(s.listenerOpts, s.dc)
+	s.listenerOpts = append(s.listenerOpts, vc.DialContext{ctx})
+	blessingsStatsName := naming.Join(statsPrefix, "security", "blessings")
+	// TODO(caprita): revisit printing the blessings with %s, and
+	// instead expose them as a list.
+	stats.NewString(blessingsStatsName).Set(fmt.Sprintf("%s", s.blessings))
+	if principal != nil {
+		stats.NewStringFunc(blessingsStatsName, func() string {
+			return fmt.Sprintf("%s (default)", principal.BlessingStore().Default())
+		})
+	}
+	return s, nil
+}
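+
+// A sketch of how the options above compose (values are hypothetical):
+// ServerOpts that are also ListenerOpts are forwarded to every listener
+// created by this server, e.g.
+//
+//	s, err := InternalNewServer(ctx, sm, ns, nil, "", client, principal,
+//		options.ServesMountTable(true),
+//		vc.DischargeExpiryBuffer(time.Minute))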
+
+func (s *server) Status() rpc.ServerStatus {
+	defer vlog.LogCall()() // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	status := rpc.ServerStatus{}
+	s.Lock()
+	defer s.Unlock()
+	status.State = externalStates[s.state]
+	status.ServesMountTable = s.servesMountTable
+	status.Mounts = s.publisher.Status()
+	status.Endpoints = []naming.Endpoint{}
+	for ls := range s.listenState {
+		if ls.eperr != nil {
+			status.Errors = append(status.Errors, ls.eperr)
+		}
+		if ls.lnerr != nil {
+			status.Errors = append(status.Errors, ls.lnerr)
+		}
+		for _, iep := range ls.ieps {
+			status.Endpoints = append(status.Endpoints, iep)
+		}
+	}
+	status.Proxies = make([]rpc.ProxyStatus, 0, len(s.proxies))
+	for k, v := range s.proxies {
+		status.Proxies = append(status.Proxies, rpc.ProxyStatus{k, v.endpoint, v.err})
+	}
+	return status
+}
+
+func (s *server) WatchNetwork(ch chan<- rpc.NetworkChange) {
+	defer vlog.LogCallf("ch=")("") // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	s.Lock()
+	defer s.Unlock()
+	if s.dhcpState != nil {
+		s.dhcpState.watchers[ch] = struct{}{}
+	}
+}
+
+func (s *server) UnwatchNetwork(ch chan<- rpc.NetworkChange) {
+	defer vlog.LogCallf("ch=")("") // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	s.Lock()
+	defer s.Unlock()
+	if s.dhcpState != nil {
+		delete(s.dhcpState.watchers, ch)
+	}
+}
+
+// resolveToEndpoint resolves an object name or address to an endpoint.
+func (s *server) resolveToEndpoint(address string) (string, error) {
+	var resolved *naming.MountEntry
+	var err error
+	if s.ns != nil {
+		if resolved, err = s.ns.Resolve(s.ctx, address); err != nil {
+			return "", err
+		}
+	} else {
+		// Fake a namespace resolution
+		resolved = &naming.MountEntry{Servers: []naming.MountedServer{
+			{Server: address},
+		}}
+	}
+	// An empty set of protocols means all protocols...
+	if resolved.Servers, err = filterAndOrderServers(resolved.Servers, s.preferredProtocols, s.ipNets); err != nil {
+		return "", err
+	}
+	for _, n := range resolved.Names() {
+		address, suffix := naming.SplitAddressName(n)
+		if suffix != "" {
+			continue
+		}
+		if ep, err := inaming.NewEndpoint(address); err == nil {
+			return ep.String(), nil
+		}
+	}
+	return "", verror.New(errFailedToResolveToEndpoint, s.ctx, address)
+}
+
+// createEndpoints creates appropriate inaming.Endpoint instances for
+// all of the externally accessible network addresses that can be used
+// to reach this server.
+func (s *server) createEndpoints(lep naming.Endpoint, chooser netstate.AddressChooser) ([]*inaming.Endpoint, string, bool, error) {
+	iep, ok := lep.(*inaming.Endpoint)
+	if !ok {
+		return nil, "", false, verror.New(errInternalTypeConversion, nil, fmt.Sprintf("%T", lep))
+	}
+	if !strings.HasPrefix(iep.Protocol, "tcp") &&
+		!strings.HasPrefix(iep.Protocol, "ws") {
+		// If not tcp, ws, or wsh, just return the endpoint we were given.
+		return []*inaming.Endpoint{iep}, "", false, nil
+	}
+	host, port, err := net.SplitHostPort(iep.Address)
+	if err != nil {
+		return nil, "", false, err
+	}
+	addrs, unspecified, err := netstate.PossibleAddresses(iep.Protocol, host, chooser)
+	if err != nil {
+		return nil, port, false, err
+	}
+
+	ieps := make([]*inaming.Endpoint, 0, len(addrs))
+	for _, addr := range addrs {
+		n, err := inaming.NewEndpoint(lep.String())
+		if err != nil {
+			return nil, port, false, err
+		}
+		n.IsMountTable = s.servesMountTable
+		n.Address = net.JoinHostPort(addr.String(), port)
+		ieps = append(ieps, n)
+	}
+	return ieps, port, unspecified, nil
+}
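+
+// For illustration (hypothetical addresses): if lep is tcp@0.0.0.0:8888 and
+// the chooser yields 192.168.1.5 and 10.0.0.5, createEndpoints returns two
+// endpoints that differ only in host, both on port 8888, with unspecified
+// set since 0.0.0.0 names no single interface.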
+
+func (s *server) Listen(listenSpec rpc.ListenSpec) ([]naming.Endpoint, error) {
+	defer vlog.LogCallf("listenSpec=")("") // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	useProxy := len(listenSpec.Proxy) > 0
+	if !useProxy && len(listenSpec.Addrs) == 0 {
+		return nil, verror.New(verror.ErrBadArg, s.ctx, "ListenSpec contains no proxy or addresses to listen on")
+	}
+
+	s.Lock()
+	defer s.Unlock()
+
+	if err := s.allowed(listening, "Listen"); err != nil {
+		return nil, err
+	}
+
+	// Start the proxy as early as possible, ignore duplicate requests
+	// for the same proxy.
+	if _, inuse := s.proxies[listenSpec.Proxy]; useProxy && !inuse {
+		// Pre-emptively fetch discharges on the blessings (they will be cached
+		// within s.dc for future calls).
+		// This shouldn't be required, but is a hack to reduce flakiness in
+		// JavaScript browser integration tests.
+		// See https://v.io/i/392
+		s.dc.PrepareDischarges(s.ctx, s.blessings.ThirdPartyCaveats(), security.DischargeImpetus{})
+		// We have a goroutine for listening on proxy connections.
+		s.active.Add(1)
+		go func() {
+			s.proxyListenLoop(listenSpec.Proxy)
+			s.active.Done()
+		}()
+	}
+
+	roaming := false
+	lnState := make([]*listenState, 0, len(listenSpec.Addrs))
+	for _, addr := range listenSpec.Addrs {
+		if len(addr.Address) > 0 {
+			// Listen if we have a local address to listen on.
+			ls := &listenState{
+				protocol: addr.Protocol,
+				address:  addr.Address,
+			}
+			ls.ln, ls.lep, ls.lnerr = s.streamMgr.Listen(addr.Protocol, addr.Address, s.principal, s.blessings, s.listenerOpts...)
+			lnState = append(lnState, ls)
+			if ls.lnerr != nil {
+				vlog.VI(2).Infof("Listen(%q, %q, ...) failed: %v", addr.Protocol, addr.Address, ls.lnerr)
+				continue
+			}
+			ls.ieps, ls.port, ls.roaming, ls.eperr = s.createEndpoints(ls.lep, listenSpec.AddressChooser)
+			if ls.roaming && ls.eperr == nil {
+				ls.protoIEP = *ls.lep.(*inaming.Endpoint)
+				roaming = true
+			}
+		}
+	}
+
+	found := false
+	for _, ls := range lnState {
+		if ls.ln != nil {
+			found = true
+			break
+		}
+	}
+	if !found && !useProxy {
+		return nil, verror.New(verror.ErrBadArg, s.ctx, "failed to create any listeners")
+	}
+
+	if roaming && s.dhcpState == nil && s.settingsPublisher != nil {
+		// Create a dhcp listener if we haven't already done so.
+		dhcp := &dhcpState{
+			name:      s.settingsName,
+			publisher: s.settingsPublisher,
+			watchers:  make(map[chan<- rpc.NetworkChange]struct{}),
+		}
+		s.dhcpState = dhcp
+		dhcp.ch = make(chan pubsub.Setting, 10)
+		dhcp.stream, dhcp.err = dhcp.publisher.ForkStream(dhcp.name, dhcp.ch)
+		if dhcp.err == nil {
+			// We have a goroutine to listen for dhcp changes.
+			s.active.Add(1)
+			go func() {
+				s.dhcpLoop(dhcp.ch)
+				s.active.Done()
+			}()
+		}
+	}
+
+	eps := make([]naming.Endpoint, 0, 10)
+	for _, ls := range lnState {
+		s.listenState[ls] = struct{}{}
+		if ls.ln != nil {
+			// We have a goroutine per listener to accept new flows.
+			// Each flow is served from its own goroutine.
+			s.active.Add(1)
+			go func(ln stream.Listener, ep naming.Endpoint) {
+				s.listenLoop(ln, ep)
+				s.active.Done()
+			}(ls.ln, ls.lep)
+		}
+
+		for _, iep := range ls.ieps {
+			eps = append(eps, iep)
+		}
+	}
+
+	return eps, nil
+}
+
+func (s *server) reconnectAndPublishProxy(proxy string) (*inaming.Endpoint, stream.Listener, error) {
+	resolved, err := s.resolveToEndpoint(proxy)
+	if err != nil {
+		return nil, nil, verror.New(errFailedToResolveProxy, s.ctx, proxy, err)
+	}
+	opts := append([]stream.ListenerOpt{proxyAuth{s}}, s.listenerOpts...)
+	ln, ep, err := s.streamMgr.Listen(inaming.Network, resolved, s.principal, s.blessings, opts...)
+	if err != nil {
+		return nil, nil, verror.New(errFailedToListenForProxy, s.ctx, resolved, err)
+	}
+	iep, ok := ep.(*inaming.Endpoint)
+	if !ok {
+		ln.Close()
+		return nil, nil, verror.New(errInternalTypeConversion, s.ctx, fmt.Sprintf("%T", ep))
+	}
+	s.Lock()
+	s.proxies[proxy] = proxyState{iep, nil}
+	s.Unlock()
+	iep.IsMountTable = s.servesMountTable
+	iep.IsLeaf = s.isLeaf
+	s.publisher.AddServer(iep.String())
+	return iep, ln, nil
+}
+
+func (s *server) proxyListenLoop(proxy string) {
+	const (
+		min = 5 * time.Millisecond
+		max = 5 * time.Minute
+	)
+
+	iep, ln, err := s.reconnectAndPublishProxy(proxy)
+	if err != nil {
+		vlog.Errorf("Failed to connect to proxy: %s", err)
+	}
+	// The initial connection may have failed, but we enter the retry
+	// loop anyway so that we will continue trying to connect to the
+	// proxy.
+	s.Lock()
+	if s.isStopState() {
+		s.Unlock()
+		return
+	}
+	s.Unlock()
+
+	for {
+		if ln != nil && iep != nil {
+			err := s.listenLoop(ln, iep)
+			// The listener is done, so:
+			// (1) Unpublish its name
+			s.publisher.RemoveServer(iep.String())
+			s.Lock()
+			if err != nil {
+				s.proxies[proxy] = proxyState{iep, verror.New(verror.ErrNoServers, s.ctx, err)}
+			} else {
+				// err will be nil if we're stopping.
+				s.proxies[proxy] = proxyState{iep, nil}
+				s.Unlock()
+				return
+			}
+			s.Unlock()
+		}
+
+		s.Lock()
+		if s.isStopState() {
+			s.Unlock()
+			return
+		}
+		s.Unlock()
+
+		// (2) Reconnect to the proxy unless the server has been stopped
+		backoff := min
+		ln = nil
+		for {
+			select {
+			case <-time.After(backoff):
+				if backoff = backoff * 2; backoff > max {
+					backoff = max
+				}
+			case <-s.stoppedChan:
+				return
+			}
+			// (3) reconnect, publish new address
+			if iep, ln, err = s.reconnectAndPublishProxy(proxy); err != nil {
+				vlog.Errorf("Failed to reconnect to proxy %q: %s", proxy, err)
+			} else {
+				vlog.VI(1).Infof("Reconnected to proxy %q, %s", proxy, iep)
+				break
+			}
+		}
+	}
+}
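+
+// The retry schedule above is a capped exponential backoff: delays run
+// 5ms, 10ms, 20ms, ... doubling until clamped at 5m. A condensed sketch
+// (connected is a placeholder for the reconnect attempt succeeding):
+//
+//	backoff := min
+//	for !connected() {
+//		time.Sleep(backoff)
+//		if backoff *= 2; backoff > max {
+//			backoff = max
+//		}
+//	}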
+
+// addListener adds the supplied listener taking care to
+// check to see if we're already stopping. It returns true
+// if the listener was added.
+func (s *server) addListener(ln stream.Listener) bool {
+	s.Lock()
+	defer s.Unlock()
+	if s.isStopState() {
+		return false
+	}
+	s.listeners[ln] = struct{}{}
+	return true
+}
+
+// rmListener removes the supplied listener taking care to
+// check if we're already stopping. It returns true if the
+// listener was removed.
+func (s *server) rmListener(ln stream.Listener) bool {
+	s.Lock()
+	defer s.Unlock()
+	if s.isStopState() {
+		return false
+	}
+	delete(s.listeners, ln)
+	return true
+}
+
+func (s *server) listenLoop(ln stream.Listener, ep naming.Endpoint) error {
+	defer vlog.VI(1).Infof("rpc: Stopped listening on %s", ep)
+	var calls sync.WaitGroup
+
+	if !s.addListener(ln) {
+		// We're stopping.
+		return nil
+	}
+
+	defer func() {
+		calls.Wait()
+		s.rmListener(ln)
+	}()
+	for {
+		flow, err := ln.Accept()
+		if err != nil {
+			vlog.VI(10).Infof("rpc: Accept on %v failed: %v", ep, err)
+			return err
+		}
+		calls.Add(1)
+		go func(flow stream.Flow) {
+			defer calls.Done()
+			fs, err := newFlowServer(flow, s)
+			if err != nil {
+				vlog.VI(1).Infof("newFlowServer on %v failed: %v", ep, err)
+				return
+			}
+			if err := fs.serve(); err != nil {
+				// TODO(caprita): Logging errors here is too spammy. For example, "not
+				// authorized" errors shouldn't be logged as server errors.
+				// TODO(cnicolaou): revisit this when verror2 transition is
+				// done.
+				if err != io.EOF {
+					vlog.VI(2).Infof("Flow.serve on %v failed: %v", ep, err)
+				}
+			}
+		}(flow)
+	}
+}
+
+func (s *server) dhcpLoop(ch chan pubsub.Setting) {
+	defer vlog.VI(1).Infof("rpc: Stopped listening for dhcp changes")
+	vlog.VI(2).Infof("rpc: dhcp loop")
+	for setting := range ch {
+		if setting == nil {
+			return
+		}
+		switch v := setting.Value().(type) {
+		case []net.Addr:
+			s.Lock()
+			if s.isStopState() {
+				s.Unlock()
+				return
+			}
+			change := rpc.NetworkChange{
+				Time:  time.Now(),
+				State: externalStates[s.state],
+			}
+			switch setting.Name() {
+			case NewAddrsSetting:
+				change.Changed = s.addAddresses(v)
+				change.AddedAddrs = v
+			case RmAddrsSetting:
+				change.Changed, change.Error = s.removeAddresses(v)
+				change.RemovedAddrs = v
+			}
+			vlog.VI(2).Infof("rpc: dhcp: change %v", change)
+			for ch := range s.dhcpState.watchers {
+				select {
+				case ch <- change:
+				default:
+				}
+			}
+			s.Unlock()
+		default:
+			vlog.Errorf("rpc: dhcpLoop: unhandled setting type %T", v)
+		}
+	}
+}
+
+func getHost(address net.Addr) string {
+	host, _, err := net.SplitHostPort(address.String())
+	if err == nil {
+		return host
+	}
+	return address.String()
+}
+
+// Remove all endpoints that have the same host address as the supplied
+// address parameter.
+func (s *server) removeAddresses(addrs []net.Addr) ([]naming.Endpoint, error) {
+	var removed []naming.Endpoint
+	for _, address := range addrs {
+		host := getHost(address)
+		for ls := range s.listenState {
+			if ls != nil && ls.roaming && len(ls.ieps) > 0 {
+				remaining := make([]*inaming.Endpoint, 0, len(ls.ieps))
+				for _, iep := range ls.ieps {
+					lnHost, _, err := net.SplitHostPort(iep.Address)
+					if err != nil {
+						lnHost = iep.Address
+					}
+					if lnHost == host {
+						vlog.VI(2).Infof("rpc: dhcp removing: %s", iep)
+						removed = append(removed, iep)
+						s.publisher.RemoveServer(iep.String())
+						continue
+					}
+					remaining = append(remaining, iep)
+				}
+				ls.ieps = remaining
+			}
+		}
+	}
+	return removed, nil
+}
+
+// Add new endpoints for the new address. There is no way to know with
+// 100% confidence which new endpoints to publish without shutting down
+// all network connections and reinitializing everything from scratch.
+// Instead, we find all roaming listeners with at least one endpoint
+// and create a new endpoint with the same port as the existing ones
+// but with the new address supplied to us by the dhcp code. As
+// an additional safeguard we reject the new address if it is not
+// externally accessible.
+// This places the onus on the dhcp/roaming code that sends us addresses
+// to ensure that those addresses are externally reachable.
+func (s *server) addAddresses(addrs []net.Addr) []naming.Endpoint {
+	var added []naming.Endpoint
+	for _, address := range netstate.ConvertToAddresses(addrs) {
+		if !netstate.IsAccessibleIP(address) {
+			return added
+		}
+		host := getHost(address)
+		for ls := range s.listenState {
+			if ls != nil && ls.roaming {
+				niep := ls.protoIEP
+				niep.Address = net.JoinHostPort(host, ls.port)
+				niep.IsMountTable = s.servesMountTable
+				niep.IsLeaf = s.isLeaf
+				ls.ieps = append(ls.ieps, &niep)
+				vlog.VI(2).Infof("rpc: dhcp adding: %s", niep)
+				s.publisher.AddServer(niep.String())
+				added = append(added, &niep)
+			}
+		}
+	}
+	return added
+}
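+
+// For illustration (hypothetical values): a roaming listener whose protoIEP
+// is tcp@0.0.0.0:8888 with port "8888", on learning of the new address
+// 192.168.1.7, publishes the derived endpoint tcp@192.168.1.7:8888.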
+
+type leafDispatcher struct {
+	invoker rpc.Invoker
+	auth    security.Authorizer
+}
+
+func (d leafDispatcher) Lookup(suffix string) (interface{}, security.Authorizer, error) {
+	defer vlog.LogCallf("suffix=%.10s...", suffix)("") // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	if suffix != "" {
+		return nil, nil, verror.New(verror.ErrUnknownSuffix, nil, suffix)
+	}
+	return d.invoker, d.auth, nil
+}
+
+func (s *server) Serve(name string, obj interface{}, authorizer security.Authorizer) error {
+	defer vlog.LogCallf("name=%.10s...,obj=,authorizer=", name)("") // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	if obj == nil {
+		return verror.New(verror.ErrBadArg, s.ctx, "nil object")
+	}
+	invoker, err := objectToInvoker(obj)
+	if err != nil {
+		return verror.New(verror.ErrBadArg, s.ctx, fmt.Sprintf("bad object: %v", err))
+	}
+	s.setLeaf(true)
+	return s.ServeDispatcher(name, &leafDispatcher{invoker, authorizer})
+}
+
+func (s *server) setLeaf(value bool) {
+	s.Lock()
+	defer s.Unlock()
+	s.isLeaf = value
+	for ls := range s.listenState {
+		for i := range ls.ieps {
+			ls.ieps[i].IsLeaf = s.isLeaf
+		}
+	}
+}
+
+func (s *server) ServeDispatcher(name string, disp rpc.Dispatcher) error {
+	defer vlog.LogCallf("name=%.10s...,disp=", name)("") // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	if disp == nil {
+		return verror.New(verror.ErrBadArg, s.ctx, "nil dispatcher")
+	}
+	s.Lock()
+	defer s.Unlock()
+	if err := s.allowed(serving, "Serve or ServeDispatcher"); err != nil {
+		return err
+	}
+	vtrace.GetSpan(s.ctx).Annotate("Serving under name: " + name)
+	s.disp = disp
+	if len(name) > 0 {
+		for ls := range s.listenState {
+			for _, iep := range ls.ieps {
+				s.publisher.AddServer(iep.String())
+			}
+		}
+		s.publisher.AddName(name, s.servesMountTable, s.isLeaf)
+	}
+	return nil
+}
+
+func (s *server) AddName(name string) error {
+	defer vlog.LogCallf("name=%.10s...", name)("") // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	if len(name) == 0 {
+		return verror.New(verror.ErrBadArg, s.ctx, "name is empty")
+	}
+	s.Lock()
+	defer s.Unlock()
+	if err := s.allowed(publishing, "AddName"); err != nil {
+		return err
+	}
+	vtrace.GetSpan(s.ctx).Annotate("Serving under name: " + name)
+	s.publisher.AddName(name, s.servesMountTable, s.isLeaf)
+	return nil
+}
+
+func (s *server) RemoveName(name string) {
+	defer vlog.LogCallf("name=%.10s...", name)("") // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	s.Lock()
+	defer s.Unlock()
+	if err := s.allowed(publishing, "RemoveName"); err != nil {
+		return
+	}
+	vtrace.GetSpan(s.ctx).Annotate("Removed name: " + name)
+	s.publisher.RemoveName(name)
+}
+
+func (s *server) Stop() error {
+	defer vlog.LogCall()() // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	serverDebug := fmt.Sprintf("Dispatcher: %T, Status:[%v]", s.disp, s.Status())
+	vlog.VI(1).Infof("Stop: %s", serverDebug)
+	defer vlog.VI(1).Infof("Stop done: %s", serverDebug)
+	s.Lock()
+	if s.isStopState() {
+		s.Unlock()
+		return nil
+	}
+	s.state = stopping
+	close(s.stoppedChan)
+	s.Unlock()
+
+	// Delete the stats object.
+	s.stats.stop()
+
+	// Note, It's safe to Stop/WaitForStop on the publisher outside of the
+	// server lock, since publisher is safe for concurrent access.
+
+	// Stop the publisher, which triggers unmounting of published names.
+	s.publisher.Stop()
+	// Wait for the publisher to be done unmounting before we can proceed to
+	// close the listeners (to minimize the number of mounted names pointing
+	// to endpoints that are no longer serving).
+	//
+	// TODO(caprita): See if it makes sense to fail fast on rejecting
+	// connections once listeners are closed, and parallelize the publisher
+	// and listener shutdown.
+	s.publisher.WaitForStop()
+
+	s.Lock()
+
+	// Close all listeners.  No new flows will be accepted, while in-flight
+	// flows will continue until they terminate naturally.
+	nListeners := len(s.listeners)
+	errCh := make(chan error, nListeners)
+
+	for ln := range s.listeners {
+		go func(ln stream.Listener) {
+			errCh <- ln.Close()
+		}(ln)
+	}
+
+	drain := func(ch chan pubsub.Setting) {
+		for {
+			select {
+			case v := <-ch:
+				if v == nil {
+					return
+				}
+			default:
+				close(ch)
+				return
+			}
+		}
+	}
+
+	if dhcp := s.dhcpState; dhcp != nil {
+		// TODO(cnicolaou,caprita): investigate not having to close and drain
+		// the channel here. It's a little awkward right now since we have to
+	// be careful not to close the channel in two places, i.e. here and
+	// from the publisher's Shutdown method.
+		if err := dhcp.publisher.CloseFork(dhcp.name, dhcp.ch); err == nil {
+			drain(dhcp.ch)
+		}
+	}
+
+	s.Unlock()
+
+	var firstErr error
+	for i := 0; i < nListeners; i++ {
+		if err := <-errCh; err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+	// At this point, we are guaranteed that no new requests are going to be
+	// accepted.
+
+	// Wait for the publisher and active listener + flows to finish.
+	done := make(chan struct{}, 1)
+	go func() { s.active.Wait(); done <- struct{}{} }()
+
+	select {
+	case <-done:
+	case <-time.After(5 * time.Second):
+		vlog.Errorf("%s: Listener Close Error: %v", serverDebug, firstErr)
+		vlog.Errorf("%s: Timed out waiting for goroutines to stop: listeners: %d (currently: %d)", serverDebug, nListeners, len(s.listeners))
+		for ln := range s.listeners {
+			vlog.Errorf("%s: Listener: %v", serverDebug, ln)
+		}
+		for ls := range s.listenState {
+			vlog.Errorf("%s: ListenState: %v", serverDebug, ls)
+		}
+		<-done
+		vlog.Infof("%s: Done waiting.", serverDebug)
+	}
+
+	s.Lock()
+	defer s.Unlock()
+	s.disp = nil
+	if firstErr != nil {
+		return verror.New(verror.ErrInternal, s.ctx, firstErr)
+	}
+	s.state = stopped
+	s.cancel()
+	return nil
+}
+
+// flowServer implements the RPC server-side protocol for a single RPC, over a
+// flow that's already connected to the client.
+type flowServer struct {
+	ctx    *context.T     // context associated with the RPC
+	server *server        // rpc.Server that this flow server belongs to
+	disp   rpc.Dispatcher // rpc.Dispatcher that will serve RPCs on this flow
+	dec    *vom.Decoder   // to decode requests and args from the client
+	enc    *vom.Encoder   // to encode responses and results to the client
+	flow   stream.Flow    // underlying flow
+
+	// Fields filled in during the server invocation.
+	clientBlessings  security.Blessings
+	ackBlessings     bool
+	grantedBlessings security.Blessings
+	method, suffix   string
+	tags             []*vdl.Value
+	discharges       map[string]security.Discharge
+	starttime        time.Time
+	endStreamArgs    bool // are the stream args at EOF?
+}
+
+var (
+	_ rpc.StreamServerCall = (*flowServer)(nil)
+	_ security.Call        = (*flowServer)(nil)
+)
+
+func newFlowServer(flow stream.Flow, server *server) (*flowServer, error) {
+	server.Lock()
+	disp := server.disp
+	server.Unlock()
+
+	fs := &flowServer{
+		ctx:        server.ctx,
+		server:     server,
+		disp:       disp,
+		flow:       flow,
+		discharges: make(map[string]security.Discharge),
+	}
+	typeenc := flow.VCDataCache().Get(vc.TypeEncoderKey{})
+	if typeenc == nil {
+		fs.enc = vom.NewEncoder(flow)
+		fs.dec = vom.NewDecoder(flow)
+	} else {
+		fs.enc = vom.NewEncoderWithTypeEncoder(flow, typeenc.(*vom.TypeEncoder))
+		typedec := flow.VCDataCache().Get(vc.TypeDecoderKey{})
+		fs.dec = vom.NewDecoderWithTypeDecoder(flow, typedec.(*vom.TypeDecoder))
+	}
+	return fs, nil
+}
+
+// authorizeVtrace works by simulating a call to __debug/vtrace.Trace.  That
+// rpc is essentially equivalent in power to the data we are attempting to
+// attach here.
+func (fs *flowServer) authorizeVtrace() error {
+	// Set up a context as though we were calling __debug/vtrace.
+	params := &security.CallParams{}
+	params.Copy(fs)
+	params.Method = "Trace"
+	params.MethodTags = []*vdl.Value{vdl.ValueOf(access.Debug)}
+	params.Suffix = "__debug/vtrace"
+
+	var auth security.Authorizer
+	if fs.server.dispReserved != nil {
+		_, auth, _ = fs.server.dispReserved.Lookup(params.Suffix)
+	}
+	return authorize(fs.ctx, security.NewCall(params), auth)
+}
+
+func (fs *flowServer) serve() error {
+	defer fs.flow.Close()
+
+	results, err := fs.processRequest()
+
+	vtrace.GetSpan(fs.ctx).Finish()
+
+	var traceResponse vtrace.Response
+	// Check if the caller is permitted to view vtrace data.
+	if fs.authorizeVtrace() == nil {
+		traceResponse = vtrace.GetResponse(fs.ctx)
+	}
+
+	// Respond to the client with the response header and positional results.
+	response := rpc.Response{
+		Error:            err,
+		EndStreamResults: true,
+		NumPosResults:    uint64(len(results)),
+		TraceResponse:    traceResponse,
+		AckBlessings:     fs.ackBlessings,
+	}
+	if err := fs.enc.Encode(response); err != nil {
+		if err == io.EOF {
+			return err
+		}
+		return verror.New(errResponseEncoding, fs.ctx, fs.LocalEndpoint().String(), fs.RemoteEndpoint().String(), err)
+	}
+	if response.Error != nil {
+		return response.Error
+	}
+	for ix, res := range results {
+		if err := fs.enc.Encode(res); err != nil {
+			if err == io.EOF {
+				return err
+			}
+			return verror.New(errResultEncoding, fs.ctx, ix, fmt.Sprintf("%T=%v", res, res), err)
+		}
+	}
+	// TODO(ashankar): Should unread data from the flow be drained?
+	//
+	// Reason to do so:
+	// The common stream.Flow implementation (v.io/x/ref/runtime/internal/rpc/stream/vc/reader.go)
+	// uses iobuf.Slices backed by an iobuf.Pool. If the stream is not drained, these
+	// slices will not be returned to the pool leading to possibly increased memory usage.
+	//
+	// Reason to not do so:
+	// Draining here will conflict with any Reads on the flow in a separate goroutine
+	// (for example, see TestStreamReadTerminatedByServer in full_test.go).
+	//
+	// For now, go with the reason to not do so as having unread data in the stream
+	// should be a rare case.
+	return nil
+}
+
+func (fs *flowServer) readRPCRequest() (*rpc.Request, error) {
+	// Set a default timeout before reading from the flow. Without this timeout,
+	// a client that sends no request or a partial request will retain the flow
+	// indefinitely (and lock up server resources).
+	initTimer := newTimer(defaultCallTimeout)
+	defer initTimer.Stop()
+	fs.flow.SetDeadline(initTimer.C)
+
+	// Decode the initial request.
+	var req rpc.Request
+	if err := fs.dec.Decode(&req); err != nil {
+		return nil, verror.New(verror.ErrBadProtocol, fs.ctx, newErrBadRequest(fs.ctx, err))
+	}
+	return &req, nil
+}
+
+func (fs *flowServer) processRequest() ([]interface{}, error) {
+	fs.starttime = time.Now()
+	req, err := fs.readRPCRequest()
+	if err != nil {
+		// We don't know what the rpc call was supposed to be, but we'll create
+		// a placeholder span so we can capture annotations.
+		fs.ctx, _ = vtrace.WithNewSpan(fs.ctx, fmt.Sprintf("\"%s\".UNKNOWN", fs.suffix))
+		return nil, err
+	}
+	fs.method = req.Method
+	fs.suffix = strings.TrimLeft(req.Suffix, "/")
+
+	if req.Language != "" {
+		fs.ctx = i18n.WithLangID(fs.ctx, i18n.LangID(req.Language))
+	}
+
+	// TODO(mattr): Currently this allows users to trigger trace collection
+	// on the server even if they will not be allowed to collect the
+	// results later.  This might be considered a DOS vector.
+	spanName := fmt.Sprintf("\"%s\".%s", fs.suffix, fs.method)
+	fs.ctx, _ = vtrace.WithContinuedTrace(fs.ctx, spanName, req.TraceRequest)
+
+	var cancel context.CancelFunc
+	if !req.Deadline.IsZero() {
+		fs.ctx, cancel = context.WithDeadline(fs.ctx, req.Deadline.Time)
+	} else {
+		fs.ctx, cancel = context.WithCancel(fs.ctx)
+	}
+	fs.flow.SetDeadline(fs.ctx.Done())
+	go fs.cancelContextOnClose(cancel)
+
+	// Initialize security: blessings, discharges, etc.
+	if err := fs.initSecurity(req); err != nil {
+		return nil, err
+	}
+	// Lookup the invoker.
+	invoker, auth, err := fs.lookup(fs.suffix, fs.method)
+	if err != nil {
+		return nil, err
+	}
+
+	// Note that we strip the reserved prefix when calling the invoker so
+	// that __Glob will call Glob.  We've already assigned a special invoker,
+	// so we never call the wrong method by mistake.
+	strippedMethod := naming.StripReserved(fs.method)
+
+	// Prepare invoker and decode args.
+	numArgs := int(req.NumPosArgs)
+	argptrs, tags, err := invoker.Prepare(strippedMethod, numArgs)
+	fs.tags = tags
+	if err != nil {
+		return nil, err
+	}
+	if called, want := req.NumPosArgs, uint64(len(argptrs)); called != want {
+		err := newErrBadNumInputArgs(fs.ctx, fs.suffix, fs.method, called, want)
+		// If the client is sending the wrong number of arguments, try to drain the
+		// arguments sent by the client before returning an error to ensure the client
+		// receives the correct error in call.Finish(). Otherwise, the client may get
+		// an EOF error while encoding args since the server closes the flow upon returning.
+		var any interface{}
+		for i := 0; i < int(req.NumPosArgs); i++ {
+			if decerr := fs.dec.Decode(&any); decerr != nil {
+				return nil, err
+			}
+		}
+		return nil, err
+	}
+	for ix, argptr := range argptrs {
+		if err := fs.dec.Decode(argptr); err != nil {
+			return nil, newErrBadInputArg(fs.ctx, fs.suffix, fs.method, uint64(ix), err)
+		}
+	}
+
+	// Check application's authorization policy.
+	if err := authorize(fs.ctx, fs, auth); err != nil {
+		return nil, err
+	}
+
+	// Invoke the method.
+	results, err := invoker.Invoke(fs.ctx, fs, strippedMethod, argptrs)
+	fs.server.stats.record(fs.method, time.Since(fs.starttime))
+	return results, err
+}
+
+func (fs *flowServer) cancelContextOnClose(cancel context.CancelFunc) {
+	// Ensure that the context gets cancelled if the flow is closed
+	// due to a network error, or client cancellation.
+	select {
+	case <-fs.flow.Closed():
+		// Here we remove the context's channel as a deadline on the flow.
+		// We do this to ensure clients get a consistent error when they read/write
+		// after the flow is closed.  Since the flow is already closed, it doesn't
+		// matter that the context is also cancelled.
+		fs.flow.SetDeadline(nil)
+		cancel()
+	case <-fs.ctx.Done():
+	}
+}
+
+// lookup returns the invoker and authorizer responsible for serving the given
+// name and method.  The suffix is stripped of any leading slashes. If the
+// suffix is reserved, we use the server's reserved dispatcher to look up the
+// invoker; otherwise we use the server's dispatcher. The suffix and method
+// value may be modified to match the actual suffix and method to use.
+func (fs *flowServer) lookup(suffix string, method string) (rpc.Invoker, security.Authorizer, error) {
+	if naming.IsReserved(method) {
+		return reservedInvoker(fs.disp, fs.server.dispReserved), security.AllowEveryone(), nil
+	}
+	disp := fs.disp
+	if naming.IsReserved(suffix) {
+		disp = fs.server.dispReserved
+	} else if fs.server.isLeaf && suffix != "" {
+		innerErr := verror.New(errUnexpectedSuffix, fs.ctx, suffix)
+		return nil, nil, verror.New(verror.ErrUnknownSuffix, fs.ctx, suffix, innerErr)
+	}
+	if disp != nil {
+		obj, auth, err := disp.Lookup(suffix)
+		switch {
+		case err != nil:
+			return nil, nil, err
+		case obj != nil:
+			invoker, err := objectToInvoker(obj)
+			if err != nil {
+				return nil, nil, verror.New(verror.ErrInternal, fs.ctx, "invalid received object", err)
+			}
+			return invoker, auth, nil
+		}
+	}
+	return nil, nil, verror.New(verror.ErrUnknownSuffix, fs.ctx, suffix)
+}
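+
+// For example, a call to the reserved method "__Glob" is handled by the
+// reserved invoker regardless of suffix, a call whose suffix is reserved
+// (e.g. "__debug") is routed to the server's reserved dispatcher, and
+// everything else goes through the dispatcher installed by ServeDispatcher.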
+
+func objectToInvoker(obj interface{}) (rpc.Invoker, error) {
+	if obj == nil {
+		return nil, errors.New("nil object")
+	}
+	if invoker, ok := obj.(rpc.Invoker); ok {
+		return invoker, nil
+	}
+	return rpc.ReflectInvoker(obj)
+}
+
+func (fs *flowServer) initSecurity(req *rpc.Request) error {
+	// A nil LocalPrincipal means that we are operating under
+	// SecurityNone.
+	if fs.LocalPrincipal() == nil {
+		return nil
+	}
+
+	// If additional credentials are provided, make them available in the
+	// context. Detect unusable blessings now, rather than discovering they
+	// are unusable on first use.
+	//
+	// TODO(ashankar,ataly): Potential confused deputy attack: The client provides
+	// the server's identity as the blessing. Figure out what we want to do about
+	// this - should servers be able to assume that a blessing is something that
+	// does not have the authorizations that the server's own identity has?
+	if got, want := req.GrantedBlessings.PublicKey(), fs.LocalPrincipal().PublicKey(); got != nil && !reflect.DeepEqual(got, want) {
+		return verror.New(verror.ErrNoAccess, fs.ctx, fmt.Sprintf("blessing granted not bound to this server(%v vs %v)", got, want))
+	}
+	fs.grantedBlessings = req.GrantedBlessings
+
+	var err error
+	if fs.clientBlessings, err = serverDecodeBlessings(fs.flow.VCDataCache(), req.Blessings, fs.server.stats); err != nil {
+		// When the server can't access the blessings cache, the client is not following
+		// protocol, so the server closes the VCs corresponding to the client endpoint.
+		// TODO(suharshs,toddw): Figure out a way to only shutdown the current VC, instead
+		// of all VCs connected to the RemoteEndpoint.
+		fs.server.streamMgr.ShutdownEndpoint(fs.RemoteEndpoint())
+		return verror.New(verror.ErrBadProtocol, fs.ctx, newErrBadBlessingsCache(fs.ctx, err))
+	}
+	// Verify that the blessings sent by the client in the request have the same public
+	// key as those sent by the client during VC establishment.
+	if got, want := fs.clientBlessings.PublicKey(), fs.flow.RemoteBlessings().PublicKey(); got != nil && !reflect.DeepEqual(got, want) {
+		return verror.New(verror.ErrNoAccess, fs.ctx, fmt.Sprintf("blessings sent with the request are bound to a different public key (%v) from the blessing used during VC establishment (%v)", got, want))
+	}
+	fs.ackBlessings = true
+
+	for _, d := range req.Discharges {
+		fs.discharges[d.ID()] = d
+	}
+	return nil
+}
+
+func authorize(ctx *context.T, call security.Call, auth security.Authorizer) error {
+	if call.LocalPrincipal() == nil {
+		// A nil LocalPrincipal means that the server wanted to avoid
+		// authentication, and thus wanted to skip authorization as well.
+		return nil
+	}
+	if auth == nil {
+		auth = security.DefaultAuthorizer()
+	}
+	if err := auth.Authorize(ctx, call); err != nil {
+		return verror.New(verror.ErrNoAccess, ctx, newErrBadAuth(ctx, call.Suffix(), call.Method(), err))
+	}
+	return nil
+}
+
+// Send implements the rpc.Stream method.
+func (fs *flowServer) Send(item interface{}) error {
+	defer vlog.LogCallf("item=")("") // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	// The empty response header indicates what follows is a streaming result.
+	if err := fs.enc.Encode(rpc.Response{}); err != nil {
+		return err
+	}
+	return fs.enc.Encode(item)
+}
+
+// Recv implements the rpc.Stream method.
+func (fs *flowServer) Recv(itemptr interface{}) error {
+	defer vlog.LogCallf("itemptr=")("") // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	var req rpc.Request
+	if err := fs.dec.Decode(&req); err != nil {
+		return err
+	}
+	if req.EndStreamArgs {
+		fs.endStreamArgs = true
+		return io.EOF
+	}
+	return fs.dec.Decode(itemptr)
+}
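+
+// On the wire, each streamed result sent by Send is preceded by an empty
+// rpc.Response header, and the client marks the end of streaming arguments
+// with an rpc.Request whose EndStreamArgs field is set, which Recv surfaces
+// as io.EOF.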
+
+// Implementations of rpc.ServerCall and security.Call methods.
+
+func (fs *flowServer) Security() security.Call {
+	//nologcall
+	return fs
+}
+func (fs *flowServer) LocalDischarges() map[string]security.Discharge {
+	//nologcall
+	return fs.flow.LocalDischarges()
+}
+func (fs *flowServer) RemoteDischarges() map[string]security.Discharge {
+	//nologcall
+	return fs.discharges
+}
+func (fs *flowServer) Server() rpc.Server {
+	//nologcall
+	return fs.server
+}
+func (fs *flowServer) Timestamp() time.Time {
+	//nologcall
+	return fs.starttime
+}
+func (fs *flowServer) Method() string {
+	//nologcall
+	return fs.method
+}
+func (fs *flowServer) MethodTags() []*vdl.Value {
+	//nologcall
+	return fs.tags
+}
+func (fs *flowServer) Suffix() string {
+	//nologcall
+	return fs.suffix
+}
+func (fs *flowServer) LocalPrincipal() security.Principal {
+	//nologcall
+	return fs.flow.LocalPrincipal()
+}
+func (fs *flowServer) LocalBlessings() security.Blessings {
+	//nologcall
+	return fs.flow.LocalBlessings()
+}
+func (fs *flowServer) RemoteBlessings() security.Blessings {
+	//nologcall
+	if !fs.clientBlessings.IsZero() {
+		return fs.clientBlessings
+	}
+	return fs.flow.RemoteBlessings()
+}
+func (fs *flowServer) GrantedBlessings() security.Blessings {
+	//nologcall
+	return fs.grantedBlessings
+}
+func (fs *flowServer) LocalEndpoint() naming.Endpoint {
+	//nologcall
+	return fs.flow.LocalEndpoint()
+}
+func (fs *flowServer) RemoteEndpoint() naming.Endpoint {
+	//nologcall
+	return fs.flow.RemoteEndpoint()
+}
+
+type proxyAuth struct {
+	s *server
+}
+
+func (a proxyAuth) RPCStreamListenerOpt() {}
+
+func (a proxyAuth) Login(proxy stream.Flow) (security.Blessings, []security.Discharge, error) {
+	var (
+		principal = a.s.principal
+		dc        = a.s.dc
+		ctx       = a.s.ctx
+	)
+	if principal == nil {
+		return security.Blessings{}, nil, nil
+	}
+	proxyNames, _ := security.RemoteBlessingNames(ctx, security.NewCall(&security.CallParams{
+		LocalPrincipal:   principal,
+		RemoteBlessings:  proxy.RemoteBlessings(),
+		RemoteDischarges: proxy.RemoteDischarges(),
+		RemoteEndpoint:   proxy.RemoteEndpoint(),
+		LocalEndpoint:    proxy.LocalEndpoint(),
+	}))
+	blessings := principal.BlessingStore().ForPeer(proxyNames...)
+	tpc := blessings.ThirdPartyCaveats()
+	if len(tpc) == 0 {
+		return blessings, nil, nil
+	}
+	// Set DischargeImpetus.Server = proxyNames.
+	// See https://v.io/i/392
+	discharges := dc.PrepareDischarges(ctx, tpc, security.DischargeImpetus{})
+	return blessings, discharges, nil
+}
+
+var _ manager.ProxyAuthenticator = proxyAuth{}
diff --git a/runtime/internal/rpc/server_authorizer.go b/runtime/internal/rpc/server_authorizer.go
new file mode 100644
index 0000000..cda735d
--- /dev/null
+++ b/runtime/internal/rpc/server_authorizer.go
@@ -0,0 +1,138 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+	"reflect"
+
+	"v.io/v23/context"
+	"v.io/v23/options"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+	"v.io/x/lib/vlog"
+)
+
+// TODO(ribrdb): Flip this to true once everything is updated and also update
+// the server authorizer tests.
+const enableSecureServerAuth = false
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errNoBlessingsFromServer      = reg(".errNoBlessingsFromServer", "server has not presented any blessings")
+	errAuthNoServerBlessingsMatch = reg(".errAuthNoServerBlessingsMatch",
+		"server blessings {3} do not match client expectations {4}, (rejected blessings: {5})")
+	errAuthServerNotAllowed = reg(".errAuthServerNotAllowed",
+		"server blessings {3} do not match any allowed server patterns {4}{:5}")
+	errAuthServerKeyNotAllowed = reg(".errAuthServerKeyNotAllowed",
+		"remote public key {3} not matched by server key {4}")
+	errMultiplePublicKeys = reg(".errMultiplePublicKeyOptions", "at most one ServerPublicKey option can be provided")
+)
+
+// serverAuthorizer implements security.Authorizer.
+type serverAuthorizer struct {
+	allowedServerPolicies     [][]security.BlessingPattern
+	serverPublicKey           security.PublicKey
+	ignoreBlessingsInEndpoint bool
+}
+
+// newServerAuthorizer returns a security.Authorizer for authorizing the server
+// during a flow. The authorization policy is based on options supplied to the
+// call that initiated the flow. Additionally, if pattern is non-empty then
+// the server will be authorized only if it presents at least one blessing
+// that matches pattern.
+//
+// This method assumes that canCreateServerAuthorizer(opts) is nil.
+func newServerAuthorizer(pattern security.BlessingPattern, opts ...rpc.CallOpt) security.Authorizer {
+	auth := &serverAuthorizer{}
+	for _, o := range opts {
+		switch v := o.(type) {
+		case options.ServerPublicKey:
+			auth.serverPublicKey = v.PublicKey
+		case options.AllowedServersPolicy:
+			auth.allowedServerPolicies = append(auth.allowedServerPolicies, v)
+		case options.SkipServerEndpointAuthorization:
+			auth.ignoreBlessingsInEndpoint = true
+		}
+	}
+	if len(pattern) > 0 {
+		auth.allowedServerPolicies = append(auth.allowedServerPolicies, []security.BlessingPattern{pattern})
+	}
+	return auth
+}
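+
+// A usage sketch (pattern and option values are hypothetical):
+//
+//	// Authorize only servers matching "corp/backend", further restricted
+//	// by any AllowedServersPolicy options supplied with the call.
+//	auth := newServerAuthorizer("corp/backend",
+//		options.AllowedServersPolicy{"corp/backend/prod"})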
+
+func (a *serverAuthorizer) Authorize(ctx *context.T, call security.Call) error {
+	defer vlog.LogCallf("ctx=,call=")("") // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	if call.RemoteBlessings().IsZero() {
+		return verror.New(errNoBlessingsFromServer, ctx)
+	}
+	serverBlessings, rejectedBlessings := security.RemoteBlessingNames(ctx, call)
+
+	if epb := call.RemoteEndpoint().BlessingNames(); len(epb) > 0 && !a.ignoreBlessingsInEndpoint {
+		matched := false
+		for _, b := range epb {
+			// TODO(ashankar,ataly): Should this be
+			// security.BlessingPattern(b).MakeNonExtendable().MatchedBy()?
+			// Because, without that, a delegate of the real server
+			// can be a man-in-the-middle without failing
+			// authorization. Is that a desirable property?
+			if security.BlessingPattern(b).MatchedBy(serverBlessings...) {
+				matched = true
+				break
+			}
+		}
+		if !matched {
+			return verror.New(errAuthNoServerBlessingsMatch, ctx, serverBlessings, epb, rejectedBlessings)
+		}
+	} else if enableSecureServerAuth && len(epb) == 0 {
+		// No blessings in the endpoint to set expectations on the
+		// "identity" of the server.  Use the default authorization
+		// policy.
+		if err := security.DefaultAuthorizer().Authorize(ctx, call); err != nil {
+			return err
+		}
+	}
+
+	for _, patterns := range a.allowedServerPolicies {
+		if !matchedBy(patterns, serverBlessings) {
+			return verror.New(errAuthServerNotAllowed, ctx, serverBlessings, patterns, rejectedBlessings)
+		}
+	}
+
+	if remoteKey, key := call.RemoteBlessings().PublicKey(), a.serverPublicKey; key != nil && !reflect.DeepEqual(remoteKey, key) {
+		return verror.New(errAuthServerKeyNotAllowed, ctx, remoteKey, key)
+	}
+
+	return nil
+}
+
+func matchedBy(patterns []security.BlessingPattern, blessings []string) bool {
+	if patterns == nil {
+		return true
+	}
+	for _, p := range patterns {
+		if p.MatchedBy(blessings...) {
+			return true
+		}
+	}
+	return false
+}
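+
+// Policies compose as an AND of ORs: every policy must be matched by at
+// least one blessing. For example (names as in the tests), with policies
+// {"ali", "che"} and {"bob", "che"}, the blessings {"ali", "bob"} pass
+// (one match per policy) while {"ali"} alone fails the second policy.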
+
+func canCreateServerAuthorizer(ctx *context.T, opts []rpc.CallOpt) error {
+	var pkey security.PublicKey
+	for _, o := range opts {
+		switch v := o.(type) {
+		case options.ServerPublicKey:
+			if pkey != nil && !reflect.DeepEqual(pkey, v.PublicKey) {
+				return verror.New(errMultiplePublicKeys, ctx)
+			}
+			pkey = v.PublicKey
+		}
+	}
+	return nil
+}
diff --git a/runtime/internal/rpc/server_authorizer_test.go b/runtime/internal/rpc/server_authorizer_test.go
new file mode 100644
index 0000000..68afdec
--- /dev/null
+++ b/runtime/internal/rpc/server_authorizer_test.go
@@ -0,0 +1,150 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+	"testing"
+
+	"v.io/v23"
+	"v.io/v23/options"
+	"v.io/v23/security"
+	"v.io/x/ref/runtime/internal/naming"
+
+	"v.io/x/ref/test/testutil"
+)
+
+func TestServerAuthorizer(t *testing.T) {
+	var (
+		pclient = testutil.NewPrincipal()
+		pserver = testutil.NewPrincipal()
+		pother  = testutil.NewPrincipal()
+
+		ali, _      = pserver.BlessSelf("ali")
+		bob, _      = pserver.BlessSelf("bob")
+		che, _      = pserver.BlessSelf("che")
+		otherAli, _ = pother.BlessSelf("ali")
+		zero        = security.Blessings{}
+
+		ctx, shutdown = initForTest()
+
+		U = func(blessings ...security.Blessings) security.Blessings {
+			u, err := security.UnionOfBlessings(blessings...)
+			if err != nil {
+				t.Fatal(err)
+			}
+			return u
+		}
+	)
+	defer shutdown()
+	ctx, _ = v23.WithPrincipal(ctx, pclient)
+	// Make client recognize ali, bob and otherAli blessings
+	for _, b := range []security.Blessings{ali, bob, otherAli} {
+		if err := pclient.AddToRoots(b); err != nil {
+			t.Fatal(err)
+		}
+	}
+	// All tests are run as if pclient is the client end and pserver is remote end.
+	tests := []struct {
+		serverBlessingNames []string
+		auth                security.Authorizer
+		authorizedServers   []security.Blessings
+		unauthorizedServers []security.Blessings
+	}{
+		{
+			// No blessings in the endpoint means that all servers are authorized.
+			nil,
+			newServerAuthorizer(""),
+			[]security.Blessings{ali, otherAli, bob, che},
+			[]security.Blessings{zero},
+		},
+		{
+			// Endpoint sets the expectations for "ali" and "bob".
+			[]string{"ali", "bob"},
+			newServerAuthorizer(""),
+			[]security.Blessings{ali, otherAli, bob, U(ali, che), U(bob, che)},
+			[]security.Blessings{che},
+		},
+		{
+			// Still only ali, otherAli and bob are authorized (che is not
+			// authorized since it is not recognized by the client)
+			[]string{"ali", "bob", "che"},
+			newServerAuthorizer(""),
+			[]security.Blessings{ali, otherAli, bob, U(ali, che), U(bob, che)},
+			[]security.Blessings{che},
+		},
+		{
+
+			// Only ali and otherAli are authorized (since there is an
+			// allowed-servers policy that does not allow "bob")
+			[]string{"ali", "bob", "che"},
+			newServerAuthorizer("", options.AllowedServersPolicy{"ali", "bob"}, options.AllowedServersPolicy{"ali"}),
+			[]security.Blessings{ali, otherAli, U(ali, che), U(ali, bob)},
+			[]security.Blessings{bob, che},
+		},
+		{
+			// Multiple AllowedServersPolicy are treated as an AND (and individual ones are "ORs")
+			nil,
+			newServerAuthorizer("", options.AllowedServersPolicy{"ali", "che"}, options.AllowedServersPolicy{"bob", "che"}),
+			[]security.Blessings{U(ali, bob)},
+			[]security.Blessings{ali, bob, che, U(ali, che), U(bob, che)},
+		},
+		{
+			// Only otherAli is authorized (since only pother's public key is
+			// authorized)
+			[]string{"ali"},
+			newServerAuthorizer("", options.ServerPublicKey{pother.PublicKey()}),
+			[]security.Blessings{otherAli},
+			[]security.Blessings{ali, bob, che},
+		},
+		{
+			// Blessings in endpoint can be ignored.
+			[]string{"ali"},
+			newServerAuthorizer("", options.SkipServerEndpointAuthorization{}),
+			[]security.Blessings{ali, bob, che, otherAli},
+			nil,
+		},
+		{
+			// Pattern specified is respected
+			nil,
+			newServerAuthorizer("bob"),
+			[]security.Blessings{bob, U(ali, bob)},
+			[]security.Blessings{ali, otherAli, che},
+		},
+		{
+			// And concatenated with any existing AllowedServersPolicy
+			[]string{"ali", "bob", "che"},
+			newServerAuthorizer("bob", options.AllowedServersPolicy{"bob", "che"}),
+			[]security.Blessings{bob, U(ali, bob), U(ali, bob, che)},
+			[]security.Blessings{ali, che},
+		},
+		{
+			// And if the intersection of AllowedServersPolicy and the pattern is empty, then so be it!
+			[]string{"ali", "bob", "che"},
+			newServerAuthorizer("bob", options.AllowedServersPolicy{"ali", "che"}),
+			[]security.Blessings{U(ali, bob), U(ali, bob, che)},
+			[]security.Blessings{ali, otherAli, bob, che, U(ali, che)},
+		},
+	}
+	for _, test := range tests {
+		for _, s := range test.authorizedServers {
+			if err := test.auth.Authorize(ctx, &mockCall{
+				p:   pclient,
+				r:   s,
+				rep: &naming.Endpoint{Blessings: test.serverBlessingNames},
+			}); err != nil {
+				t.Errorf("serverAuthorizer: %#v failed to authorize server: %v", test.auth, s)
+			}
+		}
+		for _, s := range test.unauthorizedServers {
+			if err := test.auth.Authorize(ctx, &mockCall{
+				p:   pclient,
+				r:   s,
+				rep: &naming.Endpoint{Blessings: test.serverBlessingNames},
+			}); err == nil {
+				t.Errorf("serverAuthorizer: %#v authorized server: %v", test.auth, s)
+			}
+		}
+	}
+}
diff --git a/runtime/internal/rpc/server_test.go b/runtime/internal/rpc/server_test.go
new file mode 100644
index 0000000..5f2c5c4
--- /dev/null
+++ b/runtime/internal/rpc/server_test.go
@@ -0,0 +1,684 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+	"net"
+	"reflect"
+	"sort"
+	"testing"
+	"time"
+
+	"v.io/x/lib/pubsub"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/options"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+	"v.io/x/lib/vlog"
+
+	"v.io/x/lib/netstate"
+	inaming "v.io/x/ref/runtime/internal/naming"
+	imanager "v.io/x/ref/runtime/internal/rpc/stream/manager"
+	tnaming "v.io/x/ref/runtime/internal/testing/mocks/naming"
+	"v.io/x/ref/test/testutil"
+)
+
+type noMethodsType struct{ Field string }
+
+type fieldType struct {
+	unexported string
+}
+type noExportedFieldsType struct{}
+
+func (noExportedFieldsType) F(_ *context.T, _ rpc.ServerCall, f fieldType) error { return nil }
+
+type badObjectDispatcher struct{}
+
+func (badObjectDispatcher) Lookup(suffix string) (interface{}, security.Authorizer, error) {
+	return noMethodsType{}, nil, nil
+}
+
+// TestBadObject ensures that Serve handles bad receiver objects gracefully (in
+// particular, it doesn't panic).
+func TestBadObject(t *testing.T) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+	defer sm.Shutdown()
+	ns := tnaming.NewSimpleNamespace()
+	pclient, pserver := newClientServerPrincipals()
+	server, err := testInternalNewServer(ctx, sm, ns, pserver)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer server.Stop()
+
+	if _, err := server.Listen(listenSpec); err != nil {
+		t.Fatalf("Listen failed: %v", err)
+	}
+	if err := server.Serve("", nil, nil); err == nil {
+		t.Fatal("should have failed")
+	}
+	if err := server.Serve("", new(noMethodsType), nil); err == nil {
+		t.Fatal("should have failed")
+	}
+	if err := server.Serve("", new(noExportedFieldsType), nil); err == nil {
+		t.Fatal("should have failed")
+	}
+	if err := server.ServeDispatcher("servername", badObjectDispatcher{}); err != nil {
+		t.Fatalf("ServeDispatcher failed: %v", err)
+	}
+	client, err := InternalNewClient(sm, ns)
+	if err != nil {
+		t.Fatalf("InternalNewClient failed: %v", err)
+	}
+	ctx, _ = v23.WithPrincipal(ctx, pclient)
+	ctx, _ = context.WithDeadline(ctx, time.Now().Add(10*time.Second))
+	var result string
+	if err := client.Call(ctx, "servername", "SomeMethod", nil, []interface{}{&result}); err == nil {
+		// TODO(caprita): Check the error type rather than
+		// merely ensuring the test doesn't panic.
+		t.Fatalf("Call should have failed")
+	}
+}
+
+func TestServerArgs(t *testing.T) {
+	sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+	defer sm.Shutdown()
+	ns := tnaming.NewSimpleNamespace()
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	server, err := testInternalNewServer(ctx, sm, ns, testutil.NewPrincipal("test"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer server.Stop()
+	_, err = server.Listen(rpc.ListenSpec{})
+	if verror.ErrorID(err) != verror.ErrBadArg.ID {
+		t.Fatalf("expected a BadArg error: got %v", err)
+	}
+	_, err = server.Listen(rpc.ListenSpec{Addrs: rpc.ListenAddrs{{"tcp", "*:0"}}})
+	if verror.ErrorID(err) != verror.ErrBadArg.ID {
+		t.Fatalf("expected a BadArg error: got %v", err)
+	}
+	_, err = server.Listen(rpc.ListenSpec{
+		Addrs: rpc.ListenAddrs{
+			{"tcp", "*:0"},
+			{"tcp", "127.0.0.1:0"},
+		}})
+	if verror.ErrorID(err) == verror.ErrBadArg.ID {
+		t.Fatalf("did not expect a BadArg error: got %v", err)
+	}
+	status := server.Status()
+	if got, want := len(status.Errors), 1; got != want {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+	_, err = server.Listen(rpc.ListenSpec{Addrs: rpc.ListenAddrs{{"tcp", "*:0"}}})
+	if verror.ErrorID(err) != verror.ErrBadArg.ID {
+		t.Fatalf("expected a BadArg error: got %v", err)
+	}
+	status = server.Status()
+	if got, want := len(status.Errors), 1; got != want {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+}
+
+type statusServer struct{ ch chan struct{} }
+
+func (s *statusServer) Hang(*context.T, rpc.ServerCall) error {
+	s.ch <- struct{}{} // Notify the server has received a call.
+	<-s.ch             // Wait for the server to be ready to go.
+	return nil
+}
+
+func TestServerStatus(t *testing.T) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+	defer sm.Shutdown()
+	ns := tnaming.NewSimpleNamespace()
+	principal := testutil.NewPrincipal("testServerStatus")
+	server, err := testInternalNewServer(ctx, sm, ns, principal)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer server.Stop()
+
+	status := server.Status()
+	if got, want := status.State, rpc.ServerInit; got != want {
+		t.Fatalf("got %s, want %s", got, want)
+	}
+	server.Listen(rpc.ListenSpec{Addrs: rpc.ListenAddrs{{"tcp", "127.0.0.1:0"}}})
+	status = server.Status()
+	if got, want := status.State, rpc.ServerActive; got != want {
+		t.Fatalf("got %s, want %s", got, want)
+	}
+	serverChan := make(chan struct{})
+	err = server.Serve("test", &statusServer{serverChan}, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	status = server.Status()
+	if got, want := status.State, rpc.ServerActive; got != want {
+		t.Fatalf("got %s, want %s", got, want)
+	}
+
+	progress := make(chan error)
+
+	client, err := InternalNewClient(sm, ns)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ctx, _ = v23.WithPrincipal(ctx, principal)
+	makeCall := func(ctx *context.T) {
+		call, err := client.StartCall(ctx, "test", "Hang", nil)
+		progress <- err
+		progress <- call.Finish()
+	}
+	go makeCall(ctx)
+
+	// Wait for the RPC to start and for the server to receive the call.
+	if err := <-progress; err != nil {
+		t.Fatal(err)
+	}
+	<-serverChan
+
+	// Stop server asynchronously
+	go func() {
+		// Note: t.Fatal must not be called from a non-test goroutine.
+		if err := server.Stop(); err != nil {
+			t.Error(err)
+		}
+	}()
+
+	// Server should enter 'ServerStopping' state.
+	then := time.Now()
+	for {
+		status = server.Status()
+		if got, want := status.State, rpc.ServerStopping; got != want {
+			if time.Since(then) > time.Minute {
+				t.Fatalf("got %s, want %s", got, want)
+			}
+		} else {
+			break
+		}
+		time.Sleep(100 * time.Millisecond)
+	}
+	// Server won't stop until the statusServer's Hang method completes.
+	close(serverChan)
+	// Wait for RPC to finish
+	if err := <-progress; err != nil {
+		t.Fatal(err)
+	}
+
+	// Now that the RPC is done, the server should be able to stop.
+	then = time.Now()
+	for {
+		status = server.Status()
+		if got, want := status.State, rpc.ServerStopped; got != want {
+			if time.Since(then) > time.Minute {
+				t.Fatalf("got %s, want %s", got, want)
+			}
+		} else {
+			break
+		}
+		time.Sleep(100 * time.Millisecond)
+	}
+}
+
+func TestServerStates(t *testing.T) {
+	sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+	defer sm.Shutdown()
+	ns := tnaming.NewSimpleNamespace()
+	ctx, shutdown := initForTest()
+	defer shutdown()
+
+	expectBadState := func(err error) {
+		if verror.ErrorID(err) != verror.ErrBadState.ID {
+			t.Fatalf("%s: unexpected error: %v", loc(1), err)
+		}
+	}
+
+	expectNoError := func(err error) {
+		if err != nil {
+			t.Fatalf("%s: unexpected error: %v", loc(1), err)
+		}
+	}
+
+	server, err := testInternalNewServer(ctx, sm, ns, testutil.NewPrincipal("test"))
+	expectNoError(err)
+	defer server.Stop()
+
+	expectState := func(s rpc.ServerState) {
+		if got, want := server.Status().State, s; got != want {
+			t.Fatalf("%s: got %s, want %s", loc(1), got, want)
+		}
+	}
+
+	expectState(rpc.ServerInit)
+
+	// Need to call Listen first.
+	err = server.Serve("", &testServer{}, nil)
+	expectBadState(err)
+	err = server.AddName("a")
+	expectBadState(err)
+
+	_, err = server.Listen(rpc.ListenSpec{Addrs: rpc.ListenAddrs{{"tcp", "127.0.0.1:0"}}})
+	expectNoError(err)
+
+	expectState(rpc.ServerActive)
+
+	err = server.Serve("", &testServer{}, nil)
+	expectNoError(err)
+
+	err = server.Serve("", &testServer{}, nil)
+	expectBadState(err)
+
+	expectState(rpc.ServerActive)
+
+	err = server.AddName("a")
+	expectNoError(err)
+
+	expectState(rpc.ServerActive)
+
+	server.RemoveName("a")
+
+	expectState(rpc.ServerActive)
+
+	err = server.Stop()
+	expectNoError(err)
+	err = server.Stop()
+	expectNoError(err)
+
+	err = server.AddName("a")
+	expectBadState(err)
+}
+
+func TestMountStatus(t *testing.T) {
+	sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+	defer sm.Shutdown()
+	ns := tnaming.NewSimpleNamespace()
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	server, err := testInternalNewServer(ctx, sm, ns, testutil.NewPrincipal("test"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer server.Stop()
+
+	eps, err := server.Listen(rpc.ListenSpec{
+		Addrs: rpc.ListenAddrs{
+			{"tcp", "127.0.0.1:0"},
+			{"tcp", "127.0.0.1:0"},
+		}})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := len(eps), 2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+	if err = server.Serve("foo", &testServer{}, nil); err != nil {
+		t.Fatal(err)
+	}
+	setLeafEndpoints(eps)
+	status := server.Status()
+	if got, want := len(status.Mounts), 2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+	servers := status.Mounts.Servers()
+	if got, want := len(servers), 2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+	if got, want := servers, endpointToStrings(eps); !reflect.DeepEqual(got, want) {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+
+	// Add a second name and we should now see 4 mounts, 2 for each name.
+	if err := server.AddName("bar"); err != nil {
+		t.Fatal(err)
+	}
+	status = server.Status()
+	if got, want := len(status.Mounts), 4; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+	servers = status.Mounts.Servers()
+	if got, want := len(servers), 2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+	if got, want := servers, endpointToStrings(eps); !reflect.DeepEqual(got, want) {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+	names := status.Mounts.Names()
+	if got, want := len(names), 2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+	serversPerName := map[string][]string{}
+	for _, ms := range status.Mounts {
+		serversPerName[ms.Name] = append(serversPerName[ms.Name], ms.Server)
+	}
+	if got, want := len(serversPerName), 2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+	for _, name := range []string{"foo", "bar"} {
+		if got, want := len(serversPerName[name]), 2; got != want {
+			t.Fatalf("got %d, want %d", got, want)
+		}
+	}
+}
+
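+// updateHost returns a copy of ep with its network address replaced by the
+// supplied address, leaving all other fields intact.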
+func updateHost(ep naming.Endpoint, address string) naming.Endpoint {
+	niep := *(ep).(*inaming.Endpoint)
+	niep.Address = address
+	return &niep
+}
+
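+// getIPAddrs returns one netstate "ip" address per distinct host appearing
+// in the addresses of the supplied endpoints.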
+func getIPAddrs(eps []naming.Endpoint) []net.Addr {
+	hosts := map[string]struct{}{}
+	for _, ep := range eps {
+		iep := (ep).(*inaming.Endpoint)
+		h, _, _ := net.SplitHostPort(iep.Address)
+		if len(h) > 0 {
+			hosts[h] = struct{}{}
+		}
+	}
+	addrs := []net.Addr{}
+	for h := range hosts {
+		addrs = append(addrs, netstate.NewNetAddr("ip", h))
+	}
+	return addrs
+}
+
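+// endpointToStrings returns the string forms of the supplied endpoints,
+// sorted lexicographically.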
+func endpointToStrings(eps []naming.Endpoint) []string {
+	r := []string{}
+	for _, ep := range eps {
+		r = append(r, ep.String())
+	}
+	sort.Strings(r)
+	return r
+}
+
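+// cmpEndpoints reports whether got and want contain the same endpoints,
+// ignoring order.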
+func cmpEndpoints(got, want []naming.Endpoint) bool {
+	if len(got) != len(want) {
+		return false
+	}
+	return reflect.DeepEqual(endpointToStrings(got), endpointToStrings(want))
+}
+
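+// getUniqPorts returns the distinct ports used by the supplied endpoints.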
+func getUniqPorts(eps []naming.Endpoint) []string {
+	ports := map[string]struct{}{}
+	for _, ep := range eps {
+		iep := ep.(*inaming.Endpoint)
+		_, p, _ := net.SplitHostPort(iep.Address)
+		ports[p] = struct{}{}
+	}
+	r := []string{}
+	for p := range ports {
+		r = append(r, p)
+	}
+	return r
+}
+
+func TestRoaming(t *testing.T) {
+	sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+	defer sm.Shutdown()
+	ns := tnaming.NewSimpleNamespace()
+	ctx, shutdown := initForTest()
+	defer shutdown()
+
+	publisher := pubsub.NewPublisher()
+	roaming := make(chan pubsub.Setting)
+	stop, err := publisher.CreateStream("TestRoaming", "TestRoaming", roaming)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer func() { publisher.Shutdown(); <-stop }()
+
+	server, err := testInternalNewServerWithPubsub(ctx, sm, ns, publisher, "TestRoaming", testutil.NewPrincipal("test"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer server.Stop()
+
+	ipv4And6 := func(network string, addrs []net.Addr) ([]net.Addr, error) {
+		accessible := netstate.ConvertToAddresses(addrs)
+		ipv4 := accessible.Filter(netstate.IsUnicastIPv4)
+		ipv6 := accessible.Filter(netstate.IsUnicastIPv6)
+		return append(ipv4.AsNetAddrs(), ipv6.AsNetAddrs()...), nil
+	}
+	spec := rpc.ListenSpec{
+		Addrs: rpc.ListenAddrs{
+			{"tcp", "*:0"},
+			{"tcp", ":0"},
+			{"tcp", ":0"},
+		},
+		AddressChooser: ipv4And6,
+	}
+
+	eps, err := server.Listen(spec)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(eps) == 0 {
+		t.Fatal("expected at least one endpoint from Listen")
+	}
+
+	if err = server.Serve("foo", &testServer{}, nil); err != nil {
+		t.Fatal(err)
+	}
+	setLeafEndpoints(eps)
+	if err = server.AddName("bar"); err != nil {
+		t.Fatal(err)
+	}
+
+	status := server.Status()
+	if got, want := status.Endpoints, eps; !cmpEndpoints(got, want) {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+
+	if got, want := len(status.Mounts), len(eps)*2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+
+	n1 := netstate.NewNetAddr("ip", "1.1.1.1")
+	n2 := netstate.NewNetAddr("ip", "2.2.2.2")
+
+	watcher := make(chan rpc.NetworkChange, 10)
+	server.WatchNetwork(watcher)
+	defer close(watcher)
+
+	roaming <- NewAddAddrsSetting([]net.Addr{n1, n2})
+
+	waitForChange := func() *rpc.NetworkChange {
+		vlog.Infof("Waiting on %p", watcher)
+		select {
+		case c := <-watcher:
+			return &c
+		case <-time.After(time.Minute):
+			t.Fatalf("timed out: %s", loc(1))
+		}
+		return nil
+	}
+
+	// We expect 4 changes, one for each IP per usable listen spec addr.
+	change := waitForChange()
+	if got, want := len(change.Changed), 4; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+
+	nepsA := make([]naming.Endpoint, len(eps))
+	copy(nepsA, eps)
+	for _, p := range getUniqPorts(eps) {
+		nep1 := updateHost(eps[0], net.JoinHostPort("1.1.1.1", p))
+		nep2 := updateHost(eps[0], net.JoinHostPort("2.2.2.2", p))
+		nepsA = append(nepsA, []naming.Endpoint{nep1, nep2}...)
+	}
+
+	status = server.Status()
+	if got, want := status.Endpoints, nepsA; !cmpEndpoints(got, want) {
+		t.Fatalf("got %v, want %v [%d, %d]", got, want, len(got), len(want))
+	}
+
+	if got, want := len(status.Mounts), len(nepsA)*2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+	if got, want := len(status.Mounts.Servers()), len(nepsA); got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+
+	roaming <- NewRmAddrsSetting([]net.Addr{n1})
+
+	// We expect 2 changes, one for each usable listen spec addr.
+	change = waitForChange()
+	if got, want := len(change.Changed), 2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+
+	nepsR := make([]naming.Endpoint, len(eps))
+	copy(nepsR, eps)
+	for _, p := range getUniqPorts(eps) {
+		nep2 := updateHost(eps[0], net.JoinHostPort("2.2.2.2", p))
+		nepsR = append(nepsR, nep2)
+	}
+
+	status = server.Status()
+	if got, want := status.Endpoints, nepsR; !cmpEndpoints(got, want) {
+		t.Fatalf("got %v, want %v [%d, %d]", got, want, len(got), len(want))
+	}
+
+	// Remove all addresses to mimic losing all connectivity.
+	roaming <- NewRmAddrsSetting(getIPAddrs(nepsR))
+
+	// We expect changes for all of the current endpoints
+	change = waitForChange()
+	if got, want := len(change.Changed), len(nepsR); got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+
+	status = server.Status()
+	if got, want := len(status.Mounts), 0; got != want {
+		t.Fatalf("got %d, want %d: %v", got, want, status.Mounts)
+	}
+
+	roaming <- NewAddAddrsSetting([]net.Addr{n1})
+	// We expect 2 changes, one for each usable listen spec addr.
+	change = waitForChange()
+	if got, want := len(change.Changed), 2; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+
+}
+
+func TestWatcherDeadlock(t *testing.T) {
+	sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+	defer sm.Shutdown()
+	ns := tnaming.NewSimpleNamespace()
+	ctx, shutdown := initForTest()
+	defer shutdown()
+
+	publisher := pubsub.NewPublisher()
+	roaming := make(chan pubsub.Setting)
+	stop, err := publisher.CreateStream("TestWatcherDeadlock", "TestWatcherDeadlock", roaming)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer func() { publisher.Shutdown(); <-stop }()
+
+	server, err := testInternalNewServerWithPubsub(ctx, sm, ns, publisher, "TestWatcherDeadlock", testutil.NewPrincipal("test"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer server.Stop()
+
+	spec := rpc.ListenSpec{
+		Addrs: rpc.ListenAddrs{
+			{"tcp", ":0"},
+		},
+	}
+	eps, err := server.Listen(spec)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err = server.Serve("foo", &testServer{}, nil); err != nil {
+		t.Fatal(err)
+	}
+	setLeafEndpoints(eps)
+
+	// Set up a watcher that we never read from - the intent is to make
+	// sure that the server still processes network changes even though
+	// no goroutine reads from the watcher channel.
+	watcher := make(chan rpc.NetworkChange)
+	server.WatchNetwork(watcher)
+	defer close(watcher)
+
+	// Remove all addresses to mimic losing all connectivity.
+	roaming <- NewRmAddrsSetting(getIPAddrs(eps))
+
+	// Add in two new addresses
+	n1 := netstate.NewNetAddr("ip", "1.1.1.1")
+	n2 := netstate.NewNetAddr("ip", "2.2.2.2")
+	roaming <- NewAddAddrsSetting([]net.Addr{n1, n2})
+
+	neps := make([]naming.Endpoint, 0, len(eps))
+	for _, p := range getUniqPorts(eps) {
+		nep1 := updateHost(eps[0], net.JoinHostPort("1.1.1.1", p))
+		nep2 := updateHost(eps[0], net.JoinHostPort("2.2.2.2", p))
+		neps = append(neps, []naming.Endpoint{nep1, nep2}...)
+	}
+	then := time.Now()
+	for {
+		status := server.Status()
+		if got, want := status.Endpoints, neps; cmpEndpoints(got, want) {
+			break
+		}
+		time.Sleep(100 * time.Millisecond)
+		if time.Since(then) > time.Minute {
+			t.Fatalf("timed out waiting for changes to take effect")
+		}
+	}
+}
+
+func TestIsLeafServerOption(t *testing.T) {
+	ctx, shutdown := initForTest()
+	defer shutdown()
+	sm := imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+	defer sm.Shutdown()
+	ns := tnaming.NewSimpleNamespace()
+	pclient, pserver := newClientServerPrincipals()
+	server, err := testInternalNewServer(ctx, sm, ns, pserver, options.IsLeaf(true))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer server.Stop()
+
+	disp := &testServerDisp{&testServer{}}
+
+	if _, err := server.Listen(listenSpec); err != nil {
+		t.Fatalf("Listen failed: %v", err)
+	}
+
+	if err := server.ServeDispatcher("leafserver", disp); err != nil {
+		t.Fatalf("ServeDispatcher failed: %v", err)
+	}
+	client, err := InternalNewClient(sm, ns)
+	if err != nil {
+		t.Fatalf("InternalNewClient failed: %v", err)
+	}
+	ctx, _ = v23.WithPrincipal(ctx, pclient)
+	ctx, _ = context.WithDeadline(ctx, time.Now().Add(10*time.Second))
+	var result string
+	// We have set IsLeaf to true, so sending any suffix to leafserver should
+	// result in a "suffix was not expected" error.
+	callErr := client.Call(ctx, "leafserver/unwantedSuffix", "Echo", []interface{}{"Mirror on the wall"}, []interface{}{&result})
+	if callErr == nil {
+		t.Fatal("Call should have failed with a 'suffix was not expected' error")
+	}
+}
+
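+// setLeafEndpoints marks each of the supplied endpoints as a leaf endpoint.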
+func setLeafEndpoints(eps []naming.Endpoint) {
+	for i := range eps {
+		eps[i].(*inaming.Endpoint).IsLeaf = true
+	}
+}
diff --git a/runtime/internal/rpc/sort_endpoints.go b/runtime/internal/rpc/sort_endpoints.go
new file mode 100644
index 0000000..ee7193c
--- /dev/null
+++ b/runtime/internal/rpc/sort_endpoints.go
@@ -0,0 +1,212 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+	"fmt"
+	"net"
+	"sort"
+
+	"v.io/x/lib/vlog"
+
+	"v.io/v23/naming"
+	"v.io/v23/verror"
+
+	"v.io/x/lib/netstate"
+	inaming "v.io/x/ref/runtime/internal/naming"
+)
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errMalformedEndpoint            = reg(".errMalformedEndpoint", "malformed endpoint{:3}")
+	errUndesiredProtocol            = reg(".errUndesiredProtocol", "undesired protocol{:3}")
+	errIncompatibleEndpointVersions = reg(".errIncompatibleEndpointVersions", "incompatible endpoint versions{:3}")
+	errNoCompatibleServers          = reg(".errNoCompatibleServers", "failed to find any compatible servers{:3}")
+)
+
+type serverLocality int
+
+const (
+	unknownNetwork serverLocality = iota
+	remoteNetwork
+	localNetwork
+)
+
+type sortableServer struct {
+	server       naming.MountedServer
+	protocolRank int            // larger values are preferred.
+	locality     serverLocality // larger values are preferred.
+}
+
+func (s *sortableServer) String() string {
+	return fmt.Sprintf("%v", s.server)
+}
+
+type sortableServerList []sortableServer
+
+func (l sortableServerList) Len() int      { return len(l) }
+func (l sortableServerList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l sortableServerList) Less(i, j int) bool {
+	if l[i].protocolRank == l[j].protocolRank {
+		return l[i].locality > l[j].locality
+	}
+	return l[i].protocolRank > l[j].protocolRank
+}
+
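+// mkProtocolRankMap maps each protocol in list to a rank; protocols that
+// appear earlier get higher ranks. For example,
+// mkProtocolRankMap([]string{"tcp4", "tcp"}) returns {"tcp4": 2, "tcp": 1}.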
+func mkProtocolRankMap(list []string) map[string]int {
+	if len(list) == 0 {
+		return nil
+	}
+	m := make(map[string]int)
+	for idx, protocol := range list {
+		m[protocol] = len(list) - idx
+	}
+	return m
+}
+
+var defaultPreferredProtocolOrder = mkProtocolRankMap([]string{"unixfd", "wsh", "tcp4", "tcp", "*"})
+
+// filterAndOrderServers returns the subset of servers that are compatible
+// with the current client, ordered by a 'preference' derived from the
+// supplied protocol list and by a notion of 'locality', as follows:
+// - if the protocol parameter is non-empty, then only servers matching those
+// protocols are returned and the endpoints are ordered first by protocol
+// and then by locality within each protocol. For example, if tcp4 and unixfd
+// are requested, then only servers matching tcp4 and unixfd are returned,
+// with the tcp4 ones preceding the unixfd ones.
+// - if the protocol parameter is empty, then a default protocol ordering
+// is used and, unlike the previous case, servers that don't support any of
+// these protocols are also returned, after the preferred ones.
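+//
+// Illustrative sketch of the resulting order (matching what the unit test
+// expects): with protocols {"foobar", "tcp4"}, only foobar and tcp4 servers
+// are returned, foobar first; with no protocols, servers are ordered per
+// defaultPreferredProtocolOrder and, within a protocol, endpoints on a
+// local network precede remote ones.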
+func filterAndOrderServers(servers []naming.MountedServer, protocols []string, ipnets []*net.IPNet) ([]naming.MountedServer, error) {
+	vlog.VI(3).Infof("filterAndOrderServers%v: %v", protocols, servers)
+	var (
+		errs       = verror.SubErrs{}
+		list       = make(sortableServerList, 0, len(servers))
+		protoRanks = mkProtocolRankMap(protocols)
+	)
+	if len(protoRanks) == 0 {
+		protoRanks = defaultPreferredProtocolOrder
+	}
+	adderr := func(name string, err error) {
+		errs = append(errs, verror.SubErr{Name: "server=" + name, Err: err, Options: verror.Print})
+	}
+	for _, server := range servers {
+		name := server.Server
+		ep, err := name2endpoint(name)
+		if err != nil {
+			adderr(name, verror.New(errMalformedEndpoint, nil, err))
+			continue
+		}
+		rank, err := protocol2rank(ep.Addr().Network(), protoRanks)
+		if err != nil {
+			adderr(name, err)
+			continue
+		}
+		list = append(list, sortableServer{
+			server:       server,
+			protocolRank: rank,
+			locality:     locality(ep, ipnets),
+		})
+	}
+	if len(list) == 0 {
+		return nil, verror.AddSubErrs(verror.New(errNoCompatibleServers, nil), nil, errs...)
+	}
+	// TODO(ashankar): We don't have to use stable sorting and could just
+	// use sort.Sort. The only problem with that is the unit test.
+	sort.Stable(list)
+	// Convert to []naming.MountedServer
+	ret := make([]naming.MountedServer, len(list))
+	for idx, item := range list {
+		ret[idx] = item.server
+	}
+	return ret, nil
+}
+
+// name2endpoint returns the naming.Endpoint encoded in a name.
+func name2endpoint(name string) (naming.Endpoint, error) {
+	addr := name
+	if naming.Rooted(name) {
+		addr, _ = naming.SplitAddressName(name)
+	}
+	return inaming.NewEndpoint(addr)
+}
+
+// protocol2rank returns the "rank" of a protocol (given a map of ranks).
+// The higher the rank, the more preferable the protocol.
+func protocol2rank(protocol string, ranks map[string]int) (int, error) {
+	if r, ok := ranks[protocol]; ok {
+		return r, nil
+	}
+	// Special case: if "wsh" has a rank but "wsh4"/"wsh6" don't,
+	// then they get the same rank as "wsh". Similarly for "tcp" and "ws".
+	//
+	// TODO(jhahn): We have similar protocol equivalency checks at a few places.
+	// Figure out a way for this mapping to be shared.
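+	// For example, given ranks {"tcp": 2}, both "tcp4" and "tcp6" also
+	// rank 2.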
+	if p := protocol; p == "wsh4" || p == "wsh6" || p == "tcp4" || p == "tcp6" || p == "ws4" || p == "ws6" {
+		if r, ok := ranks[p[:len(p)-1]]; ok {
+			return r, nil
+		}
+	}
+	// "*" means that any protocol is acceptable.
+	if r, ok := ranks["*"]; ok {
+		return r, nil
+	}
+	// UnknownProtocol should be rare; it typically happens when
+	// the endpoint is described in <host>:<port> format instead of
+	// the full fidelity description (@<version>@<protocol>@...).
+	if protocol == naming.UnknownProtocol {
+		return -1, nil
+	}
+	return 0, verror.New(errUndesiredProtocol, nil, protocol)
+}
+
+// locality returns the serverLocality to use given an endpoint and the
+// set of IP networks configured on this machine.
+func locality(ep naming.Endpoint, ipnets []*net.IPNet) serverLocality {
+	if len(ipnets) == 0 {
+		return unknownNetwork // 0 IP networks, locality doesn't matter.
+	}
+	host, _, err := net.SplitHostPort(ep.Addr().String())
+	if err != nil {
+		host = ep.Addr().String()
+	}
+	ip := net.ParseIP(host)
+	if ip == nil {
+		// Not an IP address (possibly not an IP network).
+		return unknownNetwork
+	}
+	for _, ipnet := range ipnets {
+		if ipnet.Contains(ip) {
+			return localNetwork
+		}
+	}
+	return remoteNetwork
+}
+
+// ipNetworks returns the IP networks on this machine.
+func ipNetworks() []*net.IPNet {
+	ifcs, err := netstate.GetAllAddresses()
+	if err != nil {
+		vlog.VI(5).Infof("netstate.GetAllAddresses failed: %v", err)
+		return nil
+	}
+	ret := make([]*net.IPNet, 0, len(ifcs))
+	for _, a := range ifcs {
+		_, ipnet, err := net.ParseCIDR(a.String())
+		if err != nil {
+			vlog.VI(5).Infof("net.ParseCIDR(%q) failed: %v", a, err)
+			continue
+		}
+		ret = append(ret, ipnet)
+	}
+	return ret
+}
diff --git a/runtime/internal/rpc/sort_internal_test.go b/runtime/internal/rpc/sort_internal_test.go
new file mode 100644
index 0000000..2d0d545
--- /dev/null
+++ b/runtime/internal/rpc/sort_internal_test.go
@@ -0,0 +1,211 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+	"net"
+	"reflect"
+	"strings"
+	"testing"
+
+	"v.io/v23/naming"
+)
+
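+// servers2names returns the rooted names of the supplied servers, as
+// produced by naming.MountEntry.Names.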
+func servers2names(servers []naming.MountedServer) []string {
+	e := naming.MountEntry{Servers: servers}
+	return e.Names()
+}
+
+func TestIncompatible(t *testing.T) {
+	servers := []naming.MountedServer{}
+
+	_, err := filterAndOrderServers(servers, []string{"tcp"}, nil)
+	if err == nil || err.Error() != "failed to find any compatible servers" {
+		t.Errorf("unexpected error: %v", err)
+	}
+
+	for _, a := range []string{"127.0.0.3", "127.0.0.4"} {
+		name := naming.JoinAddressName(naming.FormatEndpoint("tcp", a), "")
+		servers = append(servers, naming.MountedServer{Server: name})
+	}
+
+	_, err = filterAndOrderServers(servers, []string{"foobar"}, nil)
+	if err == nil || !strings.HasSuffix(err.Error(), "undesired protocol: tcp]") {
+		t.Errorf("unexpected error: %v", err)
+	}
+}
+
+func TestOrderingByProtocol(t *testing.T) {
+	servers := []naming.MountedServer{}
+	_, ipnet, _ := net.ParseCIDR("127.0.0.0/8")
+	ipnets := []*net.IPNet{ipnet}
+
+	for _, a := range []string{"127.0.0.3", "127.0.0.4"} {
+		name := naming.JoinAddressName(naming.FormatEndpoint("tcp", a), "")
+		servers = append(servers, naming.MountedServer{Server: name})
+	}
+	for _, a := range []string{"127.0.0.1", "127.0.0.2"} {
+		name := naming.JoinAddressName(naming.FormatEndpoint("tcp4", a), "")
+		servers = append(servers, naming.MountedServer{Server: name})
+	}
+	for _, a := range []string{"127.0.0.10", "127.0.0.11"} {
+		name := naming.JoinAddressName(naming.FormatEndpoint("foobar", a), "")
+		servers = append(servers, naming.MountedServer{Server: name})
+	}
+	for _, a := range []string{"127.0.0.7", "127.0.0.8"} {
+		name := naming.JoinAddressName(naming.FormatEndpoint("tcp6", a), "")
+		servers = append(servers, naming.MountedServer{Server: name})
+	}
+	if _, err := filterAndOrderServers(servers, []string{"batman"}, ipnets); err == nil {
+		t.Fatalf("expected an error")
+	}
+
+	// Add a server with naming.UnknownProtocol. This typically happens
+	// when the endpoint is in <host>:<port> format. Currently, the sorting
+	// is set up to always allow UnknownProtocol, but to put it at the end.
+	// We may want to revisit this choice, but for now the test captures
+	// what the current code intends.
+	servers = append(servers, naming.MountedServer{Server: "127.0.0.12:14141"})
+
+	// Just foobar and tcp4
+	want := []string{
+		"/@5@foobar@127.0.0.10@@@@@",
+		"/@5@foobar@127.0.0.11@@@@@",
+		"/@5@tcp4@127.0.0.1@@@@@",
+		"/@5@tcp4@127.0.0.2@@@@@",
+		"/127.0.0.12:14141",
+	}
+	result, err := filterAndOrderServers(servers, []string{"foobar", "tcp4"}, ipnets)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	if got := servers2names(result); !reflect.DeepEqual(got, want) {
+		t.Errorf("got: %v, want %v", got, want)
+	}
+
+	// Everything, since we didn't specify a protocol, but ordered by
+	// the internal metric - see defaultPreferredProtocolOrder.
+	// The order will be the default preferred order for protocols, the
+	// original ordering within each protocol, with protocols that
+	// are not in the default ordering list at the end.
+	want = []string{
+		"/@5@tcp4@127.0.0.1@@@@@",
+		"/@5@tcp4@127.0.0.2@@@@@",
+		"/@5@tcp@127.0.0.3@@@@@",
+		"/@5@tcp@127.0.0.4@@@@@",
+		"/@5@tcp6@127.0.0.7@@@@@",
+		"/@5@tcp6@127.0.0.8@@@@@",
+		"/@5@foobar@127.0.0.10@@@@@",
+		"/@5@foobar@127.0.0.11@@@@@",
+		"/127.0.0.12:14141",
+	}
+	if result, err = filterAndOrderServers(servers, nil, ipnets); err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	if got := servers2names(result); !reflect.DeepEqual(got, want) {
+		t.Errorf("got: %v, want %v", got, want)
+	}
+
+	if result, err = filterAndOrderServers(servers, []string{}, ipnets); err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	if got := servers2names(result); !reflect.DeepEqual(got, want) {
+		t.Errorf("got: %v, want %v", got, want)
+	}
+
+	// Just "tcp" implies tcp4 and tcp6 as well.
+	want = []string{
+		"/@5@tcp@127.0.0.3@@@@@",
+		"/@5@tcp@127.0.0.4@@@@@",
+		"/@5@tcp4@127.0.0.1@@@@@",
+		"/@5@tcp4@127.0.0.2@@@@@",
+		"/@5@tcp6@127.0.0.7@@@@@",
+		"/@5@tcp6@127.0.0.8@@@@@",
+		"/127.0.0.12:14141",
+	}
+	if result, err = filterAndOrderServers(servers, []string{"tcp"}, ipnets); err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	if got := servers2names(result); !reflect.DeepEqual(got, want) {
+		t.Errorf("got: %v, want %v", got, want)
+	}
+
+	// Ask for all protocols, with no ordering, except for locality
+	want = []string{
+		"/@5@tcp@127.0.0.3@@@@@",
+		"/@5@tcp@127.0.0.1@@@@@",
+		"/@5@tcp@74.125.69.139@@@@@",
+		"/@5@tcp@192.168.1.10@@@@@",
+		"/@5@tcp@74.125.142.83@@@@@",
+		"/127.0.0.12:14141",
+		"/@5@foobar@127.0.0.10@@@@@",
+		"/@5@foobar@127.0.0.11@@@@@",
+	}
+	servers = []naming.MountedServer{}
+	// naming.UnknownProtocol
+	servers = append(servers, naming.MountedServer{Server: "127.0.0.12:14141"})
+	for _, a := range []string{"74.125.69.139", "127.0.0.3", "127.0.0.1", "192.168.1.10", "74.125.142.83"} {
+		name := naming.JoinAddressName(naming.FormatEndpoint("tcp", a), "")
+		servers = append(servers, naming.MountedServer{Server: name})
+	}
+	for _, a := range []string{"127.0.0.10", "127.0.0.11"} {
+		name := naming.JoinAddressName(naming.FormatEndpoint("foobar", a), "")
+		servers = append(servers, naming.MountedServer{Server: name})
+	}
+	if result, err = filterAndOrderServers(servers, []string{}, ipnets); err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	if got := servers2names(result); !reflect.DeepEqual(got, want) {
+		t.Errorf("got: %v, want %v", got, want)
+	}
+}
+
+func TestOrderingByLocality(t *testing.T) {
+	servers := []naming.MountedServer{}
+	_, ipnet, _ := net.ParseCIDR("127.0.0.0/8")
+	ipnets := []*net.IPNet{ipnet}
+
+	for _, a := range []string{"74.125.69.139", "127.0.0.3", "127.0.0.1", "192.168.1.10", "74.125.142.83"} {
+		name := naming.JoinAddressName(naming.FormatEndpoint("tcp", a), "")
+		servers = append(servers, naming.MountedServer{Server: name})
+	}
+	result, err := filterAndOrderServers(servers, []string{"tcp"}, ipnets)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	want := []string{
+		"/@5@tcp@127.0.0.3@@@@@",
+		"/@5@tcp@127.0.0.1@@@@@",
+		"/@5@tcp@74.125.69.139@@@@@",
+		"/@5@tcp@192.168.1.10@@@@@",
+		"/@5@tcp@74.125.142.83@@@@@",
+	}
+	if got := servers2names(result); !reflect.DeepEqual(got, want) {
+		t.Errorf("got: %v, want %v", got, want)
+	}
+	for _, a := range []string{"74.125.69.139", "127.0.0.3:123", "127.0.0.1", "192.168.1.10", "74.125.142.83"} {
+		name := naming.JoinAddressName(naming.FormatEndpoint("ws", a), "")
+		servers = append(servers, naming.MountedServer{Server: name})
+	}
+
+	if result, err = filterAndOrderServers(servers, []string{"ws", "tcp"}, ipnets); err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	want = []string{
+		"/@5@ws@127.0.0.3:123@@@@@",
+		"/@5@ws@127.0.0.1@@@@@",
+		"/@5@ws@74.125.69.139@@@@@",
+		"/@5@ws@192.168.1.10@@@@@",
+		"/@5@ws@74.125.142.83@@@@@",
+		"/@5@tcp@127.0.0.3@@@@@",
+		"/@5@tcp@127.0.0.1@@@@@",
+		"/@5@tcp@74.125.69.139@@@@@",
+		"/@5@tcp@192.168.1.10@@@@@",
+		"/@5@tcp@74.125.142.83@@@@@",
+	}
+	if got := servers2names(result); !reflect.DeepEqual(got, want) {
+		t.Errorf("got: %v, want %v", got, want)
+	}
+}
diff --git a/runtime/internal/rpc/stats.go b/runtime/internal/rpc/stats.go
new file mode 100644
index 0000000..640618a
--- /dev/null
+++ b/runtime/internal/rpc/stats.go
@@ -0,0 +1,99 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+	"sync"
+	"time"
+
+	"v.io/x/ref/lib/stats"
+	"v.io/x/ref/lib/stats/counter"
+	"v.io/x/ref/lib/stats/histogram"
+
+	"v.io/v23/naming"
+)
+
+type rpcStats struct {
+	mu                  sync.RWMutex
+	prefix              string
+	methods             map[string]*perMethodStats
+	blessingsCacheStats *blessingsCacheStats
+}
+
+func newRPCStats(prefix string) *rpcStats {
+	return &rpcStats{
+		prefix:              prefix,
+		methods:             make(map[string]*perMethodStats),
+		blessingsCacheStats: newBlessingsCacheStats(prefix),
+	}
+}
+
+type perMethodStats struct {
+	latency *histogram.Histogram
+}
+
+func (s *rpcStats) stop() {
+	stats.Delete(s.prefix)
+}
+
+func (s *rpcStats) record(method string, latency time.Duration) {
+	// Try first with a read lock. This will succeed in the most common
+	// case. If it fails, try again with a write lock and create the stats
+	// objects if they are still not there.
+	s.mu.RLock()
+	m, ok := s.methods[method]
+	s.mu.RUnlock()
+	if !ok {
+		m = s.newPerMethodStats(method)
+	}
+	m.latency.Add(int64(latency / time.Millisecond))
+}
+
+func (s *rpcStats) recordBlessingCache(hit bool) {
+	s.blessingsCacheStats.incr(hit)
+}
+
+// newPerMethodStats creates a new perMethodStats object if one doesn't exist
+// already. It returns the newly created object, or the already existing one.
+func (s *rpcStats) newPerMethodStats(method string) *perMethodStats {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	m, ok := s.methods[method]
+	if !ok {
+		name := naming.Join(s.prefix, "methods", method, "latency-ms")
+		s.methods[method] = &perMethodStats{
+			latency: stats.NewHistogram(name, histogram.Options{
+				NumBuckets:         25,
+				GrowthFactor:       1,
+				SmallestBucketSize: 1,
+				MinValue:           0,
+			}),
+		}
+		m = s.methods[method]
+	}
+	return m
+}
+
+// blessingsCacheStats tracks blessings cache hits and total calls received,
+// to determine how often the blessings cache is being used.
+type blessingsCacheStats struct {
+	callsReceived, cacheHits *counter.Counter
+}
+
+func newBlessingsCacheStats(prefix string) *blessingsCacheStats {
+	cachePrefix := naming.Join(prefix, "security", "blessings", "cache")
+	return &blessingsCacheStats{
+		callsReceived: stats.NewCounter(naming.Join(cachePrefix, "attempts")),
+		cacheHits:     stats.NewCounter(naming.Join(cachePrefix, "hits")),
+	}
+}
+
+// incr increments the cache attempt counter and, if hit is true, the cache hit counter.
+func (s *blessingsCacheStats) incr(hit bool) {
+	s.callsReceived.Incr(1)
+	if hit {
+		s.cacheHits.Incr(1)
+	}
+}
diff --git a/runtime/internal/rpc/stream/benchmark/RESULTS.txt b/runtime/internal/rpc/stream/benchmark/RESULTS.txt
new file mode 100644
index 0000000..905f1db
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/RESULTS.txt
@@ -0,0 +1,82 @@
+Date: 01/30/2015
+Platform: Intel(R) Xeon(R) CPU E5-2689 0 @ 2.60GHz,  66114888KB Memory
+
+$ v23 go test -bench=. -cpu=1 -benchtime=5s \
+  v.io/x/ref/runtime/internal/rpc/stream/benchmark
+
+Benchmark_dial_VIF	  500000	     14292 ns/op
+--- Histogram (unit: µs)
+	Count: 500000  Min: 4  Max: 16455  Avg: 13.58
+	------------------------------------------------------------
+	[    4,     5)  139232   27.8%   27.8%  ###
+	[    5,     6)  257818   51.6%   79.4%  #####
+	[    6,     9)   92644   18.5%   97.9%  ##
+	[    9,    15)    5963    1.2%   99.1%
+	[   15,    28)    3162    0.6%   99.8%
+	[   28,    53)     171    0.0%   99.8%
+	[   53,   101)      67    0.0%   99.8%
+	[  101,   193)       1    0.0%   99.8%
+	[  193,   370)       0    0.0%   99.8%
+	[  370,   708)       0    0.0%   99.8%
+	[  708,  1354)      57    0.0%   99.8%
+	[ 1354,  2589)     152    0.0%   99.9%
+	[ 2589,  4949)     393    0.1%   99.9%
+	[ 4949,  9457)     322    0.1%  100.0%
+	[ 9457, 18069)      18    0.0%  100.0%
+	[18069, 34520)       0    0.0%  100.0%
+	[34520,   inf)       0    0.0%  100.0%
+Benchmark_dial_VIF_TLS	     500	  12594281 ns/op
+--- Histogram (unit: ms)
+	Count: 500  Min: 12  Max: 14  Avg: 12.31
+	------------------------------------------------------------
+	[ 12,  13)  352   70.4%   70.4%  #######
+	[ 13,  14)  141   28.2%   98.6%  ###
+	[ 14, inf)    7    1.4%  100.0%
+Benchmark_dial_VC_TLS	     500	  16116072 ns/op
+--- Histogram (unit: ms)
+	Count: 500  Min: 15  Max: 22  Avg: 15.53
+	------------------------------------------------------------
+	[ 15,  16)  313   62.6%   62.6%  ######
+	[ 16,  17)  121   24.2%   86.8%  ##
+	[ 17,  18)   60   12.0%   98.8%  #
+	[ 18,  19)    3    0.6%   99.4%
+	[ 19,  20)    2    0.4%   99.8%
+	[ 20,  21)    0    0.0%   99.8%
+	[ 21,  23)    1    0.2%  100.0%
+	[ 23, inf)    0    0.0%  100.0%
+Benchmark_throughput_TCP_1Conn	 1000000	      9197 ns/op	5566.89 MB/s
+Benchmark_throughput_TCP_2Conns	 1000000	      9083 ns/op	5636.56 MB/s
+Benchmark_throughput_TCP_4Conns	 1000000	      9855 ns/op	5194.81 MB/s
+Benchmark_throughput_TCP_8Conns	  500000	     12541 ns/op	4082.43 MB/s
+Benchmark_throughput_WS_1Conn	   30000	    206804 ns/op	 247.58 MB/s
+Benchmark_throughput_WS_2Conns	   30000	    211842 ns/op	 241.69 MB/s
+Benchmark_throughput_WS_4Conns	   30000	    209994 ns/op	 243.82 MB/s
+Benchmark_throughput_WS_8Conns	   30000	    217110 ns/op	 235.83 MB/s
+Benchmark_throughput_WSH_TCP_1Conn	 1000000	      9322 ns/op	5491.85 MB/s
+Benchmark_throughput_WSH_TCP_2Conns	 1000000	      9370 ns/op	5463.77 MB/s
+Benchmark_throughput_WSH_TCP_4Conns	 1000000	      9466 ns/op	5408.50 MB/s
+Benchmark_throughput_WSH_TCP_8Conns	  500000	     12526 ns/op	4087.22 MB/s
+Benchmark_throughput_WSH_WS_1Conn	   30000	    207833 ns/op	 246.35 MB/s
+Benchmark_throughput_WSH_WS_2Conns	   30000	    208567 ns/op	 245.48 MB/s
+Benchmark_throughput_WSH_WS_4Conns	   30000	    211562 ns/op	 242.01 MB/s
+Benchmark_throughput_WSH_WS_8Conns	   30000	    216454 ns/op	 236.54 MB/s
+Benchmark_throughput_Pipe_1Conn	  500000	     20169 ns/op	2538.54 MB/s
+Benchmark_throughput_Pipe_2Conns	  500000	     19935 ns/op	2568.29 MB/s
+Benchmark_throughput_Pipe_4Conns	  300000	     19893 ns/op	2573.76 MB/s
+Benchmark_throughput_Pipe_8Conns	 1000000	     20235 ns/op	2530.22 MB/s
+Benchmark_throughput_Flow_1VIF_1VC_1Flow	  300000	     28014 ns/op	1827.66 MB/s
+Benchmark_throughput_Flow_1VIF_1VC_2Flow	  300000	     27495 ns/op	1862.09 MB/s
+Benchmark_throughput_Flow_1VIF_1VC_8Flow	  200000	     35584 ns/op	1438.84 MB/s
+Benchmark_throughput_Flow_1VIF_2VC_2Flow	  300000	     27665 ns/op	1850.66 MB/s
+Benchmark_throughput_Flow_1VIF_2VC_8Flow	  200000	     34974 ns/op	1463.94 MB/s
+Benchmark_throughput_Flow_2VIF_4VC_8Flow	  200000	     37642 ns/op	1360.15 MB/s
+Benchmark_throughput_TLS_1Conn	   20000	    415149 ns/op	 123.33 MB/s
+Benchmark_throughput_TLS_2Conns	   20000	    416008 ns/op	 123.07 MB/s
+Benchmark_throughput_TLS_4Conns	   20000	    421083 ns/op	 121.59 MB/s
+Benchmark_throughput_TLS_8Conns	   20000	    423079 ns/op	 121.02 MB/s
+Benchmark_throughput_Flow_1VIF_1VC_1FlowTLS	   20000	    466212 ns/op	 109.82 MB/s
+Benchmark_throughput_Flow_1VIF_1VC_2FlowTLS	   20000	    466104 ns/op	 109.85 MB/s
+Benchmark_throughput_Flow_1VIF_1VC_8FlowTLS	   20000	    476604 ns/op	 107.43 MB/s
+Benchmark_throughput_Flow_1VIF_2VC_2FlowTLS	   20000	    466818 ns/op	 109.68 MB/s
+Benchmark_throughput_Flow_1VIF_2VC_8FlowTLS	   20000	    477094 ns/op	 107.32 MB/s
+Benchmark_throughput_Flow_2VIF_4VC_8FlowTLS	   20000	    476370 ns/op	 107.48 MB/s
diff --git a/runtime/internal/rpc/stream/benchmark/benchmark_test.go b/runtime/internal/rpc/stream/benchmark/benchmark_test.go
new file mode 100644
index 0000000..2102047
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/benchmark_test.go
@@ -0,0 +1,21 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package benchmark
+
+import (
+	"os"
+	"testing"
+
+	"v.io/x/ref/test/benchmark"
+)
+
+// A single empty test to avoid:
+// testing: warning: no tests to run
+// from showing up when running benchmarks in this package via "go test".
+func TestNoOp(t *testing.T) {}
+
+func TestMain(m *testing.M) {
+	os.Exit(benchmark.RunTestMain(m))
+}
diff --git a/runtime/internal/rpc/stream/benchmark/dial_test.go b/runtime/internal/rpc/stream/benchmark/dial_test.go
new file mode 100644
index 0000000..789df5b
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/dial_test.go
@@ -0,0 +1,14 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package benchmark
+
+import "testing"
+
+func Benchmark_dial_VIF_NoSecurity(b *testing.B) { benchmarkDialVIF(b, securityNone) }
+func Benchmark_dial_VIF(b *testing.B)            { benchmarkDialVIF(b, securityDefault) }
+
+// Note: We don't benchmark SecurityNone VC Dial for now since it doesn't wait
+// for an ack from the server after sending "OpenVC".
+func Benchmark_dial_VC(b *testing.B) { benchmarkDialVC(b, securityDefault) }
diff --git a/runtime/internal/rpc/stream/benchmark/dial_vc.go b/runtime/internal/rpc/stream/benchmark/dial_vc.go
new file mode 100644
index 0000000..fd7ed2d
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/dial_vc.go
@@ -0,0 +1,70 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package benchmark
+
+import (
+	"testing"
+	"time"
+
+	_ "v.io/x/ref/runtime/factories/static"
+	"v.io/x/ref/runtime/internal/rpc/stream/manager"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+	"v.io/x/ref/test/benchmark"
+	"v.io/x/ref/test/testutil"
+
+	"v.io/v23/naming"
+	"v.io/v23/options"
+	"v.io/v23/security"
+)
+
+// benchmarkDialVC measures VC creation time over the underlying VIF.
+func benchmarkDialVC(b *testing.B, mode options.SecurityLevel) {
+	stats := benchmark.AddStats(b, 16)
+
+	server := manager.InternalNew(naming.FixedRoutingID(0x5))
+	client := manager.InternalNew(naming.FixedRoutingID(0xc))
+	var (
+		principal security.Principal
+		blessings security.Blessings
+	)
+	if mode == securityDefault {
+		principal = testutil.NewPrincipal("test")
+		blessings = principal.BlessingStore().Default()
+	}
+
+	_, ep, err := server.Listen("tcp", "127.0.0.1:0", principal, blessings)
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	// Create one VC to prevent the underlying VIF from being closed.
+	_, err = client.Dial(ep, principal, vc.IdleTimeout{0})
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	b.ResetTimer() // Exclude setup time from measurement.
+
+	for i := 0; i < b.N; i++ {
+		b.StartTimer()
+		start := time.Now()
+
+		VC, err := client.Dial(ep, principal)
+		if err != nil {
+			b.Fatal(err)
+		}
+
+		duration := time.Since(start)
+		b.StopTimer()
+
+		stats.Add(duration)
+
+		VC.Close(nil)
+	}
+
+	client.Shutdown()
+	server.Shutdown()
+}
diff --git a/runtime/internal/rpc/stream/benchmark/dial_vif.go b/runtime/internal/rpc/stream/benchmark/dial_vif.go
new file mode 100644
index 0000000..2278839
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/dial_vif.go
@@ -0,0 +1,60 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package benchmark
+
+import (
+	"net"
+	"testing"
+	"time"
+
+	"v.io/x/ref/runtime/internal/rpc/stream/vif"
+	"v.io/x/ref/test/benchmark"
+	"v.io/x/ref/test/testutil"
+
+	"v.io/v23/naming"
+	"v.io/v23/options"
+	"v.io/v23/security"
+)
+
+// benchmarkDialVIF measures VIF creation time over the underlying net connection.
+func benchmarkDialVIF(b *testing.B, mode options.SecurityLevel) {
+	stats := benchmark.AddStats(b, 16)
+	var (
+		principal security.Principal
+		blessings security.Blessings
+	)
+	if mode == securityDefault {
+		principal = testutil.NewPrincipal("test")
+		blessings = principal.BlessingStore().Default()
+	}
+
+	b.ResetTimer() // Exclude setup time from measurement.
+
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		nc, ns := net.Pipe()
+
+		server, err := vif.InternalNewAcceptedVIF(ns, naming.FixedRoutingID(0x5), principal, blessings, nil, nil)
+		if err != nil {
+			b.Fatal(err)
+		}
+
+		b.StartTimer()
+		start := time.Now()
+
+		client, err := vif.InternalNewDialedVIF(nc, naming.FixedRoutingID(0xc), principal, nil, nil)
+		if err != nil {
+			b.Fatal(err)
+		}
+
+		duration := time.Since(start)
+		b.StopTimer()
+
+		stats.Add(duration)
+
+		client.Close()
+		server.Close()
+	}
+}
diff --git a/runtime/internal/rpc/stream/benchmark/doc.go b/runtime/internal/rpc/stream/benchmark/doc.go
new file mode 100644
index 0000000..ba50140
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/doc.go
@@ -0,0 +1,11 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package benchmark implements some benchmarks for comparing the
+// v.io/x/ref/runtime/internal/rpc/stream implementation with raw TCP
+// connections and/or pipes.
+//
+// Sample usage:
+//	go test v.io/x/ref/runtime/internal/rpc/stream/benchmark -bench=.
+package benchmark
diff --git a/runtime/internal/rpc/stream/benchmark/throughput.go b/runtime/internal/rpc/stream/benchmark/throughput.go
new file mode 100644
index 0000000..f8a2819
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/throughput.go
@@ -0,0 +1,73 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package benchmark
+
+import (
+	"crypto/rand"
+	"io"
+	"sync"
+	"testing"
+)
+
+const (
+	// Number of bytes to read/write
+	throughputBlockSize = 50 << 10 // 50 KB
+)
+
+type throughputTester struct {
+	b       *testing.B
+	writers []io.WriteCloser
+	readers []io.ReadCloser
+
+	data    []byte
+	pending sync.WaitGroup
+}
+
+func (t *throughputTester) Run() {
+	t.pending.Add(len(t.writers) + len(t.readers))
+	iters := t.b.N / len(t.writers)
+	t.data = make([]byte, throughputBlockSize)
+	if n, err := rand.Read(t.data); n != len(t.data) || err != nil {
+		t.b.Fatalf("Failed to fill write buffer with data: (%d, %v)", n, err)
+	}
+	t.b.ResetTimer()
+	for _, w := range t.writers {
+		go t.writeLoop(w, iters)
+	}
+	for _, r := range t.readers {
+		go t.readLoop(r)
+	}
+	t.pending.Wait()
+}
+
+func (t *throughputTester) writeLoop(w io.WriteCloser, N int) {
+	defer t.pending.Done()
+	defer w.Close()
+	size := len(t.data)
+	t.b.SetBytes(int64(size))
+	for i := 0; i < N; i++ {
+		if n, err := w.Write(t.data); err != nil || n != size {
+			// Use Errorf, not Fatalf: writeLoop runs in its own
+			// goroutine, and Fatalf must not be called from a
+			// non-test goroutine.
+			t.b.Errorf("Write returned (%d, %v), want (%d, nil)", n, err, size)
+			return
+		}
+	}
+}
+
+func (t *throughputTester) readLoop(r io.ReadCloser) {
+	defer t.pending.Done()
+	defer r.Close()
+	var buf [throughputBlockSize]byte
+	total := 0
+	for {
+		n, err := r.Read(buf[:])
+		if err != nil {
+			if err != io.EOF {
+				t.b.Errorf("Read returned (%d, %v)", n, err)
+			}
+			break
+		}
+		total += n
+	}
+}
diff --git a/runtime/internal/rpc/stream/benchmark/throughput_flow.go b/runtime/internal/rpc/stream/benchmark/throughput_flow.go
new file mode 100644
index 0000000..605ebbe
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/throughput_flow.go
@@ -0,0 +1,129 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package benchmark
+
+import (
+	"io"
+	"testing"
+
+	"v.io/x/ref/runtime/internal/rpc/stream/manager"
+
+	"v.io/v23/naming"
+	"v.io/v23/options"
+	"v.io/v23/security"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/test/testutil"
+)
+
+const (
+	// Shorthands
+	securityNone    = options.SecurityNone
+	securityDefault = options.SecurityConfidential
+)
+
+type listener struct {
+	ln stream.Listener
+	ep naming.Endpoint
+}
+
+// createListeners returns N (stream.Listener, naming.Endpoint) pairs, such
+// that dialing each of the endpoints via stream.Manager.Dial will end up
+// creating a new VIF.
+func createListeners(mode options.SecurityLevel, m stream.Manager, N int) (servers []listener, err error) {
+	for i := 0; i < N; i++ {
+		var (
+			l         listener
+			principal security.Principal
+			blessings security.Blessings
+		)
+		if mode == securityDefault {
+			principal = testutil.NewPrincipal("test")
+			blessings = principal.BlessingStore().Default()
+		}
+		if l.ln, l.ep, err = m.Listen("tcp", "127.0.0.1:0", principal, blessings); err != nil {
+			return
+		}
+		servers = append(servers, l)
+	}
+	return
+}
+
+func benchmarkFlow(b *testing.B, mode options.SecurityLevel, nVIFs, nVCsPerVIF, nFlowsPerVC int) {
+	client := manager.InternalNew(naming.FixedRoutingID(0xcccccccc))
+	server := manager.InternalNew(naming.FixedRoutingID(0x55555555))
+
+	var principal security.Principal
+	if mode == securityDefault {
+		principal = testutil.NewPrincipal("test")
+	}
+
+	lns, err := createListeners(mode, server, nVIFs)
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	nFlows := nVIFs * nVCsPerVIF * nFlowsPerVC
+	rchan := make(chan io.ReadCloser, nFlows)
+	wchan := make(chan io.WriteCloser, nFlows)
+
+	b.ResetTimer()
+
+	go func() {
+		defer close(wchan)
+		for i := 0; i < nVIFs; i++ {
+			ep := lns[i].ep
+			for j := 0; j < nVCsPerVIF; j++ {
+				vc, err := client.Dial(ep, principal)
+				if err != nil {
+					b.Error(err)
+					return
+				}
+				for k := 0; k < nFlowsPerVC; k++ {
+					flow, err := vc.Connect()
+					if err != nil {
+						b.Error(err)
+						return
+					}
+					// Flows are "Accepted" by the remote
+					// end only on the first Write.
+					if _, err := flow.Write([]byte("hello")); err != nil {
+						b.Error(err)
+						return
+					}
+					wchan <- flow
+				}
+			}
+		}
+	}()
+
+	go func() {
+		defer close(rchan)
+		for i := 0; i < nVIFs; i++ {
+			ln := lns[i].ln
+			nFlowsPerVIF := nVCsPerVIF * nFlowsPerVC
+			for j := 0; j < nFlowsPerVIF; j++ {
+				flow, err := ln.Accept()
+				if err != nil {
+					b.Error(err)
+					return
+				}
+				rchan <- flow
+			}
+		}
+	}()
+
+	var readers []io.ReadCloser
+	for r := range rchan {
+		readers = append(readers, r)
+	}
+	var writers []io.WriteCloser
+	for w := range wchan {
+		writers = append(writers, w)
+	}
+	if b.Failed() {
+		return
+	}
+	(&throughputTester{b: b, readers: readers, writers: writers}).Run()
+}
diff --git a/runtime/internal/rpc/stream/benchmark/throughput_pipe.go b/runtime/internal/rpc/stream/benchmark/throughput_pipe.go
new file mode 100644
index 0000000..0a3d348
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/throughput_pipe.go
@@ -0,0 +1,32 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package benchmark
+
+import (
+	"io"
+	"os"
+	"testing"
+)
+
+// benchmarkPipe measures throughput over nPipes OS pipes, with one writer
+// and one reader per pipe.
+func benchmarkPipe(b *testing.B, nPipes int) {
+	readers := make([]io.ReadCloser, nPipes)
+	writers := make([]io.WriteCloser, nPipes)
+	var err error
+	for i := 0; i < nPipes; i++ {
+		// Use os.Pipe and NOT net.Pipe.
+		// The latter (based on io.Pipe) doesn't really do any I/O
+		// on Write; it just manipulates pointers (the slice) and
+		// thus isn't useful for benchmarking since that
+		// implementation is excessively cache friendly.
+		readers[i], writers[i], err = os.Pipe()
+		if err != nil {
+			b.Fatalf("Failed to create pipe #%d: %v", i, err)
+			return
+		}
+	}
+	(&throughputTester{b: b, readers: readers, writers: writers}).Run()
+}
diff --git a/runtime/internal/rpc/stream/benchmark/throughput_tcp.go b/runtime/internal/rpc/stream/benchmark/throughput_tcp.go
new file mode 100644
index 0000000..a4b54e1
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/throughput_tcp.go
@@ -0,0 +1,62 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package benchmark
+
+import (
+	"io"
+	"net"
+	"testing"
+)
+
+// benchmarkTCP sets up nConns TCP connections and measures throughput.
+func benchmarkTCP(b *testing.B, nConns int) {
+	rchan := make(chan net.Conn, nConns)
+	wchan := make(chan net.Conn, nConns)
+	ln, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		b.Fatalf("net.Listen failed: %v", err)
+		return
+	}
+	defer ln.Close()
+	// One goroutine to dial nConns connections. Failures are reported with
+	// b.Errorf (b.Fatalf must not be called from a non-test goroutine), and
+	// the channel is closed unconditionally so the draining loops below
+	// always terminate.
+	go func() {
+		defer close(wchan)
+		for i := 0; i < nConns; i++ {
+			conn, err := net.Dial("tcp", ln.Addr().String())
+			if err != nil {
+				b.Errorf("net.Dial(%q, %q) failed: %v", "tcp", ln.Addr(), err)
+				return
+			}
+			wchan <- conn
+		}
+	}()
+	// One goroutine to accept nConns connections.
+	go func() {
+		defer close(rchan)
+		for i := 0; i < nConns; i++ {
+			conn, err := ln.Accept()
+			if err != nil {
+				b.Errorf("Accept failed: %v", err)
+				return
+			}
+			rchan <- conn
+		}
+	}()
+
+	var readers []io.ReadCloser
+	var writers []io.WriteCloser
+	for r := range rchan {
+		readers = append(readers, r)
+	}
+	for w := range wchan {
+		writers = append(writers, w)
+	}
+	if b.Failed() {
+		return
+	}
+	(&throughputTester{b: b, readers: readers, writers: writers}).Run()
+}
diff --git a/runtime/internal/rpc/stream/benchmark/throughput_test.go b/runtime/internal/rpc/stream/benchmark/throughput_test.go
new file mode 100644
index 0000000..39a0c46
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/throughput_test.go
@@ -0,0 +1,79 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package benchmark
+
+import "testing"
+
+func Benchmark_throughput_TCP_1Conn(b *testing.B)  { benchmarkTCP(b, 1) }
+func Benchmark_throughput_TCP_2Conns(b *testing.B) { benchmarkTCP(b, 2) }
+func Benchmark_throughput_TCP_4Conns(b *testing.B) { benchmarkTCP(b, 4) }
+func Benchmark_throughput_TCP_8Conns(b *testing.B) { benchmarkTCP(b, 8) }
+
+func Benchmark_throughput_WS_1Conn(b *testing.B)  { benchmarkWS(b, 1) }
+func Benchmark_throughput_WS_2Conns(b *testing.B) { benchmarkWS(b, 2) }
+func Benchmark_throughput_WS_4Conns(b *testing.B) { benchmarkWS(b, 4) }
+func Benchmark_throughput_WS_8Conns(b *testing.B) { benchmarkWS(b, 8) }
+
+func Benchmark_throughput_WSH_TCP_1Conn(b *testing.B)  { benchmarkWSH(b, "tcp", 1) }
+func Benchmark_throughput_WSH_TCP_2Conns(b *testing.B) { benchmarkWSH(b, "tcp", 2) }
+func Benchmark_throughput_WSH_TCP_4Conns(b *testing.B) { benchmarkWSH(b, "tcp", 4) }
+func Benchmark_throughput_WSH_TCP_8Conns(b *testing.B) { benchmarkWSH(b, "tcp", 8) }
+
+func Benchmark_throughput_WSH_WS_1Conn(b *testing.B)  { benchmarkWSH(b, "ws", 1) }
+func Benchmark_throughput_WSH_WS_2Conns(b *testing.B) { benchmarkWSH(b, "ws", 2) }
+func Benchmark_throughput_WSH_WS_4Conns(b *testing.B) { benchmarkWSH(b, "ws", 4) }
+func Benchmark_throughput_WSH_WS_8Conns(b *testing.B) { benchmarkWSH(b, "ws", 8) }
+
+func Benchmark_throughput_Pipe_1Conn(b *testing.B)  { benchmarkPipe(b, 1) }
+func Benchmark_throughput_Pipe_2Conns(b *testing.B) { benchmarkPipe(b, 2) }
+func Benchmark_throughput_Pipe_4Conns(b *testing.B) { benchmarkPipe(b, 4) }
+func Benchmark_throughput_Pipe_8Conns(b *testing.B) { benchmarkPipe(b, 8) }
+
+func Benchmark_throughput_Flow_1VIF_1VC_1Flow_NoSecurity(b *testing.B) {
+	benchmarkFlow(b, securityNone, 1, 1, 1)
+}
+func Benchmark_throughput_Flow_1VIF_1VC_2Flow_NoSecurity(b *testing.B) {
+	benchmarkFlow(b, securityNone, 1, 1, 2)
+}
+func Benchmark_throughput_Flow_1VIF_1VC_8Flow_NoSecurity(b *testing.B) {
+	benchmarkFlow(b, securityNone, 1, 1, 8)
+}
+
+func Benchmark_throughput_Flow_1VIF_2VC_2Flow_NoSecurity(b *testing.B) {
+	benchmarkFlow(b, securityNone, 1, 2, 1)
+}
+func Benchmark_throughput_Flow_1VIF_2VC_8Flow_NoSecurity(b *testing.B) {
+	benchmarkFlow(b, securityNone, 1, 2, 4)
+}
+
+func Benchmark_throughput_Flow_2VIF_4VC_8Flow_NoSecurity(b *testing.B) {
+	benchmarkFlow(b, securityNone, 2, 2, 2)
+}
+
+func Benchmark_throughput_TLS_1Conn(b *testing.B)  { benchmarkTLS(b, 1) }
+func Benchmark_throughput_TLS_2Conns(b *testing.B) { benchmarkTLS(b, 2) }
+func Benchmark_throughput_TLS_4Conns(b *testing.B) { benchmarkTLS(b, 4) }
+func Benchmark_throughput_TLS_8Conns(b *testing.B) { benchmarkTLS(b, 8) }
+
+func Benchmark_throughput_Flow_1VIF_1VC_1Flow(b *testing.B) {
+	benchmarkFlow(b, securityDefault, 1, 1, 1)
+}
+func Benchmark_throughput_Flow_1VIF_1VC_2Flow(b *testing.B) {
+	benchmarkFlow(b, securityDefault, 1, 1, 2)
+}
+func Benchmark_throughput_Flow_1VIF_1VC_8Flow(b *testing.B) {
+	benchmarkFlow(b, securityDefault, 1, 1, 8)
+}
+
+func Benchmark_throughput_Flow_1VIF_2VC_2Flow(b *testing.B) {
+	benchmarkFlow(b, securityDefault, 1, 2, 1)
+}
+func Benchmark_throughput_Flow_1VIF_2VC_8Flow(b *testing.B) {
+	benchmarkFlow(b, securityDefault, 1, 2, 4)
+}
+
+func Benchmark_throughput_Flow_2VIF_4VC_8Flow(b *testing.B) {
+	benchmarkFlow(b, securityDefault, 2, 2, 2)
+}
diff --git a/runtime/internal/rpc/stream/benchmark/throughput_tls.go b/runtime/internal/rpc/stream/benchmark/throughput_tls.go
new file mode 100644
index 0000000..db4e96a
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/throughput_tls.go
@@ -0,0 +1,68 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package benchmark
+
+import (
+	"crypto/tls"
+	"io"
+	"net"
+	"testing"
+
+	"v.io/x/ref/runtime/internal/rpc/stream/crypto"
+)
+
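+// benchmarkTLS sets up nConns TLS connections over TCP and measures
+// throughput.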
+func benchmarkTLS(b *testing.B, nConns int) {
+	rchan := make(chan *tls.Conn, nConns)
+	wchan := make(chan *tls.Conn, nConns)
+	ln, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		b.Fatalf("net.Listen failed: %v", err)
+		return
+	}
+	defer ln.Close()
+	var tlsConfig tls.Config
+	tlsConfig.InsecureSkipVerify = true
+	// One goroutine to dial nConns connections.
+	go func() {
+		defer close(wchan)
+		for i := 0; i < nConns; i++ {
+			conn, err := tls.Dial("tcp", ln.Addr().String(), &tlsConfig)
+			if err != nil {
+				b.Errorf("tls.Dial(%q, %q) failed: %v", "tcp", ln.Addr(), err)
+				return
+			}
+			wchan <- conn
+		}
+	}()
+	// One goroutine to accept nConns connections.
+	go func() {
+		defer close(rchan)
+		for i := 0; i < nConns; i++ {
+			conn, err := ln.Accept()
+			if err != nil {
+				b.Errorf("Accept failed: %v", err)
+				return
+			}
+			server := tls.Server(conn, crypto.ServerTLSConfig())
+			server.Handshake()
+			rchan <- server
+		}
+	}()
+
+	var readers []io.ReadCloser
+	var writers []io.WriteCloser
+	for r := range rchan {
+		readers = append(readers, r)
+	}
+	for w := range wchan {
+		writers = append(writers, w)
+	}
+	if b.Failed() {
+		return
+	}
+	(&throughputTester{b: b, readers: readers, writers: writers}).Run()
+}
diff --git a/runtime/internal/rpc/stream/benchmark/throughput_ws.go b/runtime/internal/rpc/stream/benchmark/throughput_ws.go
new file mode 100644
index 0000000..07babce
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/throughput_ws.go
@@ -0,0 +1,64 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package benchmark
+
+import (
+	"io"
+	"net"
+	"testing"
+
+	"v.io/x/ref/runtime/internal/lib/websocket"
+)
+
+// benchmarkWS sets up nConns WS connections and measures throughput.
+func benchmarkWS(b *testing.B, nConns int) {
+	rchan := make(chan net.Conn, nConns)
+	wchan := make(chan net.Conn, nConns)
+	ln, err := websocket.Listener("ws", "127.0.0.1:0")
+	if err != nil {
+		b.Fatalf("websocket.Listener failed: %v", err)
+		return
+	}
+	defer ln.Close()
+	// One goroutine to dial nConns connections.
+	go func() {
+		defer close(wchan)
+		for i := 0; i < nConns; i++ {
+			conn, err := websocket.Dial("ws", ln.Addr().String(), 0)
+			if err != nil {
+				b.Errorf("websocket.Dial(%q, %q) failed: %v", "ws", ln.Addr(), err)
+				return
+			}
+			wchan <- conn
+		}
+	}()
+	// One goroutine to accept nConns connections.
+	go func() {
+		defer close(rchan)
+		for i := 0; i < nConns; i++ {
+			conn, err := ln.Accept()
+			if err != nil {
+				b.Errorf("Accept failed: %v", err)
+				return
+			}
+			rchan <- conn
+		}
+	}()
+
+	var readers []io.ReadCloser
+	var writers []io.WriteCloser
+	for r := range rchan {
+		readers = append(readers, r)
+	}
+	for w := range wchan {
+		writers = append(writers, w)
+	}
+	if b.Failed() {
+		return
+	}
+	(&throughputTester{b: b, readers: readers, writers: writers}).Run()
+}
diff --git a/runtime/internal/rpc/stream/benchmark/throughput_wsh.go b/runtime/internal/rpc/stream/benchmark/throughput_wsh.go
new file mode 100644
index 0000000..f160184
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/throughput_wsh.go
@@ -0,0 +1,79 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package benchmark
+
+import (
+	"io"
+	"net"
+	"testing"
+
+	"v.io/x/ref/runtime/internal/lib/websocket"
+)
+
+// benchmarkWSH sets up nConns hybrid (tcp/ws) connections and measures throughput.
+func benchmarkWSH(b *testing.B, protocol string, nConns int) {
+	rchan := make(chan net.Conn, nConns)
+	wchan := make(chan net.Conn, nConns)
+	ln, err := websocket.HybridListener("wsh", "127.0.0.1:0")
+	if err != nil {
+		b.Fatalf("websocket.HybridListener failed: %v", err)
+		return
+	}
+	defer ln.Close()
+	// One goroutine to dial nConns connections.
+	go func() {
+		defer close(wchan)
+		for i := 0; i < nConns; i++ {
+			var conn net.Conn
+			var err error
+			switch protocol {
+			case "tcp":
+				conn, err = net.Dial("tcp", ln.Addr().String())
+			case "ws":
+				conn, err = websocket.Dial("ws", ln.Addr().String(), 0)
+			}
+			if err != nil {
+				b.Errorf("Dial(%q, %q) failed: %v", protocol, ln.Addr(), err)
+				return
+			}
+			if protocol == "tcp" {
+				// Write a dummy byte since wsh waits forever for the magic byte.
+				conn.Write([]byte("."))
+			}
+			wchan <- conn
+		}
+	}()
+	// One goroutine to accept nConns connections.
+	go func() {
+		for i := 0; i < nConns; i++ {
+			conn, err := ln.Accept()
+			if err != nil {
+				b.Fatalf("Accept failed: %v", err)
+				rchan <- nil
+				return
+			}
+			if protocol == "tcp" {
+				// Read a dummy byte.
+				conn.Read(make([]byte, 1))
+			}
+			rchan <- conn
+		}
+		close(rchan)
+	}()
+
+	var readers []io.ReadCloser
+	var writers []io.WriteCloser
+	for r := range rchan {
+		readers = append(readers, r)
+	}
+	for w := range wchan {
+		writers = append(writers, w)
+	}
+	if b.Failed() {
+		return
+	}
+	(&throughputTester{b: b, readers: readers, writers: writers}).Run()
+}
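+
+// The exported Benchmark* entry points that drive benchmarkWS and
+// benchmarkWSH live in a sibling file of this package; a hypothetical
+// wiring (function names here are illustrative, not part of this change)
+// would look like:
+//
+//	func Benchmark_throughput_WS(b *testing.B)      { benchmarkWS(b, 1) }
+//	func Benchmark_throughput_WSH_TCP(b *testing.B) { benchmarkWSH(b, "tcp", 1) }
+//	func Benchmark_throughput_WSH_WS(b *testing.B)  { benchmarkWSH(b, "ws", 1) }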
diff --git a/runtime/internal/rpc/stream/crypto/box.go b/runtime/internal/rpc/stream/crypto/box.go
new file mode 100644
index 0000000..eec1bc9
--- /dev/null
+++ b/runtime/internal/rpc/stream/crypto/box.go
@@ -0,0 +1,121 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package crypto
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/binary"
+	"fmt"
+
+	"golang.org/x/crypto/nacl/box"
+
+	"v.io/v23/verror"
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+)
+
+const pkgPath = "v.io/x/ref/runtime/internal/rpc/stream/crypto"
+
+func reg(id, msg string) verror.IDAction {
+	return verror.Register(verror.ID(pkgPath+id), verror.NoRetry, msg)
+}
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errCipherTextTooShort      = reg(".errCipherTextTooShort", "ciphertext too short")
+	errRemotePublicKey         = reg(".errRemotePublicKey", "failed to get remote public key")
+	errMessageAuthFailed       = reg(".errMessageAuthFailed", "message authentication failed")
+	errUnrecognizedCipherSuite = reg(".errUnrecognizedCipherSuite", "CipherSuite {3} is not recognized. Must use one that uses Diffie-Hellman as the key exchange algorithm")
+)
+
+type boxcrypter struct {
+	alloc                 *iobuf.Allocator
+	sharedKey             [32]byte
+	sortedPubkeys         []byte
+	writeNonce, readNonce uint64
+}
+
+type BoxKey [32]byte
+
+// BoxKeyExchanger is used to communicate public keys between the two ends of
+// communication.
+type BoxKeyExchanger func(myPublicKey *BoxKey) (theirPublicKey *BoxKey, err error)
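+
+// A minimal in-process exchanger sketch (illustrative only; it mirrors the
+// channel-based exchangers in crypto_test.go):
+//
+//	toPeer, fromPeer := make(chan *BoxKey, 1), make(chan *BoxKey, 1)
+//	exchanger := func(myPK *BoxKey) (*BoxKey, error) {
+//		toPeer <- myPK
+//		return <-fromPeer, nil
+//	}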
+
+// NewBoxCrypter uses Curve25519, XSalsa20 and Poly1305 to encrypt and
+// authenticate messages (as defined in http://nacl.cr.yp.to/box.html).
+// An ephemeral Diffie-Hellman key exchange is performed per invocation
+// of NewBoxCrypter; the data sent has forward security with connection
+// granularity. One round-trip is required before any data can be sent.
+// BoxCrypter does NOT do anything to verify the identity of the peer.
+func NewBoxCrypter(exchange BoxKeyExchanger, pool *iobuf.Pool) (Crypter, error) {
+	pk, sk, err := box.GenerateKey(rand.Reader)
+	if err != nil {
+		return nil, err
+	}
+
+	theirPK, err := exchange((*BoxKey)(pk))
+	if err != nil {
+		return nil, err
+	}
+	if theirPK == nil {
+		return nil, verror.New(errRemotePublicKey, nil)
+	}
+
+	ret := &boxcrypter{alloc: iobuf.NewAllocator(pool, 0)}
+
+	box.Precompute(&ret.sharedKey, (*[32]byte)(theirPK), sk)
+	// Distinct messages between the same {sender, receiver} set are required
+	// to have distinct nonces. The end with the lexicographically smaller
+	// public key will send messages with nonces 0, 2, 4, ... and the other
+	// will use nonces 1, 3, 5, ...
+	if bytes.Compare(pk[:], theirPK[:]) < 0 {
+		ret.writeNonce, ret.readNonce = 0, 1
+		ret.sortedPubkeys = append(pk[:], theirPK[:]...)
+	} else {
+		ret.writeNonce, ret.readNonce = 1, 0
+		ret.sortedPubkeys = append(theirPK[:], pk[:]...)
+	}
+	return ret, nil
+}
+
+func (c *boxcrypter) Encrypt(src *iobuf.Slice) (*iobuf.Slice, error) {
+	defer src.Release()
+	var nonce [24]byte
+	binary.LittleEndian.PutUint64(nonce[:], c.writeNonce)
+	c.writeNonce += 2
+	ret := c.alloc.Alloc(uint(len(src.Contents) + box.Overhead))
+	ret.Contents = box.SealAfterPrecomputation(ret.Contents[:0], src.Contents, &nonce, &c.sharedKey)
+	return ret, nil
+}
+
+func (c *boxcrypter) Decrypt(src *iobuf.Slice) (*iobuf.Slice, error) {
+	defer src.Release()
+	var nonce [24]byte
+	binary.LittleEndian.PutUint64(nonce[:], c.readNonce)
+	c.readNonce += 2
+	retLen := len(src.Contents) - box.Overhead
+	if retLen < 0 {
+		return nil, verror.New(stream.ErrNetwork, nil, verror.New(errCipherTextTooShort, nil))
+	}
+	ret := c.alloc.Alloc(uint(retLen))
+	var ok bool
+	ret.Contents, ok = box.OpenAfterPrecomputation(ret.Contents[:0], src.Contents, &nonce, &c.sharedKey)
+	if !ok {
+		return nil, verror.New(stream.ErrSecurity, nil, verror.New(errMessageAuthFailed, nil))
+	}
+	return ret, nil
+}
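+
+// Note that nonces are never transmitted: Encrypt and Decrypt advance the
+// write and read nonces in lockstep (by 2 per message), so a dropped,
+// duplicated, or reordered ciphertext causes the next Decrypt to fail
+// authentication rather than decrypt under the wrong nonce.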
+
+func (c *boxcrypter) ChannelBinding() []byte {
+	return c.sortedPubkeys
+}
+
+func (c *boxcrypter) String() string {
+	return fmt.Sprintf("%#v", *c)
+}
diff --git a/runtime/internal/rpc/stream/crypto/box_cipher.go b/runtime/internal/rpc/stream/crypto/box_cipher.go
new file mode 100644
index 0000000..f28757d
--- /dev/null
+++ b/runtime/internal/rpc/stream/crypto/box_cipher.go
@@ -0,0 +1,147 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package crypto
+
+import (
+	"encoding/binary"
+
+	"golang.org/x/crypto/nacl/box"
+	"golang.org/x/crypto/salsa20/salsa"
+
+	"v.io/v23/verror"
+
+	"v.io/x/ref/runtime/internal/rpc/stream"
+)
+
+// cbox implements a ControlCipher using go.crypto/nacl/box.
+type cbox struct {
+	sharedKey [32]byte
+	enc       cboxStream
+	dec       cboxStream
+}
+
+// cboxStream implements one stream of encryption or decryption.
+type cboxStream struct {
+	counter uint64
+	nonce   [24]byte
+	// buffer is a temporary used for in-place crypto.
+	buffer []byte
+}
+
+const (
+	cboxMACSize = box.Overhead
+)
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errMessageTooShort = reg(".errMessageTooShort", "control cipher: message is too short")
+)
+
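+// alloc returns a zero-length slice with capacity at least n, suitable as
+// the destination of the append-style box.Seal*/box.Open* calls below.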
+func (s *cboxStream) alloc(n int) []byte {
+	if len(s.buffer) < n {
+		s.buffer = make([]byte, n*2)
+	}
+	return s.buffer[:0]
+}
+
+func (s *cboxStream) currentNonce() *[24]byte {
+	return &s.nonce
+}
+
+func (s *cboxStream) advanceNonce() {
+	s.counter++
+	binary.LittleEndian.PutUint64(s.nonce[:], s.counter)
+}
+
+// setupXSalsa20 produces a sub-key and Salsa20 counter given a nonce and key.
+//
+// See "Extending the Salsa20 nonce" by Daniel J. Bernstein, Department of
+// Computer Science, University of Illinois at Chicago, 2008.
+func setupXSalsa20(subKey *[32]byte, counter *[16]byte, nonce *[24]byte, key *[32]byte) {
+	// We use XSalsa20 for encryption so first we need to generate a
+	// key and nonce with HSalsa20.
+	var hNonce [16]byte
+	copy(hNonce[:], nonce[:])
+	salsa.HSalsa20(subKey, &hNonce, key, &salsa.Sigma)
+
+	// The final 8 bytes of the original nonce become the first 8 bytes of
+	// the counter; the remaining 8 bytes are the Salsa20 block counter,
+	// which starts at zero.
+	copy(counter[:], nonce[16:])
+}
+
+// NewControlCipherRPC6 returns a ControlCipher for RPC version 6 and newer.
+func NewControlCipherRPC6(peersPublicKey, privateKey *BoxKey, isServer bool) ControlCipher {
+	var c cbox
+	box.Precompute(&c.sharedKey, (*[32]byte)(peersPublicKey), (*[32]byte)(privateKey))
+	// The stream is full-duplex, and we want the two directions to use
+	// different nonces, so we set bit 64 of the nonce (byte 8) in the
+	// server-to-client stream and leave it clear in the client-to-server
+	// stream. advanceNonce touches only the first 8 bytes, so this
+	// distinction is permanent for the duration of the stream.
+	if isServer {
+		c.enc.nonce[8] = 1
+	} else {
+		c.dec.nonce[8] = 1
+	}
+	return &c
+}
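+
+// A minimal pairing sketch (illustrative only): each end constructs the
+// cipher from the peer's public key, its own private key, and its role, so
+// that the two directions get disjoint nonce spaces:
+//
+//	server := NewControlCipherRPC6(clientPublicKey, serverPrivateKey, true)
+//	client := NewControlCipherRPC6(serverPublicKey, clientPrivateKey, false)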
+
+// MACSize implements the ControlCipher method.
+func (c *cbox) MACSize() int {
+	return cboxMACSize
+}
+
+// Seal implements the ControlCipher method.
+func (c *cbox) Seal(data []byte) error {
+	n := len(data)
+	if n < cboxMACSize {
+		return verror.New(stream.ErrNetwork, nil, verror.New(errMessageTooShort, nil))
+	}
+	tmp := c.enc.alloc(n)
+	nonce := c.enc.currentNonce()
+	out := box.SealAfterPrecomputation(tmp, data[:n-cboxMACSize], nonce, &c.sharedKey)
+	c.enc.advanceNonce()
+	copy(data, out)
+	return nil
+}
+
+// Open implements the ControlCipher method.
+func (c *cbox) Open(data []byte) bool {
+	n := len(data)
+	if n < cboxMACSize {
+		return false
+	}
+	tmp := c.dec.alloc(n - cboxMACSize)
+	nonce := c.dec.currentNonce()
+	out, ok := box.OpenAfterPrecomputation(tmp, data, nonce, &c.sharedKey)
+	if !ok {
+		return false
+	}
+	c.dec.advanceNonce()
+	copy(data, out)
+	return true
+}
+
+// Encrypt implements the ControlCipher method.
+func (c *cbox) Encrypt(data []byte) {
+	var subKey [32]byte
+	var counter [16]byte
+	nonce := c.enc.currentNonce()
+	setupXSalsa20(&subKey, &counter, nonce, &c.sharedKey)
+	c.enc.advanceNonce()
+	salsa.XORKeyStream(data, data, &counter, &subKey)
+}
+
+// Decrypt implements the ControlCipher method.
+func (c *cbox) Decrypt(data []byte) {
+	var subKey [32]byte
+	var counter [16]byte
+	nonce := c.dec.currentNonce()
+	setupXSalsa20(&subKey, &counter, nonce, &c.sharedKey)
+	c.dec.advanceNonce()
+	salsa.XORKeyStream(data, data, &counter, &subKey)
+}
diff --git a/runtime/internal/rpc/stream/crypto/box_cipher_test.go b/runtime/internal/rpc/stream/crypto/box_cipher_test.go
new file mode 100644
index 0000000..6727f7d
--- /dev/null
+++ b/runtime/internal/rpc/stream/crypto/box_cipher_test.go
@@ -0,0 +1,133 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package crypto_test
+
+import (
+	"bytes"
+	"crypto/rand"
+	"testing"
+
+	"golang.org/x/crypto/nacl/box"
+
+	"v.io/x/ref/runtime/internal/rpc/stream/crypto"
+)
+
+// newMessage returns a copy of s with space appended for a MAC.
+func newMessage(s string) []byte {
+	b := make([]byte, len(s)+box.Overhead)
+	copy(b, []byte(s))
+	return b
+}
+
+func TestOpenSeal(t *testing.T) {
+	pub1, pvt1, err := box.GenerateKey(rand.Reader)
+	if err != nil {
+		t.Fatalf("can't generate key")
+	}
+	pub2, pvt2, err := box.GenerateKey(rand.Reader)
+	if err != nil {
+		t.Fatalf("can't generate key")
+	}
+	c1 := crypto.NewControlCipherRPC6((*crypto.BoxKey)(pub2), (*crypto.BoxKey)(pvt1), true)
+	c2 := crypto.NewControlCipherRPC6((*crypto.BoxKey)(pub1), (*crypto.BoxKey)(pvt2), false)
+
+	msg1 := newMessage("hello")
+	if err := c1.Seal(msg1); err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	msg2 := newMessage("world")
+	if err := c1.Seal(msg2); err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	msg3 := newMessage("hello")
+	if err := c1.Seal(msg3); err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	if bytes.Equal(msg1, msg3) {
+		t.Errorf("message should differ: %q, %q", msg1, msg3)
+	}
+
+	// Check that the client does not encrypt the same.
+	msg4 := newMessage("hello")
+	if err := c2.Seal(msg4); err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	if bytes.Equal(msg4, msg1) {
+		t.Errorf("messages should differ %q vs. %q", msg4, msg1)
+	}
+
+	// Corrupted message should not decrypt.
+	msg1[0] ^= 1
+	if ok := c2.Open(msg1); ok {
+		t.Errorf("expected error")
+	}
+
+	// Fix the message and try again.
+	msg1[0] ^= 1
+	if ok := c2.Open(msg1); !ok {
+		t.Errorf("Open failed")
+	}
+	if !bytes.Equal(msg1[:5], []byte("hello")) {
+		t.Errorf("got %q, expected %q", msg1[:5], "hello")
+	}
+
+	// msg3 should not decrypt.
+	if ok := c2.Open(msg3); ok {
+		t.Errorf("expected error")
+	}
+
+	// Resume.
+	if ok := c2.Open(msg2); !ok {
+		t.Errorf("Open failed")
+	}
+	if !bytes.Equal(msg2[:5], []byte("world")) {
+		t.Errorf("got %q, expected %q", msg2[:5], "world")
+	}
+	if ok := c2.Open(msg3); !ok {
+		t.Errorf("Open failed")
+	}
+	if !bytes.Equal(msg3[:5], []byte("hello")) {
+		t.Errorf("got %q, expected %q", msg3[:5], "hello")
+	}
+}
+
+func TestXORKeyStream(t *testing.T) {
+	pub1, pvt1, err := box.GenerateKey(rand.Reader)
+	if err != nil {
+		t.Fatalf("can't generate key")
+	}
+	pub2, pvt2, err := box.GenerateKey(rand.Reader)
+	if err != nil {
+		t.Fatalf("can't generate key")
+	}
+	c1 := crypto.NewControlCipherRPC6((*crypto.BoxKey)(pub2), (*crypto.BoxKey)(pvt1), true)
+	c2 := crypto.NewControlCipherRPC6((*crypto.BoxKey)(pub1), (*crypto.BoxKey)(pvt2), false)
+
+	msg1 := []byte("hello")
+	msg2 := []byte("world")
+	msg3 := []byte("hello")
+	c1.Encrypt(msg1)
+	c1.Encrypt(msg2)
+	c1.Encrypt(msg3)
+	if bytes.Equal(msg1, msg3) {
+		t.Errorf("messages should differ: %q, %q", msg1, msg3)
+	}
+
+	c2.Decrypt(msg1)
+	c2.Decrypt(msg2)
+	c2.Decrypt(msg3)
+	s1 := string(msg1)
+	s2 := string(msg2)
+	s3 := string(msg3)
+	if s1 != "hello" {
+		t.Errorf("got %q, expected 'hello'", s1)
+	}
+	if s2 != "world" {
+		t.Errorf("got %q, expected 'world'", s2)
+	}
+	if s3 != "hello" {
+		t.Errorf("got %q, expected 'hello'", s3)
+	}
+}
diff --git a/runtime/internal/rpc/stream/crypto/control_cipher.go b/runtime/internal/rpc/stream/crypto/control_cipher.go
new file mode 100644
index 0000000..ae25daf
--- /dev/null
+++ b/runtime/internal/rpc/stream/crypto/control_cipher.go
@@ -0,0 +1,27 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package crypto
+
+// ControlCipher provides the ciphers and MAC for control channel encryption.
+// Encryption and decryption are performed in place.
+type ControlCipher interface {
+	// MACSize returns the number of bytes in the MAC.
+	MACSize() int
+
+	// Seal replaces the message with an authenticated and encrypted version.
+	// The trailing MACSize bytes of the data are used for the MAC; they are
+	// discarded and overwritten.
+	Seal(data []byte) error
+
+	// Open authenticates and decrypts a box produced by Seal.  The trailing
+	// MACSize bytes are not changed.  Returns true on success.
+	Open(data []byte) bool
+
+	// Encrypt encrypts the data in place.  No MAC is added.
+	Encrypt(data []byte)
+
+	// Decrypt decrypts the data in place.  No MAC is verified.
+	Decrypt(data []byte)
+}
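+
+// A minimal sketch of the calling convention (illustrative only): the sender
+// reserves MACSize bytes at the end of the frame before sealing, and the
+// receiver authenticates in place before trusting the payload.
+//
+//	frame := make([]byte, len(payload)+cipher.MACSize())
+//	copy(frame, payload)
+//	if err := cipher.Seal(frame); err != nil { /* handle error */ }
+//	// ... on the receiving end ...
+//	if !cipher.Open(frame) { /* authentication failed */ }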
diff --git a/runtime/internal/rpc/stream/crypto/crypto.go b/runtime/internal/rpc/stream/crypto/crypto.go
new file mode 100644
index 0000000..bee1726
--- /dev/null
+++ b/runtime/internal/rpc/stream/crypto/crypto.go
@@ -0,0 +1,39 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package crypto implements encryption and decryption interfaces intended for
+// securing communication over VCs.
+package crypto
+
+import "v.io/x/ref/runtime/internal/lib/iobuf"
+
+type Encrypter interface {
+	// Encrypt encrypts the provided plaintext data and returns the
+	// corresponding ciphertext slice (or nil if an error is returned).
+	//
+	// It always calls Release on plaintext and thus plaintext should not
+	// be used after calling Encrypt.
+	Encrypt(plaintext *iobuf.Slice) (ciphertext *iobuf.Slice, err error)
+}
+
+type Decrypter interface {
+	// Decrypt decrypts the provided ciphertext slice and returns the
+	// corresponding plaintext (or nil if an error is returned).
+	//
+	// It always calls Release on ciphertext and thus ciphertext should not
+	// be used after calling Decrypt.
+	Decrypt(ciphertext *iobuf.Slice) (plaintext *iobuf.Slice, err error)
+}
+
+type Crypter interface {
+	Encrypter
+	Decrypter
+	// ChannelBinding returns a byte slice that is unique for the
+	// particular crypter (and the parties between which it is operating).
+	// Having both parties assert out of band that they are indeed
+	// participating in a connection with that channel binding value is
+	// sufficient to authenticate the data received through the crypter.
+	ChannelBinding() []byte
+	String() string
+}
diff --git a/runtime/internal/rpc/stream/crypto/crypto_test.go b/runtime/internal/rpc/stream/crypto/crypto_test.go
new file mode 100644
index 0000000..fe83db7
--- /dev/null
+++ b/runtime/internal/rpc/stream/crypto/crypto_test.go
@@ -0,0 +1,270 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package crypto
+
+import (
+	"bytes"
+	"crypto/rand"
+	"net"
+	"testing"
+	"testing/quick"
+
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+)
+
+func quickTest(t *testing.T, e Encrypter, d Decrypter) {
+	f := func(plaintext []byte) bool {
+		plainslice := iobuf.NewSlice(plaintext)
+		cipherslice, err := e.Encrypt(plainslice)
+		if err != nil {
+			t.Error(err)
+			return false
+		}
+		plainslice, err = d.Decrypt(cipherslice)
+		if err != nil {
+			t.Error(err)
+			return false
+		}
+		defer plainslice.Release()
+		return bytes.Equal(plainslice.Contents, plaintext)
+	}
+	if err := quick.Check(f, nil); err != nil {
+		t.Error(err)
+	}
+}
+
+func TestNull(t *testing.T) {
+	crypter := NewNullCrypter()
+	quickTest(t, crypter, crypter)
+	crypter.String() // Only to test that String does not crash.
+}
+
+func testSimple(t *testing.T, c1, c2 Crypter) {
+	// Execute String just to check that it does not crash.
+	c1.String()
+	c2.String()
+	if t.Failed() {
+		return
+	}
+	quickTest(t, c1, c2)
+	quickTest(t, c2, c1)
+
+	// Log the byte overhead of encryption, just so that test output has a
+	// record.
+	var overhead [10]int
+	for i := 0; i < len(overhead); i++ {
+		size := 1 << uint(i)
+		slice, err := c1.Encrypt(iobuf.NewSlice(make([]byte, size)))
+		if err != nil {
+			t.Fatalf("%d: %v", i, err)
+		}
+		overhead[i] = slice.Size() - size
+		slice.Release()
+	}
+	t.Logf("Byte overhead of encryption: %v", overhead)
+}
+
+func TestTLS(t *testing.T) {
+	server, client := net.Pipe()
+	c1, c2 := tlsCrypters(t, server, client)
+	testSimple(t, c1, c2)
+}
+
+func TestBox(t *testing.T) {
+	c1, c2 := boxCrypters(t, nil, nil)
+	testSimple(t, c1, c2)
+}
+
+// testChannelBinding attempts to ensure that:
+// (a) ChannelBinding returns the same value for both ends of a Crypter
+// (b) ChannelBindings are unique
+// For (b), we simply test many times and check that no two instances have the same ChannelBinding value.
+// Yes, this test isn't exhaustive. If you have ideas, please share.
+func testChannelBinding(t *testing.T, factory func(testing.TB, net.Conn, net.Conn) (Crypter, Crypter)) {
+	values := make([][]byte, 100)
+	for i := 0; i < len(values); i++ {
+		conn1, conn2 := net.Pipe()
+		c1, c2 := factory(t, conn1, conn2)
+		if !bytes.Equal(c1.ChannelBinding(), c2.ChannelBinding()) {
+			t.Fatalf("Two ends of the crypter ended up with different channel bindings (iteration #%d)", i)
+		}
+		values[i] = c1.ChannelBinding()
+	}
+	for i := 0; i < len(values); i++ {
+		for j := i + 1; j < len(values); j++ {
+			if bytes.Equal(values[i], values[j]) {
+				t.Fatalf("Same ChannelBinding seen on multiple channels (%d and %d)", i, j)
+			}
+		}
+	}
+}
+
+func TestChannelBindingTLS(t *testing.T) { testChannelBinding(t, tlsCrypters) }
+func TestChannelBindingBox(t *testing.T) { testChannelBinding(t, boxCrypters) }
+
+func TestTLSNil(t *testing.T) {
+	conn1, conn2 := net.Pipe()
+	c1, c2 := tlsCrypters(t, conn1, conn2)
+	if t.Failed() {
+		return
+	}
+	cipher, err := c1.Encrypt(iobuf.NewSlice(nil))
+	if err != nil {
+		t.Fatal(err)
+	}
+	plain, err := c2.Decrypt(cipher)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if plain.Size() != 0 {
+		t.Fatalf("Decryption produced non-empty data (%d)", plain.Size())
+	}
+}
+
+func TestTLSFragmentedPlaintext(t *testing.T) {
+	// From RFC 5246, Section 6.2.1, the maximum length of a TLS record is
+	// 16K (it is represented by a uint16).
+	// http://tools.ietf.org/html/rfc5246#section-6.2.1
+	const dataLen = 16384 + 1
+	conn1, conn2 := net.Pipe()
+	enc, dec := tlsCrypters(t, conn1, conn2)
+	cipher, err := enc.Encrypt(iobuf.NewSlice(make([]byte, dataLen)))
+	if err != nil {
+		t.Fatal(err)
+	}
+	plain, err := dec.Decrypt(cipher)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(plain.Contents, make([]byte, dataLen)) {
+		t.Errorf("Got %d bytes, want %d bytes of zeroes", plain.Size(), dataLen)
+	}
+}
+
+type factory func(t testing.TB, server, client net.Conn) (Crypter, Crypter)
+
+func tlsCrypters(t testing.TB, serverConn, clientConn net.Conn) (Crypter, Crypter) {
+	crypters := make(chan Crypter)
+	go func() {
+		server, err := NewTLSServer(serverConn, serverConn.LocalAddr(), serverConn.RemoteAddr(), iobuf.NewPool(0))
+		if err != nil {
+			t.Fatal(err)
+		}
+		crypters <- server
+	}()
+
+	go func() {
+		client, err := NewTLSClient(clientConn, clientConn.LocalAddr(), clientConn.RemoteAddr(), TLSClientSessionCache{}, iobuf.NewPool(0))
+		if err != nil {
+			t.Fatal(err)
+		}
+		crypters <- client
+	}()
+	c1 := <-crypters
+	c2 := <-crypters
+	return c1, c2
+}
+
+func boxCrypters(t testing.TB, _, _ net.Conn) (Crypter, Crypter) {
+	server, client := make(chan *BoxKey, 1), make(chan *BoxKey, 1)
+	clientExchanger := func(pubKey *BoxKey) (*BoxKey, error) {
+		client <- pubKey
+		return <-server, nil
+	}
+	serverExchanger := func(pubKey *BoxKey) (*BoxKey, error) {
+		server <- pubKey
+		return <-client, nil
+	}
+	crypters := make(chan Crypter)
+	for _, ex := range []BoxKeyExchanger{clientExchanger, serverExchanger} {
+		go func(exchanger BoxKeyExchanger) {
+			crypter, err := NewBoxCrypter(exchanger, iobuf.NewPool(0))
+			if err != nil {
+				t.Fatal(err)
+			}
+			crypters <- crypter
+		}(ex)
+	}
+	return <-crypters, <-crypters
+}
+
+func benchmarkEncrypt(b *testing.B, crypters factory, size int) {
+	plaintext := make([]byte, size)
+	if _, err := rand.Read(plaintext); err != nil {
+		b.Fatal(err)
+	}
+	conn1, conn2 := net.Pipe()
+	defer conn1.Close()
+	defer conn2.Close()
+	e, _ := crypters(b, conn1, conn2)
+	b.SetBytes(int64(size))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		cipher, err := e.Encrypt(iobuf.NewSlice(plaintext))
+		if err != nil {
+			b.Fatal(err)
+		}
+		cipher.Release()
+	}
+}
+
+func BenchmarkTLSEncrypt_1B(b *testing.B)  { benchmarkEncrypt(b, tlsCrypters, 1) }
+func BenchmarkTLSEncrypt_1K(b *testing.B)  { benchmarkEncrypt(b, tlsCrypters, 1<<10) }
+func BenchmarkTLSEncrypt_10K(b *testing.B) { benchmarkEncrypt(b, tlsCrypters, 10<<10) }
+func BenchmarkTLSEncrypt_1M(b *testing.B)  { benchmarkEncrypt(b, tlsCrypters, 1<<20) }
+func BenchmarkTLSEncrypt_5M(b *testing.B)  { benchmarkEncrypt(b, tlsCrypters, 5<<20) }
+
+func BenchmarkBoxEncrypt_1B(b *testing.B)  { benchmarkEncrypt(b, boxCrypters, 1) }
+func BenchmarkBoxEncrypt_1K(b *testing.B)  { benchmarkEncrypt(b, boxCrypters, 1<<10) }
+func BenchmarkBoxEncrypt_10K(b *testing.B) { benchmarkEncrypt(b, boxCrypters, 10<<10) }
+func BenchmarkBoxEncrypt_1M(b *testing.B)  { benchmarkEncrypt(b, boxCrypters, 1<<20) }
+func BenchmarkBoxEncrypt_5M(b *testing.B)  { benchmarkEncrypt(b, boxCrypters, 5<<20) }
+
+func benchmarkRoundTrip(b *testing.B, crypters factory, size int) {
+	plaintext := make([]byte, size)
+	if _, err := rand.Read(plaintext); err != nil {
+		b.Fatal(err)
+	}
+	conn1, conn2 := net.Pipe()
+	defer conn1.Close()
+	defer conn2.Close()
+	e, d := crypters(b, conn1, conn2)
+	b.SetBytes(int64(size))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		cipherslice, err := e.Encrypt(iobuf.NewSlice(plaintext))
+		if err != nil {
+			b.Fatal(err)
+		}
+		plainslice, err := d.Decrypt(cipherslice)
+		if err != nil {
+			b.Fatal(err)
+		}
+		plainslice.Release()
+	}
+}
+
+func BenchmarkTLSRoundTrip_1B(b *testing.B)  { benchmarkRoundTrip(b, tlsCrypters, 1) }
+func BenchmarkTLSRoundTrip_1K(b *testing.B)  { benchmarkRoundTrip(b, tlsCrypters, 1<<10) }
+func BenchmarkTLSRoundTrip_10K(b *testing.B) { benchmarkRoundTrip(b, tlsCrypters, 10<<10) }
+func BenchmarkTLSRoundTrip_1M(b *testing.B)  { benchmarkRoundTrip(b, tlsCrypters, 1<<20) }
+func BenchmarkTLSRoundTrip_5M(b *testing.B)  { benchmarkRoundTrip(b, tlsCrypters, 5<<20) }
+
+func BenchmarkBoxRoundTrip_1B(b *testing.B)  { benchmarkRoundTrip(b, boxCrypters, 1) }
+func BenchmarkBoxRoundTrip_1K(b *testing.B)  { benchmarkRoundTrip(b, boxCrypters, 1<<10) }
+func BenchmarkBoxRoundTrip_10K(b *testing.B) { benchmarkRoundTrip(b, boxCrypters, 10<<10) }
+func BenchmarkBoxRoundTrip_1M(b *testing.B)  { benchmarkRoundTrip(b, boxCrypters, 1<<20) }
+func BenchmarkBoxRoundTrip_5M(b *testing.B)  { benchmarkRoundTrip(b, boxCrypters, 5<<20) }
+
+func benchmarkSetup(b *testing.B, crypters factory) {
+	for i := 0; i < b.N; i++ {
+		conn1, conn2 := net.Pipe()
+		crypters(b, conn1, conn2)
+		conn1.Close()
+		conn2.Close()
+	}
+}
+
+func BenchmarkTLSSetup(b *testing.B) { benchmarkSetup(b, tlsCrypters) }
+func BenchmarkBoxSetup(b *testing.B) { benchmarkSetup(b, boxCrypters) }
diff --git a/runtime/internal/rpc/stream/crypto/null.go b/runtime/internal/rpc/stream/crypto/null.go
new file mode 100644
index 0000000..036b541
--- /dev/null
+++ b/runtime/internal/rpc/stream/crypto/null.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package crypto
+
+import "v.io/x/ref/runtime/internal/lib/iobuf"
+
+// NewNullCrypter returns a Crypter that does no encryption/decryption.
+func NewNullCrypter() Crypter { return null{} }
+
+type null struct{}
+
+func (null) Encrypt(src *iobuf.Slice) (*iobuf.Slice, error) { return src, nil }
+func (null) Decrypt(src *iobuf.Slice) (*iobuf.Slice, error) { return src, nil }
+func (null) String() string                                 { return "Null" }
+func (null) ChannelBinding() []byte                         { return nil }
diff --git a/runtime/internal/rpc/stream/crypto/null_cipher.go b/runtime/internal/rpc/stream/crypto/null_cipher.go
new file mode 100644
index 0000000..cdfadc5
--- /dev/null
+++ b/runtime/internal/rpc/stream/crypto/null_cipher.go
@@ -0,0 +1,27 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package crypto
+
+// NullControlCipher is a cipher that does nothing.
+type NullControlCipher struct{}
+
+func (NullControlCipher) MACSize() int           { return 0 }
+func (NullControlCipher) Seal(data []byte) error { return nil }
+func (NullControlCipher) Open(data []byte) bool  { return true }
+func (NullControlCipher) Encrypt(data []byte)    {}
+func (NullControlCipher) Decrypt(data []byte)    {}
+
+type disabledControlCipher struct {
+	NullControlCipher
+	macSize int
+}
+
+func (c *disabledControlCipher) MACSize() int { return c.macSize }
+
+// NewDisabledControlCipher returns a cipher that has the correct MACSize, but
+// encryption and decryption are disabled.
+func NewDisabledControlCipher(c ControlCipher) ControlCipher {
+	return &disabledControlCipher{macSize: c.MACSize()}
+}
diff --git a/runtime/internal/rpc/stream/crypto/tls.go b/runtime/internal/rpc/stream/crypto/tls.go
new file mode 100644
index 0000000..db8833c
--- /dev/null
+++ b/runtime/internal/rpc/stream/crypto/tls.go
@@ -0,0 +1,252 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.4
+
+package crypto
+
+import (
+	"bytes"
+	"crypto/tls"
+	"fmt"
+	"io"
+	"net"
+	"sync"
+	"time"
+
+	"v.io/v23/verror"
+
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+)
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errDeadlinesNotSupported = reg(".errDeadlinesNotSupported", "deadlines not supported")
+	errEndOfEncryptedSlice   = reg(".errEndOfEncryptedSlice", "end of encrypted slice")
+)
+
+// TLSClientSessionCache specifies the ClientSessionCache used to resume TLS sessions.
+// It adapts tls.ClientSessionCache to the v.io/x/ref/runtime/internal/rpc/stream.VCOpt interface.
+type TLSClientSessionCache struct{ tls.ClientSessionCache }
+
+func (TLSClientSessionCache) RPCStreamVCOpt() {}
+
+// NewTLSClient returns a Crypter implementation that uses TLS, assuming
+// handshaker was initiated by a client.
+func NewTLSClient(handshaker io.ReadWriteCloser, local, remote net.Addr, sessionCache TLSClientSessionCache, pool *iobuf.Pool) (Crypter, error) {
+	var config tls.Config
+	// TLS + resumption + channel bindings is broken: <https://secure-resumption.com/#channelbindings>.
+	config.SessionTicketsDisabled = true
+	config.InsecureSkipVerify = true
+	config.ClientSessionCache = sessionCache.ClientSessionCache
+	return newTLSCrypter(handshaker, local, remote, &config, pool, false)
+}
+
+// NewTLSServer returns a Crypter implementation that uses TLS, assuming
+// handshaker was accepted by a server.
+func NewTLSServer(handshaker io.ReadWriteCloser, local, remote net.Addr, pool *iobuf.Pool) (Crypter, error) {
+	return newTLSCrypter(handshaker, local, remote, ServerTLSConfig(), pool, true)
+}
+
+type fakeConn struct {
+	handshakeConn io.ReadWriteCloser
+	out           bytes.Buffer
+	in            []byte
+	laddr, raddr  net.Addr
+}
+
+func (c *fakeConn) Read(b []byte) (n int, err error) {
+	if c.handshakeConn != nil {
+		return c.handshakeConn.Read(b)
+	}
+	if len(c.in) == 0 {
+		return 0, stream.NewNetError(verror.New(stream.ErrNetwork, nil, verror.New(errEndOfEncryptedSlice, nil)), false, true)
+	}
+	n = copy(b, c.in)
+	c.in = c.in[n:]
+	return
+}
+
+func (c *fakeConn) Write(b []byte) (int, error) {
+	if c.handshakeConn != nil {
+		return c.handshakeConn.Write(b)
+	}
+	return c.out.Write(b)
+}
+
+func (*fakeConn) Close() error           { return nil }
+func (c *fakeConn) LocalAddr() net.Addr  { return c.laddr }
+func (c *fakeConn) RemoteAddr() net.Addr { return c.raddr }
+func (*fakeConn) SetDeadline(t time.Time) error {
+	return verror.New(stream.ErrBadState, nil, verror.New(errDeadlinesNotSupported, nil))
+}
+func (*fakeConn) SetReadDeadline(t time.Time) error {
+	return verror.New(stream.ErrBadState, nil, verror.New(errDeadlinesNotSupported, nil))
+}
+func (*fakeConn) SetWriteDeadline(t time.Time) error {
+	return verror.New(stream.ErrBadState, nil, verror.New(errDeadlinesNotSupported, nil))
+}
+
+// tlsCrypter implements the Crypter interface using crypto/tls.
+//
+// crypto/tls provides a net.Conn, while the Crypter interface operates on
+// iobuf.Slice objects. In order to adapt to the Crypter interface, the
+// strategy is as follows:
+//
+// - tlsCrypter wraps a net.Conn with an alternative implementation
+//   (fakeConn) for the TLS handshake protocol.
+// - Once the TLS handshake is complete, fakeConn switches to a mode where all
+//   Write calls add to a bytes.Buffer and all Read calls read from a
+//   bytes.Buffer.
+// - Encrypt uses tls.Conn.Write, which in turn invokes fakeConn.Write, and
+//   then extracts the contents of the underlying bytes.Buffer.
+// - Decrypt adds to the read buffer and then invokes tls.Conn.Read, which
+//   in turn invokes fakeConn.Read, which reads from that buffer.
+type tlsCrypter struct {
+	mu    sync.Mutex
+	alloc *iobuf.Allocator
+	tls   *tls.Conn
+	fc    *fakeConn
+}
+
+func newTLSCrypter(handshaker io.ReadWriteCloser, local, remote net.Addr, config *tls.Config, pool *iobuf.Pool, server bool) (Crypter, error) {
+	fc := &fakeConn{handshakeConn: handshaker, laddr: local, raddr: remote}
+	var t *tls.Conn
+	if server {
+		t = tls.Server(fc, config)
+	} else {
+		// The TLS handshake protocol ends with a message received by the client.
+		// handshaker should be closed only after the handshake protocol completes.
+		// So, the client closes the handshaker.
+		defer handshaker.Close()
+		t = tls.Client(fc, config)
+	}
+	if err := t.Handshake(); err != nil {
+		return nil, err
+	}
+	// Must have used Diffie-Hellman to exchange keys (so that the key selection
+	// is independent of any TLS certificates used). This helps ensure that
+	// identities exchanged during the vanadium authentication protocol cannot be
+	// stolen and are bound to the session key established during the TLS handshake.
+	switch cs := t.ConnectionState().CipherSuite; cs {
+	case tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA:
+	case tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA:
+	case tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA:
+	case tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA:
+	case tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA:
+	case tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:
+	case tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
+	case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:
+	case tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
+	default:
+		t.Close()
+		return nil, verror.New(stream.ErrBadArg, nil, verror.New(errUnrecognizedCipherSuite, nil, fmt.Sprintf("0x%04x", cs)))
+	}
+	fc.handshakeConn = nil
+	return &tlsCrypter{
+		alloc: iobuf.NewAllocator(pool, 0),
+		tls:   t,
+		fc:    fc,
+	}, nil
+}
+
+func (c *tlsCrypter) Encrypt(plaintext *iobuf.Slice) (*iobuf.Slice, error) {
+	defer plaintext.Release()
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	defer c.fc.out.Reset()
+	if _, err := c.tls.Write(plaintext.Contents); err != nil {
+		return nil, err
+	}
+	return c.alloc.Copy(c.fc.out.Bytes()), nil
+}
+
+func (c *tlsCrypter) Decrypt(ciphertext *iobuf.Slice) (*iobuf.Slice, error) {
+	defer ciphertext.Release()
+	if ciphertext.Size() == 0 {
+		return ciphertext, nil
+	}
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.fc.in = ciphertext.Contents
+	// Given the cipher suites used, len(plaintext) < len(ciphertext)
+	// (ciphertext includes TLS record headers). Allocating space for
+	// plaintext based on ciphertext.Size should suffice.
+	plaintext := c.alloc.Alloc(uint(ciphertext.Size()))
+	out := plaintext.Contents
+	for {
+		n, err := c.tls.Read(out)
+		if err != nil {
+			if _, exit := err.(*stream.NetError); exit {
+				break
+			}
+			plaintext.Release()
+			return nil, err
+		}
+		out = out[n:]
+	}
+	plaintext.Contents = plaintext.Contents[:plaintext.Size()-len(out)]
+	return plaintext, nil
+}
+
+func (c *tlsCrypter) String() string {
+	state := c.tls.ConnectionState()
+	return fmt.Sprintf("TLS CipherSuite:0x%04x Resumed:%v", state.CipherSuite, state.DidResume)
+}
+
+// ServerTLSConfig returns the tls.Config used by NewTLSServer.
+func ServerTLSConfig() *tls.Config {
+	c, err := tls.X509KeyPair([]byte(serverCert), []byte(serverKey))
+	if err != nil {
+		panic(err)
+	}
+	return &tls.Config{
+		// TLS + resumption + channel bindings is broken: <https://secure-resumption.com/#channelbindings>.
+		SessionTicketsDisabled: true,
+		Certificates:           []tls.Certificate{c},
+		InsecureSkipVerify:     true,
+		// RC4_128_SHA is 4-5X faster compared to the other cipher suites.
+		// There are concerns with its security (see http://en.wikipedia.org/wiki/RC4 and
+		// https://www.usenix.org/conference/usenixsecurity13/technical-sessions/paper/alFardan),
+		// so this decision will be revisited.
+		// TODO(ashankar,ataly): Figure out what cipher to use and how to
+		// have a speedy Go implementation of it.
+		CipherSuites: []uint16{tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA},
+	}
+}
+
+func (c *tlsCrypter) ChannelBinding() []byte {
+	return c.tls.ConnectionState().TLSUnique
+}
+
+// PEM-encoded certificates and keys used in the tests.
+// One way to generate them is:
+//   go run $GOROOT/src/crypto/tls/generate_cert.go  --host=localhost --duration=87600h --ecdsa-curve=P256
+// (This generates a self-signed certificate valid for 10 years)
+// which will create cert.pem and key.pem files.
+const (
+	serverCert = `
+-----BEGIN CERTIFICATE-----
+MIIBbTCCAROgAwIBAgIQMD+Kzawjvhij1B/BmvHxLDAKBggqhkjOPQQDAjASMRAw
+DgYDVQQKEwdBY21lIENvMB4XDTE0MDcxODIzMTYxMloXDTI0MDcxNTIzMTYxMlow
+EjEQMA4GA1UEChMHQWNtZSBDbzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABLiz
+Ajsly1DS8NJF2KE195V83TgidfgGEB7nudscdKWH3+5uQHgCc+2BV/7AGGj3yePR
+ZZLzYD95goJ/a7eet/2jSzBJMA4GA1UdDwEB/wQEAwIAoDATBgNVHSUEDDAKBggr
+BgEFBQcDATAMBgNVHRMBAf8EAjAAMBQGA1UdEQQNMAuCCWxvY2FsaG9zdDAKBggq
+hkjOPQQDAgNIADBFAiAb4tBxggEpnKdxv66TBVFxAUn3EBWX25XlL1G2GF8RkAIh
+AOAwys3mvzM4Td/2kV9QNyQPZ9kLLQr9A9ryB0H3N9Yz
+-----END CERTIFICATE-----
+`
+	serverKey = `
+-----BEGIN ECDSA PRIVATE KEY-----
+MHcCAQEEIPLfwg+SVC2/xUcKq0bI9y2+SDEEdCeGuxuBz22BhAw1oAoGCCqGSM49
+AwEHoUQDQgAEuLMCOyXLUNLw0kXYoTX3lXzdOCJ1+AYQHue52xx0pYff7m5AeAJz
+7YFX/sAYaPfJ49FlkvNgP3mCgn9rt563/Q==
+-----END ECDSA PRIVATE KEY-----
+`
+)
diff --git a/runtime/internal/rpc/stream/doc.go b/runtime/internal/rpc/stream/doc.go
new file mode 100644
index 0000000..509628a
--- /dev/null
+++ b/runtime/internal/rpc/stream/doc.go
@@ -0,0 +1,54 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package stream implements authenticated byte streams to vanadium endpoints.
+//
+// It is split into multiple sub-packages in an attempt to keep the code
+// healthier by limiting the dependencies between objects. Most users should not
+// need to use this package.
+//
+// Package contents and dependencies are as follows:
+//
+//      * manager provides a factory for Manager objects.
+//        It depends on the vif and proxy packages.
+//      * vif implements a VIF type that wraps over a net.Conn and enables the
+//        creation of VC objects over the underlying network connection.
+//        It depends on the id, message and vc packages.
+//      * message implements serialization and deserialization for messages
+//        exchanged over a VIF.
+//        It depends on the id package.
+//      * vc provides types implementing VC and Flow.
+//        It depends on the id and crypto packages.
+//      * crypto provides types to secure communication over VCs.
+//        It does not depend on any other package.
+//      * id defines identifier types used by other packages.
+//        It does not depend on any other package.
+package stream
+
+// A dump of some ideas/thoughts/TODOs arising from the first iteration of this
+// package. Big ticket items like proxying and TLS/authentication are obvious
+// and won't be missed. I just wanted to put some smaller items on record (in
+// no particular order).
+//
+// (1) Garbage collection of VIFs: Create a policy to close the underlying
+// network connection (and shutdown the VIF) when it is "inactive" (i.e., no VCs
+// have existed on it for a while).
+// (2) On the first write of a new flow, counters are stolen from a shared pool
+// (to avoid a round trip of a "create flow" message followed by a "here are
+// your counters" message). Currently, this happens on either end of the flow
+// (on both the remote and local process). This doesn't need to be the case:
+// the end that received the first message of the flow doesn't need to steal
+// on its first write.
+// (3) Should flow control counters be part of the Data message?
+// If so, maybe the flowQ should have a lower priority than that of Data
+// messages? At a higher level I'm thinking of ways to reduce the number
+// of messages sent per flow. Currently, just creating a flow results in
+// two messages - One where the initiator sends counters to the receiver
+// and one where the receiver does the same. The first write does not
+// block on receiving the counters because of the "steal from shared pool on
+// first write" scheme, but still, sounds like too much traffic.
+// (4) As an example of the above, consider the following code:
+//     vc.Connect().Close()
+// This will result in 3 messages. But ideally it should involve 0.
+// (5) Encryption of control messages to protect from network sniffers.
diff --git a/runtime/internal/rpc/stream/error_test.go b/runtime/internal/rpc/stream/error_test.go
new file mode 100644
index 0000000..fa0716f
--- /dev/null
+++ b/runtime/internal/rpc/stream/error_test.go
@@ -0,0 +1,36 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package stream_test
+
+import (
+	"net"
+	"testing"
+
+	"v.io/v23/verror"
+
+	"v.io/x/ref/runtime/internal/rpc/stream"
+)
+
+func TestTimeoutError(t *testing.T) {
+	e := verror.Register(".test", verror.NoRetry, "hello{:3}")
+	timeoutErr := stream.NewNetError(verror.New(e, nil, "world"), true, false)
+
+	// TimeoutError implements error & net.Error. We test that it
+	// implements error by assigning timeoutErr to err which is of type error.
+	var err error
+	err = timeoutErr
+
+	neterr, ok := err.(net.Error)
+	if !ok {
+		t.Fatalf("%T not a net.Error", err)
+	}
+
+	if got, want := neterr.Timeout(), true; got != want {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+	if got, want := neterr.Error(), "hello: world"; got != want {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+}
diff --git a/runtime/internal/rpc/stream/errors.go b/runtime/internal/rpc/stream/errors.go
new file mode 100644
index 0000000..7ba12b2
--- /dev/null
+++ b/runtime/internal/rpc/stream/errors.go
@@ -0,0 +1,59 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package stream
+
+import (
+	"net"
+
+	"v.io/v23/verror"
+	"v.io/x/lib/vlog"
+)
+
+const pkgPath = "v.io/x/ref/runtime/internal/rpc/stream"
+
+// The stream family of packages guarantees to return one of the verror codes
+// defined here; their messages are constructed so as to avoid embedding a
+// component/method name and are thus more suitable for inclusion in other
+// verrors.
+// This practice of omitting {1}{2} is used throughout the stream packages,
+// since all of their errors are intended to be used as arguments to higher
+// level errors.
+var (
+	// TODO(cnicolaou): rename ErrSecurity to ErrAuth
+	ErrSecurity      = verror.Register(pkgPath+".errSecurity", verror.NoRetry, "{:3}")
+	ErrNotTrusted    = verror.Register(pkgPath+".errNotTrusted", verror.NoRetry, "{:3}")
+	ErrNetwork       = verror.Register(pkgPath+".errNetwork", verror.NoRetry, "{:3}")
+	ErrDialFailed    = verror.Register(pkgPath+".errDialFailed", verror.NoRetry, "{:3}")
+	ErrResolveFailed = verror.Register(pkgPath+".errResolveFailed", verror.NoRetry, "{:3}")
+	ErrProxy         = verror.Register(pkgPath+".errProxy", verror.NoRetry, "{:3}")
+	ErrBadArg        = verror.Register(pkgPath+".errBadArg", verror.NoRetry, "{:3}")
+	ErrBadState      = verror.Register(pkgPath+".errBadState", verror.NoRetry, "{:3}")
+	ErrAborted       = verror.Register(pkgPath+".errAborted", verror.NoRetry, "{:3}")
+)
+
+// NetError implements net.Error
+type NetError struct {
+	err           error
+	timeout, temp bool
+}
+
+// TODO(cnicolaou): investigate getting rid of the use of net.Error
+// entirely. The rpc code can now test for a specific verror code and it's
+// not clear that the net.Conns we implement in Vanadium will ever be used
+// directly by code that expects them to return a net.Error when they
+// time out.
+
+// NewNetError returns a new net.Error which will return the
+// supplied error, timeout and temporary parameters when the corresponding
+// methods are invoked.
+func NewNetError(err error, timeout, temporary bool) net.Error {
+	return &NetError{err, timeout, temporary}
+}
+
+func (t NetError) Err() error { return t.err }
+func (t NetError) Error() string {
+	defer vlog.LogCall()() // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	return t.err.Error()
+}
+func (t NetError) Timeout() bool   { return t.timeout }
+func (t NetError) Temporary() bool { return t.temp }
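+
+// Callers in the stream packages typically branch on the verror ID rather
+// than on net.Error; a minimal sketch (illustrative only):
+//
+//	if verror.ErrorID(err) == stream.ErrNetwork.ID {
+//		// e.g., retry, or tear down the VIF
+//	}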
diff --git a/runtime/internal/rpc/stream/id/id.go b/runtime/internal/rpc/stream/id/id.go
new file mode 100644
index 0000000..fd6a641
--- /dev/null
+++ b/runtime/internal/rpc/stream/id/id.go
@@ -0,0 +1,12 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package id provides types for identifying VCs and Flows over them.
+package id
+
+// VC identifies a VC over a VIF.
+type VC uint32
+
+// Flow identifies a Flow over a VC.
+type Flow uint32
diff --git a/runtime/internal/rpc/stream/manager/error_test.go b/runtime/internal/rpc/stream/manager/error_test.go
new file mode 100644
index 0000000..4c61f49
--- /dev/null
+++ b/runtime/internal/rpc/stream/manager/error_test.go
@@ -0,0 +1,140 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package manager_test
+
+import (
+	"net"
+	"testing"
+	"time"
+
+	"v.io/v23/naming"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+
+	_ "v.io/x/ref/runtime/factories/generic"
+	inaming "v.io/x/ref/runtime/internal/naming"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/manager"
+	"v.io/x/ref/runtime/internal/rpc/stream/message"
+	"v.io/x/ref/runtime/internal/testing/mocks/mocknet"
+	"v.io/x/ref/test"
+	"v.io/x/ref/test/testutil"
+)
+
+func TestListenErrors(t *testing.T) {
+	server := manager.InternalNew(naming.FixedRoutingID(0x1))
+	pserver := testutil.NewPrincipal("server")
+
+	// principal, no blessings
+	_, _, err := server.Listen("tcp", "127.0.0.1:0", pserver, security.Blessings{}, nil)
+	if verror.ErrorID(err) != stream.ErrBadArg.ID {
+		t.Fatalf("wrong error: %s", err)
+	}
+	t.Log(err)
+
+	// blessings, no principal
+	_, _, err = server.Listen("tcp", "127.0.0.1:0", nil, pserver.BlessingStore().Default(), nil)
+	if verror.ErrorID(err) != stream.ErrBadArg.ID {
+		t.Fatalf("wrong error: %s", err)
+	}
+	t.Log(err)
+
+	// bad protocol
+	_, _, err = server.Listen("foo", "127.0.0.1:0", pserver, pserver.BlessingStore().Default())
+	if verror.ErrorID(err) != stream.ErrBadArg.ID {
+		t.Fatalf("wrong error: %s", err)
+	}
+	t.Log(err)
+
+	// bad address
+	_, _, err = server.Listen("tcp", "xx.0.0.1:0", pserver, pserver.BlessingStore().Default())
+	if verror.ErrorID(err) != stream.ErrNetwork.ID {
+		t.Fatalf("wrong error: %s", err)
+	}
+	t.Log(err)
+
+	// bad address for proxy
+	_, _, err = server.Listen("v23", "127x.0.0.1", pserver, pserver.BlessingStore().Default())
+	if verror.ErrorID(err) != stream.ErrBadArg.ID {
+		t.Fatalf("wrong error: %s", err)
+	}
+	t.Log(err)
+}
+
+func acceptLoop(ln stream.Listener) {
+	for {
+		f, err := ln.Accept()
+		if err != nil {
+			return
+		}
+		f.Close()
+	}
+}
+
+func dropDataDialer(network, address string, timeout time.Duration) (net.Conn, error) {
+	matcher := func(read bool, msg message.T) bool {
+		switch msg.(type) {
+		case *message.Setup:
+			return true
+		}
+		return false
+	}
+	opts := mocknet.Opts{
+		Mode:              mocknet.V23CloseAtMessage,
+		V23MessageMatcher: matcher,
+	}
+	return mocknet.DialerWithOpts(opts, network, address, timeout)
+}
+
+func simpleResolver(network, address string) (string, string, error) {
+	return network, address, nil
+}
+
+func TestDialErrors(t *testing.T) {
+	_, shutdown := test.InitForTest()
+	defer shutdown()
+	server := manager.InternalNew(naming.FixedRoutingID(0x55555555))
+	client := manager.InternalNew(naming.FixedRoutingID(0xcccccccc))
+	pclient := testutil.NewPrincipal("client")
+	pserver := testutil.NewPrincipal("server")
+
+	// bad protocol
+	ep, _ := inaming.NewEndpoint(naming.FormatEndpoint("x", "127.0.0.1:2"))
+	_, err := client.Dial(ep, pclient)
+	// A bad protocol should result in a Resolve Error.
+	if verror.ErrorID(err) != stream.ErrResolveFailed.ID {
+		t.Errorf("wrong error: %v", err)
+	}
+	t.Log(err)
+
+	// no server
+	ep, _ = inaming.NewEndpoint(naming.FormatEndpoint("tcp", "127.0.0.1:2"))
+	_, err = client.Dial(ep, pclient)
+	if verror.ErrorID(err) != stream.ErrDialFailed.ID {
+		t.Errorf("wrong error: %v", err)
+	}
+	t.Log(err)
+
+	rpc.RegisterProtocol("dropData", dropDataDialer, simpleResolver, net.Listen)
+
+	ln, sep, err := server.Listen("tcp", "127.0.0.1:0", pserver, pserver.BlessingStore().Default())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Server will just listen for flows and close them.
+	go acceptLoop(ln)
+
+	cep, err := mocknet.RewriteEndpointProtocol(sep.String(), "dropData")
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = client.Dial(cep, pclient)
+	if verror.ErrorID(err) != stream.ErrNetwork.ID {
+		t.Errorf("wrong error: %v", err)
+	}
+	t.Log(err)
+}
diff --git a/runtime/internal/rpc/stream/manager/listener.go b/runtime/internal/rpc/stream/manager/listener.go
new file mode 100644
index 0000000..387752e
--- /dev/null
+++ b/runtime/internal/rpc/stream/manager/listener.go
@@ -0,0 +1,424 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package manager
+
+import (
+	"fmt"
+	"math/rand"
+	"net"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	"v.io/x/ref/runtime/internal/lib/upcqueue"
+	inaming "v.io/x/ref/runtime/internal/naming"
+	"v.io/x/ref/runtime/internal/rpc/stream/proxy"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+	"v.io/x/ref/runtime/internal/rpc/stream/vif"
+
+	"v.io/v23/naming"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+	"v.io/v23/vom"
+	"v.io/x/lib/vlog"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+)
+
+// ProxyAuthenticator is a stream.ListenerOpt that is used when listening via a
+// proxy to authenticate with the proxy.
+type ProxyAuthenticator interface {
+	stream.ListenerOpt
+	// Login returns the Blessings (and the set of Discharges to make them
+	// valid) to send to the proxy. Typically, the proxy uses these to
+	// determine whether it wants to authorize use.
+	Login(proxy stream.Flow) (security.Blessings, []security.Discharge, error)
+}
+
+func reg(id, msg string) verror.IDAction {
+	return verror.Register(verror.ID(pkgPath+id), verror.NoRetry, msg)
+}
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errVomEncodeRequest           = reg(".errVomEncodeRequest", "failed to encode request to proxy{:3}")
+	errVomDecodeResponse          = reg(".errVomDecodeResponse", "failed to decode response from proxy{:3}")
+	errProxyError                 = reg(".errProxyError", "proxy error {:3}")
+	errProxyEndpointError         = reg(".errProxyEndpointError", "proxy returned an invalid endpoint {:3}{:4}")
+	errAlreadyConnected           = reg(".errAlreadyConnected", "already connected to proxy and accepting connections? VIF: {3}, StartAccepting{:_}")
+	errFailedToCreateLivenessFlow = reg(".errFailedToCreateLivenessFlow", "unable to create liveness check flow to proxy{:3}")
+	errAcceptFailed               = reg(".errAcceptFailed", "accept failed{:3}")
+	errFailedToEstablishVC        = reg(".errFailedToEstablishVC", "VC establishment with proxy failed{:_}")
+	errListenerAlreadyClosed      = reg(".errListenerAlreadyClosed", "listener already closed")
+	errRefusedProxyLogin          = reg(".errRefusedProxyLogin", "server did not want to listen via proxy{:_}")
+)
+
+// listener extends stream.Listener with a DebugString method.
+type listener interface {
+	stream.Listener
+	DebugString() string
+}
+
+// netListener implements the listener interface by accepting flows (and VCs)
+// over network connections accepted on an underlying net.Listener.
+type netListener struct {
+	q       *upcqueue.T
+	netLn   net.Listener
+	manager *manager
+	vifs    *vif.Set
+
+	connsMu sync.Mutex
+	conns   map[net.Conn]bool
+
+	netLoop  sync.WaitGroup
+	vifLoops sync.WaitGroup
+}
+
+var _ stream.Listener = (*netListener)(nil)
+
+// proxyListener implements the listener interface by connecting to a remote
+// proxy (typically used to "listen" across network domains).
+type proxyListener struct {
+	q       *upcqueue.T
+	proxyEP naming.Endpoint
+	manager *manager
+	vif     *vif.VIF
+
+	vifLoop sync.WaitGroup
+}
+
+var _ stream.Listener = (*proxyListener)(nil)
+
+func newNetListener(m *manager, netLn net.Listener, principal security.Principal, blessings security.Blessings, opts []stream.ListenerOpt) listener {
+	ln := &netListener{
+		q:       upcqueue.New(),
+		manager: m,
+		netLn:   netLn,
+		vifs:    vif.NewSet(),
+		conns:   make(map[net.Conn]bool),
+	}
+
+	// Set the default idle timeout for VCs. For "unixfd", however, we do
+	// not set the idle timeout, since we cannot reconnect it.
+	if ln.netLn.Addr().Network() != "unixfd" {
+		opts = append([]stream.ListenerOpt{vc.IdleTimeout{defaultIdleTimeout}}, opts...)
+	}
+
+	ln.netLoop.Add(1)
+	go ln.netAcceptLoop(principal, blessings, opts)
+	return ln
+}
+
+func isTemporaryError(err error) bool {
+	if oErr, ok := err.(*net.OpError); ok && oErr.Temporary() {
+		return true
+	}
+	return false
+}
+
+func isTooManyOpenFiles(err error) bool {
+	if oErr, ok := err.(*net.OpError); ok && oErr.Err == syscall.EMFILE {
+		return true
+	}
+	return false
+}
+
+func (ln *netListener) killConnections(n int) {
+	ln.connsMu.Lock()
+	if n > len(ln.conns) {
+		n = len(ln.conns)
+	}
+	remaining := make([]net.Conn, 0, len(ln.conns))
+	for c := range ln.conns {
+		remaining = append(remaining, c)
+	}
+	removed := remaining[:n]
+	ln.connsMu.Unlock()
+
+	vlog.Infof("Killing %d Conns", n)
+
+	var wg sync.WaitGroup
+	wg.Add(n)
+	for i := 0; i < n; i++ {
+		idx := rand.Intn(len(remaining))
+		conn := remaining[idx]
+		go func(conn net.Conn) {
+			vlog.Infof("Killing connection (%s, %s)", conn.LocalAddr(), conn.RemoteAddr())
+			conn.Close()
+			ln.manager.killedConns.Incr(1)
+			wg.Done()
+		}(conn)
+		remaining[idx], remaining[0] = remaining[0], remaining[idx]
+		remaining = remaining[1:]
+	}
+
+	ln.connsMu.Lock()
+	for _, conn := range removed {
+		delete(ln.conns, conn)
+	}
+	ln.connsMu.Unlock()
+
+	wg.Wait()
+}
+
+func (ln *netListener) netAcceptLoop(principal security.Principal, blessings security.Blessings, opts []stream.ListenerOpt) {
+	defer ln.netLoop.Done()
+	opts = append([]stream.ListenerOpt{vc.StartTimeout{defaultStartTimeout}}, opts...)
+	for {
+		conn, err := ln.netLn.Accept()
+		if isTemporaryError(err) {
+			// Use Info instead of Error to reduce the chances that
+			// the log library will cause the process to abort on
+			// failing to create a new file.
+			vlog.Infof("net.Listener.Accept() failed on %v with %v", ln.netLn, err)
+			for tokill := 1; isTemporaryError(err); tokill *= 2 {
+				if isTooManyOpenFiles(err) {
+					ln.killConnections(tokill)
+				} else {
+					tokill = 1
+				}
+				time.Sleep(10 * time.Millisecond)
+				conn, err = ln.netLn.Accept()
+			}
+		}
+		if err != nil {
+			// TODO(cnicolaou): closeListener in manager.go writes to ln (by calling
+			// ln.Close()) and we read it here in the Infof output, so there is
+			// an unguarded read here that will fail under --race. This will only show
+			// itself if the Infof below is changed to always be printed (which is
+			// how I noticed). The right solution is to lock these datastructures, but
+			// how I noticed). The right solution is to lock these data structures, but
+			// VI(1) knowing that it's basically harmless.
+			vlog.VI(1).Infof("Exiting netAcceptLoop: net.Listener.Accept() failed on %v with %v", ln.netLn, err)
+			return
+		}
+		ln.connsMu.Lock()
+		ln.conns[conn] = true
+		ln.connsMu.Unlock()
+
+		vlog.VI(1).Infof("New net.Conn accepted from %s (local address: %s)", conn.RemoteAddr(), conn.LocalAddr())
+		go func() {
+			vf, err := vif.InternalNewAcceptedVIF(conn, ln.manager.rid, principal, blessings, nil, ln.deleteVIF, opts...)
+			if err != nil {
+				vlog.Infof("Shutting down conn from %s (local address: %s) as a VIF could not be created: %v", conn.RemoteAddr(), conn.LocalAddr(), err)
+				conn.Close()
+				return
+			}
+			ln.vifs.Insert(vf, conn.RemoteAddr().Network(), conn.RemoteAddr().String())
+			ln.manager.vifs.Insert(vf, conn.RemoteAddr().Network(), conn.RemoteAddr().String())
+
+			ln.vifLoops.Add(1)
+			vifLoop(vf, ln.q, func() {
+				ln.connsMu.Lock()
+				delete(ln.conns, conn)
+				ln.connsMu.Unlock()
+				ln.vifLoops.Done()
+			})
+		}()
+	}
+}
+
+func (ln *netListener) deleteVIF(vf *vif.VIF) {
+	vlog.VI(2).Infof("VIF %v is closed, removing from cache", vf)
+	ln.vifs.Delete(vf)
+	ln.manager.vifs.Delete(vf)
+}
+
+func (ln *netListener) Accept() (stream.Flow, error) {
+	item, err := ln.q.Get(nil)
+	switch {
+	case err == upcqueue.ErrQueueIsClosed:
+		return nil, verror.New(stream.ErrNetwork, nil, verror.New(errListenerAlreadyClosed, nil))
+	case err != nil:
+		return nil, verror.New(stream.ErrNetwork, nil, verror.New(errAcceptFailed, nil, err))
+	default:
+		return item.(vif.ConnectorAndFlow).Flow, nil
+	}
+}
+
+func (ln *netListener) Close() error {
+	closeNetListener(ln.netLn)
+	ln.netLoop.Wait()
+	for _, vif := range ln.vifs.List() {
+		// NOTE(caprita): We do not actually Close down the vifs, as
+		// that would require knowing when all outstanding requests are
+		// finished.  For now, do not worry about it, since we expect
+		// shutdown to immediately precede process exit.
+		vif.StopAccepting()
+	}
+	ln.q.Shutdown()
+	ln.manager.removeListener(ln)
+	ln.vifLoops.Wait()
+	vlog.VI(3).Infof("Closed stream.Listener %s", ln)
+	return nil
+}
+
+func (ln *netListener) String() string {
+	return fmt.Sprintf("%T: (%v, %v)", ln, ln.netLn.Addr().Network(), ln.netLn.Addr())
+}
+
+func (ln *netListener) DebugString() string {
+	ret := []string{
+		fmt.Sprintf("stream.Listener: net.Listener on (%q, %q)", ln.netLn.Addr().Network(), ln.netLn.Addr()),
+	}
+	if vifs := ln.vifs.List(); len(vifs) > 0 {
+		ret = append(ret, fmt.Sprintf("===Accepted VIFs(%d)===", len(vifs)))
+		for ix, vif := range vifs {
+			ret = append(ret, fmt.Sprintf("%4d) %v", ix, vif))
+		}
+	}
+	return strings.Join(ret, "\n")
+}
+
+func newProxyListener(m *manager, proxyEP naming.Endpoint, principal security.Principal, opts []stream.ListenerOpt) (listener, *inaming.Endpoint, error) {
+	ln := &proxyListener{
+		q:       upcqueue.New(),
+		proxyEP: proxyEP,
+		manager: m,
+	}
+	vf, ep, err := ln.connect(principal, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+	ln.vif = vf
+	ln.vifLoop.Add(1)
+	go vifLoop(ln.vif, ln.q, func() {
+		ln.vifLoop.Done()
+	})
+	return ln, ep, nil
+}
+
+func (ln *proxyListener) connect(principal security.Principal, opts []stream.ListenerOpt) (*vif.VIF, *inaming.Endpoint, error) {
+	vlog.VI(1).Infof("Connecting to proxy at %v", ln.proxyEP)
+	// Connecting requires dialing a VC to the proxy; extract the options needed to do so from the supplied opts.
+	var dialOpts []stream.VCOpt
+	var auth ProxyAuthenticator
+	for _, opt := range opts {
+		switch v := opt.(type) {
+		case stream.VCOpt:
+			dialOpts = append(dialOpts, v)
+		case ProxyAuthenticator:
+			auth = v
+		}
+	}
+	// TODO(cnicolaou, ashankar): probably want to set a timeout here. (is
+	// this covered by opts?)
+	// TODO(ashankar): Authorize the proxy server as well (similar to how
+	// clients authorize servers in RPCs).
+	vf, err := ln.manager.FindOrDialVIF(ln.proxyEP, principal, dialOpts...)
+	if err != nil {
+		return nil, nil, err
+	}
+	// Prepend the default idle timeout for VC.
+	opts = append([]stream.ListenerOpt{vc.IdleTimeout{defaultIdleTimeout}}, opts...)
+	if err := vf.StartAccepting(opts...); err != nil {
+		return nil, nil, verror.New(stream.ErrNetwork, nil, verror.New(errAlreadyConnected, nil, vf, err))
+	}
+	// Proxy protocol: See v.io/x/ref/runtime/internal/rpc/stream/proxy/protocol.vdl
+	//
+	// We don't need idle timeout for this VC, since one flow will be kept alive.
+	vc, err := vf.Dial(ln.proxyEP, principal, dialOpts...)
+	if err != nil {
+		vf.StopAccepting()
+		if verror.ErrorID(err) == verror.ErrAborted.ID {
+			ln.manager.vifs.Delete(vf)
+			return nil, nil, verror.New(stream.ErrAborted, nil, err)
+		}
+		return nil, nil, err
+	}
+	flow, err := vc.Connect()
+	if err != nil {
+		vf.StopAccepting()
+		return nil, nil, verror.New(stream.ErrNetwork, nil, verror.New(errFailedToCreateLivenessFlow, nil, err))
+	}
+	var request proxy.Request
+	var response proxy.Response
+	if auth != nil {
+		if request.Blessings, request.Discharges, err = auth.Login(flow); err != nil {
+			vf.StopAccepting()
+			return nil, nil, verror.New(stream.ErrSecurity, nil, verror.New(errRefusedProxyLogin, nil, err))
+		}
+	}
+	enc := vom.NewEncoder(flow)
+	if err := enc.Encode(request); err != nil {
+		flow.Close()
+		vf.StopAccepting()
+		return nil, nil, verror.New(stream.ErrNetwork, nil, verror.New(errVomEncodeRequest, nil, err))
+	}
+	dec := vom.NewDecoder(flow)
+	if err := dec.Decode(&response); err != nil {
+		flow.Close()
+		vf.StopAccepting()
+		return nil, nil, verror.New(stream.ErrNetwork, nil, verror.New(errVomDecodeResponse, nil, err))
+	}
+	if response.Error != nil {
+		flow.Close()
+		vf.StopAccepting()
+		return nil, nil, verror.New(stream.ErrProxy, nil, response.Error)
+	}
+	ep, err := inaming.NewEndpoint(response.Endpoint)
+	if err != nil {
+		flow.Close()
+		vf.StopAccepting()
+		return nil, nil, verror.New(stream.ErrProxy, nil, verror.New(errProxyEndpointError, nil, response.Endpoint, err))
+	}
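+	// The liveness flow doubles as a failure detector: once it closes, the
+	// proxy is assumed gone, so stop accepting new VCs and shut down the
+	// queue so that Accept unblocks with an error.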
+	go func(vf *vif.VIF, flow stream.Flow, q *upcqueue.T) {
+		<-flow.Closed()
+		vf.StopAccepting()
+		q.Close()
+	}(vf, flow, ln.q)
+	return vf, ep, nil
+}
+
+func (ln *proxyListener) Accept() (stream.Flow, error) {
+	item, err := ln.q.Get(nil)
+	switch {
+	case err == upcqueue.ErrQueueIsClosed:
+		return nil, verror.New(stream.ErrNetwork, nil, verror.New(errListenerAlreadyClosed, nil))
+	case err != nil:
+		return nil, verror.New(stream.ErrNetwork, nil, verror.New(errAcceptFailed, nil, err))
+	default:
+		return item.(vif.ConnectorAndFlow).Flow, nil
+	}
+}
+
+func (ln *proxyListener) Close() error {
+	ln.vif.StopAccepting()
+	ln.q.Shutdown()
+	ln.manager.removeListener(ln)
+	ln.vifLoop.Wait()
+	vlog.VI(3).Infof("Closed stream.Listener %s", ln)
+	return nil
+}
+
+func (ln *proxyListener) String() string {
+	return ln.DebugString()
+}
+
+func (ln *proxyListener) DebugString() string {
+	return fmt.Sprintf("stream.Listener: PROXY:%v RoutingID:%v", ln.proxyEP, ln.manager.rid)
+}
+
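+// vifLoop accepts new VCs and flows from vf, queueing accepted flows on q
+// until the VIF shuts down, and then runs cleanup.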
+func vifLoop(vf *vif.VIF, q *upcqueue.T, cleanup func()) {
+	defer cleanup()
+	for {
+		cAndf, err := vf.Accept()
+		switch {
+		case err != nil:
+			vlog.VI(2).Infof("Shutting down listener on VIF %v: %v", vf, err)
+			return
+		case cAndf.Flow == nil:
+			vlog.VI(1).Infof("New VC %v on VIF %v", cAndf.Connector, vf)
+		default:
+			if err := q.Put(cAndf); err != nil {
+				vlog.VI(1).Infof("Closing new flow on VC %v (VIF %v) as Put failed in vifLoop: %v", cAndf.Connector, vf, err)
+				cAndf.Flow.Close()
+			}
+		}
+	}
+}
diff --git a/runtime/internal/rpc/stream/manager/manager.go b/runtime/internal/rpc/stream/manager/manager.go
new file mode 100644
index 0000000..a297962
--- /dev/null
+++ b/runtime/internal/rpc/stream/manager/manager.go
@@ -0,0 +1,352 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package manager provides an implementation of the Manager interface defined in v.io/x/ref/runtime/internal/rpc/stream.
+package manager
+
+import (
+	"fmt"
+	"net"
+	"strings"
+	"sync"
+	"time"
+
+	"v.io/v23/naming"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+	"v.io/x/lib/vlog"
+
+	"v.io/x/ref/lib/stats"
+	"v.io/x/ref/lib/stats/counter"
+	inaming "v.io/x/ref/runtime/internal/naming"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+	"v.io/x/ref/runtime/internal/rpc/stream/vif"
+)
+
+const pkgPath = "v.io/x/ref/runtime/internal/rpc/stream/manager"
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errUnknownNetwork                          = reg(".errUnknownNetwork", "unknown network{:3}")
+	errEndpointParseError                      = reg(".errEndpointParseError", "failed to parse endpoint {3}{:4}")
+	errAlreadyShutdown                         = reg(".errAlreadyShutdown", "already shutdown")
+	errProvidedServerBlessingsWithoutPrincipal = reg(".errServerBlessingsWithoutPrincipal", "blessings provided but with no principal")
+	errNoBlessingNames                         = reg(".errNoBlessingNames", "no blessing names could be extracted for the provided principal")
+)
+
+const (
+	// The default time after which a VIF is closed if no VC is opened.
+	defaultStartTimeout = 3 * time.Second
+	// The default time after which an idle VC is closed.
+	defaultIdleTimeout = 30 * time.Second
+)
+
+// InternalNew creates a new stream.Manager for managing streams where the local
+// process is identified by the provided RoutingID.
+//
+// As the name suggests, this method is intended for use only within packages
+// placed inside v.io/x/ref/runtime/internal. Code outside the
+// v.io/x/ref/runtime/internal/* packages should never call this method.
+func InternalNew(rid naming.RoutingID) stream.Manager {
+	statsPrefix := naming.Join("rpc", "stream", "routing-id", rid.String())
+	m := &manager{
+		rid:         rid,
+		vifs:        vif.NewSet(),
+		listeners:   make(map[listener]bool),
+		statsPrefix: statsPrefix,
+		killedConns: stats.NewCounter(naming.Join(statsPrefix, "killed-connections")),
+	}
+	stats.NewStringFunc(naming.Join(m.statsPrefix, "debug"), m.DebugString)
+	return m
+}
+
+type manager struct {
+	rid  naming.RoutingID
+	vifs *vif.Set
+
+	muListeners sync.Mutex
+	listeners   map[listener]bool // GUARDED_BY(muListeners)
+	shutdown    bool              // GUARDED_BY(muListeners)
+
+	statsPrefix string
+	killedConns *counter.Counter
+}
+
+var _ stream.Manager = (*manager)(nil)
+
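+// DialTimeout bounds the time spent establishing the underlying network
+// connection when dialing a VIF. It can be used both as a stream.VCOpt and
+// as an rpc client opt.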
+type DialTimeout time.Duration
+
+func (DialTimeout) RPCStreamVCOpt() {}
+func (DialTimeout) RPCClientOpt() {
+	defer vlog.LogCall()() // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+}
+
+func dial(d rpc.DialerFunc, network, address string, timeout time.Duration) (net.Conn, error) {
+	if d != nil {
+		conn, err := d(network, address, timeout)
+		if err != nil {
+			return nil, verror.New(stream.ErrDialFailed, nil, err)
+		}
+		return conn, nil
+	}
+	return nil, verror.New(stream.ErrDialFailed, nil, verror.New(errUnknownNetwork, nil, network))
+}
+
+func resolve(r rpc.ResolverFunc, network, address string) (string, string, error) {
+	if r != nil {
+		net, addr, err := r(network, address)
+		if err != nil {
+			return "", "", verror.New(stream.ErrResolveFailed, nil, err)
+		}
+		return net, addr, nil
+	}
+	return "", "", verror.New(stream.ErrResolveFailed, nil, verror.New(errUnknownNetwork, nil, network))
+}
+
+// FindOrDialVIF returns the network connection (VIF) to the provided address
+// from the cache in the manager. If not already present in the cache, a new
+// connection will be created using net.Dial.
+func (m *manager) FindOrDialVIF(remote naming.Endpoint, principal security.Principal, opts ...stream.VCOpt) (*vif.VIF, error) {
+	// Extract options.
+	var timeout time.Duration
+	for _, o := range opts {
+		switch v := o.(type) {
+		case DialTimeout:
+			timeout = time.Duration(v)
+		}
+	}
+	addr := remote.Addr()
+	d, r, _, _ := rpc.RegisteredProtocol(addr.Network())
+	// (network, address) in the endpoint might not always match up
+	// with the key used in the vifs. For example:
+	// - conn, err := net.Dial("tcp", "www.google.com:80")
+	//   fmt.Println(conn.RemoteAddr()) // Might yield the corresponding IP address
+	// - Similarly, an unspecified IP address (net.IP.IsUnspecified) like "[::]:80"
+	//   might yield "[::1]:80" (loopback interface) in conn.RemoteAddr().
+	// Thus, look for VIFs with the resolved address.
+	network, address, err := resolve(r, addr.Network(), addr.String())
+	if err != nil {
+		return nil, err
+	}
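+	// BlockingFind either returns an existing VIF or reserves the
+	// (network, address) key, blocking concurrent dialers to the same key
+	// until unblock is called; this is what guarantees that concurrent
+	// Dials share a single VIF.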
+	vf, unblock := m.vifs.BlockingFind(network, address)
+	if vf != nil {
+		vlog.VI(1).Infof("(%q, %q) resolved to (%q, %q) which exists in the VIF cache.", addr.Network(), addr.String(), network, address)
+		return vf, nil
+	}
+	defer unblock()
+
+	vlog.VI(1).Infof("(%q, %q) not in VIF cache. Dialing", network, address)
+	conn, err := dial(d, network, address, timeout)
+	if err != nil {
+		return nil, err
+	}
+
+	opts = append([]stream.VCOpt{vc.StartTimeout{defaultStartTimeout}}, opts...)
+	vf, err = vif.InternalNewDialedVIF(conn, m.rid, principal, nil, m.deleteVIF, opts...)
+	if err != nil {
+		conn.Close()
+		return nil, err
+	}
+	m.vifs.Insert(vf, network, address)
+	return vf, nil
+}
+
+func (m *manager) Dial(remote naming.Endpoint, principal security.Principal, opts ...stream.VCOpt) (stream.VC, error) {
+	// If vif.Dial fails because the cached network connection was broken, remove from
+	// the cache and try once more.
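+	// The loop body runs at most twice: retry is true on the first pass
+	// and false on the second, so a broken cached VIF is redialed exactly
+	// once.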
+	for retry := true; true; retry = false {
+		vf, err := m.FindOrDialVIF(remote, principal, opts...)
+		if err != nil {
+			return nil, err
+		}
+		opts = append([]stream.VCOpt{vc.IdleTimeout{defaultIdleTimeout}}, opts...)
+		vc, err := vf.Dial(remote, principal, opts...)
+		if !retry || verror.ErrorID(err) != stream.ErrAborted.ID {
+			return vc, err
+		}
+		vf.Close()
+	}
+	return nil, verror.NewErrInternal(nil) // Not reached
+}
+
+func (m *manager) deleteVIF(vf *vif.VIF) {
+	vlog.VI(2).Infof("%p: VIF %v is closed, removing from cache", m, vf)
+	m.vifs.Delete(vf)
+}
+
+func listen(protocol, address string) (net.Listener, error) {
+	if _, _, l, _ := rpc.RegisteredProtocol(protocol); l != nil {
+		ln, err := l(protocol, address)
+		if err != nil {
+			return nil, verror.New(stream.ErrNetwork, nil, err)
+		}
+		return ln, nil
+	}
+	return nil, verror.New(stream.ErrBadArg, nil, verror.New(errUnknownNetwork, nil, protocol))
+}
+
+func (m *manager) Listen(protocol, address string, principal security.Principal, blessings security.Blessings, opts ...stream.ListenerOpt) (stream.Listener, naming.Endpoint, error) {
+	bNames, err := extractBlessingNames(principal, blessings)
+	if err != nil {
+		return nil, nil, err
+	}
+	ln, ep, err := m.internalListen(protocol, address, principal, blessings, opts...)
+	if err != nil {
+		return nil, nil, err
+	}
+	ep.Blessings = bNames
+	return ln, ep, nil
+}
+
+func (m *manager) internalListen(protocol, address string, principal security.Principal, blessings security.Blessings, opts ...stream.ListenerOpt) (stream.Listener, *inaming.Endpoint, error) {
+	m.muListeners.Lock()
+	if m.shutdown {
+		m.muListeners.Unlock()
+		return nil, nil, verror.New(stream.ErrBadState, nil, verror.New(errAlreadyShutdown, nil))
+	}
+	m.muListeners.Unlock()
+
+	if protocol == inaming.Network {
+		// Act as if listening on the address of a remote proxy.
+		ep, err := inaming.NewEndpoint(address)
+		if err != nil {
+			return nil, nil, verror.New(stream.ErrBadArg, nil, verror.New(errEndpointParseError, nil, address, err))
+		}
+		return m.remoteListen(ep, principal, opts)
+	}
+	netln, err := listen(protocol, address)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	m.muListeners.Lock()
+	if m.shutdown {
+		m.muListeners.Unlock()
+		closeNetListener(netln)
+		return nil, nil, verror.New(stream.ErrBadState, nil, verror.New(errAlreadyShutdown, nil))
+	}
+
+	ln := newNetListener(m, netln, principal, blessings, opts)
+	m.listeners[ln] = true
+	m.muListeners.Unlock()
+	ep := &inaming.Endpoint{
+		Protocol: protocol,
+		Address:  netln.Addr().String(),
+		RID:      m.rid,
+	}
+	return ln, ep, nil
+}
+
+func (m *manager) remoteListen(proxy naming.Endpoint, principal security.Principal, listenerOpts []stream.ListenerOpt) (stream.Listener, *inaming.Endpoint, error) {
+	ln, ep, err := newProxyListener(m, proxy, principal, listenerOpts)
+	if err != nil {
+		return nil, nil, err
+	}
+	m.muListeners.Lock()
+	defer m.muListeners.Unlock()
+	if m.shutdown {
+		ln.Close()
+		return nil, nil, verror.New(stream.ErrBadState, nil, verror.New(errAlreadyShutdown, nil))
+	}
+	m.listeners[ln] = true
+	return ln, ep, nil
+}
+
+func (m *manager) ShutdownEndpoint(remote naming.Endpoint) {
+	vifs := m.vifs.List()
+	total := 0
+	for _, vf := range vifs {
+		total += vf.ShutdownVCs(remote)
+	}
+	vlog.VI(1).Infof("ShutdownEndpoint(%q) closed %d VCs", remote, total)
+}
+
+func closeNetListener(ln net.Listener) {
+	addr := ln.Addr()
+	err := ln.Close()
+	vlog.VI(1).Infof("Closed net.Listener on (%q, %q): %v", addr.Network(), addr, err)
+}
+
+func (m *manager) removeListener(ln listener) {
+	m.muListeners.Lock()
+	delete(m.listeners, ln)
+	m.muListeners.Unlock()
+}
+
+func (m *manager) Shutdown() {
+	stats.Delete(m.statsPrefix)
+	m.muListeners.Lock()
+	if m.shutdown {
+		m.muListeners.Unlock()
+		return
+	}
+	m.shutdown = true
+	var wg sync.WaitGroup
+	wg.Add(len(m.listeners))
+	for ln := range m.listeners {
+		go func(ln stream.Listener) {
+			ln.Close()
+			wg.Done()
+		}(ln)
+	}
+	m.listeners = make(map[listener]bool)
+	m.muListeners.Unlock()
+	wg.Wait()
+
+	vifs := m.vifs.List()
+	for _, vf := range vifs {
+		vf.Close()
+	}
+}
+
+func (m *manager) RoutingID() naming.RoutingID {
+	return m.rid
+}
+
+func (m *manager) DebugString() string {
+	vifs := m.vifs.List()
+
+	m.muListeners.Lock()
+	defer m.muListeners.Unlock()
+
+	l := make([]string, 0)
+	l = append(l, fmt.Sprintf("Manager: RoutingID:%v #VIFs:%d #Listeners:%d Shutdown:%t", m.rid, len(vifs), len(m.listeners), m.shutdown))
+	if len(vifs) > 0 {
+		l = append(l, "============================VIFs================================================")
+		for ix, vif := range vifs {
+			l = append(l, fmt.Sprintf("%4d) %v", ix, vif.DebugString()))
+			l = append(l, "--------------------------------------------------------------------------------")
+		}
+	}
+	if len(m.listeners) > 0 {
+		l = append(l, "=======================================Listeners==================================================")
+		l = append(l, "  (stream listeners, their local network listeners (missing for proxied listeners), and VIFs)")
+		for ln := range m.listeners {
+			l = append(l, ln.DebugString())
+		}
+	}
+	return strings.Join(l, "\n")
+}
+
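+// extractBlessingNames returns the blessing names in b, as recognized by p,
+// for embedding in listening endpoints. Blessings without a principal are
+// rejected; a nil principal with zero blessings yields no names.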
+func extractBlessingNames(p security.Principal, b security.Blessings) ([]string, error) {
+	if !b.IsZero() && p == nil {
+		return nil, verror.New(stream.ErrBadArg, nil, verror.New(errProvidedServerBlessingsWithoutPrincipal, nil))
+	}
+	if p == nil {
+		return nil, nil
+	}
+	var ret []string
+	for b := range p.BlessingsInfo(b) {
+		ret = append(ret, b)
+	}
+	if len(ret) == 0 {
+		return nil, verror.New(stream.ErrBadArg, nil, verror.New(errNoBlessingNames, nil))
+	}
+	return ret, nil
+}
diff --git a/runtime/internal/rpc/stream/manager/manager_test.go b/runtime/internal/rpc/stream/manager/manager_test.go
new file mode 100644
index 0000000..bce2bab
--- /dev/null
+++ b/runtime/internal/rpc/stream/manager/manager_test.go
@@ -0,0 +1,969 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package manager
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"reflect"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"syscall"
+	"testing"
+	"time"
+
+	"v.io/x/lib/vlog"
+
+	"v.io/v23/naming"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+
+	inaming "v.io/x/ref/runtime/internal/naming"
+	_ "v.io/x/ref/runtime/internal/rpc/protocols/tcp"
+	_ "v.io/x/ref/runtime/internal/rpc/protocols/ws"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+	"v.io/x/ref/runtime/internal/rpc/stream/vif"
+	"v.io/x/ref/test"
+	"v.io/x/ref/test/expect"
+	"v.io/x/ref/test/modules"
+	"v.io/x/ref/test/testutil"
+)
+
+func init() {
+	modules.RegisterChild("runServer", "", runServer)
+	modules.RegisterChild("runRLimitedServer", "", runRLimitedServer)
+}
+
+// We write our own TestMain here instead of relying on v23 test generate because
+// we need to set runtime.GOMAXPROCS.
+func TestMain(m *testing.M) {
+	test.Init()
+	// test.Init sets GOMAXPROCS to NumCPU.  We want to force
+	// GOMAXPROCS to remain at 1, in order to trigger a particular race
+	// condition that occurs when closing the server; also, using 1 cpu
+	// introduces less variance in the behavior of the test.
+	runtime.GOMAXPROCS(1)
+	if modules.IsModulesChildProcess() {
+		if err := modules.Dispatch(); err != nil {
+			fmt.Fprintf(os.Stderr, "modules.Dispatch failed: %v\n", err)
+			os.Exit(1)
+		}
+		return
+	}
+	os.Exit(m.Run())
+}
+
+func testSimpleFlow(t *testing.T, protocol string) {
+	server := InternalNew(naming.FixedRoutingID(0x55555555))
+	client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+	pclient := testutil.NewPrincipal("client")
+	pserver := testutil.NewPrincipal("server")
+
+	ln, ep, err := server.Listen(protocol, "127.0.0.1:0", pserver, pserver.BlessingStore().Default())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	data := "the dark knight rises"
+	var clientVC stream.VC
+	var clientF1 stream.Flow
+	go func() {
+		if clientVC, err = client.Dial(ep, pclient); err != nil {
+			t.Errorf("Dial(%q) failed: %v", ep, err)
+			return
+		}
+		if clientF1, err = clientVC.Connect(); err != nil {
+			t.Errorf("Connect() failed: %v", err)
+			return
+		}
+		if err := writeLine(clientF1, data); err != nil {
+			t.Error(err)
+		}
+	}()
+	serverF, err := ln.Accept()
+	if err != nil {
+		t.Fatalf("Accept failed: %v", err)
+	}
+	if got, err := readLine(serverF); got != data || err != nil {
+		t.Errorf("Got (%q, %v), want (%q, nil)", got, err, data)
+	}
+	// By this point, the goroutine has passed the write call (or exited
+	// early) since the read has gotten through.  Check if the goroutine
+	// encountered any errors in creating the VC or flow and abort.
+	if t.Failed() {
+		return
+	}
+	defer clientF1.Close()
+
+	ln.Close()
+
+	// Writes on flows opened before the server listener was closed should
+	// still succeed.
+	data = "the dark knight goes to bed"
+	go func() {
+		if err := writeLine(clientF1, data); err != nil {
+			t.Error(err)
+		}
+	}()
+	if got, err := readLine(serverF); got != data || err != nil {
+		t.Errorf("Got (%q, %v), want (%q, nil)", got, err, data)
+	}
+
+	// Opening a new flow on an existing VC will succeed initially, but
+	// writes on the client end will eventually fail once the server has
+	// stopped listening.
+	//
+	// It will require a round-trip to the server to notice the failure,
+	// hence the client should write enough data to ensure that the Write
+	// call will not return before a round-trip.
+	//
+	// The length of the data is taken to exceed the queue buffer size
+	// (DefaultBytesBufferedPerFlow), the shared counters (MaxSharedBytes)
+	// and the per-flow counters (DefaultBytesBufferedPerFlow) that are
+	// given when the flow gets established.
+	//
+	// TODO(caprita): separate the constants for the queue buffer size and
+	// the default number of counters to avoid confusion.
+	lotsOfData := string(make([]byte, vc.DefaultBytesBufferedPerFlow*2+vc.MaxSharedBytes+1))
+	clientF2, err := clientVC.Connect()
+	if err != nil {
+		t.Fatalf("Connect() failed: %v", err)
+	}
+	defer clientF2.Close()
+	if err := writeLine(clientF2, lotsOfData); err == nil {
+		t.Errorf("Should not be able to Write after the Listener is closed")
+	}
+	// Opening a new VC should fail fast.
+	if _, err := client.Dial(ep, pclient); err == nil {
+		t.Errorf("Should not be able to Dial after listener is closed")
+	}
+}
+
+func TestSimpleFlow(t *testing.T) {
+	testSimpleFlow(t, "tcp")
+}
+
+func TestSimpleFlowWS(t *testing.T) {
+	testSimpleFlow(t, "ws")
+}
+
+func TestConnectionTimeout(t *testing.T) {
+	client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+
+	ch := make(chan error)
+	go func() {
+		// 203.0.113.0 is TEST-NET-3 from RFC5737
+		ep, _ := inaming.NewEndpoint(naming.FormatEndpoint("tcp", "203.0.113.10:80"))
+		_, err := client.Dial(ep, testutil.NewPrincipal("client"), DialTimeout(time.Second))
+		ch <- err
+	}()
+
+	select {
+	case err := <-ch:
+		if err == nil {
+			t.Fatalf("expected an error")
+		}
+	case <-time.After(time.Minute):
+		t.Fatalf("timed out")
+	}
+}
+
+func testAuthenticatedByDefault(t *testing.T, protocol string) {
+	var (
+		server = InternalNew(naming.FixedRoutingID(0x55555555))
+		client = InternalNew(naming.FixedRoutingID(0xcccccccc))
+
+		clientPrincipal = testutil.NewPrincipal("client")
+		serverPrincipal = testutil.NewPrincipal("server")
+		clientKey       = clientPrincipal.PublicKey()
+		serverBlessings = serverPrincipal.BlessingStore().Default()
+	)
+	ln, ep, err := server.Listen(protocol, "127.0.0.1:0", serverPrincipal, serverPrincipal.BlessingStore().Default())
+	if err != nil {
+		t.Fatal(err)
+	}
+	// And the server blessing should be in the endpoint.
+	if got, want := ep.BlessingNames(), []string{"server"}; !reflect.DeepEqual(got, want) {
+		t.Errorf("Got blessings %v from endpoint, want %v", got, want)
+	}
+
+	errs := make(chan error)
+
+	testAuth := func(tag string, flow stream.Flow, wantServer security.Blessings, wantClientKey security.PublicKey) {
+		// Since the client's blessing is expected to be self-signed we only test
+		// its public key
+		gotServer := flow.RemoteBlessings()
+		gotClientKey := flow.LocalBlessings().PublicKey()
+		if tag == "server" {
+			gotServer = flow.LocalBlessings()
+			gotClientKey = flow.RemoteBlessings().PublicKey()
+		}
+		if !reflect.DeepEqual(gotServer, wantServer) || !reflect.DeepEqual(gotClientKey, wantClientKey) {
+			errs <- fmt.Errorf("%s: Server: Got Blessings %q, want %q. Client: Got PublicKey %v, want %v", tag, gotServer, wantServer, gotClientKey, wantClientKey)
+			return
+		}
+		errs <- nil
+	}
+
+	go func() {
+		flow, err := ln.Accept()
+		if err != nil {
+			errs <- err
+			return
+		}
+		defer flow.Close()
+		testAuth("server", flow, serverBlessings, clientKey)
+	}()
+
+	go func() {
+		vc, err := client.Dial(ep, clientPrincipal)
+		if err != nil {
+			errs <- err
+			return
+		}
+		flow, err := vc.Connect()
+		if err != nil {
+			errs <- err
+			return
+		}
+		defer flow.Close()
+		testAuth("client", flow, serverBlessings, clientKey)
+	}()
+
+	if err := <-errs; err != nil {
+		t.Error(err)
+	}
+	if err := <-errs; err != nil {
+		t.Error(err)
+	}
+}
+
+func TestAuthenticatedByDefault(t *testing.T) {
+	testAuthenticatedByDefault(t, "tcp")
+}
+
+func TestAuthenticatedByDefaultWS(t *testing.T) {
+	testAuthenticatedByDefault(t, "ws")
+}
+
+func numListeners(m stream.Manager) int   { return len(m.(*manager).listeners) }
+func debugString(m stream.Manager) string { return m.(*manager).DebugString() }
+func numVIFs(m stream.Manager) int        { return len(m.(*manager).vifs.List()) }
+
+func TestListenEndpoints(t *testing.T) {
+	server := InternalNew(naming.FixedRoutingID(0xcafe))
+	principal := testutil.NewPrincipal("test")
+	blessings := principal.BlessingStore().Default()
+	ln1, ep1, err1 := server.Listen("tcp", "127.0.0.1:0", principal, blessings)
+	ln2, ep2, err2 := server.Listen("tcp", "127.0.0.1:0", principal, blessings)
+	// Since "127.0.0.1:0" was used as the network address, a random port will be
+	// assigned in each case. The endpoint should include that random port.
+	if err1 != nil {
+		t.Error(err1)
+	}
+	if err2 != nil {
+		t.Error(err2)
+	}
+	if ep1.String() == ep2.String() {
+		t.Errorf("Both listeners got the same endpoint: %q", ep1)
+	}
+	if n, expect := numListeners(server), 2; n != expect {
+		t.Errorf("expecting %d listeners, got %d for %s", expect, n, debugString(server))
+	}
+	ln1.Close()
+	if n, expect := numListeners(server), 1; n != expect {
+		t.Errorf("expecting %d listeners, got %d for %s", expect, n, debugString(server))
+	}
+	ln2.Close()
+	if n, expect := numListeners(server), 0; n != expect {
+		t.Errorf("expecting %d listeners, got %d for %s", expect, n, debugString(server))
+	}
+}
+
+func acceptLoop(ln stream.Listener) {
+	for {
+		f, err := ln.Accept()
+		if err != nil {
+			return
+		}
+		f.Close()
+	}
+}
+
+func TestCloseListener(t *testing.T) {
+	testCloseListener(t, "tcp")
+}
+
+func TestCloseListenerWS(t *testing.T) {
+	testCloseListener(t, "ws")
+}
+
+func testCloseListener(t *testing.T, protocol string) {
+	server := InternalNew(naming.FixedRoutingID(0x5e97e9))
+	pclient := testutil.NewPrincipal("client")
+	pserver := testutil.NewPrincipal("server")
+	blessings := pserver.BlessingStore().Default()
+
+	ln, ep, err := server.Listen(protocol, "127.0.0.1:0", pserver, blessings)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Server will just listen for flows and close them.
+	go acceptLoop(ln)
+	client := InternalNew(naming.FixedRoutingID(0xc1e41))
+	if _, err = client.Dial(ep, pclient); err != nil {
+		t.Fatal(err)
+	}
+	ln.Close()
+	client = InternalNew(naming.FixedRoutingID(0xc1e42))
+	if _, err := client.Dial(ep, pclient); err == nil {
+		t.Errorf("client.Dial(%q) should have failed", ep)
+	}
+}
+
+func TestShutdown(t *testing.T) {
+	server := InternalNew(naming.FixedRoutingID(0x5e97e9))
+	principal := testutil.NewPrincipal("test")
+	blessings := principal.BlessingStore().Default()
+	ln, _, err := server.Listen("tcp", "127.0.0.1:0", principal, blessings)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Server will just listen for flows and close them.
+	go acceptLoop(ln)
+	if n, expect := numListeners(server), 1; n != expect {
+		t.Errorf("expecting %d listeners, got %d for %s", expect, n, debugString(server))
+	}
+	server.Shutdown()
+	if _, _, err := server.Listen("tcp", "127.0.0.1:0", principal, blessings); err == nil {
+		t.Error("server should have shut down")
+	}
+	if n, expect := numListeners(server), 0; n != expect {
+		t.Errorf("expecting %d listeners, got %d for %s", expect, n, debugString(server))
+	}
+}
+
+func TestShutdownEndpoint(t *testing.T) {
+	testShutdownEndpoint(t, "tcp")
+}
+
+func TestShutdownEndpointWS(t *testing.T) {
+	testShutdownEndpoint(t, "ws")
+}
+
+func testShutdownEndpoint(t *testing.T, protocol string) {
+	server := InternalNew(naming.FixedRoutingID(0x55555555))
+	client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+	principal := testutil.NewPrincipal("test")
+
+	ln, ep, err := server.Listen(protocol, "127.0.0.1:0", principal, principal.BlessingStore().Default())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Server will just listen for flows and close them.
+	go acceptLoop(ln)
+
+	vc, err := client.Dial(ep, testutil.NewPrincipal("client"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if f, err := vc.Connect(); f == nil || err != nil {
+		t.Errorf("vc.Connect failed: (%v, %v)", f, err)
+	}
+	client.ShutdownEndpoint(ep)
+	if f, err := vc.Connect(); f != nil || err == nil {
+		t.Errorf("vc.Connect unexpectedly succeeded: (%v, %v)", f, err)
+	}
+}
+
+func TestStartTimeout(t *testing.T) {
+	const (
+		startTime = 5 * time.Millisecond
+	)
+
+	var (
+		server  = InternalNew(naming.FixedRoutingID(0x55555555))
+		pserver = testutil.NewPrincipal("server")
+		lopts   = []stream.ListenerOpt{vc.StartTimeout{startTime}}
+	)
+
+	// Pause the start timers.
+	triggerTimers := vif.SetFakeTimers()
+
+	ln, ep, err := server.Listen("tcp", "127.0.0.1:0", pserver, pserver.BlessingStore().Default(), lopts...)
+	if err != nil {
+		t.Fatal(err)
+	}
+	go func() {
+		for {
+			_, err := ln.Accept()
+			if err != nil {
+				return
+			}
+		}
+	}()
+
+	_, err = net.Dial(ep.Addr().Network(), ep.Addr().String())
+	if err != nil {
+		t.Fatalf("net.Dial failed: %v", err)
+	}
+
+	// Trigger the start timers.
+	triggerTimers()
+
+	// No VC is opened. The VIF should be closed after start timeout.
+	for range time.Tick(startTime) {
+		if numVIFs(server) == 0 {
+			break
+		}
+	}
+}
+
+func testIdleTimeout(t *testing.T, testServer bool) {
+	const (
+		idleTime = 10 * time.Millisecond
+		// We use a long wait time here since it takes some time to handle VC close
+		// especially in race testing.
+		waitTime = 150 * time.Millisecond
+	)
+
+	var (
+		server  = InternalNew(naming.FixedRoutingID(0x55555555))
+		client  = InternalNew(naming.FixedRoutingID(0xcccccccc))
+		pclient = testutil.NewPrincipal("client")
+		pserver = testutil.NewPrincipal("server")
+
+		opts  []stream.VCOpt
+		lopts []stream.ListenerOpt
+	)
+	if testServer {
+		lopts = []stream.ListenerOpt{vc.IdleTimeout{idleTime}}
+	} else {
+		opts = []stream.VCOpt{vc.IdleTimeout{idleTime}}
+	}
+
+	// Pause the idle timers.
+	triggerTimers := vif.SetFakeTimers()
+
+	ln, ep, err := server.Listen("tcp", "127.0.0.1:0", pserver, pserver.BlessingStore().Default(), lopts...)
+	if err != nil {
+		t.Fatal(err)
+	}
+	go func() {
+		for {
+			_, err := ln.Accept()
+			if err != nil {
+				return
+			}
+		}
+	}()
+
+	vc, err := client.Dial(ep, pclient, opts...)
+	if err != nil {
+		t.Fatalf("client.Dial(%q) failed: %v", ep, err)
+	}
+	f, err := vc.Connect()
+	if f == nil || err != nil {
+		t.Fatalf("vc.Connect failed: (%v, %v)", f, err)
+	}
+
+	// Trigger the idle timers.
+	triggerTimers()
+
+	// One active flow. The VIF should be kept open.
+	time.Sleep(waitTime)
+	if n := numVIFs(client); n != 1 {
+		t.Errorf("Client has %d VIFs; want 1\n%v", n, debugString(client))
+	}
+	if n := numVIFs(server); n != 1 {
+		t.Errorf("Server has %d VIFs; want 1\n%v", n, debugString(server))
+	}
+
+	f.Close()
+
+	// The flow has been closed. The VIF should be closed after idle timeout.
+	for range time.Tick(idleTime) {
+		if numVIFs(client) == 0 && numVIFs(server) == 0 {
+			break
+		}
+	}
+}
+
+func TestIdleTimeout(t *testing.T)       { testIdleTimeout(t, false) }
+func TestIdleTimeoutServer(t *testing.T) { testIdleTimeout(t, true) }
+
+/* TLS + resumption + channel bindings is broken: <https://secure-resumption.com/#channelbindings>.
+func TestSessionTicketCache(t *testing.T) {
+	server := InternalNew(naming.FixedRoutingID(0x55555555))
+	_, ep, err := server.Listen("tcp", "127.0.0.1:0", testutil.NewPrincipal("server"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+	if _, err = client.Dial(ep, testutil.NewPrincipal("TestSessionTicketCacheClient")); err != nil {
+		t.Fatalf("Dial(%q) failed: %v", ep, err)
+	}
+
+	if _, ok := client.(*manager).sessionCache.Get(ep.String()); !ok {
+		t.Fatalf("SessionTicket from TLS handshake not cached")
+	}
+}
+*/
+
+func testMultipleVCs(t *testing.T, protocol string) {
+	server := InternalNew(naming.FixedRoutingID(0x55555555))
+	client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+	principal := testutil.NewPrincipal("test")
+
+	const nVCs = 2
+	const data = "bugs bunny"
+
+	// Have the server read from each flow and write to rchan.
+	rchan := make(chan string)
+	ln, ep, err := server.Listen(protocol, "127.0.0.1:0", principal, principal.BlessingStore().Default())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	read := func(flow stream.Flow, c chan string) {
+		var buf bytes.Buffer
+		var tmp [1024]byte
+		for {
+			n, err := flow.Read(tmp[:])
+			buf.Write(tmp[:n])
+			if err == io.EOF {
+				c <- buf.String()
+				return
+			}
+			if err != nil {
+				t.Error(err)
+				return
+			}
+		}
+	}
+	go func() {
+		for i := 0; i < nVCs; i++ {
+			flow, err := ln.Accept()
+			if err != nil {
+				t.Error(err)
+				rchan <- ""
+				continue
+			}
+			go read(flow, rchan)
+		}
+	}()
+
+	// Have the client establish nVCs and a flow on each.
+	var vcs [nVCs]stream.VC
+	for i := 0; i < nVCs; i++ {
+		var err error
+		vcs[i], err = client.Dial(ep, testutil.NewPrincipal("client"))
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+	write := func(vc stream.VC) {
+		if err != nil {
+			ln.Close()
+			t.Error(err)
+			return
+		}
+		flow, err := vc.Connect()
+		if err != nil {
+			ln.Close()
+			t.Error(err)
+			return
+		}
+		defer flow.Close()
+		if _, err := flow.Write([]byte(data)); err != nil {
+			ln.Close()
+			t.Error(err)
+			return
+		}
+	}
+	for _, vc := range vcs {
+		go write(vc)
+	}
+	for i := 0; i < nVCs; i++ {
+		if got := <-rchan; got != data {
+			t.Errorf("Got %q want %q", got, data)
+		}
+	}
+}
+
+func TestMultipleVCs(t *testing.T) {
+	testMultipleVCs(t, "tcp")
+}
+
+func TestMultipleVCsWS(t *testing.T) {
+	testMultipleVCs(t, "ws")
+}
+
+func TestAddressResolution(t *testing.T) {
+	server := InternalNew(naming.FixedRoutingID(0x55555555))
+	client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+	principal := testutil.NewPrincipal("test")
+
+	// Using "tcp4" instead of "tcp" because the latter can end up with IPv6
+	// addresses and our Google Compute Engine integration test machines cannot
+	// resolve IPv6 addresses.
+	// As of April 2014, https://developers.google.com/compute/docs/networking
+	// said that IPv6 is not yet supported.
+	ln, ep, err := server.Listen("tcp4", "127.0.0.1:0", principal, principal.BlessingStore().Default())
+	if err != nil {
+		t.Fatal(err)
+	}
+	go acceptLoop(ln)
+
+	// We'd like an endpoint that contains an address that's different than the
+	// one used for the connection. In practice this is awkward to achieve since
+	// we don't want to listen on ":0" since that will annoy firewalls. Instead we
+	// create an endpoint with "localhost", which will result in an endpoint that
+	// doesn't contain 127.0.0.1.
+	_, port, _ := net.SplitHostPort(ep.Addr().String())
+	nep := &inaming.Endpoint{
+		Protocol: ep.Addr().Network(),
+		Address:  net.JoinHostPort("localhost", port),
+		RID:      ep.RoutingID(),
+	}
+
+	// Dial multiple VCs
+	for i := 0; i < 2; i++ {
+		if _, err = client.Dial(nep, testutil.NewPrincipal("client")); err != nil {
+			t.Fatalf("Dial #%d failed: %v", i, err)
+		}
+	}
+	// They should all be on the same VIF.
+	if n := numVIFs(client); n != 1 {
+		t.Errorf("Client has %d VIFs, want 1\n%v", n, debugString(client))
+	}
+	// TODO(ashankar): While a VIF can be re-used to Dial from the server
+	// to the client, currently there is no way to have the client "listen"
+	// on the same VIF. It can listen on a VC for new flows, but it cannot
+	// listen on an established VIF for new VCs. Figure this out?
+}
+
+func TestServerRestartDuringClientLifetime(t *testing.T) {
+	testServerRestartDuringClientLifetime(t, "tcp")
+}
+
+func TestServerRestartDuringClientLifetimeWS(t *testing.T) {
+	testServerRestartDuringClientLifetime(t, "ws")
+}
+
+func testServerRestartDuringClientLifetime(t *testing.T, protocol string) {
+	client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+	pclient := testutil.NewPrincipal("client")
+	sh, err := modules.NewShell(nil, nil, testing.Verbose(), t)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	defer sh.Cleanup(nil, nil)
+	h, err := sh.Start("runServer", nil, protocol, "127.0.0.1:0")
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	epstr := expect.NewSession(t, h.Stdout(), time.Minute).ExpectVar("ENDPOINT")
+	ep, err := inaming.NewEndpoint(epstr)
+	if err != nil {
+		t.Fatalf("inaming.NewEndpoint(%q): %v", epstr, err)
+	}
+	if _, err := client.Dial(ep, pclient); err != nil {
+		t.Fatal(err)
+	}
+	h.Shutdown(nil, os.Stderr)
+
+	// A new VC cannot be created since the server is dead
+	if _, err := client.Dial(ep, pclient); err == nil {
+		t.Fatal("Expected client.Dial to fail since server is dead")
+	}
+
+	h, err = sh.Start("runServer", nil, protocol, ep.Addr().String())
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	// Restarting the server, listening on the same address as before
+	ep2, err := inaming.NewEndpoint(expect.NewSession(t, h.Stdout(), time.Minute).ExpectVar("ENDPOINT"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := ep.Addr().String(), ep2.Addr().String(); got != want {
+		t.Fatalf("Got %q, want %q", got, want)
+	}
+	if _, err := client.Dial(ep2, pclient); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func runServer(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
+	server := InternalNew(naming.FixedRoutingID(0x55555555))
+	principal := testutil.NewPrincipal("test")
+	_, ep, err := server.Listen(args[0], args[1], principal, principal.BlessingStore().Default())
+	if err != nil {
+		fmt.Fprintln(stderr, err)
+		return err
+	}
+	fmt.Fprintf(stdout, "ENDPOINT=%v\n", ep)
+	// Live forever (till the process is explicitly killed)
+	modules.WaitForEOF(stdin)
+	return nil
+}
+
+func runRLimitedServer(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
+	var rlimit syscall.Rlimit
+	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit); err != nil {
+		fmt.Fprintln(stderr, err)
+		return err
+	}
+	rlimit.Cur = 9
+	if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rlimit); err != nil {
+		fmt.Fprintln(stderr, err)
+		return err
+	}
+	fmt.Fprintf(stdout, "RLIMIT_NOFILE=%d\n", rlimit.Cur)
+	return runServer(stdin, stdout, stderr, env, args...)
+}
+
+func readLine(f stream.Flow) (string, error) {
+	var result bytes.Buffer
+	var buf [5]byte
+	for {
+		n, err := f.Read(buf[:])
+		result.Write(buf[:n])
+		if err == io.EOF || (n > 0 && buf[n-1] == '\n') {
+			return strings.TrimRight(result.String(), "\n"), nil
+		}
+		if err != nil {
+			return "", fmt.Errorf("Read returned (%d, %v)", n, err)
+		}
+	}
+}
+
+func writeLine(f stream.Flow, data string) error {
+	data = data + "\n"
+	vlog.VI(1).Infof("write sending %d bytes", len(data))
+	if n, err := f.Write([]byte(data)); err != nil {
+		return fmt.Errorf("Write returned (%d, %v)", n, err)
+	}
+	return nil
+}
+
+func TestRegistration(t *testing.T) {
+	server := InternalNew(naming.FixedRoutingID(0x55555555))
+	client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+	principal := testutil.NewPrincipal("server")
+	blessings := principal.BlessingStore().Default()
+
+	dialer := func(_, _ string, _ time.Duration) (net.Conn, error) {
+		return nil, fmt.Errorf("tn.Dial")
+	}
+	resolver := func(_, _ string) (string, string, error) {
+		return "", "", fmt.Errorf("tn.Resolve")
+	}
+	listener := func(_, _ string) (net.Listener, error) {
+		return nil, fmt.Errorf("tn.Listen")
+	}
+	rpc.RegisterProtocol("tn", dialer, resolver, listener)
+
+	_, _, err := server.Listen("tnx", "127.0.0.1:0", principal, blessings)
+	if err == nil || !strings.Contains(err.Error(), "unknown network: tnx") {
+		t.Fatalf("expected error is missing (%v)", err)
+	}
+
+	_, _, err = server.Listen("tn", "127.0.0.1:0", principal, blessings)
+	if err == nil || !strings.Contains(err.Error(), "tn.Listen") {
+		t.Fatalf("expected error is missing (%v)", err)
+	}
+
+	// Need a functional listener to test Dial.
+	listener = func(_, addr string) (net.Listener, error) {
+		return net.Listen("tcp", addr)
+	}
+
+	if got, want := rpc.RegisterProtocol("tn", dialer, resolver, listener), true; got != want {
+		t.Errorf("got %t, want %t", got, want)
+	}
+
+	_, ep, err := server.Listen("tn", "127.0.0.1:0", principal, blessings)
+	if err != nil {
+		t.Errorf("unexpected error %s", err)
+	}
+
+	_, err = client.Dial(ep, testutil.NewPrincipal("client"))
+	if err == nil || !strings.Contains(err.Error(), "tn.Resolve") {
+		t.Fatalf("expected error is missing (%v)", err)
+	}
+}
+
+func TestBlessingNamesInEndpoint(t *testing.T) {
+	var (
+		p    = testutil.NewPrincipal("default")
+		b, _ = p.BlessSelf("dev.v.io/users/foo@bar.com/devices/desktop/app/myapp")
+
+		server = InternalNew(naming.FixedRoutingID(0x1))
+
+		tests = []struct {
+			principal     security.Principal
+			blessings     security.Blessings
+			blessingNames []string
+			err           bool
+		}{
+			{
+				// provided blessings should match returned output.
+				principal:     p,
+				blessings:     b,
+				blessingNames: []string{"dev.v.io/users/foo@bar.com/devices/desktop/app/myapp"},
+			},
+			{
+				// It is an error to provide a principal without providing blessings.
+				principal: p,
+				blessings: security.Blessings{},
+				err:       true,
+			},
+			{
+				// It is an error to provide inconsistent blessings and principal
+				principal: testutil.NewPrincipal("random"),
+				blessings: b,
+				err:       true,
+			},
+		}
+	)
+	// p must recognize its own blessings!
+	p.AddToRoots(b)
+	for idx, test := range tests {
+		ln, ep, err := server.Listen("tcp", "127.0.0.1:0", test.principal, test.blessings)
+		if (err != nil) != test.err {
+			t.Errorf("test #%d: Got error %v, wanted error: %v", idx, err, test.err)
+		}
+		if err != nil {
+			continue
+		}
+		ln.Close()
+		got, want := ep.BlessingNames(), test.blessingNames
+		sort.Strings(got)
+		sort.Strings(want)
+		if !reflect.DeepEqual(got, want) {
+			t.Errorf("test #%d: Got %v, want %v", idx, got, want)
+		}
+	}
+}
+
+func TestVIFCleanupWhenFDLimitIsReached(t *testing.T) {
+	sh, err := modules.NewShell(nil, nil, testing.Verbose(), t)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer sh.Cleanup(nil, nil)
+	h, err := sh.Start("runRLimitedServer", nil, "--logtostderr=true", "tcp", "127.0.0.1:0")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer h.CloseStdin()
+	stdout := expect.NewSession(t, h.Stdout(), time.Minute)
+	nfiles, err := strconv.Atoi(stdout.ExpectVar("RLIMIT_NOFILE"))
+	if stdout.Error() != nil {
+		t.Fatal(stdout.Error())
+	}
+	if err != nil {
+		t.Fatal(err)
+	}
+	epstr := stdout.ExpectVar("ENDPOINT")
+	if stdout.Error() != nil {
+		t.Fatal(stdout.Error())
+	}
+	ep, err := inaming.NewEndpoint(epstr)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Different client processes (represented by different stream managers
+	// in this test) should be able to make progress, even if the server
+	// has reached its file descriptor limit.
+	nattempts := 0
+	for i := 0; i < 2*nfiles; i++ {
+		client := InternalNew(naming.FixedRoutingID(uint64(i)))
+		defer client.Shutdown()
+		principal := testutil.NewPrincipal(fmt.Sprintf("client%d", i))
+		connected := false
+		for !connected {
+			nattempts++
+			// If the client connection reached the server when it
+			// was at its limit, it might fail.  However, this
+			// failure will trigger the "kill connections" logic at
+			// the server and eventually the client should succeed.
+			vc, err := client.Dial(ep, principal)
+			if err != nil {
+				continue
+			}
+			// Establish a flow to prevent the VC (and thus the
+			// underlying VIF) from being garbage collected as an
+			// "inactive" connection.
+			flow, err := vc.Connect()
+			if err != nil {
+				continue
+			}
+			defer flow.Close()
+			connected = true
+		}
+	}
+	var stderr bytes.Buffer
+	if err := h.Shutdown(nil, &stderr); err != nil {
+		t.Fatal(err)
+	}
+	if log := expect.NewSession(t, bytes.NewReader(stderr.Bytes()), time.Minute).ExpectSetEventuallyRE("listener.go.*Killing [1-9][0-9]* Conns"); len(log) == 0 {
+		t.Errorf("Failed to find log message talking about killing Conns in:\n%v", stderr.String())
+	}
+	t.Logf("Server FD limit:%d", nfiles)
+	t.Logf("Client connection attempts: %d", nattempts)
+}
+
+func TestConcurrentDials(t *testing.T) {
+	// Concurrent Dials to the same network, address should only result in one VIF.
+	server := InternalNew(naming.FixedRoutingID(0x55555555))
+	client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+	principal := testutil.NewPrincipal("test")
+
+	// Using "tcp4" instead of "tcp" because the latter can end up with IPv6
+	// addresses and our Google Compute Engine integration test machines cannot
+	// resolve IPv6 addresses.
+	// As of April 2014, https://developers.google.com/compute/docs/networking
+	// said that IPv6 is not yet supported.
+	ln, ep, err := server.Listen("tcp4", "127.0.0.1:0", principal, principal.BlessingStore().Default())
+	if err != nil {
+		t.Fatal(err)
+	}
+	go acceptLoop(ln)
+
+	nep := &inaming.Endpoint{
+		Protocol: ep.Addr().Network(),
+		Address:  ep.Addr().String(),
+		RID:      ep.RoutingID(),
+	}
+
+	// Dial multiple VCs
+	errCh := make(chan error, 10)
+	for i := 0; i < 10; i++ {
+		go func() {
+			// Use a goroutine-local err to avoid racing on the
+			// enclosing function's err variable.
+			_, err := client.Dial(nep, testutil.NewPrincipal("client"))
+			errCh <- err
+		}()
+	}
+	for i := 0; i < 10; i++ {
+		if err = <-errCh; err != nil {
+			t.Fatal(err)
+		}
+	}
+	// They should all be on the same VIF.
+	if n := numVIFs(client); n != 1 {
+		t.Errorf("Client has %d VIFs, want 1\n%v", n, debugString(client))
+	}
+}
diff --git a/runtime/internal/rpc/stream/message/coding.go b/runtime/internal/rpc/stream/message/coding.go
new file mode 100644
index 0000000..d8e7c68
--- /dev/null
+++ b/runtime/internal/rpc/stream/message/coding.go
@@ -0,0 +1,213 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package message
+
+import (
+	"encoding/binary"
+	"io"
+
+	"v.io/v23/verror"
+
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+)
+
+const pkgPath = "v.io/x/ref/runtime/internal/rpc/stream/message"
+
+func reg(id, msg string) verror.IDAction {
+	return verror.Register(verror.ID(pkgPath+id), verror.NoRetry, msg)
+}
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errLargerThan3ByteUint = reg(".errLargerThan3ByteUint", "integer too large to represent in 3 bytes")
+	errReadWrongNumBytes   = reg(".errReadWrongNumBytes", "read {3} bytes, wanted to read {4}")
+)
+
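+// write3ByteUint encodes n into dst[:3] in big-endian order; n must fit in
+// 24 bits.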
+func write3ByteUint(dst []byte, n int) error {
+	if n >= (1<<24) || n < 0 {
+		return verror.New(errLargerThan3ByteUint, nil)
+	}
+	dst[0] = byte((n & 0xff0000) >> 16)
+	dst[1] = byte((n & 0x00ff00) >> 8)
+	dst[2] = byte(n & 0x0000ff)
+	return nil
+}
+
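+// read3ByteUint decodes a big-endian 3-byte unsigned integer from src[:3].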
+func read3ByteUint(src []byte) int {
+	return int(src[0])<<16 | int(src[1])<<8 | int(src[2])
+}
+
+func write4ByteUint(dst []byte, n uint32) {
+	dst[0] = byte((n & 0xff000000) >> 24)
+	dst[1] = byte((n & 0x00ff0000) >> 16)
+	dst[2] = byte((n & 0x0000ff00) >> 8)
+	dst[3] = byte(n & 0x000000ff)
+}
+
+func read4ByteUint(src []byte) uint32 {
+	return uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
+}
+
+func readInt(r io.Reader, ptr interface{}) error {
+	return binary.Read(r, binary.BigEndian, ptr)
+}
+
+func writeInt(w io.Writer, ptr interface{}) error {
+	return binary.Write(w, binary.BigEndian, ptr)
+}
+
+func readString(r io.Reader, s *string) error {
+	var size uint32
+	if err := readInt(r, &size); err != nil {
+		return err
+	}
+	bytes := make([]byte, size)
+	// Use io.ReadFull rather than a bare Read so that a partial read from
+	// a streaming transport is retried instead of misreported.
+	if n, err := io.ReadFull(r, bytes); err != nil {
+		if err == io.EOF || err == io.ErrUnexpectedEOF {
+			return verror.New(errReadWrongNumBytes, nil, n, int(size))
+		}
+		return err
+	}
+	*s = string(bytes)
+	return nil
+}
+
+func writeString(w io.Writer, s string) error {
+	size := uint32(len(s))
+	if err := writeInt(w, size); err != nil {
+		return err
+	}
+	n, err := w.Write([]byte(s))
+	if err != nil {
+		return err
+	}
+	if n != int(size) {
+		return verror.New(errReadWrongNumBytes, nil, n, int(size))
+	}
+	return nil
+}
+
+// byteReader adapts an io.Reader to an io.ByteReader so that we can
+// use it with encoding/binary for varints etc.
+type byteReader struct{ io.Reader }
+
+func (b byteReader) ReadByte() (byte, error) {
+	var buf [1]byte
+	n, err := b.Reader.Read(buf[:])
+	switch {
+	case n == 1:
+		return buf[0], err
+	case err != nil:
+		return 0, err
+	default:
+		return 0, verror.New(errReadWrongNumBytes, nil, n, 1)
+	}
+}
+
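+// readCounters decodes a Counters map from r: a uvarint entry count followed
+// by one (VCI, flow, bytes) uvarint triple per entry.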
+func readCounters(r io.Reader) (Counters, error) {
+	var br io.ByteReader
+	var ok bool
+	if br, ok = r.(io.ByteReader); !ok {
+		br = byteReader{r}
+	}
+	size, err := binary.ReadUvarint(br)
+	if err != nil {
+		return nil, err
+	}
+	if size == 0 {
+		return nil, nil
+	}
+	c := Counters(make(map[CounterID]uint32, size))
+	for i := uint64(0); i < size; i++ {
+		vci, err := binary.ReadUvarint(br)
+		if err != nil {
+			return nil, err
+		}
+		fid, err := binary.ReadUvarint(br)
+		if err != nil {
+			return nil, err
+		}
+		bytes, err := binary.ReadUvarint(br)
+		if err != nil {
+			return nil, err
+		}
+		c.Add(id.VC(vci), id.Flow(fid), uint32(bytes))
+	}
+	return c, nil
+}
+
+func writeCounters(w io.Writer, c Counters) (err error) {
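+	// putUvarint appends a single varint to w, latching the first write
+	// error so that subsequent calls become no-ops.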
+	var vbuf [binary.MaxVarintLen64]byte
+	putUvarint := func(n uint64) {
+		if err == nil {
+			_, err = w.Write(vbuf[:binary.PutUvarint(vbuf[:], n)])
+		}
+	}
+	putUvarint(uint64(len(c)))
+	for cid, bytes := range c {
+		putUvarint(uint64(cid.VCI()))
+		putUvarint(uint64(cid.Flow()))
+		putUvarint(uint64(bytes))
+	}
+	return
+}
+
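+// readSetupOptions parses length-prefixed options until EOF. Each option's
+// payload is framed by a LimitedReader and any unread remainder is drained,
+// so unrecognized option codes are skipped cleanly.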
+func readSetupOptions(r io.Reader) ([]SetupOption, error) {
+	var opts []SetupOption
+	for {
+		var code setupOptionCode
+		switch err := readInt(r, &code); err {
+		case io.EOF:
+			return opts, nil
+		case nil:
+			break
+		default:
+			return nil, err
+		}
+		var size uint16
+		if err := readInt(r, &size); err != nil {
+			return nil, err
+		}
+		l := &io.LimitedReader{R: r, N: int64(size)}
+		switch code {
+		case naclBoxPublicKey:
+			var opt NaclBox
+			if err := opt.read(l); err != nil {
+				return nil, err
+			}
+			opts = append(opts, &opt)
+		}
+		// Consume any data remaining.
+		readAndDiscardToError(l)
+	}
+}
+
+func writeSetupOptions(w io.Writer, options []SetupOption) error {
+	for _, opt := range options {
+		if err := writeInt(w, opt.code()); err != nil {
+			return err
+		}
+		if err := writeInt(w, opt.size()); err != nil {
+			return err
+		}
+		if err := opt.write(w); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func readAndDiscardToError(r io.Reader) {
+	var data [1024]byte
+	for {
+		if _, err := r.Read(data[:]); err != nil {
+			return
+		}
+	}
+}
diff --git a/runtime/internal/rpc/stream/message/control.go b/runtime/internal/rpc/stream/message/control.go
new file mode 100644
index 0000000..0c33b91
--- /dev/null
+++ b/runtime/internal/rpc/stream/message/control.go
@@ -0,0 +1,367 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package message
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+
+	"v.io/v23/naming"
+	"v.io/v23/verror"
+
+	inaming "v.io/x/ref/runtime/internal/naming"
+	"v.io/x/ref/runtime/internal/rpc/stream/crypto"
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+	"v.io/x/ref/runtime/internal/rpc/version"
+)
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errUnrecognizedVCControlMessageCommand = reg(".errUnrecognizedVCControlMessageCommand",
+		"unrecognized VC control message command({3})")
+	errUnrecognizedVCControlMessageType = reg(".errUnrecognizedVCControlMessageType",
+		"unrecognized VC control message type({3})")
+	errFailedToDeserializedVCControlMessage = reg(".errFailedToDeserializedVCControlMessage", "failed to deserialize control message {3}({4}): {5}")
+	errFailedToWriteHeader                  = reg(".errFailedToWriteHeader", "failed to write header. Wrote {3} bytes instead of {4}{:5}")
+)
+
+// Control is the interface implemented by all control messages.
+type Control interface {
+	readFrom(r *bytes.Buffer) error
+	writeTo(w io.Writer) error
+}
+
+// SetupVC is a Control implementation containing information to setup a new
+// virtual circuit.
+type SetupVC struct {
+	VCI            id.VC
+	LocalEndpoint  naming.Endpoint // Endpoint of the sender (as seen by the sender), can be nil.
+	RemoteEndpoint naming.Endpoint // Endpoint of the receiver (as seen by the sender), can be nil.
+	Counters       Counters
+	Setup          Setup // Negotiate versioning and encryption.
+}
+
+// CloseVC is a Control implementation notifying the closure of an established
+// virtual circuit, or failure to establish a virtual circuit.
+//
+// The Error string will be empty in case the close was the result of an
+// explicit close by the application (and not an error).
+type CloseVC struct {
+	VCI   id.VC
+	Error string
+}
+
+// AddReceiveBuffers is a Control implementation used by the sender of the
+// message to inform the other end of a virtual circuit that it is ready to
+// receive more bytes of data (specified per flow).
+type AddReceiveBuffers struct {
+	Counters Counters
+}
+
+// OpenFlow is a Control implementation announcing the sender's intent to
+// create a new Flow. It also includes the number of bytes the sender of this
+// message is willing to read.
+type OpenFlow struct {
+	VCI             id.VC
+	Flow            id.Flow
+	InitialCounters uint32
+}
+
+// Setup is a control message used to negotiate VIF/VC options.
+type Setup struct {
+	Versions version.Range
+	Options  []SetupOption
+}
+
+// SetupOption is the base interface for optional Setup options.
+type SetupOption interface {
+	// code is the identifier for the option.
+	code() setupOptionCode
+
+	// size returns the number of bytes needed to represent the option.
+	size() uint16
+
+	// write the option to the writer.
+	write(w io.Writer) error
+
+	// read the option from the reader.
+	read(r io.Reader) error
+}
+
+// NaclBox is a SetupOption that specifies the public key for the NaclBox
+// encryption protocol.
+type NaclBox struct {
+	PublicKey crypto.BoxKey
+}
+
+// SetupStream is a byte stream used to negotiate VIF setup.  During VIF setup,
+// each party sends a Setup message to the other party containing their version
+// and options.  If the version requires further negotiation (such as for authentication),
+// the SetupStream is used for the negotiation.
+//
+// The protocol used on the stream is version-specific; it is not specified here.  See
+// vif/auth.go for an example.
+type SetupStream struct {
+	Data []byte
+}
+
+// Setup option codes.
+type setupOptionCode uint16
+
+const (
+	naclBoxPublicKey setupOptionCode = 0
+)
+
+// Command enum.
+type command uint8
+
+const (
+	deprecatedOpenVCCommand  command = 0
+	closeVCCommand           command = 1
+	addReceiveBuffersCommand command = 2
+	openFlowCommand          command = 3
+	hopSetupCommand          command = 4
+	hopSetupStreamCommand    command = 5
+	setupVCCommand           command = 6
+)
+
+func writeControl(w io.Writer, m Control) error {
+	var command command
+	switch m.(type) {
+	case *CloseVC:
+		command = closeVCCommand
+	case *AddReceiveBuffers:
+		command = addReceiveBuffersCommand
+	case *OpenFlow:
+		command = openFlowCommand
+	case *Setup:
+		command = hopSetupCommand
+	case *SetupStream:
+		command = hopSetupStreamCommand
+	case *SetupVC:
+		command = setupVCCommand
+	default:
+		return verror.New(errUnrecognizedVCControlMessageType, nil, fmt.Sprintf("%T", m))
+	}
+	var header [1]byte
+	header[0] = byte(command)
+	if n, err := w.Write(header[:]); n != len(header) || err != nil {
+		return verror.New(errFailedToWriteHeader, nil, n, len(header), err)
+	}
+	if err := m.writeTo(w); err != nil {
+		return err
+	}
+	return nil
+}
+
+func readControl(r *bytes.Buffer) (Control, error) {
+	var header byte
+	var err error
+	if header, err = r.ReadByte(); err != nil {
+		return nil, err
+	}
+	command := command(header)
+	var m Control
+	switch command {
+	case closeVCCommand:
+		m = new(CloseVC)
+	case addReceiveBuffersCommand:
+		m = new(AddReceiveBuffers)
+	case openFlowCommand:
+		m = new(OpenFlow)
+	case hopSetupCommand:
+		m = new(Setup)
+	case hopSetupStreamCommand:
+		m = new(SetupStream)
+	case setupVCCommand:
+		m = new(SetupVC)
+	default:
+		return nil, verror.New(errUnrecognizedVCControlMessageCommand, nil, command)
+	}
+	if err := m.readFrom(r); err != nil {
+		return nil, verror.New(errFailedToDeserializeVCControlMessage, nil, command, fmt.Sprintf("%T", m), err)
+	}
+	return m, nil
+}
+
+func (m *CloseVC) writeTo(w io.Writer) (err error) {
+	if err = writeInt(w, m.VCI); err != nil {
+		return
+	}
+	if err = writeString(w, m.Error); err != nil {
+		return
+	}
+	return
+}
+
+func (m *CloseVC) readFrom(r *bytes.Buffer) (err error) {
+	if err = readInt(r, &m.VCI); err != nil {
+		return
+	}
+	if err = readString(r, &m.Error); err != nil {
+		return
+	}
+	return
+}
+
+func (m *SetupVC) writeTo(w io.Writer) (err error) {
+	if err = writeInt(w, m.VCI); err != nil {
+		return
+	}
+	var localep string
+	if m.LocalEndpoint != nil {
+		localep = m.LocalEndpoint.String()
+	}
+	if err = writeString(w, localep); err != nil {
+		return
+	}
+	var remoteep string
+	if m.RemoteEndpoint != nil {
+		remoteep = m.RemoteEndpoint.String()
+	}
+	if err = writeString(w, remoteep); err != nil {
+		return
+	}
+	if err = writeCounters(w, m.Counters); err != nil {
+		return
+	}
+	if err = m.Setup.writeTo(w); err != nil {
+		return
+	}
+	return
+}
+
+func (m *SetupVC) readFrom(r *bytes.Buffer) (err error) {
+	if err = readInt(r, &m.VCI); err != nil {
+		return
+	}
+	var ep string
+	if err = readString(r, &ep); err != nil {
+		return
+	}
+	if ep != "" {
+		if m.LocalEndpoint, err = inaming.NewEndpoint(ep); err != nil {
+			return
+		}
+	}
+	if err = readString(r, &ep); err != nil {
+		return
+	}
+	if ep != "" {
+		if m.RemoteEndpoint, err = inaming.NewEndpoint(ep); err != nil {
+			return
+		}
+	}
+	if m.Counters, err = readCounters(r); err != nil {
+		return
+	}
+	if err = m.Setup.readFrom(r); err != nil {
+		return
+	}
+	return
+}
+
+func (m *AddReceiveBuffers) writeTo(w io.Writer) error {
+	return writeCounters(w, m.Counters)
+}
+
+func (m *AddReceiveBuffers) readFrom(r *bytes.Buffer) (err error) {
+	m.Counters, err = readCounters(r)
+	return
+}
+
+func (m *OpenFlow) writeTo(w io.Writer) (err error) {
+	if err = writeInt(w, m.VCI); err != nil {
+		return
+	}
+	if err = writeInt(w, m.Flow); err != nil {
+		return
+	}
+	if err = writeInt(w, m.InitialCounters); err != nil {
+		return
+	}
+	return
+}
+
+func (m *OpenFlow) readFrom(r *bytes.Buffer) (err error) {
+	if err = readInt(r, &m.VCI); err != nil {
+		return
+	}
+	if err = readInt(r, &m.Flow); err != nil {
+		return
+	}
+	if err = readInt(r, &m.InitialCounters); err != nil {
+		return
+	}
+	return
+}
+
+func (m *Setup) writeTo(w io.Writer) (err error) {
+	if err = writeInt(w, m.Versions.Min); err != nil {
+		return
+	}
+	if err = writeInt(w, m.Versions.Max); err != nil {
+		return
+	}
+	if err = writeSetupOptions(w, m.Options); err != nil {
+		return
+	}
+	return
+}
+
+func (m *Setup) readFrom(r *bytes.Buffer) (err error) {
+	if err = readInt(r, &m.Versions.Min); err != nil {
+		return
+	}
+	if err = readInt(r, &m.Versions.Max); err != nil {
+		return
+	}
+	if m.Options, err = readSetupOptions(r); err != nil {
+		return
+	}
+	return
+}
+
+// NaclBox returns the first NaclBox option, or nil if there is none.
+func (m *Setup) NaclBox() *NaclBox {
+	for _, opt := range m.Options {
+		if b, ok := opt.(*NaclBox); ok {
+			return b
+		}
+	}
+	return nil
+}
+
+func (*NaclBox) code() setupOptionCode {
+	return naclBoxPublicKey
+}
+
+func (m *NaclBox) size() uint16 {
+	return uint16(len(m.PublicKey))
+}
+
+func (m *NaclBox) write(w io.Writer) error {
+	_, err := w.Write(m.PublicKey[:])
+	return err
+}
+
+func (m *NaclBox) read(r io.Reader) error {
+	_, err := io.ReadFull(r, m.PublicKey[:])
+	return err
+}
+
+func (m *SetupStream) writeTo(w io.Writer) error {
+	_, err := w.Write(m.Data)
+	return err
+}
+
+func (m *SetupStream) readFrom(r *bytes.Buffer) error {
+	m.Data = r.Bytes()
+	return nil
+}
diff --git a/runtime/internal/rpc/stream/message/counters.go b/runtime/internal/rpc/stream/message/counters.go
new file mode 100644
index 0000000..f51074e
--- /dev/null
+++ b/runtime/internal/rpc/stream/message/counters.go
@@ -0,0 +1,57 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package message
+
+import (
+	"fmt"
+
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+)
+
+// CounterID encapsulates the VCI and Flow used for flow control counter
+// accounting.
+type CounterID uint64
+
+// VCI returns the VCI encoded within the CounterID
+func (c *CounterID) VCI() id.VC { return id.VC(*c >> 32) }
+
+// Flow returns the Flow identifier encoded within the CounterID
+func (c *CounterID) Flow() id.Flow { return id.Flow(*c & 0xffffffff) }
+
+func (c *CounterID) String() string { return fmt.Sprintf("Flow:%d/VCI:%d", c.Flow(), c.VCI()) }
+
+// MakeCounterID creates a CounterID from the provided (vci, fid) pair.
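+// The vci occupies the high 32 bits of the result and the fid the low 32
+// bits; for example, MakeCounterID(1, 2) == 0x0000000100000002.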
+func MakeCounterID(vci id.VC, fid id.Flow) CounterID {
+	return CounterID(uint64(vci)<<32 | uint64(fid))
+}
+
+// Counters is a map from (VCI, Flow) to the number of bytes for that (VCI,
+// Flow) pair that the receiver is willing to read.
+//
+// Counters are not safe for concurrent access from multiple goroutines.
+//
+// When received in Control messages, clients can iterate over the map:
+//	for cid, bytes := range counters {
+//		fmt.Printf("VCI=%d Flow=%d Bytes=%d\n", cid.VCI(), cid.Flow(), bytes)
+//	}
+type Counters map[CounterID]uint32
+
+// NewCounters creates a new Counters object.
+func NewCounters() Counters { return Counters(make(map[CounterID]uint32)) }
+
+// Add should be called by the receiving end of a Flow to indicate that it is
+// ready to read 'bytes' more data for the flow identified by (vci, fid).
+func (c Counters) Add(vci id.VC, fid id.Flow, bytes uint32) {
+	c[MakeCounterID(vci, fid)] += bytes
+}
+
+func (c Counters) String() string {
+	ret := "map[ "
+	for cid, bytes := range c {
+		ret += fmt.Sprintf("%d@%d:%d ", cid.Flow(), cid.VCI(), bytes)
+	}
+	ret += "]"
+	return ret
+}
diff --git a/runtime/internal/rpc/stream/message/counters_test.go b/runtime/internal/rpc/stream/message/counters_test.go
new file mode 100644
index 0000000..d2ea0f5
--- /dev/null
+++ b/runtime/internal/rpc/stream/message/counters_test.go
@@ -0,0 +1,65 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package message
+
+import (
+	"testing"
+	"testing/quick"
+
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+)
+
+func TestCounterID(t *testing.T) {
+	tests := []struct {
+		vci id.VC
+		fid id.Flow
+	}{
+		{0, 0},
+		{1, 10},
+		{0xffeeddcc, 0xffaabbcc},
+	}
+	for _, test := range tests {
+		cid := MakeCounterID(test.vci, test.fid)
+		if g, w := cid.VCI(), test.vci; g != w {
+			t.Errorf("Got VCI %d want %d", g, w)
+		}
+		if g, w := cid.Flow(), test.fid; g != w {
+			t.Errorf("Got Flow %d want %d", g, w)
+		}
+	}
+}
+
+func TestCounterID_Random(t *testing.T) {
+	f := func(vci id.VC, fid id.Flow) bool {
+		cid := MakeCounterID(vci, fid)
+		return cid.VCI() == vci && cid.Flow() == fid
+	}
+	if err := quick.Check(f, nil); err != nil {
+		t.Error(err)
+	}
+}
+
+func TestCounters(t *testing.T) {
+	f := func(vci id.VC, fid id.Flow, bytes []uint32) bool {
+		c := NewCounters()
+		var sum uint32
+		for _, bin := range bytes {
+			c.Add(vci, fid, bin)
+			if len(c) != 1 {
+				return false
+			}
+			sum += bin
+			for cid, bout := range c {
+				if cid.VCI() != vci || cid.Flow() != fid || bout != sum {
+					return false
+				}
+			}
+		}
+		return true
+	}
+	if err := quick.Check(f, nil); err != nil {
+		t.Error(err)
+	}
+}
diff --git a/runtime/internal/rpc/stream/message/data.go b/runtime/internal/rpc/stream/message/data.go
new file mode 100644
index 0000000..784b603
--- /dev/null
+++ b/runtime/internal/rpc/stream/message/data.go
@@ -0,0 +1,45 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package message
+
+import (
+	"fmt"
+
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+)
+
+// Data encapsulates an application data message.
+type Data struct {
+	VCI     id.VC // Must be non-zero.
+	Flow    id.Flow
+	flags   uint8
+	Payload *iobuf.Slice
+}
+
+// Close returns true if the sender of the data message requested that the flow be closed.
+func (d *Data) Close() bool { return d.flags&0x1 == 1 }
+
+// SetClose sets the Close flag of the message.
+func (d *Data) SetClose() { d.flags |= 0x1 }
+
+// Release releases the Payload
+func (d *Data) Release() {
+	if d.Payload != nil {
+		d.Payload.Release()
+		d.Payload = nil
+	}
+}
+
+// PayloadSize returns the number of bytes in the message's payload.
+func (d *Data) PayloadSize() int {
+	if d.Payload == nil {
+		return 0
+	}
+	return d.Payload.Size()
+}
+
+func (d *Data) String() string {
+	return fmt.Sprintf("VCI:%d Flow:%d Flags:%02x Payload:(%d bytes)", d.VCI, d.Flow, d.flags, d.PayloadSize())
+}
diff --git a/runtime/internal/rpc/stream/message/message.go b/runtime/internal/rpc/stream/message/message.go
new file mode 100644
index 0000000..199ba03
--- /dev/null
+++ b/runtime/internal/rpc/stream/message/message.go
@@ -0,0 +1,262 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package message provides data structures and serialization/deserialization
+// methods for messages exchanged by the implementation of the
+// v.io/x/ref/runtime/internal/rpc/stream interfaces.
+package message
+
+// This file contains methods to read and write messages sent over the VIF.
+// Every message has the following format:
+//
+// +-----------------------------------------+
+// | Type (1 byte) | PayloadSize (3 bytes)   |
+// +-----------------------------------------+
+// | Payload (PayloadSize bytes)             |
+// +-----------------------------------------+
+//
+// Currently, there are 2 valid types:
+// 0 (controlType)
+// 1 (dataType)
+//
+// When Type == controlType, the message is:
+// +---------------------------------------------+
+// |      0        | PayloadSize (3 bytes)       |
+// +---------------------------------------------+
+// | Cmd  (1 byte)                               |
+// +---------------------------------------------+
+// | Data (PayloadSize - MACSize - 1 bytes)      |
+// +---------------------------------------------+
+// | MAC (MACSize bytes)                         |
+// +---------------------------------------------+
+// Where Data is the serialized Control interface object.
+//
+// When Type == dataType, the message is:
+// +---------------------------------------------+
+// |      1        | PayloadSize (3 bytes)       |
+// +---------------------------------------------+
+// | id.VCI (4 bytes)                            |
+// +---------------------------------------------+
+// | id.Flow (4 bytes)                           |
+// +---------------------------------------------+
+// | Flags (1 byte)                              |
+// +---------------------------------------------+
+// | MAC (MACSize bytes)                         |
+// +---------------------------------------------+
+// | Data (PayloadSize - 9 - MACSize bytes)      |
+// +---------------------------------------------+
+// Where Data is the application data.  The Data is encrypted separately; it is
+// not included in the MAC.
+//
+// A crypto.ControlCipher is used to encrypt the control data.  The MACSize
+// comes from the ControlCipher.  When used, the first word of the header,
+// containing the Type and PayloadSize, is encrypted with the cipher's Encrypt
+// method.  The rest of the control data is encrypted with the cipher's Seal
+// method.  This means that none of the data is observable by an adversary, but
+// the type and length are subject to corruption (the rest of the data is not).
+// This doesn't matter -- if the Type or PayloadSize is corrupted by an
+// adversary, the payload will be misread, and will fail to validate.
+//
+// We could potentially pass the Type and PayloadSize in the clear, but then the
+// framing would be observable, a (probably minor) information leak.  Since
+// there is no reason to do so, we encrypt everything.
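+//
+// As a worked example (the sizes are illustrative, not from any particular
+// cipher): a control message whose serialized Control object occupies 10
+// bytes, sent with a ControlCipher whose MACSize is 16, has
+// PayloadSize = 1 (command byte) + 10 (data) + 16 (MAC) = 27, so the frame
+// is a 4-byte header (type 0, payload size 27) followed by 27 payload
+// bytes, all encrypted as described above.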
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+
+	"v.io/x/lib/vlog"
+
+	"v.io/v23/verror"
+
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	"v.io/x/ref/runtime/internal/rpc/stream/crypto"
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+)
+
+const (
+	// Size (in bytes) of headers appended to application data payload in
+	// Data messages.
+	HeaderSizeBytes = commonHeaderSizeBytes + dataHeaderSizeBytes
+
+	commonHeaderSizeBytes = 4 // 1 byte type + 3 bytes payload length
+	dataHeaderSizeBytes   = 9 // 4 byte id.VC + 4 byte id.Flow + 1 byte flags
+
+	// Make sure the first byte can't be ASCII to ensure that a VC
+	// header can never be confused with a web socket request.
+	// TODO(cnicolaou): remove the original controlType and dataType values
+	// when new binaries are pushed.
+	controlType   = 0
+	controlTypeWS = 0x80
+	dataType      = 1
+	dataTypeWS    = 0x81
+)
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errEmptyMessage            = reg(".errEmptyMessage", "message is empty")
+	errCorruptedMessage        = reg(".errCorruptedMessage", "corrupted message")
+	errInvalidMessageType      = reg(".errInvalidMessageType", "invalid message type {3}")
+	errUnrecognizedMessageType = reg(".errUnrecognizedMessageType", "unrecognized message type {3}")
+	errFailedToReadVCHeader    = reg(".errFailedToReadVCHeader", "failed to read VC header{:3}")
+	errFailedToReadPayload     = reg(".errFailedToReadPayload", "failed to read payload of {3} bytes for type {4}{:5}")
+)
+
+// T is the interface implemented by all messages communicated over a VIF.
+type T interface {
+}
+
+// ReadFrom reads a message from the provided iobuf.Reader.
+//
+// Sample usage:
+//	msg, err := message.ReadFrom(r, cipher)
+//	switch m := msg.(type) {
+//		case *Data:
+//			notifyFlowOfReceivedData(m.VCI, m.Flow, m.Payload)
+//			if m.Close() {
+//				closeFlow(m.VCI, m.Flow)
+//			}
+//		case Control:
+//			handleControlMessage(m)
+//	}
+func ReadFrom(r *iobuf.Reader, c crypto.ControlCipher) (T, error) {
+	header, err := r.Read(commonHeaderSizeBytes)
+	if err != nil {
+		return nil, verror.New(errFailedToReadVCHeader, nil, err)
+	}
+	c.Decrypt(header.Contents)
+	msgType := header.Contents[0]
+	msgPayloadSize := read3ByteUint(header.Contents[1:4])
+	header.Release()
+	payload, err := r.Read(msgPayloadSize)
+	if err != nil {
+		return nil, verror.New(errFailedToReadPayload, nil, msgPayloadSize, msgType, err)
+	}
+	macSize := c.MACSize()
+	switch msgType {
+	case controlType, controlTypeWS:
+		if !c.Open(payload.Contents) {
+			payload.Release()
+			return nil, verror.New(errCorruptedMessage, nil)
+		}
+		m, err := readControl(bytes.NewBuffer(payload.Contents[:msgPayloadSize-macSize]))
+		payload.Release()
+		return m, err
+	case dataType, dataTypeWS:
+		if !c.Open(payload.Contents[0 : dataHeaderSizeBytes+macSize]) {
+			payload.Release()
+			return nil, verror.New(errCorruptedMessage, nil)
+		}
+		m := &Data{
+			VCI:     id.VC(read4ByteUint(payload.Contents[0:4])),
+			Flow:    id.Flow(read4ByteUint(payload.Contents[4:8])),
+			flags:   payload.Contents[8],
+			Payload: payload,
+		}
+		m.Payload.TruncateFront(uint(dataHeaderSizeBytes + macSize))
+		return m, nil
+	default:
+		payload.Release()
+		return nil, verror.New(errUnrecognizedMessageType, nil, msgType)
+	}
+}
+
+// WriteTo serializes message and makes a single call to w.Write.
+// It is the inverse of ReadFrom.
+//
+// By writing the message in a single call to w.Write, confusion is avoided in
+// case multiple goroutines are calling Write on w simultaneously.
+//
+// If message is a Data message, the Payload contents will be Released
+// irrespective of the return value of this method.
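+//
+// Sample usage (a sketch; conn, vci, flow, payload and cipher are
+// illustrative placeholders):
+//	m := &Data{VCI: vci, Flow: flow, Payload: payload}
+//	if err := message.WriteTo(conn, m, cipher); err != nil {
+//		// handle the write error
+//	}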
+func WriteTo(w io.Writer, message T, c crypto.ControlCipher) error {
+	macSize := c.MACSize()
+	switch m := message.(type) {
+	case *Data:
+		payloadSize := m.PayloadSize() + dataHeaderSizeBytes + macSize
+		msg := mkHeaderSpace(m.Payload, uint(HeaderSizeBytes+macSize))
+		header := msg.Contents[0 : HeaderSizeBytes+macSize]
+		header[0] = dataType
+		if err := write3ByteUint(header[1:4], payloadSize); err != nil {
+			return err
+		}
+		write4ByteUint(header[4:8], uint32(m.VCI))
+		write4ByteUint(header[8:12], uint32(m.Flow))
+		header[12] = m.flags
+		EncryptMessage(msg.Contents, c)
+		_, err := w.Write(msg.Contents)
+		msg.Release()
+		return err
+	case Control:
+		var buf bytes.Buffer
+		// Prevent a few memory allocations by presizing the buffer to
+		// something that is large enough for typical control messages.
+		buf.Grow(256)
+		// Reserve space for the header
+		if err := extendBuffer(&buf, commonHeaderSizeBytes); err != nil {
+			return err
+		}
+		if err := writeControl(&buf, m); err != nil {
+			return err
+		}
+		if err := extendBuffer(&buf, macSize); err != nil {
+			return err
+		}
+		msg := buf.Bytes()
+		msg[0] = controlType
+		if err := write3ByteUint(msg[1:4], buf.Len()-commonHeaderSizeBytes); err != nil {
+			return err
+		}
+		EncryptMessage(msg, c)
+		_, err := w.Write(msg)
+		return err
+	default:
+		return verror.New(errInvalidMessageType, nil, fmt.Sprintf("%T", m))
+	}
+}
+
+// EncryptMessage encrypts the message's control data in place.
+func EncryptMessage(msg []byte, c crypto.ControlCipher) error {
+	if len(msg) == 0 {
+		return verror.New(errEmptyMessage, nil)
+	}
+	n := len(msg)
+	switch msgType := msg[0]; msgType {
+	case controlType:
+		// skip
+	case dataType:
+		n = HeaderSizeBytes + c.MACSize()
+	default:
+		return verror.New(errUnrecognizedMessageType, nil, msgType)
+	}
+	c.Encrypt(msg[0:commonHeaderSizeBytes])
+	c.Seal(msg[commonHeaderSizeBytes:n])
+	return nil
+}
+
+func mkHeaderSpace(slice *iobuf.Slice, space uint) *iobuf.Slice {
+	if slice == nil {
+		return iobuf.NewSlice(make([]byte, space))
+	}
+	if slice.ExpandFront(space) {
+		return slice
+	}
+	vlog.VI(10).Infof("Failed to expand slice by %d bytes. Copying", space)
+	contents := make([]byte, slice.Size()+int(space))
+	copy(contents[space:], slice.Contents)
+	slice.Release()
+	return iobuf.NewSlice(contents)
+}
+
+var emptyBytes [256]byte
+
+func extendBuffer(buf *bytes.Buffer, size int) error {
+	_, err := buf.Write(emptyBytes[:size])
+	return err
+}
diff --git a/runtime/internal/rpc/stream/message/message_test.go b/runtime/internal/rpc/stream/message/message_test.go
new file mode 100644
index 0000000..aa79604
--- /dev/null
+++ b/runtime/internal/rpc/stream/message/message_test.go
@@ -0,0 +1,216 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package message
+
+import (
+	"bytes"
+	"encoding/binary"
+	"reflect"
+	"testing"
+
+	"v.io/v23/naming"
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	inaming "v.io/x/ref/runtime/internal/naming"
+	"v.io/x/ref/runtime/internal/rpc/stream/crypto"
+	iversion "v.io/x/ref/runtime/internal/rpc/version"
+)
+
+// testControlCipher is a super-simple cipher that XORs each byte of the
+// payload with 0xaa.
+type testControlCipher struct{}
+
+const testMACSize = 4
+
+func (*testControlCipher) MACSize() int {
+	return testMACSize
+}
+
+func testMAC(data []byte) []byte {
+	var h uint32
+	for _, b := range data {
+		h = (h << 1) ^ uint32(b)
+	}
+	var hash [4]byte
+	binary.BigEndian.PutUint32(hash[:], h)
+	return hash[:]
+}
+
+func (c *testControlCipher) Decrypt(data []byte) {
+	for i := range data {
+		data[i] ^= 0xaa
+	}
+}
+
+func (c *testControlCipher) Encrypt(data []byte) {
+	for i := range data {
+		data[i] ^= 0xaa
+	}
+}
+
+func (c *testControlCipher) Open(data []byte) bool {
+	mac := testMAC(data[:len(data)-testMACSize])
+	if !bytes.Equal(mac, data[len(data)-testMACSize:]) {
+		return false
+	}
+	c.Decrypt(data[:len(data)-testMACSize])
+	return true
+}
+
+func (c *testControlCipher) Seal(data []byte) error {
+	c.Encrypt(data[:len(data)-testMACSize])
+	mac := testMAC(data[:len(data)-testMACSize])
+	copy(data[len(data)-testMACSize:], mac)
+	return nil
+}
+
+func TestControl(t *testing.T) {
+	counters := NewCounters()
+	counters.Add(12, 13, 10240)
+	tests := []Control{
+		&CloseVC{VCI: 1},
+		&CloseVC{VCI: 2, Error: "some error"},
+
+		&SetupVC{
+			VCI: 1,
+			LocalEndpoint: &inaming.Endpoint{
+				Protocol: "tcp",
+				Address:  "batman.com:1990",
+				RID:      naming.FixedRoutingID(0xba7),
+			},
+			RemoteEndpoint: &inaming.Endpoint{
+				Protocol: "tcp",
+				Address:  "bugsbunny.com:1940",
+				RID:      naming.FixedRoutingID(0xbb),
+			},
+			Counters: counters,
+			Setup: Setup{
+				Versions: iversion.Range{Min: 34, Max: 56},
+				Options: []SetupOption{
+					&NaclBox{PublicKey: crypto.BoxKey{'h', 'e', 'l', 'l', 'o', 'w', 'o', 'r', 'l', 'd'}},
+					&NaclBox{PublicKey: crypto.BoxKey{7, 67, 31}},
+				},
+			},
+		},
+		// SetupVC without endpoints
+		&SetupVC{
+			VCI:      1,
+			Counters: counters,
+			Setup: Setup{
+				Versions: iversion.Range{Min: 34, Max: 56},
+				Options: []SetupOption{
+					&NaclBox{PublicKey: crypto.BoxKey{'h', 'e', 'l', 'l', 'o', 'w', 'o', 'r', 'l', 'd'}},
+					&NaclBox{PublicKey: crypto.BoxKey{7, 67, 31}},
+				},
+			},
+		},
+
+		&AddReceiveBuffers{},
+		&AddReceiveBuffers{Counters: counters},
+
+		&OpenFlow{VCI: 1, Flow: 10, InitialCounters: 1 << 24},
+
+		&Setup{
+			Versions: iversion.Range{Min: 21, Max: 71},
+			Options: []SetupOption{
+				&NaclBox{PublicKey: crypto.BoxKey{'h', 'e', 'l', 'l', 'o', 'w', 'o', 'r', 'l', 'd'}},
+				&NaclBox{PublicKey: crypto.BoxKey{7, 67, 31}},
+			},
+		},
+
+		&SetupStream{Data: []byte("HelloWorld")},
+	}
+
+	var c testControlCipher
+	pool := iobuf.NewPool(0)
+	for i, msg := range tests {
+		var buf bytes.Buffer
+		if err := WriteTo(&buf, msg, &c); err != nil {
+			t.Errorf("WriteTo(%T) (test #%d) failed: %v", msg, i, err)
+			continue
+		}
+		reader := iobuf.NewReader(pool, &buf)
+		read, err := ReadFrom(reader, &c)
+		reader.Close()
+		if err != nil {
+			t.Errorf("ReadFrom failed (test #%d): %v", i, err)
+			continue
+		}
+		if !reflect.DeepEqual(msg, read) {
+			t.Errorf("Test #%d: Got %T = %+v, want %T = %+v", i, read, read, msg, msg)
+		}
+	}
+}
+
+func TestData(t *testing.T) {
+	tests := []struct {
+		Header  Data
+		Payload string
+	}{
+		{Data{VCI: 10, Flow: 3}, "abcd"},
+		{Data{VCI: 10, Flow: 3, flags: 1}, "batman"},
+	}
+
+	var c testControlCipher
+	pool := iobuf.NewPool(0)
+	allocator := iobuf.NewAllocator(pool, HeaderSizeBytes+testMACSize)
+	for i, test := range tests {
+		var buf bytes.Buffer
+		msgW := test.Header
+		msgW.Payload = allocator.Copy([]byte(test.Payload))
+		if err := WriteTo(&buf, &msgW, &c); err != nil {
+			t.Errorf("WriteTo(%v) failed: %v", i, err)
+			continue
+		}
+		reader := iobuf.NewReader(pool, &buf)
+		read, err := ReadFrom(reader, &c)
+		if err != nil {
+			t.Errorf("ReadFrom(%v) failed: %v", i, err)
+			continue
+		}
+		msgR := read.(*Data)
+		// Must compare Payload and the rest of the message separately.
+		// reflect.DeepEqual(msgR, &msgW) will not cut it because the
+		// iobuf.Slice objects might not pass the DeepEqual test.  That
+		// is fine; the important thing is for iobuf.Slice.Contents to
+		// match.
+		if g, w := string(msgR.Payload.Contents), test.Payload; g != w {
+			t.Errorf("Mismatched payloads in test #%d. Got %q want %q", i, g, w)
+		}
+		msgR.Release()
+		if !reflect.DeepEqual(&test.Header, msgR) {
+			t.Errorf("Mismatched headers in test #%d. Got %+v want %+v", i, msgR, &test.Header)
+		}
+	}
+}
+
+func TestDataNoPayload(t *testing.T) {
+	tests := []Data{
+		{VCI: 10, Flow: 3},
+		{VCI: 11, Flow: 4, flags: 10},
+	}
+	var c testControlCipher
+	pool := iobuf.NewPool(0)
+	for _, test := range tests {
+		var buf bytes.Buffer
+		if err := WriteTo(&buf, &test, &c); err != nil {
+			t.Errorf("WriteTo(%v) failed: %v", test, err)
+			continue
+		}
+		read, err := ReadFrom(iobuf.NewReader(pool, &buf), &c)
+		if err != nil {
+			t.Errorf("ReadFrom(%v) failed: %v", test, err)
+			continue
+		}
+		msgR := read.(*Data)
+		if msgR.PayloadSize() != 0 {
+			t.Errorf("ReadFrom(WriteTo(%v)) returned payload of %d bytes", test, msgR.PayloadSize())
+			continue
+		}
+		msgR.Payload = nil
+		if !reflect.DeepEqual(&test, msgR) {
+			t.Errorf("Wrote %v, Read %v", test, read)
+		}
+	}
+}
diff --git a/runtime/internal/rpc/stream/model.go b/runtime/internal/rpc/stream/model.go
new file mode 100644
index 0000000..baf4077
--- /dev/null
+++ b/runtime/internal/rpc/stream/model.go
@@ -0,0 +1,154 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package stream
+
+import (
+	"io"
+
+	"v.io/v23/naming"
+	"v.io/v23/security"
+)
+
+// Flow is the interface for a flow-controlled channel multiplexed on a Virtual
+// Circuit (VC) (and its underlying network connections).
+//
+// This allows for a single level of multiplexing and flow-control over
+// multiple concurrent streams (that may be used for RPCs) over multiple
+// VCs over a single underlying network connection.
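+//
+// A typical client-side exchange is sketched below (the names are
+// illustrative and error handling is elided); c is a stream.Connector:
+//	flow, _ := c.Connect()
+//	flow.Write([]byte("request"))
+//	buf := make([]byte, 1024)
+//	n, _ := flow.Read(buf)
+//	flow.Close()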
+type Flow interface {
+	io.ReadWriteCloser
+
+	// LocalEndpoint returns the local vanadium Endpoint
+	LocalEndpoint() naming.Endpoint
+	// RemoteEndpoint returns the remote vanadium Endpoint
+	RemoteEndpoint() naming.Endpoint
+	// LocalPrincipal returns the Principal at the local end of the flow that has authenticated with the remote end.
+	LocalPrincipal() security.Principal
+	// LocalBlessings returns the blessings presented by the local end of the flow during authentication.
+	LocalBlessings() security.Blessings
+	// RemoteBlessings returns the blessings presented by the remote end of the flow during authentication.
+	RemoteBlessings() security.Blessings
+	// LocalDischarges returns the discharges presented by the local end of the flow during authentication.
+	//
+	// The discharges are organized in a map keyed by the discharge-identifier.
+	LocalDischarges() map[string]security.Discharge
+	// RemoteDischarges returns the discharges presented by the remote end of the flow during authentication.
+	//
+	// The discharges are organized in a map keyed by the discharge-identifier.
+	RemoteDischarges() map[string]security.Discharge
+	// Cancel, like Close, closes the Flow but unlike Close discards any queued writes.
+	Cancel()
+	// IsClosed returns true if the flow has been closed or cancelled.
+	IsClosed() bool
+	// Closed returns a channel that remains open until the flow has been closed.
+	Closed() <-chan struct{}
+
+	// SetDeadline causes reads and writes to the flow to be
+	// cancelled when the given channel is closed.
+	SetDeadline(deadline <-chan struct{})
+
+	// VCDataCache returns the stream.VCDataCache object that allows information to be
+	// shared across the Flow's parent VC.
+	VCDataCache() VCDataCache
+}
+
+// VCDataCache is a thread-safe store that allows data to be shared across a VC,
+// with the intention of caching data that reappears over multiple flows.
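+//
+// A sketch of typical use (the key type and constructor here are
+// illustrative, not part of the API):
+//	type cacheKey struct{}
+//	v := flow.VCDataCache().GetOrInsert(cacheKey{}, func() interface{} {
+//		return newSharedState()
+//	})
+//	state := v.(*sharedState)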
+type VCDataCache interface {
+	// Get returns the 'value' associated with 'key'.
+	Get(key interface{}) interface{}
+
+	// GetOrInsert returns the 'value' associated with 'key'. If an entry already exists in the
+	// cache with the 'key', the 'value' is returned; otherwise 'create' is called to create a new
+	// value N, the cache is updated, and N is returned.  GetOrInsert may be called from
+	// multiple goroutines concurrently.
+	GetOrInsert(key interface{}, create func() interface{}) interface{}
+}
+
+// FlowOpt is the interface for all Flow options.
+type FlowOpt interface {
+	RPCStreamFlowOpt()
+}
+
+// Listener is the interface for accepting Flows created by a remote process.
+type Listener interface {
+	// Accept blocks until a new Flow has been initiated by a remote process.
+	// TODO(toddw): This should be:
+	//   Accept() (Flow, Connector, error)
+	Accept() (Flow, error)
+
+	// Close prevents new Flows from being accepted on this Listener.
+	// Previously accepted Flows are not closed down.
+	Close() error
+}
+
+// ListenerOpt is the interface for all options that control the creation of a
+// Listener.
+type ListenerOpt interface {
+	RPCStreamListenerOpt()
+}
+
+// Connector is the interface for initiating Flows to a remote process over a
+// Virtual Circuit (VC).
+type Connector interface {
+	Connect(opts ...FlowOpt) (Flow, error)
+}
+
+// VC is the interface for creating authenticated and secure end-to-end
+// streams.
+//
+// VCs are multiplexed onto underlying network connections and can span
+// multiple hops. Authentication and encryption are end-to-end, even though
+// underlying network connections span a single hop.
+type VC interface {
+	Connector
+	Listen() (Listener, error)
+
+	// Close closes the VC and all flows on it, allowing any pending writes in
+	// flows to drain.
+	Close(reason error) error
+}
+
+// VCOpt is the interface for all VC options.
+type VCOpt interface {
+	RPCStreamVCOpt()
+}
+
+// Manager is the interface for managing the creation of VCs.
+type Manager interface {
+	// Listen creates a Listener that can be used to accept Flows initiated
+	// with the provided network address.
+	//
+	// For example:
+	//   ln, ep, err := Listen("tcp", ":0", principal, blessings)
+	//   for {
+	//     flow, err := ln.Accept()
+	//     // process flow
+	//   }
+	// can be used to accept Flows initiated by remote processes to the endpoint
+	// identified by the returned Endpoint.
+	//
+	// principal is used during authentication. If principal is nil, then the Listener
+	// expects to be used for unauthenticated, unencrypted communication.
+	// blessings are the Blessings presented to clients during authentication.
+	Listen(protocol, address string, principal security.Principal, blessings security.Blessings, opts ...ListenerOpt) (Listener, naming.Endpoint, error)
+
+	// Dial creates a VC to the provided remote endpoint.
+	// principal is used during authentication. If principal is nil, then the VC expects
+	// to be used for unauthenticated, unencrypted communication.
+	Dial(remote naming.Endpoint, principal security.Principal, opts ...VCOpt) (VC, error)
+
+	// ShutdownEndpoint closes all VCs (and Flows and Listeners over it)
+	// involving the provided remote endpoint.
+	ShutdownEndpoint(remote naming.Endpoint)
+
+	// Shutdown closes all VCs and Listeners (and Flows over them) and
+	// frees up internal data structures.
+	// The Manager is not usable after Shutdown has been called.
+	Shutdown()
+
+	// RoutingID returns the Routing ID associated with this Manager.
+	RoutingID() naming.RoutingID
+}
diff --git a/runtime/internal/rpc/stream/proxy/debug.go b/runtime/internal/rpc/stream/proxy/debug.go
new file mode 100644
index 0000000..a5ae4c0
--- /dev/null
+++ b/runtime/internal/rpc/stream/proxy/debug.go
@@ -0,0 +1,41 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// debugString dumps out the routing table at the proxy in text format.
+// The format is meant for debugging purposes and may change without notice.
+func (p *Proxy) debugString() string {
+	var buf bytes.Buffer
+	servers := p.servers.List()
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+	fmt.Fprintf(&buf, "Proxy with endpoint: %q. #Processes:%d #Servers:%d\n", p.endpoint(), len(p.processes), len(servers))
+	fmt.Fprintf(&buf, "=========\n")
+	fmt.Fprintf(&buf, "PROCESSES\n")
+	fmt.Fprintf(&buf, "=========\n")
+	index := 1
+	for process := range p.processes {
+		fmt.Fprintf(&buf, "(%d) - %v", index, process)
+		index++
+		process.mu.RLock()
+		fmt.Fprintf(&buf, " NextVCI:%d #Severs:%d\n", process.nextVCI, len(process.servers))
+		for vci, d := range process.routingTable {
+			fmt.Fprintf(&buf, "    VCI %4d --> VCI %4d @ %s\n", vci, d.VCI, d.Process)
+		}
+		process.mu.RUnlock()
+	}
+	fmt.Fprintf(&buf, "=======\n")
+	fmt.Fprintf(&buf, "SERVERS\n")
+	fmt.Fprintf(&buf, "=======\n")
+	for ix, is := range servers {
+		fmt.Fprintf(&buf, "(%d) %v\n", ix+1, is)
+	}
+	return buf.String()
+}
diff --git a/runtime/internal/rpc/stream/proxy/doc.go b/runtime/internal/rpc/stream/proxy/doc.go
new file mode 100644
index 0000000..2e0d16c
--- /dev/null
+++ b/runtime/internal/rpc/stream/proxy/doc.go
@@ -0,0 +1,50 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package proxy implements a proxy for the stream layer.
+//
+// Each process in vanadium is uniquely identified by a routing id
+// (naming.RoutingID). A proxy routes messages
+// (v.io/x/ref/runtime/internal/rpc/stream/message) it receives on a network connection
+// (net.Conn) to the network connection on which the destination process
+// (identified by the routing id) is listening.
+//
+// Processes behind a NAT can use the proxy to export their services outside
+// the NAT.
+// Sample usage:
+//    var proxyEP naming.Endpoint  // Endpoint of the proxy server
+//    var manager stream.Manager   // Manager used to create and listen for VCs and Flows.
+//    ln, ep, err := manager.Listen(proxyEP.Network(), proxyEP.String())
+//    // Now ln.Accept() will return Flows initiated by remote processes through the proxy.
+//
+// The proxy implemented in this package operates as follows:
+// - When an OpenVC message is received at the proxy, the RoutingID(R)
+//   of the source endpoint is associated with the net.Conn the message
+//   was received on.
+// - This association is used to route messages destined for R to the
+//   corresponding net.Conn
+// - Servers can "listen" on the proxy's address by establishing a VC to the
+//   proxy. Once the VC is established, messages received at the proxy destined
+//   for the RoutingID of the server are forwarded to the net.Conn between the
+//   server and the proxy.
+//
+// For example, consider the following three processes:
+// - Proxy(P) with routing id Rp
+// - A server (S) wishing to listen on the proxy's address with routing id Rs
+// - A client (C) wishing to connect to S through the proxy with routing id Rc.
+//
+// Here is a valid sequence of events that makes that possible:
+// (1) S establishes a VC with P over a net.Conn c1
+//     As a result, P knows that any messages intended for Rs should be
+//     forwarded on c1
+// (2) C connects to P over a net.Conn c2 and attempts to establish a VC with S
+//     using an OpenVC message.
+//     The source endpoint of this message contains the routing id Rc while the
+//     destination endpoint contains the routing id Rs.
+// (3) The proxy sees this message and:
+//     (a) Forwards the message over c1 (since Rs is mapped to c1)
+//     (b) Updates its routing table so that messages intended for Rc are forwarded over c2
+// (4) Any messages from S intended for the client received on c1 are forwarded
+//     by the proxy over c2.
+package proxy
diff --git a/runtime/internal/rpc/stream/proxy/protocol.vdl b/runtime/internal/rpc/stream/proxy/protocol.vdl
new file mode 100644
index 0000000..da87aa0
--- /dev/null
+++ b/runtime/internal/rpc/stream/proxy/protocol.vdl
@@ -0,0 +1,36 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy
+
+import "v.io/v23/security"
+
+// The proxy protocol is:
+// (1) Server establishes a VC to the proxy to register its routing id and authenticate.
+// (2) The server opens a flow and sends a "Request" message and waits for a "Response"
+//     message.
+// (3) This flow is then kept alive with no more data read/written.
+//     Closure of this flow indicates that proxying has stopped (or should stop).
+// (4) The proxy immediately closes any other flows on the VC.
+
+// Request is the message sent by a server to request that the proxy route
+// traffic intended for the server's RoutingId to the network connection
+// between the server and the proxy.
+type Request struct {
+  // Blessings of the server that wishes to be proxied.
+  // Used to authorize the use of the proxy.
+  Blessings security.WireBlessings
+  // Discharges required to make Blessings valid.
+  Discharges []security.WireDischarge
+}
+
+// Response is sent by the proxy to the server after processing Request.
+type Response struct {
+  // Error is a description of why the proxy refused to proxy the server.
+  // A nil error indicates that the proxy will route traffic to the server.
+  Error error
+  // Endpoint is the string representation of an endpoint that can be
+  // used to communicate with the server through the proxy.
+  Endpoint string
+}
diff --git a/runtime/internal/rpc/stream/proxy/protocol.vdl.go b/runtime/internal/rpc/stream/proxy/protocol.vdl.go
new file mode 100644
index 0000000..43f70a0
--- /dev/null
+++ b/runtime/internal/rpc/stream/proxy/protocol.vdl.go
@@ -0,0 +1,52 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated by the vanadium vdl tool.
+// Source: protocol.vdl
+
+package proxy
+
+import (
+	// VDL system imports
+	"v.io/v23/vdl"
+
+	// VDL user imports
+	"v.io/v23/security"
+)
+
+// Request is the message sent by a server to request that the proxy route
+// traffic intended for the server's RoutingId to the network connection
+// between the server and the proxy.
+type Request struct {
+	// Blessings of the server that wishes to be proxied.
+	// Used to authorize the use of the proxy.
+	Blessings security.Blessings
+	// Discharges required to make Blessings valid.
+	Discharges []security.Discharge
+}
+
+func (Request) __VDLReflect(struct {
+	Name string `vdl:"v.io/x/ref/runtime/internal/rpc/stream/proxy.Request"`
+}) {
+}
+
+// Response is sent by the proxy to the server after processing Request.
+type Response struct {
+	// Error is a description of why the proxy refused to proxy the server.
+	// A nil error indicates that the proxy will route traffic to the server.
+	Error error
+	// Endpoint is the string representation of an endpoint that can be
+	// used to communicate with the server through the proxy.
+	Endpoint string
+}
+
+func (Response) __VDLReflect(struct {
+	Name string `vdl:"v.io/x/ref/runtime/internal/rpc/stream/proxy.Response"`
+}) {
+}
+
+func init() {
+	vdl.Register((*Request)(nil))
+	vdl.Register((*Response)(nil))
+}
diff --git a/runtime/internal/rpc/stream/proxy/proxy.go b/runtime/internal/rpc/stream/proxy/proxy.go
new file mode 100644
index 0000000..40fec9b
--- /dev/null
+++ b/runtime/internal/rpc/stream/proxy/proxy.go
@@ -0,0 +1,827 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy
+
+import (
+	"fmt"
+	"net"
+	"reflect"
+	"sync"
+	"time"
+
+	"v.io/x/lib/netstate"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+	"v.io/v23/vom"
+	"v.io/x/lib/vlog"
+
+	"v.io/x/ref/runtime/internal/lib/bqueue"
+	"v.io/x/ref/runtime/internal/lib/bqueue/drrqueue"
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	"v.io/x/ref/runtime/internal/lib/publisher"
+	"v.io/x/ref/runtime/internal/lib/upcqueue"
+	inaming "v.io/x/ref/runtime/internal/naming"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/crypto"
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+	"v.io/x/ref/runtime/internal/rpc/stream/message"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+	"v.io/x/ref/runtime/internal/rpc/stream/vif"
+	iversion "v.io/x/ref/runtime/internal/rpc/version"
+
+	"v.io/x/ref/lib/stats"
+)
+
+const pkgPath = "v.io/x/ref/runtime/proxy"
+
+func reg(id, msg string) verror.IDAction {
+	return verror.Register(verror.ID(pkgPath+id), verror.NoRetry, msg)
+}
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errNoRoutingTableEntry       = reg(".errNoRoutingTableEntry", "routing table has no entry for the VC")
+	errProcessVanished           = reg(".errProcessVanished", "remote process vanished")
+	errDuplicateSetupVC          = reg(".errDuplicateSetupVC", "duplicate SetupVC request")
+	errVomEncodeResponse         = reg(".errVomEncodeResponse", "failed to encode response from proxy{:3}")
+	errNoRequest                 = reg(".errNoRequest", "unable to read Request{:3}")
+	errServerClosedByProxy       = reg(".errServerClosedByProxy", "server closed by proxy")
+	errRemoveServerVC            = reg(".errRemoveServerVC", "failed to remove server VC {3}{:4}")
+	errNetConnClosing            = reg(".errNetConnClosing", "net.Conn is closing")
+	errFailedToAcceptHealthCheck = reg(".errFailedToAcceptHealthCheck", "failed to accept health check flow")
+	errIncompatibleVersions      = reg(".errIncompatibleVersions", "{:3}")
+	errAlreadyProxied            = reg(".errAlreadyProxied", "server with routing id {3} is already being proxied")
+	errUnknownNetwork            = reg(".errUnknownNetwork", "unknown network {3}")
+	errListenFailed              = reg(".errListenFailed", "net.Listen({3}, {4}) failed{:5}")
+	errFailedToForwardRxBufs     = reg(".errFailedToForwardRxBufs", "failed to forward receive buffers{:3}")
+	errFailedToForwardDataMsg    = reg(".errFailedToForwardDataMsg", "failed to forward data message{:3}")
+	errFailedToForwardOpenFlow   = reg(".errFailedToForwardOpenFlow", "failed to forward open flow{:3}")
+	errServerNotBeingProxied     = reg(".errServerNotBeingProxied", "no server with routing id {3} is being proxied")
+	errServerVanished            = reg(".errServerVanished", "server with routing id {3} vanished")
+	errAccessibleAddresses       = reg(".errAccessibleAddresses", "failed to obtain a set of accessible addresses{:3}")
+	errNoAccessibleAddresses     = reg(".errNoAccessibleAddresses", "no accessible addresses were available for {3}")
+	errEmptyListenSpec           = reg(".errEmptyListenSpec", "no addresses supplied in the listen spec")
+)
+
+// Proxy routes virtual circuit (VC) traffic between multiple underlying
+// network connections.
+type Proxy struct {
+	ctx        *context.T
+	ln         net.Listener
+	rid        naming.RoutingID
+	principal  security.Principal
+	blessings  security.Blessings
+	authorizer security.Authorizer
+	mu         sync.RWMutex
+	servers    *servermap
+	processes  map[*process]struct{}
+	pubAddress string
+	statsName  string
+}
+
+// process encapsulates the physical network connection and the routing table
+// associated with the process at the other end of the network connection.
+type process struct {
+	proxy        *Proxy
+	conn         net.Conn
+	pool         *iobuf.Pool
+	reader       *iobuf.Reader
+	ctrlCipher   crypto.ControlCipher
+	queue        *upcqueue.T
+	mu           sync.RWMutex
+	routingTable map[id.VC]*destination
+	nextVCI      id.VC
+	servers      map[id.VC]*vc.VC // servers wishing to be proxied create a VC that terminates at the proxy
+	bq           bqueue.T         // Flow control for messages sent on behalf of servers.
+}
+
+// destination is an entry in the routing table of a process.
+type destination struct {
+	VCI     id.VC
+	Process *process
+}
+
+// server encapsulates information stored about a server exporting itself via the proxy.
+type server struct {
+	Process *process
+	VC      *vc.VC
+}
+
+func (s *server) RoutingID() naming.RoutingID { return s.VC.RemoteEndpoint().RoutingID() }
+
+func (s *server) Close(err error) {
+	if vc := s.Process.RemoveServerVC(s.VC.VCI()); vc != nil {
+		if err != nil {
+			vc.Close(verror.New(stream.ErrProxy, nil, verror.New(errRemoveServerVC, nil, s.VC.VCI(), err)))
+		} else {
+			vc.Close(verror.New(stream.ErrProxy, nil, verror.New(errServerClosedByProxy, nil)))
+		}
+		s.Process.SendCloseVC(s.VC.VCI(), err)
+	}
+}
+
+func (s *server) String() string {
+	return fmt.Sprintf("RoutingID %v on process %v (VCI:%v Blessings:%v)", s.RoutingID(), s.Process, s.VC.VCI(), s.VC.RemoteBlessings())
+}
+
+// servermap is a concurrency-safe map from the RoutingID of a server exporting itself
+// through the proxy to the underlying network connection that the server is found on.
+type servermap struct {
+	mu sync.Mutex
+	m  map[naming.RoutingID]*server
+}
+
+func (m *servermap) Add(server *server) error {
+	key := server.RoutingID()
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if m.m[key] != nil {
+		return verror.New(stream.ErrProxy, nil, verror.New(errAlreadyProxied, nil, key))
+	}
+	m.m[key] = server
+	proxyLog().Infof("Started proxying server: %v", server)
+	return nil
+}
+
+func (m *servermap) Remove(server *server) {
+	key := server.RoutingID()
+	m.mu.Lock()
+	if m.m[key] != nil {
+		delete(m.m, key)
+		proxyLog().Infof("Stopped proxying server: %v", server)
+	}
+	m.mu.Unlock()
+}
+
+func (m *servermap) Process(rid naming.RoutingID) *process {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if s := m.m[rid]; s != nil {
+		return s.Process
+	}
+	return nil
+}
+
+func (m *servermap) List() []string {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	ret := make([]string, 0, len(m.m))
+	for _, s := range m.m {
+		ret = append(ret, s.String())
+	}
+	return ret
+}
+
+// New creates a new Proxy that listens for network connections on the provided
+// ListenSpec and routes VC traffic between accepted connections.
+//
+// Servers wanting to "listen through the proxy" will only be allowed to do so
+// if the blessings they present are accepted by the provided authorization
+// policy (authorizer).
+func New(ctx *context.T, spec rpc.ListenSpec, authorizer security.Authorizer, names ...string) (shutdown func(), endpoint naming.Endpoint, err error) {
+	rid, err := naming.NewRoutingID()
+	if err != nil {
+		return nil, nil, err
+	}
+	proxy, err := internalNew(rid, ctx, spec, authorizer)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var pub publisher.Publisher
+	for _, name := range names {
+		if name == "" {
+			// Consistent with v23.rpc.Server.Serve(...)
+			// an empty name implies "do not publish".
+			continue
+		}
+		if pub == nil {
+			pub = publisher.New(ctx, v23.GetNamespace(ctx), time.Minute)
+			pub.AddServer(proxy.endpoint().String())
+		}
+		pub.AddName(name, false, true)
+	}
+
+	shutdown = func() {
+		if pub != nil {
+			pub.Stop()
+			pub.WaitForStop()
+		}
+		proxy.shutdown()
+	}
+	return shutdown, proxy.endpoint(), nil
+}
+
+func internalNew(rid naming.RoutingID, ctx *context.T, spec rpc.ListenSpec, authorizer security.Authorizer) (*Proxy, error) {
+	if len(spec.Addrs) == 0 {
+		return nil, verror.New(stream.ErrProxy, nil, verror.New(errEmptyListenSpec, nil))
+	}
+	laddr := spec.Addrs[0]
+	network := laddr.Protocol
+	address := laddr.Address
+	_, _, listenFn, _ := rpc.RegisteredProtocol(network)
+	if listenFn == nil {
+		return nil, verror.New(stream.ErrProxy, nil, verror.New(errUnknownNetwork, nil, network))
+	}
+	ln, err := listenFn(network, address)
+	if err != nil {
+		return nil, verror.New(stream.ErrProxy, nil, verror.New(errListenFailed, nil, network, address, err))
+	}
+	pub, _, err := netstate.PossibleAddresses(ln.Addr().Network(), ln.Addr().String(), spec.AddressChooser)
+	if err != nil {
+		ln.Close()
+		return nil, verror.New(stream.ErrProxy, nil, verror.New(errAccessibleAddresses, nil, err))
+	}
+	if len(pub) == 0 {
+		ln.Close()
+		return nil, verror.New(stream.ErrProxy, nil, verror.New(errNoAccessibleAddresses, nil, ln.Addr().String()))
+	}
+	if authorizer == nil {
+		authorizer = security.DefaultAuthorizer()
+	}
+	proxy := &Proxy{
+		ctx:        ctx,
+		ln:         ln,
+		rid:        rid,
+		authorizer: authorizer,
+		servers:    &servermap{m: make(map[naming.RoutingID]*server)},
+		processes:  make(map[*process]struct{}),
+		// TODO(cnicolaou): should use all of the available addresses
+		pubAddress: pub[0].String(),
+		principal:  v23.GetPrincipal(ctx),
+		statsName:  naming.Join("rpc", "proxy", "routing-id", rid.String(), "debug"),
+	}
+	if proxy.principal != nil {
+		proxy.blessings = proxy.principal.BlessingStore().Default()
+	}
+	stats.NewStringFunc(proxy.statsName, proxy.debugString)
+
+	go proxy.listenLoop()
+	return proxy, nil
+}
+
+func (p *Proxy) listenLoop() {
+	proxyLog().Infof("Proxy listening on (%q, %q): %v", p.ln.Addr().Network(), p.ln.Addr(), p.endpoint())
+	for {
+		conn, err := p.ln.Accept()
+		if err != nil {
+			proxyLog().Infof("Exiting listenLoop of proxy %q: %v", p.endpoint(), err)
+			return
+		}
+		go p.acceptProcess(conn)
+	}
+}
+
+func (p *Proxy) acceptProcess(conn net.Conn) {
+	pool := iobuf.NewPool(0)
+	reader := iobuf.NewReader(pool, conn)
+
+	var blessings security.Blessings
+	if p.principal != nil {
+		blessings = p.principal.BlessingStore().Default()
+	}
+
+	c, err := vif.AuthenticateAsServer(conn, reader, nil, p.principal, blessings, nil)
+	if err != nil {
+		processLog().Infof("Process %v failed to authenticate: %s", p, err)
+		return
+	}
+
+	process := &process{
+		proxy:        p,
+		conn:         conn,
+		pool:         pool,
+		reader:       reader,
+		ctrlCipher:   c,
+		queue:        upcqueue.New(),
+		routingTable: make(map[id.VC]*destination),
+		servers:      make(map[id.VC]*vc.VC),
+		bq:           drrqueue.New(vc.MaxPayloadSizeBytes),
+	}
+
+	p.mu.Lock()
+	p.processes[process] = struct{}{}
+	p.mu.Unlock()
+
+	go process.serverVCsLoop()
+	go process.writeLoop()
+	go process.readLoop()
+
+	processLog().Infof("Started process %v", process)
+}
+
+func (p *Proxy) removeProcess(process *process) {
+	p.mu.Lock()
+	delete(p.processes, process)
+	p.mu.Unlock()
+}
+
+func (p *Proxy) runServer(server *server, c <-chan vc.HandshakeResult) {
+	hr := <-c
+	if hr.Error != nil {
+		server.Close(hr.Error)
+		return
+	}
+	// See comments in protocol.vdl for the protocol between servers and the proxy.
+	conn, err := hr.Listener.Accept()
+	if err != nil {
+		server.Close(verror.New(stream.ErrProxy, nil, verror.New(errFailedToAcceptHealthCheck, nil)))
+		return
+	}
+	server.Process.InitVCI(server.VC.VCI())
+	var request Request
+	var response Response
+	dec := vom.NewDecoder(conn)
+	if err := dec.Decode(&request); err != nil {
+		response.Error = verror.New(stream.ErrProxy, nil, verror.New(errNoRequest, nil, err))
+	} else if err := p.authorize(server.VC, request); err != nil {
+		response.Error = err
+	} else if err := p.servers.Add(server); err != nil {
+		response.Error = verror.Convert(verror.ErrUnknown, nil, err)
+	} else {
+		defer p.servers.Remove(server)
+		proxyEP := p.endpoint()
+		ep := &inaming.Endpoint{
+			Protocol: proxyEP.Protocol,
+			Address:  proxyEP.Address,
+			RID:      server.VC.RemoteEndpoint().RoutingID(),
+		}
+		response.Endpoint = ep.String()
+	}
+	enc := vom.NewEncoder(conn)
+	if err := enc.Encode(response); err != nil {
+		proxyLog().Infof("Failed to encode response %#v for server %v", response, server)
+		server.Close(verror.New(stream.ErrProxy, nil, verror.New(errVomEncodeResponse, nil, err)))
+		return
+	}
+	// Reject all other flows
+	go func() {
+		for {
+			flow, err := hr.Listener.Accept()
+			if err != nil {
+				return
+			}
+			flow.Close()
+		}
+	}()
+	// Wait for this flow to be closed.
+	<-conn.Closed()
+	server.Close(nil)
+}
+
+func (p *Proxy) authorize(vc *vc.VC, request Request) error {
+	var dmap map[string]security.Discharge
+	if len(request.Discharges) > 0 {
+		dmap = make(map[string]security.Discharge)
+		for _, d := range request.Discharges {
+			dmap[d.ID()] = d
+		}
+	}
+	// Blessings must be bound to the same public key as the VC.
+	// (Repeating logic in the RPC server authorization code).
+	if got, want := request.Blessings.PublicKey(), vc.RemoteBlessings().PublicKey(); !request.Blessings.IsZero() && !reflect.DeepEqual(got, want) {
+		return verror.New(verror.ErrNoAccess, nil, fmt.Errorf("malformed request: Blessings sent in proxy.Request are bound to public key %v and not %v", got, want))
+	}
+	return p.authorizer.Authorize(p.ctx, security.NewCall(&security.CallParams{
+		LocalPrincipal:   vc.LocalPrincipal(),
+		LocalBlessings:   vc.LocalBlessings(),
+		RemoteBlessings:  request.Blessings,
+		LocalEndpoint:    vc.LocalEndpoint(),
+		RemoteEndpoint:   vc.RemoteEndpoint(),
+		LocalDischarges:  vc.LocalDischarges(),
+		RemoteDischarges: dmap,
+	}))
+}
+
+func (p *Proxy) routeCounters(process *process, counters message.Counters) {
+	// Since each VC can be routed to a different process, split up the
+	// Counters into one message per VC.
+	// Ideally, this would be split into one message per process (rather
+	// than per flow); that optimization is left as an exercise for the
+	// interested reader.
+	for cid, bytes := range counters {
+		srcVCI := cid.VCI()
+		if vc := process.ServerVC(srcVCI); vc != nil {
+			vc.ReleaseCounters(cid.Flow(), bytes)
+			continue
+		}
+		if d := process.Route(srcVCI); d != nil {
+			c := message.NewCounters()
+			c.Add(d.VCI, cid.Flow(), bytes)
+			if err := d.Process.queue.Put(&message.AddReceiveBuffers{Counters: c}); err != nil {
+				process.RemoveRoute(srcVCI)
+				process.SendCloseVC(srcVCI, verror.New(stream.ErrProxy, nil, verror.New(errFailedToForwardRxBufs, nil, err)))
+			}
+		}
+	}
+}
+
+func startRoutingVC(srcVCI, dstVCI id.VC, srcProcess, dstProcess *process) {
+	dstProcess.AddRoute(dstVCI, &destination{VCI: srcVCI, Process: srcProcess})
+	srcProcess.AddRoute(srcVCI, &destination{VCI: dstVCI, Process: dstProcess})
+	vcLog().Infof("Routing (VCI %d @ [%s]) <-> (VCI %d @ [%s])", srcVCI, srcProcess, dstVCI, dstProcess)
+}
+
+// endpoint returns the endpoint of the proxy service.  By dialing a VC to this
+// endpoint, processes can have their services exported through the proxy.
+func (p *Proxy) endpoint() *inaming.Endpoint {
+	ep := &inaming.Endpoint{
+		Protocol: p.ln.Addr().Network(),
+		Address:  p.pubAddress,
+		RID:      p.rid,
+	}
+	if prncpl := p.principal; prncpl != nil {
+		for b := range prncpl.BlessingsInfo(prncpl.BlessingStore().Default()) {
+			ep.Blessings = append(ep.Blessings, b)
+		}
+	}
+	return ep
+}
+
+// shutdown stops the proxy service, closing all network connections.
+func (p *Proxy) shutdown() {
+	stats.Delete(p.statsName)
+	p.ln.Close()
+	p.mu.Lock()
+	processes := p.processes
+	p.processes = nil
+	p.mu.Unlock()
+	for process := range processes {
+		process.Close()
+	}
+}
+
+func (p *process) serverVCsLoop() {
+	for {
+		w, bufs, err := p.bq.Get(nil)
+		if err != nil {
+			return
+		}
+		vci, fid := unpackIDs(w.ID())
+		if vc := p.ServerVC(vci); vc != nil {
+			queueDataMessages(bufs, vc, fid, p.queue)
+			if len(bufs) == 0 {
+				m := &message.Data{VCI: vci, Flow: fid}
+				m.SetClose()
+				p.queue.Put(m)
+				w.Shutdown(true)
+			}
+			continue
+		}
+		releaseBufs(0, bufs)
+	}
+}
+
+func releaseBufs(start int, bufs []*iobuf.Slice) {
+	for _, buf := range bufs[start:] {
+		buf.Release()
+	}
+}
+
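+// queueDataMessages encrypts each buffer for the given flow and enqueues the
+// resulting Data messages on q, releasing any unprocessed buffers on error.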
+func queueDataMessages(bufs []*iobuf.Slice, vc *vc.VC, fid id.Flow, q *upcqueue.T) {
+	for ix, b := range bufs {
+		m := &message.Data{VCI: vc.VCI(), Flow: fid}
+		var err error
+		if m.Payload, err = vc.Encrypt(fid, b); err != nil {
+			msgLog().Infof("vc.Encrypt failed. VC:%v Flow:%v Error:%v", vc, fid, err)
+			releaseBufs(ix+1, bufs)
+			return
+		}
+		if err = q.Put(m); err != nil {
+			msgLog().Infof("Failed to enqueue data message %v: %v", m, err)
+			m.Release()
+			releaseBufs(ix+1, bufs)
+			return
+		}
+	}
+}
+
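+// writeLoop serializes all queued control and data messages onto the
+// process's underlying network connection.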
+func (p *process) writeLoop() {
+	defer processLog().Infof("Exited writeLoop for %v", p)
+	defer p.Close()
+
+	for {
+		item, err := p.queue.Get(nil)
+		if err != nil {
+			if err != upcqueue.ErrQueueIsClosed {
+				processLog().Infof("upcqueue.Get failed on %v: %v", p, err)
+			}
+			return
+		}
+		if err = message.WriteTo(p.conn, item.(message.T), p.ctrlCipher); err != nil {
+			processLog().Infof("message.WriteTo on %v failed: %v", p, err)
+			return
+		}
+	}
+}
+
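+// readLoop demultiplexes messages arriving on the process's network
+// connection: messages for VCs that terminate at the proxy are dispatched
+// locally, and all others are forwarded along established routes.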
+func (p *process) readLoop() {
+	defer processLog().Infof("Exited readLoop for %v", p)
+	defer p.Close()
+
+	for {
+		msg, err := message.ReadFrom(p.reader, p.ctrlCipher)
+		if err != nil {
+			processLog().Infof("Read on %v failed: %v", p, err)
+			return
+		}
+		msgLog().Infof("Received msg: %T = %v", msg, msg)
+		switch m := msg.(type) {
+		case *message.Data:
+			if vc := p.ServerVC(m.VCI); vc != nil {
+				if err := vc.DispatchPayload(m.Flow, m.Payload); err != nil {
+					processLog().Infof("Ignoring data message %v from process %v: %v", m, p, err)
+				}
+				if m.Close() {
+					vc.ShutdownFlow(m.Flow)
+				}
+				break
+			}
+			srcVCI := m.VCI
+			if d := p.Route(srcVCI); d != nil {
+				m.VCI = d.VCI
+				if err := d.Process.queue.Put(m); err != nil {
+					m.Release()
+					p.RemoveRoute(srcVCI)
+					p.SendCloseVC(srcVCI, verror.New(stream.ErrProxy, nil, verror.New(errFailedToFowardDataMsg, nil, err)))
+				}
+				break
+			}
+			p.SendCloseVC(srcVCI, verror.New(stream.ErrProxy, nil, verror.New(errNoRoutingTableEntry, nil)))
+		case *message.OpenFlow:
+			if vc := p.ServerVC(m.VCI); vc != nil {
+				if err := vc.AcceptFlow(m.Flow); err != nil {
+					processLog().Infof("OpenFlow %+v on process %v failed: %v", m, p, err)
+					cm := &message.Data{VCI: m.VCI, Flow: m.Flow}
+					cm.SetClose()
+					p.queue.Put(cm)
+				}
+				vc.ReleaseCounters(m.Flow, m.InitialCounters)
+				break
+			}
+			srcVCI := m.VCI
+			if d := p.Route(srcVCI); d != nil {
+				m.VCI = d.VCI
+				if err := d.Process.queue.Put(m); err != nil {
+					p.RemoveRoute(srcVCI)
+					p.SendCloseVC(srcVCI, verror.New(stream.ErrProxy, nil, verror.New(errFailedToFowardOpenFlow, nil, err)))
+				}
+				break
+			}
+			p.SendCloseVC(srcVCI, verror.New(stream.ErrProxy, nil, verror.New(errNoRoutingTableEntry, nil)))
+		case *message.CloseVC:
+			if vc := p.RemoveServerVC(m.VCI); vc != nil {
+				vc.Close(verror.New(stream.ErrProxy, nil, verror.New(errRemoveServerVC, nil, m.VCI, m.Error)))
+				break
+			}
+			srcVCI := m.VCI
+			if d := p.Route(srcVCI); d != nil {
+				m.VCI = d.VCI
+				d.Process.queue.Put(m)
+				d.Process.RemoveRoute(d.VCI)
+			}
+			p.RemoveRoute(srcVCI)
+		case *message.AddReceiveBuffers:
+			p.proxy.routeCounters(p, m.Counters)
+		case *message.SetupVC:
+			// First let's ensure that we can speak a common protocol version.
+			intersection, err := iversion.SupportedRange.Intersect(&m.Setup.Versions)
+			if err != nil {
+				p.SendCloseVC(m.VCI, verror.New(stream.ErrProxy, nil,
+					verror.New(errIncompatibleVersions, nil, err)))
+				break
+			}
+
+			dstrid := m.RemoteEndpoint.RoutingID()
+			if naming.Compare(dstrid, p.proxy.rid) || naming.Compare(dstrid, naming.NullRoutingID) {
+				// VC that terminates at the proxy.
+				// See protocol.vdl for details on the protocol between the server and the proxy.
+				vcObj := p.NewServerVC(m)
+				// Route counters after creating the VC so that counters to the VC are not lost.
+				p.proxy.routeCounters(p, m.Counters)
+				if vcObj != nil {
+					server := &server{Process: p, VC: vcObj}
+					keyExchanger := func(pubKey *crypto.BoxKey) (*crypto.BoxKey, error) {
+						p.queue.Put(&message.SetupVC{
+							VCI: m.VCI,
+							Setup: message.Setup{
+								// Note that servers send clients not their actual supported versions,
+								// but the intersected range of the server and client ranges.  This
+								// is important because proxies may have adjusted the version ranges
+								// along the way, and we should negotiate a version that is compatible
+								// with all intermediate hops.
+								Versions: *intersection,
+								Options:  []message.SetupOption{&message.NaclBox{PublicKey: *pubKey}},
+							},
+							RemoteEndpoint: m.LocalEndpoint,
+							LocalEndpoint:  p.proxy.endpoint(),
+							// TODO(mattr): Consider adding counters.  See associated comment
+							// in vc.go:VC.HandshakeAcceptedVC for more details.
+						})
+						var theirPK *crypto.BoxKey
+						box := m.Setup.NaclBox()
+						if box != nil {
+							theirPK = &box.PublicKey
+						}
+						return theirPK, nil
+					}
+					go p.proxy.runServer(server, vcObj.HandshakeAcceptedVC(intersection.Max, p.proxy.principal, p.proxy.blessings, keyExchanger))
+				}
+				break
+			}
+
+			srcVCI := m.VCI
+
+			d := p.Route(srcVCI)
+			if d == nil {
+				// SetupVC involves two messages: one sent by the initiator
+				// and one by the acceptor. The routing table gets set up on
+				// the first message, so if there is no route yet, set up a
+				// routing table entry.
+				dstprocess := p.proxy.servers.Process(dstrid)
+				if dstprocess == nil {
+					p.SendCloseVC(m.VCI, verror.New(stream.ErrProxy, nil, verror.New(errServerNotBeingProxied, nil, dstrid)))
+					p.proxy.routeCounters(p, m.Counters)
+					break
+				}
+				dstVCI := dstprocess.AllocVCI()
+				startRoutingVC(srcVCI, dstVCI, p, dstprocess)
+				if d = p.Route(srcVCI); d == nil {
+					p.SendCloseVC(srcVCI, verror.New(stream.ErrProxy, nil, verror.New(errServerVanished, nil, dstrid)))
+					p.proxy.routeCounters(p, m.Counters)
+					break
+				}
+			}
+
+			// Forward the SetupVC message.
+			// Typically, a SetupVC message is accompanied with
+			// Counters for the new VC.  Keep that in the forwarded
+			// message and route the remaining counters separately.
+			counters := m.Counters
+			m.Counters = message.NewCounters()
+			dstVCI := d.VCI
+			for cid, bytes := range counters {
+				if cid.VCI() == srcVCI {
+					m.Counters.Add(dstVCI, cid.Flow(), bytes)
+					delete(counters, cid)
+				}
+			}
+			m.VCI = dstVCI
+			// Note that proxies rewrite the version range so that the final negotiated
+			// version will be compatible with all intermediate hops.
+			m.Setup.Versions = *intersection
+			d.Process.queue.Put(m)
+			p.proxy.routeCounters(p, counters)
+
+		default:
+			processLog().Infof("Closing %v because of invalid message %T", p, m)
+			return
+		}
+	}
+}
+
+func (p *process) String() string {
+	r := p.conn.RemoteAddr()
+	return fmt.Sprintf("(%s, %s)", r.Network(), r)
+}
+func (p *process) Route(vci id.VC) *destination {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+	return p.routingTable[vci]
+}
+func (p *process) AddRoute(vci id.VC, d *destination) {
+	p.mu.Lock()
+	p.routingTable[vci] = d
+	p.mu.Unlock()
+}
+func (p *process) InitVCI(vci id.VC) {
+	p.mu.Lock()
+	if p.nextVCI <= vci {
+		p.nextVCI = vci + 1
+	}
+	p.mu.Unlock()
+}
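+// AllocVCI returns the next VCI available for allocation by this process,
+// advancing by 2 so that locally allocated VCIs keep a fixed parity.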
+func (p *process) AllocVCI() id.VC {
+	p.mu.Lock()
+	ret := p.nextVCI
+	p.nextVCI += 2
+	p.mu.Unlock()
+	return ret
+}
+func (p *process) RemoveRoute(vci id.VC) {
+	p.mu.Lock()
+	delete(p.routingTable, vci)
+	p.mu.Unlock()
+}
+func (p *process) SendCloseVC(vci id.VC, err error) {
+	var estr string
+	if err != nil {
+		estr = err.Error()
+	}
+	p.queue.Put(&message.CloseVC{VCI: vci, Error: estr})
+}
+
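+// Close tears down the process: all server VCs are closed, peers routing
+// through this process are notified that their routes have vanished, and the
+// queues and underlying network connection are shut down.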
+func (p *process) Close() {
+	p.mu.Lock()
+	if p.routingTable == nil {
+		p.mu.Unlock()
+		return
+	}
+	rt := p.routingTable
+	p.routingTable = nil
+	for _, vc := range p.servers {
+		vc.Close(verror.New(stream.ErrProxy, nil, verror.New(errNetConnClosing, nil)))
+	}
+	p.mu.Unlock()
+	for _, d := range rt {
+		d.Process.SendCloseVC(d.VCI, verror.New(stream.ErrProxy, nil, verror.New(errProcessVanished, nil)))
+	}
+	p.bq.Close()
+	p.queue.Close()
+	p.conn.Close()
+
+	p.proxy.removeProcess(p)
+}
+
+func (p *process) ServerVC(vci id.VC) *vc.VC {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	return p.servers[vci]
+}
+
+func (p *process) NewServerVC(m *message.SetupVC) *vc.VC {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if vc := p.servers[m.VCI]; vc != nil {
+		vc.Close(verror.New(stream.ErrProxy, nil, verror.New(errDuplicateSetupVC, nil)))
+		return nil
+	}
+	vc := vc.InternalNew(vc.Params{
+		VCI:          m.VCI,
+		LocalEP:      m.RemoteEndpoint,
+		RemoteEP:     m.LocalEndpoint,
+		Pool:         p.pool,
+		ReserveBytes: message.HeaderSizeBytes,
+		Helper:       p,
+	})
+	p.servers[m.VCI] = vc
+	proxyLog().Infof("Registered VC %v from server on process %v", vc, p)
+	return vc
+}
+
+func (p *process) RemoveServerVC(vci id.VC) *vc.VC {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if vc := p.servers[vci]; vc != nil {
+		delete(p.servers, vci)
+		proxyLog().Infof("Unregistered server VC %v from process %v", vc, p)
+		return vc
+	}
+	return nil
+}
+
+// Make process implement vc.Helper
+func (p *process) NotifyOfNewFlow(vci id.VC, fid id.Flow, bytes uint) {
+	msg := &message.OpenFlow{VCI: vci, Flow: fid, InitialCounters: uint32(bytes)}
+	if err := p.queue.Put(msg); err != nil {
+		processLog().Infof("Failed to send OpenFlow(%+v) on process %v: %v", msg, p, err)
+	}
+}
+
+func (p *process) AddReceiveBuffers(vci id.VC, fid id.Flow, bytes uint) {
+	if bytes == 0 {
+		return
+	}
+	msg := &message.AddReceiveBuffers{Counters: message.NewCounters()}
+	msg.Counters.Add(vci, fid, uint32(bytes))
+	if err := p.queue.Put(msg); err != nil {
+		processLog().Infof("Failed to send AddReceiveBuffers(%+v) on process %v: %v", msg, p, err)
+	}
+}
+
+func (p *process) NewWriter(vci id.VC, fid id.Flow, priority bqueue.Priority) (bqueue.Writer, error) {
+	return p.bq.NewWriter(packIDs(vci, fid), priority, vc.DefaultBytesBufferedPerFlow)
+}
+
+// Convenience functions to assist with the logging convention.
+func proxyLog() vlog.InfoLog   { return vlog.VI(1) }
+func processLog() vlog.InfoLog { return vlog.VI(2) }
+func vcLog() vlog.InfoLog      { return vlog.VI(3) }
+func msgLog() vlog.InfoLog     { return vlog.VI(4) }
+func packIDs(vci id.VC, fid id.Flow) bqueue.ID {
+	return bqueue.ID(message.MakeCounterID(vci, fid))
+}
+func unpackIDs(b bqueue.ID) (id.VC, id.Flow) {
+	cid := message.CounterID(b)
+	return cid.VCI(), cid.Flow()
+}
diff --git a/runtime/internal/rpc/stream/proxy/proxy_test.go b/runtime/internal/rpc/stream/proxy/proxy_test.go
new file mode 100644
index 0000000..c325e13
--- /dev/null
+++ b/runtime/internal/rpc/stream/proxy/proxy_test.go
@@ -0,0 +1,509 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy_test
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"testing"
+	"time"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+
+	_ "v.io/x/ref/runtime/factories/generic"
+	inaming "v.io/x/ref/runtime/internal/naming"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/manager"
+	"v.io/x/ref/runtime/internal/rpc/stream/proxy"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+	"v.io/x/ref/runtime/internal/rpc/stream/vif"
+	"v.io/x/ref/test"
+	"v.io/x/ref/test/testutil"
+)
+
+//go:generate v23 test generate
+
+func TestProxy(t *testing.T) {
+	ctx, shutdown := v23Init()
+	defer shutdown()
+
+	_, shutdown, proxyEp, err := proxy.InternalNew(naming.FixedRoutingID(0xbbbbbbbbbbbbbbbb), ctx, security.AllowEveryone())
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer shutdown()
+	principal := testutil.NewPrincipal("test")
+	blessings := principal.BlessingStore().Default()
+
+	// Create the stream.Manager for the server.
+	server1 := manager.InternalNew(naming.FixedRoutingID(0x1111111111111111))
+	defer server1.Shutdown()
+	// Setup a stream.Listener that will accept VCs and Flows routed
+	// through the proxy.
+	ln1, ep1, err := server1.Listen(proxyEp.Network(), proxyEp.String(), principal, blessings)
+	if err != nil {
+		t.Log(verror.DebugString(err))
+		t.Fatal(err)
+	}
+	defer ln1.Close()
+
+	// Create the stream.Manager for a second server.
+	server2 := manager.InternalNew(naming.FixedRoutingID(0x2222222222222222))
+	defer server2.Shutdown()
+	// Setup a stream.Listener that will accept VCs and Flows routed
+	// through the proxy.
+	ln2, ep2, err := server2.Listen(proxyEp.Network(), proxyEp.String(), principal, blessings)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ln2.Close()
+
+	// Create the stream.Manager for a client.
+	client := manager.InternalNew(naming.FixedRoutingID(0xcccccccccccccccc))
+	defer client.Shutdown()
+
+	cases := []struct {
+		client stream.Manager
+		ln     stream.Listener
+		ep     naming.Endpoint
+	}{
+		{client, ln1, ep1},  // client writing to server1
+		{server1, ln2, ep2}, // server1 writing to server2
+		{server1, ln1, ep1}, // server1 writing to itself
+	}
+
+	const written = "the dough rises"
+	for i, c := range cases {
+		name := fmt.Sprintf("case #%d(write to %v):", i, c.ep)
+		// Accept a single flow and write out what is read to readChan
+		readChan := make(chan string)
+		go readFlow(t, c.ln, readChan)
+		if err := writeFlow(c.client, c.ep, written); err != nil {
+			t.Errorf("%s: %v", name, err)
+			continue
+		}
+		// Validate that the data read is the same as the data written.
+		if read := <-readChan; read != written {
+			t.Errorf("case #%d: Read %q, wrote %q", i, read, written)
+		}
+	}
+}
+
+func TestProxyAuthorization(t *testing.T) {
+	ctx, shutdown := v23Init()
+	defer shutdown()
+
+	_, shutdown, proxyEp, err := proxy.InternalNew(naming.FixedRoutingID(0xbbbbbbbbbbbbbbbb), ctx, testAuth{"alice", "carol"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer shutdown()
+
+	var (
+		alice = testutil.NewPrincipal("alice")
+		bob   = testutil.NewPrincipal("bob")
+		carol = testutil.NewPrincipal("carol")
+		dave  = testutil.NewPrincipal("dave")
+	)
+	// Make the proxy recognize "alice", "bob" and "carol", but not "dave"
+	v23.GetPrincipal(ctx).AddToRoots(alice.BlessingStore().Default())
+	v23.GetPrincipal(ctx).AddToRoots(bob.BlessingStore().Default())
+	v23.GetPrincipal(ctx).AddToRoots(carol.BlessingStore().Default())
+
+	testcases := []struct {
+		p  security.Principal
+		ok bool
+	}{
+		{alice, true}, // passes the auth policy
+		{bob, false},  // recognized, but not included in auth policy
+		{carol, true}, // passes the auth policy
+		{dave, false}, // not recognized, thus doesn't pass the auth policy
+	}
+	for idx, test := range testcases {
+		server := manager.InternalNew(naming.FixedRoutingID(uint64(idx)))
+		_, ep, err := server.Listen(proxyEp.Network(), proxyEp.String(), test.p, test.p.BlessingStore().Default(), proxyAuth{test.p})
+		if (err == nil) != test.ok {
+			t.Errorf("Got ep=%v, err=%v - wanted error:%v", ep, err, !test.ok)
+		}
+		server.Shutdown()
+	}
+}
+
+type proxyAuth struct {
+	p security.Principal
+}
+
+func (proxyAuth) RPCStreamListenerOpt() {}
+func (a proxyAuth) Login(stream.Flow) (security.Blessings, []security.Discharge, error) {
+	return a.p.BlessingStore().Default(), nil, nil
+}
+
+func TestDuplicateRoutingID(t *testing.T) {
+	ctx, shutdown := v23Init()
+	defer shutdown()
+
+	_, shutdown, proxyEp, err := proxy.InternalNew(naming.FixedRoutingID(0xbbbbbbbbbbbbbbbb), ctx, security.AllowEveryone())
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer shutdown()
+
+	// Create the stream.Manager for server1 and server2, both with the same routing ID
+	serverRID := naming.FixedRoutingID(0x5555555555555555)
+	server1 := manager.InternalNew(serverRID)
+	server2 := manager.InternalNew(serverRID)
+	defer server1.Shutdown()
+	defer server2.Shutdown()
+
+	principal := testutil.NewPrincipal("test")
+	blessings := principal.BlessingStore().Default()
+
+	// First server to claim serverRID should win.
+	ln1, ep1, err := server1.Listen(proxyEp.Network(), proxyEp.String(), principal, blessings)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ln1.Close()
+
+	ln2, ep2, err := server2.Listen(proxyEp.Network(), proxyEp.String(), principal, blessings)
+	if pattern := "routing id 00000000000000005555555555555555 is already being proxied"; err == nil || !strings.Contains(err.Error(), pattern) {
+		t.Errorf("Got (%v, %v, %v) want error \"...%v\" (ep1:%v)", ln2, ep2, err, pattern, ep1)
+	}
+}
+
+func TestProxyAuthentication(t *testing.T) {
+	ctx, shutdown := v23Init()
+	defer shutdown()
+
+	pproxy := v23.GetPrincipal(ctx)
+	_, shutdown, proxyEp, err := proxy.InternalNew(naming.FixedRoutingID(0xbbbbbbbbbbbbbbbb), ctx, security.AllowEveryone())
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer shutdown()
+	if got, want := proxyEp.BlessingNames(), []string{"proxy"}; !reflect.DeepEqual(got, want) {
+		t.Errorf("Proxy endpoint blessing names: got %v, want %v", got, want)
+	}
+
+	other := manager.InternalNew(naming.FixedRoutingID(0xcccccccccccccccc))
+	defer other.Shutdown()
+
+	vc, err := other.Dial(proxyEp, testutil.NewPrincipal("other"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	flow, err := vc.Connect()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := flow.RemoteBlessings(), pproxy.BlessingStore().Default(); !reflect.DeepEqual(got, want) {
+		t.Errorf("Proxy authenticated as [%v], want [%v]", got, want)
+	}
+}
+
+func TestServerBlessings(t *testing.T) {
+	ctx, shutdown := v23Init()
+	defer shutdown()
+
+	var (
+		pserver = testutil.NewPrincipal("server")
+		pclient = testutil.NewPrincipal("client")
+	)
+
+	_, shutdown, proxyEp, err := proxy.InternalNew(naming.FixedRoutingID(0xbbbbbbbbbbbbbbbb), ctx, security.AllowEveryone())
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer shutdown()
+	if got, want := proxyEp.BlessingNames(), []string{"proxy"}; !reflect.DeepEqual(got, want) {
+		t.Errorf("Proxy endpoint blessing names: got %v, want %v", got, want)
+	}
+
+	server := manager.InternalNew(naming.FixedRoutingID(0x5555555555555555))
+	defer server.Shutdown()
+
+	ln, ep, err := server.Listen(proxyEp.Network(), proxyEp.String(), pserver, pserver.BlessingStore().Default())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := ep.BlessingNames(), []string{"server"}; !reflect.DeepEqual(got, want) {
+		t.Errorf("Server endpoint %q: Got BlessingNames %v, want %v", ep, got, want)
+	}
+	defer ln.Close()
+	go func() {
+		for {
+			if _, err := ln.Accept(); err != nil {
+				return
+			}
+		}
+	}()
+
+	client := manager.InternalNew(naming.FixedRoutingID(0xcccccccccccccccc))
+	defer client.Shutdown()
+	vc, err := client.Dial(ep, pclient)
+	if err != nil {
+		t.Fatal(err)
+	}
+	flow, err := vc.Connect()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := flow.RemoteBlessings(), pserver.BlessingStore().Default(); !reflect.DeepEqual(got, want) {
+		t.Errorf("Got [%v] want [%v]", got, want)
+	}
+}
+
+func TestHostPort(t *testing.T) {
+	ctx, shutdown := v23Init()
+	defer shutdown()
+
+	_, shutdown, proxyEp, err := proxy.InternalNew(naming.FixedRoutingID(0xbbbbbbbbbbbbbbbb), ctx, security.AllowEveryone())
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer shutdown()
+	server := manager.InternalNew(naming.FixedRoutingID(0x5555555555555555))
+	defer server.Shutdown()
+	addr := proxyEp.Addr().String()
+	port := addr[strings.LastIndex(addr, ":"):]
+	principal := testutil.NewPrincipal("test")
+	blessings := principal.BlessingStore().Default()
+	ln, _, err := server.Listen(inaming.Network, "127.0.0.1"+port, principal, blessings)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ln.Close()
+}
+
+func TestClientBecomesServer(t *testing.T) {
+	ctx, shutdown := v23Init()
+	defer shutdown()
+
+	_, shutdown, proxyEp, err := proxy.InternalNew(naming.FixedRoutingID(0xbbbbbbbbbbbbbbbb), ctx, security.AllowEveryone())
+	if err != nil {
+		t.Fatal(err)
+	}
+	server := manager.InternalNew(naming.FixedRoutingID(0x5555555555555555))
+	client1 := manager.InternalNew(naming.FixedRoutingID(0x1111111111111111))
+	client2 := manager.InternalNew(naming.FixedRoutingID(0x2222222222222222))
+	defer shutdown()
+	defer server.Shutdown()
+	defer client1.Shutdown()
+	defer client2.Shutdown()
+
+	principal := testutil.NewPrincipal("test")
+	blessings := principal.BlessingStore().Default()
+	lnS, epS, err := server.Listen(proxyEp.Network(), proxyEp.String(), principal, blessings)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer lnS.Close()
+
+	pclient1 := testutil.NewPrincipal("client1")
+
+	// client1 must connect to the proxy to speak to the server.
+	// Keep a VC and Flow open to the server, to ensure that the proxy
+	// maintains routing information (at some point, inactive VIFs
+	// should be garbage collected, so this ensures that the VIF
+	// is "active")
+	if vc, err := client1.Dial(epS, pclient1); err != nil {
+		t.Fatal(err)
+	} else if flow, err := vc.Connect(); err != nil {
+		t.Fatal(err)
+	} else {
+		defer flow.Close()
+	}
+
+	// Now client1 becomes a server
+	lnC, epC, err := client1.Listen(proxyEp.Network(), proxyEp.String(), pclient1, pclient1.BlessingStore().Default())
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer lnC.Close()
+	// client2 should be able to talk to client1 through the proxy
+	rchan := make(chan string)
+	go readFlow(t, lnC, rchan)
+	if err := writeFlow(client2, epC, "daffy duck"); err != nil {
+		t.Fatalf("client2 failed to chat with client1: %v", err)
+	}
+	if got, want := <-rchan, "daffy duck"; got != want {
+		t.Fatalf("client2->client1 got %q want %q", got, want)
+	}
+}
+
+func testProxyIdleTimeout(t *testing.T, testServer bool) {
+	ctx, shutdown := v23Init()
+	defer shutdown()
+
+	const (
+		idleTime = 10 * time.Millisecond
+		// We use a long wait time here since it takes some time to handle VC
+		// close, especially in race testing.
+		waitTime = 150 * time.Millisecond
+	)
+
+	var (
+		pserver = testutil.NewPrincipal("server")
+		pclient = testutil.NewPrincipal("client")
+
+		opts  []stream.VCOpt
+		lopts []stream.ListenerOpt
+	)
+	if testServer {
+		lopts = []stream.ListenerOpt{vc.IdleTimeout{idleTime}}
+	} else {
+		opts = []stream.VCOpt{vc.IdleTimeout{idleTime}}
+	}
+
+	// Pause the idle timers.
+	triggerTimers := vif.SetFakeTimers()
+
+	Proxy, shutdown, proxyEp, err := proxy.InternalNew(naming.FixedRoutingID(0xbbbbbbbbbbbbbbbb), ctx, security.AllowEveryone())
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer shutdown()
+
+	// Create the stream.Manager for the server.
+	server := manager.InternalNew(naming.FixedRoutingID(0x1111111111111111))
+	defer server.Shutdown()
+	// Setup a stream.Listener that will accept VCs and Flows routed
+	// through the proxy.
+	ln, ep, err := server.Listen(proxyEp.Network(), proxyEp.String(), pserver, pserver.BlessingStore().Default(), lopts...)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ln.Close()
+	go func() {
+		for {
+			if _, err := ln.Accept(); err != nil {
+				return
+			}
+		}
+	}()
+
+	// Create the stream.Manager for a client.
+	client := manager.InternalNew(naming.FixedRoutingID(0xcccccccccccccccc))
+	defer client.Shutdown()
+
+	// Open a VC and a Flow.
+	VC, err := client.Dial(ep, pclient, opts...)
+	if err != nil {
+		t.Fatal(err)
+	}
+	flow, err := VC.Connect()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Trigger the idle timers.
+	triggerTimers()
+
+	// There should be two processes at this point: one for the server and one
+	// for the client.
+	if numProcs := proxy.NumProcesses(Proxy); numProcs != 2 {
+		t.Fatalf("Unexpected number of processes: %d", numProcs)
+	}
+
+	// There is one active flow. The VC should be kept open.
+	time.Sleep(waitTime)
+	if numProcs := proxy.NumProcesses(Proxy); numProcs != 2 {
+		t.Errorf("Expected the VC to be kept open, but it was closed")
+	}
+
+	flow.Close()
+
+	// The flow has been closed. The VC should be closed after idle timeout.
+	for range time.Tick(idleTime) {
+		if proxy.NumProcesses(Proxy) == 1 {
+			break
+		}
+	}
+
+	client.ShutdownEndpoint(ep)
+
+	// Even when the idle timeout is set for VCs in the server, we should still
+	// be able to dial to the server through the proxy, since one VC between the
+	// server and the proxy is kept alive by the proxy protocol.
+	//
+	// We use fake timers here again to avoid idle timeout during dialing.
+	defer vif.SetFakeTimers()()
+	if _, err := client.Dial(ep, pclient, opts...); err != nil {
+		t.Errorf("Expected dial to the server to succeed, but it failed: %v", err)
+	}
+}
+
+func TestProxyIdleTimeout(t *testing.T)       { testProxyIdleTimeout(t, false) }
+func TestProxyIdleTimeoutServer(t *testing.T) { testProxyIdleTimeout(t, true) }
+
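+// writeFlow dials a VC to ep, opens a flow on it, and writes data to it.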
+func writeFlow(mgr stream.Manager, ep naming.Endpoint, data string) error {
+	vc, err := mgr.Dial(ep, testutil.NewPrincipal("test"))
+	if err != nil {
+		return fmt.Errorf("manager.Dial(%v) failed: %v", ep, err)
+	}
+	flow, err := vc.Connect()
+	if err != nil {
+		return fmt.Errorf("vc.Connect failed: %v", err)
+	}
+	defer flow.Close()
+	if _, err := flow.Write([]byte(data)); err != nil {
+		return fmt.Errorf("flow.Write failed: %v", err)
+	}
+	return nil
+}
+
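+// readFlow accepts a single flow on ln, reads it until EOF, and sends the
+// accumulated contents on the read channel.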
+func readFlow(t *testing.T, ln stream.Listener, read chan<- string) {
+	defer close(read)
+	flow, err := ln.Accept()
+	if err != nil {
+		t.Error(err)
+		return
+	}
+	var tmp [1024]byte
+	var buf bytes.Buffer
+	for {
+		n, err := flow.Read(tmp[:])
+		if err == io.EOF {
+			read <- buf.String()
+			return
+		}
+		if err != nil {
+			t.Error(err)
+			return
+		}
+		buf.Write(tmp[:n])
+	}
+}
+
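+// v23Init creates a test context whose principal is blessed as "proxy".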
+func v23Init() (*context.T, func()) {
+	ctx, shutdown := test.InitForTest()
+	ctx, err := v23.WithPrincipal(ctx, testutil.NewPrincipal("proxy"))
+	if err != nil {
+		panic(err)
+	}
+	return ctx, shutdown
+}
+
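+// testAuth authorizes a call if and only if one of the remote blessing names
+// matches an entry in the list.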
+type testAuth []string
+
+func (l testAuth) Authorize(ctx *context.T, call security.Call) error {
+	remote, rejected := security.RemoteBlessingNames(ctx, call)
+	for _, n := range remote {
+		for _, a := range l {
+			if n == a {
+				return nil
+			}
+		}
+	}
+	return fmt.Errorf("%v not in authorized set of %v (rejected: %v)", remote, l, rejected)
+}
diff --git a/runtime/internal/rpc/stream/proxy/testutil_test.go b/runtime/internal/rpc/stream/proxy/testutil_test.go
new file mode 100644
index 0000000..727b8a5
--- /dev/null
+++ b/runtime/internal/rpc/stream/proxy/testutil_test.go
@@ -0,0 +1,28 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy
+
+import (
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/security"
+)
+
+// These internal functions are exposed only for use by the proxy_test package.
+
+func InternalNew(rid naming.RoutingID, ctx *context.T, auth security.Authorizer) (*Proxy, func(), naming.Endpoint, error) {
+	proxy, err := internalNew(rid, ctx, v23.GetListenSpec(ctx), auth)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	return proxy, proxy.shutdown, proxy.endpoint(), nil
+}
+
+func NumProcesses(proxy *Proxy) int {
+	proxy.mu.Lock()
+	defer proxy.mu.Unlock()
+	return len(proxy.processes)
+}
diff --git a/runtime/internal/rpc/stream/proxy/v23_internal_test.go b/runtime/internal/rpc/stream/proxy/v23_internal_test.go
new file mode 100644
index 0000000..84bea54
--- /dev/null
+++ b/runtime/internal/rpc/stream/proxy/v23_internal_test.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated via go generate.
+// DO NOT UPDATE MANUALLY
+package proxy
+
+import "testing"
+import "os"
+
+import "v.io/x/ref/test"
+
+func TestMain(m *testing.M) {
+	test.Init()
+	os.Exit(m.Run())
+}
diff --git a/runtime/internal/rpc/stream/vc/auth.go b/runtime/internal/rpc/stream/vc/auth.go
new file mode 100644
index 0000000..ff9ec89
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/auth.go
@@ -0,0 +1,165 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+import (
+	"bytes"
+	"io"
+
+	"v.io/v23/rpc/version"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+	"v.io/v23/vom"
+
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/crypto"
+)
+
+var (
+	authServerContextTag = []byte("VCauthS\x00")
+	authClientContextTag = []byte("VCauthC\x00")
+)
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errVomEncodeBlessing            = reg(".errVomEncodeBlessing", "failed to encode blessing{:3}")
+	errHandshakeMessage             = reg(".errHandshakeMessage", "failed to read handshake message{:3}")
+	errInvalidSignatureInMessage    = reg(".errInvalidSignatureInMessage", "signature does not verify in authentication handshake message")
+	errFailedToCreateSelfBlessing   = reg(".errFailedToCreateSelfBlessing", "failed to create self blessing{:3}")
+	errNoBlessingsToPresentToServer = reg(".errNoBlessingsToPresentToServer", "no blessings to present as a server")
+)
+
+// AuthenticateAsServer executes the authentication protocol at the server.
+// It returns the blessings shared by the client, and the discharges shared
+// by the server.
+func AuthenticateAsServer(conn io.ReadWriteCloser, principal security.Principal, server security.Blessings, dc DischargeClient, crypter crypto.Crypter, v version.RPCVersion) (security.Blessings, map[string]security.Discharge, error) {
+	if server.IsZero() {
+		return security.Blessings{}, nil, verror.New(stream.ErrSecurity, nil, verror.New(errNoBlessingsToPresentToServer, nil))
+	}
+	var serverDischarges []security.Discharge
+	if tpcavs := server.ThirdPartyCaveats(); len(tpcavs) > 0 && dc != nil {
+		serverDischarges = dc.PrepareDischarges(nil, tpcavs, security.DischargeImpetus{})
+	}
+	if err := writeBlessings(conn, authServerContextTag, crypter, principal, server, serverDischarges, v); err != nil {
+		return security.Blessings{}, nil, err
+	}
+	// Note that since the client uses a self-signed blessing to authenticate
+	// during VC setup, it does not share any discharges.
+	client, _, err := readBlessings(conn, authClientContextTag, crypter, v)
+	if err != nil {
+		return security.Blessings{}, nil, err
+	}
+	return client, mkDischargeMap(serverDischarges), nil
+}
+
+// AuthenticateAsClient executes the authentication protocol at the client.
+// It returns the blessing shared by the server, the blessings shared by the
+// client, and any discharges shared by the server.
+//
+// The client will only share its blessings if the server (who shares its
+// blessings first) is authorized as per the authorizer for this RPC.
+func AuthenticateAsClient(conn io.ReadWriteCloser, crypter crypto.Crypter, params security.CallParams, auth *ServerAuthorizer, v version.RPCVersion) (security.Blessings, security.Blessings, map[string]security.Discharge, error) {
+	server, serverDischarges, err := readBlessings(conn, authServerContextTag, crypter, v)
+	if err != nil {
+		return security.Blessings{}, security.Blessings{}, nil, err
+	}
+	// Authorize the server based on the provided authorizer.
+	if auth != nil {
+		params.RemoteBlessings = server
+		params.RemoteDischarges = serverDischarges
+		if err := auth.Authorize(params); err != nil {
+			return security.Blessings{}, security.Blessings{}, nil, verror.New(stream.ErrNotTrusted, nil, err)
+		}
+	}
+
+	// The client shares its blessings at RPC time (as the blessings may vary
+	// across RPCs). During VC handshake, the client simply sends a self-signed
+	// blessing in order to reveal its public key to the server.
+	principal := params.LocalPrincipal
+	client, err := principal.BlessSelf("vcauth")
+	if err != nil {
+		return security.Blessings{}, security.Blessings{}, nil, verror.New(stream.ErrSecurity, nil, verror.New(errFailedToCreateSelfBlessing, nil, err))
+	}
+	if err := writeBlessings(conn, authClientContextTag, crypter, principal, client, nil, v); err != nil {
+		return security.Blessings{}, security.Blessings{}, nil, err
+	}
+	return server, client, serverDischarges, nil
+}
+
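+// writeBlessings signs the channel binding (prefixed with tag), VOM-encodes
+// the signature, blessings, and discharges, encrypts the result, and writes
+// the ciphertext to w as a VOM-encoded byte slice.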
+func writeBlessings(w io.Writer, tag []byte, crypter crypto.Crypter, p security.Principal, b security.Blessings, discharges []security.Discharge, v version.RPCVersion) error {
+	signature, err := p.Sign(append(tag, crypter.ChannelBinding()...))
+	if err != nil {
+		return err
+	}
+	var buf bytes.Buffer
+	enc := vom.NewEncoder(&buf)
+	if err := enc.Encode(signature); err != nil {
+		return verror.New(stream.ErrNetwork, nil, verror.New(errVomEncodeBlessing, nil, err))
+	}
+	if err := enc.Encode(b); err != nil {
+		return verror.New(stream.ErrNetwork, nil, verror.New(errVomEncodeBlessing, nil, err))
+	}
+	if err := enc.Encode(discharges); err != nil {
+		return verror.New(stream.ErrNetwork, nil, verror.New(errVomEncodeBlessing, nil, err))
+	}
+	msg, err := crypter.Encrypt(iobuf.NewSlice(buf.Bytes()))
+	if err != nil {
+		return err
+	}
+	defer msg.Release()
+	enc = vom.NewEncoder(w)
+	if err := enc.Encode(msg.Contents); err != nil {
+		return verror.New(stream.ErrNetwork, nil, verror.New(errVomEncodeBlessing, nil, err))
+	}
+	return nil
+}
+
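+// readBlessings is the inverse of writeBlessings: it reads and decrypts the
+// handshake message from r, decodes the signature, blessings, and discharges,
+// and verifies the signature against the channel binding.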
+func readBlessings(r io.Reader, tag []byte, crypter crypto.Crypter, v version.RPCVersion) (security.Blessings, map[string]security.Discharge, error) {
+	var msg []byte
+	var noBlessings security.Blessings
+	dec := vom.NewDecoder(r)
+	if err := dec.Decode(&msg); err != nil {
+		return noBlessings, nil, verror.New(stream.ErrNetwork, nil, verror.New(errHandshakeMessage, nil, err))
+	}
+	buf, err := crypter.Decrypt(iobuf.NewSlice(msg))
+	if err != nil {
+		return noBlessings, nil, err
+	}
+	defer buf.Release()
+	dec = vom.NewDecoder(bytes.NewReader(buf.Contents))
+	var (
+		blessings security.Blessings
+		sig       security.Signature
+	)
+	if err = dec.Decode(&sig); err != nil {
+		return noBlessings, nil, verror.New(stream.ErrNetwork, nil, err)
+	}
+	if err = dec.Decode(&blessings); err != nil {
+		return noBlessings, nil, verror.New(stream.ErrNetwork, nil, err)
+	}
+	var discharges []security.Discharge
+	if err := dec.Decode(&discharges); err != nil {
+		return noBlessings, nil, verror.New(stream.ErrNetwork, nil, err)
+	}
+	if !sig.Verify(blessings.PublicKey(), append(tag, crypter.ChannelBinding()...)) {
+		return noBlessings, nil, verror.New(stream.ErrSecurity, nil, verror.New(errInvalidSignatureInMessage, nil))
+	}
+	return blessings, mkDischargeMap(discharges), nil
+}
+
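+// mkDischargeMap indexes the given discharges by their IDs, returning nil
+// for an empty list.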
+func mkDischargeMap(discharges []security.Discharge) map[string]security.Discharge {
+	if len(discharges) == 0 {
+		return nil
+	}
+	m := make(map[string]security.Discharge, len(discharges))
+	for _, d := range discharges {
+		m[d.ID()] = d
+	}
+	return m
+}
diff --git a/runtime/internal/rpc/stream/vc/data_cache.go b/runtime/internal/rpc/stream/vc/data_cache.go
new file mode 100644
index 0000000..6c5b56c
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/data_cache.go
@@ -0,0 +1,65 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+import (
+	"sync"
+)
+
+// dataCache is a thread-safe map from arbitrary keys to arbitrary values.
+type dataCache struct {
+	sync.RWMutex
+	m map[interface{}]interface{}
+}
+
+func newDataCache() *dataCache {
+	return &dataCache{m: make(map[interface{}]interface{})}
+}
+
+// Get returns the value stored under the key.
+func (c *dataCache) Get(key interface{}) interface{} {
+	c.RLock()
+	value := c.m[key]
+	c.RUnlock()
+	return value
+}
+
+// Insert adds the given key and value to the cache if and only if the key is
+// not already present. It returns true if the key-value pair was inserted,
+// and false otherwise.
+func (c *dataCache) Insert(key interface{}, value interface{}) bool {
+	c.Lock()
+	defer c.Unlock()
+	if _, exists := c.m[key]; exists {
+		return false
+	}
+	c.m[key] = value
+	return true
+}
+
+// GetOrInsert first checks whether the key exists in the cache, using a reader
+// lock. If it doesn't exist, it acquires a writer lock, creates and stores the
+// new value using create, and returns that value.
+func (c *dataCache) GetOrInsert(key interface{}, create func() interface{}) interface{} {
+	// We use the read lock for the fast path. This should be the common case,
+	// so we rarely need a writer lock.
+	c.RLock()
+	value, exists := c.m[key]
+	c.RUnlock()
+	if exists {
+		return value
+	}
+	// We acquire the writer lock for the slow path, and must re-check whether
+	// the key exists in the map, since another thread may have snuck in.
+	c.Lock()
+	defer c.Unlock()
+	value, exists = c.m[key]
+	if exists {
+		return value
+	}
+	value = create()
+	c.m[key] = value
+	return value
+}
diff --git a/runtime/internal/rpc/stream/vc/doc.go b/runtime/internal/rpc/stream/vc/doc.go
new file mode 100644
index 0000000..62c34df
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package vc provides implementations of the VC and Flow interfaces in v.io/x/ref/runtime/internal/rpc/stream.
+package vc
diff --git a/runtime/internal/rpc/stream/vc/flow.go b/runtime/internal/rpc/stream/vc/flow.go
new file mode 100644
index 0000000..d5f2d2f
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/flow.go
@@ -0,0 +1,58 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+import (
+	"v.io/v23/naming"
+	"v.io/v23/security"
+
+	"v.io/x/ref/runtime/internal/rpc/stream"
+)
+
+type flow struct {
+	backingVC
+	*reader
+	*writer
+}
+
+type backingVC interface {
+	LocalEndpoint() naming.Endpoint
+	RemoteEndpoint() naming.Endpoint
+
+	LocalPrincipal() security.Principal
+	LocalBlessings() security.Blessings
+	RemoteBlessings() security.Blessings
+	LocalDischarges() map[string]security.Discharge
+	RemoteDischarges() map[string]security.Discharge
+
+	VCDataCache() stream.VCDataCache
+}
+
+func (f *flow) Close() error {
+	f.reader.Close()
+	f.writer.Close()
+	return nil
+}
+
+// SetDeadline sets a deadline channel on the flow.  Reads and writes
+// will be cancelled if the channel is closed.
+func (f *flow) SetDeadline(deadline <-chan struct{}) {
+	f.reader.SetDeadline(deadline)
+	f.writer.SetDeadline(deadline)
+}
+
+// Shutdown closes the flow and discards any queued up write buffers.
+// This is appropriate when the flow has been closed by the remote end.
+func (f *flow) Shutdown() {
+	f.reader.Close()
+	f.writer.shutdown(true)
+}
+
+// Cancel closes the flow and discards any queued up write buffers.
+// This is appropriate when the flow is being cancelled locally.
+func (f *flow) Cancel() {
+	f.reader.Close()
+	f.writer.shutdown(false)
+}
diff --git a/runtime/internal/rpc/stream/vc/knobs.go b/runtime/internal/rpc/stream/vc/knobs.go
new file mode 100644
index 0000000..7271f7a
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/knobs.go
@@ -0,0 +1,35 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+const (
+	// Maximum size (in bytes) of application data to write out in a single message.
+	MaxPayloadSizeBytes = 1 << 16 // 64KB
+
+	// Number of bytes that a receiver is willing to buffer for a flow.
+	DefaultBytesBufferedPerFlow = 1 << 20 // 1MB
+
+	// Maximum number of bytes to steal from the shared pool of receive
+	// buffers for the first write of a new Flow.
+	MaxSharedBytes = 1 << 12 // 4KB
+
+	// Number of VC IDs reserved for special use.
+	NumReservedVCs = 10
+
+	// Number of Flow IDs reserved for possible future use.
+	NumReservedFlows = 10
+
+	// Special Flow ID used for information specific to the VC
+	// (and not any specific flow)
+	SharedFlowID = 0
+
+	// Special flow used for authenticating between VCs.
+	AuthFlowID = 2
+	// Special flow used for interchanging of VOM types between VCs.
+	TypeFlowID = 3
+	// Special flow over which discharges for third-party caveats
+	// on the server's blessings are sent.
+	DischargeFlowID = 4
+)
diff --git a/runtime/internal/rpc/stream/vc/listener.go b/runtime/internal/rpc/stream/vc/listener.go
new file mode 100644
index 0000000..72479ac
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/listener.go
@@ -0,0 +1,53 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+import (
+	"v.io/v23/verror"
+
+	"v.io/x/ref/runtime/internal/lib/upcqueue"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+)
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errListenerClosed = reg(".errListenerClosed", "Listener has been closed")
+	errGetFromQueue   = reg(".errGetFromQueue", "upcqueue.Get failed{:3}")
+)
+
+type listener struct {
+	q *upcqueue.T
+}
+
+var _ stream.Listener = (*listener)(nil)
+
+func newListener() *listener { return &listener{q: upcqueue.New()} }
+
+func (l *listener) Enqueue(f stream.Flow) error {
+	err := l.q.Put(f)
+	if err == upcqueue.ErrQueueIsClosed {
+		return verror.New(stream.ErrBadState, nil, verror.New(errListenerClosed, nil))
+	}
+	return err
+}
+
+func (l *listener) Accept() (stream.Flow, error) {
+	item, err := l.q.Get(nil)
+	if err == upcqueue.ErrQueueIsClosed {
+		return nil, verror.New(stream.ErrBadState, nil, verror.New(errListenerClosed, nil))
+	}
+	if err != nil {
+		return nil, verror.New(stream.ErrNetwork, nil, verror.New(errGetFromQueue, nil, err))
+	}
+	return item.(stream.Flow), nil
+}
+
+func (l *listener) Close() error {
+	l.q.Close()
+	return nil
+}
diff --git a/runtime/internal/rpc/stream/vc/listener_test.go b/runtime/internal/rpc/stream/vc/listener_test.go
new file mode 100644
index 0000000..4dbf4ad
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/listener_test.go
@@ -0,0 +1,68 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+import (
+	"strings"
+	"testing"
+
+	"v.io/v23/naming"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+
+	"v.io/x/ref/runtime/internal/rpc/stream"
+)
+
+type noopFlow struct{}
+
+// net.Conn methods
+func (*noopFlow) Read([]byte) (int, error)        { return 0, nil }
+func (*noopFlow) Write([]byte) (int, error)       { return 0, nil }
+func (*noopFlow) Close() error                    { return nil }
+func (*noopFlow) IsClosed() bool                  { return false }
+func (*noopFlow) Closed() <-chan struct{}         { return nil }
+func (*noopFlow) Cancel()                         {}
+func (*noopFlow) LocalEndpoint() naming.Endpoint  { return nil }
+func (*noopFlow) RemoteEndpoint() naming.Endpoint { return nil }
+
+// Other stream.Flow methods
+func (*noopFlow) LocalPrincipal() security.Principal              { return nil }
+func (*noopFlow) LocalBlessings() security.Blessings              { return security.Blessings{} }
+func (*noopFlow) RemoteBlessings() security.Blessings             { return security.Blessings{} }
+func (*noopFlow) LocalDischarges() map[string]security.Discharge  { return nil }
+func (*noopFlow) RemoteDischarges() map[string]security.Discharge { return nil }
+func (*noopFlow) SetDeadline(<-chan struct{})                     {}
+func (*noopFlow) VCDataCache() stream.VCDataCache                 { return nil }
+
+func TestListener(t *testing.T) {
+	ln := newListener()
+	f1, f2 := &noopFlow{}, &noopFlow{}
+
+	if err := ln.Enqueue(f1); err != nil {
+		t.Error(err)
+	}
+	if err := ln.Enqueue(f2); err != nil {
+		t.Error(err)
+	}
+	if f, err := ln.Accept(); f != f1 || err != nil {
+		t.Errorf("Got (%v, %v) want (%v, nil)", f, err, f1)
+	}
+	if f, err := ln.Accept(); f != f2 || err != nil {
+		t.Errorf("Got (%v, %v) want (%v, nil)", f, err, f2)
+	}
+	if err := ln.Close(); err != nil {
+		t.Error(err)
+	}
+	// Close-ing multiple times is fine.
+	if err := ln.Close(); err != nil {
+		t.Error(err)
+	}
+	if err := ln.Enqueue(f1); verror.ErrorID(err) != stream.ErrBadState.ID || !strings.Contains(err.Error(), "closed") {
+		t.Error(err)
+	}
+	if f, err := ln.Accept(); f != nil || verror.ErrorID(err) != stream.ErrBadState.ID || !strings.Contains(err.Error(), "closed") {
+		t.Errorf("Accept returned (%v, %v) wanted (nil, %v)", f, err, errListenerClosed)
+	}
+}
diff --git a/runtime/internal/rpc/stream/vc/reader.go b/runtime/internal/rpc/stream/vc/reader.go
new file mode 100644
index 0000000..80f1c9b
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/reader.go
@@ -0,0 +1,115 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+import (
+	"io"
+	"sync"
+	"sync/atomic"
+
+	"v.io/v23/verror"
+
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	vsync "v.io/x/ref/runtime/internal/lib/sync"
+	"v.io/x/ref/runtime/internal/lib/upcqueue"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+)
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errGetFailed = reg(".errGetFailed", "upcqueue.Get failed:{:3}")
+)
+
+// readHandler is the interface used by the reader to notify other components
+// of the number of bytes returned in Read calls.
+type readHandler interface {
+	HandleRead(bytes uint)
+}
+
+// reader implements the io.Reader and SetReadDeadline interfaces for a Flow,
+// backed by iobuf.Slice objects read from a upcqueue.
+type reader struct {
+	handler    readHandler
+	src        *upcqueue.T
+	mu         sync.Mutex
+	buf        *iobuf.Slice    // GUARDED_BY(mu)
+	deadline   <-chan struct{} // GUARDED_BY(mu)
+	totalBytes uint32
+}
+
+func newReader(h readHandler) *reader {
+	return &reader{handler: h, src: upcqueue.New()}
+}
+
+func (r *reader) Close() {
+	r.src.Close()
+}
+
+func (r *reader) Read(b []byte) (int, error) {
+	// net.Conn requires that all methods be invokable by multiple
+	// goroutines simultaneously. Read calls are serialized to ensure
+	// contiguous chunks of data are provided from each Read call.
+	r.mu.Lock()
+	n, err := r.readLocked(b)
+	r.mu.Unlock()
+	atomic.AddUint32(&r.totalBytes, uint32(n))
+	if n > 0 {
+		r.handler.HandleRead(uint(n))
+	}
+	return n, err
+}
+
+func (r *reader) readLocked(b []byte) (int, error) {
+	if r.buf == nil {
+		slice, err := r.src.Get(r.deadline)
+		if err != nil {
+			switch err {
+			case upcqueue.ErrQueueIsClosed:
+				return 0, io.EOF
+			case vsync.ErrCanceled:
+				// As per net.Conn.Read specification
+				return 0, stream.NewNetError(verror.New(stream.ErrNetwork, nil, verror.New(errCanceled, nil)), true, false)
+			default:
+				return 0, verror.New(stream.ErrNetwork, nil, verror.New(errGetFailed, nil, err))
+			}
+		}
+		r.buf = slice.(*iobuf.Slice)
+	}
+	copied := 0
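+	// Consume whole queued slices without blocking for as long as they fit
+	// entirely within b.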
+	for r.buf.Size() <= len(b) {
+		n := copy(b, r.buf.Contents)
+		copied += n
+		b = b[n:]
+		r.buf.Release()
+		r.buf = nil
+
+		slice, err := r.src.TryGet()
+		if err != nil {
+			return copied, nil
+		}
+		r.buf = slice.(*iobuf.Slice)
+	}
+	n := copy(b, r.buf.Contents)
+	r.buf.TruncateFront(uint(n))
+	copied += n
+	return copied, nil
+}
+
+func (r *reader) SetDeadline(deadline <-chan struct{}) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.deadline = deadline
+}
+
+func (r *reader) BytesRead() uint32 {
+	return atomic.LoadUint32(&r.totalBytes)
+}
+
+func (r *reader) Put(slice *iobuf.Slice) error {
+	return r.src.Put(slice)
+}
diff --git a/runtime/internal/rpc/stream/vc/reader_test.go b/runtime/internal/rpc/stream/vc/reader_test.go
new file mode 100644
index 0000000..7fac16f
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/reader_test.go
@@ -0,0 +1,112 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+import (
+	"io"
+	"net"
+	"reflect"
+	"testing"
+	"testing/quick"
+
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+)
+
+type testReadHandler struct{ items []uint }
+
+func (t *testReadHandler) HandleRead(bytes uint) {
+	t.items = append(t.items, bytes)
+}
+
+func TestRead(t *testing.T) {
+	l := &testReadHandler{}
+	r := newReader(l)
+	input := []byte("abcdefghijklmnopqrstuvwxyzABCDE") // 31 bytes total
+	start := 0
+	// Produce data to read, adding elements to the underlying upcqueue
+	// with a geometric progression of 2.
+	for n := 1; start < len(input); n *= 2 {
+		if err := r.Put(iobuf.NewSlice(input[start : start+n])); err != nil {
+			t.Fatalf("Put(start=%d, n=%d) failed: %v", start, n, err)
+		}
+		start = start + n
+	}
+
+	var output [31]byte
+	start = 0
+	// Read with geometric progression of 1/2.
+	for n := 16; start < len(output); n /= 2 {
+		if m, err := r.Read(output[start : start+n]); err != nil || m != n {
+			t.Errorf("Read returned (%d, %v) want (%d, nil)", m, err, n)
+		}
+		if m := l.items[len(l.items)-1]; m != uint(n) {
+			t.Errorf("Read notified %d but should have notified %d bytes", m, n)
+		}
+		start = start + n
+	}
+	if got, want := string(output[:]), string(input); got != want {
+		t.Errorf("Got %q want %q", got, want)
+	}
+
+	r.Close()
+	if n, err := r.Read(output[:]); n != 0 || err != io.EOF {
+		t.Errorf("Got (%d, %v) want (0, EOF)", n, err)
+	}
+}
+
+func TestReadRandom(t *testing.T) {
+	f := func(data [][]byte) bool {
+		r := newReader(&testReadHandler{})
+		// Use an empty slice (as opposed to a nil-slice) so that the
+		// reflect.DeepEqual call below succeeds when data is
+		// [][]byte{}.
+		written := make([]byte, 0)
+		for _, d := range data {
+			if err := r.Put(iobuf.NewSlice(d)); err != nil {
+				t.Error(err)
+				return false
+			}
+			written = append(written, d...)
+		}
+		read := make([]byte, len(written))
+		buf := read
+		r.Close()
+		for {
+			n, err := r.Read(buf)
+			if err == io.EOF {
+				break
+			}
+			if err != nil {
+				t.Error(err)
+				return false
+			}
+			buf = buf[n:]
+		}
+		return reflect.DeepEqual(written, read) && int(r.BytesRead()) == len(written)
+	}
+	if err := quick.Check(f, nil); err != nil {
+		t.Error(err)
+	}
+}
+
+func TestReadDeadline(t *testing.T) {
+	l := &testReadHandler{}
+	r := newReader(l)
+	defer r.Close()
+
+	deadline := make(chan struct{})
+	r.SetDeadline(deadline)
+	close(deadline)
+
+	var buf [1]byte
+	n, err := r.Read(buf[:])
+	neterr, ok := err.(net.Error)
+	if n != 0 || err == nil || !ok || !neterr.Timeout() {
+		t.Errorf("Expected read to fail with net.Error.Timeout, got (%d, %v)", n, err)
+	}
+	if len(l.items) != 0 {
+		t.Errorf("Expected no reads, but notified of reads: %v", l.items)
+	}
+}
diff --git a/runtime/internal/rpc/stream/vc/v23_internal_test.go b/runtime/internal/rpc/stream/vc/v23_internal_test.go
new file mode 100644
index 0000000..945d8c4
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/v23_internal_test.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated via go generate.
+// DO NOT UPDATE MANUALLY
+package vc
+
+import "testing"
+import "os"
+
+import "v.io/x/ref/test"
+
+func TestMain(m *testing.M) {
+	test.Init()
+	os.Exit(m.Run())
+}
diff --git a/runtime/internal/rpc/stream/vc/vc.go b/runtime/internal/rpc/stream/vc/vc.go
new file mode 100644
index 0000000..06f2c58
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/vc.go
@@ -0,0 +1,1006 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+// Logging guidelines:
+// Verbosity level 1 is for per-VC messages.
+// Verbosity level 2 is for per-Flow messages.
+
+import (
+	"fmt"
+	"io"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/rpc/version"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+	"v.io/v23/vom"
+
+	"v.io/x/lib/vlog"
+	"v.io/x/ref/runtime/internal/lib/bqueue"
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	vsync "v.io/x/ref/runtime/internal/lib/sync"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/crypto"
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+)
+
+const pkgPath = "v.io/x/ref/runtime/internal/rpc/stream/vc"
+
+func reg(id, msg string) verror.IDAction {
+	return verror.Register(verror.ID(pkgPath+id), verror.NoRetry, msg)
+}
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errAlreadyListening               = reg(".errAlreadyListening", "Listen has already been called")
+	errDuplicateFlow                  = reg(".errDuplicateFlow", "duplicate OpenFlow message")
+	errUnrecognizedFlow               = reg(".errUnrecognizedFlow", "unrecognized flow")
+	errFailedToCreateWriterForFlow    = reg(".errFailedToCreateWriterForFlow", "failed to create writer for Flow{:3}")
+	errConnectOnClosedVC              = reg(".errConnectOnClosedVC", "connect on closed VC{:3}")
+	errHandshakeNotInProgress         = reg(".errHandshakeNotInProgress", "Attempted to finish a VC handshake, but no handshake was in progress{:3}")
+	errClosedDuringHandshake          = reg(".errClosedDuringHandshake", "VC closed during handshake{:3}")
+	errFailedToDecryptPayload         = reg(".errFailedToDecryptPayload", "failed to decrypt payload{:3}")
+	errIgnoringMessageOnClosedVC      = reg(".errIgnoringMessageOnClosedVC", "ignoring message for Flow {3} on closed VC {4}")
+	errFailedToCreateFlowForAuth      = reg(".errFailedToCreateFlowForAuth", "failed to create a Flow for authentication{:3}")
+	errAuthFlowNotAccepted            = reg(".errAuthFlowNotAccepted", "authentication Flow not accepted{:3}")
+	errFailedToCreateFlowForWireType  = reg(".errFailedToCreateFlowForWireType", "failed to create a Flow for wire type{:3}")
+	errFlowForWireTypeNotAccepted     = reg(".errFlowForWireTypeNotAccepted", "Flow for wire type not accepted{:3}")
+	errFailedToCreateFlowForDischarge = reg(".errFailedToCreateFlowForDischarge", "failed to create a Flow for discharge{:3}")
+	errFlowForDischargeNotAccepted    = reg(".errFlowForDischargeNotAccepted", "Flow for discharge not accepted{:3}")
+	errFailedToSetupEncryption        = reg(".errFailedToSetupEncryption", "failed to setup channel encryption{:3}")
+	errAuthFailed                     = reg(".errAuthFailed", "authentication failed{:3}")
+	errNoActiveListener               = reg(".errNoActiveListener", "no active listener on VCI {3}")
+	errFailedToCreateWriterForNewFlow = reg(".errFailedToCreateWriterForNewFlow", "failed to create writer for new flow({3}){:4}")
+	errFailedToEnqueueFlow            = reg(".errFailedToEnqueueFlow", "failed to enqueue flow at listener{:3}")
+	errFailedToAcceptSystemFlows      = reg(".errFailedToAcceptSystemFlows", "failed to accept system flows{:3}")
+)
+
+// DischargeExpiryBuffer specifies how long before their expiration discharges
+// should be refreshed; that is, discharges will be refreshed
+// DischargeExpiryBuffer before they expire.
+type DischargeExpiryBuffer time.Duration
+
+func (DischargeExpiryBuffer) RPCStreamListenerOpt() {}
+func (DischargeExpiryBuffer) RPCServerOpt() {
+	defer vlog.LogCall()() // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+}
+
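+// DefaultServerDischargeExpiryBuffer is the default DischargeExpiryBuffer
+// value used by servers.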
+const DefaultServerDischargeExpiryBuffer = 20 * time.Second
+
+// DataCache Keys for TypeEncoder/Decoder.
+type TypeEncoderKey struct{}
+type TypeDecoderKey struct{}
+
+// VC implements the stream.VC interface and exports additional methods to
+// manage Flows.
+//
+// stream.Flow objects created by this stream.VC implementation use a buffer
+// queue (v.io/x/ref/runtime/internal/lib/bqueue) to provide flow control on Write
+// operations.
+type VC struct {
+	vci                             id.VC
+	localEP, remoteEP               naming.Endpoint
+	localPrincipal                  security.Principal
+	localBlessings, remoteBlessings security.Blessings
+	localDischarges                 map[string]security.Discharge // Discharges shared by the local end of the VC.
+	remoteDischarges                map[string]security.Discharge // Discharges shared by the remote end of the VC.
+
+	pool           *iobuf.Pool
+	reserveBytes   uint
+	sharedCounters *vsync.Semaphore
+
+	mu                  sync.Mutex
+	flowMap             map[id.Flow]*flow // nil iff the VC is closed.
+	acceptHandshakeDone chan struct{}     // non-nil when HandshakeAcceptedVC begins the handshake, closed when handshake completes.
+	nextConnectFID      id.Flow
+	listener            *listener // non-nil iff Listen has been called and the VC has not been closed.
+	crypter             crypto.Crypter
+	closeReason         error // reason why the VC was closed, possibly nil
+	closeCh             chan struct{}
+	closed              bool
+	version             version.RPCVersion
+	remotePubKeyChan    chan *crypto.BoxKey // channel which will receive the remote public key during setup.
+
+	helper    Helper
+	dataCache *dataCache // dataCache contains information that can be shared between Flows from this VC.
+}
+
+// ServerAuthorizer encapsulates the policy used to authorize servers during VC
+// establishment.
+//
+// A client will first authorize a server before revealing any of its credentials
+// (public key, blessings etc.) to the server. Thus, if the authorization policy
+// calls for the server to be rejected, then the client will not have revealed
+// any of its credentials to the server.
+//
+// ServerAuthorizer in turn uses an authorization policy (security.Authorizer),
+// with the context matching the context of the RPC that caused the initiation
+// of the VC.
+type ServerAuthorizer struct {
+	Suffix, Method string
+	Policy         security.Authorizer
+}
+
+func (a *ServerAuthorizer) RPCStreamVCOpt() {}
+func (a *ServerAuthorizer) Authorize(params security.CallParams) error {
+	params.Suffix = a.Suffix
+	params.Method = a.Method
+	ctx, cancel := context.RootContext()
+	defer cancel()
+	return a.Policy.Authorize(ctx, security.NewCall(&params))
+}
+
+// DialContext establishes the context under which a VC Dial was initiated.
+type DialContext struct{ *context.T }
+
+func (DialContext) RPCStreamVCOpt()       {}
+func (DialContext) RPCStreamListenerOpt() {}
+
+// StartTimeout specifies the time after which the underlying VIF is closed
+// if no VC is opened.
+type StartTimeout struct{ time.Duration }
+
+func (StartTimeout) RPCStreamVCOpt()       {}
+func (StartTimeout) RPCStreamListenerOpt() {}
+
+// IdleTimeout specifies the time after which an idle VC is closed.
+type IdleTimeout struct{ time.Duration }
+
+func (IdleTimeout) RPCStreamVCOpt()       {}
+func (IdleTimeout) RPCStreamListenerOpt() {}
+
+var _ stream.VC = (*VC)(nil)
+
+// Helper is the interface for functionality required by the stream.VC
+// implementation in this package.
+type Helper interface {
+	// NotifyOfNewFlow notifies the remote end of a VC that the caller intends to
+	// establish a new flow to it and that the caller is ready to receive 'bytes'
+	// bytes of data from the remote end.
+	NotifyOfNewFlow(vci id.VC, fid id.Flow, bytes uint)
+
+	// AddReceiveBuffers notifies the remote end of a VC that it is ready to
+	// receive 'bytes' more bytes of data on the flow identified by fid over the
+	// VC identified by vci.
+	//
+	// Unlike NotifyOfNewFlow, this call does not let the remote end know of the
+	// intent to establish a new flow.
+	AddReceiveBuffers(vci id.VC, fid id.Flow, bytes uint)
+
+	// NewWriter creates a buffer queue for Write operations on the
+	// stream.Flow implementation.
+	NewWriter(vci id.VC, fid id.Flow, priority bqueue.Priority) (bqueue.Writer, error)
+}
+
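+// A minimal Helper sketch for in-process tests (the test file in this package
+// implements a fuller version; the names below are illustrative):
+//
+//	type nopHelper struct{ bq bqueue.T }
+//
+//	func (nopHelper) NotifyOfNewFlow(id.VC, id.Flow, uint)   {}
+//	func (nopHelper) AddReceiveBuffers(id.VC, id.Flow, uint) {}
+//	func (h nopHelper) NewWriter(vci id.VC, fid id.Flow, p bqueue.Priority) (bqueue.Writer, error) {
+//		return h.bq.NewWriter(bqueue.ID(fid), p, DefaultBytesBufferedPerFlow)
+//	}
+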
+// Priorities of flows.
+const (
+	systemFlowPriority bqueue.Priority = iota
+	normalFlowPriority
+
+	NumFlowPriorities
+)
+
+// DischargeClient is an interface for obtaining discharges for a set of third-party
+// caveats.
+//
+// TODO(ataly, ashankar): What should be the impetus for obtaining the discharges?
+type DischargeClient interface {
+	PrepareDischarges(ctx *context.T, forcaveats []security.Caveat, impetus security.DischargeImpetus) []security.Discharge
+	// Invalidate marks the provided discharges as invalid, and therefore unfit
+	// for being returned by a subsequent PrepareDischarges call.
+	Invalidate(discharges ...security.Discharge)
+	RPCStreamListenerOpt()
+}
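+
+// A trivial fixed-set implementation sketch (the tests in this package use a
+// similar mock; the name below is illustrative):
+//
+//	type fixedDischarges []security.Discharge
+//
+//	func (d fixedDischarges) PrepareDischarges(*context.T, []security.Caveat, security.DischargeImpetus) []security.Discharge {
+//		return d
+//	}
+//	func (fixedDischarges) Invalidate(...security.Discharge) {}
+//	func (fixedDischarges) RPCStreamListenerOpt()            {}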
+
+// Params encapsulates the set of parameters needed to create a new VC.
+type Params struct {
+	VCI          id.VC           // Identifier of the VC
+	Dialed       bool            // True if the VC was initiated by the local process.
+	LocalEP      naming.Endpoint // Endpoint of the local end of the VC.
+	RemoteEP     naming.Endpoint // Endpoint of the remote end of the VC.
+	Pool         *iobuf.Pool     // Byte pool used for read and write buffer allocations.
+	ReserveBytes uint            // Number of padding bytes to reserve for headers.
+	Helper       Helper
+}
+
+// InternalNew creates a new VC, which implements the stream.VC interface.
+//
+// As the name suggests, this method is intended for use only within packages
+// placed inside v.io/x/ref/runtime/internal. Code outside the
+// v.io/x/ref/runtime/internal/* packages should never call this method.
+func InternalNew(p Params) *VC {
+	fidOffset := 1
+	if p.Dialed {
+		fidOffset = 0
+	}
+	return &VC{
+		vci:            p.VCI,
+		localEP:        p.LocalEP,
+		remoteEP:       p.RemoteEP,
+		pool:           p.Pool,
+		reserveBytes:   p.ReserveBytes,
+		sharedCounters: vsync.NewSemaphore(),
+		flowMap:        make(map[id.Flow]*flow),
+		// Reserve flow IDs 0 through NumReservedFlows-1 for
+		// possible future use.
+		// Furthermore, flows created by Connect have an even
+		// id if the VC was initiated by the local process,
+		// and have an odd id if the VC was initiated by the
+		// remote process.
+		nextConnectFID: id.Flow(NumReservedFlows + fidOffset),
+		crypter:        crypto.NewNullCrypter(),
+		closeCh:        make(chan struct{}),
+		helper:         p.Helper,
+		dataCache:      newDataCache(),
+	}
+}
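+
+// A dialed-side construction sketch (the endpoints and helper are assumed to
+// be in scope; see the tests in this package for a complete wiring):
+//
+//	v := vc.InternalNew(vc.Params{
+//		VCI:      id.VC(1),
+//		Dialed:   true,
+//		LocalEP:  localEP,
+//		RemoteEP: remoteEP,
+//		Pool:     iobuf.NewPool(0),
+//		Helper:   helper,
+//	})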
+
+// Connect implements the stream.Connector.Connect method.
+func (vc *VC) Connect(opts ...stream.FlowOpt) (stream.Flow, error) {
+	return vc.connectFID(vc.allocFID(), normalFlowPriority, opts...)
+}
+
+func (vc *VC) Version() version.RPCVersion {
+	vc.mu.Lock()
+	defer vc.mu.Unlock()
+	return vc.version
+}
+
+func (vc *VC) connectFID(fid id.Flow, priority bqueue.Priority, opts ...stream.FlowOpt) (stream.Flow, error) {
+	writer, err := vc.newWriter(fid, priority)
+	if err != nil {
+		return nil, verror.New(stream.ErrNetwork, nil, verror.New(errFailedToCreateWriterForFlow, nil, err))
+	}
+	f := &flow{
+		backingVC: vc,
+		reader:    newReader(readHandlerImpl{vc, fid}),
+		writer:    writer,
+	}
+	vc.mu.Lock()
+	if vc.flowMap == nil {
+		vc.mu.Unlock()
+		f.Shutdown()
+		return nil, verror.New(stream.ErrNetwork, nil, verror.New(errConnectOnClosedVC, nil, vc.closeReason))
+	}
+	vc.flowMap[fid] = f
+	vc.mu.Unlock()
+	// New flow created, inform remote end that data can be received on it.
+	vc.helper.NotifyOfNewFlow(vc.vci, fid, DefaultBytesBufferedPerFlow)
+	return f, nil
+}
+
+// Listen implements the stream.VC.Listen method.
+func (vc *VC) Listen() (stream.Listener, error) {
+	vc.mu.Lock()
+	defer vc.mu.Unlock()
+	if vc.listener != nil {
+		return nil, verror.New(stream.ErrBadState, nil, verror.New(errAlreadyListening, nil))
+	}
+	vc.listener = newListener()
+	return vc.listener, nil
+}
+
+// DispatchPayload makes payload.Contents available to Read operations on the
+// Flow identified by fid.
+//
+// Assumes ownership of payload, i.e., payload should not be used by the caller
+// after this method returns (irrespective of the return value).
+func (vc *VC) DispatchPayload(fid id.Flow, payload *iobuf.Slice) error {
+	if payload.Size() == 0 {
+		payload.Release()
+		return nil
+	}
+	vc.mu.Lock()
+	if vc.flowMap == nil {
+		vc.mu.Unlock()
+		payload.Release()
+		return verror.New(stream.ErrNetwork, nil, verror.New(errIgnoringMessageOnClosedVC, nil, fid, vc.VCI()))
+	}
+	// The auth flow encrypts/decrypts its own payloads, so we skip
+	// VC-level decryption for it.
+	if fid != AuthFlowID {
+		vc.waitForHandshakeLocked()
+		var err error
+		if payload, err = vc.crypter.Decrypt(payload); err != nil {
+			vc.mu.Unlock()
+			return verror.New(stream.ErrSecurity, nil, verror.New(errFailedToDecryptPayload, nil, err))
+		}
+	}
+	if payload.Size() == 0 {
+		vc.mu.Unlock()
+		payload.Release()
+		return nil
+	}
+	f := vc.flowMap[fid]
+	if f == nil {
+		vc.mu.Unlock()
+		payload.Release()
+		return verror.New(stream.ErrNetwork, nil, verror.New(errDuplicateFlow, nil))
+	}
+	vc.mu.Unlock()
+	if err := f.reader.Put(payload); err != nil {
+		payload.Release()
+		return verror.New(stream.ErrNetwork, nil, err)
+	}
+	return nil
+}
+
+// AcceptFlow enqueues a new Flow for acceptance by the listener on the VC.
+// Returns an error if the VC is not accepting flows initiated by the remote
+// end.
+func (vc *VC) AcceptFlow(fid id.Flow) error {
+	vc.mu.Lock()
+	defer vc.mu.Unlock()
+	if vc.listener == nil {
+		return verror.New(stream.ErrBadState, nil, vc.vci)
+	}
+	if _, exists := vc.flowMap[fid]; exists {
+		return verror.New(stream.ErrNetwork, nil, verror.New(errDuplicateFlow, nil))
+	}
+	priority := normalFlowPriority
+	// We use the same high priority for all reserved flows, including the
+	// handshake and authentication flows. Otherwise, a client might open a new
+	// system flow before authentication finishes on the server side, leaving
+	// vc.DispatchPayload() stuck waiting for authentication to complete.
+	if fid < NumReservedFlows {
+		priority = systemFlowPriority
+	}
+	writer, err := vc.newWriter(fid, priority)
+	if err != nil {
+		return verror.New(stream.ErrNetwork, nil, verror.New(errFailedToCreateWriterForNewFlow, nil, fid, err))
+	}
+	f := &flow{
+		backingVC: vc,
+		reader:    newReader(readHandlerImpl{vc, fid}),
+		writer:    writer,
+	}
+	if err = vc.listener.Enqueue(f); err != nil {
+		f.Shutdown()
+		return verror.New(stream.ErrNetwork, nil, verror.New(errFailedToEnqueueFlow, nil, err))
+	}
+	vc.flowMap[fid] = f
+	// New flow accepted, notify remote end that it can send over data.
+	// Do it in a goroutine in case the implementation of AddReceiveBuffers
+	// ends up attempting to lock vc.mu
+	go vc.helper.AddReceiveBuffers(vc.vci, fid, DefaultBytesBufferedPerFlow)
+	vlog.VI(2).Infof("Added flow %d@%d to listener", fid, vc.vci)
+	return nil
+}
+
+// ShutdownFlow closes the Flow identified by fid and discards any pending
+// writes.
+func (vc *VC) ShutdownFlow(fid id.Flow) {
+	vc.mu.Lock()
+	f := vc.flowMap[fid]
+	if f == nil {
+		vc.mu.Unlock()
+		return
+	}
+	delete(vc.flowMap, fid)
+	vc.mu.Unlock()
+	f.Shutdown()
+	vlog.VI(2).Infof("Shutdown flow %d@%d", fid, vc.vci)
+}
+
+// ReleaseCounters informs the Flow (identified by fid) that the remote end is
+// ready to receive up to 'bytes' more bytes of data.
+func (vc *VC) ReleaseCounters(fid id.Flow, bytes uint32) {
+	if fid == SharedFlowID {
+		vc.sharedCounters.IncN(uint(bytes))
+		return
+	}
+	var f *flow
+	vc.mu.Lock()
+	if vc.flowMap != nil {
+		f = vc.flowMap[fid]
+	}
+	vc.mu.Unlock()
+	if f == nil {
+		vlog.VI(2).Infof("Ignoring ReleaseCounters(%d, %d) on VCI %d as the flow does not exist", fid, bytes, vc.vci)
+		return
+	}
+	f.Release(int(bytes))
+}
+
+func (vc *VC) Close(reason error) error {
+	vlog.VI(1).Infof("Closing VC %v. Reason:%q", vc, reason)
+	vc.mu.Lock()
+	if vc.closed {
+		vc.mu.Unlock()
+		return nil
+	}
+	flows := vc.flowMap
+	vc.flowMap = nil
+	if vc.listener != nil {
+		vc.listener.Close()
+		vc.listener = nil
+	}
+	vc.closeReason = reason
+	vc.closed = true
+	close(vc.closeCh)
+	vc.mu.Unlock()
+
+	vc.sharedCounters.Close()
+	for fid, flow := range flows {
+		vlog.VI(2).Infof("Closing flow %d on VC %v as VC is being closed(%q)", fid, vc, reason)
+		flow.Close()
+	}
+	return nil
+}
+
+// appendCloseReason adds a closeReason, if any, as a sub error to err.
+func (vc *VC) appendCloseReason(err error) error {
+	vc.mu.Lock()
+	defer vc.mu.Unlock()
+	if vc.closeReason != nil {
+		return verror.AddSubErrs(err, nil, verror.SubErr{
+			Name:    "remote=" + vc.RemoteEndpoint().String(),
+			Err:     vc.closeReason,
+			Options: verror.Print,
+		})
+	}
+	return err
+}
+
+// FinishHandshakeDialedVC should be called from another goroutine
+// after HandshakeDialedVC is called, when version/encryption
+// negotiation is complete.
+func (vc *VC) FinishHandshakeDialedVC(vers version.RPCVersion, remotePubKey *crypto.BoxKey) error {
+	vc.mu.Lock()
+	defer vc.mu.Unlock()
+	if vc.remotePubKeyChan == nil {
+		return verror.New(errHandshakeNotInProgress, nil, vc.VCI())
+	}
+	vc.remotePubKeyChan <- remotePubKey
+	vc.remotePubKeyChan = nil
+	vc.version = vers
+	return nil
+}
+
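+// The two halves pair up roughly as follows on the dialing side (sketch; the
+// reader goroutine stands for whatever delivers the peer's setup response):
+//
+//	go func() {
+//		// ... read the peer's setup message, yielding vers and theirKey ...
+//		if err := v.FinishHandshakeDialedVC(vers, theirKey); err != nil {
+//			// handle the error
+//		}
+//	}()
+//	err := v.HandshakeDialedVC(principal, sendKey) // blocks until the above runs
+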
+// HandshakeDialedVC completes initialization of the VC (setting up encryption,
+// authentication etc.) under the assumption that the VC was initiated by the
+// local process (i.e., the local process "Dial"ed to create the VC).
+// HandshakeDialedVC will not return until FinishHandshakeDialedVC is called
+// from another goroutine.
+// TODO(mattr): vers can be removed as a parameter when we get rid of TLS and
+// the version is always obtained via the Setup call.
+func (vc *VC) HandshakeDialedVC(principal security.Principal, sendKey func(*crypto.BoxKey) error, opts ...stream.VCOpt) error {
+	remotePubKeyChan := make(chan *crypto.BoxKey, 1)
+	vc.mu.Lock()
+	vc.remotePubKeyChan = remotePubKeyChan
+	vc.mu.Unlock()
+
+	// principal == nil means that we are running in SecurityNone and we don't need
+	// to authenticate the VC.  We still need to negotiate versioning information,
+	// so we still set remotePubKeyChan and call sendKey, though we don't care about
+	// the resulting key.
+	if principal == nil {
+		if err := sendKey(nil); err != nil {
+			return err
+		}
+		// TODO(mattr): Error on some timeout so a non-responding server doesn't
+		// cause this to hang forever.
+		select {
+		case <-remotePubKeyChan:
+		case <-vc.closeCh:
+			return verror.New(stream.ErrNetwork, nil, verror.New(errClosedDuringHandshake, nil, vc.VCI()))
+		}
+		return nil
+	}
+
+	var auth *ServerAuthorizer
+	for _, o := range opts {
+		switch v := o.(type) {
+		case *ServerAuthorizer:
+			auth = v
+		}
+	}
+
+	exchange := func(pubKey *crypto.BoxKey) (*crypto.BoxKey, error) {
+		if err := sendKey(pubKey); err != nil {
+			return nil, err
+		}
+		// TODO(mattr): Error on some timeout so a non-responding server doesn't
+		// cause this to hang forever.
+		select {
+		case theirKey := <-remotePubKeyChan:
+			return theirKey, nil
+		case <-vc.closeCh:
+			return nil, verror.New(stream.ErrNetwork, nil, verror.New(errClosedDuringHandshake, nil, vc.VCI()))
+		}
+	}
+	crypter, err := crypto.NewBoxCrypter(exchange, vc.pool)
+	if err != nil {
+		return vc.appendCloseReason(verror.New(stream.ErrSecurity, nil,
+			verror.New(errFailedToSetupEncryption, nil, err)))
+	}
+	// The version is set by FinishHandshakeDialedVC, and exchange (called by
+	// NewBoxCrypter) blocks until FinishHandshakeDialedVC has been called, so
+	// it is safe to look up the version now.
+	vers := vc.Version()
+
+	// Authenticate (exchange identities)
+	authConn, err := vc.connectFID(AuthFlowID, systemFlowPriority)
+	if err != nil {
+		return vc.appendCloseReason(verror.New(stream.ErrSecurity, nil, verror.New(errFailedToCreateFlowForAuth, nil, err)))
+	}
+	params := security.CallParams{
+		LocalPrincipal: principal,
+		LocalEndpoint:  vc.localEP,
+		RemoteEndpoint: vc.remoteEP,
+	}
+
+	rBlessings, lBlessings, rDischarges, err := AuthenticateAsClient(authConn, crypter, params, auth, vers)
+	if err != nil || len(rBlessings.ThirdPartyCaveats()) == 0 {
+		authConn.Close()
+		if err != nil {
+			return vc.appendCloseReason(err)
+		}
+	} else if vers < version.RPCVersion10 {
+		go vc.recvDischargesLoop(authConn)
+	}
+
+	vc.mu.Lock()
+	vc.crypter = crypter
+	vc.localPrincipal = principal
+	vc.localBlessings = lBlessings
+	vc.remoteBlessings = rBlessings
+	vc.remoteDischarges = rDischarges
+	vc.mu.Unlock()
+
+	// Open system flows.
+	if err = vc.connectSystemFlows(); err != nil {
+		return vc.appendCloseReason(err)
+	}
+
+	vlog.VI(1).Infof("Client VC %v authenticated. RemoteBlessings:%v, LocalBlessings:%v",
+		vc, rBlessings, lBlessings)
+	return nil
+}
+
+// HandshakeResult is sent by HandshakeAcceptedVC over the channel returned by it.
+type HandshakeResult struct {
+	Listener stream.Listener // Listener for accepting new Flows on the VC.
+	Error    error           // Error, if any, during the handshake.
+}
+
+// HandshakeAcceptedVC completes initialization of the VC (setting up
+// encryption, authentication etc.) under the assumption that the VC was
+// initiated by a remote process (and the local process wishes to "accept" it).
+//
+// Since the handshaking process might involve several round trips, the bulk of
+// the work is done asynchronously and the result of the handshake is written to
+// the channel returned by this method.
+//
+// 'principal' is the principal used by the server during authentication.
+// If principal is nil, then the VC expects to be used for unauthenticated, unencrypted communication.
+// 'lBlessings' is presented to the client during authentication.
+func (vc *VC) HandshakeAcceptedVC(
+	vers version.RPCVersion,
+	principal security.Principal,
+	lBlessings security.Blessings,
+	exchange crypto.BoxKeyExchanger,
+	opts ...stream.ListenerOpt) <-chan HandshakeResult {
+	result := make(chan HandshakeResult, 1)
+	finish := func(ln stream.Listener, err error) chan HandshakeResult {
+		result <- HandshakeResult{ln, err}
+		return result
+	}
+	var (
+		dischargeClient       DischargeClient
+		dischargeExpiryBuffer = DefaultServerDischargeExpiryBuffer
+	)
+	for _, o := range opts {
+		switch v := o.(type) {
+		case DischargeClient:
+			dischargeClient = v
+		case DischargeExpiryBuffer:
+			dischargeExpiryBuffer = time.Duration(v)
+		}
+	}
+	// If the listener were set up asynchronously, there would be a race between
+	// the listener being set up and the caller of this method trying to
+	// dispatch messages, so it is set up synchronously.
+	ln, err := vc.Listen()
+	if err != nil {
+		return finish(nil, err)
+	}
+	// TODO(mattr): We could instead send counters in the return SetupVC message
+	// and avoid this extra message.  It probably doesn't make much difference
+	// so for now I'll leave it.  May be a nice cleanup after we are always
+	// using SetupVC.
+	vc.helper.AddReceiveBuffers(vc.VCI(), SharedFlowID, DefaultBytesBufferedPerFlow)
+
+	// principal == nil means that we are running in SecurityNone and we don't need
+	// to authenticate the VC.
+	if principal == nil {
+		go func() {
+			_, err = exchange(nil)
+			result <- HandshakeResult{ln, err}
+		}()
+		return result
+	}
+
+	var crypter crypto.Crypter
+
+	go func() {
+		sendErr := func(err error) {
+			ln.Close()
+			result <- HandshakeResult{nil, vc.appendCloseReason(err)}
+		}
+
+		vc.mu.Lock()
+		vc.acceptHandshakeDone = make(chan struct{})
+		vc.mu.Unlock()
+
+		crypter, err = crypto.NewBoxCrypter(exchange, vc.pool)
+		if err != nil {
+			sendErr(verror.New(stream.ErrSecurity, nil, verror.New(errFailedToSetupEncryption, nil, err)))
+			return
+		}
+
+		// Authenticate (exchange identities)
+		authConn, err := ln.Accept()
+		if err != nil {
+			sendErr(verror.New(stream.ErrNetwork, nil, verror.New(errAuthFlowNotAccepted, nil, err)))
+			return
+		}
+		if vc.findFlow(authConn) != AuthFlowID {
+			// This should have been an auth flow.  We can't establish security so send
+			// an error.
+			sendErr(verror.New(stream.ErrSecurity, nil, verror.New(errFailedToCreateFlowForAuth, nil, err)))
+			return
+		}
+
+		rBlessings, lDischarges, err := AuthenticateAsServer(authConn, principal, lBlessings, dischargeClient, crypter, vers)
+		if err != nil {
+			authConn.Close()
+			sendErr(verror.New(stream.ErrSecurity, nil, verror.New(errAuthFailed, nil, err)))
+			return
+		}
+
+		vc.mu.Lock()
+		vc.version = vers
+		vc.crypter = crypter
+		vc.localPrincipal = principal
+		vc.localBlessings = lBlessings
+		vc.remoteBlessings = rBlessings
+		vc.localDischarges = lDischarges
+		close(vc.acceptHandshakeDone)
+		vc.acceptHandshakeDone = nil
+		vc.mu.Unlock()
+
+		if len(lBlessings.ThirdPartyCaveats()) > 0 && vers < version.RPCVersion10 {
+			go vc.sendDischargesLoop(authConn, dischargeClient, lBlessings.ThirdPartyCaveats(), dischargeExpiryBuffer)
+		} else {
+			authConn.Close()
+		}
+
+		// Accept system flows.
+		if err = vc.acceptSystemFlows(ln, dischargeClient, dischargeExpiryBuffer); err != nil {
+			sendErr(verror.New(stream.ErrNetwork, nil, verror.New(errFailedToAcceptSystemFlows, nil, err)))
+		}
+
+		vlog.VI(1).Infof("Server VC %v authenticated. RemoteBlessings:%v, LocalBlessings:%v", vc, rBlessings, lBlessings)
+		result <- HandshakeResult{ln, nil}
+	}()
+	return result
+}
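+
+// Accept-side usage sketch (vers, principal, blessings, and exchanger are
+// assumed to be in scope):
+//
+//	c := v.HandshakeAcceptedVC(vers, principal, blessings, exchanger)
+//	if hr := <-c; hr.Error != nil {
+//		// handshake failed
+//	} else {
+//		go serveFlows(hr.Listener) // hypothetical accept loop
+//	}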
+
+func (vc *VC) sendDischargesLoop(conn io.WriteCloser, dc DischargeClient, tpCavs []security.Caveat, dischargeExpiryBuffer time.Duration) {
+	defer conn.Close()
+	if dc == nil {
+		return
+	}
+	enc := vom.NewEncoder(conn)
+	discharges := dc.PrepareDischarges(nil, tpCavs, security.DischargeImpetus{})
+	for expiry := minExpiryTime(discharges, tpCavs); !expiry.IsZero(); expiry = minExpiryTime(discharges, tpCavs) {
+		select {
+		case <-time.After(fetchDuration(expiry, dischargeExpiryBuffer)):
+			discharges = dc.PrepareDischarges(nil, tpCavs, security.DischargeImpetus{})
+			if err := enc.Encode(discharges); err != nil {
+				vlog.Errorf("encoding discharges on VC %v failed: %v", vc, err)
+				return
+			}
+			if len(discharges) == 0 {
+				continue
+			}
+			vc.mu.Lock()
+			if vc.localDischarges == nil {
+				vc.localDischarges = make(map[string]security.Discharge)
+			}
+			for _, d := range discharges {
+				vc.localDischarges[d.ID()] = d
+			}
+			vc.mu.Unlock()
+		case <-vc.closeCh:
+			vlog.VI(3).Infof("closing sendDischargesLoop on VC %v", vc)
+			return
+		}
+	}
+}
+
+func fetchDuration(expiry time.Time, buffer time.Duration) time.Duration {
+	// Fetch the discharge earlier than the actual expiry to factor in clock
+	// skew and RPC time.
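+	// For example, a discharge expiring 60s from now with a 20s buffer
+	// yields a wait of roughly 40s before the refetch.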
+	return expiry.Sub(time.Now().Add(buffer))
+}
+
+func minExpiryTime(discharges []security.Discharge, tpCavs []security.Caveat) time.Time {
+	var min time.Time
+	// If some discharges were not fetched, retry in a minute.
+	if len(discharges) < len(tpCavs) {
+		min = time.Now().Add(time.Minute)
+	}
+	for _, d := range discharges {
+		if exp := d.Expiry(); min.IsZero() || (!exp.IsZero() && exp.Before(min)) {
+			min = exp
+		}
+	}
+	return min
+}
+
+func (vc *VC) recvDischargesLoop(conn io.ReadCloser) {
+	defer conn.Close()
+	dec := vom.NewDecoder(conn)
+	for {
+		var discharges []security.Discharge
+		if err := dec.Decode(&discharges); err != nil {
+			vlog.VI(3).Infof("decoding discharges on %v failed: %v", vc, err)
+			return
+		}
+		if len(discharges) == 0 {
+			continue
+		}
+		vc.mu.Lock()
+		if vc.remoteDischarges == nil {
+			vc.remoteDischarges = make(map[string]security.Discharge)
+		}
+		for _, d := range discharges {
+			vc.remoteDischarges[d.ID()] = d
+		}
+		vc.mu.Unlock()
+	}
+}
+
+func (vc *VC) connectSystemFlows() error {
+	conn, err := vc.connectFID(TypeFlowID, systemFlowPriority)
+	if err != nil {
+		return verror.New(stream.ErrSecurity, nil, verror.New(errFailedToCreateFlowForWireType, nil, err))
+	}
+	vc.dataCache.Insert(TypeEncoderKey{}, vom.NewTypeEncoder(conn))
+	vc.dataCache.Insert(TypeDecoderKey{}, vom.NewTypeDecoder(conn))
+
+	if vc.Version() < version.RPCVersion10 {
+		return nil
+	}
+
+	vc.mu.Lock()
+	rBlessings := vc.remoteBlessings
+	vc.mu.Unlock()
+	if len(rBlessings.ThirdPartyCaveats()) > 0 {
+		conn, err = vc.connectFID(DischargeFlowID, systemFlowPriority)
+		if err != nil {
+			return verror.New(stream.ErrSecurity, nil, verror.New(errFailedToCreateFlowForDischarge, nil, err))
+		}
+		go vc.recvDischargesLoop(conn)
+	}
+
+	return nil
+}
+
+func (vc *VC) acceptSystemFlows(ln stream.Listener, dischargeClient DischargeClient, dischargeExpiryBuffer time.Duration) error {
+	conn, err := ln.Accept()
+	if err != nil {
+		return verror.New(errFlowForWireTypeNotAccepted, nil, err)
+	}
+	vc.dataCache.Insert(TypeEncoderKey{}, vom.NewTypeEncoder(conn))
+	vc.dataCache.Insert(TypeDecoderKey{}, vom.NewTypeDecoder(conn))
+
+	if vc.Version() < version.RPCVersion10 {
+		return nil
+	}
+
+	vc.mu.Lock()
+	lBlessings := vc.localBlessings
+	vc.mu.Unlock()
+	tpCaveats := lBlessings.ThirdPartyCaveats()
+	if len(tpCaveats) > 0 {
+		conn, err := ln.Accept()
+		if err != nil {
+			return verror.New(errFlowForDischargeNotAccepted, nil, err)
+		}
+		go vc.sendDischargesLoop(conn, dischargeClient, tpCaveats, dischargeExpiryBuffer)
+	}
+
+	return nil
+}
+
+// Encrypt uses the VC's encryption scheme to encrypt the provided data payload.
+// Always takes ownership of plaintext.
+func (vc *VC) Encrypt(fid id.Flow, plaintext *iobuf.Slice) (cipherslice *iobuf.Slice, err error) {
+	if plaintext == nil {
+		return nil, nil
+	}
+	vc.mu.Lock()
+	if fid == AuthFlowID {
+		cipherslice = plaintext
+	} else {
+		cipherslice, err = vc.crypter.Encrypt(plaintext)
+	}
+	vc.mu.Unlock()
+	return
+}
+
+func (vc *VC) allocFID() id.Flow {
+	vc.mu.Lock()
+	ret := vc.nextConnectFID
+	vc.nextConnectFID += 2
+	vc.mu.Unlock()
+	return ret
+}
+
+// findFlow finds the flow id for the provided flow.
+// Returns 0 if there is none.
+func (vc *VC) findFlow(flow interface{}) id.Flow {
+	vc.mu.Lock()
+	defer vc.mu.Unlock()
+
+	const invalidFlowID = 0
+	// This operation is rare and early enough (called when there are <= 2
+	// flows over the VC) that iterating over the map should be fine.
+	for fid, f := range vc.flowMap {
+		if f == flow {
+			return fid
+		}
+	}
+	return invalidFlowID
+}
+
+// VCI returns the identifier of this VC.
+func (vc *VC) VCI() id.VC { return vc.vci }
+
+// RemoteEndpoint returns the remote endpoint for this VC.
+func (vc *VC) RemoteEndpoint() naming.Endpoint { return vc.remoteEP }
+
+// LocalEndpoint returns the local endpoint for this VC.
+func (vc *VC) LocalEndpoint() naming.Endpoint { return vc.localEP }
+
+// VCDataCache returns the VCDataCache that allows information to be
+// shared across the VC.
+func (vc *VC) VCDataCache() stream.VCDataCache { return vc.dataCache }
+
+// LocalPrincipal returns the principal that authenticated with the remote end of the VC.
+func (vc *VC) LocalPrincipal() security.Principal {
+	vc.mu.Lock()
+	defer vc.mu.Unlock()
+	vc.waitForHandshakeLocked()
+	return vc.localPrincipal
+}
+
+// LocalBlessings returns the blessings (bound to LocalPrincipal) presented to the
+// remote end of the VC during authentication.
+func (vc *VC) LocalBlessings() security.Blessings {
+	vc.mu.Lock()
+	defer vc.mu.Unlock()
+	vc.waitForHandshakeLocked()
+	return vc.localBlessings
+}
+
+// RemoteBlessings returns the blessings presented by the remote end of the VC during
+// authentication.
+func (vc *VC) RemoteBlessings() security.Blessings {
+	vc.mu.Lock()
+	defer vc.mu.Unlock()
+	vc.waitForHandshakeLocked()
+	return vc.remoteBlessings
+}
+
+// LocalDischarges returns the discharges presented by the local end of the VC during
+// authentication.
+func (vc *VC) LocalDischarges() map[string]security.Discharge {
+	vc.mu.Lock()
+	defer vc.mu.Unlock()
+	vc.waitForHandshakeLocked()
+	if len(vc.localDischarges) == 0 {
+		return nil
+	}
+	// Return a copy of the map to prevent racy reads.
+	return copyDischargeMap(vc.localDischarges)
+}
+
+// RemoteDischarges returns the discharges presented by the remote end of the VC during
+// authentication.
+func (vc *VC) RemoteDischarges() map[string]security.Discharge {
+	vc.mu.Lock()
+	defer vc.mu.Unlock()
+	vc.waitForHandshakeLocked()
+	if len(vc.remoteDischarges) == 0 {
+		return nil
+	}
+	// Return a copy of the map to prevent racy reads.
+	return copyDischargeMap(vc.remoteDischarges)
+}
+
+// waitForHandshakeLocked blocks until an in-progress handshake (encryption
+// setup and authentication) completes.
+// REQUIRES: vc.mu is held.
+func (vc *VC) waitForHandshakeLocked() {
+	if hsd := vc.acceptHandshakeDone; hsd != nil {
+		vc.mu.Unlock()
+		<-hsd
+		vc.mu.Lock()
+	}
+}
+
+func (vc *VC) String() string {
+	return fmt.Sprintf("VCI:%d (%v<->%v)", vc.vci, vc.localEP, vc.remoteEP)
+}
+
+// DebugString returns a string representation of the state of a VC.
+//
+// The format of the returned string is meant to be human-friendly and the
+// specific format should not be relied upon for automated processing.
+func (vc *VC) DebugString() string {
+	vc.mu.Lock()
+	l := make([]string, 0, len(vc.flowMap)+1)
+	l = append(l, fmt.Sprintf("VCI:%d -- Endpoints:(Local:%q Remote:%q) #Flows:%d NextConnectFID:%d",
+		vc.vci,
+		vc.localEP,
+		vc.remoteEP,
+		len(vc.flowMap),
+		vc.nextConnectFID))
+	if vc.crypter == nil {
+		l = append(l, "Handshake not completed yet")
+	} else {
+		l = append(l, "Encryption: "+vc.crypter.String())
+		if vc.localPrincipal != nil {
+			l = append(l, fmt.Sprintf("LocalPrincipal:%v LocalBlessings:%v RemoteBlessings:%v", vc.localPrincipal.PublicKey(), vc.localBlessings, vc.remoteBlessings))
+		}
+	}
+	for fid, f := range vc.flowMap {
+		l = append(l, fmt.Sprintf("  Flow:%3d BytesRead:%7d BytesWritten:%7d", fid, f.BytesRead(), f.BytesWritten()))
+	}
+	vc.mu.Unlock()
+	sort.Strings(l[1:])
+	return strings.Join(l, "\n")
+}
+
+func (vc *VC) newWriter(fid id.Flow, priority bqueue.Priority) (*writer, error) {
+	bq, err := vc.helper.NewWriter(vc.vci, fid, priority)
+	if err != nil {
+		return nil, err
+	}
+	alloc := iobuf.NewAllocator(vc.pool, vc.reserveBytes)
+	return newWriter(MaxPayloadSizeBytes, bq, alloc, vc.sharedCounters), nil
+}
+
+// readHandlerImpl is an adapter for the readHandler interface required by
+// the reader type.
+type readHandlerImpl struct {
+	vc  *VC
+	fid id.Flow
+}
+
+func (r readHandlerImpl) HandleRead(bytes uint) {
+	r.vc.helper.AddReceiveBuffers(r.vc.vci, r.fid, bytes)
+}
+
+func copyDischargeMap(m map[string]security.Discharge) map[string]security.Discharge {
+	ret := make(map[string]security.Discharge)
+	for id, d := range m {
+		ret[id] = d
+	}
+	return ret
+}
diff --git a/runtime/internal/rpc/stream/vc/vc_cache.go b/runtime/internal/rpc/stream/vc/vc_cache.go
new file mode 100644
index 0000000..d962cfa
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/vc_cache.go
@@ -0,0 +1,114 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+import (
+	"sync"
+
+	"v.io/v23/naming"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+)
+
+var errVCCacheClosed = reg(".errVCCacheClosed", "vc cache has been closed")
+
+// VCCache implements a set of VCs keyed by the endpoint of the remote end and
+// the local principal. Multiple goroutines can invoke methods on the VCCache
+// simultaneously.
+type VCCache struct {
+	mu      sync.Mutex
+	cache   map[vcKey]*VC  // GUARDED_BY(mu)
+	started map[vcKey]bool // GUARDED_BY(mu)
+	cond    *sync.Cond
+}
+
+// NewVCCache returns a new cache for VCs.
+func NewVCCache() *VCCache {
+	c := &VCCache{
+		cache:   make(map[vcKey]*VC),
+		started: make(map[vcKey]bool),
+	}
+	c.cond = sync.NewCond(&c.mu)
+	return c
+}
+
+// ReservedFind returns a VC where the remote end of the underlying connection
+// is identified by the provided (ep, p.PublicKey). Returns nil if there is no
+// such VC.
+//
+// Iff the cache is closed, ReservedFind will return an error.
+// If ReservedFind returns without error, the caller is required to call Unreserve
+// (with the same ep and p.PublicKey as in the ReservedFind call) to avoid deadlock.
+// Until the corresponding Unreserve call is made, all new ReservedFind calls for
+// this ep and p will block.
+func (c *VCCache) ReservedFind(ep naming.Endpoint, p security.Principal) (*VC, error) {
+	k := c.key(ep, p)
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	for c.started[k] {
+		c.cond.Wait()
+	}
+	if c.cache == nil {
+		return nil, verror.New(errVCCacheClosed, nil)
+	}
+	c.started[k] = true
+	return c.cache[k], nil
+}
+
+// Unreserve marks the (ep, p) pair as no longer reserved and wakes up any
+// goroutines blocked in ReservedFind waiting on it.
+func (c *VCCache) Unreserve(ep naming.Endpoint, p security.Principal) {
+	c.mu.Lock()
+	delete(c.started, c.key(ep, p))
+	c.cond.Broadcast()
+	c.mu.Unlock()
+}
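+
+// The intended dial-side pattern is reserve, consult, (maybe) insert, then
+// unreserve (sketch; dialNewVC is a hypothetical helper):
+//
+//	vc, err := cache.ReservedFind(ep, p)
+//	if err != nil {
+//		return nil, err // cache closed
+//	}
+//	defer cache.Unreserve(ep, p)
+//	if vc == nil {
+//		if vc, err = dialNewVC(ep, p); err != nil {
+//			return nil, err
+//		}
+//		cache.Insert(vc)
+//	}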
+
+// Insert adds vc to the cache and returns an error iff the cache has been closed.
+func (c *VCCache) Insert(vc *VC) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.cache == nil {
+		return verror.New(errVCCacheClosed, nil)
+	}
+	c.cache[c.key(vc.RemoteEndpoint(), vc.LocalPrincipal())] = vc
+	return nil
+}
+
+// Close marks the VCCache as closed and returns the VCs remaining in the cache.
+func (c *VCCache) Close() []*VC {
+	c.mu.Lock()
+	vcs := make([]*VC, 0, len(c.cache))
+	for _, vc := range c.cache {
+		vcs = append(vcs, vc)
+	}
+	c.cache = nil
+	c.started = nil
+	c.mu.Unlock()
+	return vcs
+}
+
+// Delete removes vc from the cache, returning an error iff the cache has been closed.
+func (c *VCCache) Delete(vc *VC) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.cache == nil {
+		return verror.New(errVCCacheClosed, nil)
+	}
+	delete(c.cache, c.key(vc.RemoteEndpoint(), vc.LocalPrincipal()))
+	return nil
+}
+
+type vcKey struct {
+	remoteEP       string
+	localPublicKey string // localPublicKey = "" means we are running unencrypted (i.e. SecurityNone)
+}
+
+func (c *VCCache) key(ep naming.Endpoint, p security.Principal) vcKey {
+	k := vcKey{remoteEP: ep.String()}
+	if p != nil {
+		k.localPublicKey = p.PublicKey().String()
+	}
+	return k
+}
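+
+// For example, a VC dialed to "foo:8888" with a principal whose public key
+// renders as "a1:b2:..." would be keyed as (values illustrative):
+//
+//	vcKey{remoteEP: "@...@foo:8888@...", localPublicKey: "a1:b2:..."}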
diff --git a/runtime/internal/rpc/stream/vc/vc_cache_test.go b/runtime/internal/rpc/stream/vc/vc_cache_test.go
new file mode 100644
index 0000000..b096e21
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/vc_cache_test.go
@@ -0,0 +1,123 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+import (
+	"testing"
+
+	inaming "v.io/x/ref/runtime/internal/naming"
+	"v.io/x/ref/test/testutil"
+)
+
+func TestInsertDelete(t *testing.T) {
+	cache := NewVCCache()
+	ep, err := inaming.NewEndpoint("foo:8888")
+	if err != nil {
+		t.Fatal(err)
+	}
+	p := testutil.NewPrincipal("test")
+	vc := &VC{remoteEP: ep, localPrincipal: p}
+	otherEP, err := inaming.NewEndpoint("foo:8888")
+	if err != nil {
+		t.Fatal(err)
+	}
+	otherP := testutil.NewPrincipal("test")
+	otherVC := &VC{remoteEP: otherEP, localPrincipal: otherP}
+
+	cache.Insert(vc)
+	cache.Insert(otherVC)
+	cache.Delete(vc)
+	if got, want := cache.Close(), []*VC{otherVC}; !vcsEqual(got, want) {
+		t.Errorf("got %v, want %v", got, want)
+	}
+}
+
+func TestInsertClose(t *testing.T) {
+	cache := NewVCCache()
+	ep, err := inaming.NewEndpoint("foo:8888")
+	if err != nil {
+		t.Fatal(err)
+	}
+	p := testutil.NewPrincipal("test")
+	vc := &VC{remoteEP: ep, localPrincipal: p}
+
+	if err := cache.Insert(vc); err != nil {
+		t.Errorf("the cache is not closed yet")
+	}
+	if got, want := cache.Close(), []*VC{vc}; !vcsEqual(got, want) {
+		t.Errorf("got %v, want %v", got, want)
+	}
+	if err := cache.Insert(vc); err == nil {
+		t.Errorf("the cache has been closed")
+	}
+}
+
+func TestReservedFind(t *testing.T) {
+	cache := NewVCCache()
+	ep, err := inaming.NewEndpoint("foo:8888")
+	if err != nil {
+		t.Fatal(err)
+	}
+	p := testutil.NewPrincipal("test")
+	vc := &VC{remoteEP: ep, localPrincipal: p}
+	cache.Insert(vc)
+
+	// We should be able to find the vc in the cache.
+	if got, err := cache.ReservedFind(ep, p); err != nil || got != vc {
+		t.Errorf("got %v, want %v, err: %v", got, vc, err)
+	}
+
+	// If we change the endpoint or the principal, we should get nothing.
+	otherEP, err := inaming.NewEndpoint("bar: 7777")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, err := cache.ReservedFind(otherEP, p); err != nil || got != nil {
+		t.Errorf("got %v, want <nil>, err: %v", got, err)
+	}
+	if got, err := cache.ReservedFind(ep, testutil.NewPrincipal("wrong")); err != nil || got != nil {
+		t.Errorf("got %v, want <nil>, err: %v", got, err)
+	}
+
+	// A subsequent ReservedFind call that matches a previous failed ReservedFind
+	// should block until a matching Unreserve call is made.
+	ch := make(chan *VC, 1)
+	go func(ch chan *VC) {
+		vc, err := cache.ReservedFind(otherEP, p)
+		if err != nil {
+			// t.Fatal must not be called from a non-test goroutine; report
+			// the error and unblock the receiver instead.
+			t.Error(err)
+			ch <- nil
+			return
+		}
+		ch <- vc
+	}(ch)
+
+	// We insert the otherEP into the cache.
+	otherVC := &VC{remoteEP: otherEP, localPrincipal: p}
+	cache.Insert(otherVC)
+	cache.Unreserve(otherEP, p)
+
+	// Now cache.ReservedFind should have returned the correct otherVC.
+	if cachedVC := <-ch; cachedVC != otherVC {
+		t.Errorf("got %v, want %v", cachedVC, otherVC)
+	}
+}
+
+func vcsEqual(a, b []*VC) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	m := make(map[*VC]int)
+	for _, v := range a {
+		m[v]++
+	}
+	for _, v := range b {
+		m[v]--
+	}
+	for _, i := range m {
+		if i != 0 {
+			return false
+		}
+	}
+	return true
+}
diff --git a/runtime/internal/rpc/stream/vc/vc_test.go b/runtime/internal/rpc/stream/vc/vc_test.go
new file mode 100644
index 0000000..6f01d63
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/vc_test.go
@@ -0,0 +1,632 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Use a different package for the tests to ensure that only the exported API is used.
+
+package vc_test
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+	"testing"
+
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/options"
+	"v.io/v23/rpc/version"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+
+	"v.io/x/lib/vlog"
+
+	"v.io/x/ref/runtime/internal/lib/bqueue"
+	"v.io/x/ref/runtime/internal/lib/bqueue/drrqueue"
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/crypto"
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+	iversion "v.io/x/ref/runtime/internal/rpc/version"
+	"v.io/x/ref/test/testutil"
+)
+
+var (
+	clientEP = endpoint(naming.FixedRoutingID(0xcccccccccccccccc))
+	serverEP = endpoint(naming.FixedRoutingID(0x5555555555555555))
+)
+
+//go:generate v23 test generate
+
+const (
+	// Convenience alias to avoid conflicts between the package name "vc" and variables called "vc".
+	DefaultBytesBufferedPerFlow = vc.DefaultBytesBufferedPerFlow
+	// Shorthands
+	SecurityNone    = options.SecurityNone
+	SecurityDefault = options.SecurityConfidential
+)
+
+var LatestVersion = iversion.SupportedRange.Max
+
+// testFlowEcho writes a random string of 'size' bytes on the flow and then
+// ensures that the same string is read back.
+func testFlowEcho(t *testing.T, flow stream.Flow, size int) {
+	defer flow.Close()
+	wrote := testutil.RandomBytes(size)
+	go func() {
+		buf := wrote
+		for len(buf) > 0 {
+			limit := 1 + testutil.Intn(len(buf)) // Random number in [1, n]
+			n, err := flow.Write(buf[:limit])
+			if n != limit || err != nil {
+				t.Errorf("Write returned (%d, %v) want (%d, nil)", n, err, limit)
+			}
+			buf = buf[limit:]
+		}
+	}()
+
+	total := 0
+	read := make([]byte, size)
+	buf := read
+	for total < size {
+		n, err := flow.Read(buf)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+		total += n
+		buf = buf[n:]
+	}
+	if !bytes.Equal(read, wrote) {
+		t.Errorf("Data read != data written")
+	}
+}
+
+func TestHandshakeNoSecurity(t *testing.T) {
+	// When the principals are nil, no blessings should be sent over the wire.
+	h, vc, err := New(LatestVersion, nil, nil, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer h.Close()
+	flow, err := vc.Connect()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !flow.RemoteBlessings().IsZero() {
+		t.Errorf("Server sent blessing %v over insecure transport", flow.RemoteBlessings())
+	}
+	if !flow.LocalBlessings().IsZero() {
+		t.Errorf("Client sent blessing %v over insecure transport", flow.LocalBlessings())
+	}
+}
+
+func testFlowAuthN(flow stream.Flow, serverBlessings security.Blessings, serverDischarges map[string]security.Discharge, clientPublicKey security.PublicKey) error {
+	if got, want := flow.RemoteBlessings(), serverBlessings; !reflect.DeepEqual(got, want) {
+		return fmt.Errorf("Server shared blessings %v, want %v", got, want)
+	}
+	if got, want := flow.RemoteDischarges(), serverDischarges; !reflect.DeepEqual(got, want) {
+		return fmt.Errorf("Server shared discharges %#v, want %#v", got, want)
+	}
+	if got, want := flow.LocalBlessings().PublicKey(), clientPublicKey; !reflect.DeepEqual(got, want) {
+		return fmt.Errorf("Client shared %v, want %v", got, want)
+	}
+	return nil
+}
+
+// auth implements security.Authorizer.
+type auth struct {
+	localPrincipal   security.Principal
+	remoteBlessings  security.Blessings
+	remoteDischarges map[string]security.Discharge
+	suffix, method   string
+	err              error
+}
+
+// Authorize tests that the context passed to the authorizer is the expected one.
+func (a *auth) Authorize(ctx *context.T, call security.Call) error {
+	if a.err != nil {
+		return a.err
+	}
+	if got, want := call.LocalPrincipal(), a.localPrincipal; !reflect.DeepEqual(got, want) {
+		return fmt.Errorf("ctx.LocalPrincipal: got %v, want %v", got, want)
+	}
+	if got, want := call.RemoteBlessings(), a.remoteBlessings; !reflect.DeepEqual(got, want) {
+		return fmt.Errorf("ctx.RemoteBlessings: got %v, want %v", got, want)
+	}
+	if got, want := call.RemoteDischarges(), a.remoteDischarges; !reflect.DeepEqual(got, want) {
+		return fmt.Errorf("ctx.RemoteDischarges: got %v, want %v", got, want)
+	}
+	if got, want := call.LocalEndpoint(), clientEP; !reflect.DeepEqual(got, want) {
+		return fmt.Errorf("ctx.LocalEndpoint: got %v, want %v", got, want)
+	}
+	if got, want := call.RemoteEndpoint(), serverEP; !reflect.DeepEqual(got, want) {
+		return fmt.Errorf("ctx.RemoteEndpoint: got %v, want %v", got, want)
+	}
+	if got, want := call.Suffix(), a.suffix; got != want {
+		return fmt.Errorf("ctx.Suffix: got %v, want %v", got, want)
+	}
+	if got, want := call.Method(), a.method; got != want {
+		return fmt.Errorf("ctx.Method: got %v, want %v", got, want)
+	}
+	return nil
+}
+
+// mockDischargeClient implements vc.DischargeClient.
+type mockDischargeClient []security.Discharge
+
+func (m mockDischargeClient) PrepareDischarges(_ *context.T, forcaveats []security.Caveat, impetus security.DischargeImpetus) []security.Discharge {
+	return m
+}
+func (mockDischargeClient) Invalidate(...security.Discharge) {}
+func (mockDischargeClient) RPCStreamListenerOpt()            {}
+func (mockDischargeClient) RPCStreamVCOpt()                  {}
+
+// Test that mockDischargeClient implements vc.DischargeClient.
+var _ vc.DischargeClient = (mockDischargeClient)(nil)
+
+func TestHandshake(t *testing.T) {
+	matchesError := func(got error, want string) error {
+		if (got == nil) != (len(want) == 0) {
+			return fmt.Errorf("got error %q, wanted to match %q", got, want)
+		}
+		if got != nil && !strings.Contains(got.Error(), want) {
+			return fmt.Errorf("got error %q, wanted to match %q", got, want)
+		}
+		return nil
+	}
+	var (
+		root       = testutil.NewIDProvider("root")
+		discharger = testutil.NewPrincipal("discharger")
+		client     = testutil.NewPrincipal()
+		server     = testutil.NewPrincipal()
+	)
+	tpcav, err := security.NewPublicKeyCaveat(discharger.PublicKey(), "irrelevant", security.ThirdPartyRequirements{}, security.UnconstrainedUse())
+	if err != nil {
+		t.Fatal(err)
+	}
+	dis, err := discharger.MintDischarge(tpcav, security.UnconstrainedUse())
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Root blesses the client
+	if err := root.Bless(client, "client"); err != nil {
+		t.Fatal(err)
+	}
+	// Root blesses the server with a third-party caveat
+	if err := root.Bless(server, "server", tpcav); err != nil {
+		t.Fatal(err)
+	}
+
+	testdata := []struct {
+		dischargeClient      vc.DischargeClient
+		auth                 *vc.ServerAuthorizer
+		dialErr              string
+		flowRemoteBlessings  security.Blessings
+		flowRemoteDischarges map[string]security.Discharge
+	}{
+		{
+			flowRemoteBlessings: server.BlessingStore().Default(),
+		},
+		{
+			dischargeClient:      mockDischargeClient([]security.Discharge{dis}),
+			flowRemoteBlessings:  server.BlessingStore().Default(),
+			flowRemoteDischarges: map[string]security.Discharge{dis.ID(): dis},
+		},
+		{
+			dischargeClient: mockDischargeClient([]security.Discharge{dis}),
+			auth: &vc.ServerAuthorizer{
+				Suffix: "suffix",
+				Method: "method",
+				Policy: &auth{
+					localPrincipal:   client,
+					remoteBlessings:  server.BlessingStore().Default(),
+					remoteDischarges: map[string]security.Discharge{dis.ID(): dis},
+					suffix:           "suffix",
+					method:           "method",
+				},
+			},
+			flowRemoteBlessings:  server.BlessingStore().Default(),
+			flowRemoteDischarges: map[string]security.Discharge{dis.ID(): dis},
+		},
+		{
+			dischargeClient: mockDischargeClient([]security.Discharge{dis}),
+			auth: &vc.ServerAuthorizer{
+				Suffix: "suffix",
+				Method: "method",
+				Policy: &auth{
+					err: errors.New("authorization error"),
+				},
+			},
+			dialErr: "authorization error",
+		},
+	}
+	for i, d := range testdata {
+		h, vc, err := New(LatestVersion, client, server, d.dischargeClient, d.auth)
+		if merr := matchesError(err, d.dialErr); merr != nil {
+			t.Errorf("Test #%d: HandshakeDialedVC with server authorizer %#v:: %v", i, d.auth.Policy, merr)
+		}
+		if err != nil {
+			continue
+		}
+		flow, err := vc.Connect()
+		if err != nil {
+			h.Close()
+			t.Errorf("Unable to create flow: %v", err)
+			continue
+		}
+		if err := testFlowAuthN(flow, d.flowRemoteBlessings, d.flowRemoteDischarges, client.PublicKey()); err != nil {
+			h.Close()
+			t.Error(err)
+			continue
+		}
+		h.Close()
+	}
+}
+
+func testConnect_Small(t *testing.T, version version.RPCVersion, securityLevel options.SecurityLevel) {
+	h, vc, err := NewSimple(version, securityLevel)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer h.Close()
+	flow, err := vc.Connect()
+	if err != nil {
+		t.Fatal(err)
+	}
+	testFlowEcho(t, flow, 10)
+}
+func TestConnect_SmallNoSecurity(t *testing.T) { testConnect_Small(t, LatestVersion, SecurityNone) }
+func TestConnect_Small(t *testing.T)           { testConnect_Small(t, LatestVersion, SecurityDefault) }
+
+func testConnect(t *testing.T, securityLevel options.SecurityLevel) {
+	h, vc, err := NewSimple(LatestVersion, securityLevel)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer h.Close()
+	flow, err := vc.Connect()
+	if err != nil {
+		t.Fatal(err)
+	}
+	testFlowEcho(t, flow, 10*DefaultBytesBufferedPerFlow)
+}
+func TestConnectNoSecurity(t *testing.T) { testConnect(t, SecurityNone) }
+func TestConnect(t *testing.T)           { testConnect(t, SecurityDefault) }
+
+func testConnect_Version7(t *testing.T, securityLevel options.SecurityLevel) {
+	h, vc, err := NewSimple(LatestVersion, securityLevel)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer h.Close()
+	flow, err := vc.Connect()
+	if err != nil {
+		t.Fatal(err)
+	}
+	testFlowEcho(t, flow, 10)
+}
+func TestConnect_Version7NoSecurity(t *testing.T) { testConnect_Version7(t, SecurityNone) }
+func TestConnect_Version7(t *testing.T)           { testConnect_Version7(t, SecurityDefault) }
+
+// helper function for testing concurrent operations on multiple flows over the
+// same VC.  Such tests are most useful when running the race detector.
+// (go test -race ...)
+func testConcurrentFlows(t *testing.T, securityLevel options.SecurityLevel, flows, gomaxprocs int) {
+	mp := runtime.GOMAXPROCS(gomaxprocs)
+	defer runtime.GOMAXPROCS(mp)
+	h, vc, err := NewSimple(LatestVersion, securityLevel)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer h.Close()
+
+	var wg sync.WaitGroup
+	wg.Add(flows)
+	for i := 0; i < flows; i++ {
+		go func(n int) {
+			defer wg.Done()
+			flow, err := vc.Connect()
+			if err != nil {
+				t.Error(err)
+			} else {
+				testFlowEcho(t, flow, (n+1)*DefaultBytesBufferedPerFlow)
+			}
+		}(i)
+	}
+	wg.Wait()
+}
+
+func TestConcurrentFlows_1NOSecurity(t *testing.T) { testConcurrentFlows(t, SecurityNone, 10, 1) }
+func TestConcurrentFlows_1(t *testing.T)           { testConcurrentFlows(t, SecurityDefault, 10, 1) }
+
+func TestConcurrentFlows_10NoSecurity(t *testing.T) { testConcurrentFlows(t, SecurityNone, 10, 10) }
+func TestConcurrentFlows_10(t *testing.T)           { testConcurrentFlows(t, SecurityDefault, 10, 10) }
+
+func testListen(t *testing.T, securityLevel options.SecurityLevel) {
+	h, vc, err := NewSimple(LatestVersion, securityLevel)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer h.Close()
+	if err := h.VC.AcceptFlow(id.Flow(21)); err == nil {
+		t.Errorf("Expected AcceptFlow on a new flow to fail as Listen was not called")
+	}
+
+	ln, err := vc.Listen()
+	if err != nil {
+		t.Fatalf("vc.Listen failed: %v", err)
+		return
+	}
+	_, err = vc.Listen()
+	if err == nil {
+		t.Fatalf("Second call to vc.Listen should have failed")
+		return
+	}
+	if err := h.VC.AcceptFlow(id.Flow(23)); err != nil {
+		t.Fatal(err)
+	}
+
+	data := "the dark knight"
+	cipherdata, err := h.otherEnd.VC.Encrypt(id.Flow(23), iobuf.NewSlice([]byte(data)))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := h.VC.DispatchPayload(id.Flow(23), cipherdata); err != nil {
+		t.Fatal(err)
+	}
+	flow, err := ln.Accept()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := ln.Close(); err != nil {
+		t.Error(err)
+	}
+	flow.Close()
+	var buf [4096]byte
+	if n, err := flow.Read(buf[:]); n != len(data) || err != nil || string(buf[:n]) != data {
+		t.Errorf("Got (%d, %v) = %q, want (%d, nil) = %q", n, err, string(buf[:n]), len(data), data)
+	}
+	if n, err := flow.Read(buf[:]); n != 0 || err != io.EOF {
+		t.Errorf("Got (%d, %v) want (0, %v)", n, err, io.EOF)
+	}
+}
+func TestListenNoSecurity(t *testing.T) { testListen(t, SecurityNone) }
+func TestListen(t *testing.T)           { testListen(t, SecurityDefault) }
+
+func testNewFlowAfterClose(t *testing.T, securityLevel options.SecurityLevel) {
+	h, _, err := NewSimple(LatestVersion, securityLevel)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer h.Close()
+	h.VC.Close(fmt.Errorf("reason"))
+	if err := h.VC.AcceptFlow(id.Flow(10)); err == nil {
+		t.Fatalf("New flows should not be accepted once the VC is closed")
+	}
+}
+func TestNewFlowAfterCloseNoSecurity(t *testing.T) { testNewFlowAfterClose(t, SecurityNone) }
+func TestNewFlowAfterClose(t *testing.T)           { testNewFlowAfterClose(t, SecurityDefault) }
+
+func testConnectAfterClose(t *testing.T, securityLevel options.SecurityLevel) {
+	h, vc, err := NewSimple(LatestVersion, securityLevel)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer h.Close()
+	h.VC.Close(fmt.Errorf("myerr"))
+	if f, err := vc.Connect(); f != nil || err == nil || !strings.Contains(err.Error(), "myerr") {
+		t.Fatalf("Got (%v, %v), want (nil, %q)", f, err, "myerr")
+	}
+}
+func TestConnectAfterCloseNoSecurity(t *testing.T) { testConnectAfterClose(t, SecurityNone) }
+func TestConnectAfterClose(t *testing.T)           { testConnectAfterClose(t, SecurityDefault) }
+
+// helper implements vc.Helper and also sets up a single VC.
+type helper struct {
+	VC *vc.VC
+	bq bqueue.T
+
+	mu       sync.Mutex
+	otherEnd *helper // GUARDED_BY(mu)
+}
+
+func createPrincipals(securityLevel options.SecurityLevel) (client, server security.Principal) {
+	if securityLevel == SecurityDefault {
+		client = testutil.NewPrincipal("client")
+		server = testutil.NewPrincipal("server")
+	}
+	return
+}
+
+// NewSimple is a convenience wrapper around New() with default parameters.
+func NewSimple(v version.RPCVersion, securityLevel options.SecurityLevel) (*helper, stream.VC, error) {
+	pclient, pserver := createPrincipals(securityLevel)
+	return New(v, pclient, pserver, nil, nil)
+}
+
+// New creates both ends of a VC but returns only the "client" end (i.e., the
+// one that initiated the VC). The "server" end (the one that "accepted" the VC)
+// listens for flows and simply echoes data read.
+func New(v version.RPCVersion, client, server security.Principal, dischargeClient vc.DischargeClient, auth *vc.ServerAuthorizer) (*helper, stream.VC, error) {
+	clientH := &helper{bq: drrqueue.New(vc.MaxPayloadSizeBytes)}
+	serverH := &helper{bq: drrqueue.New(vc.MaxPayloadSizeBytes)}
+	clientH.otherEnd = serverH
+	serverH.otherEnd = clientH
+
+	vci := id.VC(1234)
+
+	clientParams := vc.Params{
+		VCI:      vci,
+		Dialed:   true,
+		LocalEP:  clientEP,
+		RemoteEP: serverEP,
+		Pool:     iobuf.NewPool(0),
+		Helper:   clientH,
+	}
+	serverParams := vc.Params{
+		VCI:      vci,
+		LocalEP:  serverEP,
+		RemoteEP: clientEP,
+		Pool:     iobuf.NewPool(0),
+		Helper:   serverH,
+	}
+
+	clientH.VC = vc.InternalNew(clientParams)
+	serverH.VC = vc.InternalNew(serverParams)
+	clientH.AddReceiveBuffers(vci, vc.SharedFlowID, vc.DefaultBytesBufferedPerFlow)
+
+	go clientH.pipeLoop(serverH.VC)
+	go serverH.pipeLoop(clientH.VC)
+
+	var (
+		lopts  []stream.ListenerOpt
+		vcopts []stream.VCOpt
+	)
+
+	if dischargeClient != nil {
+		lopts = append(lopts, dischargeClient)
+	}
+	if auth != nil {
+		vcopts = append(vcopts, auth)
+	}
+
+	var bserver security.Blessings
+	if server != nil {
+		bserver = server.BlessingStore().Default()
+	}
+
+	var clientExchanger func(*crypto.BoxKey) error
+	var serverExchanger func(*crypto.BoxKey) (*crypto.BoxKey, error)
+
+	serverch, clientch := make(chan *crypto.BoxKey, 1), make(chan *crypto.BoxKey, 1)
+	clientExchanger = func(pubKey *crypto.BoxKey) error {
+		clientch <- pubKey
+		return clientH.VC.FinishHandshakeDialedVC(v, <-serverch)
+	}
+	serverExchanger = func(pubKey *crypto.BoxKey) (*crypto.BoxKey, error) {
+		serverch <- pubKey
+		return <-clientch, nil
+	}
+
+	c := serverH.VC.HandshakeAcceptedVC(v, server, bserver, serverExchanger, lopts...)
+	if err := clientH.VC.HandshakeDialedVC(client, clientExchanger, vcopts...); err != nil {
+		go func() { <-c }()
+		return nil, nil, err
+	}
+	hr := <-c
+	if hr.Error != nil {
+		return nil, nil, hr.Error
+	}
+	go acceptLoop(hr.Listener)
+	return clientH, clientH.VC, nil
+}
+
+// pipeLoop forwards slices written to h.bq to dst.
+func (h *helper) pipeLoop(dst *vc.VC) {
+	for {
+		w, bufs, err := h.bq.Get(nil)
+		if err != nil {
+			return
+		}
+		fid := id.Flow(w.ID())
+		for _, b := range bufs {
+			cipher, err := h.VC.Encrypt(fid, b)
+			if err != nil {
+				vlog.Infof("vc encrypt failed: %v", err)
+			}
+			if err := dst.DispatchPayload(fid, cipher); err != nil {
+				vlog.Infof("dispatch payload failed: %v", err)
+				return
+			}
+		}
+		if w.IsDrained() {
+			h.VC.ShutdownFlow(fid)
+			dst.ShutdownFlow(fid)
+		}
+	}
+}
+
+func acceptLoop(ln stream.Listener) {
+	for {
+		f, err := ln.Accept()
+		if err != nil {
+			return
+		}
+		go echoLoop(f)
+	}
+}
+
+func echoLoop(flow stream.Flow) {
+	var buf [vc.DefaultBytesBufferedPerFlow * 20]byte
+	for {
+		n, err := flow.Read(buf[:])
+		if err == io.EOF {
+			return
+		}
+		if err == nil {
+			_, err = flow.Write(buf[:n])
+		}
+		if err != nil {
+			panic(err)
+		}
+	}
+}
+
+func (h *helper) NotifyOfNewFlow(vci id.VC, fid id.Flow, bytes uint) {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	if h.otherEnd != nil {
+		if err := h.otherEnd.VC.AcceptFlow(fid); err != nil {
+			panic(verror.DebugString(err))
+		}
+		h.otherEnd.VC.ReleaseCounters(fid, uint32(bytes))
+	}
+}
+
+func (h *helper) AddReceiveBuffers(vci id.VC, fid id.Flow, bytes uint) {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	if h.otherEnd != nil {
+		h.otherEnd.VC.ReleaseCounters(fid, uint32(bytes))
+	}
+}
+
+func (h *helper) NewWriter(vci id.VC, fid id.Flow, priority bqueue.Priority) (bqueue.Writer, error) {
+	return h.bq.NewWriter(bqueue.ID(fid), priority, DefaultBytesBufferedPerFlow)
+}
+
+func (h *helper) Close() {
+	h.VC.Close(fmt.Errorf("helper closed"))
+	h.bq.Close()
+	h.mu.Lock()
+	otherEnd := h.otherEnd
+	h.otherEnd = nil
+	h.mu.Unlock()
+	if otherEnd != nil {
+		otherEnd.mu.Lock()
+		otherEnd.otherEnd = nil
+		otherEnd.mu.Unlock()
+		otherEnd.Close()
+	}
+}
+
+type endpoint naming.RoutingID
+
+func (e endpoint) Network() string                          { return "test" }
+func (e endpoint) VersionedString(int) string               { return e.String() }
+func (e endpoint) String() string                           { return naming.RoutingID(e).String() }
+func (e endpoint) Name() string                             { return naming.JoinAddressName(e.String(), "") }
+func (e endpoint) RoutingID() naming.RoutingID              { return naming.RoutingID(e) }
+func (e endpoint) Addr() net.Addr                           { return nil }
+func (e endpoint) ServesMountTable() bool                   { return false }
+func (e endpoint) ServesLeaf() bool                         { return false }
+func (e endpoint) BlessingNames() []string                  { return nil }
+func (e endpoint) RPCVersionRange() version.RPCVersionRange { return version.RPCVersionRange{} }
diff --git a/runtime/internal/rpc/stream/vc/writer.go b/runtime/internal/rpc/stream/vc/writer.go
new file mode 100644
index 0000000..32f51d3
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/writer.go
@@ -0,0 +1,200 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+import (
+	"io"
+	"sync"
+	"sync/atomic"
+
+	"v.io/v23/verror"
+
+	"v.io/x/ref/runtime/internal/lib/bqueue"
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	vsync "v.io/x/ref/runtime/internal/lib/sync"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+)
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating them n times in the final error
+	// message visible to the user.
+	errWriterClosed     = reg(".errWriterClosed", "attempt to call Write on Flow that has been Closed")
+	errBQueuePutFailed  = reg(".errBqueuePutFailed", "bqueue.Writer.Put failed{:3}")
+	errFailedToGetQuota = reg(".errFailedToGetQuota", "failed to get quota from receive buffers shared by all new flows on a VC{:3}")
+	errCanceled         = reg(".errCanceled", "underlying queues canceled")
+)
+
+// writer implements the io.Writer and SetWriteDeadline interfaces for Flow.
+type writer struct {
+	MTU            int              // Maximum size (in bytes) of each slice Put into Sink.
+	Sink           bqueue.Writer    // Buffer queue writer where data from Write is sent as iobuf.Slice objects.
+	Alloc          *iobuf.Allocator // Allocator for iobuf.Slice objects. GUARDED_BY(mu)
+	SharedCounters *vsync.Semaphore // Semaphore hosting counters shared by all flows over a VC.
+
+	mu         sync.Mutex      // Guards calls to Write.
+	wroteOnce  bool            // GUARDED_BY(mu)
+	isClosed   bool            // GUARDED_BY(mu)
+	closeError error           // GUARDED_BY(mu)
+	closed     chan struct{}   // GUARDED_BY(mu)
+	deadline   <-chan struct{} // GUARDED_BY(mu)
+
+	// Total number of bytes filled in by all Write calls on this writer.
+	// Atomic operations are used to manipulate it.
+	totalBytes uint32
+
+	// Accounting for counters borrowed from the shared pool.
+	muSharedCountersBorrowed sync.Mutex
+	sharedCountersBorrowed   int // GUARDED_BY(muSharedCountersBorrowed)
+}
+
+func newWriter(mtu int, sink bqueue.Writer, alloc *iobuf.Allocator, counters *vsync.Semaphore) *writer {
+	return &writer{
+		MTU:            mtu,
+		Sink:           sink,
+		Alloc:          alloc,
+		SharedCounters: counters,
+		closed:         make(chan struct{}),
+		closeError:     verror.New(errWriterClosed, nil),
+	}
+}
+
+// shutdown closes the writer and discards any queued up write buffers, i.e.,
+// the bqueue.Get call will not see the buffers queued up at this writer.
+// If removeWriter is true the writer will also be removed entirely from the
+// bqueue, otherwise the now empty writer will eventually be returned by
+// bqueue.Get.
+func (w *writer) shutdown(removeWriter bool) {
+	w.Sink.Shutdown(removeWriter)
+	w.finishClose(true)
+}
+
+// Close closes the writer without discarding any queued up write buffers.
+func (w *writer) Close() {
+	w.Sink.Close()
+	w.finishClose(false)
+}
+
+func (w *writer) IsClosed() bool {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	return w.isClosed
+}
+
+func (w *writer) Closed() <-chan struct{} {
+	return w.closed
+}
+
+func (w *writer) finishClose(remoteShutdown bool) {
+	// IsClosed() and Closed() indicate that the writer is closed before
+	// finishClose() completes. This is safe because Alloc and shared counters
+	// are guarded, and are not accessed elsewhere after w.closed is closed.
+	w.mu.Lock()
+	// finishClose() is idempotent, but Go's builtin close is not.
+	if !w.isClosed {
+		w.isClosed = true
+		if remoteShutdown {
+			w.closeError = io.EOF
+		}
+		close(w.closed)
+	}
+
+	w.Alloc.Release()
+	w.mu.Unlock()
+
+	w.muSharedCountersBorrowed.Lock()
+	w.SharedCounters.IncN(uint(w.sharedCountersBorrowed))
+	w.sharedCountersBorrowed = 0
+	w.muSharedCountersBorrowed.Unlock()
+}
+
+// Write implements the Write call for a Flow.
+//
+// Flow control is achieved using receive buffers (aka counters), wherein the
+// receiving end sends out the number of bytes that it is willing to read. To
+// avoid an additional round-trip for the creation of new flows, the very first
+// write of a new flow borrows counters from a shared pool.
+func (w *writer) Write(b []byte) (int, error) {
+	written := 0
+	// net.Conn requires that multiple goroutines be able to invoke methods
+	// simultaneously.
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	if w.isClosed {
+		if w.closeError == io.EOF {
+			return 0, io.EOF
+		}
+		return 0, verror.New(stream.ErrBadState, nil, w.closeError)
+	}
+
+	for len(b) > 0 {
+		n := len(b)
+		if n > w.MTU {
+			n = w.MTU
+		}
+		if !w.wroteOnce && w.SharedCounters != nil {
+			w.wroteOnce = true
+			if n > MaxSharedBytes {
+				n = MaxSharedBytes
+			}
+			if err := w.SharedCounters.DecN(uint(n), w.deadline); err != nil {
+				if err == vsync.ErrCanceled {
+					return 0, stream.NewNetError(verror.New(stream.ErrNetwork, nil, verror.New(errCanceled, nil)), true, false)
+				}
+				return 0, verror.New(stream.ErrNetwork, nil, verror.New(errFailedToGetQuota, nil, err))
+			}
+			w.muSharedCountersBorrowed.Lock()
+			w.sharedCountersBorrowed = n
+			w.muSharedCountersBorrowed.Unlock()
+			w.Sink.Release(n)
+		}
+		slice := w.Alloc.Copy(b[:n])
+		if err := w.Sink.Put(slice, w.deadline); err != nil {
+			slice.Release()
+			atomic.AddUint32(&w.totalBytes, uint32(written))
+			switch err {
+			case bqueue.ErrCancelled, vsync.ErrCanceled:
+				return written, stream.NewNetError(verror.New(stream.ErrNetwork, nil, verror.New(errCanceled, nil)), true, false)
+			case bqueue.ErrWriterIsClosed:
+				return written, verror.New(stream.ErrBadState, nil, verror.New(errWriterClosed, nil))
+			default:
+				return written, verror.New(stream.ErrNetwork, nil, verror.New(errBQueuePutFailed, nil, err))
+			}
+		}
+		written += n
+		b = b[n:]
+	}
+	atomic.AddUint32(&w.totalBytes, uint32(written))
+	return written, nil
+}
+
+func (w *writer) SetDeadline(deadline <-chan struct{}) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	w.deadline = deadline
+}
+
+// Release allows the next 'bytes' of data to be removed from the buffer queue
+// writer and passed to bqueue.Get.
+func (w *writer) Release(bytes int) {
+	w.muSharedCountersBorrowed.Lock()
+	switch {
+	case w.sharedCountersBorrowed == 0:
+		w.Sink.Release(bytes)
+	case w.sharedCountersBorrowed >= bytes:
+		w.SharedCounters.IncN(uint(bytes))
+		w.sharedCountersBorrowed -= bytes
+	default:
+		w.SharedCounters.IncN(uint(w.sharedCountersBorrowed))
+		w.Sink.Release(bytes - w.sharedCountersBorrowed)
+		w.sharedCountersBorrowed = 0
+	}
+	w.muSharedCountersBorrowed.Unlock()
+}
+
+func (w *writer) BytesWritten() uint32 {
+	return atomic.LoadUint32(&w.totalBytes)
+}
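
For reference, the borrow/release accounting described in the Write comment can be exercised from inside package vc; a minimal sketch assuming writer.go's imports plus the drrqueue package used by the tests below (the function name is illustrative):

```
func exampleWriterAccounting() {
	bq := drrqueue.New(128)
	defer bq.Close()
	bw, err := bq.NewWriter(0, 0, 10) // flow id 0, priority 0, 10 bytes of flow quota
	if err != nil {
		panic(err)
	}
	shared := vsync.NewSemaphore()
	shared.IncN(4) // the shared pool starts with 4 bytes
	w := newWriter(16, bw, iobuf.NewAllocator(iobuf.NewPool(0), 0), shared)
	w.Write([]byte("abcd")) // first write: borrows all 4 bytes from the shared pool
	w.Release(10)           // returns the 4 borrowed bytes, then credits 6 to the flow
	w.Close()               // queued data is not discarded; bqueue.Get still sees it
}
```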
diff --git a/runtime/internal/rpc/stream/vc/writer_test.go b/runtime/internal/rpc/stream/vc/writer_test.go
new file mode 100644
index 0000000..baaebfd
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/writer_test.go
@@ -0,0 +1,223 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+import (
+	"bytes"
+	"io"
+	"net"
+	"reflect"
+	"testing"
+
+	"v.io/v23/verror"
+
+	"v.io/x/ref/runtime/internal/lib/bqueue"
+	"v.io/x/ref/runtime/internal/lib/bqueue/drrqueue"
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	"v.io/x/ref/runtime/internal/lib/sync"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+)
+
+// TestWrite is a very basic, easy to follow, but not very thorough test of the
+// writer.  More thorough testing of flows (and implicitly the writer) is in
+// vc_test.go.
+func TestWrite(t *testing.T) {
+	bq := drrqueue.New(128)
+	defer bq.Close()
+
+	bw, err := bq.NewWriter(0, 0, 10)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	shared := sync.NewSemaphore()
+	shared.IncN(4)
+
+	w := newTestWriter(bw, shared)
+
+	if n, err := w.Write([]byte("abcd")); n != 4 || err != nil {
+		t.Errorf("Got (%d, %v) want (4, nil)", n, err)
+	}
+
+	// Should have used up 4 shared counters
+	if err := shared.TryDecN(1); err != sync.ErrTryAgain {
+		t.Errorf("Got %v want %v", err, sync.ErrTryAgain)
+	}
+
+	// Further Writes will block until some space has been released.
+	w.Release(10)
+	if n, err := w.Write([]byte("efghij")); n != 6 || err != nil {
+		t.Errorf("Got (%d, %v) want (6, nil)", n, err)
+	}
+	// And the release should have returned to the shared counters set
+	if err := shared.TryDecN(4); err != nil {
+		t.Errorf("Got %v want %v", err, nil)
+	}
+
+	// Further writes will block since all 10 bytes (provided to NewWriter)
+	// have been exhausted and Get hasn't been called on bq yet.
+	deadline := make(chan struct{})
+	w.SetDeadline(deadline)
+	close(deadline)
+
+	if n, err := w.Write([]byte("k")); n != 0 || !isTimeoutError(err) {
+		t.Errorf("Got (%d, %v) want (0, timeout error)", n, err)
+	}
+
+	w.Close()
+	if w.BytesWritten() != 10 {
+		t.Errorf("Got %d want %d", w.BytesWritten(), 10)
+	}
+
+	_, bufs, err := bq.Get(nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	var read bytes.Buffer
+	for _, b := range bufs {
+		read.Write(b.Contents)
+		b.Release()
+	}
+	if g, w := read.String(), "abcdefghij"; g != w {
+		t.Errorf("Got %q want %q", g, w)
+	}
+}
+
+func TestCloseBeforeWrite(t *testing.T) {
+	bq := drrqueue.New(128)
+	defer bq.Close()
+
+	bw, err := bq.NewWriter(0, 0, 10)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	shared := sync.NewSemaphore()
+	shared.IncN(4)
+
+	w := newTestWriter(bw, shared)
+	w.Close()
+
+	if n, err := w.Write([]byte{1, 2}); n != 0 || verror.ErrorID(err) != stream.ErrBadState.ID {
+		t.Errorf("Got (%v, %v) want (0, %v)", n, err, stream.ErrBadState)
+	}
+}
+
+func TestShutdownBeforeWrite(t *testing.T) {
+	bq := drrqueue.New(128)
+	defer bq.Close()
+
+	bw, err := bq.NewWriter(0, 0, 10)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	shared := sync.NewSemaphore()
+	shared.IncN(4)
+
+	w := newTestWriter(bw, shared)
+	w.shutdown(true)
+
+	if n, err := w.Write([]byte{1, 2}); n != 0 || err != io.EOF {
+		t.Errorf("Got (%v, %v) want (0, %v)", n, err, io.EOF)
+	}
+}
+
+func TestCloseDoesNotDiscardPendingWrites(t *testing.T) {
+	bq := drrqueue.New(128)
+	defer bq.Close()
+
+	bw, err := bq.NewWriter(0, 0, 10)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	shared := sync.NewSemaphore()
+	shared.IncN(2)
+
+	w := newTestWriter(bw, shared)
+	data := []byte{1, 2}
+	if n, err := w.Write(data); n != len(data) || err != nil {
+		t.Fatalf("Got (%d, %v) want (%d, nil)", n, err, len(data))
+	}
+	w.Close()
+
+	gbw, bufs, err := bq.Get(nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if gbw != bw {
+		t.Fatalf("Got %v want %v", gbw, bw)
+	}
+	if len(bufs) != 1 {
+		t.Fatalf("Got %d bufs, want 1", len(bufs))
+	}
+	if !reflect.DeepEqual(bufs[0].Contents, data) {
+		t.Fatalf("Got %v want %v", bufs[0].Contents, data)
+	}
+	if !gbw.IsDrained() {
+		t.Fatal("Expected bqueue.Writer to be drained")
+	}
+}
+
+func TestWriterCloseIsIdempotent(t *testing.T) {
+	bq := drrqueue.New(128)
+	defer bq.Close()
+
+	bw, err := bq.NewWriter(0, 0, 10)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	shared := sync.NewSemaphore()
+	shared.IncN(1)
+	w := newTestWriter(bw, shared)
+	if n, err := w.Write([]byte{1}); n != 1 || err != nil {
+		t.Fatalf("Got (%d, %v) want (1, nil)", n, err)
+	}
+	// Should have used up the shared counter.
+	if err := shared.TryDec(); err != sync.ErrTryAgain {
+		t.Fatalf("Got %v want %v", err, sync.ErrTryAgain)
+	}
+	w.Close()
+	// The shared counter should have been returned
+	if err := shared.TryDec(); err != nil {
+		t.Fatalf("Got %v want nil", err)
+	}
+	// Closing again shouldn't affect the shared counters
+	w.Close()
+	if err := shared.TryDec(); err != sync.ErrTryAgain {
+		t.Fatalf("Got %v want %v", err, sync.ErrTryAgain)
+	}
+}
+
+func TestClosedChannel(t *testing.T) {
+	bq := drrqueue.New(128)
+	defer bq.Close()
+
+	bw, err := bq.NewWriter(0, 0, 10)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	shared := sync.NewSemaphore()
+	shared.IncN(4)
+
+	w := newTestWriter(bw, shared)
+	go w.Close()
+	<-w.Closed()
+
+	if n, err := w.Write([]byte{1, 2}); n != 0 || verror.ErrorID(err) != stream.ErrBadState.ID {
+		t.Errorf("Got (%v, %v) want (0, %v)", n, err, stream.ErrBadState.ID)
+	}
+}
+
+func newTestWriter(bqw bqueue.Writer, shared *sync.Semaphore) *writer {
+	alloc := iobuf.NewAllocator(iobuf.NewPool(0), 0)
+	return newWriter(16, bqw, alloc, shared)
+}
+
+func isTimeoutError(err error) bool {
+	neterr, ok := err.(net.Error)
+	return ok && neterr.Timeout()
+}
diff --git a/runtime/internal/rpc/stream/vif/auth.go b/runtime/internal/rpc/stream/vif/auth.go
new file mode 100644
index 0000000..c0590f0
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/auth.go
@@ -0,0 +1,245 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vif
+
+import (
+	"crypto/rand"
+	"io"
+
+	"golang.org/x/crypto/nacl/box"
+
+	rpcversion "v.io/v23/rpc/version"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/crypto"
+	"v.io/x/ref/runtime/internal/rpc/stream/message"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+	"v.io/x/ref/runtime/internal/rpc/version"
+)
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating them n times in the final error
+	// message visible to the user.
+	errAuthFailed                      = reg(".errAuthFailed", "authentication failed{:3}")
+	errUnsupportedEncryptVersion       = reg(".errUnsupportedEncryptVersion", "unsupported encryption version {4} < {5}")
+	errNaclBoxVersionNegotiationFailed = reg(".errNaclBoxVersionNegotiationFailed", "nacl box encryption version negotiation failed")
+	errVersionNegotiationFailed        = reg(".errVersionNegotiationFailed", "encryption version negotiation failed")
+	nullCipher                         crypto.NullControlCipher
+)
+
+// privateData includes secret data we need for encryption.
+type privateData struct {
+	naclBoxPrivateKey crypto.BoxKey
+}
+
+// AuthenticateAsClient sends a Setup message if possible.  If it does, it
+// chooses encryption based on the maximum version supported by both ends.
+//
+// The sequence is initiated by the client.
+//
+//    - The client sends a Setup message to the server, containing the client's
+//      supported versions, and the client's crypto options.  The Setup message
+//      is sent in the clear.
+//
+//    - When the server receives the Setup message, it calls
+//      AuthenticateAsServer, which constructs a response Setup containing
+//      the server's version range, and any crypto options.
+//
+//    - The client and server use the public/private key pairs
+//      generated for the Setup messages to create an encrypted stream
+//      of SetupStream messages for the remainder of the authentication
+//      setup.  The encryption uses NewControlCipherRPC6, which is based
+//      on golang.org/x/crypto/nacl/box.
+//
+//    - Once the encrypted SetupStream channel is set up, the client and
+//      server authenticate using the vc.AuthenticateAs{Client,Server} protocol.
+//
+// Note that the Setup messages are sent in the clear, so they are subject to
+// modification by a man-in-the-middle, which can currently force a downgrade by
+// modifying the acceptable version ranges downward.  This can be addressed by
+// including a hash of the Setup message in the encrypted stream.  It is
+// likely that this will be addressed in subsequent protocol versions.
+func AuthenticateAsClient(writer io.Writer, reader *iobuf.Reader, versions *version.Range, params security.CallParams, auth *vc.ServerAuthorizer) (crypto.ControlCipher, error) {
+	if versions == nil {
+		versions = version.SupportedRange
+	}
+
+	// Send the client's public data.
+	pvt, pub, err := makeSetup(versions, params.LocalPrincipal != nil)
+	if err != nil {
+		return nil, verror.New(stream.ErrSecurity, nil, err)
+	}
+
+	errch := make(chan error, 1)
+	go func() {
+		errch <- message.WriteTo(writer, pub, nullCipher)
+	}()
+
+	pmsg, err := message.ReadFrom(reader, nullCipher)
+	if err != nil {
+		return nil, verror.New(stream.ErrNetwork, nil, err)
+	}
+	ppub, ok := pmsg.(*message.Setup)
+	if !ok {
+		return nil, verror.New(stream.ErrSecurity, nil, verror.New(errVersionNegotiationFailed, nil))
+	}
+
+	// Wait for the write to succeed.
+	if err := <-errch; err != nil {
+		return nil, verror.New(stream.ErrNetwork, nil, err)
+	}
+
+	// Choose the max version in the intersection.
+	vrange, err := pub.Versions.Intersect(&ppub.Versions)
+	if err != nil {
+		return nil, verror.New(stream.ErrNetwork, nil, err)
+	}
+	v := vrange.Max
+
+	if params.LocalPrincipal == nil {
+		return nullCipher, nil
+	}
+
+	// Perform the authentication.
+	return authenticateAsClient(writer, reader, params, auth, pvt, pub, ppub, v)
+}
+
+func authenticateAsClient(writer io.Writer, reader *iobuf.Reader, params security.CallParams, auth *vc.ServerAuthorizer,
+	pvt *privateData, pub, ppub *message.Setup, version rpcversion.RPCVersion) (crypto.ControlCipher, error) {
+	pbox := ppub.NaclBox()
+	if pbox == nil {
+		return nil, verror.New(errNaclBoxVersionNegotiationFailed, nil)
+	}
+	c := crypto.NewControlCipherRPC6(&pbox.PublicKey, &pvt.naclBoxPrivateKey, false)
+	sconn := newSetupConn(writer, reader, c)
+	// TODO(jyh): act upon the authentication results.
+	_, _, _, err := vc.AuthenticateAsClient(sconn, crypto.NewNullCrypter(), params, auth, version)
+	if err != nil {
+		return nil, err
+	}
+	return c, nil
+}
+
+// AuthenticateAsServer handles a Setup message, choosing authentication
+// based on the max common version.
+//
+// See AuthenticateAsClient for a description of the negotiation.
+func AuthenticateAsServer(writer io.Writer, reader *iobuf.Reader, versions *version.Range, principal security.Principal, lBlessings security.Blessings,
+	dc vc.DischargeClient) (crypto.ControlCipher, error) {
+	var err error
+	if versions == nil {
+		versions = version.SupportedRange
+	}
+
+	// Send server's public data.
+	pvt, pub, err := makeSetup(versions, principal != nil)
+	if err != nil {
+		return nil, err
+	}
+
+	errch := make(chan error, 1)
+	readch := make(chan struct{})
+	go func() {
+		// TODO(mattr,ribrdb): In the case of the agent, which is
+		// currently the only user of insecure connections, we need to
+		// wait for the client to initiate the communication.  The agent
+		// sends an extra first byte to clients, which clients read before
+		// dialing their side of the vif.  If we send this message before
+		// the magic byte has been sent, the client will use the first
+		// byte of this message instead, rendering the remainder of the
+		// stream uninterpretable.
+		if principal == nil {
+			<-readch
+		}
+		err := message.WriteTo(writer, pub, nullCipher)
+		errch <- err
+	}()
+
+	// Read client's public data.
+	pmsg, err := message.ReadFrom(reader, nullCipher)
+	close(readch) // Note: we need to close this whether we get an error or not.
+	if err != nil {
+		return nil, verror.New(stream.ErrNetwork, nil, err)
+	}
+	ppub, ok := pmsg.(*message.Setup)
+	if !ok {
+		return nil, verror.New(stream.ErrSecurity, nil, verror.New(errVersionNegotiationFailed, nil))
+	}
+
+	// Wait for the write to succeed.
+	if err := <-errch; err != nil {
+		return nil, err
+	}
+
+	// Choose the max version in the intersection.
+	vrange, err := versions.Intersect(&ppub.Versions)
+	if err != nil {
+		return nil, verror.New(stream.ErrNetwork, nil, err)
+	}
+	v := vrange.Max
+
+	if principal == nil {
+		return nullCipher, nil
+	}
+
+	// Perform authentication.
+	return authenticateAsServerRPC6(writer, reader, principal, lBlessings, dc, pvt, pub, ppub, v)
+}
+
+func authenticateAsServerRPC6(writer io.Writer, reader *iobuf.Reader, principal security.Principal, lBlessings security.Blessings, dc vc.DischargeClient,
+	pvt *privateData, pub, ppub *message.Setup, version rpcversion.RPCVersion) (crypto.ControlCipher, error) {
+	box := ppub.NaclBox()
+	if box == nil {
+		return nil, verror.New(errNaclBoxVersionNegotiationFailed, nil)
+	}
+	c := crypto.NewControlCipherRPC6(&box.PublicKey, &pvt.naclBoxPrivateKey, true)
+	sconn := newSetupConn(writer, reader, c)
+	// TODO(jyh): act upon authentication results.
+	_, _, err := vc.AuthenticateAsServer(sconn, principal, lBlessings, dc, crypto.NewNullCrypter(), version)
+	if err != nil {
+		return nil, verror.New(errAuthFailed, nil, err)
+	}
+	return c, nil
+}
+
+// getDischargeClient returns the vc.DischargeClient needed to fetch server discharges for this call.
+// TODO(suharshs): Perhaps we should pass dischargeClient explicitly?
+func getDischargeClient(lopts []stream.ListenerOpt) vc.DischargeClient {
+	for _, o := range lopts {
+		switch v := o.(type) {
+		case vc.DischargeClient:
+			return v
+		}
+	}
+	return nil
+}
+
+// makeSetup constructs the options that this process can support.
+func makeSetup(versions *version.Range, secure bool) (*privateData, *message.Setup, error) {
+	var options []message.SetupOption
+	var pvt *privateData
+	if secure {
+		pubKey, pvtKey, err := box.GenerateKey(rand.Reader)
+		if err != nil {
+			return nil, nil, err
+		}
+		options = []message.SetupOption{&message.NaclBox{PublicKey: *pubKey}}
+		pvt = &privateData{
+			naclBoxPrivateKey: *pvtKey,
+		}
+	}
+
+	pub := &message.Setup{
+		Versions: *versions,
+		Options:  options,
+	}
+
+	return pvt, pub, nil
+}
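
For orientation, a minimal sketch of the dialing side of this handshake; clientHandshake is a hypothetical wrapper, and passing versions == nil falls back to version.SupportedRange as the code above shows:

```
func clientHandshake(conn net.Conn, principal security.Principal, auth *vc.ServerAuthorizer) (crypto.ControlCipher, error) {
	reader := iobuf.NewReader(iobuf.NewPool(0), conn)
	params := security.CallParams{LocalPrincipal: principal}
	// The returned ControlCipher encrypts all subsequent messages on this VIF.
	return AuthenticateAsClient(conn, reader, nil, params, auth)
}
```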
diff --git a/runtime/internal/rpc/stream/vif/doc.go b/runtime/internal/rpc/stream/vif/doc.go
new file mode 100644
index 0000000..9f00db4
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package vif implements a virtual network interface that wraps a net.Conn
+// and provides the ability to Dial and Listen for virtual circuits
+// (v.io/x/ref/runtime/internal/rpc/stream.VC).
+package vif
diff --git a/runtime/internal/rpc/stream/vif/faketimer.go b/runtime/internal/rpc/stream/vif/faketimer.go
new file mode 100644
index 0000000..914c4e2
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/faketimer.go
@@ -0,0 +1,90 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vif
+
+import (
+	"sync"
+	"time"
+)
+
+// Since an idle timer with a short timeout can expire before establishing a VC,
+// we provide a fake timer to reduce dependence on real time in unittests.
+type fakeTimer struct {
+	mu          sync.Mutex
+	timeout     time.Duration
+	timeoutFunc func()
+	timer       timer
+	stopped     bool
+}
+
+func newFakeTimer(d time.Duration, f func()) *fakeTimer {
+	return &fakeTimer{
+		timeout:     d,
+		timeoutFunc: f,
+		timer:       noopTimer{},
+	}
+}
+
+func (t *fakeTimer) Stop() bool {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.stopped = true
+	return t.timer.Stop()
+}
+
+func (t *fakeTimer) Reset(d time.Duration) bool {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.timeout = d
+	t.stopped = false
+	return t.timer.Reset(t.timeout)
+}
+
+func (t *fakeTimer) run(release <-chan struct{}, wg *sync.WaitGroup) {
+	defer wg.Done()
+	<-release // Wait until notified to run.
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.timeout > 0 {
+		t.timer = newTimer(t.timeout, t.timeoutFunc)
+	}
+	if t.stopped {
+		t.timer.Stop()
+	}
+}
+
+// SetFakeTimers causes the idle timers to use a fake timer instead of one
+// based on real time. The timers will be triggered when the returned function
+// is invoked, at which point the timer factory will be restored to what it
+// was before calling this function.
+//
+// Usage:
+//   triggerTimers := SetFakeTimers()
+//   ...
+//   triggerTimers()
+//
+// This function cannot be called concurrently.
+func SetFakeTimers() func() {
+	backup := newTimer
+
+	var mu sync.Mutex
+	var wg sync.WaitGroup
+	release := make(chan struct{})
+	newTimer = func(d time.Duration, f func()) timer {
+		mu.Lock()
+		defer mu.Unlock()
+		wg.Add(1)
+		t := newFakeTimer(d, f)
+		go t.run(release, &wg)
+		return t
+	}
+	return func() {
+		mu.Lock()
+		defer mu.Unlock()
+		newTimer = backup
+		close(release)
+		wg.Wait()
+	}
+}
diff --git a/runtime/internal/rpc/stream/vif/idletimer.go b/runtime/internal/rpc/stream/vif/idletimer.go
new file mode 100644
index 0000000..a9910c7
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/idletimer.go
@@ -0,0 +1,139 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vif
+
+import (
+	"sync"
+	"time"
+
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+)
+
+// idleTimerMap keeps track of the flows of each VC and calls the notify
+// function in its own goroutine when a VC has had no flows for some duration.
+type idleTimerMap struct {
+	mu         sync.Mutex
+	m          map[id.VC]*idleTimer
+	notifyFunc func(id.VC)
+	stopped    bool
+}
+
+type idleTimer struct {
+	set     map[id.Flow]struct{}
+	timeout time.Duration
+	timer   timer
+	stopped bool
+}
+
+type timer interface {
+	// Stop prevents the Timer from firing.
+	Stop() bool
+	// Reset changes the timer to expire after duration d.
+	Reset(d time.Duration) bool
+}
+
+// newIdleTimerMap returns a new idle timer map.
+func newIdleTimerMap(f func(id.VC)) *idleTimerMap {
+	return &idleTimerMap{
+		m:          make(map[id.VC]*idleTimer),
+		notifyFunc: f,
+	}
+}
+
+// Stop stops the idle timers for all VCs.
+func (m *idleTimerMap) Stop() {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if m.stopped {
+		return
+	}
+	for _, t := range m.m {
+		if !t.stopped {
+			t.timer.Stop()
+			t.stopped = true
+		}
+	}
+	m.stopped = true
+}
+
+// Insert starts the idle timer for the given VC. If there are no active flows
+// in the VC for the duration d, the notify function will be called in its own
+// goroutine. If d is zero, the idle timer is disabled.
+func (m *idleTimerMap) Insert(vci id.VC, d time.Duration) bool {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if m.stopped {
+		return false
+	}
+	if _, exists := m.m[vci]; exists {
+		return false
+	}
+	t := &idleTimer{
+		set:     make(map[id.Flow]struct{}),
+		timeout: d,
+	}
+	if t.timeout > 0 {
+		t.timer = newTimer(t.timeout, func() { m.notifyFunc(vci) })
+	} else {
+		t.timer = noopTimer{}
+	}
+	m.m[vci] = t
+	return true
+}
+
+// Delete deletes the idle timer for the given VC.
+func (m *idleTimerMap) Delete(vci id.VC) {
+	m.mu.Lock()
+	if t, exists := m.m[vci]; exists {
+		if !t.stopped {
+			t.timer.Stop()
+		}
+		delete(m.m, vci)
+	}
+	m.mu.Unlock()
+}
+
+// InsertFlow inserts the given flow into the given VC. Reserved system flows are ignored.
+func (m *idleTimerMap) InsertFlow(vci id.VC, fid id.Flow) {
+	if fid < vc.NumReservedFlows {
+		return
+	}
+	m.mu.Lock()
+	if t, exists := m.m[vci]; exists {
+		t.set[fid] = struct{}{}
+		if !t.stopped {
+			t.timer.Stop()
+			t.stopped = true
+		}
+	}
+	m.mu.Unlock()
+}
+
+// DeleteFlow deletes the given flow from the VC vci.
+func (m *idleTimerMap) DeleteFlow(vci id.VC, fid id.Flow) {
+	m.mu.Lock()
+	if t, exists := m.m[vci]; exists {
+		delete(t.set, fid)
+		if len(t.set) == 0 && t.stopped && !m.stopped {
+			t.timer.Reset(t.timeout)
+			t.stopped = false
+		}
+	}
+	m.mu.Unlock()
+}
+
+// To avoid dependence on real time in unittests, the factory function for
+// timers can be overridden (with SetFakeTimers). It should only be
+// overridden in unittests.
+var newTimer = defaultTimerFactory
+
+func defaultTimerFactory(d time.Duration, f func()) timer { return time.AfterFunc(d, f) }
+
+// A noop timer.
+type noopTimer struct{}
+
+func (t noopTimer) Stop() bool                 { return false }
+func (t noopTimer) Reset(d time.Duration) bool { return false }
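
The stop/re-arm rules above are easiest to see in code; a hypothetical wiring sketch from inside package vif (the function, the 30s timeout, and the elided notify body are illustrative):

```
func wireIdleTimers(vci id.VC, fid id.Flow) *idleTimerMap {
	m := newIdleTimerMap(func(vci id.VC) {
		// Runs on the timer's goroutine once vci has been idle; a real
		// VIF would close the VC here (closing logic elided).
	})
	m.Insert(vci, 30*time.Second) // arm the idle timer for a new VC
	m.InsertFlow(vci, fid)        // the first non-reserved flow stops the timer
	m.DeleteFlow(vci, fid)        // deleting the last flow re-arms it
	m.Delete(vci)                 // closing the VC removes the timer entirely
	return m
}
```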
diff --git a/runtime/internal/rpc/stream/vif/idletimer_test.go b/runtime/internal/rpc/stream/vif/idletimer_test.go
new file mode 100644
index 0000000..d5bd6f3
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/idletimer_test.go
@@ -0,0 +1,130 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vif
+
+import (
+	"testing"
+	"time"
+
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+)
+
+func TestIdleTimer(t *testing.T) {
+	const (
+		idleTime = 5 * time.Millisecond
+		waitTime = idleTime * 2
+
+		vc1 id.VC = 1
+		vc2 id.VC = 2
+
+		flow1        id.Flow = vc.NumReservedFlows
+		flow2        id.Flow = vc.NumReservedFlows + 1
+		flowReserved id.Flow = vc.NumReservedFlows - 1
+	)
+
+	notify := make(chan interface{})
+	notifyFunc := func(vci id.VC) { notify <- vci }
+
+	m := newIdleTimerMap(notifyFunc)
+
+	// An empty map. Should not be notified.
+	if err := WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+
+	m.Insert(vc1, idleTime)
+
+	// A new empty VC. Should be notified.
+	if err := WaitForNotifications(notify, vc1); err != nil {
+		t.Error(err)
+	}
+
+	m.Delete(vc1)
+	m.Insert(vc1, idleTime)
+
+	// A VC with one flow. Should not be notified.
+	m.InsertFlow(vc1, flow1)
+	if err := WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+
+	// Try to delete non-existent flow. Should not be notified.
+	m.DeleteFlow(vc1, flow2)
+	if err := WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+
+	// Delete the flow. Should be notified.
+	m.DeleteFlow(vc1, flow1)
+	if err := WaitForNotifications(notify, vc1); err != nil {
+		t.Error(err)
+	}
+
+	// Try to delete the deleted flow again. Should not be notified.
+	m.DeleteFlow(vc1, flow1)
+	if err := WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+
+	m.Delete(vc1)
+	m.Insert(vc1, idleTime)
+
+	// Delete an empty VC. Should not be notified.
+	m.Delete(vc1)
+	if err := WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+
+	m.Insert(vc1, idleTime)
+
+	// A VC with two flows.
+	m.InsertFlow(vc1, flow1)
+	m.InsertFlow(vc1, flow2)
+
+	// Delete the first flow twice. Should not be notified.
+	m.DeleteFlow(vc1, flow1)
+	m.DeleteFlow(vc1, flow1)
+	if err := WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+
+	// Delete the second flow. Should be notified.
+	m.DeleteFlow(vc1, flow2)
+	if err := WaitForNotifications(notify, vc1); err != nil {
+		t.Error(err)
+	}
+
+	m.Delete(vc1)
+	m.Insert(vc1, idleTime)
+
+	// Insert a reserved flow. It is ignored, so we should still be notified.
+	m.InsertFlow(vc1, flowReserved)
+	if err := WaitForNotifications(notify, vc1); err != nil {
+		t.Error(err)
+	}
+
+	m.Delete(vc1)
+	m.Insert(vc1, idleTime)
+	m.Insert(vc2, idleTime)
+
+	// Multiple VCs. Should be notified for each.
+	if err := WaitForNotifications(notify, vc1, vc2); err != nil {
+		t.Error(err)
+	}
+
+	m.Delete(vc1)
+	m.Delete(vc2)
+	m.Insert(vc1, idleTime)
+
+	// Stop the timer. Should not be notified.
+	m.Stop()
+	if m.Insert(vc1, idleTime) {
+		t.Fatal("timer has been stopped, but can insert a vc")
+	}
+	if err := WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+}
diff --git a/runtime/internal/rpc/stream/vif/set.go b/runtime/internal/rpc/stream/vif/set.go
new file mode 100644
index 0000000..3032dfc
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/set.go
@@ -0,0 +1,146 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vif
+
+import (
+	"math/rand"
+	"net"
+	"runtime"
+	"sync"
+
+	"v.io/v23/rpc"
+)
+
+// Set implements a set of VIFs keyed by (network, address) of the underlying
+// connection.  Multiple goroutines can invoke methods on the Set
+// simultaneously.
+type Set struct {
+	mu      sync.RWMutex
+	set     map[string][]*VIF // GUARDED_BY(mu)
+	started map[string]bool   // GUARDED_BY(mu)
+	keys    map[*VIF]string   // GUARDED_BY(mu)
+	cond    *sync.Cond
+}
+
+// NewSet returns a new Set of VIFs.
+func NewSet() *Set {
+	s := &Set{
+		set:     make(map[string][]*VIF),
+		started: make(map[string]bool),
+		keys:    make(map[*VIF]string),
+	}
+	s.cond = sync.NewCond(&s.mu)
+	return s
+}
+
+// BlockingFind returns a VIF where the remote end of the underlying network connection
+// is identified by the provided (network, address). Returns nil if there is no
+// such VIF.
+//
+// The caller is required to call the returned unblock function to avoid deadlock.
+// Until the returned function is called, all new BlockingFind calls for this
+// network and address will block.
+func (s *Set) BlockingFind(network, address string) (*VIF, func()) {
+	if isNonDistinctConn(network, address) {
+		return nil, func() {}
+	}
+
+	k := key(network, address)
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	for s.started[k] {
+		s.cond.Wait()
+	}
+
+	_, _, _, p := rpc.RegisteredProtocol(network)
+	for _, n := range p {
+		if vifs := s.set[key(n, address)]; len(vifs) > 0 {
+			return vifs[rand.Intn(len(vifs))], func() {}
+		}
+	}
+
+	s.started[k] = true
+	return nil, func() { s.unblock(network, address) }
+}
+
+// unblock marks the (network, address) pair as no longer started and wakes up
+// all waiting goroutines.
+func (s *Set) unblock(network, address string) {
+	s.mu.Lock()
+	delete(s.started, key(network, address))
+	s.cond.Broadcast()
+	s.mu.Unlock()
+}
+
+// Insert adds a VIF to the set.
+func (s *Set) Insert(vif *VIF, network, address string) {
+	k := key(network, address)
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.keys[vif] = k
+	vifs := s.set[k]
+	for _, v := range vifs {
+		if v == vif {
+			return
+		}
+	}
+	s.set[k] = append(vifs, vif)
+}
+
+// Delete removes a VIF from the set.
+func (s *Set) Delete(vif *VIF) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	k := s.keys[vif]
+	vifs := s.set[k]
+	for i, v := range vifs {
+		if v == vif {
+			if len(vifs) == 1 {
+				delete(s.set, k)
+			} else {
+				s.set[k] = append(vifs[:i], vifs[i+1:]...)
+			}
+			delete(s.keys, vif)
+			return
+		}
+	}
+}
+
+// List returns the elements in the set as a slice.
+func (s *Set) List() []*VIF {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	l := make([]*VIF, 0, len(s.set))
+	for _, vifs := range s.set {
+		l = append(l, vifs...)
+	}
+	return l
+}
+
+func key(network, address string) string {
+	if network == "tcp" || network == "ws" {
+		host, _, _ := net.SplitHostPort(address)
+		switch ip := net.ParseIP(host); {
+		case ip == nil:
+			// This may happen when the address is a hostname. We ignore this
+			// case, since a VIF cannot be looked up by hostname anyway.
+		case ip.To4() != nil:
+			network += "4"
+		default:
+			network += "6"
+		}
+	}
+	return network + ":" + address
+}
+
+// Some network connections (like those created with net.Pipe or Unix sockets)
+// do not end up with distinct net.Addrs on distinct net.Conns.
+func isNonDistinctConn(network, address string) bool {
+	return len(address) == 0 ||
+		(network == "pipe" && address == "pipe") ||
+		(runtime.GOOS == "linux" && network == "unix" && address == "@")
+}
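
The BlockingFind contract above implies a find-or-dial pattern on the caller's side; a sketch where dialVIF is a hypothetical helper:

```
func findOrDial(set *Set, network, address string) (*VIF, error) {
	vf, unblock := set.BlockingFind(network, address)
	if vf != nil {
		unblock() // a no-op when a VIF was found
		return vf, nil
	}
	// Cache miss: other BlockingFind calls for this key now block on us.
	vf, err := dialVIF(network, address)
	if err == nil {
		set.Insert(vf, network, address)
	}
	unblock() // wake the waiters whether or not dialing succeeded
	return vf, err
}
```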
diff --git a/runtime/internal/rpc/stream/vif/set_test.go b/runtime/internal/rpc/stream/vif/set_test.go
new file mode 100644
index 0000000..08b8ea8
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/set_test.go
@@ -0,0 +1,340 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vif_test
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net"
+	"os"
+	"path"
+	"testing"
+	"time"
+
+	"v.io/v23/naming"
+	"v.io/v23/rpc"
+
+	_ "v.io/x/ref/runtime/factories/generic"
+	"v.io/x/ref/runtime/internal/rpc/stream/vif"
+	"v.io/x/ref/test/testutil"
+)
+
+var supportsIPv6 bool
+
+func init() {
+	simpleResolver := func(network, address string) (string, string, error) {
+		return network, address, nil
+	}
+	rpc.RegisterProtocol("unix", net.DialTimeout, simpleResolver, net.Listen)
+
+	// Check whether the platform supports IPv6.
+	if ln, err := net.Listen("tcp6", "[::1]:0"); err == nil {
+		ln.Close()
+		supportsIPv6 = true
+	}
+}
+
+func newConn(network, address string) (net.Conn, net.Conn, error) {
+	dfunc, _, lfunc, _ := rpc.RegisteredProtocol(network)
+	ln, err := lfunc(network, address)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer ln.Close()
+
+	done := make(chan net.Conn)
+	go func() {
+		conn, err := ln.Accept()
+		if err != nil {
+			panic(err)
+		}
+		conn.Read(make([]byte, 1)) // Read a dummy byte.
+		done <- conn
+	}()
+
+	conn, err := dfunc(ln.Addr().Network(), ln.Addr().String(), 1*time.Second)
+	if err != nil {
+		return nil, nil, err
+	}
+	// Write a dummy byte, since the wsh listener waits for the ws magic bytes.
+	conn.Write([]byte("."))
+	return conn, <-done, nil
+}
+
+func newVIF(c, s net.Conn) (*vif.VIF, *vif.VIF, error) {
+	done := make(chan *vif.VIF)
+	go func() {
+		principal := testutil.NewPrincipal("accepted")
+		blessings := principal.BlessingStore().Default()
+		vf, err := vif.InternalNewAcceptedVIF(s, naming.FixedRoutingID(0x5), principal, blessings, nil, nil)
+		if err != nil {
+			panic(err)
+		}
+		done <- vf
+	}()
+
+	vf, err := vif.InternalNewDialedVIF(c, naming.FixedRoutingID(0xc), testutil.NewPrincipal("dialed"), nil, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	return vf, <-done, nil
+}
+
+func diff(a, b []string) []string {
+	m := make(map[string]struct{})
+	for _, x := range b {
+		m[x] = struct{}{}
+	}
+	d := make([]string, 0, len(a))
+	for _, x := range a {
+		if _, ok := m[x]; !ok {
+			d = append(d, x)
+		}
+	}
+	return d
+}
+
+func find(set *vif.Set, n, a string) *vif.VIF {
+	found, unblock := set.BlockingFind(n, a)
+	unblock()
+	return found
+}
+
+func TestSetBasic(t *testing.T) {
+	sockdir, err := ioutil.TempDir("", "TestSetBasic")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(sockdir)
+
+	all := rpc.RegisteredProtocols()
+	unknown := naming.UnknownProtocol
+	tests := []struct {
+		network, address string
+		compatibles      []string
+	}{
+		{"tcp", "127.0.0.1:0", []string{"tcp", "tcp4", "wsh", "wsh4", unknown}},
+		{"tcp4", "127.0.0.1:0", []string{"tcp", "tcp4", "wsh", "wsh4", unknown}},
+		{"tcp", "[::1]:0", []string{"tcp", "tcp6", "wsh", "wsh6", unknown}},
+		{"tcp6", "[::1]:0", []string{"tcp", "tcp6", "wsh", "wsh6", unknown}},
+		{"ws", "127.0.0.1:0", []string{"ws", "ws4", "wsh", "wsh4", unknown}},
+		{"ws4", "127.0.0.1:0", []string{"ws", "ws4", "wsh", "wsh4", unknown}},
+		{"ws", "[::1]:0", []string{"ws", "ws6", "wsh", "wsh6", unknown}},
+		{"ws6", "[::1]:0", []string{"ws", "ws6", "wsh", "wsh6", unknown}},
+		// wsh dial always uses tcp.
+		{"wsh", "127.0.0.1:0", []string{"tcp", "tcp4", "wsh", "wsh4", unknown}},
+		{"wsh4", "127.0.0.1:0", []string{"tcp", "tcp4", "wsh", "wsh4", unknown}},
+		{"wsh", "[::1]:0", []string{"tcp", "tcp6", "wsh", "wsh6", unknown}},
+		{"wsh6", "[::1]:0", []string{"tcp", "tcp6", "wsh", "wsh6", unknown}},
+		{unknown, "127.0.0.1:0", []string{"tcp", "tcp4", "wsh", "wsh4", unknown}},
+		{unknown, "[::1]:0", []string{"tcp", "tcp6", "wsh", "wsh6", unknown}},
+		{"unix", path.Join(sockdir, "socket"), []string{"unix"}},
+	}
+
+	set := vif.NewSet()
+	for _, test := range tests {
+		if test.address == "[::1]:0" && !supportsIPv6 {
+			continue
+		}
+
+		name := fmt.Sprintf("(%q, %q)", test.network, test.address)
+
+		c, s, err := newConn(test.network, test.address)
+		if err != nil {
+			t.Fatal(err)
+		}
+		vf, _, err := newVIF(c, s)
+		if err != nil {
+			t.Fatal(err)
+		}
+		a := c.RemoteAddr()
+
+		set.Insert(vf, a.Network(), a.String())
+		for _, n := range test.compatibles {
+			if found := find(set, n, a.String()); found == nil {
+				t.Fatalf("%s: Got nil, but want [%v] on find(%q, %q)", name, vf, n, a)
+			}
+		}
+
+		for _, n := range diff(all, test.compatibles) {
+			if v := find(set, n, a.String()); v != nil {
+				t.Fatalf("%s: Got [%v], but want nil on find(%q, %q)", name, v, n, a)
+			}
+		}
+
+		set.Delete(vf)
+		for _, n := range all {
+			if v := find(set, n, a.String()); v != nil {
+				t.Fatalf("%s: Got [%v], but want nil on find(%q, %q)", name, v, n, a)
+			}
+		}
+	}
+}
+
+func TestSetWithPipes(t *testing.T) {
+	c1, s1 := net.Pipe()
+	c2, s2 := net.Pipe()
+	a1 := c1.RemoteAddr()
+	a2 := c2.RemoteAddr()
+	if a1.Network() != a2.Network() || a1.String() != a2.String() {
+		t.Fatalf("This test was intended for distinct connections that have duplicate RemoteAddrs. "+
+			"That does not seem to be the case with (%q, %q) and (%q, %q)",
+			a1.Network(), a1, a2.Network(), a2)
+	}
+
+	vf1, _, err := newVIF(c1, s1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	vf2, _, err := newVIF(c2, s2)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	set := vif.NewSet()
+	set.Insert(vf1, a1.Network(), a1.String())
+	if v := find(set, a1.Network(), a1.String()); v != nil {
+		t.Fatalf("Got [%v], but want nil on find(%q, %q)", v, a1.Network(), a1)
+	}
+	if l := set.List(); len(l) != 1 || l[0] != vf1 {
+		t.Errorf("Unexpected list of VIFs: %v", l)
+	}
+
+	set.Insert(vf2, a2.Network(), a2.String())
+	if v := find(set, a2.Network(), a2.String()); v != nil {
+		t.Fatalf("Got [%v], but want nil on find(%q, %q)", v, a2.Network(), a2)
+	}
+	if l := set.List(); len(l) != 2 || l[0] != vf1 || l[1] != vf2 {
+		t.Errorf("Unexpected list of VIFs: %v", l)
+	}
+
+	set.Delete(vf1)
+	if l := set.List(); len(l) != 1 || l[0] != vf2 {
+		t.Errorf("Unexpected list of VIFs: %v", l)
+	}
+	set.Delete(vf2)
+	if l := set.List(); len(l) != 0 {
+		t.Errorf("Unexpected list of VIFs: %v", l)
+	}
+}
+
+func TestSetWithUnixSocket(t *testing.T) {
+	dir, err := ioutil.TempDir("", "TestSetWithUnixSocket")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(dir)
+
+	c1, s1, err := newConn("unix", path.Join(dir, "socket1"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	c2, s2, err := newConn("unix", path.Join(dir, "socket2"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// The client side address is always unix:@ regardless of socket name.
+	a1 := s1.RemoteAddr()
+	a2 := s2.RemoteAddr()
+	if a1.Network() != a2.Network() || a1.String() != a2.String() {
+		t.Fatalf("This test was intended for distinct connections that have duplicate RemoteAddrs. "+
+			"That does not seem to be the case with (%q, %q) and (%q, %q)",
+			a1.Network(), a1, a2.Network(), a2)
+	}
+
+	_, vf1, err := newVIF(c1, s1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, vf2, err := newVIF(c2, s2)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	set := vif.NewSet()
+	set.Insert(vf1, a1.Network(), a1.String())
+	if v := find(set, a1.Network(), a1.String()); v != nil {
+		t.Fatalf("Got [%v], but want nil on find(%q, %q))", v, a1.Network(), a1)
+	}
+	if l := set.List(); len(l) != 1 || l[0] != vf1 {
+		t.Errorf("Unexpected list of VIFs: %v", l)
+	}
+
+	set.Insert(vf2, a2.Network(), a2.String())
+	if v := find(set, a2.Network(), a2.String()); v != nil {
+		t.Fatalf("Got [%v], but want nil on find(%q, %q)", v, a2.Network(), a2)
+	}
+	if l := set.List(); len(l) != 2 || l[0] != vf1 || l[1] != vf2 {
+		t.Errorf("Unexpected list of VIFs: %v", l)
+	}
+
+	set.Delete(vf1)
+	if l := set.List(); len(l) != 1 || l[0] != vf2 {
+		t.Errorf("Unexpected list of VIFs: %v", l)
+	}
+	set.Delete(vf2)
+	if l := set.List(); len(l) != 0 {
+		t.Errorf("Unexpected list of VIFs: %v", l)
+	}
+}
+
+func TestSetInsertDelete(t *testing.T) {
+	c1, s1 := net.Pipe()
+	vf1, _, err := newVIF(c1, s1)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	set1 := vif.NewSet()
+
+	n1, a1 := c1.RemoteAddr().Network(), c1.RemoteAddr().String()
+	set1.Insert(vf1, n1, a1)
+	if l := set1.List(); len(l) != 1 || l[0] != vf1 {
+		t.Errorf("Unexpected list of VIFs: %v", l)
+	}
+
+	set1.Delete(vf1)
+	if l := set1.List(); len(l) != 0 {
+		t.Errorf("Unexpected list of VIFs: %v", l)
+	}
+}
+
+func TestBlockingFind(t *testing.T) {
+	network, address := "tcp", "127.0.0.1:1234"
+	set := vif.NewSet()
+
+	_, unblock := set.BlockingFind(network, address)
+
+	ch := make(chan *vif.VIF, 1)
+
+	// This set.BlockingFind call should block until the unblock function
+	// returned by the earlier call is invoked, since it uses the same key.
+	go func(ch chan *vif.VIF) {
+		vf, _ := set.BlockingFind(network, address)
+		ch <- vf
+	}(ch)
+
+	// set.BlockingFind for a different network and address should not block.
+	set.BlockingFind("network", "address")
+
+	// Create and insert the VIF.
+	c, s, err := newConn(network, address)
+	if err != nil {
+		t.Fatal(err)
+	}
+	vf, _, err := newVIF(c, s)
+	if err != nil {
+		t.Fatal(err)
+	}
+	set.Insert(vf, network, address)
+	unblock()
+
+	// Now the set.BlockingFind should have returned the correct vif.
+	if cachedVif := <-ch; cachedVif != vf {
+		t.Errorf("got %v, want %v", cachedVif, vf)
+	}
+}
diff --git a/runtime/internal/rpc/stream/vif/setup_conn.go b/runtime/internal/rpc/stream/vif/setup_conn.go
new file mode 100644
index 0000000..c038ef3
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/setup_conn.go
@@ -0,0 +1,71 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vif
+
+import (
+	"io"
+
+	"v.io/v23/verror"
+
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/crypto"
+	"v.io/x/ref/runtime/internal/rpc/stream/message"
+)
+
+// setupConn implements an io.ReadWriteCloser that exchanges data over the
+// underlying connection using SetupStream messages.
+type setupConn struct {
+	writer  io.Writer
+	reader  *iobuf.Reader
+	cipher  crypto.ControlCipher
+	rbuffer []byte // read buffer
+}
+
+var _ io.ReadWriteCloser = (*setupConn)(nil)
+
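+// maxFrameSize bounds the payload of each SetupStream message produced by
+// Write; larger writes are split into multiple messages.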
+const maxFrameSize = 8192
+
+func newSetupConn(writer io.Writer, reader *iobuf.Reader, c crypto.ControlCipher) *setupConn {
+	return &setupConn{writer: writer, reader: reader, cipher: c}
+}
+
+// Read implements io.Reader.
+func (s *setupConn) Read(buf []byte) (int, error) {
+	for len(s.rbuffer) == 0 {
+		msg, err := message.ReadFrom(s.reader, s.cipher)
+		if err != nil {
+			return 0, err
+		}
+		emsg, ok := msg.(*message.SetupStream)
+		if !ok {
+			return 0, verror.New(stream.ErrSecurity, nil, verror.New(errVersionNegotiationFailed, nil))
+		}
+		s.rbuffer = emsg.Data
+	}
+	n := copy(buf, s.rbuffer)
+	s.rbuffer = s.rbuffer[n:]
+	return n, nil
+}
+
+// Write implements io.Writer.
+func (s *setupConn) Write(buf []byte) (int, error) {
+	amount := 0
+	for len(buf) > 0 {
+		n := len(buf)
+		if n > maxFrameSize {
+			n = maxFrameSize
+		}
+		emsg := message.SetupStream{Data: buf[:n]}
+		if err := message.WriteTo(s.writer, &emsg, s.cipher); err != nil {
+			return amount, err
+		}
+		buf = buf[n:]
+		amount += n
+	}
+	return amount, nil
+}
+
+// Close does nothing.
+func (s *setupConn) Close() error { return nil }
diff --git a/runtime/internal/rpc/stream/vif/setup_conn_test.go b/runtime/internal/rpc/stream/vif/setup_conn_test.go
new file mode 100644
index 0000000..17622a7
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/setup_conn_test.go
@@ -0,0 +1,166 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vif
+
+import (
+	"bytes"
+	"encoding/binary"
+	"io"
+	"net"
+	"sync"
+	"testing"
+
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+)
+
+const (
+	text = `Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.`
+)
+
+func min(i, j int) int {
+	if i < j {
+		return i
+	}
+	return j
+}
+
+// testControlCipher is a super-simple cipher that XORs each byte of the
+// payload with 0xaa.
+type testControlCipher struct{}
+
+const testMACSize = 4
+
+func (*testControlCipher) MACSize() int {
+	return testMACSize
+}
+
+func testMAC(data []byte) []byte {
+	var h uint32
+	for _, b := range data {
+		h = (h << 1) ^ uint32(b)
+	}
+	var hash [4]byte
+	binary.BigEndian.PutUint32(hash[:], h)
+	return hash[:]
+}
+
+func (c *testControlCipher) Decrypt(data []byte) {
+	for i := range data {
+		data[i] ^= 0xaa
+	}
+}
+
+func (c *testControlCipher) Encrypt(data []byte) {
+	for i := range data {
+		data[i] ^= 0xaa
+	}
+}
+
+func (c *testControlCipher) Open(data []byte) bool {
+	mac := testMAC(data[:len(data)-testMACSize])
+	if !bytes.Equal(mac, data[len(data)-testMACSize:]) {
+		return false
+	}
+	c.Decrypt(data[:len(data)-testMACSize])
+	return true
+}
+
+func (c *testControlCipher) Seal(data []byte) error {
+	c.Encrypt(data[:len(data)-testMACSize])
+	mac := testMAC(data[:len(data)-testMACSize])
+	copy(data[len(data)-testMACSize:], mac)
+	return nil
+}
+
+// shortConn performs at most 3 bytes of IO at a time.
+type shortConn struct {
+	io.ReadWriteCloser
+}
+
+func (s *shortConn) Read(data []byte) (int, error) {
+	if len(data) > 3 {
+		data = data[:3]
+	}
+	return s.ReadWriteCloser.Read(data)
+}
+
+func (s *shortConn) Write(data []byte) (int, error) {
+	n := len(data)
+	for i := 0; i < n; i += 3 {
+		j := min(n, i+3)
+		m, err := s.ReadWriteCloser.Write(data[i:j])
+		if err != nil {
+			return i + m, err
+		}
+	}
+	return n, nil
+}
+
+func TestConn(t *testing.T) {
+	p1, p2 := net.Pipe()
+	pool := iobuf.NewPool(0)
+	r1 := iobuf.NewReader(pool, p1)
+	r2 := iobuf.NewReader(pool, p2)
+	f1 := newSetupConn(p1, r1, &testControlCipher{})
+	f2 := newSetupConn(p2, r2, &testControlCipher{})
+	testConn(t, f1, f2)
+}
+
+func TestShortInnerConn(t *testing.T) {
+	p1, p2 := net.Pipe()
+	s1 := &shortConn{p1}
+	s2 := &shortConn{p2}
+	pool := iobuf.NewPool(0)
+	r1 := iobuf.NewReader(pool, s1)
+	r2 := iobuf.NewReader(pool, s2)
+	f1 := newSetupConn(s1, r1, &testControlCipher{})
+	f2 := newSetupConn(s2, r2, &testControlCipher{})
+	testConn(t, f1, f2)
+}
+
+func TestShortOuterConn(t *testing.T) {
+	p1, p2 := net.Pipe()
+	pool := iobuf.NewPool(0)
+	r1 := iobuf.NewReader(pool, p1)
+	r2 := iobuf.NewReader(pool, p2)
+	e1 := newSetupConn(p1, r1, &testControlCipher{})
+	e2 := newSetupConn(p2, r2, &testControlCipher{})
+	f1 := &shortConn{e1}
+	f2 := &shortConn{e2}
+	testConn(t, f1, f2)
+}
+
+// testConn writes successive prefixes of the text through one end of the
+// framed pipe and verifies that they arrive intact at the other end.
+func testConn(t *testing.T, f1, f2 io.ReadWriteCloser) {
+	// Reader loop.
+	var pending sync.WaitGroup
+	pending.Add(1)
+	go func() {
+		var buf [1024]byte
+		for i := 1; i != len(text); i++ {
+			n, err := io.ReadFull(f1, buf[:i])
+			if err != nil {
+				t.Errorf("bad read: %s", err)
+			}
+			if n != i {
+				t.Errorf("bad read: got %d bytes, expected %d bytes", n, i)
+			}
+			actual := string(buf[:n])
+			expected := string(text[:n])
+			if actual != expected {
+				t.Errorf("got %q, expected %q", actual, expected)
+			}
+		}
+		pending.Done()
+	}()
+
+	// Writer.
+	for i := 1; i != len(text); i++ {
+		if n, err := f2.Write([]byte(text[:i])); err != nil || n != i {
+			t.Errorf("bad write: i=%d n=%d err=%s", i, n, err)
+		}
+	}
+	pending.Wait()
+}
diff --git a/runtime/internal/rpc/stream/vif/testutil_test.go b/runtime/internal/rpc/stream/vif/testutil_test.go
new file mode 100644
index 0000000..9d35a6a
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/testutil_test.go
@@ -0,0 +1,40 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vif
+
+import (
+	"fmt"
+	"time"
+)
+
+// WaitForNotifications waits until all notifications in 'wants' have been received.
+func WaitForNotifications(notify <-chan interface{}, wants ...interface{}) error {
+	expected := make(map[interface{}]struct{})
+	for _, w := range wants {
+		expected[w] = struct{}{}
+	}
+	for len(expected) > 0 {
+		n := <-notify
+		if _, exists := expected[n]; !exists {
+			return fmt.Errorf("unexpected notification %v", n)
+		}
+		delete(expected, n)
+	}
+	return nil
+}
+
+// WaitWithTimeout returns an error if any notification is received before
+// the timeout expires.
+func WaitWithTimeout(notify <-chan interface{}, timeout time.Duration) error {
+	select {
+	case n := <-notify:
+		return fmt.Errorf("unexpected notification %v", n)
+	case <-time.After(timeout):
+		return nil
+	}
+}
diff --git a/runtime/internal/rpc/stream/vif/v23_internal_test.go b/runtime/internal/rpc/stream/vif/v23_internal_test.go
new file mode 100644
index 0000000..161553c
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/v23_internal_test.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated via go generate.
+// DO NOT UPDATE MANUALLY
+package vif
+
+import "testing"
+import "os"
+
+import "v.io/x/ref/test"
+
+func TestMain(m *testing.M) {
+	test.Init()
+	os.Exit(m.Run())
+}
diff --git a/runtime/internal/rpc/stream/vif/vcmap.go b/runtime/internal/rpc/stream/vif/vcmap.go
new file mode 100644
index 0000000..bc6aa28
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/vcmap.go
@@ -0,0 +1,107 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vif
+
+import (
+	"sort"
+	"sync"
+
+	"v.io/x/ref/runtime/internal/lib/pcqueue"
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+)
+
+// vcMap implements a thread-safe map of vc.VC objects (vcInfo) keyed by their VCI.
+type vcMap struct {
+	mu     sync.Mutex
+	m      map[id.VC]vcInfo
+	frozen bool
+}
+
+// vcInfo represents per-VC information maintained by a VIF.
+type vcInfo struct {
+	VC *vc.VC
+	// Queues used to dispatch work to per-VC goroutines.
+	// RQ is where vif.readLoop can dispatch work to.
+	// WQ is where vif.writeLoop can dispatch work to.
+	RQ, WQ *pcqueue.T
+}
+
+func newVCMap() *vcMap { return &vcMap{m: make(map[id.VC]vcInfo)} }
+
+func (m *vcMap) Insert(c *vc.VC) (inserted bool, rq, wq *pcqueue.T) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if m.frozen {
+		return false, nil, nil
+	}
+	if _, exists := m.m[c.VCI()]; exists {
+		return false, nil, nil
+	}
+	info := vcInfo{
+		VC: c,
+		RQ: pcqueue.New(100),
+		WQ: pcqueue.New(100),
+	}
+	m.m[c.VCI()] = info
+	return true, info.RQ, info.WQ
+}
+
+func (m *vcMap) Find(vci id.VC) (vc *vc.VC, rq, wq *pcqueue.T) {
+	m.mu.Lock()
+	info := m.m[vci]
+	m.mu.Unlock()
+	return info.VC, info.RQ, info.WQ
+}
+
+// Delete deletes the given VC and returns true if the map is empty after deletion.
+func (m *vcMap) Delete(vci id.VC) bool {
+	m.mu.Lock()
+	if info, exists := m.m[vci]; exists {
+		info.RQ.Close()
+		info.WQ.Close()
+		delete(m.m, vci)
+	}
+	empty := len(m.m) == 0
+	m.mu.Unlock()
+	return empty
+}
+
+func (m *vcMap) Size() int {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	return len(m.m)
+}
+
+// Freeze causes all subsequent Inserts to fail.
+// Returns a list of all the VCs that are in the map.
+func (m *vcMap) Freeze() []vcInfo {
+	m.mu.Lock()
+	m.frozen = true
+	l := make([]vcInfo, 0, len(m.m))
+	for _, info := range m.m {
+		l = append(l, info)
+	}
+	m.mu.Unlock()
+	return l
+}
+
+type vcSlice []*vc.VC
+
+func (s vcSlice) Len() int           { return len(s) }
+func (s vcSlice) Less(i, j int) bool { return s[i].VCI() < s[j].VCI() }
+func (s vcSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// List returns the list of all VCs currently in the map, sorted by VCI.
+func (m *vcMap) List() []*vc.VC {
+	m.mu.Lock()
+	l := make([]*vc.VC, 0, len(m.m))
+	for _, info := range m.m {
+		l = append(l, info.VC)
+	}
+	m.mu.Unlock()
+	sort.Sort(vcSlice(l))
+	return l
+}
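
A sketch of the teardown pattern Freeze enables, illustrative of how the owning VIF might drain its VCs on close (closeAllVCs and the error text are assumptions; fmt would need importing):

```
func closeAllVCs(m *vcMap) {
	for _, info := range m.Freeze() { // no new VCs can be inserted after this
		info.RQ.Close()
		info.WQ.Close()
		info.VC.Close(fmt.Errorf("VIF is being closed"))
	}
}
```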
diff --git a/runtime/internal/rpc/stream/vif/vcmap_test.go b/runtime/internal/rpc/stream/vif/vcmap_test.go
new file mode 100644
index 0000000..83c503f
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/vcmap_test.go
@@ -0,0 +1,86 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vif
+
+import (
+	"reflect"
+	"testing"
+
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+)
+
+func TestVCMap(t *testing.T) {
+	m := newVCMap()
+
+	vc12 := vc.InternalNew(vc.Params{VCI: 12})
+	vc34 := vc.InternalNew(vc.Params{VCI: 34})
+	vc45 := vc.InternalNew(vc.Params{VCI: 45})
+
+	if vc, _, _ := m.Find(12); vc != nil {
+		t.Errorf("Unexpected VC found: %+v", vc)
+	}
+	if ok, _, _ := m.Insert(vc34); !ok {
+		t.Errorf("Insert should have returned true on first insert")
+	}
+	if ok, _, _ := m.Insert(vc34); ok {
+		t.Errorf("Insert should have returned false on second insert")
+	}
+	if ok, _, _ := m.Insert(vc12); !ok {
+		t.Errorf("Insert should have returned true on first insert")
+	}
+	if ok, _, _ := m.Insert(vc45); !ok {
+		t.Errorf("Insert should have returned true on the first insert")
+	}
+	if g, w := m.List(), []*vc.VC{vc12, vc34, vc45}; !reflect.DeepEqual(g, w) {
+		t.Errorf("Did not get all VCs in expected order. Got %v, want %v", g, w)
+	}
+	m.Delete(vc34.VCI())
+	if g, w := m.List(), []*vc.VC{vc12, vc45}; !reflect.DeepEqual(g, w) {
+		t.Errorf("Did not get all VCs in expected order. Got %v, want %v", g, w)
+	}
+}
+
+func TestVCMapFreeze(t *testing.T) {
+	m := newVCMap()
+	vc1 := vc.InternalNew(vc.Params{VCI: 1})
+	vc2 := vc.InternalNew(vc.Params{VCI: 2})
+	if ok, _, _ := m.Insert(vc1); !ok {
+		t.Fatal("Should be able to insert the VC")
+	}
+	m.Freeze()
+	if ok, _, _ := m.Insert(vc2); ok {
+		t.Errorf("Should not be able to insert a VC after Freeze")
+	}
+	if vc, _, _ := m.Find(1); vc != vc1 {
+		t.Errorf("Got %v want %v", vc, vc1)
+	}
+	m.Delete(vc1.VCI())
+	if vc, _, _ := m.Find(1); vc != nil {
+		t.Errorf("Got %v want nil", vc)
+	}
+}
+
+func TestVCMapDelete(t *testing.T) {
+	m := newVCMap()
+
+	vc1 := vc.InternalNew(vc.Params{VCI: 1})
+	vc2 := vc.InternalNew(vc.Params{VCI: 2})
+
+	m.Insert(vc1)
+	if empty := m.Delete(vc1.VCI()); !empty {
+		t.Error("Want empty; got false")
+	}
+
+	m.Insert(vc1)
+	m.Insert(vc2)
+
+	m.Delete(vc1.VCI())
+	if empty := m.Delete(vc1.VCI()); empty {
+		t.Error("Want not empty; got true")
+	}
+	if empty := m.Delete(vc2.VCI()); !empty {
+		t.Error("Want empty; got false")
+	}
+}
diff --git a/runtime/internal/rpc/stream/vif/vif.go b/runtime/internal/rpc/stream/vif/vif.go
new file mode 100644
index 0000000..124ce98
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/vif.go
@@ -0,0 +1,1092 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vif
+
+// Logging guidelines:
+// vlog.VI(1) for per-net.Conn information
+// vlog.VI(2) for per-VC information
+// vlog.VI(3) for per-Flow information
+
+import (
+	"bytes"
+	"fmt"
+	"net"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+	"v.io/v23/vtrace"
+
+	"v.io/x/lib/vlog"
+	"v.io/x/ref/runtime/internal/lib/bqueue"
+	"v.io/x/ref/runtime/internal/lib/bqueue/drrqueue"
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	"v.io/x/ref/runtime/internal/lib/pcqueue"
+	vsync "v.io/x/ref/runtime/internal/lib/sync"
+	"v.io/x/ref/runtime/internal/lib/upcqueue"
+	inaming "v.io/x/ref/runtime/internal/naming"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/crypto"
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+	"v.io/x/ref/runtime/internal/rpc/stream/message"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+	iversion "v.io/x/ref/runtime/internal/rpc/version"
+)
+
+const pkgPath = "v.io/x/ref/runtime/internal/rpc/stream/vif"
+
+func reg(id, msg string) verror.IDAction {
+	return verror.Register(verror.ID(pkgPath+id), verror.NoRetry, msg)
+}
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
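+	//
+	// For example, newVC below wraps one of these as
+	// verror.New(stream.ErrAborted, nil, verror.New(errShuttingDown, nil, vif)),
+	// so the standard prefix appears only once, from the outer error.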
+	errShuttingDown             = reg(".errShuttingDown", "underlying network connection({3}) shutting down")
+	errVCHandshakeFailed        = reg(".errVCHandshakeFailed", "VC handshake failed{:3}")
+	errSendOnExpressQFailed     = reg(".errSendOnExpressQFailed", "vif.sendOnExpressQ(OpenVC) failed{:3}")
+	errVIFIsBeingClosed         = reg(".errVIFIsBeingClosed", "VIF is being closed")
+	errVIFAlreadyAcceptingFlows = reg(".errVIFAlreadyAcceptingFlows", "already accepting flows on VIF {3}")
+	errVCsNotAcceptedOnVIF      = reg(".errVCsNotAcceptedOnVIF", "VCs not accepted on VIF {3}")
+	errAcceptFailed             = reg(".errAcceptFailed", "Accept failed{:3}")
+	errRemoteEndClosedVC        = reg(".errRemoteEndClosedVC", "remote end closed VC{:3}")
+	errFlowsNoLongerAccepted    = reg(".errFlowsNoLongerAccepted", "Flows no longer being accepted")
+	errVCAcceptFailed           = reg(".errVCAcceptFailed", "VC accept failed{:3}")
+	errIdleTimeout              = reg(".errIdleTimeout", "idle timeout")
+	errVIFAlreadySetup          = reg(".errVIFAlreadySetup", "VIF is already set up")
+	errBqueueWriterForXpress    = reg(".errBqueueWriterForXpress", "failed to create bqueue.Writer for express messages{:3}")
+	errBqueueWriterForControl   = reg(".errBqueueWriterForControl", "failed to create bqueue.Writer for flow control counters{:3}")
+	errBqueueWriterForStopping  = reg(".errBqueueWriterForStopping", "failed to create bqueue.Writer for stopping the write loop{:3}")
+	errWriteFailed              = reg(".errWriteFailed", "write failed: got ({3}, {4}) for {5} byte message")
+)
+
+// VIF implements a "virtual interface" over an underlying network connection
+// (net.Conn). Just like multiple network connections can be established over a
+// single physical interface, multiple Virtual Circuits (VCs) can be
+// established over a single VIF.
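+//
+// A typical dialed-side lifecycle looks like (a sketch; see the tests for
+// complete, runnable examples):
+//	vf, err := InternalNewDialedVIF(conn, rid, principal, nil, nil)
+//	...
+//	vc, err := vf.Dial(remoteEP, principal)
+//	...
+//	vf.Close()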
+type VIF struct {
+	// All reads must be performed through reader, and not directly through conn.
+	conn    net.Conn
+	pool    *iobuf.Pool
+	reader  *iobuf.Reader
+	localEP naming.Endpoint
+
+	// ctrlCipher is normally guarded by writeMu, however see the exception in
+	// readLoop.
+	ctrlCipher crypto.ControlCipher
+	writeMu    sync.Mutex
+
+	muStartTimer sync.Mutex
+	startTimer   timer
+
+	vcMap              *vcMap
+	idleTimerMap       *idleTimerMap
+	wpending, rpending vsync.WaitGroup
+
+	muListen     sync.Mutex
+	acceptor     *upcqueue.T          // GUARDED_BY(muListen)
+	listenerOpts []stream.ListenerOpt // GUARDED_BY(muListen)
+	principal    security.Principal
+	blessings    security.Blessings
+
+	muNextVCI sync.Mutex
+	nextVCI   id.VC
+
+	outgoing bqueue.T
+	expressQ bqueue.Writer
+
+	flowQ        bqueue.Writer
+	flowMu       sync.Mutex
+	flowCounters message.Counters
+
+	stopQ bqueue.Writer
+
+	// The RPC version range supported by this VIF.  In practice this is
+	// non-nil only in testing.  nil is equivalent to using the versions
+	// actually supported by this RPC implementation (which is always
+	// what you want outside of tests).
+	versions *iversion.Range
+
+	isClosedMu sync.Mutex
+	isClosed   bool // GUARDED_BY(isClosedMu)
+	onClose    func(*VIF)
+
+	// These counters track the number of messages sent and received by
+	// this VIF.
+	muMsgCounters sync.Mutex
+	msgCounters   map[string]int64
+}
+
+// ConnectorAndFlow represents a Flow and the Connector that can be used to
+// create another Flow over the same underlying VC.
+type ConnectorAndFlow struct {
+	Connector stream.Connector
+	Flow      stream.Flow
+}
+
+// Separate out constants that are not exported so that godoc looks nicer for
+// the exported ones.
+const (
+	// Priorities of the buffered queues used for flow control of writes.
+	expressPriority bqueue.Priority = iota
+	controlPriority
+	// The range of flow priorities is [flowPriority, flowPriority + NumFlowPriorities)
+	flowPriority
+	stopPriority = flowPriority + vc.NumFlowPriorities
+)
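+
+// For example, a flow writer registered with per-flow priority p lands at
+// bqueue priority flowPriority+p (see vcHelper.NewWriter below), so express
+// and flow-counter messages always drain ahead of flow payloads, and the
+// stop writer drains last.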
+
+const (
+	// Convenience aliases so that the package name "vc" does not
+	// conflict with the variables named "vc".
+	defaultBytesBufferedPerFlow = vc.DefaultBytesBufferedPerFlow
+	sharedFlowID                = vc.SharedFlowID
+)
+
+// InternalNewDialedVIF creates a new virtual interface over the provided
+// network connection, under the assumption that the conn object was created
+// using net.Dial. If onClose is given, it is run in its own goroutine when
+// the vif has been closed.
+//
+// As the name suggests, this method is intended for use only within packages
+// placed inside v.io/x/ref/runtime/internal. Code outside the
+// v.io/x/ref/runtime/internal/* packages should never call this method.
+func InternalNewDialedVIF(conn net.Conn, rid naming.RoutingID, principal security.Principal, versions *iversion.Range, onClose func(*VIF), opts ...stream.VCOpt) (*VIF, error) {
+	ctx := getDialContext(opts)
+	if ctx != nil {
+		var span vtrace.Span
+		ctx, span = vtrace.WithNewSpan(ctx, "InternalNewDialedVIF")
+		span.Annotatef("(%v, %v)", conn.RemoteAddr().Network(), conn.RemoteAddr())
+		defer span.Finish()
+	}
+	pool := iobuf.NewPool(0)
+	reader := iobuf.NewReader(pool, conn)
+	params := security.CallParams{LocalPrincipal: principal, LocalEndpoint: localEP(conn, rid, versions)}
+
+	// TODO(ataly, ashankar, suharshs): Figure out what authorization policy to use
+	// for authenticating the server during VIF establishment. Note that we cannot
+	// use the VC.ServerAuthorizer available in 'opts' as that applies to the end
+	// server and not the remote endpoint of the VIF.
+	c, err := AuthenticateAsClient(conn, reader, versions, params, nil)
+	if err != nil {
+		return nil, verror.New(stream.ErrNetwork, ctx, err)
+	}
+	var blessings security.Blessings
+
+	if principal != nil {
+		blessings = principal.BlessingStore().Default()
+	}
+	var startTimeout time.Duration
+	for _, o := range opts {
+		switch v := o.(type) {
+		case vc.StartTimeout:
+			startTimeout = v.Duration
+		}
+	}
+	return internalNew(conn, pool, reader, rid, id.VC(vc.NumReservedVCs), versions, principal, blessings, startTimeout, onClose, nil, nil, c)
+}
+
+// InternalNewAcceptedVIF creates a new virtual interface over the provided
+// network connection, under the assumption that the conn object was created
+// using an Accept call on a net.Listener object. If onClose is given, it is
+// run in its own goroutine when the vif has been closed.
+//
+// The returned VIF is also set up for accepting new VCs and Flows with the provided
+// ListenerOpts.
+//
+// As the name suggests, this method is intended for use only within packages
+// placed inside v.io/x/ref/runtime/internal. Code outside the
+// v.io/x/ref/runtime/internal/* packages should never call this method.
+func InternalNewAcceptedVIF(conn net.Conn, rid naming.RoutingID, principal security.Principal, blessings security.Blessings, versions *iversion.Range, onClose func(*VIF), lopts ...stream.ListenerOpt) (*VIF, error) {
+	pool := iobuf.NewPool(0)
+	reader := iobuf.NewReader(pool, conn)
+
+	dischargeClient := getDischargeClient(lopts)
+
+	c, err := AuthenticateAsServer(conn, reader, versions, principal, blessings, dischargeClient)
+	if err != nil {
+		return nil, err
+	}
+
+	var startTimeout time.Duration
+	for _, o := range lopts {
+		switch v := o.(type) {
+		case vc.StartTimeout:
+			startTimeout = v.Duration
+		}
+	}
+	return internalNew(conn, pool, reader, rid, id.VC(vc.NumReservedVCs)+1, versions, principal, blessings, startTimeout, onClose, upcqueue.New(), lopts, c)
+}
+
+func internalNew(conn net.Conn, pool *iobuf.Pool, reader *iobuf.Reader, rid naming.RoutingID, initialVCI id.VC, versions *iversion.Range, principal security.Principal, blessings security.Blessings, startTimeout time.Duration, onClose func(*VIF), acceptor *upcqueue.T, listenerOpts []stream.ListenerOpt, c crypto.ControlCipher) (*VIF, error) {
+	var (
+		// Choose IDs that will not conflict with any other (VC, Flow)
+		// pairs.  VCI 0 is never used by the application (it is
+		// reserved for control messages), so steal from the Flow space
+		// there.
+		expressID bqueue.ID = packIDs(0, 0)
+		flowID    bqueue.ID = packIDs(0, 1)
+		stopID    bqueue.ID = packIDs(0, 2)
+	)
+	outgoing := drrqueue.New(vc.MaxPayloadSizeBytes)
+
+	expressQ, err := outgoing.NewWriter(expressID, expressPriority, defaultBytesBufferedPerFlow)
+	if err != nil {
+		return nil, verror.New(stream.ErrNetwork, nil, verror.New(errBqueueWriterForXpress, nil, err))
+	}
+	expressQ.Release(-1) // Disable flow control
+
+	flowQ, err := outgoing.NewWriter(flowID, controlPriority, flowToken.Size())
+	if err != nil {
+		return nil, verror.New(stream.ErrNetwork, nil, verror.New(errBqueueWriterForControl, nil, err))
+	}
+	flowQ.Release(-1) // Disable flow control
+
+	stopQ, err := outgoing.NewWriter(stopID, stopPriority, 1)
+	if err != nil {
+		return nil, verror.New(stream.ErrNetwork, nil, verror.New(errBqueueWriterForStopping, nil, err))
+	}
+	stopQ.Release(-1) // Disable flow control
+
+	if versions == nil {
+		versions = iversion.SupportedRange
+	}
+
+	vif := &VIF{
+		conn:         conn,
+		pool:         pool,
+		reader:       reader,
+		ctrlCipher:   c,
+		vcMap:        newVCMap(),
+		acceptor:     acceptor,
+		listenerOpts: listenerOpts,
+		principal:    principal,
+		localEP:      localEP(conn, rid, versions),
+		nextVCI:      initialVCI,
+		outgoing:     outgoing,
+		expressQ:     expressQ,
+		flowQ:        flowQ,
+		flowCounters: message.NewCounters(),
+		stopQ:        stopQ,
+		versions:     versions,
+		onClose:      onClose,
+		msgCounters:  make(map[string]int64),
+		blessings:    blessings,
+	}
+	if startTimeout > 0 {
+		vif.startTimer = newTimer(startTimeout, vif.Close)
+	}
+	vif.idleTimerMap = newIdleTimerMap(func(vci id.VC) {
+		vc, _, _ := vif.vcMap.Find(vci)
+		if vc != nil {
+			vif.closeVCAndSendMsg(vc, false, verror.New(errIdleTimeout, nil))
+		}
+	})
+	go vif.readLoop()
+	go vif.writeLoop()
+	return vif, nil
+}
+
+// Dial creates a new VC to the provided remote identity, authenticating the VC
+// with the provided local identity.
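+//
+// Sample usage (a sketch; vf is a *VIF and error handling is elided):
+//	vc, err := vf.Dial(remoteEP, principal)
+//	flow, err := vc.Connect() // open a Flow on the new VC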
+func (vif *VIF) Dial(remoteEP naming.Endpoint, principal security.Principal, opts ...stream.VCOpt) (stream.VC, error) {
+	var idleTimeout time.Duration
+	for _, o := range opts {
+		switch v := o.(type) {
+		case vc.IdleTimeout:
+			idleTimeout = v.Duration
+		}
+	}
+	vc, err := vif.newVC(vif.allocVCI(), vif.localEP, remoteEP, idleTimeout, true)
+	if err != nil {
+		return nil, err
+	}
+	counters := message.NewCounters()
+	counters.Add(vc.VCI(), sharedFlowID, defaultBytesBufferedPerFlow)
+
+	sendPublicKey := func(pubKey *crypto.BoxKey) error {
+		var options []message.SetupOption
+		if pubKey != nil {
+			options = []message.SetupOption{&message.NaclBox{PublicKey: *pubKey}}
+		}
+		err := vif.sendOnExpressQ(&message.SetupVC{
+			VCI:            vc.VCI(),
+			RemoteEndpoint: remoteEP,
+			LocalEndpoint:  vif.localEP,
+			Counters:       counters,
+			Setup: message.Setup{
+				Versions: *vif.versions,
+				Options:  options,
+			},
+		})
+		if err != nil {
+			err = verror.New(stream.ErrNetwork, nil,
+				verror.New(errSendOnExpressQFailed, nil, err))
+		}
+		return err
+	}
+	if err = vc.HandshakeDialedVC(principal, sendPublicKey, opts...); err != nil {
+		vif.deleteVC(vc.VCI())
+		vc.Close(err)
+		return nil, err
+	}
+	return vc, nil
+}
+
+// Close closes all VCs (and thereby Flows) over the VIF and then closes the
+// underlying network connection after draining all pending writes on those
+// VCs.
+func (vif *VIF) Close() {
+	vif.isClosedMu.Lock()
+	if vif.isClosed {
+		vif.isClosedMu.Unlock()
+		return
+	}
+	vif.isClosed = true
+	vif.isClosedMu.Unlock()
+
+	vlog.VI(1).Infof("Closing VIF %s", vif)
+	// Stop accepting new VCs.
+	vif.StopAccepting()
+	// Close local data structures for all existing VCs.
+	vcs := vif.vcMap.Freeze()
+	// Stop the idle timers.
+	vif.idleTimerMap.Stop()
+	for _, vc := range vcs {
+		vc.VC.Close(verror.New(stream.ErrNetwork, nil, verror.New(errVIFIsBeingClosed, nil)))
+	}
+	// Wait for the vcWriteLoops to exit (after draining queued up messages).
+	vif.stopQ.Close()
+	vif.wpending.Wait()
+	// Close the underlying network connection.
+	// No need to send individual messages to close all pending VCs since
+	// the remote end should know to close all VCs when the VIF's
+	// connection breaks.
+	if err := vif.conn.Close(); err != nil {
+		vlog.VI(1).Infof("net.Conn.Close failed on VIF %s: %v", vif, err)
+	}
+	// Notify that the VIF has been closed.
+	if vif.onClose != nil {
+		go vif.onClose(vif)
+	}
+}
+
+// StartAccepting begins accepting Flows (and VCs) initiated by the remote end
+// of a VIF. opts is used to setup the listener on newly established VCs.
+func (vif *VIF) StartAccepting(opts ...stream.ListenerOpt) error {
+	vif.muListen.Lock()
+	defer vif.muListen.Unlock()
+	if vif.acceptor != nil {
+		return verror.New(stream.ErrNetwork, nil, verror.New(errVIFAlreadyAcceptingFlows, nil, vif))
+	}
+	vif.acceptor = upcqueue.New()
+	vif.listenerOpts = opts
+	return nil
+}
+
+// StopAccepting prevents any Flows initiated by the remote end of a VIF from
+// being accepted and causes any existing and future calls to Accept to fail
+// immediately.
+func (vif *VIF) StopAccepting() {
+	vif.muListen.Lock()
+	defer vif.muListen.Unlock()
+	if vif.acceptor != nil {
+		vif.acceptor.Shutdown()
+		vif.acceptor = nil
+		vif.listenerOpts = nil
+	}
+}
+
+// Accept returns the (stream.Connector, stream.Flow) pair of a newly
+// established VC and/or Flow.
+//
+// Sample usage:
+//	for {
+//		cAndf, err := vif.Accept()
+//		switch {
+//		case err != nil:
+//			fmt.Println("Accept error:", err)
+//			return
+//		case cAndf.Flow == nil:
+//			fmt.Println("New VC established:", cAndf.Connector)
+//		default:
+//			fmt.Println("New flow established")
+//			go handleFlow(cAndf.Flow)
+//		}
+//	}
+func (vif *VIF) Accept() (ConnectorAndFlow, error) {
+	vif.muListen.Lock()
+	acceptor := vif.acceptor
+	vif.muListen.Unlock()
+	if acceptor == nil {
+		return ConnectorAndFlow{}, verror.New(stream.ErrNetwork, nil, verror.New(errVCsNotAcceptedOnVIF, nil, vif))
+	}
+	item, err := acceptor.Get(nil)
+	if err != nil {
+		return ConnectorAndFlow{}, verror.New(stream.ErrNetwork, nil, verror.New(errAcceptFailed, nil, err))
+	}
+	return item.(ConnectorAndFlow), nil
+}
+
+func (vif *VIF) String() string {
+	l := vif.conn.LocalAddr()
+	r := vif.conn.RemoteAddr()
+	return fmt.Sprintf("(%s, %s) <-> (%s, %s)", l.Network(), l, r.Network(), r)
+}
+
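+// readLoop reads messages off the underlying network connection and
+// dispatches them via handleMessage until a read or message-handling error
+// closes the VIF.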
+func (vif *VIF) readLoop() {
+	defer vif.Close()
+	defer vif.stopVCDispatchLoops()
+	for {
+		// vif.ctrlCipher is guarded by vif.writeMu.  However, the only mutation
+		// to it is in handleMessage, which runs in the same goroutine, so a
+		// lock is not required here.
+		msg, err := message.ReadFrom(vif.reader, vif.ctrlCipher)
+		if err != nil {
+			vlog.VI(1).Infof("Exiting readLoop of VIF %s because of read error: %v", vif, err)
+			return
+		}
+		vlog.VI(3).Infof("Received %T = [%v] on VIF %s", msg, msg, vif)
+		if err := vif.handleMessage(msg); err != nil {
+			vlog.VI(1).Infof("Exiting readLoop of VIF %s because of message error: %v", vif, err)
+			return
+		}
+	}
+}
+
+// handleMessage handles a single incoming message.  Any error returned is
+// fatal, causing the VIF to close.
+func (vif *VIF) handleMessage(msg message.T) error {
+	vif.muMsgCounters.Lock()
+	vif.msgCounters[fmt.Sprintf("Recv(%T)", msg)]++
+	vif.muMsgCounters.Unlock()
+
+	switch m := msg.(type) {
+
+	case *message.Data:
+		_, rq, _ := vif.vcMap.Find(m.VCI)
+		if rq == nil {
+			vlog.VI(2).Infof("Ignoring message of %d bytes for unrecognized VCI %d on VIF %s", m.Payload.Size(), m.VCI, vif)
+			m.Release()
+			return nil
+		}
+		if err := rq.Put(m, nil); err != nil {
+			vlog.VI(2).Infof("Failed to put message(%v) on VC queue on VIF %v: %v", m, vif, err)
+			m.Release()
+		}
+
+	case *message.SetupVC:
+		// First, find the public key we need out of the message.
+		var theirPK *crypto.BoxKey
+		box := m.Setup.NaclBox()
+		if box != nil {
+			theirPK = &box.PublicKey
+		}
+
+		// If we dialed this VC, then this is a response and we should finish
+		// the vc handshake.  Otherwise, this message is opening a new VC.
+		if vif.dialedVCI(m.VCI) {
+			vif.distributeCounters(m.Counters)
+			if vc, _, _ := vif.vcMap.Find(m.VCI); vc != nil {
+				intersection, err := vif.versions.Intersect(&m.Setup.Versions)
+				if err != nil {
+					vif.closeVCAndSendMsg(vc, false, err)
+				} else if err := vc.FinishHandshakeDialedVC(intersection.Max, theirPK); err != nil {
+					vif.closeVCAndSendMsg(vc, false, err)
+				}
+				return nil
+			}
+			vlog.VI(2).Infof("Ignoring SetupVC message %+v for unknown dialed VC", m)
+			return nil
+		}
+
+		// This is an accepted VC.
+		intersection, err := vif.versions.Intersect(&m.Setup.Versions)
+		if err != nil {
+			vlog.VI(2).Infof("SetupVC message %+v to VIF %s did not present compatible versions: %v", m, vif, err)
+			vif.sendOnExpressQ(&message.CloseVC{
+				VCI:   m.VCI,
+				Error: err.Error(),
+			})
+			return nil
+		}
+		vif.muListen.Lock()
+		closed := vif.acceptor == nil || vif.acceptor.IsClosed()
+		lopts := vif.listenerOpts
+		vif.muListen.Unlock()
+		if closed {
+			vlog.VI(2).Infof("Ignoring SetupVC message %+v as VIF %s does not accept VCs", m, vif)
+			vif.sendOnExpressQ(&message.CloseVC{
+				VCI:   m.VCI,
+				Error: "VCs not accepted",
+			})
+			return nil
+		}
+		var idleTimeout time.Duration
+		for _, o := range lopts {
+			switch v := o.(type) {
+			case vc.IdleTimeout:
+				idleTimeout = v.Duration
+			}
+		}
+		vc, err := vif.newVC(m.VCI, m.RemoteEndpoint, m.LocalEndpoint, idleTimeout, false)
+		if err != nil {
+			vif.sendOnExpressQ(&message.CloseVC{
+				VCI:   m.VCI,
+				Error: err.Error(),
+			})
+			return nil
+		}
+		vif.distributeCounters(m.Counters)
+		keyExchanger := func(pubKey *crypto.BoxKey) (*crypto.BoxKey, error) {
+			var options []message.SetupOption
+			if pubKey != nil {
+				options = []message.SetupOption{&message.NaclBox{PublicKey: *pubKey}}
+			}
+			err = vif.sendOnExpressQ(&message.SetupVC{
+				VCI: m.VCI,
+				Setup: message.Setup{
+					// Note that servers send clients not their actual supported versions,
+					// but the intersected range of the server and client ranges.  This
+					// is important because proxies may have adjusted the version ranges
+					// along the way, and we should negotiate a version that is compatible
+					// with all intermediate hops.
+					Versions: *intersection,
+					Options:  options,
+				},
+				RemoteEndpoint: m.LocalEndpoint,
+				LocalEndpoint:  vif.localEP,
+				// TODO(mattr): Consider adding counters.  See associated comment
+				// in vc.go:VC.HandshakeAcceptedVC for more details.
+			})
+			return theirPK, err
+		}
+		go vif.acceptFlowsLoop(vc, vc.HandshakeAcceptedVC(intersection.Max, vif.principal, vif.blessings, keyExchanger, lopts...))
+
+	case *message.CloseVC:
+		if vc, _, _ := vif.vcMap.Find(m.VCI); vc != nil {
+			vif.deleteVC(vc.VCI())
+			vlog.VI(2).Infof("CloseVC(%+v) on VIF %s", m, vif)
+			// TODO(cnicolaou): it would be nice to have a method on VC
+			// to indicate a 'remote close' rather than a 'local one'. This helps
+			// with error reporting since we expect reads/writes to occur
+			// after a remote close, but not after a local close.
+			vc.Close(verror.New(stream.ErrNetwork, nil, verror.New(errRemoteEndClosedVC, nil, m.Error)))
+			return nil
+		}
+		vlog.VI(2).Infof("Ignoring CloseVC(%+v) for unrecognized VCI on VIF %s", m, vif)
+
+	case *message.AddReceiveBuffers:
+		vif.distributeCounters(m.Counters)
+
+	case *message.OpenFlow:
+		if vc, _, _ := vif.vcMap.Find(m.VCI); vc != nil {
+			if err := vc.AcceptFlow(m.Flow); err != nil {
+				vlog.VI(3).Infof("OpenFlow %+v on VIF %v failed:%v", m, vif, err)
+				cm := &message.Data{VCI: m.VCI, Flow: m.Flow}
+				cm.SetClose()
+				vif.sendOnExpressQ(cm)
+				return nil
+			}
+			vc.ReleaseCounters(m.Flow, m.InitialCounters)
+			return nil
+		}
+		vlog.VI(2).Infof("Ignoring OpenFlow(%+v) for unrecognized VCI on VIF %s", m, m, vif)
+
+	case *message.Setup:
+		vlog.Infof("Ignoring redundant Setup message %T on VIF %s", m, vif)
+
+	default:
+		vlog.Infof("Ignoring unrecognized message %T on VIF %s", m, vif)
+	}
+	return nil
+}
+
+func (vif *VIF) vcDispatchLoop(vc *vc.VC, messages *pcqueue.T) {
+	defer vlog.VI(2).Infof("Exiting vcDispatchLoop(%v) on VIF %v", vc, vif)
+	defer vif.rpending.Done()
+	for {
+		qm, err := messages.Get(nil)
+		if err != nil {
+			return
+		}
+		m := qm.(*message.Data)
+		if err := vc.DispatchPayload(m.Flow, m.Payload); err != nil {
+			vlog.VI(2).Infof("Ignoring data message %v for on VIF %s: %v", m, vif, err)
+		}
+		if m.Close() {
+			vif.shutdownFlow(vc, m.Flow)
+		}
+	}
+}
+
+func (vif *VIF) stopVCDispatchLoops() {
+	vcs := vif.vcMap.Freeze()
+	for _, v := range vcs {
+		v.RQ.Close()
+	}
+	vif.rpending.Wait()
+}
+
+func clientVCClosed(err error) bool {
+	// If we've encountered a networking error, then in all likelihood the
+	// connection to the client is closed.
+	return verror.ErrorID(err) == stream.ErrNetwork.ID
+}
+
+func (vif *VIF) acceptFlowsLoop(vc *vc.VC, c <-chan vc.HandshakeResult) {
+	hr := <-c
+	if hr.Error != nil {
+		vif.closeVCAndSendMsg(vc, clientVCClosed(hr.Error), hr.Error)
+		return
+	}
+
+	vif.muListen.Lock()
+	acceptor := vif.acceptor
+	vif.muListen.Unlock()
+	if acceptor == nil {
+		vif.closeVCAndSendMsg(vc, false, verror.New(errFlowsNoLongerAccepted, nil))
+		return
+	}
+
+	// Notify any listeners that a new VC has been established
+	if err := acceptor.Put(ConnectorAndFlow{vc, nil}); err != nil {
+		vif.closeVCAndSendMsg(vc, clientVCClosed(err), verror.New(errVCAcceptFailed, nil, err))
+		return
+	}
+
+	vlog.VI(2).Infof("Running acceptFlowsLoop for VC %v on VIF %v", vc, vif)
+	for {
+		f, err := hr.Listener.Accept()
+		if err != nil {
+			vlog.VI(2).Infof("Accept failed on VC %v on VIF %v: %v", vc, vif, err)
+			return
+		}
+		if err := acceptor.Put(ConnectorAndFlow{vc, f}); err != nil {
+			vlog.VI(2).Infof("vif.acceptor.Put(%v, %T) on VIF %v failed: %v", vc, f, vif, err)
+			f.Close()
+			return
+		}
+	}
+}
+
+func (vif *VIF) distributeCounters(counters message.Counters) {
+	for cid, bytes := range counters {
+		vc, _, _ := vif.vcMap.Find(cid.VCI())
+		if vc == nil {
+			vlog.VI(2).Infof("Ignoring counters for non-existent VCI %d on VIF %s", cid.VCI(), vif)
+			continue
+		}
+		vc.ReleaseCounters(cid.Flow(), bytes)
+	}
+}
+
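+// writeLoop drains vif.outgoing in priority order: expressQ (pre-serialized
+// control messages), then flowQ (flow-control counters), then per-flow data
+// writers, and finally stopQ, whose closure signals the loop to exit.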
+func (vif *VIF) writeLoop() {
+	defer vif.outgoing.Close()
+	defer vif.stopVCWriteLoops()
+	for {
+		writer, bufs, err := vif.outgoing.Get(nil)
+		if err != nil {
+			vlog.VI(1).Infof("Exiting writeLoop of VIF %s because of bqueue.Get error: %v", vif, err)
+			return
+		}
+		vif.muMsgCounters.Lock()
+		vif.msgCounters[fmt.Sprintf("Send(%T)", writer)]++
+		vif.muMsgCounters.Unlock()
+		switch writer {
+		case vif.expressQ:
+			for _, b := range bufs {
+				if err := vif.writeSerializedMessage(b.Contents); err != nil {
+					vlog.VI(1).Infof("Exiting writeLoop of VIF %s because Control message write failed: %s", vif, err)
+					releaseBufs(bufs)
+					return
+				}
+				b.Release()
+			}
+		case vif.flowQ:
+			msg := &message.AddReceiveBuffers{}
+			// No need to call releaseBufs(bufs) as all bufs are
+			// the exact same value: flowToken.
+			vif.flowMu.Lock()
+			if len(vif.flowCounters) > 0 {
+				msg.Counters = vif.flowCounters
+				vif.flowCounters = message.NewCounters()
+			}
+			vif.flowMu.Unlock()
+			if len(msg.Counters) > 0 {
+				vlog.VI(3).Infof("Sending counters %v on VIF %s", msg.Counters, vif)
+				if err := vif.writeMessage(msg); err != nil {
+					vlog.VI(1).Infof("Exiting writeLoop of VIF %s because AddReceiveBuffers message write failed: %v", vif, err)
+					return
+				}
+			}
+		case vif.stopQ:
+			// Lowest-priority queue which will never have any
+			// buffers; Close is the only method called on it.
+			return
+		default:
+			vif.writeDataMessages(writer, bufs)
+		}
+	}
+}
+
+func (vif *VIF) vcWriteLoop(vc *vc.VC, messages *pcqueue.T) {
+	defer vlog.VI(2).Infof("Exiting vcWriteLoop(%v) on VIF %v", vc, vif)
+	defer vif.wpending.Done()
+	for {
+		qm, err := messages.Get(nil)
+		if err != nil {
+			return
+		}
+		m := qm.(*message.Data)
+		m.Payload, err = vc.Encrypt(m.Flow, m.Payload)
+		if err != nil {
+			vlog.Infof("Encryption failed. Flow:%v VC:%v Error:%v", m.Flow, vc, err)
+		}
+		if m.Close() {
+			// The last bytes written on the flow will be sent out
+			// on vif.conn. Local data structures for the flow can
+			// be cleaned up now.
+			vif.shutdownFlow(vc, m.Flow)
+		}
+		if err == nil {
+			err = vif.writeMessage(m)
+		}
+		if err != nil {
+			// TODO(caprita): Calling closeVCAndSendMsg below causes
+			// a race as described in:
+			// https://docs.google.com/a/google.com/document/d/1C0kxfYhuOcStdV7tnLZELZpUhfQCZj47B0JrzbE29h8/edit
+			//
+			// There should be a finer grained way to fix this, and
+			// there are likely other instances where we should not
+			// be closing the VC.
+			//
+			// For now, commenting out the line below removes the
+			// flakiness from our existing unit tests, but this
+			// needs to be revisited and fixed correctly.
+			//
+			//   vif.closeVCAndSendMsg(vc, fmt.Sprintf("write failure: %v", err))
+
+			// Drain the queue and exit.
+			for {
+				qm, err := messages.Get(nil)
+				if err != nil {
+					return
+				}
+				qm.(*message.Data).Release()
+			}
+		}
+	}
+}
+
+func (vif *VIF) stopVCWriteLoops() {
+	vcs := vif.vcMap.Freeze()
+	vif.idleTimerMap.Stop()
+	for _, v := range vcs {
+		v.WQ.Close()
+	}
+}
+
+// sendOnExpressQ adds 'msg' to the expressQ (highest priority queue) of messages to write on the wire.
+func (vif *VIF) sendOnExpressQ(msg message.T) error {
+	vlog.VI(2).Infof("sendOnExpressQ(%T = %+v) on VIF %s", msg, msg, vif)
+	var buf bytes.Buffer
+	// Don't encrypt yet, because the message ordering isn't yet determined.
+	// Encryption is performed by vif.writeSerializedMessage() when the
+	// message is actually written to vif.conn.
+	vif.writeMu.Lock()
+	c := vif.ctrlCipher
+	vif.writeMu.Unlock()
+	if err := message.WriteTo(&buf, msg, crypto.NewDisabledControlCipher(c)); err != nil {
+		return err
+	}
+	return vif.expressQ.Put(iobuf.NewSlice(buf.Bytes()), nil)
+}
+
+// writeMessage writes the message to the channel.  Writes must be serialized so
+// that the control channel can be encrypted, so we acquire the writeMu.
+func (vif *VIF) writeMessage(msg message.T) error {
+	vif.writeMu.Lock()
+	defer vif.writeMu.Unlock()
+	return message.WriteTo(vif.conn, msg, vif.ctrlCipher)
+}
+
+// writeSerializedMessage writes a pre-serialized message to the channel,
+// encrypting the control data.  Writes must be serialized so that the control
+// channel can be encrypted, so we acquire the writeMu.
+func (vif *VIF) writeSerializedMessage(msg []byte) error {
+	vif.writeMu.Lock()
+	defer vif.writeMu.Unlock()
+	if err := message.EncryptMessage(msg, vif.ctrlCipher); err != nil {
+		return err
+	}
+	if n, err := vif.conn.Write(msg); err != nil {
+		return verror.New(stream.ErrNetwork, nil, verror.New(errWriteFailed, nil, n, err, len(msg)))
+	}
+	return nil
+}
+
+func (vif *VIF) writeDataMessages(writer bqueue.Writer, bufs []*iobuf.Slice) {
+	vci, fid := unpackIDs(writer.ID())
+	// iobuf.Coalesce will coalesce buffers only if they are adjacent to
+	// each other.  In the worst case, each buf will be non-adjacent to the
+	// others and the code below will end up with multiple small writes
+	// instead of a single big one.  This might be worth revisiting if it
+	// shows up as a performance problem.
+	bufs = iobuf.Coalesce(bufs, uint(vc.MaxPayloadSizeBytes))
+	_, _, wq := vif.vcMap.Find(vci)
+	if wq == nil {
+		// VC has been removed, stop sending messages
+		vlog.VI(2).Infof("VCI %d on VIF %s was shutdown, dropping %d messages that were pending a write", vci, vif, len(bufs))
+		releaseBufs(bufs)
+		return
+	}
+	last := len(bufs) - 1
+	drained := writer.IsDrained()
+	for i, b := range bufs {
+		d := &message.Data{VCI: vci, Flow: fid, Payload: b}
+		if drained && i == last {
+			d.SetClose()
+		}
+		if err := wq.Put(d, nil); err != nil {
+			releaseBufs(bufs[i:])
+			return
+		}
+	}
+	if len(bufs) == 0 && drained {
+		d := &message.Data{VCI: vci, Flow: fid}
+		d.SetClose()
+		if err := wq.Put(d, nil); err != nil {
+			d.Release()
+		}
+	}
+}
+
+// dialedVCI reports whether the given VCI was allocated by this side of the
+// VIF: dialer and acceptor start at VCIs of opposite parity and allocVCI
+// increments by 2, so parity identifies the allocator.
+func (vif *VIF) dialedVCI(VCI id.VC) bool {
+	return vif.nextVCI%2 == VCI%2
+}
+
+func (vif *VIF) allocVCI() id.VC {
+	vif.muNextVCI.Lock()
+	ret := vif.nextVCI
+	vif.nextVCI += 2
+	vif.muNextVCI.Unlock()
+	return ret
+}
+
+func (vif *VIF) newVC(vci id.VC, localEP, remoteEP naming.Endpoint, idleTimeout time.Duration, dialed bool) (*vc.VC, error) {
+	vif.muStartTimer.Lock()
+	if vif.startTimer != nil {
+		vif.startTimer.Stop()
+		vif.startTimer = nil
+	}
+	vif.muStartTimer.Unlock()
+	macSize := vif.ctrlCipher.MACSize()
+	vc := vc.InternalNew(vc.Params{
+		VCI:          vci,
+		Dialed:       dialed,
+		LocalEP:      localEP,
+		RemoteEP:     remoteEP,
+		Pool:         vif.pool,
+		ReserveBytes: uint(message.HeaderSizeBytes + macSize),
+		Helper:       vcHelper{vif},
+	})
+	added, rq, wq := vif.vcMap.Insert(vc)
+	if added {
+		vif.idleTimerMap.Insert(vc.VCI(), idleTimeout)
+	}
+	// Start vcWriteLoop
+	if added = added && vif.wpending.TryAdd(); added {
+		go vif.vcWriteLoop(vc, wq)
+	}
+	// Start vcDispatchLoop
+	if added = added && vif.rpending.TryAdd(); added {
+		go vif.vcDispatchLoop(vc, rq)
+	}
+	if !added {
+		if rq != nil {
+			rq.Close()
+		}
+		if wq != nil {
+			wq.Close()
+		}
+		vc.Close(verror.New(stream.ErrAborted, nil, verror.New(errShuttingDown, nil, vif)))
+		vif.deleteVC(vci)
+		return nil, verror.New(stream.ErrAborted, nil, verror.New(errShuttingDown, nil, vif))
+	}
+	return vc, nil
+}
+
+func (vif *VIF) deleteVC(vci id.VC) {
+	vif.idleTimerMap.Delete(vci)
+	if vif.vcMap.Delete(vci) {
+		vif.Close()
+	}
+}
+
+func (vif *VIF) closeVCAndSendMsg(vc *vc.VC, clientVCClosed bool, errMsg error) {
+	vlog.VI(2).Infof("Shutting down VCI %d on VIF %v due to: %v", vc.VCI(), vif, errMsg)
+	vif.deleteVC(vc.VCI())
+	vc.Close(errMsg)
+	if clientVCClosed {
+		// No point in sending to the client if the VC is closed, or otherwise broken.
+		return
+	}
+	msg := ""
+	if errMsg != nil {
+		msg = errMsg.Error()
+	}
+	if err := vif.sendOnExpressQ(&message.CloseVC{
+		VCI:   vc.VCI(),
+		Error: msg,
+	}); err != nil {
+		vlog.VI(2).Infof("sendOnExpressQ(CloseVC{VCI:%d,...}) on VIF %v failed: %v", vc.VCI(), vif, err)
+	}
+}
+
+// shutdownFlow clears out all the data structures associated with fid.
+func (vif *VIF) shutdownFlow(vc *vc.VC, fid id.Flow) {
+	vc.ShutdownFlow(fid)
+	vif.flowMu.Lock()
+	delete(vif.flowCounters, message.MakeCounterID(vc.VCI(), fid))
+	vif.flowMu.Unlock()
+	vif.idleTimerMap.DeleteFlow(vc.VCI(), fid)
+}
+
+// ShutdownVCs closes all VCs established to the provided remote endpoint.
+// Returns the number of VCs that were closed.
+func (vif *VIF) ShutdownVCs(remote naming.Endpoint) int {
+	vcs := vif.vcMap.List()
+	n := 0
+	for _, vc := range vcs {
+		if naming.Compare(vc.RemoteEndpoint().RoutingID(), remote.RoutingID()) {
+			vlog.VI(1).Infof("VCI %d on VIF %s being closed because of ShutdownVCs call", vc.VCI(), vif)
+			vif.closeVCAndSendMsg(vc, false, nil)
+			n++
+		}
+	}
+	return n
+}
+
+// NumVCs returns the number of VCs established over this VIF.
+func (vif *VIF) NumVCs() int { return vif.vcMap.Size() }
+
+// DebugString returns a descriptive state of the VIF.
+//
+// The returned string is meant for consumption by humans. The specific format
+// should not be relied upon by any automated processing.
+func (vif *VIF) DebugString() string {
+	vcs := vif.vcMap.List()
+	l := make([]string, 0, len(vcs)+1)
+
+	vif.muNextVCI.Lock() // Needed for vif.nextVCI
+	l = append(l, fmt.Sprintf("VIF:[%s] -- #VCs:%d NextVCI:%d ControlChannelEncryption:%v IsClosed:%v", vif, len(vcs), vif.nextVCI, vif.ctrlCipher != nullCipher, vif.isClosed))
+	vif.muNextVCI.Unlock()
+
+	for _, vc := range vcs {
+		l = append(l, vc.DebugString())
+	}
+
+	l = append(l, "Message Counters:")
+	ctrs := len(l)
+	vif.muMsgCounters.Lock()
+	for k, v := range vif.msgCounters {
+		l = append(l, fmt.Sprintf(" %-32s %10d", k, v))
+	}
+	vif.muMsgCounters.Unlock()
+	sort.Strings(l[ctrs:])
+	return strings.Join(l, "\n")
+}
+
+// Methods and type that implement vc.Helper
+//
+// We create a separate type for vc.Helper to hide the vc.Helper methods
+// from the exported method set of VIF.
+type vcHelper struct{ vif *VIF }
+
+func (h vcHelper) NotifyOfNewFlow(vci id.VC, fid id.Flow, bytes uint) {
+	h.vif.sendOnExpressQ(&message.OpenFlow{VCI: vci, Flow: fid, InitialCounters: uint32(bytes)})
+}
+
+func (h vcHelper) AddReceiveBuffers(vci id.VC, fid id.Flow, bytes uint) {
+	if bytes == 0 {
+		return
+	}
+	h.vif.flowMu.Lock()
+	h.vif.flowCounters.Add(vci, fid, uint32(bytes))
+	h.vif.flowMu.Unlock()
+	h.vif.flowQ.TryPut(flowToken)
+}
+
+func (h vcHelper) NewWriter(vci id.VC, fid id.Flow, priority bqueue.Priority) (bqueue.Writer, error) {
+	h.vif.idleTimerMap.InsertFlow(vci, fid)
+	return h.vif.outgoing.NewWriter(packIDs(vci, fid), flowPriority+priority, defaultBytesBufferedPerFlow)
+}
+
+// The token added to vif.flowQ.
+var flowToken *iobuf.Slice
+
+func init() {
+	// flowToken must be non-empty otherwise bqueue.Writer.Put will ignore it.
+	flowToken = iobuf.NewSlice(make([]byte, 1))
+}
+
+func packIDs(vci id.VC, fid id.Flow) bqueue.ID {
+	return bqueue.ID(message.MakeCounterID(vci, fid))
+}
+
+func unpackIDs(b bqueue.ID) (id.VC, id.Flow) {
+	cid := message.CounterID(b)
+	return cid.VCI(), cid.Flow()
+}
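+
+// For illustration, the two are inverses: unpackIDs(packIDs(vci, fid))
+// yields (vci, fid) again, since both delegate to message.CounterID.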
+
+func releaseBufs(bufs []*iobuf.Slice) {
+	for _, b := range bufs {
+		b.Release()
+	}
+}
+
+// localEP creates a naming.Endpoint from the provided parameters.
+//
+// It intentionally does not include any blessings (present in endpoints in the
+// v5 format). At this point it is not clear whether the endpoint is being
+// created for a "client" or a "server". If the endpoint is used for clients
+// (i.e., for those sending an OpenVC message for example), then we do NOT want
+// to include the blessings in the endpoint to ensure client privacy.
+//
+// Servers should be happy to let anyone with access to their endpoint string
+// know their blessings, because they are willing to share those with anyone
+// that connects to them.
+//
+// The addition of blessings to the endpoint is left as an exercise to higher
+// layers of the stack, where the desire to share or hide blessings from the
+// endpoint is clearer.
+func localEP(conn net.Conn, rid naming.RoutingID, versions *iversion.Range) naming.Endpoint {
+	localAddr := conn.LocalAddr()
+	ep := &inaming.Endpoint{
+		Protocol: localAddr.Network(),
+		Address:  localAddr.String(),
+		RID:      rid,
+	}
+	return ep
+}
+
+// getDialContext returns the DialContext for this call.
+func getDialContext(vopts []stream.VCOpt) *context.T {
+	for _, o := range vopts {
+		switch v := o.(type) {
+		case vc.DialContext:
+			return v.T
+		}
+	}
+	return nil
+}
diff --git a/runtime/internal/rpc/stream/vif/vif_test.go b/runtime/internal/rpc/stream/vif/vif_test.go
new file mode 100644
index 0000000..fd820ff
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/vif_test.go
@@ -0,0 +1,925 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests in a separate package to ensure that only the exported API is used in the tests.
+//
+// All tests are run with the default security level on VCs (SecurityConfidential).
+
+package vif_test
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net"
+	"reflect"
+	"runtime"
+	"sort"
+	"sync"
+	"testing"
+	"time"
+
+	"v.io/v23/naming"
+	"v.io/v23/rpc/version"
+
+	inaming "v.io/x/ref/runtime/internal/naming"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+	"v.io/x/ref/runtime/internal/rpc/stream/vif"
+	iversion "v.io/x/ref/runtime/internal/rpc/version"
+	"v.io/x/ref/test/testutil"
+)
+
+//go:generate v23 test generate
+
+func TestSingleFlowCreatedAtClient(t *testing.T) {
+	client, server := NewClientServer()
+	defer client.Close()
+
+	clientVC, _, err := createVC(client, server, makeEP(0x5))
+	if err != nil {
+		t.Fatal(err)
+	}
+	writer, err := clientVC.Connect()
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Test with an empty message to ensure that we correctly
+	// handle closing empty flows.
+	rwSingleFlow(t, writer, acceptFlowAtServer(server), "")
+	writer, err = clientVC.Connect()
+	if err != nil {
+		t.Fatal(err)
+	}
+	rwSingleFlow(t, writer, acceptFlowAtServer(server), "the dark knight")
+}
+
+func TestSingleFlowCreatedAtServer(t *testing.T) {
+	client, server := NewClientServer()
+	defer client.Close()
+
+	clientVC, serverConnector, err := createVC(client, server, makeEP(0x5))
+	if err != nil {
+		t.Fatal(err)
+	}
+	ln, err := clientVC.Listen()
+	if err != nil {
+		t.Fatal(err)
+	}
+	writer, err := serverConnector.Connect()
+	if err != nil {
+		t.Fatal(err)
+	}
+	reader, err := ln.Accept()
+	if err != nil {
+		t.Fatal(err)
+	}
+	rwSingleFlow(t, writer, reader, "the dark knight")
+	ln.Close()
+}
+
+func testMultipleVCsAndMultipleFlows(t *testing.T, gomaxprocs int) {
+	// This test dials multiple VCs from the client to the server.
+	// On each VC, it creates multiple flows, writes to them and verifies
+	// that the other process received what was written.
+
+	// Knobs configuring this test
+	//
+	// In case the test breaks, the knobs can be tuned down to isolate the problem.
+	// In normal circumstances, the knobs should be tuned up to stress test the code.
+	const (
+		nVCs                  = 6 // Number of VCs created by the client process Dialing.
+		nFlowsFromClientPerVC = 3 // Number of flows initiated by the client process, per VC
+		nFlowsFromServerPerVC = 4 // Number of flows initiated by the server process, per VC
+
+		// Maximum number of bytes to write and read per flow.
+		// The actual size is selected randomly.
+		maxBytesPerFlow = 512 << 10 // 512KB
+	)
+
+	mp := runtime.GOMAXPROCS(gomaxprocs)
+	defer runtime.GOMAXPROCS(mp)
+	client, server := NewClientServer()
+	defer client.Close()
+
+	// Create all the VCs
+	// clientVCs[i] is the VC at the client process
+	// serverConnectors[i] is the corresponding VC at the server process.
+	clientVCs, serverConnectors, err := createNVCs(client, server, 0, nVCs)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Create listeners for flows on the client VCs.
+	// Flows are implicitly being listened to at the server (available through server.Accept())
+	clientLNs, err := createListeners(clientVCs)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Create flows:
+	// Over each VC, create nFlowsFromClientPerVC initiated by the client
+	// and nFlowsFromServerPerVC initiated by the server.
+	nFlows := nVCs * (nFlowsFromClientPerVC + nFlowsFromServerPerVC)
+
+	// Fill in random strings that will be written over the Flows.
+	dataWritten := make([]string, nFlows)
+	for i := 0; i < nFlows; i++ {
+		dataWritten[i] = string(testutil.RandomBytes(maxBytesPerFlow))
+	}
+
+	// write writes data to flow in randomly sized chunks.
+	write := func(flow stream.Flow, data string) {
+		defer flow.Close()
+		buf := []byte(data)
+		// Split into a random number of Write calls.
+		for len(buf) > 0 {
+			size := 1 + testutil.Intn(len(buf)) // Random number in [1, len(buf)]
+			n, err := flow.Write(buf[:size])
+			if err != nil {
+				t.Errorf("Write failed: (%d, %v)", n, err)
+				return
+			}
+			buf = buf[size:]
+		}
+	}
+
+	dataReadChan := make(chan string, nFlows)
+	// read reads from a flow and writes out the data to dataReadChan
+	read := func(flow stream.Flow) {
+		var buf bytes.Buffer
+		var tmp [1024]byte
+		for {
+			n, err := flow.Read(tmp[:testutil.Intn(len(tmp))])
+			buf.Write(tmp[:n])
+			if err == io.EOF {
+				break
+			}
+			if err != nil {
+				t.Errorf("Read error: %v", err)
+				break
+			}
+		}
+		dataReadChan <- buf.String()
+	}
+
+	index := 0
+	for i := 0; i < len(clientVCs); i++ {
+		for j := 0; j < nFlowsFromClientPerVC; j++ {
+			// Flow initiated by client, read by server
+			writer, err := clientVCs[i].Connect()
+			if err != nil {
+				t.Errorf("clientVCs[%d], flow %d: %v", i, j, err)
+				continue
+			}
+			go write(writer, dataWritten[index])
+			go read(acceptFlowAtServer(server))
+			index++
+		}
+	}
+	for i := 0; i < len(serverConnectors); i++ {
+		for j := 0; j < nFlowsFromServerPerVC; j++ {
+			// Flow initiated by server, read by client
+			writer, err := serverConnectors[i].Connect()
+			if err != nil {
+				t.Errorf("serverConnectors[%d], flow %d: %v", i, j, err)
+				continue
+			}
+			go write(writer, dataWritten[index])
+			go read(acceptFlowAtClient(clientLNs[i]))
+			index++
+		}
+	}
+	if index != nFlows {
+		t.Errorf("Created %d flows, wanted %d", index, nFlows)
+	}
+
+	// Collect all data read and compare against the data written.
+	// Since flows might be accepted in arbitrary order, sort the data before comparing.
+	dataRead := make([]string, index)
+	for i := 0; i < index; i++ {
+		dataRead[i] = <-dataReadChan
+	}
+	sort.Strings(dataWritten)
+	sort.Strings(dataRead)
+	if !reflect.DeepEqual(dataRead, dataWritten) {
+		// Since the strings can be very large, only print out the first few diffs.
+		nDiffs := 0
+		for i := 0; i < len(dataRead); i++ {
+			if dataRead[i] != dataWritten[i] {
+				nDiffs++
+				t.Errorf("Diff %d out of %d items: Got %q want %q", nDiffs, i, atmostNbytes(dataRead[i], 20), atmostNbytes(dataWritten[i], 20))
+			}
+		}
+		if nDiffs > 0 {
+			t.Errorf("#Mismatches:%d #ReadSamples:%d #WriteSamples:%d", nDiffs, len(dataRead), len(dataWritten))
+		}
+	}
+}
+
+func TestMultipleVCsAndMultipleFlows_1(t *testing.T) {
+	// Test with a single goroutine since that is typically easier to debug
+	// in case of problems.
+	testMultipleVCsAndMultipleFlows(t, 1)
+}
+
+func TestMultipleVCsAndMultipleFlows_5(t *testing.T) {
+	// Test with multiple goroutines, particularly useful for checking
+	// races with
+	// go test -race
+	testMultipleVCsAndMultipleFlows(t, 5)
+}
+
+func TestClose(t *testing.T) {
+	client, server := NewClientServer()
+	vc, _, err := createVC(client, server, makeEP(0x5))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	clientFlow, err := vc.Connect()
+	if err != nil {
+		t.Fatal(err)
+	}
+	serverFlow := acceptFlowAtServer(server)
+
+	var message = []byte("bugs bunny")
+	go func() {
+		// Note: use t.Errorf, not t.Fatalf, since Fatalf must not be
+		// called from a goroutine other than the test's own.
+		if n, err := clientFlow.Write(message); n != len(message) || err != nil {
+			t.Errorf("Wrote (%d, %v), want (%d, nil)", n, err, len(message))
+		}
+		client.Close()
+	}()
+
+	buf := make([]byte, 1024)
+	// client.Close should drain all pending writes first.
+	if n, err := serverFlow.Read(buf); n != len(message) || err != nil {
+		t.Fatalf("Got (%d, %v) = %q, want (%d, nil) = %q", n, err, buf[:n], len(message), message)
+	}
+	// Subsequent reads should fail, since the VIF should be closed.
+	if n, err := serverFlow.Read(buf); n != 0 || err == nil {
+		t.Fatalf("Got (%d, %v) = %q, want (0, <non-nil error>)", n, err, buf[:n])
+	}
+	server.Close()
+}
+
+func TestOnClose(t *testing.T) {
+	notifyC, notifyS := make(chan *vif.VIF), make(chan *vif.VIF)
+	notifyFuncC := func(vf *vif.VIF) { notifyC <- vf }
+	notifyFuncS := func(vf *vif.VIF) { notifyS <- vf }
+
+	// Close the client VIF. Both client and server should be notified.
+	client, server, err := New(nil, nil, notifyFuncC, notifyFuncS, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	client.Close()
+	if got := <-notifyC; got != client {
+		t.Errorf("Want notification for %v; got %v", client, got)
+	}
+	if got := <-notifyS; got != server {
+		t.Errorf("Want notification for %v; got %v", server, got)
+	}
+
+	// Same as above, but close the server VIF at this time.
+	client, server, err = New(nil, nil, notifyFuncC, notifyFuncS, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	server.Close()
+	if got := <-notifyC; got != client {
+		t.Errorf("Want notification for %v; got %v", client, got)
+	}
+	if got := <-notifyS; got != server {
+		t.Errorf("Want notification for %v; got %v", server, got)
+	}
+}
+
+func testCloseWhenEmpty(t *testing.T, testServer bool) {
+	const (
+		waitTime = 5 * time.Millisecond
+	)
+
+	notify := make(chan interface{})
+	notifyFunc := func(vf *vif.VIF) { notify <- vf }
+
+	newVIF := func() (vf, remote *vif.VIF) {
+		var err error
+		vf, remote, err = New(nil, nil, notifyFunc, notifyFunc, nil, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if err = vf.StartAccepting(); err != nil {
+			t.Fatal(err)
+		}
+		if testServer {
+			vf, remote = remote, vf
+		}
+		return
+	}
+
+	// Initially empty. Should not be closed.
+	vf, remote := newVIF()
+	if err := vif.WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+
+	// Open one VC. Should not be closed.
+	vf, remote = newVIF()
+	if _, _, err := createVC(vf, remote, makeEP(0x10)); err != nil {
+		t.Fatal(err)
+	}
+	if err := vif.WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+
+	// Close the VC. Should be closed.
+	vf.ShutdownVCs(makeEP(0x10))
+	if err := vif.WaitForNotifications(notify, vf, remote); err != nil {
+		t.Error(err)
+	}
+
+	// Same as above, but open a VC from the remote side.
+	vf, remote = newVIF()
+	_, _, err := createVC(remote, vf, makeEP(0x10))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := vif.WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+	remote.ShutdownVCs(makeEP(0x10))
+	if err := vif.WaitForNotifications(notify, vf, remote); err != nil {
+		t.Error(err)
+	}
+
+	// Create two VCs.
+	vf, remote = newVIF()
+	if _, _, err := createNVCs(vf, remote, 0x10, 2); err != nil {
+		t.Fatal(err)
+	}
+
+	// Close the first VC twice. Should not be closed.
+	vf.ShutdownVCs(makeEP(0x10))
+	vf.ShutdownVCs(makeEP(0x10))
+	if err := vif.WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+
+	// Close the second VC. Should be closed.
+	vf.ShutdownVCs(makeEP(0x10 + 1))
+	if err := vif.WaitForNotifications(notify, vf, remote); err != nil {
+		t.Error(err)
+	}
+}
+
+func TestCloseWhenEmpty(t *testing.T)       { testCloseWhenEmpty(t, false) }
+func TestCloseWhenEmptyServer(t *testing.T) { testCloseWhenEmpty(t, true) }
+
+func testStartTimeout(t *testing.T, testServer bool) {
+	const (
+		startTime = 5 * time.Millisecond
+		// We use a long wait time here since it takes some time for the underlying network
+		// connection of the other side to be closed, especially in race testing.
+		waitTime = 150 * time.Millisecond
+	)
+
+	notify := make(chan interface{})
+	notifyFunc := func(vf *vif.VIF) { notify <- vf }
+
+	newVIF := func() (vf, remote *vif.VIF, triggerTimers func()) {
+		triggerTimers = vif.SetFakeTimers()
+		var vfStartTime, remoteStartTime time.Duration = startTime, 0
+		if testServer {
+			vfStartTime, remoteStartTime = remoteStartTime, vfStartTime
+		}
+		var err error
+		vf, remote, err = New(nil, nil, notifyFunc, notifyFunc, []stream.VCOpt{vc.StartTimeout{vfStartTime}}, []stream.ListenerOpt{vc.StartTimeout{remoteStartTime}})
+		if err != nil {
+			t.Fatal(err)
+		}
+		if err = vf.StartAccepting(); err != nil {
+			t.Fatal(err)
+		}
+		if testServer {
+			vf, remote = remote, vf
+		}
+		return
+	}
+
+	// No VC opened. Should be closed after the start timeout.
+	vf, remote, triggerTimers := newVIF()
+	triggerTimers()
+	if err := vif.WaitForNotifications(notify, vf, remote); err != nil {
+		t.Error(err)
+	}
+
+	// Open one VC. Should not be closed.
+	vf, remote, triggerTimers = newVIF()
+	if _, _, err := createVC(vf, remote, makeEP(0x10)); err != nil {
+		t.Fatal(err)
+	}
+	triggerTimers()
+	if err := vif.WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+
+	// Close the VC. Should be closed.
+	vf.ShutdownVCs(makeEP(0x10))
+	if err := vif.WaitForNotifications(notify, vf, remote); err != nil {
+		t.Error(err)
+	}
+}
+
+func TestStartTimeout(t *testing.T)       { testStartTimeout(t, false) }
+func TestStartTimeoutServer(t *testing.T) { testStartTimeout(t, true) }
+
+func testIdleTimeout(t *testing.T, testServer bool) {
+	const (
+		idleTime = 10 * time.Millisecond
+		waitTime = idleTime * 2
+	)
+
+	notify := make(chan interface{})
+	notifyFunc := func(vf *vif.VIF) { notify <- vf }
+
+	newVIF := func() (vf, remote *vif.VIF) {
+		var err error
+		if vf, remote, err = New(nil, nil, notifyFunc, notifyFunc, nil, nil); err != nil {
+			t.Fatal(err)
+		}
+		if err = vf.StartAccepting(); err != nil {
+			t.Fatal(err)
+		}
+		if testServer {
+			vf, remote = remote, vf
+		}
+		return
+	}
+	newVC := func(vf, remote *vif.VIF) (VC stream.VC, ln stream.Listener, remoteVC stream.Connector) {
+		triggerTimers := vif.SetFakeTimers()
+		defer triggerTimers()
+		var err error
+		VC, remoteVC, err = createVC(vf, remote, makeEP(0x10), vc.IdleTimeout{idleTime})
+		if err != nil {
+			t.Fatal(err)
+		}
+		if ln, err = VC.Listen(); err != nil {
+			t.Fatal(err)
+		}
+		return
+	}
+	newFlow := func(vc stream.VC, remote *vif.VIF) stream.Flow {
+		f, err := vc.Connect()
+		if err != nil {
+			t.Fatal(err)
+		}
+		acceptFlowAtServer(remote)
+		return f
+	}
+
+	// No active flow. Should be notified.
+	vf, remote := newVIF()
+	_, _, _ = newVC(vf, remote)
+	if err := vif.WaitForNotifications(notify, vf, remote); err != nil {
+		t.Error(err)
+	}
+
+	// Same as above, but with multiple VCs.
+	vf, remote = newVIF()
+	triggerTimers := vif.SetFakeTimers()
+	if _, _, err := createNVCs(vf, remote, 0x10, 5, vc.IdleTimeout{idleTime}); err != nil {
+		t.Fatal(err)
+	}
+	triggerTimers()
+	if err := vif.WaitForNotifications(notify, vf, remote); err != nil {
+		t.Error(err)
+	}
+
+	// Open one flow. Should not be notified.
+	vf, remote = newVIF()
+	vc, _, _ := newVC(vf, remote)
+	f1 := newFlow(vc, remote)
+	if err := vif.WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+
+	// Close the flow. Should be notified.
+	f1.Close()
+	if err := vif.WaitForNotifications(notify, vf, remote); err != nil {
+		t.Error(err)
+	}
+
+	// Open two flows.
+	vf, remote = newVIF()
+	vc, _, _ = newVC(vf, remote)
+	f1 = newFlow(vc, remote)
+	f2 := newFlow(vc, remote)
+
+	// Close the first flow twice. Should not be notified.
+	f1.Close()
+	f1.Close()
+	if err := vif.WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+
+	// Close the second flow. Should be notified now.
+	f2.Close()
+	if err := vif.WaitForNotifications(notify, vf, remote); err != nil {
+		t.Error(err)
+	}
+
+	// Same as above, but open a flow from the remote side.
+	vf, remote = newVIF()
+	_, ln, remoteVC := newVC(vf, remote)
+	f1, err := remoteVC.Connect()
+	if err != nil {
+		t.Fatal(err)
+	}
+	acceptFlowAtClient(ln)
+	if err := vif.WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+	f1.Close()
+	if err := vif.WaitForNotifications(notify, vf, remote); err != nil {
+		t.Error(err)
+	}
+}
+
+func TestIdleTimeout(t *testing.T)       { testIdleTimeout(t, false) }
+func TestIdleTimeoutServer(t *testing.T) { testIdleTimeout(t, true) }
+
+func TestShutdownVCs(t *testing.T) {
+	client, server := NewClientServer()
+	defer server.Close()
+	defer client.Close()
+
+	testN := func(N int) error {
+		c := client.NumVCs()
+		if c != N {
+			return fmt.Errorf("%d VCs on client VIF, expected %d", c, N)
+		}
+		return nil
+	}
+
+	if _, _, err := createVC(client, server, makeEP(0x5)); err != nil {
+		t.Fatal(err)
+	}
+	if err := testN(1); err != nil {
+		t.Error(err)
+	}
+	if _, _, err := createVC(client, server, makeEP(0x5)); err != nil {
+		t.Fatal(err)
+	}
+	if err := testN(2); err != nil {
+		t.Error(err)
+	}
+	if _, _, err := createVC(client, server, makeEP(0x7)); err != nil {
+		t.Fatal(err)
+	}
+	if err := testN(3); err != nil {
+		t.Error(err)
+	}
+	// Client does not have any VCs to the endpoint with routing id 0x9,
+	// so nothing should be closed
+	if n := client.ShutdownVCs(makeEP(0x9)); n != 0 {
+		t.Errorf("Expected 0 VCs to be closed, closed %d", n)
+	}
+	if err := testN(3); err != nil {
+		t.Error(err)
+	}
+	// But it does have VCs to the endpoint with routing id 0x5
+	if n := client.ShutdownVCs(makeEP(0x5)); n != 2 {
+		t.Errorf("Expected 2 VCs to be closed, closed %d", n)
+	}
+	if err := testN(1); err != nil {
+		t.Error(err)
+	}
+	// And 0x7
+	if n := client.ShutdownVCs(makeEP(0x7)); n != 1 {
+		t.Errorf("Expected 2 VCs to be closed, closed %d", n)
+	}
+	if err := testN(0); err != nil {
+		t.Error(err)
+	}
+}
+
+type versionTestCase struct {
+	client, server, ep *iversion.Range
+	expectError        bool
+	expectVIFError     bool
+}
+
+func (tc *versionTestCase) Run(t *testing.T) {
+	client, server, err := NewVersionedClientServer(tc.client, tc.server)
+	if (err != nil) != tc.expectVIFError {
+		t.Errorf("Error mismatch.  Wanted error: %v, got %v; client: %v, server: %v", tc.expectVIFError, err, tc.client, tc.server)
+	}
+	if err != nil {
+		return
+	}
+	defer client.Close()
+
+	ep := &inaming.Endpoint{
+		Protocol: "test",
+		Address:  "addr",
+		RID:      naming.FixedRoutingID(0x5),
+	}
+	clientVC, _, err := createVC(client, server, ep)
+	if (err != nil) != tc.expectError {
+		t.Errorf("Error mismatch.  Wanted error: %v, got %v (client:%v, server:%v ep:%v)", tc.expectError, err, tc.client, tc.server, tc.ep)
+
+	}
+	if err != nil {
+		return
+	}
+
+	writer, err := clientVC.Connect()
+	if err != nil {
+		t.Errorf("Unexpected error on case %+v: %v", tc, err)
+		return
+	}
+
+	rwSingleFlow(t, writer, acceptFlowAtServer(server), "the dark knight")
+}
+
+// TestIncompatibleVersions tests many cases where the client and server
+// have compatible or incompatible supported version ranges.  It ensures
+// that overlapping ranges work properly and that non-overlapping ranges
+// generate errors.
+func TestIncompatibleVersions(t *testing.T) {
+	unknown := &iversion.Range{version.UnknownRPCVersion, version.UnknownRPCVersion}
+	tests := []versionTestCase{
+		{&iversion.Range{2, 2}, &iversion.Range{2, 2}, &iversion.Range{2, 2}, false, false},
+		{&iversion.Range{2, 3}, &iversion.Range{3, 5}, &iversion.Range{3, 5}, false, false},
+		{&iversion.Range{2, 3}, &iversion.Range{3, 5}, unknown, false, false},
+
+		// VIF error since there are no versions in common.
+		{&iversion.Range{2, 3}, &iversion.Range{4, 5}, &iversion.Range{4, 5}, true, true},
+		{&iversion.Range{2, 3}, &iversion.Range{4, 5}, unknown, true, true},
+	}
+
+	for _, tc := range tests {
+		tc.Run(t)
+	}
+}
+
+func TestNetworkFailure(t *testing.T) {
+	c1, c2 := pipe()
+	result := make(chan *vif.VIF)
+	pclient := testutil.NewPrincipal("client")
+	go func() {
+		client, err := vif.InternalNewDialedVIF(c1, naming.FixedRoutingID(0xc), pclient, nil, nil)
+		if err != nil {
+			// t.Fatal must not be called from a non-test goroutine; report
+			// the error and unblock the receive below instead.
+			t.Error(err)
+			result <- nil
+			return
+		}
+		result <- client
+	}()
+	pserver := testutil.NewPrincipal("server")
+	blessings := pserver.BlessingStore().Default()
+	server, err := vif.InternalNewAcceptedVIF(c2, naming.FixedRoutingID(0x5), pserver, blessings, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	client := <-result
+	if client == nil {
+		return
+	}
+	// If the network connection dies, Dial and Accept should fail.
+	c1.Close()
+	if _, err := client.Dial(makeEP(0x5), pclient); err == nil {
+		t.Errorf("Expected client.Dial to fail")
+	}
+	if _, err := server.Accept(); err == nil {
+		t.Errorf("Expected server.Accept to fail")
+	}
+}
+
+func makeEP(rid uint64) naming.Endpoint {
+	return &inaming.Endpoint{
+		Protocol: "test",
+		Address:  "addr",
+		RID:      naming.FixedRoutingID(rid),
+	}
+}
+
+// pipeAddr provides a more descriptive String implementation than the one
+// provided by net.Pipe.
+type pipeAddr struct{ name string }
+
+func (a pipeAddr) Network() string { return "pipe" }
+func (a pipeAddr) String() string  { return a.name }
+
+// pipeConn provides a buffered net.Conn, with pipeAddr addressing.
+type pipeConn struct {
+	lock sync.Mutex
+	// w is guarded by lock, to prevent Close from racing with Write.  This is a
+	// quick way to prevent the race, but it allows a Write to block the Close.
+	// This isn't a problem in the tests currently.
+	w            chan<- []byte
+	r            <-chan []byte
+	rdata        []byte
+	laddr, raddr pipeAddr
+}
+
+func (c *pipeConn) Read(data []byte) (int, error) {
+	for len(c.rdata) == 0 {
+		d, ok := <-c.r
+		if !ok {
+			return 0, io.EOF
+		}
+		c.rdata = d
+	}
+	n := copy(data, c.rdata)
+	c.rdata = c.rdata[n:]
+	return n, nil
+}
+
+func (c *pipeConn) Write(data []byte) (int, error) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	if c.w == nil {
+		return 0, io.EOF
+	}
+	d := make([]byte, len(data))
+	copy(d, data)
+	c.w <- d
+	return len(data), nil
+}
+
+func (c *pipeConn) Close() error {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	if c.w == nil {
+		return io.EOF
+	}
+	close(c.w)
+	c.w = nil
+	return nil
+}
+
+func (c *pipeConn) LocalAddr() net.Addr                { return c.laddr }
+func (c *pipeConn) RemoteAddr() net.Addr               { return c.raddr }
+func (c *pipeConn) SetDeadline(t time.Time) error      { return nil }
+func (c *pipeConn) SetReadDeadline(t time.Time) error  { return nil }
+func (c *pipeConn) SetWriteDeadline(t time.Time) error { return nil }
+
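+// pipe returns a pair of connected net.Conn implementations, similar to
+// net.Pipe but buffered so that small writes do not block.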
+func pipe() (net.Conn, net.Conn) {
+	clientAddr := pipeAddr{"client"}
+	serverAddr := pipeAddr{"server"}
+	c1 := make(chan []byte, 10)
+	c2 := make(chan []byte, 10)
+	p1 := &pipeConn{w: c1, r: c2, laddr: clientAddr, raddr: serverAddr}
+	p2 := &pipeConn{w: c2, r: c1, laddr: serverAddr, raddr: clientAddr}
+	return p1, p2
+}
+
+func NewClientServer() (client, server *vif.VIF) {
+	var err error
+	client, server, err = New(nil, nil, nil, nil, nil, nil)
+	if err != nil {
+		panic(err)
+	}
+	return
+}
+
+func NewVersionedClientServer(clientVersions, serverVersions *iversion.Range) (client, server *vif.VIF, verr error) {
+	return New(clientVersions, serverVersions, nil, nil, nil, nil)
+}
+
+func New(clientVersions, serverVersions *iversion.Range, clientOnClose, serverOnClose func(*vif.VIF), opts []stream.VCOpt, lopts []stream.ListenerOpt) (client, server *vif.VIF, verr error) {
+	c1, c2 := pipe()
+	var cerr error
+	cl := make(chan *vif.VIF)
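+	// Dial and accept concurrently; VIF setup performs a handshake over the
+	// pipe, so neither side can complete on its own.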
+	go func() {
+		c, err := vif.InternalNewDialedVIF(c1, naming.FixedRoutingID(0xc), testutil.NewPrincipal("client"), clientVersions, clientOnClose, opts...)
+		if err != nil {
+			cerr = err
+			close(cl)
+		} else {
+			cl <- c
+		}
+	}()
+	pserver := testutil.NewPrincipal("server")
+	bserver := pserver.BlessingStore().Default()
+	s, err := vif.InternalNewAcceptedVIF(c2, naming.FixedRoutingID(0x5), pserver, bserver, serverVersions, serverOnClose, lopts...)
+	c, ok := <-cl
+	if err != nil {
+		verr = err
+		return
+	}
+	if !ok {
+		verr = cerr
+		return
+	}
+	server = s
+	client = c
+	return
+}
+
+// rwSingleFlow writes out data on writer and ensures that the reader sees the same string.
+func rwSingleFlow(t *testing.T, writer io.WriteCloser, reader io.Reader, data string) {
+	go func() {
+		if n, err := writer.Write([]byte(data)); n != len(data) || err != nil {
+			t.Errorf("Write failure. Got (%d, %v) want (%d, nil)", n, err, len(data))
+		}
+		writer.Close()
+	}()
+
+	var buf bytes.Buffer
+	var tmp [4096]byte
+	for {
+		n, err := reader.Read(tmp[:])
+		buf.Write(tmp[:n])
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			t.Errorf("Read error: %v", err)
+		}
+	}
+	if buf.String() != data {
+		t.Errorf("Wrote %q but read %q", data, buf.String())
+	}
+}
+
+// createVC creates a VC by dialing from the client process to the server
+// process.  It returns the VC at the client and the Connector at the server
+// (which the server can use to create flows over the VC).
+func createVC(client, server *vif.VIF, ep naming.Endpoint, opts ...stream.VCOpt) (clientVC stream.VC, serverConnector stream.Connector, err error) {
+	vcChan := make(chan stream.VC)
+	scChan := make(chan stream.Connector)
+	errChan := make(chan error)
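+	// Dial and Accept must run concurrently since the VC handshake requires
+	// both ends to participate.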
+	go func() {
+		vc, err := client.Dial(ep, testutil.NewPrincipal("client"), opts...)
+		errChan <- err
+		vcChan <- vc
+	}()
+	go func() {
+		cAndf, err := server.Accept()
+		errChan <- err
+		if err == nil {
+			scChan <- cAndf.Connector
+		}
+	}()
+	if err = <-errChan; err != nil {
+		return
+	}
+	if err = <-errChan; err != nil {
+		return
+	}
+	clientVC = <-vcChan
+	serverConnector = <-scChan
+	return
+}
+
+func createNVCs(client, server *vif.VIF, startRID uint64, N int, opts ...stream.VCOpt) (clientVCs []stream.VC, serverConnectors []stream.Connector, err error) {
+	var c stream.VC
+	var s stream.Connector
+	for i := 0; i < N; i++ {
+		c, s, err = createVC(client, server, makeEP(startRID+uint64(i)), opts...)
+		if err != nil {
+			return
+		}
+		clientVCs = append(clientVCs, c)
+		serverConnectors = append(serverConnectors, s)
+	}
+	return
+}
+
+func createListeners(vcs []stream.VC) ([]stream.Listener, error) {
+	var ret []stream.Listener
+	for _, vc := range vcs {
+		ln, err := vc.Listen()
+		if err != nil {
+			return nil, err
+		}
+		ret = append(ret, ln)
+	}
+	return ret, nil
+}
+
+func acceptFlowAtServer(vf *vif.VIF) stream.Flow {
+	for {
+		cAndf, err := vf.Accept()
+		if err != nil {
+			panic(err)
+		}
+		if cAndf.Flow != nil {
+			return cAndf.Flow
+		}
+	}
+}
+
+func acceptFlowAtClient(ln stream.Listener) stream.Flow {
+	f, err := ln.Accept()
+	if err != nil {
+		panic(err)
+	}
+	return f
+}
+
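+// atmostNbytes returns s unchanged if it fits in n bytes; otherwise it
+// returns roughly the first and last n/2 bytes of s joined by "...".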
+func atmostNbytes(s string, n int) string {
+	if n > len(s) {
+		return s
+	}
+	b := []byte(s)
+	return string(b[:n/2]) + "..." + string(b[len(s)-n/2:])
+}
diff --git a/runtime/internal/rpc/stress/internal/client.go b/runtime/internal/rpc/stress/internal/client.go
new file mode 100644
index 0000000..d17b1b0
--- /dev/null
+++ b/runtime/internal/rpc/stress/internal/client.go
@@ -0,0 +1,129 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+	"bytes"
+	"fmt"
+	"math/rand"
+	"time"
+
+	"v.io/v23/context"
+
+	"v.io/x/lib/vlog"
+	"v.io/x/ref/runtime/internal/rpc/stress"
+)
+
+// CallEcho calls the 'Echo' method with the given payload size for the given
+// duration and returns the number of iterations completed.
+func CallEcho(ctx *context.T, server string, payloadSize int, duration time.Duration) uint64 {
+	stub := stress.StressClient(server)
+	payload := make([]byte, payloadSize)
+	for i := range payload {
+		payload[i] = byte(i & 0xff)
+	}
+
+	var iterations uint64
+	start := time.Now()
+	for {
+		got, err := stub.Echo(ctx, payload)
+		if err != nil {
+			vlog.Fatalf("Echo failed: %v", err)
+		}
+		if !bytes.Equal(got, payload) {
+			vlog.Fatalf("Echo returned %v, but expected %v", got, payload)
+		}
+		iterations++
+
+		if time.Since(start) >= duration {
+			break
+		}
+	}
+	return iterations
+}
+
+// CallSum calls the 'Sum' method with a randomly generated payload.
+func CallSum(ctx *context.T, server string, maxPayloadSize int, stats *stress.SumStats) {
+	stub := stress.StressClient(server)
+	arg, err := newSumArg(maxPayloadSize)
+	if err != nil {
+		vlog.Fatalf("new arg failed: %v", err)
+	}
+
+	got, err := stub.Sum(ctx, arg)
+	if err != nil {
+		vlog.Fatalf("Sum failed: %v", err)
+	}
+
+	wanted, _ := doSum(&arg)
+	if !bytes.Equal(got, wanted) {
+		vlog.Fatalf("Sum returned %v, but expected %v", got, wanted)
+	}
+	stats.SumCount++
+	stats.BytesSent += uint64(lenSumArg(&arg))
+	stats.BytesRecv += uint64(len(got))
+}
+
+// CallSumStream calls the 'SumStream' method. Each call sends up to
+// 'maxChunkCnt' chunks on the stream and receives the same number of
+// sums back.
+func CallSumStream(ctx *context.T, server string, maxChunkCnt, maxPayloadSize int, stats *stress.SumStats) {
+	stub := stress.StressClient(server)
+	stream, err := stub.SumStream(ctx)
+	if err != nil {
+		vlog.Fatalf("Stream failed: %v", err)
+	}
+
+	chunkCnt := rand.Intn(maxChunkCnt) + 1
+	args := make([]stress.SumArg, chunkCnt)
+	done := make(chan error, 1)
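+	// Receive the sums concurrently with sending so that neither direction
+	// of the stream blocks on flow control.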
+	go func() {
+		defer close(done)
+
+		recvS := stream.RecvStream()
+		i := 0
+		for ; recvS.Advance(); i++ {
+			got := recvS.Value()
+			wanted, _ := doSum(&args[i])
+			if !bytes.Equal(got, wanted) {
+				done <- fmt.Errorf("RecvStream returned %v, but expected %v", got, wanted)
+				return
+			}
+			stats.BytesRecv += uint64(len(got))
+		}
+		switch err := recvS.Err(); {
+		case err != nil:
+			done <- err
+		case i != chunkCnt:
+			done <- fmt.Errorf("RecvStream returned %d chunks, but expected %d", i, chunkCnt)
+		default:
+			done <- nil
+		}
+	}()
+
+	sendS := stream.SendStream()
+	for i := 0; i < chunkCnt; i++ {
+		arg, err := newSumArg(maxPayloadSize)
+		if err != nil {
+			vlog.Fatalf("new arg failed: %v", err)
+		}
+		args[i] = arg
+
+		if err = sendS.Send(arg); err != nil {
+			vlog.Fatalf("SendStream failed to send: %v", err)
+		}
+		stats.BytesSent += uint64(lenSumArg(&arg))
+	}
+	if err = sendS.Close(); err != nil {
+		vlog.Fatalf("SendStream failed to close: %v", err)
+	}
+	if err = <-done; err != nil {
+		vlog.Fatalf("%v", err)
+	}
+	if err = stream.Finish(); err != nil {
+		vlog.Fatalf("Stream failed to finish: %v", err)
+	}
+	stats.SumStreamCount++
+}
diff --git a/runtime/internal/rpc/stress/internal/server.go b/runtime/internal/rpc/stress/internal/server.go
new file mode 100644
index 0000000..30b03da
--- /dev/null
+++ b/runtime/internal/rpc/stress/internal/server.go
@@ -0,0 +1,105 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+	"sync"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+	"v.io/x/lib/vlog"
+
+	"v.io/x/ref/runtime/internal/rpc/stress"
+)
+
+type impl struct {
+	statsMu sync.Mutex
+	stats   stress.SumStats // GUARDED_BY(statsMu)
+
+	stop chan struct{}
+}
+
+func (s *impl) Echo(_ *context.T, _ rpc.ServerCall, payload []byte) ([]byte, error) {
+	return payload, nil
+}
+
+func (s *impl) Sum(_ *context.T, _ rpc.ServerCall, arg stress.SumArg) ([]byte, error) {
+	sum, err := doSum(&arg)
+	if err != nil {
+		return nil, err
+	}
+	s.addSumStats(false, uint64(lenSumArg(&arg)), uint64(len(sum)))
+	return sum, nil
+}
+
+func (s *impl) SumStream(_ *context.T, call stress.StressSumStreamServerCall) error {
+	rStream := call.RecvStream()
+	sStream := call.SendStream()
+	var bytesRecv, bytesSent uint64
+	for rStream.Advance() {
+		arg := rStream.Value()
+		sum, err := doSum(&arg)
+		if err != nil {
+			return err
+		}
+		if err := sStream.Send(sum); err != nil {
+			return err
+		}
+		bytesRecv += uint64(lenSumArg(&arg))
+		bytesSent += uint64(len(sum))
+	}
+	if err := rStream.Err(); err != nil {
+		return err
+	}
+	s.addSumStats(true, bytesRecv, bytesSent)
+	return nil
+}
+
+func (s *impl) addSumStats(stream bool, bytesRecv, bytesSent uint64) {
+	s.statsMu.Lock()
+	if stream {
+		s.stats.SumStreamCount++
+	} else {
+		s.stats.SumCount++
+	}
+	s.stats.BytesRecv += bytesRecv
+	s.stats.BytesSent += bytesSent
+	s.statsMu.Unlock()
+}
+
+func (s *impl) GetSumStats(*context.T, rpc.ServerCall) (stress.SumStats, error) {
+	s.statsMu.Lock()
+	defer s.statsMu.Unlock()
+	return s.stats, nil
+}
+
+func (s *impl) Stop(*context.T, rpc.ServerCall) error {
+	s.stop <- struct{}{}
+	return nil
+}
+
+// StartServer starts a server that implements the Stress service, and returns
+// the server and its vanadium address. It also returns a channel carrying stop
+// requests. After reading from the stop channel, the application should exit.
+func StartServer(ctx *context.T, listenSpec rpc.ListenSpec) (rpc.Server, naming.Endpoint, <-chan struct{}) {
+	server, err := v23.NewServer(ctx)
+	if err != nil {
+		vlog.Fatalf("NewServer failed: %v", err)
+	}
+	eps, err := server.Listen(listenSpec)
+	if err != nil {
+		vlog.Fatalf("Listen failed: %v", err)
+	}
+	if len(eps) == 0 {
+		vlog.Fatal("No local address to listen on")
+	}
+
+	s := impl{stop: make(chan struct{})}
+	if err := server.Serve("", stress.StressServer(&s), security.AllowEveryone()); err != nil {
+		vlog.Fatalf("Serve failed: %v", err)
+	}
+	return server, eps[0], s.stop
+}
diff --git a/runtime/internal/rpc/stress/internal/util.go b/runtime/internal/rpc/stress/internal/util.go
new file mode 100644
index 0000000..adf15e2
--- /dev/null
+++ b/runtime/internal/rpc/stress/internal/util.go
@@ -0,0 +1,44 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+	"crypto/md5"
+	crand "crypto/rand"
+	"encoding/binary"
+	"math/rand"
+
+	"v.io/x/ref/runtime/internal/rpc/stress"
+)
+
+// newSumArg returns a randomly generated SumArg.
+func newSumArg(maxPayloadSize int) (stress.SumArg, error) {
+	var arg stress.SumArg
+	arg.ABool = rand.Intn(2) == 0
+	arg.AInt64 = rand.Int63()
+	arg.AListOfBytes = make([]byte, rand.Intn(maxPayloadSize)+1)
+	_, err := crand.Read(arg.AListOfBytes)
+	return arg, err
+}
+
+// lenSumArg returns the approximate length of the SumArg in bytes.
+func lenSumArg(arg *stress.SumArg) int {
+	// bool (1 byte) + int64 (8 bytes) + []byte
+	return 1 + 8 + len(arg.AListOfBytes)
+}
+}
+
+// doSum returns the MD5 checksum of the SumArg.
+func doSum(arg *stress.SumArg) ([]byte, error) {
+	h := md5.New()
+	if arg.ABool {
+		if err := binary.Write(h, binary.LittleEndian, arg.AInt64); err != nil {
+			return nil, err
+		}
+	}
+	if _, err := h.Write(arg.AListOfBytes); err != nil {
+		return nil, err
+	}
+	return h.Sum(nil), nil
+}
diff --git a/runtime/internal/rpc/stress/mtstress/doc.go b/runtime/internal/rpc/stress/mtstress/doc.go
new file mode 100644
index 0000000..f3ade96
--- /dev/null
+++ b/runtime/internal/rpc/stress/mtstress/doc.go
@@ -0,0 +1,113 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated via go generate.
+// DO NOT UPDATE MANUALLY
+
+/*
+Usage:
+   mtstress <command>
+
+The mtstress commands are:
+   mount       Measure latency of the Mount RPC at a fixed request rate
+   resolve     Measure latency of the Resolve RPC at a fixed request rate
+   help        Display help for commands or topics
+
+The global flags are:
+ -duration=10s
+   Duration for sending test traffic and measuring latency
+ -rate=1
+   Rate, in RPCs per second, to send to the test server
+ -reauthenticate=false
+   If true, establish a new authenticated connection for each RPC, simulating
+   load from a distinct process
+
+ -alsologtostderr=true
+   log to standard error as well as files
+ -log_backtrace_at=:0
+   when logging hits line file:N, emit a stack trace
+ -log_dir=
+   if non-empty, write log files to this directory
+ -logtostderr=false
+   log to standard error instead of files
+ -max_stack_buf_size=4292608
+   max size in bytes of the buffer to use for logging stack traces
+ -stderrthreshold=2
+   logs at or above this threshold go to stderr
+ -v=0
+   log level for V logs
+ -v23.credentials=
+   directory to use for storing security credentials
+ -v23.i18n-catalogue=
+   i18n catalogue files to load, comma separated
+ -v23.metadata=<just specify -v23.metadata to activate>
+   Displays metadata for the program and exits.
+ -v23.namespace.root=[/(dev.v.io/role/vprod/service/mounttabled)@ns.dev.v.io:8101]
+   local namespace root; can be repeated to provide multiple roots
+ -v23.proxy=
+   object name of proxy service to use to export services across network
+   boundaries
+ -v23.tcp.address=
+   address to listen on
+ -v23.tcp.protocol=wsh
+   protocol to listen with
+ -v23.vtrace.cache-size=1024
+   The number of vtrace traces to store in memory.
+ -v23.vtrace.collect-regexp=
+   Spans and annotations that match this regular expression will trigger trace
+   collection.
+ -v23.vtrace.dump-on-shutdown=true
+   If true, dump all stored traces on runtime shutdown.
+ -v23.vtrace.sample-rate=0
+   Rate (from 0.0 to 1.0) to sample vtrace traces.
+ -vmodule=
+   comma-separated list of pattern=N settings for file-filtered logging
+
+Mtstress mount
+
+Repeatedly issues a Mount request (at --rate) and measures latency
+
+Usage:
+   mtstress mount <mountpoint> <ttl>
+
+<mountpoint> defines the name to be mounted
+
+<ttl> specifies the time-to-live of the mount point. For example: 5s for 5
+seconds, 1m for 1 minute etc. Valid time units are "ms", "s", "m", "h".
+
+Mtstress resolve
+
+Repeatedly issues a Resolve request (at --rate) to a name and measures latency
+
+Usage:
+   mtstress resolve <name>
+
+<name> the object name to resolve
+
+Mtstress help
+
+Help with no args displays the usage of the parent command.
+
+Help with args displays the usage of the specified sub-command or help topic.
+
+"help ..." recursively displays help for all commands and topics.
+
+Usage:
+   mtstress help [flags] [command/topic ...]
+
+[command/topic ...] optionally identifies a specific sub-command or help topic.
+
+The mtstress help flags are:
+ -style=compact
+   The formatting style for help output:
+      compact - Good for compact cmdline output.
+      full    - Good for cmdline output, shows all global flags.
+      godoc   - Good for godoc processing.
+   Override the default by setting the CMDLINE_STYLE environment variable.
+ -width=<terminal width>
+   Format output to this target width in runes, or unlimited if width < 0.
+   Defaults to the terminal width if available.  Override the default by setting
+   the CMDLINE_WIDTH environment variable.
+*/
+package main
diff --git a/runtime/internal/rpc/stress/mtstress/main.go b/runtime/internal/rpc/stress/mtstress/main.go
new file mode 100644
index 0000000..47a1404
--- /dev/null
+++ b/runtime/internal/rpc/stress/mtstress/main.go
@@ -0,0 +1,151 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The following enables go generate to generate the doc.go file.
+//go:generate go run $V23_ROOT/release/go/src/v.io/x/lib/cmdline/testdata/gendoc.go .
+
+package main
+
+import (
+	"flag"
+	"net"
+	"regexp"
+	"time"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/options"
+	"v.io/v23/verror"
+	"v.io/x/lib/cmdline2"
+	"v.io/x/ref/lib/v23cmd"
+	_ "v.io/x/ref/runtime/factories/generic"
+)
+
+func init() {
+	cmdline2.HideGlobalFlagsExcept(regexp.MustCompile(`^((rate)|(duration)|(reauthenticate))$`))
+}
+
+var (
+	rate     = flag.Float64("rate", 1, "Rate, in RPCs per second, to send to the test server")
+	duration = flag.Duration("duration", 10*time.Second, "Duration for sending test traffic and measuring latency")
+	reauth   = flag.Bool("reauthenticate", false, "If true, establish a new authenticated connection for each RPC, simulating load from a distinct process")
+
+	cmdMount = &cmdline2.Command{
+		Name:  "mount",
+		Short: "Measure latency of the Mount RPC at a fixed request rate",
+		Long: `
+Repeatedly issues a Mount request (at --rate) and measures latency
+`,
+		ArgsName: "<mountpoint> <ttl>",
+		ArgsLong: `
+<mountpoint> defines the name to be mounted
+
+<ttl> specifies the time-to-live of the mount point. For example: 5s for 5
+seconds, 1m for 1 minute etc.
+Valid time units are "ms", "s", "m", "h".
+`,
+		Runner: v23cmd.RunnerFunc(func(ctx *context.T, env *cmdline2.Env, args []string) error {
+			if got, want := len(args), 2; got != want {
+				return env.UsageErrorf("mount: got %d arguments, want %d", got, want)
+			}
+			mountpoint := args[0]
+			ttl, err := time.ParseDuration(args[1])
+			if err != nil {
+				return env.UsageErrorf("invalid TTL: %v", err)
+			}
+			// Make up a server endpoint to mount; it need not be reachable.
+			ep := naming.FormatEndpoint("tcp", "127.0.0.1:14141")
+			mount := func(ctx *context.T) (time.Duration, error) {
+				// Currently this is a simple call, but at some
+				// point should generate random test data -
+				// mountpoints at different depths and the like
+				start := time.Now()
+				if err := v23.GetClient(ctx).Call(ctx, mountpoint, "Mount", []interface{}{ep, uint32(ttl / time.Second), 0}, nil, options.NoResolve{}); err != nil {
+					return 0, err
+				}
+				return time.Since(start), nil
+			}
+			p, err := paramsFromFlags(ctx, mountpoint)
+			if err != nil {
+				return err
+			}
+			return run(mount, p)
+		}),
+	}
+
+	cmdResolve = &cmdline2.Command{
+		Name:  "resolve",
+		Short: "Measure latency of the Resolve RPC at a fixed request rate",
+		Long: `
+Repeatedly issues a Resolve request (at --rate) to a name and measures latency
+`,
+		ArgsName: "<name>",
+		ArgsLong: `
+<name> the object name to resolve
+`,
+		Runner: v23cmd.RunnerFunc(func(ctx *context.T, env *cmdline2.Env, args []string) error {
+			if got, want := len(args), 1; got != want {
+				return env.UsageErrorf("resolve: got %d arguments, want %d", got, want)
+			}
+			name := args[0]
+			resolve := func(ctx *context.T) (time.Duration, error) {
+				var entry naming.MountEntry
+				start := time.Now()
+				if err := v23.GetClient(ctx).Call(ctx, name, "ResolveStep", nil, []interface{}{&entry}, options.NoResolve{}); err != nil && verror.ErrorID(err) != naming.ErrNoSuchName.ID {
+					// ErrNoSuchName is fine, it just means
+					// that the mounttable server did not
+					// find an entry in its tables.
+					return 0, err
+				}
+				return time.Since(start), nil
+			}
+			p, err := paramsFromFlags(ctx, name)
+			if err != nil {
+				return err
+			}
+			return run(resolve, p)
+		}),
+	}
+)
+
+func paramsFromFlags(ctx *context.T, objectName string) (params, error) {
+	// Measure network distance to objectName
+	const iters = 5
+	addr, _ := naming.SplitAddressName(objectName)
+	if len(addr) == 0 {
+		addr, _ = naming.SplitAddressName(v23.GetNamespace(ctx).Roots()[0])
+	}
+	ep, err := v23.NewEndpoint(addr)
+	if err != nil {
+		return params{}, err
+	}
+	// Assume TCP; estimate the network distance by timing a few
+	// connection establishments.
+	var total time.Duration
+	for i := 0; i < iters; i++ {
+		start := time.Now()
+		conn, err := net.Dial("tcp", ep.Addr().String())
+		if err != nil {
+			return params{}, err
+		}
+		total += time.Since(start)
+		conn.Close()
+	}
+	return params{
+		Rate:            *rate,
+		Duration:        *duration,
+		NetworkDistance: time.Duration(total.Nanoseconds() / iters),
+		Context:         ctx,
+		Reauthenticate:  *reauth,
+	}, nil
+}
+
+func main() {
+	root := &cmdline2.Command{
+		Name:     "mtstress",
+		Short:    "Tool to stress test a mounttable service by issuing a fixed rate of requests per second and measuring latency",
+		Children: []*cmdline2.Command{cmdMount, cmdResolve},
+	}
+	cmdline2.Main(root)
+}
diff --git a/runtime/internal/rpc/stress/mtstress/run.go b/runtime/internal/rpc/stress/mtstress/run.go
new file mode 100644
index 0000000..f7f1596
--- /dev/null
+++ b/runtime/internal/rpc/stress/mtstress/run.go
@@ -0,0 +1,158 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"fmt"
+	"os"
+	"os/signal"
+	"sync"
+	"time"
+
+	"v.io/v23"
+	"v.io/v23/context"
+
+	"v.io/x/lib/vlog"
+	"v.io/x/ref/lib/stats/histogram"
+)
+
+// params encapsulates "input" information to the loadtester.
+type params struct {
+	NetworkDistance time.Duration // Distance (in time) of the server from this driver
+	Rate            float64       // Desired rate of sending RPCs (RPC/sec)
+	Duration        time.Duration // Duration over which loadtest traffic should be sent
+	Context         *context.T
+	Reauthenticate  bool // If true, each RPC should establish a network connection (and authenticate)
+}
+
+// report is generated by a run of the loadtest.
+type report struct {
+	Count      int64         // Count of RPCs sent
+	Elapsed    time.Duration // Time period over which Count RPCs were sent
+	AvgLatency time.Duration
+	HistMS     *histogram.Histogram // Histogram of latencies in milliseconds
+}
+
+func (r *report) Print(params params) error {
+	if r.HistMS != nil {
+		fmt.Println("RPC latency histogram (in ms):")
+		fmt.Println(r.HistMS.Value())
+	}
+	actualRate := float64(r.Count*int64(time.Second)) / float64(r.Elapsed)
+	fmt.Printf("Network Distance: %v\n", params.NetworkDistance)
+	fmt.Printf("#RPCs sent:       %v\n", r.Count)
+	fmt.Printf("RPCs/sec sent:    %.2f (%.2f%% of the desired rate of %v)\n", actualRate, actualRate*100/params.Rate, params.Rate)
+	fmt.Printf("Avg. Latency:     %v\n", r.AvgLatency)
+	// Mark the results as tainted if the deviation from the desired rate is too large.
+	if 0.9*params.Rate > actualRate {
+		return fmt.Errorf("TAINTED RESULTS: drove less traffic than desired: either server or loadtester had a bottleneck")
+	}
+	return nil
+}
+
+func run(f func(*context.T) (time.Duration, error), p params) error {
+	var (
+		ticker    = time.NewTicker(time.Duration(float64(time.Second) / p.Rate))
+		latency   = make(chan time.Duration, 1000)
+		started   = time.Now()
+		stop      = time.After(p.Duration)
+		interrupt = make(chan os.Signal, 1) // signal.Notify requires a buffered channel.
+		ret       report
+	)
+	defer ticker.Stop()
+	warmup(p.Context, f)
+	signal.Notify(interrupt, os.Interrupt)
+	defer signal.Stop(interrupt)
+
+	stopped := false
+	var sumMS int64
+	var lastInterrupt time.Time
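+	// Issue RPCs at the configured rate; each call reports its latency on
+	// the 'latency' channel, which feeds the histogram and summary stats.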
+	for !stopped {
+		select {
+		case <-ticker.C:
+			go call(p.Context, f, p.Reauthenticate, latency)
+		case d := <-latency:
+			if ret.HistMS != nil {
+				ret.HistMS.Add(int64(d / time.Millisecond))
+			}
+			ret.Count++
+			sumMS += int64(d / time.Millisecond)
+			// Use 10 samples to determine how the histogram should be setup
+			if ret.Count == 10 {
+				avgms := sumMS / ret.Count
+				opts := histogram.Options{
+					NumBuckets: 32,
+					// Mostly interested in tail latencies,
+					// so have the histogram start close to
+					// the current average.
+					MinValue:     int64(float64(avgms) * 0.95),
+					GrowthFactor: 0.20,
+				}
+				vlog.Infof("Creating histogram after %d samples (%vms avg latency): %+v", ret.Count, avgms, opts)
+				ret.HistMS = histogram.New(opts)
+			}
+		case sig := <-interrupt:
+			if time.Since(lastInterrupt) < time.Second {
+				vlog.Infof("Multiple %v signals received, aborting test", sig)
+				stopped = true
+				break
+			}
+			lastInterrupt = time.Now()
+			// Print a temporary report
+			fmt.Println("INTERMEDIATE REPORT:")
+			ret.Elapsed = time.Since(started)
+			ret.AvgLatency = time.Duration(float64(sumMS)/float64(ret.Count)) * time.Millisecond
+			if err := ret.Print(p); err != nil {
+				fmt.Println(err)
+			}
+			fmt.Println("--------------------------------------------------------------------------------")
+		case <-stop:
+			stopped = true
+		}
+	}
+	ret.Elapsed = time.Since(started)
+	ret.AvgLatency = time.Duration(float64(sumMS)/float64(ret.Count)) * time.Millisecond
+	return ret.Print(p)
+}
+
+func warmup(ctx *context.T, f func(*context.T) (time.Duration, error)) {
+	const nWarmup = 10
+	vlog.Infof("Sending %d requests as warmup", nWarmup)
+	var wg sync.WaitGroup
+	for i := 0; i < nWarmup; i++ {
+		wg.Add(1)
+		go func() {
+			f(ctx)
+			wg.Done()
+		}()
+	}
+	wg.Wait()
+	vlog.Infof("Done with warmup")
+}
+
+func call(ctx *context.T, f func(*context.T) (time.Duration, error), reauth bool, d chan<- time.Duration) {
+	client := v23.GetClient(ctx)
+	if reauth {
+		// HACK ALERT: At the time the line below was written, it was
+		// known that the implementation would cause 'ctx' to be setup
+		// such that any subsequent RPC will establish a network
+		// connection from scratch (a new VIF, new VC etc.) If that
+		// implementation changes, then this line below will have to
+		// change!
+		var err error
+		if ctx, err = v23.WithPrincipal(ctx, v23.GetPrincipal(ctx)); err != nil {
+			vlog.Infof("%v", err)
+			return
+		}
+		client = v23.GetClient(ctx)
+		defer client.Close()
+	}
+	sample, err := f(ctx)
+	if err != nil {
+		vlog.Infof("%v", err)
+		return
+	}
+	d <- sample
+}
diff --git a/runtime/internal/rpc/stress/stress.vdl b/runtime/internal/rpc/stress/stress.vdl
new file mode 100644
index 0000000..1ae6c51
--- /dev/null
+++ b/runtime/internal/rpc/stress/stress.vdl
@@ -0,0 +1,39 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package stress
+
+import (
+	"v.io/v23/security/access"
+)
+
+type SumArg struct {
+	ABool        bool
+	AInt64       int64
+	AListOfBytes []byte
+}
+
+type SumStats struct {
+	SumCount       uint64
+	SumStreamCount uint64
+	BytesRecv      uint64
+	BytesSent      uint64
+}
+
+type Stress interface {
+	// Echo returns the payload that it receives.
+	Echo(Payload []byte) ([]byte | error) {access.Read}
+
+	// Sum returns the checksum of the payload that it receives.
+	Sum(arg SumArg) ([]byte | error) {access.Read}
+
+	// SumStream returns the checksum of each payload that it receives via the stream.
+	SumStream() stream<SumArg,[]byte> error {access.Read}
+
+	// GetSumStats returns the stats on the Sum calls that the server received.
+	GetSumStats() (SumStats | error) {access.Read}
+
+	// Stop stops the server.
+	Stop() error {access.Admin}
+}
diff --git a/runtime/internal/rpc/stress/stress.vdl.go b/runtime/internal/rpc/stress/stress.vdl.go
new file mode 100644
index 0000000..15e2787
--- /dev/null
+++ b/runtime/internal/rpc/stress/stress.vdl.go
@@ -0,0 +1,434 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated by the vanadium vdl tool.
+// Source: stress.vdl
+
+package stress
+
+import (
+	// VDL system imports
+	"io"
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/rpc"
+	"v.io/v23/vdl"
+
+	// VDL user imports
+	"v.io/v23/security/access"
+)
+
+type SumArg struct {
+	ABool        bool
+	AInt64       int64
+	AListOfBytes []byte
+}
+
+func (SumArg) __VDLReflect(struct {
+	Name string `vdl:"v.io/x/ref/runtime/internal/rpc/stress.SumArg"`
+}) {
+}
+
+type SumStats struct {
+	SumCount       uint64
+	SumStreamCount uint64
+	BytesRecv      uint64
+	BytesSent      uint64
+}
+
+func (SumStats) __VDLReflect(struct {
+	Name string `vdl:"v.io/x/ref/runtime/internal/rpc/stress.SumStats"`
+}) {
+}
+
+func init() {
+	vdl.Register((*SumArg)(nil))
+	vdl.Register((*SumStats)(nil))
+}
+
+// StressClientMethods is the client interface
+// containing Stress methods.
+type StressClientMethods interface {
+	// Echo returns the payload that it receives.
+	Echo(ctx *context.T, Payload []byte, opts ...rpc.CallOpt) ([]byte, error)
+	// Sum returns the checksum of the payload that it receives.
+	Sum(ctx *context.T, arg SumArg, opts ...rpc.CallOpt) ([]byte, error)
+	// SumStream returns the checksum of each payload that it receives via the stream.
+	SumStream(*context.T, ...rpc.CallOpt) (StressSumStreamClientCall, error)
+	// GetSumStats returns the stats on the Sum calls that the server received.
+	GetSumStats(*context.T, ...rpc.CallOpt) (SumStats, error)
+	// Stop stops the server.
+	Stop(*context.T, ...rpc.CallOpt) error
+}
+
+// StressClientStub adds universal methods to StressClientMethods.
+type StressClientStub interface {
+	StressClientMethods
+	rpc.UniversalServiceMethods
+}
+
+// StressClient returns a client stub for Stress.
+func StressClient(name string) StressClientStub {
+	return implStressClientStub{name}
+}
+
+type implStressClientStub struct {
+	name string
+}
+
+func (c implStressClientStub) Echo(ctx *context.T, i0 []byte, opts ...rpc.CallOpt) (o0 []byte, err error) {
+	err = v23.GetClient(ctx).Call(ctx, c.name, "Echo", []interface{}{i0}, []interface{}{&o0}, opts...)
+	return
+}
+
+func (c implStressClientStub) Sum(ctx *context.T, i0 SumArg, opts ...rpc.CallOpt) (o0 []byte, err error) {
+	err = v23.GetClient(ctx).Call(ctx, c.name, "Sum", []interface{}{i0}, []interface{}{&o0}, opts...)
+	return
+}
+
+func (c implStressClientStub) SumStream(ctx *context.T, opts ...rpc.CallOpt) (ocall StressSumStreamClientCall, err error) {
+	var call rpc.ClientCall
+	if call, err = v23.GetClient(ctx).StartCall(ctx, c.name, "SumStream", nil, opts...); err != nil {
+		return
+	}
+	ocall = &implStressSumStreamClientCall{ClientCall: call}
+	return
+}
+
+func (c implStressClientStub) GetSumStats(ctx *context.T, opts ...rpc.CallOpt) (o0 SumStats, err error) {
+	err = v23.GetClient(ctx).Call(ctx, c.name, "GetSumStats", nil, []interface{}{&o0}, opts...)
+	return
+}
+
+func (c implStressClientStub) Stop(ctx *context.T, opts ...rpc.CallOpt) (err error) {
+	err = v23.GetClient(ctx).Call(ctx, c.name, "Stop", nil, nil, opts...)
+	return
+}
+
+// StressSumStreamClientStream is the client stream for Stress.SumStream.
+type StressSumStreamClientStream interface {
+	// RecvStream returns the receiver side of the Stress.SumStream client stream.
+	RecvStream() interface {
+		// Advance stages an item so that it may be retrieved via Value.  Returns
+		// true iff there is an item to retrieve.  Advance must be called before
+		// Value is called.  May block if an item is not available.
+		Advance() bool
+		// Value returns the item that was staged by Advance.  May panic if Advance
+		// returned false or was not called.  Never blocks.
+		Value() []byte
+		// Err returns any error encountered by Advance.  Never blocks.
+		Err() error
+	}
+	// SendStream returns the send side of the Stress.SumStream client stream.
+	SendStream() interface {
+		// Send places the item onto the output stream.  Returns errors
+		// encountered while sending, or if Send is called after Close or
+		// the stream has been canceled.  Blocks if there is no buffer
+		// space; will unblock when buffer space is available or after
+		// the stream has been canceled.
+		Send(item SumArg) error
+		// Close indicates to the server that no more items will be sent;
+		// server Recv calls will receive io.EOF after all sent items.
+		// This is an optional call - e.g. a client might call Close if it
+		// needs to continue receiving items from the server after it's
+		// done sending.  Returns errors encountered while closing, or if
+		// Close is called after the stream has been canceled.  Like Send,
+		// blocks if there is no buffer space available.
+		Close() error
+	}
+}
+
+// StressSumStreamClientCall represents the call returned from Stress.SumStream.
+type StressSumStreamClientCall interface {
+	StressSumStreamClientStream
+	// Finish performs the equivalent of SendStream().Close, then blocks until
+	// the server is done, and returns the positional return values for the call.
+	//
+	// Finish returns immediately if the call has been canceled; depending on the
+	// timing the output could either be an error signaling cancelation, or the
+	// valid positional return values from the server.
+	//
+	// Calling Finish is mandatory for releasing stream resources, unless the call
+	// has been canceled or any of the other methods return an error.  Finish should
+	// be called at most once.
+	Finish() error
+}
+
+type implStressSumStreamClientCall struct {
+	rpc.ClientCall
+	valRecv []byte
+	errRecv error
+}
+
+func (c *implStressSumStreamClientCall) RecvStream() interface {
+	Advance() bool
+	Value() []byte
+	Err() error
+} {
+	return implStressSumStreamClientCallRecv{c}
+}
+
+type implStressSumStreamClientCallRecv struct {
+	c *implStressSumStreamClientCall
+}
+
+func (c implStressSumStreamClientCallRecv) Advance() bool {
+	c.c.errRecv = c.c.Recv(&c.c.valRecv)
+	return c.c.errRecv == nil
+}
+func (c implStressSumStreamClientCallRecv) Value() []byte {
+	return c.c.valRecv
+}
+func (c implStressSumStreamClientCallRecv) Err() error {
+	if c.c.errRecv == io.EOF {
+		return nil
+	}
+	return c.c.errRecv
+}
+func (c *implStressSumStreamClientCall) SendStream() interface {
+	Send(item SumArg) error
+	Close() error
+} {
+	return implStressSumStreamClientCallSend{c}
+}
+
+type implStressSumStreamClientCallSend struct {
+	c *implStressSumStreamClientCall
+}
+
+func (c implStressSumStreamClientCallSend) Send(item SumArg) error {
+	return c.c.Send(item)
+}
+func (c implStressSumStreamClientCallSend) Close() error {
+	return c.c.CloseSend()
+}
+func (c *implStressSumStreamClientCall) Finish() (err error) {
+	err = c.ClientCall.Finish()
+	return
+}
+
+// StressServerMethods is the interface a server writer
+// implements for Stress.
+type StressServerMethods interface {
+	// Echo returns the payload that it receives.
+	Echo(ctx *context.T, call rpc.ServerCall, Payload []byte) ([]byte, error)
+	// Sum returns the checksum of the payload that it receives.
+	Sum(ctx *context.T, call rpc.ServerCall, arg SumArg) ([]byte, error)
+	// SumStream returns the checksum of each payload that it receives via the stream.
+	SumStream(*context.T, StressSumStreamServerCall) error
+	// GetSumStats returns the stats on the Sum calls that the server received.
+	GetSumStats(*context.T, rpc.ServerCall) (SumStats, error)
+	// Stop stops the server.
+	Stop(*context.T, rpc.ServerCall) error
+}
+
+// StressServerStubMethods is the server interface containing
+// Stress methods, as expected by rpc.Server.
+// The only difference between this interface and StressServerMethods
+// is the streaming methods.
+type StressServerStubMethods interface {
+	// Echo returns the payload that it receives.
+	Echo(ctx *context.T, call rpc.ServerCall, Payload []byte) ([]byte, error)
+	// Sum returns the checksum of the payload that it receives.
+	Sum(ctx *context.T, call rpc.ServerCall, arg SumArg) ([]byte, error)
+	// SumStream returns the checksum of each payload that it receives via the stream.
+	SumStream(*context.T, *StressSumStreamServerCallStub) error
+	// GetSumStats returns the stats on the Sum calls that the server received.
+	GetSumStats(*context.T, rpc.ServerCall) (SumStats, error)
+	// Stop stops the server.
+	Stop(*context.T, rpc.ServerCall) error
+}
+
+// StressServerStub adds universal methods to StressServerStubMethods.
+type StressServerStub interface {
+	StressServerStubMethods
+	// Describe the Stress interfaces.
+	Describe__() []rpc.InterfaceDesc
+}
+
+// StressServer returns a server stub for Stress.
+// It converts an implementation of StressServerMethods into
+// an object that may be used by rpc.Server.
+func StressServer(impl StressServerMethods) StressServerStub {
+	stub := implStressServerStub{
+		impl: impl,
+	}
+	// Initialize GlobState; always check the stub itself first, to handle the
+	// case where the user has the Glob method defined in their VDL source.
+	if gs := rpc.NewGlobState(stub); gs != nil {
+		stub.gs = gs
+	} else if gs := rpc.NewGlobState(impl); gs != nil {
+		stub.gs = gs
+	}
+	return stub
+}
+
+type implStressServerStub struct {
+	impl StressServerMethods
+	gs   *rpc.GlobState
+}
+
+func (s implStressServerStub) Echo(ctx *context.T, call rpc.ServerCall, i0 []byte) ([]byte, error) {
+	return s.impl.Echo(ctx, call, i0)
+}
+
+func (s implStressServerStub) Sum(ctx *context.T, call rpc.ServerCall, i0 SumArg) ([]byte, error) {
+	return s.impl.Sum(ctx, call, i0)
+}
+
+func (s implStressServerStub) SumStream(ctx *context.T, call *StressSumStreamServerCallStub) error {
+	return s.impl.SumStream(ctx, call)
+}
+
+func (s implStressServerStub) GetSumStats(ctx *context.T, call rpc.ServerCall) (SumStats, error) {
+	return s.impl.GetSumStats(ctx, call)
+}
+
+func (s implStressServerStub) Stop(ctx *context.T, call rpc.ServerCall) error {
+	return s.impl.Stop(ctx, call)
+}
+
+func (s implStressServerStub) Globber() *rpc.GlobState {
+	return s.gs
+}
+
+func (s implStressServerStub) Describe__() []rpc.InterfaceDesc {
+	return []rpc.InterfaceDesc{StressDesc}
+}
+
+// StressDesc describes the Stress interface.
+var StressDesc rpc.InterfaceDesc = descStress
+
+// descStress hides the desc to keep godoc clean.
+var descStress = rpc.InterfaceDesc{
+	Name:    "Stress",
+	PkgPath: "v.io/x/ref/runtime/internal/rpc/stress",
+	Methods: []rpc.MethodDesc{
+		{
+			Name: "Echo",
+			Doc:  "// Echo returns the payload that it receives.",
+			InArgs: []rpc.ArgDesc{
+				{"Payload", ``}, // []byte
+			},
+			OutArgs: []rpc.ArgDesc{
+				{"", ``}, // []byte
+			},
+			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Read"))},
+		},
+		{
+			Name: "Sum",
+			Doc:  "// Do returns the checksum of the payload that it receives.",
+			InArgs: []rpc.ArgDesc{
+				{"arg", ``}, // SumArg
+			},
+			OutArgs: []rpc.ArgDesc{
+				{"", ``}, // []byte
+			},
+			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Read"))},
+		},
+		{
+			Name: "SumStream",
+			Doc:  "// DoStream returns the checksum of the payload that it receives via the stream.",
+			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Read"))},
+		},
+		{
+			Name: "GetSumStats",
+			Doc:  "// GetSumStats returns the stats on the Sum calls that the server received.",
+			OutArgs: []rpc.ArgDesc{
+				{"", ``}, // SumStats
+			},
+			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Read"))},
+		},
+		{
+			Name: "Stop",
+			Doc:  "// Stop stops the server.",
+			Tags: []*vdl.Value{vdl.ValueOf(access.Tag("Admin"))},
+		},
+	},
+}
+
+// StressSumStreamServerStream is the server stream for Stress.SumStream.
+type StressSumStreamServerStream interface {
+	// RecvStream returns the receiver side of the Stress.SumStream server stream.
+	RecvStream() interface {
+		// Advance stages an item so that it may be retrieved via Value.  Returns
+		// true iff there is an item to retrieve.  Advance must be called before
+		// Value is called.  May block if an item is not available.
+		Advance() bool
+		// Value returns the item that was staged by Advance.  May panic if Advance
+		// returned false or was not called.  Never blocks.
+		Value() SumArg
+		// Err returns any error encountered by Advance.  Never blocks.
+		Err() error
+	}
+	// SendStream returns the send side of the Stress.SumStream server stream.
+	SendStream() interface {
+		// Send places the item onto the output stream.  Returns errors encountered
+		// while sending.  Blocks if there is no buffer space; will unblock when
+		// buffer space is available.
+		Send(item []byte) error
+	}
+}
+
+// StressSumStreamServerCall represents the context passed to Stress.SumStream.
+type StressSumStreamServerCall interface {
+	rpc.ServerCall
+	StressSumStreamServerStream
+}
+
+// StressSumStreamServerCallStub is a wrapper that converts rpc.StreamServerCall into
+// a typesafe stub that implements StressSumStreamServerCall.
+type StressSumStreamServerCallStub struct {
+	rpc.StreamServerCall
+	valRecv SumArg
+	errRecv error
+}
+
+// Init initializes StressSumStreamServerCallStub from rpc.StreamServerCall.
+func (s *StressSumStreamServerCallStub) Init(call rpc.StreamServerCall) {
+	s.StreamServerCall = call
+}
+
+// RecvStream returns the receiver side of the Stress.SumStream server stream.
+func (s *StressSumStreamServerCallStub) RecvStream() interface {
+	Advance() bool
+	Value() SumArg
+	Err() error
+} {
+	return implStressSumStreamServerCallRecv{s}
+}
+
+type implStressSumStreamServerCallRecv struct {
+	s *StressSumStreamServerCallStub
+}
+
+func (s implStressSumStreamServerCallRecv) Advance() bool {
+	s.s.valRecv = SumArg{}
+	s.s.errRecv = s.s.Recv(&s.s.valRecv)
+	return s.s.errRecv == nil
+}
+func (s implStressSumStreamServerCallRecv) Value() SumArg {
+	return s.s.valRecv
+}
+func (s implStressSumStreamServerCallRecv) Err() error {
+	if s.s.errRecv == io.EOF {
+		return nil
+	}
+	return s.s.errRecv
+}
+
+// SendStream returns the send side of the Stress.SumStream server stream.
+func (s *StressSumStreamServerCallStub) SendStream() interface {
+	Send(item []byte) error
+} {
+	return implStressSumStreamServerCallSend{s}
+}
+
+type implStressSumStreamServerCallSend struct {
+	s *StressSumStreamServerCallStub
+}
+
+func (s implStressSumStreamServerCallSend) Send(item []byte) error {
+	return s.s.Send(item)
+}
diff --git a/runtime/internal/rpc/stress/stress/load.go b/runtime/internal/rpc/stress/stress/load.go
new file mode 100644
index 0000000..e689f62
--- /dev/null
+++ b/runtime/internal/rpc/stress/stress/load.go
@@ -0,0 +1,112 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"runtime"
+	"time"
+
+	"v.io/v23/context"
+	"v.io/x/lib/cmdline2"
+	"v.io/x/ref/lib/v23cmd"
+	"v.io/x/ref/runtime/internal/rpc/stress/internal"
+)
+
+var (
+	cpus        int
+	payloadSize int
+)
+
+func init() {
+	cmdLoadTest.Flags.DurationVar(&duration, "duration", 1*time.Minute, "duration of the test to run")
+	cmdLoadTest.Flags.IntVar(&cpus, "cpu", 0, "number of cpu cores to use; if zero, use the number of servers to test")
+	cmdLoadTest.Flags.IntVar(&payloadSize, "payload-size", 1000, "size of payload in bytes")
+	cmdLoadTest.Flags.StringVar(&outFormat, "format", "text", "Stats output format; either text or json")
+}
+
+type loadStats struct {
+	Iterations uint64
+	MsecPerRpc float64
+	Qps        float64
+	QpsPerCore float64
+}
+
+var cmdLoadTest = &cmdline2.Command{
+	Runner:   v23cmd.RunnerFunc(runLoadTest),
+	Name:     "load",
+	Short:    "Run load test",
+	Long:     "Run load test",
+	ArgsName: "<server> ...",
+	ArgsLong: "<server> ... A list of servers to connect to.",
+}
+
+func runLoadTest(ctx *context.T, env *cmdline2.Env, args []string) error {
+	if len(args) == 0 {
+		return env.UsageErrorf("no server specified")
+	}
+	if outFormat != "text" && outFormat != "json" {
+		return env.UsageErrorf("invalid output format: %s\n", outFormat)
+	}
+
+	cores := cpus
+	if cores == 0 {
+		cores = len(args)
+	}
+	runtime.GOMAXPROCS(cores)
+
+	fmt.Fprintf(env.Stdout, "starting load test against %d server(s) using %d core(s)...\n", len(args), cores)
+	fmt.Fprintf(env.Stdout, "payloadSize: %d, duration: %v\n", payloadSize, duration)
+
+	start := time.Now()
+	done := make(chan loadStats)
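+	// Run one echo loop per server concurrently; each goroutine reports its
+	// own stats on 'done' and the results are merged below.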
+	for _, server := range args {
+		go func(server string) {
+			var stats loadStats
+
+			start := time.Now()
+			stats.Iterations = internal.CallEcho(ctx, server, payloadSize, duration)
+			elapsed := time.Since(start)
+
+			stats.Qps = float64(stats.Iterations) / elapsed.Seconds()
+			stats.MsecPerRpc = 1000 / stats.Qps
+			done <- stats
+		}(server)
+	}
+	var merged loadStats
+	for i := 0; i < len(args); i++ {
+		stats := <-done
+		merged.Iterations += stats.Iterations
+		merged.MsecPerRpc += stats.MsecPerRpc
+		merged.Qps += stats.Qps
+	}
+	merged.MsecPerRpc /= float64(len(args))
+	merged.QpsPerCore = merged.Qps / float64(cores)
+	elapsed := time.Since(start)
+	fmt.Printf("done after %v\n", elapsed)
+	return outLoadStats(env.Stdout, outFormat, "load stats:", &merged)
+}
+
+func outLoadStats(w io.Writer, format, title string, stats *loadStats) error {
+	switch format {
+	case "text":
+		fmt.Fprintf(w, "%s\n", title)
+		fmt.Fprintf(w, "\tnumber of RPCs:\t\t%d\n", stats.Iterations)
+		fmt.Fprintf(w, "\tlatency (msec/rpc):\t%.2f\n", stats.MsecPerRpc)
+		fmt.Fprintf(w, "\tqps:\t\t\t%.2f\n", stats.Qps)
+		fmt.Fprintf(w, "\tqps/core:\t\t%.2f\n", stats.QpsPerCore)
+	case "json":
+		b, err := json.Marshal(stats)
+		if err != nil {
+			return err
+		}
+		fmt.Fprintf(w, "%s%s\n", title, b)
+	default:
+		return fmt.Errorf("invalid output format: %s\n", format)
+	}
+	return nil
+}
diff --git a/runtime/internal/rpc/stress/stress/main.go b/runtime/internal/rpc/stress/stress/main.go
new file mode 100644
index 0000000..5a2e0e7
--- /dev/null
+++ b/runtime/internal/rpc/stress/stress/main.go
@@ -0,0 +1,49 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"v.io/v23/context"
+	"v.io/x/lib/cmdline2"
+	"v.io/x/ref/lib/v23cmd"
+	_ "v.io/x/ref/runtime/factories/static"
+	"v.io/x/ref/runtime/internal/rpc/stress"
+)
+
+var cmdStopServers = &cmdline2.Command{
+	Runner:   v23cmd.RunnerFunc(runStopServers),
+	Name:     "stop",
+	Short:    "Stop servers",
+	Long:     "Stop servers",
+	ArgsName: "<server> ...",
+	ArgsLong: "<server> ... A list of servers to stop.",
+}
+
+func runStopServers(ctx *context.T, env *cmdline2.Env, args []string) error {
+	if len(args) == 0 {
+		return env.UsageErrorf("no server specified")
+	}
+	for _, server := range args {
+		if err := stress.StressClient(server).Stop(ctx); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func main() {
+	cmdRoot := &cmdline2.Command{
+		Name:  "stress",
+		Short: "Tool to stress/load test RPC",
+		Long:  "Tool to stress/load test RPC by issuing randomly generated requests",
+		Children: []*cmdline2.Command{
+			cmdStressTest,
+			cmdStressStats,
+			cmdLoadTest,
+			cmdStopServers,
+		},
+	}
+	cmdline2.Main(cmdRoot)
+}
diff --git a/runtime/internal/rpc/stress/stress/stress.go b/runtime/internal/rpc/stress/stress/stress.go
new file mode 100644
index 0000000..b63dc7a
--- /dev/null
+++ b/runtime/internal/rpc/stress/stress/stress.go
@@ -0,0 +1,149 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"math/rand"
+	"runtime"
+	"time"
+
+	"v.io/v23/context"
+	"v.io/x/lib/cmdline2"
+	"v.io/x/ref/lib/v23cmd"
+	"v.io/x/ref/runtime/internal/rpc/stress"
+	"v.io/x/ref/runtime/internal/rpc/stress/internal"
+)
+
+var (
+	duration time.Duration
+
+	workers        int
+	maxChunkCnt    int
+	maxPayloadSize int
+
+	outFormat string
+)
+
+func init() {
+	cmdStressTest.Flags.DurationVar(&duration, "duration", 1*time.Minute, "duration of the test to run")
+	cmdStressTest.Flags.IntVar(&workers, "workers", 1, "number of test workers to run")
+	cmdStressTest.Flags.IntVar(&maxChunkCnt, "max-chunk-count", 1000, "maximum number of chunks to send per streaming RPC")
+	cmdStressTest.Flags.IntVar(&maxPayloadSize, "max-payload-size", 10000, "maximum size of payload in bytes")
+	cmdStressTest.Flags.StringVar(&outFormat, "format", "text", "Stats output format; either text or json")
+
+	cmdStressStats.Flags.StringVar(&outFormat, "format", "text", "Stats output format; either text or json")
+}
+
+var cmdStressTest = &cmdline2.Command{
+	Runner:   v23cmd.RunnerFunc(runStressTest),
+	Name:     "stress",
+	Short:    "Run stress test",
+	Long:     "Run stress test",
+	ArgsName: "<server> ...",
+	ArgsLong: "<server> ... A list of servers to connect to.",
+}
+
+func runStressTest(ctx *context.T, env *cmdline2.Env, args []string) error {
+	if len(args) == 0 {
+		return env.UsageErrorf("no server specified")
+	}
+	if outFormat != "text" && outFormat != "json" {
+		return env.UsageErrorf("invalid output format: %s\n", outFormat)
+	}
+
+	cores := runtime.NumCPU()
+	runtime.GOMAXPROCS(cores)
+	rand.Seed(time.Now().UnixNano())
+	fmt.Fprintf(env.Stdout, "starting stress test against %d server(s) using %d core(s)...\n", len(args), cores)
+	fmt.Fprintf(env.Stdout, "workers: %d, maxChunkCnt: %d, maxPayloadSize: %d, duration: %v\n", workers, maxChunkCnt, maxPayloadSize, duration)
+
+	start := time.Now()
+	done := make(chan stress.SumStats)
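+	// Each worker picks a random server and issues a random mix of Sum and
+	// SumStream calls until the deadline, then reports its stats.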
+	for i := 0; i < workers; i++ {
+		go func() {
+			var stats stress.SumStats
+			timeout := time.After(duration)
+		done:
+			for {
+				server := args[rand.Intn(len(args))]
+				if rand.Intn(2) == 0 {
+					internal.CallSum(ctx, server, maxPayloadSize, &stats)
+				} else {
+					internal.CallSumStream(ctx, server, maxChunkCnt, maxPayloadSize, &stats)
+				}
+
+				select {
+				case <-timeout:
+					break done
+				default:
+				}
+			}
+			done <- stats
+		}()
+	}
+	var merged stress.SumStats
+	for i := 0; i < workers; i++ {
+		stats := <-done
+		merged.SumCount += stats.SumCount
+		merged.SumStreamCount += stats.SumStreamCount
+		merged.BytesRecv += stats.BytesRecv
+		merged.BytesSent += stats.BytesSent
+	}
+	elapsed := time.Since(start)
+	fmt.Printf("done after %v\n", elapsed)
+	return outSumStats(env.Stdout, outFormat, "client stats:", &merged)
+}
+
+var cmdStressStats = &cmdline2.Command{
+	Runner:   v23cmd.RunnerFunc(runStressStats),
+	Name:     "stats",
+	Short:    "Print out stress stats of servers",
+	Long:     "Print out stress stats of servers",
+	ArgsName: "<server> ...",
+	ArgsLong: "<server> ... A list of servers to connect to.",
+}
+
+func runStressStats(ctx *context.T, env *cmdline2.Env, args []string) error {
+	if len(args) == 0 {
+		return env.UsageErrorf("no server specified")
+	}
+	if outFormat != "text" && outFormat != "json" {
+		return env.UsageErrorf("invalid output format: %s\n", outFormat)
+	}
+	for _, server := range args {
+		stats, err := stress.StressClient(server).GetSumStats(ctx)
+		if err != nil {
+			return err
+		}
+		title := fmt.Sprintf("server stats(%s):", server)
+		if err := outSumStats(env.Stdout, outFormat, title, &stats); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func outSumStats(w io.Writer, format, title string, stats *stress.SumStats) error {
+	switch format {
+	case "text":
+		fmt.Fprintf(w, "%s\n", title)
+		fmt.Fprintf(w, "\tnumber of non-streaming RPCs:\t%d\n", stats.SumCount)
+		fmt.Fprintf(w, "\tnumber of streaming RPCs:\t%d\n", stats.SumStreamCount)
+		fmt.Fprintf(w, "\tnumber of bytes received:\t%d\n", stats.BytesRecv)
+		fmt.Fprintf(w, "\tnumber of bytes sent:\t\t%d\n", stats.BytesSent)
+	case "json":
+		b, err := json.Marshal(stats)
+		if err != nil {
+			return err
+		}
+		fmt.Fprintf(w, "%s%s\n", title, b)
+	default:
+		return fmt.Errorf("invalid output format: %s\n", format)
+	}
+	return nil
+}
diff --git a/runtime/internal/rpc/stress/stressd/main.go b/runtime/internal/rpc/stress/stressd/main.go
new file mode 100644
index 0000000..3286e7f
--- /dev/null
+++ b/runtime/internal/rpc/stress/stressd/main.go
@@ -0,0 +1,48 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// A simple command-line tool to run the stress test server.
+package main
+
+import (
+	"flag"
+	"runtime"
+	"time"
+
+	"v.io/v23"
+	"v.io/x/lib/vlog"
+
+	"v.io/x/ref/lib/signals"
+	_ "v.io/x/ref/runtime/factories/static"
+	"v.io/x/ref/runtime/internal/rpc/stress/internal"
+)
+
+var (
+	duration = flag.Duration("duration", 0, "duration of the stress test to run; if zero, there is no limit.")
+)
+
+func main() {
+	runtime.GOMAXPROCS(runtime.NumCPU())
+
+	ctx, shutdown := v23.Init()
+	defer shutdown()
+
+	server, ep, stop := internal.StartServer(ctx, v23.GetListenSpec(ctx))
+	vlog.Infof("listening on %s", ep.Name())
+
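+	// A nil channel is never ready in a select, so leaving timeout nil when
+	// no duration is given makes the server run until stopped or signaled.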
+	var timeout <-chan time.Time
+	if *duration > 0 {
+		timeout = time.After(*duration)
+	}
+	select {
+	case <-timeout:
+	case <-stop:
+	case <-signals.ShutdownOnSignals(ctx):
+	}
+
+	if err := server.Stop(); err != nil {
+		vlog.Fatalf("Stop() failed: %v", err)
+	}
+	vlog.Info("stopped.")
+}
diff --git a/runtime/internal/rpc/test/client_test.go b/runtime/internal/rpc/test/client_test.go
new file mode 100644
index 0000000..0343ad9
--- /dev/null
+++ b/runtime/internal/rpc/test/client_test.go
@@ -0,0 +1,993 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"testing"
+	"time"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/options"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+	"v.io/v23/vdlroot/signature"
+	"v.io/v23/verror"
+	"v.io/x/ref/envvar"
+	_ "v.io/x/ref/runtime/factories/generic"
+	inaming "v.io/x/ref/runtime/internal/naming"
+	irpc "v.io/x/ref/runtime/internal/rpc"
+	"v.io/x/ref/runtime/internal/rpc/stream/message"
+	"v.io/x/ref/runtime/internal/testing/mocks/mocknet"
+	"v.io/x/ref/services/mounttable/mounttablelib"
+	"v.io/x/ref/test"
+	"v.io/x/ref/test/expect"
+	"v.io/x/ref/test/modules"
+	"v.io/x/ref/test/testutil"
+)
+
+//go:generate v23 test generate .
+
+func rootMT(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
+	seclevel := options.SecurityConfidential
+	if len(args) == 1 && args[0] == "nosec" {
+		seclevel = options.SecurityNone
+	}
+	return runRootMT(stdin, stdout, stderr, seclevel, env, args...)
+}
+
+func runRootMT(stdin io.Reader, stdout, stderr io.Writer, seclevel options.SecurityLevel, env map[string]string, args ...string) error {
+	ctx, shutdown := v23.Init()
+	defer shutdown()
+
+	lspec := v23.GetListenSpec(ctx)
+	server, err := v23.NewServer(ctx, options.ServesMountTable(true), seclevel)
+	if err != nil {
+		return fmt.Errorf("root failed: %v", err)
+	}
+	mt, err := mounttablelib.NewMountTableDispatcher("", "", "mounttable")
+	if err != nil {
+		return fmt.Errorf("mounttablelib.NewMountTableDispatcher failed: %s", err)
+	}
+	eps, err := server.Listen(lspec)
+	if err != nil {
+		return fmt.Errorf("server.Listen failed: %s", err)
+	}
+	if err := server.ServeDispatcher("", mt); err != nil {
+		return fmt.Errorf("root failed: %s", err)
+	}
+	fmt.Fprintf(stdout, "PID=%d\n", os.Getpid())
+	for _, ep := range eps {
+		fmt.Fprintf(stdout, "MT_NAME=%s\n", ep.Name())
+	}
+	modules.WaitForEOF(stdin)
+	return nil
+}
+
+type treeDispatcher struct{ id string }
+
+func (d treeDispatcher) Lookup(suffix string) (interface{}, security.Authorizer, error) {
+	return &echoServerObject{d.id, suffix}, nil, nil
+}
+
+type echoServerObject struct {
+	id, suffix string
+}
+
+func (es *echoServerObject) Echo(_ *context.T, _ rpc.ServerCall, m string) (string, error) {
+	if len(es.suffix) > 0 {
+		return fmt.Sprintf("%s.%s: %s\n", es.id, es.suffix, m), nil
+	}
+	return fmt.Sprintf("%s: %s\n", es.id, m), nil
+}
+
+func (es *echoServerObject) Sleep(_ *context.T, _ rpc.ServerCall, d string) error {
+	duration, err := time.ParseDuration(d)
+	if err != nil {
+		return err
+	}
+	time.Sleep(duration)
+	return nil
+}
+
+func echoServer(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
+	ctx, shutdown := v23.Init()
+	defer shutdown()
+
+	id, mp := args[0], args[1]
+	disp := &treeDispatcher{id: id}
+	server, err := v23.NewServer(ctx)
+	if err != nil {
+		return err
+	}
+	defer server.Stop()
+	eps, err := server.Listen(v23.GetListenSpec(ctx))
+	if err != nil {
+		return err
+	}
+	if err := server.ServeDispatcher(mp, disp); err != nil {
+		return err
+	}
+	fmt.Fprintf(stdout, "PID=%d\n", os.Getpid())
+	for _, ep := range eps {
+		fmt.Fprintf(stdout, "NAME=%s\n", ep.Name())
+	}
+	modules.WaitForEOF(stdin)
+	return nil
+}
+
+func echoClient(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
+	ctx, shutdown := v23.Init()
+	defer shutdown()
+
+	name := args[0]
+	args = args[1:]
+	client := v23.GetClient(ctx)
+	for _, a := range args {
+		var r string
+		if err := client.Call(ctx, name, "Echo", []interface{}{a}, []interface{}{&r}); err != nil {
+			return err
+		}
+		fmt.Fprint(stdout, r)
+	}
+	return nil
+}
+
+func newCtx() (*context.T, v23.Shutdown) {
+	ctx, shutdown := test.InitForTest()
+	v23.GetNamespace(ctx).CacheCtl(naming.DisableCache(true))
+	return ctx, shutdown
+}
+
+func runMountTable(t *testing.T, ctx *context.T, args ...string) (*modules.Shell, func()) {
+	sh, err := modules.NewShell(ctx, nil, testing.Verbose(), t)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	root, err := sh.Start("rootMT", nil, args...)
+	if err != nil {
+		t.Fatalf("unexpected error for root mt: %s", err)
+	}
+	deferFn := func() {
+		sh.Cleanup(os.Stderr, os.Stderr)
+	}
+
+	root.ExpectVar("PID")
+	rootName := root.ExpectVar("MT_NAME")
+
+	sh.SetVar(envvar.NamespacePrefix, rootName)
+	if err = v23.GetNamespace(ctx).SetRoots(rootName); err != nil {
+		t.Fatalf("unexpected error setting namespace roots: %s", err)
+	}
+
+	return sh, deferFn
+}
+
+func runClient(t *testing.T, sh *modules.Shell) error {
+	clt, err := sh.Start("echoClient", nil, "echoServer", "a message")
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	s := expect.NewSession(t, clt.Stdout(), 30*time.Second)
+	s.Expect("echoServer: a message")
+	if s.Failed() {
+		return s.Error()
+	}
+	return nil
+}
+
+func numServers(t *testing.T, ctx *context.T, name string) int {
+	me, err := v23.GetNamespace(ctx).Resolve(ctx, name)
+	if err != nil {
+		return 0
+	}
+	return len(me.Servers)
+}
+
+// TODO(cnicolaou): figure out how to test and see what the internals
+// of tryCall are doing - e.g. using stats counters.
+func TestMultipleEndpoints(t *testing.T) {
+	ctx, shutdown := newCtx()
+	defer shutdown()
+
+	sh, fn := runMountTable(t, ctx)
+	defer fn()
+	srv, err := sh.Start("echoServer", nil, "echoServer", "echoServer")
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	s := expect.NewSession(t, srv.Stdout(), time.Minute)
+	s.ExpectVar("PID")
+	s.ExpectVar("NAME")
+
+	// Verify that there is 1 entry for echoServer in the mount table.
+	if got, want := numServers(t, ctx, "echoServer"), 1; got != want {
+		t.Fatalf("got: %d, want: %d", got, want)
+	}
+
+	runClient(t, sh)
+
+	// Create a fake set of 100 entries in the mount table
+	for i := 0; i < 100; i++ {
+		// 203.0.113.0 is TEST-NET-3 from RFC5737
+		ep := naming.FormatEndpoint("tcp", fmt.Sprintf("203.0.113.%d:443", i))
+		n := naming.JoinAddressName(ep, "")
+		if err := v23.GetNamespace(ctx).Mount(ctx, "echoServer", n, time.Hour); err != nil {
+			t.Fatalf("unexpected error: %s", err)
+		}
+	}
+
+	// Verify that there are 101 entries for echoServer in the mount table.
+	if got, want := numServers(t, ctx, "echoServer"), 101; got != want {
+		t.Fatalf("got: %q, want: %q", got, want)
+	}
+
+	// TODO(cnicolaou): ok, so it works, but I'm not sure how
+	// long it should take or if the parallel connection code
+	// really works. Use counters to inspect it for example.
+	if err := runClient(t, sh); err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+
+	srv.CloseStdin()
+	srv.Shutdown(nil, nil)
+
+	// Verify that there are 100 entries for echoServer in the mount table.
+	if got, want := numServers(t, ctx, "echoServer"), 100; got != want {
+		t.Fatalf("got: %d, want: %d", got, want)
+	}
+}
+
+func TestTimeout(t *testing.T) {
+	ctx, shutdown := newCtx()
+	defer shutdown()
+	client := v23.GetClient(ctx)
+	ctx, _ = context.WithTimeout(ctx, 100*time.Millisecond)
+	name := naming.JoinAddressName(naming.FormatEndpoint("tcp", "203.0.113.10:443"), "")
+	_, err := client.StartCall(ctx, name, "echo", []interface{}{"args don't matter"})
+	t.Log(err)
+	if verror.ErrorID(err) != verror.ErrTimeout.ID {
+		t.Fatalf("wrong error: %s", err)
+	}
+}
+
+func logErrors(t *testing.T, msg string, logerr, logstack, debugString bool, err error) {
+	_, file, line, _ := runtime.Caller(2)
+	loc := fmt.Sprintf("%s:%d", filepath.Base(file), line)
+	if logerr {
+		t.Logf("%s: %s: %v", loc, msg, err)
+	}
+	if logstack {
+		t.Logf("%s: %s: %v", loc, msg, verror.Stack(err).String())
+	}
+	if debugString {
+		t.Logf("%s: %s: %v", loc, msg, verror.DebugString(err))
+	}
+}
+
+func TestStartCallErrors(t *testing.T) {
+	ctx, shutdown := newCtx()
+	defer shutdown()
+	client := v23.GetClient(ctx)
+
+	ns := v23.GetNamespace(ctx)
+	v23.GetNamespace(ctx).CacheCtl(naming.DisableCache(true))
+
+	logErr := func(msg string, err error) {
+		logErrors(t, msg, true, false, false, err)
+	}
+
+	emptyCtx := &context.T{}
+	_, err := client.StartCall(emptyCtx, "noname", "nomethod", nil)
+	if verror.ErrorID(err) != verror.ErrBadArg.ID {
+		t.Fatalf("wrong error: %s", err)
+	}
+	logErr("no context", err)
+
+	p1 := options.ServerPublicKey{testutil.NewPrincipal().PublicKey()}
+	p2 := options.ServerPublicKey{testutil.NewPrincipal().PublicKey()}
+	_, err = client.StartCall(ctx, "noname", "nomethod", nil, p1, p2)
+	if verror.ErrorID(err) != verror.ErrBadArg.ID {
+		t.Fatalf("wrong error: %s", err)
+	}
+	logErr("too many public keys", err)
+
+	// This will fail with NoServers, but because there is no mount table
+	// to communicate with. The error message should include a
+	// 'connection refused' string.
+	ns.SetRoots("/127.0.0.1:8101")
+	_, err = client.StartCall(ctx, "noname", "nomethod", nil, options.NoRetry{})
+	if verror.ErrorID(err) != verror.ErrNoServers.ID {
+		t.Fatalf("wrong error: %s", err)
+	}
+	if want := "connection refused"; !strings.Contains(verror.DebugString(err), want) {
+		t.Fatalf("wrong error: %s - doesn't contain %q", err, want)
+	}
+	logErr("no mount table", err)
+
+	// This will fail with NoServers, but because there really is no
+	// name registered with the mount table.
+	_, shutdown = runMountTable(t, ctx)
+	defer shutdown()
+	_, err = client.StartCall(ctx, "noname", "nomethod", nil, options.NoRetry{})
+	if verror.ErrorID(err) != verror.ErrNoServers.ID {
+		t.Fatalf("wrong error: %s", err)
+	}
+	if unwanted := "connection refused"; strings.Contains(err.Error(), unwanted) {
+		t.Fatalf("wrong error: %s - does contain %q", err, unwanted)
+	}
+	logErr("no name registered", err)
+
+	// The following tests will fail with NoServers, but because there are
+	// no protocols that the client and servers (mount table, and "name") share.
+	nctx, nclient, err := v23.WithNewClient(ctx, irpc.PreferredProtocols([]string{"wsh"}))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	addr := naming.FormatEndpoint("nope", "127.0.0.1:1081")
+	if err := ns.Mount(ctx, "name", addr, time.Minute); err != nil {
+		t.Fatal(err)
+	}
+
+	// This will fail in its attempt to call ResolveStep to the mount table
+	// because we are using both the new context and the new client.
+	_, err = nclient.StartCall(nctx, "name", "nomethod", nil, options.NoRetry{})
+	if verror.ErrorID(err) != verror.ErrNoServers.ID {
+		t.Fatalf("wrong error: %s", err)
+	}
+	if want := "ResolveStep"; !strings.Contains(err.Error(), want) {
+		t.Fatalf("wrong error: %s - doesn't contain %q", err, want)
+	}
+	logErr("mismatched protocols", err)
+
+	// This will fail in its attempt to invoke the actual RPC because
+	// we are using the old context (which supplies the context for the calls
+	// to ResolveStep) and the new client.
+	_, err = nclient.StartCall(ctx, "name", "nomethod", nil, options.NoRetry{})
+	if verror.ErrorID(err) != verror.ErrNoServers.ID {
+		t.Fatalf("wrong error: %s", err)
+	}
+	if want := "nope"; !strings.Contains(err.Error(), want) {
+		t.Fatalf("wrong error: %s - doesn't contain %q", err, want)
+	}
+	if unwanted := "ResolveStep"; strings.Contains(err.Error(), unwanted) {
+		t.Fatalf("wrong error: %s - does contain %q", err, unwanted)
+	}
+	logErr("mismatched protocols", err)
+
+	// The following two tests will fail due to a timeout.
+	ns.SetRoots("/203.0.113.10:8101")
+	nctx, _ = context.WithTimeout(ctx, 100*time.Millisecond)
+	// This call will timeout talking to the mount table, returning
+	// NoServers, but with the string 'Timeout' in the message.
+	call, err := client.StartCall(nctx, "name", "noname", nil, options.NoRetry{})
+	if verror.ErrorID(err) != verror.ErrNoServers.ID {
+		t.Fatalf("wrong error: %s", err)
+	}
+	if want := "Timeout"; !strings.Contains(err.Error(), want) {
+		t.Fatalf("wrong error: %s - doesn't contain %q", err, want)
+	}
+	if call != nil {
+		t.Fatalf("expected call to be nil")
+	}
+	logErr("timeout to mount table", err)
+
+	// This second test will fail due to a timeout contacting the server itself.
+	addr = naming.FormatEndpoint("tcp", "203.0.113.10:8101")
+
+	nctx, _ = context.WithTimeout(ctx, 100*time.Millisecond)
+	newName := naming.JoinAddressName(addr, "")
+	call, err = client.StartCall(nctx, newName, "noname", nil, options.NoRetry{})
+	if verror.ErrorID(err) != verror.ErrTimeout.ID {
+		t.Fatalf("wrong error: %s", err)
+	}
+	if call != nil {
+		t.Fatalf("expected call to be nil")
+	}
+	logErr("timeout to server", err)
+}
+
+func dropDataDialer(network, address string, timeout time.Duration) (net.Conn, error) {
+	matcher := func(read bool, msg message.T) bool {
+		// Drop and close the connection when reading the first data message.
+		if _, ok := msg.(*message.Data); ok && read {
+			return true
+		}
+		return false
+	}
+	opts := mocknet.Opts{
+		Mode:              mocknet.V23CloseAtMessage,
+		V23MessageMatcher: matcher,
+	}
+	return mocknet.DialerWithOpts(opts, network, address, timeout)
+}
+
+func simpleResolver(network, address string) (string, string, error) {
+	return network, address, nil
+}
+
+func TestStartCallBadProtocol(t *testing.T) {
+	ctx, shutdown := newCtx()
+	defer shutdown()
+	client := v23.GetClient(ctx)
+
+	ns := v23.GetNamespace(ctx)
+	ns.CacheCtl(naming.DisableCache(true))
+
+	logErr := func(msg string, err error) {
+		logErrors(t, msg, true, false, false, err)
+	}
+
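+	// Register the "dropData" protocol so that endpoints rewritten below
+	// dial through the mock dialer, which closes the connection on the
+	// first data message it reads.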
+	rpc.RegisterProtocol("dropData", dropDataDialer, simpleResolver, net.Listen)
+
+	// The following test will fail due to a broken connection.
+	// We need to run mount table and servers with no security to use
+	// the V23CloseAtMessage net.Conn mock.
+	_, shutdown = runMountTable(t, ctx, "nosec")
+	defer shutdown()
+
+	roots := ns.Roots()
+	brkRoot, err := mocknet.RewriteEndpointProtocol(roots[0], "dropData")
+	if err != nil {
+		t.Fatal(err)
+	}
+	ns.SetRoots(brkRoot.Name())
+
+	nctx, _ := context.WithTimeout(ctx, time.Minute)
+	call, err := client.StartCall(nctx, "name", "noname", nil, options.NoRetry{}, options.SecurityNone)
+	if verror.ErrorID(err) != verror.ErrNoServers.ID {
+		t.Fatalf("wrong error: %s", err)
+	}
+	if call != nil {
+		t.Fatalf("expected call to be nil")
+	}
+	logErr("broken connection", err)
+
+	// The following test will fail because the client will set up
+	// a secure connection to a server that isn't expecting one.
+	name, fn := initServer(t, ctx, options.SecurityNone)
+	defer fn()
+
+	call, err = client.StartCall(nctx, name, "noname", nil, options.NoRetry{})
+	if verror.ErrorID(err) != verror.ErrBadProtocol.ID {
+		t.Fatalf("wrong error: %s", err)
+	}
+	if call != nil {
+		t.Fatalf("expected call to be nil")
+	}
+	logErr("insecure server", err)
+
+	// This is the inverse, secure server, insecure client
+	name, fn = initServer(t, ctx)
+	defer fn()
+
+	call, err = client.StartCall(nctx, name, "noname", nil, options.NoRetry{}, options.SecurityNone)
+	if verror.ErrorID(err) != verror.ErrBadProtocol.ID {
+		t.Fatalf("wrong error: %s", err)
+	}
+	if call != nil {
+		t.Fatalf("expected call to be nil")
+	}
+	logErr("insecure client", err)
+}
+
+func TestStartCallSecurity(t *testing.T) {
+	ctx, shutdown := newCtx()
+	defer shutdown()
+	client := v23.GetClient(ctx)
+
+	logErr := func(msg string, err error) {
+		logErrors(t, msg, true, false, false, err)
+	}
+
+	name, fn := initServer(t, ctx)
+	defer fn()
+
+	// Create a context with a new principal that doesn't match the server,
+	// so that the client will not trust the server.
+	ctx1, err := v23.WithPrincipal(ctx, testutil.NewPrincipal("test-blessing"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	call, err := client.StartCall(ctx1, name, "noname", nil, options.NoRetry{})
+	if verror.ErrorID(err) != verror.ErrNotTrusted.ID {
+		t.Fatalf("wrong error: %s", err)
+	}
+	if call != nil {
+		t.Fatalf("expected call to be nil")
+	}
+	logErr("client does not trust server", err)
+}
+
+func childPing(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
+	ctx, shutdown := test.InitForTest()
+	defer shutdown()
+	v23.GetNamespace(ctx).CacheCtl(naming.DisableCache(true))
+
+	name := args[0]
+	got := ""
+	if err := v23.GetClient(ctx).Call(ctx, name, "Ping", nil, []interface{}{&got}); err != nil {
+		return fmt.Errorf("unexpected error: %s", err)
+	}
+	fmt.Fprintf(stdout, "RESULT=%s\n", got)
+	return nil
+}
+
+func initServer(t *testing.T, ctx *context.T, opts ...rpc.ServerOpt) (string, func()) {
+	server, err := v23.NewServer(ctx, opts...)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	done := make(chan struct{})
+	deferFn := func() { close(done); server.Stop() }
+
+	eps, err := server.Listen(v23.GetListenSpec(ctx))
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	if err := server.Serve("", &simple{done}, nil); err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	return eps[0].Name(), deferFn
+}
+
+func TestTimeoutResponse(t *testing.T) {
+	ctx, shutdown := newCtx()
+	defer shutdown()
+	name, fn := initServer(t, ctx)
+	defer fn()
+	ctx, _ = context.WithTimeout(ctx, time.Millisecond)
+	err := v23.GetClient(ctx).Call(ctx, name, "Sleep", nil, nil)
+	if got, want := verror.ErrorID(err), verror.ErrTimeout.ID; got != want {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+}
+
+func TestArgsAndResponses(t *testing.T) {
+	ctx, shutdown := newCtx()
+	defer shutdown()
+	name, fn := initServer(t, ctx)
+	defer fn()
+
+	call, err := v23.GetClient(ctx).StartCall(ctx, name, "Sleep", []interface{}{"too many args"})
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	err = call.Finish()
+	if got, want := verror.ErrorID(err), verror.ErrBadProtocol.ID; got != want {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+
+	call, err = v23.GetClient(ctx).StartCall(ctx, name, "Ping", nil)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	pong := ""
+	dummy := ""
+	err = call.Finish(&pong, &dummy)
+	if got, want := verror.ErrorID(err), verror.ErrBadProtocol.ID; got != want {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+}
+
+func TestAccessDenied(t *testing.T) {
+	ctx, shutdown := test.InitForTest()
+	defer shutdown()
+
+	name, fn := initServer(t, ctx)
+	defer fn()
+
+	ctx1, err := v23.WithPrincipal(ctx, testutil.NewPrincipal("test-blessing"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Client must recognize the server, otherwise it won't even send the request.
+	v23.GetPrincipal(ctx1).AddToRoots(v23.GetPrincipal(ctx).BlessingStore().Default())
+	call, err := v23.GetClient(ctx1).StartCall(ctx1, name, "Sleep", nil)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	err = call.Finish()
+	if got, want := verror.ErrorID(err), verror.ErrNoAccess.ID; got != want {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+}
+
+func TestCanceledBeforeFinish(t *testing.T) {
+	ctx, shutdown := newCtx()
+	defer shutdown()
+	name, fn := initServer(t, ctx)
+	defer fn()
+
+	ctx, cancel := context.WithCancel(ctx)
+	call, err := v23.GetClient(ctx).StartCall(ctx, name, "Sleep", nil)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	// Cancel before we call finish.
+	cancel()
+	err = call.Finish()
+	if got, want := verror.ErrorID(err), verror.ErrCanceled.ID; got != want {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+}
+
+func TestCanceledDuringFinish(t *testing.T) {
+	ctx, shutdown := newCtx()
+	defer shutdown()
+	name, fn := initServer(t, ctx)
+	defer fn()
+
+	ctx, cancel := context.WithCancel(ctx)
+	call, err := v23.GetClient(ctx).StartCall(ctx, name, "Sleep", nil)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	// Cancel whilst the RPC is running.
+	go func() {
+		time.Sleep(100 * time.Millisecond)
+		cancel()
+	}()
+	err = call.Finish()
+	if got, want := verror.ErrorID(err), verror.ErrCanceled.ID; got != want {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+}
+
+func TestRendezvous(t *testing.T) {
+	ctx, shutdown := newCtx()
+	defer shutdown()
+	sh, fn := runMountTable(t, ctx)
+	defer fn()
+
+	name := "echoServer"
+
+	// We start the client before we start the server; StartCall will re-resolve
+	// the name until it finds an entry or times out after an exponential
+	// backoff of a few minutes.
+	startServer := func() {
+		time.Sleep(100 * time.Millisecond)
+		srv, _ := sh.Start("echoServer", nil, "message", name)
+		s := expect.NewSession(t, srv.Stdout(), time.Minute)
+		s.ExpectVar("PID")
+		s.ExpectVar("NAME")
+	}
+	go startServer()
+
+	call, err := v23.GetClient(ctx).StartCall(ctx, name, "Echo", []interface{}{"hello"})
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+
+	response := ""
+	if err := call.Finish(&response); err != nil {
+		if got, want := verror.ErrorID(err), verror.ErrCanceled.ID; got != want {
+			t.Fatalf("got %v, want %v", got, want)
+		}
+	}
+	if got, want := response, "message: hello\n"; got != want {
+		t.Errorf("got %q, want %q", got, want)
+	}
+}
+
+func TestCallback(t *testing.T) {
+	ctx, shutdown := newCtx()
+	defer shutdown()
+	sh, fn := runMountTable(t, ctx)
+	defer fn()
+
+	name, fn := initServer(t, ctx)
+	defer fn()
+
+	srv, err := sh.Start("childPing", nil, name)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	s := expect.NewSession(t, srv.Stdout(), time.Minute)
+	if got, want := s.ExpectVar("RESULT"), "pong"; got != want {
+		t.Errorf("got %q, want %q", got, want)
+	}
+}
+
+func TestStreamTimeout(t *testing.T) {
+	ctx, shutdown := newCtx()
+	defer shutdown()
+	name, fn := initServer(t, ctx)
+	defer fn()
+
+	want := 10
+	ctx, _ = context.WithTimeout(ctx, 300*time.Millisecond)
+	call, err := v23.GetClient(ctx).StartCall(ctx, name, "Source", []interface{}{want})
+	if err != nil {
+		if verror.ErrorID(err) != verror.ErrTimeout.ID {
+			t.Fatalf("verror should be a timeout not %s: stack %s",
+				err, verror.Stack(err))
+		}
+		return
+	}
+
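+	// Drain the stream: each received value must follow the expected
+	// sequence until the timeout error surfaces.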
+	for {
+		got := 0
+		err := call.Recv(&got)
+		if err == nil {
+			if got != want {
+				t.Fatalf("got %d, want %d", got, want)
+			}
+			want++
+			continue
+		}
+		if got, want := verror.ErrorID(err), verror.ErrTimeout.ID; got != want {
+			t.Fatalf("got %v, want %v", got, want)
+		}
+		break
+	}
+	err = call.Finish()
+	if got, want := verror.ErrorID(err), verror.ErrTimeout.ID; got != want {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+}
+
+func TestStreamAbort(t *testing.T) {
+	ctx, shutdown := newCtx()
+	defer shutdown()
+	name, fn := initServer(t, ctx)
+	defer fn()
+
+	call, err := v23.GetClient(ctx).StartCall(ctx, name, "Sink", nil)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+
+	want := 10
+	for i := 0; i <= want; i++ {
+		if err := call.Send(i); err != nil {
+			t.Fatalf("unexpected error: %s", err)
+		}
+	}
+	call.CloseSend()
+	err = call.Send(100)
+	if got, want := verror.ErrorID(err), verror.ErrAborted.ID; got != want {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+
+	result := 0
+	err = call.Finish(&result)
+	if err != nil {
+		t.Errorf("unexpected error: %#v", err)
+	}
+	if got := result; got != want {
+		t.Errorf("got %d, want %d", got, want)
+	}
+}
+
+func TestNoServersAvailable(t *testing.T) {
+	ctx, shutdown := newCtx()
+	defer shutdown()
+	_, fn := runMountTable(t, ctx)
+	defer fn()
+	name := "noservers"
+	call, err := v23.GetClient(ctx).StartCall(ctx, name, "Sleep", nil, options.NoRetry{})
+	if err != nil {
+		if got, want := verror.ErrorID(err), verror.ErrNoServers.ID; got != want {
+			t.Fatalf("got %v, want %v", got, want)
+		}
+		return
+	}
+	err = call.Finish()
+	if got, want := verror.ErrorID(err), verror.ErrNoServers.ID; got != want {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+}
+
+func TestNoMountTable(t *testing.T) {
+	ctx, shutdown := newCtx()
+	defer shutdown()
+	v23.GetNamespace(ctx).SetRoots()
+	name := "a_mount_table_entry"
+
+	// If there is no mount table, then we'll get a NoServers error message.
+	ctx, _ = context.WithTimeout(ctx, 300*time.Millisecond)
+	_, err := v23.GetClient(ctx).StartCall(ctx, name, "Sleep", nil)
+	if got, want := verror.ErrorID(err), verror.ErrNoServers.ID; got != want {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+}
+
+// TestReconnect verifies that the client transparently re-establishes the
+// connection to the server if the server dies and comes back (on the same
+// endpoint).
+func TestReconnect(t *testing.T) {
+	t.Skip()
+	ctx, shutdown := test.InitForTest()
+	defer shutdown()
+
+	sh, err := modules.NewShell(ctx, v23.GetPrincipal(ctx), testing.Verbose(), t)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	defer sh.Cleanup(os.Stderr, os.Stderr)
+	server, err := sh.Start("echoServer", nil, "--v23.tcp.address=127.0.0.1:0", "mymessage", "")
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	server.ReadLine()
+	serverName := server.ExpectVar("NAME")
+	serverEP, _ := naming.SplitAddressName(serverName)
+	ep, _ := inaming.NewEndpoint(serverEP)
+
+	makeCall := func(ctx *context.T, opts ...rpc.CallOpt) (string, error) {
+		ctx, _ = context.WithDeadline(ctx, time.Now().Add(10*time.Second))
+		call, err := v23.GetClient(ctx).StartCall(ctx, serverName, "Echo", []interface{}{"bratman"}, opts...)
+		if err != nil {
+			return "", fmt.Errorf("START: %s", err)
+		}
+		var result string
+		if err := call.Finish(&result); err != nil {
+			return "", err
+		}
+		return result, nil
+	}
+
+	expected := "mymessage: bratman\n"
+	if result, err := makeCall(ctx); err != nil || result != expected {
+		t.Errorf("Got (%q, %v) want (%q, nil)", result, err, expected)
+	}
+	// Kill the server, verify client can't talk to it anymore.
+	if err := server.Shutdown(os.Stderr, os.Stderr); err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+
+	if _, err := makeCall(ctx, options.NoRetry{}); err == nil || (!strings.HasPrefix(err.Error(), "START") && !strings.Contains(err.Error(), "EOF")) {
+		t.Fatalf(`Got (%v) want ("START: <err>" or "EOF") as server is down`, err)
+	}
+
+	// Resurrect the server with the same address, verify client
+	// re-establishes the connection. This is racy if another
+	// process grabs the port.
+	server, err = sh.Start("echoServer", nil, "--v23.tcp.address="+ep.Address, "mymessage again", "")
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	defer server.Shutdown(os.Stderr, os.Stderr)
+	expected = "mymessage again: bratman\n"
+	if result, err := makeCall(ctx); err != nil || result != expected {
+		t.Errorf("Got (%q, %v) want (%q, nil)", result, err, expected)
+	}
+}
+
+func TestMethodErrors(t *testing.T) {
+	ctx, shutdown := newCtx()
+	defer shutdown()
+	clt := v23.GetClient(ctx)
+
+	name, fn := initServer(t, ctx)
+	defer fn()
+
+	var (
+		i, j int
+		s    string
+	)
+
+	testCases := []struct {
+		testName, objectName, method string
+		args, results                []interface{}
+		wantID                       verror.ID
+		wantMessage                  string
+	}{
+		{
+			testName:   "unknown method",
+			objectName: name,
+			method:     "NoMethod",
+			wantID:     verror.ErrUnknownMethod.ID,
+		},
+		{
+			testName:   "unknown suffix",
+			objectName: name + "/NoSuffix",
+			method:     "Ping",
+			wantID:     verror.ErrUnknownSuffix.ID,
+		},
+		{
+			testName:    "too many args",
+			objectName:  name,
+			method:      "Ping",
+			args:        []interface{}{1, 2},
+			results:     []interface{}{&i},
+			wantID:      verror.ErrBadProtocol.ID,
+			wantMessage: "wrong number of input arguments",
+		},
+		{
+			testName:    "wrong number of results",
+			objectName:  name,
+			method:      "Ping",
+			results:     []interface{}{&i, &j},
+			wantID:      verror.ErrBadProtocol.ID,
+			wantMessage: "results, but want",
+		},
+		{
+			testName:    "mismatched arg types",
+			objectName:  name,
+			method:      "Echo",
+			args:        []interface{}{1},
+			results:     []interface{}{&s},
+			wantID:      verror.ErrBadProtocol.ID,
+			wantMessage: "aren't compatible",
+		},
+		{
+			testName:    "mismatched result types",
+			objectName:  name,
+			method:      "Ping",
+			results:     []interface{}{&i},
+			wantID:      verror.ErrBadProtocol.ID,
+			wantMessage: "aren't compatible",
+		},
+	}
+
+	for _, test := range testCases {
+		testPrefix := fmt.Sprintf("test(%s) failed", test.testName)
+		call, err := clt.StartCall(ctx, test.objectName, test.method, test.args)
+		if err != nil {
+			t.Fatalf("%s: %v", testPrefix, err)
+		}
+		verr := call.Finish(test.results...)
+		if verror.ErrorID(verr) != test.wantID {
+			t.Errorf("%s: wrong error: %v", testPrefix, verr)
+		} else if got, want := verr.Error(), test.wantMessage; !strings.Contains(got, want) {
+			t.Errorf("%s: want %q to contain %q", testPrefix, got, want)
+		}
+		logErrors(t, test.testName, false, false, false, verr)
+	}
+}
+
+func TestReservedMethodErrors(t *testing.T) {
+	ctx, shutdown := newCtx()
+	defer shutdown()
+	clt := v23.GetClient(ctx)
+
+	name, fn := initServer(t, ctx)
+	defer fn()
+
+	logErr := func(msg string, err error) {
+		logErrors(t, msg, true, false, false, err)
+	}
+
+	// This call will fail because the __xx suffix is not supported by
+	// the dispatcher implementing Signature.
+	call, err := clt.StartCall(ctx, name+"/__xx", "__Signature", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	sig := []signature.Interface{}
+	verr := call.Finish(&sig)
+	if verror.ErrorID(verr) != verror.ErrUnknownSuffix.ID {
+		t.Fatalf("wrong error: %s", verr)
+	}
+	logErr("unknown suffix", verr)
+
+	// This call will fail for the same reason, but with a different error,
+	// saying that MethodSignature is an unknown method.
+	call, err = clt.StartCall(ctx, name+"/__xx", "__MethodSignature", []interface{}{"dummy"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	verr = call.Finish(&sig)
+	if verror.ErrorID(verr) != verror.ErrUnknownMethod.ID {
+		t.Fatalf("wrong error: %s", verr)
+	}
+	logErr("unknown method", verr)
+}
diff --git a/runtime/internal/rpc/test/doc.go b/runtime/internal/rpc/test/doc.go
new file mode 100644
index 0000000..df643b7
--- /dev/null
+++ b/runtime/internal/rpc/test/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package test contains tests for rpc code that do not rely on unexposed rpc declarations.
+package test
diff --git a/runtime/internal/rpc/test/glob_test.go b/runtime/internal/rpc/test/glob_test.go
new file mode 100644
index 0000000..7e5bd43
--- /dev/null
+++ b/runtime/internal/rpc/test/glob_test.go
@@ -0,0 +1,387 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strings"
+	"testing"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/i18n"
+	"v.io/v23/naming"
+	"v.io/v23/rpc"
+	"v.io/v23/rpc/reserved"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+	"v.io/x/ref/lib/glob"
+	_ "v.io/x/ref/runtime/factories/generic"
+	"v.io/x/ref/test"
+	"v.io/x/ref/test/testutil"
+)
+
+func startGlobServer(ctx *context.T, tree *node) (string, func(), error) {
+	server, err := v23.NewServer(ctx)
+	if err != nil {
+		return "", nil, fmt.Errorf("failed to start debug server: %v", err)
+	}
+	endpoints, err := server.Listen(v23.GetListenSpec(ctx))
+	if err != nil {
+		return "", nil, fmt.Errorf("failed to listen: %v", err)
+	}
+	if err := server.ServeDispatcher("", &disp{tree}); err != nil {
+		return "", nil, err
+	}
+	ep := endpoints[0].String()
+	return ep, func() { server.Stop() }, nil
+}
+
+func TestGlob(t *testing.T) {
+	ctx, shutdown := test.InitForTest()
+	defer shutdown()
+
+	namespace := []string{
+		"a/b/c1/d1",
+		"a/b/c1/d2",
+		"a/b/c2/d1",
+		"a/b/c2/d2",
+		"a/x/y/z",
+		"leaf",
+	}
+	tree := newNode()
+	for _, p := range namespace {
+		tree.find(strings.Split(p, "/"), true)
+	}
+
+	ep, stop, err := startGlobServer(ctx, tree)
+	if err != nil {
+		t.Fatalf("startGlobServer: %v", err)
+	}
+	defer stop()
+
+	var (
+		noExist        = verror.New(verror.ErrNoExist, ctx, "")
+		notImplemented = reserved.NewErrGlobNotImplemented(ctx)
+		maxRecursion   = reserved.NewErrGlobMaxRecursionReached(ctx)
+	)
+
+	testcases := []struct {
+		name, pattern string
+		expected      []string
+		errors        []naming.GlobError
+	}{
+		{"", "...", []string{
+			"",
+			"a",
+			"a/b",
+			"a/b/c1",
+			"a/b/c1/d1",
+			"a/b/c1/d2",
+			"a/b/c2",
+			"a/b/c2/d1",
+			"a/b/c2/d2",
+			"a/x",
+			"a/x/y",
+			"a/x/y/z",
+			"leaf",
+		}, nil},
+		{"a", "...", []string{
+			"",
+			"b",
+			"b/c1",
+			"b/c1/d1",
+			"b/c1/d2",
+			"b/c2",
+			"b/c2/d1",
+			"b/c2/d2",
+			"x",
+			"x/y",
+			"x/y/z",
+		}, nil},
+		{"a/b", "...", []string{
+			"",
+			"c1",
+			"c1/d1",
+			"c1/d2",
+			"c2",
+			"c2/d1",
+			"c2/d2",
+		}, nil},
+		{"a/b/c1", "...", []string{
+			"",
+			"d1",
+			"d2",
+		}, nil},
+		{"a/b/c1/d1", "...", []string{
+			"",
+		}, nil},
+		{"a/x", "...", []string{
+			"",
+			"y",
+			"y/z",
+		}, nil},
+		{"a/x/y", "...", []string{
+			"",
+			"z",
+		}, nil},
+		{"a/x/y/z", "...", []string{
+			"",
+		}, nil},
+		{"", "", []string{""}, nil},
+		{"", "*", []string{"a", "leaf"}, nil},
+		{"a", "", []string{""}, nil},
+		{"a", "*", []string{"b", "x"}, nil},
+		{"a/b", "", []string{""}, nil},
+		{"a/b", "*", []string{"c1", "c2"}, nil},
+		{"a/b/c1", "", []string{""}, nil},
+		{"a/b/c1", "*", []string{"d1", "d2"}, nil},
+		{"a/b/c1/d1", "*", []string{}, nil},
+		{"a/b/c1/d1", "", []string{""}, nil},
+		{"a", "*/c?", []string{"b/c1", "b/c2"}, nil},
+		{"a", "*/*", []string{"b/c1", "b/c2", "x/y"}, nil},
+		{"a", "*/*/*", []string{"b/c1/d1", "b/c1/d2", "b/c2/d1", "b/c2/d2", "x/y/z"}, nil},
+		{"a/x", "*/*", []string{"y/z"}, nil},
+		{"bad", "", []string{}, []naming.GlobError{{Name: "", Error: noExist}}},
+		{"bad/foo", "", []string{}, []naming.GlobError{{Name: "", Error: noExist}}},
+		{"a/bad", "", []string{}, []naming.GlobError{{Name: "", Error: noExist}}},
+		{"a/b/bad", "", []string{}, []naming.GlobError{{Name: "", Error: noExist}}},
+		{"a/b/c1/bad", "", []string{}, []naming.GlobError{{Name: "", Error: noExist}}},
+		{"a/x/bad", "", []string{}, []naming.GlobError{{Name: "", Error: noExist}}},
+		{"a/x/y/bad", "", []string{}, []naming.GlobError{{Name: "", Error: noExist}}},
+		{"leaf", "", []string{""}, nil},
+		{"leaf", "*", []string{}, []naming.GlobError{{Name: "", Error: notImplemented}}},
+		{"leaf/foo", "", []string{}, []naming.GlobError{{Name: "", Error: noExist}}},
+		// muah is an infinite space to test the recursion limit.
+		{"muah", "*", []string{"ha"}, nil},
+		{"muah", "*/*", []string{"ha/ha"}, nil},
+		{"muah", "*/*/*/*/*/*/*/*/*/*/*/*", []string{"ha/ha/ha/ha/ha/ha/ha/ha/ha/ha/ha/ha"}, nil},
+		{"muah", "...", []string{
+			"",
+			"ha",
+			"ha/ha",
+			"ha/ha/ha",
+			"ha/ha/ha/ha",
+			"ha/ha/ha/ha/ha",
+			"ha/ha/ha/ha/ha/ha",
+			"ha/ha/ha/ha/ha/ha/ha",
+			"ha/ha/ha/ha/ha/ha/ha/ha",
+			"ha/ha/ha/ha/ha/ha/ha/ha/ha",
+			"ha/ha/ha/ha/ha/ha/ha/ha/ha/ha",
+		}, []naming.GlobError{{Name: "ha/ha/ha/ha/ha/ha/ha/ha/ha/ha/ha", Error: maxRecursion}}},
+	}
+	for _, tc := range testcases {
+		name := naming.JoinAddressName(ep, tc.name)
+		results, globErrors, err := testutil.GlobName(ctx, name, tc.pattern)
+		if err != nil {
+			t.Errorf("unexpected Glob error for (%q, %q): %v", tc.name, tc.pattern, err)
+			continue
+		}
+		if !reflect.DeepEqual(results, tc.expected) {
+			t.Errorf("unexpected result for (%q, %q). Got %q, want %q", tc.name, tc.pattern, results, tc.expected)
+		}
+		if len(globErrors) != len(tc.errors) {
+			t.Errorf("unexpected number of glob errors for (%q, %q): got %#v, expected %#v", tc.name, tc.pattern, globErrors, tc.errors)
+		}
+		for i, e := range globErrors {
+			if i >= len(tc.errors) {
+				t.Errorf("unexpected glob error for (%q, %q): %#v", tc.name, tc.pattern, e.Error)
+				continue
+			}
+			if e.Name != tc.errors[i].Name {
+				t.Errorf("unexpected glob error for (%q, %q): %v", tc.name, tc.pattern, e)
+			}
+			if got, expected := verror.ErrorID(e.Error), verror.ErrorID(tc.errors[i].Error); got != expected {
+				t.Errorf("unexpected error ID for (%q, %q): Got %v, expected %v", tc.name, tc.pattern, got, expected)
+			}
+		}
+	}
+}
+
+func TestGlobDeny(t *testing.T) {
+	ctx, shutdown := test.InitForTest()
+	defer shutdown()
+
+	tree := newNode()
+	tree.find([]string{"a", "b"}, true)
+	tree.find([]string{"a", "deny", "x"}, true)
+	ep, stop, err := startGlobServer(ctx, tree)
+	if err != nil {
+		t.Fatalf("startGlobServer: %v", err)
+	}
+	defer stop()
+
+	testcases := []struct {
+		name, pattern string
+		results       []string
+		numErrors     int
+	}{
+		{"", "*", []string{"a"}, 0},
+		{"", "*/*", []string{"a/b"}, 1},
+		{"a", "*", []string{"b"}, 1},
+		{"a/deny", "*", []string{}, 1},
+		{"a/deny/x", "*", []string{}, 1},
+		{"a/deny/y", "*", []string{}, 1},
+	}
+
+	// Ensure that we're getting the English error message.
+	ctx = i18n.WithLangID(ctx, i18n.LangID("en-US"))
+
+	for _, tc := range testcases {
+		name := naming.JoinAddressName(ep, tc.name)
+		results, globErrors, err := testutil.GlobName(ctx, name, tc.pattern)
+		if err != nil {
+			t.Errorf("unexpected Glob error for (%q, %q): %v", tc.name, tc.pattern, err)
+		}
+		if !reflect.DeepEqual(results, tc.results) {
+			t.Errorf("unexpected results for (%q, %q). Got %v, expected %v", tc.name, tc.pattern, results, tc.results)
+		}
+		if len(globErrors) != tc.numErrors {
+			t.Errorf("unexpected number of errors for (%q, %q). Got %v, expected %v", tc.name, tc.pattern, len(globErrors), tc.numErrors)
+		}
+		for _, gerr := range globErrors {
+			if got, expected := verror.ErrorID(gerr.Error), reserved.ErrGlobMatchesOmitted.ID; got != expected {
+				t.Errorf("unexpected error for (%q, %q): Got %v, expected %v", tc.name, tc.pattern, got, expected)
+			}
+			// This error message is purposely vague to avoid leaking information that
+			// the user doesn't have access to.
+			// We check the actual error string to make sure that we don't start
+			// leaking new information by accident.
+			expectedStr := fmt.Sprintf(
+				`test.test:"%s".__Glob: some matches might have been omitted`,
+				tc.name)
+			if got := gerr.Error.Error(); got != expectedStr {
+				t.Errorf("unexpected error string: Got %q, expected %q", got, expectedStr)
+			}
+		}
+	}
+}
+
+type disp struct {
+	tree *node
+}
+
+func (d *disp) Lookup(suffix string) (interface{}, security.Authorizer, error) {
+	elems := strings.Split(suffix, "/")
+	var auth security.Authorizer
+	for _, e := range elems {
+		if e == "deny" {
+			auth = &denyAllAuthorizer{}
+			break
+		}
+	}
+	if len(elems) != 0 && elems[0] == "muah" {
+		// Infinite space. Each node has one child named "ha".
+		return rpc.ChildrenGlobberInvoker("ha"), auth, nil
+	}
+	if len(elems) != 0 && elems[len(elems)-1] == "leaf" {
+		return leafObject{}, auth, nil
+	}
+	if len(elems) < 2 || (elems[0] == "a" && elems[1] == "x") {
+		return &vChildrenObject{d.tree, elems}, auth, nil
+	}
+	return &globObject{d.tree, elems}, auth, nil
+}
+
+type denyAllAuthorizer struct{}
+
+func (denyAllAuthorizer) Authorize(*context.T, security.Call) error {
+	return errors.New("no access")
+}
+
+type globObject struct {
+	n      *node
+	suffix []string
+}
+
+func (o *globObject) Glob__(ctx *context.T, _ rpc.ServerCall, pattern string) (<-chan naming.GlobReply, error) {
+	g, err := glob.Parse(pattern)
+	if err != nil {
+		return nil, err
+	}
+	n := o.n.find(o.suffix, false)
+	if n == nil {
+		return nil, verror.New(verror.ErrNoExist, ctx, o.suffix)
+	}
+	ch := make(chan naming.GlobReply)
+	go func() {
+		o.globLoop(ch, "", g, n)
+		close(ch)
+	}()
+	return ch, nil
+}
+
+func (o *globObject) globLoop(ch chan<- naming.GlobReply, name string, g *glob.Glob, n *node) {
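+	// An exhausted pattern matches the current node, so emit it here; once
+	// the glob can match nothing deeper, stop descending.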
+	if g.Len() == 0 {
+		ch <- naming.GlobReplyEntry{naming.MountEntry{Name: name}}
+	}
+	if g.Finished() {
+		return
+	}
+	for leaf, child := range n.children {
+		if ok, _, left := g.MatchInitialSegment(leaf); ok {
+			o.globLoop(ch, naming.Join(name, leaf), left, child)
+		}
+	}
+}
+
+type vChildrenObject struct {
+	n      *node
+	suffix []string
+}
+
+func (o *vChildrenObject) GlobChildren__(ctx *context.T, _ rpc.ServerCall) (<-chan string, error) {
+	n := o.n.find(o.suffix, false)
+	if n == nil {
+		return nil, verror.New(verror.ErrNoExist, ctx, o.suffix)
+	}
+	ch := make(chan string, len(n.children))
+	for child := range n.children {
+		ch <- child
+	}
+	close(ch)
+	return ch, nil
+}
+
+type node struct {
+	children map[string]*node
+}
+
+func newNode() *node {
+	return &node{make(map[string]*node)}
+}
+
+func (n *node) find(names []string, create bool) *node {
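+	// A suffix of "" splits into a single empty element and names this node.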
+	if len(names) == 1 && names[0] == "" {
+		return n
+	}
+	for {
+		if len(names) == 0 {
+			return n
+		}
+		if next, ok := n.children[names[0]]; ok {
+			n = next
+			names = names[1:]
+			continue
+		}
+		if create {
+			nn := newNode()
+			n.children[names[0]] = nn
+			n = nn
+			names = names[1:]
+			continue
+		}
+		return nil
+	}
+}
+
+type leafObject struct{}
+
+func (l leafObject) Func(*context.T, rpc.ServerCall) error {
+	return nil
+}
diff --git a/runtime/internal/rpc/test/proxy_test.go b/runtime/internal/rpc/test/proxy_test.go
new file mode 100644
index 0000000..05e1c36
--- /dev/null
+++ b/runtime/internal/rpc/test/proxy_test.go
@@ -0,0 +1,409 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"sort"
+	"strings"
+	"testing"
+	"time"
+
+	"v.io/x/lib/vlog"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/namespace"
+	"v.io/v23/naming"
+	"v.io/v23/options"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+	"v.io/v23/vtrace"
+	"v.io/x/ref/lib/flags"
+	_ "v.io/x/ref/runtime/factories/generic"
+	"v.io/x/ref/runtime/internal/lib/publisher"
+	inaming "v.io/x/ref/runtime/internal/naming"
+	irpc "v.io/x/ref/runtime/internal/rpc"
+	imanager "v.io/x/ref/runtime/internal/rpc/stream/manager"
+	"v.io/x/ref/runtime/internal/rpc/stream/proxy"
+	tnaming "v.io/x/ref/runtime/internal/testing/mocks/naming"
+	ivtrace "v.io/x/ref/runtime/internal/vtrace"
+	"v.io/x/ref/test/modules"
+	"v.io/x/ref/test/testutil"
+)
+
+func testContext() (*context.T, func()) {
+	ctx, shutdown := v23.Init()
+	ctx, _ = context.WithTimeout(ctx, 20*time.Second)
+	var err error
+	if ctx, err = ivtrace.Init(ctx, flags.VtraceFlags{}); err != nil {
+		panic(err)
+	}
+	ctx, _ = vtrace.WithNewTrace(ctx)
+	return ctx, shutdown
+}
+
+func proxyServer(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
+	ctx, shutdown := v23.Init()
+	defer shutdown()
+
+	expected := len(args)
+
+	listenSpec := rpc.ListenSpec{Addrs: rpc.ListenAddrs{{"tcp", "127.0.0.1:0"}}}
+	proxyShutdown, proxyEp, err := proxy.New(ctx, listenSpec, security.AllowEveryone())
+	if err != nil {
+		fmt.Fprintf(stderr, "%s\n", verror.DebugString(err))
+		return err
+	}
+	defer proxyShutdown()
+	fmt.Fprintf(stdout, "PID=%d\n", os.Getpid())
+	if expected > 0 {
+		pub := publisher.New(ctx, v23.GetNamespace(ctx), time.Minute)
+		defer pub.WaitForStop()
+		defer pub.Stop()
+		pub.AddServer(proxyEp.String())
+		for _, name := range args {
+			if len(name) == 0 {
+				return fmt.Errorf("empty name specified on the command line")
+			}
+			pub.AddName(name, false, false)
+		}
+		// Wait for all the entries to be published.
+		for {
+			pubState := pub.Status()
+			if expected == len(pubState) {
+				break
+			}
+			time.Sleep(time.Second)
+		}
+	}
+	fmt.Fprintf(stdout, "PROXY_NAME=%s\n", proxyEp.Name())
+	modules.WaitForEOF(stdin)
+	fmt.Fprintf(stdout, "DONE\n")
+	return nil
+}
+
+type testServer struct{}
+
+func (*testServer) Echo(_ *context.T, call rpc.ServerCall, arg string) (string, error) {
+	return fmt.Sprintf("method:%q,suffix:%q,arg:%q", "Echo", call.Suffix(), arg), nil
+}
+
+type testServerAuthorizer struct{}
+
+func (testServerAuthorizer) Authorize(*context.T, security.Call) error {
+	return nil
+}
+
+type testServerDisp struct{ server interface{} }
+
+func (t testServerDisp) Lookup(suffix string) (interface{}, security.Authorizer, error) {
+	return t.server, testServerAuthorizer{}, nil
+}
+
+type proxyHandle struct {
+	ns    namespace.T
+	sh    *modules.Shell
+	proxy modules.Handle
+	name  string
+}
+
+func (h *proxyHandle) Start(t *testing.T, ctx *context.T, args ...string) error {
+	sh, err := modules.NewShell(nil, nil, testing.Verbose(), t)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	h.sh = sh
+	p, err := sh.Start("proxyServer", nil, args...)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	h.proxy = p
+	p.ReadLine()
+	h.name = p.ExpectVar("PROXY_NAME")
+	if len(h.name) == 0 {
+		h.proxy.Shutdown(os.Stderr, os.Stderr)
+		t.Fatalf("failed to get PROXY_NAME from proxyd")
+	}
+	return h.ns.Mount(ctx, "proxy", h.name, time.Hour)
+}
+
+func (h *proxyHandle) Stop(ctx *context.T) error {
+	defer h.sh.Cleanup(os.Stderr, os.Stderr)
+	if err := h.proxy.Shutdown(os.Stderr, os.Stderr); err != nil {
+		return err
+	}
+	if len(h.name) == 0 {
+		return nil
+	}
+	return h.ns.Unmount(ctx, "proxy", h.name)
+}
+
+func TestProxyOnly(t *testing.T) {
+	listenSpec := rpc.ListenSpec{Proxy: "proxy"}
+	testProxy(t, listenSpec)
+}
+
+func TestProxy(t *testing.T) {
+	proxyListenSpec := rpc.ListenSpec{
+		Addrs: rpc.ListenAddrs{{"tcp", "127.0.0.1:0"}},
+		Proxy: "proxy",
+	}
+	testProxy(t, proxyListenSpec)
+}
+
+func TestWSProxy(t *testing.T) {
+	proxyListenSpec := rpc.ListenSpec{
+		Addrs: rpc.ListenAddrs{{"tcp", "127.0.0.1:0"}},
+		Proxy: "proxy",
+	}
+	// The proxy uses websockets only, but the server is using tcp.
+	testProxy(t, proxyListenSpec, "--v23.tcp.protocol=ws")
+}
+
+func testProxy(t *testing.T, spec rpc.ListenSpec, args ...string) {
+	ctx, shutdown := testContext()
+	defer shutdown()
+
+	var (
+		pserver   = testutil.NewPrincipal("server")
+		pclient   = testutil.NewPrincipal("client")
+		serverKey = pserver.PublicKey()
+		// We use different stream managers for the client and server
+		// to prevent VIF re-use (in other words, we want to test VIF
+		// creation from both the client and server end).
+		smserver = imanager.InternalNew(naming.FixedRoutingID(0x555555555))
+		smclient = imanager.InternalNew(naming.FixedRoutingID(0x444444444))
+		ns       = tnaming.NewSimpleNamespace()
+	)
+	defer smserver.Shutdown()
+	defer smclient.Shutdown()
+	client, err := irpc.InternalNewClient(smserver, ns)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer client.Close()
+	serverCtx, _ := v23.WithPrincipal(ctx, pserver)
+	server, err := irpc.InternalNewServer(serverCtx, smserver, ns, nil, "", nil, pserver)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer server.Stop()
+
+	// The client must recognize the server's blessings, otherwise it won't
+	// communicate with it.
+	pclient.AddToRoots(pserver.BlessingStore().Default())
+
+	// If no address is specified then we'll only 'listen' via
+	// the proxy.
+	hasLocalListener := len(spec.Addrs) > 0 && len(spec.Addrs[0].Address) != 0
+
+	name := "mountpoint/server/suffix"
+	makeCall := func(opts ...rpc.CallOpt) (string, error) {
+		clientCtx, _ := v23.WithPrincipal(ctx, pclient)
+		clientCtx, _ = context.WithDeadline(clientCtx, time.Now().Add(5*time.Second))
+		call, err := client.StartCall(clientCtx, name, "Echo", []interface{}{"batman"}, opts...)
+		if err != nil {
+			// The proxy is down, so return here, prepending the
+			// error with a well-known string that the tests below
+			// can check for.
+			return "", fmt.Errorf("RESOLVE: %s", err)
+		}
+		var result string
+		if err = call.Finish(&result); err != nil {
+			return "", err
+		}
+		return result, nil
+	}
+	proxy := &proxyHandle{ns: ns}
+	if err := proxy.Start(t, ctx, args...); err != nil {
+		t.Fatal(err)
+	}
+	defer proxy.Stop(ctx)
+	addrs := verifyMount(t, ctx, ns, spec.Proxy)
+	if len(addrs) != 1 {
+		t.Fatalf("failed to lookup proxy")
+	}
+
+	eps, err := server.Listen(spec)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := server.ServeDispatcher("mountpoint/server", testServerDisp{&testServer{}}); err != nil {
+		t.Fatal(err)
+	}
+
+	// Proxy connections are started asynchronously, so we need to wait.
+	waitForMountTable := func(ch chan int, expect int) {
+		then := time.Now().Add(time.Minute)
+		for {
+			me, err := ns.Resolve(ctx, name)
+			if err == nil {
+				for i, s := range me.Servers {
+					vlog.Infof("%d: %s", i, s)
+				}
+				if len(me.Servers) == expect {
+					ch <- 1
+					return
+				}
+			}
+			if time.Now().After(then) {
+				t.Fatalf("timed out waiting for %d servers", expect)
+			}
+			time.Sleep(100 * time.Millisecond)
+		}
+	}
+	waitForServerStatus := func(ch chan int, proxy string) {
+		then := time.Now().Add(time.Minute)
+		for {
+			status := server.Status()
+			if len(status.Proxies) == 1 && status.Proxies[0].Proxy == proxy {
+				ch <- 2
+				return
+			}
+			if time.Now().After(then) {
+				t.Fatalf("timed out")
+			}
+			time.Sleep(100 * time.Millisecond)
+		}
+	}
+	proxyEP, _ := naming.SplitAddressName(addrs[0])
+	proxiedEP, err := inaming.NewEndpoint(proxyEP)
+	if err != nil {
+		t.Fatalf("unexpected error for %q: %s", proxyEP, err)
+	}
+	proxiedEP.RID = naming.FixedRoutingID(0x555555555)
+	proxiedEP.Blessings = []string{"server"}
+	expectedNames := []string{naming.JoinAddressName(proxiedEP.String(), "suffix")}
+	if hasLocalListener {
+		expectedNames = append(expectedNames, naming.JoinAddressName(eps[0].String(), "suffix"))
+	}
+
+	// Proxy connections are created asynchronously, so we wait for the
+	// expected number of endpoints to appear for the specified service name.
+	ch := make(chan int, 2)
+	go waitForMountTable(ch, len(expectedNames))
+	go waitForServerStatus(ch, spec.Proxy)
+	select {
+	case <-time.After(time.Minute):
+		t.Fatalf("timedout waiting for two entries in the mount table and server status")
+	case i := <-ch:
+		select {
+		case <-time.After(time.Minute):
+			t.Fatalf("timedout waiting for two entries in the mount table or server status")
+		case j := <-ch:
+			if !((i == 1 && j == 2) || (i == 2 && j == 1)) {
+				t.Fatalf("unexpected return values from waiters")
+			}
+		}
+	}
+
+	status := server.Status()
+	if got, want := status.Proxies[0].Endpoint, proxiedEP; !reflect.DeepEqual(got, want) {
+		t.Fatalf("got %q, want %q", got, want)
+	}
+
+	got := []string{}
+	for _, s := range verifyMount(t, ctx, ns, name) {
+		got = append(got, s)
+	}
+	sort.Strings(got)
+	sort.Strings(expectedNames)
+	if !reflect.DeepEqual(got, expectedNames) {
+		t.Errorf("got %v, want %v", got, expectedNames)
+	}
+
+	if hasLocalListener {
+		// Listen will publish both the local and proxied endpoints with the
+		// mount table. Since we're trying to test the proxy, we remove the
+		// local endpoint from the mount table entry. We have to remove both
+		// the tcp and the websocket address.
+		sep := eps[0].String()
+		ns.Unmount(ctx, "mountpoint/server", sep)
+	}
+
+	addrs = verifyMount(t, ctx, ns, name)
+	if len(addrs) != 1 {
+		t.Fatalf("failed to lookup proxy: addrs %v", addrs)
+	}
+
+	// Proxied endpoint should be published and RPC should succeed (through proxy).
+	// Additionally, any server authorization options must only apply to the end server
+	// and not the proxy.
+	const expected = `method:"Echo",suffix:"suffix",arg:"batman"`
+	if result, err := makeCall(options.ServerPublicKey{serverKey}); result != expected || err != nil {
+		t.Fatalf("Got (%v, %v) want (%v, nil)", result, err, expected)
+	}
+
+	// Proxy dies, calls should fail and the name should be unmounted.
+	if err := proxy.Stop(ctx); err != nil {
+		t.Fatal(err)
+	}
+
+	if result, err := makeCall(options.NoRetry{}); err == nil || (!strings.HasPrefix(err.Error(), "RESOLVE") && !strings.Contains(err.Error(), "EOF")) {
+		t.Fatalf(`Got (%v, %v) want ("", "RESOLVE: <err>" or "EOF") as proxy is down`, result, err)
+	}
+
+	for {
+		if _, err := ns.Resolve(ctx, name); err != nil {
+			break
+		}
+		time.Sleep(10 * time.Millisecond)
+	}
+	verifyMountMissing(t, ctx, ns, name)
+
+	status = server.Status()
+	if got, want := len(status.Proxies), 1; got != want {
+		t.Logf("Proxies: %v", status.Proxies)
+		t.Fatalf("got %v, want %v", got, want)
+	}
+	if got, want := status.Proxies[0].Proxy, spec.Proxy; got != want {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+	if got, want := verror.ErrorID(status.Proxies[0].Error), verror.ErrNoServers.ID; got != want {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+
+	// Proxy restarts, calls should eventually start succeeding.
+	if err := proxy.Start(t, ctx, args...); err != nil {
+		t.Fatal(err)
+	}
+
+	retries := 0
+	for {
+		if result, err := makeCall(); err == nil {
+			if result != expected {
+				t.Errorf("Got (%v, %v) want (%v, nil)", result, err, expected)
+			}
+			break
+		} else {
+			retries++
+			if retries > 10 {
+				t.Fatalf("Failed after 10 attempts: err: %s", err)
+			}
+		}
+	}
+}
+
+func verifyMount(t *testing.T, ctx *context.T, ns namespace.T, name string) []string {
+	me, err := ns.Resolve(ctx, name)
+	if err != nil {
+		t.Errorf("%s not found in mounttable", name)
+		return nil
+	}
+	return me.Names()
+}
+
+func verifyMountMissing(t *testing.T, ctx *context.T, ns namespace.T, name string) {
+	if me, err := ns.Resolve(ctx, name); err == nil {
+		names := me.Names()
+		t.Errorf("%s not supposed to be found in mounttable; got %d servers instead: %v", name, len(names), names)
+	}
+}
diff --git a/runtime/internal/rpc/test/retry_test.go b/runtime/internal/rpc/test/retry_test.go
new file mode 100644
index 0000000..0b28efb
--- /dev/null
+++ b/runtime/internal/rpc/test/retry_test.go
@@ -0,0 +1,86 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+	"testing"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/options"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+)
+
+var errRetryThis = verror.Register("retry_test.retryThis", verror.RetryBackoff, "retryable error")
+
+type retryServer struct {
+	called int // number of times TryAgain has been called
+}
+
+func (s *retryServer) TryAgain(ctx *context.T, _ rpc.ServerCall) error {
+	// On the second and subsequent calls, return success.
+	if s.called > 0 {
+		s.called++
+		return nil
+	}
+	s.called++
+	// otherwise, return a verror with action code RetryBackoff.
+	return verror.New(errRetryThis, ctx)
+}
+
+func TestRetryCall(t *testing.T) {
+	ctx, shutdown := v23.Init()
+	defer shutdown()
+
+	// Start the server.
+	server, err := v23.NewServer(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	eps, err := server.Listen(v23.GetListenSpec(ctx))
+	if err != nil {
+		t.Fatal(err)
+	}
+	rs := retryServer{}
+	if err = server.Serve("", &rs, security.AllowEveryone()); err != nil {
+		t.Fatal(err)
+	}
+	name := eps[0].Name()
+
+	client := v23.GetClient(ctx)
+	// A traditional client.StartCall/call.Finish sequence should fail at
+	// call.Finish since the error returned by the server won't be retried.
+	call, err := client.StartCall(ctx, name, "TryAgain", nil)
+	if err != nil {
+		t.Errorf("client.StartCall failed: %v", err)
+	}
+	if err := call.Finish(); err == nil {
+		t.Errorf("call.Finish should have failed")
+	}
+	rs.called = 0
+
+	// A call to client.Call should succeed because the error returned by the
+	// server should be retried.
+	if err := client.Call(ctx, name, "TryAgain", nil, nil); err != nil {
+		t.Errorf("client.Call failed: %v", err)
+	}
+	// Ensure that client.Call retried the call exactly once.
+	if rs.called != 2 {
+		t.Errorf("retryServer should have been called twice, instead called %d times", rs.called)
+	}
+	rs.called = 0
+
+	// The options.NoRetry option should be honored by client.Call, so the following
+	// call should fail.
+	if err := client.Call(ctx, name, "TryAgain", nil, nil, options.NoRetry{}); err == nil {
+		t.Errorf("client.Call(..., options.NoRetry{}) should have failed")
+	}
+	// Ensure that client.Call did not retry the call.
+	if rs.called != 1 {
+		t.Errorf("retryServer have been called once, instead called %d times", rs.called)
+	}
+}
diff --git a/runtime/internal/rpc/test/signature_test.go b/runtime/internal/rpc/test/signature_test.go
new file mode 100644
index 0000000..480bd77
--- /dev/null
+++ b/runtime/internal/rpc/test/signature_test.go
@@ -0,0 +1,160 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+	"fmt"
+	"reflect"
+	"testing"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/rpc"
+	"v.io/v23/rpc/reserved"
+	"v.io/v23/vdl"
+	"v.io/v23/vdlroot/signature"
+
+	_ "v.io/x/ref/runtime/factories/generic"
+	"v.io/x/ref/test"
+)
+
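+// startSigServer starts a server exporting sig and returns its endpoint
+// together with a cleanup function that stops the server.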
+func startSigServer(ctx *context.T, sig sigImpl) (string, func(), error) {
+	server, err := v23.NewServer(ctx)
+	if err != nil {
+		return "", nil, fmt.Errorf("failed to start sig server: %v", err)
+	}
+	eps, err := server.Listen(v23.GetListenSpec(ctx))
+	if err != nil {
+		return "", nil, fmt.Errorf("failed to listen: %v", err)
+	}
+	if err := server.Serve("", sig, nil); err != nil {
+		return "", nil, err
+	}
+	return eps[0].String(), func() { server.Stop() }, nil
+}
+
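+// sigImpl exists only so that its method signatures can be inspected via
+// the signature RPCs; the methods panic because they are never invoked.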
+type sigImpl struct{}
+
+func (sigImpl) NonStreaming0(*context.T, rpc.ServerCall) error                   { panic("X") }
+func (sigImpl) NonStreaming1(*context.T, rpc.ServerCall, string) (int64, error)  { panic("X") }
+func (sigImpl) Streaming0(*context.T, *streamStringBool) error                   { panic("X") }
+func (sigImpl) Streaming1(*context.T, *streamStringBool, int64) (float64, error) { panic("X") }
+
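+// streamStringBool describes a stream that receives strings and sends
+// bools; only its type matters for signature generation, so its methods
+// also panic.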
+type streamStringBool struct{ rpc.StreamServerCall }
+
+func (*streamStringBool) Init(rpc.StreamServerCall) { panic("X") }
+func (*streamStringBool) RecvStream() interface {
+	Advance() bool
+	Value() string
+	Err() error
+} {
+	panic("X")
+}
+func (*streamStringBool) SendStream() interface {
+	Send(_ bool) error
+} {
+	panic("X")
+}
+
+func TestMethodSignature(t *testing.T) {
+	ctx, shutdown := test.InitForTest()
+	defer shutdown()
+	ep, stop, err := startSigServer(ctx, sigImpl{})
+	if err != nil {
+		t.Fatalf("startSigServer: %v", err)
+	}
+	defer stop()
+	name := naming.JoinAddressName(ep, "")
+
+	tests := []struct {
+		Method string
+		Want   signature.Method
+	}{
+		{"NonStreaming0", signature.Method{
+			Name: "NonStreaming0",
+		}},
+		{"NonStreaming1", signature.Method{
+			Name:    "NonStreaming1",
+			InArgs:  []signature.Arg{{Type: vdl.StringType}},
+			OutArgs: []signature.Arg{{Type: vdl.Int64Type}},
+		}},
+		{"Streaming0", signature.Method{
+			Name:      "Streaming0",
+			InStream:  &signature.Arg{Type: vdl.StringType},
+			OutStream: &signature.Arg{Type: vdl.BoolType},
+		}},
+		{"Streaming1", signature.Method{
+			Name:      "Streaming1",
+			InArgs:    []signature.Arg{{Type: vdl.Int64Type}},
+			OutArgs:   []signature.Arg{{Type: vdl.Float64Type}},
+			InStream:  &signature.Arg{Type: vdl.StringType},
+			OutStream: &signature.Arg{Type: vdl.BoolType},
+		}},
+	}
+	for _, test := range tests {
+		sig, err := reserved.MethodSignature(ctx, name, test.Method)
+		if err != nil {
+			t.Errorf("call failed: %v", err)
+		}
+		if got, want := sig, test.Want; !reflect.DeepEqual(got, want) {
+			t.Errorf("%s got %#v, want %#v", test.Method, got, want)
+		}
+	}
+}
+
+func TestSignature(t *testing.T) {
+	ctx, shutdown := test.InitForTest()
+	defer shutdown()
+	ep, stop, err := startSigServer(ctx, sigImpl{})
+	if err != nil {
+		t.Fatalf("startSigServer: %v", err)
+	}
+	defer stop()
+	name := naming.JoinAddressName(ep, "")
+	sig, err := reserved.Signature(ctx, name)
+	if err != nil {
+		t.Errorf("call failed: %v", err)
+	}
+	if got, want := len(sig), 2; got != want {
+		t.Fatalf("got sig %#v len %d, want %d", sig, got, want)
+	}
+	// Check expected methods.
+	methods := signature.Interface{
+		Doc: "The empty interface contains methods not attached to any interface.",
+		Methods: []signature.Method{
+			{
+				Name: "NonStreaming0",
+			},
+			{
+				Name:    "NonStreaming1",
+				InArgs:  []signature.Arg{{Type: vdl.StringType}},
+				OutArgs: []signature.Arg{{Type: vdl.Int64Type}},
+			},
+			{
+				Name:      "Streaming0",
+				InStream:  &signature.Arg{Type: vdl.StringType},
+				OutStream: &signature.Arg{Type: vdl.BoolType},
+			},
+			{
+				Name:      "Streaming1",
+				InArgs:    []signature.Arg{{Type: vdl.Int64Type}},
+				OutArgs:   []signature.Arg{{Type: vdl.Float64Type}},
+				InStream:  &signature.Arg{Type: vdl.StringType},
+				OutStream: &signature.Arg{Type: vdl.BoolType},
+			},
+		},
+	}
+	if got, want := sig[0], methods; !reflect.DeepEqual(got, want) {
+		t.Errorf("got sig[0] %#v, want %#v", got, want)
+	}
+	// Check reserved methods.
+	if got, want := sig[1].Name, "__Reserved"; got != want {
+		t.Errorf("got sig[1].Name %q, want %q", got, want)
+	}
+	if got, want := signature.MethodNames(sig[1:2]), []string{"__Glob", "__MethodSignature", "__Signature"}; !reflect.DeepEqual(got, want) {
+		t.Fatalf("got sig[1] methods %v, want %v", got, want)
+	}
+}
diff --git a/runtime/internal/rpc/test/simple_test.go b/runtime/internal/rpc/test/simple_test.go
new file mode 100644
index 0000000..1ce55f8
--- /dev/null
+++ b/runtime/internal/rpc/test/simple_test.go
@@ -0,0 +1,131 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+	"io"
+	"testing"
+	"time"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/rpc"
+)
+
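+// simple implements the test service exercised by the tests below.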
+type simple struct {
+	done <-chan struct{}
+}
+
+func (s *simple) Sleep(*context.T, rpc.ServerCall) error {
+	select {
+	case <-s.done:
+	case <-time.After(time.Hour):
+	}
+	return nil
+}
+
+func (s *simple) Ping(_ *context.T, _ rpc.ServerCall) (string, error) {
+	return "pong", nil
+}
+
+func (s *simple) Echo(_ *context.T, _ rpc.ServerCall, arg string) (string, error) {
+	return arg, nil
+}
+
+func (s *simple) Source(_ *context.T, call rpc.StreamServerCall, start int) error {
+	i := start
+	backoff := 25 * time.Millisecond
+	for {
+		select {
+		case <-s.done:
+			return nil
+		case <-time.After(backoff):
+			call.Send(i)
+			i++
+		}
+		backoff *= 2
+	}
+}
+
+func (s *simple) Sink(_ *context.T, call rpc.StreamServerCall) (int, error) {
+	i := 0
+	for {
+		if err := call.Recv(&i); err != nil {
+			if err == io.EOF {
+				return i, nil
+			}
+			return 0, err
+		}
+	}
+}
+
+func (s *simple) Inc(_ *context.T, call rpc.StreamServerCall, inc int) (int, error) {
+	i := 0
+	for {
+		if err := call.Recv(&i); err != nil {
+			if err == io.EOF {
+				return i, nil
+			}
+			return 0, err
+		}
+		call.Send(i + inc)
+	}
+}
+
+func TestSimpleRPC(t *testing.T) {
+	ctx, shutdown := newCtx()
+	defer shutdown()
+	name, fn := initServer(t, ctx)
+	defer fn()
+
+	client := v23.GetClient(ctx)
+	call, err := client.StartCall(ctx, name, "Ping", nil)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	response := ""
+	if err := call.Finish(&response); err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	if got, want := response, "pong"; got != want {
+		t.Fatalf("got %q, want %q", got, want)
+	}
+}
+
+func TestSimpleStreaming(t *testing.T) {
+	ctx, shutdown := newCtx()
+	defer shutdown()
+	name, fn := initServer(t, ctx)
+	defer fn()
+
+	inc := 1
+	call, err := v23.GetClient(ctx).StartCall(ctx, name, "Inc", []interface{}{inc})
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+
+	want := 10
+	for i := 0; i <= want; i++ {
+		if err := call.Send(i); err != nil {
+			t.Fatalf("unexpected error: %s", err)
+		}
+		got := -1
+		if err = call.Recv(&got); err != nil {
+			t.Fatalf("unexpected error: %s", err)
+		}
+		if want := i + inc; got != want {
+			t.Fatalf("got %d, want %d", got, want)
+		}
+	}
+	call.CloseSend()
+	final := -1
+	err = call.Finish(&final)
+	if err != nil {
+		t.Errorf("unexpected error: %#v", err)
+	}
+	if got := final; got != want {
+		t.Fatalf("got %d, want %d", got, want)
+	}
+}
diff --git a/runtime/internal/rpc/test/v23_internal_test.go b/runtime/internal/rpc/test/v23_internal_test.go
new file mode 100644
index 0000000..0862cd2
--- /dev/null
+++ b/runtime/internal/rpc/test/v23_internal_test.go
@@ -0,0 +1,34 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated via go generate.
+// DO NOT UPDATE MANUALLY
+package test
+
+import "fmt"
+import "testing"
+import "os"
+
+import "v.io/x/ref/test"
+import "v.io/x/ref/test/modules"
+
+func init() {
+	modules.RegisterChild("rootMT", ``, rootMT)
+	modules.RegisterChild("echoServer", ``, echoServer)
+	modules.RegisterChild("echoClient", ``, echoClient)
+	modules.RegisterChild("childPing", ``, childPing)
+	modules.RegisterChild("proxyServer", ``, proxyServer)
+}
+
+func TestMain(m *testing.M) {
+	test.Init()
+	if modules.IsModulesChildProcess() {
+		if err := modules.Dispatch(); err != nil {
+			fmt.Fprintf(os.Stderr, "modules.Dispatch failed: %v\n", err)
+			os.Exit(1)
+		}
+		return
+	}
+	os.Exit(m.Run())
+}
diff --git a/runtime/internal/rpc/testutil_test.go b/runtime/internal/rpc/testutil_test.go
new file mode 100644
index 0000000..4f298dc
--- /dev/null
+++ b/runtime/internal/rpc/testutil_test.go
@@ -0,0 +1,124 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+	"reflect"
+	"testing"
+	"time"
+
+	"v.io/v23/vtrace"
+	"v.io/x/ref/lib/flags"
+	ivtrace "v.io/x/ref/runtime/internal/vtrace"
+	"v.io/x/ref/test"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/security"
+	"v.io/v23/vdl"
+	"v.io/v23/verror"
+)
+
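+// makeResultPtrs returns freshly allocated pointers whose element types
+// match the values in ins, suitable for passing to call.Finish.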
+func makeResultPtrs(ins []interface{}) []interface{} {
+	outs := make([]interface{}, len(ins))
+	for ix, in := range ins {
+		typ := reflect.TypeOf(in)
+		if typ == nil {
+			// Nil indicates interface{}.
+			var empty interface{}
+			typ = reflect.ValueOf(&empty).Elem().Type()
+		}
+		outs[ix] = reflect.New(typ).Interface()
+	}
+	return outs
+}
+
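+// checkResultPtrs checks the values pointed to by gotptrs against want,
+// comparing verror.E values by ID rather than by deep equality.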
+func checkResultPtrs(t *testing.T, name string, gotptrs, want []interface{}) {
+	for ix, res := range gotptrs {
+		got := reflect.ValueOf(res).Elem().Interface()
+		want := want[ix]
+		switch g := got.(type) {
+		case verror.E:
+			w, ok := want.(verror.E)
+			// Don't use reflect.DeepEqual on verror.E values since they
+			// contain stack PCs that differ from run to run.
+			if !ok {
+				t.Errorf("%s result %d got type %T, want %T", name, ix, g, w)
+			}
+			if verror.ErrorID(g) != w.ID {
+				t.Errorf("%s result %d got %v, want %v", name, ix, g, w)
+			}
+		default:
+			if !reflect.DeepEqual(got, want) {
+				t.Errorf("%s result %d got %v, want %v", name, ix, got, want)
+			}
+		}
+
+	}
+}
+
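+// mkCaveat panics on error so that caveats can be constructed inline in
+// test tables.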
+func mkCaveat(cav security.Caveat, err error) security.Caveat {
+	if err != nil {
+		panic(err)
+	}
+	return cav
+}
+
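+// bless creates a blessing of blessed's public key by blesser, panicking
+// on failure; callers that pass no caveats get security.UnconstrainedUse.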
+func bless(blesser, blessed security.Principal, extension string, caveats ...security.Caveat) security.Blessings {
+	if len(caveats) == 0 {
+		caveats = append(caveats, security.UnconstrainedUse())
+	}
+	b, err := blesser.Bless(blessed.PublicKey(), blesser.BlessingStore().Default(), extension, caveats[0], caveats[1:]...)
+	if err != nil {
+		panic(err)
+	}
+	return b
+}
+
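+// initForTest initializes a test context with vtrace set up and a fresh
+// trace installed.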
+func initForTest() (*context.T, v23.Shutdown) {
+	ctx, shutdown := test.InitForTest()
+	ctx, err := ivtrace.Init(ctx, flags.VtraceFlags{})
+	if err != nil {
+		panic(err)
+	}
+	ctx, _ = vtrace.WithNewTrace(ctx)
+	return ctx, shutdown
+}
+
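+// mkThirdPartyCaveat wraps c in a third-party caveat discharged by
+// discharger at location, panicking on failure.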
+func mkThirdPartyCaveat(discharger security.PublicKey, location string, c security.Caveat) security.Caveat {
+	tpc, err := security.NewPublicKeyCaveat(discharger, location, security.ThirdPartyRequirements{}, c)
+	if err != nil {
+		panic(err)
+	}
+	return tpc
+}
+
+// mockCall implements security.Call
+type mockCall struct {
+	p        security.Principal
+	l, r     security.Blessings
+	m        string
+	ld, rd   security.Discharge
+	lep, rep naming.Endpoint
+}
+
+var _ security.Call = (*mockCall)(nil)
+
+func (c *mockCall) Timestamp() (t time.Time) { return }
+func (c *mockCall) Method() string           { return c.m }
+func (c *mockCall) MethodTags() []*vdl.Value { return nil }
+func (c *mockCall) Suffix() string           { return "" }
+func (c *mockCall) LocalDischarges() map[string]security.Discharge {
+	return map[string]security.Discharge{c.ld.ID(): c.ld}
+}
+func (c *mockCall) RemoteDischarges() map[string]security.Discharge {
+	return map[string]security.Discharge{c.rd.ID(): c.rd}
+}
+func (c *mockCall) LocalEndpoint() naming.Endpoint      { return c.lep }
+func (c *mockCall) RemoteEndpoint() naming.Endpoint     { return c.rep }
+func (c *mockCall) LocalPrincipal() security.Principal  { return c.p }
+func (c *mockCall) LocalBlessings() security.Blessings  { return c.l }
+func (c *mockCall) RemoteBlessings() security.Blessings { return c.r }
diff --git a/runtime/internal/rpc/timer.go b/runtime/internal/rpc/timer.go
new file mode 100644
index 0000000..986124d
--- /dev/null
+++ b/runtime/internal/rpc/timer.go
@@ -0,0 +1,36 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+	"time"
+)
+
+// timer is a replacement for time.Timer; the only difference is that its
+// channel has type chan struct{} and is closed when the timer expires,
+// which some callers rely on.
+type timer struct {
+	base *time.Timer
+	C    <-chan struct{}
+}
+
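+// newTimer returns a timer whose channel C is closed after duration d.
+// Because a closed channel never blocks, any number of goroutines can
+// select on C and all of them will observe expiry.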
+func newTimer(d time.Duration) *timer {
+	c := make(chan struct{})
+	base := time.AfterFunc(d, func() {
+		close(c)
+	})
+	return &timer{
+		base: base,
+		C:    c,
+	}
+}
+
+func (t *timer) Stop() bool {
+	return t.base.Stop()
+}
+
+func (t *timer) Reset(d time.Duration) bool {
+	return t.base.Reset(d)
+}
diff --git a/runtime/internal/rpc/timer_test.go b/runtime/internal/rpc/timer_test.go
new file mode 100644
index 0000000..60f9866
--- /dev/null
+++ b/runtime/internal/rpc/timer_test.go
@@ -0,0 +1,35 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rpc
+
+import (
+	"testing"
+	"time"
+)
+
+func TestTimer(t *testing.T) {
+	test := newTimer(time.Millisecond)
+	if _, ok := <-test.C; ok {
+		t.Errorf("Expected the channel to be closed.")
+	}
+
+	// Test resetting.
+	test = newTimer(time.Hour)
+	if reset := test.Reset(time.Millisecond); !reset {
+		t.Errorf("Expected to successfully reset.")
+	}
+	if _, ok := <-test.C; ok {
+		t.Errorf("Expected the channel to be closed.")
+	}
+
+	// Test stop.
+	test = newTimer(100 * time.Millisecond)
+	test.Stop()
+	select {
+	case <-test.C:
+		t.Errorf("the test timer should have been stopped.")
+	case <-time.After(200 * time.Millisecond):
+	}
+}
diff --git a/runtime/internal/rpc/v23_test.go b/runtime/internal/rpc/v23_test.go
new file mode 100644
index 0000000..b874fc4
--- /dev/null
+++ b/runtime/internal/rpc/v23_test.go
@@ -0,0 +1,30 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated via go generate.
+// DO NOT UPDATE MANUALLY
+package rpc_test
+
+import "fmt"
+import "testing"
+import "os"
+
+import "v.io/x/ref/test"
+import "v.io/x/ref/test/modules"
+
+func init() {
+	modules.RegisterChild("rootMountTable", ``, rootMountTable)
+}
+
+func TestMain(m *testing.M) {
+	test.Init()
+	if modules.IsModulesChildProcess() {
+		if err := modules.Dispatch(); err != nil {
+			fmt.Fprintf(os.Stderr, "modules.Dispatch failed: %v\n", err)
+			os.Exit(1)
+		}
+		return
+	}
+	os.Exit(m.Run())
+}
diff --git a/runtime/internal/rpc/version/version.go b/runtime/internal/rpc/version/version.go
new file mode 100644
index 0000000..76c8ce9
--- /dev/null
+++ b/runtime/internal/rpc/version/version.go
@@ -0,0 +1,100 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package version
+
+import (
+	"fmt"
+
+	"v.io/v23/rpc/version"
+	"v.io/v23/verror"
+	"v.io/x/lib/metadata"
+)
+
+// Range represents a range of RPC versions.
+type Range struct {
+	Min, Max version.RPCVersion
+}
+
+// SupportedRange represents the range of protocol versions supported by this
+// implementation.
+//
+// Max is incremented whenever we make a protocol change that's not both forward
+// and backward compatible.
+//
+// Min is incremented whenever we want to remove support for old protocol
+// versions.
+var SupportedRange = &Range{Min: version.RPCVersion9, Max: version.RPCVersion10}
+
+func init() {
+	metadata.Insert("v23.RPCVersionMax", fmt.Sprint(SupportedRange.Max))
+	metadata.Insert("v23.RPCVersionMin", fmt.Sprint(SupportedRange.Min))
+}
+
+const pkgPath = "v.io/x/ref/runtime/internal/rpc/version"
+
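+// reg registers a new non-retryable error with an ID rooted at pkgPath.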
+func reg(id, msg string) verror.IDAction {
+	return verror.Register(verror.ID(pkgPath+id), verror.NoRetry, msg)
+}
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	ErrNoCompatibleVersion = reg(".errNoCompatibleVersionErr", "no compatible RPC version available{:3} not in range {4}..{5}")
+	ErrUnknownVersion      = reg(".errUnknownVersionErr", "there was not enough information to determine a version")
+	ErrDeprecatedVersion   = reg(".errDeprecatedVersionError", "some of the provided version information is deprecated")
+)
+
+// IsVersionError returns true if err is a versioning related error.
+func IsVersionError(err error) bool {
+	id := verror.ErrorID(err)
+	return id == ErrNoCompatibleVersion.ID || id == ErrUnknownVersion.ID || id == ErrDeprecatedVersion.ID
+}
+
+// intersectRanges finds the intersection of the version ranges supported
+// by two endpoints.  If one endpoint reports an unknown version, it is
+// assumed to span the same extent as the other endpoint; if both
+// endpoints report an unknown value for the same bound, an error is
+// produced.
+// For example:
+//   a == (2, 4) and b == (Unknown, Unknown), intersect(a,b) == (2, 4)
+//   a == (2, Unknown) and b == (3, 4), intersect(a,b) == (3, 4)
+func intersectRanges(amin, amax, bmin, bmax version.RPCVersion) (min, max version.RPCVersion, err error) {
+	// TODO(mattr): this may be incorrect.  Ensure that when we talk to a server who
+	// advertises (5,8) and we support (5, 9) but v5 EPs (so we may get d, d here) that
+	// we use v8 and don't send setupVC.
+	d := version.DeprecatedRPCVersion
+	if amin == d || amax == d || bmin == d || bmax == d {
+		return d, d, verror.New(ErrDeprecatedVersion, nil)
+	}
+
+	u := version.UnknownRPCVersion
+
+	min = amin
+	if min == u || (bmin != u && bmin > min) {
+		min = bmin
+	}
+	max = amax
+	if max == u || (bmax != u && bmax < max) {
+		max = bmax
+	}
+
+	if min == u || max == u {
+		err = verror.New(ErrUnknownVersion, nil)
+	} else if min > max {
+		err = verror.New(ErrNoCompatibleVersion, nil, u, min, max)
+	}
+	return
+}
+
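+// Intersect returns the intersection of the two ranges, or an error if
+// they are incompatible or insufficiently specified.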
+func (r1 *Range) Intersect(r2 *Range) (*Range, error) {
+	min, max, err := intersectRanges(r1.Min, r1.Max, r2.Min, r2.Max)
+	if err != nil {
+		return nil, err
+	}
+	r := &Range{Min: min, Max: max}
+	return r, nil
+}
diff --git a/runtime/internal/rpc/version/version_test.go b/runtime/internal/rpc/version/version_test.go
new file mode 100644
index 0000000..652baf8
--- /dev/null
+++ b/runtime/internal/rpc/version/version_test.go
@@ -0,0 +1,51 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package version
+
+import (
+	"testing"
+
+	"v.io/v23/rpc/version"
+	"v.io/v23/verror"
+)
+
+func TestIntersect(t *testing.T) {
+	type testCase struct {
+		localMin, localMax   version.RPCVersion
+		remoteMin, remoteMax version.RPCVersion
+		expected             *Range
+		expectedErr          verror.IDAction
+	}
+	tests := []testCase{
+		{0, 0, 0, 0, nil, ErrUnknownVersion},
+		{0, 2, 3, 4, nil, ErrNoCompatibleVersion},
+		{3, 4, 0, 2, nil, ErrNoCompatibleVersion},
+		{0, 6, 6, 7, nil, ErrNoCompatibleVersion},
+		{0, 3, 3, 5, &Range{3, 3}, verror.ErrUnknown},
+		{0, 3, 2, 4, &Range{2, 3}, verror.ErrUnknown},
+		{2, 4, 2, 4, &Range{2, 4}, verror.ErrUnknown},
+		{4, 4, 4, 4, &Range{4, 4}, verror.ErrUnknown},
+	}
+	for _, tc := range tests {
+		local := &Range{
+			Min: tc.localMin,
+			Max: tc.localMax,
+		}
+		remote := &Range{
+			Min: tc.remoteMin,
+			Max: tc.remoteMax,
+		}
+		intersection, err := local.Intersect(remote)
+
+		if (tc.expected != nil && *tc.expected != *intersection) ||
+			(err != nil && verror.ErrorID(err) != tc.expectedErr.ID) {
+			t.Errorf("Unexpected result for local: %v, remote: %v.  Got (%v, %v) wanted (%v, %v)",
+				local, remote, intersection, err,
+				tc.expected, tc.expectedErr)
+		}
+		if err != nil {
+			t.Logf("%s", err)
+		}
+	}
+}