ref: Change "profiles" directory to "runtime"

As per vanadium/issues#470

MultiPart: 4/10

Change-Id: I3ac47c1d9c514f7bbe1c80507c2b3db7fcd9f6d4
diff --git a/runtime/internal/rpc/stream/benchmark/RESULTS.txt b/runtime/internal/rpc/stream/benchmark/RESULTS.txt
new file mode 100644
index 0000000..905f1db
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/RESULTS.txt
@@ -0,0 +1,82 @@
+Date: 01/30/2015
+Platform: Intel(R) Xeon(R) CPU E5-2689 0 @ 2.60GHz,  66114888KB Memory
+
+$ v23 go test -bench=. -cpu=1 -benchtime=5s \
+  v.io/x/ref/runtime/internal/rpc/stream/benchmark
+
+Benchmark_dial_VIF	  500000	     14292 ns/op
+--- Histogram (unit: s)
+	Count: 500000  Min: 4  Max: 16455  Avg: 13.58
+	------------------------------------------------------------
+	[    4,     5)  139232   27.8%   27.8%  ###
+	[    5,     6)  257818   51.6%   79.4%  #####
+	[    6,     9)   92644   18.5%   97.9%  ##
+	[    9,    15)    5963    1.2%   99.1%
+	[   15,    28)    3162    0.6%   99.8%
+	[   28,    53)     171    0.0%   99.8%
+	[   53,   101)      67    0.0%   99.8%
+	[  101,   193)       1    0.0%   99.8%
+	[  193,   370)       0    0.0%   99.8%
+	[  370,   708)       0    0.0%   99.8%
+	[  708,  1354)      57    0.0%   99.8%
+	[ 1354,  2589)     152    0.0%   99.9%
+	[ 2589,  4949)     393    0.1%   99.9%
+	[ 4949,  9457)     322    0.1%  100.0%
+	[ 9457, 18069)      18    0.0%  100.0%
+	[18069, 34520)       0    0.0%  100.0%
+	[34520,   inf)       0    0.0%  100.0%
+Benchmark_dial_VIF_TLS	     500	  12594281 ns/op
+--- Histogram (unit: ms)
+	Count: 500  Min: 12  Max: 14  Avg: 12.31
+	------------------------------------------------------------
+	[ 12,  13)  352   70.4%   70.4%  #######
+	[ 13,  14)  141   28.2%   98.6%  ###
+	[ 14, inf)    7    1.4%  100.0%
+Benchmark_dial_VC_TLS	     500	  16116072 ns/op
+--- Histogram (unit: ms)
+	Count: 500  Min: 15  Max: 22  Avg: 15.53
+	------------------------------------------------------------
+	[ 15,  16)  313   62.6%   62.6%  ######
+	[ 16,  17)  121   24.2%   86.8%  ##
+	[ 17,  18)   60   12.0%   98.8%  #
+	[ 18,  19)    3    0.6%   99.4%
+	[ 19,  20)    2    0.4%   99.8%
+	[ 20,  21)    0    0.0%   99.8%
+	[ 21,  23)    1    0.2%  100.0%
+	[ 23, inf)    0    0.0%  100.0%
+Benchmark_throughput_TCP_1Conn	 1000000	      9197 ns/op	5566.89 MB/s
+Benchmark_throughput_TCP_2Conns	 1000000	      9083 ns/op	5636.56 MB/s
+Benchmark_throughput_TCP_4Conns	 1000000	      9855 ns/op	5194.81 MB/s
+Benchmark_throughput_TCP_8Conns	  500000	     12541 ns/op	4082.43 MB/s
+Benchmark_throughput_WS_1Conn	   30000	    206804 ns/op	 247.58 MB/s
+Benchmark_throughput_WS_2Conns	   30000	    211842 ns/op	 241.69 MB/s
+Benchmark_throughput_WS_4Conns	   30000	    209994 ns/op	 243.82 MB/s
+Benchmark_throughput_WS_8Conns	   30000	    217110 ns/op	 235.83 MB/s
+Benchmark_throughput_WSH_TCP_1Conn	 1000000	      9322 ns/op	5491.85 MB/s
+Benchmark_throughput_WSH_TCP_2Conns	 1000000	      9370 ns/op	5463.77 MB/s
+Benchmark_throughput_WSH_TCP_4Conns	 1000000	      9466 ns/op	5408.50 MB/s
+Benchmark_throughput_WSH_TCP_8Conns	  500000	     12526 ns/op	4087.22 MB/s
+Benchmark_throughput_WSH_WS_1Conn	   30000	    207833 ns/op	 246.35 MB/s
+Benchmark_throughput_WSH_WS_2Conns	   30000	    208567 ns/op	 245.48 MB/s
+Benchmark_throughput_WSH_WS_4Conns	   30000	    211562 ns/op	 242.01 MB/s
+Benchmark_throughput_WSH_WS_8Conns	   30000	    216454 ns/op	 236.54 MB/s
+Benchmark_throughput_Pipe_1Conn	  500000	     20169 ns/op	2538.54 MB/s
+Benchmark_throughput_Pipe_2Conns	  500000	     19935 ns/op	2568.29 MB/s
+Benchmark_throughput_Pipe_4Conns	  300000	     19893 ns/op	2573.76 MB/s
+Benchmark_throughput_Pipe_8Conns	 1000000	     20235 ns/op	2530.22 MB/s
+Benchmark_throughput_Flow_1VIF_1VC_1Flow	  300000	     28014 ns/op	1827.66 MB/s
+Benchmark_throughput_Flow_1VIF_1VC_2Flow	  300000	     27495 ns/op	1862.09 MB/s
+Benchmark_throughput_Flow_1VIF_1VC_8Flow	  200000	     35584 ns/op	1438.84 MB/s
+Benchmark_throughput_Flow_1VIF_2VC_2Flow	  300000	     27665 ns/op	1850.66 MB/s
+Benchmark_throughput_Flow_1VIF_2VC_8Flow	  200000	     34974 ns/op	1463.94 MB/s
+Benchmark_throughput_Flow_2VIF_4VC_8Flow	  200000	     37642 ns/op	1360.15 MB/s
+Benchmark_throughput_TLS_1Conn	   20000	    415149 ns/op	 123.33 MB/s
+Benchmark_throughput_TLS_2Conns	   20000	    416008 ns/op	 123.07 MB/s
+Benchmark_throughput_TLS_4Conns	   20000	    421083 ns/op	 121.59 MB/s
+Benchmark_throughput_TLS_8Conns	   20000	    423079 ns/op	 121.02 MB/s
+Benchmark_throughput_Flow_1VIF_1VC_1FlowTLS	   20000	    466212 ns/op	 109.82 MB/s
+Benchmark_throughput_Flow_1VIF_1VC_2FlowTLS	   20000	    466104 ns/op	 109.85 MB/s
+Benchmark_throughput_Flow_1VIF_1VC_8FlowTLS	   20000	    476604 ns/op	 107.43 MB/s
+Benchmark_throughput_Flow_1VIF_2VC_2FlowTLS	   20000	    466818 ns/op	 109.68 MB/s
+Benchmark_throughput_Flow_1VIF_2VC_8FlowTLS	   20000	    477094 ns/op	 107.32 MB/s
+Benchmark_throughput_Flow_2VIF_4VC_8FlowTLS	   20000	    476370 ns/op	 107.48 MB/s
diff --git a/runtime/internal/rpc/stream/benchmark/benchmark_test.go b/runtime/internal/rpc/stream/benchmark/benchmark_test.go
new file mode 100644
index 0000000..2102047
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/benchmark_test.go
@@ -0,0 +1,21 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package benchmark
+
+import (
+	"os"
+	"testing"
+
+	"v.io/x/ref/test/benchmark"
+)
+
+// A single empty test to avoid:
+// testing: warning: no tests to run
+// from showing up when running benchmarks in this package via "go test"
+func TestNoOp(t *testing.T) {}
+
+func TestMain(m *testing.M) {
+	os.Exit(benchmark.RunTestMain(m))
+}
diff --git a/runtime/internal/rpc/stream/benchmark/dial_test.go b/runtime/internal/rpc/stream/benchmark/dial_test.go
new file mode 100644
index 0000000..789df5b
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/dial_test.go
@@ -0,0 +1,14 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package benchmark
+
+import "testing"
+
+func Benchmark_dial_VIF_NoSecurity(b *testing.B) { benchmarkDialVIF(b, securityNone) }
+func Benchmark_dial_VIF(b *testing.B)            { benchmarkDialVIF(b, securityDefault) }
+
+// Note: We don't benchmark SecurityNone VC Dial for now since it doesn't wait ack
+// from the server after sending "OpenVC".
+func Benchmark_dial_VC(b *testing.B) { benchmarkDialVC(b, securityDefault) }
diff --git a/runtime/internal/rpc/stream/benchmark/dial_vc.go b/runtime/internal/rpc/stream/benchmark/dial_vc.go
new file mode 100644
index 0000000..fd7ed2d
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/dial_vc.go
@@ -0,0 +1,70 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package benchmark
+
+import (
+	"testing"
+	"time"
+
+	_ "v.io/x/ref/runtime/factories/static"
+	"v.io/x/ref/runtime/internal/rpc/stream/manager"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+	"v.io/x/ref/test/benchmark"
+	"v.io/x/ref/test/testutil"
+
+	"v.io/v23/naming"
+	"v.io/v23/options"
+	"v.io/v23/security"
+)
+
+// benchmarkDialVC measures VC creation time over the underlying VIF.
+func benchmarkDialVC(b *testing.B, mode options.SecurityLevel) {
+	stats := benchmark.AddStats(b, 16)
+
+	server := manager.InternalNew(naming.FixedRoutingID(0x5))
+	client := manager.InternalNew(naming.FixedRoutingID(0xc))
+	var (
+		principal security.Principal
+		blessings security.Blessings
+	)
+	if mode == securityDefault {
+		principal = testutil.NewPrincipal("test")
+		blessings = principal.BlessingStore().Default()
+	}
+
+	_, ep, err := server.Listen("tcp", "127.0.0.1:0", principal, blessings)
+
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	// Create one VC to prevent the underlying VIF from being closed.
+	_, err = client.Dial(ep, principal, vc.IdleTimeout{0})
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	b.ResetTimer() // Exclude setup time from measurement.
+
+	for i := 0; i < b.N; i++ {
+		b.StartTimer()
+		start := time.Now()
+
+		VC, err := client.Dial(ep, principal)
+		if err != nil {
+			b.Fatal(err)
+		}
+
+		duration := time.Since(start)
+		b.StopTimer()
+
+		stats.Add(duration)
+
+		VC.Close(nil)
+	}
+
+	client.Shutdown()
+	server.Shutdown()
+}
diff --git a/runtime/internal/rpc/stream/benchmark/dial_vif.go b/runtime/internal/rpc/stream/benchmark/dial_vif.go
new file mode 100644
index 0000000..2278839
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/dial_vif.go
@@ -0,0 +1,60 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package benchmark
+
+import (
+	"net"
+	"testing"
+	"time"
+
+	"v.io/x/ref/runtime/internal/rpc/stream/vif"
+	"v.io/x/ref/test/benchmark"
+	"v.io/x/ref/test/testutil"
+
+	"v.io/v23/naming"
+	"v.io/v23/options"
+	"v.io/v23/security"
+)
+
+// benchmarkDialVIF measures VIF creation time over the underlying net connection.
+func benchmarkDialVIF(b *testing.B, mode options.SecurityLevel) {
+	stats := benchmark.AddStats(b, 16)
+	var (
+		principal security.Principal
+		blessings security.Blessings
+	)
+	if mode == securityDefault {
+		principal = testutil.NewPrincipal("test")
+		blessings = principal.BlessingStore().Default()
+	}
+
+	b.ResetTimer() // Exclude setup time from measurement.
+
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		nc, ns := net.Pipe()
+
+		server, err := vif.InternalNewAcceptedVIF(ns, naming.FixedRoutingID(0x5), principal, blessings, nil, nil)
+		if err != nil {
+			b.Fatal(err)
+		}
+
+		b.StartTimer()
+		start := time.Now()
+
+		client, err := vif.InternalNewDialedVIF(nc, naming.FixedRoutingID(0xc), principal, nil, nil)
+		if err != nil {
+			b.Fatal(err)
+		}
+
+		duration := time.Since(start)
+		b.StopTimer()
+
+		stats.Add(duration)
+
+		client.Close()
+		server.Close()
+	}
+}
diff --git a/runtime/internal/rpc/stream/benchmark/doc.go b/runtime/internal/rpc/stream/benchmark/doc.go
new file mode 100644
index 0000000..ba50140
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/doc.go
@@ -0,0 +1,11 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package benchmark implements some benchmarks for comparing the
+// v.io/v23/x/ref/profiles/internal/rpc/stream implementation with raw TCP
+// connections and/or pipes.
+//
+// Sample usage:
+//	go test v.io/v23/x/ref/profiles/internal/rpc/stream/benchmark -bench=.
+package benchmark
diff --git a/runtime/internal/rpc/stream/benchmark/throughput.go b/runtime/internal/rpc/stream/benchmark/throughput.go
new file mode 100644
index 0000000..f8a2819
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/throughput.go
@@ -0,0 +1,73 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package benchmark
+
+import (
+	"crypto/rand"
+	"io"
+	"sync"
+	"testing"
+)
+
+const (
+	// Number of bytes to read/write
+	throughputBlockSize = 50 << 10 // 50 KB
+)
+
+type throughputTester struct {
+	b       *testing.B
+	writers []io.WriteCloser
+	readers []io.ReadCloser
+
+	data    []byte
+	pending sync.WaitGroup
+}
+
+func (t *throughputTester) Run() {
+	t.pending.Add(len(t.writers) + len(t.readers))
+	iters := t.b.N / len(t.writers)
+	t.data = make([]byte, throughputBlockSize)
+	if n, err := rand.Read(t.data); n != len(t.data) || err != nil {
+		t.b.Fatalf("Failed to fill write buffer with data: (%d, %v)", n, err)
+	}
+	t.b.ResetTimer()
+	for _, w := range t.writers {
+		go t.writeLoop(w, iters)
+	}
+	for _, r := range t.readers {
+		go t.readLoop(r)
+	}
+	t.pending.Wait()
+}
+
+func (t *throughputTester) writeLoop(w io.WriteCloser, N int) {
+	defer t.pending.Done()
+	defer w.Close()
+	size := len(t.data)
+	t.b.SetBytes(int64(size))
+	for i := 0; i < N; i++ {
+		if n, err := w.Write(t.data); err != nil || n != size {
+			t.b.Fatalf("Write error: %v", err)
+			return
+		}
+	}
+}
+
+func (t *throughputTester) readLoop(r io.ReadCloser) {
+	defer t.pending.Done()
+	defer r.Close()
+	var buf [throughputBlockSize]byte
+	total := 0
+	for {
+		n, err := r.Read(buf[:])
+		if err != nil {
+			if err != io.EOF {
+				t.b.Errorf("Read returned (%d, %v)", n, err)
+			}
+			break
+		}
+		total += n
+	}
+}
diff --git a/runtime/internal/rpc/stream/benchmark/throughput_flow.go b/runtime/internal/rpc/stream/benchmark/throughput_flow.go
new file mode 100644
index 0000000..605ebbe
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/throughput_flow.go
@@ -0,0 +1,129 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package benchmark
+
+import (
+	"io"
+	"testing"
+
+	"v.io/x/ref/runtime/internal/rpc/stream/manager"
+
+	"v.io/v23/naming"
+	"v.io/v23/options"
+	"v.io/v23/security"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/test/testutil"
+)
+
+const (
+	// Shorthands
+	securityNone    = options.SecurityNone
+	securityDefault = options.SecurityConfidential
+)
+
+type listener struct {
+	ln stream.Listener
+	ep naming.Endpoint
+}
+
+// createListeners returns N (stream.Listener, naming.Endpoint) pairs, such
+// that calling stream.Manager.Dial to each of the endpoints will end up
+// creating a new VIF.
+func createListeners(mode options.SecurityLevel, m stream.Manager, N int) (servers []listener, err error) {
+	for i := 0; i < N; i++ {
+		var (
+			l         listener
+			principal security.Principal
+			blessings security.Blessings
+		)
+		if mode == securityDefault {
+			principal = testutil.NewPrincipal("test")
+			blessings = principal.BlessingStore().Default()
+		}
+		if l.ln, l.ep, err = m.Listen("tcp", "127.0.0.1:0", principal, blessings); err != nil {
+			return
+		}
+		servers = append(servers, l)
+	}
+	return
+}
+
+func benchmarkFlow(b *testing.B, mode options.SecurityLevel, nVIFs, nVCsPerVIF, nFlowsPerVC int) {
+	client := manager.InternalNew(naming.FixedRoutingID(0xcccccccc))
+	server := manager.InternalNew(naming.FixedRoutingID(0x55555555))
+
+	var principal security.Principal
+	if mode == securityDefault {
+		principal = testutil.NewPrincipal("test")
+	}
+
+	lns, err := createListeners(mode, server, nVIFs)
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	nFlows := nVIFs * nVCsPerVIF * nFlowsPerVC
+	rchan := make(chan io.ReadCloser, nFlows)
+	wchan := make(chan io.WriteCloser, nFlows)
+
+	b.ResetTimer()
+
+	go func() {
+		defer close(wchan)
+		for i := 0; i < nVIFs; i++ {
+			ep := lns[i].ep
+			for j := 0; j < nVCsPerVIF; j++ {
+				vc, err := client.Dial(ep, principal)
+				if err != nil {
+					b.Error(err)
+					return
+				}
+				for k := 0; k < nFlowsPerVC; k++ {
+					flow, err := vc.Connect()
+					if err != nil {
+						b.Error(err)
+						return
+					}
+					// Flows are "Accepted" by the remote
+					// end only on the first Write.
+					if _, err := flow.Write([]byte("hello")); err != nil {
+						b.Error(err)
+						return
+					}
+					wchan <- flow
+				}
+			}
+		}
+	}()
+
+	go func() {
+		defer close(rchan)
+		for i := 0; i < nVIFs; i++ {
+			ln := lns[i].ln
+			nFlowsPerVIF := nVCsPerVIF * nFlowsPerVC
+			for j := 0; j < nFlowsPerVIF; j++ {
+				flow, err := ln.Accept()
+				if err != nil {
+					b.Error(err)
+					return
+				}
+				rchan <- flow
+			}
+		}
+	}()
+
+	var readers []io.ReadCloser
+	for r := range rchan {
+		readers = append(readers, r)
+	}
+	var writers []io.WriteCloser
+	for w := range wchan {
+		writers = append(writers, w)
+	}
+	if b.Failed() {
+		return
+	}
+	(&throughputTester{b: b, readers: readers, writers: writers}).Run()
+}
diff --git a/runtime/internal/rpc/stream/benchmark/throughput_pipe.go b/runtime/internal/rpc/stream/benchmark/throughput_pipe.go
new file mode 100644
index 0000000..0a3d348
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/throughput_pipe.go
@@ -0,0 +1,32 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package benchmark
+
+import (
+	"io"
+	"os"
+	"testing"
+)
+
+// benchmarkPipe runs a benchmark to test the throughput when nPipes each are
+// reading and writing.
+func benchmarkPipe(b *testing.B, nPipes int) {
+	readers := make([]io.ReadCloser, nPipes)
+	writers := make([]io.WriteCloser, nPipes)
+	var err error
+	for i := 0; i < nPipes; i++ {
+		// Use os.Pipe and NOT net.Pipe.
+		// The latter (based on io.Pipe) doesn't really do any I/O
+		// on the Write, it just manipulates pointers (the slice)
+		// and thus isn't useful when benchmarking since that
+		// implementation is excessively cache friendly.
+		readers[i], writers[i], err = os.Pipe()
+		if err != nil {
+			b.Fatalf("Failed to create pipe #%d: %v", i, err)
+			return
+		}
+	}
+	(&throughputTester{b: b, readers: readers, writers: writers}).Run()
+}
diff --git a/runtime/internal/rpc/stream/benchmark/throughput_tcp.go b/runtime/internal/rpc/stream/benchmark/throughput_tcp.go
new file mode 100644
index 0000000..a4b54e1
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/throughput_tcp.go
@@ -0,0 +1,62 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package benchmark
+
+import (
+	"io"
+	"net"
+	"testing"
+)
+
+// benchmarkTCP sets up nConns TCP connections and measures throughput.
+func benchmarkTCP(b *testing.B, nConns int) {
+	rchan := make(chan net.Conn, nConns)
+	wchan := make(chan net.Conn, nConns)
+	ln, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		b.Fatalf("net.Listen failed: %v", err)
+		return
+	}
+	defer ln.Close()
+	// One goroutine to dial nConns connections.
+	go func() {
+		for i := 0; i < nConns; i++ {
+			conn, err := net.Dial("tcp", ln.Addr().String())
+			if err != nil {
+				b.Fatalf("net.Dial(%q, %q) failed: %v", "tcp", ln.Addr(), err)
+				wchan <- nil
+				return
+			}
+			wchan <- conn
+		}
+		close(wchan)
+	}()
+	// One goroutine to accept nConns connections.
+	go func() {
+		for i := 0; i < nConns; i++ {
+			conn, err := ln.Accept()
+			if err != nil {
+				b.Fatalf("Accept failed: %v", err)
+				rchan <- nil
+				return
+			}
+			rchan <- conn
+		}
+		close(rchan)
+	}()
+
+	var readers []io.ReadCloser
+	var writers []io.WriteCloser
+	for r := range rchan {
+		readers = append(readers, r)
+	}
+	for w := range wchan {
+		writers = append(writers, w)
+	}
+	if b.Failed() {
+		return
+	}
+	(&throughputTester{b: b, readers: readers, writers: writers}).Run()
+}
diff --git a/runtime/internal/rpc/stream/benchmark/throughput_test.go b/runtime/internal/rpc/stream/benchmark/throughput_test.go
new file mode 100644
index 0000000..39a0c46
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/throughput_test.go
@@ -0,0 +1,79 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package benchmark
+
+import "testing"
+
+func Benchmark_throughput_TCP_1Conn(b *testing.B)  { benchmarkTCP(b, 1) }
+func Benchmark_throughput_TCP_2Conns(b *testing.B) { benchmarkTCP(b, 2) }
+func Benchmark_throughput_TCP_4Conns(b *testing.B) { benchmarkTCP(b, 4) }
+func Benchmark_throughput_TCP_8Conns(b *testing.B) { benchmarkTCP(b, 8) }
+
+func Benchmark_throughput_WS_1Conn(b *testing.B)  { benchmarkWS(b, 1) }
+func Benchmark_throughput_WS_2Conns(b *testing.B) { benchmarkWS(b, 2) }
+func Benchmark_throughput_WS_4Conns(b *testing.B) { benchmarkWS(b, 4) }
+func Benchmark_throughput_WS_8Conns(b *testing.B) { benchmarkWS(b, 8) }
+
+func Benchmark_throughput_WSH_TCP_1Conn(b *testing.B)  { benchmarkWSH(b, "tcp", 1) }
+func Benchmark_throughput_WSH_TCP_2Conns(b *testing.B) { benchmarkWSH(b, "tcp", 2) }
+func Benchmark_throughput_WSH_TCP_4Conns(b *testing.B) { benchmarkWSH(b, "tcp", 4) }
+func Benchmark_throughput_WSH_TCP_8Conns(b *testing.B) { benchmarkWSH(b, "tcp", 8) }
+
+func Benchmark_throughput_WSH_WS_1Conn(b *testing.B)  { benchmarkWSH(b, "ws", 1) }
+func Benchmark_throughput_WSH_WS_2Conns(b *testing.B) { benchmarkWSH(b, "ws", 2) }
+func Benchmark_throughput_WSH_WS_4Conns(b *testing.B) { benchmarkWSH(b, "ws", 4) }
+func Benchmark_throughput_WSH_WS_8Conns(b *testing.B) { benchmarkWSH(b, "ws", 8) }
+
+func Benchmark_throughput_Pipe_1Conn(b *testing.B)  { benchmarkPipe(b, 1) }
+func Benchmark_throughput_Pipe_2Conns(b *testing.B) { benchmarkPipe(b, 2) }
+func Benchmark_throughput_Pipe_4Conns(b *testing.B) { benchmarkPipe(b, 4) }
+func Benchmark_throughput_Pipe_8Conns(b *testing.B) { benchmarkPipe(b, 8) }
+
+func Benchmark_throughput_Flow_1VIF_1VC_1Flow_NoSecurity(b *testing.B) {
+	benchmarkFlow(b, securityNone, 1, 1, 1)
+}
+func Benchmark_throughput_Flow_1VIF_1VC_2Flow_NoSecurity(b *testing.B) {
+	benchmarkFlow(b, securityNone, 1, 1, 2)
+}
+func Benchmark_throughput_Flow_1VIF_1VC_8Flow_NoSecurity(b *testing.B) {
+	benchmarkFlow(b, securityNone, 1, 1, 8)
+}
+
+func Benchmark_throughput_Flow_1VIF_2VC_2Flow_NoSecurity(b *testing.B) {
+	benchmarkFlow(b, securityNone, 1, 2, 1)
+}
+func Benchmark_throughput_Flow_1VIF_2VC_8Flow_NoSecurity(b *testing.B) {
+	benchmarkFlow(b, securityNone, 1, 2, 4)
+}
+
+func Benchmark_throughput_Flow_2VIF_4VC_8Flow_NoSecurity(b *testing.B) {
+	benchmarkFlow(b, securityNone, 2, 2, 2)
+}
+
+func Benchmark_throughput_TLS_1Conn(b *testing.B)  { benchmarkTLS(b, 1) }
+func Benchmark_throughput_TLS_2Conns(b *testing.B) { benchmarkTLS(b, 2) }
+func Benchmark_throughput_TLS_4Conns(b *testing.B) { benchmarkTLS(b, 4) }
+func Benchmark_throughput_TLS_8Conns(b *testing.B) { benchmarkTLS(b, 8) }
+
+func Benchmark_throughput_Flow_1VIF_1VC_1Flow(b *testing.B) {
+	benchmarkFlow(b, securityDefault, 1, 1, 1)
+}
+func Benchmark_throughput_Flow_1VIF_1VC_2Flow(b *testing.B) {
+	benchmarkFlow(b, securityDefault, 1, 1, 2)
+}
+func Benchmark_throughput_Flow_1VIF_1VC_8Flow(b *testing.B) {
+	benchmarkFlow(b, securityDefault, 1, 1, 8)
+}
+
+func Benchmark_throughput_Flow_1VIF_2VC_2Flow(b *testing.B) {
+	benchmarkFlow(b, securityDefault, 1, 2, 1)
+}
+func Benchmark_throughput_Flow_1VIF_2VC_8Flow(b *testing.B) {
+	benchmarkFlow(b, securityDefault, 1, 2, 4)
+}
+
+func Benchmark_throughput_Flow_2VIF_4VC_8Flow(b *testing.B) {
+	benchmarkFlow(b, securityDefault, 2, 2, 2)
+}
diff --git a/runtime/internal/rpc/stream/benchmark/throughput_tls.go b/runtime/internal/rpc/stream/benchmark/throughput_tls.go
new file mode 100644
index 0000000..db4e96a
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/throughput_tls.go
@@ -0,0 +1,68 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package benchmark
+
+import (
+	"crypto/tls"
+	"io"
+	"net"
+	"testing"
+
+	"v.io/x/ref/runtime/internal/rpc/stream/crypto"
+)
+
+func benchmarkTLS(b *testing.B, nConns int) {
+	rchan := make(chan *tls.Conn, nConns)
+	wchan := make(chan *tls.Conn, nConns)
+	ln, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		b.Fatalf("net.Listen failed: %v", err)
+		return
+	}
+
+	defer ln.Close()
+	// One goroutine to dial nConns connections.
+	var tlsConfig tls.Config
+	tlsConfig.InsecureSkipVerify = true
+	go func() {
+		for i := 0; i < nConns; i++ {
+			conn, err := tls.Dial("tcp", ln.Addr().String(), &tlsConfig)
+			if err != nil {
+				b.Fatalf("tls.Dial(%q, %q) failed: %v", "tcp", ln.Addr(), err)
+				wchan <- nil
+				return
+			}
+			wchan <- conn
+		}
+		close(wchan)
+	}()
+	// One goroutine to accept nConns connections.
+	go func() {
+		for i := 0; i < nConns; i++ {
+			conn, err := ln.Accept()
+			if err != nil {
+				b.Fatalf("Accept failed: %v", err)
+				rchan <- nil
+			}
+			server := tls.Server(conn, crypto.ServerTLSConfig())
+			server.Handshake()
+			rchan <- server
+		}
+		close(rchan)
+	}()
+
+	var readers []io.ReadCloser
+	var writers []io.WriteCloser
+	for r := range rchan {
+		readers = append(readers, r)
+	}
+	for w := range wchan {
+		writers = append(writers, w)
+	}
+	if b.Failed() {
+		return
+	}
+	(&throughputTester{b: b, readers: readers, writers: writers}).Run()
+}
diff --git a/runtime/internal/rpc/stream/benchmark/throughput_ws.go b/runtime/internal/rpc/stream/benchmark/throughput_ws.go
new file mode 100644
index 0000000..07babce
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/throughput_ws.go
@@ -0,0 +1,64 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package benchmark
+
+import (
+	"io"
+	"net"
+	"testing"
+
+	"v.io/x/ref/runtime/internal/lib/websocket"
+)
+
+// benchmarkWS sets up nConns WS connections and measures throughput.
+func benchmarkWS(b *testing.B, nConns int) {
+	rchan := make(chan net.Conn, nConns)
+	wchan := make(chan net.Conn, nConns)
+	ln, err := websocket.Listener("ws", "127.0.0.1:0")
+	if err != nil {
+		b.Fatalf("websocket.Listener failed: %v", err)
+		return
+	}
+	defer ln.Close()
+	// One goroutine to dial nConns connections.
+	go func() {
+		for i := 0; i < nConns; i++ {
+			conn, err := websocket.Dial("ws", ln.Addr().String(), 0)
+			if err != nil {
+				b.Fatalf("websocket.Dial(%q, %q) failed: %v", "ws", ln.Addr(), err)
+				wchan <- nil
+				return
+			}
+			wchan <- conn
+		}
+		close(wchan)
+	}()
+	// One goroutine to accept nConns connections.
+	go func() {
+		for i := 0; i < nConns; i++ {
+			conn, err := ln.Accept()
+			if err != nil {
+				b.Fatalf("Accept failed: %v", err)
+				rchan <- nil
+				return
+			}
+			rchan <- conn
+		}
+		close(rchan)
+	}()
+
+	var readers []io.ReadCloser
+	var writers []io.WriteCloser
+	for r := range rchan {
+		readers = append(readers, r)
+	}
+	for w := range wchan {
+		writers = append(writers, w)
+	}
+	if b.Failed() {
+		return
+	}
+	(&throughputTester{b: b, readers: readers, writers: writers}).Run()
+}
diff --git a/runtime/internal/rpc/stream/benchmark/throughput_wsh.go b/runtime/internal/rpc/stream/benchmark/throughput_wsh.go
new file mode 100644
index 0000000..f160184
--- /dev/null
+++ b/runtime/internal/rpc/stream/benchmark/throughput_wsh.go
@@ -0,0 +1,79 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package benchmark
+
+import (
+	"io"
+	"net"
+	"testing"
+
+	"v.io/x/ref/runtime/internal/lib/websocket"
+)
+
+// benchmarkWS sets up nConns WS connections and measures throughput.
+func benchmarkWSH(b *testing.B, protocol string, nConns int) {
+	rchan := make(chan net.Conn, nConns)
+	wchan := make(chan net.Conn, nConns)
+	ln, err := websocket.HybridListener("wsh", "127.0.0.1:0")
+	if err != nil {
+		b.Fatalf("websocket.HybridListener failed: %v", err)
+		return
+	}
+	defer ln.Close()
+	// One goroutine to dial nConns connections.
+	go func() {
+		for i := 0; i < nConns; i++ {
+			var conn net.Conn
+			var err error
+			switch protocol {
+			case "tcp":
+				conn, err = net.Dial("tcp", ln.Addr().String())
+			case "ws":
+				conn, err = websocket.Dial("ws", ln.Addr().String(), 0)
+			}
+			if err != nil {
+				b.Fatalf("Dial(%q, %q) failed: %v", protocol, ln.Addr(), err)
+				wchan <- nil
+				return
+			}
+			if protocol == "tcp" {
+				// Write a dummy byte since wsh waits for magic byte forever.
+				conn.Write([]byte("."))
+			}
+			wchan <- conn
+		}
+		close(wchan)
+	}()
+	// One goroutine to accept nConns connections.
+	go func() {
+		for i := 0; i < nConns; i++ {
+			conn, err := ln.Accept()
+			if err != nil {
+				b.Fatalf("Accept failed: %v", err)
+				rchan <- nil
+				return
+			}
+			if protocol == "tcp" {
+				// Read a dummy byte.
+				conn.Read(make([]byte, 1))
+			}
+			rchan <- conn
+		}
+		close(rchan)
+	}()
+
+	var readers []io.ReadCloser
+	var writers []io.WriteCloser
+	for r := range rchan {
+		readers = append(readers, r)
+	}
+	for w := range wchan {
+		writers = append(writers, w)
+	}
+	if b.Failed() {
+		return
+	}
+	(&throughputTester{b: b, readers: readers, writers: writers}).Run()
+}
diff --git a/runtime/internal/rpc/stream/crypto/box.go b/runtime/internal/rpc/stream/crypto/box.go
new file mode 100644
index 0000000..eec1bc9
--- /dev/null
+++ b/runtime/internal/rpc/stream/crypto/box.go
@@ -0,0 +1,121 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package crypto
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/binary"
+	"fmt"
+
+	"golang.org/x/crypto/nacl/box"
+
+	"v.io/v23/verror"
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+)
+
+const pkgPath = "v.io/x/ref/runtime/internal/rpc/stream/crypto"
+
+func reg(id, msg string) verror.IDAction {
+	return verror.Register(verror.ID(pkgPath+id), verror.NoRetry, msg)
+}
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errCipherTextTooShort     = reg(".errCipherTextTooShort", "ciphertext too short")
+	errRemotePublicKey        = reg(".errRemotePublicKey", "failed to get remote public key")
+	errMessageAuthFailed      = reg(".errMessageAuthFailed", "message authentication failed")
+	errUnrecognizedCipherText = reg(".errUnrecognizedCipherText", "CipherSuite {3} is not recognized. Must use one that uses Diffie-Hellman as the key exchange algorithm")
+)
+
+type boxcrypter struct {
+	alloc                 *iobuf.Allocator
+	sharedKey             [32]byte
+	sortedPubkeys         []byte
+	writeNonce, readNonce uint64
+}
+
+type BoxKey [32]byte
+
+// BoxKeyExchanger is used to communicate public keys between the two ends of
+// communication.
+type BoxKeyExchanger func(myPublicKey *BoxKey) (theirPublicKey *BoxKey, err error)
+
+// NewBoxCrypter uses Curve25519, XSalsa20 and Poly1305 to encrypt and
+// authenticate messages (as defined in http://nacl.cr.yp.to/box.html).
+// An ephemeral Diffie-Hellman key exchange is performed per invocation
+// of NewBoxCrypter; the data sent has forward security with connection
+// granularity. One round-trip is required before any data can be sent.
+// BoxCrypter does NOT do anything to verify the identity of the peer.
+func NewBoxCrypter(exchange BoxKeyExchanger, pool *iobuf.Pool) (Crypter, error) {
+	pk, sk, err := box.GenerateKey(rand.Reader)
+	if err != nil {
+		return nil, err
+	}
+
+	theirPK, err := exchange((*BoxKey)(pk))
+	if err != nil {
+		return nil, err
+	}
+	if theirPK == nil {
+		return nil, verror.New(errRemotePublicKey, nil)
+	}
+
+	ret := &boxcrypter{alloc: iobuf.NewAllocator(pool, 0)}
+
+	box.Precompute(&ret.sharedKey, (*[32]byte)(theirPK), sk)
+	// Distinct messages between the same {sender, receiver} set are required
+	// to have distinct nonces. The server with the lexicographically smaller
+	// public key will be sending messages with 0, 2, 4... and the other will
+	// be using 1, 3, 5...
+	if bytes.Compare(pk[:], theirPK[:]) < 0 {
+		ret.writeNonce, ret.readNonce = 0, 1
+		ret.sortedPubkeys = append(pk[:], theirPK[:]...)
+	} else {
+		ret.writeNonce, ret.readNonce = 1, 0
+		ret.sortedPubkeys = append(theirPK[:], pk[:]...)
+	}
+	return ret, nil
+}
+
+func (c *boxcrypter) Encrypt(src *iobuf.Slice) (*iobuf.Slice, error) {
+	defer src.Release()
+	var nonce [24]byte
+	binary.LittleEndian.PutUint64(nonce[:], c.writeNonce)
+	c.writeNonce += 2
+	ret := c.alloc.Alloc(uint(len(src.Contents) + box.Overhead))
+	ret.Contents = box.SealAfterPrecomputation(ret.Contents[:0], src.Contents, &nonce, &c.sharedKey)
+	return ret, nil
+}
+
+func (c *boxcrypter) Decrypt(src *iobuf.Slice) (*iobuf.Slice, error) {
+	defer src.Release()
+	var nonce [24]byte
+	binary.LittleEndian.PutUint64(nonce[:], c.readNonce)
+	c.readNonce += 2
+	retLen := len(src.Contents) - box.Overhead
+	if retLen < 0 {
+		return nil, verror.New(stream.ErrNetwork, nil, verror.New(errCipherTextTooShort, nil))
+	}
+	ret := c.alloc.Alloc(uint(retLen))
+	var ok bool
+	ret.Contents, ok = box.OpenAfterPrecomputation(ret.Contents[:0], src.Contents, &nonce, &c.sharedKey)
+	if !ok {
+		return nil, verror.New(stream.ErrSecurity, nil, verror.New(errMessageAuthFailed, nil))
+	}
+	return ret, nil
+}
+
+func (c *boxcrypter) ChannelBinding() []byte {
+	return c.sortedPubkeys
+}
+
+func (c *boxcrypter) String() string {
+	return fmt.Sprintf("%#v", *c)
+}
diff --git a/runtime/internal/rpc/stream/crypto/box_cipher.go b/runtime/internal/rpc/stream/crypto/box_cipher.go
new file mode 100644
index 0000000..f28757d
--- /dev/null
+++ b/runtime/internal/rpc/stream/crypto/box_cipher.go
@@ -0,0 +1,147 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package crypto
+
+import (
+	"encoding/binary"
+
+	"golang.org/x/crypto/nacl/box"
+	"golang.org/x/crypto/salsa20/salsa"
+
+	"v.io/v23/verror"
+
+	"v.io/x/ref/runtime/internal/rpc/stream"
+)
+
+// cbox implements a ControlCipher using go.crypto/nacl/box.
+type cbox struct {
+	sharedKey [32]byte
+	enc       cboxStream
+	dec       cboxStream
+}
+
+// cboxStream implements one stream of encryption or decryption.
+type cboxStream struct {
+	counter uint64
+	nonce   [24]byte
+	// buffer is a temporary used for in-place crypto.
+	buffer []byte
+}
+
+const (
+	cboxMACSize = box.Overhead
+)
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errMessageTooShort = reg(".errMessageTooShort", "control cipher: message is too short")
+)
+
+func (s *cboxStream) alloc(n int) []byte {
+	if len(s.buffer) < n {
+		s.buffer = make([]byte, n*2)
+	}
+	return s.buffer[:0]
+}
+
+func (s *cboxStream) currentNonce() *[24]byte {
+	return &s.nonce
+}
+
+func (s *cboxStream) advanceNonce() {
+	s.counter++
+	binary.LittleEndian.PutUint64(s.nonce[:], s.counter)
+}
+
+// setupXSalsa20 produces a sub-key and Salsa20 counter given a nonce and key.
+//
+// See, "Extending the Salsa20 nonce," by Daniel J. Bernsten, Department of
+// Computer Science, University of Illinois at Chicago, 2008.
+func setupXSalsa20(subKey *[32]byte, counter *[16]byte, nonce *[24]byte, key *[32]byte) {
+	// We use XSalsa20 for encryption so first we need to generate a
+	// key and nonce with HSalsa20.
+	var hNonce [16]byte
+	copy(hNonce[:], nonce[:])
+	salsa.HSalsa20(subKey, &hNonce, key, &salsa.Sigma)
+
+	// The final 8 bytes of the original nonce form the new nonce.
+	copy(counter[:], nonce[16:])
+}
+
+// NewControlCipher returns a ControlCipher for RPC versions greater than 6.
+func NewControlCipherRPC6(peersPublicKey, privateKey *BoxKey, isServer bool) ControlCipher {
+	var c cbox
+	box.Precompute(&c.sharedKey, (*[32]byte)(peersPublicKey), (*[32]byte)(privateKey))
+	// The stream is full-duplex, and we want the directions to use different
+	// nonces, so we set bit (1 << 64) in the server-to-client stream, and leave
+	// it cleared in the client-to-server stream.  advanceNone touches only the
+	// first 8 bytes, so this change is permanent for the duration of the
+	// stream.
+	if isServer {
+		c.enc.nonce[8] = 1
+	} else {
+		c.dec.nonce[8] = 1
+	}
+	return &c
+}
+
+// MACSize implements the ControlCipher method.
+func (c *cbox) MACSize() int {
+	return cboxMACSize
+}
+
+// Seal implements the ControlCipher method.
+func (c *cbox) Seal(data []byte) error {
+	n := len(data)
+	if n < cboxMACSize {
+		return verror.New(stream.ErrNetwork, nil, verror.New(errMessageTooShort, nil))
+	}
+	tmp := c.enc.alloc(n)
+	nonce := c.enc.currentNonce()
+	out := box.SealAfterPrecomputation(tmp, data[:n-cboxMACSize], nonce, &c.sharedKey)
+	c.enc.advanceNonce()
+	copy(data, out)
+	return nil
+}
+
+// Open implements the ControlCipher method.
+func (c *cbox) Open(data []byte) bool {
+	n := len(data)
+	if n < cboxMACSize {
+		return false
+	}
+	tmp := c.dec.alloc(n - cboxMACSize)
+	nonce := c.dec.currentNonce()
+	out, ok := box.OpenAfterPrecomputation(tmp, data, nonce, &c.sharedKey)
+	if !ok {
+		return false
+	}
+	c.dec.advanceNonce()
+	copy(data, out)
+	return true
+}
+
+// Encrypt implements the ControlCipher method.
+func (c *cbox) Encrypt(data []byte) {
+	var subKey [32]byte
+	var counter [16]byte
+	nonce := c.enc.currentNonce()
+	setupXSalsa20(&subKey, &counter, nonce, &c.sharedKey)
+	c.enc.advanceNonce()
+	salsa.XORKeyStream(data, data, &counter, &subKey)
+}
+
+// Decrypt implements the ControlCipher method.
+func (c *cbox) Decrypt(data []byte) {
+	var subKey [32]byte
+	var counter [16]byte
+	nonce := c.dec.currentNonce()
+	setupXSalsa20(&subKey, &counter, nonce, &c.sharedKey)
+	c.dec.advanceNonce()
+	salsa.XORKeyStream(data, data, &counter, &subKey)
+}
diff --git a/runtime/internal/rpc/stream/crypto/box_cipher_test.go b/runtime/internal/rpc/stream/crypto/box_cipher_test.go
new file mode 100644
index 0000000..6727f7d
--- /dev/null
+++ b/runtime/internal/rpc/stream/crypto/box_cipher_test.go
@@ -0,0 +1,133 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package crypto_test
+
+import (
+	"bytes"
+	"crypto/rand"
+	"testing"
+
+	"golang.org/x/crypto/nacl/box"
+
+	"v.io/x/ref/runtime/internal/rpc/stream/crypto"
+)
+
+// Add space for a MAC.
+func newMessage(s string) []byte {
+	b := make([]byte, len(s)+box.Overhead)
+	copy(b, []byte(s))
+	return b
+}
+
+func TestOpenSeal(t *testing.T) {
+	pub1, pvt1, err := box.GenerateKey(rand.Reader)
+	if err != nil {
+		t.Fatalf("can't generate key")
+	}
+	pub2, pvt2, err := box.GenerateKey(rand.Reader)
+	if err != nil {
+		t.Fatalf("can't generate key")
+	}
+	c1 := crypto.NewControlCipherRPC6((*crypto.BoxKey)(pub2), (*crypto.BoxKey)(pvt1), true)
+	c2 := crypto.NewControlCipherRPC6((*crypto.BoxKey)(pub1), (*crypto.BoxKey)(pvt2), false)
+
+	msg1 := newMessage("hello")
+	if err := c1.Seal(msg1); err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	msg2 := newMessage("world")
+	if err := c1.Seal(msg2); err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	msg3 := newMessage("hello")
+	if err := c1.Seal(msg3); err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	if bytes.Compare(msg1, msg3) == 0 {
+		t.Errorf("message should differ: %q, %q", msg1, msg3)
+	}
+
+	// Check that the client does not encrypt the same.
+	msg4 := newMessage("hello")
+	if err := c2.Seal(msg4); err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+	if bytes.Compare(msg4, msg1) == 0 {
+		t.Errorf("messages should differ %q vs. %q", msg4, msg1)
+	}
+
+	// Corrupted message should not decrypt.
+	msg1[0] ^= 1
+	if ok := c2.Open(msg1); ok {
+		t.Errorf("expected error")
+	}
+
+	// Fix the message and try again.
+	msg1[0] ^= 1
+	if ok := c2.Open(msg1); !ok {
+		t.Errorf("Open failed")
+	}
+	if bytes.Compare(msg1[:5], []byte("hello")) != 0 {
+		t.Errorf("got %q, expected %q", msg1[:5], "hello")
+	}
+
+	// msg3 should not decrypt.
+	if ok := c2.Open(msg3); ok {
+		t.Errorf("expected error")
+	}
+
+	// Resume.
+	if ok := c2.Open(msg2); !ok {
+		t.Errorf("Open failed")
+	}
+	if bytes.Compare(msg2[:5], []byte("world")) != 0 {
+		t.Errorf("got %q, expected %q", msg2[:5], "world")
+	}
+	if ok := c2.Open(msg3); !ok {
+		t.Errorf("Open failed")
+	}
+	if bytes.Compare(msg3[:5], []byte("hello")) != 0 {
+		t.Errorf("got %q, expected %q", msg3[:5], "hello")
+	}
+}
+
+func TestXORKeyStream(t *testing.T) {
+	pub1, pvt1, err := box.GenerateKey(rand.Reader)
+	if err != nil {
+		t.Fatalf("can't generate key")
+	}
+	pub2, pvt2, err := box.GenerateKey(rand.Reader)
+	if err != nil {
+		t.Fatalf("can't generate key")
+	}
+	c1 := crypto.NewControlCipherRPC6((*crypto.BoxKey)(pub2), (*crypto.BoxKey)(pvt1), true)
+	c2 := crypto.NewControlCipherRPC6((*crypto.BoxKey)(pub1), (*crypto.BoxKey)(pvt2), false)
+
+	msg1 := []byte("hello")
+	msg2 := []byte("world")
+	msg3 := []byte("hello")
+	c1.Encrypt(msg1)
+	c1.Encrypt(msg2)
+	c1.Encrypt(msg3)
+	if bytes.Compare(msg1, msg3) == 0 {
+		t.Errorf("messages should differ: %q, %q", msg1, msg3)
+	}
+
+	c2.Decrypt(msg1)
+	c2.Decrypt(msg2)
+	c2.Decrypt(msg3)
+	s1 := string(msg1)
+	s2 := string(msg2)
+	s3 := string(msg3)
+	if s1 != "hello" {
+		t.Errorf("got %q, expected 'hello'", s1)
+	}
+	if s2 != "world" {
+		t.Errorf("got %q, expected 'world'", s2)
+	}
+	if s3 != "hello" {
+		t.Errorf("got %q, expected 'hello'", s3)
+	}
+}
diff --git a/runtime/internal/rpc/stream/crypto/control_cipher.go b/runtime/internal/rpc/stream/crypto/control_cipher.go
new file mode 100644
index 0000000..ae25daf
--- /dev/null
+++ b/runtime/internal/rpc/stream/crypto/control_cipher.go
@@ -0,0 +1,27 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package crypto
+
+// ControlCipher provides the ciphers and MAC for control channel encryption.
+// Encryption and decryption are performed in place.
+type ControlCipher interface {
+	// MACSize returns the number of bytes in the MAC.
+	MACSize() int
+
+	// Seal replaces the message with an authenticated and encrypted version.
+	// The trailing MACSize bytes of the data are used for the MAC; they are
+	// discarded and overwritten.
+	Seal(data []byte) error
+
+	// Open authenticates and decrypts a box produced by Seal.  The trailing
+	// MACSize bytes are not changed.  Returns true on success.
+	Open(data []byte) bool
+
+	// Encrypt encrypts the data in place.  No MAC is added.
+	Encrypt(data []byte)
+
+	// Decrypt decrypts the data in place.  No MAC is verified.
+	Decrypt(data []byte)
+}
diff --git a/runtime/internal/rpc/stream/crypto/crypto.go b/runtime/internal/rpc/stream/crypto/crypto.go
new file mode 100644
index 0000000..bee1726
--- /dev/null
+++ b/runtime/internal/rpc/stream/crypto/crypto.go
@@ -0,0 +1,39 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package crypto implements encryption and decryption interfaces intended for
+// securing communication over VCs.
+package crypto
+
+import "v.io/x/ref/runtime/internal/lib/iobuf"
+
+type Encrypter interface {
+	// Encrypt encrypts the provided plaintext data and returns the
+	// corresponding ciphertext slice (or nil if an error is returned).
+	//
+	// It always calls Release on plaintext and thus plaintext should not
+	// be used after calling Encrypt.
+	Encrypt(plaintext *iobuf.Slice) (ciphertext *iobuf.Slice, err error)
+}
+
+type Decrypter interface {
+	// Decrypt decrypts the provided ciphertext slice and returns the
+	// corresponding plaintext (or nil if an error is returned).
+	//
+	// It always calls Release on ciphertext and thus ciphertext should not
+	// be used after calling Decrypt.
+	Decrypt(ciphertext *iobuf.Slice) (plaintext *iobuf.Slice, err error)
+}
+
+type Crypter interface {
+	Encrypter
+	Decrypter
+	// ChannelBinding Returns a byte slice that is unique for the the
+	// particular crypter (and the parties between which it is operating).
+	// Having both parties assert out of the band that they are indeed
+	// participating in a connection with that channel binding value is
+	// sufficient to authenticate the data received through the crypter.
+	ChannelBinding() []byte
+	String() string
+}
diff --git a/runtime/internal/rpc/stream/crypto/crypto_test.go b/runtime/internal/rpc/stream/crypto/crypto_test.go
new file mode 100644
index 0000000..fe83db7
--- /dev/null
+++ b/runtime/internal/rpc/stream/crypto/crypto_test.go
@@ -0,0 +1,270 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package crypto
+
+import (
+	"bytes"
+	"crypto/rand"
+	"net"
+	"testing"
+	"testing/quick"
+
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+)
+
+func quickTest(t *testing.T, e Encrypter, d Decrypter) {
+	f := func(plaintext []byte) bool {
+		plainslice := iobuf.NewSlice(plaintext)
+		cipherslice, err := e.Encrypt(plainslice)
+		if err != nil {
+			t.Error(err)
+			return false
+		}
+		plainslice, err = d.Decrypt(cipherslice)
+		if err != nil {
+			t.Error(err)
+			return false
+		}
+		defer plainslice.Release()
+		return bytes.Equal(plainslice.Contents, plaintext)
+	}
+	if err := quick.Check(f, nil); err != nil {
+		t.Error(err)
+	}
+}
+
+func TestNull(t *testing.T) {
+	crypter := NewNullCrypter()
+	quickTest(t, crypter, crypter)
+	crypter.String() // Only to test that String does not crash.
+}
+
+func testSimple(t *testing.T, c1, c2 Crypter) {
+	// Execute String just to check that it does not crash.
+	c1.String()
+	c2.String()
+	if t.Failed() {
+		return
+	}
+	quickTest(t, c1, c2)
+	quickTest(t, c2, c1)
+
+	// Log the byte overhead of encryption, just so that test output has a
+	// record.
+	var overhead [10]int
+	for i := 0; i < len(overhead); i++ {
+		size := 1 << uint(i)
+		slice, err := c1.Encrypt(iobuf.NewSlice(make([]byte, size)))
+		overhead[i] = slice.Size() - size
+		slice.Release()
+		if err != nil {
+			t.Fatalf("%d: %v", i, err)
+		}
+	}
+	t.Logf("Byte overhead of encryption: %v", overhead)
+}
+
+func TestTLS(t *testing.T) {
+	server, client := net.Pipe()
+	c1, c2 := tlsCrypters(t, server, client)
+	testSimple(t, c1, c2)
+}
+
+func TestBox(t *testing.T) {
+	c1, c2 := boxCrypters(t, nil, nil)
+	testSimple(t, c1, c2)
+}
+
+// testChannelBinding attempts to ensure that:
+// (a) ChannelBinding returns the same value for both ends of a Crypter
+// (b) ChannelBindings are unique
+// For (b), we simply test many times and check that no two instances have the same ChannelBinding value.
+// Yes, this test isn't exhaustive. If you have ideas, please share.
+func testChannelBinding(t *testing.T, factory func(testing.TB, net.Conn, net.Conn) (Crypter, Crypter)) {
+	values := make([][]byte, 100)
+	for i := 0; i < len(values); i++ {
+		conn1, conn2 := net.Pipe()
+		c1, c2 := factory(t, conn1, conn2)
+		if !bytes.Equal(c1.ChannelBinding(), c2.ChannelBinding()) {
+			t.Fatalf("Two ends of the crypter ended up with different channel bindings (iteration #%d)", i)
+		}
+		values[i] = c1.ChannelBinding()
+	}
+	for i := 0; i < len(values); i++ {
+		for j := i + 1; j < len(values); j++ {
+			if bytes.Equal(values[i], values[j]) {
+				t.Fatalf("Same ChannelBinding seen on multiple channels (%d and %d)", i, j)
+			}
+		}
+	}
+}
+
+func TestChannelBindingTLS(t *testing.T) { testChannelBinding(t, tlsCrypters) }
+func TestChannelBindingBox(t *testing.T) { testChannelBinding(t, boxCrypters) }
+
+func TestTLSNil(t *testing.T) {
+	conn1, conn2 := net.Pipe()
+	c1, c2 := tlsCrypters(t, conn1, conn2)
+	if t.Failed() {
+		return
+	}
+	cipher, err := c1.Encrypt(iobuf.NewSlice(nil))
+	if err != nil {
+		t.Fatal(err)
+	}
+	plain, err := c2.Decrypt(cipher)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if plain.Size() != 0 {
+		t.Fatalf("Decryption produced non-empty data (%d)", plain.Size())
+	}
+}
+
+func TestTLSFragmentedPlaintext(t *testing.T) {
+	// Form RFC 5246, Section 6.2.1, the maximum length of a TLS record is
+	// 16K (it is represented by a uint16).
+	// http://tools.ietf.org/html/rfc5246#section-6.2.1
+	const dataLen = 16384 + 1
+	conn1, conn2 := net.Pipe()
+	enc, dec := tlsCrypters(t, conn1, conn2)
+	cipher, err := enc.Encrypt(iobuf.NewSlice(make([]byte, dataLen)))
+	if err != nil {
+		t.Fatal(err)
+	}
+	plain, err := dec.Decrypt(cipher)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(plain.Contents, make([]byte, dataLen)) {
+		t.Errorf("Got %d bytes, want %d bytes of zeroes", plain.Size(), dataLen)
+	}
+}
+
+type factory func(t testing.TB, server, client net.Conn) (Crypter, Crypter)
+
+func tlsCrypters(t testing.TB, serverConn, clientConn net.Conn) (Crypter, Crypter) {
+	crypters := make(chan Crypter)
+	go func() {
+		server, err := NewTLSServer(serverConn, serverConn.LocalAddr(), serverConn.RemoteAddr(), iobuf.NewPool(0))
+		if err != nil {
+			t.Fatal(err)
+		}
+		crypters <- server
+	}()
+
+	go func() {
+		client, err := NewTLSClient(clientConn, clientConn.LocalAddr(), clientConn.RemoteAddr(), TLSClientSessionCache{}, iobuf.NewPool(0))
+		if err != nil {
+			t.Fatal(err)
+		}
+		crypters <- client
+	}()
+	c1 := <-crypters
+	c2 := <-crypters
+	return c1, c2
+}
+
+func boxCrypters(t testing.TB, _, _ net.Conn) (Crypter, Crypter) {
+	server, client := make(chan *BoxKey, 1), make(chan *BoxKey, 1)
+	clientExchanger := func(pubKey *BoxKey) (*BoxKey, error) {
+		client <- pubKey
+		return <-server, nil
+	}
+	serverExchanger := func(pubKey *BoxKey) (*BoxKey, error) {
+		server <- pubKey
+		return <-client, nil
+	}
+	crypters := make(chan Crypter)
+	for _, ex := range []BoxKeyExchanger{clientExchanger, serverExchanger} {
+		go func(exchanger BoxKeyExchanger) {
+			crypter, err := NewBoxCrypter(exchanger, iobuf.NewPool(0))
+			if err != nil {
+				t.Fatal(err)
+			}
+			crypters <- crypter
+		}(ex)
+	}
+	return <-crypters, <-crypters
+}
+
+func benchmarkEncrypt(b *testing.B, crypters factory, size int) {
+	plaintext := make([]byte, size)
+	if _, err := rand.Read(plaintext); err != nil {
+		b.Fatal(err)
+	}
+	conn1, conn2 := net.Pipe()
+	defer conn1.Close()
+	defer conn2.Close()
+	e, _ := crypters(b, conn1, conn2)
+	b.SetBytes(int64(size))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		cipher, err := e.Encrypt(iobuf.NewSlice(plaintext))
+		if err != nil {
+			b.Fatal(err)
+		}
+		cipher.Release()
+	}
+}
+
+func BenchmarkTLSEncrypt_1B(b *testing.B)  { benchmarkEncrypt(b, tlsCrypters, 1) }
+func BenchmarkTLSEncrypt_1K(b *testing.B)  { benchmarkEncrypt(b, tlsCrypters, 1<<10) }
+func BenchmarkTLSEncrypt_10K(b *testing.B) { benchmarkEncrypt(b, tlsCrypters, 10<<10) }
+func BenchmarkTLSEncrypt_1M(b *testing.B)  { benchmarkEncrypt(b, tlsCrypters, 1<<20) }
+func BenchmarkTLSEncrypt_5M(b *testing.B)  { benchmarkEncrypt(b, tlsCrypters, 5<<20) }
+
+func BenchmarkBoxEncrypt_1B(b *testing.B)  { benchmarkEncrypt(b, boxCrypters, 1) }
+func BenchmarkBoxEncrypt_1K(b *testing.B)  { benchmarkEncrypt(b, boxCrypters, 1<<10) }
+func BenchmarkBoxEncrypt_10K(b *testing.B) { benchmarkEncrypt(b, boxCrypters, 10<<10) }
+func BenchmarkBoxEncrypt_1M(b *testing.B)  { benchmarkEncrypt(b, boxCrypters, 1<<20) }
+func BenchmarkBoxEncrypt_5M(b *testing.B)  { benchmarkEncrypt(b, boxCrypters, 5<<20) }
+
+func benchmarkRoundTrip(b *testing.B, crypters factory, size int) {
+	plaintext := make([]byte, size)
+	if _, err := rand.Read(plaintext); err != nil {
+		b.Fatal(err)
+	}
+	conn1, conn2 := net.Pipe()
+	defer conn1.Close()
+	defer conn2.Close()
+	e, d := crypters(b, conn1, conn2)
+	b.SetBytes(int64(size))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		cipherslice, err := e.Encrypt(iobuf.NewSlice(plaintext))
+		if err != nil {
+			b.Fatal(err)
+		}
+		plainslice, err := d.Decrypt(cipherslice)
+		if err != nil {
+			b.Fatal(err)
+		}
+		plainslice.Release()
+	}
+}
+func BenchmarkTLSRoundTrip_1B(b *testing.B)  { benchmarkRoundTrip(b, tlsCrypters, 1) }
+func BenchmarkTLSRoundTrip_1K(b *testing.B)  { benchmarkRoundTrip(b, tlsCrypters, 1<<10) }
+func BenchmarkTLSRoundTrip_10K(b *testing.B) { benchmarkRoundTrip(b, tlsCrypters, 10<<10) }
+func BenchmarkTLSRoundTrip_1M(b *testing.B)  { benchmarkRoundTrip(b, tlsCrypters, 1<<20) }
+func BenchmarkTLSRoundTrip_5M(b *testing.B)  { benchmarkRoundTrip(b, tlsCrypters, 5<<20) }
+
+func BenchmarkBoxRoundTrip_1B(b *testing.B)  { benchmarkRoundTrip(b, boxCrypters, 1) }
+func BenchmarkBoxRoundTrip_1K(b *testing.B)  { benchmarkRoundTrip(b, boxCrypters, 1<<10) }
+func BenchmarkBoxRoundTrip_10K(b *testing.B) { benchmarkRoundTrip(b, boxCrypters, 10<<10) }
+func BenchmarkBoxRoundTrip_1M(b *testing.B)  { benchmarkRoundTrip(b, boxCrypters, 1<<20) }
+func BenchmarkBoxRoundTrip_5M(b *testing.B)  { benchmarkRoundTrip(b, boxCrypters, 5<<20) }
+
+func benchmarkSetup(b *testing.B, crypters factory) {
+	for i := 0; i < b.N; i++ {
+		conn1, conn2 := net.Pipe()
+		crypters(b, conn1, conn2)
+		conn1.Close()
+		conn2.Close()
+	}
+}
+
+func BenchmarkTLSSetup(b *testing.B) { benchmarkSetup(b, tlsCrypters) }
+func BenchmarkBoxSetup(b *testing.B) { benchmarkSetup(b, boxCrypters) }
diff --git a/runtime/internal/rpc/stream/crypto/null.go b/runtime/internal/rpc/stream/crypto/null.go
new file mode 100644
index 0000000..036b541
--- /dev/null
+++ b/runtime/internal/rpc/stream/crypto/null.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package crypto
+
+import "v.io/x/ref/runtime/internal/lib/iobuf"
+
+// NewNullCrypter returns a Crypter that does no encryption/decryption.
+func NewNullCrypter() Crypter { return null{} }
+
+type null struct{}
+
+func (null) Encrypt(src *iobuf.Slice) (*iobuf.Slice, error) { return src, nil }
+func (null) Decrypt(src *iobuf.Slice) (*iobuf.Slice, error) { return src, nil }
+func (null) String() string                                 { return "Null" }
+func (null) ChannelBinding() []byte                         { return nil }
diff --git a/runtime/internal/rpc/stream/crypto/null_cipher.go b/runtime/internal/rpc/stream/crypto/null_cipher.go
new file mode 100644
index 0000000..cdfadc5
--- /dev/null
+++ b/runtime/internal/rpc/stream/crypto/null_cipher.go
@@ -0,0 +1,27 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package crypto
+
+// NullControlCipher is a cipher that does nothing.
+type NullControlCipher struct{}
+
+func (NullControlCipher) MACSize() int           { return 0 }
+func (NullControlCipher) Seal(data []byte) error { return nil }
+func (NullControlCipher) Open(data []byte) bool  { return true }
+func (NullControlCipher) Encrypt(data []byte)    {}
+func (NullControlCipher) Decrypt(data []byte)    {}
+
+type disabledControlCipher struct {
+	NullControlCipher
+	macSize int
+}
+
+func (c *disabledControlCipher) MACSize() int { return c.macSize }
+
+// NewDisabledControlCipher returns a cipher that has the correct MACSize, but
+// encryption and decryption are disabled.
+func NewDisabledControlCipher(c ControlCipher) ControlCipher {
+	return &disabledControlCipher{macSize: c.MACSize()}
+}
diff --git a/runtime/internal/rpc/stream/crypto/tls.go b/runtime/internal/rpc/stream/crypto/tls.go
new file mode 100644
index 0000000..db8833c
--- /dev/null
+++ b/runtime/internal/rpc/stream/crypto/tls.go
@@ -0,0 +1,252 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.4
+
+package crypto
+
+import (
+	"bytes"
+	"crypto/tls"
+	"fmt"
+	"io"
+	"net"
+	"sync"
+	"time"
+
+	"v.io/v23/verror"
+
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+)
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errDeadlinesNotSupported = reg(".errDeadlinesNotSupported", "deadlines not supported")
+	errEndOfEncryptedSlice   = reg(".errEndOfEncryptedSlice", "end of encrypted slice")
+)
+
+// TLSClientSessionCacheOpt specifies the ClientSessionCache used to resume TLS sessions.
+// It adapts tls.ClientSessionCache to the v.io/v23/x/ref/profiles/internal/rpc/stream.VCOpt interface.
+type TLSClientSessionCache struct{ tls.ClientSessionCache }
+
+func (TLSClientSessionCache) RPCStreamVCOpt() {}
+
+// NewTLSClient returns a Crypter implementation that uses TLS, assuming
+// handshaker was initiated by a client.
+func NewTLSClient(handshaker io.ReadWriteCloser, local, remote net.Addr, sessionCache TLSClientSessionCache, pool *iobuf.Pool) (Crypter, error) {
+	var config tls.Config
+	// TLS + resumption + channel bindings is broken: <https://secure-resumption.com/#channelbindings>.
+	config.SessionTicketsDisabled = true
+	config.InsecureSkipVerify = true
+	config.ClientSessionCache = sessionCache.ClientSessionCache
+	return newTLSCrypter(handshaker, local, remote, &config, pool, false)
+}
+
+// NewTLSServer returns a Crypter implementation that uses TLS, assuming
+// handshaker was accepted by a server.
+func NewTLSServer(handshaker io.ReadWriteCloser, local, remote net.Addr, pool *iobuf.Pool) (Crypter, error) {
+	return newTLSCrypter(handshaker, local, remote, ServerTLSConfig(), pool, true)
+}
+
+type fakeConn struct {
+	handshakeConn io.ReadWriteCloser
+	out           bytes.Buffer
+	in            []byte
+	laddr, raddr  net.Addr
+}
+
+func (c *fakeConn) Read(b []byte) (n int, err error) {
+	if c.handshakeConn != nil {
+		return c.handshakeConn.Read(b)
+	}
+	if len(c.in) == 0 {
+		return 0, stream.NewNetError(verror.New(stream.ErrNetwork, nil, verror.New(errEndOfEncryptedSlice, nil)), false, true)
+	}
+	n = copy(b, c.in)
+	c.in = c.in[n:]
+	return
+}
+
+func (c *fakeConn) Write(b []byte) (int, error) {
+	if c.handshakeConn != nil {
+		return c.handshakeConn.Write(b)
+	}
+	return c.out.Write(b)
+}
+
+func (*fakeConn) Close() error           { return nil }
+func (c *fakeConn) LocalAddr() net.Addr  { return c.laddr }
+func (c *fakeConn) RemoteAddr() net.Addr { return c.raddr }
+func (*fakeConn) SetDeadline(t time.Time) error {
+	return verror.New(stream.ErrBadState, nil, verror.New(errDeadlinesNotSupported, nil))
+}
+func (*fakeConn) SetReadDeadline(t time.Time) error {
+	return verror.New(stream.ErrBadState, nil, verror.New(errDeadlinesNotSupported, nil))
+}
+func (*fakeConn) SetWriteDeadline(t time.Time) error {
+	return verror.New(stream.ErrBadState, nil, verror.New(errDeadlinesNotSupported, nil))
+}
+
+// tlsCrypter implements the Crypter interface using crypto/tls.
+//
+// crypto/tls provides a net.Conn, while the Crypter interface operates on
+// iobuf.Slice objects. In order to adapt to the Crypter in stream.ErrNetwork, verrorterface, the
+// strategy is as follows:
+//
+// - netTLSCrypter wraps a net.Conn with an alternative implementation
+//   (fakeConn) for the TLS handshake protocol.
+// - Once the TLS handshake is complete, fakeConn switches to a mode where all
+//   Write calls add to a bytes.Buffer and all Read calls read from a
+//   bytes.Buffer.
+// - Encrypt uses tls.Conn.Write, which in-turn invokes fakeConn.Write and then
+//   it extracts the contents of the underlying bytes.Buffer.
+// - Decrypt adds to the read buffer and then invokes tls.Conn.Read, which
+//   in-turn invokes fakeConn.Read, which reads from that buffer.
+type tlsCrypter struct {
+	mu    sync.Mutex
+	alloc *iobuf.Allocator
+	tls   *tls.Conn
+	fc    *fakeConn
+}
+
+func newTLSCrypter(handshaker io.ReadWriteCloser, local, remote net.Addr, config *tls.Config, pool *iobuf.Pool, server bool) (Crypter, error) {
+	fc := &fakeConn{handshakeConn: handshaker, laddr: local, raddr: remote}
+	var t *tls.Conn
+	if server {
+		t = tls.Server(fc, config)
+	} else {
+		// The TLS handshake protocol ends with a message received by the client.
+		// handshaker should be closed only after the handshake protocol completes.
+		// So, the client closes the handshaker.
+		defer handshaker.Close()
+		t = tls.Client(fc, config)
+	}
+	if err := t.Handshake(); err != nil {
+		return nil, err
+	}
+	// Must have used Diffie-Hellman to exchange keys (so that the key selection
+	// is independent of any TLS certificates used). This helps ensure that
+	// identities exchanged during the vanadium authentication protocol cannot be
+	// stolen and are bound to the session key established during the TLS handshake.
+	switch cs := t.ConnectionState().CipherSuite; cs {
+	case tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA:
+	case tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA:
+	case tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA:
+	case tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA:
+	case tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA:
+	case tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:
+	case tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
+	case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:
+	case tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
+	default:
+		t.Close()
+		return nil, verror.New(stream.ErrBadArg, nil, verror.New(errUnrecognizedCipherText, nil, fmt.Sprintf("0x%04x", cs)))
+	}
+	fc.handshakeConn = nil
+	return &tlsCrypter{
+		alloc: iobuf.NewAllocator(pool, 0),
+		tls:   t,
+		fc:    fc,
+	}, nil
+}
+
+func (c *tlsCrypter) Encrypt(plaintext *iobuf.Slice) (*iobuf.Slice, error) {
+	defer plaintext.Release()
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	defer c.fc.out.Reset()
+	if _, err := c.tls.Write(plaintext.Contents); err != nil {
+		return nil, err
+	}
+	return c.alloc.Copy(c.fc.out.Bytes()), nil
+}
+
+func (c *tlsCrypter) Decrypt(ciphertext *iobuf.Slice) (*iobuf.Slice, error) {
+	defer ciphertext.Release()
+	if ciphertext.Size() == 0 {
+		return ciphertext, nil
+	}
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.fc.in = ciphertext.Contents
+	// Given the cipher suites used, len(plaintext) < len(ciphertext)
+	// (ciphertext includes TLS record headers). Allocating space for
+	// plaintext based on ciphertext.Size should suffice.
+	plaintext := c.alloc.Alloc(uint(ciphertext.Size()))
+	out := plaintext.Contents
+	for {
+		n, err := c.tls.Read(out)
+		if err != nil {
+			if _, exit := err.(*stream.NetError); exit {
+				break
+			}
+			plaintext.Release()
+			return nil, err
+		}
+		out = out[n:]
+	}
+	plaintext.Contents = plaintext.Contents[:plaintext.Size()-len(out)]
+	return plaintext, nil
+}
+
+func (c *tlsCrypter) String() string {
+	state := c.tls.ConnectionState()
+	return fmt.Sprintf("TLS CipherSuite:0x%04x Resumed:%v", state.CipherSuite, state.DidResume)
+}
+
+// ServerTLSConfig returns the tls.Config used by NewTLSServer.
+func ServerTLSConfig() *tls.Config {
+	c, err := tls.X509KeyPair([]byte(serverCert), []byte(serverKey))
+	if err != nil {
+		panic(err)
+	}
+	return &tls.Config{
+		// TLS + resumption + channel bindings is broken: <https://secure-resumption.com/#channelbindings>.
+		SessionTicketsDisabled: true,
+		Certificates:           []tls.Certificate{c},
+		InsecureSkipVerify:     true,
+		// RC4_128_SHA is 4-5X faster compared to the other cipher suites.
+		// There are concerns with its security (see http://en.wikipedia.org/wiki/RC4 and
+		// https://www.usenix.org/conference/usenixsecurity13/technical-sessions/paper/alFardan),
+		// so this decision will be revisted.
+		// TODO(ashankar,ataly): Figure out what cipher to use and how to
+		// have a speedy Go implementation of it.
+		CipherSuites: []uint16{tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA},
+	}
+}
+
+func (c *tlsCrypter) ChannelBinding() []byte {
+	return c.tls.ConnectionState().TLSUnique
+}
+
+// PEM-encoded certificates and keys used in the tests.
+// One way to generate them is:
+//   go run $GOROOT/src/pkg/crypto/tls/generate_cert.go  --host=localhost --duration=87600h --ecdsa-curve=P256
+// (This generates a self-signed certificate valid for 10 years)
+// which will create cert.pem and key.pem files.
+const (
+	serverCert = `
+-----BEGIN CERTIFICATE-----
+MIIBbTCCAROgAwIBAgIQMD+Kzawjvhij1B/BmvHxLDAKBggqhkjOPQQDAjASMRAw
+DgYDVQQKEwdBY21lIENvMB4XDTE0MDcxODIzMTYxMloXDTI0MDcxNTIzMTYxMlow
+EjEQMA4GA1UEChMHQWNtZSBDbzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABLiz
+Ajsly1DS8NJF2KE195V83TgidfgGEB7nudscdKWH3+5uQHgCc+2BV/7AGGj3yePR
+ZZLzYD95goJ/a7eet/2jSzBJMA4GA1UdDwEB/wQEAwIAoDATBgNVHSUEDDAKBggr
+BgEFBQcDATAMBgNVHRMBAf8EAjAAMBQGA1UdEQQNMAuCCWxvY2FsaG9zdDAKBggq
+hkjOPQQDAgNIADBFAiAb4tBxggEpnKdxv66TBVFxAUn3EBWX25XlL1G2GF8RkAIh
+AOAwys3mvzM4Td/2kV9QNyQPZ9kLLQr9A9ryB0H3N9Yz
+-----END CERTIFICATE-----
+`
+	serverKey = `
+-----BEGIN ECDSA PRIVATE KEY-----
+MHcCAQEEIPLfwg+SVC2/xUcKq0bI9y2+SDEEdCeGuxuBz22BhAw1oAoGCCqGSM49
+AwEHoUQDQgAEuLMCOyXLUNLw0kXYoTX3lXzdOCJ1+AYQHue52xx0pYff7m5AeAJz
+7YFX/sAYaPfJ49FlkvNgP3mCgn9rt563/Q==
+-----END ECDSA PRIVATE KEY-----
+`
+)
diff --git a/runtime/internal/rpc/stream/doc.go b/runtime/internal/rpc/stream/doc.go
new file mode 100644
index 0000000..509628a
--- /dev/null
+++ b/runtime/internal/rpc/stream/doc.go
@@ -0,0 +1,54 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package stream implements authenticated byte streams to vanadium endpoints.
+//
+// It is split into multiple sub-packages in an attempt to keep the code
+// healthier by limiting the dependencies between objects. Most users should not
+// need to use this package.
+//
+// Package contents and dependencies are as follows:
+//
+//      * manager provides a factory for Manager objects.
+//        It depends on the vif and proxy packages.
+//      * vif implements a VIF type that wraps over a net.Conn and enables the
+//        creation of VC objects over the underlying network connection.
+//        It depends on the id, message and vc packages.
+//      * message implements serialization and deserialization for messages
+//        exchanged over a VIF.
+//        It depends on the id package.
+//      * vc provides types implementing VC and Flow.
+//        It depends on the id and crypto packages.
+//      * crypto provides types to secure communication over VCs.
+//        It does not depend on any other package.
+//      * id defines identifier types used by other packages.
+//        It does not depend on any other package.
+package stream
+
+// A dump of some ideas/thoughts/TODOs arising from the first iteration of this
+// package. Big ticket items like proxying and TLS/authentication are obvious
+// and won't be missed. I just wanted to put some smaller items on record (in
+// no particular order).
+//
+// (1) Garbage collection of VIFs: Create a policy to close the underlying
+// network connection (and shutdown the VIF) when it is "inactive" (i.e., no VCs
+// have existed on it for a while).
+// (2) On the first write of a new flow, counters are stolen from a shared pool
+// (to avoid a round trip of a "create flow" message followed by a "here are
+// your counters" message). Currently, this happens on either end of the flow
+// (on both the remote and local process). This doesn't need to be the case,
+// the end that received the first message of the flow doesn't need to steal
+// on its first write.
+// (3) Should flow control counters be part of the Data message?
+// If so, maybe the flowQ should have a lower priority than that of Data
+// messages? At a higher level I'm thinking of ways to reduce the number
+// of messages sent per flow. Currently, just creating a flow results in
+// two messages - One where the initiator sends counters to the receiver
+// and one where the receiver does the same. The first write does not
+// block on receiving the counters because of the "steal from shared pool on
+// first write" scheme, but still, sounds like too much traffic.
+// (4) As an example of the above, consider the following code:
+//     vc.Connect().Close()
+// This will result in 3 messages. But ideally it should involve 0.
+// (5) Encryption of control messages to protect from network sniffers.
diff --git a/runtime/internal/rpc/stream/error_test.go b/runtime/internal/rpc/stream/error_test.go
new file mode 100644
index 0000000..fa0716f
--- /dev/null
+++ b/runtime/internal/rpc/stream/error_test.go
@@ -0,0 +1,36 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package stream_test
+
+import (
+	"net"
+	"testing"
+
+	"v.io/v23/verror"
+
+	"v.io/x/ref/runtime/internal/rpc/stream"
+)
+
+func TestTimeoutError(t *testing.T) {
+	e := verror.Register(".test", verror.NoRetry, "hello{:3}")
+	timeoutErr := stream.NewNetError(verror.New(e, nil, "world"), true, false)
+
+	// TimeoutError implements error & net.Error. We test that it
+	// implements error by assigning timeoutErr to err which is of type error.
+	var err error
+	err = timeoutErr
+
+	neterr, ok := err.(net.Error)
+	if !ok {
+		t.Fatalf("%T not a net.Error", err)
+	}
+
+	if got, want := neterr.Timeout(), true; got != want {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+	if got, want := neterr.Error(), "hello: world"; got != want {
+		t.Fatalf("got %v, want %v", got, want)
+	}
+}
diff --git a/runtime/internal/rpc/stream/errors.go b/runtime/internal/rpc/stream/errors.go
new file mode 100644
index 0000000..7ba12b2
--- /dev/null
+++ b/runtime/internal/rpc/stream/errors.go
@@ -0,0 +1,59 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package stream
+
+import (
+	"net"
+
+	"v.io/v23/verror"
+	"v.io/x/lib/vlog"
+)
+
+const pkgPath = "v.io/x/ref/runtime/internal/rpc/stream"
+
+// The stream family of packages guarantee to return one of the verror codes defined
+// here, their messages are constructed so as to avoid embedding a component/method name
+// and are thus more suitable for inclusion in other verrors.
+// This practiced of omitting {1}{2} is used throughout the stream packages since all
+// of their errors are intended to be used as arguments to higher level errors.
+var (
+	// TODO(cnicolaou): rename ErrSecurity to ErrAuth
+	ErrSecurity      = verror.Register(pkgPath+".errSecurity", verror.NoRetry, "{:3}")
+	ErrNotTrusted    = verror.Register(pkgPath+".errNotTrusted", verror.NoRetry, "{:3}")
+	ErrNetwork       = verror.Register(pkgPath+".errNetwork", verror.NoRetry, "{:3}")
+	ErrDialFailed    = verror.Register(pkgPath+".errDialFailed", verror.NoRetry, "{:3}")
+	ErrResolveFailed = verror.Register(pkgPath+".errResolveFailed", verror.NoRetry, "{:3}")
+	ErrProxy         = verror.Register(pkgPath+".errProxy", verror.NoRetry, "{:3}")
+	ErrBadArg        = verror.Register(pkgPath+".errBadArg", verror.NoRetry, "{:3}")
+	ErrBadState      = verror.Register(pkgPath+".errBadState", verror.NoRetry, "{:3}")
+	ErrAborted       = verror.Register(pkgPath+".errAborted", verror.NoRetry, "{:3}")
+)
+
+// NetError implements net.Error
+type NetError struct {
+	err           error
+	timeout, temp bool
+}
+
+// TODO(cnicolaou): investigate getting rid of the use of net.Error
+// entirely. The rpc code can now test for a specific verror code and it's
+// not clear that the net.Conns we implement in Vanadium will ever be used
+// directly by code that expects them to return a net.Error when they
+// timeout.
+
+// NewNetError returns a new net.Error which will return the
+// supplied error, timeout and temporary parameters when the corresponding
+// methods are invoked.
+func NewNetError(err error, timeout, temporary bool) net.Error {
+	return &NetError{err, timeout, temporary}
+}
+
+func (t NetError) Err() error { return t.err }
+func (t NetError) Error() string {
+	defer vlog.LogCall()() // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+	return t.err.Error()
+}
+func (t NetError) Timeout() bool   { return t.timeout }
+func (t NetError) Temporary() bool { return t.temp }
diff --git a/runtime/internal/rpc/stream/id/id.go b/runtime/internal/rpc/stream/id/id.go
new file mode 100644
index 0000000..fd6a641
--- /dev/null
+++ b/runtime/internal/rpc/stream/id/id.go
@@ -0,0 +1,12 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package id provides types for identifying VCs and Flows over them.
+package id
+
+// VC identifies a VC over a VIF.
+type VC uint32
+
+// Flow identifies a Flow over a VC.
+type Flow uint32
diff --git a/runtime/internal/rpc/stream/manager/error_test.go b/runtime/internal/rpc/stream/manager/error_test.go
new file mode 100644
index 0000000..4c61f49
--- /dev/null
+++ b/runtime/internal/rpc/stream/manager/error_test.go
@@ -0,0 +1,140 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package manager_test
+
+import (
+	"net"
+	"testing"
+	"time"
+
+	"v.io/v23/naming"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+
+	_ "v.io/x/ref/runtime/factories/generic"
+	inaming "v.io/x/ref/runtime/internal/naming"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/manager"
+	"v.io/x/ref/runtime/internal/rpc/stream/message"
+	"v.io/x/ref/runtime/internal/testing/mocks/mocknet"
+	"v.io/x/ref/test"
+	"v.io/x/ref/test/testutil"
+)
+
+func TestListenErrors(t *testing.T) {
+	server := manager.InternalNew(naming.FixedRoutingID(0x1))
+	pserver := testutil.NewPrincipal("server")
+
+	// principal, no blessings
+	_, _, err := server.Listen("tcp", "127.0.0.1:0", pserver, security.Blessings{}, nil)
+	if verror.ErrorID(err) != stream.ErrBadArg.ID {
+		t.Fatalf("wrong error: %s", err)
+	}
+	t.Log(err)
+
+	// blessings, no principal
+	_, _, err = server.Listen("tcp", "127.0.0.1:0", nil, pserver.BlessingStore().Default(), nil)
+	if verror.ErrorID(err) != stream.ErrBadArg.ID {
+		t.Fatalf("wrong error: %s", err)
+	}
+	t.Log(err)
+
+	// bad protocol
+	_, _, err = server.Listen("foo", "127.0.0.1:0", pserver, pserver.BlessingStore().Default())
+	if verror.ErrorID(err) != stream.ErrBadArg.ID {
+		t.Fatalf("wrong error: %s", err)
+	}
+	t.Log(err)
+
+	// bad address
+	_, _, err = server.Listen("tcp", "xx.0.0.1:0", pserver, pserver.BlessingStore().Default())
+	if verror.ErrorID(err) != stream.ErrNetwork.ID {
+		t.Fatalf("wrong error: %s", err)
+	}
+	t.Log(err)
+
+	// bad address for proxy
+	_, _, err = server.Listen("v23", "127x.0.0.1", pserver, pserver.BlessingStore().Default())
+	if verror.ErrorID(err) != stream.ErrBadArg.ID {
+		t.Fatalf("wrong error: %s", err)
+	}
+	t.Log(err)
+}
+
+func acceptLoop(ln stream.Listener) {
+	for {
+		f, err := ln.Accept()
+		if err != nil {
+			return
+		}
+		f.Close()
+	}
+
+}
+func dropDataDialer(network, address string, timeout time.Duration) (net.Conn, error) {
+	matcher := func(read bool, msg message.T) bool {
+		switch msg.(type) {
+		case *message.Setup:
+			return true
+		}
+		return false
+	}
+	opts := mocknet.Opts{
+		Mode:              mocknet.V23CloseAtMessage,
+		V23MessageMatcher: matcher,
+	}
+	return mocknet.DialerWithOpts(opts, network, address, timeout)
+}
+
+func simpleResolver(network, address string) (string, string, error) {
+	return network, address, nil
+}
+
+func TestDialErrors(t *testing.T) {
+	_, shutdown := test.InitForTest()
+	defer shutdown()
+	server := manager.InternalNew(naming.FixedRoutingID(0x55555555))
+	client := manager.InternalNew(naming.FixedRoutingID(0xcccccccc))
+	pclient := testutil.NewPrincipal("client")
+	pserver := testutil.NewPrincipal("server")
+
+	// bad protocol
+	ep, _ := inaming.NewEndpoint(naming.FormatEndpoint("x", "127.0.0.1:2"))
+	_, err := client.Dial(ep, pclient)
+	// A bad protocol should result in a Resolve Error.
+	if verror.ErrorID(err) != stream.ErrResolveFailed.ID {
+		t.Errorf("wrong error: %v", err)
+	}
+	t.Log(err)
+
+	// no server
+	ep, _ = inaming.NewEndpoint(naming.FormatEndpoint("tcp", "127.0.0.1:2"))
+	_, err = client.Dial(ep, pclient)
+	if verror.ErrorID(err) != stream.ErrDialFailed.ID {
+		t.Errorf("wrong error: %v", err)
+	}
+	t.Log(err)
+
+	rpc.RegisterProtocol("dropData", dropDataDialer, simpleResolver, net.Listen)
+
+	ln, sep, err := server.Listen("tcp", "127.0.0.1:0", pserver, pserver.BlessingStore().Default())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Server will just listen for flows and close them.
+	go acceptLoop(ln)
+
+	cep, err := mocknet.RewriteEndpointProtocol(sep.String(), "dropData")
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = client.Dial(cep, pclient)
+	if verror.ErrorID(err) != stream.ErrNetwork.ID {
+		t.Errorf("wrong error: %v", err)
+	}
+	t.Log(err)
+}
diff --git a/runtime/internal/rpc/stream/manager/listener.go b/runtime/internal/rpc/stream/manager/listener.go
new file mode 100644
index 0000000..387752e
--- /dev/null
+++ b/runtime/internal/rpc/stream/manager/listener.go
@@ -0,0 +1,424 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package manager
+
+import (
+	"fmt"
+	"math/rand"
+	"net"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	"v.io/x/ref/runtime/internal/lib/upcqueue"
+	inaming "v.io/x/ref/runtime/internal/naming"
+	"v.io/x/ref/runtime/internal/rpc/stream/proxy"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+	"v.io/x/ref/runtime/internal/rpc/stream/vif"
+
+	"v.io/v23/naming"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+	"v.io/v23/vom"
+	"v.io/x/lib/vlog"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+)
+
+// ProxyAuthenticator is a stream.ListenerOpt that is used when listening via a
+// proxy to authenticate with the proxy.
+type ProxyAuthenticator interface {
+	stream.ListenerOpt
+	// Login returns the Blessings (and the set of Discharges to make them
+	// valid) to send to the proxy. Typically, the proxy uses these to
+	// determine whether it wants to authorize use.
+	Login(proxy stream.Flow) (security.Blessings, []security.Discharge, error)
+}
+
+func reg(id, msg string) verror.IDAction {
+	return verror.Register(verror.ID(pkgPath+id), verror.NoRetry, msg)
+}
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errVomEncodeRequest           = reg(".errVomEncodeRequest", "failed to encode request to proxy{:3}")
+	errVomDecodeResponse          = reg(".errVomDecodeRequest", "failed to decoded response from proxy{:3}")
+	errProxyError                 = reg(".errProxyError", "proxy error {:3}")
+	errProxyEndpointError         = reg(".errProxyEndpointError", "proxy returned an invalid endpoint {:3}{:4}")
+	errAlreadyConnected           = reg(".errAlreadyConnected", "already connected to proxy and accepting connections? VIF: {3}, StartAccepting{:_}")
+	errFailedToCreateLivenessFlow = reg(".errFailedToCreateLivenessFlow", "unable to create liveness check flow to proxy{:3}")
+	errAcceptFailed               = reg(".errAcceptFailed", "accept failed{:3}")
+	errFailedToEstablishVC        = reg(".errFailedToEstablishVC", "VC establishment with proxy failed{:_}")
+	errListenerAlreadyClosed      = reg(".errListenerAlreadyClosed", "listener already closed")
+	errRefusedProxyLogin          = reg(".errRefusedProxyLogin", "server did not want to listen via proxy{:_}")
+)
+
+// listener extends stream.Listener with a DebugString method.
+type listener interface {
+	stream.Listener
+	DebugString() string
+}
+
+// netListener implements the listener interface by accepting flows (and VCs)
+// over network connections accepted on an underlying net.Listener.
+type netListener struct {
+	q       *upcqueue.T
+	netLn   net.Listener
+	manager *manager
+	vifs    *vif.Set
+
+	connsMu sync.Mutex
+	conns   map[net.Conn]bool
+
+	netLoop  sync.WaitGroup
+	vifLoops sync.WaitGroup
+}
+
+var _ stream.Listener = (*netListener)(nil)
+
+// proxyListener implements the listener interface by connecting to a remote
+// proxy (typically used to "listen" across network domains).
+type proxyListener struct {
+	q       *upcqueue.T
+	proxyEP naming.Endpoint
+	manager *manager
+	vif     *vif.VIF
+
+	vifLoop sync.WaitGroup
+}
+
+var _ stream.Listener = (*proxyListener)(nil)
+
+func newNetListener(m *manager, netLn net.Listener, principal security.Principal, blessings security.Blessings, opts []stream.ListenerOpt) listener {
+	ln := &netListener{
+		q:       upcqueue.New(),
+		manager: m,
+		netLn:   netLn,
+		vifs:    vif.NewSet(),
+		conns:   make(map[net.Conn]bool),
+	}
+
+	// Set the default idle timeout for VC. But for "unixfd", we do not set
+	// the idle timeout since we cannot reconnect it.
+	if ln.netLn.Addr().Network() != "unixfd" {
+		opts = append([]stream.ListenerOpt{vc.IdleTimeout{defaultIdleTimeout}}, opts...)
+	}
+
+	ln.netLoop.Add(1)
+	go ln.netAcceptLoop(principal, blessings, opts)
+	return ln
+}
+
+func isTemporaryError(err error) bool {
+	if oErr, ok := err.(*net.OpError); ok && oErr.Temporary() {
+		return true
+	}
+	return false
+}
+
+func isTooManyOpenFiles(err error) bool {
+	if oErr, ok := err.(*net.OpError); ok && oErr.Err == syscall.EMFILE {
+		return true
+	}
+	return false
+}
+
+func (ln *netListener) killConnections(n int) {
+	ln.connsMu.Lock()
+	if n > len(ln.conns) {
+		n = len(ln.conns)
+	}
+	remaining := make([]net.Conn, 0, len(ln.conns))
+	for c := range ln.conns {
+		remaining = append(remaining, c)
+	}
+	removed := remaining[:n]
+	ln.connsMu.Unlock()
+
+	vlog.Infof("Killing %d Conns", n)
+
+	var wg sync.WaitGroup
+	wg.Add(n)
+	for i := 0; i < n; i++ {
+		idx := rand.Intn(len(remaining))
+		conn := remaining[idx]
+		go func(conn net.Conn) {
+			vlog.Infof("Killing connection (%s, %s)", conn.LocalAddr(), conn.RemoteAddr())
+			conn.Close()
+			ln.manager.killedConns.Incr(1)
+			wg.Done()
+		}(conn)
+		remaining[idx], remaining[0] = remaining[0], remaining[idx]
+		remaining = remaining[1:]
+	}
+
+	ln.connsMu.Lock()
+	for _, conn := range removed {
+		delete(ln.conns, conn)
+	}
+	ln.connsMu.Unlock()
+
+	wg.Wait()
+}
+
+func (ln *netListener) netAcceptLoop(principal security.Principal, blessings security.Blessings, opts []stream.ListenerOpt) {
+	defer ln.netLoop.Done()
+	opts = append([]stream.ListenerOpt{vc.StartTimeout{defaultStartTimeout}}, opts...)
+	for {
+		conn, err := ln.netLn.Accept()
+		if isTemporaryError(err) {
+			// Use Info instead of Error to reduce the changes that
+			// the log library will cause the process to abort on
+			// failing to create a new file.
+			vlog.Infof("net.Listener.Accept() failed on %v with %v", ln.netLn, err)
+			for tokill := 1; isTemporaryError(err); tokill *= 2 {
+				if isTooManyOpenFiles(err) {
+					ln.killConnections(tokill)
+				} else {
+					tokill = 1
+				}
+				time.Sleep(10 * time.Millisecond)
+				conn, err = ln.netLn.Accept()
+			}
+		}
+		if err != nil {
+			// TODO(cnicolaou): closeListener in manager.go writes to ln (by calling
+			// ln.Close()) and we read it here in the Infof output, so there is
+			// an unguarded read here that will fail under --race. This will only show
+			// itself if the Infof below is changed to always be printed (which is
+			// how I noticed). The right solution is to lock these datastructures, but
+			// that can wait until a bigger overhaul occurs. For now, we leave this at
+			// VI(1) knowing that it's basically harmless.
+			vlog.VI(1).Infof("Exiting netAcceptLoop: net.Listener.Accept() failed on %v with %v", ln.netLn, err)
+			return
+		}
+		ln.connsMu.Lock()
+		ln.conns[conn] = true
+		ln.connsMu.Unlock()
+
+		vlog.VI(1).Infof("New net.Conn accepted from %s (local address: %s)", conn.RemoteAddr(), conn.LocalAddr())
+		go func() {
+			vf, err := vif.InternalNewAcceptedVIF(conn, ln.manager.rid, principal, blessings, nil, ln.deleteVIF, opts...)
+			if err != nil {
+				vlog.Infof("Shutting down conn from %s (local address: %s) as a VIF could not be created: %v", conn.RemoteAddr(), conn.LocalAddr(), err)
+				conn.Close()
+				return
+			}
+			ln.vifs.Insert(vf, conn.RemoteAddr().Network(), conn.RemoteAddr().String())
+			ln.manager.vifs.Insert(vf, conn.RemoteAddr().Network(), conn.RemoteAddr().String())
+
+			ln.vifLoops.Add(1)
+			vifLoop(vf, ln.q, func() {
+				ln.connsMu.Lock()
+				delete(ln.conns, conn)
+				ln.connsMu.Unlock()
+				ln.vifLoops.Done()
+			})
+		}()
+	}
+}
+
+func (ln *netListener) deleteVIF(vf *vif.VIF) {
+	vlog.VI(2).Infof("VIF %v is closed, removing from cache", vf)
+	ln.vifs.Delete(vf)
+	ln.manager.vifs.Delete(vf)
+}
+
+func (ln *netListener) Accept() (stream.Flow, error) {
+	item, err := ln.q.Get(nil)
+	switch {
+	case err == upcqueue.ErrQueueIsClosed:
+		return nil, verror.New(stream.ErrNetwork, nil, verror.New(errListenerAlreadyClosed, nil))
+	case err != nil:
+		return nil, verror.New(stream.ErrNetwork, nil, verror.New(errAcceptFailed, nil, err))
+	default:
+		return item.(vif.ConnectorAndFlow).Flow, nil
+	}
+}
+
+func (ln *netListener) Close() error {
+	closeNetListener(ln.netLn)
+	ln.netLoop.Wait()
+	for _, vif := range ln.vifs.List() {
+		// NOTE(caprita): We do not actually Close down the vifs, as
+		// that would require knowing when all outstanding requests are
+		// finished.  For now, do not worry about it, since we expect
+		// shut down to immediately precede process exit.
+		vif.StopAccepting()
+	}
+	ln.q.Shutdown()
+	ln.manager.removeListener(ln)
+	ln.vifLoops.Wait()
+	vlog.VI(3).Infof("Closed stream.Listener %s", ln)
+	return nil
+}
+
+func (ln *netListener) String() string {
+	return fmt.Sprintf("%T: (%v, %v)", ln, ln.netLn.Addr().Network(), ln.netLn.Addr())
+}
+
+func (ln *netListener) DebugString() string {
+	ret := []string{
+		fmt.Sprintf("stream.Listener: net.Listener on (%q, %q)", ln.netLn.Addr().Network(), ln.netLn.Addr()),
+	}
+	if vifs := ln.vifs.List(); len(vifs) > 0 {
+		ret = append(ret, fmt.Sprintf("===Accepted VIFs(%d)===", len(vifs)))
+		for ix, vif := range vifs {
+			ret = append(ret, fmt.Sprintf("%4d) %v", ix, vif))
+		}
+	}
+	return strings.Join(ret, "\n")
+}
+
+func newProxyListener(m *manager, proxyEP naming.Endpoint, principal security.Principal, opts []stream.ListenerOpt) (listener, *inaming.Endpoint, error) {
+	ln := &proxyListener{
+		q:       upcqueue.New(),
+		proxyEP: proxyEP,
+		manager: m,
+	}
+	vf, ep, err := ln.connect(principal, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+	ln.vif = vf
+	ln.vifLoop.Add(1)
+	go vifLoop(ln.vif, ln.q, func() {
+		ln.vifLoop.Done()
+	})
+	return ln, ep, nil
+}
+
+func (ln *proxyListener) connect(principal security.Principal, opts []stream.ListenerOpt) (*vif.VIF, *inaming.Endpoint, error) {
+	vlog.VI(1).Infof("Connecting to proxy at %v", ln.proxyEP)
+	// Requires dialing a VC to the proxy, need to extract options from ln.opts to do so.
+	var dialOpts []stream.VCOpt
+	var auth ProxyAuthenticator
+	for _, opt := range opts {
+		switch v := opt.(type) {
+		case stream.VCOpt:
+			dialOpts = append(dialOpts, v)
+		case ProxyAuthenticator:
+			auth = v
+		}
+	}
+	// TODO(cnicolaou, ashankar): probably want to set a timeout here. (is
+	// this covered by opts?)
+	// TODO(ashankar): Authorize the proxy server as well (similar to how
+	// clients authorize servers in RPCs).
+	vf, err := ln.manager.FindOrDialVIF(ln.proxyEP, principal, dialOpts...)
+	if err != nil {
+		return nil, nil, err
+	}
+	// Prepend the default idle timeout for VC.
+	opts = append([]stream.ListenerOpt{vc.IdleTimeout{defaultIdleTimeout}}, opts...)
+	if err := vf.StartAccepting(opts...); err != nil {
+		return nil, nil, verror.New(stream.ErrNetwork, nil, verror.New(errAlreadyConnected, nil, vf, err))
+	}
+	// Proxy protocol: See v.io/x/ref/runtime/internal/rpc/stream/proxy/protocol.vdl
+	//
+	// We don't need idle timeout for this VC, since one flow will be kept alive.
+	vc, err := vf.Dial(ln.proxyEP, principal, dialOpts...)
+	if err != nil {
+		vf.StopAccepting()
+		if verror.ErrorID(err) == verror.ErrAborted.ID {
+			ln.manager.vifs.Delete(vf)
+			return nil, nil, verror.New(stream.ErrAborted, nil, err)
+		}
+		return nil, nil, err
+	}
+	flow, err := vc.Connect()
+	if err != nil {
+		vf.StopAccepting()
+		return nil, nil, verror.New(stream.ErrNetwork, nil, verror.New(errFailedToCreateLivenessFlow, nil, err))
+	}
+	var request proxy.Request
+	var response proxy.Response
+	if auth != nil {
+		if request.Blessings, request.Discharges, err = auth.Login(flow); err != nil {
+			vf.StopAccepting()
+			return nil, nil, verror.New(stream.ErrSecurity, nil, verror.New(errRefusedProxyLogin, nil, err))
+		}
+	}
+	enc := vom.NewEncoder(flow)
+	if err := enc.Encode(request); err != nil {
+		flow.Close()
+		vf.StopAccepting()
+		return nil, nil, verror.New(stream.ErrNetwork, nil, verror.New(errVomEncodeRequest, nil, err))
+	}
+	dec := vom.NewDecoder(flow)
+	if err := dec.Decode(&response); err != nil {
+		flow.Close()
+		vf.StopAccepting()
+		return nil, nil, verror.New(stream.ErrNetwork, nil, verror.New(errVomDecodeResponse, nil, err))
+	}
+	if response.Error != nil {
+		flow.Close()
+		vf.StopAccepting()
+		return nil, nil, verror.New(stream.ErrProxy, nil, response.Error)
+	}
+	ep, err := inaming.NewEndpoint(response.Endpoint)
+	if err != nil {
+		flow.Close()
+		vf.StopAccepting()
+		return nil, nil, verror.New(stream.ErrProxy, nil, verror.New(errProxyEndpointError, nil, response.Endpoint, err))
+	}
+	go func(vf *vif.VIF, flow stream.Flow, q *upcqueue.T) {
+		<-flow.Closed()
+		vf.StopAccepting()
+		q.Close()
+	}(vf, flow, ln.q)
+	return vf, ep, nil
+}
+
+func (ln *proxyListener) Accept() (stream.Flow, error) {
+	item, err := ln.q.Get(nil)
+	switch {
+	case err == upcqueue.ErrQueueIsClosed:
+		return nil, verror.New(stream.ErrNetwork, nil, verror.New(errListenerAlreadyClosed, nil))
+	case err != nil:
+		return nil, verror.New(stream.ErrNetwork, nil, verror.New(errAcceptFailed, nil, err))
+	default:
+		return item.(vif.ConnectorAndFlow).Flow, nil
+	}
+}
+
+func (ln *proxyListener) Close() error {
+	ln.vif.StopAccepting()
+	ln.q.Shutdown()
+	ln.manager.removeListener(ln)
+	ln.vifLoop.Wait()
+	vlog.VI(3).Infof("Closed stream.Listener %s", ln)
+	return nil
+}
+
+func (ln *proxyListener) String() string {
+	return ln.DebugString()
+}
+
+func (ln *proxyListener) DebugString() string {
+	return fmt.Sprintf("stream.Listener: PROXY:%v RoutingID:%v", ln.proxyEP, ln.manager.rid)
+}
+
+func vifLoop(vf *vif.VIF, q *upcqueue.T, cleanup func()) {
+	defer cleanup()
+	for {
+		cAndf, err := vf.Accept()
+		switch {
+		case err != nil:
+			vlog.VI(2).Infof("Shutting down listener on VIF %v: %v", vf, err)
+			return
+		case cAndf.Flow == nil:
+			vlog.VI(1).Infof("New VC %v on VIF %v", cAndf.Connector, vf)
+		default:
+			if err := q.Put(cAndf); err != nil {
+				vlog.VI(1).Infof("Closing new flow on VC %v (VIF %v) as Put failed in vifLoop: %v", cAndf.Connector, vf, err)
+				cAndf.Flow.Close()
+			}
+		}
+	}
+}
diff --git a/runtime/internal/rpc/stream/manager/manager.go b/runtime/internal/rpc/stream/manager/manager.go
new file mode 100644
index 0000000..a297962
--- /dev/null
+++ b/runtime/internal/rpc/stream/manager/manager.go
@@ -0,0 +1,352 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package manager provides an implementation of the Manager interface defined in v.io/x/ref/runtime/internal/rpc/stream.
+package manager
+
+import (
+	"fmt"
+	"net"
+	"strings"
+	"sync"
+	"time"
+
+	"v.io/v23/naming"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+	"v.io/x/lib/vlog"
+
+	"v.io/x/ref/lib/stats"
+	"v.io/x/ref/lib/stats/counter"
+	inaming "v.io/x/ref/runtime/internal/naming"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+	"v.io/x/ref/runtime/internal/rpc/stream/vif"
+)
+
+const pkgPath = "v.io/x/ref/runtime/internal/rpc/stream/manager"
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errUnknownNetwork                          = reg(".errUnknownNetwork", "unknown network{:3}")
+	errEndpointParseError                      = reg(".errEndpointParseError", "failed to parse endpoint {3}{:4}")
+	errAlreadyShutdown                         = reg(".errAlreadyShutdown", "already shutdown")
+	errProvidedServerBlessingsWithoutPrincipal = reg(".errServerBlessingsWithoutPrincipal", "blessings provided but with no principal")
+	errNoBlessingNames                         = reg(".errNoBlessingNames", "no blessing names could be extracted for the provided principal")
+)
+
+const (
+	// The default time after which an VIF is closed if no VC is opened.
+	defaultStartTimeout = 3 * time.Second
+	// The default time after which an idle VC is closed.
+	defaultIdleTimeout = 30 * time.Second
+)
+
+// InternalNew creates a new stream.Manager for managing streams where the local
+// process is identified by the provided RoutingID.
+//
+// As the name suggests, this method is intended for use only within packages
+// placed inside v.io/x/ref/runtime/internal. Code outside the
+// v.io/x/ref/runtime/internal/* packages should never call this method.
+func InternalNew(rid naming.RoutingID) stream.Manager {
+	statsPrefix := naming.Join("rpc", "stream", "routing-id", rid.String())
+	m := &manager{
+		rid:         rid,
+		vifs:        vif.NewSet(),
+		listeners:   make(map[listener]bool),
+		statsPrefix: statsPrefix,
+		killedConns: stats.NewCounter(naming.Join(statsPrefix, "killed-connections")),
+	}
+	stats.NewStringFunc(naming.Join(m.statsPrefix, "debug"), m.DebugString)
+	return m
+}
+
+type manager struct {
+	rid  naming.RoutingID
+	vifs *vif.Set
+
+	muListeners sync.Mutex
+	listeners   map[listener]bool // GUARDED_BY(muListeners)
+	shutdown    bool              // GUARDED_BY(muListeners)
+
+	statsPrefix string
+	killedConns *counter.Counter
+}
+
+var _ stream.Manager = (*manager)(nil)
+
+type DialTimeout time.Duration
+
+func (DialTimeout) RPCStreamVCOpt() {}
+func (DialTimeout) RPCClientOpt() {
+	defer vlog.LogCall()() // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+}
+
+func dial(d rpc.DialerFunc, network, address string, timeout time.Duration) (net.Conn, error) {
+	if d != nil {
+		conn, err := d(network, address, timeout)
+		if err != nil {
+			return nil, verror.New(stream.ErrDialFailed, nil, err)
+		}
+		return conn, nil
+	}
+	return nil, verror.New(stream.ErrDialFailed, nil, verror.New(errUnknownNetwork, nil, network))
+}
+
+func resolve(r rpc.ResolverFunc, network, address string) (string, string, error) {
+	if r != nil {
+		net, addr, err := r(network, address)
+		if err != nil {
+			return "", "", verror.New(stream.ErrResolveFailed, nil, err)
+		}
+		return net, addr, nil
+	}
+	return "", "", verror.New(stream.ErrResolveFailed, nil, verror.New(errUnknownNetwork, nil, network))
+}
+
+// FindOrDialVIF returns the network connection (VIF) to the provided address
+// from the cache in the manager. If not already present in the cache, a new
+// connection will be created using net.Dial.
+func (m *manager) FindOrDialVIF(remote naming.Endpoint, principal security.Principal, opts ...stream.VCOpt) (*vif.VIF, error) {
+	// Extract options.
+	var timeout time.Duration
+	for _, o := range opts {
+		switch v := o.(type) {
+		case DialTimeout:
+			timeout = time.Duration(v)
+		}
+	}
+	addr := remote.Addr()
+	d, r, _, _ := rpc.RegisteredProtocol(addr.Network())
+	// (network, address) in the endpoint might not always match up
+	// with the key used in the vifs. For example:
+	// - conn, err := net.Dial("tcp", "www.google.com:80")
+	//   fmt.Println(conn.RemoteAddr()) // Might yield the corresponding IP address
+	// - Similarly, an unspecified IP address (net.IP.IsUnspecified) like "[::]:80"
+	//   might yield "[::1]:80" (loopback interface) in conn.RemoteAddr().
+	// Thus, look for VIFs with the resolved address.
+	network, address, err := resolve(r, addr.Network(), addr.String())
+	if err != nil {
+		return nil, err
+	}
+	vf, unblock := m.vifs.BlockingFind(network, address)
+	if vf != nil {
+		vlog.VI(1).Infof("(%q, %q) resolved to (%q, %q) which exists in the VIF cache.", addr.Network(), addr.String(), network, address)
+		return vf, nil
+	}
+	defer unblock()
+
+	vlog.VI(1).Infof("(%q, %q) not in VIF cache. Dialing", network, address)
+	conn, err := dial(d, network, address, timeout)
+	if err != nil {
+		return nil, err
+	}
+
+	opts = append([]stream.VCOpt{vc.StartTimeout{defaultStartTimeout}}, opts...)
+	vf, err = vif.InternalNewDialedVIF(conn, m.rid, principal, nil, m.deleteVIF, opts...)
+	if err != nil {
+		conn.Close()
+		return nil, err
+	}
+	m.vifs.Insert(vf, network, address)
+	return vf, nil
+}
+
+func (m *manager) Dial(remote naming.Endpoint, principal security.Principal, opts ...stream.VCOpt) (stream.VC, error) {
+	// If vif.Dial fails because the cached network connection was broken, remove from
+	// the cache and try once more.
+	for retry := true; true; retry = false {
+		vf, err := m.FindOrDialVIF(remote, principal, opts...)
+		if err != nil {
+			return nil, err
+		}
+		opts = append([]stream.VCOpt{vc.IdleTimeout{defaultIdleTimeout}}, opts...)
+		vc, err := vf.Dial(remote, principal, opts...)
+		if !retry || verror.ErrorID(err) != stream.ErrAborted.ID {
+			return vc, err
+		}
+		vf.Close()
+	}
+	return nil, verror.NewErrInternal(nil) // Not reached
+}
+
+func (m *manager) deleteVIF(vf *vif.VIF) {
+	vlog.VI(2).Infof("%p: VIF %v is closed, removing from cache", m, vf)
+	m.vifs.Delete(vf)
+}
+
+func listen(protocol, address string) (net.Listener, error) {
+	if _, _, l, _ := rpc.RegisteredProtocol(protocol); l != nil {
+		ln, err := l(protocol, address)
+		if err != nil {
+			return nil, verror.New(stream.ErrNetwork, nil, err)
+		}
+		return ln, nil
+	}
+	return nil, verror.New(stream.ErrBadArg, nil, verror.New(errUnknownNetwork, nil, protocol))
+}
+
+func (m *manager) Listen(protocol, address string, principal security.Principal, blessings security.Blessings, opts ...stream.ListenerOpt) (stream.Listener, naming.Endpoint, error) {
+	bNames, err := extractBlessingNames(principal, blessings)
+	if err != nil {
+		return nil, nil, err
+	}
+	ln, ep, err := m.internalListen(protocol, address, principal, blessings, opts...)
+	if err != nil {
+		return nil, nil, err
+	}
+	ep.Blessings = bNames
+	return ln, ep, nil
+}
+
+func (m *manager) internalListen(protocol, address string, principal security.Principal, blessings security.Blessings, opts ...stream.ListenerOpt) (stream.Listener, *inaming.Endpoint, error) {
+	m.muListeners.Lock()
+	if m.shutdown {
+		m.muListeners.Unlock()
+		return nil, nil, verror.New(stream.ErrBadState, nil, verror.New(errAlreadyShutdown, nil))
+	}
+	m.muListeners.Unlock()
+
+	if protocol == inaming.Network {
+		// Act as if listening on the address of a remote proxy.
+		ep, err := inaming.NewEndpoint(address)
+		if err != nil {
+			return nil, nil, verror.New(stream.ErrBadArg, nil, verror.New(errEndpointParseError, nil, address, err))
+		}
+		return m.remoteListen(ep, principal, opts)
+	}
+	netln, err := listen(protocol, address)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	m.muListeners.Lock()
+	if m.shutdown {
+		m.muListeners.Unlock()
+		closeNetListener(netln)
+		return nil, nil, verror.New(stream.ErrBadState, nil, verror.New(errAlreadyShutdown, nil))
+	}
+
+	ln := newNetListener(m, netln, principal, blessings, opts)
+	m.listeners[ln] = true
+	m.muListeners.Unlock()
+	ep := &inaming.Endpoint{
+		Protocol: protocol,
+		Address:  netln.Addr().String(),
+		RID:      m.rid,
+	}
+	return ln, ep, nil
+}
+
+func (m *manager) remoteListen(proxy naming.Endpoint, principal security.Principal, listenerOpts []stream.ListenerOpt) (stream.Listener, *inaming.Endpoint, error) {
+	ln, ep, err := newProxyListener(m, proxy, principal, listenerOpts)
+	if err != nil {
+		return nil, nil, err
+	}
+	m.muListeners.Lock()
+	defer m.muListeners.Unlock()
+	if m.shutdown {
+		ln.Close()
+		return nil, nil, verror.New(stream.ErrBadState, nil, verror.New(errAlreadyShutdown, nil))
+	}
+	m.listeners[ln] = true
+	return ln, ep, nil
+}
+
+func (m *manager) ShutdownEndpoint(remote naming.Endpoint) {
+	vifs := m.vifs.List()
+	total := 0
+	for _, vf := range vifs {
+		total += vf.ShutdownVCs(remote)
+	}
+	vlog.VI(1).Infof("ShutdownEndpoint(%q) closed %d VCs", remote, total)
+}
+
+func closeNetListener(ln net.Listener) {
+	addr := ln.Addr()
+	err := ln.Close()
+	vlog.VI(1).Infof("Closed net.Listener on (%q, %q): %v", addr.Network(), addr, err)
+}
+
+func (m *manager) removeListener(ln listener) {
+	m.muListeners.Lock()
+	delete(m.listeners, ln)
+	m.muListeners.Unlock()
+}
+
+func (m *manager) Shutdown() {
+	stats.Delete(m.statsPrefix)
+	m.muListeners.Lock()
+	if m.shutdown {
+		m.muListeners.Unlock()
+		return
+	}
+	m.shutdown = true
+	var wg sync.WaitGroup
+	wg.Add(len(m.listeners))
+	for ln, _ := range m.listeners {
+		go func(ln stream.Listener) {
+			ln.Close()
+			wg.Done()
+		}(ln)
+	}
+	m.listeners = make(map[listener]bool)
+	m.muListeners.Unlock()
+	wg.Wait()
+
+	vifs := m.vifs.List()
+	for _, vf := range vifs {
+		vf.Close()
+	}
+}
+
+func (m *manager) RoutingID() naming.RoutingID {
+	return m.rid
+}
+
+func (m *manager) DebugString() string {
+	vifs := m.vifs.List()
+
+	m.muListeners.Lock()
+	defer m.muListeners.Unlock()
+
+	l := make([]string, 0)
+	l = append(l, fmt.Sprintf("Manager: RoutingID:%v #VIFs:%d #Listeners:%d Shutdown:%t", m.rid, len(vifs), len(m.listeners), m.shutdown))
+	if len(vifs) > 0 {
+		l = append(l, "============================VIFs================================================")
+		for ix, vif := range vifs {
+			l = append(l, fmt.Sprintf("%4d) %v", ix, vif.DebugString()))
+			l = append(l, "--------------------------------------------------------------------------------")
+		}
+	}
+	if len(m.listeners) > 0 {
+		l = append(l, "=======================================Listeners==================================================")
+		l = append(l, "  (stream listeners, their local network listeners (missing for proxied listeners), and VIFS")
+		for ln, _ := range m.listeners {
+			l = append(l, ln.DebugString())
+		}
+	}
+	return strings.Join(l, "\n")
+}
+
+func extractBlessingNames(p security.Principal, b security.Blessings) ([]string, error) {
+	if !b.IsZero() && p == nil {
+		return nil, verror.New(stream.ErrBadArg, nil, verror.New(errProvidedServerBlessingsWithoutPrincipal, nil))
+	}
+	if p == nil {
+		return nil, nil
+	}
+	var ret []string
+	for b, _ := range p.BlessingsInfo(b) {
+		ret = append(ret, b)
+	}
+	if len(ret) == 0 {
+		return nil, verror.New(stream.ErrBadArg, nil, verror.New(errNoBlessingNames, nil))
+	}
+	return ret, nil
+}
diff --git a/runtime/internal/rpc/stream/manager/manager_test.go b/runtime/internal/rpc/stream/manager/manager_test.go
new file mode 100644
index 0000000..bce2bab
--- /dev/null
+++ b/runtime/internal/rpc/stream/manager/manager_test.go
@@ -0,0 +1,969 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package manager
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"reflect"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"syscall"
+	"testing"
+	"time"
+
+	"v.io/x/lib/vlog"
+
+	"v.io/v23/naming"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+
+	inaming "v.io/x/ref/runtime/internal/naming"
+	_ "v.io/x/ref/runtime/internal/rpc/protocols/tcp"
+	_ "v.io/x/ref/runtime/internal/rpc/protocols/ws"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+	"v.io/x/ref/runtime/internal/rpc/stream/vif"
+	"v.io/x/ref/test"
+	"v.io/x/ref/test/expect"
+	"v.io/x/ref/test/modules"
+	"v.io/x/ref/test/testutil"
+)
+
+func init() {
+	modules.RegisterChild("runServer", "", runServer)
+	modules.RegisterChild("runRLimitedServer", "", runRLimitedServer)
+}
+
+// We write our own TestMain here instead of relying on v23 test generate because
+// we need to set runtime.GOMAXPROCS.
+func TestMain(m *testing.M) {
+	test.Init()
+	// testutil.Init sets GOMAXPROCS to NumCPU.  We want to force
+	// GOMAXPROCS to remain at 1, in order to trigger a particular race
+	// condition that occurs when closing the server; also, using 1 cpu
+	// introduces less variance in the behavior of the test.
+	runtime.GOMAXPROCS(1)
+	if modules.IsModulesChildProcess() {
+		if err := modules.Dispatch(); err != nil {
+			fmt.Fprintf(os.Stderr, "modules.Dispatch failed: %v\n", err)
+			os.Exit(1)
+		}
+		return
+	}
+	os.Exit(m.Run())
+}
+
+func testSimpleFlow(t *testing.T, protocol string) {
+	server := InternalNew(naming.FixedRoutingID(0x55555555))
+	client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+	pclient := testutil.NewPrincipal("client")
+	pserver := testutil.NewPrincipal("server")
+
+	ln, ep, err := server.Listen(protocol, "127.0.0.1:0", pserver, pserver.BlessingStore().Default())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	data := "the dark knight rises"
+	var clientVC stream.VC
+	var clientF1 stream.Flow
+	go func() {
+		if clientVC, err = client.Dial(ep, pclient); err != nil {
+			t.Errorf("Dial(%q) failed: %v", ep, err)
+			return
+		}
+		if clientF1, err = clientVC.Connect(); err != nil {
+			t.Errorf("Connect() failed: %v", err)
+			return
+		}
+		if err := writeLine(clientF1, data); err != nil {
+			t.Error(err)
+		}
+	}()
+	serverF, err := ln.Accept()
+	if err != nil {
+		t.Fatalf("Accept failed: %v", err)
+	}
+	if got, err := readLine(serverF); got != data || err != nil {
+		t.Errorf("Got (%q, %v), want (%q, nil)", got, err, data)
+	}
+	// By this point, the goroutine has passed the write call (or exited
+	// early) since the read has gotten through.  Check if the goroutine
+	// encountered any errors in creating the VC or flow and abort.
+	if t.Failed() {
+		return
+	}
+	defer clientF1.Close()
+
+	ln.Close()
+
+	// Writes on flows opened before the server listener was closed should
+	// still succeed.
+	data = "the dark knight goes to bed"
+	go func() {
+		if err := writeLine(clientF1, data); err != nil {
+			t.Error(err)
+		}
+	}()
+	if got, err := readLine(serverF); got != data || err != nil {
+		t.Errorf("Got (%q, %v), want (%q, nil)", got, err, data)
+	}
+
+	// Opening a new flow on an existing VC will succeed initially, but
+	// writes on the client end will eventually fail once the server has
+	// stopped listening.
+	//
+	// It will require a round-trip to the server to notice the failure,
+	// hence the client should write enough data to ensure that the Write
+	// call will not return before a round-trip.
+	//
+	// The length of the data is taken to exceed the queue buffer size
+	// (DefaultBytesBufferedPerFlow), the shared counters (MaxSharedBytes)
+	// and the per-flow counters (DefaultBytesBufferedPerFlow) that are
+	// given when the flow gets established.
+	//
+	// TODO(caprita): separate the constants for the queue buffer size and
+	// the default number of counters to avoid confusion.
+	lotsOfData := string(make([]byte, vc.DefaultBytesBufferedPerFlow*2+vc.MaxSharedBytes+1))
+	clientF2, err := clientVC.Connect()
+	if err != nil {
+		t.Fatalf("Connect() failed: %v", err)
+	}
+	defer clientF2.Close()
+	if err := writeLine(clientF2, lotsOfData); err == nil {
+		t.Errorf("Should not be able to Dial or Write after the Listener is closed")
+	}
+	// Opening a new VC should fail fast.
+	if _, err := client.Dial(ep, pclient); err == nil {
+		t.Errorf("Should not be able to Dial after listener is closed")
+	}
+}
+
+func TestSimpleFlow(t *testing.T) {
+	testSimpleFlow(t, "tcp")
+}
+
+func TestSimpleFlowWS(t *testing.T) {
+	testSimpleFlow(t, "ws")
+}
+
+func TestConnectionTimeout(t *testing.T) {
+	client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+
+	ch := make(chan error)
+	go func() {
+		// 203.0.113.0 is TEST-NET-3 from RFC5737
+		ep, _ := inaming.NewEndpoint(naming.FormatEndpoint("tcp", "203.0.113.10:80"))
+		_, err := client.Dial(ep, testutil.NewPrincipal("client"), DialTimeout(time.Second))
+		ch <- err
+	}()
+
+	select {
+	case err := <-ch:
+		if err == nil {
+			t.Fatalf("expected an error")
+		}
+	case <-time.After(time.Minute):
+		t.Fatalf("timedout")
+	}
+}
+
+func testAuthenticatedByDefault(t *testing.T, protocol string) {
+	var (
+		server = InternalNew(naming.FixedRoutingID(0x55555555))
+		client = InternalNew(naming.FixedRoutingID(0xcccccccc))
+
+		clientPrincipal = testutil.NewPrincipal("client")
+		serverPrincipal = testutil.NewPrincipal("server")
+		clientKey       = clientPrincipal.PublicKey()
+		serverBlessings = serverPrincipal.BlessingStore().Default()
+	)
+	ln, ep, err := server.Listen(protocol, "127.0.0.1:0", serverPrincipal, serverPrincipal.BlessingStore().Default())
+	if err != nil {
+		t.Fatal(err)
+	}
+	// And the server blessing should be in the endpoint.
+	if got, want := ep.BlessingNames(), []string{"server"}; !reflect.DeepEqual(got, want) {
+		t.Errorf("Got blessings %v from endpoint, want %v", got, want)
+	}
+
+	errs := make(chan error)
+
+	testAuth := func(tag string, flow stream.Flow, wantServer security.Blessings, wantClientKey security.PublicKey) {
+		// Since the client's blessing is expected to be self-signed we only test
+		// its public key
+		gotServer := flow.RemoteBlessings()
+		gotClientKey := flow.LocalBlessings().PublicKey()
+		if tag == "server" {
+			gotServer = flow.LocalBlessings()
+			gotClientKey = flow.RemoteBlessings().PublicKey()
+		}
+		if !reflect.DeepEqual(gotServer, wantServer) || !reflect.DeepEqual(gotClientKey, wantClientKey) {
+			errs <- fmt.Errorf("%s: Server: Got Blessings %q, want %q. Server: Got Blessings %q, want %q", tag, gotServer, wantServer, gotClientKey, wantClientKey)
+			return
+		}
+		errs <- nil
+	}
+
+	go func() {
+		flow, err := ln.Accept()
+		if err != nil {
+			errs <- err
+			return
+		}
+		defer flow.Close()
+		testAuth("server", flow, serverBlessings, clientKey)
+	}()
+
+	go func() {
+		vc, err := client.Dial(ep, clientPrincipal)
+		if err != nil {
+			errs <- err
+			return
+		}
+		flow, err := vc.Connect()
+		if err != nil {
+			errs <- err
+			return
+		}
+		defer flow.Close()
+		testAuth("client", flow, serverBlessings, clientKey)
+	}()
+
+	if err := <-errs; err != nil {
+		t.Error(err)
+	}
+	if err := <-errs; err != nil {
+		t.Error(err)
+	}
+}
+
+func TestAuthenticatedByDefault(t *testing.T) {
+	testAuthenticatedByDefault(t, "tcp")
+}
+
+func TestAuthenticatedByDefaultWS(t *testing.T) {
+	testAuthenticatedByDefault(t, "ws")
+}
+
+func numListeners(m stream.Manager) int   { return len(m.(*manager).listeners) }
+func debugString(m stream.Manager) string { return m.(*manager).DebugString() }
+func numVIFs(m stream.Manager) int        { return len(m.(*manager).vifs.List()) }
+
+func TestListenEndpoints(t *testing.T) {
+	server := InternalNew(naming.FixedRoutingID(0xcafe))
+	principal := testutil.NewPrincipal("test")
+	blessings := principal.BlessingStore().Default()
+	ln1, ep1, err1 := server.Listen("tcp", "127.0.0.1:0", principal, blessings)
+	ln2, ep2, err2 := server.Listen("tcp", "127.0.0.1:0", principal, blessings)
+	// Since "127.0.0.1:0" was used as the network address, a random port will be
+	// assigned in each case. The endpoint should include that random port.
+	if err1 != nil {
+		t.Error(err1)
+	}
+	if err2 != nil {
+		t.Error(err2)
+	}
+	if ep1.String() == ep2.String() {
+		t.Errorf("Both listeners got the same endpoint: %q", ep1)
+	}
+	if n, expect := numListeners(server), 2; n != expect {
+		t.Errorf("expecting %d listeners, got %d for %s", n, expect, debugString(server))
+	}
+	ln1.Close()
+	if n, expect := numListeners(server), 1; n != expect {
+		t.Errorf("expecting %d listeners, got %d for %s", n, expect, debugString(server))
+	}
+	ln2.Close()
+	if n, expect := numListeners(server), 0; n != expect {
+		t.Errorf("expecting %d listeners, got %d for %s", n, expect, debugString(server))
+	}
+}
+
+func acceptLoop(ln stream.Listener) {
+	for {
+		f, err := ln.Accept()
+		if err != nil {
+			return
+		}
+		f.Close()
+	}
+}
+
+func TestCloseListener(t *testing.T) {
+	testCloseListener(t, "tcp")
+}
+
+func TestCloseListenerWS(t *testing.T) {
+	testCloseListener(t, "ws")
+}
+
+func testCloseListener(t *testing.T, protocol string) {
+	server := InternalNew(naming.FixedRoutingID(0x5e97e9))
+	pclient := testutil.NewPrincipal("client")
+	pserver := testutil.NewPrincipal("server")
+	blessings := pserver.BlessingStore().Default()
+
+	ln, ep, err := server.Listen(protocol, "127.0.0.1:0", pserver, blessings)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Server will just listen for flows and close them.
+	go acceptLoop(ln)
+	client := InternalNew(naming.FixedRoutingID(0xc1e41))
+	if _, err = client.Dial(ep, pclient); err != nil {
+		t.Fatal(err)
+	}
+	ln.Close()
+	client = InternalNew(naming.FixedRoutingID(0xc1e42))
+	if _, err := client.Dial(ep, pclient); err == nil {
+		t.Errorf("client.Dial(%q) should have failed", ep)
+	}
+}
+
+func TestShutdown(t *testing.T) {
+	server := InternalNew(naming.FixedRoutingID(0x5e97e9))
+	principal := testutil.NewPrincipal("test")
+	blessings := principal.BlessingStore().Default()
+	ln, _, err := server.Listen("tcp", "127.0.0.1:0", principal, blessings)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Server will just listen for flows and close them.
+	go acceptLoop(ln)
+	if n, expect := numListeners(server), 1; n != expect {
+		t.Errorf("expecting %d listeners, got %d for %s", n, expect, debugString(server))
+	}
+	server.Shutdown()
+	if _, _, err := server.Listen("tcp", "127.0.0.1:0", principal, blessings); err == nil {
+		t.Error("server should have shut down")
+	}
+	if n, expect := numListeners(server), 0; n != expect {
+		t.Errorf("expecting %d listeners, got %d for %s", n, expect, debugString(server))
+	}
+}
+
+func TestShutdownEndpoint(t *testing.T) {
+	testShutdownEndpoint(t, "tcp")
+}
+
+func TestShutdownEndpointWS(t *testing.T) {
+	testShutdownEndpoint(t, "ws")
+}
+
+func testShutdownEndpoint(t *testing.T, protocol string) {
+	server := InternalNew(naming.FixedRoutingID(0x55555555))
+	client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+	principal := testutil.NewPrincipal("test")
+
+	ln, ep, err := server.Listen(protocol, "127.0.0.1:0", principal, principal.BlessingStore().Default())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Server will just listen for flows and close them.
+	go acceptLoop(ln)
+
+	vc, err := client.Dial(ep, testutil.NewPrincipal("client"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if f, err := vc.Connect(); f == nil || err != nil {
+		t.Errorf("vc.Connect failed: (%v, %v)", f, err)
+	}
+	client.ShutdownEndpoint(ep)
+	if f, err := vc.Connect(); f != nil || err == nil {
+		t.Errorf("vc.Connect unexpectedly succeeded: (%v, %v)", f, err)
+	}
+}
+
+func TestStartTimeout(t *testing.T) {
+	const (
+		startTime = 5 * time.Millisecond
+	)
+
+	var (
+		server  = InternalNew(naming.FixedRoutingID(0x55555555))
+		pserver = testutil.NewPrincipal("server")
+		lopts   = []stream.ListenerOpt{vc.StartTimeout{startTime}}
+	)
+
+	// Pause the start timers.
+	triggerTimers := vif.SetFakeTimers()
+
+	ln, ep, err := server.Listen("tcp", "127.0.0.1:0", pserver, pserver.BlessingStore().Default(), lopts...)
+	if err != nil {
+		t.Fatal(err)
+	}
+	go func() {
+		for {
+			_, err := ln.Accept()
+			if err != nil {
+				return
+			}
+		}
+	}()
+
+	_, err = net.Dial(ep.Addr().Network(), ep.Addr().String())
+	if err != nil {
+		t.Fatalf("net.Dial failed: %v", err)
+	}
+
+	// Trigger the start timers.
+	triggerTimers()
+
+	// No VC is opened. The VIF should be closed after start timeout.
+	for range time.Tick(startTime) {
+		if numVIFs(server) == 0 {
+			break
+		}
+	}
+}
+
+func testIdleTimeout(t *testing.T, testServer bool) {
+	const (
+		idleTime = 10 * time.Millisecond
+		// We use a long wait time here since it takes some time to handle VC close
+		// especially in race testing.
+		waitTime = 150 * time.Millisecond
+	)
+
+	var (
+		server  = InternalNew(naming.FixedRoutingID(0x55555555))
+		client  = InternalNew(naming.FixedRoutingID(0xcccccccc))
+		pclient = testutil.NewPrincipal("client")
+		pserver = testutil.NewPrincipal("server")
+
+		opts  []stream.VCOpt
+		lopts []stream.ListenerOpt
+	)
+	if testServer {
+		lopts = []stream.ListenerOpt{vc.IdleTimeout{idleTime}}
+	} else {
+		opts = []stream.VCOpt{vc.IdleTimeout{idleTime}}
+	}
+
+	// Pause the idle timers.
+	triggerTimers := vif.SetFakeTimers()
+
+	ln, ep, err := server.Listen("tcp", "127.0.0.1:0", pserver, pserver.BlessingStore().Default(), lopts...)
+	if err != nil {
+		t.Fatal(err)
+	}
+	go func() {
+		for {
+			_, err := ln.Accept()
+			if err != nil {
+				return
+			}
+		}
+	}()
+
+	vc, err := client.Dial(ep, pclient, opts...)
+	if err != nil {
+		t.Fatalf("client.Dial(%q) failed: %v", ep, err)
+	}
+	f, err := vc.Connect()
+	if f == nil || err != nil {
+		t.Fatalf("vc.Connect failed: (%v, %v)", f, err)
+	}
+
+	// Trigger the idle timers.
+	triggerTimers()
+
+	// One active flow. The VIF should be kept open.
+	time.Sleep(waitTime)
+	if n := numVIFs(client); n != 1 {
+		t.Errorf("Client has %d VIFs; want 1\n%v", n, debugString(client))
+	}
+	if n := numVIFs(server); n != 1 {
+		t.Errorf("Server has %d VIFs; want 1\n%v", n, debugString(server))
+	}
+
+	f.Close()
+
+	// The flow has been closed. The VIF should be closed after idle timeout.
+	for range time.Tick(idleTime) {
+		if numVIFs(client) == 0 && numVIFs(server) == 0 {
+			break
+		}
+	}
+}
+
+func TestIdleTimeout(t *testing.T)       { testIdleTimeout(t, false) }
+func TestIdleTimeoutServer(t *testing.T) { testIdleTimeout(t, true) }
+
+/* TLS + resumption + channel bindings is broken: <https://secure-resumption.com/#channelbindings>.
+func TestSessionTicketCache(t *testing.T) {
+	server := InternalNew(naming.FixedRoutingID(0x55555555))
+	_, ep, err := server.Listen("tcp", "127.0.0.1:0", testutil.NewPrincipal("server"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+	if _, err = client.Dial(ep, testutil.NewPrincipal("TestSessionTicketCacheClient")); err != nil {
+		t.Fatalf("Dial(%q) failed: %v", ep, err)
+	}
+
+	if _, ok := client.(*manager).sessionCache.Get(ep.String()); !ok {
+		t.Fatalf("SessionTicket from TLS handshake not cached")
+	}
+}
+*/
+
+func testMultipleVCs(t *testing.T, protocol string) {
+	server := InternalNew(naming.FixedRoutingID(0x55555555))
+	client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+	principal := testutil.NewPrincipal("test")
+
+	const nVCs = 2
+	const data = "bugs bunny"
+
+	// Have the server read from each flow and write to rchan.
+	rchan := make(chan string)
+	ln, ep, err := server.Listen(protocol, "127.0.0.1:0", principal, principal.BlessingStore().Default())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	read := func(flow stream.Flow, c chan string) {
+		var buf bytes.Buffer
+		var tmp [1024]byte
+		for {
+			n, err := flow.Read(tmp[:])
+			buf.Write(tmp[:n])
+			if err == io.EOF {
+				c <- buf.String()
+				return
+			}
+			if err != nil {
+				t.Error(err)
+				return
+			}
+		}
+	}
+	go func() {
+		for i := 0; i < nVCs; i++ {
+			flow, err := ln.Accept()
+			if err != nil {
+				t.Error(err)
+				rchan <- ""
+				continue
+			}
+			go read(flow, rchan)
+		}
+	}()
+
+	// Have the client establish nVCs and a flow on each.
+	var vcs [nVCs]stream.VC
+	for i := 0; i < nVCs; i++ {
+		var err error
+		vcs[i], err = client.Dial(ep, testutil.NewPrincipal("client"))
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+	write := func(vc stream.VC) {
+		if err != nil {
+			ln.Close()
+			t.Error(err)
+			return
+		}
+		flow, err := vc.Connect()
+		if err != nil {
+			ln.Close()
+			t.Error(err)
+			return
+		}
+		defer flow.Close()
+		if _, err := flow.Write([]byte(data)); err != nil {
+			ln.Close()
+			t.Error(err)
+			return
+		}
+	}
+	for _, vc := range vcs {
+		go write(vc)
+	}
+	for i := 0; i < nVCs; i++ {
+		if got := <-rchan; got != data {
+			t.Errorf("Got %q want %q", got, data)
+		}
+	}
+}
+
+func TestMultipleVCs(t *testing.T) {
+	testMultipleVCs(t, "tcp")
+}
+
+func TestMultipleVCsWS(t *testing.T) {
+	testMultipleVCs(t, "ws")
+}
+
+func TestAddressResolution(t *testing.T) {
+	server := InternalNew(naming.FixedRoutingID(0x55555555))
+	client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+	principal := testutil.NewPrincipal("test")
+
+	// Using "tcp4" instead of "tcp" because the latter can end up with IPv6
+	// addresses and our Google Compute Engine integration test machines cannot
+	// resolve IPv6 addresses.
+	// As of April 2014, https://developers.google.com/compute/docs/networking
+	// said that IPv6 is not yet supported.
+	ln, ep, err := server.Listen("tcp4", "127.0.0.1:0", principal, principal.BlessingStore().Default())
+	if err != nil {
+		t.Fatal(err)
+	}
+	go acceptLoop(ln)
+
+	// We'd like an endpoint that contains an address that's different than the
+	// one used for the connection. In practice this is awkward to achieve since
+	// we don't want to listen on ":0" since that will annoy firewalls. Instead we
+	// create a endpoint with "localhost", which will result in an endpoint that
+	// doesn't contain 127.0.0.1.
+	_, port, _ := net.SplitHostPort(ep.Addr().String())
+	nep := &inaming.Endpoint{
+		Protocol: ep.Addr().Network(),
+		Address:  net.JoinHostPort("localhost", port),
+		RID:      ep.RoutingID(),
+	}
+
+	// Dial multiple VCs
+	for i := 0; i < 2; i++ {
+		if _, err = client.Dial(nep, testutil.NewPrincipal("client")); err != nil {
+			t.Fatalf("Dial #%d failed: %v", i, err)
+		}
+	}
+	// They should all be on the same VIF.
+	if n := numVIFs(client); n != 1 {
+		t.Errorf("Client has %d VIFs, want 1\n%v", n, debugString(client))
+	}
+	// TODO(ashankar): While a VIF can be re-used to Dial from the server
+	// to the client, currently there is no way to have the client "listen"
+	// on the same VIF. It can listen on a VC for new flows, but it cannot
+	// listen on an established VIF for new VCs. Figure this out?
+}
+
+func TestServerRestartDuringClientLifetime(t *testing.T) {
+	testServerRestartDuringClientLifetime(t, "tcp")
+}
+
+func TestServerRestartDuringClientLifetimeWS(t *testing.T) {
+	testServerRestartDuringClientLifetime(t, "ws")
+}
+
+func testServerRestartDuringClientLifetime(t *testing.T, protocol string) {
+	client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+	pclient := testutil.NewPrincipal("client")
+	sh, err := modules.NewShell(nil, nil, testing.Verbose(), t)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	defer sh.Cleanup(nil, nil)
+	h, err := sh.Start("runServer", nil, protocol, "127.0.0.1:0")
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	epstr := expect.NewSession(t, h.Stdout(), time.Minute).ExpectVar("ENDPOINT")
+	ep, err := inaming.NewEndpoint(epstr)
+	if err != nil {
+		t.Fatalf("inaming.NewEndpoint(%q): %v", epstr, err)
+	}
+	if _, err := client.Dial(ep, pclient); err != nil {
+		t.Fatal(err)
+	}
+	h.Shutdown(nil, os.Stderr)
+
+	// A new VC cannot be created since the server is dead
+	if _, err := client.Dial(ep, pclient); err == nil {
+		t.Fatal("Expected client.Dial to fail since server is dead")
+	}
+
+	h, err = sh.Start("runServer", nil, protocol, ep.Addr().String())
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	// Restarting the server, listening on the same address as before
+	ep2, err := inaming.NewEndpoint(expect.NewSession(t, h.Stdout(), time.Minute).ExpectVar("ENDPOINT"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := ep.Addr().String(), ep2.Addr().String(); got != want {
+		t.Fatalf("Got %q, want %q", got, want)
+	}
+	if _, err := client.Dial(ep2, pclient); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func runServer(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
+	server := InternalNew(naming.FixedRoutingID(0x55555555))
+	principal := testutil.NewPrincipal("test")
+	_, ep, err := server.Listen(args[0], args[1], principal, principal.BlessingStore().Default())
+	if err != nil {
+		fmt.Fprintln(stderr, err)
+		return err
+	}
+	fmt.Fprintf(stdout, "ENDPOINT=%v\n", ep)
+	// Live forever (till the process is explicitly killed)
+	modules.WaitForEOF(stdin)
+	return nil
+}
+
+func runRLimitedServer(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {
+	var rlimit syscall.Rlimit
+	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit); err != nil {
+		fmt.Fprintln(stderr, err)
+		return err
+	}
+	rlimit.Cur = 9
+	if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rlimit); err != nil {
+		fmt.Fprintln(stderr, err)
+		return err
+	}
+	fmt.Fprintf(stdout, "RLIMIT_NOFILE=%d\n", rlimit.Cur)
+	return runServer(stdin, stdout, stderr, env, args...)
+}
+
+func readLine(f stream.Flow) (string, error) {
+	var result bytes.Buffer
+	var buf [5]byte
+	for {
+		n, err := f.Read(buf[:])
+		result.Write(buf[:n])
+		if err == io.EOF || buf[n-1] == '\n' {
+			return strings.TrimRight(result.String(), "\n"), nil
+		}
+		if err != nil {
+			return "", fmt.Errorf("Read returned (%d, %v)", n, err)
+		}
+	}
+}
+
+func writeLine(f stream.Flow, data string) error {
+	data = data + "\n"
+	vlog.VI(1).Infof("write sending %d bytes", len(data))
+	if n, err := f.Write([]byte(data)); err != nil {
+		return fmt.Errorf("Write returned (%d, %v)", n, err)
+	}
+	return nil
+}
+
+func TestRegistration(t *testing.T) {
+	server := InternalNew(naming.FixedRoutingID(0x55555555))
+	client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+	principal := testutil.NewPrincipal("server")
+	blessings := principal.BlessingStore().Default()
+
+	dialer := func(_, _ string, _ time.Duration) (net.Conn, error) {
+		return nil, fmt.Errorf("tn.Dial")
+	}
+	resolver := func(_, _ string) (string, string, error) {
+		return "", "", fmt.Errorf("tn.Resolve")
+	}
+	listener := func(_, _ string) (net.Listener, error) {
+		return nil, fmt.Errorf("tn.Listen")
+	}
+	rpc.RegisterProtocol("tn", dialer, resolver, listener)
+
+	_, _, err := server.Listen("tnx", "127.0.0.1:0", principal, blessings)
+	if err == nil || !strings.Contains(err.Error(), "unknown network: tnx") {
+		t.Fatalf("expected error is missing (%v)", err)
+	}
+
+	_, _, err = server.Listen("tn", "127.0.0.1:0", principal, blessings)
+	if err == nil || !strings.Contains(err.Error(), "tn.Listen") {
+		t.Fatalf("expected error is missing (%v)", err)
+	}
+
+	// Need a functional listener to test Dial.
+	listener = func(_, addr string) (net.Listener, error) {
+		return net.Listen("tcp", addr)
+	}
+
+	if got, want := rpc.RegisterProtocol("tn", dialer, resolver, listener), true; got != want {
+		t.Errorf("got %t, want %t", got, want)
+	}
+
+	_, ep, err := server.Listen("tn", "127.0.0.1:0", principal, blessings)
+	if err != nil {
+		t.Errorf("unexpected error %s", err)
+	}
+
+	_, err = client.Dial(ep, testutil.NewPrincipal("client"))
+	if err == nil || !strings.Contains(err.Error(), "tn.Resolve") {
+		t.Fatalf("expected error is missing (%v)", err)
+	}
+}
+
+func TestBlessingNamesInEndpoint(t *testing.T) {
+	var (
+		p    = testutil.NewPrincipal("default")
+		b, _ = p.BlessSelf("dev.v.io/users/foo@bar.com/devices/desktop/app/myapp")
+
+		server = InternalNew(naming.FixedRoutingID(0x1))
+
+		tests = []struct {
+			principal     security.Principal
+			blessings     security.Blessings
+			blessingNames []string
+			err           bool
+		}{
+			{
+				// provided blessings should match returned output.
+				principal:     p,
+				blessings:     b,
+				blessingNames: []string{"dev.v.io/users/foo@bar.com/devices/desktop/app/myapp"},
+			},
+			{
+				// It is an error to provide a principal without providing blessings.
+				principal: p,
+				blessings: security.Blessings{},
+				err:       true,
+			},
+			{
+				// It is an error to provide inconsistent blessings and principal
+				principal: testutil.NewPrincipal("random"),
+				blessings: b,
+				err:       true,
+			},
+		}
+	)
+	// p must recognize its own blessings!
+	p.AddToRoots(b)
+	for idx, test := range tests {
+		ln, ep, err := server.Listen("tcp", "127.0.0.1:0", test.principal, test.blessings)
+		if (err != nil) != test.err {
+			t.Errorf("test #%d: Got error %v, wanted error: %v", idx, err, test.err)
+		}
+		if err != nil {
+			continue
+		}
+		ln.Close()
+		got, want := ep.BlessingNames(), test.blessingNames
+		sort.Strings(got)
+		sort.Strings(want)
+		if !reflect.DeepEqual(got, want) {
+			t.Errorf("test #%d: Got %v, want %v", idx, got, want)
+		}
+	}
+}
+
+func TestVIFCleanupWhenFDLimitIsReached(t *testing.T) {
+	sh, err := modules.NewShell(nil, nil, testing.Verbose(), t)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer sh.Cleanup(nil, nil)
+	h, err := sh.Start("runRLimitedServer", nil, "--logtostderr=true", "tcp", "127.0.0.1:0")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer h.CloseStdin()
+	stdout := expect.NewSession(t, h.Stdout(), time.Minute)
+	nfiles, err := strconv.Atoi(stdout.ExpectVar("RLIMIT_NOFILE"))
+	if stdout.Error() != nil {
+		t.Fatal(stdout.Error())
+	}
+	if err != nil {
+		t.Fatal(err)
+	}
+	epstr := stdout.ExpectVar("ENDPOINT")
+	if stdout.Error() != nil {
+		t.Fatal(stdout.Error())
+	}
+	ep, err := inaming.NewEndpoint(epstr)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Different client processes (represented by different stream managers
+	// in this test) should be able to make progress, even if the server
+	// has reached its file descriptor limit.
+	nattempts := 0
+	for i := 0; i < 2*nfiles; i++ {
+		client := InternalNew(naming.FixedRoutingID(uint64(i)))
+		defer client.Shutdown()
+		principal := testutil.NewPrincipal(fmt.Sprintf("client%d", i))
+		connected := false
+		for !connected {
+			nattempts++
+			// If the client connection reached the server when it
+			// was at its limit, it might fail.  However, this
+			// failure will trigger the "kill connections" logic at
+			// the server and eventually the client should succeed.
+			vc, err := client.Dial(ep, principal)
+			if err != nil {
+				continue
+			}
+			// Establish a flow to prevent the VC (and thus the
+			// underlying VIF) from being garbage collected as an
+			// "inactive" connection.
+			flow, err := vc.Connect()
+			if err != nil {
+				continue
+			}
+			defer flow.Close()
+			connected = true
+		}
+	}
+	var stderr bytes.Buffer
+	if err := h.Shutdown(nil, &stderr); err != nil {
+		t.Fatal(err)
+	}
+	if log := expect.NewSession(t, bytes.NewReader(stderr.Bytes()), time.Minute).ExpectSetEventuallyRE("listener.go.*Killing [1-9][0-9]* Conns"); len(log) == 0 {
+		t.Errorf("Failed to find log message talking about killing Conns in:\n%v", stderr.String())
+	}
+	t.Logf("Server FD limit:%d", nfiles)
+	t.Logf("Client connection attempts: %d", nattempts)
+}
+
+func TestConcurrentDials(t *testing.T) {
+	// Concurrent Dials to the same network, address should only result in one VIF.
+	server := InternalNew(naming.FixedRoutingID(0x55555555))
+	client := InternalNew(naming.FixedRoutingID(0xcccccccc))
+	principal := testutil.NewPrincipal("test")
+
+	// Using "tcp4" instead of "tcp" because the latter can end up with IPv6
+	// addresses and our Google Compute Engine integration test machines cannot
+	// resolve IPv6 addresses.
+	// As of April 2014, https://developers.google.com/compute/docs/networking
+	// said that IPv6 is not yet supported.
+	ln, ep, err := server.Listen("tcp4", "127.0.0.1:0", principal, principal.BlessingStore().Default())
+	if err != nil {
+		t.Fatal(err)
+	}
+	go acceptLoop(ln)
+
+	nep := &inaming.Endpoint{
+		Protocol: ep.Addr().Network(),
+		Address:  ep.Addr().String(),
+		RID:      ep.RoutingID(),
+	}
+
+	// Dial multiple VCs
+	errCh := make(chan error, 10)
+	for i := 0; i < 10; i++ {
+		go func() {
+			_, err = client.Dial(nep, testutil.NewPrincipal("client"))
+			errCh <- err
+		}()
+	}
+	for i := 0; i < 10; i++ {
+		if err = <-errCh; err != nil {
+			t.Fatal(err)
+		}
+	}
+	// They should all be on the same VIF.
+	if n := numVIFs(client); n != 1 {
+		t.Errorf("Client has %d VIFs, want 1\n%v", n, debugString(client))
+	}
+}
diff --git a/runtime/internal/rpc/stream/message/coding.go b/runtime/internal/rpc/stream/message/coding.go
new file mode 100644
index 0000000..d8e7c68
--- /dev/null
+++ b/runtime/internal/rpc/stream/message/coding.go
@@ -0,0 +1,213 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package message
+
+import (
+	"encoding/binary"
+	"io"
+
+	"v.io/v23/verror"
+
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+)
+
+const pkgPath = "v.io/x/ref/runtime/internal/rpc/stream/message"
+
+func reg(id, msg string) verror.IDAction {
+	return verror.Register(verror.ID(pkgPath+id), verror.NoRetry, msg)
+}
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errLargerThan3ByteUint = reg(".errLargerThan3ByteUnit", "integer too large to represent in 3 bytes")
+	errReadWrongNumBytes   = reg(".errReadWrongNumBytes", "read {3} bytes, wanted to read {4}")
+)
+
+func write3ByteUint(dst []byte, n int) error {
+	if n >= (1<<24) || n < 0 {
+		return verror.New(errLargerThan3ByteUint, nil)
+	}
+	dst[0] = byte((n & 0xff0000) >> 16)
+	dst[1] = byte((n & 0x00ff00) >> 8)
+	dst[2] = byte(n & 0x0000ff)
+	return nil
+}
+
+func read3ByteUint(src []byte) int {
+	return int(src[0])<<16 | int(src[1])<<8 | int(src[2])
+}
+
+func write4ByteUint(dst []byte, n uint32) {
+	dst[0] = byte((n & 0xff000000) >> 24)
+	dst[1] = byte((n & 0x00ff0000) >> 16)
+	dst[2] = byte((n & 0x0000ff00) >> 8)
+	dst[3] = byte(n & 0x000000ff)
+}
+
+func read4ByteUint(src []byte) uint32 {
+	return uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
+}
+
+func readInt(r io.Reader, ptr interface{}) error {
+	return binary.Read(r, binary.BigEndian, ptr)
+}
+
+func writeInt(w io.Writer, ptr interface{}) error {
+	return binary.Write(w, binary.BigEndian, ptr)
+}
+
+func readString(r io.Reader, s *string) error {
+	var size uint32
+	if err := readInt(r, &size); err != nil {
+		return err
+	}
+	bytes := make([]byte, size)
+	n, err := r.Read(bytes)
+	if err != nil {
+		return err
+	}
+	if n != int(size) {
+		return verror.New(errReadWrongNumBytes, nil, n, int(size))
+	}
+	*s = string(bytes)
+	return nil
+}
+
+func writeString(w io.Writer, s string) error {
+	size := uint32(len(s))
+	if err := writeInt(w, size); err != nil {
+		return err
+	}
+	n, err := w.Write([]byte(s))
+	if err != nil {
+		return err
+	}
+	if n != int(size) {
+		return verror.New(errReadWrongNumBytes, nil, n, int(size))
+	}
+	return nil
+}
+
+// byteReader adapts an io.Reader to an io.ByteReader so that we can
+// use it with encoding/Binary for varint etc.
+type byteReader struct{ io.Reader }
+
+func (b byteReader) ReadByte() (byte, error) {
+	var buf [1]byte
+	n, err := b.Reader.Read(buf[:])
+	switch {
+	case n == 1:
+		return buf[0], err
+	case err != nil:
+		return 0, err
+	default:
+		return 0, verror.New(errReadWrongNumBytes, nil, n, 1)
+	}
+}
+
+func readCounters(r io.Reader) (Counters, error) {
+	var br io.ByteReader
+	var ok bool
+	if br, ok = r.(io.ByteReader); !ok {
+		br = byteReader{r}
+	}
+	size, err := binary.ReadUvarint(br)
+	if err != nil {
+		return nil, err
+	}
+	if size == 0 {
+		return nil, nil
+	}
+	c := Counters(make(map[CounterID]uint32, size))
+	for i := uint64(0); i < size; i++ {
+		vci, err := binary.ReadUvarint(br)
+		if err != nil {
+			return nil, err
+		}
+		fid, err := binary.ReadUvarint(br)
+		if err != nil {
+			return nil, err
+		}
+		bytes, err := binary.ReadUvarint(br)
+		if err != nil {
+			return nil, err
+		}
+		c.Add(id.VC(vci), id.Flow(fid), uint32(bytes))
+	}
+	return c, nil
+}
+
+func writeCounters(w io.Writer, c Counters) (err error) {
+	var vbuf [binary.MaxVarintLen64]byte
+	putUvarint := func(n uint64) {
+		if err == nil {
+			_, err = w.Write(vbuf[:binary.PutUvarint(vbuf[:], n)])
+		}
+	}
+	putUvarint(uint64(len(c)))
+	for cid, bytes := range c {
+		putUvarint(uint64(cid.VCI()))
+		putUvarint(uint64(cid.Flow()))
+		putUvarint(uint64(bytes))
+	}
+	return
+}
+
+func readSetupOptions(r io.Reader) ([]SetupOption, error) {
+	var opts []SetupOption
+	for {
+		var code setupOptionCode
+		switch err := readInt(r, &code); err {
+		case io.EOF:
+			return opts, nil
+		case nil:
+			break
+		default:
+			return nil, err
+		}
+		var size uint16
+		if err := readInt(r, &size); err != nil {
+			return nil, err
+		}
+		l := &io.LimitedReader{R: r, N: int64(size)}
+		switch code {
+		case naclBoxPublicKey:
+			var opt NaclBox
+			if err := opt.read(l); err != nil {
+				return nil, err
+			}
+			opts = append(opts, &opt)
+		}
+		// Consume any data remaining.
+		readAndDiscardToError(l)
+	}
+}
+
+func writeSetupOptions(w io.Writer, options []SetupOption) error {
+	for _, opt := range options {
+		if err := writeInt(w, opt.code()); err != nil {
+			return err
+		}
+		if err := writeInt(w, opt.size()); err != nil {
+			return err
+		}
+		if err := opt.write(w); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func readAndDiscardToError(r io.Reader) {
+	var data [1024]byte
+	for {
+		if _, err := r.Read(data[:]); err != nil {
+			return
+		}
+	}
+}
diff --git a/runtime/internal/rpc/stream/message/control.go b/runtime/internal/rpc/stream/message/control.go
new file mode 100644
index 0000000..0c33b91
--- /dev/null
+++ b/runtime/internal/rpc/stream/message/control.go
@@ -0,0 +1,367 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package message
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+
+	"v.io/v23/naming"
+	"v.io/v23/verror"
+
+	inaming "v.io/x/ref/runtime/internal/naming"
+	"v.io/x/ref/runtime/internal/rpc/stream/crypto"
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+	"v.io/x/ref/runtime/internal/rpc/version"
+)
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errUnrecognizedVCControlMessageCommand = reg(".errUnrecognizedVCControlMessageCommand",
+		"unrecognized VC control message command({3})")
+	errUnrecognizedVCControlMessageType = reg(".errUnrecognizedVCControlMessageType",
+		"unrecognized VC control message type({3})")
+	errFailedToDeserializedVCControlMessage = reg(".errFailedToDeserializedVCControlMessage", "failed to deserialize control message {3}({4}): {5}")
+	errFailedToWriteHeader                  = reg(".errFailedToWriteHeader", "failed to write header. Wrote {3} bytes instead of {4}{:5}")
+)
+
+// Control is the interface implemented by all control messages.
+type Control interface {
+	readFrom(r *bytes.Buffer) error
+	writeTo(w io.Writer) error
+}
+
+// SetupVC is a Control implementation containing information to setup a new
+// virtual circuit.
+type SetupVC struct {
+	VCI            id.VC
+	LocalEndpoint  naming.Endpoint // Endpoint of the sender (as seen by the sender), can be nil.
+	RemoteEndpoint naming.Endpoint // Endpoint of the receiver (as seen by the sender), can be nil.
+	Counters       Counters
+	Setup          Setup // Negotiate versioning and encryption.
+}
+
+// CloseVC is a Control implementation notifying the closure of an established
+// virtual circuit, or failure to establish a virtual circuit.
+//
+// The Error string will be empty in case the close was the result of an
+// explicit close by the application (and not an error).
+type CloseVC struct {
+	VCI   id.VC
+	Error string
+}
+
+// AddReceiveBuffers is a Control implementation used by the sender of the
+// message to inform the other end of a virtual circuit that it is ready to
+// receive more bytes of data (specified per flow).
+type AddReceiveBuffers struct {
+	Counters Counters
+}
+
+// OpenFlow is a Control implementation notifying the senders intent to create
+// a new Flow. It also include the number of bytes the sender of this message
+// is willing to read.
+type OpenFlow struct {
+	VCI             id.VC
+	Flow            id.Flow
+	InitialCounters uint32
+}
+
+// Setup is a control message used to negotiate VIF/VC options.
+type Setup struct {
+	Versions version.Range
+	Options  []SetupOption
+}
+
+// SetupOption is the base interface for optional Setup options.
+type SetupOption interface {
+	// code is the identifier for the option.
+	code() setupOptionCode
+
+	// size returns the number of bytes needed to represent the option.
+	size() uint16
+
+	// write the option to the writer.
+	write(w io.Writer) error
+
+	// read the option from the reader.
+	read(r io.Reader) error
+}
+
+// NaclBox is a SetupOption that specifies the public key for the NaclBox
+// encryption protocol.
+type NaclBox struct {
+	PublicKey crypto.BoxKey
+}
+
+// SetupStream is a byte stream used to negotiate VIF setup.  During VIF setup,
+// each party sends a Setup message to the other party containing their version
+// and options.  If the version requires further negotiation (such as for authentication),
+// the SetupStream is used for the negotiation.
+//
+// The protocol used on the stream is version-specific, it is not specified here.  See
+// vif/auth.go for an example.
+type SetupStream struct {
+	Data []byte
+}
+
+// Setup option codes.
+type setupOptionCode uint16
+
+const (
+	naclBoxPublicKey setupOptionCode = 0
+)
+
+// Command enum.
+type command uint8
+
+const (
+	deprecatedOpenVCCommand  command = 0
+	closeVCCommand           command = 1
+	addReceiveBuffersCommand command = 2
+	openFlowCommand          command = 3
+	hopSetupCommand          command = 4
+	hopSetupStreamCommand    command = 5
+	setupVCCommand           command = 6
+)
+
+func writeControl(w io.Writer, m Control) error {
+	var command command
+	switch m.(type) {
+	case *CloseVC:
+		command = closeVCCommand
+	case *AddReceiveBuffers:
+		command = addReceiveBuffersCommand
+	case *OpenFlow:
+		command = openFlowCommand
+	case *Setup:
+		command = hopSetupCommand
+	case *SetupStream:
+		command = hopSetupStreamCommand
+	case *SetupVC:
+		command = setupVCCommand
+	default:
+		return verror.New(errUnrecognizedVCControlMessageType, nil, fmt.Sprintf("%T", m))
+	}
+	var header [1]byte
+	header[0] = byte(command)
+	if n, err := w.Write(header[:]); n != len(header) || err != nil {
+		return verror.New(errFailedToWriteHeader, nil, n, len(header), err)
+	}
+	if err := m.writeTo(w); err != nil {
+		return err
+	}
+	return nil
+}
+
+func readControl(r *bytes.Buffer) (Control, error) {
+	var header byte
+	var err error
+	if header, err = r.ReadByte(); err != nil {
+		return nil, err
+	}
+	command := command(header)
+	var m Control
+	switch command {
+	case closeVCCommand:
+		m = new(CloseVC)
+	case addReceiveBuffersCommand:
+		m = new(AddReceiveBuffers)
+	case openFlowCommand:
+		m = new(OpenFlow)
+	case hopSetupCommand:
+		m = new(Setup)
+	case hopSetupStreamCommand:
+		m = new(SetupStream)
+	case setupVCCommand:
+		m = new(SetupVC)
+	default:
+		return nil, verror.New(errUnrecognizedVCControlMessageCommand, nil, command)
+	}
+	if err := m.readFrom(r); err != nil {
+		return nil, verror.New(errFailedToDeserializedVCControlMessage, nil, command, fmt.Sprintf("%T", m), err)
+	}
+	return m, nil
+}
+
+func (m *CloseVC) writeTo(w io.Writer) (err error) {
+	if err = writeInt(w, m.VCI); err != nil {
+		return
+	}
+	if err = writeString(w, m.Error); err != nil {
+		return
+	}
+	return
+}
+
+func (m *CloseVC) readFrom(r *bytes.Buffer) (err error) {
+	if err = readInt(r, &m.VCI); err != nil {
+		return
+	}
+	if err = readString(r, &m.Error); err != nil {
+		return
+	}
+	return
+}
+
+func (m *SetupVC) writeTo(w io.Writer) (err error) {
+	if err = writeInt(w, m.VCI); err != nil {
+		return
+	}
+	var localep string
+	if m.LocalEndpoint != nil {
+		localep = m.LocalEndpoint.String()
+	}
+	if err = writeString(w, localep); err != nil {
+		return
+	}
+	var remoteep string
+	if m.RemoteEndpoint != nil {
+		remoteep = m.RemoteEndpoint.String()
+	}
+	if err = writeString(w, remoteep); err != nil {
+		return
+	}
+	if err = writeCounters(w, m.Counters); err != nil {
+		return
+	}
+	if err = m.Setup.writeTo(w); err != nil {
+		return
+	}
+	return
+}
+
+func (m *SetupVC) readFrom(r *bytes.Buffer) (err error) {
+	if err = readInt(r, &m.VCI); err != nil {
+		return
+	}
+	var ep string
+	if err = readString(r, &ep); err != nil {
+		return
+	}
+	if ep != "" {
+		if m.LocalEndpoint, err = inaming.NewEndpoint(ep); err != nil {
+			return
+		}
+	}
+	if err = readString(r, &ep); err != nil {
+		return
+	}
+	if ep != "" {
+		if m.RemoteEndpoint, err = inaming.NewEndpoint(ep); err != nil {
+			return
+		}
+	}
+	if m.Counters, err = readCounters(r); err != nil {
+		return
+	}
+	if err = m.Setup.readFrom(r); err != nil {
+		return
+	}
+	return
+}
+
+func (m *AddReceiveBuffers) writeTo(w io.Writer) error {
+	return writeCounters(w, m.Counters)
+}
+
+func (m *AddReceiveBuffers) readFrom(r *bytes.Buffer) (err error) {
+	m.Counters, err = readCounters(r)
+	return
+}
+
+func (m *OpenFlow) writeTo(w io.Writer) (err error) {
+	if err = writeInt(w, m.VCI); err != nil {
+		return
+	}
+	if err = writeInt(w, m.Flow); err != nil {
+		return
+	}
+	if err = writeInt(w, m.InitialCounters); err != nil {
+		return
+	}
+	return
+}
+
+func (m *OpenFlow) readFrom(r *bytes.Buffer) (err error) {
+	if err = readInt(r, &m.VCI); err != nil {
+		return
+	}
+	if err = readInt(r, &m.Flow); err != nil {
+		return
+	}
+	if err = readInt(r, &m.InitialCounters); err != nil {
+		return
+	}
+	return
+}
+
+func (m *Setup) writeTo(w io.Writer) (err error) {
+	if err = writeInt(w, m.Versions.Min); err != nil {
+		return
+	}
+	if err = writeInt(w, m.Versions.Max); err != nil {
+		return
+	}
+	if err = writeSetupOptions(w, m.Options); err != nil {
+		return
+	}
+	return
+}
+
+func (m *Setup) readFrom(r *bytes.Buffer) (err error) {
+	if err = readInt(r, &m.Versions.Min); err != nil {
+		return
+	}
+	if err = readInt(r, &m.Versions.Max); err != nil {
+		return
+	}
+	if m.Options, err = readSetupOptions(r); err != nil {
+		return
+	}
+	return
+}
+
+// NaclBox returns the first NaclBox option, or nil if there is none.
+func (m *Setup) NaclBox() *NaclBox {
+	for _, opt := range m.Options {
+		if b, ok := opt.(*NaclBox); ok {
+			return b
+		}
+	}
+	return nil
+}
+
+func (*NaclBox) code() setupOptionCode {
+	return naclBoxPublicKey
+}
+
+func (m *NaclBox) size() uint16 {
+	return uint16(len(m.PublicKey))
+}
+
+func (m *NaclBox) write(w io.Writer) error {
+	_, err := w.Write(m.PublicKey[:])
+	return err
+}
+
+func (m *NaclBox) read(r io.Reader) error {
+	_, err := io.ReadFull(r, m.PublicKey[:])
+	return err
+}
+
+func (m *SetupStream) writeTo(w io.Writer) error {
+	_, err := w.Write(m.Data)
+	return err
+}
+
+func (m *SetupStream) readFrom(r *bytes.Buffer) error {
+	m.Data = r.Bytes()
+	return nil
+}
diff --git a/runtime/internal/rpc/stream/message/counters.go b/runtime/internal/rpc/stream/message/counters.go
new file mode 100644
index 0000000..f51074e
--- /dev/null
+++ b/runtime/internal/rpc/stream/message/counters.go
@@ -0,0 +1,57 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package message
+
+import (
+	"fmt"
+
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+)
+
+// CounterID encapsulates the VCI and Flow used for flow control counter
+// accounting.
+type CounterID uint64
+
+// VCI returns the VCI encoded within the CounterID
+func (c *CounterID) VCI() id.VC { return id.VC(*c >> 32) }
+
+// Flow returns the Flow identifier encoded within the CounterID
+func (c *CounterID) Flow() id.Flow { return id.Flow(*c & 0xffffffff) }
+
+func (c *CounterID) String() string { return fmt.Sprintf("Flow:%d/VCI:%d", c.Flow(), c.VCI()) }
+
+// MakeCounterID creates a CounterID from the provided (vci, fid) pair.
+func MakeCounterID(vci id.VC, fid id.Flow) CounterID {
+	return CounterID(uint64(vci)<<32 | uint64(fid))
+}
+
+// Counters is a map from (VCI, Flow) to the number of bytes for that (VCI,
+// Flow) pair that the receiver is willing to read.
+//
+// Counters are not safe for concurrent access from multiple goroutines.
+//
+// When received in Control messages, clients can iterate over the map:
+//	for cid, bytes := range counters {
+//		fmt.Println("VCI=%d Flow=%d Bytes=%d", cid.VCI(), cid.Flow(), bytes)
+//	}
+type Counters map[CounterID]uint32
+
+// NewCounters creates a new Counters object.
+func NewCounters() Counters { return Counters(make(map[CounterID]uint32)) }
+
+// Add should be called by the receiving end of a Flow to indicate that it is
+// ready to read 'bytes' more data for the flow identified by (vci, fid).
+func (c Counters) Add(vci id.VC, fid id.Flow, bytes uint32) {
+	c[MakeCounterID(vci, fid)] += bytes
+}
+
+func (c Counters) String() string {
+	ret := "map[ "
+	for cid, bytes := range c {
+		ret += fmt.Sprintf("%d@%d:%d ", cid.Flow(), cid.VCI(), bytes)
+	}
+	ret += "]"
+	return ret
+}
diff --git a/runtime/internal/rpc/stream/message/counters_test.go b/runtime/internal/rpc/stream/message/counters_test.go
new file mode 100644
index 0000000..d2ea0f5
--- /dev/null
+++ b/runtime/internal/rpc/stream/message/counters_test.go
@@ -0,0 +1,65 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package message
+
+import (
+	"testing"
+	"testing/quick"
+
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+)
+
+func TestCounterID(t *testing.T) {
+	tests := []struct {
+		vci id.VC
+		fid id.Flow
+	}{
+		{0, 0},
+		{1, 10},
+		{0xffeeddcc, 0xffaabbcc},
+	}
+	for _, test := range tests {
+		cid := MakeCounterID(test.vci, test.fid)
+		if g, w := cid.VCI(), test.vci; g != w {
+			t.Errorf("Got VCI %d want %d", g, w)
+		}
+		if g, w := cid.Flow(), test.fid; g != w {
+			t.Errorf("Got Flow %d want %d", g, w)
+		}
+	}
+}
+
+func TestCounterID_Random(t *testing.T) {
+	f := func(vci id.VC, fid id.Flow) bool {
+		cid := MakeCounterID(vci, fid)
+		return cid.VCI() == vci && cid.Flow() == fid
+	}
+	if err := quick.Check(f, nil); err != nil {
+		t.Error(err)
+	}
+}
+
+func TestCounters(t *testing.T) {
+	f := func(vci id.VC, fid id.Flow, bytes []uint32) bool {
+		c := NewCounters()
+		var sum uint32
+		for _, bin := range bytes {
+			c.Add(vci, fid, bin)
+			if len(c) != 1 {
+				return false
+			}
+			sum += bin
+			for cid, bout := range c {
+				if cid.VCI() != vci || cid.Flow() != fid || bout != sum {
+					return false
+				}
+			}
+		}
+		return true
+	}
+	if err := quick.Check(f, nil); err != nil {
+		t.Error(err)
+	}
+}
diff --git a/runtime/internal/rpc/stream/message/data.go b/runtime/internal/rpc/stream/message/data.go
new file mode 100644
index 0000000..784b603
--- /dev/null
+++ b/runtime/internal/rpc/stream/message/data.go
@@ -0,0 +1,45 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package message
+
+import (
+	"fmt"
+
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+)
+
+// Data encapsulates an application data message.
+type Data struct {
+	VCI     id.VC // Must be non-zero.
+	Flow    id.Flow
+	flags   uint8
+	Payload *iobuf.Slice
+}
+
+// Close returns true if the sender of the data message requested that the flow be closed.
+func (d *Data) Close() bool { return d.flags&0x1 == 1 }
+
+// SetClose sets the Close flag of the message.
+func (d *Data) SetClose() { d.flags |= 0x1 }
+
+// Release releases the Payload
+func (d *Data) Release() {
+	if d.Payload != nil {
+		d.Payload.Release()
+		d.Payload = nil
+	}
+}
+
+func (d *Data) PayloadSize() int {
+	if d.Payload == nil {
+		return 0
+	}
+	return d.Payload.Size()
+}
+
+func (d *Data) String() string {
+	return fmt.Sprintf("VCI:%d Flow:%d Flags:%02x Payload:(%d bytes)", d.VCI, d.Flow, d.flags, d.PayloadSize())
+}
diff --git a/runtime/internal/rpc/stream/message/message.go b/runtime/internal/rpc/stream/message/message.go
new file mode 100644
index 0000000..199ba03
--- /dev/null
+++ b/runtime/internal/rpc/stream/message/message.go
@@ -0,0 +1,262 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package message provides data structures and serialization/deserialization
+// methods for messages exchanged by the implementation of the
+// v.io/x/ref/runtime/internal/rpc/stream interfaces.
+package message
+
+// This file contains methods to read and write messages sent over the VIF.
+// Every message has the following format:
+//
+// +-----------------------------------------+
+// | Type (1 byte) | PayloadSize (3 bytes)   |
+// +-----------------------------------------+
+// | Payload (PayloadSize bytes)             |
+// +-----------------------------------------+
+//
+// Currently, there are 2 valid types:
+// 0 (controlType)
+// 1 (dataType)
+//
+// When Type == controlType, the message is:
+// +---------------------------------------------+
+// |      0        | PayloadSize (3 bytes)       |
+// +---------------------------------------------+
+// | Cmd  (1 byte)                               |
+// +---------------------------------------------+
+// | Data (PayloadSize - MACSize - 1 bytes)      |
+// +---------------------------------------------+
+// | MAC (MACSize bytes)                         |
+// +---------------------------------------------+
+// Where Data is the serialized Control interface object.
+//
+// When Type == dataType, the message is:
+// +---------------------------------------------+
+// |      1        | PayloadSize (3 bytes)       |
+// +---------------------------------------------+
+// | id.VCI (4 bytes)                            |
+// +---------------------------------------------+
+// | id.Flow (4 bytes)                           |
+// +---------------------------------------------+
+// | Flags (1 byte)                              |
+// +---------------------------------------------+
+// | MAC (MACSize bytes)                         |
+// +---------------------------------------------+
+// | Data (PayloadSize - 9 - MACSize bytes)      |
+// +---------------------------------------------+
+// Where Data is the application data.  The Data is encrypted separately; it is
+// not included in the MAC.
+//
+// A crypto.ControlCipher is used to encrypt the control data.  The MACSize
+// comes from the ControlCipher.  When used, the first word of the header,
+// containing the Type and PayloadSize, is encrypted with the cipher's Encrypt
+// method.  The rest of the control data is encrypted with the cipher's Seal
+// method.  This means that none of the data is observable by an adversary, but
+// the type and length are subject to corruption (the rest of the data is not).
+// This doesn't matter -- if the Type or PayloadSize is corrupted by an
+// adversary, the payload will be misread, and will fail to validate.
+//
+// We could potentially pass the Type and PayloadSize in the clear, but then the
+// framing would be observable, a (probably minor) information leak.  There is
+// no reason to do so, we encrypt everything.
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+
+	"v.io/x/lib/vlog"
+
+	"v.io/v23/verror"
+
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	"v.io/x/ref/runtime/internal/rpc/stream/crypto"
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+)
+
+const (
+	// Size (in bytes) of headers appended to application data payload in
+	// Data messages.
+	HeaderSizeBytes = commonHeaderSizeBytes + dataHeaderSizeBytes
+
+	commonHeaderSizeBytes = 4 // 1 byte type + 3 bytes payload length
+	dataHeaderSizeBytes   = 9 // 4 byte id.VC + 4 byte id.Flow + 1 byte flags
+
+	// Make sure the first byte can't be ASCII to ensure that a VC
+	// header can never be confused with a web socket request.
+	// TODO(cnicolaou): remove the original controlType and dataType values
+	// when new binaries are pushed.
+	controlType   = 0
+	controlTypeWS = 0x80
+	dataType      = 1
+	dataTypeWS    = 0x81
+)
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errEmptyMessage            = reg(".errEmptyMessage", "message is empty")
+	errCorruptedMessage        = reg(".errCorruptedMessage", "corrupted message")
+	errInvalidMessageType      = reg("errInvalidMessageType", "invalid message type {3}")
+	errUnrecognizedMessageType = reg("errUrecognizedMessageType", "unrecognized message type {3}")
+	errFailedToReadVCHeader    = reg(".errFailedToReadVCHeader", "failed to read VC header{:3}")
+	errFailedToReadPayload     = reg(".errFailedToReadPayload", "failed to read payload of {3} bytes for type {4}{:5}")
+)
+
+// T is the interface implemented by all messages communicated over a VIF.
+type T interface {
+}
+
+// ReadFrom reads a message from the provided iobuf.Reader.
+//
+// Sample usage:
+//	msg, err := message.ReadFrom(r)
+//	switch m := msg.(type) {
+//		case *Data:
+//			notifyFlowOfReceivedData(m.VCI, m.Flow, m.Payload)
+//			if m.Closed() {
+//			   closeFlow(m.VCI, m.Flow)
+//			}
+//		case Control:
+//			handleControlMessage(m)
+//	}
+func ReadFrom(r *iobuf.Reader, c crypto.ControlCipher) (T, error) {
+	header, err := r.Read(commonHeaderSizeBytes)
+	if err != nil {
+		return nil, verror.New(errFailedToReadVCHeader, nil, err)
+	}
+	c.Decrypt(header.Contents)
+	msgType := header.Contents[0]
+	msgPayloadSize := read3ByteUint(header.Contents[1:4])
+	header.Release()
+	payload, err := r.Read(msgPayloadSize)
+	if err != nil {
+		return nil, verror.New(errFailedToReadPayload, nil, msgPayloadSize, msgType, err)
+	}
+	macSize := c.MACSize()
+	switch msgType {
+	case controlType, controlTypeWS:
+		if !c.Open(payload.Contents) {
+			payload.Release()
+			return nil, verror.New(errCorruptedMessage, nil)
+		}
+		m, err := readControl(bytes.NewBuffer(payload.Contents[:msgPayloadSize-macSize]))
+		payload.Release()
+		return m, err
+	case dataType, dataTypeWS:
+		if !c.Open(payload.Contents[0 : dataHeaderSizeBytes+macSize]) {
+			payload.Release()
+			return nil, verror.New(errCorruptedMessage, nil)
+		}
+		m := &Data{
+			VCI:     id.VC(read4ByteUint(payload.Contents[0:4])),
+			Flow:    id.Flow(read4ByteUint(payload.Contents[4:8])),
+			flags:   payload.Contents[8],
+			Payload: payload,
+		}
+		m.Payload.TruncateFront(uint(dataHeaderSizeBytes + macSize))
+		return m, nil
+	default:
+		payload.Release()
+		return nil, verror.New(errUnrecognizedMessageType, nil, msgType)
+	}
+}
+
+// WriteTo serializes message and makes a single call to w.Write.
+// It is the inverse of ReadFrom.
+//
+// By writing the message in a single call to w.Write, confusion is avoided in
+// case multiple goroutines are calling Write on w simultaneously.
+//
+// If message is a Data message, the Payload contents will be Released
+// irrespective of the return value of this method.
+func WriteTo(w io.Writer, message T, c crypto.ControlCipher) error {
+	macSize := c.MACSize()
+	switch m := message.(type) {
+	case *Data:
+		payloadSize := m.PayloadSize() + dataHeaderSizeBytes + macSize
+		msg := mkHeaderSpace(m.Payload, uint(HeaderSizeBytes+macSize))
+		header := msg.Contents[0 : HeaderSizeBytes+macSize]
+		header[0] = dataType
+		if err := write3ByteUint(header[1:4], payloadSize); err != nil {
+			return err
+
+		}
+		write4ByteUint(header[4:8], uint32(m.VCI))
+		write4ByteUint(header[8:12], uint32(m.Flow))
+		header[12] = m.flags
+		EncryptMessage(msg.Contents, c)
+		_, err := w.Write(msg.Contents)
+		msg.Release()
+		return err
+	case Control:
+		var buf bytes.Buffer
+		// Prevent a few memory allocations by presizing the buffer to
+		// something that is large enough for typical control messages.
+		buf.Grow(256)
+		// Reserve space for the header
+		if err := extendBuffer(&buf, commonHeaderSizeBytes); err != nil {
+			return err
+		}
+		if err := writeControl(&buf, m); err != nil {
+			return err
+		}
+		if err := extendBuffer(&buf, macSize); err != nil {
+			return err
+		}
+		msg := buf.Bytes()
+		msg[0] = controlType
+		if err := write3ByteUint(msg[1:4], buf.Len()-commonHeaderSizeBytes); err != nil {
+			return err
+		}
+		EncryptMessage(msg, c)
+		_, err := w.Write(msg)
+		return err
+	default:
+		return verror.New(errInvalidMessageType, nil, fmt.Sprintf("%T", m))
+	}
+}
+
+// EncryptMessage encrypts the message's control data in place.
+func EncryptMessage(msg []byte, c crypto.ControlCipher) error {
+	if len(msg) == 0 {
+		return verror.New(errEmptyMessage, nil)
+	}
+	n := len(msg)
+	switch msgType := msg[0]; msgType {
+	case controlType:
+		// skip
+	case dataType:
+		n = HeaderSizeBytes + c.MACSize()
+	default:
+		return verror.New(errUnrecognizedMessageType, nil, msgType)
+	}
+	c.Encrypt(msg[0:commonHeaderSizeBytes])
+	c.Seal(msg[commonHeaderSizeBytes:n])
+	return nil
+}
+
+func mkHeaderSpace(slice *iobuf.Slice, space uint) *iobuf.Slice {
+	if slice == nil {
+		return iobuf.NewSlice(make([]byte, space))
+	}
+	if slice.ExpandFront(space) {
+		return slice
+	}
+	vlog.VI(10).Infof("Failed to expand slice by %d bytes. Copying", space)
+	contents := make([]byte, slice.Size()+int(space))
+	copy(contents[space:], slice.Contents)
+	slice.Release()
+	return iobuf.NewSlice(contents)
+}
+
+var emptyBytes [256]byte
+
+func extendBuffer(buf *bytes.Buffer, size int) error {
+	_, err := buf.Write(emptyBytes[:size])
+	return err
+}
diff --git a/runtime/internal/rpc/stream/message/message_test.go b/runtime/internal/rpc/stream/message/message_test.go
new file mode 100644
index 0000000..aa79604
--- /dev/null
+++ b/runtime/internal/rpc/stream/message/message_test.go
@@ -0,0 +1,216 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package message
+
+import (
+	"bytes"
+	"encoding/binary"
+	"reflect"
+	"testing"
+
+	"v.io/v23/naming"
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	inaming "v.io/x/ref/runtime/internal/naming"
+	"v.io/x/ref/runtime/internal/rpc/stream/crypto"
+	iversion "v.io/x/ref/runtime/internal/rpc/version"
+)
+
+// testControlCipher is a super-simple cipher that xor's each byte of the
+// payload with 0xaa.
+type testControlCipher struct{}
+
+const testMACSize = 4
+
+func (*testControlCipher) MACSize() int {
+	return testMACSize
+}
+
+func testMAC(data []byte) []byte {
+	var h uint32
+	for _, b := range data {
+		h = (h << 1) ^ uint32(b)
+	}
+	var hash [4]byte
+	binary.BigEndian.PutUint32(hash[:], h)
+	return hash[:]
+}
+
+func (c *testControlCipher) Decrypt(data []byte) {
+	for i, _ := range data {
+		data[i] ^= 0xaa
+	}
+}
+
+func (c *testControlCipher) Encrypt(data []byte) {
+	for i, _ := range data {
+		data[i] ^= 0xaa
+	}
+}
+
+func (c *testControlCipher) Open(data []byte) bool {
+	mac := testMAC(data[:len(data)-testMACSize])
+	if bytes.Compare(mac, data[len(data)-testMACSize:]) != 0 {
+		return false
+	}
+	c.Decrypt(data[:len(data)-testMACSize])
+	return true
+}
+
+func (c *testControlCipher) Seal(data []byte) error {
+	c.Encrypt(data[:len(data)-testMACSize])
+	mac := testMAC(data[:len(data)-testMACSize])
+	copy(data[len(data)-testMACSize:], mac)
+	return nil
+}
+
+func TestControl(t *testing.T) {
+	counters := NewCounters()
+	counters.Add(12, 13, 10240)
+	tests := []Control{
+		&CloseVC{VCI: 1},
+		&CloseVC{VCI: 2, Error: "some error"},
+
+		&SetupVC{
+			VCI: 1,
+			LocalEndpoint: &inaming.Endpoint{
+				Protocol: "tcp",
+				Address:  "batman.com:1990",
+				RID:      naming.FixedRoutingID(0xba7),
+			},
+			RemoteEndpoint: &inaming.Endpoint{
+				Protocol: "tcp",
+				Address:  "bugsbunny.com:1940",
+				RID:      naming.FixedRoutingID(0xbb),
+			},
+			Counters: counters,
+			Setup: Setup{
+				Versions: iversion.Range{Min: 34, Max: 56},
+				Options: []SetupOption{
+					&NaclBox{PublicKey: crypto.BoxKey{'h', 'e', 'l', 'l', 'o', 'w', 'o', 'r', 'l', 'd'}},
+					&NaclBox{PublicKey: crypto.BoxKey{7, 67, 31}},
+				},
+			},
+		},
+		// SetupVC without endpoints
+		&SetupVC{
+			VCI:      1,
+			Counters: counters,
+			Setup: Setup{
+				Versions: iversion.Range{Min: 34, Max: 56},
+				Options: []SetupOption{
+					&NaclBox{PublicKey: crypto.BoxKey{'h', 'e', 'l', 'l', 'o', 'w', 'o', 'r', 'l', 'd'}},
+					&NaclBox{PublicKey: crypto.BoxKey{7, 67, 31}},
+				},
+			},
+		},
+
+		&AddReceiveBuffers{},
+		&AddReceiveBuffers{Counters: counters},
+
+		&OpenFlow{VCI: 1, Flow: 10, InitialCounters: 1 << 24},
+
+		&Setup{
+			Versions: iversion.Range{Min: 21, Max: 71},
+			Options: []SetupOption{
+				&NaclBox{PublicKey: crypto.BoxKey{'h', 'e', 'l', 'l', 'o', 'w', 'o', 'r', 'l', 'd'}},
+				&NaclBox{PublicKey: crypto.BoxKey{7, 67, 31}},
+			},
+		},
+
+		&SetupStream{Data: []byte("HelloWorld")},
+	}
+
+	var c testControlCipher
+	pool := iobuf.NewPool(0)
+	for i, msg := range tests {
+		var buf bytes.Buffer
+		if err := WriteTo(&buf, msg, &c); err != nil {
+			t.Errorf("WriteTo(%T) (test #%d) failed: %v", msg, i, err)
+			continue
+		}
+		reader := iobuf.NewReader(pool, &buf)
+		read, err := ReadFrom(reader, &c)
+		reader.Close()
+		if err != nil {
+			t.Errorf("ReadFrom failed (test #%d): %v", i, err)
+			continue
+		}
+		if !reflect.DeepEqual(msg, read) {
+			t.Errorf("Test #%d: Got %T = %+v, want %T = %+v", i, read, read, msg, msg)
+		}
+	}
+}
+
+func TestData(t *testing.T) {
+	tests := []struct {
+		Header  Data
+		Payload string
+	}{
+		{Data{VCI: 10, Flow: 3}, "abcd"},
+		{Data{VCI: 10, Flow: 3, flags: 1}, "batman"},
+	}
+
+	var c testControlCipher
+	pool := iobuf.NewPool(0)
+	allocator := iobuf.NewAllocator(pool, HeaderSizeBytes+testMACSize)
+	for i, test := range tests {
+		var buf bytes.Buffer
+		msgW := test.Header
+		msgW.Payload = allocator.Copy([]byte(test.Payload))
+		if err := WriteTo(&buf, &msgW, &c); err != nil {
+			t.Errorf("WriteTo(%v) failed: %v", i, err)
+			continue
+		}
+		reader := iobuf.NewReader(pool, &buf)
+		read, err := ReadFrom(reader, &c)
+		if err != nil {
+			t.Errorf("ReadFrom(%v) failed: %v", i, err)
+			continue
+		}
+		msgR := read.(*Data)
+		// Must compare Payload and the rest of the message separately.
+		// reflect.DeepEqual(msgR, &msgW) will not cut it because the
+		// iobuf.Slice objects might not pass the DeepEqual test.  That
+		// is fine, the important thing is for iobuf.Slice.Content to
+		// match.
+		if g, w := string(msgR.Payload.Contents), test.Payload; g != w {
+			t.Errorf("Mismatched payloads in test #%d. Got %q want %q", i, g, w)
+		}
+		msgR.Release()
+		if !reflect.DeepEqual(&test.Header, msgR) {
+			t.Errorf("Mismatched headers in test #%d. Got %+v want %+v", i, msgR, &test.Header)
+		}
+	}
+}
+
+func TestDataNoPayload(t *testing.T) {
+	tests := []Data{
+		{VCI: 10, Flow: 3},
+		{VCI: 11, Flow: 4, flags: 10},
+	}
+	var c testControlCipher
+	pool := iobuf.NewPool(0)
+	for _, test := range tests {
+		var buf bytes.Buffer
+		if err := WriteTo(&buf, &test, &c); err != nil {
+			t.Errorf("WriteTo(%v) failed: %v", test, err)
+			continue
+		}
+		read, err := ReadFrom(iobuf.NewReader(pool, &buf), &c)
+		if err != nil {
+			t.Errorf("ReadFrom(%v) failed: %v", test, err)
+			continue
+		}
+		msgR := read.(*Data)
+		if msgR.PayloadSize() != 0 {
+			t.Errorf("ReadFrom(WriteTo(%v)) returned payload of %d bytes", test, msgR.PayloadSize())
+			continue
+		}
+		msgR.Payload = nil
+		if !reflect.DeepEqual(&test, msgR) {
+			t.Errorf("Wrote %v, Read %v", test, read)
+		}
+	}
+}
diff --git a/runtime/internal/rpc/stream/model.go b/runtime/internal/rpc/stream/model.go
new file mode 100644
index 0000000..baf4077
--- /dev/null
+++ b/runtime/internal/rpc/stream/model.go
@@ -0,0 +1,154 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package stream
+
+import (
+	"io"
+
+	"v.io/v23/naming"
+	"v.io/v23/security"
+)
+
+// Flow is the interface for a flow-controlled channel multiplexed on a Virtual
+// Circuit (VC) (and its underlying network connections).
+//
+// This allows for a single level of multiplexing and flow-control over
+// multiple concurrent streams (that may be used for RPCs) over multiple
+// VCs over a single underlying network connection.
+type Flow interface {
+	io.ReadWriteCloser
+
+	// LocalEndpoint returns the local vanadium Endpoint
+	LocalEndpoint() naming.Endpoint
+	// RemoteEndpoint returns the remote vanadium Endpoint
+	RemoteEndpoint() naming.Endpoint
+	// LocalPrincipal returns the Principal at the local end of the flow that has authenticated with the remote end.
+	LocalPrincipal() security.Principal
+	// LocalBlessings returns the blessings presented by the local end of the flow during authentication.
+	LocalBlessings() security.Blessings
+	// RemoteBlessings returns the blessings presented by the remote end of the flow during authentication.
+	RemoteBlessings() security.Blessings
+	// LocalDischarges returns the discharges presented by the local end of the flow during authentication.
+	//
+	// The discharges are organized in a map keyed by the discharge-identifier.
+	LocalDischarges() map[string]security.Discharge
+	// RemoteDischarges returns the discharges presented by the remote end of the flow during authentication.
+	//
+	// The discharges are organized in a map keyed by the discharge-identifier.
+	RemoteDischarges() map[string]security.Discharge
+	// Cancel, like Close, closes the Flow but unlike Close discards any queued writes.
+	Cancel()
+	// IsClosed returns true if the flow has been closed or cancelled.
+	IsClosed() bool
+	// Closed returns a channel that remains open until the flow has been closed.
+	Closed() <-chan struct{}
+
+	// SetDeadline causes reads and writes to the flow to be
+	// cancelled when the given channel is closed.
+	SetDeadline(deadline <-chan struct{})
+
+	// VCDataCache returns the stream.VCDataCache object that allows information to be
+	// shared across the Flow's parent VC.
+	VCDataCache() VCDataCache
+}
+
+// VCDataCache is a thread-safe store that allows data to be shared across a VC,
+// with the intention of caching data that reappears over multiple flows.
+type VCDataCache interface {
+	// Get returns the 'value' associated with 'key'.
+	Get(key interface{}) interface{}
+
+	// GetOrInsert returns the 'value' associated with 'key'. If an entry already exists in the
+	// cache with the 'key', the 'value' is returned, otherwise 'create' is called to create a new
+	// value N, the cache is updated, and N is returned.  GetOrInsert may be called from
+	// multiple goroutines concurrently.
+	GetOrInsert(key interface{}, create func() interface{}) interface{}
+}
+
+// FlowOpt is the interface for all Flow options.
+type FlowOpt interface {
+	RPCStreamFlowOpt()
+}
+
+// Listener is the interface for accepting Flows created by a remote process.
+type Listener interface {
+	// Accept blocks until a new Flow has been initiated by a remote process.
+	// TODO(toddw): This should be:
+	//   Accept() (Flow, Connector, error)
+	Accept() (Flow, error)
+
+	// Close prevents new Flows from being accepted on this Listener.
+	// Previously accepted Flows are not closed down.
+	Close() error
+}
+
+// ListenerOpt is the interface for all options that control the creation of a
+// Listener.
+type ListenerOpt interface {
+	RPCStreamListenerOpt()
+}
+
+// Connector is the interface for initiating Flows to a remote process over a
+// Virtual Circuit (VC).
+type Connector interface {
+	Connect(opts ...FlowOpt) (Flow, error)
+}
+
+// VC is the interface for creating authenticated and secure end-to-end
+// streams.
+//
+// VCs are multiplexed onto underlying network conections and can span
+// multiple hops. Authentication and encryption are end-to-end, even though
+// underlying network connections span a single hop.
+type VC interface {
+	Connector
+	Listen() (Listener, error)
+
+	// Close closes the VC and all flows on it, allowing any pending writes in
+	// flows to drain.
+	Close(reason error) error
+}
+
+// VCOpt is the interface for all VC options.
+type VCOpt interface {
+	RPCStreamVCOpt()
+}
+
+// Manager is the interface for managing the creation of VCs.
+type Manager interface {
+	// Listen creates a Listener that can be used to accept Flows initiated
+	// with the provided network address.
+	//
+	// For example:
+	//   ln, ep, err := Listen("tcp", ":0", principal)
+	//   for {
+	//     flow, err := ln.Accept()
+	//     // process flow
+	//   }
+	// can be used to accept Flows initiated by remote processes to the endpoint
+	// identified by the returned Endpoint.
+	//
+	// principal is used during authentication. If principal is nil, then the Listener
+	// expects to be used for unauthenticated, unencrypted communication.
+	// blessings are the Blessings presented to the Client during authentication.
+	Listen(protocol, address string, principal security.Principal, blessings security.Blessings, opts ...ListenerOpt) (Listener, naming.Endpoint, error)
+
+	// Dial creates a VC to the provided remote endpoint.
+	// principal is used during authentication. If principal is nil, then the VC expects
+	// to be used for unauthenticated, unencrypted communication.
+	Dial(remote naming.Endpoint, principal security.Principal, opts ...VCOpt) (VC, error)
+
+	// ShutdownEndpoint closes all VCs (and Flows and Listeners over it)
+	// involving the provided remote endpoint.
+	ShutdownEndpoint(remote naming.Endpoint)
+
+	// Shutdown closes all VCs and Listeners (and Flows over them) and
+	// frees up internal data structures.
+	// The Manager is not usable after Shutdown has been called.
+	Shutdown()
+
+	// RoutingID returns the Routing ID associated with the VC.
+	RoutingID() naming.RoutingID
+}
diff --git a/runtime/internal/rpc/stream/proxy/debug.go b/runtime/internal/rpc/stream/proxy/debug.go
new file mode 100644
index 0000000..a5ae4c0
--- /dev/null
+++ b/runtime/internal/rpc/stream/proxy/debug.go
@@ -0,0 +1,41 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// DebugString dumps out the routing table at the proxy in text format.
+// The format is meant for debugging purposes and may change without notice.
+func (p *Proxy) debugString() string {
+	var buf bytes.Buffer
+	servers := p.servers.List()
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+	fmt.Fprintf(&buf, "Proxy with endpoint: %q. #Processes:%d #Servers:%d\n", p.endpoint(), len(p.processes), len(servers))
+	fmt.Fprintf(&buf, "=========\n")
+	fmt.Fprintf(&buf, "PROCESSES\n")
+	fmt.Fprintf(&buf, "=========\n")
+	index := 1
+	for process, _ := range p.processes {
+		fmt.Fprintf(&buf, "(%d) - %v", index, process)
+		index++
+		process.mu.RLock()
+		fmt.Fprintf(&buf, " NextVCI:%d #Severs:%d\n", process.nextVCI, len(process.servers))
+		for vci, d := range process.routingTable {
+			fmt.Fprintf(&buf, "    VCI %4d --> VCI %4d @ %s\n", vci, d.VCI, d.Process)
+		}
+		process.mu.RUnlock()
+	}
+	fmt.Fprintf(&buf, "=======\n")
+	fmt.Fprintf(&buf, "SERVERS\n")
+	fmt.Fprintf(&buf, "=======\n")
+	for ix, is := range servers {
+		fmt.Fprintf(&buf, "(%d) %v\n", ix+1, is)
+	}
+	return buf.String()
+}
diff --git a/runtime/internal/rpc/stream/proxy/doc.go b/runtime/internal/rpc/stream/proxy/doc.go
new file mode 100644
index 0000000..2e0d16c
--- /dev/null
+++ b/runtime/internal/rpc/stream/proxy/doc.go
@@ -0,0 +1,50 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package proxy implements a proxy for the stream layer.
+//
+// Each process in vanadium is uniquely identified by a routing id
+// (naming.RoutingID). A proxy routes messages
+// (v.io/x/ref/runtime/internal/rpc/stream/message) it receives on a network connection
+// (net.Conn) to the network connection on which the destination process
+// (identified by the routing id) is listening.
+//
+// Processes behind a NAT can use the proxy to export their services outside
+// the NAT.
+// Sample usage:
+//    var proxyEP naming.Endpoint  // Endpoint of the proxy server
+//    var manager stream.Manager   // Manager used to create and listen for VCs and Flows.
+//    ln, ep, err := manager.Listen(proxyEP.Network(), proxyEP.String())
+//    // Now ln.Accept() will return Flows initiated by remote processes through the proxy.
+//
+// The proxy implemented in this package operates as follows:
+// - When an OpenVC message is received at the proxy, the RoutingID(R)
+//   of the source endpoint is associated with the net.Conn the message
+//   was received on.
+// - This association is used to route messages destined for R to the
+//   corresponding net.Conn
+// - Servers can "listen" on the proxy's address by establishing a VC to the
+//   proxy. Once the VC is established, messages received at the proxy destined
+//   for the RoutingID of the server are forwarded to the net.Conn between the
+//   server and the proxy.
+//
+// For example, consider the following three processes:
+// - Proxy(P) with routing id Rp
+// - A server (S) wishing to listen on the proxy's address with routing id Rs
+// - A client (C) wishing to connect to S through the proxy with routing id Rc.
+//
+// Here is a valid sequence of events that makes that possible:
+// (1) S establishes a VC with P over a net.Conn c1
+//     As a result, P knows that any messages intended for Rs should be
+//     forwarded on c1
+// (2) C connects to P over a net.Conn c2 and attempts to establish a VC with S
+//     using an OpenVC message.
+//     The source endpoint of this message contains the routing id Rc while the
+//     destination endpoint contains the routing id Rs.
+// (3) The proxy sees this message and:
+//     (a) Forwards the message over c1 (since Rs is mapped to c1)
+//     (b) Updates its routing table so that messages intended for Rc are forwarded over c2
+// (4) Any messages from S intended for the client received on c1 are forwarded
+//     by the proxy over c2.
+package proxy
diff --git a/runtime/internal/rpc/stream/proxy/protocol.vdl b/runtime/internal/rpc/stream/proxy/protocol.vdl
new file mode 100644
index 0000000..da87aa0
--- /dev/null
+++ b/runtime/internal/rpc/stream/proxy/protocol.vdl
@@ -0,0 +1,36 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy
+
+import "v.io/v23/security"
+
+// The proxy protocol is:
+// (1) Server establishes a VC to the proxy to register its routing id and authenticate.
+// (2) The server opens a flow and sends a "Request" message and waits for a "Response"
+//     message.
+// (3) This flow is then kept alive with no more data read/written.
+//     Closure of this flow indicates that proxying has (or should be) stopped.
+// (4) The proxy immediately closes any other flows on the VC.
+
+// Request is the message sent by a server to request that the proxy route
+// traffic intended for the server's RoutingId to the network connection
+// between the server and the proxy.
+type Request struct {
+  // Blessings of the server that wishes to be proxied.
+  // Used to authorize the use of the proxy.
+  Blessings security.WireBlessings
+  // Discharges required to make Blessings valid.
+  Discharges []security.WireDischarge
+}
+
+// Response is sent by the proxy to the server after processing Request.
+type Response struct {
+  // Error is a description of why the proxy refused to proxy the server.
+  // A nil error indicates that the proxy will route traffic to the server.
+  Error error
+  // Endpoint is the string representation of an endpoint that can be
+  // used to communicate with the server through the proxy.
+  Endpoint string
+}
diff --git a/runtime/internal/rpc/stream/proxy/protocol.vdl.go b/runtime/internal/rpc/stream/proxy/protocol.vdl.go
new file mode 100644
index 0000000..43f70a0
--- /dev/null
+++ b/runtime/internal/rpc/stream/proxy/protocol.vdl.go
@@ -0,0 +1,52 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated by the vanadium vdl tool.
+// Source: protocol.vdl
+
+package proxy
+
+import (
+	// VDL system imports
+	"v.io/v23/vdl"
+
+	// VDL user imports
+	"v.io/v23/security"
+)
+
+// Request is the message sent by a server to request that the proxy route
+// traffic intended for the server's RoutingId to the network connection
+// between the server and the proxy.
+type Request struct {
+	// Blessings of the server that wishes to be proxied.
+	// Used to authorize the use of the proxy.
+	Blessings security.Blessings
+	// Discharges required to make Blessings valid.
+	Discharges []security.Discharge
+}
+
+func (Request) __VDLReflect(struct {
+	Name string `vdl:"v.io/x/ref/runtime/internal/rpc/stream/proxy.Request"`
+}) {
+}
+
+// Response is sent by the proxy to the server after processing Request.
+type Response struct {
+	// Error is a description of why the proxy refused to proxy the server.
+	// A nil error indicates that the proxy will route traffic to the server.
+	Error error
+	// Endpoint is the string representation of an endpoint that can be
+	// used to communicate with the server through the proxy.
+	Endpoint string
+}
+
+func (Response) __VDLReflect(struct {
+	Name string `vdl:"v.io/x/ref/runtime/internal/rpc/stream/proxy.Response"`
+}) {
+}
+
+func init() {
+	vdl.Register((*Request)(nil))
+	vdl.Register((*Response)(nil))
+}
diff --git a/runtime/internal/rpc/stream/proxy/proxy.go b/runtime/internal/rpc/stream/proxy/proxy.go
new file mode 100644
index 0000000..40fec9b
--- /dev/null
+++ b/runtime/internal/rpc/stream/proxy/proxy.go
@@ -0,0 +1,827 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy
+
+import (
+	"fmt"
+	"net"
+	"reflect"
+	"sync"
+	"time"
+
+	"v.io/x/lib/netstate"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/rpc"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+	"v.io/v23/vom"
+	"v.io/x/lib/vlog"
+
+	"v.io/x/ref/runtime/internal/lib/bqueue"
+	"v.io/x/ref/runtime/internal/lib/bqueue/drrqueue"
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	"v.io/x/ref/runtime/internal/lib/publisher"
+	"v.io/x/ref/runtime/internal/lib/upcqueue"
+	inaming "v.io/x/ref/runtime/internal/naming"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/crypto"
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+	"v.io/x/ref/runtime/internal/rpc/stream/message"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+	"v.io/x/ref/runtime/internal/rpc/stream/vif"
+	iversion "v.io/x/ref/runtime/internal/rpc/version"
+
+	"v.io/x/ref/lib/stats"
+)
+
+const pkgPath = "v.io/x/ref/runtime/proxy"
+
+func reg(id, msg string) verror.IDAction {
+	return verror.Register(verror.ID(pkgPath+id), verror.NoRetry, msg)
+}
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errNoRoutingTableEntry       = reg(".errNoRoutingTableEntry", "routing table has no entry for the VC")
+	errProcessVanished           = reg(".errProcessVanished", "remote process vanished")
+	errDuplicateSetupVC          = reg(".errDuplicateSetupVC", "duplicate SetupVC request")
+	errVomEncodeResponse         = reg(".errVomEncodeResponse", "failed to encode response from proxy{:3}")
+	errNoRequest                 = reg(".errNoRequest", "unable to read Request{:3}")
+	errServerClosedByProxy       = reg(".errServerClosedByProxy", "server closed by proxy")
+	errRemoveServerVC            = reg(".errRemoveServerVC", "failed to remove server VC {3}{:4}")
+	errNetConnClosing            = reg(".errNetConnClosing", "net.Conn is closing")
+	errFailedToAcceptHealthCheck = reg(".errFailedToAcceptHealthCheck", "failed to accept health check flow")
+	errIncompatibleVersions      = reg(".errIncompatibleVersions", "{:3}")
+	errAlreadyProxied            = reg(".errAlreadyProxied", "server with routing id {3} is already being proxied")
+	errUnknownNetwork            = reg(".errUnknownNetwork", "unknown network {3}")
+	errListenFailed              = reg(".errListenFailed", "net.Listen({3}, {4}) failed{:5}")
+	errFailedToForwardRxBufs     = reg(".errFailedToForwardRxBufs", "failed to forward receive buffers{:3}")
+	errFailedToFowardDataMsg     = reg(".errFailedToFowardDataMsg", "failed to forward data message{:3}")
+	errFailedToFowardOpenFlow    = reg(".errFailedToFowardOpenFlow", "failed to forward open flow{:3}")
+	errServerNotBeingProxied     = reg(".errServerNotBeingProxied", "no server with routing id {3} is being proxied")
+	errServerVanished            = reg(".errServerVanished", "server with routing id {3} vanished")
+	errAccessibleAddresses       = reg(".errAccessibleAddresses", "failed to obtain a set of accessible addresses{:3}")
+	errNoAccessibleAddresses     = reg(".errNoAccessibleAddresses", "no accessible addresses were available for {3}")
+	errEmptyListenSpec           = reg(".errEmptyListenSpec", "no addresses supplied in the listen spec")
+)
+
+// Proxy routes virtual circuit (VC) traffic between multiple underlying
+// network connections.
+type Proxy struct {
+	ctx        *context.T
+	ln         net.Listener
+	rid        naming.RoutingID
+	principal  security.Principal
+	blessings  security.Blessings
+	authorizer security.Authorizer
+	mu         sync.RWMutex
+	servers    *servermap
+	processes  map[*process]struct{}
+	pubAddress string
+	statsName  string
+}
+
+// process encapsulates the physical network connection and the routing table
+// associated with the process at the other end of the network connection.
+type process struct {
+	proxy        *Proxy
+	conn         net.Conn
+	pool         *iobuf.Pool
+	reader       *iobuf.Reader
+	ctrlCipher   crypto.ControlCipher
+	queue        *upcqueue.T
+	mu           sync.RWMutex
+	routingTable map[id.VC]*destination
+	nextVCI      id.VC
+	servers      map[id.VC]*vc.VC // servers wishing to be proxied create a VC that terminates at the proxy
+	bq           bqueue.T         // Flow control for messages sent on behalf of servers.
+}
+
+// destination is an entry in the routingtable of a process.
+type destination struct {
+	VCI     id.VC
+	Process *process
+}
+
+// server encapsulates information stored about a server exporting itself via the proxy.
+type server struct {
+	Process *process
+	VC      *vc.VC
+}
+
+func (s *server) RoutingID() naming.RoutingID { return s.VC.RemoteEndpoint().RoutingID() }
+
+func (s *server) Close(err error) {
+	if vc := s.Process.RemoveServerVC(s.VC.VCI()); vc != nil {
+		if err != nil {
+			vc.Close(verror.New(stream.ErrProxy, nil, verror.New(errRemoveServerVC, nil, s.VC.VCI(), err)))
+		} else {
+			vc.Close(verror.New(stream.ErrProxy, nil, verror.New(errServerClosedByProxy, nil)))
+		}
+		s.Process.SendCloseVC(s.VC.VCI(), err)
+	}
+}
+
+func (s *server) String() string {
+	return fmt.Sprintf("RoutingID %v on process %v (VCI:%v Blessings:%v)", s.RoutingID(), s.Process, s.VC.VCI(), s.VC.RemoteBlessings())
+}
+
+// servermap is a concurrent-access safe map from the RoutingID of a server exporting itself
+// through the proxy to the underlying network connection that the server is found on.
+type servermap struct {
+	mu sync.Mutex
+	m  map[naming.RoutingID]*server
+}
+
+func (m *servermap) Add(server *server) error {
+	key := server.RoutingID()
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if m.m[key] != nil {
+		return verror.New(stream.ErrProxy, nil, verror.New(errAlreadyProxied, nil, key))
+	}
+	m.m[key] = server
+	proxyLog().Infof("Started proxying server: %v", server)
+	return nil
+}
+
+func (m *servermap) Remove(server *server) {
+	key := server.RoutingID()
+	m.mu.Lock()
+	if m.m[key] != nil {
+		delete(m.m, key)
+		proxyLog().Infof("Stopped proxying server: %v", server)
+	}
+	m.mu.Unlock()
+}
+
+func (m *servermap) Process(rid naming.RoutingID) *process {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if s := m.m[rid]; s != nil {
+		return s.Process
+	}
+	return nil
+}
+
+func (m *servermap) List() []string {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	ret := make([]string, 0, len(m.m))
+	for _, s := range m.m {
+		ret = append(ret, s.String())
+	}
+	return ret
+}
+
+// New creates a new Proxy that listens for network connections on the provided
+// ListenSpec and routes VC traffic between accepted connections.
+//
+// Servers wanting to "listen through the proxy" will only be allowed to do so
+// if the blessings they present are accepted to the provided authorization
+// policy (authorizer).
+func New(ctx *context.T, spec rpc.ListenSpec, authorizer security.Authorizer, names ...string) (shutdown func(), endpoint naming.Endpoint, err error) {
+	rid, err := naming.NewRoutingID()
+	if err != nil {
+		return nil, nil, err
+	}
+	proxy, err := internalNew(rid, ctx, spec, authorizer)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var pub publisher.Publisher
+	for _, name := range names {
+		if name == "" {
+			// Consistent with v23.rpc.Server.Serve(...)
+			// an empty name implies, "do not publish"
+			continue
+		}
+		if pub == nil {
+			pub = publisher.New(ctx, v23.GetNamespace(ctx), time.Minute)
+			pub.AddServer(proxy.endpoint().String())
+		}
+		pub.AddName(name, false, true)
+	}
+
+	shutdown = func() {
+		if pub != nil {
+			pub.Stop()
+			pub.WaitForStop()
+		}
+		proxy.shutdown()
+	}
+	return shutdown, proxy.endpoint(), nil
+}
+
+func internalNew(rid naming.RoutingID, ctx *context.T, spec rpc.ListenSpec, authorizer security.Authorizer) (*Proxy, error) {
+	if len(spec.Addrs) == 0 {
+		return nil, verror.New(stream.ErrProxy, nil, verror.New(errEmptyListenSpec, nil))
+	}
+	laddr := spec.Addrs[0]
+	network := laddr.Protocol
+	address := laddr.Address
+	_, _, listenFn, _ := rpc.RegisteredProtocol(network)
+	if listenFn == nil {
+		return nil, verror.New(stream.ErrProxy, nil, verror.New(errUnknownNetwork, nil, network))
+	}
+	ln, err := listenFn(network, address)
+	if err != nil {
+		return nil, verror.New(stream.ErrProxy, nil, verror.New(errListenFailed, nil, network, address, err))
+	}
+	pub, _, err := netstate.PossibleAddresses(ln.Addr().Network(), ln.Addr().String(), spec.AddressChooser)
+	if err != nil {
+		ln.Close()
+		return nil, verror.New(stream.ErrProxy, nil, verror.New(errAccessibleAddresses, nil, err))
+	}
+	if len(pub) == 0 {
+		ln.Close()
+		return nil, verror.New(stream.ErrProxy, nil, verror.New(errNoAccessibleAddresses, nil, ln.Addr().String()))
+	}
+	if authorizer == nil {
+		authorizer = security.DefaultAuthorizer()
+	}
+	proxy := &Proxy{
+		ctx:        ctx,
+		ln:         ln,
+		rid:        rid,
+		authorizer: authorizer,
+		servers:    &servermap{m: make(map[naming.RoutingID]*server)},
+		processes:  make(map[*process]struct{}),
+		// TODO(cnicolaou): should use all of the available addresses
+		pubAddress: pub[0].String(),
+		principal:  v23.GetPrincipal(ctx),
+		statsName:  naming.Join("rpc", "proxy", "routing-id", rid.String(), "debug"),
+	}
+	if proxy.principal != nil {
+		proxy.blessings = proxy.principal.BlessingStore().Default()
+	}
+	stats.NewStringFunc(proxy.statsName, proxy.debugString)
+
+	go proxy.listenLoop()
+	return proxy, nil
+}
+
+func (p *Proxy) listenLoop() {
+	proxyLog().Infof("Proxy listening on (%q, %q): %v", p.ln.Addr().Network(), p.ln.Addr(), p.endpoint())
+	for {
+		conn, err := p.ln.Accept()
+		if err != nil {
+			proxyLog().Infof("Exiting listenLoop of proxy %q: %v", p.endpoint(), err)
+			return
+		}
+		go p.acceptProcess(conn)
+	}
+}
+
+func (p *Proxy) acceptProcess(conn net.Conn) {
+	pool := iobuf.NewPool(0)
+	reader := iobuf.NewReader(pool, conn)
+
+	var blessings security.Blessings
+	if p.principal != nil {
+		blessings = p.principal.BlessingStore().Default()
+	}
+
+	c, err := vif.AuthenticateAsServer(conn, reader, nil, p.principal, blessings, nil)
+	if err != nil {
+		processLog().Infof("Process %v failed to authenticate: %s", p, err)
+		return
+	}
+
+	process := &process{
+		proxy:        p,
+		conn:         conn,
+		pool:         pool,
+		reader:       reader,
+		ctrlCipher:   c,
+		queue:        upcqueue.New(),
+		routingTable: make(map[id.VC]*destination),
+		servers:      make(map[id.VC]*vc.VC),
+		bq:           drrqueue.New(vc.MaxPayloadSizeBytes),
+	}
+
+	p.mu.Lock()
+	p.processes[process] = struct{}{}
+	p.mu.Unlock()
+
+	go process.serverVCsLoop()
+	go process.writeLoop()
+	go process.readLoop()
+
+	processLog().Infof("Started process %v", process)
+}
+
+func (p *Proxy) removeProcess(process *process) {
+	p.mu.Lock()
+	delete(p.processes, process)
+	p.mu.Unlock()
+}
+
+func (p *Proxy) runServer(server *server, c <-chan vc.HandshakeResult) {
+	hr := <-c
+	if hr.Error != nil {
+		server.Close(hr.Error)
+		return
+	}
+	// See comments in protocol.vdl for the protocol between servers and the proxy.
+	conn, err := hr.Listener.Accept()
+	if err != nil {
+		server.Close(verror.New(stream.ErrProxy, nil, verror.New(errFailedToAcceptHealthCheck, nil)))
+		return
+	}
+	server.Process.InitVCI(server.VC.VCI())
+	var request Request
+	var response Response
+	dec := vom.NewDecoder(conn)
+	if err := dec.Decode(&request); err != nil {
+		response.Error = verror.New(stream.ErrProxy, nil, verror.New(errNoRequest, nil, err))
+	} else if err := p.authorize(server.VC, request); err != nil {
+		response.Error = err
+	} else if err := p.servers.Add(server); err != nil {
+		response.Error = verror.Convert(verror.ErrUnknown, nil, err)
+	} else {
+		defer p.servers.Remove(server)
+		proxyEP := p.endpoint()
+		ep := &inaming.Endpoint{
+			Protocol: proxyEP.Protocol,
+			Address:  proxyEP.Address,
+			RID:      server.VC.RemoteEndpoint().RoutingID(),
+		}
+		response.Endpoint = ep.String()
+	}
+	enc := vom.NewEncoder(conn)
+	if err := enc.Encode(response); err != nil {
+		proxyLog().Infof("Failed to encode response %#v for server %v", response, server)
+		server.Close(verror.New(stream.ErrProxy, nil, verror.New(errVomEncodeResponse, nil, err)))
+		return
+	}
+	// Reject all other flows
+	go func() {
+		for {
+			flow, err := hr.Listener.Accept()
+			if err != nil {
+				return
+			}
+			flow.Close()
+		}
+	}()
+	// Wait for this flow to be closed.
+	<-conn.Closed()
+	server.Close(nil)
+}
+
+func (p *Proxy) authorize(vc *vc.VC, request Request) error {
+	var dmap map[string]security.Discharge
+	if len(request.Discharges) > 0 {
+		dmap = make(map[string]security.Discharge)
+		for _, d := range request.Discharges {
+			dmap[d.ID()] = d
+		}
+	}
+	// Blessings must be bound to the same public key as the VC.
+	// (Repeating logic in the RPC server authorization code).
+	if got, want := request.Blessings.PublicKey(), vc.RemoteBlessings().PublicKey(); !request.Blessings.IsZero() && !reflect.DeepEqual(got, want) {
+		return verror.New(verror.ErrNoAccess, nil, fmt.Errorf("malformed request: Blessings sent in proxy.Request are bound to public key %v and not %v", got, want))
+	}
+	return p.authorizer.Authorize(p.ctx, security.NewCall(&security.CallParams{
+		LocalPrincipal:   vc.LocalPrincipal(),
+		LocalBlessings:   vc.LocalBlessings(),
+		RemoteBlessings:  request.Blessings,
+		LocalEndpoint:    vc.LocalEndpoint(),
+		RemoteEndpoint:   vc.RemoteEndpoint(),
+		LocalDischarges:  vc.LocalDischarges(),
+		RemoteDischarges: dmap,
+	}))
+}
+
+func (p *Proxy) routeCounters(process *process, counters message.Counters) {
+	// Since each VC can be routed to a different process, split up the
+	// Counters into one message per VC.
+	// Ideally, would split into one message per process (rather than per
+	// flow). This optimization is left an as excercise to the interested.
+	for cid, bytes := range counters {
+		srcVCI := cid.VCI()
+		if vc := process.ServerVC(srcVCI); vc != nil {
+			vc.ReleaseCounters(cid.Flow(), bytes)
+			continue
+		}
+		if d := process.Route(srcVCI); d != nil {
+			c := message.NewCounters()
+			c.Add(d.VCI, cid.Flow(), bytes)
+			if err := d.Process.queue.Put(&message.AddReceiveBuffers{Counters: c}); err != nil {
+				process.RemoveRoute(srcVCI)
+				process.SendCloseVC(srcVCI, verror.New(stream.ErrProxy, nil, verror.New(errFailedToForwardRxBufs, nil, err)))
+			}
+		}
+	}
+}
+
+func startRoutingVC(srcVCI, dstVCI id.VC, srcProcess, dstProcess *process) {
+	dstProcess.AddRoute(dstVCI, &destination{VCI: srcVCI, Process: srcProcess})
+	srcProcess.AddRoute(srcVCI, &destination{VCI: dstVCI, Process: dstProcess})
+	vcLog().Infof("Routing (VCI %d @ [%s]) <-> (VCI %d @ [%s])", srcVCI, srcProcess, dstVCI, dstProcess)
+}
+
+// Endpoint returns the endpoint of the proxy service.  By Dialing a VC to this
+// endpoint, processes can have their services exported through the proxy.
+func (p *Proxy) endpoint() *inaming.Endpoint {
+
+	ep := &inaming.Endpoint{
+		Protocol: p.ln.Addr().Network(),
+		Address:  p.pubAddress,
+		RID:      p.rid,
+	}
+	if prncpl := p.principal; prncpl != nil {
+		for b, _ := range prncpl.BlessingsInfo(prncpl.BlessingStore().Default()) {
+			ep.Blessings = append(ep.Blessings, b)
+		}
+	}
+	return ep
+}
+
+// Shutdown stops the proxy service, closing all network connections.
+func (p *Proxy) shutdown() {
+	stats.Delete(p.statsName)
+	p.ln.Close()
+	p.mu.Lock()
+	processes := p.processes
+	p.processes = nil
+	p.mu.Unlock()
+	for process, _ := range processes {
+		process.Close()
+	}
+}
+
+func (p *process) serverVCsLoop() {
+	for {
+		w, bufs, err := p.bq.Get(nil)
+		if err != nil {
+			return
+		}
+		vci, fid := unpackIDs(w.ID())
+		if vc := p.ServerVC(vci); vc != nil {
+			queueDataMessages(bufs, vc, fid, p.queue)
+			if len(bufs) == 0 {
+				m := &message.Data{VCI: vci, Flow: fid}
+				m.SetClose()
+				p.queue.Put(m)
+				w.Shutdown(true)
+			}
+			continue
+		}
+		releaseBufs(0, bufs)
+	}
+}
+
+func releaseBufs(start int, bufs []*iobuf.Slice) {
+	for _, buf := range bufs[start:] {
+		buf.Release()
+	}
+}
+
+func queueDataMessages(bufs []*iobuf.Slice, vc *vc.VC, fid id.Flow, q *upcqueue.T) {
+	for ix, b := range bufs {
+		m := &message.Data{VCI: vc.VCI(), Flow: fid}
+		var err error
+		if m.Payload, err = vc.Encrypt(fid, b); err != nil {
+			msgLog().Infof("vc.Encrypt failed. VC:%v Flow:%v Error:%v", vc, fid, err)
+			releaseBufs(ix+1, bufs)
+			return
+		}
+		if err = q.Put(m); err != nil {
+			msgLog().Infof("Failed to enqueue data message %v: %v", m, err)
+			m.Release()
+			releaseBufs(ix+1, bufs)
+			return
+		}
+	}
+}
+
+func (p *process) writeLoop() {
+	defer processLog().Infof("Exited writeLoop for %v", p)
+	defer p.Close()
+
+	for {
+		item, err := p.queue.Get(nil)
+		if err != nil {
+			if err != upcqueue.ErrQueueIsClosed {
+				processLog().Infof("upcqueue.Get failed on %v: %v", p, err)
+			}
+			return
+		}
+		if err = message.WriteTo(p.conn, item.(message.T), p.ctrlCipher); err != nil {
+			processLog().Infof("message.WriteTo on %v failed: %v", p, err)
+			return
+		}
+	}
+}
+
+func (p *process) readLoop() {
+	defer processLog().Infof("Exited readLoop for %v", p)
+	defer p.Close()
+
+	for {
+		msg, err := message.ReadFrom(p.reader, p.ctrlCipher)
+		if err != nil {
+			processLog().Infof("Read on %v failed: %v", p, err)
+			return
+		}
+		msgLog().Infof("Received msg: %T = %v", msg, msg)
+		switch m := msg.(type) {
+		case *message.Data:
+			if vc := p.ServerVC(m.VCI); vc != nil {
+				if err := vc.DispatchPayload(m.Flow, m.Payload); err != nil {
+					processLog().Infof("Ignoring data message %v from process %v: %v", m, p, err)
+				}
+				if m.Close() {
+					vc.ShutdownFlow(m.Flow)
+				}
+				break
+			}
+			srcVCI := m.VCI
+			if d := p.Route(srcVCI); d != nil {
+				m.VCI = d.VCI
+				if err := d.Process.queue.Put(m); err != nil {
+					m.Release()
+					p.RemoveRoute(srcVCI)
+					p.SendCloseVC(srcVCI, verror.New(stream.ErrProxy, nil, verror.New(errFailedToFowardDataMsg, nil, err)))
+				}
+				break
+			}
+			p.SendCloseVC(srcVCI, verror.New(stream.ErrProxy, nil, verror.New(errNoRoutingTableEntry, nil)))
+		case *message.OpenFlow:
+			if vc := p.ServerVC(m.VCI); vc != nil {
+				if err := vc.AcceptFlow(m.Flow); err != nil {
+					processLog().Infof("OpenFlow %+v on process %v failed: %v", m, p, err)
+					cm := &message.Data{VCI: m.VCI, Flow: m.Flow}
+					cm.SetClose()
+					p.queue.Put(cm)
+				}
+				vc.ReleaseCounters(m.Flow, m.InitialCounters)
+				break
+			}
+			srcVCI := m.VCI
+			if d := p.Route(srcVCI); d != nil {
+				m.VCI = d.VCI
+				if err := d.Process.queue.Put(m); err != nil {
+					p.RemoveRoute(srcVCI)
+					p.SendCloseVC(srcVCI, verror.New(stream.ErrProxy, nil, verror.New(errFailedToFowardOpenFlow, nil, err)))
+				}
+				break
+			}
+			p.SendCloseVC(srcVCI, verror.New(stream.ErrProxy, nil, verror.New(errNoRoutingTableEntry, nil)))
+		case *message.CloseVC:
+			if vc := p.RemoveServerVC(m.VCI); vc != nil {
+				vc.Close(verror.New(stream.ErrProxy, nil, verror.New(errRemoveServerVC, nil, m.VCI, m.Error)))
+				break
+			}
+			srcVCI := m.VCI
+			if d := p.Route(srcVCI); d != nil {
+				m.VCI = d.VCI
+				d.Process.queue.Put(m)
+				d.Process.RemoveRoute(d.VCI)
+			}
+			p.RemoveRoute(srcVCI)
+		case *message.AddReceiveBuffers:
+			p.proxy.routeCounters(p, m.Counters)
+		case *message.SetupVC:
+			// First let's ensure that we can speak a common protocol verison.
+			intersection, err := iversion.SupportedRange.Intersect(&m.Setup.Versions)
+			if err != nil {
+				p.SendCloseVC(m.VCI, verror.New(stream.ErrProxy, nil,
+					verror.New(errIncompatibleVersions, nil, err)))
+				break
+			}
+
+			dstrid := m.RemoteEndpoint.RoutingID()
+			if naming.Compare(dstrid, p.proxy.rid) || naming.Compare(dstrid, naming.NullRoutingID) {
+				// VC that terminates at the proxy.
+				// See protocol.vdl for details on the protocol between the server and the proxy.
+				vcObj := p.NewServerVC(m)
+				// route counters after creating the VC so counters to vc are not lost.
+				p.proxy.routeCounters(p, m.Counters)
+				if vcObj != nil {
+					server := &server{Process: p, VC: vcObj}
+					keyExchanger := func(pubKey *crypto.BoxKey) (*crypto.BoxKey, error) {
+						p.queue.Put(&message.SetupVC{
+							VCI: m.VCI,
+							Setup: message.Setup{
+								// Note that servers send clients not their actual supported versions,
+								// but the intersected range of the server and client ranges.  This
+								// is important because proxies may have adjusted the version ranges
+								// along the way, and we should negotiate a version that is compatible
+								// with all intermediate hops.
+								Versions: *intersection,
+								Options:  []message.SetupOption{&message.NaclBox{PublicKey: *pubKey}},
+							},
+							RemoteEndpoint: m.LocalEndpoint,
+							LocalEndpoint:  p.proxy.endpoint(),
+							// TODO(mattr): Consider adding counters.  See associated comment
+							// in vc.go:VC.HandshakeAcceptedVC for more details.
+						})
+						var theirPK *crypto.BoxKey
+						box := m.Setup.NaclBox()
+						if box != nil {
+							theirPK = &box.PublicKey
+						}
+						return theirPK, nil
+					}
+					go p.proxy.runServer(server, vcObj.HandshakeAcceptedVC(intersection.Max, p.proxy.principal, p.proxy.blessings, keyExchanger))
+				}
+				break
+			}
+
+			srcVCI := m.VCI
+
+			d := p.Route(srcVCI)
+			if d == nil {
+				// SetupVC involves two messages: One sent by the initiator
+				// and one by the acceptor. The routing table gets setup on
+				// the first message, so if there is no route -
+				// setup a routing table entry.
+				dstprocess := p.proxy.servers.Process(dstrid)
+				if dstprocess == nil {
+					p.SendCloseVC(m.VCI, verror.New(stream.ErrProxy, nil, verror.New(errServerNotBeingProxied, nil, dstrid)))
+					p.proxy.routeCounters(p, m.Counters)
+					break
+				}
+				dstVCI := dstprocess.AllocVCI()
+				startRoutingVC(srcVCI, dstVCI, p, dstprocess)
+				if d = p.Route(srcVCI); d == nil {
+					p.SendCloseVC(srcVCI, verror.New(stream.ErrProxy, nil, verror.New(errServerVanished, nil, dstrid)))
+					p.proxy.routeCounters(p, m.Counters)
+					break
+				}
+			}
+
+			// Forward the SetupVC message.
+			// Typically, a SetupVC message is accompanied with
+			// Counters for the new VC.  Keep that in the forwarded
+			// message and route the remaining counters separately.
+			counters := m.Counters
+			m.Counters = message.NewCounters()
+			dstVCI := d.VCI
+			for cid, bytes := range counters {
+				if cid.VCI() == srcVCI {
+					m.Counters.Add(dstVCI, cid.Flow(), bytes)
+					delete(counters, cid)
+				}
+			}
+			m.VCI = dstVCI
+			// Note that proxies rewrite the version range so that the final negotiated
+			// version will be compatible with all intermediate hops.
+			m.Setup.Versions = *intersection
+			d.Process.queue.Put(m)
+			p.proxy.routeCounters(p, counters)
+
+		default:
+			processLog().Infof("Closing %v because of invalid message %T", p, m)
+			return
+		}
+	}
+}
+
+func (p *process) String() string {
+	r := p.conn.RemoteAddr()
+	return fmt.Sprintf("(%s, %s)", r.Network(), r)
+}
+func (p *process) Route(vci id.VC) *destination {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+	return p.routingTable[vci]
+}
+func (p *process) AddRoute(vci id.VC, d *destination) {
+	p.mu.Lock()
+	p.routingTable[vci] = d
+	p.mu.Unlock()
+}
+func (p *process) InitVCI(vci id.VC) {
+	p.mu.Lock()
+	if p.nextVCI <= vci {
+		p.nextVCI = vci + 1
+	}
+	p.mu.Unlock()
+}
+func (p *process) AllocVCI() id.VC {
+	p.mu.Lock()
+	ret := p.nextVCI
+	p.nextVCI += 2
+	p.mu.Unlock()
+	return ret
+}
+func (p *process) RemoveRoute(vci id.VC) {
+	p.mu.Lock()
+	delete(p.routingTable, vci)
+	p.mu.Unlock()
+}
+func (p *process) SendCloseVC(vci id.VC, err error) {
+	var estr string
+	if err != nil {
+		estr = err.Error()
+	}
+	p.queue.Put(&message.CloseVC{VCI: vci, Error: estr})
+}
+
+func (p *process) Close() {
+	p.mu.Lock()
+	if p.routingTable == nil {
+		p.mu.Unlock()
+		return
+	}
+	rt := p.routingTable
+	p.routingTable = nil
+	for _, vc := range p.servers {
+		vc.Close(verror.New(stream.ErrProxy, nil, verror.New(errNetConnClosing, nil)))
+	}
+	p.mu.Unlock()
+	for _, d := range rt {
+		d.Process.SendCloseVC(d.VCI, verror.New(stream.ErrProxy, nil, verror.New(errProcessVanished, nil)))
+	}
+	p.bq.Close()
+	p.queue.Close()
+	p.conn.Close()
+
+	p.proxy.removeProcess(p)
+}
+
+func (p *process) ServerVC(vci id.VC) *vc.VC {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	return p.servers[vci]
+}
+
+func (p *process) NewServerVC(m *message.SetupVC) *vc.VC {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if vc := p.servers[m.VCI]; vc != nil {
+		vc.Close(verror.New(stream.ErrProxy, nil, verror.New(errDuplicateSetupVC, nil)))
+		return nil
+	}
+	vc := vc.InternalNew(vc.Params{
+		VCI:          m.VCI,
+		LocalEP:      m.RemoteEndpoint,
+		RemoteEP:     m.LocalEndpoint,
+		Pool:         p.pool,
+		ReserveBytes: message.HeaderSizeBytes,
+		Helper:       p,
+	})
+	p.servers[m.VCI] = vc
+	proxyLog().Infof("Registered VC %v from server on process %v", vc, p)
+	return vc
+}
+
+func (p *process) RemoveServerVC(vci id.VC) *vc.VC {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if vc := p.servers[vci]; vc != nil {
+		delete(p.servers, vci)
+		proxyLog().Infof("Unregistered server VC %v from process %v", vc, p)
+		return vc
+	}
+	return nil
+}
+
+// Make process implement vc.Helper
+func (p *process) NotifyOfNewFlow(vci id.VC, fid id.Flow, bytes uint) {
+	msg := &message.OpenFlow{VCI: vci, Flow: fid, InitialCounters: uint32(bytes)}
+	if err := p.queue.Put(msg); err != nil {
+		processLog().Infof("Failed to send OpenFlow(%+v) on process %v: %v", msg, p, err)
+	}
+}
+
+func (p *process) AddReceiveBuffers(vci id.VC, fid id.Flow, bytes uint) {
+	if bytes == 0 {
+		return
+	}
+	msg := &message.AddReceiveBuffers{Counters: message.NewCounters()}
+	msg.Counters.Add(vci, fid, uint32(bytes))
+	if err := p.queue.Put(msg); err != nil {
+		processLog().Infof("Failed to send AddReceiveBuffers(%+v) on process %v: %v", msg, p, err)
+	}
+}
+
+func (p *process) NewWriter(vci id.VC, fid id.Flow, priority bqueue.Priority) (bqueue.Writer, error) {
+	return p.bq.NewWriter(packIDs(vci, fid), priority, vc.DefaultBytesBufferedPerFlow)
+}
+
+// Convenience functions to assist with the logging convention.
+func proxyLog() vlog.InfoLog   { return vlog.VI(1) }
+func processLog() vlog.InfoLog { return vlog.VI(2) }
+func vcLog() vlog.InfoLog      { return vlog.VI(3) }
+func msgLog() vlog.InfoLog     { return vlog.VI(4) }
+func packIDs(vci id.VC, fid id.Flow) bqueue.ID {
+	return bqueue.ID(message.MakeCounterID(vci, fid))
+}
+func unpackIDs(b bqueue.ID) (id.VC, id.Flow) {
+	cid := message.CounterID(b)
+	return cid.VCI(), cid.Flow()
+}
diff --git a/runtime/internal/rpc/stream/proxy/proxy_test.go b/runtime/internal/rpc/stream/proxy/proxy_test.go
new file mode 100644
index 0000000..c325e13
--- /dev/null
+++ b/runtime/internal/rpc/stream/proxy/proxy_test.go
@@ -0,0 +1,509 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy_test
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"testing"
+	"time"
+
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+
+	_ "v.io/x/ref/runtime/factories/generic"
+	inaming "v.io/x/ref/runtime/internal/naming"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/manager"
+	"v.io/x/ref/runtime/internal/rpc/stream/proxy"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+	"v.io/x/ref/runtime/internal/rpc/stream/vif"
+	"v.io/x/ref/test"
+	"v.io/x/ref/test/testutil"
+)
+
+//go:generate v23 test generate
+
+func TestProxy(t *testing.T) {
+	ctx, shutdown := v23Init()
+	defer shutdown()
+
+	_, shutdown, proxyEp, err := proxy.InternalNew(naming.FixedRoutingID(0xbbbbbbbbbbbbbbbb), ctx, security.AllowEveryone())
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer shutdown()
+	principal := testutil.NewPrincipal("test")
+	blessings := principal.BlessingStore().Default()
+
+	// Create the stream.Manager for the server.
+	server1 := manager.InternalNew(naming.FixedRoutingID(0x1111111111111111))
+	defer server1.Shutdown()
+	// Setup a stream.Listener that will accept VCs and Flows routed
+	// through the proxy.
+	ln1, ep1, err := server1.Listen(proxyEp.Network(), proxyEp.String(), principal, blessings)
+	if err != nil {
+		t.Logf(verror.DebugString(err))
+		t.Fatal(err)
+	}
+	defer ln1.Close()
+
+	// Create the stream.Manager for a second server.
+	server2 := manager.InternalNew(naming.FixedRoutingID(0x2222222222222222))
+	defer server2.Shutdown()
+	// Setup a stream.Listener that will accept VCs and Flows routed
+	// through the proxy.
+	ln2, ep2, err := server2.Listen(proxyEp.Network(), proxyEp.String(), principal, blessings)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ln2.Close()
+
+	// Create the stream.Manager for a client.
+	client := manager.InternalNew(naming.FixedRoutingID(0xcccccccccccccccc))
+	defer client.Shutdown()
+
+	cases := []struct {
+		client stream.Manager
+		ln     stream.Listener
+		ep     naming.Endpoint
+	}{
+		{client, ln1, ep1},  // client writing to server1
+		{server1, ln2, ep2}, // server1 writing to server2
+		{server1, ln1, ep1}, // server1 writing to itself
+	}
+
+	const written = "the dough rises"
+	for i, c := range cases {
+		name := fmt.Sprintf("case #%d(write to %v):", i, c.ep)
+		// Accept a single flow and write out what is read to readChan
+		readChan := make(chan string)
+		go readFlow(t, c.ln, readChan)
+		if err := writeFlow(c.client, c.ep, written); err != nil {
+			t.Errorf("%s: %v", name, err)
+			continue
+		}
+		// Validate that the data read is the same as the data written.
+		if read := <-readChan; read != written {
+			t.Errorf("case #%d: Read %q, wrote %q", i, read, written)
+		}
+	}
+}
+
+func TestProxyAuthorization(t *testing.T) {
+	ctx, shutdown := v23Init()
+	defer shutdown()
+
+	_, shutdown, proxyEp, err := proxy.InternalNew(naming.FixedRoutingID(0xbbbbbbbbbbbbbbbb), ctx, testAuth{"alice", "carol"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer shutdown()
+
+	var (
+		alice = testutil.NewPrincipal("alice")
+		bob   = testutil.NewPrincipal("bob")
+		carol = testutil.NewPrincipal("carol")
+		dave  = testutil.NewPrincipal("dave")
+	)
+	// Make the proxy recognize "alice", "bob" and "carol", but not "dave"
+	v23.GetPrincipal(ctx).AddToRoots(alice.BlessingStore().Default())
+	v23.GetPrincipal(ctx).AddToRoots(bob.BlessingStore().Default())
+	v23.GetPrincipal(ctx).AddToRoots(carol.BlessingStore().Default())
+
+	testcases := []struct {
+		p  security.Principal
+		ok bool
+	}{
+		{alice, true}, // passes the auth policy
+		{bob, false},  // recognized, but not included in auth policy
+		{carol, true}, // passes the auth policy
+		{dave, false}, // not recognized, thus doesn't pass the auth policy
+	}
+	for idx, test := range testcases {
+		server := manager.InternalNew(naming.FixedRoutingID(uint64(idx)))
+		_, ep, err := server.Listen(proxyEp.Network(), proxyEp.String(), test.p, test.p.BlessingStore().Default(), proxyAuth{test.p})
+		if (err == nil) != test.ok {
+			t.Errorf("Got ep=%v, err=%v - wanted error:%v", ep, err, !test.ok)
+		}
+		server.Shutdown()
+	}
+}
+
+type proxyAuth struct {
+	p security.Principal
+}
+
+func (proxyAuth) RPCStreamListenerOpt() {}
+func (a proxyAuth) Login(stream.Flow) (security.Blessings, []security.Discharge, error) {
+	return a.p.BlessingStore().Default(), nil, nil
+}
+
+func TestDuplicateRoutingID(t *testing.T) {
+	ctx, shutdown := v23Init()
+	defer shutdown()
+
+	_, shutdown, proxyEp, err := proxy.InternalNew(naming.FixedRoutingID(0xbbbbbbbbbbbbbbbb), ctx, security.AllowEveryone())
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer shutdown()
+
+	// Create the stream.Manager for server1 and server2, both with the same routing ID
+	serverRID := naming.FixedRoutingID(0x5555555555555555)
+	server1 := manager.InternalNew(serverRID)
+	server2 := manager.InternalNew(serverRID)
+	defer server1.Shutdown()
+	defer server2.Shutdown()
+
+	principal := testutil.NewPrincipal("test")
+	blessings := principal.BlessingStore().Default()
+
+	// First server to claim serverRID should win.
+	ln1, ep1, err := server1.Listen(proxyEp.Network(), proxyEp.String(), principal, blessings)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ln1.Close()
+
+	ln2, ep2, err := server2.Listen(proxyEp.Network(), proxyEp.String(), principal, blessings)
+	if pattern := "routing id 00000000000000005555555555555555 is already being proxied"; err == nil || !strings.Contains(err.Error(), pattern) {
+		t.Errorf("Got (%v, %v, %v) want error \"...%v\" (ep1:%v)", ln2, ep2, err, pattern, ep1)
+	}
+}
+
+func TestProxyAuthentication(t *testing.T) {
+	ctx, shutdown := v23Init()
+	defer shutdown()
+
+	pproxy := v23.GetPrincipal(ctx)
+	_, shutdown, proxyEp, err := proxy.InternalNew(naming.FixedRoutingID(0xbbbbbbbbbbbbbbbb), ctx, security.AllowEveryone())
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer shutdown()
+	if got, want := proxyEp.BlessingNames(), []string{"proxy"}; !reflect.DeepEqual(got, want) {
+		t.Errorf("Proxy endpoint blessing names: got %v, want %v", got, want)
+	}
+
+	other := manager.InternalNew(naming.FixedRoutingID(0xcccccccccccccccc))
+	defer other.Shutdown()
+
+	vc, err := other.Dial(proxyEp, testutil.NewPrincipal("other"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	flow, err := vc.Connect()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := flow.RemoteBlessings(), pproxy.BlessingStore().Default(); !reflect.DeepEqual(got, want) {
+		t.Errorf("Proxy authenticated as [%v], want [%v]", got, want)
+	}
+}
+
+func TestServerBlessings(t *testing.T) {
+	ctx, shutdown := v23Init()
+	defer shutdown()
+
+	var (
+		pserver = testutil.NewPrincipal("server")
+		pclient = testutil.NewPrincipal("client")
+	)
+
+	_, shutdown, proxyEp, err := proxy.InternalNew(naming.FixedRoutingID(0xbbbbbbbbbbbbbbbb), ctx, security.AllowEveryone())
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer shutdown()
+	if got, want := proxyEp.BlessingNames(), []string{"proxy"}; !reflect.DeepEqual(got, want) {
+		t.Errorf("Proxy endpoint blessing names: got %v, want %v", got, want)
+	}
+
+	server := manager.InternalNew(naming.FixedRoutingID(0x5555555555555555))
+	defer server.Shutdown()
+
+	ln, ep, err := server.Listen(proxyEp.Network(), proxyEp.String(), pserver, pserver.BlessingStore().Default())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := ep.BlessingNames(), []string{"server"}; !reflect.DeepEqual(got, want) {
+		t.Errorf("Server endpoint %q: Got BlessingNames %v, want %v", ep, got, want)
+	}
+	defer ln.Close()
+	go func() {
+		for {
+			if _, err := ln.Accept(); err != nil {
+				return
+			}
+		}
+	}()
+
+	client := manager.InternalNew(naming.FixedRoutingID(0xcccccccccccccccc))
+	defer client.Shutdown()
+	vc, err := client.Dial(ep, pclient)
+	if err != nil {
+		t.Fatal(err)
+	}
+	flow, err := vc.Connect()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, want := flow.RemoteBlessings(), pserver.BlessingStore().Default(); !reflect.DeepEqual(got, want) {
+		t.Errorf("Got [%v] want [%v]", got, want)
+	}
+}
+
+func TestHostPort(t *testing.T) {
+	ctx, shutdown := v23Init()
+	defer shutdown()
+
+	_, shutdown, proxyEp, err := proxy.InternalNew(naming.FixedRoutingID(0xbbbbbbbbbbbbbbbb), ctx, security.AllowEveryone())
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer shutdown()
+	server := manager.InternalNew(naming.FixedRoutingID(0x5555555555555555))
+	defer server.Shutdown()
+	addr := proxyEp.Addr().String()
+	port := addr[strings.LastIndex(addr, ":"):]
+	principal := testutil.NewPrincipal("test")
+	blessings := principal.BlessingStore().Default()
+	ln, _, err := server.Listen(inaming.Network, "127.0.0.1"+port, principal, blessings)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ln.Close()
+}
+
+func TestClientBecomesServer(t *testing.T) {
+	ctx, shutdown := v23Init()
+	defer shutdown()
+
+	_, shutdown, proxyEp, err := proxy.InternalNew(naming.FixedRoutingID(0xbbbbbbbbbbbbbbbb), ctx, security.AllowEveryone())
+	if err != nil {
+		t.Fatal(err)
+	}
+	server := manager.InternalNew(naming.FixedRoutingID(0x5555555555555555))
+	client1 := manager.InternalNew(naming.FixedRoutingID(0x1111111111111111))
+	client2 := manager.InternalNew(naming.FixedRoutingID(0x2222222222222222))
+	defer shutdown()
+	defer server.Shutdown()
+	defer client1.Shutdown()
+	defer client2.Shutdown()
+
+	principal := testutil.NewPrincipal("test")
+	blessings := principal.BlessingStore().Default()
+	lnS, epS, err := server.Listen(proxyEp.Network(), proxyEp.String(), principal, blessings)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer lnS.Close()
+	rchan := make(chan string)
+
+	pclient1 := testutil.NewPrincipal("client1")
+
+	// client1 must connect to the proxy to speak to the server.
+	// Keep a VC and Flow open to the server, to ensure that the proxy
+	// maintains routing information (at some point, inactive VIFs
+	// should be garbage collected, so this ensures that the VIF
+	// is "active")
+	if vc, err := client1.Dial(epS, pclient1); err != nil {
+		t.Fatal(err)
+	} else if flow, err := vc.Connect(); err != nil {
+		t.Fatal(err)
+	} else {
+		defer flow.Close()
+	}
+
+	// Now client1 becomes a server
+	lnC, epC, err := client1.Listen(proxyEp.Network(), proxyEp.String(), pclient1, pclient1.BlessingStore().Default())
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer lnC.Close()
+	// client2 should be able to talk to client1 through the proxy
+	rchan = make(chan string)
+	go readFlow(t, lnC, rchan)
+	if err := writeFlow(client2, epC, "daffy duck"); err != nil {
+		t.Fatalf("client2 failed to chat with client1: %v", err)
+	}
+	if got, want := <-rchan, "daffy duck"; got != want {
+		t.Fatalf("client2->client1 got %q want %q", got, want)
+	}
+}
+
+func testProxyIdleTimeout(t *testing.T, testServer bool) {
+	ctx, shutdown := v23Init()
+	defer shutdown()
+
+	const (
+		idleTime = 10 * time.Millisecond
+		// We use a long wait time here since it takes some time to handle VC close
+		// especially in race testing.
+		waitTime = 150 * time.Millisecond
+	)
+
+	var (
+		pserver = testutil.NewPrincipal("server")
+		pclient = testutil.NewPrincipal("client")
+
+		opts  []stream.VCOpt
+		lopts []stream.ListenerOpt
+	)
+	if testServer {
+		lopts = []stream.ListenerOpt{vc.IdleTimeout{idleTime}}
+	} else {
+		opts = []stream.VCOpt{vc.IdleTimeout{idleTime}}
+	}
+
+	// Pause the idle timers.
+	triggerTimers := vif.SetFakeTimers()
+
+	Proxy, shutdown, proxyEp, err := proxy.InternalNew(naming.FixedRoutingID(0xbbbbbbbbbbbbbbbb), ctx, security.AllowEveryone())
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer shutdown()
+
+	// Create the stream.Manager for the server.
+	server := manager.InternalNew(naming.FixedRoutingID(0x1111111111111111))
+	defer server.Shutdown()
+	// Setup a stream.Listener that will accept VCs and Flows routed
+	// through the proxy.
+	ln, ep, err := server.Listen(proxyEp.Network(), proxyEp.String(), pserver, pserver.BlessingStore().Default(), lopts...)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ln.Close()
+	go func() {
+		for {
+			if _, err := ln.Accept(); err != nil {
+				return
+			}
+		}
+	}()
+
+	// Create the stream.Manager for a client.
+	client := manager.InternalNew(naming.FixedRoutingID(0xcccccccccccccccc))
+	defer client.Shutdown()
+
+	// Open a VC and a Flow.
+	VC, err := client.Dial(ep, pclient, opts...)
+	if err != nil {
+		t.Fatal(err)
+	}
+	flow, err := VC.Connect()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Trigger the idle timers.
+	triggerTimers()
+
+	if numProcs := proxy.NumProcesses(Proxy); numProcs != 2 {
+		// There should be two processes at this point.
+		t.Fatal(fmt.Errorf("Unexpected number of processes: %d\n", numProcs))
+	}
+
+	// There is one active flow. The VC should be kept open.
+	time.Sleep(waitTime)
+	if numProcs := proxy.NumProcesses(Proxy); numProcs != 2 {
+		t.Errorf("Want VC is kept open; closed")
+	}
+
+	flow.Close()
+
+	// The flow has been closed. The VC should be closed after idle timeout.
+	for range time.Tick(idleTime) {
+		if proxy.NumProcesses(Proxy) == 1 {
+			break
+		}
+	}
+
+	client.ShutdownEndpoint(ep)
+
+	// Even when the idle timeout is set for VC in server, we still should be
+	// able to dial to the server through the proxy, since one VC between the
+	// server and the proxy should be kept alive as the proxy protocol.
+	//
+	// We use fake timers here again to avoid idle timeout during dialing.
+	defer vif.SetFakeTimers()()
+	if _, err := client.Dial(ep, pclient, opts...); err != nil {
+		t.Errorf("Want to dial to the server; can't dial: %v", err)
+	}
+}
+
+func TestProxyIdleTimeout(t *testing.T)       { testProxyIdleTimeout(t, false) }
+func TestProxyIdleTimeoutServer(t *testing.T) { testProxyIdleTimeout(t, true) }
+
+func writeFlow(mgr stream.Manager, ep naming.Endpoint, data string) error {
+	vc, err := mgr.Dial(ep, testutil.NewPrincipal("test"))
+	if err != nil {
+		return fmt.Errorf("manager.Dial(%v) failed: %v", ep, err)
+	}
+	flow, err := vc.Connect()
+	if err != nil {
+		return fmt.Errorf("vc.Connect failed: %v", err)
+	}
+	defer flow.Close()
+	if _, err := flow.Write([]byte(data)); err != nil {
+		return fmt.Errorf("flow.Write failed: %v", err)
+	}
+	return nil
+}
+
+func readFlow(t *testing.T, ln stream.Listener, read chan<- string) {
+	defer close(read)
+	flow, err := ln.Accept()
+	if err != nil {
+		t.Error(err)
+		return
+	}
+	var tmp [1024]byte
+	var buf bytes.Buffer
+	for {
+		n, err := flow.Read(tmp[:])
+		if err == io.EOF {
+			read <- buf.String()
+			return
+		}
+		if err != nil {
+			t.Error(err)
+			return
+		}
+		buf.Write(tmp[:n])
+	}
+}
+
+func v23Init() (*context.T, func()) {
+	ctx, shutdown := test.InitForTest()
+	ctx, err := v23.WithPrincipal(ctx, testutil.NewPrincipal("proxy"))
+	if err != nil {
+		panic(err)
+	}
+	return ctx, shutdown
+}
+
+type testAuth []string
+
+func (l testAuth) Authorize(ctx *context.T, call security.Call) error {
+	remote, rejected := security.RemoteBlessingNames(ctx, call)
+	for _, n := range remote {
+		for _, a := range l {
+			if n == a {
+				return nil
+			}
+		}
+	}
+	return fmt.Errorf("%v not in authorized set of %v (rejected: %v)", remote, l, rejected)
+}
diff --git a/runtime/internal/rpc/stream/proxy/testutil_test.go b/runtime/internal/rpc/stream/proxy/testutil_test.go
new file mode 100644
index 0000000..727b8a5
--- /dev/null
+++ b/runtime/internal/rpc/stream/proxy/testutil_test.go
@@ -0,0 +1,28 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proxy
+
+import (
+	"v.io/v23"
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/security"
+)
+
+// These are the internal functions only for use in the proxy_test package.
+
+func InternalNew(rid naming.RoutingID, ctx *context.T, auth security.Authorizer) (*Proxy, func(), naming.Endpoint, error) {
+	proxy, err := internalNew(rid, ctx, v23.GetListenSpec(ctx), auth)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	return proxy, proxy.shutdown, proxy.endpoint(), err
+}
+
+func NumProcesses(proxy *Proxy) int {
+	proxy.mu.Lock()
+	defer proxy.mu.Unlock()
+	return len(proxy.processes)
+}
diff --git a/runtime/internal/rpc/stream/proxy/v23_internal_test.go b/runtime/internal/rpc/stream/proxy/v23_internal_test.go
new file mode 100644
index 0000000..84bea54
--- /dev/null
+++ b/runtime/internal/rpc/stream/proxy/v23_internal_test.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated via go generate.
+// DO NOT UPDATE MANUALLY
+package proxy
+
+import "testing"
+import "os"
+
+import "v.io/x/ref/test"
+
+func TestMain(m *testing.M) {
+	test.Init()
+	os.Exit(m.Run())
+}
diff --git a/runtime/internal/rpc/stream/vc/auth.go b/runtime/internal/rpc/stream/vc/auth.go
new file mode 100644
index 0000000..ff9ec89
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/auth.go
@@ -0,0 +1,165 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+import (
+	"bytes"
+	"io"
+
+	"v.io/v23/rpc/version"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+	"v.io/v23/vom"
+
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/crypto"
+)
+
+var (
+	authServerContextTag = []byte("VCauthS\x00")
+	authClientContextTag = []byte("VCauthC\x00")
+)
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errVomEncodeBlessing            = reg(".errVomEncodeRequest", "failed to encode blessing{:3}")
+	errHandshakeMessage             = reg(".errHandshakeMessage", "failed to read hanshake message{:3}")
+	errInvalidSignatureInMessage    = reg(".errInvalidSignatureInMessage", "signature does not verify in authentication handshake message")
+	errFailedToCreateSelfBlessing   = reg(".errFailedToCreateSelfBlessing", "failed to create self blessing{:3}")
+	errNoBlessingsToPresentToServer = reg(".errerrNoBlessingsToPresentToServer ", "no blessings to present as a server")
+)
+
+// AuthenticateAsServer executes the authentication protocol at the server.
+// It returns the blessings shared by the client, and the discharges shared
+// by the server.
+func AuthenticateAsServer(conn io.ReadWriteCloser, principal security.Principal, server security.Blessings, dc DischargeClient, crypter crypto.Crypter, v version.RPCVersion) (security.Blessings, map[string]security.Discharge, error) {
+	if server.IsZero() {
+		return security.Blessings{}, nil, verror.New(stream.ErrSecurity, nil, verror.New(errNoBlessingsToPresentToServer, nil))
+	}
+	var serverDischarges []security.Discharge
+	if tpcavs := server.ThirdPartyCaveats(); len(tpcavs) > 0 && dc != nil {
+		serverDischarges = dc.PrepareDischarges(nil, tpcavs, security.DischargeImpetus{})
+	}
+	if err := writeBlessings(conn, authServerContextTag, crypter, principal, server, serverDischarges, v); err != nil {
+		return security.Blessings{}, nil, err
+	}
+	// Note that since the client uses a self-signed blessing to authenticate
+	// during VC setup, it does not share any discharges.
+	client, _, err := readBlessings(conn, authClientContextTag, crypter, v)
+	if err != nil {
+		return security.Blessings{}, nil, err
+	}
+	return client, mkDischargeMap(serverDischarges), nil
+}
+
+// AuthenticateAsClient executes the authentication protocol at the client.
+// It returns the blessing shared by the server, the blessings shared by the
+// client, and any discharges shared by the server.
+//
+// The client will only share its blessings if the server (who shares its
+// blessings first) is authorized as per the authorizer for this RPC.
+func AuthenticateAsClient(conn io.ReadWriteCloser, crypter crypto.Crypter, params security.CallParams, auth *ServerAuthorizer, v version.RPCVersion) (security.Blessings, security.Blessings, map[string]security.Discharge, error) {
+	server, serverDischarges, err := readBlessings(conn, authServerContextTag, crypter, v)
+	if err != nil {
+		return security.Blessings{}, security.Blessings{}, nil, err
+	}
+	// Authorize the server based on the provided authorizer.
+	if auth != nil {
+		params.RemoteBlessings = server
+		params.RemoteDischarges = serverDischarges
+		if err := auth.Authorize(params); err != nil {
+			return security.Blessings{}, security.Blessings{}, nil, verror.New(stream.ErrNotTrusted, nil, err)
+		}
+	}
+
+	// The client shares its blessings at RPC time (as the blessings may vary
+	// across RPCs). During VC handshake, the client simply sends a self-signed
+	// blessing in order to reveal its public key to the server.
+	principal := params.LocalPrincipal
+	client, err := principal.BlessSelf("vcauth")
+	if err != nil {
+		return security.Blessings{}, security.Blessings{}, nil, verror.New(stream.ErrSecurity, nil, verror.New(errFailedToCreateSelfBlessing, nil, err))
+	}
+	if err := writeBlessings(conn, authClientContextTag, crypter, principal, client, nil, v); err != nil {
+		return security.Blessings{}, security.Blessings{}, nil, err
+	}
+	return server, client, serverDischarges, nil
+}
+
+func writeBlessings(w io.Writer, tag []byte, crypter crypto.Crypter, p security.Principal, b security.Blessings, discharges []security.Discharge, v version.RPCVersion) error {
+	signature, err := p.Sign(append(tag, crypter.ChannelBinding()...))
+	if err != nil {
+		return err
+	}
+	var buf bytes.Buffer
+	enc := vom.NewEncoder(&buf)
+	if err := enc.Encode(signature); err != nil {
+		return verror.New(stream.ErrNetwork, nil, verror.New(errVomEncodeBlessing, nil, err))
+	}
+	if err := enc.Encode(b); err != nil {
+		return verror.New(stream.ErrNetwork, nil, verror.New(errVomEncodeBlessing, nil, err))
+	}
+	if err := enc.Encode(discharges); err != nil {
+		return verror.New(stream.ErrNetwork, nil, verror.New(errVomEncodeBlessing, nil, err))
+	}
+	msg, err := crypter.Encrypt(iobuf.NewSlice(buf.Bytes()))
+	if err != nil {
+		return err
+	}
+	defer msg.Release()
+	enc = vom.NewEncoder(w)
+	if err := enc.Encode(msg.Contents); err != nil {
+		return verror.New(stream.ErrNetwork, nil, verror.New(errVomEncodeBlessing, nil, err))
+	}
+	return nil
+}
+
+func readBlessings(r io.Reader, tag []byte, crypter crypto.Crypter, v version.RPCVersion) (security.Blessings, map[string]security.Discharge, error) {
+	var msg []byte
+	var noBlessings security.Blessings
+	dec := vom.NewDecoder(r)
+	if err := dec.Decode(&msg); err != nil {
+		return noBlessings, nil, verror.New(stream.ErrNetwork, nil, verror.New(errHandshakeMessage, nil, err))
+	}
+	buf, err := crypter.Decrypt(iobuf.NewSlice(msg))
+	if err != nil {
+		return noBlessings, nil, err
+	}
+	defer buf.Release()
+	dec = vom.NewDecoder(bytes.NewReader(buf.Contents))
+	var (
+		blessings security.Blessings
+		sig       security.Signature
+	)
+	if err = dec.Decode(&sig); err != nil {
+		return noBlessings, nil, verror.New(stream.ErrNetwork, nil, err)
+	}
+	if err = dec.Decode(&blessings); err != nil {
+		return noBlessings, nil, verror.New(stream.ErrNetwork, nil, err)
+	}
+	var discharges []security.Discharge
+	if err := dec.Decode(&discharges); err != nil {
+		return noBlessings, nil, verror.New(stream.ErrNetwork, nil, err)
+	}
+	if !sig.Verify(blessings.PublicKey(), append(tag, crypter.ChannelBinding()...)) {
+		return noBlessings, nil, verror.New(stream.ErrSecurity, nil, verror.New(errInvalidSignatureInMessage, nil))
+	}
+	return blessings, mkDischargeMap(discharges), nil
+}
+
+func mkDischargeMap(discharges []security.Discharge) map[string]security.Discharge {
+	if len(discharges) == 0 {
+		return nil
+	}
+	m := make(map[string]security.Discharge, len(discharges))
+	for _, d := range discharges {
+		m[d.ID()] = d
+	}
+	return m
+}
diff --git a/runtime/internal/rpc/stream/vc/data_cache.go b/runtime/internal/rpc/stream/vc/data_cache.go
new file mode 100644
index 0000000..6c5b56c
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/data_cache.go
@@ -0,0 +1,65 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+import (
+	"sync"
+)
+
+// dataCache is a thread-safe map for any two types.
+type dataCache struct {
+	sync.RWMutex
+	m map[interface{}]interface{}
+}
+
+func newDataCache() *dataCache {
+	return &dataCache{m: make(map[interface{}]interface{})}
+}
+
+// Get returns the value stored under the key.
+func (c *dataCache) Get(key interface{}) interface{} {
+	c.RLock()
+	value, _ := c.m[key]
+	c.RUnlock()
+	return value
+}
+
+// Insert the given key and value into the cache if and only if the given key
+// did not already exist in the cache. Returns true if the key-value pair was
+// inserted; otherwise returns false.
+func (c *dataCache) Insert(key interface{}, value interface{}) bool {
+	c.Lock()
+	defer c.Unlock()
+	if _, exists := c.m[key]; exists {
+		return false
+	}
+	c.m[key] = value
+	return true
+}
+
+// GetOrInsert first checks if the key exists in the cache with a reader lock.
+// If it doesn't exist, it instead acquires a writer lock, creates and stores the new value
+// with create and returns value.
+func (c *dataCache) GetOrInsert(key interface{}, create func() interface{}) interface{} {
+	// We use the read lock for the fastpath. This should be the more common case, so we rarely
+	// need a writer lock.
+	c.RLock()
+	value, exists := c.m[key]
+	c.RUnlock()
+	if exists {
+		return value
+	}
+	// We acquire the writer lock for the slowpath, and need to re-check if the key exists
+	// in the map, since other thread may have snuck in.
+	c.Lock()
+	defer c.Unlock()
+	value, exists = c.m[key]
+	if exists {
+		return value
+	}
+	value = create()
+	c.m[key] = value
+	return value
+}
diff --git a/runtime/internal/rpc/stream/vc/doc.go b/runtime/internal/rpc/stream/vc/doc.go
new file mode 100644
index 0000000..62c34df
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package vc provides implementations of the VC and Flow interfaces in v.io/x/ref/runtime/internal/rpc/stream.
+package vc
diff --git a/runtime/internal/rpc/stream/vc/flow.go b/runtime/internal/rpc/stream/vc/flow.go
new file mode 100644
index 0000000..d5f2d2f
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/flow.go
@@ -0,0 +1,58 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+import (
+	"v.io/v23/naming"
+	"v.io/v23/security"
+
+	"v.io/x/ref/runtime/internal/rpc/stream"
+)
+
+type flow struct {
+	backingVC
+	*reader
+	*writer
+}
+
+type backingVC interface {
+	LocalEndpoint() naming.Endpoint
+	RemoteEndpoint() naming.Endpoint
+
+	LocalPrincipal() security.Principal
+	LocalBlessings() security.Blessings
+	RemoteBlessings() security.Blessings
+	LocalDischarges() map[string]security.Discharge
+	RemoteDischarges() map[string]security.Discharge
+
+	VCDataCache() stream.VCDataCache
+}
+
+func (f *flow) Close() error {
+	f.reader.Close()
+	f.writer.Close()
+	return nil
+}
+
+// SetDeadline sets a deadline channel on the flow.  Reads and writes
+// will be cancelled if the channel is closed.
+func (f *flow) SetDeadline(deadline <-chan struct{}) {
+	f.reader.SetDeadline(deadline)
+	f.writer.SetDeadline(deadline)
+}
+
+// Shutdown closes the flow and discards any queued up write buffers.
+// This is appropriate when the flow has been closed by the remote end.
+func (f *flow) Shutdown() {
+	f.reader.Close()
+	f.writer.shutdown(true)
+}
+
+// Cancel closes the flow and discards any queued up write buffers.
+// This is appropriate when the flow is being cancelled locally.
+func (f *flow) Cancel() {
+	f.reader.Close()
+	f.writer.shutdown(false)
+}
diff --git a/runtime/internal/rpc/stream/vc/knobs.go b/runtime/internal/rpc/stream/vc/knobs.go
new file mode 100644
index 0000000..7271f7a
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/knobs.go
@@ -0,0 +1,35 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+const (
+	// Maximum size (in bytes) of application data to write out in a single message.
+	MaxPayloadSizeBytes = 1 << 16 // 64KB
+
+	// Number of bytes that a receiver is willing to buffer for a flow.
+	DefaultBytesBufferedPerFlow = 1 << 20 // 1MB
+
+	// Maximum number of bytes to steal from the shared pool of receive
+	// buffers for the first write of a new Flow.
+	MaxSharedBytes = 1 << 12 // 4KB
+
+	// Number of VC IDs reserved for special use.
+	NumReservedVCs = 10
+
+	// Number of Flow IDs reserved for possible future use.
+	NumReservedFlows = 10
+
+	// Special Flow ID used for information specific to the VC
+	// (and not any specific flow)
+	SharedFlowID = 0
+
+	// Special flow used for authenticating between VCs.
+	AuthFlowID = 2
+	// Special flow used for interchanging of VOM types between VCs.
+	TypeFlowID = 3
+	// Special flow over which discharges for third-party caveats
+	// on the server's blessings are sent.
+	DischargeFlowID = 4
+)
diff --git a/runtime/internal/rpc/stream/vc/listener.go b/runtime/internal/rpc/stream/vc/listener.go
new file mode 100644
index 0000000..72479ac
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/listener.go
@@ -0,0 +1,53 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+import (
+	"v.io/v23/verror"
+
+	"v.io/x/ref/runtime/internal/lib/upcqueue"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+)
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errListenerClosed = reg(".errListenerClosed", "Listener has been closed")
+	errGetFromQueue   = reg(".errGetFromQueue", "upcqueue.Get failed{:3}")
+)
+
+type listener struct {
+	q *upcqueue.T
+}
+
+var _ stream.Listener = (*listener)(nil)
+
+func newListener() *listener { return &listener{q: upcqueue.New()} }
+
+func (l *listener) Enqueue(f stream.Flow) error {
+	err := l.q.Put(f)
+	if err == upcqueue.ErrQueueIsClosed {
+		return verror.New(stream.ErrBadState, nil, verror.New(errListenerClosed, nil))
+	}
+	return err
+}
+
+func (l *listener) Accept() (stream.Flow, error) {
+	item, err := l.q.Get(nil)
+	if err == upcqueue.ErrQueueIsClosed {
+		return nil, verror.New(stream.ErrBadState, nil, verror.New(errListenerClosed, nil))
+	}
+	if err != nil {
+		return nil, verror.New(stream.ErrNetwork, nil, verror.New(errGetFromQueue, nil, err))
+	}
+	return item.(stream.Flow), nil
+}
+
+func (l *listener) Close() error {
+	l.q.Close()
+	return nil
+}
diff --git a/runtime/internal/rpc/stream/vc/listener_test.go b/runtime/internal/rpc/stream/vc/listener_test.go
new file mode 100644
index 0000000..4dbf4ad
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/listener_test.go
@@ -0,0 +1,68 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+import (
+	"strings"
+	"testing"
+
+	"v.io/v23/naming"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+
+	"v.io/x/ref/runtime/internal/rpc/stream"
+)
+
+type noopFlow struct{}
+
+// net.Conn methods
+func (*noopFlow) Read([]byte) (int, error)        { return 0, nil }
+func (*noopFlow) Write([]byte) (int, error)       { return 0, nil }
+func (*noopFlow) Close() error                    { return nil }
+func (*noopFlow) IsClosed() bool                  { return false }
+func (*noopFlow) Closed() <-chan struct{}         { return nil }
+func (*noopFlow) Cancel()                         {}
+func (*noopFlow) LocalEndpoint() naming.Endpoint  { return nil }
+func (*noopFlow) RemoteEndpoint() naming.Endpoint { return nil }
+
+// Other stream.Flow methods
+func (*noopFlow) LocalPrincipal() security.Principal              { return nil }
+func (*noopFlow) LocalBlessings() security.Blessings              { return security.Blessings{} }
+func (*noopFlow) RemoteBlessings() security.Blessings             { return security.Blessings{} }
+func (*noopFlow) LocalDischarges() map[string]security.Discharge  { return nil }
+func (*noopFlow) RemoteDischarges() map[string]security.Discharge { return nil }
+func (*noopFlow) SetDeadline(<-chan struct{})                     {}
+func (*noopFlow) VCDataCache() stream.VCDataCache                 { return nil }
+
+func TestListener(t *testing.T) {
+	ln := newListener()
+	f1, f2 := &noopFlow{}, &noopFlow{}
+
+	if err := ln.Enqueue(f1); err != nil {
+		t.Error(err)
+	}
+	if err := ln.Enqueue(f2); err != nil {
+		t.Error(err)
+	}
+	if f, err := ln.Accept(); f != f1 || err != nil {
+		t.Errorf("Got (%v, %v) want (%v, nil)", f, err, f1)
+	}
+	if f, err := ln.Accept(); f != f2 || err != nil {
+		t.Errorf("Got (%v, %v) want (%v, nil)", f, err, f2)
+	}
+	if err := ln.Close(); err != nil {
+		t.Error(err)
+	}
+	// Close-ing multiple times is fine.
+	if err := ln.Close(); err != nil {
+		t.Error(err)
+	}
+	if err := ln.Enqueue(f1); verror.ErrorID(err) != stream.ErrBadState.ID || !strings.Contains(err.Error(), "closed") {
+		t.Error(err)
+	}
+	if f, err := ln.Accept(); f != nil || verror.ErrorID(err) != stream.ErrBadState.ID || !strings.Contains(err.Error(), "closed") {
+		t.Errorf("Accept returned (%v, %v) wanted (nil, %v)", f, err, errListenerClosed)
+	}
+}
diff --git a/runtime/internal/rpc/stream/vc/reader.go b/runtime/internal/rpc/stream/vc/reader.go
new file mode 100644
index 0000000..80f1c9b
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/reader.go
@@ -0,0 +1,115 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+import (
+	"io"
+	"sync"
+	"sync/atomic"
+
+	"v.io/v23/verror"
+
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	vsync "v.io/x/ref/runtime/internal/lib/sync"
+	"v.io/x/ref/runtime/internal/lib/upcqueue"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+)
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errGetFailed = reg(".errGetFailed", "upcqueue.Get failed:{:3}")
+)
+
+// readHandler is the interface used by the reader to notify other components
+// of the number of bytes returned in Read calls.
+type readHandler interface {
+	HandleRead(bytes uint)
+}
+
+// reader implements the io.Reader and SetReadDeadline interfaces for a Flow,
+// backed by iobuf.Slice objects read from a upcqueue.
+type reader struct {
+	handler    readHandler
+	src        *upcqueue.T
+	mu         sync.Mutex
+	buf        *iobuf.Slice    // GUARDED_BY(mu)
+	deadline   <-chan struct{} // GUARDED_BY(mu)
+	totalBytes uint32
+}
+
+func newReader(h readHandler) *reader {
+	return &reader{handler: h, src: upcqueue.New()}
+}
+
+func (r *reader) Close() {
+	r.src.Close()
+}
+
+func (r *reader) Read(b []byte) (int, error) {
+	// net.Conn requires that all methods be invokable by multiple
+	// goroutines simultaneously. Read calls are serialized to ensure
+	// contiguous chunks of data are provided from each Read call.
+	r.mu.Lock()
+	n, err := r.readLocked(b)
+	r.mu.Unlock()
+	atomic.AddUint32(&r.totalBytes, uint32(n))
+	if n > 0 {
+		r.handler.HandleRead(uint(n))
+	}
+	return n, err
+}
+
+func (r *reader) readLocked(b []byte) (int, error) {
+	if r.buf == nil {
+		slice, err := r.src.Get(r.deadline)
+		if err != nil {
+			switch err {
+			case upcqueue.ErrQueueIsClosed:
+				return 0, io.EOF
+			case vsync.ErrCanceled:
+				// As per net.Conn.Read specification
+				return 0, stream.NewNetError(verror.New(stream.ErrNetwork, nil, verror.New(errCanceled, nil)), true, false)
+			default:
+				return 0, verror.New(stream.ErrNetwork, nil, verror.New(errGetFailed, nil, err))
+			}
+		}
+		r.buf = slice.(*iobuf.Slice)
+	}
+	copied := 0
+	for r.buf.Size() <= len(b) {
+		n := copy(b, r.buf.Contents)
+		copied += n
+		b = b[n:]
+		r.buf.Release()
+		r.buf = nil
+
+		slice, err := r.src.TryGet()
+		if err != nil {
+			return copied, nil
+		}
+		r.buf = slice.(*iobuf.Slice)
+	}
+	n := copy(b, r.buf.Contents)
+	r.buf.TruncateFront(uint(n))
+	copied += n
+	return copied, nil
+}
+
+func (r *reader) SetDeadline(deadline <-chan struct{}) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.deadline = deadline
+}
+
+func (r *reader) BytesRead() uint32 {
+	return atomic.LoadUint32(&r.totalBytes)
+}
+
+func (r *reader) Put(slice *iobuf.Slice) error {
+	return r.src.Put(slice)
+}
diff --git a/runtime/internal/rpc/stream/vc/reader_test.go b/runtime/internal/rpc/stream/vc/reader_test.go
new file mode 100644
index 0000000..7fac16f
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/reader_test.go
@@ -0,0 +1,112 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+import (
+	"io"
+	"net"
+	"reflect"
+	"testing"
+	"testing/quick"
+
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+)
+
+type testReadHandler struct{ items []uint }
+
+func (t *testReadHandler) HandleRead(bytes uint) {
+	t.items = append(t.items, bytes)
+}
+
+func TestRead(t *testing.T) {
+	l := &testReadHandler{}
+	r := newReader(l)
+	input := []byte("abcdefghijklmnopqrstuvwxyzABCDE") // 31 bytes total
+	start := 0
+	// Produce data to read, adding elements to the underlying upcqueue
+	// with a geometric progression of 2.
+	for n := 1; start < len(input); n *= 2 {
+		if err := r.Put(iobuf.NewSlice(input[start : start+n])); err != nil {
+			t.Fatalf("Put(start=%d, n=%d) failed: %v", start, n, err)
+		}
+		start = start + n
+	}
+
+	var output [31]byte
+	start = 0
+	// Read with geometric progression of 1/2.
+	for n := 16; start < len(output); n /= 2 {
+		if m, err := r.Read(output[start : start+n]); err != nil || m != n {
+			t.Errorf("Read returned (%d, %v) want (%d, nil)", m, err, n)
+		}
+		if m := l.items[len(l.items)-1]; m != uint(n) {
+			t.Errorf("Read notified %d but should have notified %d bytes", m, n)
+		}
+		start = start + n
+	}
+	if got, want := string(output[:]), string(input); got != want {
+		t.Errorf("Got %q want %q", got, want)
+	}
+
+	r.Close()
+	if n, err := r.Read(output[:]); n != 0 || err != io.EOF {
+		t.Errorf("Got (%d, %v) want (0, nil)", n, err)
+	}
+}
+
+func TestReadRandom(t *testing.T) {
+	f := func(data [][]byte) bool {
+		r := newReader(&testReadHandler{})
+		// Use an empty slice (as opposed to a nil-slice) so that the
+		// reflect.DeepEqual call below succeeds when data is
+		// [][]byte{}.
+		written := make([]byte, 0)
+		for _, d := range data {
+			if err := r.Put(iobuf.NewSlice(d)); err != nil {
+				t.Error(err)
+				return false
+			}
+			written = append(written, d...)
+		}
+		read := make([]byte, len(written))
+		buf := read
+		r.Close()
+		for {
+			n, err := r.Read(buf)
+			if err == io.EOF {
+				break
+			}
+			if err != nil {
+				t.Error(err)
+				return false
+			}
+			buf = buf[n:]
+		}
+		return reflect.DeepEqual(written, read) && int(r.BytesRead()) == len(written)
+	}
+	if err := quick.Check(f, nil); err != nil {
+		t.Error(err)
+	}
+}
+
+func TestReadDeadline(t *testing.T) {
+	l := &testReadHandler{}
+	r := newReader(l)
+	defer r.Close()
+
+	deadline := make(chan struct{}, 0)
+	r.SetDeadline(deadline)
+	close(deadline)
+
+	var buf [1]byte
+	n, err := r.Read(buf[:])
+	neterr, ok := err.(net.Error)
+	if n != 0 || err == nil || !ok || !neterr.Timeout() {
+		t.Errorf("Expected read to fail with net.Error.Timeout, got (%d, %v)", n, err)
+	}
+	if len(l.items) != 0 {
+		t.Errorf("Expected no reads, but notified of reads: %v", l.items)
+	}
+}
diff --git a/runtime/internal/rpc/stream/vc/v23_internal_test.go b/runtime/internal/rpc/stream/vc/v23_internal_test.go
new file mode 100644
index 0000000..945d8c4
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/v23_internal_test.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated via go generate.
+// DO NOT UPDATE MANUALLY
+package vc
+
+import "testing"
+import "os"
+
+import "v.io/x/ref/test"
+
+func TestMain(m *testing.M) {
+	test.Init()
+	os.Exit(m.Run())
+}
diff --git a/runtime/internal/rpc/stream/vc/vc.go b/runtime/internal/rpc/stream/vc/vc.go
new file mode 100644
index 0000000..06f2c58
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/vc.go
@@ -0,0 +1,1006 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+// Logging guidelines:
+// Verbosity level 1 is for per-VC messages.
+// Verbosity level 2 is for per-Flow messages.
+
+import (
+	"fmt"
+	"io"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/rpc/version"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+	"v.io/v23/vom"
+
+	"v.io/x/lib/vlog"
+	"v.io/x/ref/runtime/internal/lib/bqueue"
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	vsync "v.io/x/ref/runtime/internal/lib/sync"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/crypto"
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+)
+
+const pkgPath = "v.io/x/ref/runtime/internal/rpc/stream/vc"
+
+func reg(id, msg string) verror.IDAction {
+	return verror.Register(verror.ID(pkgPath+id), verror.NoRetry, msg)
+}
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errAlreadyListening               = reg(".errAlreadyListening", "Listen has already been called")
+	errDuplicateFlow                  = reg(".errDuplicateFlow", "duplicate OpenFlow message")
+	errUnrecognizedFlow               = reg(".errUnrecognizedFlow", "unrecognized flow")
+	errFailedToCreateWriterForFlow    = reg(".errFailedToCreateWriterForFlow", "failed to create writer for Flow{:3}")
+	errConnectOnClosedVC              = reg(".errConnectOnClosedVC", "connect on closed VC{:3}")
+	errHandshakeNotInProgress         = reg(".errHandshakeNotInProgress", "Attempted to finish a VC handshake, but no handshake was in progress{:3}")
+	errClosedDuringHandshake          = reg(".errCloseDuringHandshake", "VC closed during handshake{:3}")
+	errFailedToDecryptPayload         = reg(".errFailedToDecryptPayload", "failed to decrypt payload{:3}")
+	errIgnoringMessageOnClosedVC      = reg(".errIgnoringMessageOnClosedVC", "ignoring message for Flow {3} on closed VC {4}")
+	errFailedToCreateFlowForAuth      = reg(".errFailedToCreateFlowForAuth", "failed to create a Flow for authentication{:3}")
+	errAuthFlowNotAccepted            = reg(".errAuthFlowNotAccepted", "authentication Flow not accepted{:3}")
+	errFailedToCreateFlowForWireType  = reg(".errFailedToCreateFlowForWireType", "fail to create a Flow for wire type{:3}")
+	errFlowForWireTypeNotAccepted     = reg(".errFlowForWireTypeNotAccepted", "Flow for wire type not accepted{:3}")
+	errFailedToCreateFlowForDischarge = reg(".errFailedToCreateFlowForDischarge", "fail to create a Flow for discharge{:3}")
+	errFlowForDischargeNotAccepted    = reg(".errFlowForDischargesNotAccepted", "Flow for discharge not accepted{:3}")
+	errFailedToSetupEncryption        = reg(".errFailedToSetupEncryption", "failed to setup channel encryption{:3}")
+	errAuthFailed                     = reg(".errAuthFailed", "authentication failed{:3}")
+	errNoActiveListener               = reg(".errNoActiveListener", "no active listener on VCI {3}")
+	errFailedToCreateWriterForNewFlow = reg(".errFailedToCreateWriterForNewFlow", "failed to create writer for new flow({3}){:4}")
+	errFailedToEnqueueFlow            = reg(".errFailedToEnqueueFlow", "failed to enqueue flow at listener{:3}")
+	errFailedToAcceptSystemFlows      = reg(".errFailedToAcceptSystemFlows", "failed to accept system flows{:3}")
+)
+
+// DischargeExpiryBuffer specifies how much before discharge expiration we should
+// refresh discharges.
+// Discharges will be refreshed DischargeExpiryBuffer before they expire.
+type DischargeExpiryBuffer time.Duration
+
+func (DischargeExpiryBuffer) RPCStreamListenerOpt() {}
+func (DischargeExpiryBuffer) RPCServerOpt() {
+	defer vlog.LogCall()() // AUTO-GENERATED, DO NOT EDIT, MUST BE FIRST STATEMENT
+}
+
+const DefaultServerDischargeExpiryBuffer = 20 * time.Second
+
+// DataCache Keys for TypeEncoder/Decoder.
+type TypeEncoderKey struct{}
+type TypeDecoderKey struct{}
+
+// VC implements the stream.VC interface and exports additional methods to
+// manage Flows.
+//
+// stream.Flow objects created by this stream.VC implementation use a buffer
+// queue (v.io/x/ref/runtime/internal/lib/bqueue) to provide flow control on Write
+// operations.
+type VC struct {
+	vci                             id.VC
+	localEP, remoteEP               naming.Endpoint
+	localPrincipal                  security.Principal
+	localBlessings, remoteBlessings security.Blessings
+	localDischarges                 map[string]security.Discharge // Discharges shared by the local end of the VC.
+	remoteDischarges                map[string]security.Discharge // Discharges shared by the remote end of the VC.
+
+	pool           *iobuf.Pool
+	reserveBytes   uint
+	sharedCounters *vsync.Semaphore
+
+	mu                  sync.Mutex
+	flowMap             map[id.Flow]*flow // nil iff the VC is closed.
+	acceptHandshakeDone chan struct{}     // non-nil when HandshakeAcceptedVC begins the handshake, closed when handshake completes.
+	nextConnectFID      id.Flow
+	listener            *listener // non-nil iff Listen has been called and the VC has not been closed.
+	crypter             crypto.Crypter
+	closeReason         error // reason why the VC was closed, possibly nil
+	closeCh             chan struct{}
+	closed              bool
+	version             version.RPCVersion
+	remotePubKeyChan    chan *crypto.BoxKey // channel which will receive the remote public key during setup.
+
+	helper    Helper
+	dataCache *dataCache // dataCache contains information that can shared between Flows from this VC.
+}
+
+// ServerAuthorizer encapsulates the policy used to authorize servers during VC
+// establishment.
+//
+// A client will first authorize a server before revealing any of its credentials
+// (public key, blessings etc.) to the server. Thus, if the authorization policy
+// calls for the server to be rejected, then the client will not have revealed
+// any of its credentials to the server.
+//
+// ServerAuthorizer in turn uses an authorization policy (security.Authorizer),
+// with the context matching the context of the RPC that caused the initiation
+// of the VC.
+type ServerAuthorizer struct {
+	Suffix, Method string
+	Policy         security.Authorizer
+}
+
+func (a *ServerAuthorizer) RPCStreamVCOpt() {}
+func (a *ServerAuthorizer) Authorize(params security.CallParams) error {
+	params.Suffix = a.Suffix
+	params.Method = a.Method
+	ctx, cancel := context.RootContext()
+	defer cancel()
+	return a.Policy.Authorize(ctx, security.NewCall(&params))
+}
+
+// DialContext establishes the context under which a VC Dial was initiated.
+type DialContext struct{ *context.T }
+
+func (DialContext) RPCStreamVCOpt()       {}
+func (DialContext) RPCStreamListenerOpt() {}
+
+// StartTimeout specifies the time after which the underlying VIF is closed
+// if no VC is opened.
+type StartTimeout struct{ time.Duration }
+
+func (StartTimeout) RPCStreamVCOpt()       {}
+func (StartTimeout) RPCStreamListenerOpt() {}
+
+// IdleTimeout specifies the time after which an idle VC is closed.
+type IdleTimeout struct{ time.Duration }
+
+func (IdleTimeout) RPCStreamVCOpt()       {}
+func (IdleTimeout) RPCStreamListenerOpt() {}
+
+var _ stream.VC = (*VC)(nil)
+
+// Helper is the interface for functionality required by the stream.VC
+// implementation in this package.
+type Helper interface {
+	// NotifyOfNewFlow notifies the remote end of a VC that the caller intends to
+	// establish a new flow to it and that the caller is ready to receive bytes
+	// data from the remote end.
+	NotifyOfNewFlow(vci id.VC, fid id.Flow, bytes uint)
+
+	// AddReceiveBuffers notifies the remote end of a VC that it is read to receive
+	// bytes more data on the flow identified by fid over the VC identified by vci.
+	//
+	// Unlike NotifyOfNewFlow, this call does not let the remote end know of the
+	// intent to establish a new flow.
+	AddReceiveBuffers(vci id.VC, fid id.Flow, bytes uint)
+
+	// NewWriter creates a buffer queue for Write operations on the
+	// stream.Flow implementation.
+	NewWriter(vci id.VC, fid id.Flow, priority bqueue.Priority) (bqueue.Writer, error)
+}
+
+// Priorities of flows.
+const (
+	systemFlowPriority bqueue.Priority = iota
+	normalFlowPriority
+
+	NumFlowPriorities
+)
+
+// DischargeClient is an interface for obtaining discharges for a set of third-party
+// caveats.
+//
+// TODO(ataly, ashankar): What should be the impetus for obtaining the discharges?
+type DischargeClient interface {
+	PrepareDischarges(ctx *context.T, forcaveats []security.Caveat, impetus security.DischargeImpetus) []security.Discharge
+	// Invalidate marks the provided discharges as invalid, and therefore unfit
+	// for being returned by a subsequent PrepareDischarges call.
+	Invalidate(discharges ...security.Discharge)
+	RPCStreamListenerOpt()
+}
+
+// Params encapsulates the set of parameters needed to create a new VC.
+type Params struct {
+	VCI          id.VC           // Identifier of the VC
+	Dialed       bool            // True if the VC was initiated by the local process.
+	LocalEP      naming.Endpoint // Endpoint of the local end of the VC.
+	RemoteEP     naming.Endpoint // Endpoint of the remote end of the VC.
+	Pool         *iobuf.Pool     // Byte pool used for read and write buffer allocations.
+	ReserveBytes uint            // Number of padding bytes to reserve for headers.
+	Helper       Helper
+}
+
+// InternalNew creates a new VC, which implements the stream.VC interface.
+//
+// As the name suggests, this method is intended for use only within packages
+// placed inside v.io/x/ref/runtime/internal. Code outside the
+// v.io/x/ref/runtime/internal/* packages should never call this method.
+func InternalNew(p Params) *VC {
+	fidOffset := 1
+	if p.Dialed {
+		fidOffset = 0
+	}
+	return &VC{
+		vci:            p.VCI,
+		localEP:        p.LocalEP,
+		remoteEP:       p.RemoteEP,
+		pool:           p.Pool,
+		reserveBytes:   p.ReserveBytes,
+		sharedCounters: vsync.NewSemaphore(),
+		flowMap:        make(map[id.Flow]*flow),
+		// Reserve flow IDs 0 thru NumReservedFlows for
+		// possible future use.
+		// Furthermore, flows created by Connect have an even
+		// id if the VC was initiated by the local process,
+		// and have an odd id if the VC was initiated by the
+		// remote process.
+		nextConnectFID: id.Flow(NumReservedFlows + fidOffset),
+		crypter:        crypto.NewNullCrypter(),
+		closeCh:        make(chan struct{}),
+		helper:         p.Helper,
+		dataCache:      newDataCache(),
+	}
+}
+
+// Connect implements the stream.Connector.Connect method.
+func (vc *VC) Connect(opts ...stream.FlowOpt) (stream.Flow, error) {
+	return vc.connectFID(vc.allocFID(), normalFlowPriority, opts...)
+}
+
+func (vc *VC) Version() version.RPCVersion {
+	vc.mu.Lock()
+	defer vc.mu.Unlock()
+	return vc.version
+}
+
+func (vc *VC) connectFID(fid id.Flow, priority bqueue.Priority, opts ...stream.FlowOpt) (stream.Flow, error) {
+	writer, err := vc.newWriter(fid, priority)
+	if err != nil {
+		return nil, verror.New(stream.ErrNetwork, nil, verror.New(errFailedToCreateWriterForFlow, nil, err))
+	}
+	f := &flow{
+		backingVC: vc,
+		reader:    newReader(readHandlerImpl{vc, fid}),
+		writer:    writer,
+	}
+	vc.mu.Lock()
+	if vc.flowMap == nil {
+		vc.mu.Unlock()
+		f.Shutdown()
+		return nil, verror.New(stream.ErrNetwork, nil, verror.New(errConnectOnClosedVC, nil, vc.closeReason))
+	}
+	vc.flowMap[fid] = f
+	vc.mu.Unlock()
+	// New flow created, inform remote end that data can be received on it.
+	vc.helper.NotifyOfNewFlow(vc.vci, fid, DefaultBytesBufferedPerFlow)
+	return f, nil
+}
+
+// Listen implements the stream.VC.Listen method.
+func (vc *VC) Listen() (stream.Listener, error) {
+	vc.mu.Lock()
+	defer vc.mu.Unlock()
+	if vc.listener != nil {
+		return nil, verror.New(stream.ErrBadState, nil, verror.New(errAlreadyListening, nil))
+	}
+	vc.listener = newListener()
+	return vc.listener, nil
+}
+
+// DispatchPayload makes payload.Contents available to Read operations on the
+// Flow identified by fid.
+//
+// Assumes ownership of payload, i.e., payload should not be used by the caller
+// after this method returns (irrespective of the return value).
+func (vc *VC) DispatchPayload(fid id.Flow, payload *iobuf.Slice) error {
+	if payload.Size() == 0 {
+		payload.Release()
+		return nil
+	}
+	vc.mu.Lock()
+	if vc.flowMap == nil {
+		vc.mu.Unlock()
+		payload.Release()
+		return verror.New(stream.ErrNetwork, nil, verror.New(errIgnoringMessageOnClosedVC, nil, fid, vc.VCI()))
+	}
+	// Authentication is done by encrypting/decrypting its payload by itself,
+	// so we do not go through with the decryption for auth flow.
+	if fid != AuthFlowID {
+		vc.waitForHandshakeLocked()
+		var err error
+		if payload, err = vc.crypter.Decrypt(payload); err != nil {
+			vc.mu.Unlock()
+			return verror.New(stream.ErrSecurity, nil, verror.New(errFailedToDecryptPayload, nil, err))
+		}
+	}
+	if payload.Size() == 0 {
+		vc.mu.Unlock()
+		payload.Release()
+		return nil
+	}
+	f := vc.flowMap[fid]
+	if f == nil {
+		vc.mu.Unlock()
+		payload.Release()
+		return verror.New(stream.ErrNetwork, nil, verror.New(errDuplicateFlow, nil))
+	}
+	vc.mu.Unlock()
+	if err := f.reader.Put(payload); err != nil {
+		payload.Release()
+		return verror.New(stream.ErrNetwork, nil, err)
+	}
+	return nil
+}
+
+// AcceptFlow enqueues a new Flow for acceptance by the listener on the VC.
+// Returns an error if the VC is not accepting flows initiated by the remote
+// end.
+func (vc *VC) AcceptFlow(fid id.Flow) error {
+	vc.mu.Lock()
+	defer vc.mu.Unlock()
+	if vc.listener == nil {
+		return verror.New(stream.ErrBadState, nil, vc.vci)
+	}
+	if _, exists := vc.flowMap[fid]; exists {
+		return verror.New(stream.ErrNetwork, nil, verror.New(errDuplicateFlow, nil))
+	}
+	priority := normalFlowPriority
+	// We use the same high priority for all reserved flows including handshake and
+	// authentication flows. This is because client may open a new system flow before
+	// authentication finishes in server side and then vc.DispatchPayload() can be
+	// stuck in waiting for authentication to finish.
+	if fid < NumReservedFlows {
+		priority = systemFlowPriority
+	}
+	writer, err := vc.newWriter(fid, priority)
+	if err != nil {
+		return verror.New(stream.ErrNetwork, nil, verror.New(errFailedToCreateWriterForNewFlow, nil, fid, err))
+	}
+	f := &flow{
+		backingVC: vc,
+		reader:    newReader(readHandlerImpl{vc, fid}),
+		writer:    writer,
+	}
+	if err = vc.listener.Enqueue(f); err != nil {
+		f.Shutdown()
+		return verror.New(stream.ErrNetwork, nil, verror.New(errFailedToEnqueueFlow, nil, err))
+	}
+	vc.flowMap[fid] = f
+	// New flow accepted, notify remote end that it can send over data.
+	// Do it in a goroutine in case the implementation of AddReceiveBuffers
+	// ends up attempting to lock vc.mu
+	go vc.helper.AddReceiveBuffers(vc.vci, fid, DefaultBytesBufferedPerFlow)
+	vlog.VI(2).Infof("Added flow %d@%d to listener", fid, vc.vci)
+	return nil
+}
+
+// ShutdownFlow closes the Flow identified by fid and discards any pending
+// writes.
+func (vc *VC) ShutdownFlow(fid id.Flow) {
+	vc.mu.Lock()
+	f := vc.flowMap[fid]
+	if f == nil {
+		vc.mu.Unlock()
+		return
+	}
+	delete(vc.flowMap, fid)
+	vc.mu.Unlock()
+	f.Shutdown()
+	vlog.VI(2).Infof("Shutdown flow %d@%d", fid, vc.vci)
+}
+
+// ReleaseCounters informs the Flow (identified by fid) that the remote end is
+// ready to receive up to 'bytes' more bytes of data.
+func (vc *VC) ReleaseCounters(fid id.Flow, bytes uint32) {
+	if fid == SharedFlowID {
+		vc.sharedCounters.IncN(uint(bytes))
+		return
+	}
+	var f *flow
+	vc.mu.Lock()
+	if vc.flowMap != nil {
+		f = vc.flowMap[fid]
+	}
+	vc.mu.Unlock()
+	if f == nil {
+		vlog.VI(2).Infof("Ignoring ReleaseCounters(%d, %d) on VCI %d as the flow does not exist", fid, bytes, vc.vci)
+		return
+	}
+	f.Release(int(bytes))
+}
+
+func (vc *VC) Close(reason error) error {
+	vlog.VI(1).Infof("Closing VC %v. Reason:%q", vc, reason)
+	vc.mu.Lock()
+	if vc.closed {
+		vc.mu.Unlock()
+		return nil
+	}
+	flows := vc.flowMap
+	vc.flowMap = nil
+	if vc.listener != nil {
+		vc.listener.Close()
+		vc.listener = nil
+	}
+	vc.closeReason = reason
+	vc.closed = true
+	close(vc.closeCh)
+	vc.mu.Unlock()
+
+	vc.sharedCounters.Close()
+	for fid, flow := range flows {
+		vlog.VI(2).Infof("Closing flow %d on VC %v as VC is being closed(%q)", fid, vc, reason)
+		flow.Close()
+	}
+	return nil
+}
+
+// appendCloseReason adds a closeReason, if any, as a sub error to err.
+func (vc *VC) appendCloseReason(err error) error {
+	vc.mu.Lock()
+	defer vc.mu.Unlock()
+	if vc.closeReason != nil {
+		return verror.AddSubErrs(err, nil, verror.SubErr{
+			Name:    "remote=" + vc.RemoteEndpoint().String(),
+			Err:     vc.closeReason,
+			Options: verror.Print,
+		})
+	}
+	return err
+}
+
+// FinishHandshakeDialedVC should be called from another goroutine
+// after HandshakeDialedVC is called, when version/encryption
+// negotiation is complete.
+func (vc *VC) FinishHandshakeDialedVC(vers version.RPCVersion, remotePubKey *crypto.BoxKey) error {
+	vc.mu.Lock()
+	defer vc.mu.Unlock()
+	if vc.remotePubKeyChan == nil {
+		return verror.New(errHandshakeNotInProgress, nil, vc.VCI)
+	}
+	vc.remotePubKeyChan <- remotePubKey
+	vc.remotePubKeyChan = nil
+	vc.version = vers
+	return nil
+}
+
+// HandshakeDialedVC completes initialization of the VC (setting up encryption,
+// authentication etc.) under the assumption that the VC was initiated by the
+// local process (i.e., the local process "Dial"ed to create the VC).
+// HandshakeDialedVC will not return until FinishHandshakeDialedVC is called
+// from another goroutine.
+// TODO(mattr): vers can be removed as a parameter when we get rid of TLS and
+// the version is always obtained via the Setup call.
+func (vc *VC) HandshakeDialedVC(principal security.Principal, sendKey func(*crypto.BoxKey) error, opts ...stream.VCOpt) error {
+	remotePubKeyChan := make(chan *crypto.BoxKey, 1)
+	vc.mu.Lock()
+	vc.remotePubKeyChan = remotePubKeyChan
+	vc.mu.Unlock()
+
+	// principal = nil means that we are running in SecurityNone and we don't need
+	// to authenticate the VC.  We still need to negotiate versioning information,
+	// so we still set remotePubKeyChan and call sendKey, though we don't care about
+	// the resulting key.
+	if principal == nil {
+		if err := sendKey(nil); err != nil {
+			return err
+		}
+		// TODO(mattr): Error on some timeout so a non-responding server doesn't
+		// cause this to hang forever.
+		select {
+		case <-remotePubKeyChan:
+		case <-vc.closeCh:
+			return verror.New(stream.ErrNetwork, nil, verror.New(errClosedDuringHandshake, nil, vc.VCI))
+		}
+		return nil
+	}
+
+	var auth *ServerAuthorizer
+	for _, o := range opts {
+		switch v := o.(type) {
+		case *ServerAuthorizer:
+			auth = v
+		}
+	}
+
+	exchange := func(pubKey *crypto.BoxKey) (*crypto.BoxKey, error) {
+		if err := sendKey(pubKey); err != nil {
+			return nil, err
+		}
+		// TODO(mattr): Error on some timeout so a non-responding server doesn't
+		// cause this to hang forever.
+		select {
+		case theirKey := <-remotePubKeyChan:
+			return theirKey, nil
+		case <-vc.closeCh:
+			return nil, verror.New(stream.ErrNetwork, nil, verror.New(errClosedDuringHandshake, nil, vc.VCI))
+		}
+	}
+	crypter, err := crypto.NewBoxCrypter(exchange, vc.pool)
+	if err != nil {
+		return vc.appendCloseReason(verror.New(stream.ErrSecurity, nil,
+			verror.New(errFailedToSetupEncryption, nil, err)))
+	}
+	// The version is set by FinishHandshakeDialedVC and exchange (called by
+	// NewBoxCrypter) will block until FinishHandshakeDialedVC is called, so we
+	// should look up the version now.
+	vers := vc.Version()
+
+	// Authenticate (exchange identities)
+	authConn, err := vc.connectFID(AuthFlowID, systemFlowPriority)
+	if err != nil {
+		return vc.appendCloseReason(verror.New(stream.ErrSecurity, nil, verror.New(errFailedToCreateFlowForAuth, nil, err)))
+	}
+	params := security.CallParams{
+		LocalPrincipal: principal,
+		LocalEndpoint:  vc.localEP,
+		RemoteEndpoint: vc.remoteEP,
+	}
+
+	rBlessings, lBlessings, rDischarges, err := AuthenticateAsClient(authConn, crypter, params, auth, vers)
+	if err != nil || len(rBlessings.ThirdPartyCaveats()) == 0 {
+		authConn.Close()
+		if err != nil {
+			return vc.appendCloseReason(err)
+		}
+	} else if vers < version.RPCVersion10 {
+		go vc.recvDischargesLoop(authConn)
+	}
+
+	vc.mu.Lock()
+	vc.crypter = crypter
+	vc.localPrincipal = principal
+	vc.localBlessings = lBlessings
+	vc.remoteBlessings = rBlessings
+	vc.remoteDischarges = rDischarges
+	vc.mu.Unlock()
+
+	// Open system flows.
+	if err = vc.connectSystemFlows(); err != nil {
+		return vc.appendCloseReason(err)
+	}
+
+	vlog.VI(1).Infof("Client VC %v authenticated. RemoteBlessings:%v, LocalBlessings:%v",
+		vc, rBlessings, lBlessings)
+	return nil
+}
+
+// HandshakeResult is sent by HandshakeAcceptedVC over the channel returned by it.
+type HandshakeResult struct {
+	Listener stream.Listener // Listener for accepting new Flows on the VC.
+	Error    error           // Error, if any, during the handshake.
+}
+
+// HandshakeAcceptedVC completes initialization of the VC (setting up
+// encryption, authentication etc.) under the assumption that the VC was
+// initiated by a remote process (and the local process wishes to "accept" it).
+//
+// Since the handshaking process might involve several round trips, a bulk of the work
+// is done asynchronously and the result of the handshake is written to the
+// channel returned by this method.
+//
+// 'principal' is the principal used by the server used during authentication.
+// If principal is nil, then the VC expects to be used for unauthenticated, unencrypted communication.
+// 'lBlessings' is presented to the client during authentication.
+func (vc *VC) HandshakeAcceptedVC(
+	vers version.RPCVersion,
+	principal security.Principal,
+	lBlessings security.Blessings,
+	exchange crypto.BoxKeyExchanger,
+	opts ...stream.ListenerOpt) <-chan HandshakeResult {
+	result := make(chan HandshakeResult, 1)
+	finish := func(ln stream.Listener, err error) chan HandshakeResult {
+		result <- HandshakeResult{ln, err}
+		return result
+	}
+	var (
+		dischargeClient       DischargeClient
+		dischargeExpiryBuffer = DefaultServerDischargeExpiryBuffer
+	)
+	for _, o := range opts {
+		switch v := o.(type) {
+		case DischargeClient:
+			dischargeClient = v
+		case DischargeExpiryBuffer:
+			dischargeExpiryBuffer = time.Duration(v)
+		}
+	}
+	// If the listener was setup asynchronously, there is a race between
+	// the listener being setup and the caller of this method trying to
+	// dispatch messages, thus it is setup synchronously.
+	ln, err := vc.Listen()
+	if err != nil {
+		return finish(nil, err)
+	}
+	// TODO(mattr): We could instead send counters in the return SetupVC message
+	// and avoid this extra message.  It probably doesn't make much difference
+	// so for now I'll leave it.  May be a nice cleanup after we are always
+	// using SetupVC.
+	vc.helper.AddReceiveBuffers(vc.VCI(), SharedFlowID, DefaultBytesBufferedPerFlow)
+
+	// principal == nil means that we are running in SecurityNone and we don't need
+	// to authenticate the VC.
+	if principal == nil {
+		go func() {
+			_, err = exchange(nil)
+			result <- HandshakeResult{ln, err}
+		}()
+		return result
+	}
+
+	var crypter crypto.Crypter
+
+	go func() {
+		sendErr := func(err error) {
+			ln.Close()
+			result <- HandshakeResult{nil, vc.appendCloseReason(err)}
+		}
+
+		vc.mu.Lock()
+		vc.acceptHandshakeDone = make(chan struct{})
+		vc.mu.Unlock()
+
+		crypter, err = crypto.NewBoxCrypter(exchange, vc.pool)
+		if err != nil {
+			sendErr(verror.New(stream.ErrSecurity, nil, verror.New(errFailedToSetupEncryption, nil, err)))
+			return
+		}
+
+		// Authenticate (exchange identities)
+		authConn, err := ln.Accept()
+		if err != nil {
+			sendErr(verror.New(stream.ErrNetwork, nil, verror.New(errAuthFlowNotAccepted, nil, err)))
+			return
+		}
+		if vc.findFlow(authConn) != AuthFlowID {
+			// This should have been an auth flow.  We can't establish security so send
+			// an error.
+			sendErr(verror.New(stream.ErrSecurity, nil, verror.New(errFailedToCreateFlowForAuth, nil, err)))
+			return
+		}
+
+		rBlessings, lDischarges, err := AuthenticateAsServer(authConn, principal, lBlessings, dischargeClient, crypter, vers)
+		if err != nil {
+			authConn.Close()
+			sendErr(verror.New(stream.ErrSecurity, nil, verror.New(errAuthFailed, nil, err)))
+			return
+		}
+
+		vc.mu.Lock()
+		vc.version = vers
+		vc.crypter = crypter
+		vc.localPrincipal = principal
+		vc.localBlessings = lBlessings
+		vc.remoteBlessings = rBlessings
+		vc.localDischarges = lDischarges
+		close(vc.acceptHandshakeDone)
+		vc.acceptHandshakeDone = nil
+		vc.mu.Unlock()
+
+		if len(lBlessings.ThirdPartyCaveats()) > 0 && vers < version.RPCVersion10 {
+			go vc.sendDischargesLoop(authConn, dischargeClient, lBlessings.ThirdPartyCaveats(), dischargeExpiryBuffer)
+		} else {
+			authConn.Close()
+		}
+
+		// Accept system flows.
+		if err = vc.acceptSystemFlows(ln, dischargeClient, dischargeExpiryBuffer); err != nil {
+			sendErr(verror.New(stream.ErrNetwork, nil, verror.New(errFailedToAcceptSystemFlows, nil, err)))
+		}
+
+		vlog.VI(1).Infof("Server VC %v authenticated. RemoteBlessings:%v, LocalBlessings:%v", vc, rBlessings, lBlessings)
+		result <- HandshakeResult{ln, nil}
+	}()
+	return result
+}
+
+func (vc *VC) sendDischargesLoop(conn io.WriteCloser, dc DischargeClient, tpCavs []security.Caveat, dischargeExpiryBuffer time.Duration) {
+	defer conn.Close()
+	if dc == nil {
+		return
+	}
+	enc := vom.NewEncoder(conn)
+	discharges := dc.PrepareDischarges(nil, tpCavs, security.DischargeImpetus{})
+	for expiry := minExpiryTime(discharges, tpCavs); !expiry.IsZero(); expiry = minExpiryTime(discharges, tpCavs) {
+		select {
+		case <-time.After(fetchDuration(expiry, dischargeExpiryBuffer)):
+			discharges = dc.PrepareDischarges(nil, tpCavs, security.DischargeImpetus{})
+			if err := enc.Encode(discharges); err != nil {
+				vlog.Errorf("encoding discharges on VC %v failed: %v", vc, err)
+				return
+			}
+			if len(discharges) == 0 {
+				continue
+			}
+			vc.mu.Lock()
+			if vc.localDischarges == nil {
+				vc.localDischarges = make(map[string]security.Discharge)
+			}
+			for _, d := range discharges {
+				vc.localDischarges[d.ID()] = d
+			}
+			vc.mu.Unlock()
+		case <-vc.closeCh:
+			vlog.VI(3).Infof("closing sendDischargesLoop on VC %v", vc)
+			return
+		}
+	}
+}
+
+func fetchDuration(expiry time.Time, buffer time.Duration) time.Duration {
+	// Fetch the discharge earlier than the actual expiry to factor in for clock
+	// skew and RPC time.
+	return expiry.Sub(time.Now().Add(buffer))
+}
+
+func minExpiryTime(discharges []security.Discharge, tpCavs []security.Caveat) time.Time {
+	var min time.Time
+	// If some discharges were not fetched, retry again in a minute.
+	if len(discharges) < len(tpCavs) {
+		min = time.Now().Add(time.Minute)
+	}
+	for _, d := range discharges {
+		if exp := d.Expiry(); min.IsZero() || (!exp.IsZero() && exp.Before(min)) {
+			min = exp
+		}
+	}
+	return min
+}
+
+func (vc *VC) recvDischargesLoop(conn io.ReadCloser) {
+	defer conn.Close()
+	dec := vom.NewDecoder(conn)
+	for {
+		var discharges []security.Discharge
+		if err := dec.Decode(&discharges); err != nil {
+			vlog.VI(3).Infof("decoding discharges on %v failed: %v", vc, err)
+			return
+		}
+		if len(discharges) == 0 {
+			continue
+		}
+		vc.mu.Lock()
+		if vc.remoteDischarges == nil {
+			vc.remoteDischarges = make(map[string]security.Discharge)
+		}
+		for _, d := range discharges {
+			vc.remoteDischarges[d.ID()] = d
+		}
+		vc.mu.Unlock()
+	}
+}
+
+func (vc *VC) connectSystemFlows() error {
+	conn, err := vc.connectFID(TypeFlowID, systemFlowPriority)
+	if err != nil {
+		return verror.New(stream.ErrSecurity, nil, verror.New(errFailedToCreateFlowForWireType, nil, err))
+	}
+	vc.dataCache.Insert(TypeEncoderKey{}, vom.NewTypeEncoder(conn))
+	vc.dataCache.Insert(TypeDecoderKey{}, vom.NewTypeDecoder(conn))
+
+	if vc.Version() < version.RPCVersion10 {
+		return nil
+	}
+
+	vc.mu.Lock()
+	rBlessings := vc.remoteBlessings
+	vc.mu.Unlock()
+	if len(rBlessings.ThirdPartyCaveats()) > 0 {
+		conn, err = vc.connectFID(DischargeFlowID, systemFlowPriority)
+		if err != nil {
+			return verror.New(stream.ErrSecurity, nil, verror.New(errFailedToCreateFlowForDischarge, nil, err))
+		}
+		go vc.recvDischargesLoop(conn)
+	}
+
+	return nil
+}
+
+func (vc *VC) acceptSystemFlows(ln stream.Listener, dischargeClient DischargeClient, dischargeExpiryBuffer time.Duration) error {
+	conn, err := ln.Accept()
+	if err != nil {
+		return verror.New(errFlowForWireTypeNotAccepted, nil, err)
+	}
+	vc.dataCache.Insert(TypeEncoderKey{}, vom.NewTypeEncoder(conn))
+	vc.dataCache.Insert(TypeDecoderKey{}, vom.NewTypeDecoder(conn))
+
+	if vc.Version() < version.RPCVersion10 {
+		return nil
+	}
+
+	vc.mu.Lock()
+	lBlessings := vc.localBlessings
+	vc.mu.Unlock()
+	tpCaveats := lBlessings.ThirdPartyCaveats()
+	if len(tpCaveats) > 0 {
+		conn, err := ln.Accept()
+		if err != nil {
+			return verror.New(errFlowForDischargeNotAccepted, nil, err)
+		}
+		go vc.sendDischargesLoop(conn, dischargeClient, tpCaveats, dischargeExpiryBuffer)
+	}
+
+	return nil
+}
+
+// Encrypt uses the VC's encryption scheme to encrypt the provided data payload.
+// Always takes ownership of plaintext.
+func (vc *VC) Encrypt(fid id.Flow, plaintext *iobuf.Slice) (cipherslice *iobuf.Slice, err error) {
+	if plaintext == nil {
+		return nil, nil
+	}
+	vc.mu.Lock()
+	if fid == AuthFlowID {
+		cipherslice = plaintext
+	} else {
+		cipherslice, err = vc.crypter.Encrypt(plaintext)
+	}
+	vc.mu.Unlock()
+	return
+}
+
+func (vc *VC) allocFID() id.Flow {
+	vc.mu.Lock()
+	ret := vc.nextConnectFID
+	vc.nextConnectFID += 2
+	vc.mu.Unlock()
+	return ret
+}
+
+// findFlow finds the flow id for the provided flow.
+// Returns 0 if there is none.
+func (vc *VC) findFlow(flow interface{}) id.Flow {
+	vc.mu.Lock()
+	defer vc.mu.Unlock()
+
+	const invalidFlowID = 0
+	// This operation is rare and early enough (called when there are <= 2
+	// flows over the VC) that iteration to the map should be fine.
+	for fid, f := range vc.flowMap {
+		if f == flow {
+			return fid
+		}
+	}
+	return invalidFlowID
+}
+
+// VCI returns the identifier of this VC.
+func (vc *VC) VCI() id.VC { return vc.vci }
+
+// RemoteEndpoint returns the remote endpoint for this VC.
+func (vc *VC) RemoteEndpoint() naming.Endpoint { return vc.remoteEP }
+
+// LocalEndpoint returns the local endpoint for this VC.
+func (vc *VC) LocalEndpoint() naming.Endpoint { return vc.localEP }
+
+// VCDataCache returns the VCDataCache that allows information to be
+// shared across the VC.
+func (vc *VC) VCDataCache() stream.VCDataCache { return vc.dataCache }
+
+// LocalPrincipal returns the principal that authenticated with the remote end of the VC.
+func (vc *VC) LocalPrincipal() security.Principal {
+	vc.mu.Lock()
+	defer vc.mu.Unlock()
+	vc.waitForHandshakeLocked()
+	return vc.localPrincipal
+}
+
+// LocalBlessings returns the blessings (bound to LocalPrincipal) presented to the
+// remote end of the VC during authentication.
+func (vc *VC) LocalBlessings() security.Blessings {
+	vc.mu.Lock()
+	defer vc.mu.Unlock()
+	vc.waitForHandshakeLocked()
+	return vc.localBlessings
+}
+
+// RemoteBlessings returns the blessings presented by the remote end of the VC during
+// authentication.
+func (vc *VC) RemoteBlessings() security.Blessings {
+	vc.mu.Lock()
+	defer vc.mu.Unlock()
+	vc.waitForHandshakeLocked()
+	return vc.remoteBlessings
+}
+
+// LocalDischarges returns the discharges presented by the local end of the VC during
+// authentication.
+func (vc *VC) LocalDischarges() map[string]security.Discharge {
+	vc.mu.Lock()
+	defer vc.mu.Unlock()
+	vc.waitForHandshakeLocked()
+	if len(vc.localDischarges) == 0 {
+		return nil
+	}
+	// Return a copy of the map to prevent racy reads.
+	return copyDischargeMap(vc.localDischarges)
+}
+
+// RemoteDischarges returns the discharges presented by the remote end of the VC during
+// authentication.
+func (vc *VC) RemoteDischarges() map[string]security.Discharge {
+	vc.mu.Lock()
+	defer vc.mu.Unlock()
+	vc.waitForHandshakeLocked()
+	if len(vc.remoteDischarges) == 0 {
+		return nil
+	}
+	// Return a copy of the map to prevent racy reads.
+	return copyDischargeMap(vc.remoteDischarges)
+}
+
+// waitForHandshakeLocked blocks until an in-progress handshake (encryption
+// setup and authentication) completes.
+// REQUIRES: vc.mu is held.
+func (vc *VC) waitForHandshakeLocked() {
+	if hsd := vc.acceptHandshakeDone; hsd != nil {
+		vc.mu.Unlock()
+		<-hsd
+		vc.mu.Lock()
+	}
+}
+
+func (vc *VC) String() string {
+	return fmt.Sprintf("VCI:%d (%v<->%v)", vc.vci, vc.localEP, vc.remoteEP)
+}
+
+// DebugString returns a string representation of the state of a VC.
+//
+// The format of the returned string is meant to be human-friendly and the
+// specific format should not be relied upon for automated processing.
+func (vc *VC) DebugString() string {
+	vc.mu.Lock()
+	l := make([]string, 0, len(vc.flowMap)+1)
+	l = append(l, fmt.Sprintf("VCI:%d -- Endpoints:(Local:%q Remote:%q) #Flows:%d NextConnectFID:%d",
+		vc.vci,
+		vc.localEP,
+		vc.remoteEP,
+		len(vc.flowMap),
+		vc.nextConnectFID))
+	if vc.crypter == nil {
+		l = append(l, "Handshake not completed yet")
+	} else {
+		l = append(l, "Encryption: "+vc.crypter.String())
+		if vc.localPrincipal != nil {
+			l = append(l, fmt.Sprintf("LocalPrincipal:%v LocalBlessings:%v RemoteBlessings:%v", vc.localPrincipal.PublicKey(), vc.localBlessings, vc.remoteBlessings))
+		}
+	}
+	for fid, f := range vc.flowMap {
+		l = append(l, fmt.Sprintf("  Flow:%3d BytesRead:%7d BytesWritten:%7d", fid, f.BytesRead(), f.BytesWritten()))
+	}
+	vc.mu.Unlock()
+	sort.Strings(l[1:])
+	return strings.Join(l, "\n")
+}
+
+func (vc *VC) newWriter(fid id.Flow, priority bqueue.Priority) (*writer, error) {
+	bq, err := vc.helper.NewWriter(vc.vci, fid, priority)
+	if err != nil {
+		return nil, err
+	}
+	alloc := iobuf.NewAllocator(vc.pool, vc.reserveBytes)
+	return newWriter(MaxPayloadSizeBytes, bq, alloc, vc.sharedCounters), nil
+}
+
+// readHandlerImpl is an adapter for the readHandler interface required by
+// the reader type.
+type readHandlerImpl struct {
+	vc  *VC
+	fid id.Flow
+}
+
+func (r readHandlerImpl) HandleRead(bytes uint) {
+	r.vc.helper.AddReceiveBuffers(r.vc.vci, r.fid, bytes)
+}
+
+func copyDischargeMap(m map[string]security.Discharge) map[string]security.Discharge {
+	ret := make(map[string]security.Discharge)
+	for id, d := range m {
+		ret[id] = d
+	}
+	return ret
+
+}
diff --git a/runtime/internal/rpc/stream/vc/vc_cache.go b/runtime/internal/rpc/stream/vc/vc_cache.go
new file mode 100644
index 0000000..d962cfa
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/vc_cache.go
@@ -0,0 +1,114 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+import (
+	"sync"
+
+	"v.io/v23/naming"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+)
+
+var errVCCacheClosed = reg(".errVCCacheClosed", "vc cache has been closed")
+
+// VCCache implements a set of VIFs keyed by the endpoint of the remote end and the
+// local principal. Multiple goroutines can invoke methods on the VCCache simultaneously.
+type VCCache struct {
+	mu      sync.Mutex
+	cache   map[vcKey]*VC  // GUARDED_BY(mu)
+	started map[vcKey]bool // GUARDED_BY(mu)
+	cond    *sync.Cond
+}
+
+// NewVCCache returns a new cache for VCs.
+func NewVCCache() *VCCache {
+	c := &VCCache{
+		cache:   make(map[vcKey]*VC),
+		started: make(map[vcKey]bool),
+	}
+	c.cond = sync.NewCond(&c.mu)
+	return c
+}
+
+// ReservedFind returns a VC where the remote end of the underlying connection
+// is identified by the provided (ep, p.PublicKey). Returns nil if there is no
+// such VC.
+//
+// Iff the cache is closed, ReservedFind will return an error.
+// If ReservedFind has no error, the caller is required to call Unreserve, to avoid deadlock.
+// The ep, and p.PublicKey in Unreserve must be the same as used in the ReservedFind call.
+// During this time, all new ReservedFind calls for this ep and p will Block until
+// the corresponding Unreserve call is made.
+func (c *VCCache) ReservedFind(ep naming.Endpoint, p security.Principal) (*VC, error) {
+	k := c.key(ep, p)
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	for c.started[k] {
+		c.cond.Wait()
+	}
+	if c.cache == nil {
+		return nil, verror.New(errVCCacheClosed, nil)
+	}
+	c.started[k] = true
+	return c.cache[k], nil
+}
+
+// Unreserve marks the status of the ep, p as no longer started, and
+// broadcasts waiting threads.
+func (c *VCCache) Unreserve(ep naming.Endpoint, p security.Principal) {
+	c.mu.Lock()
+	delete(c.started, c.key(ep, p))
+	c.cond.Broadcast()
+	c.mu.Unlock()
+}
+
+// Insert adds vc to the cache and returns an error iff the cache has been closed.
+func (c *VCCache) Insert(vc *VC) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.cache == nil {
+		return verror.New(errVCCacheClosed, nil)
+	}
+	c.cache[c.key(vc.RemoteEndpoint(), vc.LocalPrincipal())] = vc
+	return nil
+}
+
+// Close marks the VCCache as closed and returns the VCs remaining in the cache.
+func (c *VCCache) Close() []*VC {
+	c.mu.Lock()
+	vcs := make([]*VC, 0, len(c.cache))
+	for _, vc := range c.cache {
+		vcs = append(vcs, vc)
+	}
+	c.cache = nil
+	c.started = nil
+	c.mu.Unlock()
+	return vcs
+}
+
+// Delete removes vc from the cache, returning an error iff the cache has been closed.
+func (c *VCCache) Delete(vc *VC) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.cache == nil {
+		return verror.New(errVCCacheClosed, nil)
+	}
+	delete(c.cache, c.key(vc.RemoteEndpoint(), vc.LocalPrincipal()))
+	return nil
+}
+
+type vcKey struct {
+	remoteEP       string
+	localPublicKey string // localPublicKey = "" means we are running unencrypted (i.e. SecurityNone)
+}
+
+func (c *VCCache) key(ep naming.Endpoint, p security.Principal) vcKey {
+	k := vcKey{remoteEP: ep.String()}
+	if p != nil {
+		k.localPublicKey = p.PublicKey().String()
+	}
+	return k
+}
diff --git a/runtime/internal/rpc/stream/vc/vc_cache_test.go b/runtime/internal/rpc/stream/vc/vc_cache_test.go
new file mode 100644
index 0000000..b096e21
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/vc_cache_test.go
@@ -0,0 +1,123 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+import (
+	"testing"
+
+	inaming "v.io/x/ref/runtime/internal/naming"
+	"v.io/x/ref/test/testutil"
+)
+
+func TestInsertDelete(t *testing.T) {
+	cache := NewVCCache()
+	ep, err := inaming.NewEndpoint("foo:8888")
+	if err != nil {
+		t.Fatal(err)
+	}
+	p := testutil.NewPrincipal("test")
+	vc := &VC{remoteEP: ep, localPrincipal: p}
+	otherEP, err := inaming.NewEndpoint("foo:8888")
+	if err != nil {
+		t.Fatal(err)
+	}
+	otherP := testutil.NewPrincipal("test")
+	otherVC := &VC{remoteEP: otherEP, localPrincipal: otherP}
+
+	cache.Insert(vc)
+	cache.Insert(otherVC)
+	cache.Delete(vc)
+	if got, want := cache.Close(), []*VC{otherVC}; !vcsEqual(got, want) {
+		t.Errorf("got %v, want %v", got, want)
+	}
+}
+
+func TestInsertClose(t *testing.T) {
+	cache := NewVCCache()
+	ep, err := inaming.NewEndpoint("foo:8888")
+	if err != nil {
+		t.Fatal(err)
+	}
+	p := testutil.NewPrincipal("test")
+	vc := &VC{remoteEP: ep, localPrincipal: p}
+
+	if err := cache.Insert(vc); err != nil {
+		t.Errorf("the cache is not closed yet")
+	}
+	if got, want := cache.Close(), []*VC{vc}; !vcsEqual(got, want) {
+		t.Errorf("got %v, want %v", got, want)
+	}
+	if err := cache.Insert(vc); err == nil {
+		t.Errorf("the cache has been closed")
+	}
+}
+
+func TestReservedFind(t *testing.T) {
+	cache := NewVCCache()
+	ep, err := inaming.NewEndpoint("foo:8888")
+	if err != nil {
+		t.Fatal(err)
+	}
+	p := testutil.NewPrincipal("test")
+	vc := &VC{remoteEP: ep, localPrincipal: p}
+	cache.Insert(vc)
+
+	// We should be able to find the vc in the cache.
+	if got, err := cache.ReservedFind(ep, p); err != nil || got != vc {
+		t.Errorf("got %v, want %v, err: %v", got, vc, err)
+	}
+
+	// If we change the endpoint or the principal, we should get nothing.
+	otherEP, err := inaming.NewEndpoint("bar: 7777")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if got, err := cache.ReservedFind(otherEP, p); err != nil || got != nil {
+		t.Errorf("got %v, want <nil>, err: %v", got, err)
+	}
+	if got, err := cache.ReservedFind(ep, testutil.NewPrincipal("wrong")); err != nil || got != nil {
+		t.Errorf("got %v, want <nil>, err: %v", got, err)
+	}
+
+	// A subsequent ReservedFind call that matches a previous failed ReservedFind
+	// should block until a matching Unreserve call is made.
+	ch := make(chan *VC, 1)
+	go func(ch chan *VC) {
+		vc, err := cache.ReservedFind(otherEP, p)
+		if err != nil {
+			t.Fatal(err)
+		}
+		ch <- vc
+	}(ch)
+
+	// We insert the otherEP into the cache.
+	otherVC := &VC{remoteEP: otherEP, localPrincipal: p}
+	cache.Insert(otherVC)
+	cache.Unreserve(otherEP, p)
+
+	// Now the cache.BlcokingFind should have returned the correct otherVC.
+	if cachedVC := <-ch; cachedVC != otherVC {
+		t.Errorf("got %v, want %v", cachedVC, otherVC)
+	}
+}
+
+func vcsEqual(a, b []*VC) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	m := make(map[*VC]int)
+	for _, v := range a {
+		m[v]++
+	}
+	for _, v := range b {
+		m[v]--
+	}
+	for _, i := range m {
+		if i != 0 {
+			return false
+		}
+	}
+	return true
+}
diff --git a/runtime/internal/rpc/stream/vc/vc_test.go b/runtime/internal/rpc/stream/vc/vc_test.go
new file mode 100644
index 0000000..6f01d63
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/vc_test.go
@@ -0,0 +1,632 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Use a different package for the tests to ensure that only the exported API is used.
+
+package vc_test
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+	"testing"
+
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/options"
+	"v.io/v23/rpc/version"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+
+	"v.io/x/lib/vlog"
+
+	"v.io/x/ref/runtime/internal/lib/bqueue"
+	"v.io/x/ref/runtime/internal/lib/bqueue/drrqueue"
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/crypto"
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+	iversion "v.io/x/ref/runtime/internal/rpc/version"
+	"v.io/x/ref/test/testutil"
+)
+
+var (
+	clientEP = endpoint(naming.FixedRoutingID(0xcccccccccccccccc))
+	serverEP = endpoint(naming.FixedRoutingID(0x5555555555555555))
+)
+
+//go:generate v23 test generate
+
+const (
+	// Convenience alias to avoid conflicts between the package name "vc" and variables called "vc".
+	DefaultBytesBufferedPerFlow = vc.DefaultBytesBufferedPerFlow
+	// Shorthands
+	SecurityNone    = options.SecurityNone
+	SecurityDefault = options.SecurityConfidential
+)
+
+var LatestVersion = iversion.SupportedRange.Max
+
+// testFlowEcho writes a random string of 'size' bytes on the flow and then
+// ensures that the same string is read back.
+func testFlowEcho(t *testing.T, flow stream.Flow, size int) {
+	defer flow.Close()
+	wrote := testutil.RandomBytes(size)
+	go func() {
+		buf := wrote
+		for len(buf) > 0 {
+			limit := 1 + testutil.Intn(len(buf)) // Random number in [1, n]
+			n, err := flow.Write(buf[:limit])
+			if n != limit || err != nil {
+				t.Errorf("Write returned (%d, %v) want (%d, nil)", n, err, limit)
+			}
+			buf = buf[limit:]
+		}
+	}()
+
+	total := 0
+	read := make([]byte, size)
+	buf := read
+	for total < size {
+		n, err := flow.Read(buf)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+		total += n
+		buf = buf[n:]
+	}
+	if bytes.Compare(read, wrote) != 0 {
+		t.Errorf("Data read != data written")
+	}
+}
+
+func TestHandshakeNoSecurity(t *testing.T) {
+	// When the principals are nil, no blessings should be sent over the wire.
+	h, vc, err := New(LatestVersion, nil, nil, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer h.Close()
+	flow, err := vc.Connect()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !flow.RemoteBlessings().IsZero() {
+		t.Errorf("Server sent blessing %v over insecure transport", flow.RemoteBlessings())
+	}
+	if !flow.LocalBlessings().IsZero() {
+		t.Errorf("Client sent blessing %v over insecure transport", flow.LocalBlessings())
+	}
+}
+
+func testFlowAuthN(flow stream.Flow, serverBlessings security.Blessings, serverDischarges map[string]security.Discharge, clientPublicKey security.PublicKey) error {
+	if got, want := flow.RemoteBlessings(), serverBlessings; !reflect.DeepEqual(got, want) {
+		return fmt.Errorf("Server shared blessings %v, want %v", got, want)
+	}
+	if got, want := flow.RemoteDischarges(), serverDischarges; !reflect.DeepEqual(got, want) {
+		return fmt.Errorf("Server shared discharges %#v, want %#v", got, want)
+	}
+	if got, want := flow.LocalBlessings().PublicKey(), clientPublicKey; !reflect.DeepEqual(got, want) {
+		return fmt.Errorf("Client shared %v, want %v", got, want)
+	}
+	return nil
+}
+
+// auth implements security.Authorizer.
+type auth struct {
+	localPrincipal   security.Principal
+	remoteBlessings  security.Blessings
+	remoteDischarges map[string]security.Discharge
+	suffix, method   string
+	err              error
+}
+
+// Authorize tests that the context passed to the authorizer is the expected one.
+func (a *auth) Authorize(ctx *context.T, call security.Call) error {
+	if a.err != nil {
+		return a.err
+	}
+	if got, want := call.LocalPrincipal(), a.localPrincipal; !reflect.DeepEqual(got, want) {
+		return fmt.Errorf("ctx.LocalPrincipal: got %v, want %v", got, want)
+	}
+	if got, want := call.RemoteBlessings(), a.remoteBlessings; !reflect.DeepEqual(got, want) {
+		return fmt.Errorf("ctx.RemoteBlessings: got %v, want %v", got, want)
+	}
+	if got, want := call.RemoteDischarges(), a.remoteDischarges; !reflect.DeepEqual(got, want) {
+		return fmt.Errorf("ctx.RemoteDischarges: got %v, want %v", got, want)
+	}
+	if got, want := call.LocalEndpoint(), clientEP; !reflect.DeepEqual(got, want) {
+		return fmt.Errorf("ctx.LocalEndpoint: got %v, want %v", got, want)
+	}
+	if got, want := call.RemoteEndpoint(), serverEP; !reflect.DeepEqual(got, want) {
+		return fmt.Errorf("ctx.RemoteEndpoint: got %v, want %v", got, want)
+	}
+	if got, want := call.Suffix(), a.suffix; got != want {
+		return fmt.Errorf("ctx.RemoteEndpoint: got %v, want %v", got, want)
+	}
+	if got, want := call.Method(), a.method; got != want {
+		return fmt.Errorf("ctx.RemoteEndpoint: got %v, want %v", got, want)
+	}
+	return nil
+}
+
+// mockDischargeClient implements vc.DischargeClient.
+type mockDischargeClient []security.Discharge
+
+func (m mockDischargeClient) PrepareDischarges(_ *context.T, forcaveats []security.Caveat, impetus security.DischargeImpetus) []security.Discharge {
+	return m
+}
+func (mockDischargeClient) Invalidate(...security.Discharge) {}
+func (mockDischargeClient) RPCStreamListenerOpt()            {}
+func (mockDischargeClient) RPCStreamVCOpt()                  {}
+
+// Test that mockDischargeClient implements vc.DischargeClient.
+var _ vc.DischargeClient = (mockDischargeClient)(nil)
+
+func TestHandshake(t *testing.T) {
+	matchesError := func(got error, want string) error {
+		if (got == nil) && len(want) == 0 {
+			return nil
+		}
+		if got == nil && !strings.Contains(got.Error(), want) {
+			return fmt.Errorf("got error %q, wanted to match %q", got, want)
+		}
+		return nil
+	}
+	var (
+		root       = testutil.NewIDProvider("root")
+		discharger = testutil.NewPrincipal("discharger")
+		client     = testutil.NewPrincipal()
+		server     = testutil.NewPrincipal()
+	)
+	tpcav, err := security.NewPublicKeyCaveat(discharger.PublicKey(), "irrelevant", security.ThirdPartyRequirements{}, security.UnconstrainedUse())
+	if err != nil {
+		t.Fatal(err)
+	}
+	dis, err := discharger.MintDischarge(tpcav, security.UnconstrainedUse())
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Root blesses the client
+	if err := root.Bless(client, "client"); err != nil {
+		t.Fatal(err)
+	}
+	// Root blesses the server with a third-party caveat
+	if err := root.Bless(server, "server", tpcav); err != nil {
+		t.Fatal(err)
+	}
+
+	testdata := []struct {
+		dischargeClient      vc.DischargeClient
+		auth                 *vc.ServerAuthorizer
+		dialErr              string
+		flowRemoteBlessings  security.Blessings
+		flowRemoteDischarges map[string]security.Discharge
+	}{
+		{
+			flowRemoteBlessings: server.BlessingStore().Default(),
+		},
+		{
+			dischargeClient:      mockDischargeClient([]security.Discharge{dis}),
+			flowRemoteBlessings:  server.BlessingStore().Default(),
+			flowRemoteDischarges: map[string]security.Discharge{dis.ID(): dis},
+		},
+		{
+			dischargeClient: mockDischargeClient([]security.Discharge{dis}),
+			auth: &vc.ServerAuthorizer{
+				Suffix: "suffix",
+				Method: "method",
+				Policy: &auth{
+					localPrincipal:   client,
+					remoteBlessings:  server.BlessingStore().Default(),
+					remoteDischarges: map[string]security.Discharge{dis.ID(): dis},
+					suffix:           "suffix",
+					method:           "method",
+				},
+			},
+			flowRemoteBlessings:  server.BlessingStore().Default(),
+			flowRemoteDischarges: map[string]security.Discharge{dis.ID(): dis},
+		},
+		{
+			dischargeClient: mockDischargeClient([]security.Discharge{dis}),
+			auth: &vc.ServerAuthorizer{
+				Suffix: "suffix",
+				Method: "method",
+				Policy: &auth{
+					err: errors.New("authorization error"),
+				},
+			},
+			dialErr: "authorization error",
+		},
+	}
+	for i, d := range testdata {
+		h, vc, err := New(LatestVersion, client, server, d.dischargeClient, d.auth)
+		if merr := matchesError(err, d.dialErr); merr != nil {
+			t.Errorf("Test #%d: HandshakeDialedVC with server authorizer %#v:: %v", i, d.auth.Policy, merr)
+		}
+		if err != nil {
+			continue
+		}
+		flow, err := vc.Connect()
+		if err != nil {
+			h.Close()
+			t.Errorf("Unable to create flow: %v", err)
+			continue
+		}
+		if err := testFlowAuthN(flow, d.flowRemoteBlessings, d.flowRemoteDischarges, client.PublicKey()); err != nil {
+			h.Close()
+			t.Error(err)
+			continue
+		}
+		h.Close()
+	}
+}
+
+func testConnect_Small(t *testing.T, version version.RPCVersion, securityLevel options.SecurityLevel) {
+	h, vc, err := NewSimple(version, securityLevel)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer h.Close()
+	flow, err := vc.Connect()
+	if err != nil {
+		t.Fatal(err)
+	}
+	testFlowEcho(t, flow, 10)
+}
+func TestConnect_SmallNoSecurity(t *testing.T) { testConnect_Small(t, LatestVersion, SecurityNone) }
+func TestConnect_Small(t *testing.T)           { testConnect_Small(t, LatestVersion, SecurityDefault) }
+
+func testConnect(t *testing.T, securityLevel options.SecurityLevel) {
+	h, vc, err := NewSimple(LatestVersion, securityLevel)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer h.Close()
+	flow, err := vc.Connect()
+	if err != nil {
+		t.Fatal(err)
+	}
+	testFlowEcho(t, flow, 10*DefaultBytesBufferedPerFlow)
+}
+func TestConnectNoSecurity(t *testing.T) { testConnect(t, SecurityNone) }
+func TestConnect(t *testing.T)           { testConnect(t, SecurityDefault) }
+
+func testConnect_Version7(t *testing.T, securityLevel options.SecurityLevel) {
+	h, vc, err := NewSimple(LatestVersion, securityLevel)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer h.Close()
+	flow, err := vc.Connect()
+	if err != nil {
+		t.Fatal(err)
+	}
+	testFlowEcho(t, flow, 10)
+}
+func TestConnect_Version7NoSecurity(t *testing.T) { testConnect_Version7(t, SecurityNone) }
+func TestConnect_Version7(t *testing.T)           { testConnect_Version7(t, SecurityDefault) }
+
+// helper function for testing concurrent operations on multiple flows over the
+// same VC.  Such tests are most useful when running the race detector.
+// (go test -race ...)
+func testConcurrentFlows(t *testing.T, securityLevel options.SecurityLevel, flows, gomaxprocs int) {
+	mp := runtime.GOMAXPROCS(gomaxprocs)
+	defer runtime.GOMAXPROCS(mp)
+	h, vc, err := NewSimple(LatestVersion, securityLevel)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer h.Close()
+
+	var wg sync.WaitGroup
+	wg.Add(flows)
+	for i := 0; i < flows; i++ {
+		go func(n int) {
+			defer wg.Done()
+			flow, err := vc.Connect()
+			if err != nil {
+				t.Error(err)
+			} else {
+				testFlowEcho(t, flow, (n+1)*DefaultBytesBufferedPerFlow)
+			}
+		}(i)
+	}
+	wg.Wait()
+}
+
+func TestConcurrentFlows_1NOSecurity(t *testing.T) { testConcurrentFlows(t, SecurityNone, 10, 1) }
+func TestConcurrentFlows_1(t *testing.T)           { testConcurrentFlows(t, SecurityDefault, 10, 1) }
+
+func TestConcurrentFlows_10NoSecurity(t *testing.T) { testConcurrentFlows(t, SecurityNone, 10, 10) }
+func TestConcurrentFlows_10(t *testing.T)           { testConcurrentFlows(t, SecurityDefault, 10, 10) }
+
+func testListen(t *testing.T, securityLevel options.SecurityLevel) {
+	h, vc, err := NewSimple(LatestVersion, securityLevel)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer h.Close()
+	if err := h.VC.AcceptFlow(id.Flow(21)); err == nil {
+		t.Errorf("Expected AcceptFlow on a new flow to fail as Listen was not called")
+	}
+
+	ln, err := vc.Listen()
+	if err != nil {
+		t.Fatalf("vc.Listen failed: %v", err)
+		return
+	}
+	_, err = vc.Listen()
+	if err == nil {
+		t.Fatalf("Second call to vc.Listen should have failed")
+		return
+	}
+	if err := h.VC.AcceptFlow(id.Flow(23)); err != nil {
+		t.Fatal(err)
+	}
+
+	data := "the dark knight"
+	cipherdata, err := h.otherEnd.VC.Encrypt(id.Flow(23), iobuf.NewSlice([]byte(data)))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := h.VC.DispatchPayload(id.Flow(23), cipherdata); err != nil {
+		t.Fatal(err)
+	}
+	flow, err := ln.Accept()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := ln.Close(); err != nil {
+		t.Error(err)
+	}
+	flow.Close()
+	var buf [4096]byte
+	if n, err := flow.Read(buf[:]); n != len(data) || err != nil || string(buf[:n]) != data {
+		t.Errorf("Got (%d, %v) = %q, want (%d, nil) = %q", n, err, string(buf[:n]), len(data), data)
+	}
+	if n, err := flow.Read(buf[:]); n != 0 || err != io.EOF {
+		t.Errorf("Got (%d, %v) want (0, %v)", n, err, io.EOF)
+	}
+}
+func TestListenNoSecurity(t *testing.T) { testListen(t, SecurityNone) }
+func TestListen(t *testing.T)           { testListen(t, SecurityDefault) }
+
+func testNewFlowAfterClose(t *testing.T, securityLevel options.SecurityLevel) {
+	h, _, err := NewSimple(LatestVersion, securityLevel)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer h.Close()
+	h.VC.Close(fmt.Errorf("reason"))
+	if err := h.VC.AcceptFlow(id.Flow(10)); err == nil {
+		t.Fatalf("New flows should not be accepted once the VC is closed")
+	}
+}
+func TestNewFlowAfterCloseNoSecurity(t *testing.T) { testNewFlowAfterClose(t, SecurityNone) }
+func TestNewFlowAfterClose(t *testing.T)           { testNewFlowAfterClose(t, SecurityDefault) }
+
+func testConnectAfterClose(t *testing.T, securityLevel options.SecurityLevel) {
+	h, vc, err := NewSimple(LatestVersion, securityLevel)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer h.Close()
+	h.VC.Close(fmt.Errorf("myerr"))
+	if f, err := vc.Connect(); f != nil || err == nil || !strings.Contains(err.Error(), "myerr") {
+		t.Fatalf("Got (%v, %v), want (nil, %q)", f, err, "myerr")
+	}
+}
+func TestConnectAfterCloseNoSecurity(t *testing.T) { testConnectAfterClose(t, SecurityNone) }
+func TestConnectAfterClose(t *testing.T)           { testConnectAfterClose(t, SecurityDefault) }
+
+// helper implements vc.Helper and also sets up a single VC.
+type helper struct {
+	VC *vc.VC
+	bq bqueue.T
+
+	mu       sync.Mutex
+	otherEnd *helper // GUARDED_BY(mu)
+}
+
+func createPrincipals(securityLevel options.SecurityLevel) (client, server security.Principal) {
+	if securityLevel == SecurityDefault {
+		client = testutil.NewPrincipal("client")
+		server = testutil.NewPrincipal("server")
+	}
+	return
+}
+
+// A convenient version of New() with default parameters.
+func NewSimple(v version.RPCVersion, securityLevel options.SecurityLevel) (*helper, stream.VC, error) {
+	pclient, pserver := createPrincipals(securityLevel)
+	return New(v, pclient, pserver, nil, nil)
+}
+
+// New creates both ends of a VC but returns only the "client" end (i.e., the
+// one that initiated the VC). The "server" end (the one that "accepted" the VC)
+// listens for flows and simply echoes data read.
+func New(v version.RPCVersion, client, server security.Principal, dischargeClient vc.DischargeClient, auth *vc.ServerAuthorizer) (*helper, stream.VC, error) {
+	clientH := &helper{bq: drrqueue.New(vc.MaxPayloadSizeBytes)}
+	serverH := &helper{bq: drrqueue.New(vc.MaxPayloadSizeBytes)}
+	clientH.otherEnd = serverH
+	serverH.otherEnd = clientH
+
+	vci := id.VC(1234)
+
+	clientParams := vc.Params{
+		VCI:      vci,
+		Dialed:   true,
+		LocalEP:  clientEP,
+		RemoteEP: serverEP,
+		Pool:     iobuf.NewPool(0),
+		Helper:   clientH,
+	}
+	serverParams := vc.Params{
+		VCI:      vci,
+		LocalEP:  serverEP,
+		RemoteEP: clientEP,
+		Pool:     iobuf.NewPool(0),
+		Helper:   serverH,
+	}
+
+	clientH.VC = vc.InternalNew(clientParams)
+	serverH.VC = vc.InternalNew(serverParams)
+	clientH.AddReceiveBuffers(vci, vc.SharedFlowID, vc.DefaultBytesBufferedPerFlow)
+
+	go clientH.pipeLoop(serverH.VC)
+	go serverH.pipeLoop(clientH.VC)
+
+	var (
+		lopts  []stream.ListenerOpt
+		vcopts []stream.VCOpt
+	)
+
+	if dischargeClient != nil {
+		lopts = append(lopts, dischargeClient)
+	}
+	if auth != nil {
+		vcopts = append(vcopts, auth)
+	}
+
+	var bserver security.Blessings
+	if server != nil {
+		bserver = server.BlessingStore().Default()
+	}
+
+	var clientExchanger func(*crypto.BoxKey) error
+	var serverExchanger func(*crypto.BoxKey) (*crypto.BoxKey, error)
+
+	serverch, clientch := make(chan *crypto.BoxKey, 1), make(chan *crypto.BoxKey, 1)
+	clientExchanger = func(pubKey *crypto.BoxKey) error {
+		clientch <- pubKey
+		return clientH.VC.FinishHandshakeDialedVC(v, <-serverch)
+	}
+	serverExchanger = func(pubKey *crypto.BoxKey) (*crypto.BoxKey, error) {
+		serverch <- pubKey
+		return <-clientch, nil
+	}
+
+	c := serverH.VC.HandshakeAcceptedVC(v, server, bserver, serverExchanger, lopts...)
+	if err := clientH.VC.HandshakeDialedVC(client, clientExchanger, vcopts...); err != nil {
+		go func() { <-c }()
+		return nil, nil, err
+	}
+	hr := <-c
+	if hr.Error != nil {
+		return nil, nil, hr.Error
+	}
+	go acceptLoop(hr.Listener)
+	return clientH, clientH.VC, nil
+}
+
+// pipeLoop forwards slices written to h.bq to dst.
+func (h *helper) pipeLoop(dst *vc.VC) {
+	for {
+		w, bufs, err := h.bq.Get(nil)
+		if err != nil {
+			return
+		}
+		fid := id.Flow(w.ID())
+		for _, b := range bufs {
+			cipher, err := h.VC.Encrypt(fid, b)
+			if err != nil {
+				vlog.Infof("vc encrypt failed: %v", err)
+			}
+			if err := dst.DispatchPayload(fid, cipher); err != nil {
+				vlog.Infof("dispatch payload failed: %v", err)
+				return
+			}
+		}
+		if w.IsDrained() {
+			h.VC.ShutdownFlow(fid)
+			dst.ShutdownFlow(fid)
+		}
+	}
+}
+
+func acceptLoop(ln stream.Listener) {
+	for {
+		f, err := ln.Accept()
+		if err != nil {
+			return
+		}
+		go echoLoop(f)
+	}
+}
+
+func echoLoop(flow stream.Flow) {
+	var buf [vc.DefaultBytesBufferedPerFlow * 20]byte
+	for {
+		n, err := flow.Read(buf[:])
+		if err == io.EOF {
+			return
+		}
+		if err == nil {
+			_, err = flow.Write(buf[:n])
+		}
+		if err != nil {
+			panic(err)
+		}
+	}
+}
+
+func (h *helper) NotifyOfNewFlow(vci id.VC, fid id.Flow, bytes uint) {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	if h.otherEnd != nil {
+		if err := h.otherEnd.VC.AcceptFlow(fid); err != nil {
+			panic(verror.DebugString(err))
+		}
+		h.otherEnd.VC.ReleaseCounters(fid, uint32(bytes))
+	}
+}
+
+func (h *helper) AddReceiveBuffers(vci id.VC, fid id.Flow, bytes uint) {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	if h.otherEnd != nil {
+		h.otherEnd.VC.ReleaseCounters(fid, uint32(bytes))
+	}
+}
+
+func (h *helper) NewWriter(vci id.VC, fid id.Flow, priority bqueue.Priority) (bqueue.Writer, error) {
+	return h.bq.NewWriter(bqueue.ID(fid), priority, DefaultBytesBufferedPerFlow)
+}
+
+func (h *helper) Close() {
+	h.VC.Close(fmt.Errorf("helper closed"))
+	h.bq.Close()
+	h.mu.Lock()
+	otherEnd := h.otherEnd
+	h.otherEnd = nil
+	h.mu.Unlock()
+	if otherEnd != nil {
+		otherEnd.mu.Lock()
+		otherEnd.otherEnd = nil
+		otherEnd.mu.Unlock()
+		otherEnd.Close()
+	}
+}
+
+type endpoint naming.RoutingID
+
+func (e endpoint) Network() string                          { return "test" }
+func (e endpoint) VersionedString(int) string               { return e.String() }
+func (e endpoint) String() string                           { return naming.RoutingID(e).String() }
+func (e endpoint) Name() string                             { return naming.JoinAddressName(e.String(), "") }
+func (e endpoint) RoutingID() naming.RoutingID              { return naming.RoutingID(e) }
+func (e endpoint) Addr() net.Addr                           { return nil }
+func (e endpoint) ServesMountTable() bool                   { return false }
+func (e endpoint) ServesLeaf() bool                         { return false }
+func (e endpoint) BlessingNames() []string                  { return nil }
+func (e endpoint) RPCVersionRange() version.RPCVersionRange { return version.RPCVersionRange{} }
diff --git a/runtime/internal/rpc/stream/vc/writer.go b/runtime/internal/rpc/stream/vc/writer.go
new file mode 100644
index 0000000..32f51d3
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/writer.go
@@ -0,0 +1,200 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+import (
+	"io"
+	"sync"
+	"sync/atomic"
+
+	"v.io/v23/verror"
+
+	"v.io/x/ref/runtime/internal/lib/bqueue"
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	vsync "v.io/x/ref/runtime/internal/lib/sync"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+)
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errWriterClosed     = reg(".errWriterClosed", "attempt to call Write on Flow that has been Closed")
+	errBQueuePutFailed  = reg(".errBqueuePutFailed", "bqueue.Writer.Put failed{:3}")
+	errFailedToGetQuota = reg(".errFailedToGetQuota", "failed to get quota from receive buffers shared by all new flows on a VC{:3}")
+	errCanceled         = reg(".errCanceled", "underlying queues canceled")
+)
+
+// writer implements the io.Writer and SetWriteDeadline interfaces for Flow.
+type writer struct {
+	MTU            int              // Maximum size (in bytes) of each slice Put into Sink.
+	Sink           bqueue.Writer    // Buffer queue writer where data from Write is sent as iobuf.Slice objects.
+	Alloc          *iobuf.Allocator // Allocator for iobuf.Slice objects. GUARDED_BY(mu)
+	SharedCounters *vsync.Semaphore // Semaphore hosting counters shared by all flows over a VC.
+
+	mu         sync.Mutex      // Guards call to Writes
+	wroteOnce  bool            // GUARDED_BY(mu)
+	isClosed   bool            // GUARDED_BY(mu)
+	closeError error           // GUARDED_BY(mu)
+	closed     chan struct{}   // GUARDED_BY(mu)
+	deadline   <-chan struct{} // GUARDED_BY(mu)
+
+	// Total number of bytes filled in by all Write calls on this writer.
+	// Atomic operations are used to manipulate it.
+	totalBytes uint32
+
+	// Accounting for counters borrowed from the shared pool.
+	muSharedCountersBorrowed sync.Mutex
+	sharedCountersBorrowed   int // GUARDED_BY(muSharedCountersBorrowed)
+}
+
+func newWriter(mtu int, sink bqueue.Writer, alloc *iobuf.Allocator, counters *vsync.Semaphore) *writer {
+	return &writer{
+		MTU:            mtu,
+		Sink:           sink,
+		Alloc:          alloc,
+		SharedCounters: counters,
+		closed:         make(chan struct{}),
+		closeError:     verror.New(errWriterClosed, nil),
+	}
+}
+
+// Shutdown closes the writer and discards any queued up write buffers, i.e.,
+// the bqueue.Get call will not see the buffers queued up at this writer.
+// If removeWriter is true the writer will also be removed entirely from the
+// bqueue, otherwise the now empty writer will eventually be returned by
+// bqueue.Get.
+func (w *writer) shutdown(removeWriter bool) {
+	w.Sink.Shutdown(removeWriter)
+	w.finishClose(true)
+}
+
+// Close closes the writer without discarding any queued up write buffers.
+func (w *writer) Close() {
+	w.Sink.Close()
+	w.finishClose(false)
+}
+
+func (w *writer) IsClosed() bool {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	return w.isClosed
+}
+
+func (w *writer) Closed() <-chan struct{} {
+	return w.closed
+}
+
+func (w *writer) finishClose(remoteShutdown bool) {
+	// IsClosed() and Closed() indicate that the writer is closed before
+	// finishClose() completes. This is safe because Alloc and shared counters
+	// are guarded, and are not accessed elsewhere after w.closed is closed.
+	w.mu.Lock()
+	// finishClose() is idempotent, but Go's builtin close is not.
+	if !w.isClosed {
+		w.isClosed = true
+		if remoteShutdown {
+			w.closeError = io.EOF
+		}
+		close(w.closed)
+	}
+
+	w.Alloc.Release()
+	w.mu.Unlock()
+
+	w.muSharedCountersBorrowed.Lock()
+	w.SharedCounters.IncN(uint(w.sharedCountersBorrowed))
+	w.sharedCountersBorrowed = 0
+	w.muSharedCountersBorrowed.Unlock()
+}
+
+// Write implements the Write call for a Flow.
+//
+// Flow control is achieved using receive buffers (aka counters), wherein the
+// receiving end sends out the number of bytes that it is willing to read. To
+// avoid an additional round-trip for the creation of new flows, the very first
+// write of a new flow borrows counters from a shared pool.
+func (w *writer) Write(b []byte) (int, error) {
+	written := 0
+	// net.Conn requires that multiple goroutines be able to invoke methods
+	// simulatenously.
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	if w.isClosed {
+		if w.closeError == io.EOF {
+			return 0, io.EOF
+		}
+		return 0, verror.New(stream.ErrBadState, nil, w.closeError)
+	}
+
+	for len(b) > 0 {
+		n := len(b)
+		if n > w.MTU {
+			n = w.MTU
+		}
+		if !w.wroteOnce && w.SharedCounters != nil {
+			w.wroteOnce = true
+			if n > MaxSharedBytes {
+				n = MaxSharedBytes
+			}
+			if err := w.SharedCounters.DecN(uint(n), w.deadline); err != nil {
+				if err == vsync.ErrCanceled {
+					return 0, stream.NewNetError(verror.New(stream.ErrNetwork, nil, verror.New(errCanceled, nil)), true, false)
+				}
+				return 0, verror.New(stream.ErrNetwork, nil, verror.New(errFailedToGetQuota, nil, err))
+			}
+			w.muSharedCountersBorrowed.Lock()
+			w.sharedCountersBorrowed = n
+			w.muSharedCountersBorrowed.Unlock()
+			w.Sink.Release(n)
+		}
+		slice := w.Alloc.Copy(b[:n])
+		if err := w.Sink.Put(slice, w.deadline); err != nil {
+			slice.Release()
+			atomic.AddUint32(&w.totalBytes, uint32(written))
+			switch err {
+			case bqueue.ErrCancelled, vsync.ErrCanceled:
+				return written, stream.NewNetError(verror.New(stream.ErrNetwork, nil, verror.New(errCanceled, nil)), true, false)
+			case bqueue.ErrWriterIsClosed:
+				return written, verror.New(stream.ErrBadState, nil, verror.New(errWriterClosed, nil))
+			default:
+				return written, verror.New(stream.ErrNetwork, nil, verror.New(errBQueuePutFailed, nil, err))
+			}
+		}
+		written += n
+		b = b[n:]
+	}
+	atomic.AddUint32(&w.totalBytes, uint32(written))
+	return written, nil
+}
+
+func (w *writer) SetDeadline(deadline <-chan struct{}) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	w.deadline = deadline
+}
+
+// Release allows the next 'bytes' of data to be removed from the buffer queue
+// writer and passed to bqueue.Get.
+func (w *writer) Release(bytes int) {
+	w.muSharedCountersBorrowed.Lock()
+	switch {
+	case w.sharedCountersBorrowed == 0:
+		w.Sink.Release(bytes)
+	case w.sharedCountersBorrowed >= bytes:
+		w.SharedCounters.IncN(uint(bytes))
+		w.sharedCountersBorrowed -= bytes
+	default:
+		w.SharedCounters.IncN(uint(w.sharedCountersBorrowed))
+		w.Sink.Release(bytes - w.sharedCountersBorrowed)
+		w.sharedCountersBorrowed = 0
+	}
+	w.muSharedCountersBorrowed.Unlock()
+}
+
+func (w *writer) BytesWritten() uint32 {
+	return atomic.LoadUint32(&w.totalBytes)
+}
diff --git a/runtime/internal/rpc/stream/vc/writer_test.go b/runtime/internal/rpc/stream/vc/writer_test.go
new file mode 100644
index 0000000..baaebfd
--- /dev/null
+++ b/runtime/internal/rpc/stream/vc/writer_test.go
@@ -0,0 +1,223 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vc
+
+import (
+	"bytes"
+	"io"
+	"net"
+	"reflect"
+	"testing"
+
+	"v.io/v23/verror"
+
+	"v.io/x/ref/runtime/internal/lib/bqueue"
+	"v.io/x/ref/runtime/internal/lib/bqueue/drrqueue"
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	"v.io/x/ref/runtime/internal/lib/sync"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+)
+
+// TestWrite is a very basic, easy to follow, but not very thorough test of the
+// writer.  More thorough testing of flows (and implicitly the writer) is in
+// vc_test.go.
+func TestWrite(t *testing.T) {
+	bq := drrqueue.New(128)
+	defer bq.Close()
+
+	bw, err := bq.NewWriter(0, 0, 10)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	shared := sync.NewSemaphore()
+	shared.IncN(4)
+
+	w := newTestWriter(bw, shared)
+
+	if n, err := w.Write([]byte("abcd")); n != 4 || err != nil {
+		t.Errorf("Got (%d, %v) want (4, nil)", n, err)
+	}
+
+	// Should have used up 4 shared counters
+	if err := shared.TryDecN(1); err != sync.ErrTryAgain {
+		t.Errorf("Got %v want %v", err, sync.ErrTryAgain)
+	}
+
+	// Further Writes will block until some space has been released.
+	w.Release(10)
+	if n, err := w.Write([]byte("efghij")); n != 6 || err != nil {
+		t.Errorf("Got (%d, %v) want (5, nil)", n, err)
+	}
+	// And the release should have returned to the shared counters set
+	if err := shared.TryDecN(4); err != nil {
+		t.Errorf("Got %v want %v", err, nil)
+	}
+
+	// Further writes will block since all 10 bytes (provided to NewWriter)
+	// have been exhausted and Get hasn't been called on bq yet.
+	deadline := make(chan struct{}, 0)
+	w.SetDeadline(deadline)
+	close(deadline)
+
+	w.SetDeadline(deadline)
+	if n, err := w.Write([]byte("k")); n != 0 || !isTimeoutError(err) {
+		t.Errorf("Got (%d, %v) want (0, timeout error)", n, err)
+	}
+
+	w.Close()
+	if w.BytesWritten() != 10 {
+		t.Errorf("Got %d want %d", w.BytesWritten(), 10)
+	}
+
+	_, bufs, err := bq.Get(nil)
+	var read bytes.Buffer
+	for _, b := range bufs {
+		read.Write(b.Contents)
+		b.Release()
+	}
+	if g, w := read.String(), "abcdefghij"; g != w {
+		t.Errorf("Got %q want %q", g, w)
+	}
+}
+
+func TestCloseBeforeWrite(t *testing.T) {
+	bq := drrqueue.New(128)
+	defer bq.Close()
+
+	bw, err := bq.NewWriter(0, 0, 10)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	shared := sync.NewSemaphore()
+	shared.IncN(4)
+
+	w := newTestWriter(bw, shared)
+	w.Close()
+
+	if n, err := w.Write([]byte{1, 2}); n != 0 || verror.ErrorID(err) != stream.ErrBadState.ID {
+		t.Errorf("Got (%v, %v) want (0, %v)", n, err, stream.ErrBadState)
+	}
+}
+
+func TestShutdownBeforeWrite(t *testing.T) {
+	bq := drrqueue.New(128)
+	defer bq.Close()
+
+	bw, err := bq.NewWriter(0, 0, 10)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	shared := sync.NewSemaphore()
+	shared.IncN(4)
+
+	w := newTestWriter(bw, shared)
+	w.shutdown(true)
+
+	if n, err := w.Write([]byte{1, 2}); n != 0 || err != io.EOF {
+		t.Errorf("Got (%v, %v) want (0, %v)", n, err, io.EOF)
+	}
+}
+
+func TestCloseDoesNotDiscardPendingWrites(t *testing.T) {
+	bq := drrqueue.New(128)
+	defer bq.Close()
+
+	bw, err := bq.NewWriter(0, 0, 10)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	shared := sync.NewSemaphore()
+	shared.IncN(2)
+
+	w := newTestWriter(bw, shared)
+	data := []byte{1, 2}
+	if n, err := w.Write(data); n != len(data) || err != nil {
+		t.Fatalf("Got (%d, %v) want (%d, nil)", n, err, len(data))
+	}
+	w.Close()
+
+	gbw, bufs, err := bq.Get(nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if gbw != bw {
+		t.Fatalf("Got %v want %v", gbw, bw)
+	}
+	if len(bufs) != 1 {
+		t.Fatalf("Got %d bufs, want 1", len(bufs))
+	}
+	if !reflect.DeepEqual(bufs[0].Contents, data) {
+		t.Fatalf("Got %v want %v", bufs[0].Contents, data)
+	}
+	if !gbw.IsDrained() {
+		t.Fatal("Expected bqueue.Writer to be drained")
+	}
+}
+
+func TestWriterCloseIsIdempotent(t *testing.T) {
+	bq := drrqueue.New(128)
+	defer bq.Close()
+
+	bw, err := bq.NewWriter(0, 0, 10)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	shared := sync.NewSemaphore()
+	shared.IncN(1)
+	w := newTestWriter(bw, shared)
+	if n, err := w.Write([]byte{1}); n != 1 || err != nil {
+		t.Fatalf("Got (%d, %v) want (1, nil)", n, err)
+	}
+	// Should have used up the shared counter.
+	if err := shared.TryDec(); err != sync.ErrTryAgain {
+		t.Fatalf("Got %v want %v", err, sync.ErrTryAgain)
+	}
+	w.Close()
+	// The shared counter should have been returned
+	if err := shared.TryDec(); err != nil {
+		t.Fatalf("Got %v want nil", err)
+	}
+	// Closing again shouldn't affect the shared counters
+	w.Close()
+	if err := shared.TryDec(); err != sync.ErrTryAgain {
+		t.Fatalf("Got %v want %v", err, sync.ErrTryAgain)
+	}
+}
+
+func TestClosedChannel(t *testing.T) {
+	bq := drrqueue.New(128)
+	defer bq.Close()
+
+	bw, err := bq.NewWriter(0, 0, 10)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	shared := sync.NewSemaphore()
+	shared.IncN(4)
+
+	w := newTestWriter(bw, shared)
+	go w.Close()
+	<-w.Closed()
+
+	if n, err := w.Write([]byte{1, 2}); n != 0 || verror.ErrorID(err) != stream.ErrBadState.ID {
+		t.Errorf("Got (%v, %v) want (0, %v)", n, err, stream.ErrBadState.ID)
+	}
+}
+
+func newTestWriter(bqw bqueue.Writer, shared *sync.Semaphore) *writer {
+	alloc := iobuf.NewAllocator(iobuf.NewPool(0), 0)
+	return newWriter(16, bqw, alloc, shared)
+}
+
+func isTimeoutError(err error) bool {
+	neterr, ok := err.(net.Error)
+	return ok && neterr.Timeout()
+}
diff --git a/runtime/internal/rpc/stream/vif/auth.go b/runtime/internal/rpc/stream/vif/auth.go
new file mode 100644
index 0000000..c0590f0
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/auth.go
@@ -0,0 +1,245 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vif
+
+import (
+	"crypto/rand"
+	"io"
+
+	"golang.org/x/crypto/nacl/box"
+
+	rpcversion "v.io/v23/rpc/version"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/crypto"
+	"v.io/x/ref/runtime/internal/rpc/stream/message"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+	"v.io/x/ref/runtime/internal/rpc/version"
+)
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errAuthFailed                      = reg(".errAuthFailed", "authentication failed{:3}")
+	errUnsupportedEncryptVersion       = reg(".errUnsupportedEncryptVersion", "unsupported encryption version {4} < {5}")
+	errNaclBoxVersionNegotiationFailed = reg(".errNaclBoxVersionNegotiationFailed", "nacl box encryption version negotiation failed")
+	errVersionNegotiationFailed        = reg(".errVersionNegotiationFailed", "encryption version negotiation failed")
+	nullCipher                         crypto.NullControlCipher
+)
+
+// privateData includes secret data we need for encryption.
+type privateData struct {
+	naclBoxPrivateKey crypto.BoxKey
+}
+
+// AuthenticateAsClient sends a Setup message if possible.  If so, it chooses
+// encryption based on the max supported version.
+//
+// The sequence is initiated by the client.
+//
+//    - The client sends a Setup message to the server, containing the client's
+//      supported versions, and the client's crypto options.  The Setup message
+//      is sent in the clear.
+//
+//    - When the server receives the Setup message, it calls
+//      AuthenticateAsServer, which constructs a response Setup containing
+//      the server's version range, and any crypto options.
+//
+//    - The client and server use the public/private key pairs
+//      generated for the Setup messages to create an encrypted stream
+//      of SetupStream messages for the remainder of the authentication
+//      setup.  The encyrption uses NewControlCipherRPC6, which is based
+//      on code.google.com/p/go.crypto/nacl/box.
+//
+//    - Once the encrypted SetupStream channel is setup, the client and
+//      server authenticate using the vc.AuthenticateAs{Client,Server} protocol.
+//
+// Note that the Setup messages are sent in the clear, so they are subject to
+// modification by a man-in-the-middle, which can currently force a downgrade by
+// modifying the acceptable version ranges downward.  This can be addressed by
+// including a hash of the Setup message in the encrypted stream.  It is
+// likely that this will be addressed in subsequent protocol versions.
+func AuthenticateAsClient(writer io.Writer, reader *iobuf.Reader, versions *version.Range, params security.CallParams, auth *vc.ServerAuthorizer) (crypto.ControlCipher, error) {
+	if versions == nil {
+		versions = version.SupportedRange
+	}
+
+	// Send the client's public data.
+	pvt, pub, err := makeSetup(versions, params.LocalPrincipal != nil)
+	if err != nil {
+		return nil, verror.New(stream.ErrSecurity, nil, err)
+	}
+
+	errch := make(chan error, 1)
+	go func() {
+		errch <- message.WriteTo(writer, pub, nullCipher)
+	}()
+
+	pmsg, err := message.ReadFrom(reader, nullCipher)
+	if err != nil {
+		return nil, verror.New(stream.ErrNetwork, nil, err)
+	}
+	ppub, ok := pmsg.(*message.Setup)
+	if !ok {
+		return nil, verror.New(stream.ErrSecurity, nil, verror.New(errVersionNegotiationFailed, nil))
+	}
+
+	// Wait for the write to succeed.
+	if err := <-errch; err != nil {
+		return nil, verror.New(stream.ErrNetwork, nil, err)
+	}
+
+	// Choose the max version in the intersection.
+	vrange, err := pub.Versions.Intersect(&ppub.Versions)
+	if err != nil {
+		return nil, verror.New(stream.ErrNetwork, nil, err)
+	}
+	v := vrange.Max
+
+	if params.LocalPrincipal == nil {
+		return nullCipher, nil
+	}
+
+	// Perform the authentication.
+	return authenticateAsClient(writer, reader, params, auth, pvt, pub, ppub, v)
+}
+
+func authenticateAsClient(writer io.Writer, reader *iobuf.Reader, params security.CallParams, auth *vc.ServerAuthorizer,
+	pvt *privateData, pub, ppub *message.Setup, version rpcversion.RPCVersion) (crypto.ControlCipher, error) {
+	pbox := ppub.NaclBox()
+	if pbox == nil {
+		return nil, verror.New(errNaclBoxVersionNegotiationFailed, nil)
+	}
+	c := crypto.NewControlCipherRPC6(&pbox.PublicKey, &pvt.naclBoxPrivateKey, false)
+	sconn := newSetupConn(writer, reader, c)
+	// TODO(jyh): act upon the authentication results.
+	_, _, _, err := vc.AuthenticateAsClient(sconn, crypto.NewNullCrypter(), params, auth, version)
+	if err != nil {
+		return nil, err
+	}
+	return c, nil
+}
+
+// AuthenticateAsServer handles a Setup message, choosing authentication
+// based on the max common version.
+//
+// See AuthenticateAsClient for a description of the negotiation.
+func AuthenticateAsServer(writer io.Writer, reader *iobuf.Reader, versions *version.Range, principal security.Principal, lBlessings security.Blessings,
+	dc vc.DischargeClient) (crypto.ControlCipher, error) {
+	var err error
+	if versions == nil {
+		versions = version.SupportedRange
+	}
+
+	// Send server's public data.
+	pvt, pub, err := makeSetup(versions, principal != nil)
+	if err != nil {
+		return nil, err
+	}
+
+	errch := make(chan error, 1)
+	readch := make(chan struct{})
+	go func() {
+		// TODO(mattr,ribrdb): In the case of the agent, which is
+		// currently the only user of insecure connections, we need to
+		// wait for the client to initiate the communication.  The agent
+		// sends an extra first byte to clients, which clients read before
+		// dialing their side of the vif.  If we send this message before
+		// the magic byte has been sent the client will use the first
+		// byte of this message instead rendering the remainder of the
+		// stream uninterpretable.
+		if principal == nil {
+			<-readch
+		}
+		err := message.WriteTo(writer, pub, nullCipher)
+		errch <- err
+	}()
+
+	// Read client's public data.
+	pmsg, err := message.ReadFrom(reader, nullCipher)
+	close(readch) // Note: we need to close this whether we get an error or not.
+	if err != nil {
+		return nil, verror.New(stream.ErrNetwork, nil, err)
+	}
+	ppub, ok := pmsg.(*message.Setup)
+	if !ok {
+		return nil, verror.New(stream.ErrSecurity, nil, verror.New(errVersionNegotiationFailed, nil))
+	}
+
+	// Wait for the write to succeed.
+	if err := <-errch; err != nil {
+		return nil, err
+	}
+
+	// Choose the max version in the intersection.
+	vrange, err := versions.Intersect(&ppub.Versions)
+	if err != nil {
+		return nil, verror.New(stream.ErrNetwork, nil, err)
+	}
+	v := vrange.Max
+
+	if principal == nil {
+		return nullCipher, nil
+	}
+
+	// Perform authentication.
+	return authenticateAsServerRPC6(writer, reader, principal, lBlessings, dc, pvt, pub, ppub, v)
+}
+
+func authenticateAsServerRPC6(writer io.Writer, reader *iobuf.Reader, principal security.Principal, lBlessings security.Blessings, dc vc.DischargeClient,
+	pvt *privateData, pub, ppub *message.Setup, version rpcversion.RPCVersion) (crypto.ControlCipher, error) {
+	box := ppub.NaclBox()
+	if box == nil {
+		return nil, verror.New(errNaclBoxVersionNegotiationFailed, nil)
+	}
+	c := crypto.NewControlCipherRPC6(&box.PublicKey, &pvt.naclBoxPrivateKey, true)
+	sconn := newSetupConn(writer, reader, c)
+	// TODO(jyh): act upon authentication results.
+	_, _, err := vc.AuthenticateAsServer(sconn, principal, lBlessings, dc, crypto.NewNullCrypter(), version)
+	if err != nil {
+		return nil, verror.New(errAuthFailed, nil, err)
+	}
+	return c, nil
+}
+
+// getDischargeClient returns the dischargeClient needed to fetch server discharges for this call.
+// TODO(suharshs): Perhaps we should pass dischargeClient explicitly?
+func getDischargeClient(lopts []stream.ListenerOpt) vc.DischargeClient {
+	for _, o := range lopts {
+		switch v := o.(type) {
+		case vc.DischargeClient:
+			return v
+		}
+	}
+	return nil
+}
+
+// makeSetup constructs the options that this process can support.
+func makeSetup(versions *version.Range, secure bool) (*privateData, *message.Setup, error) {
+	var options []message.SetupOption
+	var pvt *privateData
+	if secure {
+		pubKey, pvtKey, err := box.GenerateKey(rand.Reader)
+		if err != nil {
+			return nil, nil, err
+		}
+		options = []message.SetupOption{&message.NaclBox{PublicKey: *pubKey}}
+		pvt = &privateData{
+			naclBoxPrivateKey: *pvtKey,
+		}
+	}
+
+	pub := &message.Setup{
+		Versions: *versions,
+		Options:  options,
+	}
+
+	return pvt, pub, nil
+}
diff --git a/runtime/internal/rpc/stream/vif/doc.go b/runtime/internal/rpc/stream/vif/doc.go
new file mode 100644
index 0000000..9f00db4
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package vif implements a virtual network interface that wraps over a
+// net.Conn and provides the ability to Dial and Listen for virtual circuits
+// (v.io/x/ref/runtime/internal/rpc/stream.VC)
+package vif
diff --git a/runtime/internal/rpc/stream/vif/faketimer.go b/runtime/internal/rpc/stream/vif/faketimer.go
new file mode 100644
index 0000000..914c4e2
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/faketimer.go
@@ -0,0 +1,90 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vif
+
+import (
+	"sync"
+	"time"
+)
+
+// Since an idle timer with a short timeout can expire before establishing a VC,
+// we provide a fake timer to reduce dependence on real time in unittests.
+type fakeTimer struct {
+	mu          sync.Mutex
+	timeout     time.Duration
+	timeoutFunc func()
+	timer       timer
+	stopped     bool
+}
+
+func newFakeTimer(d time.Duration, f func()) *fakeTimer {
+	return &fakeTimer{
+		timeout:     d,
+		timeoutFunc: f,
+		timer:       noopTimer{},
+	}
+}
+
+func (t *fakeTimer) Stop() bool {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.stopped = true
+	return t.timer.Stop()
+}
+
+func (t *fakeTimer) Reset(d time.Duration) bool {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.timeout = d
+	t.stopped = false
+	return t.timer.Reset(t.timeout)
+}
+
+func (t *fakeTimer) run(release <-chan struct{}, wg *sync.WaitGroup) {
+	defer wg.Done()
+	<-release // Wait until notified to run.
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.timeout > 0 {
+		t.timer = newTimer(t.timeout, t.timeoutFunc)
+	}
+	if t.stopped {
+		t.timer.Stop()
+	}
+}
+
+// SetFakeTimers causes the idle timers to use a fake timer instead of one
+// based on real time. The timers will be triggered when the returned function
+// is invoked. (at which point the timer setup will be restored to what it was
+// before calling this function)
+//
+// Usage:
+//   triggerTimers := SetFakeTimers()
+//   ...
+//   triggerTimers()
+//
+// This function cannot be called concurrently.
+func SetFakeTimers() func() {
+	backup := newTimer
+
+	var mu sync.Mutex
+	var wg sync.WaitGroup
+	release := make(chan struct{})
+	newTimer = func(d time.Duration, f func()) timer {
+		mu.Lock()
+		defer mu.Unlock()
+		wg.Add(1)
+		t := newFakeTimer(d, f)
+		go t.run(release, &wg)
+		return t
+	}
+	return func() {
+		mu.Lock()
+		defer mu.Unlock()
+		newTimer = backup
+		close(release)
+		wg.Wait()
+	}
+}
diff --git a/runtime/internal/rpc/stream/vif/idletimer.go b/runtime/internal/rpc/stream/vif/idletimer.go
new file mode 100644
index 0000000..a9910c7
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/idletimer.go
@@ -0,0 +1,139 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vif
+
+import (
+	"sync"
+	"time"
+
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+)
+
+// idleTimerMap keeps track of all the flows of each VC and then calls the notify
+// function in its own goroutine if there is no flow in a VC for some duration.
+type idleTimerMap struct {
+	mu         sync.Mutex
+	m          map[id.VC]*idleTimer
+	notifyFunc func(id.VC)
+	stopped    bool
+}
+
+type idleTimer struct {
+	set     map[id.Flow]struct{}
+	timeout time.Duration
+	timer   timer
+	stopped bool
+}
+
+type timer interface {
+	// Stop prevents the Timer from firing.
+	Stop() bool
+	// Reset changes the timer to expire after duration d.
+	Reset(d time.Duration) bool
+}
+
+// newIdleTimerMap returns a new idle timer map.
+func newIdleTimerMap(f func(id.VC)) *idleTimerMap {
+	return &idleTimerMap{
+		m:          make(map[id.VC]*idleTimer),
+		notifyFunc: f,
+	}
+}
+
+// Stop stops idle timers for all VC.
+func (m *idleTimerMap) Stop() {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if m.stopped {
+		return
+	}
+	for _, t := range m.m {
+		if !t.stopped {
+			t.timer.Stop()
+			t.stopped = true
+		}
+	}
+	m.stopped = true
+}
+
+// Insert starts the idle timer for the given VC. If there is no active flows
+// in the VC for the duration d, the notify function will be called in its own
+// goroutine. If d is zero, the idle timer is disabled.
+func (m *idleTimerMap) Insert(vci id.VC, d time.Duration) bool {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if m.stopped {
+		return false
+	}
+	if _, exists := m.m[vci]; exists {
+		return false
+	}
+	t := &idleTimer{
+		set:     make(map[id.Flow]struct{}),
+		timeout: d,
+	}
+	if t.timeout > 0 {
+		t.timer = newTimer(t.timeout, func() { m.notifyFunc(vci) })
+	} else {
+		t.timer = noopTimer{}
+	}
+	m.m[vci] = t
+	return true
+}
+
+// Delete deletes the idle timer for the given VC.
+func (m *idleTimerMap) Delete(vci id.VC) {
+	m.mu.Lock()
+	if t, exists := m.m[vci]; exists {
+		if !t.stopped {
+			t.timer.Stop()
+		}
+		delete(m.m, vci)
+	}
+	m.mu.Unlock()
+}
+
+// InsertFlow inserts the given flow to the given VC. All system flows will be ignored.
+func (m *idleTimerMap) InsertFlow(vci id.VC, fid id.Flow) {
+	if fid < vc.NumReservedFlows {
+		return
+	}
+	m.mu.Lock()
+	if t, exists := m.m[vci]; exists {
+		t.set[fid] = struct{}{}
+		if !t.stopped {
+			t.timer.Stop()
+			t.stopped = true
+		}
+	}
+	m.mu.Unlock()
+}
+
+// DeleteFlow deletes the given flow from the VC vci.
+func (m *idleTimerMap) DeleteFlow(vci id.VC, fid id.Flow) {
+	m.mu.Lock()
+	if t, exists := m.m[vci]; exists {
+		delete(t.set, fid)
+		if len(t.set) == 0 && t.stopped && !m.stopped {
+			t.timer.Reset(t.timeout)
+			t.stopped = false
+		}
+	}
+	m.mu.Unlock()
+}
+
+// To avoid dependence on real times in unittests, the factory function for timers
+// can be overridden (with SetFakeTimers). This factory function should only be
+// overridden for unittests.
+var newTimer = defaultTimerFactory
+
+func defaultTimerFactory(d time.Duration, f func()) timer { return time.AfterFunc(d, f) }
+
+// A noop timer.
+type noopTimer struct{}
+
+func (t noopTimer) Stop() bool                 { return false }
+func (t noopTimer) Reset(d time.Duration) bool { return false }
diff --git a/runtime/internal/rpc/stream/vif/idletimer_test.go b/runtime/internal/rpc/stream/vif/idletimer_test.go
new file mode 100644
index 0000000..d5bd6f3
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/idletimer_test.go
@@ -0,0 +1,130 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vif
+
+import (
+	"testing"
+	"time"
+
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+)
+
+func TestIdleTimer(t *testing.T) {
+	const (
+		idleTime = 5 * time.Millisecond
+		waitTime = idleTime * 2
+
+		vc1 id.VC = 1
+		vc2 id.VC = 2
+
+		flow1        id.Flow = vc.NumReservedFlows
+		flow2        id.Flow = vc.NumReservedFlows + 1
+		flowReserved id.Flow = vc.NumReservedFlows - 1
+	)
+
+	notify := make(chan interface{})
+	notifyFunc := func(vci id.VC) { notify <- vci }
+
+	m := newIdleTimerMap(notifyFunc)
+
+	// An empty map. Should not be notified.
+	if err := WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+
+	m.Insert(vc1, idleTime)
+
+	// A new empty VC. Should be notified.
+	if err := WaitForNotifications(notify, vc1); err != nil {
+		t.Error(err)
+	}
+
+	m.Delete(vc1)
+	m.Insert(vc1, idleTime)
+
+	// A VC with one flow. Should not be notified.
+	m.InsertFlow(vc1, flow1)
+	if err := WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+
+	// Try to delete non-existent flow. Should not be notified.
+	m.DeleteFlow(vc1, flow2)
+	if err := WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+
+	// Delete the flow. Should be notified.
+	m.DeleteFlow(vc1, flow1)
+	if err := WaitForNotifications(notify, vc1); err != nil {
+		t.Error(err)
+	}
+
+	// Try to delete the deleted flow again. Should not be notified.
+	m.DeleteFlow(vc1, flow1)
+	if err := WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+
+	m.Delete(vc1)
+	m.Insert(vc1, idleTime)
+
+	// Delete an empty VC. Should not be notified.
+	m.Delete(vc1)
+	if err := WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+
+	m.Insert(vc1, idleTime)
+
+	// A VC with two flows.
+	m.InsertFlow(vc1, flow1)
+	m.InsertFlow(vc1, flow2)
+
+	// Delete the first flow twice. Should not be notified.
+	m.DeleteFlow(vc1, flow1)
+	m.DeleteFlow(vc1, flow1)
+	if err := WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+
+	// Delete the second flow. Should be notified.
+	m.DeleteFlow(vc1, flow2)
+	if err := WaitForNotifications(notify, vc1); err != nil {
+		t.Error(err)
+	}
+
+	m.Delete(vc1)
+	m.Insert(vc1, idleTime)
+
+	// Insert a reserved flow. Should be notified.
+	m.InsertFlow(vc1, flowReserved)
+	if err := WaitForNotifications(notify, vc1); err != nil {
+		t.Error(err)
+	}
+
+	m.Delete(vc1)
+	m.Insert(vc1, idleTime)
+	m.Insert(vc2, idleTime)
+
+	// Multiple VCs. Should be notified for each.
+	if err := WaitForNotifications(notify, vc1, vc2); err != nil {
+		t.Error(err)
+	}
+
+	m.Delete(vc1)
+	m.Delete(vc2)
+	m.Insert(vc1, idleTime)
+
+	// Stop the timer. Should not be notified.
+	m.Stop()
+	if m.Insert(vc1, idleTime) {
+		t.Fatal("timer has been stopped, but can insert a vc")
+	}
+	if err := WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+}
diff --git a/runtime/internal/rpc/stream/vif/set.go b/runtime/internal/rpc/stream/vif/set.go
new file mode 100644
index 0000000..3032dfc
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/set.go
@@ -0,0 +1,146 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vif
+
+import (
+	"math/rand"
+	"net"
+	"runtime"
+	"sync"
+
+	"v.io/v23/rpc"
+)
+
+// Set implements a set of VIFs keyed by (network, address) of the underlying
+// connection.  Multiple goroutines can invoke methods on the Set
+// simultaneously.
+type Set struct {
+	mu      sync.RWMutex
+	set     map[string][]*VIF // GUARDED_BY(mu)
+	started map[string]bool   // GUARDED_BY(mu)
+	keys    map[*VIF]string   // GUARDED_BY(mu)
+	cond    *sync.Cond
+}
+
+// NewSet returns a new Set of VIFs.
+func NewSet() *Set {
+	s := &Set{
+		set:     make(map[string][]*VIF),
+		started: make(map[string]bool),
+		keys:    make(map[*VIF]string),
+	}
+	s.cond = sync.NewCond(&s.mu)
+	return s
+}
+
+// BlockingFind returns a VIF where the remote end of the underlying network connection
+// is identified by the provided (network, address). Returns nil if there is no
+// such VIF.
+//
+// The caller is required to call the returned unblock function, to avoid deadlock.
+// Until the returned function is called, all new BlockingFind calls for this
+// network and address will block.
+func (s *Set) BlockingFind(network, address string) (*VIF, func()) {
+	if isNonDistinctConn(network, address) {
+		return nil, func() {}
+	}
+
+	k := key(network, address)
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	for s.started[k] {
+		s.cond.Wait()
+	}
+
+	_, _, _, p := rpc.RegisteredProtocol(network)
+	for _, n := range p {
+		if vifs := s.set[key(n, address)]; len(vifs) > 0 {
+			return vifs[rand.Intn(len(vifs))], func() {}
+		}
+	}
+
+	s.started[k] = true
+	return nil, func() { s.unblock(network, address) }
+}
+
+// unblock marks the status of the network, address as no longer started, and
+// broadcasts waiting threads.
+func (s *Set) unblock(network, address string) {
+	s.mu.Lock()
+	delete(s.started, key(network, address))
+	s.cond.Broadcast()
+	s.mu.Unlock()
+}
+
+// Insert adds a VIF to the set.
+func (s *Set) Insert(vif *VIF, network, address string) {
+	k := key(network, address)
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.keys[vif] = k
+	vifs := s.set[k]
+	for _, v := range vifs {
+		if v == vif {
+			return
+		}
+	}
+	s.set[k] = append(vifs, vif)
+}
+
+// Delete removes a VIF from the set.
+func (s *Set) Delete(vif *VIF) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	k := s.keys[vif]
+	vifs := s.set[k]
+	for i, v := range vifs {
+		if v == vif {
+			if len(vifs) == 1 {
+				delete(s.set, k)
+			} else {
+				s.set[k] = append(vifs[:i], vifs[i+1:]...)
+			}
+			delete(s.keys, vif)
+			return
+		}
+	}
+}
+
+// List returns the elements in the set as a slice.
+func (s *Set) List() []*VIF {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	l := make([]*VIF, 0, len(s.set))
+	for _, vifs := range s.set {
+		l = append(l, vifs...)
+	}
+	return l
+}
+
+func key(network, address string) string {
+	if network == "tcp" || network == "ws" {
+		host, _, _ := net.SplitHostPort(address)
+		switch ip := net.ParseIP(host); {
+		case ip == nil:
+			// This may happen when address is a hostname. But we do not care
+			// about it, since vif cannot be found with a hostname anyway.
+		case ip.To4() != nil:
+			network += "4"
+		default:
+			network += "6"
+		}
+	}
+	return network + ":" + address
+}
+
+// Some network connections (like those created with net.Pipe or Unix sockets)
+// do not end up with distinct net.Addrs on distinct net.Conns.
+func isNonDistinctConn(network, address string) bool {
+	return len(address) == 0 ||
+		(network == "pipe" && address == "pipe") ||
+		(runtime.GOOS == "linux" && network == "unix" && address == "@")
+}
diff --git a/runtime/internal/rpc/stream/vif/set_test.go b/runtime/internal/rpc/stream/vif/set_test.go
new file mode 100644
index 0000000..08b8ea8
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/set_test.go
@@ -0,0 +1,340 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vif_test
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net"
+	"os"
+	"path"
+	"testing"
+	"time"
+
+	"v.io/v23/naming"
+	"v.io/v23/rpc"
+
+	_ "v.io/x/ref/runtime/factories/generic"
+	"v.io/x/ref/runtime/internal/rpc/stream/vif"
+	"v.io/x/ref/test/testutil"
+)
+
+var supportsIPv6 bool
+
+func init() {
+	simpleResolver := func(network, address string) (string, string, error) {
+		return network, address, nil
+	}
+	rpc.RegisterProtocol("unix", net.DialTimeout, simpleResolver, net.Listen)
+
+	// Check whether the platform supports IPv6.
+	ln, err := net.Listen("tcp6", "[::1]:0")
+	defer ln.Close()
+	if err == nil {
+		supportsIPv6 = true
+	}
+}
+
+func newConn(network, address string) (net.Conn, net.Conn, error) {
+	dfunc, _, lfunc, _ := rpc.RegisteredProtocol(network)
+	ln, err := lfunc(network, address)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer ln.Close()
+
+	done := make(chan net.Conn)
+	go func() {
+		conn, err := ln.Accept()
+		if err != nil {
+			panic(err)
+		}
+		conn.Read(make([]byte, 1)) // Read a dummy byte.
+		done <- conn
+	}()
+
+	conn, err := dfunc(ln.Addr().Network(), ln.Addr().String(), 1*time.Second)
+	if err != nil {
+		return nil, nil, err
+	}
+	// Write a dummy byte since wsh listener waits for the magic bytes for ws.
+	conn.Write([]byte("."))
+	return conn, <-done, nil
+}
+
+func newVIF(c, s net.Conn) (*vif.VIF, *vif.VIF, error) {
+	done := make(chan *vif.VIF)
+	go func() {
+		principal := testutil.NewPrincipal("accepted")
+		blessings := principal.BlessingStore().Default()
+		vf, err := vif.InternalNewAcceptedVIF(s, naming.FixedRoutingID(0x5), principal, blessings, nil, nil)
+		if err != nil {
+			panic(err)
+		}
+		done <- vf
+	}()
+
+	vf, err := vif.InternalNewDialedVIF(c, naming.FixedRoutingID(0xc), testutil.NewPrincipal("dialed"), nil, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	return vf, <-done, nil
+}
+
+func diff(a, b []string) []string {
+	m := make(map[string]struct{})
+	for _, x := range b {
+		m[x] = struct{}{}
+	}
+	d := make([]string, 0, len(a))
+	for _, x := range a {
+		if _, ok := m[x]; !ok {
+			d = append(d, x)
+		}
+	}
+	return d
+}
+
+func find(set *vif.Set, n, a string) *vif.VIF {
+	found, unblock := set.BlockingFind(n, a)
+	unblock()
+	return found
+}
+
+func TestSetBasic(t *testing.T) {
+	sockdir, err := ioutil.TempDir("", "TestSetBasic")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(sockdir)
+
+	all := rpc.RegisteredProtocols()
+	unknown := naming.UnknownProtocol
+	tests := []struct {
+		network, address string
+		compatibles      []string
+	}{
+		{"tcp", "127.0.0.1:0", []string{"tcp", "tcp4", "wsh", "wsh4", unknown}},
+		{"tcp4", "127.0.0.1:0", []string{"tcp", "tcp4", "wsh", "wsh4", unknown}},
+		{"tcp", "[::1]:0", []string{"tcp", "tcp6", "wsh", "wsh6", unknown}},
+		{"tcp6", "[::1]:0", []string{"tcp", "tcp6", "wsh", "wsh6", unknown}},
+		{"ws", "127.0.0.1:0", []string{"ws", "ws4", "wsh", "wsh4", unknown}},
+		{"ws4", "127.0.0.1:0", []string{"ws", "ws4", "wsh", "wsh4", unknown}},
+		{"ws", "[::1]:0", []string{"ws", "ws6", "wsh", "wsh6", unknown}},
+		{"ws6", "[::1]:0", []string{"ws", "ws6", "wsh", "wsh6", unknown}},
+		// wsh dial always uses tcp.
+		{"wsh", "127.0.0.1:0", []string{"tcp", "tcp4", "wsh", "wsh4", unknown}},
+		{"wsh4", "127.0.0.1:0", []string{"tcp", "tcp4", "wsh", "wsh4", unknown}},
+		{"wsh", "[::1]:0", []string{"tcp", "tcp6", "wsh", "wsh6", unknown}},
+		{"wsh6", "[::1]:0", []string{"tcp", "tcp6", "wsh", "wsh6", unknown}},
+		{unknown, "127.0.0.1:0", []string{"tcp", "tcp4", "wsh", "wsh4", unknown}},
+		{unknown, "[::1]:0", []string{"tcp", "tcp6", "wsh", "wsh6", unknown}},
+		{"unix", path.Join(sockdir, "socket"), []string{"unix"}},
+	}
+
+	set := vif.NewSet()
+	for _, test := range tests {
+		if test.address == "[::1]:0" && !supportsIPv6 {
+			continue
+		}
+
+		name := fmt.Sprintf("(%q, %q)", test.network, test.address)
+
+		c, s, err := newConn(test.network, test.address)
+		if err != nil {
+			t.Fatal(err)
+		}
+		vf, _, err := newVIF(c, s)
+		if err != nil {
+			t.Fatal(err)
+		}
+		a := c.RemoteAddr()
+
+		set.Insert(vf, a.Network(), a.String())
+		for _, n := range test.compatibles {
+			if found := find(set, n, a.String()); found == nil {
+				t.Fatalf("%s: Got nil, but want [%v] on find(%q, %q))", name, vf, n, a)
+			}
+		}
+
+		for _, n := range diff(all, test.compatibles) {
+			if v := find(set, n, a.String()); v != nil {
+				t.Fatalf("%s: Got [%v], but want nil on find(%q, %q))", name, v, n, a)
+			}
+		}
+
+		set.Delete(vf)
+		for _, n := range all {
+			if v := find(set, n, a.String()); v != nil {
+				t.Fatalf("%s: Got [%v], but want nil on find(%q, %q))", name, v, n, a)
+			}
+		}
+	}
+}
+
+func TestSetWithPipes(t *testing.T) {
+	c1, s1 := net.Pipe()
+	c2, s2 := net.Pipe()
+	a1 := c1.RemoteAddr()
+	a2 := c2.RemoteAddr()
+	if a1.Network() != a2.Network() || a1.String() != a2.String() {
+		t.Fatalf("This test was intended for distinct connections that have duplicate RemoteAddrs. "+
+			"That does not seem to be the case with (%q, %q) and (%q, %q)",
+			a1.Network(), a1, a2.Network(), a2)
+	}
+
+	vf1, _, err := newVIF(c1, s1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	vf2, _, err := newVIF(c2, s2)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	set := vif.NewSet()
+	set.Insert(vf1, a1.Network(), a1.String())
+	if v := find(set, a1.Network(), a1.String()); v != nil {
+		t.Fatalf("Got [%v], but want nil on find(%q, %q))", v, a1.Network(), a1)
+	}
+	if l := set.List(); len(l) != 1 || l[0] != vf1 {
+		t.Errorf("Unexpected list of VIFs: %v", l)
+	}
+
+	set.Insert(vf2, a2.Network(), a2.String())
+	if v := find(set, a2.Network(), a2.String()); v != nil {
+		t.Fatalf("Got [%v], but want nil on find(%q, %q))", v, a2.Network(), a2)
+	}
+	if l := set.List(); len(l) != 2 || l[0] != vf1 || l[1] != vf2 {
+		t.Errorf("Unexpected list of VIFs: %v", l)
+	}
+
+	set.Delete(vf1)
+	if l := set.List(); len(l) != 1 || l[0] != vf2 {
+		t.Errorf("Unexpected list of VIFs: %v", l)
+	}
+	set.Delete(vf2)
+	if l := set.List(); len(l) != 0 {
+		t.Errorf("Unexpected list of VIFs: %v", l)
+	}
+}
+
+func TestSetWithUnixSocket(t *testing.T) {
+	dir, err := ioutil.TempDir("", "TestSetWithUnixSocket")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(dir)
+
+	c1, s1, err := newConn("unix", path.Join(dir, "socket1"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	c2, s2, err := newConn("unix", path.Join(dir, "socket2"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// The client side address is always unix:@ regardless of socket name.
+	a1 := s1.RemoteAddr()
+	a2 := s2.RemoteAddr()
+	if a1.Network() != a2.Network() || a1.String() != a2.String() {
+		t.Fatalf("This test was intended for distinct connections that have duplicate RemoteAddrs. "+
+			"That does not seem to be the case with (%q, %q) and (%q, %q)",
+			a1.Network(), a1, a2.Network(), a2)
+	}
+
+	_, vf1, err := newVIF(c1, s1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, vf2, err := newVIF(c2, s2)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	set := vif.NewSet()
+	set.Insert(vf1, a1.Network(), a1.String())
+	if v := find(set, a1.Network(), a1.String()); v != nil {
+		t.Fatalf("Got [%v], but want nil on find(%q, %q))", v, a1.Network(), a1)
+	}
+	if l := set.List(); len(l) != 1 || l[0] != vf1 {
+		t.Errorf("Unexpected list of VIFs: %v", l)
+	}
+
+	set.Insert(vf2, a2.Network(), a2.String())
+	if v := find(set, a2.Network(), a2.String()); v != nil {
+		t.Fatalf("Got [%v], but want nil on find(%q, %q))", v, a2.Network(), a2)
+	}
+	if l := set.List(); len(l) != 2 || l[0] != vf1 || l[1] != vf2 {
+		t.Errorf("Unexpected list of VIFs: %v", l)
+	}
+
+	set.Delete(vf1)
+	if l := set.List(); len(l) != 1 || l[0] != vf2 {
+		t.Errorf("Unexpected list of VIFs: %v", l)
+	}
+	set.Delete(vf2)
+	if l := set.List(); len(l) != 0 {
+		t.Errorf("Unexpected list of VIFs: %v", l)
+	}
+}
+
+func TestSetInsertDelete(t *testing.T) {
+	c1, s1 := net.Pipe()
+	vf1, _, err := newVIF(c1, s1)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	set1 := vif.NewSet()
+
+	n1, a1 := c1.RemoteAddr().Network(), c1.RemoteAddr().String()
+	set1.Insert(vf1, n1, a1)
+	if l := set1.List(); len(l) != 1 || l[0] != vf1 {
+		t.Errorf("Unexpected list of VIFs: %v", l)
+	}
+
+	set1.Delete(vf1)
+	if l := set1.List(); len(l) != 0 {
+		t.Errorf("Unexpected list of VIFs: %v", l)
+	}
+}
+
+func TestBlockingFind(t *testing.T) {
+	network, address := "tcp", "127.0.0.1:1234"
+	set := vif.NewSet()
+
+	_, unblock := set.BlockingFind(network, address)
+
+	ch := make(chan *vif.VIF, 1)
+
+	// set.BlockingFind should block until set.Unblock is called with the corresponding VIF,
+	// since set.BlockingFind was called earlier.
+	go func(ch chan *vif.VIF) {
+		vf, _ := set.BlockingFind(network, address)
+		ch <- vf
+	}(ch)
+
+	// set.BlockingFind for a different network and address should not block.
+	set.BlockingFind("network", "address")
+
+	// Create and insert the VIF.
+	c, s, err := newConn(network, address)
+	if err != nil {
+		t.Fatal(err)
+	}
+	vf, _, err := newVIF(c, s)
+	if err != nil {
+		t.Fatal(err)
+	}
+	set.Insert(vf, network, address)
+	unblock()
+
+	// Now the set.BlockingFind should have returned the correct vif.
+	if cachedVif := <-ch; cachedVif != vf {
+		t.Errorf("got %v, want %v", cachedVif, vf)
+	}
+}
diff --git a/runtime/internal/rpc/stream/vif/setup_conn.go b/runtime/internal/rpc/stream/vif/setup_conn.go
new file mode 100644
index 0000000..c038ef3
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/setup_conn.go
@@ -0,0 +1,71 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vif
+
+import (
+	"io"
+
+	"v.io/v23/verror"
+
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/crypto"
+	"v.io/x/ref/runtime/internal/rpc/stream/message"
+)
+
+// setupConn writes the data to the net.Conn using SetupStream messages.
+type setupConn struct {
+	writer  io.Writer
+	reader  *iobuf.Reader
+	cipher  crypto.ControlCipher
+	rbuffer []byte // read buffer
+}
+
+var _ io.ReadWriteCloser = (*setupConn)(nil)
+
+const maxFrameSize = 8192
+
+func newSetupConn(writer io.Writer, reader *iobuf.Reader, c crypto.ControlCipher) *setupConn {
+	return &setupConn{writer: writer, reader: reader, cipher: c}
+}
+
+// Read implements the method from net.Conn.
+func (s *setupConn) Read(buf []byte) (int, error) {
+	for len(s.rbuffer) == 0 {
+		msg, err := message.ReadFrom(s.reader, s.cipher)
+		if err != nil {
+			return 0, err
+		}
+		emsg, ok := msg.(*message.SetupStream)
+		if !ok {
+			return 0, verror.New(stream.ErrSecurity, nil, verror.New(errVersionNegotiationFailed, nil))
+		}
+		s.rbuffer = emsg.Data
+	}
+	n := copy(buf, s.rbuffer)
+	s.rbuffer = s.rbuffer[n:]
+	return n, nil
+}
+
+// Write implements the method from net.Conn.
+func (s *setupConn) Write(buf []byte) (int, error) {
+	amount := 0
+	for len(buf) > 0 {
+		n := len(buf)
+		if n > maxFrameSize {
+			n = maxFrameSize
+		}
+		emsg := message.SetupStream{Data: buf[:n]}
+		if err := message.WriteTo(s.writer, &emsg, s.cipher); err != nil {
+			return 0, err
+		}
+		buf = buf[n:]
+		amount += n
+	}
+	return amount, nil
+}
+
+// Close does nothing.
+func (s *setupConn) Close() error { return nil }
diff --git a/runtime/internal/rpc/stream/vif/setup_conn_test.go b/runtime/internal/rpc/stream/vif/setup_conn_test.go
new file mode 100644
index 0000000..17622a7
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/setup_conn_test.go
@@ -0,0 +1,166 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vif
+
+import (
+	"bytes"
+	"encoding/binary"
+	"io"
+	"net"
+	"sync"
+	"testing"
+
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+)
+
+const (
+	text = `Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.`
+)
+
+func min(i, j int) int {
+	if i < j {
+		return i
+	}
+	return j
+}
+
+// testControlCipher is a super-simple cipher that xor's each byte of the
+// payload with 0xaa.
+type testControlCipher struct{}
+
+const testMACSize = 4
+
+func (*testControlCipher) MACSize() int {
+	return testMACSize
+}
+
+func testMAC(data []byte) []byte {
+	var h uint32
+	for _, b := range data {
+		h = (h << 1) ^ uint32(b)
+	}
+	var hash [4]byte
+	binary.BigEndian.PutUint32(hash[:], h)
+	return hash[:]
+}
+
+func (c *testControlCipher) Decrypt(data []byte) {
+	for i, _ := range data {
+		data[i] ^= 0xaa
+	}
+}
+
+func (c *testControlCipher) Encrypt(data []byte) {
+	for i, _ := range data {
+		data[i] ^= 0xaa
+	}
+}
+
+func (c *testControlCipher) Open(data []byte) bool {
+	mac := testMAC(data[:len(data)-testMACSize])
+	if bytes.Compare(mac, data[len(data)-testMACSize:]) != 0 {
+		return false
+	}
+	c.Decrypt(data[:len(data)-testMACSize])
+	return true
+}
+
+func (c *testControlCipher) Seal(data []byte) error {
+	c.Encrypt(data[:len(data)-testMACSize])
+	mac := testMAC(data[:len(data)-testMACSize])
+	copy(data[len(data)-testMACSize:], mac)
+	return nil
+}
+
+// shortConn performs at most 3 bytes of IO at a time.
+type shortConn struct {
+	io.ReadWriteCloser
+}
+
+func (s *shortConn) Read(data []byte) (int, error) {
+	if len(data) > 3 {
+		data = data[:3]
+	}
+	return s.ReadWriteCloser.Read(data)
+}
+
+func (s *shortConn) Write(data []byte) (int, error) {
+	n := len(data)
+	for i := 0; i < n; i += 3 {
+		j := min(n, i+3)
+		m, err := s.ReadWriteCloser.Write(data[i:j])
+		if err != nil {
+			return i + m, err
+		}
+	}
+	return n, nil
+}
+
+func TestConn(t *testing.T) {
+	p1, p2 := net.Pipe()
+	pool := iobuf.NewPool(0)
+	r1 := iobuf.NewReader(pool, p1)
+	r2 := iobuf.NewReader(pool, p2)
+	f1 := newSetupConn(p1, r1, &testControlCipher{})
+	f2 := newSetupConn(p2, r2, &testControlCipher{})
+	testConn(t, f1, f2)
+}
+
+func TestShortInnerConn(t *testing.T) {
+	p1, p2 := net.Pipe()
+	s1 := &shortConn{p1}
+	s2 := &shortConn{p2}
+	pool := iobuf.NewPool(0)
+	r1 := iobuf.NewReader(pool, s1)
+	r2 := iobuf.NewReader(pool, s2)
+	f1 := newSetupConn(s1, r1, &testControlCipher{})
+	f2 := newSetupConn(s2, r2, &testControlCipher{})
+	testConn(t, f1, f2)
+}
+
+func TestShortOuterConn(t *testing.T) {
+	p1, p2 := net.Pipe()
+	pool := iobuf.NewPool(0)
+	r1 := iobuf.NewReader(pool, p1)
+	r2 := iobuf.NewReader(pool, p2)
+	e1 := newSetupConn(p1, r1, &testControlCipher{})
+	e2 := newSetupConn(p2, r2, &testControlCipher{})
+	f1 := &shortConn{e1}
+	f2 := &shortConn{e2}
+	testConn(t, f1, f2)
+}
+
+// Write prefixes of the text onto the framed pipe and verify the frame content.
+func testConn(t *testing.T, f1, f2 io.ReadWriteCloser) {
+	// Reader loop.
+	var pending sync.WaitGroup
+	pending.Add(1)
+	go func() {
+		var buf [1024]byte
+		for i := 1; i != len(text); i++ {
+			n, err := io.ReadFull(f1, buf[:i])
+			if err != nil {
+				t.Errorf("bad read: %s", err)
+			}
+			if n != i {
+				t.Errorf("bad read: got %d bytes, expected %d bytes", n, i)
+			}
+			actual := string(buf[:n])
+			expected := string(text[:n])
+			if actual != expected {
+				t.Errorf("got %q, expected %q", actual, expected)
+			}
+		}
+		pending.Done()
+	}()
+
+	// Writer.
+	for i := 1; i != len(text); i++ {
+		if n, err := f2.Write([]byte(text[:i])); err != nil || n != i {
+			t.Errorf("bad write: i=%d n=%d err=%s", i, n, err)
+		}
+	}
+	pending.Wait()
+}
diff --git a/runtime/internal/rpc/stream/vif/testutil_test.go b/runtime/internal/rpc/stream/vif/testutil_test.go
new file mode 100644
index 0000000..9d35a6a
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/testutil_test.go
@@ -0,0 +1,40 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vif
+
+import (
+	"fmt"
+	"time"
+)
+
+// WaitForNotifications waits till all notifications in 'wants' have been received.
+func WaitForNotifications(notify <-chan interface{}, wants ...interface{}) error {
+	expected := make(map[interface{}]struct{})
+	for _, w := range wants {
+		expected[w] = struct{}{}
+	}
+	for len(expected) > 0 {
+		n := <-notify
+		if _, exists := expected[n]; !exists {
+			return fmt.Errorf("unexpected notification %v", n)
+		}
+		delete(expected, n)
+	}
+	return nil
+}
+
+// WaitWithTimeout returns error if any notification has been received before
+// the timeout expires.
+func WaitWithTimeout(notify <-chan interface{}, timeout time.Duration) error {
+	timer := time.After(timeout)
+	for {
+		select {
+		case n := <-notify:
+			return fmt.Errorf("unexpected notification %v", n)
+		case <-timer:
+			return nil
+		}
+	}
+}
diff --git a/runtime/internal/rpc/stream/vif/v23_internal_test.go b/runtime/internal/rpc/stream/vif/v23_internal_test.go
new file mode 100644
index 0000000..161553c
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/v23_internal_test.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was auto-generated via go generate.
+// DO NOT UPDATE MANUALLY
+package vif
+
+import "testing"
+import "os"
+
+import "v.io/x/ref/test"
+
+func TestMain(m *testing.M) {
+	test.Init()
+	os.Exit(m.Run())
+}
diff --git a/runtime/internal/rpc/stream/vif/vcmap.go b/runtime/internal/rpc/stream/vif/vcmap.go
new file mode 100644
index 0000000..bc6aa28
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/vcmap.go
@@ -0,0 +1,107 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vif
+
+import (
+	"sort"
+	"sync"
+
+	"v.io/x/ref/runtime/internal/lib/pcqueue"
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+)
+
+// vcMap implements a thread-safe map of vc.VC objects (vcInfo) keyed by their VCI.
+type vcMap struct {
+	mu     sync.Mutex
+	m      map[id.VC]vcInfo
+	frozen bool
+}
+
+// vcInfo represents per-VC information maintained by a VIF.
+type vcInfo struct {
+	VC *vc.VC
+	// Queues used to dispatch work to per-VC goroutines.
+	// RQ is where vif.readLoop can dispatch work to.
+	// WQ is where vif.writeLoop can dispatch work to.
+	RQ, WQ *pcqueue.T
+}
+
+func newVCMap() *vcMap { return &vcMap{m: make(map[id.VC]vcInfo)} }
+
+func (m *vcMap) Insert(c *vc.VC) (inserted bool, rq, wq *pcqueue.T) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if m.frozen {
+		return false, nil, nil
+	}
+	if _, exists := m.m[c.VCI()]; exists {
+		return false, nil, nil
+	}
+	info := vcInfo{
+		VC: c,
+		RQ: pcqueue.New(100),
+		WQ: pcqueue.New(100),
+	}
+	m.m[c.VCI()] = info
+	return true, info.RQ, info.WQ
+}
+
+func (m *vcMap) Find(vci id.VC) (vc *vc.VC, rq, wq *pcqueue.T) {
+	m.mu.Lock()
+	info := m.m[vci]
+	m.mu.Unlock()
+	return info.VC, info.RQ, info.WQ
+}
+
+// Delete deletes the given VC and returns true if the map is empty after deletion.
+func (m *vcMap) Delete(vci id.VC) bool {
+	m.mu.Lock()
+	if info, exists := m.m[vci]; exists {
+		info.RQ.Close()
+		info.WQ.Close()
+		delete(m.m, vci)
+	}
+	empty := len(m.m) == 0
+	m.mu.Unlock()
+	return empty
+}
+
+func (m *vcMap) Size() int {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	return len(m.m)
+}
+
+// Freeze causes all subsequent Inserts to fail.
+// Returns a list of all the VCs that are in the map.
+func (m *vcMap) Freeze() []vcInfo {
+	m.mu.Lock()
+	m.frozen = true
+	l := make([]vcInfo, 0, len(m.m))
+	for _, info := range m.m {
+		l = append(l, info)
+	}
+	m.mu.Unlock()
+	return l
+}
+
+type vcSlice []*vc.VC
+
+func (s vcSlice) Len() int           { return len(s) }
+func (s vcSlice) Less(i, j int) bool { return s[i].VCI() < s[j].VCI() }
+func (s vcSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// List returns the list of all VCs currently in the map, sorted by VCI
+func (m *vcMap) List() []*vc.VC {
+	m.mu.Lock()
+	l := make([]*vc.VC, 0, len(m.m))
+	for _, info := range m.m {
+		l = append(l, info.VC)
+	}
+	m.mu.Unlock()
+	sort.Sort(vcSlice(l))
+	return l
+}
diff --git a/runtime/internal/rpc/stream/vif/vcmap_test.go b/runtime/internal/rpc/stream/vif/vcmap_test.go
new file mode 100644
index 0000000..83c503f
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/vcmap_test.go
@@ -0,0 +1,86 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vif
+
+import (
+	"reflect"
+	"testing"
+
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+)
+
+func TestVCMap(t *testing.T) {
+	m := newVCMap()
+
+	vc12 := vc.InternalNew(vc.Params{VCI: 12})
+	vc34 := vc.InternalNew(vc.Params{VCI: 34})
+	vc45 := vc.InternalNew(vc.Params{VCI: 45})
+
+	if vc, _, _ := m.Find(12); vc != nil {
+		t.Errorf("Unexpected VC found: %+v", vc)
+	}
+	if ok, _, _ := m.Insert(vc34); !ok {
+		t.Errorf("Insert should have returned true on first insert")
+	}
+	if ok, _, _ := m.Insert(vc34); ok {
+		t.Errorf("Insert should have returned false on second insert")
+	}
+	if ok, _, _ := m.Insert(vc12); !ok {
+		t.Errorf("Insert should have returned true on first insert")
+	}
+	if ok, _, _ := m.Insert(vc45); !ok {
+		t.Errorf("Insert should have returned true on the first insert")
+	}
+	if g, w := m.List(), []*vc.VC{vc12, vc34, vc45}; !reflect.DeepEqual(g, w) {
+		t.Errorf("Did not get all VCs in expected order. Got %v, want %v", g, w)
+	}
+	m.Delete(vc34.VCI())
+	if g, w := m.List(), []*vc.VC{vc12, vc45}; !reflect.DeepEqual(g, w) {
+		t.Errorf("Did not get all VCs in expected order. Got %v, want %v", g, w)
+	}
+}
+
+func TestVCMapFreeze(t *testing.T) {
+	m := newVCMap()
+	vc1 := vc.InternalNew(vc.Params{VCI: 1})
+	vc2 := vc.InternalNew(vc.Params{VCI: 2})
+	if ok, _, _ := m.Insert(vc1); !ok {
+		t.Fatal("Should be able to insert the VC")
+	}
+	m.Freeze()
+	if ok, _, _ := m.Insert(vc2); ok {
+		t.Errorf("Should not be able to insert a VC after Freeze")
+	}
+	if vc, _, _ := m.Find(1); vc != vc1 {
+		t.Errorf("Got %v want %v", vc, vc1)
+	}
+	m.Delete(vc1.VCI())
+	if vc, _, _ := m.Find(1); vc != nil {
+		t.Errorf("Got %v want nil", vc)
+	}
+}
+
+func TestVCMapDelete(t *testing.T) {
+	m := newVCMap()
+
+	vc1 := vc.InternalNew(vc.Params{VCI: 1})
+	vc2 := vc.InternalNew(vc.Params{VCI: 2})
+
+	m.Insert(vc1)
+	if empty := m.Delete(vc1.VCI()); !empty {
+		t.Error("Want empty; got false")
+	}
+
+	m.Insert(vc1)
+	m.Insert(vc2)
+
+	m.Delete(vc1.VCI())
+	if empty := m.Delete(vc1.VCI()); empty {
+		t.Error("Want not empty; got true")
+	}
+	if empty := m.Delete(vc2.VCI()); !empty {
+		t.Error("Want empty; got false")
+	}
+}
diff --git a/runtime/internal/rpc/stream/vif/vif.go b/runtime/internal/rpc/stream/vif/vif.go
new file mode 100644
index 0000000..124ce98
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/vif.go
@@ -0,0 +1,1092 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vif
+
+// Logging guidelines:
+// vlog.VI(1) for per-net.Conn information
+// vlog.VI(2) for per-VC information
+// vlog.VI(3) for per-Flow information
+
+import (
+	"bytes"
+	"fmt"
+	"net"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"v.io/v23/context"
+	"v.io/v23/naming"
+	"v.io/v23/security"
+	"v.io/v23/verror"
+	"v.io/v23/vtrace"
+
+	"v.io/x/lib/vlog"
+	"v.io/x/ref/runtime/internal/lib/bqueue"
+	"v.io/x/ref/runtime/internal/lib/bqueue/drrqueue"
+	"v.io/x/ref/runtime/internal/lib/iobuf"
+	"v.io/x/ref/runtime/internal/lib/pcqueue"
+	vsync "v.io/x/ref/runtime/internal/lib/sync"
+	"v.io/x/ref/runtime/internal/lib/upcqueue"
+	inaming "v.io/x/ref/runtime/internal/naming"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/crypto"
+	"v.io/x/ref/runtime/internal/rpc/stream/id"
+	"v.io/x/ref/runtime/internal/rpc/stream/message"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+	iversion "v.io/x/ref/runtime/internal/rpc/version"
+)
+
+const pkgPath = "v.io/x/ref/runtime/internal/rpc/stream/vif"
+
+func reg(id, msg string) verror.IDAction {
+	return verror.Register(verror.ID(pkgPath+id), verror.NoRetry, msg)
+}
+
+var (
+	// These errors are intended to be used as arguments to higher
+	// level errors and hence {1}{2} is omitted from their format
+	// strings to avoid repeating these n-times in the final error
+	// message visible to the user.
+	errShuttingDown             = reg(".errShuttingDown", "underlying network connection({3}) shutting down")
+	errVCHandshakeFailed        = reg(".errVCHandshakeFailed", "VC handshake failed{:3}")
+	errSendOnExpressQFailed     = reg(".errSendOnExpressQFailed", "vif.sendOnExpressQ(OpenVC) failed{:3}")
+	errVIFIsBeingClosed         = reg(".errVIFIsBeingClosed", "VIF is being closed")
+	errVIFAlreadyAcceptingFlows = reg(".errVIFAlreadyAcceptingFlows", "already accepting flows on VIF {3}")
+	errVCsNotAcceptedOnVIF      = reg(".errVCsNotAcceptedOnVIF", "VCs not accepted on VIF {3}")
+	errAcceptFailed             = reg(".errAcceptFailed", "Accept failed{:3}")
+	errRemoteEndClosedVC        = reg(".errRemoteEndClosedVC", "remote end closed VC{:3}")
+	errFlowsNoLongerAccepted    = reg(".errFlowsNowLongerAccepted", "Flows no longer being accepted")
+	errVCAcceptFailed           = reg(".errVCAcceptFailed", "VC accept failed{:3}")
+	errIdleTimeout              = reg(".errIdleTimeout", "idle timeout")
+	errVIFAlreadySetup          = reg(".errVIFAlreadySetupt", "VIF is already setup")
+	errBqueueWriterForXpress    = reg(".errBqueueWriterForXpress", "failed to create bqueue.Writer for express messages{:3}")
+	errBqueueWriterForControl   = reg(".errBqueueWriterForControl", "failed to create bqueue.Writer for flow control counters{:3}")
+	errBqueueWriterForStopping  = reg(".errBqueueWriterForStopping", "failed to create bqueue.Writer for stopping the write loop{:3}")
+	errWriteFailed              = reg(".errWriteFailed", "write failed: got ({3}, {4}) for {5} byte message)")
+)
+
+// VIF implements a "virtual interface" over an underlying network connection
+// (net.Conn). Just like multiple network connections can be established over a
+// single physical interface, multiple Virtual Circuits (VCs) can be
+// established over a single VIF.
+type VIF struct {
+	// All reads must be performed through reader, and not directly through conn.
+	conn    net.Conn
+	pool    *iobuf.Pool
+	reader  *iobuf.Reader
+	localEP naming.Endpoint
+
+	// ctrlCipher is normally guarded by writeMu, however see the exception in
+	// readLoop.
+	ctrlCipher crypto.ControlCipher
+	writeMu    sync.Mutex
+
+	muStartTimer sync.Mutex
+	startTimer   timer
+
+	vcMap              *vcMap
+	idleTimerMap       *idleTimerMap
+	wpending, rpending vsync.WaitGroup
+
+	muListen     sync.Mutex
+	acceptor     *upcqueue.T          // GUARDED_BY(muListen)
+	listenerOpts []stream.ListenerOpt // GUARDED_BY(muListen)
+	principal    security.Principal
+	blessings    security.Blessings
+
+	muNextVCI sync.Mutex
+	nextVCI   id.VC
+
+	outgoing bqueue.T
+	expressQ bqueue.Writer
+
+	flowQ        bqueue.Writer
+	flowMu       sync.Mutex
+	flowCounters message.Counters
+
+	stopQ bqueue.Writer
+
+	// The RPC version range supported by this VIF.  In practice this is
+	// non-nil only in testing.  nil is equivalent to using the versions
+	// actually supported by this RPC implementation (which is always
+	// what you want outside of tests).
+	versions *iversion.Range
+
+	isClosedMu sync.Mutex
+	isClosed   bool // GUARDED_BY(isClosedMu)
+	onClose    func(*VIF)
+
+	// These counters track the number of messages sent and received by
+	// this VIF.
+	muMsgCounters sync.Mutex
+	msgCounters   map[string]int64
+}
+
+// ConnectorAndFlow represents a Flow and the Connector that can be used to
+// create another Flow over the same underlying VC.
+type ConnectorAndFlow struct {
+	Connector stream.Connector
+	Flow      stream.Flow
+}
+
+// Separate out constants that are not exported so that godoc looks nicer for
+// the exported ones.
+const (
+	// Priorities of the buffered queues used for flow control of writes.
+	expressPriority bqueue.Priority = iota
+	controlPriority
+	// The range of flow priorities is [flowPriority, flowPriority + NumFlowPriorities)
+	flowPriority
+	stopPriority = flowPriority + vc.NumFlowPriorities
+)
+
+const (
+	// Convenience aliases so that the package name "vc" does not
+	// conflict with the variables named "vc".
+	defaultBytesBufferedPerFlow = vc.DefaultBytesBufferedPerFlow
+	sharedFlowID                = vc.SharedFlowID
+)
+
+// InternalNewDialedVIF creates a new virtual interface over the provided
+// network connection, under the assumption that the conn object was created
+// using net.Dial. If onClose is given, it is run in its own goroutine when
+// the vif has been closed.
+//
+// As the name suggests, this method is intended for use only within packages
+// placed inside v.io/x/ref/runtime/internal. Code outside the
+// v.io/x/ref/runtime/internal/* packages should never call this method.
+func InternalNewDialedVIF(conn net.Conn, rid naming.RoutingID, principal security.Principal, versions *iversion.Range, onClose func(*VIF), opts ...stream.VCOpt) (*VIF, error) {
+	ctx := getDialContext(opts)
+	if ctx != nil {
+		var span vtrace.Span
+		ctx, span = vtrace.WithNewSpan(ctx, "InternalNewDialedVIF")
+		span.Annotatef("(%v, %v)", conn.RemoteAddr().Network(), conn.RemoteAddr())
+		defer span.Finish()
+	}
+	pool := iobuf.NewPool(0)
+	reader := iobuf.NewReader(pool, conn)
+	params := security.CallParams{LocalPrincipal: principal, LocalEndpoint: localEP(conn, rid, versions)}
+
+	// TODO(ataly, ashankar, suharshs): Figure out what authorization policy to use
+	// for authenticating the server during VIF establishment. Note that we cannot
+	// use the VC.ServerAuthorizer available in 'opts' as that applies to the end
+	// server and not the remote endpoint of the VIF.
+	c, err := AuthenticateAsClient(conn, reader, versions, params, nil)
+	if err != nil {
+		return nil, verror.New(stream.ErrNetwork, ctx, err)
+	}
+	var blessings security.Blessings
+
+	if principal != nil {
+		blessings = principal.BlessingStore().Default()
+	}
+	var startTimeout time.Duration
+	for _, o := range opts {
+		switch v := o.(type) {
+		case vc.StartTimeout:
+			startTimeout = v.Duration
+		}
+	}
+	return internalNew(conn, pool, reader, rid, id.VC(vc.NumReservedVCs), versions, principal, blessings, startTimeout, onClose, nil, nil, c)
+}
+
+// InternalNewAcceptedVIF creates a new virtual interface over the provided
+// network connection, under the assumption that the conn object was created
+// using an Accept call on a net.Listener object. If onClose is given, it is
+// run in its own goroutine when the vif has been closed.
+//
+// The returned VIF is also setup for accepting new VCs and Flows with the provided
+// ListenerOpts.
+//
+// As the name suggests, this method is intended for use only within packages
+// placed inside v.io/x/ref/runtime/internal. Code outside the
+// v.io/x/ref/runtime/internal/* packages should never call this method.
+func InternalNewAcceptedVIF(conn net.Conn, rid naming.RoutingID, principal security.Principal, blessings security.Blessings, versions *iversion.Range, onClose func(*VIF), lopts ...stream.ListenerOpt) (*VIF, error) {
+	pool := iobuf.NewPool(0)
+	reader := iobuf.NewReader(pool, conn)
+
+	dischargeClient := getDischargeClient(lopts)
+
+	c, err := AuthenticateAsServer(conn, reader, versions, principal, blessings, dischargeClient)
+	if err != nil {
+		return nil, err
+	}
+
+	var startTimeout time.Duration
+	for _, o := range lopts {
+		switch v := o.(type) {
+		case vc.StartTimeout:
+			startTimeout = v.Duration
+		}
+	}
+	return internalNew(conn, pool, reader, rid, id.VC(vc.NumReservedVCs)+1, versions, principal, blessings, startTimeout, onClose, upcqueue.New(), lopts, c)
+}
+
+func internalNew(conn net.Conn, pool *iobuf.Pool, reader *iobuf.Reader, rid naming.RoutingID, initialVCI id.VC, versions *iversion.Range, principal security.Principal, blessings security.Blessings, startTimeout time.Duration, onClose func(*VIF), acceptor *upcqueue.T, listenerOpts []stream.ListenerOpt, c crypto.ControlCipher) (*VIF, error) {
+	var (
+		// Choose IDs that will not conflict with any other (VC, Flow)
+		// pairs.  VCI 0 is never used by the application (it is
+		// reserved for control messages), so steal from the Flow space
+		// there.
+		expressID bqueue.ID = packIDs(0, 0)
+		flowID    bqueue.ID = packIDs(0, 1)
+		stopID    bqueue.ID = packIDs(0, 2)
+	)
+	outgoing := drrqueue.New(vc.MaxPayloadSizeBytes)
+
+	expressQ, err := outgoing.NewWriter(expressID, expressPriority, defaultBytesBufferedPerFlow)
+	if err != nil {
+		return nil, verror.New(stream.ErrNetwork, nil, verror.New(errBqueueWriterForXpress, nil, err))
+	}
+	expressQ.Release(-1) // Disable flow control
+
+	flowQ, err := outgoing.NewWriter(flowID, controlPriority, flowToken.Size())
+	if err != nil {
+		return nil, verror.New(stream.ErrNetwork, nil, verror.New(errBqueueWriterForControl, nil, err))
+	}
+	flowQ.Release(-1) // Disable flow control
+
+	stopQ, err := outgoing.NewWriter(stopID, stopPriority, 1)
+	if err != nil {
+		return nil, verror.New(stream.ErrNetwork, nil, verror.New(errBqueueWriterForStopping, nil, err))
+	}
+	stopQ.Release(-1) // Disable flow control
+
+	if versions == nil {
+		versions = iversion.SupportedRange
+	}
+
+	vif := &VIF{
+		conn:         conn,
+		pool:         pool,
+		reader:       reader,
+		ctrlCipher:   c,
+		vcMap:        newVCMap(),
+		acceptor:     acceptor,
+		listenerOpts: listenerOpts,
+		principal:    principal,
+		localEP:      localEP(conn, rid, versions),
+		nextVCI:      initialVCI,
+		outgoing:     outgoing,
+		expressQ:     expressQ,
+		flowQ:        flowQ,
+		flowCounters: message.NewCounters(),
+		stopQ:        stopQ,
+		versions:     versions,
+		onClose:      onClose,
+		msgCounters:  make(map[string]int64),
+		blessings:    blessings,
+	}
+	if startTimeout > 0 {
+		vif.startTimer = newTimer(startTimeout, vif.Close)
+	}
+	vif.idleTimerMap = newIdleTimerMap(func(vci id.VC) {
+		vc, _, _ := vif.vcMap.Find(vci)
+		if vc != nil {
+			vif.closeVCAndSendMsg(vc, false, verror.New(errIdleTimeout, nil))
+		}
+	})
+	go vif.readLoop()
+	go vif.writeLoop()
+	return vif, nil
+}
+
+// Dial creates a new VC to the provided remote identity, authenticating the VC
+// with the provided local identity.
+func (vif *VIF) Dial(remoteEP naming.Endpoint, principal security.Principal, opts ...stream.VCOpt) (stream.VC, error) {
+	var idleTimeout time.Duration
+	for _, o := range opts {
+		switch v := o.(type) {
+		case vc.IdleTimeout:
+			idleTimeout = v.Duration
+		}
+	}
+	vc, err := vif.newVC(vif.allocVCI(), vif.localEP, remoteEP, idleTimeout, true)
+	if err != nil {
+		return nil, err
+	}
+	counters := message.NewCounters()
+	counters.Add(vc.VCI(), sharedFlowID, defaultBytesBufferedPerFlow)
+
+	sendPublicKey := func(pubKey *crypto.BoxKey) error {
+		var options []message.SetupOption
+		if pubKey != nil {
+			options = []message.SetupOption{&message.NaclBox{PublicKey: *pubKey}}
+		}
+		err := vif.sendOnExpressQ(&message.SetupVC{
+			VCI:            vc.VCI(),
+			RemoteEndpoint: remoteEP,
+			LocalEndpoint:  vif.localEP,
+			Counters:       counters,
+			Setup: message.Setup{
+				Versions: *vif.versions,
+				Options:  options,
+			},
+		})
+		if err != nil {
+			err = verror.New(stream.ErrNetwork, nil,
+				verror.New(errSendOnExpressQFailed, nil, err))
+		}
+		return err
+	}
+	if err = vc.HandshakeDialedVC(principal, sendPublicKey, opts...); err != nil {
+		vif.deleteVC(vc.VCI())
+		vc.Close(err)
+		return nil, err
+	}
+	return vc, nil
+}
+
+// Close closes all VCs (and thereby Flows) over the VIF and then closes the
+// underlying network connection after draining all pending writes on those
+// VCs.
+func (vif *VIF) Close() {
+	vif.isClosedMu.Lock()
+	if vif.isClosed {
+		vif.isClosedMu.Unlock()
+		return
+	}
+	vif.isClosed = true
+	vif.isClosedMu.Unlock()
+
+	vlog.VI(1).Infof("Closing VIF %s", vif)
+	// Stop accepting new VCs.
+	vif.StopAccepting()
+	// Close local datastructures for all existing VCs.
+	vcs := vif.vcMap.Freeze()
+	// Stop the idle timers.
+	vif.idleTimerMap.Stop()
+	for _, vc := range vcs {
+		vc.VC.Close(verror.New(stream.ErrNetwork, nil, verror.New(errVIFIsBeingClosed, nil)))
+	}
+	// Wait for the vcWriteLoops to exit (after draining queued up messages).
+	vif.stopQ.Close()
+	vif.wpending.Wait()
+	// Close the underlying network connection.
+	// No need to send individual messages to close all pending VCs since
+	// the remote end should know to close all VCs when the VIF's
+	// connection breaks.
+	if err := vif.conn.Close(); err != nil {
+		vlog.VI(1).Infof("net.Conn.Close failed on VIF %s: %v", vif, err)
+	}
+	// Notify that the VIF has been closed.
+	if vif.onClose != nil {
+		go vif.onClose(vif)
+	}
+}
+
+// StartAccepting begins accepting Flows (and VCs) initiated by the remote end
+// of a VIF. opts is used to setup the listener on newly established VCs.
+func (vif *VIF) StartAccepting(opts ...stream.ListenerOpt) error {
+	vif.muListen.Lock()
+	defer vif.muListen.Unlock()
+	if vif.acceptor != nil {
+		return verror.New(stream.ErrNetwork, nil, verror.New(errVIFIsBeingClosed, nil, vif))
+	}
+	vif.acceptor = upcqueue.New()
+	vif.listenerOpts = opts
+	return nil
+}
+
+// StopAccepting prevents any Flows initiated by the remote end of a VIF from
+// being accepted and causes any existing and future calls to Accept to fail
+// immediately.
+func (vif *VIF) StopAccepting() {
+	vif.muListen.Lock()
+	defer vif.muListen.Unlock()
+	if vif.acceptor != nil {
+		vif.acceptor.Shutdown()
+		vif.acceptor = nil
+		vif.listenerOpts = nil
+	}
+}
+
+// Accept returns the (stream.Connector, stream.Flow) pair of a newly
+// established VC and/or Flow.
+//
+// Sample usage:
+//	for {
+//		cAndf, err := vif.Accept()
+//		switch {
+//		case err != nil:
+//			fmt.Println("Accept error:", err)
+//			return
+//		case cAndf.Flow == nil:
+//			fmt.Println("New VC established:", cAndf.Connector)
+//		default:
+//			fmt.Println("New flow established")
+//			go handleFlow(cAndf.Flow)
+//		}
+//	}
+func (vif *VIF) Accept() (ConnectorAndFlow, error) {
+	vif.muListen.Lock()
+	acceptor := vif.acceptor
+	vif.muListen.Unlock()
+	if acceptor == nil {
+		return ConnectorAndFlow{}, verror.New(stream.ErrNetwork, nil, verror.New(errVCsNotAcceptedOnVIF, nil, vif))
+	}
+	item, err := acceptor.Get(nil)
+	if err != nil {
+		return ConnectorAndFlow{}, verror.New(stream.ErrNetwork, nil, verror.New(errAcceptFailed, nil, err))
+	}
+	return item.(ConnectorAndFlow), nil
+}
+
+func (vif *VIF) String() string {
+	l := vif.conn.LocalAddr()
+	r := vif.conn.RemoteAddr()
+	return fmt.Sprintf("(%s, %s) <-> (%s, %s)", l.Network(), l, r.Network(), r)
+}
+
+func (vif *VIF) readLoop() {
+	defer vif.Close()
+	defer vif.stopVCDispatchLoops()
+	for {
+		// vif.ctrlCipher is guarded by vif.writeMu.  However, the only mutation
+		// to it is in handleMessage, which runs in the same goroutine, so a
+		// lock is not required here.
+		msg, err := message.ReadFrom(vif.reader, vif.ctrlCipher)
+		if err != nil {
+			vlog.VI(1).Infof("Exiting readLoop of VIF %s because of read error: %v", vif, err)
+			return
+		}
+		vlog.VI(3).Infof("Received %T = [%v] on VIF %s", msg, msg, vif)
+		if err := vif.handleMessage(msg); err != nil {
+			vlog.VI(1).Infof("Exiting readLoop of VIF %s because of message error: %v", vif, err)
+			return
+		}
+	}
+}
+
+// handleMessage handles a single incoming message.  Any error returned is
+// fatal, causing the VIF to close.
+func (vif *VIF) handleMessage(msg message.T) error {
+	vif.muMsgCounters.Lock()
+	vif.msgCounters[fmt.Sprintf("Recv(%T)", msg)]++
+	vif.muMsgCounters.Unlock()
+
+	switch m := msg.(type) {
+
+	case *message.Data:
+		_, rq, _ := vif.vcMap.Find(m.VCI)
+		if rq == nil {
+			vlog.VI(2).Infof("Ignoring message of %d bytes for unrecognized VCI %d on VIF %s", m.Payload.Size(), m.VCI, vif)
+			m.Release()
+			return nil
+		}
+		if err := rq.Put(m, nil); err != nil {
+			vlog.VI(2).Infof("Failed to put message(%v) on VC queue on VIF %v: %v", m, vif, err)
+			m.Release()
+		}
+
+	case *message.SetupVC:
+		// First, find the public key we need out of the message.
+		var theirPK *crypto.BoxKey
+		box := m.Setup.NaclBox()
+		if box != nil {
+			theirPK = &box.PublicKey
+		}
+
+		// If we dialed this VC, then this is a response and we should finish
+		// the vc handshake.  Otherwise, this message is opening a new VC.
+		if vif.dialedVCI(m.VCI) {
+			vif.distributeCounters(m.Counters)
+			if vc, _, _ := vif.vcMap.Find(m.VCI); vc != nil {
+				intersection, err := vif.versions.Intersect(&m.Setup.Versions)
+				if err != nil {
+					vif.closeVCAndSendMsg(vc, false, err)
+				} else if err := vc.FinishHandshakeDialedVC(intersection.Max, theirPK); err != nil {
+					vif.closeVCAndSendMsg(vc, false, err)
+				}
+				return nil
+			}
+			vlog.VI(2).Infof("Ignoring SetupVC message %+v for unknown dialed VC", m)
+			return nil
+		}
+
+		// This is an accepted VC.
+		intersection, err := vif.versions.Intersect(&m.Setup.Versions)
+		if err != nil {
+			vlog.VI(2).Infof("SetupVC message %+v to VIF %s did not present compatible versions: %v", m, vif, err)
+			vif.sendOnExpressQ(&message.CloseVC{
+				VCI:   m.VCI,
+				Error: err.Error(),
+			})
+			return nil
+		}
+		vif.muListen.Lock()
+		closed := vif.acceptor == nil || vif.acceptor.IsClosed()
+		lopts := vif.listenerOpts
+		vif.muListen.Unlock()
+		if closed {
+			vlog.VI(2).Infof("Ignoring SetupVC message %+v as VIF %s does not accept VCs", m, vif)
+			vif.sendOnExpressQ(&message.CloseVC{
+				VCI:   m.VCI,
+				Error: "VCs not accepted",
+			})
+			return nil
+		}
+		var idleTimeout time.Duration
+		for _, o := range lopts {
+			switch v := o.(type) {
+			case vc.IdleTimeout:
+				idleTimeout = v.Duration
+			}
+		}
+		vc, err := vif.newVC(m.VCI, m.RemoteEndpoint, m.LocalEndpoint, idleTimeout, false)
+		if err != nil {
+			vif.sendOnExpressQ(&message.CloseVC{
+				VCI:   m.VCI,
+				Error: err.Error(),
+			})
+			return nil
+		}
+		vif.distributeCounters(m.Counters)
+		keyExchanger := func(pubKey *crypto.BoxKey) (*crypto.BoxKey, error) {
+			var options []message.SetupOption
+			if pubKey != nil {
+				options = []message.SetupOption{&message.NaclBox{PublicKey: *pubKey}}
+			}
+			err = vif.sendOnExpressQ(&message.SetupVC{
+				VCI: m.VCI,
+				Setup: message.Setup{
+					// Note that servers send clients not their actual supported versions,
+					// but the intersected range of the server and client ranges.  This
+					// is important because proxies may have adjusted the version ranges
+					// along the way, and we should negotiate a version that is compatible
+					// with all intermediate hops.
+					Versions: *intersection,
+					Options:  options,
+				},
+				RemoteEndpoint: m.LocalEndpoint,
+				LocalEndpoint:  vif.localEP,
+				// TODO(mattr): Consider adding counters.  See associated comment
+				// in vc.go:VC.HandshakeAcceptedVC for more details.
+			})
+			return theirPK, err
+		}
+		go vif.acceptFlowsLoop(vc, vc.HandshakeAcceptedVC(intersection.Max, vif.principal, vif.blessings, keyExchanger, lopts...))
+
+	case *message.CloseVC:
+		if vc, _, _ := vif.vcMap.Find(m.VCI); vc != nil {
+			vif.deleteVC(vc.VCI())
+			vlog.VI(2).Infof("CloseVC(%+v) on VIF %s", m, vif)
+			// TODO(cnicolaou): it would be nice to have a method on VC
+			// to indicate a 'remote close' rather than a 'local one'. This helps
+			// with error reporting since we expect reads/writes to occur
+			// after a remote close, but not after a local close.
+			vc.Close(verror.New(stream.ErrNetwork, nil, verror.New(errRemoteEndClosedVC, nil, m.Error)))
+			return nil
+		}
+		vlog.VI(2).Infof("Ignoring CloseVC(%+v) for unrecognized VCI on VIF %s", m, vif)
+
+	case *message.AddReceiveBuffers:
+		vif.distributeCounters(m.Counters)
+
+	case *message.OpenFlow:
+		if vc, _, _ := vif.vcMap.Find(m.VCI); vc != nil {
+			if err := vc.AcceptFlow(m.Flow); err != nil {
+				vlog.VI(3).Infof("OpenFlow %+v on VIF %v failed:%v", m, vif, err)
+				cm := &message.Data{VCI: m.VCI, Flow: m.Flow}
+				cm.SetClose()
+				vif.sendOnExpressQ(cm)
+				return nil
+			}
+			vc.ReleaseCounters(m.Flow, m.InitialCounters)
+			return nil
+		}
+		vlog.VI(2).Infof("Ignoring OpenFlow(%+v) for unrecognized VCI on VIF %s", m, m, vif)
+
+	case *message.Setup:
+		vlog.Infof("Ignoring redundant Setup message %T on VIF %s", m, vif)
+
+	default:
+		vlog.Infof("Ignoring unrecognized message %T on VIF %s", m, vif)
+	}
+	return nil
+}
+
+func (vif *VIF) vcDispatchLoop(vc *vc.VC, messages *pcqueue.T) {
+	defer vlog.VI(2).Infof("Exiting vcDispatchLoop(%v) on VIF %v", vc, vif)
+	defer vif.rpending.Done()
+	for {
+		qm, err := messages.Get(nil)
+		if err != nil {
+			return
+		}
+		m := qm.(*message.Data)
+		if err := vc.DispatchPayload(m.Flow, m.Payload); err != nil {
+			vlog.VI(2).Infof("Ignoring data message %v for on VIF %s: %v", m, vif, err)
+		}
+		if m.Close() {
+			vif.shutdownFlow(vc, m.Flow)
+		}
+	}
+}
+
+func (vif *VIF) stopVCDispatchLoops() {
+	vcs := vif.vcMap.Freeze()
+	for _, v := range vcs {
+		v.RQ.Close()
+	}
+	vif.rpending.Wait()
+}
+
+func clientVCClosed(err error) bool {
+	// If we've encountered a networking error, then all likelihood the
+	// connection to the client is closed.
+	return verror.ErrorID(err) == stream.ErrNetwork.ID
+}
+
+func (vif *VIF) acceptFlowsLoop(vc *vc.VC, c <-chan vc.HandshakeResult) {
+	hr := <-c
+	if hr.Error != nil {
+		vif.closeVCAndSendMsg(vc, clientVCClosed(hr.Error), hr.Error)
+		return
+	}
+
+	vif.muListen.Lock()
+	acceptor := vif.acceptor
+	vif.muListen.Unlock()
+	if acceptor == nil {
+		vif.closeVCAndSendMsg(vc, false, verror.New(errFlowsNoLongerAccepted, nil))
+		return
+	}
+
+	// Notify any listeners that a new VC has been established
+	if err := acceptor.Put(ConnectorAndFlow{vc, nil}); err != nil {
+		vif.closeVCAndSendMsg(vc, clientVCClosed(err), verror.New(errVCAcceptFailed, nil, err))
+		return
+	}
+
+	vlog.VI(2).Infof("Running acceptFlowsLoop for VC %v on VIF %v", vc, vif)
+	for {
+		f, err := hr.Listener.Accept()
+		if err != nil {
+			vlog.VI(2).Infof("Accept failed on VC %v on VIF %v: %v", vc, vif, err)
+			return
+		}
+		if err := acceptor.Put(ConnectorAndFlow{vc, f}); err != nil {
+			vlog.VI(2).Infof("vif.acceptor.Put(%v, %T) on VIF %v failed: %v", vc, f, vif, err)
+			f.Close()
+			return
+		}
+	}
+}
+
+func (vif *VIF) distributeCounters(counters message.Counters) {
+	for cid, bytes := range counters {
+		vc, _, _ := vif.vcMap.Find(cid.VCI())
+		if vc == nil {
+			vlog.VI(2).Infof("Ignoring counters for non-existent VCI %d on VIF %s", cid.VCI(), vif)
+			continue
+		}
+		vc.ReleaseCounters(cid.Flow(), bytes)
+	}
+}
+
+func (vif *VIF) writeLoop() {
+	defer vif.outgoing.Close()
+	defer vif.stopVCWriteLoops()
+	for {
+		writer, bufs, err := vif.outgoing.Get(nil)
+		if err != nil {
+			vlog.VI(1).Infof("Exiting writeLoop of VIF %s because of bqueue.Get error: %v", vif, err)
+			return
+		}
+		vif.muMsgCounters.Lock()
+		vif.msgCounters[fmt.Sprintf("Send(%T)", writer)]++
+		vif.muMsgCounters.Unlock()
+		switch writer {
+		case vif.expressQ:
+			for _, b := range bufs {
+				if err := vif.writeSerializedMessage(b.Contents); err != nil {
+					vlog.VI(1).Infof("Exiting writeLoop of VIF %s because Control message write failed: %s", vif, err)
+					releaseBufs(bufs)
+					return
+				}
+				b.Release()
+			}
+		case vif.flowQ:
+			msg := &message.AddReceiveBuffers{}
+			// No need to call releaseBufs(bufs) as all bufs are
+			// the exact same value: flowToken.
+			vif.flowMu.Lock()
+			if len(vif.flowCounters) > 0 {
+				msg.Counters = vif.flowCounters
+				vif.flowCounters = message.NewCounters()
+			}
+			vif.flowMu.Unlock()
+			if len(msg.Counters) > 0 {
+				vlog.VI(3).Infof("Sending counters %v on VIF %s", msg.Counters, vif)
+				if err := vif.writeMessage(msg); err != nil {
+					vlog.VI(1).Infof("Exiting writeLoop of VIF %s because AddReceiveBuffers message write failed: %v", vif, err)
+					return
+				}
+			}
+		case vif.stopQ:
+			// Lowest-priority queue which will never have any
+			// buffers, Close is the only method called on it.
+			return
+		default:
+			vif.writeDataMessages(writer, bufs)
+		}
+	}
+}
+
+func (vif *VIF) vcWriteLoop(vc *vc.VC, messages *pcqueue.T) {
+	defer vlog.VI(2).Infof("Exiting vcWriteLoop(%v) on VIF %v", vc, vif)
+	defer vif.wpending.Done()
+	for {
+		qm, err := messages.Get(nil)
+		if err != nil {
+			return
+		}
+		m := qm.(*message.Data)
+		m.Payload, err = vc.Encrypt(m.Flow, m.Payload)
+		if err != nil {
+			vlog.Infof("Encryption failed. Flow:%v VC:%v Error:%v", m.Flow, vc, err)
+		}
+		if m.Close() {
+			// The last bytes written on the flow will be sent out
+			// on vif.conn. Local datastructures for the flow can
+			// be cleaned up now.
+			vif.shutdownFlow(vc, m.Flow)
+		}
+		if err == nil {
+			err = vif.writeMessage(m)
+		}
+		if err != nil {
+			// TODO(caprita): Calling closeVCAndSendMsg below causes
+			// a race as described in:
+			// https://docs.google.com/a/google.com/document/d/1C0kxfYhuOcStdV7tnLZELZpUhfQCZj47B0JrzbE29h8/edit
+			//
+			// There should be a finer grained way to fix this, and
+			// there are likely other instances where we should not
+			// be closing the VC.
+			//
+			// For now, commenting out the line below removes the
+			// flakiness from our existing unit tests, but this
+			// needs to be revisited and fixed correctly.
+			//
+			//   vif.closeVCAndSendMsg(vc, fmt.Sprintf("write failure: %v", err))
+
+			// Drain the queue and exit.
+			for {
+				qm, err := messages.Get(nil)
+				if err != nil {
+					return
+				}
+				qm.(*message.Data).Release()
+			}
+		}
+	}
+}
+
+func (vif *VIF) stopVCWriteLoops() {
+	vcs := vif.vcMap.Freeze()
+	vif.idleTimerMap.Stop()
+	for _, v := range vcs {
+		v.WQ.Close()
+	}
+}
+
+// sendOnExpressQ adds 'msg' to the expressQ (highest priority queue) of messages to write on the wire.
+func (vif *VIF) sendOnExpressQ(msg message.T) error {
+	vlog.VI(2).Infof("sendOnExpressQ(%T = %+v) on VIF %s", msg, msg, vif)
+	var buf bytes.Buffer
+	// Don't encrypt yet, because the message ordering isn't yet determined.
+	// Encryption is performed by vif.writeSerializedMessage() when the
+	// message is actually written to vif.conn.
+	vif.writeMu.Lock()
+	c := vif.ctrlCipher
+	vif.writeMu.Unlock()
+	if err := message.WriteTo(&buf, msg, crypto.NewDisabledControlCipher(c)); err != nil {
+		return err
+	}
+	return vif.expressQ.Put(iobuf.NewSlice(buf.Bytes()), nil)
+}
+
+// writeMessage writes the message to the channel.  Writes must be serialized so
+// that the control channel can be encrypted, so we acquire the writeMu.
+func (vif *VIF) writeMessage(msg message.T) error {
+	vif.writeMu.Lock()
+	defer vif.writeMu.Unlock()
+	return message.WriteTo(vif.conn, msg, vif.ctrlCipher)
+}
+
+// Write writes the message to the channel, encrypting the control data.  Writes
+// must be serialized so that the control channel can be encrypted, so we
+// acquire the writeMu.
+func (vif *VIF) writeSerializedMessage(msg []byte) error {
+	vif.writeMu.Lock()
+	defer vif.writeMu.Unlock()
+	if err := message.EncryptMessage(msg, vif.ctrlCipher); err != nil {
+		return err
+	}
+	if n, err := vif.conn.Write(msg); err != nil {
+		return verror.New(stream.ErrNetwork, nil, verror.New(errWriteFailed, nil, n, err, len(msg)))
+	}
+	return nil
+}
+
+func (vif *VIF) writeDataMessages(writer bqueue.Writer, bufs []*iobuf.Slice) {
+	vci, fid := unpackIDs(writer.ID())
+	// iobuf.Coalesce will coalesce buffers only if they are adjacent to
+	// each other.  In the worst case, each buf will be non-adjacent to the
+	// others and the code below will end up with multiple small writes
+	// instead of a single big one.
+	// Might want to investigate this and see if this needs to be
+	// revisited.
+	bufs = iobuf.Coalesce(bufs, uint(vc.MaxPayloadSizeBytes))
+	_, _, wq := vif.vcMap.Find(vci)
+	if wq == nil {
+		// VC has been removed, stop sending messages
+		vlog.VI(2).Infof("VCI %d on VIF %s was shutdown, dropping %d messages that were pending a write", vci, vif, len(bufs))
+		releaseBufs(bufs)
+		return
+	}
+	last := len(bufs) - 1
+	drained := writer.IsDrained()
+	for i, b := range bufs {
+		d := &message.Data{VCI: vci, Flow: fid, Payload: b}
+		if drained && i == last {
+			d.SetClose()
+		}
+		if err := wq.Put(d, nil); err != nil {
+			releaseBufs(bufs[i:])
+			return
+		}
+	}
+	if len(bufs) == 0 && drained {
+		d := &message.Data{VCI: vci, Flow: fid}
+		d.SetClose()
+		if err := wq.Put(d, nil); err != nil {
+			d.Release()
+		}
+	}
+}
+
+func (vif *VIF) dialedVCI(VCI id.VC) bool {
+	return vif.nextVCI%2 == VCI%2
+}
+
+func (vif *VIF) allocVCI() id.VC {
+	vif.muNextVCI.Lock()
+	ret := vif.nextVCI
+	vif.nextVCI += 2
+	vif.muNextVCI.Unlock()
+	return ret
+}
+
+func (vif *VIF) newVC(vci id.VC, localEP, remoteEP naming.Endpoint, idleTimeout time.Duration, dialed bool) (*vc.VC, error) {
+	vif.muStartTimer.Lock()
+	if vif.startTimer != nil {
+		vif.startTimer.Stop()
+		vif.startTimer = nil
+	}
+	vif.muStartTimer.Unlock()
+	macSize := vif.ctrlCipher.MACSize()
+	vc := vc.InternalNew(vc.Params{
+		VCI:          vci,
+		Dialed:       dialed,
+		LocalEP:      localEP,
+		RemoteEP:     remoteEP,
+		Pool:         vif.pool,
+		ReserveBytes: uint(message.HeaderSizeBytes + macSize),
+		Helper:       vcHelper{vif},
+	})
+	added, rq, wq := vif.vcMap.Insert(vc)
+	if added {
+		vif.idleTimerMap.Insert(vc.VCI(), idleTimeout)
+	}
+	// Start vcWriteLoop
+	if added = added && vif.wpending.TryAdd(); added {
+		go vif.vcWriteLoop(vc, wq)
+	}
+	// Start vcDispatchLoop
+	if added = added && vif.rpending.TryAdd(); added {
+		go vif.vcDispatchLoop(vc, rq)
+	}
+	if !added {
+		if rq != nil {
+			rq.Close()
+		}
+		if wq != nil {
+			wq.Close()
+		}
+		vc.Close(verror.New(stream.ErrAborted, nil, verror.New(errShuttingDown, nil, vif)))
+		vif.deleteVC(vci)
+		return nil, verror.New(stream.ErrAborted, nil, verror.New(errShuttingDown, nil, vif))
+	}
+	return vc, nil
+}
+
+func (vif *VIF) deleteVC(vci id.VC) {
+	vif.idleTimerMap.Delete(vci)
+	if vif.vcMap.Delete(vci) {
+		vif.Close()
+	}
+}
+
+func (vif *VIF) closeVCAndSendMsg(vc *vc.VC, clientVCClosed bool, errMsg error) {
+	vlog.VI(2).Infof("Shutting down VCI %d on VIF %v due to: %v", vc.VCI(), vif, errMsg)
+	vif.deleteVC(vc.VCI())
+	vc.Close(errMsg)
+	if clientVCClosed {
+		// No point in sending to the client if the VC is closed, or otherwise broken.
+		return
+	}
+	msg := ""
+	if errMsg != nil {
+		msg = errMsg.Error()
+	}
+	if err := vif.sendOnExpressQ(&message.CloseVC{
+		VCI:   vc.VCI(),
+		Error: msg,
+	}); err != nil {
+		vlog.VI(2).Infof("sendOnExpressQ(CloseVC{VCI:%d,...}) on VIF %v failed: %v", vc.VCI(), vif, err)
+	}
+}
+
+// shutdownFlow clears out all the datastructures associated with fid.
+func (vif *VIF) shutdownFlow(vc *vc.VC, fid id.Flow) {
+	vc.ShutdownFlow(fid)
+	vif.flowMu.Lock()
+	delete(vif.flowCounters, message.MakeCounterID(vc.VCI(), fid))
+	vif.flowMu.Unlock()
+	vif.idleTimerMap.DeleteFlow(vc.VCI(), fid)
+}
+
+// ShutdownVCs closes all VCs established to the provided remote endpoint.
+// Returns the number of VCs that were closed.
+func (vif *VIF) ShutdownVCs(remote naming.Endpoint) int {
+	vcs := vif.vcMap.List()
+	n := 0
+	for _, vc := range vcs {
+		if naming.Compare(vc.RemoteEndpoint().RoutingID(), remote.RoutingID()) {
+			vlog.VI(1).Infof("VCI %d on VIF %s being closed because of ShutdownVCs call", vc.VCI(), vif)
+			vif.closeVCAndSendMsg(vc, false, nil)
+			n++
+		}
+	}
+	return n
+}
+
+// NumVCs returns the number of VCs established over this VIF.
+func (vif *VIF) NumVCs() int { return vif.vcMap.Size() }
+
+// DebugString returns a descriptive state of the VIF.
+//
+// The returned string is meant for consumptions by humans. The specific format
+// should not be relied upon by any automated processing.
+func (vif *VIF) DebugString() string {
+	vcs := vif.vcMap.List()
+	l := make([]string, 0, len(vcs)+1)
+
+	vif.muNextVCI.Lock() // Needed for vif.nextVCI
+	l = append(l, fmt.Sprintf("VIF:[%s] -- #VCs:%d NextVCI:%d ControlChannelEncryption:%v IsClosed:%v", vif, len(vcs), vif.nextVCI, vif.ctrlCipher != nullCipher, vif.isClosed))
+	vif.muNextVCI.Unlock()
+
+	for _, vc := range vcs {
+		l = append(l, vc.DebugString())
+	}
+
+	l = append(l, "Message Counters:")
+	ctrs := len(l)
+	vif.muMsgCounters.Lock()
+	for k, v := range vif.msgCounters {
+		l = append(l, fmt.Sprintf(" %-32s %10d", k, v))
+	}
+	vif.muMsgCounters.Unlock()
+	sort.Strings(l[ctrs:])
+	return strings.Join(l, "\n")
+}
+
+// Methods and type that implement vc.Helper
+//
+// We create a separate type for vc.Helper to hide the vc.Helper methods
+// from the exported method set of VIF.
+type vcHelper struct{ vif *VIF }
+
+func (h vcHelper) NotifyOfNewFlow(vci id.VC, fid id.Flow, bytes uint) {
+	h.vif.sendOnExpressQ(&message.OpenFlow{VCI: vci, Flow: fid, InitialCounters: uint32(bytes)})
+}
+
+func (h vcHelper) AddReceiveBuffers(vci id.VC, fid id.Flow, bytes uint) {
+	if bytes == 0 {
+		return
+	}
+	h.vif.flowMu.Lock()
+	h.vif.flowCounters.Add(vci, fid, uint32(bytes))
+	h.vif.flowMu.Unlock()
+	h.vif.flowQ.TryPut(flowToken)
+}
+
+func (h vcHelper) NewWriter(vci id.VC, fid id.Flow, priority bqueue.Priority) (bqueue.Writer, error) {
+	h.vif.idleTimerMap.InsertFlow(vci, fid)
+	return h.vif.outgoing.NewWriter(packIDs(vci, fid), flowPriority+priority, defaultBytesBufferedPerFlow)
+}
+
+// The token added to vif.flowQ.
+var flowToken *iobuf.Slice
+
+func init() {
+	// flowToken must be non-empty otherwise bqueue.Writer.Put will ignore it.
+	flowToken = iobuf.NewSlice(make([]byte, 1))
+}
+
+func packIDs(vci id.VC, fid id.Flow) bqueue.ID {
+	return bqueue.ID(message.MakeCounterID(vci, fid))
+}
+
+func unpackIDs(b bqueue.ID) (id.VC, id.Flow) {
+	cid := message.CounterID(b)
+	return cid.VCI(), cid.Flow()
+}
+
+func releaseBufs(bufs []*iobuf.Slice) {
+	for _, b := range bufs {
+		b.Release()
+	}
+}
+
+// localEP creates a naming.Endpoint from the provided parameters.
+//
+// It intentionally does not include any blessings (present in endpoints in the
+// v5 format). At this point it is not clear whether the endpoint is being
+// created for a "client" or a "server". If the endpoint is used for clients
+// (i.e., for those sending an OpenVC message for example), then we do NOT want
+// to include the blessings in the endpoint to ensure client privacy.
+//
+// Servers should be happy to let anyone with access to their endpoint string
+// know their blessings, because they are willing to share those with anyone
+// that connects to them.
+//
+// The addition of the endpoints is left as an excercise to higher layers of
+// the stack, where the desire to share or hide blessings from the endpoint is
+// clearer.
+func localEP(conn net.Conn, rid naming.RoutingID, versions *iversion.Range) naming.Endpoint {
+	localAddr := conn.LocalAddr()
+	ep := &inaming.Endpoint{
+		Protocol: localAddr.Network(),
+		Address:  localAddr.String(),
+		RID:      rid,
+	}
+	return ep
+}
+
+// getDialContext returns the DialContext for this call.
+func getDialContext(vopts []stream.VCOpt) *context.T {
+	for _, o := range vopts {
+		switch v := o.(type) {
+		case vc.DialContext:
+			return v.T
+		}
+	}
+	return nil
+}
diff --git a/runtime/internal/rpc/stream/vif/vif_test.go b/runtime/internal/rpc/stream/vif/vif_test.go
new file mode 100644
index 0000000..fd820ff
--- /dev/null
+++ b/runtime/internal/rpc/stream/vif/vif_test.go
@@ -0,0 +1,925 @@
+// Copyright 2015 The Vanadium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests in a separate package to ensure that only the exported API is used in the tests.
+//
+// All tests are run with the default security level on VCs (SecurityConfidential).
+
+package vif_test
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net"
+	"reflect"
+	"runtime"
+	"sort"
+	"sync"
+	"testing"
+	"time"
+
+	"v.io/v23/naming"
+	"v.io/v23/rpc/version"
+
+	inaming "v.io/x/ref/runtime/internal/naming"
+	"v.io/x/ref/runtime/internal/rpc/stream"
+	"v.io/x/ref/runtime/internal/rpc/stream/vc"
+	"v.io/x/ref/runtime/internal/rpc/stream/vif"
+	iversion "v.io/x/ref/runtime/internal/rpc/version"
+	"v.io/x/ref/test/testutil"
+)
+
+//go:generate v23 test generate
+
+func TestSingleFlowCreatedAtClient(t *testing.T) {
+	client, server := NewClientServer()
+	defer client.Close()
+
+	clientVC, _, err := createVC(client, server, makeEP(0x5))
+	if err != nil {
+		t.Fatal(err)
+	}
+	writer, err := clientVC.Connect()
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Test with an empty message to ensure that we correctly
+	// handle closing empty flows.
+	rwSingleFlow(t, writer, acceptFlowAtServer(server), "")
+	writer, err = clientVC.Connect()
+	if err != nil {
+		t.Fatal(err)
+	}
+	rwSingleFlow(t, writer, acceptFlowAtServer(server), "the dark knight")
+}
+
+func TestSingleFlowCreatedAtServer(t *testing.T) {
+	client, server := NewClientServer()
+	defer client.Close()
+
+	clientVC, serverConnector, err := createVC(client, server, makeEP(0x5))
+	if err != nil {
+		t.Fatal(err)
+	}
+	ln, err := clientVC.Listen()
+	if err != nil {
+		t.Fatal(err)
+	}
+	writer, err := serverConnector.Connect()
+	if err != nil {
+		t.Fatal(err)
+	}
+	reader, err := ln.Accept()
+	if err != nil {
+		t.Fatal(err)
+	}
+	rwSingleFlow(t, writer, reader, "the dark knight")
+	ln.Close()
+}
+
+func testMultipleVCsAndMultipleFlows(t *testing.T, gomaxprocs int) {
+	// This test dials multiple VCs from the client to the server.
+	// On each VC, it creates multiple flows, writes to them and verifies
+	// that the other process received what was written.
+
+	// Knobs configuring this test
+	//
+	// In case the test breaks, the knobs can be tuned down to isolate the problem.
+	// In normal circumstances, the knows should be tuned up to stress test the code.
+	const (
+		nVCs                  = 6 // Number of VCs created by the client process Dialing.
+		nFlowsFromClientPerVC = 3 // Number of flows initiated by the client process, per VC
+		nFlowsFromServerPerVC = 4 // Number of flows initiated by the server process, per VC
+
+		// Maximum number of bytes to write and read per flow.
+		// The actual size is selected randomly.
+		maxBytesPerFlow = 512 << 10 // 512KB
+	)
+
+	mp := runtime.GOMAXPROCS(gomaxprocs)
+	defer runtime.GOMAXPROCS(mp)
+	client, server := NewClientServer()
+	defer client.Close()
+
+	// Create all the VCs
+	// clientVCs[i] is the VC at the client process
+	// serverConnectors[i] is the corresponding VC at the server process.
+	clientVCs, serverConnectors, err := createNVCs(client, server, 0, nVCs)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Create listeners for flows on the client VCs.
+	// Flows are implicitly being listened to at the server (available through server.Accept())
+	clientLNs, err := createListeners(clientVCs)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Create flows:
+	// Over each VC, create nFlowsFromClientPerVC initiated by the client
+	// and nFlowsFromServerPerVC initiated by the server.
+	nFlows := nVCs * (nFlowsFromClientPerVC + nFlowsFromServerPerVC)
+
+	// Fill in random strings that will be written over the Flows.
+	dataWritten := make([]string, nFlows)
+	for i := 0; i < nFlows; i++ {
+		dataWritten[i] = string(testutil.RandomBytes(maxBytesPerFlow))
+	}
+
+	// write writes data to flow in randomly sized chunks.
+	write := func(flow stream.Flow, data string) {
+		defer flow.Close()
+		buf := []byte(data)
+		// Split into a random number of Write calls.
+		for len(buf) > 0 {
+			size := 1 + testutil.Intn(len(buf)) // Random number in [1, len(buf)]
+			n, err := flow.Write(buf[:size])
+			if err != nil {
+				t.Errorf("Write failed: (%d, %v)", n, err)
+				return
+			}
+			buf = buf[size:]
+		}
+	}
+
+	dataReadChan := make(chan string, nFlows)
+	// read reads from a flow and writes out the data to dataReadChan
+	read := func(flow stream.Flow) {
+		var buf bytes.Buffer
+		var tmp [1024]byte
+		for {
+			n, err := flow.Read(tmp[:testutil.Intn(len(tmp))])
+			buf.Write(tmp[:n])
+			if err == io.EOF {
+				break
+			}
+			if err != nil {
+				t.Errorf("Read error: %v", err)
+				break
+			}
+		}
+		dataReadChan <- buf.String()
+	}
+
+	index := 0
+	for i := 0; i < len(clientVCs); i++ {
+		for j := 0; j < nFlowsFromClientPerVC; j++ {
+			// Flow initiated by client, read by server
+			writer, err := clientVCs[i].Connect()
+			if err != nil {
+				t.Errorf("clientVCs[%d], flow %d: %v", i, j, err)
+				continue
+			}
+			go write(writer, dataWritten[index])
+			go read(acceptFlowAtServer(server))
+			index++
+		}
+	}
+	for i := 0; i < len(serverConnectors); i++ {
+		for j := 0; j < nFlowsFromServerPerVC; j++ {
+			// Flow initiated by server, read by client
+			writer, err := serverConnectors[i].Connect()
+			if err != nil {
+				t.Errorf("serverConnectors[%d], flow %d: %v", i, j, err)
+				continue
+			}
+			go write(writer, dataWritten[index])
+			go read(acceptFlowAtClient(clientLNs[i]))
+			index++
+		}
+	}
+	if index != nFlows {
+		t.Errorf("Created %d flows, wanted %d", index, nFlows)
+	}
+
+	// Collect all data read and compare against the data written.
+	// Since flows might be accepted in arbitrary order, sort the data before comparing.
+	dataRead := make([]string, index)
+	for i := 0; i < index; i++ {
+		dataRead[i] = <-dataReadChan
+	}
+	sort.Strings(dataWritten)
+	sort.Strings(dataRead)
+	if !reflect.DeepEqual(dataRead, dataWritten) {
+		// Since the strings can be very large, only print out the first few diffs.
+		nDiffs := 0
+		for i := 0; i < len(dataRead); i++ {
+			if dataRead[i] != dataWritten[i] {
+				nDiffs++
+				t.Errorf("Diff %d out of %d items: Got %q want %q", nDiffs, i, atmostNbytes(dataRead[i], 20), atmostNbytes(dataWritten[i], 20))
+			}
+		}
+		if nDiffs > 0 {
+			t.Errorf("#Mismatches:%d #ReadSamples:%d #WriteSamples:%d", nDiffs, len(dataRead), len(dataWritten))
+		}
+	}
+}
+
+func TestMultipleVCsAndMultipleFlows_1(t *testing.T) {
+	// Test with a single goroutine since that is typically easier to debug
+	// in case of problems.
+	testMultipleVCsAndMultipleFlows(t, 1)
+}
+
+func TestMultipleVCsAndMultipleFlows_5(t *testing.T) {
+	// Test with multiple goroutines, particularly useful for checking
+	// races with
+	// go test -race
+	testMultipleVCsAndMultipleFlows(t, 5)
+}
+
+func TestClose(t *testing.T) {
+	client, server := NewClientServer()
+	vc, _, err := createVC(client, server, makeEP(0x5))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	clientFlow, err := vc.Connect()
+	if err != nil {
+		t.Fatal(err)
+	}
+	serverFlow := acceptFlowAtServer(server)
+
+	var message = []byte("bugs bunny")
+	go func() {
+		if n, err := clientFlow.Write(message); n != len(message) || err != nil {
+			t.Fatalf("Wrote (%d, %v), want (%d, nil)", n, err, len(message))
+		}
+		client.Close()
+	}()
+
+	buf := make([]byte, 1024)
+	// client.Close should drain all pending writes first.
+	if n, err := serverFlow.Read(buf); n != len(message) || err != nil {
+		t.Fatalf("Got (%d, %v) = %q, want (%d, nil) = %q", n, err, buf[:n], len(message), message)
+	}
+	// subsequent reads should fail, since the VIF should be closed.
+	if n, err := serverFlow.Read(buf); n != 0 || err == nil {
+		t.Fatalf("Got (%d, %v) = %q, want (0, nil)", n, err, buf[:n])
+	}
+	server.Close()
+}
+
+func TestOnClose(t *testing.T) {
+	notifyC, notifyS := make(chan *vif.VIF), make(chan *vif.VIF)
+	notifyFuncC := func(vf *vif.VIF) { notifyC <- vf }
+	notifyFuncS := func(vf *vif.VIF) { notifyS <- vf }
+
+	// Close the client VIF. Both client and server should be notified.
+	client, server, err := New(nil, nil, notifyFuncC, notifyFuncS, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	client.Close()
+	if got := <-notifyC; got != client {
+		t.Errorf("Want notification for %v; got %v", client, got)
+	}
+	if got := <-notifyS; got != server {
+		t.Errorf("Want notification for %v; got %v", server, got)
+	}
+
+	// Same as above, but close the server VIF at this time.
+	client, server, err = New(nil, nil, notifyFuncC, notifyFuncS, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	server.Close()
+	if got := <-notifyC; got != client {
+		t.Errorf("Want notification for %v; got %v", client, got)
+	}
+	if got := <-notifyS; got != server {
+		t.Errorf("Want notification for %v; got %v", server, got)
+	}
+}
+
+func testCloseWhenEmpty(t *testing.T, testServer bool) {
+	const (
+		waitTime = 5 * time.Millisecond
+	)
+
+	notify := make(chan interface{})
+	notifyFunc := func(vf *vif.VIF) { notify <- vf }
+
+	newVIF := func() (vf, remote *vif.VIF) {
+		var err error
+		vf, remote, err = New(nil, nil, notifyFunc, notifyFunc, nil, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if err = vf.StartAccepting(); err != nil {
+			t.Fatal(err)
+		}
+		if testServer {
+			vf, remote = remote, vf
+		}
+		return
+	}
+
+	// Initially empty. Should not be closed.
+	vf, remote := newVIF()
+	if err := vif.WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+
+	// Open one VC. Should not be closed.
+	vf, remote = newVIF()
+	if _, _, err := createVC(vf, remote, makeEP(0x10)); err != nil {
+		t.Fatal(err)
+	}
+	if err := vif.WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+
+	// Close the VC. Should be closed.
+	vf.ShutdownVCs(makeEP(0x10))
+	if err := vif.WaitForNotifications(notify, vf, remote); err != nil {
+		t.Error(err)
+	}
+
+	// Same as above, but open a VC from the remote side.
+	vf, remote = newVIF()
+	_, _, err := createVC(remote, vf, makeEP(0x10))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := vif.WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+	remote.ShutdownVCs(makeEP(0x10))
+	if err := vif.WaitForNotifications(notify, vf, remote); err != nil {
+		t.Error(err)
+	}
+
+	// Create two VCs.
+	vf, remote = newVIF()
+	if _, _, err := createNVCs(vf, remote, 0x10, 2); err != nil {
+		t.Fatal(err)
+	}
+
+	// Close the first VC twice. Should not be closed.
+	vf.ShutdownVCs(makeEP(0x10))
+	vf.ShutdownVCs(makeEP(0x10))
+	if err := vif.WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+
+	// Close the second VC. Should be closed.
+	vf.ShutdownVCs(makeEP(0x10 + 1))
+	if err := vif.WaitForNotifications(notify, vf, remote); err != nil {
+		t.Error(err)
+	}
+}
+
+func TestCloseWhenEmpty(t *testing.T)       { testCloseWhenEmpty(t, false) }
+func TestCloseWhenEmptyServer(t *testing.T) { testCloseWhenEmpty(t, true) }
+
+func testStartTimeout(t *testing.T, testServer bool) {
+	const (
+		startTime = 5 * time.Millisecond
+		// We use a long wait time here since it takes some time for the underlying network
+		// connection of the other side to be closed especially in race testing.
+		waitTime = 150 * time.Millisecond
+	)
+
+	notify := make(chan interface{})
+	notifyFunc := func(vf *vif.VIF) { notify <- vf }
+
+	newVIF := func() (vf, remote *vif.VIF, triggerTimers func()) {
+		triggerTimers = vif.SetFakeTimers()
+		var vfStartTime, remoteStartTime time.Duration = startTime, 0
+		if testServer {
+			vfStartTime, remoteStartTime = remoteStartTime, vfStartTime
+		}
+		var err error
+		vf, remote, err = New(nil, nil, notifyFunc, notifyFunc, []stream.VCOpt{vc.StartTimeout{vfStartTime}}, []stream.ListenerOpt{vc.StartTimeout{remoteStartTime}})
+		if err != nil {
+			t.Fatal(err)
+		}
+		if err = vf.StartAccepting(); err != nil {
+			t.Fatal(err)
+		}
+		if testServer {
+			vf, remote = remote, vf
+		}
+		return
+	}
+
+	// No VC opened. Should be closed after the start timeout.
+	vf, remote, triggerTimers := newVIF()
+	triggerTimers()
+	if err := vif.WaitForNotifications(notify, vf, remote); err != nil {
+		t.Error(err)
+	}
+
+	// Open one VC. Should not be closed.
+	vf, remote, triggerTimers = newVIF()
+	if _, _, err := createVC(vf, remote, makeEP(0x10)); err != nil {
+		t.Fatal(err)
+	}
+	triggerTimers()
+	if err := vif.WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+
+	// Close the VC. Should be closed.
+	vf.ShutdownVCs(makeEP(0x10))
+	if err := vif.WaitForNotifications(notify, vf, remote); err != nil {
+		t.Error(err)
+	}
+}
+
+func TestStartTimeout(t *testing.T)       { testStartTimeout(t, false) }
+func TestStartTimeoutServer(t *testing.T) { testStartTimeout(t, true) }
+
+func testIdleTimeout(t *testing.T, testServer bool) {
+	const (
+		idleTime = 10 * time.Millisecond
+		waitTime = idleTime * 2
+	)
+
+	notify := make(chan interface{})
+	notifyFunc := func(vf *vif.VIF) { notify <- vf }
+
+	newVIF := func() (vf, remote *vif.VIF) {
+		var err error
+		if vf, remote, err = New(nil, nil, notifyFunc, notifyFunc, nil, nil); err != nil {
+			t.Fatal(err)
+		}
+		if err = vf.StartAccepting(); err != nil {
+			t.Fatal(err)
+		}
+		if testServer {
+			vf, remote = remote, vf
+		}
+		return
+	}
+	newVC := func(vf, remote *vif.VIF) (VC stream.VC, ln stream.Listener, remoteVC stream.Connector) {
+		triggerTimers := vif.SetFakeTimers()
+		defer triggerTimers()
+		var err error
+		VC, remoteVC, err = createVC(vf, remote, makeEP(0x10), vc.IdleTimeout{idleTime})
+		if err != nil {
+			t.Fatal(err)
+		}
+		if ln, err = VC.Listen(); err != nil {
+			t.Fatal(err)
+		}
+		return
+	}
+	newFlow := func(vc stream.VC, remote *vif.VIF) stream.Flow {
+		f, err := vc.Connect()
+		if err != nil {
+			t.Fatal(err)
+		}
+		acceptFlowAtServer(remote)
+		return f
+	}
+
+	// No active flow. Should be notified.
+	vf, remote := newVIF()
+	_, _, _ = newVC(vf, remote)
+	if err := vif.WaitForNotifications(notify, vf, remote); err != nil {
+		t.Error(err)
+	}
+
+	// Same as above, but with multiple VCs.
+	vf, remote = newVIF()
+	triggerTimers := vif.SetFakeTimers()
+	if _, _, err := createNVCs(vf, remote, 0x10, 5, vc.IdleTimeout{idleTime}); err != nil {
+		t.Fatal(err)
+	}
+	triggerTimers()
+	if err := vif.WaitForNotifications(notify, vf, remote); err != nil {
+		t.Error(err)
+	}
+
+	// Open one flow. Should not be notified.
+	vf, remote = newVIF()
+	vc, _, _ := newVC(vf, remote)
+	f1 := newFlow(vc, remote)
+	if err := vif.WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+
+	// Close the flow. Should be notified.
+	f1.Close()
+	if err := vif.WaitForNotifications(notify, vf, remote); err != nil {
+		t.Error(err)
+	}
+
+	// Open two flows.
+	vf, remote = newVIF()
+	vc, _, _ = newVC(vf, remote)
+	f1 = newFlow(vc, remote)
+	f2 := newFlow(vc, remote)
+
+	// Close the first flow twice. Should not be notified.
+	f1.Close()
+	f1.Close()
+	if err := vif.WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+
+	// Close the second flow. Should be notified now.
+	f2.Close()
+	if err := vif.WaitForNotifications(notify, vf, remote); err != nil {
+		t.Error(err)
+	}
+
+	// Same as above, but open a flow from the remote side.
+	vf, remote = newVIF()
+	_, ln, remoteVC := newVC(vf, remote)
+	f1, err := remoteVC.Connect()
+	if err != nil {
+		t.Fatal(err)
+	}
+	acceptFlowAtClient(ln)
+	if err := vif.WaitWithTimeout(notify, waitTime); err != nil {
+		t.Error(err)
+	}
+	f1.Close()
+	if err := vif.WaitForNotifications(notify, vf, remote); err != nil {
+		t.Error(err)
+	}
+}
+
+func TestIdleTimeout(t *testing.T)       { testIdleTimeout(t, false) }
+func TestIdleTimeoutServer(t *testing.T) { testIdleTimeout(t, true) }
+
+func TestShutdownVCs(t *testing.T) {
+	client, server := NewClientServer()
+	defer server.Close()
+	defer client.Close()
+
+	testN := func(N int) error {
+		c := client.NumVCs()
+		if c != N {
+			return fmt.Errorf("%d VCs on client VIF, expected %d", c, N)
+		}
+		return nil
+	}
+
+	if _, _, err := createVC(client, server, makeEP(0x5)); err != nil {
+		t.Fatal(err)
+	}
+	if err := testN(1); err != nil {
+		t.Error(err)
+	}
+	if _, _, err := createVC(client, server, makeEP(0x5)); err != nil {
+		t.Fatal(err)
+	}
+	if err := testN(2); err != nil {
+		t.Error(err)
+	}
+	if _, _, err := createVC(client, server, makeEP(0x7)); err != nil {
+		t.Fatal(err)
+	}
+	if err := testN(3); err != nil {
+		t.Error(err)
+	}
+	// Client does not have any VCs to the endpoint with routing id 0x9,
+	// so nothing should be closed
+	if n := client.ShutdownVCs(makeEP(0x9)); n != 0 {
+		t.Errorf("Expected 0 VCs to be closed, closed %d", n)
+	}
+	if err := testN(3); err != nil {
+		t.Error(err)
+	}
+	// But it does have to 0x5
+	if n := client.ShutdownVCs(makeEP(0x5)); n != 2 {
+		t.Errorf("Expected 2 VCs to be closed, closed %d", n)
+	}
+	if err := testN(1); err != nil {
+		t.Error(err)
+	}
+	// And 0x7
+	if n := client.ShutdownVCs(makeEP(0x7)); n != 1 {
+		t.Errorf("Expected 2 VCs to be closed, closed %d", n)
+	}
+	if err := testN(0); err != nil {
+		t.Error(err)
+	}
+}
+
+type versionTestCase struct {
+	client, server, ep *iversion.Range
+	expectError        bool
+	expectVIFError     bool
+}
+
+func (tc *versionTestCase) Run(t *testing.T) {
+	client, server, err := NewVersionedClientServer(tc.client, tc.server)
+	if (err != nil) != tc.expectVIFError {
+		t.Errorf("Error mismatch.  Wanted error: %v, got %v; client: %v, server: %v", tc.expectVIFError, err, tc.client, tc.server)
+	}
+	if err != nil {
+		return
+	}
+	defer client.Close()
+
+	ep := &inaming.Endpoint{
+		Protocol: "test",
+		Address:  "addr",
+		RID:      naming.FixedRoutingID(0x5),
+	}
+	clientVC, _, err := createVC(client, server, ep)
+	if (err != nil) != tc.expectError {
+		t.Errorf("Error mismatch.  Wanted error: %v, got %v (client:%v, server:%v ep:%v)", tc.expectError, err, tc.client, tc.server, tc.ep)
+
+	}
+	if err != nil {
+		return
+	}
+
+	writer, err := clientVC.Connect()
+	if err != nil {
+		t.Errorf("Unexpected error on case %+v: %v", tc, err)
+		return
+	}
+
+	rwSingleFlow(t, writer, acceptFlowAtServer(server), "the dark knight")
+}
+
+// TestIncompatibleVersions tests many cases where the client and server
+// have compatible or incompatible supported version ranges.  It ensures
+// that overlapping ranges work properly, but non-overlapping ranges generate
+// errors.
+func TestIncompatibleVersions(t *testing.T) {
+	unknown := &iversion.Range{version.UnknownRPCVersion, version.UnknownRPCVersion}
+	tests := []versionTestCase{
+		{&iversion.Range{2, 2}, &iversion.Range{2, 2}, &iversion.Range{2, 2}, false, false},
+		{&iversion.Range{2, 3}, &iversion.Range{3, 5}, &iversion.Range{3, 5}, false, false},
+		{&iversion.Range{2, 3}, &iversion.Range{3, 5}, unknown, false, false},
+
+		// VIF error since there are no versions in common.
+		{&iversion.Range{2, 3}, &iversion.Range{4, 5}, &iversion.Range{4, 5}, true, true},
+		{&iversion.Range{2, 3}, &iversion.Range{4, 5}, unknown, true, true},
+	}
+
+	for _, tc := range tests {
+		tc.Run(t)
+	}
+}
+
+func TestNetworkFailure(t *testing.T) {
+	c1, c2 := pipe()
+	result := make(chan *vif.VIF)
+	pclient := testutil.NewPrincipal("client")
+	go func() {
+		client, err := vif.InternalNewDialedVIF(c1, naming.FixedRoutingID(0xc), pclient, nil, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		result <- client
+	}()
+	pserver := testutil.NewPrincipal("server")
+	blessings := pserver.BlessingStore().Default()
+	server, err := vif.InternalNewAcceptedVIF(c2, naming.FixedRoutingID(0x5), pserver, blessings, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	client := <-result
+	// If the network connection dies, Dial and Accept should fail.
+	c1.Close()
+	if _, err := client.Dial(makeEP(0x5), pclient); err == nil {
+		t.Errorf("Expected client.Dial to fail")
+	}
+	if _, err := server.Accept(); err == nil {
+		t.Errorf("Expected server.Accept to fail")
+	}
+}
+
+func makeEP(rid uint64) naming.Endpoint {
+	return &inaming.Endpoint{
+		Protocol: "test",
+		Address:  "addr",
+		RID:      naming.FixedRoutingID(rid),
+	}
+}
+
+// pipeAddr provides a more descriptive String implementation than provided by net.Pipe.
+type pipeAddr struct{ name string }
+
+func (a pipeAddr) Network() string { return "pipe" }
+func (a pipeAddr) String() string  { return a.name }
+
+// pipeConn provides a buffered net.Conn, with pipeAddr addressing.
+type pipeConn struct {
+	lock sync.Mutex
+	// w is guarded by lock, to prevent Close from racing with Write.  This is a
+	// quick way to prevent the race, but it allows a Write to block the Close.
+	// This isn't a problem in the tests currently.
+	w            chan<- []byte
+	r            <-chan []byte
+	rdata        []byte
+	laddr, raddr pipeAddr
+}
+
+func (c *pipeConn) Read(data []byte) (int, error) {
+	for len(c.rdata) == 0 {
+		d, ok := <-c.r
+		if !ok {
+			return 0, io.EOF
+		}
+		c.rdata = d
+	}
+	n := copy(data, c.rdata)
+	c.rdata = c.rdata[n:]
+	return n, nil
+}
+
+func (c *pipeConn) Write(data []byte) (int, error) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	if c.w == nil {
+		return 0, io.EOF
+	}
+	d := make([]byte, len(data))
+	copy(d, data)
+	c.w <- d
+	return len(data), nil
+}
+
+func (c *pipeConn) Close() error {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	if c.w == nil {
+		return io.EOF
+	}
+	close(c.w)
+	c.w = nil
+	return nil
+}
+
+func (c *pipeConn) LocalAddr() net.Addr                { return c.laddr }
+func (c *pipeConn) RemoteAddr() net.Addr               { return c.raddr }
+func (c *pipeConn) SetDeadline(t time.Time) error      { return nil }
+func (c *pipeConn) SetReadDeadline(t time.Time) error  { return nil }
+func (c *pipeConn) SetWriteDeadline(t time.Time) error { return nil }
+
+func pipe() (net.Conn, net.Conn) {
+	clientAddr := pipeAddr{"client"}
+	serverAddr := pipeAddr{"server"}
+	c1 := make(chan []byte, 10)
+	c2 := make(chan []byte, 10)
+	p1 := &pipeConn{w: c1, r: c2, laddr: clientAddr, raddr: serverAddr}
+	p2 := &pipeConn{w: c2, r: c1, laddr: serverAddr, raddr: clientAddr}
+	return p1, p2
+}
+
+func NewClientServer() (client, server *vif.VIF) {
+	var err error
+	client, server, err = New(nil, nil, nil, nil, nil, nil)
+	if err != nil {
+		panic(err)
+	}
+	return
+}
+
+func NewVersionedClientServer(clientVersions, serverVersions *iversion.Range) (client, server *vif.VIF, verr error) {
+	return New(clientVersions, serverVersions, nil, nil, nil, nil)
+}
+
+func New(clientVersions, serverVersions *iversion.Range, clientOnClose, serverOnClose func(*vif.VIF), opts []stream.VCOpt, lopts []stream.ListenerOpt) (client, server *vif.VIF, verr error) {
+	c1, c2 := pipe()
+	var cerr error
+	cl := make(chan *vif.VIF)
+	go func() {
+		c, err := vif.InternalNewDialedVIF(c1, naming.FixedRoutingID(0xc), testutil.NewPrincipal("client"), clientVersions, clientOnClose, opts...)
+		if err != nil {
+			cerr = err
+			close(cl)
+		} else {
+			cl <- c
+		}
+	}()
+	pserver := testutil.NewPrincipal("server")
+	bserver := pserver.BlessingStore().Default()
+	s, err := vif.InternalNewAcceptedVIF(c2, naming.FixedRoutingID(0x5), pserver, bserver, serverVersions, serverOnClose, lopts...)
+	c, ok := <-cl
+	if err != nil {
+		verr = err
+		return
+	}
+	if !ok {
+		verr = cerr
+		return
+	}
+	server = s
+	client = c
+	return
+}
+
+// rwSingleFlow writes out data on writer and ensures that the reader sees the same string.
+func rwSingleFlow(t *testing.T, writer io.WriteCloser, reader io.Reader, data string) {
+	go func() {
+		if n, err := writer.Write([]byte(data)); n != len(data) || err != nil {
+			t.Errorf("Write failure. Got (%d, %v) want (%d, nil)", n, err, len(data))
+		}
+		writer.Close()
+	}()
+
+	var buf bytes.Buffer
+	var tmp [4096]byte
+	for {
+		n, err := reader.Read(tmp[:])
+		buf.Write(tmp[:n])
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			t.Errorf("Read error: %v", err)
+		}
+	}
+	if buf.String() != data {
+		t.Errorf("Wrote %q but read %q", data, buf.String())
+	}
+}
+
+// createVC creates a VC by dialing from the client process to the server
+// process.  It returns the VC at the client and the Connector at the server
+// (which the server can use to create flows over the VC)).
+func createVC(client, server *vif.VIF, ep naming.Endpoint, opts ...stream.VCOpt) (clientVC stream.VC, serverConnector stream.Connector, err error) {
+	vcChan := make(chan stream.VC)
+	scChan := make(chan stream.Connector)
+	errChan := make(chan error)
+	go func() {
+		vc, err := client.Dial(ep, testutil.NewPrincipal("client"), opts...)
+		errChan <- err
+		vcChan <- vc
+	}()
+	go func() {
+		cAndf, err := server.Accept()
+		errChan <- err
+		if err == nil {
+			scChan <- cAndf.Connector
+		}
+	}()
+	if err = <-errChan; err != nil {
+		return
+	}
+	if err = <-errChan; err != nil {
+		return
+	}
+	clientVC = <-vcChan
+	serverConnector = <-scChan
+	return
+}
+
+func createNVCs(client, server *vif.VIF, startRID uint64, N int, opts ...stream.VCOpt) (clientVCs []stream.VC, serverConnectors []stream.Connector, err error) {
+	var c stream.VC
+	var s stream.Connector
+	for i := 0; i < N; i++ {
+		c, s, err = createVC(client, server, makeEP(startRID+uint64(i)), opts...)
+		if err != nil {
+			return
+		}
+		clientVCs = append(clientVCs, c)
+		serverConnectors = append(serverConnectors, s)
+	}
+	return
+}
+
+func createListeners(vcs []stream.VC) ([]stream.Listener, error) {
+	var ret []stream.Listener
+	for _, vc := range vcs {
+		ln, err := vc.Listen()
+		if err != nil {
+			return nil, err
+		}
+		ret = append(ret, ln)
+	}
+	return ret, nil
+}
+
+func acceptFlowAtServer(vf *vif.VIF) stream.Flow {
+	for {
+		cAndf, err := vf.Accept()
+		if err != nil {
+			panic(err)
+		}
+		if cAndf.Flow != nil {
+			return cAndf.Flow
+		}
+	}
+}
+
+func acceptFlowAtClient(ln stream.Listener) stream.Flow {
+	f, err := ln.Accept()
+	if err != nil {
+		panic(err)
+	}
+	return f
+}
+
+func atmostNbytes(s string, n int) string {
+	if n > len(s) {
+		return s
+	}
+	b := []byte(s)
+	return string(b[:n/2]) + "..." + string(b[len(s)-n/2:])
+}