iobuf: change the way reserve bytes are handled

  Currently, the iobuf allocator reserves space only when it allocates
  the first slice from the internal buf. We also do not set 'reserve'
  correctly: e.g., zero reserve in crypto (where it is necessary), and
  header+macsize in vc.newWriter (where it isn't).
  This leads to unnecessary data copies when inserting the header+mac,
  and the unneeded reserved space may prevent coalescing.
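
  (Concretely: with reserve=16, the old Alloc returned slice(0, 16, 16+n)
  for the first allocation from a given iobuf but slice(16+n, 16+n, 16+2n)
  for the second, so only the first slice had any headroom in front of
  it; prepending a header to any later slice required a new allocation
  and a copy.)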

  This CL changes the iobuf allocator to always reserve space when
  requested, and gives the correct 'reserve' bytes to each allocator.
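
  As an illustration of the new per-slice behavior, a minimal sketch
  using the iobuf APIs touched in this CL (the reserve size 16 and the
  payload size 1000 are arbitrary):

    pool := iobuf.NewPool(0)
    defer pool.Close()
    alloc := iobuf.NewAllocator(pool, 16) // every Alloc reserves 16 bytes
    defer alloc.Release()

    slice := alloc.Alloc(1000) // Contents covers only the payload
    // ... fill slice.Contents with the payload ...
    if slice.ExpandFront(16) {
        // slice.Contents now starts 16 bytes earlier, so the header+mac
        // can be written in place with no data copy.
    }
    slice.Release()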

                                      old                        new
  RPC Connection                      13.72 ms/rpc               13.70 ms/rpc              -0.1%
  RPC (echo 1000B)                    1.03 ms/rpc (971.45 qps)   1.02 ms/rpc (979.03 qps)  -1.0% (0.8%)
  RPC Streaming (echo 1000B)          0.11 ms/rpc                0.11 ms/rpc                0.0%
  RPC Streaming Throughput (echo 1MB) 293.60 MB/s                328.45 MB/s               11.9%

Change-Id: I081579544540771cf2dfe7aaa433f1a2c5d0f8f2
diff --git a/runtime/internal/lib/iobuf/allocator.go b/runtime/internal/lib/iobuf/allocator.go
index 1dc5c5e..d059564 100644
--- a/runtime/internal/lib/iobuf/allocator.go
+++ b/runtime/internal/lib/iobuf/allocator.go
@@ -6,10 +6,10 @@
 
 import "v.io/x/lib/vlog"
 
-// Allocator is an allocator for Slices that tries to allocate
-// contiguously.  That is, sequential allocations will tend to be contiguous,
-// which means that Coalesce() will usually be able to perform coalescing
-// (without copying the data).
+// Allocator is an allocator for Slices that tries to allocate contiguously.
+// That is, sequential allocations will tend to be contiguous, which means
+// that Coalesce() will usually be able to perform coalescing (without
+// copying the data).
 //
 //    calloc := iobuf.Allocator(...)
 //    slice1 := calloc.Alloc(10)
@@ -18,18 +18,23 @@
 //    // slices should contain 1 element with length 30.
 type Allocator struct {
 	pool    *Pool
+	iobuf   *buf
 	index   uint
 	reserve uint
-	iobuf   *buf
 }
 
 // NewAllocator returns a new Slice allocator.
 //
-// <reserve> is the number of spare bytes to reserve at the beginning of each
-// contiguous iobuf.  This can be used to reverse space for a header, for
-// example.
+// <reserve> is the number of spare bytes to reserve at the beginning of
+// each allocated Slice. This can be used to reserve space for a header,
+// for example.
+//
+// NOTE: It's a bit weird to set the number of reserve bytes in the NewAllocator
+// call; it seems more natural in the Alloc call. But it's convenient to set it
+// in NewAllocator, because in our use-cases, the code that calls Alloc doesn't
+// know the number of reserve bytes.
 func NewAllocator(pool *Pool, reserve uint) *Allocator {
-	return &Allocator{pool: pool, reserve: reserve, index: reserve}
+	return &Allocator{pool: pool, reserve: reserve}
 }
 
 // Release releases the allocator.
@@ -43,22 +48,21 @@
 
 // Alloc allocates a new Slice.
 func (a *Allocator) Alloc(bytes uint) *Slice {
+	n := bytes + a.reserve
 	if a.iobuf == nil {
 		if a.pool == nil {
 			vlog.Info("iobuf.Allocator has already been closed")
 			return nil
 		}
-		a.iobuf = a.pool.alloc(a.reserve + bytes)
+		a.iobuf = a.pool.alloc(n)
+	} else if uint(len(a.iobuf.Contents)) < a.index+n {
+		a.iobuf.release()
+		a.iobuf = a.pool.alloc(n)
+		a.index = 0
 	}
-	if uint(len(a.iobuf.Contents))-a.index < bytes {
-		a.allocIOBUF(bytes)
-	}
-	base := a.index
-	free := base
-	if free == a.reserve {
-		free = 0
-	}
-	a.index += uint(bytes)
+	free := a.index
+	base := free + a.reserve
+	a.index += n
 	return a.iobuf.slice(free, base, a.index)
 }
 
@@ -68,11 +72,3 @@
 	copy(slice.Contents, buf)
 	return slice
 }
-
-// allocIOBUF replaces the current iobuf with a new one that has at least
-// <bytes> of storage.
-func (a *Allocator) allocIOBUF(bytes uint) {
-	a.iobuf.release()
-	a.iobuf = a.pool.alloc(bytes + a.reserve)
-	a.index = a.reserve
-}
diff --git a/runtime/internal/lib/iobuf/allocator_test.go b/runtime/internal/lib/iobuf/allocator_test.go
index 01a5ad5..86a5bc4 100644
--- a/runtime/internal/lib/iobuf/allocator_test.go
+++ b/runtime/internal/lib/iobuf/allocator_test.go
@@ -9,51 +9,81 @@
 	"testing"
 )
 
-func TestAllocatorSmall(t *testing.T) {
+func testAllocatorAlloc(t *testing.T, bytes, reserve uint) {
 	pool := NewPool(iobufSize)
-	salloc := NewAllocator(pool, 0)
-	const count = 100
-	var slices [count]*Slice
-	for i := 0; i != count; i++ {
-		slices[i] = salloc.Copy([]byte(fmt.Sprintf("slice[%d]", i)))
-	}
-	for i := 0; i != count; i++ {
-		expectEq(t, fmt.Sprintf("slice[%d]", i), string(slices[i].Contents))
-		slices[i].Release()
-	}
-	salloc.Release()
-}
+	defer pool.Close()
+	alloc := NewAllocator(pool, reserve)
+	defer alloc.Release()
 
-func TestAllocatorLarge(t *testing.T) {
-	pool := NewPool(iobufSize)
-	salloc := NewAllocator(pool, 0)
-	const count = 100
+	const count = 1000
 	var slices [count]*Slice
 	for i := 0; i != count; i++ {
-		slices[i] = salloc.Alloc(10000)
+		slices[i] = alloc.Alloc(bytes)
 		copy(slices[i].Contents, []byte(fmt.Sprintf("slice[%d]", i)))
 	}
 	for i := 0; i != count; i++ {
 		expected := fmt.Sprintf("slice[%d]", i)
 		expectEq(t, expected, string(slices[i].Contents[0:len(expected)]))
+		if slices[i].ExpandFront(reserve + 1) {
+			t.Errorf("slice[%d] should not have %d reserved bytes", i, reserve+1)
+		}
+		if !slices[i].ExpandFront(reserve) {
+			t.Errorf("slice[%d] should have %d reserved bytes", i, reserve)
+		}
 		slices[i].Release()
 	}
-	salloc.Release()
 }
 
+func TestAllocatorAllocSmallWithReserve_0(t *testing.T)   { testAllocatorAlloc(t, 50, 0) }
+func TestAllocatorAllocSmallWithReserve_10(t *testing.T)  { testAllocatorAlloc(t, 50, 10) }
+func TestAllocatorAllocSmallWithReserve_100(t *testing.T) { testAllocatorAlloc(t, 50, 100) }
+
+func TestAllocatorAllocLargeWithReserve_0(t *testing.T)   { testAllocatorAlloc(t, 1000, 0) }
+func TestAllocatorAllocLargeWithReserve_10(t *testing.T)  { testAllocatorAlloc(t, 1000, 10) }
+func TestAllocatorAllocLargeWithReserve_100(t *testing.T) { testAllocatorAlloc(t, 1000, 100) }
+
+func testAllocatorCopy(t *testing.T, reserve uint) {
+	pool := NewPool(iobufSize)
+	defer pool.Close()
+	alloc := NewAllocator(pool, reserve)
+	defer alloc.Release()
+
+	const count = 1000
+	var slices [count]*Slice
+	for i := 0; i != count; i++ {
+		slices[i] = alloc.Copy([]byte(fmt.Sprintf("slice[%d]", i)))
+	}
+	for i := 0; i != count; i++ {
+		expectEq(t, fmt.Sprintf("slice[%d]", i), string(slices[i].Contents))
+		if slices[i].ExpandFront(reserve + 1) {
+			t.Errorf("slice[%d] should not have %d reserved bytes", i, reserve+1)
+		}
+		if !slices[i].ExpandFront(reserve) {
+			t.Errorf("slice[%d] should have %d reserved bytes", i, reserve)
+		}
+		slices[i].Release()
+	}
+}
+
+func TestAllocatorCopyWithReserve_0(t *testing.T)   { testAllocatorCopy(t, 0) }
+func TestAllocatorCopyWithReserve_10(t *testing.T)  { testAllocatorCopy(t, 10) }
+func TestAllocatorCopyWithReserve_100(t *testing.T) { testAllocatorCopy(t, 100) }
+
 // Check that the Allocator is unusable after it is closed.
 func TestAllocatorClose(t *testing.T) {
 	pool := NewPool(iobufSize)
-	alloc := NewAllocator(pool, 0)
+	defer pool.Close()
+	alloc := NewAllocator(pool, 0)
+
 	slice := alloc.Alloc(10)
 	if slice == nil {
 		t.Fatalf("slice should not be nil")
 	}
 	slice.Release()
+
 	alloc.Release()
 	slice = alloc.Alloc(10)
 	if slice != nil {
 		t.Errorf("slice should be nil")
 	}
-	pool.Close()
 }
diff --git a/runtime/internal/lib/iobuf/iobuf_test.go b/runtime/internal/lib/iobuf/iobuf_test.go
index b9f454a..377debb 100644
--- a/runtime/internal/lib/iobuf/iobuf_test.go
+++ b/runtime/internal/lib/iobuf/iobuf_test.go
@@ -24,6 +24,8 @@
 // Test basic reference counting.
 func TestFreelist(t *testing.T) {
 	pool := NewPool(iobufSize)
+	defer pool.Close()
+
 	iobuf := pool.alloc(0)
 	expectEq(t, iobufSize, len(iobuf.Contents))
 	expectEq(t, uint64(1), pool.allocated)
@@ -41,6 +43,8 @@
 // Test slice reference counting.
 func TestRefcount(t *testing.T) {
 	pool := NewPool(iobufSize)
+	defer pool.Close()
+
 	iobuf := pool.alloc(0)
 	slice1 := iobuf.slice(0, 0, 10)
 	slice2 := iobuf.slice(10, 10, 20)
@@ -55,6 +59,7 @@
 // Check that the Pool is unusable after it is closed.
 func TestPoolClose(t *testing.T) {
 	pool := NewPool(iobufSize)
+
 	iobuf := pool.alloc(1024)
 	if iobuf == nil {
 		t.Fatalf("iobuf should not be nil")
@@ -68,8 +73,10 @@
 }
 
 func TestIOBUFConcurrency(t *testing.T) {
-	const threadCount = 100
 	pool := NewPool(iobufSize)
+	defer pool.Close()
+
+	const threadCount = 100
 
 	var pending sync.WaitGroup
 	pending.Add(threadCount)
diff --git a/runtime/internal/lib/iobuf/reader_test.go b/runtime/internal/lib/iobuf/reader_test.go
index a78d546..a5cb1a9 100644
--- a/runtime/internal/lib/iobuf/reader_test.go
+++ b/runtime/internal/lib/iobuf/reader_test.go
@@ -32,8 +32,11 @@
 
 func TestReader(t *testing.T) {
 	pool := NewPool(iobufSize)
+	defer pool.Close()
+
 	var tr testReader
 	r := NewReader(pool, &tr)
+	defer r.Close()
 
 	const amount = 4
 	const loopCount = 1000
@@ -60,7 +63,4 @@
 	if err == nil {
 		t.Errorf("Expected error")
 	}
-
-	r.Close()
-	pool.Close()
 }
diff --git a/runtime/internal/lib/iobuf/slice_test.go b/runtime/internal/lib/iobuf/slice_test.go
index 0898b41..c5cc00e 100644
--- a/runtime/internal/lib/iobuf/slice_test.go
+++ b/runtime/internal/lib/iobuf/slice_test.go
@@ -10,8 +10,11 @@
 
 func TestExpandFront(t *testing.T) {
 	pool := NewPool(iobufSize)
-	calloc := NewAllocator(pool, 8)
-	slice := calloc.Alloc(10)
+	defer pool.Close()
+	alloc := NewAllocator(pool, 8)
+	defer alloc.Release()
+
+	slice := alloc.Alloc(10)
 	if slice.Size() != 10 {
 		t.Errorf("Expected length 10, got %d", slice.Size())
 	}
@@ -42,8 +45,11 @@
 
 func TestCoalesce(t *testing.T) {
 	pool := NewPool(iobufSize)
-	salloc := NewAllocator(pool, 0)
-	const count = 100
+	defer pool.Close()
+	alloc := NewAllocator(pool, 0)
+	defer alloc.Release()
+
+	const count = 1000
 	const blocksize = 1024
 	var slices [count]*Slice
 	for i := 0; i != count; i++ {
@@ -51,7 +57,7 @@
 		for j := 0; j != blocksize; j++ {
 			block[j] = charAt(i*blocksize + j)
 		}
-		slices[i] = salloc.Copy(block[:])
+		slices[i] = alloc.Copy(block[:])
 	}
 	coalesced := Coalesce(slices[:], blocksize*4)
 	expectEq(t, count/4, len(coalesced))
@@ -62,8 +68,6 @@
 		off += len(buf.Contents)
 		buf.Release()
 	}
-
-	salloc.Release()
 }
 
 func charAt(i int) byte {
diff --git a/runtime/internal/rpc/benchmark/simple/main.go b/runtime/internal/rpc/benchmark/simple/main.go
index 47ae752..b5ba01c 100644
--- a/runtime/internal/rpc/benchmark/simple/main.go
+++ b/runtime/internal/rpc/benchmark/simple/main.go
@@ -26,7 +26,7 @@
 
 const (
 	payloadSize = 1000
-	chunkCnt    = 1000
+	chunkCnt    = 10000
 
 	bulkPayloadSize = 1000000
 
diff --git a/runtime/internal/rpc/stream/crypto/box.go b/runtime/internal/rpc/stream/crypto/box.go
index 6760026..54e31fb 100644
--- a/runtime/internal/rpc/stream/crypto/box.go
+++ b/runtime/internal/rpc/stream/crypto/box.go
@@ -58,7 +58,7 @@
 // of NewBoxCrypter; the data sent has forward security with connection
 // granularity. One round-trip is required before any data can be sent.
 // BoxCrypter does NOT do anything to verify the identity of the peer.
-func NewBoxCrypter(exchange BoxKeyExchanger, pool *iobuf.Pool) (Crypter, error) {
+func NewBoxCrypter(exchange BoxKeyExchanger, alloc *iobuf.Allocator) (Crypter, error) {
 	pk, sk, err := GenerateBoxKey()
 	if err != nil {
 		return nil, err
@@ -70,12 +70,12 @@
 	if theirPK == nil {
 		return nil, verror.New(errRemotePublicKey, nil)
 	}
-	return NewBoxCrypterWithKey(pk, sk, theirPK, pool), nil
+	return NewBoxCrypterWithKey(pk, sk, theirPK, alloc), nil
 }
 
 // NewBoxCrypterWithKey is used when public keys have been already exchanged between peers.
-func NewBoxCrypterWithKey(myPublicKey, myPrivateKey, theirPublicKey *BoxKey, pool *iobuf.Pool) Crypter {
-	c := boxcrypter{alloc: iobuf.NewAllocator(pool, 0)}
+func NewBoxCrypterWithKey(myPublicKey, myPrivateKey, theirPublicKey *BoxKey, alloc *iobuf.Allocator) Crypter {
+	c := boxcrypter{alloc: alloc}
 	box.Precompute(&c.sharedKey, (*[32]byte)(theirPublicKey), (*[32]byte)(myPrivateKey))
 	// Distinct messages between the same {sender, receiver} set are required
 	// to have distinct nonces. The server with the lexicographically smaller
@@ -92,30 +92,33 @@
 }
 
 func (c *boxcrypter) Encrypt(src *iobuf.Slice) (*iobuf.Slice, error) {
-	defer src.Release()
 	var nonce [24]byte
 	binary.LittleEndian.PutUint64(nonce[:], c.writeNonce)
 	c.writeNonce += 2
 	ret := c.alloc.Alloc(uint(len(src.Contents) + box.Overhead))
 	ret.Contents = box.SealAfterPrecomputation(ret.Contents[:0], src.Contents, &nonce, &c.sharedKey)
+	src.Release()
 	return ret, nil
 }
 
 func (c *boxcrypter) Decrypt(src *iobuf.Slice) (*iobuf.Slice, error) {
-	defer src.Release()
 	var nonce [24]byte
 	binary.LittleEndian.PutUint64(nonce[:], c.readNonce)
 	c.readNonce += 2
 	retLen := len(src.Contents) - box.Overhead
 	if retLen < 0 {
+		src.Release()
 		return nil, verror.New(stream.ErrNetwork, nil, verror.New(errCipherTextTooShort, nil))
 	}
 	ret := c.alloc.Alloc(uint(retLen))
 	var ok bool
 	ret.Contents, ok = box.OpenAfterPrecomputation(ret.Contents[:0], src.Contents, &nonce, &c.sharedKey)
 	if !ok {
+		src.Release()
+		ret.Release()
 		return nil, verror.New(stream.ErrSecurity, nil, verror.New(errMessageAuthFailed, nil))
 	}
+	src.Release()
 	return ret, nil
 }
 
diff --git a/runtime/internal/rpc/stream/crypto/crypto_test.go b/runtime/internal/rpc/stream/crypto/crypto_test.go
index 4f67cb6..db56407 100644
--- a/runtime/internal/rpc/stream/crypto/crypto_test.go
+++ b/runtime/internal/rpc/stream/crypto/crypto_test.go
@@ -158,7 +158,7 @@
 func tlsCrypters(t testing.TB, serverConn, clientConn net.Conn) (Crypter, Crypter) {
 	crypters := make(chan Crypter)
 	go func() {
-		server, err := NewTLSServer(serverConn, serverConn.LocalAddr(), serverConn.RemoteAddr(), iobuf.NewPool(0))
+		server, err := NewTLSServer(serverConn, serverConn.LocalAddr(), serverConn.RemoteAddr(), iobuf.NewAllocator(iobuf.NewPool(0), 0))
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -166,7 +166,7 @@
 	}()
 
 	go func() {
-		client, err := NewTLSClient(clientConn, clientConn.LocalAddr(), clientConn.RemoteAddr(), TLSClientSessionCache{}, iobuf.NewPool(0))
+		client, err := NewTLSClient(clientConn, clientConn.LocalAddr(), clientConn.RemoteAddr(), TLSClientSessionCache{}, iobuf.NewAllocator(iobuf.NewPool(0), 0))
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -190,7 +190,7 @@
 	crypters := make(chan Crypter)
 	for _, ex := range []BoxKeyExchanger{clientExchanger, serverExchanger} {
 		go func(exchanger BoxKeyExchanger) {
-			crypter, err := NewBoxCrypter(exchanger, iobuf.NewPool(0))
+			crypter, err := NewBoxCrypter(exchanger, iobuf.NewAllocator(iobuf.NewPool(0), 0))
 			if err != nil {
 				t.Fatal(err)
 			}
diff --git a/runtime/internal/rpc/stream/crypto/tls.go b/runtime/internal/rpc/stream/crypto/tls.go
index b210a11..3f5d07f 100644
--- a/runtime/internal/rpc/stream/crypto/tls.go
+++ b/runtime/internal/rpc/stream/crypto/tls.go
@@ -38,19 +38,19 @@
 
 // NewTLSClient returns a Crypter implementation that uses TLS, assuming
 // handshaker was initiated by a client.
-func NewTLSClient(handshaker io.ReadWriteCloser, local, remote net.Addr, sessionCache TLSClientSessionCache, pool *iobuf.Pool) (Crypter, error) {
+func NewTLSClient(handshaker io.ReadWriteCloser, local, remote net.Addr, sessionCache TLSClientSessionCache, alloc *iobuf.Allocator) (Crypter, error) {
 	var config tls.Config
 	// TLS + resumption + channel bindings is broken: <https://secure-resumption.com/#channelbindings>.
 	config.SessionTicketsDisabled = true
 	config.InsecureSkipVerify = true
 	config.ClientSessionCache = sessionCache.ClientSessionCache
-	return newTLSCrypter(handshaker, local, remote, &config, pool, false)
+	return newTLSCrypter(handshaker, local, remote, &config, alloc, false)
 }
 
 // NewTLSServer returns a Crypter implementation that uses TLS, assuming
 // handshaker was accepted by a server.
-func NewTLSServer(handshaker io.ReadWriteCloser, local, remote net.Addr, pool *iobuf.Pool) (Crypter, error) {
-	return newTLSCrypter(handshaker, local, remote, ServerTLSConfig(), pool, true)
+func NewTLSServer(handshaker io.ReadWriteCloser, local, remote net.Addr, alloc *iobuf.Allocator) (Crypter, error) {
+	return newTLSCrypter(handshaker, local, remote, ServerTLSConfig(), alloc, true)
 }
 
 type fakeConn struct {
@@ -114,7 +114,7 @@
 	fc    *fakeConn
 }
 
-func newTLSCrypter(handshaker io.ReadWriteCloser, local, remote net.Addr, config *tls.Config, pool *iobuf.Pool, server bool) (Crypter, error) {
+func newTLSCrypter(handshaker io.ReadWriteCloser, local, remote net.Addr, config *tls.Config, alloc *iobuf.Allocator, server bool) (Crypter, error) {
 	fc := &fakeConn{handshakeConn: handshaker, laddr: local, raddr: remote}
 	var t *tls.Conn
 	if server {
@@ -149,7 +149,7 @@
 	}
 	fc.handshakeConn = nil
 	return &tlsCrypter{
-		alloc: iobuf.NewAllocator(pool, 0),
+		alloc: alloc,
 		tls:   t,
 		fc:    fc,
 	}, nil
diff --git a/runtime/internal/rpc/stream/vc/vc.go b/runtime/internal/rpc/stream/vc/vc.go
index 75585d5..1e6e7d2 100644
--- a/runtime/internal/rpc/stream/vc/vc.go
+++ b/runtime/internal/rpc/stream/vc/vc.go
@@ -491,7 +491,7 @@
 			return nil, verror.New(stream.ErrNetwork, nil, verror.New(errClosedDuringHandshake, nil, vc.VCI))
 		}
 	}
-	crypter, err := crypto.NewBoxCrypter(exchange, vc.pool)
+	crypter, err := crypto.NewBoxCrypter(exchange, iobuf.NewAllocator(vc.pool, vc.reserveBytes))
 	if err != nil {
 		return vc.appendCloseReason(verror.New(stream.ErrSecurity, nil, verror.New(errFailedToSetupEncryption, nil, err)))
 	}
@@ -540,7 +540,7 @@
 	if err != nil {
 		return vc.appendCloseReason(verror.New(stream.ErrSecurity, nil, verror.New(errFailedToSetupEncryption, nil, err)))
 	}
-	crypter := crypto.NewBoxCrypterWithKey(pk, sk, remotePublicKeyPreauth, vc.pool)
+	crypter := crypto.NewBoxCrypterWithKey(pk, sk, remotePublicKeyPreauth, iobuf.NewAllocator(vc.pool, vc.reserveBytes))
 	sigPreauth, err := bindClientPrincipalToChannel(crypter, params.LocalPrincipal)
 	if err != nil {
 		return vc.appendCloseReason(verror.New(stream.ErrSecurity, nil, err))
@@ -637,7 +637,7 @@
 	}
 
 	go func() {
-		crypter, err := crypto.NewBoxCrypter(exchange, vc.pool)
+		crypter, err := crypto.NewBoxCrypter(exchange, iobuf.NewAllocator(vc.pool, vc.reserveBytes))
 		if err != nil {
 			vc.abortHandshakeAcceptedVC(verror.New(stream.ErrSecurity, nil, verror.New(errFailedToSetupEncryption, nil, err)), ln, result)
 			return
@@ -698,7 +698,7 @@
 	}
 
 	go func() {
-		crypter := crypto.NewBoxCrypterWithKey(lPublicKeyPreauth, lPrivateKeyPreauth, rPublicKey, vc.pool)
+		crypter := crypto.NewBoxCrypterWithKey(lPublicKeyPreauth, lPrivateKeyPreauth, rPublicKey, iobuf.NewAllocator(vc.pool, vc.reserveBytes))
 		if err := verifyClientPrincipalBoundToChannel(sigPreauth, crypter, params.RemoteBlessings.PublicKey()); err != nil {
 			vc.abortHandshakeAcceptedVC(verror.New(stream.ErrSecurity, nil, verror.New(errAuthFailed, nil, err)), ln, result)
 			return
@@ -1082,7 +1082,7 @@
 	if err != nil {
 		return nil, err
 	}
-	alloc := iobuf.NewAllocator(vc.pool, vc.reserveBytes)
+	alloc := iobuf.NewAllocator(vc.pool, 0)
 	return newWriter(MaxPayloadSizeBytes, bq, alloc, vc.sharedCounters), nil
 }