summary refs log tree commit diff
path: root/src/bytes/buffer_test.go
diff options
context:
space:
mode:
Diffstat (limited to 'src/bytes/buffer_test.go')
-rw-r--r--  src/bytes/buffer_test.go  41
1 files changed, 41 insertions, 0 deletions
diff --git a/src/bytes/buffer_test.go b/src/bytes/buffer_test.go
index c0855007c1..81476fbae1 100644
--- a/src/bytes/buffer_test.go
+++ b/src/bytes/buffer_test.go
@@ -9,6 +9,7 @@ import (
"fmt"
"io"
"math/rand"
+ "strconv"
"testing"
"unicode/utf8"
)
@@ -326,6 +327,33 @@ func TestWriteTo(t *testing.T) {
}
}
+// TestWriteAppend exercises the AvailableBuffer/Write pattern: append
+// into the spare-capacity slice returned by AvailableBuffer, then pass
+// it back to Write. It checks the result matches a plain append, and
+// that the pattern stops allocating once the buffer has grown enough.
+func TestWriteAppend(t *testing.T) {
+ var got Buffer
+ var want []byte
+ for i := 0; i < 1000; i++ {
+ // b aliases got's unused capacity; Write then logically appends it.
+ b := got.AvailableBuffer()
+ b = strconv.AppendInt(b, int64(i), 10)
+ want = strconv.AppendInt(want, int64(i), 10)
+ got.Write(b)
+ }
+ if !Equal(got.Bytes(), want) {
+ t.Fatalf("Bytes() = %q, want %q", got, want)
+ }
+
+ // With a sufficiently sized buffer, there should be no allocations.
+ // got retains the capacity grown above; Reset keeps the backing array.
+ n := testing.AllocsPerRun(100, func() {
+ got.Reset()
+ for i := 0; i < 1000; i++ {
+ b := got.AvailableBuffer()
+ b = strconv.AppendInt(b, int64(i), 10)
+ got.Write(b)
+ }
+ })
+ if n > 0 {
+ t.Errorf("allocations occurred while appending")
+ }
+}
+
func TestRuneIO(t *testing.T) {
const NRune = 1000
// Built a test slice while we write the data
@@ -687,3 +715,16 @@ func BenchmarkBufferWriteBlock(b *testing.B) {
})
}
}
+
+// BenchmarkBufferAppendNoCopy measures writing a slice obtained from
+// AvailableBuffer back into the same Buffer when capacity is already
+// sufficient: the slice aliases the buffer's free space, so Write can
+// skip the copy entirely.
+func BenchmarkBufferAppendNoCopy(b *testing.B) {
+ var bb Buffer
+ bb.Grow(16 << 20)
+ b.SetBytes(int64(bb.Available()))
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ bb.Reset()
+ // Named buf, not b, so it does not shadow the *testing.B receiver.
+ buf := bb.AvailableBuffer()
+ buf = buf[:cap(buf)] // use max capacity to simulate a large append operation
+ bb.Write(buf) // should be nearly infinitely fast
+ }
+}