path: root/libgo/go/runtime/string_test.go
author    Ian Lance Taylor <ian@gcc.gnu.org>  2016-07-22 18:15:38 +0000
committer Ian Lance Taylor <ian@gcc.gnu.org>  2016-07-22 18:15:38 +0000
commit    22b955cca564a9a3a5b8c9d9dd1e295b7943c128 (patch)
tree      abdbd898676e1f853fca2d7e031d105d7ebcf676 /libgo/go/runtime/string_test.go
parent    9d04a3af4c6491536badf6bde9707c907e4d196b (diff)
download  gcc-22b955cca564a9a3a5b8c9d9dd1e295b7943c128.tar.gz
libgo: update to go1.7rc3

Reviewed-on: https://go-review.googlesource.com/25150
From-SVN: r238662
Diffstat (limited to 'libgo/go/runtime/string_test.go')
-rw-r--r--  libgo/go/runtime/string_test.go  |  53
1 file changed, 44 insertions(+), 9 deletions(-)
diff --git a/libgo/go/runtime/string_test.go b/libgo/go/runtime/string_test.go
index 64abd57d28c..f9411c00161 100644
--- a/libgo/go/runtime/string_test.go
+++ b/libgo/go/runtime/string_test.go
@@ -9,6 +9,10 @@ import (
"testing"
)
+// Strings and slices that don't escape and fit into tmpBuf are stack allocated,
+// which defeats using AllocsPerRun to test other optimizations.
+const sizeNoStack = 100
+
func BenchmarkCompareStringEqual(b *testing.B) {
bytes := []byte("Hello Gophers!")
s1, s2 := string(bytes), string(bytes)
@@ -101,6 +105,17 @@ func BenchmarkRuneIterate2(b *testing.B) {
}
}
+func BenchmarkArrayEqual(b *testing.B) {
+ a1 := [16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
+ a2 := [16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ if a1 != a2 {
+ b.Fatal("not equal")
+ }
+ }
+}
+
/*
func TestStringW(t *testing.T) {
strings := []string{
@@ -150,7 +165,7 @@ func TestGostringnocopy(t *testing.T) {
*/
func TestCompareTempString(t *testing.T) {
- s := "foo"
+ s := strings.Repeat("x", sizeNoStack)
b := []byte(s)
n := testing.AllocsPerRun(1000, func() {
if string(b) != s {
@@ -161,7 +176,8 @@ func TestCompareTempString(t *testing.T) {
t.Fatalf("strings are not equal: '%v' and '%v'", string(b), s)
}
})
- if n != 0 {
+ // was n != 0, changed for gccgo.
+ if n > 2 {
t.Fatalf("want 0 allocs, got %v", n)
}
}
@@ -213,7 +229,7 @@ func TestIntStringAllocs(t *testing.T) {
}
func TestRangeStringCast(t *testing.T) {
- s := "abc"
+ s := strings.Repeat("x", sizeNoStack)
n := testing.AllocsPerRun(1000, func() {
for i, c := range []byte(s) {
if c != s[i] {
@@ -221,22 +237,41 @@ func TestRangeStringCast(t *testing.T) {
}
}
})
- if n != 0 {
+ // was n != 0, changed for gccgo.
+ if n > 1 {
t.Fatalf("want 0 allocs, got %v", n)
}
}
+func isZeroed(b []byte) bool {
+ for _, x := range b {
+ if x != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+func isZeroedR(r []rune) bool {
+ for _, x := range r {
+ if x != 0 {
+ return false
+ }
+ }
+ return true
+}
+
func TestString2Slice(t *testing.T) {
// Make sure we don't return slices that expose
// an unzeroed section of stack-allocated temp buf
- // between len and cap. See issue 14232.
+ // between len and cap. See issue 14232.
s := "foož"
b := ([]byte)(s)
- if cap(b) != 5 {
- t.Errorf("want cap of 5, got %d", cap(b))
+ if !isZeroed(b[len(b):cap(b)]) {
+ t.Errorf("extra bytes not zeroed")
}
r := ([]rune)(s)
- if cap(r) != 4 {
- t.Errorf("want cap of 4, got %d", cap(r))
+ if !isZeroedR(r[len(r):cap(r)]) {
+ t.Errorf("extra runes not zeroed")
}
}
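
For context on the change above: testing.AllocsPerRun only counts heap allocations, so a short, non-escaping string can be converted through the runtime's stack-resident tmpBuf and report zero allocations whether or not the optimization under test fired. The standalone sketch below is not part of the commit; the test name and the n != 0 threshold are assumptions matching the upstream gc behavior (the diff shows gccgo relaxing that threshold), and it only illustrates the pattern that the new sizeNoStack constant enables.

package runtime_test

import (
	"strings"
	"testing"
)

// sizeNoStack mirrors the constant added by the patch: large enough that the
// string cannot live in the runtime's temporary buffer, so any conversion
// that really copies shows up as a heap allocation.
const sizeNoStack = 100

func TestCompareTempStringSketch(t *testing.T) {
	s := strings.Repeat("x", sizeNoStack)
	b := []byte(s)
	n := testing.AllocsPerRun(1000, func() {
		// If the compiler avoids materializing string(b) for the comparison,
		// this runs with zero heap allocations even though s is too big for tmpBuf.
		if string(b) != s {
			t.Fatalf("strings are not equal: %q and %q", string(b), s)
		}
	})
	if n != 0 { // gccgo tolerates a couple of allocations here, per the diff
		t.Fatalf("want 0 allocs, got %v", n)
	}
}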