Diffstat (limited to 'libgo/go/runtime')
-rw-r--r--  libgo/go/runtime/append_test.go  322
-rw-r--r--  libgo/go/runtime/callers_test.go  83
-rw-r--r--  libgo/go/runtime/cgo_mips64x.go  12
-rw-r--r--  libgo/go/runtime/cgo_mmap.go  8
-rw-r--r--  libgo/go/runtime/cgo_ppc64x.go  2
-rw-r--r--  libgo/go/runtime/cgocheck.go  31
-rw-r--r--  libgo/go/runtime/chan_test.go  109
-rw-r--r--  libgo/go/runtime/chanbarrier_test.go  2
-rw-r--r--  libgo/go/runtime/compiler.go  4
-rw-r--r--  libgo/go/runtime/crash_cgo_test.go  141
-rw-r--r--  libgo/go/runtime/crash_nonunix_test.go  13
-rw-r--r--  libgo/go/runtime/crash_test.go  168
-rw-r--r--  libgo/go/runtime/crash_unix_test.go  8
-rw-r--r--  libgo/go/runtime/debug.go  2
-rw-r--r--  libgo/go/runtime/debug/garbage.go  10
-rw-r--r--  libgo/go/runtime/debug/garbage_test.go  2
-rw-r--r--  libgo/go/runtime/debug/heapdump_test.go  2
-rw-r--r--  libgo/go/runtime/debug/stack_test.go  2
-rw-r--r--  libgo/go/runtime/defs_linux_s390x.go  167
-rw-r--r--  libgo/go/runtime/defs_plan9_arm.go  63
-rw-r--r--  libgo/go/runtime/error.go  18
-rw-r--r--  libgo/go/runtime/export_arm_test.go  2
-rw-r--r--  libgo/go/runtime/export_linux_test.go  2
-rw-r--r--  libgo/go/runtime/export_mmap_test.go  2
-rw-r--r--  libgo/go/runtime/export_test.go  130
-rw-r--r--  libgo/go/runtime/export_windows_test.go  8
-rw-r--r--  libgo/go/runtime/extern.go  123
-rw-r--r--  libgo/go/runtime/fastlog2_test.go  2
-rw-r--r--  libgo/go/runtime/gc_test.go  17
-rw-r--r--  libgo/go/runtime/gcinfo_test.go  9
-rw-r--r--  libgo/go/runtime/lfstack_64bit.go  48
-rw-r--r--  libgo/go/runtime/lfstack_linux_mips64x.go  32
-rw-r--r--  libgo/go/runtime/map_test.go  16
-rw-r--r--  libgo/go/runtime/mmap.go  7
-rw-r--r--  libgo/go/runtime/msan.go  8
-rw-r--r--  libgo/go/runtime/msan0.go  2
-rw-r--r--  libgo/go/runtime/mstkbar.go  32
-rw-r--r--  libgo/go/runtime/norace_test.go  2
-rw-r--r--  libgo/go/runtime/os1_linux_generic.go  27
-rw-r--r--  libgo/go/runtime/os1_linux_mips64x.go  26
-rw-r--r--  libgo/go/runtime/os2_linux_mips64x.go  25
-rw-r--r--  libgo/go/runtime/os_linux_generic.go (renamed from libgo/go/runtime/os2_linux_generic.go)  19
-rw-r--r--  libgo/go/runtime/os_linux_mips64x.go  48
-rw-r--r--  libgo/go/runtime/os_linux_noauxv.go  10
-rw-r--r--  libgo/go/runtime/os_linux_s390x.go  46
-rw-r--r--  libgo/go/runtime/os_netbsd_386.go  16
-rw-r--r--  libgo/go/runtime/os_netbsd_amd64.go  16
-rw-r--r--  libgo/go/runtime/os_plan9_arm.go  17
-rw-r--r--  libgo/go/runtime/parfor_test.go  128
-rw-r--r--  libgo/go/runtime/pprof/mprof_test.go  2
-rw-r--r--  libgo/go/runtime/pprof/pprof.go  168
-rw-r--r--  libgo/go/runtime/pprof/pprof_test.go  83
-rw-r--r--  libgo/go/runtime/print.go  2
-rw-r--r--  libgo/go/runtime/proc_runtime_test.go  35
-rw-r--r--  libgo/go/runtime/proc_test.go  46
-rw-r--r--  libgo/go/runtime/race/race_linux_test.go  37
-rw-r--r--  libgo/go/runtime/race/race_windows_test.go  46
-rw-r--r--  libgo/go/runtime/runtime-lldb_test.go  4
-rw-r--r--  libgo/go/runtime/runtime_mmap_test.go  2
-rw-r--r--  libgo/go/runtime/runtime_test.go  8
-rw-r--r--  libgo/go/runtime/runtime_unix_test.go  2
-rw-r--r--  libgo/go/runtime/signal2_unix.go  6
-rw-r--r--  libgo/go/runtime/signal_linux_mips64x.go  2
-rw-r--r--  libgo/go/runtime/signal_linux_s390x.go  208
-rw-r--r--  libgo/go/runtime/signal_mips64x.go  2
-rw-r--r--  libgo/go/runtime/signal_sigtramp.go  14
-rw-r--r--  libgo/go/runtime/sigtab_linux_generic.go  2
-rw-r--r--  libgo/go/runtime/sigtab_linux_mips64x.go  2
-rw-r--r--  libgo/go/runtime/stack.go  262
-rw-r--r--  libgo/go/runtime/string_test.go  53
-rw-r--r--  libgo/go/runtime/symtab.go  129
-rw-r--r--  libgo/go/runtime/sys_s390x.go  45
-rw-r--r--  libgo/go/runtime/testdata/testprog/crash.go  2
-rw-r--r--  libgo/go/runtime/testdata/testprog/deadlock.go  27
-rw-r--r--  libgo/go/runtime/testdata/testprog/gc.go  37
-rw-r--r--  libgo/go/runtime/testdata/testprog/main.go  2
-rw-r--r--  libgo/go/runtime/testdata/testprog/memprof.go  49
-rw-r--r--  libgo/go/runtime/testdata/testprog/misc.go  2
-rw-r--r--  libgo/go/runtime/testdata/testprog/signal.go  16
-rw-r--r--  libgo/go/runtime/testdata/testprog/stringconcat.go  2
-rw-r--r--  libgo/go/runtime/testdata/testprog/syscall_windows.go  2
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/aprof.go  53
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/callback.go  2
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/cgo.go  22
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/crash.go  2
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/deadlock.go  30
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/dll_windows.go  2
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/dropm.go  2
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/dropm_stub.go  2
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/exec.go  2
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/main.go  2
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/pprof.go  97
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/threadpanic.go  2
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/threadpprof.go  112
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/threadprof.go  2
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/traceback.go  81
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/tracebackctxt.go  107
-rw-r--r--  libgo/go/runtime/testdata/testprognet/main.go  2
-rw-r--r--  libgo/go/runtime/testdata/testprognet/net.go  2
-rw-r--r--  libgo/go/runtime/testdata/testprognet/signal.go  2
-rw-r--r--  libgo/go/runtime/vlrt.go  3
101 files changed, 3171 insertions, 646 deletions
diff --git a/libgo/go/runtime/append_test.go b/libgo/go/runtime/append_test.go
index a67dc9b4948..6b8968e382d 100644
--- a/libgo/go/runtime/append_test.go
+++ b/libgo/go/runtime/append_test.go
@@ -3,10 +3,59 @@
// license that can be found in the LICENSE file.
package runtime_test
-import "testing"
+import (
+ "fmt"
+ "testing"
+)
const N = 20
+func BenchmarkMakeSlice(b *testing.B) {
+ var x []byte
+ for i := 0; i < b.N; i++ {
+ x = make([]byte, 32)
+ _ = x
+ }
+}
+
+func BenchmarkGrowSliceBytes(b *testing.B) {
+ b.StopTimer()
+ var x = make([]byte, 9)
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ _ = append([]byte(nil), x...)
+ }
+}
+
+func BenchmarkGrowSliceInts(b *testing.B) {
+ b.StopTimer()
+ var x = make([]int, 9)
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ _ = append([]int(nil), x...)
+ }
+}
+
+func BenchmarkGrowSlicePtr(b *testing.B) {
+ b.StopTimer()
+ var x = make([]*byte, 9)
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ _ = append([]*byte(nil), x...)
+ }
+}
+
+type struct24 struct{ a, b, c int64 }
+
+func BenchmarkGrowSliceStruct24Bytes(b *testing.B) {
+ b.StopTimer()
+ var x = make([]struct24, 9)
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ _ = append([]struct24(nil), x...)
+ }
+}
+
func BenchmarkAppend(b *testing.B) {
b.StopTimer()
x := make([]int, 0, N)
@@ -38,75 +87,37 @@ func BenchmarkAppendGrowString(b *testing.B) {
}
}
-func benchmarkAppendBytes(b *testing.B, length int) {
- b.StopTimer()
- x := make([]byte, 0, N)
- y := make([]byte, length)
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- x = x[0:0]
- x = append(x, y...)
+func BenchmarkAppendSlice(b *testing.B) {
+ for _, length := range []int{1, 4, 7, 8, 15, 16, 32} {
+ b.Run(fmt.Sprint(length, "Bytes"), func(b *testing.B) {
+ x := make([]byte, 0, N)
+ y := make([]byte, length)
+ for i := 0; i < b.N; i++ {
+ x = x[0:0]
+ x = append(x, y...)
+ }
+ })
}
}
-func BenchmarkAppend1Byte(b *testing.B) {
- benchmarkAppendBytes(b, 1)
-}
-
-func BenchmarkAppend4Bytes(b *testing.B) {
- benchmarkAppendBytes(b, 4)
-}
-
-func BenchmarkAppend7Bytes(b *testing.B) {
- benchmarkAppendBytes(b, 7)
-}
-
-func BenchmarkAppend8Bytes(b *testing.B) {
- benchmarkAppendBytes(b, 8)
-}
-
-func BenchmarkAppend15Bytes(b *testing.B) {
- benchmarkAppendBytes(b, 15)
-}
-
-func BenchmarkAppend16Bytes(b *testing.B) {
- benchmarkAppendBytes(b, 16)
-}
-
-func BenchmarkAppend32Bytes(b *testing.B) {
- benchmarkAppendBytes(b, 32)
-}
-
-func benchmarkAppendStr(b *testing.B, str string) {
- b.StopTimer()
- x := make([]byte, 0, N)
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- x = x[0:0]
- x = append(x, str...)
+func BenchmarkAppendStr(b *testing.B) {
+ for _, str := range []string{
+ "1",
+ "1234",
+ "12345678",
+ "1234567890123456",
+ "12345678901234567890123456789012",
+ } {
+ b.Run(fmt.Sprint(len(str), "Bytes"), func(b *testing.B) {
+ x := make([]byte, 0, N)
+ for i := 0; i < b.N; i++ {
+ x = x[0:0]
+ x = append(x, str...)
+ }
+ })
}
}
-func BenchmarkAppendStr1Byte(b *testing.B) {
- benchmarkAppendStr(b, "1")
-}
-
-func BenchmarkAppendStr4Bytes(b *testing.B) {
- benchmarkAppendStr(b, "1234")
-}
-
-func BenchmarkAppendStr8Bytes(b *testing.B) {
- benchmarkAppendStr(b, "12345678")
-}
-
-func BenchmarkAppendStr16Bytes(b *testing.B) {
- benchmarkAppendStr(b, "1234567890123456")
-}
-
-func BenchmarkAppendStr32Bytes(b *testing.B) {
- benchmarkAppendStr(b, "12345678901234567890123456789012")
-}
-
func BenchmarkAppendSpecialCase(b *testing.B) {
b.StopTimer()
x := make([]int, 0, N)
@@ -149,42 +160,153 @@ func TestAppendOverlap(t *testing.T) {
}
}
-func benchmarkCopySlice(b *testing.B, l int) {
- s := make([]byte, l)
- buf := make([]byte, 4096)
- var n int
- for i := 0; i < b.N; i++ {
- n = copy(buf, s)
+func BenchmarkCopy(b *testing.B) {
+ for _, l := range []int{1, 2, 4, 8, 12, 16, 32, 128, 1024} {
+ buf := make([]byte, 4096)
+ b.Run(fmt.Sprint(l, "Byte"), func(b *testing.B) {
+ s := make([]byte, l)
+ var n int
+ for i := 0; i < b.N; i++ {
+ n = copy(buf, s)
+ }
+ b.SetBytes(int64(n))
+ })
+ b.Run(fmt.Sprint(l, "String"), func(b *testing.B) {
+ s := string(make([]byte, l))
+ var n int
+ for i := 0; i < b.N; i++ {
+ n = copy(buf, s)
+ }
+ b.SetBytes(int64(n))
+ })
}
- b.SetBytes(int64(n))
}
-func benchmarkCopyStr(b *testing.B, l int) {
- s := string(make([]byte, l))
- buf := make([]byte, 4096)
- var n int
- for i := 0; i < b.N; i++ {
- n = copy(buf, s)
- }
- b.SetBytes(int64(n))
-}
-
-func BenchmarkCopy1Byte(b *testing.B) { benchmarkCopySlice(b, 1) }
-func BenchmarkCopy2Byte(b *testing.B) { benchmarkCopySlice(b, 2) }
-func BenchmarkCopy4Byte(b *testing.B) { benchmarkCopySlice(b, 4) }
-func BenchmarkCopy8Byte(b *testing.B) { benchmarkCopySlice(b, 8) }
-func BenchmarkCopy12Byte(b *testing.B) { benchmarkCopySlice(b, 12) }
-func BenchmarkCopy16Byte(b *testing.B) { benchmarkCopySlice(b, 16) }
-func BenchmarkCopy32Byte(b *testing.B) { benchmarkCopySlice(b, 32) }
-func BenchmarkCopy128Byte(b *testing.B) { benchmarkCopySlice(b, 128) }
-func BenchmarkCopy1024Byte(b *testing.B) { benchmarkCopySlice(b, 1024) }
-
-func BenchmarkCopy1String(b *testing.B) { benchmarkCopyStr(b, 1) }
-func BenchmarkCopy2String(b *testing.B) { benchmarkCopyStr(b, 2) }
-func BenchmarkCopy4String(b *testing.B) { benchmarkCopyStr(b, 4) }
-func BenchmarkCopy8String(b *testing.B) { benchmarkCopyStr(b, 8) }
-func BenchmarkCopy12String(b *testing.B) { benchmarkCopyStr(b, 12) }
-func BenchmarkCopy16String(b *testing.B) { benchmarkCopyStr(b, 16) }
-func BenchmarkCopy32String(b *testing.B) { benchmarkCopyStr(b, 32) }
-func BenchmarkCopy128String(b *testing.B) { benchmarkCopyStr(b, 128) }
-func BenchmarkCopy1024String(b *testing.B) { benchmarkCopyStr(b, 1024) }
+var (
+ sByte []byte
+ s1Ptr []uintptr
+ s2Ptr [][2]uintptr
+ s3Ptr [][3]uintptr
+ s4Ptr [][4]uintptr
+)
+
+// BenchmarkAppendInPlace tests the performance of append
+// when the result is being written back to the same slice.
+// In order for the in-place optimization to occur,
+// the slice must be referred to by address;
+// using a global is an easy way to trigger that.
+// We test the "grow" and "no grow" paths separately,
+// but not the "normal" (occasionally grow) path,
+// because it is a blend of the other two.
+// We use small numbers and small sizes in an attempt
+// to avoid benchmarking memory allocation and copying.
+// We use scalars instead of pointers in an attempt
+// to avoid benchmarking the write barriers.
+// We benchmark four common sizes (byte, pointer, string/interface, slice),
+// and one larger size.
+func BenchmarkAppendInPlace(b *testing.B) {
+ b.Run("NoGrow", func(b *testing.B) {
+ const C = 128
+
+ b.Run("Byte", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ sByte = make([]byte, C)
+ for j := 0; j < C; j++ {
+ sByte = append(sByte, 0x77)
+ }
+ }
+ })
+
+ b.Run("1Ptr", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ s1Ptr = make([]uintptr, C)
+ for j := 0; j < C; j++ {
+ s1Ptr = append(s1Ptr, 0x77)
+ }
+ }
+ })
+
+ b.Run("2Ptr", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ s2Ptr = make([][2]uintptr, C)
+ for j := 0; j < C; j++ {
+ s2Ptr = append(s2Ptr, [2]uintptr{0x77, 0x88})
+ }
+ }
+ })
+
+ b.Run("3Ptr", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ s3Ptr = make([][3]uintptr, C)
+ for j := 0; j < C; j++ {
+ s3Ptr = append(s3Ptr, [3]uintptr{0x77, 0x88, 0x99})
+ }
+ }
+ })
+
+ b.Run("4Ptr", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ s4Ptr = make([][4]uintptr, C)
+ for j := 0; j < C; j++ {
+ s4Ptr = append(s4Ptr, [4]uintptr{0x77, 0x88, 0x99, 0xAA})
+ }
+ }
+ })
+
+ })
+
+ b.Run("Grow", func(b *testing.B) {
+ const C = 5
+
+ b.Run("Byte", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ sByte = make([]byte, 0)
+ for j := 0; j < C; j++ {
+ sByte = append(sByte, 0x77)
+ sByte = sByte[:cap(sByte)]
+ }
+ }
+ })
+
+ b.Run("1Ptr", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ s1Ptr = make([]uintptr, 0)
+ for j := 0; j < C; j++ {
+ s1Ptr = append(s1Ptr, 0x77)
+ s1Ptr = s1Ptr[:cap(s1Ptr)]
+ }
+ }
+ })
+
+ b.Run("2Ptr", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ s2Ptr = make([][2]uintptr, 0)
+ for j := 0; j < C; j++ {
+ s2Ptr = append(s2Ptr, [2]uintptr{0x77, 0x88})
+ s2Ptr = s2Ptr[:cap(s2Ptr)]
+ }
+ }
+ })
+
+ b.Run("3Ptr", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ s3Ptr = make([][3]uintptr, 0)
+ for j := 0; j < C; j++ {
+ s3Ptr = append(s3Ptr, [3]uintptr{0x77, 0x88, 0x99})
+ s3Ptr = s3Ptr[:cap(s3Ptr)]
+ }
+ }
+ })
+
+ b.Run("4Ptr", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ s4Ptr = make([][4]uintptr, 0)
+ for j := 0; j < C; j++ {
+ s4Ptr = append(s4Ptr, [4]uintptr{0x77, 0x88, 0x99, 0xAA})
+ s4Ptr = s4Ptr[:cap(s4Ptr)]
+ }
+ }
+ })
+
+ })
+}
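
The long comment on BenchmarkAppendInPlace above describes why the append result must be written back through a global: the slice header has to be addressable for the runtime's in-place append path to apply. A minimal standalone sketch of the same pattern (the names here are illustrative, not part of the patch):

package main

import "fmt"

// sink is a package-level slice. Appending back into it forces the
// compiler to store the slice header to memory, which is what the
// in-place append optimization keys on.
var sink []byte

func main() {
	sink = make([]byte, 0, 8)
	for i := 0; i < 16; i++ {
		sink = append(sink, byte(i))
	}
	fmt.Println(len(sink), cap(sink)) // 16, plus whatever capacity growth chose
}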
diff --git a/libgo/go/runtime/callers_test.go b/libgo/go/runtime/callers_test.go
new file mode 100644
index 00000000000..ad83f9969c4
--- /dev/null
+++ b/libgo/go/runtime/callers_test.go
@@ -0,0 +1,83 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "runtime"
+ "strings"
+ "testing"
+)
+
+func f1(pan bool) []uintptr {
+ return f2(pan) // line 14
+}
+
+func f2(pan bool) []uintptr {
+ return f3(pan) // line 18
+}
+
+func f3(pan bool) []uintptr {
+ if pan {
+ panic("f3") // line 23
+ }
+ ret := make([]uintptr, 20)
+ return ret[:runtime.Callers(0, ret)] // line 26
+}
+
+func testCallers(t *testing.T, pcs []uintptr, pan bool) {
+ m := make(map[string]int, len(pcs))
+ frames := runtime.CallersFrames(pcs)
+ for {
+ frame, more := frames.Next()
+ if frame.Function != "" {
+ m[frame.Function] = frame.Line
+ }
+ if !more {
+ break
+ }
+ }
+
+ var seen []string
+ for k := range m {
+ seen = append(seen, k)
+ }
+ t.Logf("functions seen: %s", strings.Join(seen, " "))
+
+ var f3Line int
+ if pan {
+ f3Line = 23
+ } else {
+ f3Line = 26
+ }
+ want := []struct {
+ name string
+ line int
+ }{
+ {"f1", 14},
+ {"f2", 18},
+ {"f3", f3Line},
+ }
+ for _, w := range want {
+ if got := m["runtime_test."+w.name]; got != w.line {
+ t.Errorf("%s is line %d, want %d", w.name, got, w.line)
+ }
+ }
+}
+
+func TestCallers(t *testing.T) {
+ testCallers(t, f1(false), false)
+}
+
+func TestCallersPanic(t *testing.T) {
+ defer func() {
+ if r := recover(); r == nil {
+ t.Fatal("did not panic")
+ }
+ pcs := make([]uintptr, 20)
+ pcs = pcs[:runtime.Callers(0, pcs)]
+ testCallers(t, pcs, true)
+ }()
+ f1(true)
+}
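
For reference, the runtime.Callers and runtime.CallersFrames pairing that testCallers exercises looks like this in ordinary code (a minimal sketch using only the documented API):

package main

import (
	"fmt"
	"runtime"
)

func main() {
	pcs := make([]uintptr, 20)
	// skip=0 starts at the runtime.Callers frame itself, as in the test.
	n := runtime.Callers(0, pcs)
	frames := runtime.CallersFrames(pcs[:n])
	for {
		frame, more := frames.Next()
		fmt.Printf("%s:%d %s\n", frame.File, frame.Line, frame.Function)
		if !more {
			break
		}
	}
}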
diff --git a/libgo/go/runtime/cgo_mips64x.go b/libgo/go/runtime/cgo_mips64x.go
new file mode 100644
index 00000000000..f718e929126
--- /dev/null
+++ b/libgo/go/runtime/cgo_mips64x.go
@@ -0,0 +1,12 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build mips64 mips64le
+
+package runtime
+
+// crosscall1 calls into the runtime to set up the registers the
+// Go runtime expects and so the symbol it calls needs to be exported
+// for external linking to work.
+//go:cgo_export_static _cgo_reginit
diff --git a/libgo/go/runtime/cgo_mmap.go b/libgo/go/runtime/cgo_mmap.go
index c0396bdde51..a23cc79b7e3 100644
--- a/libgo/go/runtime/cgo_mmap.go
+++ b/libgo/go/runtime/cgo_mmap.go
@@ -1,8 +1,8 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Support for memory sanitizer. See runtime/cgo/mmap.go.
+// Support for memory sanitizer. See runtime/cgo/mmap.go.
// +build linux,amd64
@@ -32,10 +32,10 @@ func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) uns
return sysMmap(addr, n, prot, flags, fd, off)
}
-// sysMmap calls the mmap system call. It is implemented in assembly.
+// sysMmap calls the mmap system call. It is implemented in assembly.
func sysMmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer
-// cgoMmap calls the mmap function in the runtime/cgo package on the
+// callCgoMmap calls the mmap function in the runtime/cgo package
-// using the GCC calling convention. It is implemented in assembly.
+// using the GCC calling convention. It is implemented in assembly.
func callCgoMmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) uintptr
diff --git a/libgo/go/runtime/cgo_ppc64x.go b/libgo/go/runtime/cgo_ppc64x.go
index 6a1b3bb4172..fb2da32c7e0 100644
--- a/libgo/go/runtime/cgo_ppc64x.go
+++ b/libgo/go/runtime/cgo_ppc64x.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/cgocheck.go b/libgo/go/runtime/cgocheck.go
index aebce1506d1..2d064145a41 100644
--- a/libgo/go/runtime/cgocheck.go
+++ b/libgo/go/runtime/cgocheck.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -89,17 +89,25 @@ func cgoCheckSliceCopy(typ *_type, dst, src slice, n int) {
}
// cgoCheckTypedBlock checks the block of memory at src, for up to size bytes,
-// and throws if it finds a Go pointer. The type of the memory is typ,
+// and throws if it finds a Go pointer. The type of the memory is typ,
// and src is off bytes into that type.
//go:nosplit
//go:nowritebarrier
func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) {
+ // Anything past typ.ptrdata is not a pointer.
+ if typ.ptrdata <= off {
+ return
+ }
+ if ptrdataSize := typ.ptrdata - off; size > ptrdataSize {
+ size = ptrdataSize
+ }
+
if typ.kind&kindGCProg == 0 {
cgoCheckBits(src, typ.gcdata, off, size)
return
}
- // The type has a GC program. Try to find GC bits somewhere else.
+ // The type has a GC program. Try to find GC bits somewhere else.
for datap := &firstmoduledata; datap != nil; datap = datap.next {
if cgoInRange(src, datap.data, datap.edata) {
doff := uintptr(src) - datap.data
@@ -148,8 +156,8 @@ func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) {
}
// cgoCheckBits checks the block of memory at src, for up to size
-// bytes, and throws if it finds a Go pointer. The gcbits mark each
-// pointer value. The src pointer is off bytes into the gcbits.
+// bytes, and throws if it finds a Go pointer. The gcbits mark each
+// pointer value. The src pointer is off bytes into the gcbits.
//go:nosplit
//go:nowritebarrier
func cgoCheckBits(src unsafe.Pointer, gcbits *byte, off, size uintptr) {
@@ -184,15 +192,24 @@ func cgoCheckBits(src unsafe.Pointer, gcbits *byte, off, size uintptr) {
// cgoCheckUsingType is like cgoCheckTypedBlock, but is a last ditch
// fall back to look for pointers in src using the type information.
-// We only this when looking at a value on the stack when the type
+// We only use this when looking at a value on the stack when the type
// uses a GC program, because otherwise it's more efficient to use the
-// GC bits. This is called on the system stack.
+// GC bits. This is called on the system stack.
//go:nowritebarrier
//go:systemstack
func cgoCheckUsingType(typ *_type, src unsafe.Pointer, off, size uintptr) {
if typ.kind&kindNoPointers != 0 {
return
}
+
+ // Anything past typ.ptrdata is not a pointer.
+ if typ.ptrdata <= off {
+ return
+ }
+ if ptrdataSize := typ.ptrdata - off; size > ptrdataSize {
+ size = ptrdataSize
+ }
+
if typ.kind&kindGCProg == 0 {
cgoCheckBits(src, typ.gcdata, off, size)
return
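
Both checkers gain the same two-step clamp: return early when the offset is already past the pointer-bearing prefix of the type (typ.ptrdata), otherwise shrink the scan size to what remains. The logic in isolation, as a hypothetical helper over plain integers:

package main

import "fmt"

// clampToPtrdata returns how many bytes of [off, off+size) can still
// contain pointers, given that only the first ptrdata bytes of the
// type hold pointers.
func clampToPtrdata(ptrdata, off, size uintptr) uintptr {
	if ptrdata <= off {
		return 0 // entirely past the pointer-bearing prefix
	}
	if rest := ptrdata - off; size > rest {
		return rest // clamp to the remaining pointer-bearing bytes
	}
	return size
}

func main() {
	fmt.Println(clampToPtrdata(16, 8, 32)) // 8: only bytes 8..16 may hold pointers
	fmt.Println(clampToPtrdata(16, 24, 8)) // 0: offset is past ptrdata
}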
diff --git a/libgo/go/runtime/chan_test.go b/libgo/go/runtime/chan_test.go
index 6553509457d..4bd061dbc7b 100644
--- a/libgo/go/runtime/chan_test.go
+++ b/libgo/go/runtime/chan_test.go
@@ -578,7 +578,7 @@ func TestSelectDuplicateChannel(t *testing.T) {
}
e <- 9
}()
- time.Sleep(time.Millisecond) // make sure goroutine A gets qeueued first on c
+ time.Sleep(time.Millisecond) // make sure goroutine A gets queued first on c
// goroutine B
go func() {
@@ -591,6 +591,84 @@ func TestSelectDuplicateChannel(t *testing.T) {
c <- 8 // wake up B. This operation used to fail because c.recvq was corrupted (it tries to wake up an already running G instead of B)
}
+var selectSink interface{}
+
+func TestSelectStackAdjust(t *testing.T) {
+ // Test that channel receive slots that contain local stack
+ // pointers are adjusted correctly by stack shrinking.
+ c := make(chan *int)
+ d := make(chan *int)
+ ready1 := make(chan bool)
+ ready2 := make(chan bool)
+
+ f := func(ready chan bool, dup bool) {
+ // Temporarily grow the stack to 10K.
+ stackGrowthRecursive((10 << 10) / (128 * 8))
+
+ // We're ready to trigger GC and stack shrink.
+ ready <- true
+
+ val := 42
+ var cx *int
+ cx = &val
+
+ var c2 chan *int
+ var d2 chan *int
+ if dup {
+ c2 = c
+ d2 = d
+ }
+
+ // Receive from d. cx won't be affected.
+ select {
+ case cx = <-c:
+ case <-c2:
+ case <-d:
+ case <-d2:
+ }
+
+ // Check that pointer in cx was adjusted correctly.
+ if cx != &val {
+ t.Error("cx no longer points to val")
+ } else if val != 42 {
+ t.Error("val changed")
+ } else {
+ *cx = 43
+ if val != 43 {
+ t.Error("changing *cx failed to change val")
+ }
+ }
+ ready <- true
+ }
+
+ go f(ready1, false)
+ go f(ready2, true)
+
+ // Let the goroutines get into the select.
+ <-ready1
+ <-ready2
+ time.Sleep(10 * time.Millisecond)
+
+ // Force concurrent GC a few times.
+ var before, after runtime.MemStats
+ runtime.ReadMemStats(&before)
+ for i := 0; i < 100; i++ {
+ selectSink = new([1 << 20]byte)
+ runtime.ReadMemStats(&after)
+ if after.NumGC-before.NumGC >= 2 {
+ goto done
+ }
+ }
+ t.Fatal("failed to trigger concurrent GC")
+done:
+ selectSink = nil
+
+ // Wake selects.
+ close(d)
+ <-ready1
+ <-ready2
+}
+
func BenchmarkChanNonblocking(b *testing.B) {
myc := make(chan int)
b.RunParallel(func(pb *testing.PB) {
@@ -721,7 +799,7 @@ func BenchmarkChanContended(b *testing.B) {
})
}
-func BenchmarkChanSync(b *testing.B) {
+func benchmarkChanSync(b *testing.B, work int) {
const CallsPerSched = 1000
procs := 2
N := int32(b.N / CallsPerSched / procs * procs)
@@ -737,10 +815,14 @@ func BenchmarkChanSync(b *testing.B) {
for g := 0; g < CallsPerSched; g++ {
if i%2 == 0 {
<-myc
+ localWork(work)
myc <- 0
+ localWork(work)
} else {
myc <- 0
+ localWork(work)
<-myc
+ localWork(work)
}
}
}
@@ -752,6 +834,14 @@ func BenchmarkChanSync(b *testing.B) {
}
}
+func BenchmarkChanSync(b *testing.B) {
+ benchmarkChanSync(b, 0)
+}
+
+func BenchmarkChanSyncWork(b *testing.B) {
+ benchmarkChanSync(b, 1000)
+}
+
func benchmarkChanProdCons(b *testing.B, chanSize, localWork int) {
const CallsPerSched = 1000
procs := runtime.GOMAXPROCS(-1)
@@ -925,3 +1015,18 @@ func BenchmarkChanPopular(b *testing.B) {
}
wg.Wait()
}
+
+var (
+ alwaysFalse = false
+ workSink = 0
+)
+
+func localWork(w int) {
+ foo := 0
+ for i := 0; i < w; i++ {
+ foo /= (foo + 1)
+ }
+ if alwaysFalse {
+ workSink += foo
+ }
+}
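
The localWork helper together with the alwaysFalse and workSink globals is the standard trick for keeping benchmark work from being optimized away: the loop result is conditionally stored to a global that the compiler cannot prove dead. The idiom on its own, in a hypothetical benchmark:

package bench

import "testing"

var (
	neverTrue = false // never set, so the store below never executes
	sink      int     // a global the compiler must treat as observable
)

func BenchmarkSpin(b *testing.B) {
	total := 0
	for i := 0; i < b.N; i++ {
		total += i & 7
	}
	if neverTrue {
		sink = total // keeps total, and therefore the loop, live
	}
}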
diff --git a/libgo/go/runtime/chanbarrier_test.go b/libgo/go/runtime/chanbarrier_test.go
index 770b850f874..b6029fb0445 100644
--- a/libgo/go/runtime/chanbarrier_test.go
+++ b/libgo/go/runtime/chanbarrier_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/compiler.go b/libgo/go/runtime/compiler.go
index b04be6181a7..02787ec5580 100644
--- a/libgo/go/runtime/compiler.go
+++ b/libgo/go/runtime/compiler.go
@@ -1,11 +1,11 @@
-// Copyright 2012 The Go Authors. All rights reserved.
+// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
// Compiler is the name of the compiler toolchain that built the
-// running binary. Known toolchains are:
+// running binary. Known toolchains are:
//
// gc Also known as cmd/compile.
// gccgo The gccgo front end, part of the GCC compiler suite.
diff --git a/libgo/go/runtime/crash_cgo_test.go b/libgo/go/runtime/crash_cgo_test.go
index d7b367f941f..2504bd0f415 100644
--- a/libgo/go/runtime/crash_cgo_test.go
+++ b/libgo/go/runtime/crash_cgo_test.go
@@ -7,10 +7,15 @@
package runtime_test
import (
+ "bytes"
+ "fmt"
+ "internal/testenv"
+ "os"
"os/exec"
"runtime"
"strings"
"testing"
+ "time"
)
func TestCgoCrashHandler(t *testing.T) {
@@ -46,6 +51,8 @@ func TestCgoCallbackGC(t *testing.T) {
t.Skip("see golang.org/issue/11990")
case runtime.GOOS == "linux" && runtime.GOARCH == "arm":
t.Skip("too slow for arm builders")
+ case runtime.GOOS == "linux" && (runtime.GOARCH == "mips64" || runtime.GOARCH == "mips64le"):
+ t.Skip("too slow for mips64x builders")
}
}
got := runTestProg(t, "testprogcgo", "CgoCallbackGC")
@@ -147,3 +154,137 @@ func TestEnsureDropM(t *testing.T) {
t.Errorf("expected %q, got %v", want, got)
}
}
+
+// Test for issue 14387.
+// Test that the program that doesn't need any cgo pointer checking
+// takes about the same amount of time with it as without it.
+func TestCgoCheckBytes(t *testing.T) {
+ // Make sure we don't count the build time as part of the run time.
+ testenv.MustHaveGoBuild(t)
+ exe, err := buildTestProg(t, "testprogcgo")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Try it 10 times to avoid flakiness.
+ const tries = 10
+ var tot1, tot2 time.Duration
+ for i := 0; i < tries; i++ {
+ cmd := testEnv(exec.Command(exe, "CgoCheckBytes"))
+ cmd.Env = append(cmd.Env, "GODEBUG=cgocheck=0", fmt.Sprintf("GO_CGOCHECKBYTES_TRY=%d", i))
+
+ start := time.Now()
+ cmd.Run()
+ d1 := time.Since(start)
+
+ cmd = testEnv(exec.Command(exe, "CgoCheckBytes"))
+ cmd.Env = append(cmd.Env, fmt.Sprintf("GO_CGOCHECKBYTES_TRY=%d", i))
+
+ start = time.Now()
+ cmd.Run()
+ d2 := time.Since(start)
+
+ if d1*20 > d2 {
+ // The slow version (d2) was less than 20 times
+ // slower than the fast version (d1), so OK.
+ return
+ }
+
+ tot1 += d1
+ tot2 += d2
+ }
+
+ t.Errorf("cgo check too slow: got %v, expected at most %v", tot2/tries, (tot1/tries)*20)
+}
+
+func TestCgoPanicDeadlock(t *testing.T) {
+ // test issue 14432
+ got := runTestProg(t, "testprogcgo", "CgoPanicDeadlock")
+ want := "panic: cgo error\n\n"
+ if !strings.HasPrefix(got, want) {
+ t.Fatalf("output does not start with %q:\n%s", want, got)
+ }
+}
+
+func TestCgoCCodeSIGPROF(t *testing.T) {
+ got := runTestProg(t, "testprogcgo", "CgoCCodeSIGPROF")
+ want := "OK\n"
+ if got != want {
+ t.Errorf("expected %q got %v", want, got)
+ }
+}
+
+func TestCgoCrashTraceback(t *testing.T) {
+ if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" {
+ t.Skipf("not yet supported on %s/%s", runtime.GOOS, runtime.GOARCH)
+ }
+ got := runTestProg(t, "testprogcgo", "CrashTraceback")
+ for i := 1; i <= 3; i++ {
+ if !strings.Contains(got, fmt.Sprintf("cgo symbolizer:%d", i)) {
+ t.Errorf("missing cgo symbolizer:%d", i)
+ }
+ }
+}
+
+func TestCgoTracebackContext(t *testing.T) {
+ got := runTestProg(t, "testprogcgo", "TracebackContext")
+ want := "OK\n"
+ if got != want {
+ t.Errorf("expected %q got %v", want, got)
+ }
+}
+
+func testCgoPprof(t *testing.T, buildArg, runArg string) {
+ if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" {
+ t.Skipf("not yet supported on %s/%s", runtime.GOOS, runtime.GOARCH)
+ }
+ testenv.MustHaveGoRun(t)
+
+ exe, err := buildTestProg(t, "testprogcgo", buildArg)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ got, err := testEnv(exec.Command(exe, runArg)).CombinedOutput()
+ if err != nil {
+ t.Fatal(err)
+ }
+ fn := strings.TrimSpace(string(got))
+ defer os.Remove(fn)
+
+ cmd := testEnv(exec.Command("go", "tool", "pprof", "-top", "-nodecount=1", exe, fn))
+
+ found := false
+ for i, e := range cmd.Env {
+ if strings.HasPrefix(e, "PPROF_TMPDIR=") {
+ cmd.Env[i] = "PPROF_TMPDIR=" + os.TempDir()
+ found = true
+ break
+ }
+ }
+ if !found {
+ cmd.Env = append(cmd.Env, "PPROF_TMPDIR="+os.TempDir())
+ }
+
+ top, err := cmd.CombinedOutput()
+ t.Logf("%s", top)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Contains(top, []byte("cpuHog")) {
+ t.Error("missing cpuHog in pprof output")
+ }
+}
+
+func TestCgoPprof(t *testing.T) {
+ testCgoPprof(t, "", "CgoPprof")
+}
+
+func TestCgoPprofPIE(t *testing.T) {
+ testCgoPprof(t, "-ldflags=-extldflags=-pie", "CgoPprof")
+}
+
+func TestCgoPprofThread(t *testing.T) {
+ testCgoPprof(t, "", "CgoPprofThread")
+}
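
TestCgoCheckBytes above times the same binary twice, once with GODEBUG=cgocheck=0 and once with checking enabled, and passes if the checked run stays within 20x of the unchecked one. A stripped-down sketch of that measurement, with ./prog standing in for the built test binary:

package main

import (
	"fmt"
	"os"
	"os/exec"
	"time"
)

// timeRun reports the wall-clock time of one run of the placeholder
// binary, with an optional extra environment variable.
func timeRun(extraEnv string) time.Duration {
	cmd := exec.Command("./prog", "CgoCheckBytes")
	cmd.Env = os.Environ()
	if extraEnv != "" {
		cmd.Env = append(cmd.Env, extraEnv)
	}
	start := time.Now()
	cmd.Run() // exit status ignored; only the elapsed time matters here
	return time.Since(start)
}

func main() {
	d1 := timeRun("GODEBUG=cgocheck=0") // pointer checking disabled
	d2 := timeRun("")                   // checking enabled (the default)
	fmt.Printf("unchecked %v, checked %v, within 20x: %v\n", d1, d2, d1*20 > d2)
}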
diff --git a/libgo/go/runtime/crash_nonunix_test.go b/libgo/go/runtime/crash_nonunix_test.go
new file mode 100644
index 00000000000..2ce995c0691
--- /dev/null
+++ b/libgo/go/runtime/crash_nonunix_test.go
@@ -0,0 +1,13 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows plan9 nacl
+
+package runtime_test
+
+import "os"
+
+// sigquit is the signal to send to kill a hanging testdata program.
+// On Unix we send SIGQUIT, but on non-Unix we only have os.Kill.
+var sigquit = os.Kill
diff --git a/libgo/go/runtime/crash_test.go b/libgo/go/runtime/crash_test.go
index 5f0e77b0dc3..a2f7ff7dec8 100644
--- a/libgo/go/runtime/crash_test.go
+++ b/libgo/go/runtime/crash_test.go
@@ -5,6 +5,7 @@
package runtime_test
import (
+ "bytes"
"fmt"
"internal/testenv"
"io/ioutil"
@@ -13,9 +14,11 @@ import (
"path/filepath"
"regexp"
"runtime"
+ "strconv"
"strings"
"sync"
"testing"
+ "time"
)
var toRemove []string
@@ -65,11 +68,48 @@ func runTestProg(t *testing.T, binary, name string) string {
if err != nil {
t.Fatal(err)
}
- got, _ := testEnv(exec.Command(exe, name)).CombinedOutput()
- return string(got)
+
+ cmd := testEnv(exec.Command(exe, name))
+ var b bytes.Buffer
+ cmd.Stdout = &b
+ cmd.Stderr = &b
+ if err := cmd.Start(); err != nil {
+ t.Fatalf("starting %s %s: %v", binary, name, err)
+ }
+
+ // If the process doesn't complete within 1 minute,
+ // assume it is hanging and kill it to get a stack trace.
+ p := cmd.Process
+ done := make(chan bool)
+ go func() {
+ scale := 1
+ // This GOARCH/GOOS test is copied from cmd/dist/test.go.
+ // TODO(iant): Have cmd/dist update the environment variable.
+ if runtime.GOARCH == "arm" || runtime.GOOS == "windows" {
+ scale = 2
+ }
+ if s := os.Getenv("GO_TEST_TIMEOUT_SCALE"); s != "" {
+ if sc, err := strconv.Atoi(s); err == nil {
+ scale = sc
+ }
+ }
+
+ select {
+ case <-done:
+ case <-time.After(time.Duration(scale) * time.Minute):
+ p.Signal(sigquit)
+ }
+ }()
+
+ if err := cmd.Wait(); err != nil {
+ t.Logf("%s %s exit status: %v", binary, name, err)
+ }
+ close(done)
+
+ return b.String()
}
-func buildTestProg(t *testing.T, binary string) (string, error) {
+func buildTestProg(t *testing.T, binary string, flags ...string) (string, error) {
checkStaleRuntime(t)
testprog.Lock()
@@ -86,23 +126,27 @@ func buildTestProg(t *testing.T, binary string) (string, error) {
if testprog.target == nil {
testprog.target = make(map[string]buildexe)
}
- target, ok := testprog.target[binary]
+ name := binary
+ if len(flags) > 0 {
+ name += "_" + strings.Join(flags, "_")
+ }
+ target, ok := testprog.target[name]
if ok {
return target.exe, target.err
}
- exe := filepath.Join(testprog.dir, binary+".exe")
- cmd := exec.Command("go", "build", "-o", exe)
+ exe := filepath.Join(testprog.dir, name+".exe")
+ cmd := exec.Command("go", append([]string{"build", "-o", exe}, flags...)...)
cmd.Dir = "testdata/" + binary
out, err := testEnv(cmd).CombinedOutput()
if err != nil {
exe = ""
- target.err = fmt.Errorf("building %s: %v\n%s", binary, err, out)
- testprog.target[binary] = target
+ target.err = fmt.Errorf("building %s %v: %v\n%s", binary, flags, err, out)
+ testprog.target[name] = target
return "", target.err
}
target.exe = exe
- testprog.target[binary] = target
+ testprog.target[name] = target
return exe, nil
}
@@ -273,6 +317,52 @@ func TestGoexitInPanic(t *testing.T) {
}
}
+// Issue 14965: Runtime panics should be of type runtime.Error
+func TestRuntimePanicWithRuntimeError(t *testing.T) {
+ testCases := [...]func(){
+ 0: func() {
+ var m map[uint64]bool
+ m[1234] = true
+ },
+ 1: func() {
+ ch := make(chan struct{})
+ close(ch)
+ close(ch)
+ },
+ 2: func() {
+ var ch = make(chan struct{})
+ close(ch)
+ ch <- struct{}{}
+ },
+ 3: func() {
+ var s = make([]int, 2)
+ _ = s[2]
+ },
+ 4: func() {
+ n := -1
+ _ = make(chan bool, n)
+ },
+ 5: func() {
+ close((chan bool)(nil))
+ },
+ }
+
+ for i, fn := range testCases {
+ got := panicValue(fn)
+ if _, ok := got.(runtime.Error); !ok {
+ t.Errorf("test #%d: recovered value %v(type %T) does not implement runtime.Error", i, got, got)
+ }
+ }
+}
+
+func panicValue(fn func()) (recovered interface{}) {
+ defer func() {
+ recovered = recover()
+ }()
+ fn()
+ return
+}
+
func TestPanicAfterGoexit(t *testing.T) {
// an uncaught panic should still work after goexit
output := runTestProg(t, "testprog", "PanicAfterGoexit")
@@ -294,9 +384,9 @@ func TestRecoverBeforePanicAfterGoexit(t *testing.T) {
// 1. defer a function that recovers
// 2. defer a function that panics
// 3. call goexit
- // Goexit should run the #2 defer. Its panic
+ // Goexit should run the #2 defer. Its panic
// should be caught by the #1 defer, and execution
- // should resume in the caller. Like the Goexit
+ // should resume in the caller. Like the Goexit
// never happened!
defer func() {
r := recover()
@@ -336,3 +426,59 @@ func TestPanicTraceback(t *testing.T) {
output = output[idx[1]:]
}
}
+
+func testPanicDeadlock(t *testing.T, name string, want string) {
+ // test issue 14432
+ output := runTestProg(t, "testprog", name)
+ if !strings.HasPrefix(output, want) {
+ t.Fatalf("output does not start with %q:\n%s", want, output)
+ }
+}
+
+func TestPanicDeadlockGosched(t *testing.T) {
+ testPanicDeadlock(t, "GoschedInPanic", "panic: errorThatGosched\n\n")
+}
+
+func TestPanicDeadlockSyscall(t *testing.T) {
+ testPanicDeadlock(t, "SyscallInPanic", "1\n2\npanic: 3\n\n")
+}
+
+func TestMemPprof(t *testing.T) {
+ testenv.MustHaveGoRun(t)
+
+ exe, err := buildTestProg(t, "testprog")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ got, err := testEnv(exec.Command(exe, "MemProf")).CombinedOutput()
+ if err != nil {
+ t.Fatal(err)
+ }
+ fn := strings.TrimSpace(string(got))
+ defer os.Remove(fn)
+
+ cmd := testEnv(exec.Command("go", "tool", "pprof", "-alloc_space", "-top", exe, fn))
+
+ found := false
+ for i, e := range cmd.Env {
+ if strings.HasPrefix(e, "PPROF_TMPDIR=") {
+ cmd.Env[i] = "PPROF_TMPDIR=" + os.TempDir()
+ found = true
+ break
+ }
+ }
+ if !found {
+ cmd.Env = append(cmd.Env, "PPROF_TMPDIR="+os.TempDir())
+ }
+
+ top, err := cmd.CombinedOutput()
+ t.Logf("%s", top)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Contains(top, []byte("MemProf")) {
+ t.Error("missing MemProf in pprof output")
+ }
+}
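
The rewritten runTestProg is a watchdog: start the child, wait for it on the main goroutine, and let a helper goroutine signal the process if a scaled timeout expires first, so a hang produces a stack trace instead of a silent test timeout. The same shape in isolation (the binary path and timeout are placeholders):

package main

import (
	"fmt"
	"os"
	"os/exec"
	"time"
)

func runWithWatchdog(timeout time.Duration) error {
	cmd := exec.Command("./testprog", "SomeTest") // placeholder binary
	if err := cmd.Start(); err != nil {
		return err
	}
	done := make(chan struct{})
	go func() {
		select {
		case <-done: // the child exited on its own
		case <-time.After(timeout):
			// The real test sends SIGQUIT on Unix to force a traceback;
			// os.Kill is the portable fallback on other systems.
			cmd.Process.Signal(os.Kill)
		}
	}()
	err := cmd.Wait()
	close(done)
	return err
}

func main() {
	if err := runWithWatchdog(time.Minute); err != nil {
		fmt.Println("exit status:", err)
	}
}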
diff --git a/libgo/go/runtime/crash_unix_test.go b/libgo/go/runtime/crash_unix_test.go
index 771b303f6ee..6e4d04bd200 100644
--- a/libgo/go/runtime/crash_unix_test.go
+++ b/libgo/go/runtime/crash_unix_test.go
@@ -19,6 +19,10 @@ import (
"testing"
)
+// sigquit is the signal to send to kill a hanging testdata program.
+// Send SIGQUIT to get a stack trace.
+var sigquit = syscall.SIGQUIT
+
func TestCrashDumpsAllThreads(t *testing.T) {
switch runtime.GOOS {
case "darwin", "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris":
@@ -149,10 +153,6 @@ func loop(i int, c chan bool) {
func TestSignalExitStatus(t *testing.T) {
testenv.MustHaveGoBuild(t)
- switch runtime.GOOS {
- case "netbsd", "solaris":
- t.Skipf("skipping on %s; see https://golang.org/issue/14063", runtime.GOOS)
- }
exe, err := buildTestProg(t, "testprog")
if err != nil {
t.Fatal(err)
diff --git a/libgo/go/runtime/debug.go b/libgo/go/runtime/debug.go
index 0c915a2a768..56d19d7a4cf 100644
--- a/libgo/go/runtime/debug.go
+++ b/libgo/go/runtime/debug.go
@@ -17,7 +17,7 @@ func LockOSThread()
func UnlockOSThread()
// GOMAXPROCS sets the maximum number of CPUs that can be executing
-// simultaneously and returns the previous setting. If n < 1, it does not
+// simultaneously and returns the previous setting. If n < 1, it does not
// change the current setting.
// The number of logical CPUs on the local machine can be queried with NumCPU.
// This call will go away when the scheduler improves.
diff --git a/libgo/go/runtime/debug/garbage.go b/libgo/go/runtime/debug/garbage.go
index 0f8a44c4c63..8d52837a44a 100644
--- a/libgo/go/runtime/debug/garbage.go
+++ b/libgo/go/runtime/debug/garbage.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Go Authors. All rights reserved.
+// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -149,7 +149,13 @@ func SetPanicOnFault(enabled bool) bool
// WriteHeapDump writes a description of the heap and the objects in
// it to the given file descriptor.
-// The heap dump format is defined at https://golang.org/s/go13heapdump.
+//
+// WriteHeapDump suspends the execution of all goroutines until the heap
+// dump is completely written. Thus, the file descriptor must not be
+// connected to a pipe or socket whose other end is in the same Go
+// process; instead, use a temporary file or network socket.
+//
+// The heap dump format is defined at https://golang.org/s/go15heapdump.
func WriteHeapDump(fd uintptr)
// SetTraceback sets the amount of detail printed by the runtime in
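
The new WriteHeapDump warning is easy to respect in practice: because every goroutine is suspended during the dump, a reader on the other end of an in-process pipe can never drain it, so write to a plain file instead. A minimal sketch using only the documented API:

package main

import (
	"os"
	"runtime/debug"
)

func main() {
	// A regular file; never a pipe or socket read by this same process.
	f, err := os.Create("/tmp/heap.dump")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	debug.WriteHeapDump(f.Fd()) // the world is stopped until this returns
}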
diff --git a/libgo/go/runtime/debug/garbage_test.go b/libgo/go/runtime/debug/garbage_test.go
index 21bf6eb5582..3d07cbbe45f 100644
--- a/libgo/go/runtime/debug/garbage_test.go
+++ b/libgo/go/runtime/debug/garbage_test.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Go Authors. All rights reserved.
+// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/debug/heapdump_test.go b/libgo/go/runtime/debug/heapdump_test.go
index 5761c015b8e..7d5b950895d 100644
--- a/libgo/go/runtime/debug/heapdump_test.go
+++ b/libgo/go/runtime/debug/heapdump_test.go
@@ -38,7 +38,7 @@ type Obj struct {
}
func objfin(x *Obj) {
- println("finalized", x)
+ //println("finalized", x)
}
func TestWriteHeapDumpFinalizers(t *testing.T) {
diff --git a/libgo/go/runtime/debug/stack_test.go b/libgo/go/runtime/debug/stack_test.go
index 0f769ee6cab..5f9f60c94f3 100644
--- a/libgo/go/runtime/debug/stack_test.go
+++ b/libgo/go/runtime/debug/stack_test.go
@@ -57,7 +57,7 @@ func TestStack(t *testing.T) {
}
func check(t *testing.T, line, has string) {
- if strings.Index(line, has) < 0 {
+ if !strings.Contains(line, has) {
t.Errorf("expected %q in %q", has, line)
}
}
diff --git a/libgo/go/runtime/defs_linux_s390x.go b/libgo/go/runtime/defs_linux_s390x.go
new file mode 100644
index 00000000000..5f55d5a8894
--- /dev/null
+++ b/libgo/go/runtime/defs_linux_s390x.go
@@ -0,0 +1,167 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const (
+ _EINTR = 0x4
+ _EAGAIN = 0xb
+ _ENOMEM = 0xc
+
+ _PROT_NONE = 0x0
+ _PROT_READ = 0x1
+ _PROT_WRITE = 0x2
+ _PROT_EXEC = 0x4
+
+ _MAP_ANON = 0x20
+ _MAP_PRIVATE = 0x2
+ _MAP_FIXED = 0x10
+
+ _MADV_DONTNEED = 0x4
+ _MADV_HUGEPAGE = 0xe
+ _MADV_NOHUGEPAGE = 0xf
+
+ _SA_RESTART = 0x10000000
+ _SA_ONSTACK = 0x8000000
+ _SA_SIGINFO = 0x4
+
+ _SIGHUP = 0x1
+ _SIGINT = 0x2
+ _SIGQUIT = 0x3
+ _SIGILL = 0x4
+ _SIGTRAP = 0x5
+ _SIGABRT = 0x6
+ _SIGBUS = 0x7
+ _SIGFPE = 0x8
+ _SIGKILL = 0x9
+ _SIGUSR1 = 0xa
+ _SIGSEGV = 0xb
+ _SIGUSR2 = 0xc
+ _SIGPIPE = 0xd
+ _SIGALRM = 0xe
+ _SIGSTKFLT = 0x10
+ _SIGCHLD = 0x11
+ _SIGCONT = 0x12
+ _SIGSTOP = 0x13
+ _SIGTSTP = 0x14
+ _SIGTTIN = 0x15
+ _SIGTTOU = 0x16
+ _SIGURG = 0x17
+ _SIGXCPU = 0x18
+ _SIGXFSZ = 0x19
+ _SIGVTALRM = 0x1a
+ _SIGPROF = 0x1b
+ _SIGWINCH = 0x1c
+ _SIGIO = 0x1d
+ _SIGPWR = 0x1e
+ _SIGSYS = 0x1f
+
+ _FPE_INTDIV = 0x1
+ _FPE_INTOVF = 0x2
+ _FPE_FLTDIV = 0x3
+ _FPE_FLTOVF = 0x4
+ _FPE_FLTUND = 0x5
+ _FPE_FLTRES = 0x6
+ _FPE_FLTINV = 0x7
+ _FPE_FLTSUB = 0x8
+
+ _BUS_ADRALN = 0x1
+ _BUS_ADRERR = 0x2
+ _BUS_OBJERR = 0x3
+
+ _SEGV_MAPERR = 0x1
+ _SEGV_ACCERR = 0x2
+
+ _ITIMER_REAL = 0x0
+ _ITIMER_VIRTUAL = 0x1
+ _ITIMER_PROF = 0x2
+
+ _EPOLLIN = 0x1
+ _EPOLLOUT = 0x4
+ _EPOLLERR = 0x8
+ _EPOLLHUP = 0x10
+ _EPOLLRDHUP = 0x2000
+ _EPOLLET = 0x80000000
+ _EPOLL_CLOEXEC = 0x80000
+ _EPOLL_CTL_ADD = 0x1
+ _EPOLL_CTL_DEL = 0x2
+ _EPOLL_CTL_MOD = 0x3
+)
+
+type timespec struct {
+ tv_sec int64
+ tv_nsec int64
+}
+
+func (ts *timespec) set_sec(x int64) {
+ ts.tv_sec = x
+}
+
+func (ts *timespec) set_nsec(x int32) {
+ ts.tv_nsec = int64(x)
+}
+
+type timeval struct {
+ tv_sec int64
+ tv_usec int64
+}
+
+func (tv *timeval) set_usec(x int32) {
+ tv.tv_usec = int64(x)
+}
+
+type sigactiont struct {
+ sa_handler uintptr
+ sa_flags uint64
+ sa_restorer uintptr
+ sa_mask uint64
+}
+
+type siginfo struct {
+ si_signo int32
+ si_errno int32
+ si_code int32
+ // below here is a union; si_addr is the only field we use
+ si_addr uint64
+}
+
+type itimerval struct {
+ it_interval timeval
+ it_value timeval
+}
+
+type epollevent struct {
+ events uint32
+ pad_cgo_0 [4]byte
+ data [8]byte // unaligned uintptr
+}
+
+const (
+ _O_RDONLY = 0x0
+ _O_CLOEXEC = 0x80000
+ _SA_RESTORER = 0
+)
+
+type sigaltstackt struct {
+ ss_sp *byte
+ ss_flags int32
+ ss_size uintptr
+}
+
+type sigcontext struct {
+ psw_mask uint64
+ psw_addr uint64
+ gregs [16]uint64
+ aregs [16]uint32
+ fpc uint32
+ fpregs [16]uint64
+}
+
+type ucontext struct {
+ uc_flags uint64
+ uc_link *ucontext
+ uc_stack sigaltstackt
+ uc_mcontext sigcontext
+ uc_sigmask uint64
+}
diff --git a/libgo/go/runtime/defs_plan9_arm.go b/libgo/go/runtime/defs_plan9_arm.go
new file mode 100644
index 00000000000..9c700ae1c0b
--- /dev/null
+++ b/libgo/go/runtime/defs_plan9_arm.go
@@ -0,0 +1,63 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const _PAGESIZE = 0x1000
+
+type ureg struct {
+ r0 uint32 /* general registers */
+ r1 uint32 /* ... */
+ r2 uint32 /* ... */
+ r3 uint32 /* ... */
+ r4 uint32 /* ... */
+ r5 uint32 /* ... */
+ r6 uint32 /* ... */
+ r7 uint32 /* ... */
+ r8 uint32 /* ... */
+ r9 uint32 /* ... */
+ r10 uint32 /* ... */
+ r11 uint32 /* ... */
+ r12 uint32 /* ... */
+ sp uint32
+ link uint32 /* ... */
+ trap uint32 /* trap type */
+ psr uint32
+ pc uint32 /* interrupted addr */
+}
+
+type sigctxt struct {
+ u *ureg
+}
+
+func (c *sigctxt) pc() uintptr { return uintptr(c.u.pc) }
+func (c *sigctxt) sp() uintptr { return uintptr(c.u.sp) }
+func (c *sigctxt) lr() uintptr { return uintptr(c.u.link) }
+
+func (c *sigctxt) setpc(x uintptr) { c.u.pc = uint32(x) }
+func (c *sigctxt) setsp(x uintptr) { c.u.sp = uint32(x) }
+func (c *sigctxt) setlr(x uintptr) { c.u.link = uint32(x) }
+func (c *sigctxt) savelr(x uintptr) { c.u.r0 = uint32(x) }
+
+func dumpregs(u *ureg) {
+ print("r0 ", hex(u.r0), "\n")
+ print("r1 ", hex(u.r1), "\n")
+ print("r2 ", hex(u.r2), "\n")
+ print("r3 ", hex(u.r3), "\n")
+ print("r4 ", hex(u.r4), "\n")
+ print("r5 ", hex(u.r5), "\n")
+ print("r6 ", hex(u.r6), "\n")
+ print("r7 ", hex(u.r7), "\n")
+ print("r8 ", hex(u.r8), "\n")
+ print("r9 ", hex(u.r9), "\n")
+ print("r10 ", hex(u.r10), "\n")
+ print("r11 ", hex(u.r11), "\n")
+ print("r12 ", hex(u.r12), "\n")
+ print("sp ", hex(u.sp), "\n")
+ print("link ", hex(u.link), "\n")
+ print("pc ", hex(u.pc), "\n")
+ print("psr ", hex(u.psr), "\n")
+}
+
+func sigpanictramp()
diff --git a/libgo/go/runtime/error.go b/libgo/go/runtime/error.go
index c4621b6f858..b9c73655255 100644
--- a/libgo/go/runtime/error.go
+++ b/libgo/go/runtime/error.go
@@ -101,11 +101,6 @@ func (e errorString) Error() string {
return "runtime error: " + string(e)
}
-// For calling from C.
-func NewErrorString(s string, ret *interface{}) {
- *ret = errorString(s)
-}
-
// An errorCString represents a runtime error described by a single C string.
// Not "type errorCString uintptr" because of http://golang.org/issue/7084.
type errorCString struct{ cstr uintptr }
@@ -123,6 +118,17 @@ func NewErrorCString(s uintptr, ret *interface{}) {
*ret = errorCString{s}
}
+// plainError represents a runtime error described by a string without
+// the prefix "runtime error: " after invoking errorString.Error().
+// See Issue #14965.
+type plainError string
+
+func (e plainError) RuntimeError() {}
+
+func (e plainError) Error() string {
+ return string(e)
+}
+
type stringer interface {
String() string
}
@@ -152,5 +158,5 @@ func Printany(i interface{}) {
// called from generated code
func panicwrap(pkg, typ, meth string) {
- panic("value method " + pkg + "." + typ + "." + meth + " called using nil *" + typ + " pointer")
+ panic(plainError("value method " + pkg + "." + typ + "." + meth + " called using nil *" + typ + " pointer"))
}
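
The point of plainError is that errorString.Error() prepends "runtime error: " while plainError reports its text verbatim, and both still satisfy the runtime.Error interface via the RuntimeError marker method; panicwrap now uses it so its message carries no extra prefix. A user-level analogue of the two shapes (a sketch outside the runtime package):

package main

import "fmt"

type prefixedError string

func (e prefixedError) Error() string { return "runtime error: " + string(e) }

type plainError string

func (e plainError) Error() string { return string(e) }

func main() {
	fmt.Println(prefixedError("index out of range"))                  // prints with the prefix
	fmt.Println(plainError("value method called using nil pointer")) // prints verbatim
}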
diff --git a/libgo/go/runtime/export_arm_test.go b/libgo/go/runtime/export_arm_test.go
index 446d26465c8..b8a89fc0d22 100644
--- a/libgo/go/runtime/export_arm_test.go
+++ b/libgo/go/runtime/export_arm_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/export_linux_test.go b/libgo/go/runtime/export_linux_test.go
index 4e76600e207..183a6eeee2a 100644
--- a/libgo/go/runtime/export_linux_test.go
+++ b/libgo/go/runtime/export_linux_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/export_mmap_test.go b/libgo/go/runtime/export_mmap_test.go
index 07b0a56e09e..7bde44dc430 100644
--- a/libgo/go/runtime/export_mmap_test.go
+++ b/libgo/go/runtime/export_mmap_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Go Authors. All rights reserved.
+// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/export_test.go b/libgo/go/runtime/export_test.go
index fd328a1d363..7ba217eb782 100644
--- a/libgo/go/runtime/export_test.go
+++ b/libgo/go/runtime/export_test.go
@@ -1,4 +1,4 @@
-// Copyright 2010 The Go Authors. All rights reserved.
+// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -117,6 +117,113 @@ func Close(fd int32) int32 {
return close(fd)
}
+/*
+func RunSchedLocalQueueTest() {
+ _p_ := new(p)
+ gs := make([]g, len(_p_.runq))
+ for i := 0; i < len(_p_.runq); i++ {
+ if g, _ := runqget(_p_); g != nil {
+ throw("runq is not empty initially")
+ }
+ for j := 0; j < i; j++ {
+ runqput(_p_, &gs[i], false)
+ }
+ for j := 0; j < i; j++ {
+ if g, _ := runqget(_p_); g != &gs[i] {
+ print("bad element at iter ", i, "/", j, "\n")
+ throw("bad element")
+ }
+ }
+ if g, _ := runqget(_p_); g != nil {
+ throw("runq is not empty afterwards")
+ }
+ }
+}
+
+func RunSchedLocalQueueStealTest() {
+ p1 := new(p)
+ p2 := new(p)
+ gs := make([]g, len(p1.runq))
+ for i := 0; i < len(p1.runq); i++ {
+ for j := 0; j < i; j++ {
+ gs[j].sig = 0
+ runqput(p1, &gs[j], false)
+ }
+ gp := runqsteal(p2, p1, true)
+ s := 0
+ if gp != nil {
+ s++
+ gp.sig++
+ }
+ for {
+ gp, _ = runqget(p2)
+ if gp == nil {
+ break
+ }
+ s++
+ gp.sig++
+ }
+ for {
+ gp, _ = runqget(p1)
+ if gp == nil {
+ break
+ }
+ gp.sig++
+ }
+ for j := 0; j < i; j++ {
+ if gs[j].sig != 1 {
+ print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
+ throw("bad element")
+ }
+ }
+ if s != i/2 && s != i/2+1 {
+ print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
+ throw("bad steal")
+ }
+ }
+}
+
+func RunSchedLocalQueueEmptyTest(iters int) {
+ // Test that runq is not spuriously reported as empty.
+ // Runq emptiness affects scheduling decisions and spurious emptiness
+ // can lead to underutilization (both runnable Gs and idle Ps coexist
+// for an arbitrarily long time).
+ done := make(chan bool, 1)
+ p := new(p)
+ gs := make([]g, 2)
+ ready := new(uint32)
+ for i := 0; i < iters; i++ {
+ *ready = 0
+ next0 := (i & 1) == 0
+ next1 := (i & 2) == 0
+ runqput(p, &gs[0], next0)
+ go func() {
+ for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
+ }
+ if runqempty(p) {
+ println("next:", next0, next1)
+ throw("queue is empty")
+ }
+ done <- true
+ }()
+ for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
+ }
+ runqput(p, &gs[1], next1)
+ runqget(p)
+ <-done
+ runqget(p)
+ }
+}
+
+var StringHash = stringHash
+var BytesHash = bytesHash
+var Int32Hash = int32Hash
+var Int64Hash = int64Hash
+var EfaceHash = efaceHash
+var IfaceHash = ifaceHash
+var MemclrBytes = memclrBytes
+*/
+
//extern read
func read(fd int32, buf unsafe.Pointer, size int32) int32
@@ -181,3 +288,24 @@ var ForceGCPeriod = &forcegcperiod
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
func SetTracebackEnv(level string)
+
+/*
+var ReadUnaligned32 = readUnaligned32
+var ReadUnaligned64 = readUnaligned64
+
+func CountPagesInUse() (pagesInUse, counted uintptr) {
+ stopTheWorld("CountPagesInUse")
+
+ pagesInUse = uintptr(mheap_.pagesInUse)
+
+ for _, s := range h_allspans {
+ if s.state == mSpanInUse {
+ counted += s.npages
+ }
+ }
+
+ startTheWorld()
+
+ return
+}
+*/
diff --git a/libgo/go/runtime/export_windows_test.go b/libgo/go/runtime/export_windows_test.go
index 7b269ecccb4..536b398fd7a 100644
--- a/libgo/go/runtime/export_windows_test.go
+++ b/libgo/go/runtime/export_windows_test.go
@@ -1,4 +1,4 @@
-// Copyright 2014 The Go Authors. All rights reserved.
+// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -8,7 +8,11 @@ package runtime
import "unsafe"
-var TestingWER = &testingWER
+var (
+ TestingWER = &testingWER
+ OsYield = osyield
+ TimeBeginPeriodRetValue = &timeBeginPeriodRetValue
+)
func NumberOfProcessors() int32 {
var info systeminfo
diff --git a/libgo/go/runtime/extern.go b/libgo/go/runtime/extern.go
index eca54a75145..fbbf56ced70 100644
--- a/libgo/go/runtime/extern.go
+++ b/libgo/go/runtime/extern.go
@@ -82,6 +82,21 @@ It is a comma-separated list of name=val pairs setting these named variables:
If the line ends with "(forced)", this GC was forced by a
runtime.GC() call and all phases are STW.
+ Setting gctrace to any value > 0 also causes the garbage collector
+ to emit a summary when memory is released back to the system.
+ This process of returning memory to the system is called scavenging.
+ The format of this summary is subject to change.
+ Currently it is:
+ scvg#: # MB released printed only if non-zero
+ scvg#: inuse: # idle: # sys: # released: # consumed: # (MB)
+ where the fields are as follows:
+ scvg# the scavenge cycle number, incremented at each scavenge
+ inuse: # MB used or partially used spans
+ idle: # MB spans pending scavenging
+ sys: # MB mapped from the system
+ released: # MB released to the system
+ consumed: # MB allocated from the system
+
memprofilerate: setting memprofilerate=X will update the value of runtime.MemProfileRate.
When set to 0 memory profiling is disabled. Refer to the description of
MemProfileRate for the default value.
@@ -156,65 +171,34 @@ func Gosched()
func Goexit()
// Caller reports file and line number information about function invocations on
-// the calling goroutine's stack. The argument skip is the number of stack frames
+// the calling goroutine's stack. The argument skip is the number of stack frames
// to ascend, with 0 identifying the caller of Caller. (For historical reasons the
// meaning of skip differs between Caller and Callers.) The return values report the
// program counter, file name, and line number within the file of the corresponding
// call. The boolean ok is false if it was not possible to recover the information.
func Caller(skip int) (pc uintptr, file string, line int, ok bool)
-// Callers fills the slice pc with the program counters of function invocations
-// on the calling goroutine's stack. The argument skip is the number of stack frames
+// Callers fills the slice pc with the return program counters of function invocations
+// on the calling goroutine's stack. The argument skip is the number of stack frames
// to skip before recording in pc, with 0 identifying the frame for Callers itself and
// 1 identifying the caller of Callers.
// It returns the number of entries written to pc.
func Callers(skip int, pc []uintptr) int
-type Func struct {
- opaque struct{} // unexported field to disallow conversions
-}
-
-// FuncForPC returns a *Func describing the function that contains the
-// given program counter address, or else nil.
-func FuncForPC(pc uintptr) *Func
-
-// Name returns the name of the function.
-func (f *Func) Name() string {
- return funcname_go(f)
-}
-
-// Entry returns the entry address of the function.
-func (f *Func) Entry() uintptr {
- return funcentry_go(f)
-}
-
-// FileLine returns the file name and line number of the
-// source code corresponding to the program counter pc.
-// The result will not be accurate if pc is not a program
-// counter within f.
-func (f *Func) FileLine(pc uintptr) (file string, line int) {
- return funcline_go(f, pc)
-}
-
-// implemented in symtab.c
-func funcline_go(*Func, uintptr) (string, int)
-func funcname_go(*Func) string
-func funcentry_go(*Func) uintptr
-
-// SetFinalizer sets the finalizer associated with x to f.
-// When the garbage collector finds an unreachable block
+// SetFinalizer sets the finalizer associated with obj to the provided
+// finalizer function. When the garbage collector finds an unreachable block
// with an associated finalizer, it clears the association and runs
-// f(x) in a separate goroutine. This makes x reachable again, but
-// now without an associated finalizer. Assuming that SetFinalizer
+// finalizer(obj) in a separate goroutine. This makes obj reachable again,
+// but now without an associated finalizer. Assuming that SetFinalizer
// is not called again, the next time the garbage collector sees
-// that x is unreachable, it will free x.
+// that obj is unreachable, it will free obj.
//
-// SetFinalizer(x, nil) clears any finalizer associated with x.
+// SetFinalizer(obj, nil) clears any finalizer associated with obj.
//
-// The argument x must be a pointer to an object allocated by
+// The argument obj must be a pointer to an object allocated by
// calling new or by taking the address of a composite literal.
-// The argument f must be a function that takes a single argument
-// to which x's type can be assigned, and can have arbitrary ignored return
+// The argument finalizer must be a function that takes a single argument
+// to which obj's type can be assigned, and can have arbitrary ignored return
// values. If either of these is not true, SetFinalizer aborts the
// program.
//
@@ -226,8 +210,8 @@ func funcentry_go(*Func) uintptr
// is not guaranteed to run, because there is no ordering that
// respects the dependencies.
//
-// The finalizer for x is scheduled to run at some arbitrary time after
-// x becomes unreachable.
+// The finalizer for obj is scheduled to run at some arbitrary time after
+// obj becomes unreachable.
// There is no guarantee that finalizers will run before a program exits,
// so typically they are useful only for releasing non-memory resources
// associated with an object during a long-running program.
@@ -237,13 +221,56 @@ func funcentry_go(*Func) uintptr
// to depend on a finalizer to flush an in-memory I/O buffer such as a
// bufio.Writer, because the buffer would not be flushed at program exit.
//
-// It is not guaranteed that a finalizer will run if the size of *x is
+// It is not guaranteed that a finalizer will run if the size of *obj is
// zero bytes.
//
+// It is not guaranteed that a finalizer will run for objects allocated
+// in initializers for package-level variables. Such objects may be
+// linker-allocated, not heap-allocated.
+//
+// A finalizer may run as soon as an object becomes unreachable.
+// In order to use finalizers correctly, the program must ensure that
+// the object is reachable until it is no longer required.
+// Objects stored in global variables, or that can be found by tracing
+// pointers from a global variable, are reachable. For other objects,
+// pass the object to a call of the KeepAlive function to mark the
+// last point in the function where the object must be reachable.
+//
+// For example, if p points to a struct that contains a file descriptor d,
+// and p has a finalizer that closes that file descriptor, and if the last
+// use of p in a function is a call to syscall.Write(p.d, buf, size), then
+// p may be unreachable as soon as the program enters syscall.Write. The
+// finalizer may run at that moment, closing p.d, causing syscall.Write
+// to fail because it is writing to a closed file descriptor (or, worse,
+// to an entirely different file descriptor opened by a different goroutine).
+// To avoid this problem, call runtime.KeepAlive(p) after the call to
+// syscall.Write.
+//
// A single goroutine runs all finalizers for a program, sequentially.
// If a finalizer must run for a long time, it should do so by starting
// a new goroutine.
-func SetFinalizer(x, f interface{})
+func SetFinalizer(obj interface{}, finalizer interface{})
+
+// KeepAlive marks its argument as currently reachable.
+// This ensures that the object is not freed, and its finalizer is not run,
+// before the point in the program where KeepAlive is called.
+//
+// A very simplified example showing where KeepAlive is required:
+// type File struct { d int }
+// d, err := syscall.Open("/file/path", syscall.O_RDONLY, 0)
+// // ... do something if err != nil ...
+// p := &File{d}
+// runtime.SetFinalizer(p, func(p *File) { syscall.Close(p.d) })
+// var buf [10]byte
+// n, err := syscall.Read(p.d, buf[:])
+// // Ensure p is not finalized until Read returns.
+// runtime.KeepAlive(p)
+// // No more uses of p after this point.
+//
+// Without the KeepAlive call, the finalizer could run at the start of
+// syscall.Read, closing the file descriptor before syscall.Read makes
+// the actual system call.
+func KeepAlive(interface{})
func getgoroot() string
@@ -270,7 +297,7 @@ func Version() string {
const GOOS string = theGoos
// GOARCH is the running program's architecture target:
-// 386, amd64, arm, arm64, ppc64, ppc64le.
+// 386, amd64, arm, or s390x.
const GOARCH string = theGoarch
// GCCGOTOOLDIR is the Tool Dir for the gccgo build
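
A minimal standalone sketch (not part of the patch) of the SetFinalizer/KeepAlive contract documented above. The type and values here are illustrative only, and runtime.GC merely encourages collection; finalizers are never guaranteed to run:

	package main

	import (
		"fmt"
		"runtime"
		"time"
	)

	type res struct{ fd int }

	func main() {
		r := &res{fd: 3}
		runtime.SetFinalizer(r, func(r *res) { fmt.Println("closing fd", r.fd) })
		fmt.Println("using fd", r.fd)
		runtime.KeepAlive(r) // keep r reachable through the use above
		r = nil              // drop the last reference
		runtime.GC()         // encourage collection; finalizers are not guaranteed
		time.Sleep(10 * time.Millisecond)
	}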
diff --git a/libgo/go/runtime/fastlog2_test.go b/libgo/go/runtime/fastlog2_test.go
index 8f92dc66942..6e9fcd4d45e 100644
--- a/libgo/go/runtime/fastlog2_test.go
+++ b/libgo/go/runtime/fastlog2_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/gc_test.go b/libgo/go/runtime/gc_test.go
index 71d46561e03..11035c321ab 100644
--- a/libgo/go/runtime/gc_test.go
+++ b/libgo/go/runtime/gc_test.go
@@ -477,4 +477,21 @@ func testIfaceEqual(x interface{}) {
}
}
+func TestPageAccounting(t *testing.T) {
+ // Grow the heap in small increments. This used to drop the
+ // pages-in-use count below zero because of a rounding
+ // mismatch (golang.org/issue/15022).
+ const blockSize = 64 << 10
+ blocks := make([]*[blockSize]byte, (64<<20)/blockSize)
+ for i := range blocks {
+ blocks[i] = new([blockSize]byte)
+ }
+
+ // Check that the running page count matches reality.
+ pagesInUse, counted := runtime.CountPagesInUse()
+ if pagesInUse != counted {
+ t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
+ }
+}
+
*/
diff --git a/libgo/go/runtime/gcinfo_test.go b/libgo/go/runtime/gcinfo_test.go
index d3262a656c5..2253d93a0ac 100644
--- a/libgo/go/runtime/gcinfo_test.go
+++ b/libgo/go/runtime/gcinfo_test.go
@@ -45,14 +45,14 @@ func TestGCInfo(t *testing.T) {
func verifyGCInfo(t *testing.T, name string, p interface{}, mask0 []byte) {
mask := runtime.GCMask(p)
- if bytes.Compare(mask, mask0) != 0 {
+ if !bytes.Equal(mask, mask0) {
t.Errorf("bad GC program for %v:\nwant %+v\ngot %+v", name, mask0, mask)
return
}
}
func padDead(mask []byte) []byte {
- // Because the dead bit isn't encoded until the third word,
+ // Because the dead bit isn't encoded in the second word,
// and because on 32-bit systems a one-word allocation
// uses a two-word block, the pointer info for a one-word
// object needs to be expanded to include an extra scalar
@@ -67,6 +67,9 @@ func trimDead(mask []byte) []byte {
for len(mask) > 2 && mask[len(mask)-1] == typeScalar {
mask = mask[:len(mask)-1]
}
+ if len(mask) == 2 && mask[0] == typeScalar && mask[1] == typeScalar {
+ mask = mask[:0]
+ }
return mask
}
@@ -130,7 +133,7 @@ func infoBigStruct() []byte {
typeScalar, typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64
typePointer, typeScalar, // i string
}
- case "arm64", "amd64", "mips64", "mips64le", "ppc64", "ppc64le":
+ case "arm64", "amd64", "mips64", "mips64le", "ppc64", "ppc64le", "s390x":
return []byte{
typePointer, // q *int
typeScalar, typeScalar, typeScalar, // w byte; e [17]byte
diff --git a/libgo/go/runtime/lfstack_64bit.go b/libgo/go/runtime/lfstack_64bit.go
new file mode 100644
index 00000000000..5367f08c561
--- /dev/null
+++ b/libgo/go/runtime/lfstack_64bit.go
@@ -0,0 +1,48 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64 arm64 mips64 mips64le ppc64 ppc64le s390x
+
+package runtime
+
+import "unsafe"
+
+const (
+ // addrBits is the number of bits needed to represent a virtual address.
+ //
+ // In Linux the user address space for each architecture is limited as
+ // follows (taken from the processor.h file for the architecture):
+ //
+ // Architecture Name Maximum Value (exclusive)
+ // ---------------------------------------------------------------------
+ // arm64 TASK_SIZE_64 Depends on configuration.
+ // ppc64{,le} TASK_SIZE_USER64 0x400000000000UL (46 bit addresses)
+ // mips64{,le} TASK_SIZE64 0x010000000000UL (40 bit addresses)
+ // s390x TASK_SIZE 0x020000000000UL (41 bit addresses)
+ //
+ // These values may increase over time.
+ //
+ // On AMD64, virtual addresses are 48-bit numbers sign extended to 64.
+ // We shift the address left 16 to eliminate the sign extended part and make
+ // room in the bottom for the count.
+ addrBits = 48
+
+ // In addition to the 16 bits taken from the top, we can take 3 from the
+ // bottom, because node must be pointer-aligned, giving a total of 19 bits
+ // of count.
+ cntBits = 64 - addrBits + 3
+)
+
+func lfstackPack(node *lfnode, cnt uintptr) uint64 {
+ return uint64(uintptr(unsafe.Pointer(node)))<<(64-addrBits) | uint64(cnt&(1<<cntBits-1))
+}
+
+func lfstackUnpack(val uint64) *lfnode {
+ if GOARCH == "amd64" {
+ // amd64 systems can place the stack above the VA hole, so we need to sign extend
+ // val before unpacking.
+ return (*lfnode)(unsafe.Pointer(uintptr(int64(val) >> cntBits << 3)))
+ }
+ return (*lfnode)(unsafe.Pointer(uintptr(val >> cntBits << 3)))
+}
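
A standalone sketch of the pack/unpack arithmetic above, using the same constants but plain integers instead of pointers: the 48-bit address is shifted left 16, its 3 zero alignment bits make room for the top of the 19-bit counter, and the round trip recovers both fields:

	package main

	import "fmt"

	const (
		addrBits = 48
		cntBits  = 64 - addrBits + 3 // 19 bits of ABA counter
	)

	func pack(addr, cnt uint64) uint64 {
		return addr<<(64-addrBits) | cnt&(1<<cntBits-1)
	}

	func unpack(val uint64) (addr, cnt uint64) {
		// >>cntBits drops the counter; <<3 restores the alignment bits.
		return val >> cntBits << 3, val & (1<<cntBits - 1)
	}

	func main() {
		addr := uint64(0x7fffdeadbee8) // 8-byte aligned, fits in 48 bits
		v := pack(addr, 123456)
		a, c := unpack(v)
		fmt.Printf("%#x %d\n", a, c) // 0x7fffdeadbee8 123456
	}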
diff --git a/libgo/go/runtime/lfstack_linux_mips64x.go b/libgo/go/runtime/lfstack_linux_mips64x.go
deleted file mode 100644
index 49b65585f42..00000000000
--- a/libgo/go/runtime/lfstack_linux_mips64x.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build mips64 mips64le
-// +build linux
-
-package runtime
-
-import "unsafe"
-
-// On mips64, Linux limits the user address space to 40 bits (see
-// TASK_SIZE64 in the Linux kernel). This has grown over time,
-// so here we allow 48 bit addresses.
-//
-// In addition to the 16 bits taken from the top, we can take 3 from the
-// bottom, because node must be pointer-aligned, giving a total of 19 bits
-// of count.
-const (
- addrBits = 48
- cntBits = 64 - addrBits + 3
-)
-
-func lfstackPack(node *lfnode, cnt uintptr) uint64 {
- return uint64(uintptr(unsafe.Pointer(node)))<<(64-addrBits) | uint64(cnt&(1<<cntBits-1))
-}
-
-func lfstackUnpack(val uint64) (node *lfnode, cnt uintptr) {
- node = (*lfnode)(unsafe.Pointer(uintptr(val >> cntBits << 3)))
- cnt = uintptr(val & (1<<cntBits - 1))
- return
-}
diff --git a/libgo/go/runtime/map_test.go b/libgo/go/runtime/map_test.go
index ed0347a453a..95200a47481 100644
--- a/libgo/go/runtime/map_test.go
+++ b/libgo/go/runtime/map_test.go
@@ -329,6 +329,22 @@ func TestBigItems(t *testing.T) {
}
}
+func TestMapHugeZero(t *testing.T) {
+ type T [4000]byte
+ m := map[int]T{}
+ x := m[0]
+ if x != (T{}) {
+ t.Errorf("map value not zero")
+ }
+ y, ok := m[0]
+ if ok {
+ t.Errorf("map value should be missing")
+ }
+ if y != (T{}) {
+ t.Errorf("map value not zero")
+ }
+}
+
type empty struct {
}
diff --git a/libgo/go/runtime/mmap.go b/libgo/go/runtime/mmap.go
index a0768428b40..53617e41e4a 100644
--- a/libgo/go/runtime/mmap.go
+++ b/libgo/go/runtime/mmap.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -12,5 +12,8 @@ package runtime
import "unsafe"
-// mmap calls the mmap system call. It is implemented in assembly.
+// mmap calls the mmap system call. It is implemented in assembly.
+// We only pass the lower 32 bits of file offset to the
+// assembly routine; the higher bits (if required) should be provided
+// by the assembly routine as 0.
func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer
diff --git a/libgo/go/runtime/msan.go b/libgo/go/runtime/msan.go
index 4dbdf05b21c..7177c8e611b 100644
--- a/libgo/go/runtime/msan.go
+++ b/libgo/go/runtime/msan.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -24,10 +24,10 @@ func MSanWrite(addr unsafe.Pointer, len int) {
const msanenabled = true
// If we are running on the system stack, the C program may have
-// marked part of that stack as uninitialized. We don't instrument
+// marked part of that stack as uninitialized. We don't instrument
// the runtime, but operations like a slice copy can call msanread
-// anyhow for values on the stack. Just ignore msanread when running
-// on the system stack. The other msan functions are fine.
+// anyhow for values on the stack. Just ignore msanread when running
+// on the system stack. The other msan functions are fine.
func msanread(addr unsafe.Pointer, sz uintptr) {
g := getg()
if g == g.m.g0 || g == g.m.gsignal {
diff --git a/libgo/go/runtime/msan0.go b/libgo/go/runtime/msan0.go
index e2067206976..117c5e5789c 100644
--- a/libgo/go/runtime/msan0.go
+++ b/libgo/go/runtime/msan0.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/mstkbar.go b/libgo/go/runtime/mstkbar.go
index 016625ae925..1bf9d573b70 100644
--- a/libgo/go/runtime/mstkbar.go
+++ b/libgo/go/runtime/mstkbar.go
@@ -214,14 +214,15 @@ func gcInstallStackBarrier(gp *g, frame *stkframe) bool {
}
// gcRemoveStackBarriers removes all stack barriers installed in gp's stack.
+//
+// gp's stack barriers must be locked.
+//
//go:nowritebarrier
func gcRemoveStackBarriers(gp *g) {
if debugStackBarrier && gp.stkbarPos != 0 {
print("hit ", gp.stkbarPos, " stack barriers, goid=", gp.goid, "\n")
}
- gcLockStackBarriers(gp)
-
// Remove stack barriers that we didn't hit.
for _, stkbar := range gp.stkbar[gp.stkbarPos:] {
gcRemoveStackBarrier(gp, stkbar)
@@ -231,8 +232,6 @@ func gcRemoveStackBarriers(gp *g) {
// adjust them.
gp.stkbarPos = 0
gp.stkbar = gp.stkbar[:0]
-
- gcUnlockStackBarriers(gp)
}
// gcRemoveStackBarrier removes a single stack barrier. It is the
@@ -258,6 +257,31 @@ func gcRemoveStackBarrier(gp *g, stkbar stkbar) {
*lrPtr = sys.Uintreg(stkbar.savedLRVal)
}
+// gcTryRemoveAllStackBarriers tries to remove stack barriers from all
+// Gs in gps. It is best-effort and efficient. If it can't remove
+// barriers from a G immediately, it will simply skip it.
+func gcTryRemoveAllStackBarriers(gps []*g) {
+ for _, gp := range gps {
+ retry:
+ for {
+ switch s := readgstatus(gp); s {
+ default:
+ break retry
+
+ case _Grunnable, _Gsyscall, _Gwaiting:
+ if !castogscanstatus(gp, s, s|_Gscan) {
+ continue
+ }
+ gcLockStackBarriers(gp)
+ gcRemoveStackBarriers(gp)
+ gcUnlockStackBarriers(gp)
+ restartg(gp)
+ break retry
+ }
+ }
+ }
+}
+
// gcPrintStkbars prints the stack barriers of gp for debugging. It
// places a "@@@" marker at gp.stkbarPos. If marker >= 0, it will also
// place a "==>" marker before the marker'th entry.
diff --git a/libgo/go/runtime/norace_test.go b/libgo/go/runtime/norace_test.go
index 3681bf190d2..e9b39b2f455 100644
--- a/libgo/go/runtime/norace_test.go
+++ b/libgo/go/runtime/norace_test.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// The file contains tests that can not run under race detector for some reason.
+// The file contains tests that cannot run under race detector for some reason.
// +build !race
package runtime_test
diff --git a/libgo/go/runtime/os1_linux_generic.go b/libgo/go/runtime/os1_linux_generic.go
deleted file mode 100644
index 2c8b743aeb0..00000000000
--- a/libgo/go/runtime/os1_linux_generic.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !mips64
-// +build !mips64le
-// +build linux
-
-package runtime
-
-var sigset_all = sigset{^uint32(0), ^uint32(0)}
-
-func sigaddset(mask *sigset, i int) {
- (*mask)[(i-1)/32] |= 1 << ((uint32(i) - 1) & 31)
-}
-
-func sigdelset(mask *sigset, i int) {
- (*mask)[(i-1)/32] &^= 1 << ((uint32(i) - 1) & 31)
-}
-
-func sigfillset(mask *uint64) {
- *mask = ^uint64(0)
-}
-
-func sigcopyset(mask *sigset, m sigmask) {
- copy((*mask)[:], m[:])
-}
diff --git a/libgo/go/runtime/os1_linux_mips64x.go b/libgo/go/runtime/os1_linux_mips64x.go
deleted file mode 100644
index 701e9791026..00000000000
--- a/libgo/go/runtime/os1_linux_mips64x.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build mips64 mips64le
-// +build linux
-
-package runtime
-
-var sigset_all = sigset{^uint64(0), ^uint64(0)}
-
-func sigaddset(mask *sigset, i int) {
- (*mask)[(i-1)/64] |= 1 << ((uint32(i) - 1) & 63)
-}
-
-func sigdelset(mask *sigset, i int) {
- (*mask)[(i-1)/64] &^= 1 << ((uint32(i) - 1) & 63)
-}
-
-func sigfillset(mask *[2]uint64) {
- (*mask)[0], (*mask)[1] = ^uint64(0), ^uint64(0)
-}
-
-func sigcopyset(mask *sigset, m sigmask) {
- (*mask)[0] = uint64(m[0]) | uint64(m[1])<<32
-}
diff --git a/libgo/go/runtime/os2_linux_mips64x.go b/libgo/go/runtime/os2_linux_mips64x.go
deleted file mode 100644
index 9a6a92a87d3..00000000000
--- a/libgo/go/runtime/os2_linux_mips64x.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux
-// +build mips64 mips64le
-
-package runtime
-
-const (
- _SS_DISABLE = 2
- _NSIG = 65
- _SI_USER = 0
- _SIG_BLOCK = 1
- _SIG_UNBLOCK = 2
- _SIG_SETMASK = 3
- _RLIMIT_AS = 6
-)
-
-type sigset [2]uint64
-
-type rlimit struct {
- rlim_cur uintptr
- rlim_max uintptr
-}
diff --git a/libgo/go/runtime/os2_linux_generic.go b/libgo/go/runtime/os_linux_generic.go
index 01e6c8a5ec8..a16d1407761 100644
--- a/libgo/go/runtime/os2_linux_generic.go
+++ b/libgo/go/runtime/os_linux_generic.go
@@ -4,6 +4,7 @@
// +build !mips64
// +build !mips64le
+// +build !s390x
// +build linux
package runtime
@@ -27,3 +28,21 @@ type rlimit struct {
rlim_cur uintptr
rlim_max uintptr
}
+
+var sigset_all = sigset{^uint32(0), ^uint32(0)}
+
+func sigaddset(mask *sigset, i int) {
+ (*mask)[(i-1)/32] |= 1 << ((uint32(i) - 1) & 31)
+}
+
+func sigdelset(mask *sigset, i int) {
+ (*mask)[(i-1)/32] &^= 1 << ((uint32(i) - 1) & 31)
+}
+
+func sigfillset(mask *uint64) {
+ *mask = ^uint64(0)
+}
+
+func sigcopyset(mask *sigset, m sigmask) {
+ copy((*mask)[:], m[:])
+}
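
The mask helpers above index a signal into a 32-bit word and a bit position. A standalone sketch of the same arithmetic (signal numbers are 1-based, so signal i lives at bit (i-1)%32 of word (i-1)/32):

	package main

	import "fmt"

	type sigset [2]uint32

	func sigaddset(mask *sigset, i int) {
		(*mask)[(i-1)/32] |= 1 << ((uint32(i) - 1) & 31)
	}

	func sigdelset(mask *sigset, i int) {
		(*mask)[(i-1)/32] &^= 1 << ((uint32(i) - 1) & 31)
	}

	func main() {
		var m sigset
		sigaddset(&m, 2)  // SIGINT: word 0, bit 1
		sigaddset(&m, 34) // first real-time signal: word 1, bit 1
		fmt.Printf("%#x %#x\n", m[0], m[1]) // 0x2 0x2
		sigdelset(&m, 2)
		fmt.Printf("%#x %#x\n", m[0], m[1]) // 0x0 0x2
	}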
diff --git a/libgo/go/runtime/os_linux_mips64x.go b/libgo/go/runtime/os_linux_mips64x.go
index 4d2e9e8a20a..8039b2fac9b 100644
--- a/libgo/go/runtime/os_linux_mips64x.go
+++ b/libgo/go/runtime/os_linux_mips64x.go
@@ -2,13 +2,24 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build mips64 mips64le
// +build linux
+// +build mips64 mips64le
package runtime
var randomNumber uint32
+func archauxv(tag, val uintptr) {
+ switch tag {
+ case _AT_RANDOM:
+ // sysargs filled in startupRandomData, but that
+ // pointer may not be word aligned, so we must treat
+ // it as a byte array.
+ randomNumber = uint32(startupRandomData[4]) | uint32(startupRandomData[5])<<8 |
+ uint32(startupRandomData[6])<<16 | uint32(startupRandomData[7])<<24
+ }
+}
+
//go:nosplit
func cputicks() int64 {
// Currently cputicks() is used in blocking profiler and to seed fastrand1().
@@ -16,3 +27,38 @@ func cputicks() int64 {
// randomNumber provides better seeding of fastrand1.
return nanotime() + int64(randomNumber)
}
+
+const (
+ _SS_DISABLE = 2
+ _NSIG = 65
+ _SI_USER = 0
+ _SIG_BLOCK = 1
+ _SIG_UNBLOCK = 2
+ _SIG_SETMASK = 3
+ _RLIMIT_AS = 6
+)
+
+type sigset [2]uint64
+
+type rlimit struct {
+ rlim_cur uintptr
+ rlim_max uintptr
+}
+
+var sigset_all = sigset{^uint64(0), ^uint64(0)}
+
+func sigaddset(mask *sigset, i int) {
+ (*mask)[(i-1)/64] |= 1 << ((uint32(i) - 1) & 63)
+}
+
+func sigdelset(mask *sigset, i int) {
+ (*mask)[(i-1)/64] &^= 1 << ((uint32(i) - 1) & 63)
+}
+
+func sigfillset(mask *[2]uint64) {
+ (*mask)[0], (*mask)[1] = ^uint64(0), ^uint64(0)
+}
+
+func sigcopyset(mask *sigset, m sigmask) {
+ (*mask)[0] = uint64(m[0]) | uint64(m[1])<<32
+}
diff --git a/libgo/go/runtime/os_linux_noauxv.go b/libgo/go/runtime/os_linux_noauxv.go
new file mode 100644
index 00000000000..22522dd803d
--- /dev/null
+++ b/libgo/go/runtime/os_linux_noauxv.go
@@ -0,0 +1,10 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64,!arm,!arm64,!mips64,!mips64le
+
+package runtime
+
+func archauxv(tag, val uintptr) {
+}
diff --git a/libgo/go/runtime/os_linux_s390x.go b/libgo/go/runtime/os_linux_s390x.go
new file mode 100644
index 00000000000..e659dff7169
--- /dev/null
+++ b/libgo/go/runtime/os_linux_s390x.go
@@ -0,0 +1,46 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const (
+ _SS_DISABLE = 2
+ _NSIG = 65
+ _SI_USER = 0
+ _SIG_BLOCK = 0
+ _SIG_UNBLOCK = 1
+ _SIG_SETMASK = 2
+ _RLIMIT_AS = 9
+)
+
+type sigset uint64
+
+type rlimit struct {
+ rlim_cur uintptr
+ rlim_max uintptr
+}
+
+var sigset_all = sigset(^uint64(0))
+
+func sigaddset(mask *sigset, i int) {
+ if i > 64 {
+ throw("unexpected signal greater than 64")
+ }
+ *mask |= 1 << (uint(i) - 1)
+}
+
+func sigdelset(mask *sigset, i int) {
+ if i > 64 {
+ throw("unexpected signal greater than 64")
+ }
+ *mask &^= 1 << (uint(i) - 1)
+}
+
+func sigfillset(mask *uint64) {
+ *mask = ^uint64(0)
+}
+
+func sigcopyset(mask *sigset, m sigmask) {
+ *mask = sigset(uint64(m[0]) | uint64(m[1])<<32)
+}
diff --git a/libgo/go/runtime/os_netbsd_386.go b/libgo/go/runtime/os_netbsd_386.go
new file mode 100644
index 00000000000..037f7e36dc6
--- /dev/null
+++ b/libgo/go/runtime/os_netbsd_386.go
@@ -0,0 +1,16 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr) {
+ // Machine dependent mcontext initialisation for LWP.
+ mc.__gregs[_REG_EIP] = uint32(funcPC(lwp_tramp))
+ mc.__gregs[_REG_UESP] = uint32(uintptr(stk))
+ mc.__gregs[_REG_EBX] = uint32(uintptr(unsafe.Pointer(mp)))
+ mc.__gregs[_REG_EDX] = uint32(uintptr(unsafe.Pointer(gp)))
+ mc.__gregs[_REG_ESI] = uint32(fn)
+}
diff --git a/libgo/go/runtime/os_netbsd_amd64.go b/libgo/go/runtime/os_netbsd_amd64.go
new file mode 100644
index 00000000000..5118b0c4ffd
--- /dev/null
+++ b/libgo/go/runtime/os_netbsd_amd64.go
@@ -0,0 +1,16 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr) {
+ // Machine dependent mcontext initialisation for LWP.
+ mc.__gregs[_REG_RIP] = uint64(funcPC(lwp_tramp))
+ mc.__gregs[_REG_RSP] = uint64(uintptr(stk))
+ mc.__gregs[_REG_R8] = uint64(uintptr(unsafe.Pointer(mp)))
+ mc.__gregs[_REG_R9] = uint64(uintptr(unsafe.Pointer(gp)))
+ mc.__gregs[_REG_R12] = uint64(fn)
+}
diff --git a/libgo/go/runtime/os_plan9_arm.go b/libgo/go/runtime/os_plan9_arm.go
new file mode 100644
index 00000000000..30cde8f74b0
--- /dev/null
+++ b/libgo/go/runtime/os_plan9_arm.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+func checkgoarm() {
+ return // TODO(minux)
+}
+
+//go:nosplit
+func cputicks() int64 {
+ // Currently cputicks() is used in blocking profiler and to seed runtime·fastrand1().
+ // runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
+ // TODO: need more entropy to better seed fastrand1.
+ return nanotime()
+}
diff --git a/libgo/go/runtime/parfor_test.go b/libgo/go/runtime/parfor_test.go
deleted file mode 100644
index 5d22aecc9bb..00000000000
--- a/libgo/go/runtime/parfor_test.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// The race detector does not understand ParFor synchronization.
-// +build !race
-
-package runtime_test
-
-import (
- . "runtime"
- "testing"
-)
-
-// Simple serial sanity test for parallelfor.
-func TestParFor(t *testing.T) {
- const P = 1
- const N = 20
- data := make([]uint64, N)
- for i := uint64(0); i < N; i++ {
- data[i] = i
- }
- desc := NewParFor(P)
- ParForSetup(desc, P, N, true, func(desc *ParFor, i uint32) {
- data[i] = data[i]*data[i] + 1
- })
- ParForDo(desc)
- for i := uint64(0); i < N; i++ {
- if data[i] != i*i+1 {
- t.Fatalf("Wrong element %d: %d", i, data[i])
- }
- }
-}
-
-// Test that nonblocking parallelfor does not block.
-func TestParFor2(t *testing.T) {
- const P = 7
- const N = 1003
- data := make([]uint64, N)
- for i := uint64(0); i < N; i++ {
- data[i] = i
- }
- desc := NewParFor(P)
- ParForSetup(desc, P, N, false, func(desc *ParFor, i uint32) {
- data[i] = data[i]*data[i] + 1
- })
- for p := 0; p < P; p++ {
- ParForDo(desc)
- }
- for i := uint64(0); i < N; i++ {
- if data[i] != i*i+1 {
- t.Fatalf("Wrong element %d: %d", i, data[i])
- }
- }
-}
-
-// Test that iterations are properly distributed.
-func TestParForSetup(t *testing.T) {
- const P = 11
- const N = 101
- desc := NewParFor(P)
- for n := uint32(0); n < N; n++ {
- for p := uint32(1); p <= P; p++ {
- ParForSetup(desc, p, n, true, func(desc *ParFor, i uint32) {})
- sum := uint32(0)
- size0 := uint32(0)
- end0 := uint32(0)
- for i := uint32(0); i < p; i++ {
- begin, end := ParForIters(desc, i)
- size := end - begin
- sum += size
- if i == 0 {
- size0 = size
- if begin != 0 {
- t.Fatalf("incorrect begin: %d (n=%d, p=%d)", begin, n, p)
- }
- } else {
- if size != size0 && size != size0+1 {
- t.Fatalf("incorrect size: %d/%d (n=%d, p=%d)", size, size0, n, p)
- }
- if begin != end0 {
- t.Fatalf("incorrect begin/end: %d/%d (n=%d, p=%d)", begin, end0, n, p)
- }
- }
- end0 = end
- }
- if sum != n {
- t.Fatalf("incorrect sum: %d/%d (p=%d)", sum, n, p)
- }
- }
- }
-}
-
-// Test parallel parallelfor.
-func TestParForParallel(t *testing.T) {
- N := uint64(1e7)
- if testing.Short() {
- N /= 10
- }
- data := make([]uint64, N)
- for i := uint64(0); i < N; i++ {
- data[i] = i
- }
- P := GOMAXPROCS(-1)
- c := make(chan bool, P)
- desc := NewParFor(uint32(P))
- ParForSetup(desc, uint32(P), uint32(N), false, func(desc *ParFor, i uint32) {
- data[i] = data[i]*data[i] + 1
- })
- for p := 1; p < P; p++ {
- go func() {
- ParForDo(desc)
- c <- true
- }()
- }
- ParForDo(desc)
- for p := 1; p < P; p++ {
- <-c
- }
- for i := uint64(0); i < N; i++ {
- if data[i] != i*i+1 {
- t.Fatalf("Wrong element %d: %d", i, data[i])
- }
- }
-
- data, desc = nil, nil
- GC()
-}
diff --git a/libgo/go/runtime/pprof/mprof_test.go b/libgo/go/runtime/pprof/mprof_test.go
index bfa7b3b4747..54daefa23d7 100644
--- a/libgo/go/runtime/pprof/mprof_test.go
+++ b/libgo/go/runtime/pprof/mprof_test.go
@@ -1,4 +1,4 @@
-// Copyright 2014 The Go Authors. All rights reserved.
+// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/pprof/pprof.go b/libgo/go/runtime/pprof/pprof.go
index 18e99366fb3..d2e5b63fedb 100644
--- a/libgo/go/runtime/pprof/pprof.go
+++ b/libgo/go/runtime/pprof/pprof.go
@@ -1,11 +1,11 @@
-// Copyright 2010 The Go Authors. All rights reserved.
+// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package pprof writes runtime profiling data in the format expected
// by the pprof visualization tool.
// For more information about pprof, see
-// http://code.google.com/p/google-perftools/.
+// http://github.com/google/pprof/.
package pprof
import (
@@ -13,6 +13,7 @@ import (
"bytes"
"fmt"
"io"
+ "os"
"runtime"
"sort"
"strings"
@@ -31,7 +32,7 @@ import (
//
// A Profile's methods can be called from multiple goroutines simultaneously.
//
-// Each Profile has a unique name. A few profiles are predefined:
+// Each Profile has a unique name. A few profiles are predefined:
//
// goroutine - stack traces of all current goroutines
// heap - a sampling of all heap allocations
@@ -48,7 +49,7 @@ import (
// all known allocations. This exception helps mainly in programs running
// without garbage collection enabled, usually for debugging purposes.
//
-// The CPU profile is not available as a Profile. It has a special API,
+// The CPU profile is not available as a Profile. It has a special API,
// the StartCPUProfile and StopCPUProfile functions, because it streams
// output to a writer during profiling.
//
@@ -173,11 +174,11 @@ func (p *Profile) Count() int {
// Add adds the current execution stack to the profile, associated with value.
// Add stores value in an internal map, so value must be suitable for use as
// a map key and will not be garbage collected until the corresponding
-// call to Remove. Add panics if the profile already contains a stack for value.
+// call to Remove. Add panics if the profile already contains a stack for value.
//
// The skip parameter has the same meaning as runtime.Caller's skip
-// and controls where the stack trace begins. Passing skip=0 begins the
-// trace in the function calling Add. For example, given this
+// and controls where the stack trace begins. Passing skip=0 begins the
+// trace in the function calling Add. For example, given this
// execution stack:
//
// Add
@@ -266,7 +267,7 @@ func (x stackProfile) Less(i, j int) bool {
}
// A countProfile is a set of stack traces to be printed as counts
-// grouped by stack trace. There are multiple implementations:
+// grouped by stack trace. There are multiple implementations:
// all that matters is that we can find out how many traces there are
// and obtain each trace in turn.
type countProfile interface {
@@ -296,22 +297,25 @@ func printCountProfile(w io.Writer, debug int, name string, p countProfile) erro
}
return buf.String()
}
- m := map[string]int{}
+ count := map[string]int{}
+ index := map[string]int{}
+ var keys []string
n := p.Len()
for i := 0; i < n; i++ {
- m[key(p.Stack(i))]++
+ k := key(p.Stack(i))
+ if count[k] == 0 {
+ index[k] = i
+ keys = append(keys, k)
+ }
+ count[k]++
}
- // Print stacks, listing count on first occurrence of a unique stack.
- for i := 0; i < n; i++ {
- stk := p.Stack(i)
- s := key(stk)
- if count := m[s]; count != 0 {
- fmt.Fprintf(w, "%d %s\n", count, s)
- if debug > 0 {
- printStackRecord(w, stk, false)
- }
- delete(m, s)
+ sort.Sort(&keysByCount{keys, count})
+
+ for _, k := range keys {
+ fmt.Fprintf(w, "%d %s\n", count[k], k)
+ if debug > 0 {
+ printStackRecord(w, p.Stack(index[k]), false)
}
}
@@ -321,49 +325,56 @@ func printCountProfile(w io.Writer, debug int, name string, p countProfile) erro
return b.Flush()
}
+// keysByCount sorts keys with higher counts first, breaking ties by key string order.
+type keysByCount struct {
+ keys []string
+ count map[string]int
+}
+
+func (x *keysByCount) Len() int { return len(x.keys) }
+func (x *keysByCount) Swap(i, j int) { x.keys[i], x.keys[j] = x.keys[j], x.keys[i] }
+func (x *keysByCount) Less(i, j int) bool {
+ ki, kj := x.keys[i], x.keys[j]
+ ci, cj := x.count[ki], x.count[kj]
+ if ci != cj {
+ return ci > cj
+ }
+ return ki < kj
+}
+
// printStackRecord prints the function + source line information
// for a single stack trace.
func printStackRecord(w io.Writer, stk []uintptr, allFrames bool) {
show := allFrames
- wasPanic := false
- for i, pc := range stk {
- f := runtime.FuncForPC(pc)
- if f == nil {
- show = true
- fmt.Fprintf(w, "#\t%#x\n", pc)
- wasPanic = false
- } else {
- tracepc := pc
- // Back up to call instruction.
- if i > 0 && pc > f.Entry() && !wasPanic {
- if runtime.GOARCH == "386" || runtime.GOARCH == "amd64" {
- tracepc--
- } else if runtime.GOARCH == "s390" || runtime.GOARCH == "s390x" {
- // only works if function was called
- // with the brasl instruction (or a
- // different 6-byte instruction).
- tracepc -= 6
- } else {
- tracepc -= 4 // arm, etc
- }
- }
- file, line := f.FileLine(tracepc)
- name := f.Name()
- // Hide runtime.goexit and any runtime functions at the beginning.
- // This is useful mainly for allocation traces.
- wasPanic = name == "runtime.gopanic"
- if name == "runtime.goexit" || !show && (strings.HasPrefix(name, "runtime.") || strings.HasPrefix(name, "runtime_")) {
- continue
- }
- if !show && !strings.Contains(name, ".") && strings.HasPrefix(name, "__go_") {
- continue
- }
- if !show && name == "" {
- // This can happen due to http://gcc.gnu.org/PR65797.
- continue
+ frames := runtime.CallersFrames(stk)
+ for {
+ frame, more := frames.Next()
+ name := frame.Function
+
+ // Hide runtime.goexit and any runtime functions at the beginning.
+ // This is useful mainly for allocation traces.
+ skip := name == "runtime.goexit"
+ if !show {
+ switch {
+ case strings.HasPrefix(name, "runtime."):
+ skip = true
+ case strings.HasPrefix(name, "runtime_"):
+ skip = true
+ case !strings.Contains(name, ".") && strings.HasPrefix(name, "__go_"):
+ skip = true
}
+ }
+
+ if !show && name == "" {
+ // This can happen due to http://gcc.gnu.org/PR65797.
+ } else if name == "" {
+ fmt.Fprintf(w, "#\t%#x\n", frame.PC)
+ } else if !skip {
show = true
- fmt.Fprintf(w, "#\t%#x\t%s+%#x\t%s:%d\n", pc, name, pc-f.Entry(), file, line)
+ fmt.Fprintf(w, "#\t%#x\t%s+%#x\t%s:%d\n", frame.PC, name, frame.PC-frame.Entry, frame.File, frame.Line)
+ }
+ if !more {
+ break
}
}
if !show {
@@ -486,7 +497,6 @@ func writeHeap(w io.Writer, debug int) error {
fmt.Fprintf(w, "# NextGC = %d\n", s.NextGC)
fmt.Fprintf(w, "# PauseNs = %d\n", s.PauseNs)
fmt.Fprintf(w, "# NumGC = %d\n", s.NumGC)
- fmt.Fprintf(w, "# EnableGC = %v\n", s.EnableGC)
fmt.Fprintf(w, "# DebugGC = %v\n", s.DebugGC)
if tw != nil {
@@ -521,7 +531,7 @@ func writeGoroutine(w io.Writer, debug int) error {
func writeGoroutineStacks(w io.Writer) error {
// We don't know how big the buffer needs to be to collect
- // all the goroutines. Start with 1 MB and try a few times, doubling each time.
+ // all the goroutines. Start with 1 MB and try a few times, doubling each time.
// Give up and use a truncated trace if 64 MB is not enough.
buf := make([]byte, 1<<20)
for i := 0; ; i++ {
@@ -584,7 +594,7 @@ var cpu struct {
// Go code built with -buildmode=c-archive or -buildmode=c-shared.
// StartCPUProfile relies on the SIGPROF signal, but that signal will
// be delivered to the main program's SIGPROF signal handler (if any)
-// not to the one used by Go. To make it work, call os/signal.Notify
+// not to the one used by Go. To make it work, call os/signal.Notify
// for syscall.SIGPROF, but note that doing so may break any profiling
// being done by the main program.
func StartCPUProfile(w io.Writer) error {
@@ -595,7 +605,7 @@ func StartCPUProfile(w io.Writer) error {
// 100 Hz is a reasonable choice: it is frequent enough to
// produce useful data, rare enough not to bog down the
// system, and a nice round number to make it easy to
- // convert sample counts to seconds. Instead of requiring
+ // convert sample counts to seconds. Instead of requiring
// each client to specify the frequency, we hard code it.
const hz = 100
@@ -622,6 +632,42 @@ func profileWriter(w io.Writer) {
}
w.Write(data)
}
+
+ // We are emitting the legacy profiling format, which permits
+ // a memory map following the CPU samples. The memory map is
+ // simply a copy of the GNU/Linux /proc/self/maps file. The
+ // profiler uses the memory map to map PC values in shared
+ // libraries to a shared library in the filesystem, in order
+ // to report the correct function and, if the shared library
+ // has debug info, file/line. This is particularly useful for
+ // PIE (position independent executables) as on ELF systems a
+ // PIE is simply an executable shared library.
+ //
+ // Because the profiling format expects the memory map in
+ // GNU/Linux format, we only do this on GNU/Linux for now. To
+ // add support for profiling PIE on other ELF-based systems,
+ // it may be necessary to map the system-specific mapping
+ // information to the GNU/Linux format. For a reasonably
+ // portable C++ version, see the FillProcSelfMaps function in
+ // https://github.com/gperftools/gperftools/blob/master/src/base/sysinfo.cc
+ //
+ // The code that parses this mapping for the pprof tool is
+ // ParseMemoryMap in cmd/internal/pprof/legacy_profile.go, but
+ // don't change that code, as similar code exists in other
+ // (non-Go) pprof readers. Change this code so that that code works.
+ //
+ // We ignore errors reading or copying the memory map; the
+ // profile is likely usable without it, and we have no good way
+ // to report errors.
+ if runtime.GOOS == "linux" {
+ f, err := os.Open("/proc/self/maps")
+ if err == nil {
+ io.WriteString(w, "\nMAPPED_LIBRARIES:\n")
+ io.Copy(w, f)
+ f.Close()
+ }
+ }
+
cpu.done <- true
}
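
printStackRecord now walks the stack with runtime.CallersFrames instead of adjusting raw PCs by hand per architecture. A minimal standalone example of that iteration pattern:

	package main

	import (
		"fmt"
		"runtime"
	)

	func main() {
		pc := make([]uintptr, 16)
		n := runtime.Callers(1, pc) // skip the frame for Callers itself
		frames := runtime.CallersFrames(pc[:n])
		for {
			frame, more := frames.Next()
			fmt.Printf("%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
			if !more {
				break
			}
		}
	}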
diff --git a/libgo/go/runtime/pprof/pprof_test.go b/libgo/go/runtime/pprof/pprof_test.go
index e384e11eda0..1692d4210b7 100644
--- a/libgo/go/runtime/pprof/pprof_test.go
+++ b/libgo/go/runtime/pprof/pprof_test.go
@@ -1,4 +1,4 @@
-// Copyright 2011 The Go Authors. All rights reserved.
+// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -86,10 +86,14 @@ func TestCPUProfileMultithreaded(t *testing.T) {
})
}
-func parseProfile(t *testing.T, bytes []byte, f func(uintptr, []uintptr)) {
+func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []uintptr)) {
// Convert []byte to []uintptr.
- l := len(bytes) / int(unsafe.Sizeof(uintptr(0)))
- val := *(*[]uintptr)(unsafe.Pointer(&bytes))
+ l := len(valBytes)
+ if i := bytes.Index(valBytes, []byte("\nMAPPED_LIBRARIES:\n")); i >= 0 {
+ l = i
+ }
+ l /= int(unsafe.Sizeof(uintptr(0)))
+ val := *(*[]uintptr)(unsafe.Pointer(&valBytes))
val = val[:l]
// 5 for the header, 3 for the trailer.
@@ -389,7 +393,7 @@ func TestStackBarrierProfiling(t *testing.T) {
args = append(args, "-test.short")
}
cmd := exec.Command(os.Args[0], args...)
- cmd.Env = append([]string{"GODEBUG=gcstackbarrierall=1", "GOGC=1"}, os.Environ()...)
+ cmd.Env = append([]string{"GODEBUG=gcstackbarrierall=1", "GOGC=1", "GOTRACEBACK=system"}, os.Environ()...)
if out, err := cmd.CombinedOutput(); err != nil {
t.Fatalf("subprocess failed with %v:\n%s", err, out)
}
@@ -495,6 +499,10 @@ func TestBlockProfile(t *testing.T) {
t.Fatalf("Bad profile header:\n%v", prof)
}
+ if strings.HasSuffix(prof, "#\t0x0\n\n") {
+ t.Errorf("Useless 0 suffix:\n%v", prof)
+ }
+
for _, test := range tests {
if !regexp.MustCompile(strings.Replace(test.re, "\t", "\t+", -1)).MatchString(prof) {
t.Fatalf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof)
@@ -532,15 +540,20 @@ func blockChanClose() {
}
func blockSelectRecvAsync() {
+ const numTries = 3
c := make(chan bool, 1)
c2 := make(chan bool, 1)
go func() {
- time.Sleep(blockDelay)
- c <- true
+ for i := 0; i < numTries; i++ {
+ time.Sleep(blockDelay)
+ c <- true
+ }
}()
- select {
- case <-c:
- case <-c2:
+ for i := 0; i < numTries; i++ {
+ select {
+ case <-c:
+ case <-c2:
+ }
}
}
@@ -580,3 +593,53 @@ func blockCond() {
c.Wait()
mu.Unlock()
}
+
+func func1(c chan int) { <-c }
+func func2(c chan int) { <-c }
+func func3(c chan int) { <-c }
+func func4(c chan int) { <-c }
+
+func TestGoroutineCounts(t *testing.T) {
+ if runtime.Compiler == "gccgo" {
+ t.Skip("goroutine stacks not supported on gccgo")
+ }
+ if runtime.GOOS == "openbsd" {
+ testenv.SkipFlaky(t, 15156)
+ }
+ c := make(chan int)
+ for i := 0; i < 100; i++ {
+ if i%10 == 0 {
+ go func1(c)
+ continue
+ }
+ if i%2 == 0 {
+ go func2(c)
+ continue
+ }
+ go func3(c)
+ }
+ time.Sleep(10 * time.Millisecond) // let goroutines block on channel
+
+ var w bytes.Buffer
+ Lookup("goroutine").WriteTo(&w, 1)
+ prof := w.String()
+
+ if !containsInOrder(prof, "\n50 @ ", "\n40 @", "\n10 @", "\n1 @") {
+ t.Errorf("expected sorted goroutine counts:\n%s", prof)
+ }
+
+ close(c)
+
+ time.Sleep(10 * time.Millisecond) // let goroutines exit
+}
+
+func containsInOrder(s string, all ...string) bool {
+ for _, t := range all {
+ i := strings.Index(s, t)
+ if i < 0 {
+ return false
+ }
+ s = s[i+len(t):]
+ }
+ return true
+}
diff --git a/libgo/go/runtime/print.go b/libgo/go/runtime/print.go
index f789f890835..32626c1e9df 100644
--- a/libgo/go/runtime/print.go
+++ b/libgo/go/runtime/print.go
@@ -209,7 +209,7 @@ func printstring(s string) {
func printslice(s []byte) {
sp := (*slice)(unsafe.Pointer(&s))
print("[", len(s), "/", cap(s), "]")
- printpointer(unsafe.Pointer(sp.array))
+ printpointer(sp.array)
}
func printeface(e eface) {
diff --git a/libgo/go/runtime/proc_runtime_test.go b/libgo/go/runtime/proc_runtime_test.go
new file mode 100644
index 00000000000..d56f9b14636
--- /dev/null
+++ b/libgo/go/runtime/proc_runtime_test.go
@@ -0,0 +1,35 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// Proc unit tests. In the runtime package so they can use runtime guts.
+
+package runtime
+
+func RunStealOrderTest() {
+ var ord randomOrder
+ for procs := 1; procs <= 64; procs++ {
+ ord.reset(uint32(procs))
+ if procs >= 3 && len(ord.coprimes) < 2 {
+ panic("too few coprimes")
+ }
+ for co := 0; co < len(ord.coprimes); co++ {
+ enum := ord.start(uint32(co))
+ checked := make([]bool, procs)
+ for p := 0; p < procs; p++ {
+ x := enum.position()
+ if checked[x] {
+ println("procs:", procs, "inc:", enum.inc)
+ panic("duplicate during enumeration")
+ }
+ checked[x] = true
+ enum.next()
+ }
+ if !enum.done() {
+ panic("not done")
+ }
+ }
+ }
+}
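
RunStealOrderTest checks the number-theoretic property the work-stealing order relies on: stepping through P positions by an increment coprime with P visits every position exactly once. A standalone illustration with example values (procs, inc, and the starting position are arbitrary):

	package main

	import "fmt"

	func gcd(a, b uint32) uint32 {
		for b != 0 {
			a, b = b, a%b
		}
		return a
	}

	func main() {
		const procs = 6
		inc := uint32(5) // gcd(5, 6) == 1
		if gcd(inc, procs) != 1 {
			panic("inc must be coprime with procs")
		}
		pos := uint32(2) // arbitrary starting P
		seen := make(map[uint32]bool)
		for i := 0; i < procs; i++ {
			seen[pos] = true
			pos = (pos + inc) % procs
		}
		fmt.Println(len(seen) == procs) // true: every P visited exactly once
	}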
diff --git a/libgo/go/runtime/proc_test.go b/libgo/go/runtime/proc_test.go
index 37adad570b7..cc390174a74 100644
--- a/libgo/go/runtime/proc_test.go
+++ b/libgo/go/runtime/proc_test.go
@@ -178,7 +178,14 @@ func testGoroutineParallelism2(t *testing.T, load, netpoll bool) {
}
if netpoll {
// Enable netpoller, affects scheduler behavior.
- ln, err := net.Listen("tcp", "localhost:0")
+ laddr := "localhost:0"
+ if runtime.GOOS == "android" {
+ // On some Android devices, there are no records for localhost,
+ // see https://golang.org/issues/14486.
+ // Don't use 127.0.0.1 for every case; it won't work on IPv6-only systems.
+ laddr = "127.0.0.1:0"
+ }
+ ln, err := net.Listen("tcp", laddr)
if err != nil {
defer ln.Close() // yup, defer in a loop
}
@@ -339,6 +346,14 @@ func TestGCFairness(t *testing.T) {
}
}
+func TestGCFairness2(t *testing.T) {
+ output := runTestProg(t, "testprog", "GCFairness2")
+ want := "OK\n"
+ if output != want {
+ t.Fatalf("want %s, got %s\n", want, output)
+ }
+}
+
func TestNumGoroutine(t *testing.T) {
output := runTestProg(t, "testprog", "NumGoroutine")
want := "1\n"
@@ -423,6 +438,9 @@ func TestPingPongHog(t *testing.T) {
}
func BenchmarkPingPongHog(b *testing.B) {
+ if b.N == 0 {
+ return
+ }
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
// Create a CPU hog
@@ -550,6 +568,26 @@ func TestSchedLocalQueueSteal(t *testing.T) {
}
*/
+/*
+func TestSchedLocalQueueEmpty(t *testing.T) {
+ if runtime.NumCPU() == 1 {
+ // Takes too long and does not trigger the race.
+ t.Skip("skipping on uniprocessor")
+ }
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
+
+ // If runtime triggers a forced GC during this test then it will deadlock,
+ // since the goroutines can't be stopped/preempted during spin wait.
+ defer debug.SetGCPercent(debug.SetGCPercent(-1))
+
+ iters := int(1e5)
+ if testing.Short() {
+ iters = 1e2
+ }
+ runtime.RunSchedLocalQueueEmptyTest(iters)
+}
+*/
+
func benchmarkStackGrowth(b *testing.B, rec int) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
@@ -686,3 +724,9 @@ func matmult(done chan<- struct{}, A, B, C Matrix, i0, i1, j0, j1, k0, k1, thres
done <- struct{}{}
}
}
+
+/*
+func TestStealOrder(t *testing.T) {
+ runtime.RunStealOrderTest()
+}
+*/
diff --git a/libgo/go/runtime/race/race_linux_test.go b/libgo/go/runtime/race/race_linux_test.go
new file mode 100644
index 00000000000..c00ce4d3dfc
--- /dev/null
+++ b/libgo/go/runtime/race/race_linux_test.go
@@ -0,0 +1,37 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux,race
+
+package race_test
+
+import (
+ "sync/atomic"
+ "syscall"
+ "testing"
+ "unsafe"
+)
+
+func TestAtomicMmap(t *testing.T) {
+ // Test that atomic operations work on "external" memory. Previously they crashed (#16206).
+ // Also do a sanity correctness check: under race detector atomic operations
+ // are implemented inside of race runtime.
+ mem, err := syscall.Mmap(-1, 0, 1<<20, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE)
+ if err != nil {
+ t.Fatalf("mmap failed: %v", err)
+ }
+ defer syscall.Munmap(mem)
+ a := (*uint64)(unsafe.Pointer(&mem[0]))
+ if *a != 0 {
+ t.Fatalf("bad atomic value: %v, want 0", *a)
+ }
+ atomic.AddUint64(a, 1)
+ if *a != 1 {
+ t.Fatalf("bad atomic value: %v, want 1", *a)
+ }
+ atomic.AddUint64(a, 1)
+ if *a != 2 {
+ t.Fatalf("bad atomic value: %v, want 2", *a)
+ }
+}
diff --git a/libgo/go/runtime/race/race_windows_test.go b/libgo/go/runtime/race/race_windows_test.go
new file mode 100644
index 00000000000..307a1ea6c0d
--- /dev/null
+++ b/libgo/go/runtime/race/race_windows_test.go
@@ -0,0 +1,46 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows,race
+
+package race_test
+
+import (
+ "sync/atomic"
+ "syscall"
+ "testing"
+ "unsafe"
+)
+
+func TestAtomicMmap(t *testing.T) {
+ // Test that atomic operations work on "external" memory. Previously they crashed (#16206).
+ // Also do a sanity correctness check: under the race detector atomic operations
+ // are implemented inside the race runtime.
+ kernel32 := syscall.NewLazyDLL("kernel32.dll")
+ VirtualAlloc := kernel32.NewProc("VirtualAlloc")
+ VirtualFree := kernel32.NewProc("VirtualFree")
+ const (
+ MEM_COMMIT = 0x00001000
+ MEM_RESERVE = 0x00002000
+ MEM_RELEASE = 0x8000
+ PAGE_READWRITE = 0x04
+ )
+ mem, _, err := syscall.Syscall6(VirtualAlloc.Addr(), 4, 0, 1<<20, MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE, 0, 0)
+ if err != 0 {
+ t.Fatalf("VirtualAlloc failed: %v", err)
+ }
+ defer syscall.Syscall(VirtualFree.Addr(), 3, mem, 1<<20, MEM_RELEASE)
+ a := (*uint64)(unsafe.Pointer(mem))
+ if *a != 0 {
+ t.Fatalf("bad atomic value: %v, want 0", *a)
+ }
+ atomic.AddUint64(a, 1)
+ if *a != 1 {
+ t.Fatalf("bad atomic value: %v, want 1", *a)
+ }
+ atomic.AddUint64(a, 1)
+ if *a != 2 {
+ t.Fatalf("bad atomic value: %v, want 2", *a)
+ }
+}
diff --git a/libgo/go/runtime/runtime-lldb_test.go b/libgo/go/runtime/runtime-lldb_test.go
index 2bd91c1ec03..4c379b9cdc2 100644
--- a/libgo/go/runtime/runtime-lldb_test.go
+++ b/libgo/go/runtime/runtime-lldb_test.go
@@ -232,7 +232,7 @@ func verifyAranges(t *testing.T, byteorder binary.ByteOrder, data io.ReadSeeker)
SegmentSize uint8
}
for {
- offset, err := data.Seek(0, 1)
+ offset, err := data.Seek(0, io.SeekCurrent)
if err != nil {
t.Fatalf("Seek error: %v", err)
}
@@ -246,7 +246,7 @@ func verifyAranges(t *testing.T, byteorder binary.ByteOrder, data io.ReadSeeker)
if lastTupleOffset%tupleSize != 0 {
t.Fatalf("Invalid arange length %d, (addr %d, seg %d)", header.UnitLength, header.AddressSize, header.SegmentSize)
}
- if _, err = data.Seek(lastTupleOffset, 0); err != nil {
+ if _, err = data.Seek(lastTupleOffset, io.SeekStart); err != nil {
t.Fatalf("Seek error: %v", err)
}
buf := make([]byte, tupleSize)
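
The change above replaces the bare whence values 0 and 1 with the io.SeekStart and io.SeekCurrent constants added in Go 1.7. A tiny example of the idiom, including the Seek(0, io.SeekCurrent) trick for querying the current offset:

	package main

	import (
		"fmt"
		"io"
		"strings"
	)

	func main() {
		r := strings.NewReader("hello world")
		if _, err := r.Seek(6, io.SeekStart); err != nil { // jump to "world"
			panic(err)
		}
		off, _ := r.Seek(0, io.SeekCurrent) // query position without moving
		fmt.Println(off)                    // 6
	}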
diff --git a/libgo/go/runtime/runtime_mmap_test.go b/libgo/go/runtime/runtime_mmap_test.go
index 3995305052b..97b44e2660d 100644
--- a/libgo/go/runtime/runtime_mmap_test.go
+++ b/libgo/go/runtime/runtime_mmap_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Go Authors. All rights reserved.
+// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/runtime_test.go b/libgo/go/runtime/runtime_test.go
index a520f563475..2ea2dc22b6f 100644
--- a/libgo/go/runtime/runtime_test.go
+++ b/libgo/go/runtime/runtime_test.go
@@ -1,4 +1,4 @@
-// Copyright 2012 The Go Authors. All rights reserved.
+// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -104,7 +104,7 @@ func TestStopCPUProfilingWithProfilerOff(t *testing.T) {
// of the larger addresses must themselves be invalid addresses.
// We might get unlucky and the OS might have mapped one of these
// addresses, but probably not: they're all in the first page, very high
-// adderesses that normally an OS would reserve for itself, or malformed
+// addresses that normally an OS would reserve for itself, or malformed
// addresses. Even so, we might have to remove one or two on different
// systems. We will see.
@@ -249,8 +249,8 @@ func TestBadOpen(t *testing.T) {
if GOOS == "windows" || GOOS == "nacl" {
t.Skip("skipping OS that doesn't have open/read/write/close")
}
- // make sure we get the correct error code if open fails. Same for
- // read/write/close on the resulting -1 fd. See issue 10052.
+ // make sure we get the correct error code if open fails. Same for
+ // read/write/close on the resulting -1 fd. See issue 10052.
nonfile := []byte("/notreallyafile")
fd := Open(&nonfile[0], 0, 0)
if fd != -1 {
diff --git a/libgo/go/runtime/runtime_unix_test.go b/libgo/go/runtime/runtime_unix_test.go
index cfec3326bd7..e91216365ef 100644
--- a/libgo/go/runtime/runtime_unix_test.go
+++ b/libgo/go/runtime/runtime_unix_test.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Go Authors. All rights reserved.
+// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/signal2_unix.go b/libgo/go/runtime/signal2_unix.go
index 3fe625f83cc..b1371699407 100644
--- a/libgo/go/runtime/signal2_unix.go
+++ b/libgo/go/runtime/signal2_unix.go
@@ -1,4 +1,4 @@
-// Copyright 2012 The Go Authors. All rights reserved.
+// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -12,7 +12,7 @@ import "unsafe"
func sigfwd(fn uintptr, sig uint32, info *siginfo, ctx unsafe.Pointer)
// Determines if the signal should be handled by Go and if not, forwards the
-// signal to the handler that was installed before Go's. Returns whether the
+// signal to the handler that was installed before Go's. Returns whether the
// signal was forwarded.
// This is called by the signal handler, and the world may be stopped.
//go:nosplit
@@ -54,7 +54,7 @@ func sigfwdgo(sig uint32, info *siginfo, ctx unsafe.Pointer) bool {
if c.sigcode() == _SI_USER || flags&_SigPanic == 0 {
return false
}
- // Determine if the signal occurred inside Go code. We test that:
+ // Determine if the signal occurred inside Go code. We test that:
// (1) we were in a goroutine (i.e., m.curg != nil), and
// (2) we weren't in CGO (i.e., m.curg.syscallsp == 0).
g := getg()
diff --git a/libgo/go/runtime/signal_linux_mips64x.go b/libgo/go/runtime/signal_linux_mips64x.go
index 671b9167b83..0f590e49eab 100644
--- a/libgo/go/runtime/signal_linux_mips64x.go
+++ b/libgo/go/runtime/signal_linux_mips64x.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/signal_linux_s390x.go b/libgo/go/runtime/signal_linux_s390x.go
new file mode 100644
index 00000000000..155d3a326f5
--- /dev/null
+++ b/libgo/go/runtime/signal_linux_s390x.go
@@ -0,0 +1,208 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+type sigctxt struct {
+ info *siginfo
+ ctxt unsafe.Pointer
+}
+
+func (c *sigctxt) regs() *sigcontext {
+ return (*sigcontext)(unsafe.Pointer(&(*ucontext)(c.ctxt).uc_mcontext))
+}
+func (c *sigctxt) r0() uint64 { return c.regs().gregs[0] }
+func (c *sigctxt) r1() uint64 { return c.regs().gregs[1] }
+func (c *sigctxt) r2() uint64 { return c.regs().gregs[2] }
+func (c *sigctxt) r3() uint64 { return c.regs().gregs[3] }
+func (c *sigctxt) r4() uint64 { return c.regs().gregs[4] }
+func (c *sigctxt) r5() uint64 { return c.regs().gregs[5] }
+func (c *sigctxt) r6() uint64 { return c.regs().gregs[6] }
+func (c *sigctxt) r7() uint64 { return c.regs().gregs[7] }
+func (c *sigctxt) r8() uint64 { return c.regs().gregs[8] }
+func (c *sigctxt) r9() uint64 { return c.regs().gregs[9] }
+func (c *sigctxt) r10() uint64 { return c.regs().gregs[10] }
+func (c *sigctxt) r11() uint64 { return c.regs().gregs[11] }
+func (c *sigctxt) r12() uint64 { return c.regs().gregs[12] }
+func (c *sigctxt) r13() uint64 { return c.regs().gregs[13] }
+func (c *sigctxt) r14() uint64 { return c.regs().gregs[14] }
+func (c *sigctxt) r15() uint64 { return c.regs().gregs[15] }
+func (c *sigctxt) link() uint64 { return c.regs().gregs[14] }
+func (c *sigctxt) sp() uint64 { return c.regs().gregs[15] }
+func (c *sigctxt) pc() uint64 { return c.regs().psw_addr }
+func (c *sigctxt) sigcode() uint32 { return uint32(c.info.si_code) }
+func (c *sigctxt) sigaddr() uint64 { return c.info.si_addr }
+
+func (c *sigctxt) set_r0(x uint64) { c.regs().gregs[0] = x }
+func (c *sigctxt) set_r13(x uint64) { c.regs().gregs[13] = x }
+func (c *sigctxt) set_link(x uint64) { c.regs().gregs[14] = x }
+func (c *sigctxt) set_sp(x uint64) { c.regs().gregs[15] = x }
+func (c *sigctxt) set_pc(x uint64) { c.regs().psw_addr = x }
+func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
+func (c *sigctxt) set_sigaddr(x uint64) {
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+}
+
+func dumpregs(c *sigctxt) {
+ print("r0 ", hex(c.r0()), "\t")
+ print("r1 ", hex(c.r1()), "\n")
+ print("r2 ", hex(c.r2()), "\t")
+ print("r3 ", hex(c.r3()), "\n")
+ print("r4 ", hex(c.r4()), "\t")
+ print("r5 ", hex(c.r5()), "\n")
+ print("r6 ", hex(c.r6()), "\t")
+ print("r7 ", hex(c.r7()), "\n")
+ print("r8 ", hex(c.r8()), "\t")
+ print("r9 ", hex(c.r9()), "\n")
+ print("r10 ", hex(c.r10()), "\t")
+ print("r11 ", hex(c.r11()), "\n")
+ print("r12 ", hex(c.r12()), "\t")
+ print("r13 ", hex(c.r13()), "\n")
+ print("r14 ", hex(c.r14()), "\t")
+ print("r15 ", hex(c.r15()), "\n")
+ print("pc ", hex(c.pc()), "\t")
+ print("link ", hex(c.link()), "\n")
+}
+
+var crashing int32
+
+// May run during STW, so write barriers are not allowed.
+//
+//go:nowritebarrierrec
+func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
+ _g_ := getg()
+ c := &sigctxt{info, ctxt}
+
+ if sig == _SIGPROF {
+ sigprof(uintptr(c.pc()), uintptr(c.sp()), uintptr(c.link()), gp, _g_.m)
+ return
+ }
+ flags := int32(_SigThrow)
+ if sig < uint32(len(sigtable)) {
+ flags = sigtable[sig].flags
+ }
+ if c.sigcode() != _SI_USER && flags&_SigPanic != 0 {
+ // Make it look like a call to the signal func.
+ // Have to pass arguments out of band since
+ // augmenting the stack frame would break
+ // the unwinding code.
+ gp.sig = sig
+ gp.sigcode0 = uintptr(c.sigcode())
+ gp.sigcode1 = uintptr(c.sigaddr())
+ gp.sigpc = uintptr(c.pc())
+
+ // We arrange link and pc to pretend the panicking
+ // function calls sigpanic directly.
+ // Always save LINK to stack so that panics in leaf
+ // functions are correctly handled. This smashes
+ // the stack frame but we're not going back there
+ // anyway.
+ sp := c.sp() - sys.MinFrameSize
+ c.set_sp(sp)
+ *(*uint64)(unsafe.Pointer(uintptr(sp))) = c.link()
+
+ pc := uintptr(gp.sigpc)
+
+ // If we don't recognize the PC as code
+ // but we do recognize the link register as code,
+ // then assume this was a call to non-code and treat like
+ // pc == 0, to make unwinding show the context.
+ if pc != 0 && findfunc(pc) == nil && findfunc(uintptr(c.link())) != nil {
+ pc = 0
+ }
+
+ // Don't bother saving PC if it's zero, which is
+ // probably a call to a nil func: the old link register
+ // is more useful in the stack trace.
+ if pc != 0 {
+ c.set_link(uint64(pc))
+ }
+
+ // In case we are panicking from external C code
+ c.set_r0(0)
+ c.set_r13(uint64(uintptr(unsafe.Pointer(gp))))
+ c.set_pc(uint64(funcPC(sigpanic)))
+ return
+ }
+
+ if c.sigcode() == _SI_USER || flags&_SigNotify != 0 {
+ if sigsend(sig) {
+ return
+ }
+ }
+
+ if c.sigcode() == _SI_USER && signal_ignored(sig) {
+ return
+ }
+
+ if flags&_SigKill != 0 {
+ dieFromSignal(int32(sig))
+ }
+
+ if flags&_SigThrow == 0 {
+ return
+ }
+
+ _g_.m.throwing = 1
+ _g_.m.caughtsig.set(gp)
+
+ if crashing == 0 {
+ startpanic()
+ }
+
+ if sig < uint32(len(sigtable)) {
+ print(sigtable[sig].name, "\n")
+ } else {
+ print("Signal ", sig, "\n")
+ }
+
+ print("PC=", hex(c.pc()), " m=", _g_.m.id, "\n")
+ if _g_.m.lockedg != nil && _g_.m.ncgo > 0 && gp == _g_.m.g0 {
+ print("signal arrived during cgo execution\n")
+ gp = _g_.m.lockedg
+ }
+ print("\n")
+
+ level, _, docrash := gotraceback()
+ if level > 0 {
+ goroutineheader(gp)
+ tracebacktrap(uintptr(c.pc()), uintptr(c.sp()), uintptr(c.link()), gp)
+ if crashing > 0 && gp != _g_.m.curg && _g_.m.curg != nil && readgstatus(_g_.m.curg)&^_Gscan == _Grunning {
+ // tracebackothers on original m skipped this one; trace it now.
+ goroutineheader(_g_.m.curg)
+ traceback(^uintptr(0), ^uintptr(0), 0, gp)
+ } else if crashing == 0 {
+ tracebackothers(gp)
+ print("\n")
+ }
+ dumpregs(c)
+ }
+
+ if docrash {
+ crashing++
+ if crashing < sched.mcount {
+ // There are other m's that need to dump their stacks.
+ // Relay SIGQUIT to the next m by sending it to the current process.
+ // All m's that have already received SIGQUIT have signal masks blocking
+ // receipt of any signals, so the SIGQUIT will go to an m that hasn't seen it yet.
+ // When the last m receives the SIGQUIT, it will fall through to the call to
+ // crash below. Just in case the relaying gets botched, each m involved in
+ // the relay sleeps for 5 seconds and then does the crash/exit itself.
+ // In expected operation, the last m has received the SIGQUIT and run
+ // crash/exit and the process is gone, all long before any of the
+ // 5-second sleeps have finished.
+ print("\n-----\n\n")
+ raiseproc(_SIGQUIT)
+ usleep(5 * 1000 * 1000)
+ }
+ crash()
+ }
+
+ exit(2)
+}
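
The panic path above turns a synchronous fault into an ordinary-looking Go call: it opens a frame below the interrupted SP, spills the old link register into it, points LINK at the faulting PC, and points PC at sigpanic, so the unwinder sees the faulting function "calling" sigpanic. A minimal standalone sketch of that register shuffle (savedRegs and its fields are illustrative stand-ins, not the runtime's sigctxt):

package main

import "fmt"

type savedRegs struct {
	pc, link, sp uintptr
}

// fakeSigpanicCall mirrors the rewrite done by sighandler above:
// reserve a frame (the real handler spills the old LINK through the
// new SP here), remember where the fault happened in LINK, and
// resume execution at sigpanic.
func fakeSigpanicCall(regs *savedRegs, sigpanicPC, minFrameSize uintptr) {
	regs.sp -= minFrameSize
	regs.link = regs.pc // pretend the faulting function called sigpanic
	regs.pc = sigpanicPC
}

func main() {
	r := savedRegs{pc: 0x10a0, link: 0x2000, sp: 0x7fff0000}
	fakeSigpanicCall(&r, 0x3000, 16)
	fmt.Printf("pc=%#x link=%#x sp=%#x\n", r.pc, r.link, r.sp)
}
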
diff --git a/libgo/go/runtime/signal_mips64x.go b/libgo/go/runtime/signal_mips64x.go
index 77c27148e86..4dbeb42fe5c 100644
--- a/libgo/go/runtime/signal_mips64x.go
+++ b/libgo/go/runtime/signal_mips64x.go
@@ -88,7 +88,7 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
c.set_sp(sp)
*(*uint64)(unsafe.Pointer(uintptr(sp))) = c.link()
- pc := uintptr(gp.sigpc)
+ pc := gp.sigpc
// If we don't recognize the PC as code
// but we do recognize the link register as code,
diff --git a/libgo/go/runtime/signal_sigtramp.go b/libgo/go/runtime/signal_sigtramp.go
index 00ab03846e1..dbbbcd0392b 100644
--- a/libgo/go/runtime/signal_sigtramp.go
+++ b/libgo/go/runtime/signal_sigtramp.go
@@ -18,7 +18,15 @@ func sigtrampgo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
}
g := getg()
if g == nil {
- badsignal(uintptr(sig))
+ if sig == _SIGPROF {
+ // Ignore profiling signals that arrive on
+ // non-Go threads. On some systems they will
+ // be handled directly by the signal handler,
+ // by calling sigprofNonGo, in which case we won't
+ // get here anyhow.
+ return
+ }
+ badsignal(uintptr(sig), &sigctxt{info, ctx})
return
}
@@ -29,12 +37,12 @@ func sigtrampgo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
sigaltstack(nil, &st)
if st.ss_flags&_SS_DISABLE != 0 {
setg(nil)
- cgocallback(unsafe.Pointer(funcPC(noSignalStack)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig))
+ cgocallback(unsafe.Pointer(funcPC(noSignalStack)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig), 0)
}
stsp := uintptr(unsafe.Pointer(st.ss_sp))
if sp < stsp || sp >= stsp+st.ss_size {
setg(nil)
- cgocallback(unsafe.Pointer(funcPC(sigNotOnStack)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig))
+ cgocallback(unsafe.Pointer(funcPC(sigNotOnStack)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig), 0)
}
g.m.gsignal.stack.lo = stsp
g.m.gsignal.stack.hi = stsp + st.ss_size
diff --git a/libgo/go/runtime/sigtab_linux_generic.go b/libgo/go/runtime/sigtab_linux_generic.go
index 32c40c4768e..ea36bf36452 100644
--- a/libgo/go/runtime/sigtab_linux_generic.go
+++ b/libgo/go/runtime/sigtab_linux_generic.go
@@ -45,7 +45,7 @@ var sigtable = [...]sigTabT{
/* 28 */ {_SigNotify, "SIGWINCH: window size change"},
/* 29 */ {_SigNotify, "SIGIO: i/o now possible"},
/* 30 */ {_SigNotify, "SIGPWR: power failure restart"},
- /* 31 */ {_SigNotify, "SIGSYS: bad system call"},
+ /* 31 */ {_SigThrow, "SIGSYS: bad system call"},
/* 32 */ {_SigSetStack + _SigUnblock, "signal 32"}, /* SIGCANCEL; see issue 6997 */
/* 33 */ {_SigSetStack + _SigUnblock, "signal 33"}, /* SIGSETXID; see issues 3871, 9400, 12498 */
/* 34 */ {_SigNotify, "signal 34"},
diff --git a/libgo/go/runtime/sigtab_linux_mips64x.go b/libgo/go/runtime/sigtab_linux_mips64x.go
index dbd50f7b1f6..201fe3deeb7 100644
--- a/libgo/go/runtime/sigtab_linux_mips64x.go
+++ b/libgo/go/runtime/sigtab_linux_mips64x.go
@@ -25,7 +25,7 @@ var sigtable = [...]sigTabT{
/* 9 */ {0, "SIGKILL: kill"},
/* 10 */ {_SigPanic + _SigUnblock, "SIGBUS: bus error"},
/* 11 */ {_SigPanic + _SigUnblock, "SIGSEGV: segmentation violation"},
- /* 12 */ {_SigNotify, "SIGSYS: bad system call"},
+ /* 12 */ {_SigThrow, "SIGSYS: bad system call"},
/* 13 */ {_SigNotify, "SIGPIPE: write to broken pipe"},
/* 14 */ {_SigNotify, "SIGALRM: alarm clock"},
/* 15 */ {_SigNotify + _SigKill, "SIGTERM: termination"},
diff --git a/libgo/go/runtime/stack.go b/libgo/go/runtime/stack.go
index 81059965d96..8398a101fd8 100644
--- a/libgo/go/runtime/stack.go
+++ b/libgo/go/runtime/stack.go
@@ -93,7 +93,7 @@ const (
_StackGuard = 720*sys.StackGuardMultiplier + _StackSystem
// After a stack split check the SP is allowed to be this
- // many bytes below the stack guard. This saves an instruction
+ // many bytes below the stack guard. This saves an instruction
// in the checking sequence for tiny frames.
_StackSmall = 128
@@ -127,7 +127,6 @@ const (
const (
uintptrMask = 1<<(8*sys.PtrSize) - 1
- poisonStack = uintptrMask & 0x6868686868686868
// Goroutine preemption request.
// Stored into g->stackguard0 to cause split stack check failure.
@@ -155,9 +154,6 @@ var stackLarge struct {
free [_MHeapMap_Bits]mSpanList // free lists by log_2(s.npages)
}
-// Cached value of haveexperiment("framepointer")
-var framepointer_enabled bool
-
func stackinit() {
if _StackCacheSize&_PageMask != 0 {
throw("cache size must be a multiple of page size")
@@ -180,57 +176,57 @@ func stacklog2(n uintptr) int {
return log2
}
-// Allocates a stack from the free pool. Must be called with
+// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
list := &stackpool[order]
s := list.first
if s == nil {
- // no free stacks. Allocate another span worth.
+ // no free stacks. Allocate another span worth.
s = mheap_.allocStack(_StackCacheSize >> _PageShift)
if s == nil {
throw("out of memory")
}
- if s.ref != 0 {
- throw("bad ref")
+ if s.allocCount != 0 {
+ throw("bad allocCount")
}
- if s.freelist.ptr() != nil {
- throw("bad freelist")
+ if s.stackfreelist.ptr() != nil {
+ throw("bad stackfreelist")
}
for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
- x := gclinkptr(uintptr(s.start)<<_PageShift + i)
- x.ptr().next = s.freelist
- s.freelist = x
+ x := gclinkptr(s.base() + i)
+ x.ptr().next = s.stackfreelist
+ s.stackfreelist = x
}
list.insert(s)
}
- x := s.freelist
+ x := s.stackfreelist
if x.ptr() == nil {
throw("span has no free stacks")
}
- s.freelist = x.ptr().next
- s.ref++
- if s.freelist.ptr() == nil {
+ s.stackfreelist = x.ptr().next
+ s.allocCount++
+ if s.stackfreelist.ptr() == nil {
// all stacks in s are allocated.
list.remove(s)
}
return x
}
-// Adds stack x to the free pool. Must be called with stackpoolmu held.
+// Adds stack x to the free pool. Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
s := mheap_.lookup(unsafe.Pointer(x))
if s.state != _MSpanStack {
throw("freeing stack not in a stack span")
}
- if s.freelist.ptr() == nil {
+ if s.stackfreelist.ptr() == nil {
// s will now have a free stack
stackpool[order].insert(s)
}
- x.ptr().next = s.freelist
- s.freelist = x
- s.ref--
- if gcphase == _GCoff && s.ref == 0 {
+ x.ptr().next = s.stackfreelist
+ s.stackfreelist = x
+ s.allocCount--
+ if gcphase == _GCoff && s.allocCount == 0 {
// Span is completely free. Return it to the heap
// immediately if we're sweeping.
//
@@ -247,13 +243,15 @@ func stackpoolfree(x gclinkptr, order uint8) {
//
// By not freeing, we prevent step #4 until GC is done.
stackpool[order].remove(s)
- s.freelist = 0
+ s.stackfreelist = 0
mheap_.freeStack(s)
}
}
// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
+//
+//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
if stackDebug >= 1 {
print("stackcacherefill order=", order, "\n")
@@ -275,6 +273,7 @@ func stackcacherefill(c *mcache, order uint8) {
c.stackcache[order].size = size
}
+//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
if stackDebug >= 1 {
print("stackcacherelease order=", order, "\n")
@@ -293,6 +292,7 @@ func stackcacherelease(c *mcache, order uint8) {
c.stackcache[order].size = size
}
+//go:systemstack
func stackcache_clear(c *mcache) {
if stackDebug >= 1 {
print("stackcache clear\n")
@@ -311,6 +311,12 @@ func stackcache_clear(c *mcache) {
unlock(&stackpoolmu)
}
+// stackalloc allocates an n byte stack.
+//
+// stackalloc must run on the system stack because it uses per-P
+// resources and must not split the stack.
+//
+//go:systemstack
func stackalloc(n uint32) (stack, []stkbar) {
// Stackalloc must be called on scheduler stack, so that we
// never try to grow the stack during the code that stackalloc runs.
@@ -391,7 +397,7 @@ func stackalloc(n uint32) (stack, []stkbar) {
throw("out of memory")
}
}
- v = unsafe.Pointer(s.start << _PageShift)
+ v = unsafe.Pointer(s.base())
}
if raceenabled {
@@ -408,6 +414,12 @@ func stackalloc(n uint32) (stack, []stkbar) {
return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
}
+// stackfree frees an n byte stack allocation at stk.
+//
+// stackfree must run on the system stack because it uses per-P
+// resources and must not split the stack.
+//
+//go:systemstack
func stackfree(stk stack, n uintptr) {
gp := getg()
v := unsafe.Pointer(stk.lo)
@@ -456,7 +468,7 @@ func stackfree(stk stack, n uintptr) {
} else {
s := mheap_.lookup(v)
if s.state != _MSpanStack {
- println(hex(s.start<<_PageShift), v)
+ println(hex(s.base()), v)
throw("bad span state")
}
if gcphase == _GCoff {
@@ -516,20 +528,23 @@ type adjustinfo struct {
old stack
delta uintptr // ptr distance from old to new stack (newbase - oldbase)
cache pcvalueCache
+
+ // sghi is the highest sudog.elem on the stack.
+ sghi uintptr
}
// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
- pp := (*unsafe.Pointer)(vpp)
+ pp := (*uintptr)(vpp)
p := *pp
if stackDebug >= 4 {
- print(" ", pp, ":", p, "\n")
+ print(" ", pp, ":", hex(p), "\n")
}
- if adjinfo.old.lo <= uintptr(p) && uintptr(p) < adjinfo.old.hi {
- *pp = add(p, adjinfo.delta)
+ if adjinfo.old.lo <= p && p < adjinfo.old.hi {
+ *pp = p + adjinfo.delta
if stackDebug >= 3 {
- print(" adjust ptr ", pp, ":", p, " -> ", *pp, "\n")
+ print(" adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
}
}
}
@@ -563,15 +578,22 @@ func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f
minp := adjinfo.old.lo
maxp := adjinfo.old.hi
delta := adjinfo.delta
- num := uintptr(bv.n)
+ num := bv.n
+ // If this frame might contain channel receive slots, use CAS
+ // to adjust pointers. If the slot hasn't been received into
+ // yet, it may contain stack pointers and a concurrent send
+ // could race with adjusting those pointers. (The sent value
+ // itself can never contain stack pointers.)
+ useCAS := uintptr(scanp) < adjinfo.sghi
for i := uintptr(0); i < num; i++ {
if stackDebug >= 4 {
print(" ", add(scanp, i*sys.PtrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*sys.PtrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
}
if ptrbit(&bv, i) == 1 {
pp := (*uintptr)(add(scanp, i*sys.PtrSize))
+ retry:
p := *pp
- if f != nil && 0 < p && p < _PageSize && debug.invalidptr != 0 || p == poisonStack {
+ if f != nil && 0 < p && p < _PageSize && debug.invalidptr != 0 {
// Looks like a junk value in a pointer slot.
// Live analysis wrong?
getg().m.traceback = 2
@@ -582,7 +604,14 @@ func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f
if stackDebug >= 3 {
print("adjust ptr ", p, " ", funcname(f), "\n")
}
- *pp = p + delta
+ if useCAS {
+ ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
+ if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
+ goto retry
+ }
+ } else {
+ *pp = p + delta
+ }
}
}
}
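
The useCAS path exists because, during a concurrent shrink, a channel sender may store into a receive slot on this stack between our load and our store; compare-and-swap detects the lost race so the adjustment can be retried against the fresh value. A standalone sketch of that retry loop with sync/atomic (the types here are ordinary Go, not the runtime's):

package main

import (
	"fmt"
	"sync/atomic"
)

// adjustSlot relocates a stack pointer stored in *slot by delta, but
// only if it still points into the old stack [lo, hi). The CAS retries
// when a concurrent writer replaced the value under us.
func adjustSlot(slot *uintptr, lo, hi, delta uintptr) {
	for {
		p := atomic.LoadUintptr(slot)
		if p < lo || p >= hi {
			return // not an old-stack pointer; nothing to adjust
		}
		if atomic.CompareAndSwapUintptr(slot, p, p+delta) {
			return
		}
		// Lost a race with a concurrent send; reload and retry.
	}
}

func main() {
	v := uintptr(0x180)
	adjustSlot(&v, 0x100, 0x200, 0x1000)
	fmt.Printf("%#x\n", v) // 0x1180
}
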
@@ -617,8 +646,8 @@ func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
// Adjust local variables if stack frame has been allocated.
size := frame.varp - frame.sp
var minsize uintptr
- switch sys.TheChar {
- case '7':
+ switch sys.ArchFamily {
+ case sys.ARM64:
minsize = sys.SpAlign
default:
minsize = sys.MinFrameSize
@@ -645,7 +674,7 @@ func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
}
// Adjust saved base pointer if there is one.
- if sys.TheChar == '6' && frame.argp-frame.varp == 2*sys.RegSize {
+ if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
if !framepointer_enabled {
print("runtime: found space for saved base pointer, but no framepointer experiment\n")
print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
@@ -665,7 +694,7 @@ func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
} else {
stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
if stackmap == nil || stackmap.n <= 0 {
- print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", uintptr(frame.arglen), "\n")
+ print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", frame.arglen, "\n")
throw("missing stackmap")
}
if pcdata < 0 || pcdata >= stackmap.n {
@@ -727,9 +756,76 @@ func fillstack(stk stack, b byte) {
}
}
+func findsghi(gp *g, stk stack) uintptr {
+ var sghi uintptr
+ for sg := gp.waiting; sg != nil; sg = sg.waitlink {
+ p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
+ if stk.lo <= p && p < stk.hi && p > sghi {
+ sghi = p
+ }
+ p = uintptr(unsafe.Pointer(sg.selectdone)) + unsafe.Sizeof(sg.selectdone)
+ if stk.lo <= p && p < stk.hi && p > sghi {
+ sghi = p
+ }
+ }
+ return sghi
+}
+
+// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
+// stack they refer to while synchronizing with concurrent channel
+// operations. It returns the number of bytes of stack copied.
+func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
+ if gp.waiting == nil {
+ return 0
+ }
+
+ // Lock channels to prevent concurrent send/receive.
+ // It's important that we *only* do this for async
+ // copystack; otherwise, gp may be in the middle of
+ // putting itself on wait queues and this would
+ // self-deadlock.
+ var lastc *hchan
+ for sg := gp.waiting; sg != nil; sg = sg.waitlink {
+ if sg.c != lastc {
+ lock(&sg.c.lock)
+ }
+ lastc = sg.c
+ }
+
+ // Adjust sudogs.
+ adjustsudogs(gp, adjinfo)
+
+	// Copy the part of the stack the sudogs point into
+ // while holding the lock to prevent races on
+ // send/receive slots.
+ var sgsize uintptr
+ if adjinfo.sghi != 0 {
+ oldBot := adjinfo.old.hi - used
+ newBot := oldBot + adjinfo.delta
+ sgsize = adjinfo.sghi - oldBot
+ memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
+ }
+
+ // Unlock channels.
+ lastc = nil
+ for sg := gp.waiting; sg != nil; sg = sg.waitlink {
+ if sg.c != lastc {
+ unlock(&sg.c.lock)
+ }
+ lastc = sg.c
+ }
+
+ return sgsize
+}
+
// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
-func copystack(gp *g, newsize uintptr) {
+//
+// If sync is true, this is a self-triggered stack growth and, in
+// particular, no other G may be writing to gp's stack (e.g., via a
+// channel operation). If sync is false, copystack protects against
+// concurrent channel operations.
+func copystack(gp *g, newsize uintptr, sync bool) {
if gp.syscallsp != 0 {
throw("stack growth not allowed in system call")
}
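
syncadjustsudogs above locks each channel on gp's wait list exactly once by comparing against the previous entry; this works because sudogs for the same channel sit adjacent on the list. A sketch of that walk-and-dedup locking pattern over a plain slice (hchanStub and sync.Mutex stand in for the runtime's channel and its lock):

package main

import (
	"fmt"
	"sync"
)

type hchanStub struct{ mu sync.Mutex }

// lockEachOnce locks every distinct channel in list order. Entries for
// the same channel must be adjacent (as they are on gp.waiting), so a
// single "last seen" comparison avoids double-locking.
func lockEachOnce(waiters []*hchanStub) {
	var last *hchanStub
	for _, c := range waiters {
		if c != last {
			c.mu.Lock()
		}
		last = c
	}
}

func unlockEachOnce(waiters []*hchanStub) {
	var last *hchanStub
	for _, c := range waiters {
		if c != last {
			c.mu.Unlock()
		}
		last = c
	}
}

func main() {
	a, b := new(hchanStub), new(hchanStub)
	ws := []*hchanStub{a, a, b}
	lockEachOnce(ws)
	unlockEachOnce(ws)
	fmt.Println("each channel locked and unlocked once")
}
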
@@ -748,28 +844,46 @@ func copystack(gp *g, newsize uintptr) {
print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]/", gp.stackAlloc, " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
}
- // Disallow sigprof scans of this stack and block if there's
- // one in progress.
- gcLockStackBarriers(gp)
-
- // adjust pointers in the to-be-copied frames
+ // Compute adjustment.
var adjinfo adjustinfo
adjinfo.old = old
adjinfo.delta = new.hi - old.hi
- gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)
- // adjust other miscellaneous things that have pointers into stacks.
+ // Adjust sudogs, synchronizing with channel ops if necessary.
+ ncopy := used
+ if sync {
+ adjustsudogs(gp, &adjinfo)
+ } else {
+	// sudogs can point into the stack. During concurrent
+ // shrinking, these areas may be written to. Find the
+ // highest such pointer so we can handle everything
+ // there and below carefully. (This shouldn't be far
+ // from the bottom of the stack, so there's little
+ // cost in handling everything below it carefully.)
+ adjinfo.sghi = findsghi(gp, old)
+
+ // Synchronize with channel ops and copy the part of
+ // the stack they may interact with.
+ ncopy -= syncadjustsudogs(gp, used, &adjinfo)
+ }
+
+ // Copy the stack (or the rest of it) to the new location
+ memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)
+
+ // Disallow sigprof scans of this stack and block if there's
+ // one in progress.
+ gcLockStackBarriers(gp)
+
+ // Adjust remaining structures that have pointers into stacks.
+ // We have to do most of these before we traceback the new
+ // stack because gentraceback uses them.
adjustctxt(gp, &adjinfo)
adjustdefers(gp, &adjinfo)
adjustpanics(gp, &adjinfo)
- adjustsudogs(gp, &adjinfo)
adjuststkbar(gp, &adjinfo)
-
- // copy the stack to the new location
- if stackPoisonCopy != 0 {
- fillstack(new, 0xfb)
+ if adjinfo.sghi != 0 {
+ adjinfo.sghi += adjinfo.delta
}
- memmove(unsafe.Pointer(new.hi-used), unsafe.Pointer(old.hi-used), used)
// copy old stack barriers to new stack barrier array
newstkbar = newstkbar[:len(gp.stkbar)]
@@ -784,6 +898,9 @@ func copystack(gp *g, newsize uintptr) {
gp.stkbar = newstkbar
gp.stktopsp += adjinfo.delta
+ // Adjust pointers in the new stack.
+ gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)
+
gcUnlockStackBarriers(gp)
// free old stack
@@ -868,16 +985,11 @@ func newstack() {
}
}
- // The goroutine must be executing in order to call newstack,
- // so it must be Grunning (or Gscanrunning).
- casgstatus(gp, _Grunning, _Gwaiting)
- gp.waitreason = "stack growth"
-
if gp.stack.lo == 0 {
throw("missing stack in newstack")
}
sp := gp.sched.sp
- if sys.TheChar == '6' || sys.TheChar == '8' {
+ if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 {
// The call to morestack cost a word.
sp -= sys.PtrSize
}
@@ -907,6 +1019,8 @@ func newstack() {
if thisg.m.p == 0 && thisg.m.locks == 0 {
throw("runtime: g is running but p is not")
}
+ // Synchronize with scang.
+ casgstatus(gp, _Grunning, _Gwaiting)
if gp.preemptscan {
for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
// Likely to be racing with the GC as
@@ -916,12 +1030,19 @@ func newstack() {
// return.
}
if !gp.gcscandone {
- scanstack(gp)
+ // gcw is safe because we're on the
+ // system stack.
+ gcw := &gp.m.p.ptr().gcw
+ scanstack(gp, gcw)
+ if gcBlackenPromptly {
+ gcw.dispose()
+ }
gp.gcscandone = true
}
gp.preemptscan = false
gp.preempt = false
casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
+ // This clears gcscanvalid.
casgstatus(gp, _Gwaiting, _Grunning)
gp.stackguard0 = gp.stack.lo + _StackGuard
gogo(&gp.sched) // never return
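
The preemptscan branch spins on castogscanstatus because the GC may concurrently be flipping the same goroutine's status; only a compare-and-swap that observes the expected old state may set the scan bit. A toy version of that transition (the constants mirror the runtime's _Gwaiting and _Gscan values but are local to this sketch):

package main

import (
	"fmt"
	"sync/atomic"
)

const (
	gWaiting uint32 = 4      // _Gwaiting
	gScan    uint32 = 0x1000 // _Gscan bit
)

// casToScan tries to move status from old to old|gScan, as
// castogscanstatus does; callers loop until they win the race.
func casToScan(status *uint32, old uint32) bool {
	return atomic.CompareAndSwapUint32(status, old, old|gScan)
}

func main() {
	status := gWaiting
	for !casToScan(&status, gWaiting) {
		// Racing with the GC over this g; retry.
	}
	fmt.Printf("status=%#x\n", status) // 0x1004 == _Gscanwaiting
}
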
@@ -940,11 +1061,13 @@ func newstack() {
throw("stack overflow")
}
- casgstatus(gp, _Gwaiting, _Gcopystack)
+ // The goroutine must be executing in order to call newstack,
+ // so it must be Grunning (or Gscanrunning).
+ casgstatus(gp, _Grunning, _Gcopystack)
// The concurrent GC will not scan the stack while we are doing the copy since
// the gp is in a Gcopystack status.
- copystack(gp, uintptr(newsize))
+ copystack(gp, uintptr(newsize), true)
if stackDebug >= 1 {
print("stack grow done\n")
}
@@ -971,8 +1094,10 @@ func gostartcallfn(gobuf *gobuf, fv *funcval) {
// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
+// gp must be stopped, but the world need not be.
func shrinkstack(gp *g) {
- if readgstatus(gp) == _Gdead {
+ gstatus := readgstatus(gp)
+ if gstatus&^_Gscan == _Gdead {
if gp.stack.lo != 0 {
// Free whole stack - it will get reallocated
// if G is used again.
@@ -987,6 +1112,9 @@ func shrinkstack(gp *g) {
if gp.stack.lo == 0 {
throw("missing stack in shrinkstack")
}
+ if gstatus&_Gscan == 0 {
+ throw("bad status in shrinkstack")
+ }
if debug.gcshrinkstackoff > 0 {
return
@@ -1022,9 +1150,7 @@ func shrinkstack(gp *g) {
print("shrinking stack ", oldsize, "->", newsize, "\n")
}
- oldstatus := casgcopystack(gp)
- copystack(gp, newsize)
- casgstatus(gp, _Gcopystack, oldstatus)
+ copystack(gp, newsize, false)
}
// freeStackSpans frees unused stack spans at the end of GC.
@@ -1036,9 +1162,9 @@ func freeStackSpans() {
list := &stackpool[order]
for s := list.first; s != nil; {
next := s.next
- if s.ref == 0 {
+ if s.allocCount == 0 {
list.remove(s)
- s.freelist = 0
+ s.stackfreelist = 0
mheap_.freeStack(s)
}
s = next
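
The stackpool bookkeeping above (stackfreelist plus allocCount, formerly freelist plus ref) follows a common pool invariant: a span leaves its size-class list when its free list drains, and a span whose allocCount drops to zero outside a GC cycle can be returned to the heap. A minimal sketch of that accounting with ordinary slices (stackSpan is illustrative, not the runtime's mspan):

package main

import "fmt"

type stackSpan struct {
	free       []uintptr // free stack slots; stands in for the gclinkptr list
	allocCount int
}

// pop hands out a stack, as stackpoolalloc does; the second result tells
// the caller to remove the span from its order list once it is drained.
func (s *stackSpan) pop() (x uintptr, drained bool) {
	x = s.free[len(s.free)-1]
	s.free = s.free[:len(s.free)-1]
	s.allocCount++
	return x, len(s.free) == 0
}

// push returns a stack, as stackpoolfree does; a true result means the
// span is completely idle and could go back to the heap.
func (s *stackSpan) push(x uintptr) (idle bool) {
	s.free = append(s.free, x)
	s.allocCount--
	return s.allocCount == 0
}

func main() {
	s := &stackSpan{free: []uintptr{0x1000, 0x2000}}
	x, drained := s.pop()
	fmt.Printf("got %#x drained=%v\n", x, drained)
	fmt.Println("idle:", s.push(x))
}
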
diff --git a/libgo/go/runtime/string_test.go b/libgo/go/runtime/string_test.go
index 64abd57d28c..f9411c00161 100644
--- a/libgo/go/runtime/string_test.go
+++ b/libgo/go/runtime/string_test.go
@@ -9,6 +9,10 @@ import (
"testing"
)
+// Strings and slices that don't escape and fit into tmpBuf are stack allocated,
+// which defeats using AllocsPerRun to test other optimizations.
+const sizeNoStack = 100
+
func BenchmarkCompareStringEqual(b *testing.B) {
bytes := []byte("Hello Gophers!")
s1, s2 := string(bytes), string(bytes)
@@ -101,6 +105,17 @@ func BenchmarkRuneIterate2(b *testing.B) {
}
}
+func BenchmarkArrayEqual(b *testing.B) {
+ a1 := [16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
+ a2 := [16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ if a1 != a2 {
+ b.Fatal("not equal")
+ }
+ }
+}
+
/*
func TestStringW(t *testing.T) {
strings := []string{
@@ -150,7 +165,7 @@ func TestGostringnocopy(t *testing.T) {
*/
func TestCompareTempString(t *testing.T) {
- s := "foo"
+ s := strings.Repeat("x", sizeNoStack)
b := []byte(s)
n := testing.AllocsPerRun(1000, func() {
if string(b) != s {
@@ -161,7 +176,8 @@ func TestCompareTempString(t *testing.T) {
t.Fatalf("strings are not equal: '%v' and '%v'", string(b), s)
}
})
- if n != 0 {
+ // was n != 0, changed for gccgo.
+ if n > 2 {
t.Fatalf("want 0 allocs, got %v", n)
}
}
@@ -213,7 +229,7 @@ func TestIntStringAllocs(t *testing.T) {
}
func TestRangeStringCast(t *testing.T) {
- s := "abc"
+ s := strings.Repeat("x", sizeNoStack)
n := testing.AllocsPerRun(1000, func() {
for i, c := range []byte(s) {
if c != s[i] {
@@ -221,22 +237,41 @@ func TestRangeStringCast(t *testing.T) {
}
}
})
- if n != 0 {
+ // was n != 0, changed for gccgo.
+ if n > 1 {
t.Fatalf("want 0 allocs, got %v", n)
}
}
+func isZeroed(b []byte) bool {
+ for _, x := range b {
+ if x != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+func isZeroedR(r []rune) bool {
+ for _, x := range r {
+ if x != 0 {
+ return false
+ }
+ }
+ return true
+}
+
func TestString2Slice(t *testing.T) {
// Make sure we don't return slices that expose
// an unzeroed section of stack-allocated temp buf
- // between len and cap. See issue 14232.
+ // between len and cap. See issue 14232.
s := "foož"
b := ([]byte)(s)
- if cap(b) != 5 {
- t.Errorf("want cap of 5, got %d", cap(b))
+ if !isZeroed(b[len(b):cap(b)]) {
+ t.Errorf("extra bytes not zeroed")
}
r := ([]rune)(s)
- if cap(r) != 4 {
- t.Errorf("want cap of 4, got %d", cap(r))
+ if !isZeroedR(r[len(r):cap(r)]) {
+ t.Errorf("extra runes not zeroed")
}
}
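
The sizeNoStack trick works because, with a small input, even an unoptimized conversion fits in a stack temp buffer and AllocsPerRun reports zero regardless, masking regressions; a string larger than any temp buffer means only the intended conversion-for-comparison optimization can keep the count down. A standalone illustration of the measurement pattern (testing.AllocsPerRun is usable outside a test binary):

package main

import (
	"fmt"
	"strings"
	"testing"
)

func main() {
	s := strings.Repeat("x", 100) // too large for a stack temp buffer
	b := []byte(s)
	n := testing.AllocsPerRun(1000, func() {
		// The []byte-to-string conversion is what we're measuring;
		// a good compiler avoids allocating for the comparison.
		if string(b) != s {
			panic("mismatch")
		}
	})
	fmt.Println("allocs per run:", n)
}
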
diff --git a/libgo/go/runtime/symtab.go b/libgo/go/runtime/symtab.go
new file mode 100644
index 00000000000..7b76f11f3a6
--- /dev/null
+++ b/libgo/go/runtime/symtab.go
@@ -0,0 +1,129 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+// Frames may be used to get function/file/line information for a
+// slice of PC values returned by Callers.
+type Frames struct {
+ callers []uintptr
+
+ // The last PC we saw.
+ last uintptr
+
+ // The number of times we've seen last.
+ lastCount int
+}
+
+// Frame is the information returned by Frames for each call frame.
+type Frame struct {
+ // Program counter for this frame; multiple frames may have
+ // the same PC value.
+ PC uintptr
+
+ // Func for this frame; may be nil for non-Go code or fully
+ // inlined functions.
+ Func *Func
+
+ // Function name, file name, and line number for this call frame.
+ // May be the empty string or zero if not known.
+ // If Func is not nil then Function == Func.Name().
+ Function string
+ File string
+ Line int
+
+ // Entry point for the function; may be zero if not known.
+ // If Func is not nil then Entry == Func.Entry().
+ Entry uintptr
+}
+
+// CallersFrames takes a slice of PC values returned by Callers and
+// prepares to return function/file/line information.
+// Do not change the slice until you are done with the Frames.
+func CallersFrames(callers []uintptr) *Frames {
+ return &Frames{callers: callers}
+}
+
+// Next returns frame information for the next caller.
+// If more is false, there are no more callers (the Frame value is valid).
+func (ci *Frames) Next() (frame Frame, more bool) {
+ if len(ci.callers) == 0 {
+ return Frame{}, false
+ }
+
+ pc := ci.callers[0]
+ ci.callers = ci.callers[1:]
+
+ i := 0
+ if pc == ci.last {
+ ci.lastCount++
+ i = ci.lastCount
+ } else {
+ ci.last = pc
+ ci.lastCount = 0
+ }
+ more = len(ci.callers) > 0
+
+ f, file, line := funcframe(pc, i)
+ if f == nil {
+ return Frame{}, more
+ }
+
+ entry := f.Entry()
+ xpc := pc
+ if xpc > entry {
+ xpc--
+ }
+
+ function := f.Name()
+
+ frame = Frame{
+ PC: xpc,
+ Func: f,
+ Function: function,
+ File: file,
+ Line: line,
+ Entry: entry,
+ }
+
+ return frame, more
+}
+
+// NOTE: Func does not expose the actual unexported fields, because we return *Func
+// values to users, and we want to keep them from being able to overwrite the data
+// with (say) *f = Func{}.
+// All code operating on a *Func must call raw to get the *_func instead.
+
+// A Func represents a Go function in the running binary.
+type Func struct {
+ opaque struct{} // unexported field to disallow conversions
+}
+
+// FuncForPC returns a *Func describing the function that contains the
+// given program counter address, or else nil.
+func FuncForPC(pc uintptr) *Func
+
+// Name returns the name of the function.
+func (f *Func) Name() string {
+ return funcname_go(f)
+}
+
+// Entry returns the entry address of the function.
+func (f *Func) Entry() uintptr {
+ return funcentry_go(f)
+}
+
+// FileLine returns the file name and line number of the
+// source code corresponding to the program counter pc.
+// The result will not be accurate if pc is not a program
+// counter within f.
+func (f *Func) FileLine(pc uintptr) (file string, line int) {
+ return funcline_go(f, pc)
+}
+
+// implemented in symtab.c
+func funcline_go(*Func, uintptr) (string, int)
+func funcname_go(*Func) string
+func funcentry_go(*Func) uintptr
+func funcframe(uintptr, int) (*Func, string, int)
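
Frames is the supported way to turn a Callers slice into symbolic information; indexing PCs directly misattributes frames once a single PC expands to multiple frames (the repeated-PC count that Next feeds to funcframe above exists for exactly that case). Typical use of the public API:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	pc := make([]uintptr, 16)
	n := runtime.Callers(1, pc) // skip runtime.Callers itself
	frames := runtime.CallersFrames(pc[:n])
	for {
		frame, more := frames.Next()
		fmt.Printf("%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
		if !more {
			break
		}
	}
}
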
diff --git a/libgo/go/runtime/sys_s390x.go b/libgo/go/runtime/sys_s390x.go
new file mode 100644
index 00000000000..2aa81e75c06
--- /dev/null
+++ b/libgo/go/runtime/sys_s390x.go
@@ -0,0 +1,45 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// adjust Gobuf as if it executed a call to fn with context ctxt
+// and then did an immediate Gosave.
+func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
+ if buf.lr != 0 {
+ throw("invalid use of gostartcall")
+ }
+ buf.lr = buf.pc
+ buf.pc = uintptr(fn)
+ buf.ctxt = ctxt
+}
+
+// Called to rewind context saved during morestack back to beginning of function.
+// To help us, the linker emits a jmp back to the beginning right after the
+// call to morestack. We just have to decode and apply that jump.
+func rewindmorestack(buf *gobuf) {
+ var inst uint64
+ if buf.pc&1 == 0 && buf.pc != 0 {
+ inst = *(*uint64)(unsafe.Pointer(buf.pc))
+ switch inst >> 48 {
+ case 0xa7f4: // BRC (branch relative on condition) instruction.
+ inst >>= 32
+ inst &= 0xFFFF
+ offset := int64(int16(inst))
+ offset <<= 1
+ buf.pc += uintptr(offset)
+ return
+ case 0xc0f4: // BRCL (branch relative on condition long) instruction.
+ inst >>= 16
+ inst = inst & 0xFFFFFFFF
+ inst = (inst << 1) & 0xFFFFFFFF
+ buf.pc += uintptr(int32(inst))
+ return
+ }
+ }
+ print("runtime: pc=", hex(buf.pc), " ", hex(inst), "\n")
+ throw("runtime: misuse of rewindmorestack")
+}
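
rewindmorestack leans on the fixed s390x branch encodings: BRC carries a signed 16-bit halfword count as its immediate, so the byte offset is int16(imm) << 1, and BRCL carries a signed 32-bit halfword count. A small standalone decoder for the BRC case, matching the shifts above (the sample instruction word is made up for illustration):

package main

import "fmt"

// brcOffset extracts the signed byte offset from a 4-byte BRC
// instruction stored in the high half of a uint64, mirroring
// the 0xa7f4 case in rewindmorestack.
func brcOffset(inst uint64) int64 {
	inst >>= 32            // keep the 4-byte instruction word
	imm := int16(inst)     // low 16 bits: signed halfword count
	return int64(imm) << 1 // halfwords to bytes
}

func main() {
	// 0xa7f4fffe encodes BRC 15, -2 halfwords: branch back 4 bytes.
	inst := uint64(0xa7f4fffe) << 32
	fmt.Println(brcOffset(inst)) // -4
}
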
diff --git a/libgo/go/runtime/testdata/testprog/crash.go b/libgo/go/runtime/testdata/testprog/crash.go
index 3d7c7c6aabc..4d83132198d 100644
--- a/libgo/go/runtime/testdata/testprog/crash.go
+++ b/libgo/go/runtime/testdata/testprog/crash.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/testdata/testprog/deadlock.go b/libgo/go/runtime/testdata/testprog/deadlock.go
index 73fbf6224df..c938fcfb569 100644
--- a/libgo/go/runtime/testdata/testprog/deadlock.go
+++ b/libgo/go/runtime/testdata/testprog/deadlock.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -30,6 +30,8 @@ func init() {
register("PanicAfterGoexit", PanicAfterGoexit)
register("RecoveredPanicAfterGoexit", RecoveredPanicAfterGoexit)
register("PanicTraceback", PanicTraceback)
+ register("GoschedInPanic", GoschedInPanic)
+ register("SyscallInPanic", SyscallInPanic)
}
func SimpleDeadlock() {
@@ -152,6 +154,29 @@ func GoexitInPanic() {
runtime.Goexit()
}
+type errorThatGosched struct{}
+
+func (errorThatGosched) Error() string {
+ runtime.Gosched()
+ return "errorThatGosched"
+}
+
+func GoschedInPanic() {
+ panic(errorThatGosched{})
+}
+
+type errorThatPrint struct{}
+
+func (errorThatPrint) Error() string {
+ fmt.Println("1")
+ fmt.Println("2")
+ return "3"
+}
+
+func SyscallInPanic() {
+ panic(errorThatPrint{})
+}
+
func PanicAfterGoexit() {
defer func() {
panic("hello")
diff --git a/libgo/go/runtime/testdata/testprog/gc.go b/libgo/go/runtime/testdata/testprog/gc.go
index 9bb367c0d15..a0c1f82b56b 100644
--- a/libgo/go/runtime/testdata/testprog/gc.go
+++ b/libgo/go/runtime/testdata/testprog/gc.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -8,11 +8,14 @@ import (
"fmt"
"os"
"runtime"
+ "runtime/debug"
+ "sync/atomic"
"time"
)
func init() {
register("GCFairness", GCFairness)
+ register("GCFairness2", GCFairness2)
register("GCSys", GCSys)
}
@@ -72,3 +75,35 @@ func GCFairness() {
time.Sleep(10 * time.Millisecond)
fmt.Println("OK")
}
+
+func GCFairness2() {
+ // Make sure user code can't exploit the GC's high priority
+ // scheduling to make scheduling of user code unfair. See
+ // issue #15706.
+ runtime.GOMAXPROCS(1)
+ debug.SetGCPercent(1)
+ var count [3]int64
+ var sink [3]interface{}
+ for i := range count {
+ go func(i int) {
+ for {
+ sink[i] = make([]byte, 1024)
+ atomic.AddInt64(&count[i], 1)
+ }
+ }(i)
+ }
+ // Note: If the unfairness is really bad, it may not even get
+ // past the sleep.
+ //
+ // If the scheduling rules change, this may not be enough time
+ // to let all goroutines run, but for now we cycle through
+ // them rapidly.
+ time.Sleep(30 * time.Millisecond)
+ for i := range count {
+ if atomic.LoadInt64(&count[i]) == 0 {
+ fmt.Printf("goroutine %d did not run\n", i)
+ return
+ }
+ }
+ fmt.Println("OK")
+}
diff --git a/libgo/go/runtime/testdata/testprog/main.go b/libgo/go/runtime/testdata/testprog/main.go
index 9c227bbf819..ae491a2a978 100644
--- a/libgo/go/runtime/testdata/testprog/main.go
+++ b/libgo/go/runtime/testdata/testprog/main.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/testdata/testprog/memprof.go b/libgo/go/runtime/testdata/testprog/memprof.go
new file mode 100644
index 00000000000..a22fee61d78
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprog/memprof.go
@@ -0,0 +1,49 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "runtime"
+ "runtime/pprof"
+)
+
+func init() {
+ register("MemProf", MemProf)
+}
+
+var memProfBuf bytes.Buffer
+var memProfStr string
+
+func MemProf() {
+ for i := 0; i < 1000; i++ {
+ fmt.Fprintf(&memProfBuf, "%*d\n", i, i)
+ }
+ memProfStr = memProfBuf.String()
+
+ runtime.GC()
+
+ f, err := ioutil.TempFile("", "memprof")
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(2)
+ }
+
+ if err := pprof.WriteHeapProfile(f); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(2)
+ }
+
+ name := f.Name()
+ if err := f.Close(); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(2)
+ }
+
+ fmt.Println(name)
+}
diff --git a/libgo/go/runtime/testdata/testprog/misc.go b/libgo/go/runtime/testdata/testprog/misc.go
index 237680fc87e..7ccd389c944 100644
--- a/libgo/go/runtime/testdata/testprog/misc.go
+++ b/libgo/go/runtime/testdata/testprog/misc.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Go Authors. All rights reserved.
+// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/testdata/testprog/signal.go b/libgo/go/runtime/testdata/testprog/signal.go
index ac2d3e8f6c0..2ccbada57b3 100644
--- a/libgo/go/runtime/testdata/testprog/signal.go
+++ b/libgo/go/runtime/testdata/testprog/signal.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -6,7 +6,10 @@
package main
-import "syscall"
+import (
+ "syscall"
+ "time"
+)
func init() {
register("SignalExitStatus", SignalExitStatus)
@@ -14,4 +17,13 @@ func init() {
func SignalExitStatus() {
syscall.Kill(syscall.Getpid(), syscall.SIGTERM)
+
+ // Should die immediately, but we've seen flakiness on various
+ // systems (see issue 14063). It's possible that the signal is
+ // being delivered to a different thread and we are returning
+ // and exiting before that thread runs again. Give the program
+ // a little while to die to make sure we pick up the signal
+ // before we return and exit the program. The time here
+ // shouldn't matter--we'll never really sleep this long.
+ time.Sleep(time.Second)
}
diff --git a/libgo/go/runtime/testdata/testprog/stringconcat.go b/libgo/go/runtime/testdata/testprog/stringconcat.go
index 9dddf1969f4..f233e662065 100644
--- a/libgo/go/runtime/testdata/testprog/stringconcat.go
+++ b/libgo/go/runtime/testdata/testprog/stringconcat.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/testdata/testprog/syscall_windows.go b/libgo/go/runtime/testdata/testprog/syscall_windows.go
index 73165be187b..6e6782e987a 100644
--- a/libgo/go/runtime/testdata/testprog/syscall_windows.go
+++ b/libgo/go/runtime/testdata/testprog/syscall_windows.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/testdata/testprogcgo/aprof.go b/libgo/go/runtime/testdata/testprogcgo/aprof.go
new file mode 100644
index 00000000000..aabca9e1ebd
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprogcgo/aprof.go
@@ -0,0 +1,53 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// Test that SIGPROF received in C code does not crash the process
+// looking for the C code's func pointer.
+
+// The test fails when the function is the first C function.
+// The exported functions are the first C functions, so we use that.
+
+// extern void GoNop();
+import "C"
+
+import (
+ "bytes"
+ "fmt"
+ "runtime/pprof"
+ "time"
+)
+
+func init() {
+ register("CgoCCodeSIGPROF", CgoCCodeSIGPROF)
+}
+
+//export GoNop
+func GoNop() {}
+
+func CgoCCodeSIGPROF() {
+ c := make(chan bool)
+ go func() {
+ <-c
+ start := time.Now()
+ for i := 0; i < 1e7; i++ {
+ if i%1000 == 0 {
+ if time.Since(start) > time.Second {
+ break
+ }
+ }
+ C.GoNop()
+ }
+ c <- true
+ }()
+
+ var buf bytes.Buffer
+ pprof.StartCPUProfile(&buf)
+ c <- true
+ <-c
+ pprof.StopCPUProfile()
+
+ fmt.Println("OK")
+}
diff --git a/libgo/go/runtime/testdata/testprogcgo/callback.go b/libgo/go/runtime/testdata/testprogcgo/callback.go
index 10e248a1590..7d9d68ddd11 100644
--- a/libgo/go/runtime/testdata/testprogcgo/callback.go
+++ b/libgo/go/runtime/testdata/testprogcgo/callback.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/testdata/testprogcgo/cgo.go b/libgo/go/runtime/testdata/testprogcgo/cgo.go
index cf1af8268ca..870d4efdead 100644
--- a/libgo/go/runtime/testdata/testprogcgo/cgo.go
+++ b/libgo/go/runtime/testdata/testprogcgo/cgo.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -6,17 +6,22 @@ package main
/*
void foo1(void) {}
+void foo2(void* p) {}
*/
import "C"
import (
"fmt"
+ "os"
"runtime"
+ "strconv"
"time"
+ "unsafe"
)
func init() {
register("CgoSignalDeadlock", CgoSignalDeadlock)
register("CgoTraceback", CgoTraceback)
+ register("CgoCheckBytes", CgoCheckBytes)
}
func CgoSignalDeadlock() {
@@ -78,3 +83,18 @@ func CgoTraceback() {
runtime.Stack(buf, true)
fmt.Printf("OK\n")
}
+
+func CgoCheckBytes() {
+ try, _ := strconv.Atoi(os.Getenv("GO_CGOCHECKBYTES_TRY"))
+ if try <= 0 {
+ try = 1
+ }
+ b := make([]byte, 1e6*try)
+ start := time.Now()
+ for i := 0; i < 1e3*try; i++ {
+ C.foo2(unsafe.Pointer(&b[0]))
+ if time.Since(start) > time.Second {
+ break
+ }
+ }
+}
diff --git a/libgo/go/runtime/testdata/testprogcgo/crash.go b/libgo/go/runtime/testdata/testprogcgo/crash.go
index 3d7c7c6aabc..4d83132198d 100644
--- a/libgo/go/runtime/testdata/testprogcgo/crash.go
+++ b/libgo/go/runtime/testdata/testprogcgo/crash.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/testdata/testprogcgo/deadlock.go b/libgo/go/runtime/testdata/testprogcgo/deadlock.go
new file mode 100644
index 00000000000..2cc68a89279
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprogcgo/deadlock.go
@@ -0,0 +1,30 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+/*
+char *geterror() {
+ return "cgo error";
+}
+*/
+import "C"
+import (
+ "fmt"
+)
+
+func init() {
+ register("CgoPanicDeadlock", CgoPanicDeadlock)
+}
+
+type cgoError struct{}
+
+func (cgoError) Error() string {
+ fmt.Print("") // necessary to trigger the deadlock
+ return C.GoString(C.geterror())
+}
+
+func CgoPanicDeadlock() {
+ panic(cgoError{})
+}
diff --git a/libgo/go/runtime/testdata/testprogcgo/dll_windows.go b/libgo/go/runtime/testdata/testprogcgo/dll_windows.go
index a0647ef2125..aed2410a456 100644
--- a/libgo/go/runtime/testdata/testprogcgo/dll_windows.go
+++ b/libgo/go/runtime/testdata/testprogcgo/dll_windows.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/testdata/testprogcgo/dropm.go b/libgo/go/runtime/testdata/testprogcgo/dropm.go
index 75984ea75f3..9e782f504fe 100644
--- a/libgo/go/runtime/testdata/testprogcgo/dropm.go
+++ b/libgo/go/runtime/testdata/testprogcgo/dropm.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Go Authors. All rights reserved.
+// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/testdata/testprogcgo/dropm_stub.go b/libgo/go/runtime/testdata/testprogcgo/dropm_stub.go
index 4c3f46ade40..f7f142c1fdd 100644
--- a/libgo/go/runtime/testdata/testprogcgo/dropm_stub.go
+++ b/libgo/go/runtime/testdata/testprogcgo/dropm_stub.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Go Authors. All rights reserved.
+// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/testdata/testprogcgo/exec.go b/libgo/go/runtime/testdata/testprogcgo/exec.go
index 8dc1d517c6e..2e948401c87 100644
--- a/libgo/go/runtime/testdata/testprogcgo/exec.go
+++ b/libgo/go/runtime/testdata/testprogcgo/exec.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/testdata/testprogcgo/main.go b/libgo/go/runtime/testdata/testprogcgo/main.go
index 9c227bbf819..ae491a2a978 100644
--- a/libgo/go/runtime/testdata/testprogcgo/main.go
+++ b/libgo/go/runtime/testdata/testprogcgo/main.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/testdata/testprogcgo/pprof.go b/libgo/go/runtime/testdata/testprogcgo/pprof.go
new file mode 100644
index 00000000000..cb30ec5b250
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprogcgo/pprof.go
@@ -0,0 +1,97 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// Run a slow C function saving a CPU profile.
+
+/*
+#include <stdint.h>
+
+int salt1;
+int salt2;
+
+void cpuHog() {
+ int foo = salt1;
+ int i;
+
+ for (i = 0; i < 100000; i++) {
+ if (foo > 0) {
+ foo *= foo;
+ } else {
+ foo *= foo + 1;
+ }
+ }
+ salt2 = foo;
+}
+
+static int cpuHogCount;
+
+struct cgoTracebackArg {
+ uintptr_t context;
+ uintptr_t sigContext;
+ uintptr_t* buf;
+ uintptr_t max;
+};
+
+// pprofCgoTraceback is passed to runtime.SetCgoTraceback.
+// For testing purposes it pretends that all CPU hits in C code are in cpuHog.
+void pprofCgoTraceback(void* parg) {
+ struct cgoTracebackArg* arg = (struct cgoTracebackArg*)(parg);
+ arg->buf[0] = (uintptr_t)(cpuHog) + 0x10;
+ arg->buf[1] = 0;
+ ++cpuHogCount;
+}
+
+// getCpuHogCount fetches the number of times we've seen cpuHog in the
+// traceback.
+int getCpuHogCount() {
+ return cpuHogCount;
+}
+*/
+import "C"
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "time"
+ "unsafe"
+)
+
+func init() {
+ register("CgoPprof", CgoPprof)
+}
+
+func CgoPprof() {
+ runtime.SetCgoTraceback(0, unsafe.Pointer(C.pprofCgoTraceback), nil, nil)
+
+ f, err := ioutil.TempFile("", "prof")
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(2)
+ }
+
+ if err := pprof.StartCPUProfile(f); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(2)
+ }
+
+ t0 := time.Now()
+ for C.getCpuHogCount() < 2 && time.Since(t0) < time.Second {
+ C.cpuHog()
+ }
+
+ pprof.StopCPUProfile()
+
+ name := f.Name()
+ if err := f.Close(); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(2)
+ }
+
+ fmt.Println(name)
+}
diff --git a/libgo/go/runtime/testdata/testprogcgo/threadpanic.go b/libgo/go/runtime/testdata/testprogcgo/threadpanic.go
index 3c9baba71a2..f9b48a9026d 100644
--- a/libgo/go/runtime/testdata/testprogcgo/threadpanic.go
+++ b/libgo/go/runtime/testdata/testprogcgo/threadpanic.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/testdata/testprogcgo/threadpprof.go b/libgo/go/runtime/testdata/testprogcgo/threadpprof.go
new file mode 100644
index 00000000000..fdeee6910dd
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprogcgo/threadpprof.go
@@ -0,0 +1,112 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,!windows
+
+package main
+
+// Run a slow C function saving a CPU profile.
+
+/*
+#include <stdint.h>
+#include <time.h>
+#include <pthread.h>
+
+int threadSalt1;
+int threadSalt2;
+
+void cpuHogThread() {
+ int foo = threadSalt1;
+ int i;
+
+ for (i = 0; i < 100000; i++) {
+ if (foo > 0) {
+ foo *= foo;
+ } else {
+ foo *= foo + 1;
+ }
+ }
+ threadSalt2 = foo;
+}
+
+static int cpuHogThreadCount;
+
+struct cgoTracebackArg {
+ uintptr_t context;
+ uintptr_t sigContext;
+ uintptr_t* buf;
+ uintptr_t max;
+};
+
+static void *pprofThread(void* p) {
+ time_t start;
+
+ (void)p;
+ start = time(NULL);
+ while (__sync_add_and_fetch(&cpuHogThreadCount, 0) < 2 && time(NULL) - start < 2) {
+ cpuHogThread();
+ }
+	return NULL;
+}
+
+
+// pprofCgoThreadTraceback is passed to runtime.SetCgoTraceback.
+// For testing purposes it pretends that all CPU hits in C code are in cpuHogThread.
+void pprofCgoThreadTraceback(void* parg) {
+ struct cgoTracebackArg* arg = (struct cgoTracebackArg*)(parg);
+ arg->buf[0] = (uintptr_t)(cpuHogThread) + 0x10;
+ arg->buf[1] = 0;
+ __sync_add_and_fetch(&cpuHogThreadCount, 1);
+}
+
+// getCPUHogThreadCount fetches the number of times we've seen cpuHogThread
+// in the traceback.
+int getCPUHogThreadCount() {
+ return __sync_add_and_fetch(&cpuHogThreadCount, 0);
+}
+*/
+import "C"
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "time"
+ "unsafe"
+)
+
+func init() {
+ register("CgoPprofThread", CgoPprofThread)
+}
+
+func CgoPprofThread() {
+ runtime.SetCgoTraceback(0, unsafe.Pointer(C.pprofCgoThreadTraceback), nil, nil)
+
+ f, err := ioutil.TempFile("", "prof")
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(2)
+ }
+
+ if err := pprof.StartCPUProfile(f); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(2)
+ }
+
+ t0 := time.Now()
+ for C.getCPUHogThreadCount() < 2 && time.Since(t0) < time.Second {
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ pprof.StopCPUProfile()
+
+ name := f.Name()
+ if err := f.Close(); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(2)
+ }
+
+ fmt.Println(name)
+}
diff --git a/libgo/go/runtime/testdata/testprogcgo/threadprof.go b/libgo/go/runtime/testdata/testprogcgo/threadprof.go
index 03e35d2a9e3..a77479dfad1 100644
--- a/libgo/go/runtime/testdata/testprogcgo/threadprof.go
+++ b/libgo/go/runtime/testdata/testprogcgo/threadprof.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/testdata/testprogcgo/traceback.go b/libgo/go/runtime/testdata/testprogcgo/traceback.go
new file mode 100644
index 00000000000..e8b0a04556a
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprogcgo/traceback.go
@@ -0,0 +1,81 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// This program will crash.
+// We want the stack trace to include the C functions.
+// We use a fake traceback, and a symbolizer that dumps a string we recognize.
+
+/*
+#cgo CFLAGS: -g -O0
+
+#include <stdint.h>
+
+char *p;
+
+static int f3() {
+ *p = 0;
+ return 0;
+}
+
+static int f2() {
+ return f3();
+}
+
+static int f1() {
+ return f2();
+}
+
+struct cgoTracebackArg {
+ uintptr_t context;
+ uintptr_t sigContext;
+ uintptr_t* buf;
+ uintptr_t max;
+};
+
+struct cgoSymbolizerArg {
+ uintptr_t pc;
+ const char* file;
+ uintptr_t lineno;
+ const char* func;
+ uintptr_t entry;
+ uintptr_t more;
+ uintptr_t data;
+};
+
+void cgoTraceback(void* parg) {
+ struct cgoTracebackArg* arg = (struct cgoTracebackArg*)(parg);
+ arg->buf[0] = 1;
+ arg->buf[1] = 2;
+ arg->buf[2] = 3;
+ arg->buf[3] = 0;
+}
+
+void cgoSymbolizer(void* parg) {
+ struct cgoSymbolizerArg* arg = (struct cgoSymbolizerArg*)(parg);
+ if (arg->pc != arg->data + 1) {
+ arg->file = "unexpected data";
+ } else {
+ arg->file = "cgo symbolizer";
+ }
+ arg->lineno = arg->data + 1;
+ arg->data++;
+}
+*/
+import "C"
+
+import (
+ "runtime"
+ "unsafe"
+)
+
+func init() {
+ register("CrashTraceback", CrashTraceback)
+}
+
+func CrashTraceback() {
+ runtime.SetCgoTraceback(0, unsafe.Pointer(C.cgoTraceback), nil, unsafe.Pointer(C.cgoSymbolizer))
+ C.f1()
+}
diff --git a/libgo/go/runtime/testdata/testprogcgo/tracebackctxt.go b/libgo/go/runtime/testdata/testprogcgo/tracebackctxt.go
new file mode 100644
index 00000000000..51fa4ad25c3
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprogcgo/tracebackctxt.go
@@ -0,0 +1,107 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The __attribute__((weak)) used below doesn't seem to work on Windows.
+
+package main
+
+// Test the context argument to SetCgoTraceback.
+// Use fake context, traceback, and symbolizer functions.
+
+/*
+// Defined in tracebackctxt_c.c.
+extern void C1(void);
+extern void C2(void);
+extern void tcContext(void*);
+extern void tcTraceback(void*);
+extern void tcSymbolizer(void*);
+extern int getContextCount(void);
+*/
+import "C"
+
+import (
+ "fmt"
+ "runtime"
+ "unsafe"
+)
+
+func init() {
+ register("TracebackContext", TracebackContext)
+}
+
+var tracebackOK bool
+
+func TracebackContext() {
+ runtime.SetCgoTraceback(0, unsafe.Pointer(C.tcTraceback), unsafe.Pointer(C.tcContext), unsafe.Pointer(C.tcSymbolizer))
+ C.C1()
+ if got := C.getContextCount(); got != 0 {
+ fmt.Printf("at end contextCount == %d, expected 0\n", got)
+ tracebackOK = false
+ }
+ if tracebackOK {
+ fmt.Println("OK")
+ }
+}
+
+//export G1
+func G1() {
+ C.C2()
+}
+
+//export G2
+func G2() {
+ pc := make([]uintptr, 32)
+ n := runtime.Callers(0, pc)
+ cf := runtime.CallersFrames(pc[:n])
+ var frames []runtime.Frame
+ for {
+ frame, more := cf.Next()
+ frames = append(frames, frame)
+ if !more {
+ break
+ }
+ }
+
+ want := []struct {
+ function string
+ line int
+ }{
+ {"main.G2", 0},
+ {"cFunction", 0x10200},
+ {"cFunction", 0x200},
+ {"cFunction", 0x10201},
+ {"cFunction", 0x201},
+ {"main.G1", 0},
+ {"cFunction", 0x10100},
+ {"cFunction", 0x100},
+ {"main.TracebackContext", 0},
+ }
+
+ ok := true
+ i := 0
+wantLoop:
+ for _, w := range want {
+ for ; i < len(frames); i++ {
+ if w.function == frames[i].Function {
+ if w.line != 0 && w.line != frames[i].Line {
+ fmt.Printf("found function %s at wrong line %#x (expected %#x)\n", w.function, frames[i].Line, w.line)
+ ok = false
+ }
+ i++
+ continue wantLoop
+ }
+ }
+ fmt.Printf("did not find function %s in\n", w.function)
+ for _, f := range frames {
+ fmt.Println(f)
+ }
+ ok = false
+ break
+ }
+ tracebackOK = ok
+ if got := C.getContextCount(); got != 2 {
+ fmt.Printf("at bottom contextCount == %d, expected 2\n", got)
+ tracebackOK = false
+ }
+}
diff --git a/libgo/go/runtime/testdata/testprognet/main.go b/libgo/go/runtime/testdata/testprognet/main.go
index 9c227bbf819..ae491a2a978 100644
--- a/libgo/go/runtime/testdata/testprognet/main.go
+++ b/libgo/go/runtime/testdata/testprognet/main.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/testdata/testprognet/net.go b/libgo/go/runtime/testdata/testprognet/net.go
index c1a7f3f1321..714b10162e3 100644
--- a/libgo/go/runtime/testdata/testprognet/net.go
+++ b/libgo/go/runtime/testdata/testprognet/net.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/testdata/testprognet/signal.go b/libgo/go/runtime/testdata/testprognet/signal.go
index 24d142403e8..a1559fe6163 100644
--- a/libgo/go/runtime/testdata/testprognet/signal.go
+++ b/libgo/go/runtime/testdata/testprognet/signal.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Go Authors. All rights reserved.
+// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/runtime/vlrt.go b/libgo/go/runtime/vlrt.go
index 6370732ca0d..cd37828ae4c 100644
--- a/libgo/go/runtime/vlrt.go
+++ b/libgo/go/runtime/vlrt.go
@@ -1,7 +1,7 @@
// Inferno's libkern/vlrt-arm.c
// http://code.google.com/p/inferno-os/source/browse/libkern/vlrt-arm.c
//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved.
// Portions Copyright 2009 The Go Authors. All rights reserved.
//
@@ -195,7 +195,6 @@ func dodiv(n, d uint64) (q, r uint64) {
if GOARCH == "arm" {
// arm doesn't have a division instruction, so
// slowdodiv is the best that we can do.
- // TODO: revisit for arm64.
return slowdodiv(n, d)
}