author    Rob Findley <rfindley@google.com>  2020-09-11 14:23:34 -0400
committer Rob Findley <rfindley@google.com>  2020-09-11 14:23:34 -0400
commit    f8b1c17aced24a1618c6984794be9770c5d260be (patch)
tree      45af8d39b5c3d9f43d439ebec0a2ba42b49efe70 /src/runtime
parent    e5d91ab096a9ff9673311f1a7f3f860a7f9c2062 (diff)
parent    07c1788357cfe6a4ee5f6f6a54d4fe9f579fa844 (diff)
[dev.types] all: merge master into dev.types
Change-Id: Ia6964cb4e09153c15cc9c5b441373d1b3cb8f757
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/cgocall.go                 9
-rw-r--r--  src/runtime/debug.go                   5
-rw-r--r--  src/runtime/debugcall.go               2
-rw-r--r--  src/runtime/defs_linux_arm.go          4
-rw-r--r--  src/runtime/defs_linux_mips64x.go      4
-rw-r--r--  src/runtime/defs_openbsd_arm64.go      4
-rw-r--r--  src/runtime/defs_plan9_386.go          4
-rw-r--r--  src/runtime/defs_plan9_amd64.go        4
-rw-r--r--  src/runtime/export_test.go            24
-rw-r--r--  src/runtime/lockrank.go                2
-rw-r--r--  src/runtime/lockrank_off.go           10
-rw-r--r--  src/runtime/map.go                     9
-rw-r--r--  src/runtime/map_fast32.go              5
-rw-r--r--  src/runtime/map_fast64.go              5
-rw-r--r--  src/runtime/map_faststr.go             5
-rw-r--r--  src/runtime/mgcmark.go                 3
-rw-r--r--  src/runtime/mgcstack.go                2
-rw-r--r--  src/runtime/mkduff.go                  1
-rw-r--r--  src/runtime/mpagealloc.go             13
-rw-r--r--  src/runtime/os_linux.go               15
-rw-r--r--  src/runtime/preempt.go                 2
-rw-r--r--  src/runtime/proc.go                   10
-rw-r--r--  src/runtime/rt0_linux_ppc64.s          4
-rw-r--r--  src/runtime/rt0_linux_ppc64le.s        4
-rw-r--r--  src/runtime/runtime2.go               13
-rw-r--r--  src/runtime/select.go                  1
-rw-r--r--  src/runtime/stack.go                   8
-rw-r--r--  src/runtime/symtab.go                  3
-rw-r--r--  src/runtime/trace/annotation.go        4
-rw-r--r--  src/runtime/trace/annotation_test.go   4
-rw-r--r--  src/runtime/traceback.go               6
31 files changed, 127 insertions(+), 62 deletions(-)
diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go
index 099aa540e0..427ed0ffb9 100644
--- a/src/runtime/cgocall.go
+++ b/src/runtime/cgocall.go
@@ -286,13 +286,8 @@ func cgocallbackg1(ctxt uintptr) {
// Additional two words (16-byte alignment) are for saving FP.
cb = (*args)(unsafe.Pointer(sp + 7*sys.PtrSize))
case "amd64":
- // On amd64, stack frame is two words, plus caller PC.
- if framepointer_enabled {
- // In this case, there's also saved BP.
- cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
- break
- }
- cb = (*args)(unsafe.Pointer(sp + 3*sys.PtrSize))
+ // On amd64, stack frame is two words, plus caller PC and BP.
+ cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
case "386":
// On 386, stack frame is three words, plus caller PC.
cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
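
With frame pointers now always enabled on amd64 (see the runtime2.go hunk below, which makes framepointer_enabled a constant), the saved BP slot is always present and the branch collapses into a single offset. A minimal sketch of the layout arithmetic the new code assumes (the helper and its names are illustrative, not part of the runtime):

    // Hypothetical illustration: on amd64 the callback arguments sit
    // above two fixed frame words, the caller PC pushed by CALL, and
    // the saved BP.
    func amd64CallbackArgsAddr(sp, ptrSize uintptr) uintptr {
        const (
            frameWords = 2 // fixed-size frame of the callback path
            callerPC   = 1 // return address
            savedBP    = 1 // frame pointer, now unconditionally saved
        )
        return sp + (frameWords+callerPC+savedBP)*ptrSize // sp + 4*ptrSize
    }
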
diff --git a/src/runtime/debug.go b/src/runtime/debug.go
index 76eeb2e41a..f411b22676 100644
--- a/src/runtime/debug.go
+++ b/src/runtime/debug.go
@@ -10,9 +10,8 @@ import (
)
// GOMAXPROCS sets the maximum number of CPUs that can be executing
-// simultaneously and returns the previous setting. If n < 1, it does not
-// change the current setting.
-// The number of logical CPUs on the local machine can be queried with NumCPU.
+// simultaneously and returns the previous setting. It defaults to
+// the value of runtime.NumCPU. If n < 1, it does not change the current setting.
// This call will go away when the scheduler improves.
func GOMAXPROCS(n int) int {
if GOARCH == "wasm" && n > 1 {
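
The reworded doc comment also makes the query idiom clearer: since n < 1 leaves the setting unchanged, GOMAXPROCS(0) is a pure read. A small runnable example:

    package main

    import (
        "fmt"
        "runtime"
    )

    func main() {
        // n < 1 does not change the setting, so this is a pure read.
        fmt.Println("GOMAXPROCS:", runtime.GOMAXPROCS(0))
        fmt.Println("NumCPU:", runtime.NumCPU()) // the default value
    }
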
diff --git a/src/runtime/debugcall.go b/src/runtime/debugcall.go
index 6c285ec829..b5480c73ae 100644
--- a/src/runtime/debugcall.go
+++ b/src/runtime/debugcall.go
@@ -87,7 +87,7 @@ func debugCallCheck(pc uintptr) string {
pcdata = 0 // in prologue
}
stkmap := (*stackmap)(funcdata(f, _FUNCDATA_RegPointerMaps))
- if pcdata == -2 || stkmap == nil {
+ if pcdata == _PCDATA_RegMapUnsafe || stkmap == nil {
// Not at a safe point.
ret = debugCallUnsafePoint
return
diff --git a/src/runtime/defs_linux_arm.go b/src/runtime/defs_linux_arm.go
index ea29fd9d98..5bc0916f8b 100644
--- a/src/runtime/defs_linux_arm.go
+++ b/src/runtime/defs_linux_arm.go
@@ -1,3 +1,7 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package runtime
// Constants
diff --git a/src/runtime/defs_linux_mips64x.go b/src/runtime/defs_linux_mips64x.go
index 0fb53d5737..1fb423b198 100644
--- a/src/runtime/defs_linux_mips64x.go
+++ b/src/runtime/defs_linux_mips64x.go
@@ -1,3 +1,7 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
// +build mips64 mips64le
// +build linux
diff --git a/src/runtime/defs_openbsd_arm64.go b/src/runtime/defs_openbsd_arm64.go
index 8b8d5cddf2..628f4bc5a5 100644
--- a/src/runtime/defs_openbsd_arm64.go
+++ b/src/runtime/defs_openbsd_arm64.go
@@ -1,3 +1,7 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package runtime
import "unsafe"
diff --git a/src/runtime/defs_plan9_386.go b/src/runtime/defs_plan9_386.go
index 220169d280..49129b3c3f 100644
--- a/src/runtime/defs_plan9_386.go
+++ b/src/runtime/defs_plan9_386.go
@@ -1,3 +1,7 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package runtime
const _PAGESIZE = 0x1000
diff --git a/src/runtime/defs_plan9_amd64.go b/src/runtime/defs_plan9_amd64.go
index 29a2643c3a..0099563034 100644
--- a/src/runtime/defs_plan9_amd64.go
+++ b/src/runtime/defs_plan9_amd64.go
@@ -1,3 +1,7 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package runtime
const _PAGESIZE = 0x1000
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index d591fdc4e9..929bb35db6 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -358,7 +358,11 @@ func ReadMemStatsSlow() (base, slow MemStats) {
}
for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
- pg := mheap_.pages.chunkOf(i).scavenged.popcntRange(0, pallocChunkPages)
+ chunk := mheap_.pages.tryChunkOf(i)
+ if chunk == nil {
+ continue
+ }
+ pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
slow.HeapReleased += uint64(pg) * pageSize
}
for _, p := range allp {
@@ -756,11 +760,7 @@ func (p *PageAlloc) InUse() []AddrRange {
// Returns nil if the PallocData's L2 is missing.
func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
ci := chunkIdx(i)
- l2 := (*pageAlloc)(p).chunks[ci.l1()]
- if l2 == nil {
- return nil
- }
- return (*PallocData)(&l2[ci.l2()])
+ return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
}
// AddrRange represents a range over addresses.
@@ -900,7 +900,10 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
lock(&mheap_.lock)
chunkLoop:
for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
- chunk := mheap_.pages.chunkOf(i)
+ chunk := mheap_.pages.tryChunkOf(i)
+ if chunk == nil {
+ continue
+ }
for j := 0; j < pallocChunkPages/64; j++ {
// Run over each 64-bit bitmap section and ensure
// scavenged is being cleared properly on allocation.
@@ -981,9 +984,8 @@ func MapHashCheck(m interface{}, k interface{}) (uintptr, uintptr) {
}
func MSpanCountAlloc(bits []byte) int {
- s := mspan{
- nelems: uintptr(len(bits) * 8),
- gcmarkBits: (*gcBits)(unsafe.Pointer(&bits[0])),
- }
+ s := (*mspan)(mheap_.spanalloc.alloc())
+ s.nelems = uintptr(len(bits) * 8)
+ s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
return s.countAlloc()
}
diff --git a/src/runtime/lockrank.go b/src/runtime/lockrank.go
index b23cf767be..042f10b1d3 100644
--- a/src/runtime/lockrank.go
+++ b/src/runtime/lockrank.go
@@ -230,7 +230,7 @@ var lockPartialOrder [][]lockRank = [][]lockRank{
lockRankStackLarge: {lockRankSysmon, lockRankAssistQueue, lockRankSched, lockRankItab, lockRankHchan, lockRankProf, lockRankGcBitsArenas, lockRankRoot, lockRankSpanSetSpine, lockRankGscan},
lockRankDefer: {},
lockRankSudog: {lockRankNotifyList, lockRankHchan},
- lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankSched, lockRankAllg, lockRankPollDesc, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankNotifyList, lockRankTraceStrings, lockRankMspanSpecial, lockRankProf, lockRankRoot, lockRankGscan, lockRankDefer, lockRankSudog},
+ lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankSched, lockRankAllg, lockRankPollDesc, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankFin, lockRankNotifyList, lockRankTraceStrings, lockRankMspanSpecial, lockRankProf, lockRankRoot, lockRankGscan, lockRankDefer, lockRankSudog},
lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankPollDesc, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankNotifyList, lockRankTraceBuf, lockRankTraceStrings, lockRankHchan, lockRankMspanSpecial, lockRankProf, lockRankGcBitsArenas, lockRankRoot, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankDefer, lockRankSudog, lockRankWbufSpans, lockRankSpanSetSpine},
lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankNotifyList, lockRankTraceBuf, lockRankTraceStrings, lockRankHchan},
lockRankGlobalAlloc: {lockRankProf, lockRankSpanSetSpine, lockRankMheap, lockRankMheapSpecial},
diff --git a/src/runtime/lockrank_off.go b/src/runtime/lockrank_off.go
index 425ca8dd93..32378a9627 100644
--- a/src/runtime/lockrank_off.go
+++ b/src/runtime/lockrank_off.go
@@ -18,19 +18,29 @@ func getLockRank(l *mutex) lockRank {
return 0
}
+// The following functions may be called in nosplit context.
+// Nosplit is not strictly required for lockWithRank, unlockWithRank
+// and lockWithRankMayAcquire, but these nosplit annotations must
+// be kept consistent with the equivalent functions in lockrank_on.go.
+
+//go:nosplit
func lockWithRank(l *mutex, rank lockRank) {
lock2(l)
}
+//go:nosplit
func acquireLockRank(rank lockRank) {
}
+//go:nosplit
func unlockWithRank(l *mutex) {
unlock2(l)
}
+//go:nosplit
func releaseLockRank(rank lockRank) {
}
+//go:nosplit
func lockWithRankMayAcquire(l *mutex, rank lockRank) {
}
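
//go:nosplit suppresses the stack-growth check in a function's prologue, so the function can run where growing the stack is unsafe; the annotations here are kept in lockstep with lockrank_on.go so that toggling lock ranking does not change which call chains are split-free. The directive is honored in ordinary packages as well; a minimal, hypothetical sketch:

    // saturatingAdd is a toy example: with the directive, the compiler
    // emits no morestack check, so the body must fit in the remaining
    // stack headroom.
    //
    //go:nosplit
    func saturatingAdd(a, b uint32) uint32 {
        if s := a + b; s >= a {
            return s
        }
        return ^uint32(0) // clamp on overflow
    }
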
diff --git a/src/runtime/map.go b/src/runtime/map.go
index 399c1b071f..8be1d3991d 100644
--- a/src/runtime/map.go
+++ b/src/runtime/map.go
@@ -780,6 +780,11 @@ search:
}
notLast:
h.count--
+ // Reset the hash seed to make it more difficult for attackers to
+ // repeatedly trigger hash collisions. See issue 25237.
+ if h.count == 0 {
+ h.hash0 = fastrand()
+ }
break search
}
}
@@ -993,6 +998,10 @@ func mapclear(t *maptype, h *hmap) {
h.noverflow = 0
h.count = 0
+ // Reset the hash seed to make it more difficult for attackers to
+ // repeatedly trigger hash collisions. See issue 25237.
+ h.hash0 = fastrand()
+
// Keep the mapextra allocation but clear any extra information.
if h.extra != nil {
*h.extra = mapextra{}
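
The same reseeding is added to all of the delete fast paths below (map_fast32, map_fast64, map_faststr), so however a map becomes empty, it picks a fresh seed. A hedged sketch of the usage pattern this hardens, with hypothetical process and batches:

    // One long-lived map reused across batches of attacker-influenced
    // keys. Once the map is emptied (this delete loop is the form the
    // compiler turns into mapclear), it gets a new hash seed, so
    // collisions probed in one round no longer carry over to the next.
    func handleBatches(batches []map[string]int, process func(map[string]int)) {
        m := make(map[string]int, 64)
        for _, batch := range batches {
            for k, v := range batch {
                m[k] = v
            }
            process(m)
            for k := range m {
                delete(m, k) // count drops to 0: hash0 is reset
            }
        }
    }
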
diff --git a/src/runtime/map_fast32.go b/src/runtime/map_fast32.go
index d035ed0386..d80f5eac78 100644
--- a/src/runtime/map_fast32.go
+++ b/src/runtime/map_fast32.go
@@ -344,6 +344,11 @@ search:
}
notLast:
h.count--
+ // Reset the hash seed to make it more difficult for attackers to
+ // repeatedly trigger hash collisions. See issue 25237.
+ if h.count == 0 {
+ h.hash0 = fastrand()
+ }
break search
}
}
diff --git a/src/runtime/map_fast64.go b/src/runtime/map_fast64.go
index f1f3927598..3bc84bbdd3 100644
--- a/src/runtime/map_fast64.go
+++ b/src/runtime/map_fast64.go
@@ -346,6 +346,11 @@ search:
}
notLast:
h.count--
+ // Reset the hash seed to make it more difficult for attackers to
+ // repeatedly trigger hash collisions. See issue 25237.
+ if h.count == 0 {
+ h.hash0 = fastrand()
+ }
break search
}
}
diff --git a/src/runtime/map_faststr.go b/src/runtime/map_faststr.go
index 069cda6554..108c502394 100644
--- a/src/runtime/map_faststr.go
+++ b/src/runtime/map_faststr.go
@@ -369,6 +369,11 @@ search:
}
notLast:
h.count--
+ // Reset the hash seed to make it more difficult for attackers to
+ // repeatedly trigger hash collisions. See issue 25237.
+ if h.count == 0 {
+ h.hash0 = fastrand()
+ }
break search
}
}
diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index 2b84945471..79df59d6d6 100644
--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go
@@ -837,7 +837,8 @@ func scanstack(gp *g, gcw *gcWork) {
x := state.head
state.head = x.next
if stackTraceDebug {
- for _, obj := range x.obj[:x.nobj] {
+ for i := 0; i < x.nobj; i++ {
+ obj := &x.obj[i]
if obj.typ == nil { // reachable
continue
}
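
Switching from range-by-value to an index-and-address loop lets the debug code inspect each stackObject in place rather than operating on a copy. The idiom in miniature, with illustrative types:

    type counter struct{ hits int }

    func bumpAll(cs []counter) {
        for i := 0; i < len(cs); i++ {
            c := &cs[i] // points into the slice itself
            c.hits++    // mutation is visible to the caller
        }
        // By contrast, `for _, c := range cs { c.hits++ }` would only
        // increment copies and discard them.
    }
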
diff --git a/src/runtime/mgcstack.go b/src/runtime/mgcstack.go
index 211d882fa6..8eb941a328 100644
--- a/src/runtime/mgcstack.go
+++ b/src/runtime/mgcstack.go
@@ -167,8 +167,6 @@ func (obj *stackObject) setType(typ *_type) {
// A stackScanState keeps track of the state used during the GC walk
// of a goroutine.
-//
-//go:notinheap
type stackScanState struct {
cache pcvalueCache
diff --git a/src/runtime/mkduff.go b/src/runtime/mkduff.go
index 6c7a4cf8dc..8859ed68cc 100644
--- a/src/runtime/mkduff.go
+++ b/src/runtime/mkduff.go
@@ -83,7 +83,6 @@ func copyAMD64(w io.Writer) {
//
// This is equivalent to a sequence of MOVSQ but
// for some reason that is 3.5x slower than this code.
- // The STOSQ in duffzero seem fine, though.
fmt.Fprintln(w, "TEXT runtime·duffcopy(SB), NOSPLIT, $0-0")
for i := 0; i < 64; i++ {
fmt.Fprintln(w, "\tMOVUPS\t(SI), X0")
diff --git a/src/runtime/mpagealloc.go b/src/runtime/mpagealloc.go
index 8b3c62c375..c90a6378bd 100644
--- a/src/runtime/mpagealloc.go
+++ b/src/runtime/mpagealloc.go
@@ -326,7 +326,20 @@ func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
s.scav.scavLWM = maxSearchAddr
}
+// tryChunkOf returns the bitmap data for the given chunk.
+//
+// Returns nil if the chunk data has not been mapped.
+func (s *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData {
+ l2 := s.chunks[ci.l1()]
+ if l2 == nil {
+ return nil
+ }
+ return &l2[ci.l2()]
+}
+
// chunkOf returns the chunk at the given chunk index.
+//
+// The chunk index must be valid or this method may throw.
func (s *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
return &s.chunks[ci.l1()][ci.l2()]
}
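
s.chunks is a two-level sparse array: an L1 slice of pointers to L2 blocks that are mapped on demand. chunkOf indexes straight through and may fault on an unmapped block, while tryChunkOf turns that case into an ordinary nil, which is what the export_test.go callers above rely on to skip holes. The pattern reduced to essentials (types and sizes are illustrative, not the runtime's):

    const l2Entries = 512 // illustrative; the runtime derives this from address-space constants

    type entry struct{ bits [8]uint64 }

    type sparseArray struct {
        l1 []*[l2Entries]entry // nil: the L2 block was never mapped
    }

    // tryGet mirrors tryChunkOf: nil for unmapped blocks, no fault.
    func (s *sparseArray) tryGet(i int) *entry {
        l2 := s.l1[i/l2Entries]
        if l2 == nil {
            return nil
        }
        return &l2[i%l2Entries]
    }
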
diff --git a/src/runtime/os_linux.go b/src/runtime/os_linux.go
index 9702920bcf..371db73502 100644
--- a/src/runtime/os_linux.go
+++ b/src/runtime/os_linux.go
@@ -5,7 +5,6 @@
package runtime
import (
- "runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
)
@@ -476,21 +475,7 @@ func rt_sigaction(sig uintptr, new, old *sigactiont, size uintptr) int32
func getpid() int
func tgkill(tgid, tid, sig int)
-// touchStackBeforeSignal stores an errno value. If non-zero, it means
-// that we should touch the signal stack before sending a signal.
-// This is used on systems that have a bug when the signal stack must
-// be faulted in. See #35777 and #37436.
-//
-// This is accessed atomically as it is set and read in different threads.
-//
-// TODO(austin): Remove this after Go 1.15 when we remove the
-// mlockGsignal workaround.
-var touchStackBeforeSignal uint32
-
// signalM sends a signal to mp.
func signalM(mp *m, sig int) {
- if atomic.Load(&touchStackBeforeSignal) != 0 {
- atomic.Cas((*uint32)(unsafe.Pointer(mp.gsignal.stack.hi-4)), 0, 0)
- }
tgkill(getpid(), int(mp.procid), sig)
}
diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go
index 761856576a..17ef2c90d3 100644
--- a/src/runtime/preempt.go
+++ b/src/runtime/preempt.go
@@ -406,7 +406,7 @@ func isAsyncSafePoint(gp *g, pc, sp, lr uintptr) (bool, uintptr) {
var startpc uintptr
if !go115ReduceLiveness {
smi := pcdatavalue(f, _PCDATA_RegMapIndex, pc, nil)
- if smi == -2 {
+ if smi == _PCDATA_RegMapUnsafe {
// Unsafe-point marked by compiler. This includes
// atomic sequences (e.g., write barrier) and nosplit
// functions (except at calls).
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 5e38b3194c..739745aa26 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -3928,6 +3928,13 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
return
}
+ // If mp.profilehz is 0, then profiling is not enabled for this thread.
+ // We must check this to avoid a deadlock between setcpuprofilerate
+ // and the call to cpuprof.add, below.
+ if mp != nil && mp.profilehz == 0 {
+ return
+ }
+
// On mips{,le}, 64bit atomics are emulated with spinlocks, in
// runtime/internal/atomic. If SIGPROF arrives while the program is inside
// the critical section, it creates a deadlock (when writing the sample).
@@ -5459,9 +5466,6 @@ func setMaxThreads(in int) (out int) {
}
func haveexperiment(name string) bool {
- if name == "framepointer" {
- return framepointer_enabled // set by linker
- }
x := sys.Goexperiment
for x != "" {
xname := ""
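
The new profilehz check in sigprof closes a deadlock window: setcpuprofilerate can hold the profile lock on the very thread that then receives SIGPROF, and a handler that went on to take the same lock in cpuprof.add could never succeed. Checking the per-thread rate first means a disabled thread returns before locking. The shape of the fix in miniature (names are illustrative):

    package profguard

    import (
        "sync"
        "sync/atomic"
    )

    var (
        mu sync.Mutex
        on int32 // written under mu, read lock-free by the handler
    )

    func setProfiling(enabled bool) {
        mu.Lock() // the writer holds mu while flipping the flag
        var v int32
        if enabled {
            v = 1
        }
        atomic.StoreInt32(&on, v)
        mu.Unlock()
    }

    // onSample stands in for sigprof: it must return before touching
    // mu when profiling is off, because the code that disabled it may
    // be holding mu on this very thread right now.
    func onSample(record func()) {
        if atomic.LoadInt32(&on) == 0 {
            return
        }
        mu.Lock()
        record()
        mu.Unlock()
    }
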
diff --git a/src/runtime/rt0_linux_ppc64.s b/src/runtime/rt0_linux_ppc64.s
index 1265b15853..897d61052a 100644
--- a/src/runtime/rt0_linux_ppc64.s
+++ b/src/runtime/rt0_linux_ppc64.s
@@ -1,3 +1,7 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
#include "textflag.h"
// actually a function descriptor for _main<>(SB)
diff --git a/src/runtime/rt0_linux_ppc64le.s b/src/runtime/rt0_linux_ppc64le.s
index 54ea9d58f7..4f7c6e6c99 100644
--- a/src/runtime/rt0_linux_ppc64le.s
+++ b/src/runtime/rt0_linux_ppc64le.s
@@ -1,3 +1,7 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
#include "go_asm.h"
#include "textflag.h"
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index b7d0739e54..a3157037e7 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -329,7 +329,7 @@ type gobuf struct {
ctxt unsafe.Pointer
ret sys.Uintreg
lr uintptr
- bp uintptr // for GOEXPERIMENT=framepointer
+ bp uintptr // for framepointer-enabled architectures
}
// sudog represents a g in a wait list, such as for sending/receiving
@@ -909,15 +909,12 @@ type _defer struct {
// A _panic holds information about an active panic.
//
-// This is marked go:notinheap because _panic values must only ever
-// live on the stack.
+// A _panic value must only ever live on the stack.
//
// The argp and link fields are stack pointers, but don't need special
// handling during stack growth: because they are pointer-typed and
// _panic values only live on the stack, regular stack pointer
// adjustment takes care of them.
-//
-//go:notinheap
type _panic struct {
argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
arg interface{} // argument to panic
@@ -1049,8 +1046,7 @@ var (
isIntel bool
lfenceBeforeRdtsc bool
- goarm uint8 // set by cmd/link on arm systems
- framepointer_enabled bool // set by cmd/link
+ goarm uint8 // set by cmd/link on arm systems
)
// Set by the linker so the runtime can determine the buildmode.
@@ -1058,3 +1054,6 @@ var (
islibrary bool // -buildmode=c-shared
isarchive bool // -buildmode=c-archive
)
+
+// Must agree with cmd/internal/objabi.Framepointer_enabled.
+const framepointer_enabled = GOARCH == "amd64" || GOARCH == "arm64" && (GOOS == "linux" || GOOS == "darwin")
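
framepointer_enabled is now an untyped constant computed from GOARCH and GOOS instead of a variable patched in by the linker, so dead branches fold away at compile time. Note the precedence: && binds tighter than ||, so the expression reads amd64 everywhere, or arm64 only on linux and darwin. Since runtime.GOOS and runtime.GOARCH are constants too, user code can apply the same trick:

    package main

    import (
        "fmt"
        "runtime"
    )

    // Mirrors the runtime's expression; parenthesized it reads
    // (amd64) || (arm64 && (linux || darwin)).
    const hasFramePointer = runtime.GOARCH == "amd64" ||
        runtime.GOARCH == "arm64" && (runtime.GOOS == "linux" || runtime.GOOS == "darwin")

    func main() {
        fmt.Printf("frame pointers on %s/%s: %v\n", runtime.GOOS, runtime.GOARCH, hasFramePointer)
    }
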
diff --git a/src/runtime/select.go b/src/runtime/select.go
index 80768b285b..a506747910 100644
--- a/src/runtime/select.go
+++ b/src/runtime/select.go
@@ -118,6 +118,7 @@ func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, blo
scases := cas1[:ncases:ncases]
pollorder := order1[:ncases:ncases]
lockorder := order1[ncases:][:ncases:ncases]
+ // NOTE: pollorder/lockorder's underlying array was not zero-initialized by compiler.
// Even when raceenabled is true, there might be select
// statements in packages compiled without -race (e.g.,
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index 0e930f60db..821c2e8436 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -66,7 +66,7 @@ const (
// to each stack below the usual guard area for OS-specific
// purposes like signal handling. Used on Windows, Plan 9,
// and iOS because they do not use a separate stack.
- _StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024 + sys.GoosDarwin*sys.GoarchArm64*1024
+ _StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm64*1024
// The minimum size of stack used by Go code
_StackMin = 2048
@@ -648,12 +648,8 @@ func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
}
// Adjust saved base pointer if there is one.
+ // TODO what about arm64 frame pointer adjustment?
if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
- if !framepointer_enabled {
- print("runtime: found space for saved base pointer, but no framepointer experiment\n")
- print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
- throw("bad frame layout")
- }
if stackDebug >= 3 {
print(" saved bp\n")
}
diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go
index ddb5ea82b4..fa8d17035e 100644
--- a/src/runtime/symtab.go
+++ b/src/runtime/symtab.go
@@ -284,6 +284,9 @@ const (
)
const (
+ // Only if !go115ReduceLiveness.
+ _PCDATA_RegMapUnsafe = _PCDATA_UnsafePointUnsafe // Unsafe for async preemption
+
// PCDATA_UnsafePoint values.
_PCDATA_UnsafePointSafe = -1 // Safe for async preemption
_PCDATA_UnsafePointUnsafe = -2 // Unsafe for async preemption
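
This is the constant the debugcall.go and preempt.go hunks above now use in place of a literal -2. Defining the legacy name in terms of the new value, rather than repeating the literal, guarantees the two encodings cannot drift apart. The idiom in miniature (constants are illustrative, not the runtime's):

    const (
        unsafePointSafe   = -1 // safe for async preemption
        unsafePointUnsafe = -2 // unsafe for async preemption

        // Legacy alias for the !go115ReduceLiveness code paths: same
        // value by construction, never by coincidence.
        regMapUnsafe = unsafePointUnsafe
    )
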
diff --git a/src/runtime/trace/annotation.go b/src/runtime/trace/annotation.go
index 82cb232dba..6e18bfb755 100644
--- a/src/runtime/trace/annotation.go
+++ b/src/runtime/trace/annotation.go
@@ -1,3 +1,7 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package trace
import (
diff --git a/src/runtime/trace/annotation_test.go b/src/runtime/trace/annotation_test.go
index 71abbfcfa6..31fccef206 100644
--- a/src/runtime/trace/annotation_test.go
+++ b/src/runtime/trace/annotation_test.go
@@ -1,3 +1,7 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package trace_test
import (
diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go
index 7850eceafa..94f4a44976 100644
--- a/src/runtime/traceback.go
+++ b/src/runtime/traceback.go
@@ -269,9 +269,9 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
frame.varp -= sys.RegSize
}
- // If framepointer_enabled and there's a frame, then
- // there's a saved bp here.
- if frame.varp > frame.sp && (framepointer_enabled && GOARCH == "amd64" || GOARCH == "arm64") {
+ // For architectures with frame pointers, if there's
+ // a frame, then there's a saved frame pointer here.
+ if frame.varp > frame.sp && (GOARCH == "amd64" || GOARCH == "arm64") {
frame.varp -= sys.RegSize
}