author    Cherry Zhang <cherryyz@google.com>  2020-10-28 09:12:20 -0400
committer Cherry Zhang <cherryyz@google.com>  2020-10-28 09:12:20 -0400
commit    a16e30d162c1c7408db7821e7b9513cefa09c6ca (patch)
tree      af752ba9ba44c547df39bb0af9bff79f610ba9d5 /src/sync
parent    91e4d2d57bc341dd82c98247117114c851380aef (diff)
parent    cf6cfba4d5358404dd890f6025e573a4b2156543 (diff)
[dev.link] all: merge branch 'master' into dev.link

Clean merge.

Change-Id: Ia7b2808bc649790198d34c226a61d9e569084dc5
Diffstat (limited to 'src/sync')
-rw-r--r--  src/sync/atomic/atomic_test.go  |  9
-rw-r--r--  src/sync/once.go                |  2
-rw-r--r--  src/sync/pool.go                | 20
-rw-r--r--  src/sync/poolqueue.go           |  2
4 files changed, 26 insertions, 7 deletions
diff --git a/src/sync/atomic/atomic_test.go b/src/sync/atomic/atomic_test.go
index 83e7c8d763..eadc962f70 100644
--- a/src/sync/atomic/atomic_test.go
+++ b/src/sync/atomic/atomic_test.go
@@ -1397,8 +1397,15 @@ func TestStoreLoadRelAcq64(t *testing.T) {
 
 func shouldPanic(t *testing.T, name string, f func()) {
     defer func() {
-        if recover() == nil {
+        // Check that all GC maps are sane.
+        runtime.GC()
+
+        err := recover()
+        want := "unaligned 64-bit atomic operation"
+        if err == nil {
             t.Errorf("%s did not panic", name)
+        } else if s, _ := err.(string); s != want {
+            t.Errorf("%s: wanted panic %q, got %q", name, want, err)
         }
     }()
     f()
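
The hunk above tightens shouldPanic: instead of only checking that f panicked, it now also compares the recovered value against the exact runtime message for unaligned 64-bit atomics. A minimal standalone sketch of the same recover-and-compare assertion pattern (assertPanics and its messages are illustrative names, not part of this CL):

package main

import "fmt"

// assertPanics runs f and verifies that it panics with exactly the
// string want, mirroring the pattern of the updated shouldPanic.
func assertPanics(name, want string, f func()) {
    defer func() {
        err := recover()
        if err == nil {
            fmt.Printf("FAIL: %s did not panic\n", name)
        } else if s, _ := err.(string); s != want {
            fmt.Printf("FAIL: %s: wanted panic %q, got %q\n", name, want, err)
        }
    }()
    f()
}

func main() {
    assertPanics("example", "boom", func() { panic("boom") })
}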
diff --git a/src/sync/once.go b/src/sync/once.go
index ca04408224..bf4b80c867 100644
--- a/src/sync/once.go
+++ b/src/sync/once.go
@@ -9,6 +9,8 @@ import (
 )
 
 // Once is an object that will perform exactly one action.
+//
+// A Once must not be copied after first use.
 type Once struct {
     // done indicates whether the action has been performed.
     // It is first in the struct because it is used in the hot path.
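
The new doc sentence is load-bearing: Once's done flag lives in the struct, so copying a Once after its first use copies an already-set flag, and Do on the copy silently does nothing. A minimal sketch of the misuse being warned against (go vet's copylocks check also reports the copy, since Once contains a Mutex):

package main

import (
    "fmt"
    "sync"
)

func main() {
    var once sync.Once
    once.Do(func() { fmt.Println("runs exactly once") })

    // Bad: the copy carries the already-set done flag, so this
    // Do call is silently a no-op.
    copied := once
    copied.Do(func() { fmt.Println("never printed") })
}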
diff --git a/src/sync/pool.go b/src/sync/pool.go
index ca7afdb12f..1ae70127ac 100644
--- a/src/sync/pool.go
+++ b/src/sync/pool.go
@@ -152,8 +152,8 @@ func (p *Pool) Get() interface{} {
 
 func (p *Pool) getSlow(pid int) interface{} {
     // See the comment in pin regarding ordering of the loads.
-    size := atomic.LoadUintptr(&p.localSize) // load-acquire
-    locals := p.local                        // load-consume
+    size := runtime_LoadAcquintptr(&p.localSize) // load-acquire
+    locals := p.local                            // load-consume
     // Try to steal one element from other procs.
     for i := 0; i < int(size); i++ {
         l := indexLocal(locals, (pid+i+1)%int(size))
@@ -198,8 +198,8 @@ func (p *Pool) pin() (*poolLocal, int) {
     // Since we've disabled preemption, GC cannot happen in between.
     // Thus here we must observe local at least as large localSize.
     // We can observe a newer/larger local, it is fine (we must observe its zero-initialized-ness).
-    s := atomic.LoadUintptr(&p.localSize) // load-acquire
-    l := p.local                          // load-consume
+    s := runtime_LoadAcquintptr(&p.localSize) // load-acquire
+    l := p.local                              // load-consume
     if uintptr(pid) < s {
         return indexLocal(l, pid), pid
     }
@@ -226,7 +226,7 @@ func (p *Pool) pinSlow() (*poolLocal, int) {
     size := runtime.GOMAXPROCS(0)
     local := make([]poolLocal, size)
     atomic.StorePointer(&p.local, unsafe.Pointer(&local[0])) // store-release
-    atomic.StoreUintptr(&p.localSize, uintptr(size))         // store-release
+    runtime_StoreReluintptr(&p.localSize, uintptr(size))     // store-release
     return &local[pid], pid
 }
 
@@ -282,3 +282,13 @@ func indexLocal(l unsafe.Pointer, i int) *poolLocal {
 func runtime_registerPoolCleanup(cleanup func())
 func runtime_procPin() int
 func runtime_procUnpin()
+
+// The below are implemented in runtime/internal/atomic and the
+// compiler also knows to intrinsify the symbol we linkname into this
+// package.
+
+//go:linkname runtime_LoadAcquintptr runtime/internal/atomic.LoadAcquintptr
+func runtime_LoadAcquintptr(ptr *uintptr) uintptr
+
+//go:linkname runtime_StoreReluintptr runtime/internal/atomic.StoreReluintptr
+func runtime_StoreReluintptr(ptr *uintptr, val uintptr) uintptr
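
These pool.go hunks swap the sequentially consistent atomic.LoadUintptr/StoreUintptr on p.localSize for acquire/release intrinsics linknamed from runtime/internal/atomic, which are cheaper on platforms that distinguish the orderings. The invariant they preserve is the one the comments describe: local is published before localSize, so any reader that observes a size also observes the initialized array. A minimal sketch of that publish/consume pattern using only the exported sync/atomic API (table, entry, publish, and index are illustrative names; note sync/atomic gives sequential consistency, which is stronger than the release/acquire ordering this CL switches to):

package main

import (
    "fmt"
    "sync/atomic"
    "unsafe"
)

// table mimics Pool's local/localSize pair.
type table struct {
    local     unsafe.Pointer // backing array, published first
    localSize uintptr        // element count, published second
}

type entry struct{ n int }

// publish stores the array pointer, then the size: a reader that
// observes the new size is guaranteed to observe the pointer too.
func (t *table) publish(entries []entry) {
    atomic.StorePointer(&t.local, unsafe.Pointer(&entries[0])) // store-release
    atomic.StoreUintptr(&t.localSize, uintptr(len(entries)))   // store-release
}

// index loads the size first, then the pointer, the same ordering
// getSlow and pin rely on above.
func (t *table) index(i int) *entry {
    size := atomic.LoadUintptr(&t.localSize) // load-acquire
    if uintptr(i) >= size {
        return nil
    }
    local := atomic.LoadPointer(&t.local) // load-consume
    return (*entry)(unsafe.Pointer(uintptr(local) + uintptr(i)*unsafe.Sizeof(entry{})))
}

func main() {
    t := &table{}
    t.publish([]entry{{n: 1}, {n: 2}, {n: 3}})
    fmt.Println(t.index(2).n) // prints 3
}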
diff --git a/src/sync/poolqueue.go b/src/sync/poolqueue.go
index 22f74969d9..9be83e9a43 100644
--- a/src/sync/poolqueue.go
+++ b/src/sync/poolqueue.go
@@ -57,7 +57,7 @@ const dequeueBits = 32
 // the index. We divide by 4 so this fits in an int on 32-bit.
 const dequeueLimit = (1 << dequeueBits) / 4
 
-// dequeueNil is used in poolDeqeue to represent interface{}(nil).
+// dequeueNil is used in poolDequeue to represent interface{}(nil).
 // Since we use nil to represent empty slots, we need a sentinel value
 // to represent nil.
 type dequeueNil *struct{}
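
The typo fix above touches the comment explaining poolDequeue's nil-boxing trick: empty slots read as nil, so a caller's real interface{}(nil) must be boxed as the dequeueNil sentinel on the way in and unboxed on the way out. A minimal sketch of that sentinel pattern for a single slot (put, take, and sentinelNil are illustrative names, not poolDequeue's API):

package main

import "fmt"

// sentinelNil boxes a caller's nil so the slot no longer reads as
// empty, mirroring dequeueNil above.
type sentinelNil *struct{}

func put(slot *interface{}, val interface{}) {
    if val == nil {
        val = sentinelNil(nil) // box nil: slot now looks occupied
    }
    *slot = val
}

func take(slot *interface{}) (interface{}, bool) {
    val := *slot
    if val == nil {
        return nil, false // truly empty slot
    }
    *slot = nil // mark the slot empty again
    if _, ok := val.(sentinelNil); ok {
        val = nil // unbox the sentinel back into a real nil
    }
    return val, true
}

func main() {
    var slot interface{}
    put(&slot, nil)
    v, ok := take(&slot)
    fmt.Println(v, ok) // <nil> true
}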