Diffstat (limited to 'libgo/go/sync/waitgroup.go')
-rw-r--r--  libgo/go/sync/waitgroup.go | 29
1 file changed, 28 insertions(+), 1 deletion(-)
diff --git a/libgo/go/sync/waitgroup.go b/libgo/go/sync/waitgroup.go
index bc9e738e784..9b0ffec58b3 100644
--- a/libgo/go/sync/waitgroup.go
+++ b/libgo/go/sync/waitgroup.go
@@ -4,7 +4,10 @@
package sync
-import "sync/atomic"
+import (
+ "sync/atomic"
+ "unsafe"
+)
// A WaitGroup waits for a collection of goroutines to finish.
// The main goroutine calls Add to set the number of
@@ -34,6 +37,11 @@ type WaitGroup struct {
// If the counter becomes zero, all goroutines blocked on Wait() are released.
// If the counter goes negative, Add panics.
func (wg *WaitGroup) Add(delta int) {
+ if raceenabled {
+ raceReleaseMerge(unsafe.Pointer(wg))
+ raceDisable()
+ defer raceEnable()
+ }
v := atomic.AddInt32(&wg.counter, int32(delta))
if v < 0 {
panic("sync: negative WaitGroup counter")
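The raceenabled guard and the race hooks called above are defined elsewhere in the sync package, not in this file. As a rough sketch only (assuming the same build-tag-selected stub layout as the gc sync package; the actual libgo file names and organization may differ), the non-race fallbacks reduce to no-ops:

// Hypothetical !race stub file, shown for illustration; not part of this patch.
package sync

import "unsafe"

const raceenabled = false

// With the race detector disabled, every hook is a no-op,
// so the annotations in Add and Wait cost nothing.
func raceAcquire(addr unsafe.Pointer)      {}
func raceReleaseMerge(addr unsafe.Pointer) {}
func raceDisable()                         {}
func raceEnable()                          {}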
@@ -57,7 +65,14 @@ func (wg *WaitGroup) Done() {
// Wait blocks until the WaitGroup counter is zero.
func (wg *WaitGroup) Wait() {
+ if raceenabled {
+ raceDisable()
+ }
if atomic.LoadInt32(&wg.counter) == 0 {
+ if raceenabled {
+ raceEnable()
+ raceAcquire(unsafe.Pointer(wg))
+ }
return
}
wg.m.Lock()
@@ -68,7 +83,15 @@ func (wg *WaitGroup) Wait() {
// to avoid missing an Add.
if atomic.LoadInt32(&wg.counter) == 0 {
atomic.AddInt32(&wg.waiters, -1)
+ if raceenabled {
+ raceEnable()
+ raceAcquire(unsafe.Pointer(wg))
+ raceDisable()
+ }
wg.m.Unlock()
+ if raceenabled {
+ raceEnable()
+ }
return
}
if wg.sema == nil {
@@ -77,4 +100,8 @@ func (wg *WaitGroup) Wait() {
s := wg.sema
wg.m.Unlock()
runtime_Semacquire(s)
+ if raceenabled {
+ raceEnable()
+ raceAcquire(unsafe.Pointer(wg))
+ }
}
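For context, a minimal usage sketch of the pattern these annotations model (not part of the patch): writes made before Done must be visible after Wait returns, and the raceReleaseMerge call in Add paired with the raceAcquire call in Wait reports exactly that happens-before edge to the detector.

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	results := make([]int, 4)
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done() // release point: pairs with the acquire in Wait
			results[i] = i * i
		}(i)
	}
	wg.Wait() // acquire point: the writes to results are ordered before this line
	fmt.Println(results)
}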