summary refs log tree commit diff
path: root/libgo/runtime/lock_sema.c
diff options
context:
space:
mode:
Diffstat (limited to 'libgo/runtime/lock_sema.c')
-rw-r--r-- libgo/runtime/lock_sema.c | 34
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/libgo/runtime/lock_sema.c b/libgo/runtime/lock_sema.c
index 8c4b3973bdc..2663c5463de 100644
--- a/libgo/runtime/lock_sema.c
+++ b/libgo/runtime/lock_sema.c
@@ -43,7 +43,7 @@ runtime_lock(Lock *l)
runtime_throw("runtime_lock: lock count");
// Speculative grab for lock.
- if(runtime_casp(&l->waitm, nil, (void*)LOCKED))
+ if(runtime_casp((void**)&l->key, nil, (void*)LOCKED))
return;
if(m->waitsema == 0)
@@ -56,10 +56,10 @@ runtime_lock(Lock *l)
spin = ACTIVE_SPIN;
for(i=0;; i++) {
- v = (uintptr)runtime_atomicloadp(&l->waitm);
+ v = (uintptr)runtime_atomicloadp((void**)&l->key);
if((v&LOCKED) == 0) {
unlocked:
- if(runtime_casp(&l->waitm, (void*)v, (void*)(v|LOCKED)))
+ if(runtime_casp((void**)&l->key, (void*)v, (void*)(v|LOCKED)))
return;
i = 0;
}
@@ -74,9 +74,9 @@ unlocked:
// Queue this M.
for(;;) {
m->nextwaitm = (void*)(v&~LOCKED);
- if(runtime_casp(&l->waitm, (void*)v, (void*)((uintptr)m|LOCKED)))
+ if(runtime_casp((void**)&l->key, (void*)v, (void*)((uintptr)m|LOCKED)))
break;
- v = (uintptr)runtime_atomicloadp(&l->waitm);
+ v = (uintptr)runtime_atomicloadp((void**)&l->key);
if((v&LOCKED) == 0)
goto unlocked;
}
@@ -99,15 +99,15 @@ runtime_unlock(Lock *l)
runtime_throw("runtime_unlock: lock count");
for(;;) {
- v = (uintptr)runtime_atomicloadp(&l->waitm);
+ v = (uintptr)runtime_atomicloadp((void**)&l->key);
if(v == LOCKED) {
- if(runtime_casp(&l->waitm, (void*)LOCKED, nil))
+ if(runtime_casp((void**)&l->key, (void*)LOCKED, nil))
break;
} else {
// Other M's are waiting for the lock.
// Dequeue an M.
mp = (void*)(v&~LOCKED);
- if(runtime_casp(&l->waitm, (void*)v, mp->nextwaitm)) {
+ if(runtime_casp((void**)&l->key, (void*)v, mp->nextwaitm)) {
// Dequeued an M. Wake it.
runtime_semawakeup(mp);
break;
@@ -120,7 +120,7 @@ runtime_unlock(Lock *l)
void
runtime_noteclear(Note *n)
{
- n->waitm = nil;
+ n->key = 0;
}
void
@@ -129,8 +129,8 @@ runtime_notewakeup(Note *n)
M *mp;
do
- mp = runtime_atomicloadp(&n->waitm);
- while(!runtime_casp(&n->waitm, mp, (void*)LOCKED));
+ mp = runtime_atomicloadp((void**)&n->key);
+ while(!runtime_casp((void**)&n->key, mp, (void*)LOCKED));
// Successfully set waitm to LOCKED.
// What was it before?
@@ -153,8 +153,8 @@ runtime_notesleep(Note *n)
m = runtime_m();
if(m->waitsema == 0)
m->waitsema = runtime_semacreate();
- if(!runtime_casp(&n->waitm, nil, m)) { // must be LOCKED (got wakeup)
- if(n->waitm != (void*)LOCKED)
+ if(!runtime_casp((void**)&n->key, nil, m)) { // must be LOCKED (got wakeup)
+ if(n->key != LOCKED)
runtime_throw("notesleep - waitm out of sync");
return;
}
@@ -183,8 +183,8 @@ runtime_notetsleep(Note *n, int64 ns)
m->waitsema = runtime_semacreate();
// Register for wakeup on n->waitm.
- if(!runtime_casp(&n->waitm, nil, m)) { // must be LOCKED (got wakeup already)
- if(n->waitm != (void*)LOCKED)
+ if(!runtime_casp((void**)&n->key, nil, m)) { // must be LOCKED (got wakeup already)
+ if(n->key != LOCKED)
runtime_throw("notetsleep - waitm out of sync");
return;
}
@@ -219,10 +219,10 @@ runtime_notetsleep(Note *n, int64 ns)
// so that any notewakeup racing with the return does not
// try to grant us the semaphore when we don't expect it.
for(;;) {
- mp = runtime_atomicloadp(&n->waitm);
+ mp = runtime_atomicloadp((void**)&n->key);
if(mp == m) {
// No wakeup yet; unregister if possible.
- if(runtime_casp(&n->waitm, mp, nil))
+ if(runtime_casp((void**)&n->key, mp, nil))
return;
} else if(mp == (M*)LOCKED) {
// Wakeup happened so semaphore is available.