Diffstat (limited to 'src/runtime/stack1.go')
 -rw-r--r--  src/runtime/stack1.go | 78
 1 file changed, 45 insertions(+), 33 deletions(-)
diff --git a/src/runtime/stack1.go b/src/runtime/stack1.go
index 1fd61ce1a..28000864d 100644
--- a/src/runtime/stack1.go
+++ b/src/runtime/stack1.go
@@ -58,7 +58,7 @@ func stackinit() {
// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
-func stackpoolalloc(order uint8) *mlink {
+func stackpoolalloc(order uint8) gclinkptr {
list := &stackpool[order]
s := list.next
if s == list {
@@ -70,23 +70,23 @@ func stackpoolalloc(order uint8) *mlink {
if s.ref != 0 {
gothrow("bad ref")
}
- if s.freelist != nil {
+ if s.freelist.ptr() != nil {
gothrow("bad freelist")
}
for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
- x := (*mlink)(unsafe.Pointer(uintptr(s.start)<<_PageShift + i))
- x.next = s.freelist
+ x := gclinkptr(uintptr(s.start)<<_PageShift + i)
+ x.ptr().next = s.freelist
s.freelist = x
}
mSpanList_Insert(list, s)
}
x := s.freelist
- if x == nil {
+ if x.ptr() == nil {
gothrow("span has no free stacks")
}
- s.freelist = x.next
+ s.freelist = x.ptr().next
s.ref++
- if s.freelist == nil {
+ if s.freelist.ptr() == nil {
// all stacks in s are allocated.
mSpanList_Remove(s)
}
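
The change from *mlink to gclinkptr is the point of this patch: free-list links threaded through stack memory are now held as bare addresses rather than typed pointers, so the garbage collector never mistakes a recycled stack block for a live object. For reference, the type reads along these lines in the runtime of this era (a sketch mirroring the runtime's mcache.go, not part of this diff):

import "unsafe"

// gclinkptr is the address of a free-list node, stored as a uintptr
// so the GC does not see it as a pointer and trace freed memory.
type gclinkptr uintptr

// gclink is the node layout overlaid on each free block.
type gclink struct {
	next gclinkptr
}

// ptr recovers a typed pointer for the brief moments the runtime
// needs to read or write the next field.
func (p gclinkptr) ptr() *gclink {
	return (*gclink)(unsafe.Pointer(p))
}

Every former nil test accordingly becomes a .ptr() != nil, and the nil store becomes 0, as in the s.freelist = 0 hunk below.
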
@@ -94,22 +94,22 @@ func stackpoolalloc(order uint8) *mlink {
}
// Adds stack x to the free pool. Must be called with stackpoolmu held.
-func stackpoolfree(x *mlink, order uint8) {
+func stackpoolfree(x gclinkptr, order uint8) {
s := mHeap_Lookup(&mheap_, (unsafe.Pointer)(x))
if s.state != _MSpanStack {
gothrow("freeing stack not in a stack span")
}
- if s.freelist == nil {
+ if s.freelist.ptr() == nil {
// s will now have a free stack
mSpanList_Insert(&stackpool[order], s)
}
- x.next = s.freelist
+ x.ptr().next = s.freelist
s.freelist = x
s.ref--
if s.ref == 0 {
// span is completely free - return to heap
mSpanList_Remove(s)
- s.freelist = nil
+ s.freelist = 0
mHeap_FreeStack(&mheap_, s)
}
}
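
stackpoolfree also makes the span lifecycle explicit: the ref count tracks blocks handed out, a fully-allocated span sits off the pool list until its first block returns, and a span whose count reaches zero goes back to the heap. The same bookkeeping in isolation, with span as a hypothetical stand-in for the runtime's mspan, using the gclinkptr type sketched above:

// span is a hypothetical stand-in for mspan.
type span struct {
	freelist gclinkptr // head of the free-block chain, 0 when empty
	ref      int       // blocks currently handed out
}

// free links block x back into s and reports whether the span is
// now completely unused, i.e. ready to be returned to the heap.
func (s *span) free(x gclinkptr) (nowEmpty bool) {
	x.ptr().next = s.freelist
	s.freelist = x
	s.ref--
	return s.ref == 0
}
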
@@ -123,12 +123,12 @@ func stackcacherefill(c *mcache, order uint8) {
// Grab some stacks from the global cache.
// Grab half of the allowed capacity (to prevent thrashing).
- var list *mlink
+ var list gclinkptr
var size uintptr
lock(&stackpoolmu)
for size < _StackCacheSize/2 {
x := stackpoolalloc(order)
- x.next = list
+ x.ptr().next = list
list = x
size += _FixedStack << order
}
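
stackcacherefill pulls blocks from the global pool one at a time until the per-M cache holds half of _StackCacheSize; stopping at half rather than full leaves hysteresis, so an alloc/free streak near the boundary does not bounce between cache and pool on every call. The shape of the loop, sketched with an assumed allocator argument standing in for stackpoolalloc:

// refillHalf chains blocks from a global source into a local list
// until the list holds targetBytes/2. alloc and blockSize stand in
// for stackpoolalloc and _FixedStack<<order.
func refillHalf(alloc func() gclinkptr, blockSize, targetBytes uintptr) gclinkptr {
	var list gclinkptr
	var size uintptr
	for size < targetBytes/2 {
		x := alloc()
		x.ptr().next = list // push onto the front of the local chain
		list = x
		size += blockSize
	}
	return list
}
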
@@ -145,7 +145,7 @@ func stackcacherelease(c *mcache, order uint8) {
size := c.stackcache[order].size
lock(&stackpoolmu)
for size > _StackCacheSize/2 {
- y := x.next
+ y := x.ptr().next
stackpoolfree(x, order)
x = y
size -= _FixedStack << order
@@ -162,12 +162,12 @@ func stackcache_clear(c *mcache) {
lock(&stackpoolmu)
for order := uint8(0); order < _NumStackOrders; order++ {
x := c.stackcache[order].list
- for x != nil {
- y := x.next
+ for x.ptr() != nil {
+ y := x.ptr().next
stackpoolfree(x, order)
x = y
}
- c.stackcache[order].list = nil
+ c.stackcache[order].list = 0
c.stackcache[order].size = 0
}
unlock(&stackpoolmu)
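
Note the list-walk idiom recurring in these hunks: the next link is read before the node is freed, because stackpoolfree immediately reuses the block's memory for the span's own chain. In isolation:

// drain frees every block on a gclinkptr chain. The link must be
// saved first, since free overwrites the block's next field.
func drain(head gclinkptr, free func(gclinkptr)) {
	for x := head; x.ptr() != nil; {
		y := x.ptr().next
		free(x)
		x = y
	}
}
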
@@ -207,7 +207,7 @@ func stackalloc(n uint32) stack {
order++
n2 >>= 1
}
- var x *mlink
+ var x gclinkptr
c := thisg.m.mcache
if c == nil || thisg.m.gcing != 0 || thisg.m.helpgc != 0 {
// c == nil can happen in the guts of exitsyscall or
@@ -219,11 +219,11 @@ func stackalloc(n uint32) stack {
unlock(&stackpoolmu)
} else {
x = c.stackcache[order].list
- if x == nil {
+ if x.ptr() == nil {
stackcacherefill(c, order)
x = c.stackcache[order].list
}
- c.stackcache[order].list = x.next
+ c.stackcache[order].list = x.ptr().next
c.stackcache[order].size -= uintptr(n)
}
v = (unsafe.Pointer)(x)
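
The allocation fast path is a plain pop from the per-M cache, falling back to a refill only when the list is empty; stackpoolmu is touched only inside the refill. Roughly, with stackCache as a hypothetical stand-in for c.stackcache[order]:

// stackCache is a hypothetical stand-in for mcache.stackcache[order].
type stackCache struct {
	list gclinkptr // free stacks of this order
	size uintptr   // total bytes on the list
}

// pop takes one n-byte block, refilling from the global pool first
// if the cache is empty.
func (c *stackCache) pop(refill func(*stackCache), n uintptr) gclinkptr {
	x := c.list
	if x.ptr() == nil {
		refill(c)
		x = c.list
	}
	c.list = x.ptr().next
	c.size -= n
	return x
}
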
@@ -270,7 +270,7 @@ func stackfree(stk stack) {
order++
n2 >>= 1
}
- x := (*mlink)(v)
+ x := gclinkptr(v)
c := gp.m.mcache
if c == nil || gp.m.gcing != 0 || gp.m.helpgc != 0 {
lock(&stackpoolmu)
@@ -280,7 +280,7 @@ func stackfree(stk stack) {
if c.stackcache[order].size >= _StackCacheSize {
stackcacherelease(c, order)
}
- x.next = c.stackcache[order].list
+ x.ptr().next = c.stackcache[order].list
c.stackcache[order].list = x
c.stackcache[order].size += n
}
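
stackfree mirrors that fast path: push onto the cache list, spilling half of the cache back to the global pool first when it has reached _StackCacheSize. Using the same hypothetical stackCache as above:

// push returns an n-byte block to the cache, draining to the global
// pool via release when the cache is at capacity.
func (c *stackCache) push(x gclinkptr, n, capacity uintptr, release func(*stackCache)) {
	if c.size >= capacity {
		release(c)
	}
	x.ptr().next = c.list
	c.list = x
	c.size += n
}
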
@@ -526,6 +526,7 @@ func fillstack(stk stack, b byte) {
}
// Copies gp's stack to a new stack of a different size.
+// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
if gp.syscallsp != 0 {
gothrow("stack growth not allowed in system call")
@@ -563,15 +564,11 @@ func copystack(gp *g, newsize uintptr) {
}
memmove(unsafe.Pointer(new.hi-used), unsafe.Pointer(old.hi-used), used)
- oldstatus := casgcopystack(gp) // cas from Gwaiting or Grunnable to Gcopystack, return old status
-
// Swap out old stack for new one
gp.stack = new
gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
gp.sched.sp = new.hi - used
- casgstatus(gp, _Gcopystack, oldstatus) // oldstatus is Gwaiting or Grunnable
-
// free old stack
if stackPoisonCopy != 0 {
fillstack(old, 0xfc)
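
Removing the status dance here makes copystack a pure memory operation and, per the new doc comment, moves the _Gcopystack transition out to the callers, each of which knows the correct surrounding states. Side by side, the two caller brackets this patch installs (excerpted from the newstack and shrinkstack hunks below):

// Grow path (newstack): the g is known to be in _Gwaiting here.
casgstatus(gp, _Gwaiting, _Gcopystack)
copystack(gp, uintptr(newsize))
casgstatus(gp, _Gcopystack, _Grunning)

// Shrink path (shrinkstack): the prior status varies, so
// casgcopystack records it and the caller restores it afterwards.
oldstatus := casgcopystack(gp)
copystack(gp, newsize)
casgstatus(gp, _Gcopystack, oldstatus)
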
@@ -669,6 +666,14 @@ func newstack() {
gothrow("runtime: split stack overflow")
}
+ if gp.sched.ctxt != nil {
+ // morestack wrote sched.ctxt on its way in here,
+ // without a write barrier. Run the write barrier now.
+ // It is not possible to be preempted between then
+ // and now, so it's okay.
+ writebarrierptr_nostore((*uintptr)(unsafe.Pointer(&gp.sched.ctxt)), uintptr(gp.sched.ctxt))
+ }
+
if gp.stackguard0 == stackPreempt {
if gp == thisg.m.g0 {
gothrow("runtime: preempt g0")
@@ -677,7 +682,12 @@ func newstack() {
gothrow("runtime: g is running but p is not")
}
if gp.preemptscan {
+ for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
+ // Likely to be racing with the GC as it sees a _Gwaiting and does the stack scan.
+ // If so this stack will be scanned twice which does not change correctness.
+ }
gcphasework(gp)
+ casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
casgstatus(gp, _Gwaiting, _Grunning)
gp.stackguard0 = gp.stack.lo + _StackGuard
gp.preempt = false
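
Before gcphasework runs, the g now has to win the scan bit: the spin CASes _Gwaiting to _Gscanwaiting, and losing a round just means the GC is scanning the same stack concurrently, which the new comment notes is harmless. The acquire/work/release shape, as a hypothetical wrapper (each _Gscan* state is the base state with the _Gscan bit set):

// withGscan is a hypothetical wrapper over the pattern above: spin
// until the scan bit is acquired, run fn, then drop the bit.
func withGscan(gp *g, status uint32, fn func(*g)) {
	for !castogscanstatus(gp, status, status|_Gscan) {
		// lost a CAS race, likely with the GC; retry
	}
	fn(gp)
	casfrom_Gscanstatus(gp, status|_Gscan, status)
}
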
@@ -708,13 +718,15 @@ func newstack() {
gothrow("stack overflow")
}
- // Note that the concurrent GC might be scanning the stack as we try to replace it.
- // copystack takes care of the appropriate coordination with the stack scanner.
+ casgstatus(gp, _Gwaiting, _Gcopystack)
+
+ // The concurrent GC will not scan the stack while we are doing the copy since
+ // the gp is in a Gcopystack status.
copystack(gp, uintptr(newsize))
if stackDebug >= 1 {
print("stack grow done\n")
}
- casgstatus(gp, _Gwaiting, _Grunning)
+ casgstatus(gp, _Gcopystack, _Grunning)
gogo(&gp.sched)
}
@@ -767,17 +779,17 @@ func shrinkstack(gp *g) {
if gp.syscallsp != 0 {
return
}
-
- /* TODO
- if goos_windows && gp.m != nil && gp.m.libcallsp != 0 {
+ if goos_windows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
return
}
- */
if stackDebug > 0 {
print("shrinking stack ", oldsize, "->", newsize, "\n")
}
+
+ oldstatus := casgcopystack(gp)
copystack(gp, newsize)
+ casgstatus(gp, _Gcopystack, oldstatus)
}
// Do any delayed stack freeing that was queued up during GC.
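
shrinkstack can be entered with the g in more than one state, which is why it keeps casgcopystack rather than a fixed transition like newstack's. From its call site and the comment deleted above ("cas from Gwaiting or Grunnable to Gcopystack, return old status"), its contract is roughly the following; this is inferred from the uses, not the verbatim implementation:

// casgcopystack, sketched from its call sites: CAS the g from
// whichever of _Gwaiting or _Grunnable it occupies into _Gcopystack,
// returning the old status so the caller can restore it afterwards.
func casgcopystack(gp *g) uint32 {
	for {
		oldstatus := readgstatus(gp) &^ _Gscan
		if oldstatus != _Gwaiting && oldstatus != _Grunnable {
			gothrow("copystack: bad status, not Gwaiting or Grunnable")
		}
		if cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
			return oldstatus
		}
	}
}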