| author | Russ Cox <rsc@golang.org> | 2014-10-03 12:22:19 -0400 |
|---|---|---|
| committer | Russ Cox <rsc@golang.org> | 2014-10-03 12:22:19 -0400 |
| commit | c5aca6fedbdd83d9153f6a00dc656aaabb0774c5 (patch) | |
| tree | 26952b971b307783cde4b126c0a133f62ac3c893 /src/runtime | |
| parent | 4eb6792aa572c7e6d3448d4cf22223b61b65724f (diff) | |
| parent | 338c7ea5df93e25ac4dc143970603a1e88b26124 (diff) | |
| download | go-c5aca6fedbdd83d9153f6a00dc656aaabb0774c5.tar.gz | |
[dev.garbage] merge default into dev.garbage
Diffstat (limited to 'src/runtime')
72 files changed, 759 insertions, 529 deletions
diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s
index 21065b6d6..1495246a2 100644
--- a/src/runtime/asm_386.s
+++ b/src/runtime/asm_386.s
@@ -646,15 +646,13 @@ TEXT gosave<>(SB),NOSPLIT,$0
 // Call fn(arg) on the scheduler stack,
 // aligned appropriately for the gcc ABI.
 // See cgocall.c for more details.
-TEXT runtime·asmcgocall(SB),NOSPLIT,$0-8
-	GO_ARGS
+TEXT ·asmcgocall(SB),NOSPLIT,$0-8
 	MOVL	fn+0(FP), AX
 	MOVL	arg+4(FP), BX
 	CALL	asmcgocall<>(SB)
 	RET
 
-TEXT runtime·asmcgocall_errno(SB),NOSPLIT,$0-12
-	GO_ARGS
+TEXT ·asmcgocall_errno(SB),NOSPLIT,$0-12
 	MOVL	fn+0(FP), AX
 	MOVL	arg+4(FP), BX
 	CALL	asmcgocall<>(SB)
@@ -714,8 +712,7 @@ TEXT runtime·cgocallback(SB),NOSPLIT,$12-12
 
 // cgocallback_gofunc(FuncVal*, void *frame, uintptr framesize)
 // See cgocall.c for more details.
-TEXT runtime·cgocallback_gofunc(SB),NOSPLIT,$12-12
-	GO_ARGS
+TEXT ·cgocallback_gofunc(SB),NOSPLIT,$12-12
 	NO_LOCAL_POINTERS
 
 	// If g is nil, Go did not create the current thread.
@@ -906,8 +903,6 @@ TEXT runtime·emptyfunc(SB),0,$0-0
 TEXT runtime·abort(SB),NOSPLIT,$0-0
 	INT	$0x3
 
-GLOBL runtime·tls0(SB), $32
-
 // hash function using AES hardware instructions
 TEXT runtime·aeshash(SB),NOSPLIT,$0-16
 	MOVL	p+0(FP), AX	// ptr to data
@@ -2280,3 +2275,13 @@ TEXT runtime·fastrand1(SB), NOSPLIT, $0-4
 TEXT runtime·return0(SB), NOSPLIT, $0
 	MOVL	$0, AX
 	RET
+
+// Called from cgo wrappers, this function returns g->m->curg.stack.hi.
+// Must obey the gcc calling convention.
+TEXT _cgo_topofstack(SB),NOSPLIT,$0
+	get_tls(CX)
+	MOVL	g(CX), AX
+	MOVL	g_m(AX), AX
+	MOVL	m_curg(AX), AX
+	MOVL	(g_stack+stack_hi)(AX), AX
+	RET
diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s
index da29f61ed..3f7f60841 100644
--- a/src/runtime/asm_amd64.s
+++ b/src/runtime/asm_amd64.s
@@ -623,15 +623,13 @@ TEXT gosave<>(SB),NOSPLIT,$0
 // Call fn(arg) on the scheduler stack,
 // aligned appropriately for the gcc ABI.
 // See cgocall.c for more details.
-TEXT runtime·asmcgocall(SB),NOSPLIT,$0-16
-	GO_ARGS
+TEXT ·asmcgocall(SB),NOSPLIT,$0-16
 	MOVQ	fn+0(FP), AX
 	MOVQ	arg+8(FP), BX
 	CALL	asmcgocall<>(SB)
 	RET
 
-TEXT runtime·asmcgocall_errno(SB),NOSPLIT,$0-20
-	GO_ARGS
+TEXT ·asmcgocall_errno(SB),NOSPLIT,$0-20
 	MOVQ	fn+0(FP), AX
 	MOVQ	arg+8(FP), BX
 	CALL	asmcgocall<>(SB)
@@ -700,8 +698,7 @@ TEXT runtime·cgocallback(SB),NOSPLIT,$24-24
 
 // cgocallback_gofunc(FuncVal*, void *frame, uintptr framesize)
 // See cgocall.c for more details.
-TEXT runtime·cgocallback_gofunc(SB),NOSPLIT,$8-24
-	GO_ARGS
+TEXT ·cgocallback_gofunc(SB),NOSPLIT,$8-24
 	NO_LOCAL_POINTERS
 
 	// If g is nil, Go did not create the current thread.
@@ -874,8 +871,6 @@ TEXT runtime·gocputicks(SB),NOSPLIT,$0-8
 	MOVQ	AX, ret+0(FP)
 	RET
 
-GLOBL runtime·tls0(SB), $64
-
 // hash function using AES hardware instructions
 TEXT runtime·aeshash(SB),NOSPLIT,$0-32
 	MOVQ	p+0(FP), AX	// ptr to data
@@ -2225,3 +2220,14 @@ TEXT runtime·fastrand1(SB), NOSPLIT, $0-4
 TEXT runtime·return0(SB), NOSPLIT, $0
 	MOVL	$0, AX
 	RET
+
+
+// Called from cgo wrappers, this function returns g->m->curg.stack.hi.
+// Must obey the gcc calling convention.
+TEXT _cgo_topofstack(SB),NOSPLIT,$0
+	get_tls(CX)
+	MOVQ	g(CX), AX
+	MOVQ	g_m(AX), AX
+	MOVQ	m_curg(AX), AX
+	MOVQ	(g_stack+stack_hi)(AX), AX
+	RET
diff --git a/src/runtime/asm_amd64p32.s b/src/runtime/asm_amd64p32.s
index bbbd886a5..13a164256 100644
--- a/src/runtime/asm_amd64p32.s
+++ b/src/runtime/asm_amd64p32.s
@@ -674,8 +674,6 @@ TEXT runtime·gocputicks(SB),NOSPLIT,$0-8
 	MOVQ	AX, ret+0(FP)
 	RET
 
-GLOBL runtime·tls0(SB), $64
-
 // hash function using AES hardware instructions
 // For now, our one amd64p32 system (NaCl) does not
 // support using AES instructions, so have not bothered to
diff --git a/src/runtime/asm_arm.s b/src/runtime/asm_arm.s
index 3e78d9114..36fb022f9 100644
--- a/src/runtime/asm_arm.s
+++ b/src/runtime/asm_arm.s
@@ -96,7 +96,7 @@ TEXT runtime·breakpoint(SB),NOSPLIT,$0-0
 #ifdef GOOS_nacl
 	WORD	$0xe125be7f	// BKPT 0x5bef, NACL_INSTR_ARM_BREAKPOINT
 #else
-	WORD	$0xe1200071	// BKPT 0x0001
+	WORD	$0xe7f001f0	// undefined instruction that gdb understands is a software breakpoint
 #endif
 	RET
@@ -480,15 +480,13 @@ TEXT gosave<>(SB),NOSPLIT,$0
 // Call fn(arg) on the scheduler stack,
 // aligned appropriately for the gcc ABI.
 // See cgocall.c for more details.
-TEXT runtime·asmcgocall(SB),NOSPLIT,$0-8
-	GO_ARGS
+TEXT ·asmcgocall(SB),NOSPLIT,$0-8
 	MOVW	fn+0(FP), R1
 	MOVW	arg+4(FP), R0
 	BL	asmcgocall<>(SB)
 	RET
 
-TEXT runtime·asmcgocall_errno(SB),NOSPLIT,$0-12
-	GO_ARGS
+TEXT ·asmcgocall_errno(SB),NOSPLIT,$0-12
 	MOVW	fn+0(FP), R1
 	MOVW	arg+4(FP), R0
 	BL	asmcgocall<>(SB)
@@ -551,8 +549,7 @@ TEXT runtime·cgocallback(SB),NOSPLIT,$12-12
 
 // cgocallback_gofunc(void (*fn)(void*), void *frame, uintptr framesize)
 // See cgocall.c for more details.
-TEXT runtime·cgocallback_gofunc(SB),NOSPLIT,$8-12
-	GO_ARGS
+TEXT ·cgocallback_gofunc(SB),NOSPLIT,$8-12
 	NO_LOCAL_POINTERS
 
 	// Load m and g from thread-local storage.
@@ -1303,3 +1300,20 @@ yieldloop:
 	RET
 	SUB	$1, R1
 	B	yieldloop
+
+// Called from cgo wrappers, this function returns g->m->curg.stack.hi.
+// Must obey the gcc calling convention.
+TEXT _cgo_topofstack(SB),NOSPLIT,$8
+	// R11 and g register are clobbered by load_g. They are
+	// callee-save in the gcc calling convention, so save them here.
+	MOVW	R11, saveR11-4(SP)
+	MOVW	g, saveG-8(SP)
+
+	BL	runtime·load_g(SB)
+	MOVW	g_m(g), R0
+	MOVW	m_curg(R0), R0
+	MOVW	(g_stack+stack_hi)(R0), R0
+
+	MOVW	saveG-8(SP), g
+	MOVW	saveR11-4(SP), R11
+	RET
diff --git a/src/runtime/cgo/callbacks.c b/src/runtime/cgo/callbacks.c
index 16614d03d..282beeea8 100644
--- a/src/runtime/cgo/callbacks.c
+++ b/src/runtime/cgo/callbacks.c
@@ -78,3 +78,6 @@ void (*_cgo_free)(void*) = x_cgo_free;
 #pragma cgo_import_static x_cgo_thread_start
 extern void x_cgo_thread_start(void*);
 void (*_cgo_thread_start)(void*) = x_cgo_thread_start;
+
+#pragma cgo_export_static _cgo_topofstack
+#pragma cgo_export_dynamic _cgo_topofstack
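The new `_cgo_topofstack`, exported to gcc-compiled code by the `cgo_export` pragmas above, hands cgo wrappers the top of the current goroutine's Go stack by walking `g -> g.m -> m.curg -> stack.hi`. A minimal Go sketch of that pointer chain, using mock types (`gRec`, `mRec`, and `topOfStack` are illustrative names, not runtime API):

```go
package main

import "fmt"

// Mock shapes of the runtime structures; field names mirror the runtime
// (m, curg, stack.hi), but the values below are made up for illustration.
type stack struct{ lo, hi uintptr }
type gRec struct {
	m   *mRec
	stk stack
}
type mRec struct{ curg *gRec }

// topOfStack mirrors the new assembly: g -> g_m -> m_curg -> g_stack+stack_hi.
func topOfStack(gp *gRec) uintptr {
	return gp.m.curg.stk.hi
}

func main() {
	curg := &gRec{stk: stack{lo: 0x1000, hi: 0x9000}}
	curg.m = &mRec{curg: curg}
	fmt.Printf("top of stack: %#x\n", topOfStack(curg))
}
```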
diff --git a/src/runtime/cgo/dragonfly.c b/src/runtime/cgo/dragonfly.c
index acf53e265..3c95ff354 100644
--- a/src/runtime/cgo/dragonfly.c
+++ b/src/runtime/cgo/dragonfly.c
@@ -2,11 +2,15 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+#include "textflag.h"
+
 // Supply environ and __progname, because we don't
 // link against the standard DragonFly crt0.o and the
 // libc dynamic library needs them.
 
+#pragma dataflag NOPTR
 char *environ[1];
+#pragma dataflag NOPTR
 char *__progname;
 
 #pragma dynexport environ environ
diff --git a/src/runtime/cgo/freebsd.c b/src/runtime/cgo/freebsd.c
index dfcfa3a21..aefc481e6 100644
--- a/src/runtime/cgo/freebsd.c
+++ b/src/runtime/cgo/freebsd.c
@@ -2,11 +2,15 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+#include "textflag.h"
+
 // Supply environ and __progname, because we don't
 // link against the standard FreeBSD crt0.o and the
 // libc dynamic library needs them.
 
+#pragma dataflag NOPTR
 char *environ[1];
+#pragma dataflag NOPTR
 char *__progname;
 
 #pragma dynexport environ environ
diff --git a/src/runtime/cgo/gcc_setenv.c b/src/runtime/cgo/gcc_setenv.c
index 8b128b946..af0fc5d8d 100644
--- a/src/runtime/cgo/gcc_setenv.c
+++ b/src/runtime/cgo/gcc_setenv.c
@@ -14,3 +14,10 @@ x_cgo_setenv(char **arg)
 {
 	setenv(arg[0], arg[1], 1);
 }
+
+/* Stub for calling unsetenv */
+void
+x_cgo_unsetenv(char *arg)
+{
+	unsetenv(arg);
+}
diff --git a/src/runtime/cgo/netbsd.c b/src/runtime/cgo/netbsd.c
index b6403f686..de38bb770 100644
--- a/src/runtime/cgo/netbsd.c
+++ b/src/runtime/cgo/netbsd.c
@@ -2,11 +2,15 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+#include "textflag.h"
+
 // Supply environ and __progname, because we don't
 // link against the standard NetBSD crt0.o and the
 // libc dynamic library needs them.
 
+#pragma dataflag NOPTR
 char *environ[1];
+#pragma dataflag NOPTR
 char *__progname;
 
 #pragma dynexport environ environ
diff --git a/src/runtime/cgo/openbsd.c b/src/runtime/cgo/openbsd.c
index 84e9f9eff..7c2b6c173 100644
--- a/src/runtime/cgo/openbsd.c
+++ b/src/runtime/cgo/openbsd.c
@@ -2,11 +2,15 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+#include "textflag.h"
+
 // Supply environ, __progname and __guard_local, because
 // we don't link against the standard OpenBSD crt0.o and
 // the libc dynamic library needs them.
 
+#pragma dataflag NOPTR
 char *environ[1];
+#pragma dataflag NOPTR
 char *__progname;
 long __guard_local;
diff --git a/src/runtime/cgo/setenv.c b/src/runtime/cgo/setenv.c
index ee529904f..76d88cbf1 100644
--- a/src/runtime/cgo/setenv.c
+++ b/src/runtime/cgo/setenv.c
@@ -5,6 +5,9 @@
 // +build darwin dragonfly freebsd linux netbsd openbsd
 
 #pragma cgo_import_static x_cgo_setenv
+#pragma cgo_import_static x_cgo_unsetenv
 
 void x_cgo_setenv(char**);
 void (*runtime·_cgo_setenv)(char**) = x_cgo_setenv;
+
+void x_cgo_unsetenv(char**);
+void (*runtime·_cgo_unsetenv)(char**) = x_cgo_unsetenv;
diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go
index a21474b01..7fd91469e 100644
--- a/src/runtime/cgocall.go
+++ b/src/runtime/cgocall.go
@@ -177,14 +177,22 @@ func cfree(p unsafe.Pointer) {
 // Call from C back to Go.
 //go:nosplit
 func cgocallbackg() {
-	if gp := getg(); gp != gp.m.curg {
+	gp := getg()
+	if gp != gp.m.curg {
 		println("runtime: bad g in cgocallback")
 		exit(2)
 	}
+
+	// entersyscall saves the caller's SP to allow the GC to trace the Go
+	// stack. However, since we're returning to an earlier stack frame and
+	// need to pair with the entersyscall() call made by cgocall, we must
+	// save syscall* and let reentersyscall restore them.
+	savedsp := unsafe.Pointer(gp.syscallsp)
+	savedpc := gp.syscallpc
 	exitsyscall() // coming out of cgo call
 	cgocallbackg1()
-	entersyscall() // going back to cgo call
+	// going back to cgo call
+	reentersyscall(savedpc, savedsp)
 }
 
 func cgocallbackg1() {
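The cgocall.go hunk above fixes pairing for nested cgo callbacks: `exitsyscall` invalidates the g's saved syscall SP/PC, so on the way back out the callback must restore the *outer* cgocall's values rather than capture fresh ones. A toy model of that save/restore discipline (illustrative types only, not the runtime API):

```go
package main

import "fmt"

// gState is a stand-in for the g's syscall bookkeeping fields.
type gState struct{ syscallsp, syscallpc uintptr }

// cgocallbackg saves the outer call's syscall frame, simulates
// exitsyscall/reentersyscall around the Go callback, and restores the
// saved values, matching the pattern in the diff above.
func cgocallbackg(gp *gState, callback func()) {
	savedsp, savedpc := gp.syscallsp, gp.syscallpc
	gp.syscallsp, gp.syscallpc = 0, 0 // exitsyscall: leaving syscall state
	callback()
	gp.syscallsp, gp.syscallpc = savedsp, savedpc // reentersyscall(savedpc, savedsp)
}

func main() {
	gp := &gState{syscallsp: 0xc000, syscallpc: 0x401234}
	cgocallbackg(gp, func() { fmt.Println("Go callback runs here") })
	fmt.Printf("restored: sp=%#x pc=%#x\n", gp.syscallsp, gp.syscallpc)
}
```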
diff --git a/src/runtime/crash_test.go b/src/runtime/crash_test.go
index d1577fb5f..783b4c48f 100644
--- a/src/runtime/crash_test.go
+++ b/src/runtime/crash_test.go
@@ -412,3 +412,91 @@ func main() {
 	runtime.Breakpoint()
 }
 `
+
+func TestGoexitInPanic(t *testing.T) {
+	// see issue 8774: this code used to trigger an infinite recursion
+	output := executeTest(t, goexitInPanicSource, nil)
+	want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!"
+	if !strings.HasPrefix(output, want) {
+		t.Fatalf("output does not start with %q:\n%s", want, output)
+	}
+}
+
+const goexitInPanicSource = `
+package main
+import "runtime"
+func main() {
+	go func() {
+		defer func() {
+			runtime.Goexit()
+		}()
+		panic("hello")
+	}()
+	runtime.Goexit()
+}
+`
+
+func TestPanicAfterGoexit(t *testing.T) {
+	// an uncaught panic should still work after goexit
+	output := executeTest(t, panicAfterGoexitSource, nil)
+	want := "panic: hello"
+	if !strings.HasPrefix(output, want) {
+		t.Fatalf("output does not start with %q:\n%s", want, output)
+	}
+}
+
+const panicAfterGoexitSource = `
+package main
+import "runtime"
+func main() {
+	defer func() {
+		panic("hello")
+	}()
+	runtime.Goexit()
+}
+`
+
+func TestRecoveredPanicAfterGoexit(t *testing.T) {
+	output := executeTest(t, recoveredPanicAfterGoexitSource, nil)
+	want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!"
+	if !strings.HasPrefix(output, want) {
+		t.Fatalf("output does not start with %q:\n%s", want, output)
+	}
+}
+
+const recoveredPanicAfterGoexitSource = `
+package main
+import "runtime"
+func main() {
+	defer func() {
+		defer func() {
+			r := recover()
+			if r == nil {
+				panic("bad recover")
+			}
+		}()
+		panic("hello")
+	}()
+	runtime.Goexit()
+}
+`
+
+func TestRecoverBeforePanicAfterGoexit(t *testing.T) {
+	// 1. defer a function that recovers
+	// 2. defer a function that panics
+	// 3. call goexit
+	// Goexit should run the #2 defer. Its panic
+	// should be caught by the #1 defer, and execution
+	// should resume in the caller. Like the Goexit
+	// never happened!
+	defer func() {
+		r := recover()
+		if r == nil {
+			panic("bad recover")
+		}
+	}()
+	defer func() {
+		panic("hello")
+	}()
+	runtime.Goexit()
+}
diff --git a/src/runtime/debug.go b/src/runtime/debug.go
index bb4bd60ed..4414dd55d 100644
--- a/src/runtime/debug.go
+++ b/src/runtime/debug.go
@@ -24,15 +24,29 @@ func UnlockOSThread()
 // The number of logical CPUs on the local machine can be queried with NumCPU.
 // This call will go away when the scheduler improves.
 func GOMAXPROCS(n int) int {
-	g := getg()
-	g.m.scalararg[0] = uintptr(n)
-	onM(gomaxprocs_m)
-	n = int(g.m.scalararg[0])
-	g.m.scalararg[0] = 0
-	return n
-}
+	if n > _MaxGomaxprocs {
+		n = _MaxGomaxprocs
+	}
+	lock(&sched.lock)
+	ret := int(gomaxprocs)
+	unlock(&sched.lock)
+	if n <= 0 || n == ret {
+		return ret
+	}
 
-func gomaxprocs_m() // proc.c
+	semacquire(&worldsema, false)
+	gp := getg()
+	gp.m.gcing = 1
+	onM(stoptheworld)
+
+	// newprocs will be processed by starttheworld
+	newprocs = int32(n)
+
+	gp.m.gcing = 0
+	semrelease(&worldsema)
+	onM(starttheworld)
+	return ret
+}
 
 // NumCPU returns the number of logical CPUs on the local machine.
 func NumCPU() int {
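The rewritten GOMAXPROCS (now in Go rather than a C thunk) only stops the world when the value actually changes; a query or a no-op set takes just `sched.lock` and returns early. From user code:

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	// n <= 0 queries the current setting without changing anything.
	cur := runtime.GOMAXPROCS(0)
	fmt.Println("current GOMAXPROCS:", cur)

	// Setting the same value hits the early-return path (n == ret),
	// so no stoptheworld/starttheworld cycle is needed.
	prev := runtime.GOMAXPROCS(cur)
	fmt.Println("previous value:", prev)
}
```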
diff --git a/src/runtime/debug/debug.c b/src/runtime/debug/debug.s
index a7292c477..a7292c477 100644
--- a/src/runtime/debug/debug.c
+++ b/src/runtime/debug/debug.s
diff --git a/src/runtime/defs_windows.go b/src/runtime/defs_windows.go
index 01aea92de..cb0f54d8a 100644
--- a/src/runtime/defs_windows.go
+++ b/src/runtime/defs_windows.go
@@ -49,7 +49,6 @@ const (
 	CONTEXT_FULL = C.CONTEXT_FULL
 
 	EXCEPTION_ACCESS_VIOLATION     = C.STATUS_ACCESS_VIOLATION
-	EXCEPTION_BREAKPOINT           = C.STATUS_BREAKPOINT
 	EXCEPTION_FLT_DENORMAL_OPERAND = C.STATUS_FLOAT_DENORMAL_OPERAND
 	EXCEPTION_FLT_DIVIDE_BY_ZERO   = C.STATUS_FLOAT_DIVIDE_BY_ZERO
 	EXCEPTION_FLT_INEXACT_RESULT   = C.STATUS_FLOAT_INEXACT_RESULT
diff --git a/src/runtime/defs_windows_386.h b/src/runtime/defs_windows_386.h
index db3629a1d..295e422c6 100644
--- a/src/runtime/defs_windows_386.h
+++ b/src/runtime/defs_windows_386.h
@@ -22,7 +22,6 @@ enum {
 	CONTEXT_FULL = 0x10007,
 
 	EXCEPTION_ACCESS_VIOLATION = 0xc0000005,
-	EXCEPTION_BREAKPOINT = 0x80000003,
 	EXCEPTION_FLT_DENORMAL_OPERAND = 0xc000008d,
 	EXCEPTION_FLT_DIVIDE_BY_ZERO = 0xc000008e,
 	EXCEPTION_FLT_INEXACT_RESULT = 0xc000008f,
diff --git a/src/runtime/defs_windows_amd64.h b/src/runtime/defs_windows_amd64.h
index fe26f5a84..2516c8412 100644
--- a/src/runtime/defs_windows_amd64.h
+++ b/src/runtime/defs_windows_amd64.h
@@ -22,7 +22,6 @@ enum {
 	CONTEXT_FULL = 0x10000b,
 
 	EXCEPTION_ACCESS_VIOLATION = 0xc0000005,
-	EXCEPTION_BREAKPOINT = 0x80000003,
 	EXCEPTION_FLT_DENORMAL_OPERAND = 0xc000008d,
 	EXCEPTION_FLT_DIVIDE_BY_ZERO = 0xc000008e,
 	EXCEPTION_FLT_INEXACT_RESULT = 0xc000008f,
diff --git a/src/runtime/env_posix.go b/src/runtime/env_posix.go
index 6c04f6cc7..dd57872d7 100644
--- a/src/runtime/env_posix.go
+++ b/src/runtime/env_posix.go
@@ -32,7 +32,8 @@ func gogetenv(key string) string {
 	return ""
 }
 
-var _cgo_setenv uintptr // pointer to C function
+var _cgo_setenv uintptr   // pointer to C function
+var _cgo_unsetenv uintptr // pointer to C function
 
 // Update the C environment if cgo is loaded.
 // Called from syscall.Setenv.
@@ -44,6 +45,16 @@ func syscall_setenv_c(k string, v string) {
 	asmcgocall(unsafe.Pointer(_cgo_setenv), unsafe.Pointer(&arg))
 }
 
+// Update the C environment if cgo is loaded.
+// Called from syscall.unsetenv.
+func syscall_unsetenv_c(k string) {
+	if _cgo_unsetenv == 0 {
+		return
+	}
+	arg := [1]unsafe.Pointer{cstring(k)}
+	asmcgocall(unsafe.Pointer(_cgo_unsetenv), unsafe.Pointer(&arg))
+}
+
 func cstring(s string) unsafe.Pointer {
 	p := make([]byte, len(s)+1)
 	sp := (*_string)(unsafe.Pointer(&s))
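`syscall_unsetenv_c` is the runtime half of unsetenv support: environment deletions made from Go now propagate to the C environment when cgo is loaded, via the `x_cgo_unsetenv` stub added in gcc_setenv.c above. From user code this surfaces as `os.Unsetenv` (added in the same release cycle):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	os.Setenv("DEMO_KEY", "value")
	// In a cgo-linked binary, this also reaches the x_cgo_unsetenv stub,
	// keeping C's environ consistent with the Go environment.
	os.Unsetenv("DEMO_KEY")
	fmt.Printf("after unset: %q\n", os.Getenv("DEMO_KEY"))
}
```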
diff --git a/src/runtime/error.go b/src/runtime/error.go
index 3ea93680c..0b40c702b 100644
--- a/src/runtime/error.go
+++ b/src/runtime/error.go
@@ -71,28 +71,6 @@ func (e errorString) Error() string {
 	return "runtime error: " + string(e)
 }
 
-// For calling from C.
-func newErrorString(s string, ret *interface{}) {
-	*ret = errorString(s)
-}
-
-// An errorCString represents a runtime error described by a single C string.
-// Not "type errorCString unsafe.Pointer" because of http://golang.org/issue/7084.
-// Not uintptr because we want to avoid an allocation if interfaces can't hold
-// uintptrs directly (and cstr _is_ a pointer).
-type errorCString struct{ cstr unsafe.Pointer }
-
-func (e errorCString) RuntimeError() {}
-
-func (e errorCString) Error() string {
-	return "runtime error: " + gostringnocopy((*byte)(e.cstr))
-}
-
-// For calling from C.
-func newErrorCString(s unsafe.Pointer, ret *interface{}) {
-	*ret = errorCString{s}
-}
-
 type stringer interface {
 	String() string
 }
diff --git a/src/runtime/heapdump.c b/src/runtime/heapdump.c
index 8bbc7d8a5..54b9666b5 100644
--- a/src/runtime/heapdump.c
+++ b/src/runtime/heapdump.c
@@ -59,6 +59,8 @@ static BitVector makeheapobjbv(byte *p, uintptr size);
 
 // fd to write the dump to.
 static uintptr	dumpfd;
+
+#pragma dataflag NOPTR /* tmpbuf not a heap pointer at least */
 static byte	*tmpbuf;
 static uintptr	tmpbufsize;
@@ -109,6 +111,7 @@ typedef struct TypeCacheBucket TypeCacheBucket;
 struct TypeCacheBucket {
 	Type *t[TypeCacheAssoc];
 };
+#pragma dataflag NOPTR /* only initialized and used while world is stopped */
 static TypeCacheBucket typecache[TypeCacheBuckets];
 
 // dump a uint64 in a varint format parseable by encoding/binary
@@ -737,33 +740,16 @@ mdump(void)
 	flush();
 }
 
-static void writeheapdump_m(void);
-
-#pragma textflag NOSPLIT
 void
-runtime∕debug·WriteHeapDump(uintptr fd)
-{
-	void (*fn)(void);
-
-	g->m->scalararg[0] = fd;
-	fn = writeheapdump_m;
-	runtime·onM(&fn);
-}
-
-static void
-writeheapdump_m(void)
+runtime·writeheapdump_m(void)
 {
 	uintptr fd;
 
 	fd = g->m->scalararg[0];
 	g->m->scalararg[0] = 0;
 
-	// Stop the world.
 	runtime·casgstatus(g->m->curg, Grunning, Gwaiting);
 	g->waitreason = runtime·gostringnocopy((byte*)"dumping heap");
-	runtime·semacquire(&runtime·worldsema, false);
-	g->m->gcing = 1;
-	runtime·stoptheworld();
 
 	// Update stats so we can dump them.
 	// As a side effect, flushes all the MCaches so the MSpan.freelist
@@ -784,13 +770,7 @@ writeheapdump_m(void)
 		tmpbufsize = 0;
 	}
 
-	// Start up the world again.
-	g->m->gcing = 0;
-	g->m->locks++;
-	runtime·semrelease(&runtime·worldsema);
-	runtime·starttheworld();
 	runtime·casgstatus(g->m->curg, Gwaiting, Grunning);
-	g->m->locks--;
 }
 
 // dumpint() the kind & offset of each field in an object.
diff --git a/src/runtime/malloc.c b/src/runtime/malloc.c
index cfb698ac2..b79c30b72 100644
--- a/src/runtime/malloc.c
+++ b/src/runtime/malloc.c
@@ -79,6 +79,8 @@ runtime·purgecachedstats(MCache *c)
 	h = &runtime·mheap;
 	mstats.heap_alloc += c->local_cachealloc;
 	c->local_cachealloc = 0;
+	mstats.tinyallocs += c->local_tinyallocs;
+	c->local_tinyallocs = 0;
 	mstats.nlookup += c->local_nlookup;
 	c->local_nlookup = 0;
 	h->largefree += c->local_largefree;
@@ -92,9 +94,10 @@ runtime·purgecachedstats(MCache *c)
 }
 
 // Size of the trailing by_size array differs between Go and C,
+// and all data after by_size is local to C, not exported to Go.
 // NumSizeClasses was changed, but we can not change Go struct because of backward compatibility.
 // sizeof_C_MStats is what C thinks about size of Go struct.
-uintptr runtime·sizeof_C_MStats = sizeof(MStats) - (NumSizeClasses - 61) * sizeof(mstats.by_size[0]);
+uintptr runtime·sizeof_C_MStats = offsetof(MStats, by_size[61]);
 
 #define MaxArena32 (2U<<30)
@@ -326,29 +329,6 @@ runtime·MHeap_SysAlloc(MHeap *h, uintptr n)
 	return p;
 }
 
-// Runtime stubs.
-
-static void*
-cnew(Type *typ, intgo n)
-{
-	if(n < 0 || (typ->size > 0 && n > MaxMem/typ->size))
-		runtime·panicstring("runtime: allocation size out of range");
-	return runtime·mallocgc(typ->size*n, typ, typ->kind&KindNoPointers ? FlagNoScan : 0);
-}
-
-// same as runtime·new, but callable from C
-void*
-runtime·cnew(Type *typ)
-{
-	return cnew(typ, 1);
-}
-
-void*
-runtime·cnewarray(Type *typ, intgo n)
-{
-	return cnew(typ, n);
-}
-
 void
 runtime·setFinalizer_m(void)
 {
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index acf6b48f8..fc22cc29e 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -103,7 +103,6 @@ func mallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer {
 			// standalone escaping variables. On a json benchmark
 			// the allocator reduces number of allocations by ~12% and
 			// reduces heap size by ~20%.
-
 			tinysize := uintptr(c.tinysize)
 			if size <= tinysize {
 				tiny := unsafe.Pointer(c.tiny)
@@ -121,6 +120,7 @@ func mallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer {
 					x = tiny
 					c.tiny = (*byte)(add(x, size))
 					c.tinysize -= uintptr(size1)
+					c.local_tinyallocs++
 					if debugMalloc {
 						mp := acquirem()
 						if mp.mallocing == 0 {
diff --git a/src/runtime/malloc.h b/src/runtime/malloc.h
index 413870c9f..520c783df 100644
--- a/src/runtime/malloc.h
+++ b/src/runtime/malloc.h
@@ -279,6 +279,8 @@ struct MStats
 		uint64	nmalloc;
 		uint64	nfree;
 	} by_size[NumSizeClasses];
+
+	uint64	tinyallocs;	// number of tiny allocations that didn't cause actual allocation; not exported to Go directly
 };
 
 #define mstats runtime·memstats
@@ -332,6 +334,7 @@ struct MCache
 	// See "Tiny allocator" comment in malloc.goc.
 	byte*	tiny;
 	uintptr	tinysize;
+	uintptr	local_tinyallocs; // number of tiny allocs not counted in other stats
 
 	// The rest is not accessed on every malloc.
 	MSpan*	alloc[NumSizeClasses];	// spans to allocate from
@@ -527,8 +530,6 @@ uintptr	runtime·sweepone(void);
 void	runtime·markspan(void *v, uintptr size, uintptr n, bool leftover);
 void	runtime·unmarkspan(void *v, uintptr size);
 void	runtime·purgecachedstats(MCache*);
-void*	runtime·cnew(Type*);
-void*	runtime·cnewarray(Type*, intgo);
 void	runtime·tracealloc(void*, uintptr, Type*);
 void	runtime·tracefree(void*, uintptr);
 void	runtime·tracegc(void);
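The tiny allocator packs several small, pointer-free objects into one 16-byte block, so those allocations never pass through the regular per-size-class counters; the new `local_tinyallocs` field is flushed into `mstats.tinyallocs` and folded into nmalloc/nfree so that `MemStats.Mallocs` and `Frees` stay meaningful. A small demonstration of the counters this plumbing feeds (the exact delta depends on the allocator, so treat the output as indicative):

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	var before, after runtime.MemStats
	runtime.ReadMemStats(&before)

	// 4-byte pointer-free allocations are candidates for the tiny allocator.
	sink := make([]*int32, 100000)
	for i := range sink {
		sink[i] = new(int32)
	}

	runtime.ReadMemStats(&after)
	fmt.Println("Mallocs delta:", after.Mallocs-before.Mallocs)
	_ = sink // keep the objects live until after the measurement
}
```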
diff --git a/src/runtime/mcache.c b/src/runtime/mcache.c
index bb1fc5403..5fdbe3266 100644
--- a/src/runtime/mcache.c
+++ b/src/runtime/mcache.c
@@ -13,7 +13,7 @@ extern volatile intgo runtime·MemProfileRate;
 
 // dummy MSpan that contains no free objects.
-static MSpan emptymspan;
+MSpan runtime·emptymspan;
 
 MCache*
 runtime·allocmcache(void)
@@ -27,7 +27,7 @@ runtime·allocmcache(void)
 	runtime·unlock(&runtime·mheap.lock);
 	runtime·memclr((byte*)c, sizeof(*c));
 	for(i = 0; i < NumSizeClasses; i++)
-		c->alloc[i] = &emptymspan;
+		c->alloc[i] = &runtime·emptymspan;
 
 	// Set first allocation sample size.
 	rate = runtime·MemProfileRate;
@@ -83,7 +83,7 @@ runtime·MCache_Refill(MCache *c, int32 sizeclass)
 	s = c->alloc[sizeclass];
 	if(s->freelist != nil)
 		runtime·throw("refill on a nonempty span");
-	if(s != &emptymspan)
+	if(s != &runtime·emptymspan)
 		s->incache = false;
 
 	// Get a new cached span from the central lists.
@@ -107,9 +107,9 @@ runtime·MCache_ReleaseAll(MCache *c)
 
 	for(i=0; i<NumSizeClasses; i++) {
 		s = c->alloc[i];
-		if(s != &emptymspan) {
+		if(s != &runtime·emptymspan) {
 			runtime·MCentral_UncacheSpan(&runtime·mheap.central[i].mcentral, s);
-			c->alloc[i] = &emptymspan;
+			c->alloc[i] = &runtime·emptymspan;
 		}
 	}
 }
diff --git a/src/runtime/mem.go b/src/runtime/mem.go
index 34391b2eb..438f22ec0 100644
--- a/src/runtime/mem.go
+++ b/src/runtime/mem.go
@@ -41,8 +41,8 @@ type MemStats struct {
 	OtherSys    uint64 // other system allocations
 
 	// Garbage collector statistics.
-	NextGC       uint64 // next run in HeapAlloc time (bytes)
-	LastGC       uint64 // last run in absolute time (ns)
+	NextGC       uint64 // next collection will happen when HeapAlloc ≥ this amount
+	LastGC       uint64 // end time of last collection (nanoseconds since 1970)
 	PauseTotalNs uint64
 	PauseNs      [256]uint64 // circular buffer of recent GC pause times, most recent at [(NumGC+255)%256]
 	NumGC        uint32
@@ -64,9 +64,44 @@ func init() {
 	var memStats MemStats
 	if sizeof_C_MStats != unsafe.Sizeof(memStats) {
 		println(sizeof_C_MStats, unsafe.Sizeof(memStats))
-		panic("MStats vs MemStatsType size mismatch")
+		gothrow("MStats vs MemStatsType size mismatch")
 	}
 }
 
 // ReadMemStats populates m with memory allocator statistics.
-func ReadMemStats(m *MemStats)
+func ReadMemStats(m *MemStats) {
+	// Have to acquire worldsema to stop the world,
+	// because stoptheworld can only be used by
+	// one goroutine at a time, and there might be
+	// a pending garbage collection already calling it.
+	semacquire(&worldsema, false)
+	gp := getg()
+	gp.m.gcing = 1
+	onM(stoptheworld)
+
+	gp.m.ptrarg[0] = noescape(unsafe.Pointer(m))
+	onM(readmemstats_m)
+
+	gp.m.gcing = 0
+	gp.m.locks++
+	semrelease(&worldsema)
+	onM(starttheworld)
+	gp.m.locks--
+}
+
+// Implementation of runtime/debug.WriteHeapDump
+func writeHeapDump(fd uintptr) {
+	semacquire(&worldsema, false)
+	gp := getg()
+	gp.m.gcing = 1
+	onM(stoptheworld)
+
+	gp.m.scalararg[0] = fd
+	onM(writeheapdump_m)
+
+	gp.m.gcing = 0
+	gp.m.locks++
+	semrelease(&worldsema)
+	onM(starttheworld)
+	gp.m.locks--
+}
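ReadMemStats is now implemented in Go and briefly stops the world for a consistent snapshot; the clarified field comments pin down what NextGC and LastGC actually mean. Reading them from user code:

```go
package main

import (
	"fmt"
	"runtime"
	"time"
)

func main() {
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms) // stops the world briefly for a consistent snapshot

	// NextGC: a collection triggers once HeapAlloc reaches this amount.
	fmt.Printf("HeapAlloc=%d NextGC=%d\n", ms.HeapAlloc, ms.NextGC)

	// LastGC: end time of the last collection, nanoseconds since 1970.
	if ms.LastGC != 0 {
		fmt.Println("last GC ended:", time.Unix(0, int64(ms.LastGC)))
	}
}
```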
diff --git a/src/runtime/mem_plan9.c b/src/runtime/mem_plan9.c
index 402869f39..d673d6f83 100644
--- a/src/runtime/mem_plan9.c
+++ b/src/runtime/mem_plan9.c
@@ -10,6 +10,7 @@
 #include "textflag.h"
 
 extern byte runtime·end[];
+#pragma dataflag NOPTR
 static byte *bloc = { runtime·end };
 static Mutex memlock;
diff --git a/src/runtime/mem_windows.c b/src/runtime/mem_windows.c
index 7bc028bf3..6ea992020 100644
--- a/src/runtime/mem_windows.c
+++ b/src/runtime/mem_windows.c
@@ -68,10 +68,22 @@ void
 runtime·SysUsed(void *v, uintptr n)
 {
 	void *r;
+	uintptr small;
 
 	r = runtime·stdcall4(runtime·VirtualAlloc, (uintptr)v, n, MEM_COMMIT, PAGE_READWRITE);
 	if(r != v)
 		runtime·throw("runtime: failed to commit pages");
+
+	// Commit failed. See SysUnused.
+	while(n > 0) {
+		small = n;
+		while(small >= 4096 && runtime·stdcall4(runtime·VirtualAlloc, (uintptr)v, small, MEM_COMMIT, PAGE_READWRITE) == nil)
+			small = (small / 2) & ~(4096-1);
+		if(small < 4096)
+			runtime·throw("runtime: failed to decommit pages");
+		v = (byte*)v + small;
+		n -= small;
+	}
 }
 
 void
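The mem_windows.c change retries a failed VirtualAlloc commit with progressively halved, page-aligned chunks instead of giving up on the first large request. The same strategy in Go, with a fake commit function standing in for VirtualAlloc (`commitAll` and its parameters are illustrative names, not runtime API):

```go
package main

import "fmt"

const pageSize = 4096

// commitAll retries in halved, page-aligned chunks, mirroring the loop
// added to runtime·SysUsed; commit is a stand-in for VirtualAlloc.
func commitAll(v, n uintptr, commit func(v, n uintptr) bool) error {
	for n > 0 {
		small := n
		for small >= pageSize && !commit(v, small) {
			small = (small / 2) &^ (pageSize - 1) // halve, keep page alignment
		}
		if small < pageSize {
			return fmt.Errorf("failed to commit pages at %#x", v)
		}
		v += small
		n -= small
	}
	return nil
}

func main() {
	// Fake allocator that rejects any single request above 8 pages.
	commit := func(v, n uintptr) bool { return n <= 8*pageSize }
	fmt.Println("err:", commitAll(0x10000, 64*pageSize, commit))
}
```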
diff --git a/src/runtime/mgc0.c b/src/runtime/mgc0.c
index b4cd3474d..39fae9bbe 100644
--- a/src/runtime/mgc0.c
+++ b/src/runtime/mgc0.c
@@ -119,7 +119,7 @@
 FinBlock*	runtime·finc;	// cache of free blocks
 static byte finptrmask[FinBlockSize/PtrSize/PointersPerByte];
 bool	runtime·fingwait;
 bool	runtime·fingwake;
-static FinBlock	*allfin;	// list of all blocks
+FinBlock	*runtime·allfin;	// list of all blocks
 
 BitVector	runtime·gcdatamask;
 BitVector	runtime·gcbssmask;
@@ -146,7 +146,8 @@ static void slottombits(byte*, Markbits*);
 void runtime·bgsweep(void);
 static FuncVal bgsweepv = {runtime·bgsweep};
 
-static struct {
+typedef struct WorkData WorkData;
+struct WorkData {
 	uint64	full;  // lock-free list of full blocks
 	uint64	empty; // lock-free list of empty blocks
 	byte	pad0[CacheLineSize]; // prevents false-sharing between full/empty and nproc/nwait
@@ -160,7 +161,8 @@
 	// Copy of mheap.allspans for marker or sweeper.
 	MSpan**	spans;
 	uint32	nspan;
-} work;
+};
+WorkData runtime·work;
 
 // Is address b in the known heap. If it doesn't have a valid gcmap
 // returns false. For example pointers into stacks will return false.
@@ -286,8 +288,7 @@
 	// but the object it shares the byte with is already marked,
 	// then all the possible concurrent updates are trying to set the same bit,
 	// so we can use a non-atomic update.
-	if((mbits->xbits&(bitMask|(bitMask<<gcBits))) != (bitBoundary|(bitBoundary<<gcBits)) ||
-		work.nproc == 1)
+	if((mbits->xbits&(bitMask|(bitMask<<gcBits))) != (bitBoundary|(bitBoundary<<gcBits)) || runtime·work.nproc == 1)
 		*mbits->bitp = mbits->xbits | (bitMarked<<mbits->shift);
 	else
 		runtime·atomicor8(mbits->bitp, bitMarked<<mbits->shift);
@@ -424,7 +425,7 @@
 	}
 
 	// If another proc wants a pointer, give it some.
-	if(work.nwait > 0 && wbuf->nobj > 4 && work.full == 0) {
+	if(runtime·work.nwait > 0 && wbuf->nobj > 4 && runtime·work.full == 0) {
 		wbuf = handoff(wbuf);
 	}
@@ -461,18 +462,18 @@ markroot(ParFor *desc, uint32 i)
 		break;
 
 	case RootFinalizers:
-		for(fb=allfin; fb; fb=fb->alllink)
+		for(fb=runtime·allfin; fb; fb=fb->alllink)
 			scanblock((byte*)fb->fin, fb->cnt*sizeof(fb->fin[0]), finptrmask);
 		break;
 
 	case RootSpans:
 		// mark MSpan.specials
 		sg = runtime·mheap.sweepgen;
-		for(spanidx=0; spanidx<work.nspan; spanidx++) {
+		for(spanidx=0; spanidx<runtime·work.nspan; spanidx++) {
 			Special *sp;
 			SpecialFinalizer *spf;
 
-			s = work.spans[spanidx];
+			s = runtime·work.spans[spanidx];
 			if(s->state != MSpanInUse)
 				continue;
 			if(s->sweepgen != sg) {
@@ -507,7 +508,7 @@
 		// needed only to output in traceback
 		status = runtime·readgstatus(gp); // We are not in a scan state
 		if((status == Gwaiting || status == Gsyscall) && gp->waitsince == 0)
-			gp->waitsince = work.tstart;
+			gp->waitsince = runtime·work.tstart;
 		// Shrink a stack if not much of it is being used.
 		runtime·shrinkstack(gp);
 		if(runtime·readgstatus(gp) == Gdead)
@@ -554,7 +555,7 @@
 	MCache *c;
 
 	if(b != nil)
-		runtime·lfstackpush(&work.full, &b->node);
+		runtime·lfstackpush(&runtime·work.full, &b->node);
 	b = nil;
 	c = g->m->mcache;
 	if(c->gcworkbuf != nil) {
@@ -562,7 +563,7 @@
 		c->gcworkbuf = nil;
 	}
 	if(b == nil)
-		b = (Workbuf*)runtime·lfstackpop(&work.empty);
+		b = (Workbuf*)runtime·lfstackpop(&runtime·work.empty);
 	if(b == nil) {
 		b = runtime·persistentalloc(sizeof(*b), CacheLineSize, &mstats.gc_sys);
 		b->nobj = 0;
@@ -585,7 +586,7 @@
 		c->gcworkbuf = b;
 		return;
 	}
-	runtime·lfstackpush(&work.empty, &b->node);
+	runtime·lfstackpush(&runtime·work.empty, &b->node);
 }
 
 // Get an partially empty work buffer from the mcache structure
@@ -619,7 +620,7 @@
 		runtime·throw("putpartial: c->gcworkbuf is not nil\n");
 
-	runtime·lfstackpush(&work.full, &b->node);
+	runtime·lfstackpush(&runtime·work.full, &b->node);
 }
 
 void
@@ -650,22 +651,22 @@
 	if(b != nil) {
 		if(b->nobj != 0)
 			runtime·printf("runtime:getfull: b->nobj=%D not 0.", b->nobj);
-		runtime·lfstackpush(&work.empty, &b->node);
+		runtime·lfstackpush(&runtime·work.empty, &b->node);
 	}
-	b = (Workbuf*)runtime·lfstackpop(&work.full);
-	if(b != nil || work.nproc == 1)
+	b = (Workbuf*)runtime·lfstackpop(&runtime·work.full);
+	if(b != nil || runtime·work.nproc == 1)
 		return b;
 
-	runtime·xadd(&work.nwait, +1);
+	runtime·xadd(&runtime·work.nwait, +1);
 	for(i=0;; i++) {
-		if(work.full != 0) {
-			runtime·xadd(&work.nwait, -1);
-			b = (Workbuf*)runtime·lfstackpop(&work.full);
+		if(runtime·work.full != 0) {
+			runtime·xadd(&runtime·work.nwait, -1);
+			b = (Workbuf*)runtime·lfstackpop(&runtime·work.full);
 			if(b != nil)
 				return b;
-			runtime·xadd(&work.nwait, +1);
+			runtime·xadd(&runtime·work.nwait, +1);
 		}
-		if(work.nwait == work.nproc)
+		if(runtime·work.nwait == runtime·work.nproc)
 			return nil;
 		if(i < 10) {
 			g->m->gcstats.nprocyield++;
@@ -696,7 +697,7 @@
 	g->m->gcstats.nhandoffcnt += n;
 
 	// Put b on full list - let first half of b get stolen.
-	runtime·lfstackpush(&work.full, &b->node);
+	runtime·lfstackpush(&runtime·work.full, &b->node);
 	return b1;
 }
@@ -856,6 +857,7 @@ runtime·gcphasework(G *gp)
 	gp->gcworkdone = true;
 }
 
+#pragma dataflag NOPTR
 static byte finalizer1[] = {
 	// Each Finalizer is 5 words, ptr ptr uintptr ptr ptr.
 	// Each byte describes 4 words.
@@ -890,8 +892,8 @@ runtime·queuefinalizer(byte *p, FuncVal *fn, uintptr nret, Type *fint, PtrType
 	if(runtime·finc == nil) {
 		runtime·finc = runtime·persistentalloc(FinBlockSize, 0, &mstats.gc_sys);
 		runtime·finc->cap = (FinBlockSize - sizeof(FinBlock)) / sizeof(Finalizer) + 1;
-		runtime·finc->alllink = allfin;
-		allfin = runtime·finc;
+		runtime·finc->alllink = runtime·allfin;
+		runtime·allfin = runtime·finc;
 		if(finptrmask[0] == 0) {
 			// Build pointer mask for Finalizer array in block.
 			// Check assumptions made in finalizer1 array above.
@@ -931,7 +933,7 @@ runtime·iterate_finq(void (*callback)(FuncVal*, byte*, uintptr, Type*, PtrType*
 	Finalizer *f;
 	uintptr i;
 
-	for(fb = allfin; fb; fb = fb->alllink) {
+	for(fb = runtime·allfin; fb; fb = fb->alllink) {
 		for(i = 0; i < fb->cnt; i++) {
 			f = &fb->fin[i];
 			callback(f->fn, f->arg, f->nret, f->fint, f->ot);
@@ -1155,8 +1157,8 @@ runtime·MSpan_Sweep(MSpan *s, bool preserve)
 
 // State of background runtime·sweep.
 // Protected by runtime·gclock.
-// Must match mgc0.go.
-struct
+typedef struct SweepData SweepData;
+struct SweepData
 {
 	G*	g;
 	bool	parked;
@@ -1165,7 +1167,8 @@ struct
 	uint32	nbgsweep;
 	uint32	npausesweep;
-} runtime·sweep;
+};
+SweepData runtime·sweep;
 
 // sweeps one span
 // returns number of pages returned to heap, or -1 if there is nothing to sweep
@@ -1182,12 +1185,12 @@ runtime·sweepone(void)
 	sg = runtime·mheap.sweepgen;
 	for(;;) {
 		idx = runtime·xadd(&runtime·sweep.spanidx, 1) - 1;
-		if(idx >= work.nspan) {
+		if(idx >= runtime·work.nspan) {
 			runtime·mheap.sweepdone = true;
 			g->m->locks--;
 			return -1;
 		}
-		s = work.spans[idx];
+		s = runtime·work.spans[idx];
 		if(s->state != MSpanInUse) {
 			s->sweepgen = sg;
 			continue;
@@ -1236,12 +1239,12 @@ runtime·gchelper(void)
 	gchelperstart();
 
 	// parallel mark for over gc roots
-	runtime·parfordo(work.markfor);
+	runtime·parfordo(runtime·work.markfor);
 	if(runtime·gcphase != GCscan) 
 		scanblock(nil, 0, nil); // blocks in getfull
-	nproc = work.nproc;  // work.nproc can change right after we increment work.ndone
-	if(runtime·xadd(&work.ndone, +1) == nproc-1)
-		runtime·notewakeup(&work.alldone);
+	nproc = runtime·work.nproc;  // work.nproc can change right after we increment work.ndone
+	if(runtime·xadd(&runtime·work.ndone, +1) == nproc-1)
+		runtime·notewakeup(&runtime·work.alldone);
 	g->m->traceback = 0;
 }
@@ -1361,6 +1364,7 @@ runtime·updatememstats(GCStats *stats)
 		mstats.by_size[i].nmalloc += runtime·mheap.nsmallfree[i];
 		smallfree += runtime·mheap.nsmallfree[i] * runtime·class_to_size[i];
 	}
+	mstats.nfree += mstats.tinyallocs;
 	mstats.nmalloc += mstats.nfree;
 
 	// Calculate derived stats.
@@ -1378,7 +1382,6 @@ struct gc_args
 };
 
 static void gc(struct gc_args *args);
-static void mgc(G *gp);
 
 int32
 runtime·readgogc(void)
@@ -1399,7 +1402,7 @@ runtime·gcinit(void)
 	if(sizeof(Workbuf) != WorkbufSize)
 		runtime·throw("runtime: size of Workbuf is suboptimal");
 
-	work.markfor = runtime·parforalloc(MaxGcproc);
+	runtime·work.markfor = runtime·parforalloc(MaxGcproc);
 	runtime·gcpercent = runtime·readgogc();
 	runtime·gcdatamask = unrollglobgcprog(runtime·gcdata, runtime·edata - runtime·data);
 	runtime·gcbssmask = unrollglobgcprog(runtime·gcbss, runtime·ebss - runtime·bss);
@@ -1436,7 +1439,7 @@ gc(struct gc_args *args)
 	g->m->traceback = 2;
 	t0 = args->start_time;
-	work.tstart = args->start_time;
+	runtime·work.tstart = args->start_time;
 
 	t1 = 0;
 	if(runtime·debug.gctrace)
@@ -1456,24 +1459,24 @@ gc(struct gc_args *args)
 	// Even if this is stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
 	runtime·lock(&runtime·mheap.lock);
 	// Free the old cached sweep array if necessary.
-	if(work.spans != nil && work.spans != runtime·mheap.allspans)
-		runtime·SysFree(work.spans, work.nspan*sizeof(work.spans[0]), &mstats.other_sys);
+	if(runtime·work.spans != nil && runtime·work.spans != runtime·mheap.allspans)
+		runtime·SysFree(runtime·work.spans, runtime·work.nspan*sizeof(runtime·work.spans[0]), &mstats.other_sys);
 	// Cache the current array for marking.
 	runtime·mheap.gcspans = runtime·mheap.allspans;
-	work.spans = runtime·mheap.allspans;
-	work.nspan = runtime·mheap.nspan;
+	runtime·work.spans = runtime·mheap.allspans;
+	runtime·work.nspan = runtime·mheap.nspan;
 	runtime·unlock(&runtime·mheap.lock);
 	oldphase = runtime·gcphase;
 
-	work.nwait = 0;
-	work.ndone = 0;
-	work.nproc = runtime·gcprocs(); 
+	runtime·work.nwait = 0;
+	runtime·work.ndone = 0;
+	runtime·work.nproc = runtime·gcprocs();
 	runtime·gcphase = GCmark;              //^^  vv
 
-	runtime·parforsetup(work.markfor, work.nproc, RootCount + runtime·allglen, nil, false, markroot);
-	if(work.nproc > 1) {
-		runtime·noteclear(&work.alldone);
-		runtime·helpgc(work.nproc);
+	runtime·parforsetup(runtime·work.markfor, runtime·work.nproc, RootCount + runtime·allglen, nil, false, markroot);
+	if(runtime·work.nproc > 1) {
+		runtime·noteclear(&runtime·work.alldone);
+		runtime·helpgc(runtime·work.nproc);
 	}
 
 	t2 = 0;
@@ -1481,7 +1484,7 @@ gc(struct gc_args *args)
 		t2 = runtime·nanotime();
 
 	gchelperstart();
-	runtime·parfordo(work.markfor);
+	runtime·parfordo(runtime·work.markfor);
 	scanblock(nil, 0, nil);
 
 	runtime·gcphase = oldphase;            //^^  vv
 	t3 = 0;
@@ -1489,8 +1492,8 @@ gc(struct gc_args *args)
 	if(runtime·debug.gctrace)
 		t3 = runtime·nanotime();
 
-	if(work.nproc > 1)
-		runtime·notesleep(&work.alldone);
+	if(runtime·work.nproc > 1)
+		runtime·notesleep(&runtime·work.alldone);
 
 	cachestats();
 	// next_gc calculation is tricky with concurrent sweep since we don't know size of live heap
@@ -1517,19 +1520,21 @@ gc(struct gc_args *args)
 	}
 	obj = mstats.nmalloc - mstats.nfree;
 
-	stats.nprocyield += work.markfor->nprocyield;
-	stats.nosyield += work.markfor->nosyield;
-	stats.nsleep += work.markfor->nsleep;
+	stats.nprocyield += runtime·work.markfor->nprocyield;
+	stats.nosyield += runtime·work.markfor->nosyield;
+	stats.nsleep += runtime·work.markfor->nsleep;
 
 	runtime·printf("gc%d(%d): %D+%D+%D+%D us, %D -> %D MB, %D (%D-%D) objects,"
+			" %d goroutines,"
 			" %d/%d/%d sweeps,"
 			" %D(%D) handoff, %D(%D) steal, %D/%D/%D yields\n",
-		mstats.numgc, work.nproc, (t1-t0)/1000, (t2-t1)/1000, (t3-t2)/1000, (t4-t3)/1000,
+		mstats.numgc, runtime·work.nproc, (t1-t0)/1000, (t2-t1)/1000, (t3-t2)/1000, (t4-t3)/1000,
 		heap0>>20, heap1>>20, obj,
 		mstats.nmalloc, mstats.nfree,
-		work.nspan, runtime·sweep.nbgsweep, runtime·sweep.npausesweep,
+		runtime·gcount(),
+		runtime·work.nspan, runtime·sweep.nbgsweep, runtime·sweep.npausesweep,
 		stats.nhandoff, stats.nhandoffcnt,
-		work.markfor->nsteal, work.markfor->nstealcnt,
+		runtime·work.markfor->nsteal, runtime·work.markfor->nstealcnt,
 		stats.nprocyield, stats.nosyield, stats.nsleep);
 	runtime·sweep.nbgsweep = runtime·sweep.npausesweep = 0;
 }
@@ -1538,14 +1543,14 @@ gc(struct gc_args *args)
 	// Even if this is still stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
 	runtime·lock(&runtime·mheap.lock);
 	// Free the old cached mark array if necessary.
-	if(work.spans != nil && work.spans != runtime·mheap.allspans)
-		runtime·SysFree(work.spans, work.nspan*sizeof(work.spans[0]), &mstats.other_sys);
+	if(runtime·work.spans != nil && runtime·work.spans != runtime·mheap.allspans)
+		runtime·SysFree(runtime·work.spans, runtime·work.nspan*sizeof(runtime·work.spans[0]), &mstats.other_sys);
 	// Cache the current array for sweeping.
 	runtime·mheap.gcspans = runtime·mheap.allspans;
 	runtime·mheap.sweepgen += 2;
 	runtime·mheap.sweepdone = false;
-	work.spans = runtime·mheap.allspans;
-	work.nspan = runtime·mheap.nspan;
+	runtime·work.spans = runtime·mheap.allspans;
+	runtime·work.nspan = runtime·mheap.nspan;
 	runtime·sweep.spanidx = 0;
 	runtime·unlock(&runtime·mheap.lock);
@@ -1573,32 +1578,14 @@
 extern uintptr runtime·sizeof_C_MStats;
 
 static void readmemstats_m(void);
 
-#pragma textflag NOSPLIT
 void
-runtime·ReadMemStats(MStats *stats)
-{
-	void (*fn)(void);
-
-	g->m->ptrarg[0] = stats;
-	fn = readmemstats_m;
-	runtime·onM(&fn);
-}
-
-static void
-readmemstats_m(void)
+runtime·readmemstats_m(void)
 {
 	MStats *stats;
 
 	stats = g->m->ptrarg[0];
 	g->m->ptrarg[0] = nil;
 
-	// Have to acquire worldsema to stop the world,
-	// because stoptheworld can only be used by
-	// one goroutine at a time, and there might be
-	// a pending garbage collection already calling it.
-	runtime·semacquire(&runtime·worldsema, false);
-	g->m->gcing = 1;
-	runtime·stoptheworld();
 
 	runtime·updatememstats(nil);
 	// Size of the trailing by_size array differs between Go and C,
 	// NumSizeClasses was changed, but we can not change Go struct because of backward compatibility.
@@ -1608,12 +1595,6 @@
 	stats->stacks_sys = stats->stacks_inuse;
 	stats->heap_inuse -= stats->stacks_inuse;
 	stats->heap_sys -= stats->stacks_inuse;
-
-	g->m->gcing = 0;
-	g->m->locks++;
-	runtime·semrelease(&runtime·worldsema);
-	runtime·starttheworld();
-	g->m->locks--;
 }
 
 static void readgcstats_m(void);
diff --git a/src/runtime/mheap.c b/src/runtime/mheap.c
index 902a5c71a..bb203d5ce 100644
--- a/src/runtime/mheap.c
+++ b/src/runtime/mheap.c
@@ -184,6 +184,8 @@ mheap_alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large)
 	// transfer stats from cache to global
 	mstats.heap_alloc += g->m->mcache->local_cachealloc;
 	g->m->mcache->local_cachealloc = 0;
+	mstats.tinyallocs += g->m->mcache->local_tinyallocs;
+	g->m->mcache->local_tinyallocs = 0;
 
 	s = MHeap_AllocSpanLocked(h, npage);
 	if(s != nil) {
@@ -465,6 +467,8 @@ mheap_free(MHeap *h, MSpan *s, int32 acct)
 	runtime·lock(&h->lock);
 	mstats.heap_alloc += g->m->mcache->local_cachealloc;
 	g->m->mcache->local_cachealloc = 0;
+	mstats.tinyallocs += g->m->mcache->local_tinyallocs;
+	g->m->mcache->local_tinyallocs = 0;
 	if(acct) {
 		mstats.heap_alloc -= s->npages<<PageShift;
 		mstats.heap_objects--;
diff --git a/src/runtime/os_windows.c b/src/runtime/os_windows.c
index 6c8f137ee..77f99062c 100644
--- a/src/runtime/os_windows.c
+++ b/src/runtime/os_windows.c
@@ -72,6 +72,7 @@
 extern void *runtime·WaitForSingleObject;
 extern void *runtime·WriteFile;
 extern void *runtime·timeBeginPeriod;
 
+#pragma dataflag NOPTR
 void *runtime·GetQueuedCompletionStatusEx;
 
 extern uintptr runtime·externalthreadhandlerp;
@@ -147,7 +148,7 @@ runtime·get_random_data(byte **rnd, int32 *rnd_len)
 void
 runtime·goenvs(void)
 {
-	extern Slice syscall·envs;
+	extern Slice runtime·envs;
 
 	uint16 *env;
 	String *s;
@@ -160,8 +161,8 @@ runtime·goenvs(void)
 	for(p=env; *p; n++)
 		p += runtime·findnullw(p)+1;
 
-	syscall·envs = runtime·makeStringSlice(n);
-	s = (String*)syscall·envs.array;
+	runtime·envs = runtime·makeStringSlice(n);
+	s = (String*)runtime·envs.array;
 
 	p = env;
 	for(i=0; i<n; i++) {
@@ -278,6 +279,8 @@ runtime·minit(void)
 void
 runtime·unminit(void)
 {
+	runtime·stdcall1(runtime·CloseHandle, (uintptr)g->m->thread);
+	g->m->thread = nil;
 }
 
 // Described in http://www.dcl.hpi.uni-potsdam.de/research/WRK/2007/08/getting-os-information-the-kuser_shared_data-structure/
@@ -287,7 +290,9 @@ typedef struct KSYSTEM_TIME {
 	int32	High2Time;
 } KSYSTEM_TIME;
 
+#pragma dataflag NOPTR
 const KSYSTEM_TIME* INTERRUPT_TIME	= (KSYSTEM_TIME*)0x7ffe0008;
+#pragma dataflag NOPTR
 const KSYSTEM_TIME* SYSTEM_TIME		= (KSYSTEM_TIME*)0x7ffe0014;
 
 static void badsystime(void);
@@ -498,6 +503,7 @@ runtime·ctrlhandler1(uint32 type)
 extern void runtime·dosigprof(Context *r, G *gp, M *mp);
 extern void runtime·profileloop(void);
+#pragma dataflag NOPTR
 static void *profiletimer;
 
 static void
diff --git a/src/runtime/os_windows_386.c b/src/runtime/os_windows_386.c
index 15a5ea5d1..e2ae8db27 100644
--- a/src/runtime/os_windows_386.c
+++ b/src/runtime/os_windows_386.c
@@ -24,8 +24,6 @@ runtime·dumpregs(Context *r)
 	runtime·printf("gs   %x\n", r->SegGs);
 }
 
-#define DBG_PRINTEXCEPTION_C 0x40010006
-
 // Called by sigtramp from Windows VEH handler.
 // Return value signals whether the exception has been handled (-1)
 // or should be made available to other handlers in the chain (0).
@@ -36,37 +34,11 @@ runtime·sighandler(ExceptionRecord *info, Context *r, G *gp)
 	uintptr *sp;
 	extern byte runtime·text[], runtime·etext[];
 
-	if(info->ExceptionCode == DBG_PRINTEXCEPTION_C) {
-		// This exception is intended to be caught by debuggers.
-		// There is a not-very-informational message like
-		// "Invalid parameter passed to C runtime function"
-		// sitting at info->ExceptionInformation[0] (a wchar_t*),
-		// with length info->ExceptionInformation[1].
-		// The default behavior is to ignore this exception,
-		// but somehow returning 0 here (meaning keep going)
-		// makes the program crash instead. Maybe Windows has no
-		// other handler registered? In any event, ignore it.
-		return -1;
-	}
-
 	// Only handle exception if executing instructions in Go binary
 	// (not Windows library code).
 	if(r->Eip < (uint32)runtime·text || (uint32)runtime·etext < r->Eip)
 		return 0;
 
-	switch(info->ExceptionCode) {
-	case EXCEPTION_BREAKPOINT:
-		// It is unclear whether this is needed, unclear whether it
-		// would work, and unclear how to test it. Leave out for now.
-		// This only handles breakpoint instructions written in the
-		// assembly sources, not breakpoints set by a debugger, and
-		// there are very few of the former.
-		//
-		// r->Eip--;	// because 8l generates 2 bytes for INT3
-		// return 0;
-		break;
-	}
-
 	if(gp != nil && runtime·issigpanic(info->ExceptionCode)) {
 		// Make it look like a call to the signal func.
 		// Have to pass arguments out of band since
diff --git a/src/runtime/os_windows_amd64.c b/src/runtime/os_windows_amd64.c
index 9a69d73c0..261880d45 100644
--- a/src/runtime/os_windows_amd64.c
+++ b/src/runtime/os_windows_amd64.c
@@ -32,8 +32,6 @@ runtime·dumpregs(Context *r)
 	runtime·printf("gs      %X\n", (uint64)r->SegGs);
 }
 
-#define DBG_PRINTEXCEPTION_C 0x40010006
-
 // Called by sigtramp from Windows VEH handler.
 // Return value signals whether the exception has been handled (-1)
 // or should be made available to other handlers in the chain (0).
@@ -44,34 +42,11 @@ runtime·sighandler(ExceptionRecord *info, Context *r, G *gp)
 	uintptr *sp;
 	extern byte runtime·text[], runtime·etext[];
 
-	if(info->ExceptionCode == DBG_PRINTEXCEPTION_C) {
-		// This exception is intended to be caught by debuggers.
-		// There is a not-very-informational message like
-		// "Invalid parameter passed to C runtime function"
-		// sitting at info->ExceptionInformation[0] (a wchar_t*),
-		// with length info->ExceptionInformation[1].
-		// The default behavior is to ignore this exception,
-		// but somehow returning 0 here (meaning keep going)
-		// makes the program crash instead. Maybe Windows has no
-		// other handler registered? In any event, ignore it.
-		return -1;
-	}
-
 	// Only handle exception if executing instructions in Go binary
 	// (not Windows library code).
 	if(r->Rip < (uint64)runtime·text || (uint64)runtime·etext < r->Rip)
 		return 0;
 
-	switch(info->ExceptionCode) {
-	case EXCEPTION_BREAKPOINT:
-		// It is unclear whether this is needed, unclear whether it
-		// would work, and unclear how to test it. Leave out for now.
-		// This only handles breakpoint instructions written in the
-		// assembly sources, not breakpoints set by a debugger, and
-		// there are very few of the former.
-		break;
-	}
-
 	if(gp != nil && runtime·issigpanic(info->ExceptionCode)) {
 		// Make it look like a call to the signal func.
 		// Have to pass arguments out of band since
diff --git a/src/runtime/panic.go b/src/runtime/panic.go
index 12c85e7ca..7eb2d6055 100644
--- a/src/runtime/panic.go
+++ b/src/runtime/panic.go
@@ -238,7 +238,8 @@ func deferreturn(arg0 uintptr) {
 }
 
 // Goexit terminates the goroutine that calls it. No other goroutine is affected.
-// Goexit runs all deferred calls before terminating the goroutine.
+// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
+// is not panic, however, any recover calls in those deferred functions will return nil.
 //
 // Calling Goexit from the main goroutine terminates that goroutine
 // without func main returning. Since func main has not returned,
@@ -246,11 +247,27 @@
 // If all other goroutines exit, the program crashes.
 func Goexit() {
 	// Run all deferred functions for the current goroutine.
+	// This code is similar to gopanic, see that implementation
+	// for detailed comments.
 	gp := getg()
-	for gp._defer != nil {
+	for {
 		d := gp._defer
+		if d == nil {
+			break
+		}
+		if d.started {
+			if d._panic != nil {
+				d._panic.aborted = true
+			}
+			gp._defer = d.link
+			freedefer(d)
+			continue
+		}
 		d.started = true
 		reflectcall(unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
+		if gp._defer != d {
+			gothrow("bad defer entry in Goexit")
+		}
 		gp._defer = d.link
 		freedefer(d)
 		// Note: we ignore recovers here because Goexit isn't a panic
@@ -280,6 +297,35 @@ func gopanic(e interface{}) {
 	if gp.m.curg != gp {
 		gothrow("panic on m stack")
 	}
+
+	// m.softfloat is set during software floating point.
+	// It increments m.locks to avoid preemption.
+	// We moved the memory loads out, so there shouldn't be
+	// any reason for it to panic anymore.
+	if gp.m.softfloat != 0 {
+		gp.m.locks--
+		gp.m.softfloat = 0
+		gothrow("panic during softfloat")
+	}
+	if gp.m.mallocing != 0 {
+		print("panic: ")
+		printany(e)
+		print("\n")
+		gothrow("panic during malloc")
+	}
+	if gp.m.gcing != 0 {
+		print("panic: ")
+		printany(e)
+		print("\n")
+		gothrow("panic during gc")
+	}
+	if gp.m.locks != 0 {
+		print("panic: ")
+		printany(e)
+		print("\n")
+		gothrow("panic holding locks")
+	}
+
 	var p _panic
 	p.arg = e
 	p.link = gp._panic
@@ -430,33 +476,3 @@ func gothrow(s string) {
 	dopanic(0)
 	*(*int)(nil) = 0 // not reached
 }
-
-func panicstring(s *int8) {
-	// m.softfloat is set during software floating point,
-	// which might cause a fault during a memory load.
-	// It increments m.locks to avoid preemption.
-	// If we're panicking, the software floating point frames
-	// will be unwound, so decrement m.locks as they would.
-	gp := getg()
-	if gp.m.softfloat != 0 {
-		gp.m.locks--
-		gp.m.softfloat = 0
-	}
-
-	if gp.m.mallocing != 0 {
-		print("panic: ", s, "\n")
-		gothrow("panic during malloc")
-	}
-	if gp.m.gcing != 0 {
-		print("panic: ", s, "\n")
-		gothrow("panic during gc")
-	}
-	if gp.m.locks != 0 {
-		print("panic: ", s, "\n")
-		gothrow("panic holding locks")
-	}
-
-	var err interface{}
-	newErrorCString(unsafe.Pointer(s), &err)
-	gopanic(err)
-}
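The Goexit rewrite gives it the same defer bookkeeping as gopanic, so a panic raised inside a deferred call while Goexit is unwinding behaves correctly and can be recovered by an earlier defer (the new crash_test.go cases above cover the unrecovered paths). A runnable illustration of both clarified behaviors:

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	done := make(chan struct{})
	go func() {
		defer close(done)
		defer func() {
			// Catches the panic raised by the defer below during Goexit.
			if r := recover(); r != nil {
				fmt.Println("recovered:", r)
			}
		}()
		defer func() {
			// Goexit is not a panic, so recover here returns nil.
			fmt.Println("recover during plain Goexit:", recover())
			panic("raised while unwinding Goexit")
		}()
		runtime.Goexit()
	}()
	<-done
	fmt.Println("main goroutine still running")
}
```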
diff --git a/src/runtime/proc.c b/src/runtime/proc.c
index 1f1044d1d..ea50ff43a 100644
--- a/src/runtime/proc.c
+++ b/src/runtime/proc.c
@@ -24,42 +24,6 @@
 //
 // Design doc at http://golang.org/s/go11sched.
 
-typedef struct Sched Sched;
-struct Sched {
-	Mutex	lock;
-
-	uint64	goidgen;
-
-	M*	midle;	 // idle m's waiting for work
-	int32	nmidle;	 // number of idle m's waiting for work
-	int32	nmidlelocked; // number of locked m's waiting for work
-	int32	mcount;	 // number of m's that have been created
-	int32	maxmcount;	// maximum number of m's allowed (or die)
-
-	P*	pidle;  // idle P's
-	uint32	npidle;
-	uint32	nmspinning;
-
-	// Global runnable queue.
-	G*	runqhead;
-	G*	runqtail;
-	int32	runqsize;
-
-	// Global cache of dead G's.
-	Mutex	gflock;
-	G*	gfree;
-	int32	ngfree;
-
-	uint32	gcwaiting;	// gc is waiting to run
-	int32	stopwait;
-	Note	stopnote;
-	uint32	sysmonwait;
-	Note	sysmonnote;
-	uint64	lastpoll;
-
-	int32	profilehz;	// cpu profiling rate
-};
-
 enum
 {
 	// Number of goroutine ids to grab from runtime·sched.goidgen to local per-P cache at once.
@@ -67,7 +31,7 @@ enum
 	GoidCacheBatch = 16,
 };
 
-Sched	runtime·sched;
+SchedT	runtime·sched;
 int32	runtime·gomaxprocs;
 uint32	runtime·needextram;
 bool	runtime·iscgo;
@@ -79,7 +43,7 @@
 M*	runtime·extram;
 P*	runtime·allp[MaxGomaxprocs+1];
 int8*	runtime·goos;
 int32	runtime·ncpu;
-static int32	newprocs;
+int32	runtime·newprocs;
 
 Mutex runtime·allglock;	// the following vars are protected by this lock or by stoptheworld
 G**	runtime·allg;
@@ -138,9 +102,9 @@ extern String runtime·buildVersion;
 #pragma cgo_export_static main
 
 // Filled in by dynamic linker when Cgo is available.
-void* _cgo_init;
-void* _cgo_malloc;
-void* _cgo_free;
+void (*_cgo_init)(void);
+void (*_cgo_malloc)(void);
+void (*_cgo_free)(void);
 
 // Copy for Go code.
 void* runtime·cgoMalloc;
@@ -159,7 +123,6 @@ runtime·schedinit(void)
 {
 	int32 n, procs;
 	byte *p;
-	Eface i;
 
 	// raceinit must be the first call to race detector.
 	// In particular, it must be done before mallocinit below calls racemapshadow.
@@ -168,17 +131,12 @@ runtime·schedinit(void)
 
 	runtime·sched.maxmcount = 10000;
 
+	runtime·tracebackinit();
 	runtime·symtabinit();
 	runtime·stackinit();
 	runtime·mallocinit();
 	mcommoninit(g->m);
 
-	// Initialize the itable value for newErrorCString,
-	// so that the next time it gets called, possibly
-	// in a fault during a garbage collection, it will not
-	// need to allocated memory.
-	runtime·newErrorCString(0, &i);
-
 	runtime·goargs();
 	runtime·goenvs();
 	runtime·parsedebugvars();
@@ -764,9 +722,9 @@ runtime·starttheworld(void)
 	injectglist(gp);
 	add = needaddgcproc();
 	runtime·lock(&runtime·sched.lock);
-	if(newprocs) {
-		procresize(newprocs);
-		newprocs = 0;
+	if(runtime·newprocs) {
+		procresize(runtime·newprocs);
+		runtime·newprocs = 0;
 	} else
 		procresize(runtime·gomaxprocs);
 	runtime·sched.gcwaiting = 0;
@@ -896,24 +854,19 @@ struct CgoThreadStart
 	void (*fn)(void);
 };
 
+M *runtime·newM(void);	// in proc.go
+
 // Allocate a new m unassociated with any thread.
 // Can use p for allocation context if needed.
 M*
 runtime·allocm(P *p)
 {
 	M *mp;
-	static Type *mtype;	// The Go type M
 
 	g->m->locks++;  // disable GC because it can be called from sysmon
 	if(g->m->p == nil)
 		acquirep(p);  // temporarily borrow p for mallocs in this function
-	if(mtype == nil) {
-		Eface e;
-		runtime·gc_m_ptr(&e);
-		mtype = ((PtrType*)e.type)->elem;
-	}
-
-	mp = runtime·cnew(mtype);
+	mp = runtime·newM();
 	mcommoninit(mp);
 
 	// In case of cgo or Solaris, pthread_create will make us a stack.
@@ -933,19 +886,12 @@ runtime·allocm(P *p)
 	return mp;
 }
 
+G *runtime·newG(void);	// in proc.go
+
 static G*
 allocg(void)
 {
-	G *gp;
-	static Type *gtype;
-
-	if(gtype == nil) {
-		Eface e;
-		runtime·gc_g_ptr(&e);
-		gtype = ((PtrType*)e.type)->elem;
-	}
-	gp = runtime·cnew(gtype);
-	return gp;
+	return runtime·newG();
 }
 
 static M* lockextra(bool nilokay);
@@ -1744,9 +1690,9 @@ goexit0(G *gp)
 #pragma textflag NOSPLIT
 static void
-save(void *pc, uintptr sp)
+save(uintptr pc, uintptr sp)
 {
-	g->sched.pc = (uintptr)pc;
+	g->sched.pc = pc;
 	g->sched.sp = sp;
 	g->sched.lr = 0;
 	g->sched.ret = 0;
@@ -1774,9 +1720,15 @@ static void entersyscall_gcwait(void);
 // In practice, this means that we make the fast path run through
 // entersyscall doing no-split things, and the slow path has to use onM
 // to run bigger things on the m stack.
+//
+// reentersyscall is the entry point used by cgo callbacks, where explicitly
+// saved SP and PC are restored. This is needed when exitsyscall will be called
+// from a function further up in the call stack than the parent, as g->syscallsp
+// must always point to a valid stack frame. entersyscall below is the normal
+// entry point for syscalls, which obtains the SP and PC from the caller.
 #pragma textflag NOSPLIT
 void
-·entersyscall(int32 dummy)
+runtime·reentersyscall(uintptr pc, uintptr sp)
 {
 	void (*fn)(void);
@@ -1792,9 +1744,9 @@
 	g->throwsplit = 1;
 
 	// Leave SP around for GC and traceback.
-	save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
-	g->syscallsp = g->sched.sp;
-	g->syscallpc = g->sched.pc;
+	save(pc, sp);
+	g->syscallsp = sp;
+	g->syscallpc = pc;
 	runtime·casgstatus(g, Grunning, Gsyscall);
 	if(g->syscallsp < g->stack.lo || g->stack.hi < g->syscallsp) {
 		fn = entersyscall_bad;
@@ -1804,7 +1756,7 @@
 	if(runtime·atomicload(&runtime·sched.sysmonwait)) {  // TODO: fast atomic
 		fn = entersyscall_sysmon;
 		runtime·onM(&fn);
-		save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
+		save(pc, sp);
 	}
 
 	g->m->mcache = nil;
@@ -1813,7 +1765,7 @@
 	if(runtime·sched.gcwaiting) {
 		fn = entersyscall_gcwait;
 		runtime·onM(&fn);
-		save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
+		save(pc, sp);
 	}
 
 	// Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched).
@@ -1823,6 +1775,14 @@
 	g->m->locks--;
 }
 
+// Standard syscall entry used by the go syscall library and normal cgo calls.
+#pragma textflag NOSPLIT
+void
+·entersyscall(int32 dummy)
+{
+	runtime·reentersyscall((uintptr)runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
+}
+
 static void
 entersyscall_bad(void)
 {
@@ -1870,7 +1830,7 @@
 	g->stackguard0 = StackPreempt;  // see comment in entersyscall
 
 	// Leave SP around for GC and traceback.
-	save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
+	save((uintptr)runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
 	g->syscallsp = g->sched.sp;
 	g->syscallpc = g->sched.pc;
 	runtime·casgstatus(g, Grunning, Gsyscall);
@@ -1883,7 +1843,7 @@
 		runtime·onM(&fn);
 
 	// Resave for traceback during blocked call.
-	save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
+	save((uintptr)runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
 
 	g->m->locks--;
 }
@@ -1900,12 +1860,15 @@
 // from the low-level system calls used by the runtime.
 #pragma textflag NOSPLIT
 void
-runtime·exitsyscall(void)
+·exitsyscall(int32 dummy)
 {
 	void (*fn)(G*);
 
 	g->m->locks++;  // see comment in entersyscall
 
+	if(runtime·getcallersp(&dummy) > g->syscallsp)
+		runtime·throw("exitsyscall: syscall frame is no longer valid");
+
 	g->waitsince = 0;
 	if(exitsyscallfast()) {
 		// There's a cpu for us, so we can run.
@@ -2199,11 +2162,11 @@ runtime·newproc1(FuncVal *fn, byte *argp, int32 narg, int32 nret, void *callerp
 	siz = narg + nret;
 	siz = (siz+7) & ~7;
 
-	// We could instead create a secondary stack frame
-	// and make it look like goexit was on the original but
-	// the call to the actual goroutine function was split.
+	// We could allocate a larger initial stack if necessary.
 	// Not worth it: this is almost always an error.
-	if(siz > StackMin - 1024)
+	// 4*sizeof(uintreg): extra space added below
+	// sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall).
+	if(siz >= StackMin - 4*sizeof(uintreg) - sizeof(uintreg))
 		runtime·throw("runtime.newproc: function arguments too large for new goroutine");
 
 	p = g->m->p;
@@ -2365,39 +2328,6 @@ runtime·Breakpoint(void)
 	runtime·breakpoint();
 }
 
-// Implementation of runtime.GOMAXPROCS.
-// delete when scheduler is even stronger
-void
-runtime·gomaxprocs_m(void)
-{
-	int32 n, ret;
-
-	n = g->m->scalararg[0];
-	g->m->scalararg[0] = 0;
-
-	if(n > MaxGomaxprocs)
-		n = MaxGomaxprocs;
-	runtime·lock(&runtime·sched.lock);
-	ret = runtime·gomaxprocs;
-	if(n <= 0 || n == ret) {
-		runtime·unlock(&runtime·sched.lock);
-		g->m->scalararg[0] = ret;
-		return;
-	}
-	runtime·unlock(&runtime·sched.lock);
-
-	runtime·semacquire(&runtime·worldsema, false);
-	g->m->gcing = 1;
-	runtime·stoptheworld();
-	newprocs = n;
-	g->m->gcing = 0;
-	runtime·semrelease(&runtime·worldsema);
-	runtime·starttheworld();
-
-	g->m->scalararg[0] = ret;
-	return;
-}
-
 // lockOSThread is called by runtime.LockOSThread and runtime.lockOSThread below
 // after they modify m->locked. Do not allow preemption during this call,
 // or else the m might be different in this function than in the caller.
diff --git a/src/runtime/proc.go b/src/runtime/proc.go index eefe8239f..b8ea62b05 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -215,6 +215,14 @@ func newP() *p { return new(p) } +func newM() *m { + return new(m) +} + +func newG() *g { + return new(g) +} + func allgadd(gp *g) { if readgstatus(gp) == _Gidle { gothrow("allgadd: bad status Gidle") diff --git a/src/runtime/rt0_linux_386.s b/src/runtime/rt0_linux_386.s index 74ddc94da..352e594d5 100644 --- a/src/runtime/rt0_linux_386.s +++ b/src/runtime/rt0_linux_386.s @@ -21,5 +21,5 @@ TEXT _fallback_vdso(SB),NOSPLIT,$0 RET DATA runtime·_vdso(SB)/4, $_fallback_vdso(SB) -GLOBL runtime·_vdso(SB), $4 +GLOBL runtime·_vdso(SB), NOPTR, $4 diff --git a/src/runtime/rt0_linux_arm.s b/src/runtime/rt0_linux_arm.s index 8af3d3505..5f521d24b 100644 --- a/src/runtime/rt0_linux_arm.s +++ b/src/runtime/rt0_linux_arm.s @@ -77,7 +77,7 @@ DATA bad_abi_msg+0x18(SB)/8, $" run on " DATA bad_abi_msg+0x20(SB)/8, $"EABI ker" DATA bad_abi_msg+0x28(SB)/4, $"nels" DATA bad_abi_msg+0x2c(SB)/1, $0xa -GLOBL bad_abi_msg(SB), $45 +GLOBL bad_abi_msg(SB), RODATA, $45 TEXT oabi_syscall<>(SB),NOSPLIT,$-4 ADD $1, PC, R4 diff --git a/src/runtime/rt0_plan9_386.s b/src/runtime/rt0_plan9_386.s index 7e2887b85..c451299ee 100644 --- a/src/runtime/rt0_plan9_386.s +++ b/src/runtime/rt0_plan9_386.s @@ -17,7 +17,7 @@ TEXT _rt0_386_plan9(SB),NOSPLIT,$12 CALL runtime·rt0_go(SB) DATA runtime·isplan9(SB)/4, $1 -GLOBL runtime·isplan9(SB), $4 -GLOBL _tos(SB), $4 -GLOBL _privates(SB), $4 -GLOBL _nprivates(SB), $4 +GLOBL runtime·isplan9(SB), NOPTR, $4 +GLOBL _tos(SB), NOPTR, $4 +GLOBL _privates(SB), NOPTR, $4 +GLOBL _nprivates(SB), NOPTR, $4 diff --git a/src/runtime/rt0_plan9_amd64.s b/src/runtime/rt0_plan9_amd64.s index a372a0ba8..ec2d9ec82 100644 --- a/src/runtime/rt0_plan9_amd64.s +++ b/src/runtime/rt0_plan9_amd64.s @@ -15,7 +15,7 @@ TEXT _rt0_amd64_plan9(SB),NOSPLIT,$24 JMP AX DATA runtime·isplan9(SB)/4, $1 -GLOBL runtime·isplan9(SB), $4 -GLOBL _tos(SB), $8 -GLOBL _privates(SB), $8 -GLOBL _nprivates(SB), $4 +GLOBL runtime·isplan9(SB), NOPTR, $4 +GLOBL _tos(SB), NOPTR, $8 +GLOBL _privates(SB), NOPTR, $8 +GLOBL _nprivates(SB), NOPTR, $4 diff --git a/src/runtime/rt0_solaris_amd64.s b/src/runtime/rt0_solaris_amd64.s index 92a9fc295..5997cbf8e 100644 --- a/src/runtime/rt0_solaris_amd64.s +++ b/src/runtime/rt0_solaris_amd64.s @@ -15,4 +15,4 @@ TEXT main(SB),NOSPLIT,$-8 JMP AX DATA runtime·issolaris(SB)/4, $1 -GLOBL runtime·issolaris(SB), $4 +GLOBL runtime·issolaris(SB), NOPTR, $4 diff --git a/src/runtime/rt0_windows_386.s b/src/runtime/rt0_windows_386.s index 00604372f..3c2deda90 100644 --- a/src/runtime/rt0_windows_386.s +++ b/src/runtime/rt0_windows_386.s @@ -17,4 +17,4 @@ TEXT main(SB),NOSPLIT,$0 DATA runtime·iswindows(SB)/4, $1 -GLOBL runtime·iswindows(SB), $4 +GLOBL runtime·iswindows(SB), NOPTR, $4 diff --git a/src/runtime/rt0_windows_amd64.s b/src/runtime/rt0_windows_amd64.s index 890a570d1..197f52e11 100644 --- a/src/runtime/rt0_windows_amd64.s +++ b/src/runtime/rt0_windows_amd64.s @@ -16,4 +16,4 @@ TEXT main(SB),NOSPLIT,$-8 JMP AX DATA runtime·iswindows(SB)/4, $1 -GLOBL runtime·iswindows(SB), $4 +GLOBL runtime·iswindows(SB), NOPTR, $4 diff --git a/src/runtime/runtime.c b/src/runtime/runtime.c index ae754dc5c..b3503fb90 100644 --- a/src/runtime/runtime.c +++ b/src/runtime/runtime.c @@ -62,10 +62,12 @@ runtime·mchr(byte *p, byte c, byte *ep) } static int32 argc; + +#pragma dataflag NOPTR /* argv not a heap pointer */ static uint8** argv; -Slice os·Args; -Slice syscall·envs; 
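The proc.go hunk above gives the C runtime typed allocators for its core structures: instead of runtime·cnew with an M or G Type dug out of reflection data, allocm and allocg now call into Go, where plain new carries full type information to the garbage collector. A toy version of the pattern; the struct fields here are stand-ins, not the real M/G layout:

package main

import "fmt"

// Toy stand-ins for the runtime's m and g structures.
type m struct{ id int64 }
type g struct{ goid int64 }

// newM and newG mirror the helpers added to proc.go: trivial wrappers
// around new, so the C side gets fully typed, GC-visible objects.
func newM() *m { return new(m) }
func newG() *g { return new(g) }

func main() {
	mp, gp := newM(), newG()
	fmt.Println(mp.id, gp.goid)
}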
+extern Slice runtime·argslice; +extern Slice runtime·envs; void (*runtime·sysargs)(int32, uint8**); @@ -97,8 +99,8 @@ runtime·goargs(void) if(Windows) return; - os·Args = runtime·makeStringSlice(argc); - s = (String*)os·Args.array; + runtime·argslice = runtime·makeStringSlice(argc); + s = (String*)runtime·argslice.array; for(i=0; i<argc; i++) s[i] = runtime·gostringnocopy(argv[i]); } @@ -112,8 +114,8 @@ runtime·goenvs_unix(void) for(n=0; argv[argc+1+n] != 0; n++) ; - syscall·envs = runtime·makeStringSlice(n); - s = (String*)syscall·envs.array; + runtime·envs = runtime·makeStringSlice(n); + s = (String*)runtime·envs.array; for(i=0; i<n; i++) s[i] = runtime·gostringnocopy(argv[argc+1+i]); } @@ -122,7 +124,7 @@ runtime·goenvs_unix(void) Slice runtime·environ() { - return syscall·envs; + return runtime·envs; } int32 @@ -267,10 +269,15 @@ runtime·check(void) #pragma dataflag NOPTR DebugVars runtime·debug; -static struct { +typedef struct DbgVar DbgVar; +struct DbgVar +{ int8* name; int32* value; -} dbgvar[] = { +}; + +#pragma dataflag NOPTR /* dbgvar has no heap pointers */ +static DbgVar dbgvar[] = { {"allocfreetrace", &runtime·debug.allocfreetrace}, {"efence", &runtime·debug.efence}, {"gctrace", &runtime·debug.gctrace}, @@ -287,18 +294,18 @@ runtime·parsedebugvars(void) intgo i, n; p = runtime·getenv("GODEBUG"); - if(p == nil) - return; - for(;;) { - for(i=0; i<nelem(dbgvar); i++) { - n = runtime·findnull((byte*)dbgvar[i].name); - if(runtime·mcmp(p, (byte*)dbgvar[i].name, n) == 0 && p[n] == '=') - *dbgvar[i].value = runtime·atoi(p+n+1); + if(p != nil){ + for(;;) { + for(i=0; i<nelem(dbgvar); i++) { + n = runtime·findnull((byte*)dbgvar[i].name); + if(runtime·mcmp(p, (byte*)dbgvar[i].name, n) == 0 && p[n] == '=') + *dbgvar[i].value = runtime·atoi(p+n+1); + } + p = runtime·strstr(p, (byte*)","); + if(p == nil) + break; + p++; } - p = runtime·strstr(p, (byte*)","); - if(p == nil) - break; - p++; } p = runtime·getenv("GOTRACEBACK"); diff --git a/src/runtime/runtime.go b/src/runtime/runtime.go index dbaea45a6..4e4e1d17a 100644 --- a/src/runtime/runtime.go +++ b/src/runtime/runtime.go @@ -9,6 +9,8 @@ var ticks struct { val uint64 } +var tls0 [8]uintptr // available storage for m0's TLS; not necessarily used; opaque to GC + // Note: Called by runtime/pprof in addition to runtime code. func tickspersecond() int64 { r := int64(atomicload64(&ticks.val)) @@ -47,3 +49,12 @@ func parforalloc(nthrmax uint32) *parfor { nthrmax: nthrmax, } } + +var envs []string +var argslice []string + +// called from syscall +func runtime_envs() []string { return envs } + +// called from os +func runtime_args() []string { return argslice } diff --git a/src/runtime/runtime.h b/src/runtime/runtime.h index 74d7ba4f5..36748c3a1 100644 --- a/src/runtime/runtime.h +++ b/src/runtime/runtime.h @@ -60,6 +60,7 @@ typedef struct SudoG SudoG; typedef struct Mutex Mutex; typedef struct M M; typedef struct P P; +typedef struct SchedT SchedT; typedef struct Note Note; typedef struct Slice Slice; typedef struct String String; @@ -434,6 +435,42 @@ enum { MaxGomaxprocs = 1<<8, }; +struct SchedT +{ + Mutex lock; + + uint64 goidgen; + + M* midle; // idle m's waiting for work + int32 nmidle; // number of idle m's waiting for work + int32 nmidlelocked; // number of locked m's waiting for work + int32 mcount; // number of m's that have been created + int32 maxmcount; // maximum number of m's allowed (or die) + + P* pidle; // idle P's + uint32 npidle; + uint32 nmspinning; + + // Global runnable queue. 
+ G* runqhead; + G* runqtail; + int32 runqsize; + + // Global cache of dead G's. + Mutex gflock; + G* gfree; + int32 ngfree; + + uint32 gcwaiting; // gc is waiting to run + int32 stopwait; + Note stopnote; + uint32 sysmonwait; + Note sysmonnote; + uint64 lastpoll; + + int32 profilehz; // cpu profiling rate +}; + // The m->locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread. // The low bit (LockExternal) is a boolean reporting whether any LockOSThread call is active. // External locks are not recursive; a second lock is silently ignored. @@ -729,6 +766,8 @@ extern DebugVars runtime·debug; extern uintptr runtime·maxstacksize; extern Note runtime·signote; extern ForceGCState runtime·forcegc; +extern SchedT runtime·sched; +extern int32 runtime·newprocs; /* * common functions and data @@ -778,7 +817,6 @@ void runtime·goenvs(void); void runtime·goenvs_unix(void); void* runtime·getu(void); void runtime·throw(int8*); -void runtime·panicstring(int8*); bool runtime·canpanic(G*); void runtime·prints(int8*); void runtime·printf(int8*, ...); @@ -816,6 +854,7 @@ void runtime·mpreinit(M*); void runtime·minit(void); void runtime·unminit(void); void runtime·signalstack(byte*, int32); +void runtime·tracebackinit(void); void runtime·symtabinit(void); Func* runtime·findfunc(uintptr); int32 runtime·funcline(Func*, uintptr, String*); @@ -876,6 +915,7 @@ void runtime·goexit(void); void runtime·asmcgocall(void (*fn)(void*), void*); int32 runtime·asmcgocall_errno(void (*fn)(void*), void*); void runtime·entersyscall(void); +void runtime·reentersyscall(uintptr, uintptr); void runtime·entersyscallblock(void); void runtime·exitsyscall(void); G* runtime·newproc1(FuncVal*, byte*, int32, int32, void*); @@ -1037,8 +1077,6 @@ void runtime·panicdivide(void); */ void runtime·printany(Eface); void runtime·newTypeAssertionError(String*, String*, String*, String*, Eface*); -void runtime·newErrorString(String, Eface*); -void runtime·newErrorCString(int8*, Eface*); void runtime·fadd64c(uint64, uint64, uint64*); void runtime·fsub64c(uint64, uint64, uint64*); void runtime·fmul64c(uint64, uint64, uint64*); diff --git a/src/runtime/runtime_test.go b/src/runtime/runtime_test.go index cffc9f7d3..1688364a8 100644 --- a/src/runtime/runtime_test.go +++ b/src/runtime/runtime_test.go @@ -157,8 +157,8 @@ var faultAddrs = []uint64{ // or else malformed. 
0xffffffffffffffff, 0xfffffffffffff001, - // no 0xffffffffffff0001; 0xffff0001 is mapped for 32-bit user space on OS X - // no 0xfffffffffff00001; 0xfff00001 is mapped for 32-bit user space sometimes on Linux + 0xffffffffffff0001, + 0xfffffffffff00001, 0xffffffffff000001, 0xfffffffff0000001, 0xffffffff00000001, @@ -182,26 +182,32 @@ func TestSetPanicOnFault(t *testing.T) { old := debug.SetPanicOnFault(true) defer debug.SetPanicOnFault(old) + nfault := 0 for _, addr := range faultAddrs { - testSetPanicOnFault(t, uintptr(addr)) + testSetPanicOnFault(t, uintptr(addr), &nfault) + } + if nfault == 0 { + t.Fatalf("none of the addresses faulted") } } -func testSetPanicOnFault(t *testing.T, addr uintptr) { +func testSetPanicOnFault(t *testing.T, addr uintptr, nfault *int) { if GOOS == "nacl" { t.Skip("nacl doesn't seem to fault on high addresses") } defer func() { - if err := recover(); err == nil { - t.Fatalf("did not find error in recover") + if err := recover(); err != nil { + *nfault++ } }() - var p *int - p = (*int)(unsafe.Pointer(addr)) - println(*p) - t.Fatalf("still here - should have faulted on address %#x", addr) + // The read should fault, except that sometimes we hit + // addresses that have had C or kernel pages mapped there + // readable by user code. So just log the content. + // If no addresses fault, we'll fail the test. + v := *(*byte)(unsafe.Pointer(addr)) + t.Logf("addr %#x: %#x\n", addr, v) } func eqstring_generic(s1, s2 string) bool { diff --git a/src/runtime/sema.go b/src/runtime/sema.go index 142d3082c..a42a29988 100644 --- a/src/runtime/sema.go +++ b/src/runtime/sema.go @@ -49,6 +49,11 @@ func asyncsemrelease(addr *uint32) { // Called from runtime. func semacquire(addr *uint32, profile bool) { + gp := getg() + if gp != gp.m.curg { + gothrow("semacquire not on the G stack") + } + // Easy case. if cansemacquire(addr) { return diff --git a/src/runtime/signals_darwin.h b/src/runtime/signals_darwin.h index 229b58590..8761e1bd9 100644 --- a/src/runtime/signals_darwin.h +++ b/src/runtime/signals_darwin.h @@ -2,12 +2,15 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +#include "textflag.h" + #define N SigNotify #define K SigKill #define T SigThrow #define P SigPanic #define D SigDefault +#pragma dataflag NOPTR SigTab runtime·sigtab[] = { /* 0 */ 0, "SIGNONE: no trap", /* 1 */ N+K, "SIGHUP: terminal line hangup", diff --git a/src/runtime/signals_dragonfly.h b/src/runtime/signals_dragonfly.h index 4d27e050d..07343a766 100644 --- a/src/runtime/signals_dragonfly.h +++ b/src/runtime/signals_dragonfly.h @@ -2,12 +2,15 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +#include "textflag.h" + #define N SigNotify #define K SigKill #define T SigThrow #define P SigPanic #define D SigDefault +#pragma dataflag NOPTR SigTab runtime·sigtab[] = { /* 0 */ 0, "SIGNONE: no trap", /* 1 */ N+K, "SIGHUP: terminal line hangup", diff --git a/src/runtime/signals_freebsd.h b/src/runtime/signals_freebsd.h index 8d45c50c3..39e0a947e 100644 --- a/src/runtime/signals_freebsd.h +++ b/src/runtime/signals_freebsd.h @@ -2,12 +2,15 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
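TestSetPanicOnFault above no longer insists that every probe address faults: some of them turn out to be mapped by the kernel or C runtime, so the test counts faults and fails only when none occur. The defer/recover counting idiom, condensed into a standalone sketch; the panicking closure stands in for a faulting read:

package main

import "fmt"

// probe runs f and reports whether it panicked, using the same
// defer/recover idiom as the reworked TestSetPanicOnFault.
func probe(f func()) (panicked bool) {
	defer func() {
		if recover() != nil {
			panicked = true
		}
	}()
	f()
	return false
}

func main() {
	nfault := 0
	for _, f := range []func(){
		func() { panic("fault") }, // stands in for a faulting read
		func() {},                 // stands in for a mapped address
	} {
		if probe(f) {
			nfault++
		}
	}
	if nfault == 0 {
		fmt.Println("FAIL: none of the probes faulted")
	} else {
		fmt.Printf("%d of 2 probes faulted\n", nfault)
	}
}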
+#include "textflag.h" + #define N SigNotify #define K SigKill #define T SigThrow #define P SigPanic #define D SigDefault +#pragma dataflag NOPTR SigTab runtime·sigtab[] = { /* 0 */ 0, "SIGNONE: no trap", /* 1 */ N+K, "SIGHUP: terminal line hangup", diff --git a/src/runtime/signals_linux.h b/src/runtime/signals_linux.h index 368afc1c8..374107609 100644 --- a/src/runtime/signals_linux.h +++ b/src/runtime/signals_linux.h @@ -2,12 +2,15 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +#include "textflag.h" + #define N SigNotify #define K SigKill #define T SigThrow #define P SigPanic #define D SigDefault +#pragma dataflag NOPTR SigTab runtime·sigtab[] = { /* 0 */ 0, "SIGNONE: no trap", /* 1 */ N+K, "SIGHUP: terminal line hangup", diff --git a/src/runtime/signals_nacl.h b/src/runtime/signals_nacl.h index 229b58590..8761e1bd9 100644 --- a/src/runtime/signals_nacl.h +++ b/src/runtime/signals_nacl.h @@ -2,12 +2,15 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +#include "textflag.h" + #define N SigNotify #define K SigKill #define T SigThrow #define P SigPanic #define D SigDefault +#pragma dataflag NOPTR SigTab runtime·sigtab[] = { /* 0 */ 0, "SIGNONE: no trap", /* 1 */ N+K, "SIGHUP: terminal line hangup", diff --git a/src/runtime/signals_netbsd.h b/src/runtime/signals_netbsd.h index 7140de86f..950a2fe62 100644 --- a/src/runtime/signals_netbsd.h +++ b/src/runtime/signals_netbsd.h @@ -2,12 +2,15 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +#include "textflag.h" + #define N SigNotify #define K SigKill #define T SigThrow #define P SigPanic #define D SigDefault +#pragma dataflag NOPTR SigTab runtime·sigtab[] = { /* 0 */ 0, "SIGNONE: no trap", /* 1 */ N+K, "SIGHUP: terminal line hangup", diff --git a/src/runtime/signals_openbsd.h b/src/runtime/signals_openbsd.h index 7140de86f..950a2fe62 100644 --- a/src/runtime/signals_openbsd.h +++ b/src/runtime/signals_openbsd.h @@ -2,12 +2,15 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +#include "textflag.h" + #define N SigNotify #define K SigKill #define T SigThrow #define P SigPanic #define D SigDefault +#pragma dataflag NOPTR SigTab runtime·sigtab[] = { /* 0 */ 0, "SIGNONE: no trap", /* 1 */ N+K, "SIGHUP: terminal line hangup", diff --git a/src/runtime/signals_plan9.h b/src/runtime/signals_plan9.h index 818f508cf..4ee8e542c 100644 --- a/src/runtime/signals_plan9.h +++ b/src/runtime/signals_plan9.h @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +#include "textflag.h" + #define N SigNotify #define K SigKill #define T SigThrow @@ -16,6 +18,7 @@ // If you add entries to this table, you must respect the prefix ordering // and also update the constant values is os_plan9.h. +#pragma dataflag NOPTR SigTab runtime·sigtab[] = { // Traps that we cannot be recovered. T, "sys: trap: debug exception", diff --git a/src/runtime/signals_solaris.h b/src/runtime/signals_solaris.h index c272cad29..1f0a65ea6 100644 --- a/src/runtime/signals_solaris.h +++ b/src/runtime/signals_solaris.h @@ -2,12 +2,15 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+#include "textflag.h" + #define N SigNotify #define K SigKill #define T SigThrow #define P SigPanic #define D SigDefault +#pragma dataflag NOPTR SigTab runtime·sigtab[] = { /* 0 */ 0, "SIGNONE: no trap", /* 1 */ N+K, "SIGHUP: hangup", diff --git a/src/runtime/stack.c b/src/runtime/stack.c index 143b645e4..8562b9407 100644 --- a/src/runtime/stack.c +++ b/src/runtime/stack.c @@ -32,8 +32,8 @@ enum // Stacks are assigned an order according to size. // order = log_2(size/FixedStack) // There is a free list for each order. -static MSpan stackpool[NumStackOrders]; -static Mutex stackpoolmu; +MSpan runtime·stackpool[NumStackOrders]; +Mutex runtime·stackpoolmu; // TODO: one lock per order? void @@ -45,7 +45,7 @@ runtime·stackinit(void) runtime·throw("cache size must be a multiple of page size"); for(i = 0; i < NumStackOrders; i++) - runtime·MSpanList_Init(&stackpool[i]); + runtime·MSpanList_Init(&runtime·stackpool[i]); } // Allocates a stack from the free pool. Must be called with @@ -58,7 +58,7 @@ poolalloc(uint8 order) MLink *x; uintptr i; - list = &stackpool[order]; + list = &runtime·stackpool[order]; s = list->next; if(s == list) { // no free stacks. Allocate another span worth. @@ -99,7 +99,7 @@ poolfree(MLink *x, uint8 order) runtime·throw("freeing stack not in a stack span"); if(s->freelist == nil) { // s will now have a free stack - runtime·MSpanList_Insert(&stackpool[order], s); + runtime·MSpanList_Insert(&runtime·stackpool[order], s); } x->next = s->freelist; s->freelist = x; @@ -127,14 +127,14 @@ stackcacherefill(MCache *c, uint8 order) // Grab half of the allowed capacity (to prevent thrashing). list = nil; size = 0; - runtime·lock(&stackpoolmu); + runtime·lock(&runtime·stackpoolmu); while(size < StackCacheSize/2) { x = poolalloc(order); x->next = list; list = x; size += FixedStack << order; } - runtime·unlock(&stackpoolmu); + runtime·unlock(&runtime·stackpoolmu); c->stackcache[order].list = list; c->stackcache[order].size = size; @@ -150,14 +150,14 @@ stackcacherelease(MCache *c, uint8 order) runtime·printf("stackcacherelease order=%d\n", order); x = c->stackcache[order].list; size = c->stackcache[order].size; - runtime·lock(&stackpoolmu); + runtime·lock(&runtime·stackpoolmu); while(size > StackCacheSize/2) { y = x->next; poolfree(x, order); x = y; size -= FixedStack << order; } - runtime·unlock(&stackpoolmu); + runtime·unlock(&runtime·stackpoolmu); c->stackcache[order].list = x; c->stackcache[order].size = size; } @@ -170,7 +170,7 @@ runtime·stackcache_clear(MCache *c) if(StackDebug >= 1) runtime·printf("stackcache clear\n"); - runtime·lock(&stackpoolmu); + runtime·lock(&runtime·stackpoolmu); for(order = 0; order < NumStackOrders; order++) { x = c->stackcache[order].list; while(x != nil) { @@ -181,7 +181,7 @@ runtime·stackcache_clear(MCache *c) c->stackcache[order].list = nil; c->stackcache[order].size = 0; } - runtime·unlock(&stackpoolmu); + runtime·unlock(&runtime·stackpoolmu); } Stack @@ -227,9 +227,9 @@ runtime·stackalloc(uint32 n) // procresize. Just get a stack from the global pool. // Also don't touch stackcache during gc // as it's flushed concurrently. 
- runtime·lock(&stackpoolmu); + runtime·lock(&runtime·stackpoolmu); x = poolalloc(order); - runtime·unlock(&stackpoolmu); + runtime·unlock(&runtime·stackpoolmu); } else { x = c->stackcache[order].list; if(x == nil) { @@ -289,9 +289,9 @@ runtime·stackfree(Stack stk) x = (MLink*)v; c = g->m->mcache; if(c == nil || g->m->gcing || g->m->helpgc) { - runtime·lock(&stackpoolmu); + runtime·lock(&runtime·stackpoolmu); poolfree(x, order); - runtime·unlock(&stackpoolmu); + runtime·unlock(&runtime·stackpoolmu); } else { if(c->stackcache[order].size >= StackCacheSize) stackcacherelease(c, order); @@ -463,7 +463,7 @@ adjustframe(Stkframe *frame, void *arg) StackMap *stackmap; int32 pcdata; BitVector bv; - uintptr targetpc; + uintptr targetpc, size, minsize; adjinfo = arg; targetpc = frame->continpc; @@ -486,27 +486,47 @@ adjustframe(Stkframe *frame, void *arg) if(pcdata == -1) pcdata = 0; // in prologue - // adjust local pointers - if((byte*)frame->varp != (byte*)frame->sp) { + // Adjust local variables if stack frame has been allocated. + size = frame->varp - frame->sp; + if(thechar != '6' && thechar != '8') + minsize = sizeof(uintptr); + else + minsize = 0; + if(size > minsize) { stackmap = runtime·funcdata(f, FUNCDATA_LocalsPointerMaps); - if(stackmap == nil) - runtime·throw("no locals info"); - if(stackmap->n <= 0) - runtime·throw("locals size info only"); + if(stackmap == nil || stackmap->n <= 0) { + runtime·printf("runtime: frame %s untyped locals %p+%p\n", runtime·funcname(f), (byte*)(frame->varp-size), size); + runtime·throw("missing stackmap"); + } + // Locals bitmap information, scan just the pointers in locals. + if(pcdata < 0 || pcdata >= stackmap->n) { + // don't know where we are + runtime·printf("runtime: pcdata is %d and %d locals stack map entries for %s (targetpc=%p)\n", + pcdata, stackmap->n, runtime·funcname(f), targetpc); + runtime·throw("bad symbol table"); + } bv = runtime·stackmapdata(stackmap, pcdata); + size = (bv.n * PtrSize) / BitsPerPointer; if(StackDebug >= 3) runtime·printf(" locals\n"); - adjustpointers((byte**)frame->varp - bv.n / BitsPerPointer, &bv, adjinfo, f); + adjustpointers((byte**)(frame->varp - size), &bv, adjinfo, f); } - // adjust inargs and outargs - if(frame->arglen != 0) { + + // Adjust arguments. 
+ if(frame->arglen > 0) { if(frame->argmap != nil) { bv = *frame->argmap; } else { stackmap = runtime·funcdata(f, FUNCDATA_ArgsPointerMaps); - if(stackmap == nil) { - runtime·printf("size %d\n", (int32)frame->arglen); - runtime·throw("no arg info"); + if(stackmap == nil || stackmap->n <= 0) { + runtime·printf("runtime: frame %s untyped args %p+%p\n", runtime·funcname(f), frame->argp, (uintptr)frame->arglen); + runtime·throw("missing stackmap"); + } + if(pcdata < 0 || pcdata >= stackmap->n) { + // don't know where we are + runtime·printf("runtime: pcdata is %d and %d args stack map entries for %s (targetpc=%p)\n", + pcdata, stackmap->n, runtime·funcname(f), targetpc); + runtime·throw("bad symbol table"); } bv = runtime·stackmapdata(stackmap, pcdata); } @@ -675,7 +695,7 @@ runtime·newstack(void) runtime·traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g); runtime·throw("runtime: wrong goroutine in newstack"); } - if(g->throwsplit) + if(g->m->curg->throwsplit) runtime·throw("runtime: stack split at bad time"); // The goroutine must be executing in order to call newstack, @@ -786,8 +806,16 @@ runtime·shrinkstack(G *gp) { uintptr used, oldsize, newsize; - if(runtime·readgstatus(gp) == Gdead) + if(runtime·readgstatus(gp) == Gdead) { + if(gp->stack.lo != 0) { + // Free whole stack - it will get reallocated + // if G is used again. + runtime·stackfree(gp->stack); + gp->stack.lo = 0; + gp->stack.hi = 0; + } return; + } if(gp->stack.lo == 0) runtime·throw("missing stack in shrinkstack"); @@ -799,7 +827,9 @@ runtime·shrinkstack(G *gp) if(used >= oldsize / 4) return; // still using at least 1/4 of the segment. - if(gp->syscallsp != 0) // TODO: can we handle this case? + // We can't copy the stack if we're in a syscall. + // The syscall might have pointers into the stack. + if(gp->syscallsp != 0) return; #ifdef GOOS_windows diff --git a/src/runtime/stack.h b/src/runtime/stack.h index b30e32216..f97dc4ed8 100644 --- a/src/runtime/stack.h +++ b/src/runtime/stack.h @@ -69,16 +69,19 @@ enum { #endif // Plan 9 #endif // Windows - // The amount of extra stack to allocate beyond the size - // needed for the single frame that triggered the split. - StackExtra = 2048, + // The minimum size of stack used by Go code + StackMin = 2048, - // The minimum stack segment size to allocate. - // If the amount needed for the splitting frame + StackExtra - // is less than this number, the stack will have this size instead. - StackMin = 8192, - StackSystemRounded = StackSystem + (-StackSystem & (StackMin-1)), - FixedStack = StackMin + StackSystemRounded, + // The minimum stack size to allocate. + // The hackery here rounds FixedStack0 up to a power of 2. + FixedStack0 = StackMin + StackSystem, + FixedStack1 = FixedStack0 - 1, + FixedStack2 = FixedStack1 | (FixedStack1 >> 1), + FixedStack3 = FixedStack2 | (FixedStack2 >> 2), + FixedStack4 = FixedStack3 | (FixedStack3 >> 4), + FixedStack5 = FixedStack4 | (FixedStack4 >> 8), + FixedStack6 = FixedStack5 | (FixedStack5 >> 16), + FixedStack = FixedStack6 + 1, // Functions that need frames bigger than this use an extra // instruction to do the stack split check, to avoid overflow diff --git a/src/runtime/stack_test.go b/src/runtime/stack_test.go index 3a0802a1c..652c72eee 100644 --- a/src/runtime/stack_test.go +++ b/src/runtime/stack_test.go @@ -71,10 +71,6 @@ func TestStackMem(t *testing.T) { // Test stack growing in different contexts. 
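The new stack.h constants round FixedStack0 up to a power of two entirely in the preprocessor, by smearing the highest set bit of FixedStack0 - 1 into every lower position and adding one. The same trick at run time, with a hypothetical 512-byte StackSystem reserve standing in for the OS-specific value:

package main

import "fmt"

// roundUpPow2 reproduces the FixedStack0..FixedStack6 constant dance
// from stack.h: subtract one, smear the top bit right, add one.
func roundUpPow2(n uint32) uint32 {
	n--
	n |= n >> 1
	n |= n >> 2
	n |= n >> 4
	n |= n >> 8
	n |= n >> 16
	return n + 1
}

func main() {
	// StackMin + StackSystem with a hypothetical 512-byte reserve.
	fmt.Println(roundUpPow2(2048 + 512)) // 4096
}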
func TestStackGrowth(t *testing.T) { - switch GOARCH { - case "386", "arm": - t.Skipf("skipping test on %q; see issue 8083", GOARCH) - } t.Parallel() var wg sync.WaitGroup diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go index ff443c4cd..c6a9cf9f5 100644 --- a/src/runtime/stubs.go +++ b/src/runtime/stubs.go @@ -119,6 +119,8 @@ func deferproc_m() func goexit_m() func startpanic_m() func dopanic_m() +func readmemstats_m() +func writeheapdump_m() // memclr clears n bytes starting at ptr. // in memclr_*.s @@ -146,10 +148,6 @@ func fastrand1() uint32 //go:noescape func memeq(a, b unsafe.Pointer, size uintptr) bool -// Code pointers for the nohash/noequal algorithms. Used for producing better error messages. -var nohashcode uintptr -var noequalcode uintptr - // noescape hides a pointer from escape analysis. noescape is // the identity function but escape analysis doesn't think the // output depends on the input. noescape is inlined and currently @@ -162,6 +160,7 @@ func noescape(p unsafe.Pointer) unsafe.Pointer { } func entersyscall() +func reentersyscall(pc uintptr, sp unsafe.Pointer) func entersyscallblock() func exitsyscall() diff --git a/src/runtime/sys_dragonfly_386.s b/src/runtime/sys_dragonfly_386.s index dd0e27e26..161eaec19 100644 --- a/src/runtime/sys_dragonfly_386.s +++ b/src/runtime/sys_dragonfly_386.s @@ -378,4 +378,4 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$32 NEGL AX RET -GLOBL runtime·tlsoffset(SB),$4 +GLOBL runtime·tlsoffset(SB),NOPTR,$4 diff --git a/src/runtime/sys_freebsd_386.s b/src/runtime/sys_freebsd_386.s index ffc28560e..2c40fc433 100644 --- a/src/runtime/sys_freebsd_386.s +++ b/src/runtime/sys_freebsd_386.s @@ -388,4 +388,4 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$32 NEGL AX RET -GLOBL runtime·tlsoffset(SB),$4 +GLOBL runtime·tlsoffset(SB),NOPTR,$4 diff --git a/src/runtime/sys_netbsd_386.s b/src/runtime/sys_netbsd_386.s index 83a76cb34..23f2f6bd1 100644 --- a/src/runtime/sys_netbsd_386.s +++ b/src/runtime/sys_netbsd_386.s @@ -350,7 +350,7 @@ TEXT runtime·sysctl(SB),NOSPLIT,$28 MOVL $0, AX RET -GLOBL runtime·tlsoffset(SB),$4 +GLOBL runtime·tlsoffset(SB),NOPTR,$4 // int32 runtime·kqueue(void) TEXT runtime·kqueue(SB),NOSPLIT,$0 diff --git a/src/runtime/sys_openbsd_386.s b/src/runtime/sys_openbsd_386.s index 12d9c5c6b..5cda7768a 100644 --- a/src/runtime/sys_openbsd_386.s +++ b/src/runtime/sys_openbsd_386.s @@ -395,4 +395,4 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$32 NEGL AX RET -GLOBL runtime·tlsoffset(SB),$4 +GLOBL runtime·tlsoffset(SB),NOPTR,$4 diff --git a/src/runtime/sys_windows_386.s b/src/runtime/sys_windows_386.s index 9b1fc7a20..1bf4d062a 100644 --- a/src/runtime/sys_windows_386.s +++ b/src/runtime/sys_windows_386.s @@ -212,7 +212,7 @@ TEXT runtime·externalthreadhandler(SB),NOSPLIT,$0 POPL BP RET -GLOBL runtime·cbctxts(SB), $4 +GLOBL runtime·cbctxts(SB), NOPTR, $4 TEXT runtime·callbackasm1+0(SB),NOSPLIT,$0 MOVL 0(SP), AX // will use to find our callback context diff --git a/src/runtime/sys_windows_amd64.s b/src/runtime/sys_windows_amd64.s index f701d157e..05750398e 100644 --- a/src/runtime/sys_windows_amd64.s +++ b/src/runtime/sys_windows_amd64.s @@ -249,7 +249,7 @@ TEXT runtime·externalthreadhandler(SB),NOSPLIT,$0 POPQ BP RET -GLOBL runtime·cbctxts(SB), $8 +GLOBL runtime·cbctxts(SB), NOPTR, $8 TEXT runtime·callbackasm1(SB),NOSPLIT,$0 // Construct args vector for cgocallback(). 
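stubs.go (context above) retains noescape, the identity function that hides a pointer from escape analysis: the xor with 0 is free at run time but severs the input/output dependence the analysis would otherwise track. A standalone copy with a trivial use; outside the runtime this is a sharp tool, shown here only to illustrate the trick:

package main

import (
	"fmt"
	"unsafe"
)

// noescape follows the shape kept in runtime/stubs.go: escape
// analysis cannot see through the xor, so p is not reported as
// escaping even though it flows to the result.
func noescape(p unsafe.Pointer) unsafe.Pointer {
	x := uintptr(p)
	return unsafe.Pointer(x ^ 0)
}

func main() {
	v := 42
	p := noescape(unsafe.Pointer(&v))
	fmt.Println(*(*int)(p)) // 42; v can stay on the stack
}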
diff --git a/src/runtime/syscall_windows_test.go b/src/runtime/syscall_windows_test.go index a82851218..9ed016ccc 100644 --- a/src/runtime/syscall_windows_test.go +++ b/src/runtime/syscall_windows_test.go @@ -488,3 +488,9 @@ func TestRegisterClass(t *testing.T) { t.Fatalf("UnregisterClass failed: %v", err) } } + +func TestOutputDebugString(t *testing.T) { + d := GetDLL(t, "kernel32.dll") + p := syscall.StringToUTF16Ptr("testing OutputDebugString") + d.Proc("OutputDebugStringW").Call(uintptr(unsafe.Pointer(p))) +} diff --git a/src/runtime/thunk.s b/src/runtime/thunk.s index 3b66cf47d..0a0f147c4 100644 --- a/src/runtime/thunk.s +++ b/src/runtime/thunk.s @@ -80,6 +80,9 @@ TEXT reflect·memmove(SB), NOSPLIT, $0-0 TEXT runtime∕debug·freeOSMemory(SB), NOSPLIT, $0-0 JMP runtime·freeOSMemory(SB) +TEXT runtime∕debug·WriteHeapDump(SB), NOSPLIT, $0-0 + JMP runtime·writeHeapDump(SB) + TEXT net·runtime_pollServerInit(SB),NOSPLIT,$0-0 JMP runtime·netpollServerInit(SB) @@ -107,6 +110,9 @@ TEXT net·runtime_pollUnblock(SB),NOSPLIT,$0-0 TEXT syscall·setenv_c(SB), NOSPLIT, $0-0 JMP runtime·syscall_setenv_c(SB) +TEXT syscall·unsetenv_c(SB), NOSPLIT, $0-0 + JMP runtime·syscall_unsetenv_c(SB) + TEXT reflect·makemap(SB),NOSPLIT,$0-0 JMP runtime·reflect_makemap(SB) @@ -161,5 +167,17 @@ TEXT runtime·main_init(SB),NOSPLIT,$0-0 TEXT runtime·main_main(SB),NOSPLIT,$0-0 JMP main·main(SB) -TEXT runtime·timenow(SB), NOSPLIT, $0-0 +TEXT runtime·timenow(SB),NOSPLIT,$0-0 JMP time·now(SB) + +TEXT sync∕atomic·runtime_procPin(SB),NOSPLIT,$0-0 + JMP sync·runtime_procPin(SB) + +TEXT sync∕atomic·runtime_procUnpin(SB),NOSPLIT,$0-0 + JMP sync·runtime_procUnpin(SB) + +TEXT syscall·runtime_envs(SB),NOSPLIT,$0-0 + JMP runtime·runtime_envs(SB) + +TEXT os·runtime_args(SB),NOSPLIT,$0-0 + JMP runtime·runtime_args(SB) diff --git a/src/runtime/tls_arm.s b/src/runtime/tls_arm.s index 7a247ab19..85c3940bf 100644 --- a/src/runtime/tls_arm.s +++ b/src/runtime/tls_arm.s @@ -29,6 +29,9 @@ TEXT runtime·save_g(SB),NOSPLIT,$-4 MOVW g, R0 // preserve R0 across call to setg<> RET #endif + // If the host does not support MRC the linker will replace it with + // a call to runtime.read_tls_fallback which jumps to __kuser_get_tls. + // The replacement function saves LR in R11 over the call to read_tls_fallback. MRC 15, 0, R0, C13, C0, 3 // fetch TLS base pointer // $runtime.tlsg(SB) is a special linker symbol. // It is the offset from the TLS base pointer to our @@ -51,7 +54,8 @@ TEXT runtime·load_g(SB),NOSPLIT,$0 // nothing to do as nacl/arm does not use TLS at all. RET #endif - MRC 15, 0, R0, C13, C0, 3 // fetch TLS base pointer + // See save_g + MRC 15, 0, R0, C13, C0, 3 // fetch TLS base pointer // $runtime.tlsg(SB) is a special linker symbol. // It is the offset from the TLS base pointer to our // thread-local storage for g. 
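The new thunk.s entries wire cross-package calls with one-instruction jumps: syscall·runtime_envs lands on runtime·runtime_envs, os·runtime_args on runtime·runtime_args. The Go side of that contract is just a package-level slice behind a plain accessor, sketched below; the thunk (or, in later Go versions, go:linkname) is what lets another package reach it under its own name:

package main

import "fmt"

// envs is populated once at startup, as runtime·envs is by goenvs_unix.
var envs []string

// runtime_envs is the accessor the syscall package reaches through
// the assembly thunk: nothing more than a read of the slice header.
func runtime_envs() []string { return envs }

func main() {
	envs = []string{"GODEBUG=gctrace=1", "HOME=/tmp"}
	fmt.Println(runtime_envs())
}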
diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go index 9e95fa33d..24dc3eea9 100644 --- a/src/runtime/traceback.go +++ b/src/runtime/traceback.go @@ -31,20 +31,36 @@ import "unsafe" const usesLR = GOARCH != "amd64" && GOARCH != "amd64p32" && GOARCH != "386" var ( - deferprocPC = funcPC(deferproc) - goexitPC = funcPC(goexit) - jmpdeferPC = funcPC(jmpdefer) - mcallPC = funcPC(mcall) - morestackPC = funcPC(morestack) - mstartPC = funcPC(mstart) - newprocPC = funcPC(newproc) - newstackPC = funcPC(newstack) - rt0_goPC = funcPC(rt0_go) - sigpanicPC = funcPC(sigpanic) + // initialized in tracebackinit + deferprocPC uintptr + goexitPC uintptr + jmpdeferPC uintptr + mcallPC uintptr + morestackPC uintptr + mstartPC uintptr + newprocPC uintptr + rt0_goPC uintptr + sigpanicPC uintptr externalthreadhandlerp uintptr // initialized elsewhere ) +func tracebackinit() { + // Go variable initialization happens late during runtime startup. + // Instead of initializing the variables above in the declarations, + // schedinit calls this function so that the variables are + // initialized and available earlier in the startup sequence. + deferprocPC = funcPC(deferproc) + goexitPC = funcPC(goexit) + jmpdeferPC = funcPC(jmpdefer) + mcallPC = funcPC(mcall) + morestackPC = funcPC(morestack) + mstartPC = funcPC(mstart) + newprocPC = funcPC(newproc) + rt0_goPC = funcPC(rt0_go) + sigpanicPC = funcPC(sigpanic) +} + // Traceback over the deferred function calls. // Report them like calls that have been invoked but not started executing yet. func tracebackdefers(gp *g, callback func(*stkframe, unsafe.Pointer) bool, v unsafe.Pointer) { @@ -81,6 +97,9 @@ func tracebackdefers(gp *g, callback func(*stkframe, unsafe.Pointer) bool, v uns // collector (callback != nil). A little clunky to merge these, but avoids // duplicating the code and all its subtlety. func gentraceback(pc0 uintptr, sp0 uintptr, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max int, callback func(*stkframe, unsafe.Pointer) bool, v unsafe.Pointer, printall bool) int { + if goexitPC == 0 { + gothrow("gentraceback before goexitPC initialization") + } g := getg() gotraceback := gotraceback(nil) if pc0 == ^uintptr(0) && sp0 == ^uintptr(0) { // Signal to fetch saved values from gp. @@ -499,7 +518,14 @@ func showframe(f *_func, gp *g) bool { return true } - return traceback > 1 || f != nil && contains(name, ".") && !hasprefix(name, "runtime.") + return traceback > 1 || f != nil && contains(name, ".") && (!hasprefix(name, "runtime.") || isExportedRuntime(name)) +} + +// isExportedRuntime reports whether name is an exported runtime function. +// It is only for runtime functions, so ASCII A-Z is fine. +func isExportedRuntime(name string) bool { + const n = len("runtime.") + return len(name) > n && name[:n] == "runtime." && 'A' <= name[n] && name[n] <= 'Z' } var gStatusStrings = [...]string{ diff --git a/src/runtime/vdso_linux_amd64.c b/src/runtime/vdso_linux_amd64.c index 38e115243..41a41fdd6 100644 --- a/src/runtime/vdso_linux_amd64.c +++ b/src/runtime/vdso_linux_amd64.c @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. #include "runtime.h" +#include "textflag.h" // Look up symbols in the Linux vDSO. 
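showframe above now keeps exported runtime functions visible in user tracebacks; isExportedRuntime needs nothing more than an ASCII upper-case test on the byte after the "runtime." prefix. The function copied verbatim from the hunk, with a small driver:

package main

import "fmt"

// isExportedRuntime reports whether name is an exported runtime
// function: an ASCII upper-case letter right after "runtime.".
func isExportedRuntime(name string) bool {
	const n = len("runtime.")
	return len(name) > n && name[:n] == "runtime." && 'A' <= name[n] && name[n] <= 'Z'
}

func main() {
	for _, name := range []string{"runtime.GC", "runtime.goexit", "main.main"} {
		fmt.Printf("%-16s exported runtime: %v\n", name, isExportedRuntime(name))
	}
}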
@@ -171,14 +172,18 @@ struct vdso_info { Elf64_Verdef *verdef; }; +#pragma dataflag NOPTR static version_key linux26 = { (byte*)"LINUX_2.6", 0x3ae75f6 }; // initialize with vsyscall fallbacks +#pragma dataflag NOPTR void* runtime·__vdso_time_sym = (void*)0xffffffffff600400ULL; +#pragma dataflag NOPTR void* runtime·__vdso_gettimeofday_sym = (void*)0xffffffffff600000ULL; +#pragma dataflag NOPTR void* runtime·__vdso_clock_gettime_sym = (void*)0; -#define SYM_KEYS_COUNT 3 +#pragma dataflag NOPTR static symbol_key sym_keys[] = { { (byte*)"__vdso_time", 0xa33c485, &runtime·__vdso_time_sym }, { (byte*)"__vdso_gettimeofday", 0x315ca59, &runtime·__vdso_gettimeofday_sym }, @@ -301,7 +306,7 @@ vdso_parse_symbols(struct vdso_info *vdso_info, int32 version) if(vdso_info->valid == false) return; - for(i=0; i<SYM_KEYS_COUNT; i++) { + for(i=0; i<nelem(sym_keys); i++) { for(chain = vdso_info->bucket[sym_keys[i].sym_hash % vdso_info->nbucket]; chain != 0; chain = vdso_info->chain[chain]) {
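vdso_parse_symbols walks the ELF hash buckets using each key's precomputed sym_hash; those constants are the standard System V ELF hash of the symbol names (elfHash("__vdso_time") is 0xa33c485, matching the first sym_keys entry). A Go version of that hash, handy for checking the table:

package main

import "fmt"

// elfHash is the System V ELF symbol hash; the sym_keys constants in
// vdso_linux_amd64.c are this function applied to the symbol names.
func elfHash(name string) uint32 {
	var h, g uint32
	for i := 0; i < len(name); i++ {
		h = h<<4 + uint32(name[i])
		g = h & 0xf0000000
		if g != 0 {
			h ^= g >> 24
		}
		h &^= g
	}
	return h
}

func main() {
	for _, name := range []string{"__vdso_time", "__vdso_gettimeofday", "__vdso_clock_gettime"} {
		fmt.Printf("%-22s %#x\n", name, elfHash(name))
	}
}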