diff options
Diffstat (limited to 'libgo/runtime')
-rw-r--r-- | libgo/runtime/go-main.c      |  2 |
-rw-r--r-- | libgo/runtime/lock_futex.c   | 12 |
-rw-r--r-- | libgo/runtime/lock_sema.c    | 11 |
-rw-r--r-- | libgo/runtime/malloc.goc     | 11 |
-rw-r--r-- | libgo/runtime/mgc0.c         |  8 |
-rw-r--r-- | libgo/runtime/mheap.c        |  2 |
-rw-r--r-- | libgo/runtime/proc.c         | 20 |
-rw-r--r-- | libgo/runtime/runtime.c      | 15 |
-rw-r--r-- | libgo/runtime/runtime.h      | 15 |
-rw-r--r-- | libgo/runtime/thread-linux.c | 10 |
-rw-r--r-- | libgo/runtime/thread.c       | 33 |
11 files changed, 119 insertions(+), 20 deletions(-)
diff --git a/libgo/runtime/go-main.c b/libgo/runtime/go-main.c index 5871981f2b8..7e8bb9b234f 100644 --- a/libgo/runtime/go-main.c +++ b/libgo/runtime/go-main.c @@ -40,7 +40,7 @@ static void mainstart (void *); int main (int argc, char **argv) { - runtime_initsig (); + runtime_check (); runtime_args (argc, (byte **) argv); runtime_osinit (); runtime_schedinit (); diff --git a/libgo/runtime/lock_futex.c b/libgo/runtime/lock_futex.c index cdc12d7c75c..9a533a577a4 100644 --- a/libgo/runtime/lock_futex.c +++ b/libgo/runtime/lock_futex.c @@ -118,8 +118,12 @@ runtime_notewakeup(Note *n) void runtime_notesleep(Note *n) { + if(runtime_m()->profilehz > 0) + runtime_setprof(false); while(runtime_atomicload(&n->key) == 0) runtime_futexsleep(&n->key, 0, -1); + if(runtime_m()->profilehz > 0) + runtime_setprof(true); } void @@ -135,14 +139,18 @@ runtime_notetsleep(Note *n, int64 ns) if(runtime_atomicload(&n->key) != 0) return; + if(runtime_m()->profilehz > 0) + runtime_setprof(false); deadline = runtime_nanotime() + ns; for(;;) { runtime_futexsleep(&n->key, 0, ns); if(runtime_atomicload(&n->key) != 0) - return; + break; now = runtime_nanotime(); if(now >= deadline) - return; + break; ns = deadline - now; } + if(runtime_m()->profilehz > 0) + runtime_setprof(true); } diff --git a/libgo/runtime/lock_sema.c b/libgo/runtime/lock_sema.c index b2a8f53be41..8c4b3973bdc 100644 --- a/libgo/runtime/lock_sema.c +++ b/libgo/runtime/lock_sema.c @@ -159,7 +159,11 @@ runtime_notesleep(Note *n) return; } // Queued. Sleep. + if(m->profilehz > 0) + runtime_setprof(false); runtime_semasleep(-1); + if(m->profilehz > 0) + runtime_setprof(true); } void @@ -185,12 +189,16 @@ runtime_notetsleep(Note *n, int64 ns) return; } + if(m->profilehz > 0) + runtime_setprof(false); deadline = runtime_nanotime() + ns; for(;;) { // Registered. Sleep. if(runtime_semasleep(ns) >= 0) { // Acquired semaphore, semawakeup unregistered us. // Done. 
+ if(m->profilehz > 0) + runtime_setprof(true); return; } @@ -203,6 +211,9 @@ runtime_notetsleep(Note *n, int64 ns) ns = deadline - now; } + if(m->profilehz > 0) + runtime_setprof(true); + // Deadline arrived. Still registered. Semaphore not acquired. // Want to give up and return, but have to unregister first, // so that any notewakeup racing with the return does not diff --git a/libgo/runtime/malloc.goc b/libgo/runtime/malloc.goc index 23641e8298f..3fde250af3b 100644 --- a/libgo/runtime/malloc.goc +++ b/libgo/runtime/malloc.goc @@ -277,6 +277,7 @@ runtime_mallocinit(void) uintptr arena_size, bitmap_size; extern byte end[]; byte *want; + uintptr limit; runtime_sizeof_C_MStats = sizeof(MStats); @@ -291,10 +292,12 @@ runtime_mallocinit(void) runtime_InitSizes(); + limit = runtime_memlimit(); + // Set up the allocation arena, a contiguous area of memory where // allocated data will be found. The arena begins with a bitmap large // enough to hold 4 bits per allocated word. - if(sizeof(void*) == 8) { + if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) { // On a 64-bit machine, allocate from a single contiguous reservation. // 16 GB should be big enough for now. // @@ -343,6 +346,10 @@ runtime_mallocinit(void) // of address space, which is probably too much in a 32-bit world. bitmap_size = MaxArena32 / (sizeof(void*)*8/4); arena_size = 512<<20; + if(limit > 0 && arena_size+bitmap_size > limit) { + bitmap_size = (limit / 9) & ~((1<<PageShift) - 1); + arena_size = bitmap_size * 8; + } // SysReserve treats the address we ask for, end, as a hint, // not as an absolute requirement. 
If we ask for the end @@ -359,6 +366,8 @@ runtime_mallocinit(void) p = runtime_SysReserve(want, bitmap_size + arena_size); if(p == nil) runtime_throw("runtime: cannot reserve arena virtual address space"); + if((uintptr)p & (((uintptr)1<<PageShift)-1)) + runtime_printf("runtime: SysReserve returned unaligned address %p; asked for %p", p, (void*)(bitmap_size+arena_size)); } if((uintptr)p & (((uintptr)1<<PageShift)-1)) runtime_throw("runtime: SysReserve returned unaligned address"); diff --git a/libgo/runtime/mgc0.c b/libgo/runtime/mgc0.c index d852946cdbb..4aa7c45dcb3 100644 --- a/libgo/runtime/mgc0.c +++ b/libgo/runtime/mgc0.c @@ -654,14 +654,6 @@ markfin(void *v) scanblock(v, size); } -struct root_list { - struct root_list *next; - struct root { - void *decl; - size_t size; - } roots[]; -}; - static struct root_list* roots; void diff --git a/libgo/runtime/mheap.c b/libgo/runtime/mheap.c index 79359d9dfca..6bf38aa9934 100644 --- a/libgo/runtime/mheap.c +++ b/libgo/runtime/mheap.c @@ -326,7 +326,7 @@ MHeap_FreeLocked(MHeap *h, MSpan *s) } // Release (part of) unused memory to OS. -// Goroutine created in runtime_schedinit. +// Goroutine created at startup. // Loop forever. void runtime_MHeap_Scavenger(void* dummy) diff --git a/libgo/runtime/proc.c b/libgo/runtime/proc.c index d0ae09c45a0..31e8287e704 100644 --- a/libgo/runtime/proc.c +++ b/libgo/runtime/proc.c @@ -416,8 +416,6 @@ runtime_schedinit(void) // Can not enable GC until all roots are registered. // mstats.enablegc = 1; m->nomemprof--; - - scvg = __go_go(runtime_MHeap_Scavenger, nil); } extern void main_init(void) __asm__ ("__go_init_main"); @@ -435,6 +433,7 @@ runtime_main(void) // to preserve the lock. 
runtime_LockOSThread(); runtime_sched.init = true; + scvg = __go_go(runtime_MHeap_Scavenger, nil); main_init(); runtime_sched.init = false; if(!runtime_sched.lockmain) @@ -548,7 +547,7 @@ mcommoninit(M *m) m->mcache = runtime_allocmcache(); runtime_callers(1, m->createstack, nelem(m->createstack)); - + // Add to runtime_allm so garbage collector doesn't free m // when it is just in a register or thread-local storage. m->alllink = runtime_allm; @@ -791,10 +790,11 @@ top: mput(m); } - // Look for deadlock situation: one single active g which happens to be scvg. - if(runtime_sched.grunning == 1 && runtime_sched.gwait == 0) { - if(scvg->status == Grunning || scvg->status == Gsyscall) - runtime_throw("all goroutines are asleep - deadlock!"); + // Look for deadlock situation. + if((scvg == nil && runtime_sched.grunning == 0) || + (scvg != nil && runtime_sched.grunning == 1 && runtime_sched.gwait == 0 && + (scvg->status == Grunning || scvg->status == Gsyscall))) { + runtime_throw("all goroutines are asleep - deadlock!"); } m->nextg = nil; @@ -1135,6 +1135,9 @@ runtime_entersyscall(void) { uint32 v; + if(m->profilehz > 0) + runtime_setprof(false); + // Leave SP around for gc and traceback. 
#ifdef USING_SPLIT_STACK g->gcstack = __splitstack_find(NULL, NULL, &g->gcstack_size, @@ -1205,6 +1208,9 @@ runtime_exitsyscall(void) #endif gp->gcnext_sp = nil; runtime_memclr(gp->gcregs, sizeof gp->gcregs); + + if(m->profilehz > 0) + runtime_setprof(true); return; } diff --git a/libgo/runtime/runtime.c b/libgo/runtime/runtime.c index 7c8c436deeb..78c865ba175 100644 --- a/libgo/runtime/runtime.c +++ b/libgo/runtime/runtime.c @@ -184,6 +184,21 @@ runtime_fastrand1(void) return x; } +static struct root_list runtime_roots = +{ NULL, + { { &syscall_Envs, sizeof syscall_Envs }, + { &os_Args, sizeof os_Args }, + { NULL, 0 } }, +}; + +void +runtime_check(void) +{ + __go_register_gc_roots(&runtime_roots); + + runtime_initsig (); +} + int64 runtime_cputicks(void) { diff --git a/libgo/runtime/runtime.h b/libgo/runtime/runtime.h index 113bb7163c5..40c59a82777 100644 --- a/libgo/runtime/runtime.h +++ b/libgo/runtime/runtime.h @@ -427,6 +427,8 @@ void runtime_osyield(void); void runtime_LockOSThread(void) __asm__("libgo_runtime.runtime.LockOSThread"); void runtime_UnlockOSThread(void) __asm__("libgo_runtime.runtime.UnlockOSThread"); +uintptr runtime_memlimit(void); + // If appropriate, ask the operating system to control whether this // thread should receive profiling signals. This is only necessary on OS X. // An operating system should not deliver a profiling signal to a @@ -441,3 +443,16 @@ void runtime_time_scan(void (*)(byte*, int64)); void runtime_setsig(int32, bool, bool); #define runtime_setitimer setitimer + +void runtime_check(void); + +// A list of global variables that the garbage collector must scan. 
+struct root_list { + struct root_list *next; + struct root { + void *decl; + size_t size; + } roots[]; +}; + +void __go_register_gc_roots(struct root_list*); diff --git a/libgo/runtime/thread-linux.c b/libgo/runtime/thread-linux.c index 8dd5fc4b481..6a69fb429a4 100644 --- a/libgo/runtime/thread-linux.c +++ b/libgo/runtime/thread-linux.c @@ -3,6 +3,16 @@ // license that can be found in the LICENSE file. #include "runtime.h" +#include "defs.h" + +// Linux futex. +// +// futexsleep(uint32 *addr, uint32 val) +// futexwakeup(uint32 *addr) +// +// Futexsleep atomically checks if *addr == val and if so, sleeps on addr. +// Futexwakeup wakes up threads sleeping on addr. +// Futexsleep is allowed to wake up spuriously. #include <errno.h> #include <string.h> diff --git a/libgo/runtime/thread.c b/libgo/runtime/thread.c index 748a62d59f5..12d009926e3 100644 --- a/libgo/runtime/thread.c +++ b/libgo/runtime/thread.c @@ -4,6 +4,8 @@ #include <errno.h> #include <signal.h> +#include <sys/time.h> +#include <sys/resource.h> #include "runtime.h" #include "go-assert.h" @@ -138,6 +140,7 @@ runtime_minit(void) byte* stack; size_t stacksize; stack_t ss; + sigset_t sigs; // Initialize signal handling. runtime_m()->gsignal = runtime_malg(32*1024, &stack, &stacksize); // OS X wants >=8K, Linux >=2K @@ -146,4 +149,34 @@ runtime_minit(void) ss.ss_size = stacksize; if(sigaltstack(&ss, nil) < 0) *(int *)0xf1 = 0xf1; + if (sigemptyset(&sigs) != 0) + runtime_throw("sigemptyset"); + sigprocmask(SIG_SETMASK, &sigs, nil); +} + +uintptr +runtime_memlimit(void) +{ + struct rlimit rl; + uintptr used; + + if(getrlimit(RLIMIT_AS, &rl) != 0) + return 0; + if(rl.rlim_cur >= 0x7fffffff) + return 0; + + // Estimate our VM footprint excluding the heap. + // Not an exact science: use size of binary plus + // some room for thread stacks. + used = (64<<20); + if(used >= rl.rlim_cur) + return 0; + + // If there's not at least 16 MB left, we're probably + // not going to be able to do much. Treat as no limit. 
+ rl.rlim_cur -= used; + if(rl.rlim_cur < (16<<20)) + return 0; + + return rl.rlim_cur - used; } |