Diffstat (limited to 'libgo/runtime/mprof.goc')
 libgo/runtime/mprof.goc | 156 ++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 145 insertions(+), 11 deletions(-)
diff --git a/libgo/runtime/mprof.goc b/libgo/runtime/mprof.goc
index d143d19e5ba..c61c65ccee3 100644
--- a/libgo/runtime/mprof.goc
+++ b/libgo/runtime/mprof.goc
@@ -26,6 +26,10 @@ struct Bucket
 	uintptr	frees;
 	uintptr	alloc_bytes;
 	uintptr	free_bytes;
+	uintptr	recent_allocs;  // since last gc
+	uintptr	recent_frees;
+	uintptr	recent_alloc_bytes;
+	uintptr	recent_free_bytes;
 	uintptr	hash;
 	uintptr	nstk;
 	uintptr	stk[1];
@@ -39,7 +43,7 @@ static uintptr bucketmem;
 
 // Return the bucket for stk[0:nstk], allocating new bucket if needed.
 static Bucket*
-stkbucket(uintptr *stk, int32 nstk)
+stkbucket(uintptr *stk, int32 nstk, bool alloc)
 {
 	int32 i;
 	uintptr h;
@@ -66,6 +70,9 @@ stkbucket(uintptr *stk, int32 nstk)
 	   runtime_mcmp((byte*)b->stk, (byte*)stk, nstk*sizeof stk[0]) == 0)
 		return b;
 
+	if(!alloc)
+		return nil;
+
 	b = runtime_mallocgc(sizeof *b + nstk*sizeof stk[0], FlagNoProfiling, 0, 1);
 	bucketmem += sizeof *b + nstk*sizeof stk[0];
 	runtime_memmove(b->stk, stk, nstk*sizeof stk[0]);
@@ -78,6 +85,26 @@ stkbucket(uintptr *stk, int32 nstk)
 	return b;
 }
 
+// Record that a gc just happened: all the 'recent' statistics are now real.
+void
+runtime_MProf_GC(void)
+{
+	Bucket *b;
+
+	runtime_lock(&proflock);
+	for(b=buckets; b; b=b->allnext) {
+		b->allocs += b->recent_allocs;
+		b->frees += b->recent_frees;
+		b->alloc_bytes += b->recent_alloc_bytes;
+		b->free_bytes += b->recent_free_bytes;
+		b->recent_allocs = 0;
+		b->recent_frees = 0;
+		b->recent_alloc_bytes = 0;
+		b->recent_free_bytes = 0;
+	}
+	runtime_unlock(&proflock);
+}
+
 // Map from pointer to Bucket* that allocated it.
 // Three levels:
 //	Linked-list hash table for top N-20 bits.
@@ -198,15 +225,11 @@ runtime_MProf_Malloc(void *p, uintptr size)
 		return;
 	m->nomemprof++;
 
-#if 0
 	nstk = runtime_callers(1, stk, 32);
-#else
-	nstk = 0;
-#endif
 	runtime_lock(&proflock);
-	b = stkbucket(stk, nstk);
-	b->allocs++;
-	b->alloc_bytes += size;
+	b = stkbucket(stk, nstk, true);
+	b->recent_allocs++;
+	b->recent_alloc_bytes += size;
 	setaddrbucket((uintptr)p, b);
 	runtime_unlock(&proflock);
 	m = runtime_m();
@@ -228,8 +251,8 @@ runtime_MProf_Free(void *p, uintptr size)
 	runtime_lock(&proflock);
 	b = getaddrbucket((uintptr)p);
 	if(b != nil) {
-		b->frees++;
-		b->free_bytes += size;
+		b->recent_frees++;
+		b->recent_free_bytes += size;
 	}
 	runtime_unlock(&proflock);
 	m = runtime_m();
@@ -240,7 +263,7 @@ runtime_MProf_Free(void *p, uintptr size)
 // Go interface to profile data.  (Declared in extern.go)
 // Assumes Go sizeof(int) == sizeof(int32)
 
-// Must match MemProfileRecord in extern.go.
+// Must match MemProfileRecord in debug.go.
 typedef struct Record Record;
 struct Record {
 	int64 alloc_bytes, free_bytes;
@@ -292,3 +315,114 @@ runtime_MProf_Mark(void (*scan)(byte *, int64))
 	scan((byte*)&addrhash, sizeof addrhash);
 	scan((byte*)&addrfree, sizeof addrfree);
 }
+
+// Must match StackRecord in debug.go.
+typedef struct TRecord TRecord;
+struct TRecord {
+	uintptr stk[32];
+};
+
+func ThreadCreateProfile(p Slice) (n int32, ok bool) {
+	TRecord *r;
+	M *first, *m;
+
+	first = runtime_atomicloadp(&runtime_allm);
+	n = 0;
+	for(m=first; m; m=m->alllink)
+		n++;
+	ok = false;
+	if(n <= p.__count) {
+		ok = true;
+		r = (TRecord*)p.__values;
+		for(m=first; m; m=m->alllink) {
+			runtime_memmove(r->stk, m->createstack, sizeof r->stk);
+			r++;
+		}
+	}
+}
+
+func Stack(b Slice, all bool) (n int32) {
+	byte *pc, *sp;
+
+	sp = runtime_getcallersp(&b);
+	pc = runtime_getcallerpc(&b);
+
+	if(all) {
+		runtime_semacquire(&runtime_worldsema);
+		runtime_m()->gcing = 1;
+		runtime_stoptheworld();
+	}
+
+	if(b.__count == 0)
+		n = 0;
+	else{
+		G* g = runtime_g();
+		g->writebuf = (byte*)b.__values;
+		g->writenbuf = b.__count;
+		USED(pc);
+		USED(sp);
+		// runtime_goroutineheader(g);
+		// runtime_traceback(pc, sp, 0, g);
+		// if(all)
+		//	runtime_tracebackothers(g);
+		n = b.__count - g->writenbuf;
+		g->writebuf = nil;
+		g->writenbuf = 0;
+	}
+
+	if(all) {
+		runtime_m()->gcing = 0;
+		runtime_semrelease(&runtime_worldsema);
+		runtime_starttheworld(false);
+	}
+}
+
+static void
+saveg(byte *pc, byte *sp, G *g, TRecord *r)
+{
+	int32 n;
+
+	USED(pc);
+	USED(sp);
+	USED(g);
+	// n = runtime_gentraceback(pc, sp, 0, g, 0, r->stk, nelem(r->stk));
+	n = 0;
+	if((size_t)n < nelem(r->stk))
+		r->stk[n] = 0;
+}
+
+func GoroutineProfile(b Slice) (n int32, ok bool) {
+	byte *pc, *sp;
+	TRecord *r;
+	G *gp;
+
+	sp = runtime_getcallersp(&b);
+	pc = runtime_getcallerpc(&b);
+
+	ok = false;
+	n = runtime_gcount();
+	if(n <= b.__count) {
+		runtime_semacquire(&runtime_worldsema);
+		runtime_m()->gcing = 1;
+		runtime_stoptheworld();
+
+		n = runtime_gcount();
+		if(n <= b.__count) {
+			G* g = runtime_g();
+			ok = true;
+			r = (TRecord*)b.__values;
+			saveg(pc, sp, g, r++);
+			for(gp = runtime_allg; gp != nil; gp = gp->alllink) {
+				if(gp == g || gp->status == Gdead)
+					continue;
+				//saveg(gp->sched.pc, gp->sched.sp, gp, r++);
+				r++;
+			}
+		}
+
+		runtime_m()->gcing = 0;
+		runtime_semrelease(&runtime_worldsema);
+		runtime_starttheworld(false);
+	}
+}
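
Note on the first half of the patch: runtime_MProf_GC means allocation and free
counts are parked in the recent_* fields and only folded into the cumulative
profile when a collection runs, so a heap profile is only as fresh as the last
GC. A minimal Go sketch of the observable effect, using the public
runtime.MemProfile API (the sampling rate and slice sizes here are
illustrative, not part of this patch):

    package main

    import (
    	"fmt"
    	"runtime"
    )

    var sink [][]byte

    func main() {
    	// Sample every allocation so the sketch is deterministic.
    	// Must be set before the allocations of interest.
    	runtime.MemProfileRate = 1

    	for i := 0; i < 100; i++ {
    		sink = append(sink, make([]byte, 4096))
    	}

    	// The profiler credits the recent_* counters to the cumulative
    	// totals only at GC time, so force a collection before reading.
    	runtime.GC()

    	// Two-call pattern: if the slice is too small, ok is false and
    	// n reports the required length.
    	rec := make([]runtime.MemProfileRecord, 64)
    	n, ok := runtime.MemProfile(rec, true)
    	if !ok {
    		rec = make([]runtime.MemProfileRecord, n+16)
    		n, _ = runtime.MemProfile(rec, true)
    	}
    	for _, r := range rec[:n] {
    		if r.InUseBytes() > 0 {
    			fmt.Printf("%d bytes live in %d objects\n",
    				r.InUseBytes(), r.InUseObjects())
    		}
    	}
    }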
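The new Go-visible entry points Stack, ThreadCreateProfile, and
GoroutineProfile still have their traceback calls stubbed out here (the
commented-out runtime_traceback/runtime_gentraceback lines and the USED()
markers), so the records come back empty at this stage, but the stop-the-world
protocol around runtime_worldsema is already in its final shape. A sketch of
how the finished functions are driven from Go, again using only the public
runtime API (buffer sizes and the +8 headroom are arbitrary):

    package main

    import (
    	"fmt"
    	"runtime"
    )

    func main() {
    	// Stack writes a textual dump of the calling goroutine, or of
    	// all goroutines when all is true (stopping the world, as in
    	// the C code above).
    	buf := make([]byte, 1<<16)
    	n := runtime.Stack(buf, true)
    	fmt.Printf("%s\n", buf[:n])

    	// GoroutineProfile and ThreadCreateProfile use the two-call
    	// pattern: a nil slice sizes the result, a second call fills it.
    	count, _ := runtime.GoroutineProfile(nil)
    	recs := make([]runtime.StackRecord, count+8)
    	if count, ok := runtime.GoroutineProfile(recs); ok {
    		fmt.Println("goroutines:", count)
    	}

    	count, _ = runtime.ThreadCreateProfile(nil)
    	recs = make([]runtime.StackRecord, count+8)
    	if count, ok := runtime.ThreadCreateProfile(recs); ok {
    		// Each StackRecord holds the fixed 32-entry array that the
    		// C-side TRecord mirrors; Stack() trims the zero-terminated
    		// stk[32] to its live prefix.
    		for _, r := range recs[:count] {
    			fmt.Println(len(r.Stack()), "frames")
    		}
    	}
    }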