Diffstat (limited to 'libgo/runtime/mprof.goc')
-rw-r--r--	libgo/runtime/mprof.goc	48
1 file changed, 32 insertions(+), 16 deletions(-)
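
This change converts the stack traces stored by the libgo profilers from raw
program counters (uintptr arrays) to Location records, so that memory, block,
and thread-creation profiles carry file and line information per frame.
Buckets in stkbucket are now matched frame by frame on pc, lineno, and
filename instead of with a single runtime_mcmp over a PC array. For reference,
a sketch of the Location fields this diff relies on; the actual definition
lives elsewhere in libgo (runtime.h) and may carry additional fields, with the
field types shown here being assumptions:

	/* Sketch of the per-frame record stored in each Bucket.  Only the
	   fields this diff touches are shown.  */
	struct Location
	{
		uintptr	pc;       /* program counter; hashed in stkbucket and
		                     copied into the pc-only Record arrays */
		String	filename; /* source file; compared with __go_strings_equal */
		intgo	lineno;   /* source line; compared directly */
	};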
diff --git a/libgo/runtime/mprof.goc b/libgo/runtime/mprof.goc
index 2cf2afba433..c1b09bea7f5 100644
--- a/libgo/runtime/mprof.goc
+++ b/libgo/runtime/mprof.goc
@@ -11,6 +11,7 @@ package runtime
#include "malloc.h"
#include "defs.h"
#include "go-type.h"
+#include "go-string.h"
// NOTE(rsc): Everything here could use cas if contention became an issue.
static Lock proflock;
@@ -46,7 +47,7 @@ struct Bucket
};
uintptr hash;
uintptr nstk;
- uintptr stk[1];
+ Location stk[1];
};
enum {
BuckHashSize = 179999,
@@ -58,9 +59,9 @@ static uintptr bucketmem;
// Return the bucket for stk[0:nstk], allocating new bucket if needed.
static Bucket*
-stkbucket(int32 typ, uintptr *stk, int32 nstk, bool alloc)
+stkbucket(int32 typ, Location *stk, int32 nstk, bool alloc)
{
- int32 i;
+ int32 i, j;
uintptr h;
Bucket *b;
@@ -72,7 +73,7 @@ stkbucket(int32 typ, uintptr *stk, int32 nstk, bool alloc)
// Hash stack.
h = 0;
for(i=0; i<nstk; i++) {
- h += stk[i];
+ h += stk[i].pc;
h += h<<10;
h ^= h>>6;
}
@@ -80,10 +81,18 @@ stkbucket(int32 typ, uintptr *stk, int32 nstk, bool alloc)
h ^= h>>11;
i = h%BuckHashSize;
- for(b = buckhash[i]; b; b=b->next)
- if(b->typ == typ && b->hash == h && b->nstk == (uintptr)nstk &&
- runtime_mcmp((byte*)b->stk, (byte*)stk, nstk*sizeof stk[0]) == 0)
- return b;
+ for(b = buckhash[i]; b; b=b->next) {
+ if(b->typ == typ && b->hash == h && b->nstk == (uintptr)nstk) {
+ for(j = 0; j < nstk; j++) {
+ if(b->stk[j].pc != stk[j].pc ||
+ b->stk[j].lineno != stk[j].lineno ||
+ !__go_strings_equal(b->stk[j].filename, stk[j].filename))
+ break;
+ }
+ if (j == nstk)
+ return b;
+ }
+ }
if(!alloc)
return nil;
@@ -241,7 +250,7 @@ runtime_MProf_Malloc(void *p, uintptr size)
{
M *m;
int32 nstk;
- uintptr stk[32];
+ Location stk[32];
Bucket *b;
m = runtime_m();
@@ -298,7 +307,7 @@ runtime_blockevent(int64 cycles, int32 skip)
{
int32 nstk;
int64 rate;
- uintptr stk[32];
+ Location stk[32];
Bucket *b;
if(cycles <= 0)
@@ -336,7 +345,7 @@ record(Record *r, Bucket *b)
r->alloc_objects = b->allocs;
r->free_objects = b->frees;
for(i=0; i<b->nstk && i<nelem(r->stk); i++)
- r->stk[i] = b->stk[i];
+ r->stk[i] = b->stk[i].pc;
for(; i<nelem(r->stk); i++)
r->stk[i] = 0;
}
@@ -396,7 +405,7 @@ func BlockProfile(p Slice) (n int, ok bool) {
r->count = b->count;
r->cycles = b->cycles;
for(i=0; (uintptr)i<b->nstk && (uintptr)i<nelem(r->stk); i++)
- r->stk[i] = b->stk[i];
+ r->stk[i] = b->stk[i].pc;
for(; (uintptr)i<nelem(r->stk); i++)
r->stk[i] = 0;
}
@@ -413,6 +422,7 @@ struct TRecord {
func ThreadCreateProfile(p Slice) (n int, ok bool) {
TRecord *r;
M *first, *mp;
+ int32 i;
first = runtime_atomicloadp(&runtime_allm);
n = 0;
@@ -423,7 +433,9 @@ func ThreadCreateProfile(p Slice) (n int, ok bool) {
ok = true;
r = (TRecord*)p.__values;
for(mp=first; mp; mp=mp->alllink) {
- runtime_memmove(r->stk, mp->createstack, sizeof r->stk);
+ for(i = 0; (uintptr)i < nelem(r->stk); i++) {
+ r->stk[i] = mp->createstack[i].pc;
+ }
r++;
}
}
@@ -473,10 +485,14 @@ func Stack(b Slice, all bool) (n int) {
static void
saveg(G *gp, TRecord *r)
{
- int32 n;
+ int32 n, i;
+ Location locstk[nelem(r->stk)];
- if(gp == runtime_g())
- n = runtime_callers(0, r->stk, nelem(r->stk));
+ if(gp == runtime_g()) {
+ n = runtime_callers(0, locstk, nelem(r->stk));
+ for(i = 0; i < n; i++)
+ r->stk[i] = locstk[i].pc;
+ }
else {
// FIXME: Not implemented.
n = 0;
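
The new go-string.h include supplies __go_strings_equal, used above to compare
frame filenames by content. A minimal sketch of that helper, assuming String
is the usual libgo (pointer, length) pair; the real definition may differ in
detail:

	/* Sketch: two Go strings are equal when their lengths match and
	   their bytes compare equal.  */
	static inline _Bool
	__go_strings_equal (String s1, String s2)
	{
		return (s1.len == s2.len
			&& __builtin_memcmp (s1.str, s2.str, s1.len) == 0);
	}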
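Because Location embeds a String (a pointer plus a length), the old
runtime_mcmp over the frame array would now compare string pointers rather
than string contents, so identical stacks recorded at different times could
fail to match. That is why stkbucket gains the explicit per-frame loop. The
same logic, pulled out into a standalone helper for illustration
(locations_equal is a hypothetical name; the diff inlines this):

	/* Hypothetical helper equivalent to the inline loop in stkbucket:
	   two stacks match only if every frame agrees on pc, lineno, and
	   filename contents.  */
	static _Bool
	locations_equal (const Location *a, const Location *b, int32 n)
	{
		int32 j;

		for (j = 0; j < n; j++) {
			if (a[j].pc != b[j].pc
			    || a[j].lineno != b[j].lineno
			    || !__go_strings_equal (a[j].filename, b[j].filename))
				return 0;
		}
		return 1;
	}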