path: root/libgo/runtime/mgc0.c
Diffstat (limited to 'libgo/runtime/mgc0.c')
-rw-r--r--  libgo/runtime/mgc0.c  60
1 file changed, 5 insertions(+), 55 deletions(-)
diff --git a/libgo/runtime/mgc0.c b/libgo/runtime/mgc0.c
index c4ab1454c5b..26633ab1f18 100644
--- a/libgo/runtime/mgc0.c
+++ b/libgo/runtime/mgc0.c
@@ -62,9 +62,6 @@ enum {
#define bitMask (bitBlockBoundary | bitAllocated | bitMarked | bitSpecial)
// TODO: Make these per-M.
-static uint64 nlookup;
-static uint64 nsizelookup;
-static uint64 naddrlookup;
static uint64 nhandoff;
static int32 gctrace;
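The flag bits combined into bitMask above index into per-word fields of the GC bitmap: each bitmap word describes several heap words, and the flags for one heap word sit at stride bitShift within that word. A minimal standalone restatement of that layout follows; the constants are reconstructed from the runtime of this era rather than copied from the patch, so treat names and values as illustrative.

    #include <stdint.h>

    /* One GC bitmap word covers sizeof(void*)*8/4 heap words; the flags
       for the heap word at offset `shift` occupy bit positions
       shift + bitShift*k, k = 0..3. (Illustrative reconstruction.) */
    enum { bitShift = sizeof(void*)*8/4 };        /* 16 on 64-bit targets */

    #define bitAllocated     ((uintptr_t)1<<(bitShift*0))
    #define bitBlockBoundary ((uintptr_t)1<<(bitShift*1)) /* when bitAllocated is NOT set */
    #define bitMarked        ((uintptr_t)1<<(bitShift*2))
    #define bitSpecial       ((uintptr_t)1<<(bitShift*3))

    /* Is the heap word at offset `shift` within bitmap word `xbits` marked? */
    static int is_marked(uintptr_t xbits, uintptr_t shift) {
        return ((xbits >> shift) & bitMarked) != 0;
    }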
@@ -218,8 +215,6 @@ scanblock(byte *b, int64 n)
// Otherwise consult span table to find beginning.
// (Manually inlined copy of MHeap_LookupMaybe.)
- nlookup++;
- naddrlookup++;
k = (uintptr)obj>>PageShift;
x = k;
if(sizeof(void*) == 8)
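The counters are gone but the inlined lookup itself stays: a heap address becomes a page index into the heap's span map. A minimal sketch of that computation, assuming the usual PageShift of 12 (4 KB pages) and with page_index as a hypothetical stand-in name:

    #include <stdint.h>

    enum { PageShift = 12 };                  /* 4 KB heap pages */

    /* Map a heap address to its span-map index; on 64-bit targets the
       map is based at arena_start, so rebase the page number first. */
    static uintptr_t page_index(uintptr_t addr, uintptr_t arena_start) {
        uintptr_t k = addr >> PageShift;      /* page number of addr */
        if (sizeof(void*) == 8)
            k -= arena_start >> PageShift;    /* rebase for the 64-bit map */
        return k;                             /* then s = h->map[k] */
    }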
@@ -307,49 +302,8 @@ scanblock(byte *b, int64 n)
b = *--wp;
nobj--;
- // Figure out n = size of b. Start by loading bits for b.
- off = (uintptr*)b - (uintptr*)arena_start;
- bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
- shift = off % wordsPerBitmapWord;
- xbits = *bitp;
- bits = xbits >> shift;
-
- // Might be small; look for nearby block boundary.
- // A block boundary is marked by either bitBlockBoundary
- // or bitAllocated being set (see notes near their definition).
- enum {
- boundary = bitBlockBoundary|bitAllocated
- };
- // Look for a block boundary both after and before b
- // in the same bitmap word.
- //
- // A block boundary j words after b is indicated by
- // bits>>j & boundary
- // assuming shift+j < bitShift. (If shift+j >= bitShift then
- // we'll be bleeding other bit types like bitMarked into our test.)
- // Instead of inserting the conditional shift+j < bitShift into the loop,
- // we can let j range from 1 to bitShift as long as we first
- // apply a mask to keep only the bits corresponding
- // to shift+j < bitShift aka j < bitShift-shift.
- bits &= (boundary<<(bitShift-shift)) - boundary;
-
- // A block boundary j words before b is indicated by
- // xbits>>(shift-j) & boundary
- // (assuming shift >= j). There is no cleverness here
- to avoid the test, because when j gets too large the shift
- // turns negative, which is undefined in C.
-
- for(j=1; j<bitShift; j++) {
- if(((bits>>j)&boundary) != 0 || (shift>=j && ((xbits>>(shift-j))&boundary) != 0)) {
- n = j*PtrSize;
- goto scan;
- }
- }
-
- // Fall back to asking span about size class.
+ // Ask span about size class.
// (Manually inlined copy of MHeap_Lookup.)
- nlookup++;
- nsizelookup++;
x = (uintptr)b>>PageShift;
if(sizeof(void*) == 8)
x -= (uintptr)arena_start>>PageShift;
@@ -358,7 +312,6 @@ scanblock(byte *b, int64 n)
n = s->npages<<PageShift;
else
n = runtime_class_to_size[s->sizeclass];
- scan:;
}
}
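The branch-free mask in the deleted fast path is the subtle part: for a flag bit at position p, (boundary<<k) - boundary sets exactly bit positions p through p+k-1, so ANDing it into bits keeps only the boundary bits whose offset j satisfies j < k = bitShift-shift, i.e. the offsets that stay inside the current bitmap word. A small self-contained check of that identity, using a single flag bit and made-up values:

    #include <stdint.h>
    #include <stdio.h>

    enum { bitShift = sizeof(void*)*8/4 };      /* 16 on 64-bit targets */

    int main(void) {
        uintptr_t boundary = (uintptr_t)1 << 0; /* one flag bit at position 0 */
        uintptr_t shift = 5;                    /* b's offset in its bitmap word */
        uintptr_t mask = (boundary << (bitShift - shift)) - boundary;

        /* mask keeps exactly the offsets j with j < bitShift - shift,
           so the search loop needs no shift+j < bitShift test. */
        for (uintptr_t j = 0; j < bitShift; j++)
            printf("j=%2d keep=%d\n", (int)j, (int)((mask >> j) & boundary));
        return 0;
    }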
@@ -1018,9 +971,6 @@ runtime_gc(int32 force)
}
t0 = runtime_nanotime();
- nlookup = 0;
- nsizelookup = 0;
- naddrlookup = 0;
nhandoff = 0;
m->gcing = 1;
@@ -1085,11 +1035,11 @@ runtime_gc(int32 force)
runtime_printf("pause %llu\n", (unsigned long long)t3-t0);
if(gctrace) {
- runtime_printf("gc%d: %llu+%llu+%llu ms %llu -> %llu MB %llu -> %llu (%llu-%llu) objects %llu pointer lookups (%llu size, %llu addr) %llu handoff\n",
- mstats.numgc, (unsigned long long)(t1-t0)/1000000, (unsigned long long)(t2-t1)/1000000, (unsigned long long)(t3-t2)/1000000,
+ runtime_printf("gc%d(%d): %llu+%llu+%llu ms %llu -> %llu MB %llu -> %llu (%llu-%llu) objects %llu handoff\n",
+ mstats.numgc, work.nproc, (unsigned long long)(t1-t0)/1000000, (unsigned long long)(t2-t1)/1000000, (unsigned long long)(t3-t2)/1000000,
(unsigned long long)heap0>>20, (unsigned long long)heap1>>20, (unsigned long long)obj0, (unsigned long long)obj1,
- (unsigned long long)mstats.nmalloc, (unsigned long long)mstats.nfree,
- (unsigned long long)nlookup, (unsigned long long)nsizelookup, (unsigned long long)naddrlookup, (unsigned long long) nhandoff);
+ (unsigned long long) mstats.nmalloc, (unsigned long long)mstats.nfree,
+ (unsigned long long) nhandoff);
}
runtime_semrelease(&gcsema);
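For reference, a line in the new gctrace format would read along these lines, with the parallel proc count after the GC number and the per-lookup breakdown gone (every value below is invented for illustration):

    gc12(4): 2+8+1 ms 15 -> 7 MB 180000 -> 95000 (260000-165000) objects 37 handoff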