author     hboehm <hboehm>                  2008-02-20 18:48:42 +0000
committer  Ivan Maidanski <ivmai@mail.ru>   2011-07-26 21:06:42 +0400
commit     d2972463672f2bfd85716059136852bb6abe7a91 (patch)
tree       b0e62f5da2ae5e2e0a610a1ad2e2a3219e1ba724
parent     d6b11ce91f2ef4c7bcacd98326f5b27bed0e25a0 (diff)
download   bdwgc-d2972463672f2bfd85716059136852bb6abe7a91.tar.gz
2008-02-20  Hans Boehm <Hans.Boehm@hp.com>

	* allchblk.c (GC_enough_large_bytes_left): No longer take
	parameters; return free list index bound.
	(GC_merge_unmapped): Don't access nexthdr until after null test.
	(Fixes bug in 1/29/08 check-in.)
	(GC_allochblk): Calculate when splitting is allowable only once
	here, not when considering each block.
	(GC_allochblk_nth): Accept new may_split parameter.  Avoid some
	redundant tests for exact size matches.
	* alloc.c (GC_should_collect): Cache min_bytes_allocd.
	(GC_maybe_gc): Make locking assertion testable.
	* mark_rts.c: Fix indentation.
	* pthread_stop_world.c: Replace old GC_err_printf1 reference.
	* tests/test.c: Remove (void) casts.  Optionally print some
	timing information.
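The central change: GC_enough_large_bytes_left() no longer answers a
per-block yes/no question, but computes once per allocation attempt the
highest free-list index whose blocks may be split. A standalone sketch of
the function as the patch defines it (the types and globals here are
simplified stand-ins, not the real GC declarations):

    #include <stdio.h>

    #define N_HBLK_FLS 28               /* stand-in for the real constant */

    typedef unsigned long word;

    static word GC_free_bytes[N_HBLK_FLS + 1]; /* free bytes on each list */
    static word GC_large_allocd_bytes;     /* large bytes currently in use */
    static word GC_max_large_allocd_bytes; /* high-water mark of the above */

    /* Return the largest n such that GC_large_allocd_bytes plus the   */
    /* free bytes on lists n .. N_HBLK_FLS reaches the high-water mark */
    /* GC_max_large_allocd_bytes; 0 if there is no such n.  Lists up   */
    /* to this bound can be split without starving a recurrence of the */
    /* previous peak demand for large blocks (steady-state assumption).*/
    static int GC_enough_large_bytes_left(void)
    {
        int n;
        word bytes = GC_large_allocd_bytes;

        for (n = N_HBLK_FLS; n >= 0; --n) {
            bytes += GC_free_bytes[n];
            if (bytes >= GC_max_large_allocd_bytes) return n;
        }
        return 0;
    }

    int main(void)                           /* hypothetical heap state */
    {
        GC_free_bytes[10] = 1UL << 20;
        GC_max_large_allocd_bytes = 1UL << 19;
        printf("split limit: %d\n", GC_enough_large_bytes_left());
        return 0;
    }

Scanning from the largest list downward yields the most permissive bound
that still leaves enough large blocks in reserve.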
-rw-r--r--   ChangeLog             16
-rw-r--r--   allchblk.c           116
-rw-r--r--   alloc.c               10
-rw-r--r--   mark_rts.c             2
-rw-r--r--   pthread_stop_world.c   4
-rw-r--r--   tests/test.c          78
6 files changed, 142 insertions(+), 84 deletions(-)
diff --git a/ChangeLog b/ChangeLog
index a6e8dee8..a0207ad4 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,19 @@
+2008-02-20 Hans Boehm <Hans.Boehm@hp.com>
+
+ * allchblk.c (GC_enough_large_bytes_left): No longer take
+ parameters; return free list index bound.
+ (GC_merge_unmapped): Don't access nexthdr until after null test.
+ (Fixes bug in 1/29/08 check-in.) (GC_allochblk): Calculate
+ when splitting is allowable only once here, not when considering each
+ block. (GC_allochblk_nth): Accept new may_split parameter.
+ Avoid some redundant tests for exact size matches.
+ * alloc.c (GC_should_collect): Cache min_bytes_allocd.
+ (GC_maybe_gc): Make locking assertion testable.
+ * mark_rts.c: Fix indentation.
+ * pthread_stop_world.c: Replace old GC_err_printf1 reference.
+ * tests/test.c: Remove (void) casts. Optionally print some
+ timing information.
+
2008-02-15 Hans Boehm <Hans.Boehm@hp.com>
* windows-untested/gc.def: Remove CreateThread line.
diff --git a/allchblk.c b/allchblk.c
index 49cc863b..ed6a60b1 100644
--- a/allchblk.c
+++ b/allchblk.c
@@ -51,21 +51,24 @@ struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };
word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
/* Number of free bytes on each list. */
- /* Is bytes + the number of free bytes on lists n .. N_HBLK_FLS */
- /* > GC_max_large_allocd_bytes? */
+ /* Return the largest n such that GC_large_allocd_bytes plus */
+ /* the number of free bytes on lists n .. N_HBLK_FLS is at */
+ /* least GC_max_large_allocd_bytes. */
+ /* If there is no such n, return 0. */
# ifdef __GNUC__
__inline__
# endif
- static GC_bool GC_enough_large_bytes_left(word bytes, int n)
+ static int GC_enough_large_bytes_left(void)
{
- int i;
+ int n;
+ word bytes = GC_large_allocd_bytes;
GC_ASSERT(GC_max_large_allocd_bytes <= GC_heapsize);
- for (i = N_HBLK_FLS; i >= n; --i) {
- bytes += GC_free_bytes[i];
- if (bytes > GC_max_large_allocd_bytes) return TRUE;
+ for (n = N_HBLK_FLS; n >= 0; --n) {
+ bytes += GC_free_bytes[n];
+ if (bytes >= GC_max_large_allocd_bytes) return n;
}
- return FALSE;
+ return 0;
}
# define INCR_FREE_BYTES(n, b) GC_free_bytes[n] += (b);
@@ -429,10 +432,10 @@ void GC_merge_unmapped(void)
size = hhdr->hb_sz;
next = (struct hblk *)((word)h + size);
GET_HDR(next, nexthdr);
- nextsize = nexthdr -> hb_sz;
/* Coalesce with successor, if possible */
if (0 != nexthdr && HBLK_IS_FREE(nexthdr)
- && (signed_word) (size + nextsize) > 0 /* no pot. overflow */) {
+ && (signed_word) (size + (nextsize = nexthdr->hb_sz)) > 0
+ /* no pot. overflow */) {
if (IS_MAPPED(hhdr)) {
GC_ASSERT(!IS_MAPPED(nexthdr));
/* make both consistent, so that we can merge */
@@ -557,7 +560,8 @@ void GC_split_block(struct hblk *h, hdr *hhdr, struct hblk *n,
}
struct hblk *
-GC_allochblk_nth(size_t sz/* bytes */, int kind, unsigned flags, int n);
+GC_allochblk_nth(size_t sz/* bytes */, int kind, unsigned flags, int n,
+ GC_bool may_split);
/*
* Allocate (and return pointer to) a heap block
@@ -574,15 +578,49 @@ GC_allochblk(size_t sz, int kind, unsigned flags/* IGNORE_OFF_PAGE or 0 */)
word blocks;
int start_list;
int i;
+ struct hblk *result;
+ int split_limit; /* Highest index of free list whose blocks we */
+ /* split. */
GC_ASSERT((sz & (GRANULE_BYTES - 1)) == 0);
blocks = OBJ_SZ_TO_BLOCKS(sz);
start_list = GC_hblk_fl_from_blocks(blocks);
- for (i = start_list; i <= N_HBLK_FLS; ++i) {
- struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
- if (0 != result) {
- return result;
+ /* Try for an exact match first. */
+ result = GC_allochblk_nth(sz, kind, flags, start_list, FALSE);
+ if (0 != result) return result;
+ if (GC_use_entire_heap || GC_dont_gc
+ || USED_HEAP_SIZE < GC_requested_heapsize
+ || TRUE_INCREMENTAL || !GC_should_collect()) {
+ /* Should use more of the heap, even if it requires splitting. */
+ split_limit = N_HBLK_FLS;
+ } else {
+# ifdef USE_MUNMAP
+ /* avoid splitting, since that might require remapping */
+ split_limit = 0;
+# else
+ if (GC_finalizer_bytes_freed > (GC_heapsize >> 4)) {
+ /* If we are deallocating lots of memory from */
+ /* finalizers, fail and collect sooner rather */
+ /* than later. */
+ split_limit = 0;
+ } else {
+ /* If we have enough large blocks left to cover any */
+ /* previous request for large blocks, we go ahead */
+ /* and split. Assuming a steady state, that should */
+ /* be safe. It means that we can use the full */
+ /* heap if we allocate only small objects. */
+ split_limit = GC_enough_large_bytes_left();
}
+# endif
+ }
+ if (start_list < UNIQUE_THRESHOLD) {
+ /* No reason to try start_list again, since all blocks are exact */
+ /* matches. */
+ ++start_list;
+ }
+ for (i = start_list; i <= split_limit; ++i) {
+ struct hblk * result = GC_allochblk_nth(sz, kind, flags, i, TRUE);
+ if (0 != result) return result;
}
return 0;
}
@@ -590,9 +628,10 @@ GC_allochblk(size_t sz, int kind, unsigned flags/* IGNORE_OFF_PAGE or 0 */)
* The same, but with search restricted to nth free list.
* Flags is IGNORE_OFF_PAGE or zero.
* Unlike the above, sz is in bytes.
+ * The may_split flag indicates whether it's OK to split larger blocks.
*/
struct hblk *
-GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n)
+GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n, GC_bool may_split)
{
struct hblk *hbp;
hdr * hhdr; /* Header corr. to hbp */
@@ -611,44 +650,21 @@ GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n)
GET_HDR(hbp, hhdr);
size_avail = hhdr->hb_sz;
if (size_avail < size_needed) continue;
- if (size_avail != size_needed
- && !GC_use_entire_heap
- && !GC_dont_gc
- && USED_HEAP_SIZE >= GC_requested_heapsize
- && !TRUE_INCREMENTAL && GC_should_collect()) {
-# ifdef USE_MUNMAP
- continue;
-# else
- /* If we have enough large blocks left to cover any */
- /* previous request for large blocks, we go ahead */
- /* and split. Assuming a steady state, that should */
- /* be safe. It means that we can use the full */
- /* heap if we allocate only small objects. */
- if (!GC_enough_large_bytes_left(GC_large_allocd_bytes, n)) {
- continue;
- }
- /* If we are deallocating lots of memory from */
- /* finalizers, fail and collect sooner rather */
- /* than later. */
- if (GC_finalizer_bytes_freed > (GC_heapsize >> 4)) {
- continue;
- }
-# endif /* !USE_MUNMAP */
- }
- /* If the next heap block is obviously better, go on. */
- /* This prevents us from disassembling a single large block */
- /* to get tiny blocks. */
- {
+ if (size_avail != size_needed) {
signed_word next_size;
-
+
+ if (!may_split) continue;
+ /* If the next heap block is obviously better, go on. */
+ /* This prevents us from disassembling a single large block */
+ /* to get tiny blocks. */
thishbp = hhdr -> hb_next;
if (thishbp != 0) {
- GET_HDR(thishbp, thishdr);
+ GET_HDR(thishbp, thishdr);
next_size = (signed_word)(thishdr -> hb_sz);
if (next_size < size_avail
- && next_size >= size_needed
- && !GC_is_black_listed(thishbp, (word)size_needed)) {
- continue;
+ && next_size >= size_needed
+ && !GC_is_black_listed(thishbp, (word)size_needed)) {
+ continue;
}
}
}
@@ -737,7 +753,7 @@ GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n)
/* Restore hbp to point at free block */
hbp = prev;
if (0 == hbp) {
- return GC_allochblk_nth(sz, kind, flags, n);
+ return GC_allochblk_nth(sz, kind, flags, n, may_split);
}
hhdr = HDR(hbp);
}
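With may_split threaded through, GC_allochblk now searches in two phases:
an exact fit on the matching list first, then the larger lists with
splitting permitted, but only up to the precomputed split_limit. A toy
segregated free list showing the same two-phase search (all names below
are hypothetical; this is not bdwgc code):

    #include <stdlib.h>

    #define N_LISTS 8

    struct blk { size_t sz; struct blk *next; };
    static struct blk *fl[N_LISTS + 1];  /* fl[i] holds blocks near 2^i */

    static int list_for(size_t sz)
    {
        int i = 0;
        while (((size_t)1 << i) < sz && i < N_LISTS) ++i;
        return i;
    }

    /* Search list n: take an exact fit always, a larger block only  */
    /* when may_split is set, returning the remainder to its list.   */
    static struct blk *alloc_nth(size_t sz, int n, int may_split)
    {
        struct blk **p, *b;

        for (p = &fl[n]; (b = *p) != NULL; p = &b->next) {
            if (b->sz == sz || (may_split && b->sz > sz)) {
                *p = b->next;                 /* unlink the block     */
                if (b->sz > sz) {             /* split off the tail   */
                    struct blk *rest = malloc(sizeof *rest);
                    if (rest == NULL) abort();
                    rest->sz = b->sz - sz;
                    rest->next = fl[list_for(rest->sz)];
                    fl[list_for(rest->sz)] = rest;
                    b->sz = sz;
                }
                return b;
            }
        }
        return NULL;
    }

    static struct blk *alloc_block(size_t sz, int split_limit)
    {
        int i, start = list_for(sz);
        struct blk *r = alloc_nth(sz, start, 0); /* pass 1: exact fit */

        if (r != NULL) return r;
        for (i = start; i <= split_limit; ++i)   /* pass 2: may split */
            if ((r = alloc_nth(sz, i, 1)) != NULL) return r;
        return NULL;
    }

    int main(void)
    {
        struct blk *big = malloc(sizeof *big); /* seed a 256-unit block */
        if (big == NULL) abort();
        big->sz = 256; big->next = NULL;
        fl[list_for(big->sz)] = big;
        return alloc_block(64, N_LISTS) != NULL ? 0 : 1;
    }

Doing the exact-fit pass unconditionally is what lets the patch skip the
splitting policy entirely whenever a request can be satisfied in place.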
diff --git a/alloc.c b/alloc.c
index 5772b2d8..cecdf82e 100644
--- a/alloc.c
+++ b/alloc.c
@@ -234,7 +234,13 @@ static word GC_collect_at_heapsize = (word)(-1);
/* Have we allocated enough to amortize a collection? */
GC_bool GC_should_collect(void)
{
- return(GC_adj_bytes_allocd() >= min_bytes_allocd()
+ static word last_min_bytes_allocd;
+ static word last_gc_no;
+ if (last_gc_no != GC_gc_no) {
+ last_gc_no = GC_gc_no;
+ last_min_bytes_allocd = min_bytes_allocd();
+ }
+ return(GC_adj_bytes_allocd() >= last_min_bytes_allocd
|| GC_heapsize >= GC_collect_at_heapsize);
}
@@ -252,12 +258,12 @@ GC_bool GC_is_full_gc = FALSE;
* Initiate a garbage collection if appropriate.
* Choose judiciously
* between partial, full, and stop-world collections.
- * Assumes lock held, signals disabled.
*/
void GC_maybe_gc(void)
{
static int n_partial_gcs = 0;
+ GC_ASSERT(I_HOLD_LOCK());
if (GC_should_collect()) {
if (!GC_incremental) {
GC_gcollect_inner();
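The GC_should_collect() edit is a small memoization: min_bytes_allocd()
is recomputed at most once per collection cycle, keyed on the collection
counter. The same idiom in isolation (names hypothetical; the sentinel
initializer is an assumption, used here to force the first computation):

    #include <stdio.h>

    typedef unsigned long word;

    static word gc_no;              /* bumped once per collection cycle */

    /* Stand-in for a function that is costly to evaluate but stable */
    /* within a single collection cycle.                             */
    static word min_bytes_allocd(void) { return 4096 + gc_no * 64; }

    static word cached_min_bytes_allocd(void)
    {
        static word last_gc_no = (word)-1; /* sentinel: nothing cached */
        static word last_value;

        if (last_gc_no != gc_no) {         /* new cycle: refresh cache */
            last_gc_no = gc_no;
            last_value = min_bytes_allocd();
        }
        return last_value;
    }

    int main(void)
    {
        printf("%lu\n", cached_min_bytes_allocd()); /* computed        */
        printf("%lu\n", cached_min_bytes_allocd()); /* from the cache  */
        gc_no++;
        printf("%lu\n", cached_min_bytes_allocd()); /* recomputed      */
        return 0;
    }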
diff --git a/mark_rts.c b/mark_rts.c
index 0def44f3..1c81f58d 100644
--- a/mark_rts.c
+++ b/mark_rts.c
@@ -296,7 +296,7 @@ void GC_remove_tmp_roots(void)
GC_remove_root_at_pos(i);
} else {
i++;
- }
+ }
}
#if !defined(MSWIN32) && !defined(MSWINCE)
GC_rebuild_root_index();
diff --git a/pthread_stop_world.c b/pthread_stop_world.c
index 003eddeb..c542aebc 100644
--- a/pthread_stop_world.c
+++ b/pthread_stop_world.c
@@ -489,8 +489,8 @@ void GC_start_world()
for (i = 0; i < n_live_threads; i++)
while (0 != (code = sem_wait(&GC_restart_ack_sem)))
if (errno != EINTR) {
- GC_err_printf1("sem_wait() returned %ld\n",
- (unsigned long)code);
+ GC_err_printf("sem_wait() returned %d\n",
+ code);
ABORT("sem_wait() for restart handler failed");
}
# endif
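For context, the loop around the corrected message is the usual POSIX
idiom: sem_wait() can fail with EINTR when a signal arrives, so the wait
is retried, and any other failure is fatal. The idiom in isolation (the
calls are standard POSIX; the wrapper name is made up):

    #include <errno.h>
    #include <semaphore.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Block on sem; retry if interrupted by a signal, die otherwise. */
    static void sem_wait_checked(sem_t *sem)
    {
        int code;

        while ((code = sem_wait(sem)) != 0) {
            if (errno != EINTR) {
                fprintf(stderr, "sem_wait() returned %d (errno = %d)\n",
                        code, errno);
                abort();
            }
        }
    }

    int main(void)
    {
        sem_t sem;

        if (sem_init(&sem, 0, 1) != 0) abort();
        sem_wait_checked(&sem);            /* satisfied immediately */
        sem_post(&sem);
        sem_destroy(&sem);
        return 0;
    }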
diff --git a/tests/test.c b/tests/test.c
index fde6e6cc..70130782 100644
--- a/tests/test.c
+++ b/tests/test.c
@@ -1029,12 +1029,17 @@ void run_one_test()
# else
char *y = (char *)(size_t)fail_proc1;
# endif
+ CLOCK_TYPE start_time;
+ CLOCK_TYPE reverse_time;
+ CLOCK_TYPE typed_time;
+ CLOCK_TYPE tree_time;
+ unsigned long time_diff;
DCL_LOCK_STATE;
# ifdef FIND_LEAK
- (void)GC_printf(
+ GC_printf(
"This test program is not designed for leak detection mode\n");
- (void)GC_printf("Expect lots of problems.\n");
+ GC_printf("Expect lots of problems.\n");
# endif
GC_FREE(0);
# ifndef DBG_HDRS_ALL
@@ -1042,46 +1047,46 @@ void run_one_test()
if ((GC_size(GC_malloc(7)) != 8 &&
GC_size(GC_malloc(7)) != MIN_WORDS * sizeof(GC_word))
|| GC_size(GC_malloc(15)) != 16) {
- (void)GC_printf("GC_size produced unexpected results\n");
+ GC_printf("GC_size produced unexpected results\n");
FAIL;
}
collectable_count += 1;
if (GC_size(GC_malloc(0)) != MIN_WORDS * sizeof(GC_word)) {
- (void)GC_printf("GC_malloc(0) failed: GC_size returns %ld\n",
+ GC_printf("GC_malloc(0) failed: GC_size returns %ld\n",
(unsigned long)GC_size(GC_malloc(0)));
- FAIL;
+ FAIL;
}
collectable_count += 1;
if (GC_size(GC_malloc_uncollectable(0)) != MIN_WORDS * sizeof(GC_word)) {
- (void)GC_printf("GC_malloc_uncollectable(0) failed\n");
- FAIL;
+ GC_printf("GC_malloc_uncollectable(0) failed\n");
+ FAIL;
}
GC_is_valid_displacement_print_proc = fail_proc1;
GC_is_visible_print_proc = fail_proc1;
collectable_count += 1;
x = GC_malloc(16);
if (GC_base(x + 13) != x) {
- (void)GC_printf("GC_base(heap ptr) produced incorrect result\n");
+ GC_printf("GC_base(heap ptr) produced incorrect result\n");
FAIL;
}
# ifndef PCR
if (GC_base(y) != 0) {
- (void)GC_printf("GC_base(fn_ptr) produced incorrect result\n");
+ GC_printf("GC_base(fn_ptr) produced incorrect result\n");
FAIL;
}
# endif
if (GC_same_obj(x+5, x) != x + 5) {
- (void)GC_printf("GC_same_obj produced incorrect result\n");
+ GC_printf("GC_same_obj produced incorrect result\n");
FAIL;
}
if (GC_is_visible(y) != y || GC_is_visible(x) != x) {
- (void)GC_printf("GC_is_visible produced incorrect result\n");
+ GC_printf("GC_is_visible produced incorrect result\n");
FAIL;
}
z = GC_malloc(8);
GC_PTR_STORE(z, x);
if (*z != x) {
- (void)GC_printf("GC_PTR_STORE failed: %p != %p\n", *z, x);
+ GC_printf("GC_PTR_STORE failed: %p != %p\n", *z, x);
FAIL;
}
if (!TEST_FAIL_COUNT(1)) {
@@ -1090,14 +1095,14 @@ void run_one_test()
/* data segment, so there should have been no failures. */
/* The same applies to IA64. Something similar seems to */
/* be going on with NetBSD/M68K. */
- (void)GC_printf("GC_is_visible produced wrong failure indication\n");
+ GC_printf("GC_is_visible produced wrong failure indication\n");
FAIL;
# endif
}
if (GC_is_valid_displacement(y) != y
|| GC_is_valid_displacement(x) != x
|| GC_is_valid_displacement(x + 3) != x + 3) {
- (void)GC_printf(
+ GC_printf(
"GC_is_valid_displacement produced incorrect result\n");
FAIL;
}
@@ -1121,7 +1126,7 @@ void run_one_test()
if (GC_all_interior_pointers && !TEST_FAIL_COUNT(1)
|| !GC_all_interior_pointers && !TEST_FAIL_COUNT(2)) {
# endif
- (void)GC_printf("GC_is_valid_displacement produced wrong failure indication\n");
+ GC_printf("GC_is_valid_displacement produced wrong failure indication\n");
FAIL;
}
# endif
@@ -1165,17 +1170,30 @@ void run_one_test()
GC_free(GC_malloc(0));
GC_free(GC_malloc_atomic(0));
/* Repeated list reversal test. */
+ GET_TIME(start_time);
reverse_test();
-# ifdef PRINTSTATS
- GC_printf("-------------Finished reverse_test\n");
-# endif
+ if (GC_print_stats) {
+ GET_TIME(reverse_time);
+ time_diff = MS_TIME_DIFF(reverse_time, start_time);
+ GC_log_printf("-------------Finished reverse_test at time %u (%p)\n",
+ (unsigned) time_diff, &start_time);
+ }
# ifndef DBG_HDRS_ALL
typed_test();
-# ifdef PRINTSTATS
- GC_printf("-------------Finished typed_test\n");
-# endif
+ if (GC_print_stats) {
+ GET_TIME(typed_time);
+ time_diff = MS_TIME_DIFF(typed_time, start_time);
+ GC_log_printf("-------------Finished typed_test at time %u (%p)\n",
+ (unsigned) time_diff, &start_time);
+ }
# endif /* DBG_HDRS_ALL */
tree_test();
+ if (GC_print_stats) {
+ GET_TIME(tree_time);
+ time_diff = MS_TIME_DIFF(tree_time, start_time);
+ GC_log_printf("-------------Finished tree_test at time %u (%p)\n",
+ (unsigned) time_diff, &start_time);
+ }
LOCK();
n_tests++;
UNLOCK();
@@ -1184,11 +1202,13 @@ void run_one_test()
GC_gcollect();
tiny_reverse_test(0);
GC_gcollect();
- GC_printf("Finished a child process\n");
+ if (GC_print_stats)
+ GC_log_printf("Finished a child process\n");
exit(0);
}
# endif
- /* GC_printf("Finished %x\n", pthread_self()); */
+ if (GC_print_stats)
+ GC_log_printf("Finished %x\n", pthread_self());
}
void check_heap_stats()
@@ -1343,25 +1363,25 @@ void SetMinimumStack(long minSize)
printf("Testing GC Macintosh port.\n");
# endif
GC_COND_INIT();
- (void) GC_set_warn_proc(warn_proc);
+ GC_set_warn_proc(warn_proc);
# if (defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(GWW_VDB)) \
&& !defined(MAKE_BACK_GRAPH) && !defined(NO_INCREMENTAL)
GC_enable_incremental();
- (void) GC_printf("Switched to incremental mode\n");
+ GC_printf("Switched to incremental mode\n");
# if defined(MPROTECT_VDB)
- (void)GC_printf("Emulating dirty bits with mprotect/signals\n");
+ GC_printf("Emulating dirty bits with mprotect/signals\n");
# else
# ifdef PROC_VDB
- (void)GC_printf("Reading dirty bits from /proc\n");
+ GC_printf("Reading dirty bits from /proc\n");
# else
- (void)GC_printf("Using DEFAULT_VDB dirty bit implementation\n");
+ GC_printf("Using DEFAULT_VDB dirty bit implementation\n");
# endif
# endif
# endif
run_one_test();
check_heap_stats();
# ifndef MSWINCE
- (void)fflush(stdout);
+ fflush(stdout);
# endif
# ifdef LINT
/* Entry points we should be testing, but aren't. */
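The timing additions in run_one_test() above bracket each sub-test with
GET_TIME/MS_TIME_DIFF and report only when GC_print_stats is set. Those
macros are internal to the collector; a portable approximation of the
same measurement in standard C (clock() measures CPU time, which is an
assumption here, as the real macros may use wall-clock time):

    #include <stdio.h>
    #include <time.h>

    static unsigned long ms_since(clock_t start)
    {
        return (unsigned long)(((double)(clock() - start) * 1000.0)
                               / CLOCKS_PER_SEC);
    }

    int main(void)
    {
        clock_t start_time = clock();
        volatile unsigned long i, sink = 0;

        for (i = 0; i < 10000000UL; ++i) sink += i; /* stand-in workload */
        printf("-------------Finished workload at time %lu ms\n",
               ms_since(start_time));
        return 0;
    }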