author     Hans Boehm <boehm@acm.org>        1993-10-02 00:00:00 +0000
committer  Ivan Maidanski <ivmai@mail.ru>    2014-05-17 16:13:09 +0400
commit     31310c690972a9806cd8add17569a860dede7373 (patch)
tree       1241ee02c25fc8404c3dc55eacb8a451c29ad1c3
parent     87cf2ac8587a863708ad53520a01dc6285f11f1b (diff)
download   bdwgc-31310c690972a9806cd8add17569a860dede7373.tar.gz
gc3.3 tarball import (tag: gc3_3)
-rw-r--r--   Makefile            |  58
-rw-r--r--   OS2_MAKEFILE        |   6
-rw-r--r--   PCR-Makefile        |  14
-rw-r--r--   README              | 169
-rw-r--r--   README.amiga        |  83
-rw-r--r--   SCoptions.amiga     |  15
-rw-r--r--   SMakefile.amiga     |  43
-rw-r--r--   alloc.c             | 321
-rw-r--r--   allochblk.c         | 119
-rw-r--r--   alpha_mach_dep.s    |  58
-rw-r--r--   black_list.c        |  87
-rw-r--r--   checksums.c         | 138
-rw-r--r--   config.h            |  87
-rw-r--r--   debug_malloc.c      | 132
-rw-r--r--   dynamic_load.c      |  45
-rw-r--r--   finalize.c          |  97
-rw-r--r--   gc.h                | 126
-rw-r--r--   gc_headers.h        | 100
-rw-r--r--   gc_private.h        | 348
-rw-r--r--   headers.c           | 204
-rw-r--r--   interface.c         |  86
-rw-r--r--   mach_dep.c          | 179
-rw-r--r--   malloc.c            | 398
-rw-r--r--   mark.c              | 876
-rw-r--r--   mark_roots.c        | 198
-rw-r--r--   mips_mach_dep.s     |  32
-rw-r--r--   misc.c              | 394
-rw-r--r--   obj_map.c           |  18
-rw-r--r--   os_dep.c            | 691
-rw-r--r--   pcr_interface.c     |   2
-rw-r--r--   reclaim.c           | 298
-rw-r--r--   rs6000_mach_dep.s   |  50
-rw-r--r--   setjmp_test.c       |  10
-rw-r--r--   stubborn.c          | 314
-rw-r--r--   test.c              | 199
35 files changed, 4643 insertions, 1352 deletions
diff --git a/Makefile b/Makefile
index ab77025e..3b57c5d9 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,13 @@
-OBJS= alloc.o reclaim.o allochblk.o misc.o mach_dep.o os_dep.o mark_roots.o headers.o mark.o obj_map.o black_list.o finalize.o new_hblk.o real_malloc.o dynamic_load.o debug_malloc.o
+# Redefining srcdir allows object code for the nonPCR version of the collector
+# to be generated in different directories
+srcdir = .
+VPATH = $(srcdir)
-CSRCS= reclaim.c allochblk.c misc.c alloc.c mach_dep.c os_dep.c mark_roots.c headers.c mark.c obj_map.c pcr_interface.c black_list.c finalize.c new_hblk.c real_malloc.c dynamic_load.c debug_malloc.c
+OBJS= alloc.o reclaim.o allochblk.o misc.o mach_dep.o os_dep.o mark_roots.o headers.o mark.o obj_map.o black_list.o finalize.o new_hblk.o real_malloc.o dynamic_load.o debug_malloc.o malloc.o stubborn.o checksums.o
-SRCS= $(CSRCS) mips_mach_dep.s rs6000_mach_dep.s interface.c gc.h gc_headers.h gc_private.h config.h gc_inline.h gc.man if_mach.c if_not_there.c
+CSRCS= reclaim.c allochblk.c misc.c alloc.c mach_dep.c os_dep.c mark_roots.c headers.c mark.c obj_map.c pcr_interface.c black_list.c finalize.c new_hblk.c real_malloc.c dynamic_load.c debug_malloc.c malloc.c stubborn.c checksums.c
+
+SRCS= $(CSRCS) mips_mach_dep.s rs6000_mach_dep.s alpha_mach_dep.s gc.h gc_headers.h gc_private.h config.h gc_inline.h gc.man if_mach.c if_not_there.c
# The following is irrelevant on most systems. But a few
# versions of make otherwise fork the shell specified in
@@ -10,9 +15,14 @@ SRCS= $(CSRCS) mips_mach_dep.s rs6000_mach_dep.s interface.c gc.h gc_headers.h g
SHELL= /bin/sh
CC= cc
-CFLAGS= -O
+CFLAGS= -O -DSILENT
# Setjmp_test may yield overly optimistic results when compiled
# without optimization.
+# -DSILENT disables statistics printing, and improves performance.
+# -DCHECKSUMS reports on erroneously clear dirty bits, and unexpectedly
+# altered stubborn objects, at substantial performance cost.
+# -DFIND_LEAK causes the collector to assume that all inaccessible
+# objects should have been explicitly deallocated, and reports exceptions
SPECIALCFLAGS =
# Alternative flags to the C compiler for mach_dep.c.
@@ -26,46 +36,52 @@ pcr: PCR-Makefile gc_private.h gc_headers.h gc.h config.h mach_dep.o $(SRCS)
make -f PCR-Makefile depend
make -f PCR-Makefile
-$(OBJS) test.o: gc_private.h gc_headers.h gc.h config.h Makefile
+$(OBJS) test.o: $(srcdir)/gc_private.h $(srcdir)/gc_headers.h $(srcdir)/gc.h $(srcdir)/config.h
gc.a: $(OBJS)
ar ru gc.a $(OBJS)
ranlib gc.a || cat /dev/null
# ignore ranlib failure; that usually means it doesn't exist, and isn't needed
-mach_dep.o: mach_dep.c mips_mach_dep.s rs6000_mach_dep.s if_mach if_not_there
+mach_dep.o: $(srcdir)/mach_dep.c $(srcdir)/mips_mach_dep.s $(srcdir)/rs6000_mach_dep.s if_mach if_not_there
rm -f mach_dep.o
- ./if_mach MIPS "" as -o mach_dep.o mips_mach_dep.s
- ./if_mach RS6000 "" as -o mach_dep.o rs6000_mach_dep.s
- ./if_not_there mach_dep.o $(CC) -c $(SPECIALCFLAGS) mach_dep.c
+ ./if_mach MIPS "" as -o mach_dep.o $(srcdir)/mips_mach_dep.s
+ ./if_mach RS6000 "" as -o mach_dep.o $(srcdir)/rs6000_mach_dep.s
+ ./if_mach ALPHA "" as -o mach_dep.o $(srcdir)/alpha_mach_dep.s
+ ./if_not_there mach_dep.o $(CC) -c $(SPECIALCFLAGS) $(srcdir)/mach_dep.c
+
+if_mach: $(srcdir)/if_mach.c $(srcdir)/config.h
+ $(CC) $(CFLAGS) -o if_mach $(srcdir)/if_mach.c
-if_mach: if_mach.c config.h
- $(CC) $(CFLAGS) -o if_mach if_mach.c
-
-if_not_there: if_not_there.c
- $(CC) $(CFLAGS) -o if_not_there if_not_there.c
+if_not_there: $(srcdir)/if_not_there.c
+ $(CC) $(CFLAGS) -o if_not_there $(srcdir)/if_not_there.c
clean:
rm -f gc.a test.o gctest output-local output-diff $(OBJS) \
- setjmp_test mon.out gmon.out a.out core
+ setjmp_test mon.out gmon.out a.out core if_not_there if_mach
-rm -f *~
-gctest: test.o gc.a
- $(CC) $(CFLAGS) -o gctest test.o gc.a
+gctest: test.o gc.a if_mach if_not_there
+ rm -f gctest
+ ./if_mach ALPHA "" $(CC) $(CFLAGS) -o gctest -non_shared test.o gc.a
+ ./if_not_there gctest $(CC) $(CFLAGS) -o gctest test.o gc.a
# If an optimized setjmp_test generates a segmentation fault,
# odds are your compiler is broken. Gctest may still work.
# Try compiling setjmp_test unoptimized.
-setjmp_test: setjmp_test.c gc.h
- $(CC) $(CFLAGS) -o setjmp_test setjmp_test.c
+setjmp_test: $(srcdir)/setjmp_test.c $(srcdir)/gc.h if_mach if_not_there
+ rm -f setjmp_test
+ ./if_mach ALPHA "" $(CC) $(CFLAGS) -o setjmp_test -non_shared $(srcdir)/setjmp_test.c
+ ./if_not_there setjmp_test $(CC) $(CFLAGS) -o setjmp_test $(srcdir)/setjmp_test.c
test: setjmp_test gctest
./setjmp_test
./gctest
tar:
- tar cvf gc.tar $(SRCS) Makefile PCR-Makefile OS2_MAKEFILE README test.c setjmp_test.c
+ tar cvf gc.tar $(SRCS) Makefile PCR-Makefile OS2_MAKEFILE README test.c setjmp_test.c \
+ SMakefile.amiga SCoptions.amiga README.amiga
compress gc.tar
lint: $(CSRCS) test.c
- lint $(CSRCS) test.c | egrep -v "possible pointer alignment problem|abort|exit"
+ lint -DLINT $(CSRCS) test.c | egrep -v "possible pointer alignment problem|abort|exit|sbrk|mprotect|syscall"
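
The mach_dep.o and gctest rules above dispatch on the target machine
through two tiny helper programs, if_mach and if_not_there, whose source
is not part of this diff. As a rough sketch of the if_mach idea, assuming
config.h defines a MACH_TYPE string naming the configured machine and
that the second (possibly empty) argument selects an OS variant; both
are assumptions, not taken from the sources shown here:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include "config.h"   /* assumed to supply MACH_TYPE, e.g. "ALPHA" */

    /* Usage: if_mach mach_type os_type command args...
     * Exits quietly (so make continues) unless mach_type matches the
     * machine configured in config.h, in which case it runs command.  */
    int main(int argc, char **argv)
    {
        if (argc < 4) {
            fprintf(stderr, "Usage: if_mach mach_type os_type command\n");
            return 1;
        }
        if (strcmp(MACH_TYPE, argv[1]) != 0) return 0; /* not this machine */
        execvp(argv[3], argv + 3);                     /* run the command  */
        perror(argv[3]);
        return 1;
    }
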
diff --git a/OS2_MAKEFILE b/OS2_MAKEFILE
index 07460d84..a5d98f8c 100644
--- a/OS2_MAKEFILE
+++ b/OS2_MAKEFILE
@@ -6,10 +6,10 @@
# We also haven't figured out how to do partial links or build static libraries. Hence a
# client currently needs to link against all of the following:
-OBJS= alloc.obj reclaim.obj allochblk.obj misc.obj mach_dep.obj os_dep.obj mark_roots.obj headers.obj mark.obj obj_map.obj black_list.obj finalize.obj new_hblk.obj real_malloc.obj dynamic_load.obj debug_malloc.obj
+OBJS= alloc.obj reclaim.obj allochblk.obj misc.obj mach_dep.obj os_dep.obj mark_roots.obj headers.obj mark.obj obj_map.obj black_list.obj finalize.obj new_hblk.obj real_malloc.obj dynamic_load.obj debug_malloc.obj malloc.obj stubborn.obj
CC= icc
-CFLAGS= /O /Q
+CFLAGS= /O /Q /DSILENT
# Use /Ti instead of /O for debugging
# Setjmp_test may yield overly optimistic results when compiled
# without optimization.
@@ -22,5 +22,5 @@ mach_dep.obj: mach_dep.c
$(CC) $(CFLAGS) /C mach_dep.c
gctest: test.obj $(OBJS)
- $(CC) $(CFLAGS) /Fegctest test.obj $(OBJS)
+ $(CC) $(CFLAGS) /B"/STACK:524288" /Fegctest test.obj $(OBJS)
diff --git a/PCR-Makefile b/PCR-Makefile
index a432999b..0be383aa 100644
--- a/PCR-Makefile
+++ b/PCR-Makefile
@@ -1,21 +1,23 @@
-OBJS= alloc.o reclaim.o allochblk.o misc.o mach_dep.o os_dep.o mark_roots.o headers.o mark.o obj_map.o pcr_interface.o black_list.o finalize.o new_hblk.o real_malloc.o dynamic_load.o debug_malloc.o
+OBJS= alloc.o reclaim.o allochblk.o misc.o mach_dep.o os_dep.o mark_roots.o headers.o mark.o obj_map.o pcr_interface.o black_list.o finalize.o new_hblk.o real_malloc.o dynamic_load.o debug_malloc.o malloc.o stubborn.o
-CSRCS= reclaim.c allochblk.c misc.c alloc.c mach_dep.c os_dep.c mark_roots.c headers.c mark.c obj_map.c pcr_interface.c black_list.c finalize.c new_hblk.c real_malloc.c dynamic_load.c debug_malloc.c
+CSRCS= reclaim.c allochblk.c misc.c alloc.c mach_dep.c os_dep.c mark_roots.c headers.c mark.c obj_map.c pcr_interface.c black_list.c finalize.c new_hblk.c real_malloc.c dynamic_load.c debug_malloc.c malloc.c stubborn.c
SHELL= /bin/sh
# Fix to point to local pcr installation directory.
-PCRDIR= /project/ppcr/v1.5
+PCRDIR= /project/ppcr/dev
CC= gcc
-CFLAGS= -g -DPCR -I$(PCRDIR) -I$(PCRDIR)/pcr -I$(PCRDIR)/pcr/ansi -I$(PCRDIR)/pcr/posix
+CFLAGS= -g -DPCR -I$(PCRDIR) -I$(PCRDIR)/ansi -I$(PCRDIR)/posix
# We assume that mach_dep.o has already been built by top level makefile. It doesn't
# care about pcr vs UNIX, and we don't want to repeat that cruft.
+default: gc.o
+
all: gc.o test.o gcpcr
-gcpcr: gc.o test.o $(PCRDIR)/pcr/base/pcr.o $(PCRDIR)/pcr/base/PCR_BaseMain.o
- $(CC) -o gcpcr $(PCRDIR)/pcr/base/pcr.o $(PCRDIR)/pcr/base/PCR_BaseMain.o gc.o test.o -ldl
+gcpcr: gc.o test.o $(PCRDIR)/base/pcr.o $(PCRDIR)/base/PCR_BaseMain.o
+ $(CC) -o gcpcr $(PCRDIR)/base/pcr.o $(PCRDIR)/base/PCR_BaseMain.o gc.o test.o -ldl
gc.o: $(OBJS)
-ld -r -o gc.o $(OBJS)
diff --git a/README b/README
index 390f50b2..ff244220 100644
--- a/README
+++ b/README
@@ -1,5 +1,5 @@
Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
-Copyright (c) 1991, 1992 by Xerox Corporation. All rights reserved.
+Copyright (c) 1991-1993 by Xerox Corporation. All rights reserved.
THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -8,7 +8,7 @@ Permission is hereby granted to copy this garbage collector for any purpose,
provided the above notices are retained on all copies.
-This is version 2.6. Note that functions were renamed since version 1.9
+This is version 3.3. Note that functions were renamed since version 1.9
to make naming consistent with PCR collectors.
HISTORY -
@@ -26,13 +26,15 @@ Robert Brazile (brazile@diamond.bbn.com) originally supplied the ULTRIX code.
Al Dosser (dosser@src.dec.com) and Regis Cridlig (Regis.Cridlig@cl.cam.ac.uk)
subsequently provided updates and information on variation between ULTRIX
systems. Parag Patel (parag@netcom.com) supplied the A/UX code.
+Jesper Peterson (jep@mtiame.mtia.oz.au) supplied the Amiga port.
+Thomas Funke (thf@zelator.in-berlin.de(?)) supplied the NeXT port.
Bill Janssen (janssen@parc.xerox.com) supplied the SunOS dynamic loader
specific code. Manuel Serrano (serrano@cornas.inria.fr) supplied linux and
-Sony News specific code.
-
- (Blame for misinstallation of those modifications goes to the first author,
-however.) Some of the improvements incorporated in this version were
-suggested by David Chase, then at Olivetti Research.
+Sony News specific code. Al Dosser provided Alpha/OSF/1 code. He and
+Dave Detlefs (detlefs@src.dec.com) also provided several generic bug fixes.
+David Chase, then at Olivetti Research, suggested several improvements.
+(Blame for misinstallation of these modifications goes to the first author,
+however.)
Much of the code was rewritten by Hans-J. Boehm at Xerox PARC.
@@ -42,13 +44,25 @@ allocator. The algorithms used are described in:
Boehm, H., and M. Weiser, "Garbage Collection in an Uncooperative Environment",
Software Practice & Experience, September 1988, pp. 807-820.
+Boehm, H., A. Demers, and S. Shenker, "Mostly Parallel Garbage Collection",
+Proceedings of the ACM SIGPLAN '91 Conference on Programming Language Design
+and Implementation, SIGPLAN Notices 26, 6 (June 1991), pp. 157-164.
+
+Boehm, H., "Space Efficient Conservative Garbage Collection", Proceedings
+of the ACM SIGPLAN '91 Conference on Programming Language Design and
+Implementation, SIGPLAN Notices 28, 6 (June 1993), pp. 197-206.
+
+ Unlike the collector described in the second reference, this collector
+operates either with the mutator stopped during the entire collection
+(default) or incrementally during allocations. (The latter is supported
+on only a few machines.) It does not rely on threads, but is intended
+to be thread-safe.
+
Some of the ideas underlying the collector have previously been explored
by others. (Doug McIlroy wrote a vaguely similar collector that is part of
version 8 UNIX (tm).) However none of this work appears to have been widely
disseminated.
- This collector includes numerous refinements not described in the above paper.
-
Rudimentary tools for use of the collector as a leak detector are included.
@@ -114,7 +128,7 @@ which is usually unacceptable.)
INSTALLATION AND PORTABILITY
- As distributed, the macro SILENT is defined at the top of gc_private.h.
+ As distributed, the macro SILENT is defined in Makefile.
In the event of problems, this can be removed to obtain a moderate
amount of descriptive output for each collection.
(The given statistics exhibit a few peculiarities.
@@ -145,25 +159,28 @@ the following machines:
Sun 3
Sun 4 under SunOS 4.X or Solaris2.X
Vax under 4.3BSD, Ultrix
- Intel 386 or 486 under OS/2 (no threads) or linux.
- Sequent Symmetry (no concurrency)
- Encore Multimax (no concurrency)
+ Intel 386 or 486 under OS/2 (single threaded) or linux.
+ Sequent Symmetry (single threaded)
+ Encore Multimax (single threaded)
MIPS M/120 (and presumably M/2000) (RISC/os 4.0 with BSD libraries)
IBM PC/RT (Berkeley UNIX)
IBM RS/6000
HP9000/300
HP9000/700
DECstations under Ultrix
+ DEC Alpha running OSF/1
SGI workstations under IRIX
Sony News
Apple MacIntosh under A/UX
+ Commodore Amiga (see README.amiga)
+ NeXT machines
- For these machines you should check the beginning of gc.h
+ For these machines you should check in config.h
to verify that the machine type is correctly defined. On
nonSun machines, you may also need to make changes to the
Makefile, as described by comments there.
- Dynamic libraries are completely supported only under SunOS4.X
+ Dynamic libraries are completely supported only under SunOS
(and even that support is not functional on the last Sun 3 release).
On other machines we recommend that you do one of the following:
@@ -332,9 +349,7 @@ and cause all allocations to simply grow the heap.
The variable GC_non_gc_bytes, which is normally 0, may be changed to reflect
the amount of memory allocated by the above routines that should not be
-considered as a candidate for collection. Collections are inhibited
-if this exceeds a given fraction (currently 3/4) of the total heap size.
-The heap is simply expanded instead. Careless use may, of course, result
+considered as a candidate for collection. Careless use may, of course, result
in excessive memory consumption.
Some additional tuning is possible through the parameters defined
@@ -349,14 +364,6 @@ near the top of gc_private.h.
includes some allocation macros that may be used in place of GC_malloc
and friends.
- Somewhat different emulations of the standard C allocation routines are
-contained and described in "interface.c" (contributed by David Chase, but
-subsequently mangled by Hans Boehm). These are appropriate for mixed
-systems, where part of the system uses explicit deallocation, and does not
-leak. Exclusive use of interface.c routines can result in needless
-fragmentation, since certain kinds of object coalescing are only done
-by the collector.
-
All externally visible names in the garbage collector start with "GC_".
To avoid name conflicts, client code should avoid this prefix, except when
accessing garbage collector routines or variables.
@@ -440,6 +447,62 @@ of 16 bytes form the object beginning, and some translation is necessary
when finalization routines are invoked. For details about what's stored
in the header, see the definition of the type oh in debug_malloc.c)
+INCREMENTAL/GENERATIONAL COLLECTION:
+
+The collector normally interrupts client code for the duration of
+a garbage collection mark phase. This may be unacceptable if interactive
+response is needed for programs with large heaps. The collector
+can also run in a "generational" mode, in which it usually attempts to
+collect only objects allocated since the last garbage collection.
+Furthermore, in this mode, garbage collections run mostly incrementally,
+with a small amount of work performed in response to each of a large number of
+GC_malloc requests.
+
+This mode is enabled by a call to GC_enable_incremental().
+
+Incremental and generational collection is effective in reducing
+pause times only if the collector has some way to tell which objects
+or pages have been recently modified. The collector uses two sources
+of information:
+
+1. Information provided by the VM system. This may be provided in
+one of several forms. Under Solaris 2.X (and potentially under other
+similar systems) information on dirty pages can be read from the
+/proc file system. Under other systems (currently SunOS4.X) it is
+possible to write-protect the heap, and catch the resulting faults.
+On these systems we require that system calls writing to the heap
+(other than read) be handled specially by client code.
+See os_dep.c for details.
+
+2. Information supplied by the programmer. We define "stubborn"
+objects to be objects that are rarely changed. Such an object
+can be allocated (and enabled for writing) with GC_malloc_stubborn.
+Once it has been initialized, the collector should be informed with
+a call to GC_end_stubborn_change. Subsequent writes that store
+pointers into the object must be preceded by a call to
+GC_change_stubborn.
+
+This mechanism performs best for objects that are written only for
+initialization, and such that only one stubborn object is writable
+at once. It is typically not worth using for short-lived
+objects. Stubborn objects are treated less efficiently than pointerfree
+(atomic) objects.
+
+A rough rule of thumb is that, in the absence of VM information, garbage
+collection pauses are proportional to the amount of pointerful storage
+plus the amount of modified "stubborn" storage that is reachable during
+the collection.
+
+Initial allocation of stubborn objects takes longer than allocation
+of other objects, since other data structures need to be maintained.
+
+We recommend against random use of stubborn objects in client
+code, since bugs caused by inappropriate writes to stubborn objects
+are likely to be very infrequently observed and hard to trace.
+However, their use may be appropriate in a few carefully written
+library routines that do not make the objects themselves available
+for writing by client code.
+
BUGS:
@@ -455,10 +518,8 @@ percentage of time required for collection should be constant across
heap sizes. But collection pauses will increase for larger heaps.
(On SPARCstation 2s collection times will be on the order of 300 msecs
per MB of accessible memory that needs to be scanned. Your mileage
-may vary.) Much better real-time behavior would be possible if we
-had a portable way to identify sections of memory that were recently
-modified. Experience with PCR indicates that 100 msec pause times
-are probably possible, almost independent of heap size.
+may vary.) The incremental/generational collection facility helps,
+but is portable only if "stubborn" allocation is used.
RECENT VERSIONS:
@@ -542,8 +603,7 @@ for PPCR.
under 4.1.1U1, but apparently not 4.1.1. If you have such a machine,
use -Bstatic.)
- Version 2.5 added Solaris dynamic libary support, Solaris/Intel support,
- and fixed the following bugs:
+ Version 2.5 fixed the following bugs:
- Removed an explicit call to exit(1)
- Fixed calls to GC_printf and GC_err_printf, so the correct number of
arguments are always supplied. The OS/2 C compiler gets confused if
@@ -551,7 +611,44 @@ for PPCR.
doesn't require this to work. The ANSI sanctioned way of doing things
causes too many compatibility problems.)
- Version 2.6 fixed a bug diagnosed by Al Dosser at DEC. The marker
- could lose some pointers in the event of a mark stack overflow, a case
- it was intended to handle correctly. (He also pointed out a performance
- bug that was tickled under the same circumstances.)
+ Version 3.0 added generational/incremental collection and stubborn
+ objects.
+
+ Version 3.1 added the following features:
+- A workaround for a SunOS 4.X SPARC C compiler
+ misfeature that caused problems when the collector was turned into
+ a dynamic library.
+- A fix for a bug in GC_base that could result in a memory fault.
+- A fix for a performance bug (and several other misfeatures) pointed
+ out by Dave Detlefs and Al Dosser.
+- Use of dirty bit information for static data under Solaris 2.X.
+- DEC Alpha/OSF1 support (thanks to Al Dosser).
+- Incremental collection on more platforms.
+- A more refined heap expansion policy. Less space usage by default.
+- Various minor enhancements to reduce space usage, and to reduce
+ the amount of memory scanned by the collector.
+- Uncollectable allocation without per object overhead.
+- More conscientious handling of out-of-memory conditions.
+- Fixed a bug in debugging stubborn allocation.
+- Fixed a bug that resulted in occasional erroneous reporting of smashed
+ objects with debugging allocation.
+- Fixed bogus leak reports of size 4096 blocks with FIND_LEAK.
+
+ Version 3.2 fixed a serious and not entirely repeatable bug in
+ the incremental collector. It appeared only when dirty bit info
+ on the roots was available, which is normally only under Solaris.
+ It also added GC_general_register_disappearing_link, and some
+ testing code. Interface.c disappeared.
+
+ Version 3.3 fixes several bugs and adds new ports:
+- PCR-specific bugs.
+- Missing locking in GC_free, redundant FASTUNLOCK
+ in GC_malloc_stubborn, and 2 bugs in
+ GC_unregister_disappearing_link.
+ All of the above were pointed out by Neil Sharman
+ (neil@cs.mu.oz.au).
+- Common symbols allocated by the SunOS4.X dynamic loader were not included in the root set.
+- Bug in GC_finalize (reported by Brian Beuning and Al Dosser)
+- Merged Amiga port from Jesper Peterson (untested)
+- Merged NeXT port from Thomas Funke (significantly modified and untested)
\ No newline at end of file
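
The stubborn-object protocol introduced in the new INCREMENTAL/GENERATIONAL
COLLECTION section above is easiest to see as client code. The calls below
are the ones the README names (GC_enable_incremental, GC_malloc_stubborn,
GC_end_stubborn_change, GC_change_stubborn); the node type, the casts, and
the habit of re-ending a change after a rare update are this sketch's
assumptions, not the library's documented idiom:

    #include "gc.h"

    struct node {
        struct node * next;
        int value;
    };

    struct node * cons(struct node * tail, int v)
    {
        /* Allocate writable, initialize, then tell the collector that
         * writes are (normally) finished. */
        struct node * p = (struct node *)GC_malloc_stubborn(sizeof(struct node));
        p -> next = tail;
        p -> value = v;
        GC_end_stubborn_change((char *)p);
        return p;
    }

    void set_tail(struct node * p, struct node * tail)
    {
        /* A rare pointer store into a stubborn object must be preceded
         * by GC_change_stubborn, per the section above. */
        GC_change_stubborn((char *)p);
        p -> next = tail;
        GC_end_stubborn_change((char *)p);
    }

    int main()
    {
        GC_enable_incremental(); /* opt in to generational/incremental mode */
        set_tail(cons(0, 1), cons(0, 2));
        return 0;
    }
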
diff --git a/README.amiga b/README.amiga
new file mode 100644
index 00000000..cfb1fe81
--- /dev/null
+++ b/README.amiga
@@ -0,0 +1,83 @@
+
+ADDITIONAL NOTES FOR AMIGA PORT
+
+These notes assume some familiarity with Amiga internals.
+
+WHY I PORTED TO THE AMIGA
+
+The sole reason why I made this port was as a first step in getting
+the Sather(*) language on the Amiga. A port of this language will
+be done as soon as the Sather 1.0 sources are made available to me.
+Given this motivation, the garbage collection (GC) port is rather
+minimal.
+
+(*) For information on Sather read the comp.lang.sather newsgroup.
+
+LIMITATIONS
+
+This port assumes that the startup code linked with target programs
+is that supplied with SAS/C versions 6.0 or later. This allows
+assumptions to be made about where to find the stack base pointer
+and data segments when programs are run from WorkBench, as opposed
+to running from the CLI. The compiler dependent code is all in the
+GC_get_stack_base() and GC_register_data_segments() functions, but
+may spread as I add Amiga specific features.
+
+Given that SAS/C was assumed, the port is set up to be built with
+"smake" using the "SMakefile". Compiler options in "SCoptions" can
+be set with "scopts" program. Both "smake" and "scopts" are part of
+the SAS/C commercial development system.
+
+In keeping with the porting philosophy outlined above, this port
+will not behave well with Amiga specific code. Especially not inter-
+process comms via messages, and setting up public structures like
+Intuition objects or anything else in the system lists. For the
+time being the use of this library is limited to single threaded
+ANSI/POSIX compliant or near-compliant code. (i.e. stick to stdio
+for now). Given this limitation there is currently no mechanism for
+allocating "CHIP" or "PUBLIC" memory under the garbage collector.
+I'll add this after giving it considerable thought. The major
+problem is that the entire physical address space may have to be scanned,
+since there is no telling who we may have passed memory to.
+
+If you allocate your own stack in client code, you will have to
+assign the pointer plus stack size to GC_stackbottom.
+
+The initial stack size of the target program can be compiled in by
+setting the __stack symbol (see SAS documentation). It can be
+overridden from the CLI by running the AmigaDOS "stack" program, or
+from the WorkBench by setting the stack size in the tool types window.
+
+SAS/C COMPILER OPTIONS (SCoptions)
+
+You may wish to check that the "CPU" code option is appropriate for your
+intended target system.
+
+Under no circumstances set the "StackExtend" code option when
+compiling either the library or *ANY* client code.
+
+All benign compiler warnings have been suppressed. These mainly
+involve lack of prototypes in the code, and dead assignments
+detected by the optimizer.
+
+THE GOOD NEWS
+
+The library as it stands is compatible with the GigaMem commercial
+virtual memory software, and probably similar PD software.
+
+The performance of "gctest" on an Amiga 2630 (68030 @ 25MHz)
+compares favourably with an HP9000 with similar architecture (a 325
+with a 68030 I think).
+
+-----------------------------------------------------------------------
+
+The Amiga port has been brought to you by:
+
+Jesper Peterson.
+
+jep@mtiame.mtia.oz.au (preferred, but 1 week turnaround)
+jep@orca1.vic.design.telecom.au (that's orca<one>, 1 day turnaround)
+
+At least one of these addresses should be around for a while, even
+though I don't work for either of the companies involved.
+
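
For the custom-stack caveat above ("assign the pointer plus stack size to
GC_stackbottom"), a minimal sketch. It assumes a downward-growing stack,
as on the 68000 family, so that the "bottom" the collector scans from is
the highest address; the size and the use of malloc are illustrative:

    #include <stdlib.h>
    #include "gc.h"

    extern char * GC_stackbottom;      /* cold end of the user stack */

    #define MY_STACK_BYTES (64 * 1024) /* hypothetical stack size */

    void use_private_stack()
    {
        char * stack = malloc(MY_STACK_BYTES);

        /* Stack grows down, so hand the collector base plus size,
         * exactly as the note above prescribes. */
        GC_stackbottom = stack + MY_STACK_BYTES;

        /* ... switch SP to the new stack and enter client code ... */
    }
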
diff --git a/SCoptions.amiga b/SCoptions.amiga
new file mode 100644
index 00000000..9207e13e
--- /dev/null
+++ b/SCoptions.amiga
@@ -0,0 +1,15 @@
+CPU=68030
+NOSTACKCHECK
+ERRORREXX
+OPTIMIZE
+MAPHUNK
+NOVERSION
+NOICONS
+OPTIMIZERTIME
+DEFINE SILENT
+IGNORE=105
+IGNORE=304
+IGNORE=154
+IGNORE=85
+IGNORE=100
+IGNORE=161
diff --git a/SMakefile.amiga b/SMakefile.amiga
new file mode 100644
index 00000000..10a8b64b
--- /dev/null
+++ b/SMakefile.amiga
@@ -0,0 +1,43 @@
+OBJS= alloc.o reclaim.o allochblk.o misc.o mach_dep.o os_dep.o mark_roots.o headers.o mark.o obj_map.o black_list.o finalize.o new_hblk.o real_malloc.o dynamic_load.o debug_malloc.o malloc.o stubborn.o checksums.o
+
+INC= gc_private.h gc_headers.h gc.h config.h
+
+all: gctest setjmp_test
+
+alloc.o : alloc.c $(INC)
+reclaim.o : reclaim.c $(INC)
+allochblk.o : allochblk.c $(INC)
+misc.o : misc.c $(INC)
+mach_dep.o : mach_dep.c $(INC)
+os_dep.o : os_dep.c $(INC)
+mark_roots.o : mark_roots.c $(INC)
+headers.o : headers.c $(INC)
+mark.o : mark.c $(INC)
+obj_map.o : obj_map.c $(INC)
+black_list.o : black_list.c $(INC)
+finalize.o : finalize.c $(INC)
+new_hblk.o : new_hblk.c $(INC)
+real_malloc.o : real_malloc.c $(INC)
+dynamic_load.o : dynamic_load.c $(INC)
+debug_malloc.o : debug_malloc.c $(INC)
+malloc.o : malloc.c $(INC)
+stubborn.o : stubborn.c $(INC)
+checksums.o : checksums.c $(INC)
+test.o : test.c $(INC)
+
+gc.lib: $(OBJS)
+ oml gc.lib r $(OBJS)
+
+clean:
+ delete gc.lib gctest setjmp_test \#?.o
+
+gctest: gc.lib test.o
+ slink LIB:c.o test.o to $@ lib gc.lib LIB:sc.lib LIB:scm.lib
+
+setjmp_test: setjmp_test.c gc.h
+ sc setjmp_test.c
+ slink LIB:c.o $@.o to $@ lib LIB:sc.lib
+
+test: setjmp_test gctest
+ setjmp_test
+ gctest
diff --git a/alloc.c b/alloc.c
index 1456d529..1d88c2fa 100644
--- a/alloc.c
+++ b/alloc.c
@@ -1,6 +1,6 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
- * Copyright (c) 1991 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1991-1993 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -8,12 +8,6 @@
* Permission is hereby granted to copy this garbage collector for any purpose,
* provided the above notices are retained on all copies.
*
- * This file contains the functions:
- * static void clear_marks()
- * bool GC_gcollect_inner(force)
- * void GC_gcollect()
- * bool GC_expand_hp(n)
- * ptr_t GC_allocobj(sz, kind)
*/
@@ -23,23 +17,6 @@
# include "gc_private.h"
/*
- * This is a garbage collecting storage allocator
- * that should run on most UNIX systems. The garbage
- * collector is overly conservative in that it may fail to GC_reclaim
- * inaccessible storage. On the other hand, it does not assume
- * any runtime tag information.
- * We make the following assumptions:
- * 1. We are running under something that looks like Berkeley UNIX,
- * on one of the supported architectures.
- * 2. For every accessible object, a pointer to it is stored in
- * a) the stack segment, or
- * b) the data or bss segment, or
- * c) the registers, or
- * d) an accessible block.
- *
- */
-
-/*
* Separate free lists are maintained for different sized objects
* up to MAXOBJSZ.
* The call GC_allocobj(i,k) ensures that the freelist for
@@ -51,7 +28,7 @@
* opp = &(GC_objfreelist[i]);
* if (*opp == 0) GC_allocobj(i, NORMAL);
* ptr = *opp;
- * *opp = ptr->next;
+ * *opp = obj_link(ptr);
*
* Note that this is very fast if the free list is non-empty; it should
* only involve the execution of 4 or 5 simple instructions.
@@ -74,9 +51,14 @@ word GC_non_gc_bytes = 0; /* Number of bytes not intended to be collected */
word GC_gc_no = 0;
+int GC_incremental = 0; /* By default, stop the world. */
+
+int GC_full_freq = 3; /* Every 4th collection is a full */
+ /* collection. */
+
char * GC_copyright[] =
{"Copyright 1988,1989 Hans-J. Boehm and Alan J. Demers",
-"Copyright (c) 1991,1992 by Xerox Corporation. All rights reserved.",
+"Copyright (c) 1991-1993 by Xerox Corporation. All rights reserved.",
"THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY",
" EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK."};
@@ -86,34 +68,6 @@ char * GC_copyright[] =
extern signed_word GC_mem_found; /* Number of reclaimed longwords */
/* after garbage collection */
-/* clear all mark bits in the header */
-void GC_clear_hdr_marks(hhdr)
-register hdr * hhdr;
-{
- bzero((char *)hhdr -> hb_marks, (int)(MARK_BITS_SZ*sizeof(word)));
-}
-
-/*
- * Clear all mark bits associated with block h.
- */
-/*ARGSUSED*/
-static void clear_marks_for_block(h, dummy)
-struct hblk *h;
-word dummy;
-{
- register hdr * hhdr = HDR(h);
-
- GC_clear_hdr_marks(hhdr);
-}
-
-/*
- * Clear mark bits in all allocated heap blocks
- */
-static void clear_marks()
-{
- GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
-}
-
bool GC_dont_expand = 0;
word GC_free_space_divisor = 4;
@@ -135,11 +89,17 @@ static word min_words_allocd()
if (stack_size < 0) stack_size = -stack_size;
total_root_size = 2 * stack_size + GC_root_size;
- return(BYTES_TO_WORDS(GC_heapsize + total_root_size)/GC_free_space_divisor);
+ if (GC_incremental) {
+ return(BYTES_TO_WORDS(GC_heapsize + total_root_size)
+ / (2 * GC_free_space_divisor));
+ } else {
+ return(BYTES_TO_WORDS(GC_heapsize + total_root_size)
+ / GC_free_space_divisor);
+ }
}
/* Return the number of words allocated, adjusted for explicit storage */
-/* management. This number can be used in deciding when to trigger */
+/* management, etc. This number is used in deciding when to trigger */
/* collections. */
word GC_adj_words_allocd()
{
@@ -156,6 +116,10 @@ word GC_adj_words_allocd()
- (signed_word)GC_mem_freed - expl_managed;
if (result > (signed_word)GC_words_allocd) result = GC_words_allocd;
/* probably client bug or unfortunate scheduling */
+ result += GC_words_wasted;
+ /* This doesn't reflect useful work. But if there is lots of */
+ /* new fragmentation, the same is probably true of the heap, */
+ /* and the collection will be correspondingly cheaper. */
if (result < (signed_word)(GC_words_allocd >> 2)) {
/* Always count at least 1/8 of the allocations. We don't want */
/* to collect too infrequently, since that would inhibit */
@@ -182,29 +146,91 @@ void GC_clear_a_few_frames()
for (i = 0; i < NWORDS; i++) frames[i] = 0;
}
+/* Have we allocated enough to amortize a collection? */
+bool GC_should_collect()
+{
+ return(GC_adj_words_allocd() >= min_words_allocd());
+}
+
+/*
+ * Initiate a garbage collection if appropriate.
+ * Choose judiciously
+ * between partial, full, and stop-world collections.
+ * Assumes lock held, signals disabled.
+ */
+void GC_maybe_gc()
+{
+ static int n_partial_gcs = 0;
+ if (GC_should_collect()) {
+ if (!GC_incremental) {
+ GC_gcollect_inner();
+ n_partial_gcs = 0;
+ } else if (n_partial_gcs >= GC_full_freq) {
+ GC_initiate_full();
+ n_partial_gcs = 0;
+ } else {
+ GC_initiate_partial(GC_gc_no+1);
+ n_partial_gcs++;
+ }
+ }
+}
+
/*
- * Restore inaccessible objects to the free list
- * update GC_mem_found (number of reclaimed longwords after
- * garbage collection)
- * We assume we hold the allocation lock, and are not interruptable by
- * signals, if that matters.
- * If force is FALSE and we didn't do anything, return FALSE.
- * Otherwise return TRUE
+ * Stop the world garbage collection. Assumes lock held, signals disabled.
*/
-bool GC_gcollect_inner(force)
-bool force; /* Collect even if only a small amount of allocation */
- /* has taken place. Otherwise we refuse, allowing the */
- /* heap to grow. */
+void GC_gcollect_inner()
{
-# ifdef PRINTTIMES
- CLOCK_TYPE start_time;
- CLOCK_TYPE mark_time;
- CLOCK_TYPE done_time;
+# ifdef PRINTSTATS
+ GC_printf2(
+ "Initiating full world-stop collection %lu after %ld allocd bytes\n",
+ (unsigned long) GC_gc_no+1,
+ (long)WORDS_TO_BYTES(GC_words_allocd));
# endif
+ GC_promote_black_lists();
+ /* GC_reclaim_or_delete_all(); -- not needed: no intervening allocation */
+ GC_clear_marks();
+ STOP_WORLD();
+ GC_stopped_mark();
+ START_WORLD();
+ GC_finish_collection();
+}
+
+/*
+ * Perform n units of garbage collection work. A unit is intended to touch
+ * roughly GC_RATE pages. Every once in a while, we do more than that.
+ */
+# define GC_RATE 8
+void GC_collect_a_little(n)
+int n;
+{
+ register int i;
+
+ if (GC_collection_in_progress()) {
+ for (i = 0; i < GC_RATE*n; i++) {
+ if (GC_mark_some()) {
+ /* Need to finish a collection */
+ STOP_WORLD();
+ GC_stopped_mark();
+ START_WORLD();
+ GC_finish_collection();
+ break;
+ }
+ }
+ } else {
+ GC_maybe_gc();
+ }
+}
- if (!force && !GC_dont_expand
- && GC_adj_words_allocd() < min_words_allocd()) return(FALSE);
+/*
+ * World-stopped mark phase. Assumes lock is held, signals are disabled,
+ * and the world is stopped.
+ */
+void GC_stopped_mark()
+{
# ifdef PRINTTIMES
+ CLOCK_TYPE start_time;
+ CLOCK_TYPE done_time;
+
GET_TIME(start_time);
# endif
# ifdef PRINTSTATS
@@ -213,45 +239,56 @@ bool force; /* Collect even if only a small amount of allocation */
(long)WORDS_TO_BYTES(GC_mem_found));
# endif
GC_gc_no++;
-# ifdef GATHERSTATS
- GC_mem_found = 0;
- GC_composite_in_use = 0;
- GC_atomic_in_use = 0;
-# endif
# ifdef PRINTSTATS
- GC_printf2("Collection number %lu after %lu allocated bytes ",
- (unsigned long) GC_gc_no,
- (unsigned long) WORDS_TO_BYTES(GC_words_allocd));
- GC_printf1("(heapsize = %lu bytes)\n",
+ GC_printf3(
+ "--> Collection number %lu after %lu allocated + %lu wasted bytes\n",
+ (unsigned long) GC_gc_no,
+ (unsigned long) WORDS_TO_BYTES(GC_words_allocd),
+ (unsigned long) WORDS_TO_BYTES(GC_words_wasted));
+ GC_printf1("---> heapsize = %lu bytes\n",
(unsigned long) GC_heapsize);
/* Printf arguments may be pushed in funny places. Clear the */
/* space. */
GC_printf0("");
# endif
- clear_marks();
-
- STOP_WORLD();
-
/* Mark from all roots. */
/* Minimize junk left in my registers and on the stack */
GC_clear_a_few_frames();
GC_noop(0,0,0,0,0,0);
- GC_mark_roots();
- GC_promote_black_lists();
+ GC_initiate_partial(GC_gc_no);
+ while(!GC_mark_some());
/* Check all debugged objects for consistency */
if (GC_debugging_started) {
- GC_check_heap();
+ (*GC_check_heap)();
}
-
- START_WORLD();
# ifdef PRINTTIMES
- GET_TIME(mark_time);
+ GET_TIME(done_time);
+ GC_printf1("World-stopped marking took %lu msecs\n",
+ MS_TIME_DIFF(done_time,start_time));
# endif
+}
+
+/* Finish up a collection. Assumes lock is held, signals are disabled, */
+/* but the world is otherwise running. */
+void GC_finish_collection()
+{
+# ifdef PRINTTIMES
+ CLOCK_TYPE start_time;
+ CLOCK_TYPE finalize_time;
+ CLOCK_TYPE done_time;
+
+ GET_TIME(start_time);
+ finalize_time = start_time;
+# endif
+
+# ifdef GATHERSTATS
+ GC_mem_found = 0;
+# endif
# ifdef FIND_LEAK
/* Mark all objects on the free list. All objects should be */
/* marked when we're done. */
@@ -280,6 +317,13 @@ bool force; /* Collect even if only a small amount of allocation */
# else
GC_finalize();
+# ifdef STUBBORN_ALLOC
+ GC_clean_changing_list();
+# endif
+
+# ifdef PRINTTIMES
+ GET_TIME(finalize_time);
+# endif
/* Clear free list mark bits, in case they got accidentally marked */
/* Note: HBLKPTR(p) == pointer to head of block containing *p */
@@ -303,7 +347,9 @@ bool force; /* Collect even if only a small amount of allocation */
hhdr = HDR(h);
word_no = (((word *)p) - ((word *)h));
clear_mark_bit_from_hdr(hhdr, word_no);
- GC_mem_found -= size;
+# ifdef GATHERSTATS
+ GC_mem_found -= size;
+# endif
}
}
}
@@ -311,14 +357,14 @@ bool force; /* Collect even if only a small amount of allocation */
# ifdef PRINTSTATS
- GC_printf1("Bytes recovered before GC_reclaim - f.l. count = %ld\n",
+ GC_printf1("Bytes recovered before sweep - f.l. count = %ld\n",
(long)WORDS_TO_BYTES(GC_mem_found));
# endif
/* Reconstruct free lists to contain everything not marked */
GC_start_reclaim(FALSE);
-# endif /* FIND_LEAK */
+# endif /* !FIND_LEAK */
# ifdef PRINTSTATS
GC_printf2(
@@ -334,19 +380,18 @@ bool force; /* Collect even if only a small amount of allocation */
GC_words_allocd_before_gc += GC_words_allocd;
GC_non_gc_bytes_at_gc = GC_non_gc_bytes;
GC_words_allocd = 0;
+ GC_words_wasted = 0;
GC_mem_freed = 0;
-
- /* Get final time */
+
# ifdef PRINTTIMES
GET_TIME(done_time);
- GC_printf2("Garbage collection took %lu + %lu msecs\n",
- MS_TIME_DIFF(mark_time,start_time),
- MS_TIME_DIFF(done_time,mark_time));
+ GC_printf2("Finalize + initiate sweep took %lu + %lu msecs\n",
+ MS_TIME_DIFF(finalize_time,start_time),
+ MS_TIME_DIFF(done_time,finalize_time));
# endif
- return(TRUE);
}
-/* Externally callable version of above */
+/* Externally callable routine to invoke full, stop-world collection */
void GC_gcollect()
{
DCL_LOCK_STATE;
@@ -356,11 +401,13 @@ void GC_gcollect()
if (!GC_is_initialized) GC_init_inner();
/* Minimize junk left in my registers */
GC_noop(0,0,0,0,0,0);
- (void) GC_gcollect_inner(TRUE);
+ GC_gcollect_inner();
UNLOCK();
ENABLE_SIGNALS();
}
+word GC_n_heap_sects = 0; /* Number of sections currently in heap. */
+
/*
* Use the chunk of memory starting at p of size bytes as part of the heap.
* Assumes p is HBLKSIZE aligned, and bytes is a multiple of HBLKSIZE.
@@ -371,7 +418,20 @@ word bytes;
{
word words;
- GC_install_header(p);
+ if (GC_n_heap_sects >= MAX_HEAP_SECTS) {
+ GC_err_printf0(
+ "Too many heap sections: Increase MAXHINCR or MAX_HEAP_SECTS");
+ ABORT("Too many heap sections: Increase MAXHINCR or MAX_HEAP_SECTS");
+ }
+ if (!GC_install_header(p)) {
+ /* This is extremely unlikely. Can't add it. This will */
+ /* almost certainly result in a 0 return from the allocator, */
+ /* which is entirely appropriate. */
+ return;
+ }
+ GC_heap_sects[GC_n_heap_sects].hs_start = (ptr_t)p;
+ GC_heap_sects[GC_n_heap_sects].hs_bytes = bytes;
+ GC_n_heap_sects++;
words = BYTES_TO_WORDS(bytes - HDR_BYTES);
HDR(p) -> hb_sz = words;
GC_freehblk(p);
@@ -406,27 +466,29 @@ ptr_t x, y;
/*
* this explicitly increases the size of the heap. It is used
- * internally, but my also be invoked from GC_expand_hp by the user.
+ * internally, but may also be invoked from GC_expand_hp by the user.
* The argument is in units of HBLKSIZE.
+ * Tiny values of n are rounded up.
* Returns FALSE on failure.
*/
bool GC_expand_hp_inner(n)
word n;
{
- word bytes = n * HBLKSIZE;
- struct hblk * space = GET_MEM(bytes);
+ word bytes;
+ struct hblk * space;
word expansion_slop; /* Number of bytes by which we expect the */
/* heap to expand soon. */
- if (n > 2*GC_hincr) {
- GC_hincr = n/2;
- }
+ if (n < MINHINCR) n = MINHINCR;
+ bytes = n * HBLKSIZE;
+ space = GET_MEM(bytes);
if( space == 0 ) {
return(FALSE);
}
# ifdef PRINTSTATS
- GC_printf1("Increasing heap size by %lu\n",
- (unsigned long)bytes);
+ GC_printf2("Increasing heap size by %lu after %lu allocated bytes\n",
+ (unsigned long)bytes,
+ (unsigned long)WORDS_TO_BYTES(GC_words_allocd));
# endif
expansion_slop = 8 * WORDS_TO_BYTES(min_words_allocd());
if (5 * HBLKSIZE * MAXHINCR > expansion_slop) {
@@ -466,30 +528,43 @@ int n;
return(result);
}
-void GC_collect_or_expand(needed_blocks)
+bool GC_collect_or_expand(needed_blocks)
word needed_blocks;
{
static int count = 0; /* How many failures? */
- if (GC_dont_gc || !GC_gcollect_inner(FALSE)) {
- if (!GC_expand_hp_inner(GC_hincr + needed_blocks)
+ if (!GC_incremental && !GC_dont_gc && GC_should_collect()) {
+ GC_gcollect_inner();
+ } else {
+ word blocks_to_get = GC_heapsize/(HBLKSIZE*GC_free_space_divisor)
+ + needed_blocks;
+
+ if (blocks_to_get > MAXHINCR) {
+ if (needed_blocks > MAXHINCR) {
+ blocks_to_get = needed_blocks;
+ } else {
+ blocks_to_get = MAXHINCR;
+ }
+ }
+ if (!GC_expand_hp_inner(blocks_to_get)
&& !GC_expand_hp_inner(needed_blocks)) {
if (count++ < 20) {
WARN("Out of Memory! Trying to continue ...\n");
- (void) GC_gcollect_inner(TRUE);
+ GC_gcollect_inner();
} else {
- GC_err_printf0("Out of Memory! Giving up!\n");
- EXIT();
+ WARN("Out of Memory! Returning NIL!\n");
+ return(FALSE);
}
}
- update_GC_hincr;
}
+ return(TRUE);
}
/*
* Make sure the object free list for sz is not empty.
* Return a pointer to the first object on the free list.
* The object MUST BE REMOVED FROM THE FREE LIST BY THE CALLER.
+ * Assumes we hold the allocator lock and signals are disabled.
*
*/
ptr_t GC_allocobj(sz, kind)
@@ -501,13 +576,17 @@ int kind;
if (sz == 0) return(0);
while (*flh == 0) {
- GC_continue_reclaim(sz, kind);
+ /* Do our share of marking work */
+ if(GC_incremental && !GC_dont_gc) GC_collect_a_little(1);
+ /* Sweep blocks for objects of this size */
+ GC_continue_reclaim(sz, kind);
if (*flh == 0) {
GC_new_hblk(sz, kind);
}
if (*flh == 0) {
- GC_collect_or_expand((word)1);
+ if (!GC_collect_or_expand((word)1)) return(0);
}
}
+
return(*flh);
}
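
The inline allocation sequence quoted in the alloc.c comment above (now
written with obj_link rather than ptr->next) expands to roughly the
following. GC_objfreelist, GC_allocobj, obj_link, and NORMAL are the names
the comment itself uses; the function wrapper and the failure check are
this sketch's additions, and the allocator lock the real code takes is
omitted:

    /* Fast-path allocation of one object of sz words, per the comment
     * in alloc.c.  Assumes gc_private.h definitions are in scope and
     * that the allocator lock is held. */
    ptr_t quick_alloc(sz)
    word sz;
    {
        register ptr_t * opp = &(GC_objfreelist[sz]);
        register ptr_t ptr;

        if (*opp == 0) {
            /* Free list empty: GC_allocobj marks a little (when running
             * incrementally), sweeps, or grows the heap; per the new
             * code above it returns 0 only on memory exhaustion. */
            if (GC_allocobj(sz, NORMAL) == 0) return(0);
        }
        ptr = *opp;
        *opp = obj_link(ptr);  /* the caller must unlink the object */
        return(ptr);
    }
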
diff --git a/allochblk.c b/allochblk.c
index 48212b7f..5c22f2b9 100644
--- a/allochblk.c
+++ b/allochblk.c
@@ -38,24 +38,52 @@ struct hblk *GC_savhbp = (struct hblk *)0; /* heap block preceding next */
/* block to be examined by */
/* GC_allochblk. */
+void GC_print_hblkfreelist()
+{
+ struct hblk * h = GC_hblkfreelist;
+ word total_free = 0;
+ hdr * hhdr = HDR(h);
+ word sz;
+
+ while (h != 0) {
+ sz = hhdr -> hb_sz;
+ GC_printf2("0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
+ total_free += sz;
+ if (GC_is_black_listed(h, HBLKSIZE) != 0) {
+ GC_printf0("start black listed\n");
+ } else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
+ GC_printf0("partially black listed\n");
+ } else {
+ GC_printf0("not black listed\n");
+ }
+ h = hhdr -> hb_next;
+ hhdr = HDR(h);
+ }
+ GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
+}
+
/* Initialize hdr for a block containing the indicated size and */
/* kind of objects. */
-static setup_header(hhdr, sz, kind)
+/* Return FALSE on failure. */
+static bool setup_header(hhdr, sz, kind)
register hdr * hhdr;
word sz; /* object size in words */
int kind;
{
+ /* Add description of valid object pointers */
+ if (!GC_add_map_entry(sz)) return(FALSE);
+ hhdr -> hb_map = GC_obj_map[sz > MAXOBJSZ? 0 : sz];
+
/* Set size, kind and mark proc fields */
hhdr -> hb_sz = sz;
hhdr -> hb_obj_kind = kind;
hhdr -> hb_mark_proc = GC_obj_kinds[kind].ok_mark_proc;
- /* Add description of valid object pointers */
- GC_add_map_entry(sz);
- hhdr -> hb_map = GC_obj_map[sz > MAXOBJSZ? 0 : sz];
-
/* Clear mark bits */
GC_clear_hdr_marks(hhdr);
+
+ hhdr -> hb_last_reclaimed = GC_gc_no;
+ return(TRUE);
}
/*
@@ -83,8 +111,7 @@ int kind;
signed_word size_avail; /* bytes available in this block */
bool first_time = TRUE;
- size_needed = WORDS_TO_BYTES(sz);
- size_needed = (size_needed+HDR_BYTES+HBLKSIZE-1) & ~HBLKMASK;
+ size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);
/* search for a big enough block in free list */
hbp = GC_savhbp;
@@ -124,17 +151,18 @@ int kind;
}
if ( kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC) {
struct hblk * lasthbp = hbp;
+ ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
- while (size_avail >= size_needed
+ while ((ptr_t)lasthbp <= search_end
&& (thishbp = GC_is_black_listed(lasthbp,
(word)size_needed))) {
lasthbp = thishbp;
}
size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
thishbp = lasthbp;
- if (size_avail >= size_needed && thishbp != hbp) {
+ if (size_avail >= size_needed && thishbp != hbp
+ && GC_install_header(thishbp)) {
/* Split the block at thishbp */
- GC_install_header(thishbp);
thishdr = HDR(thishbp);
/* GC_invalidate_map not needed, since we will */
/* allocate this block. */
@@ -150,6 +178,7 @@ int kind;
} else if (size_avail == 0
&& size_needed == HBLKSIZE
&& prevhbp != 0) {
+# ifndef FIND_LEAK
static unsigned count = 0;
/* The block is completely blacklisted. We need */
@@ -159,17 +188,24 @@ int kind;
/* A dropped block will be reconsidered at next GC. */
if ((++count & 3) == 0) {
/* Allocate and drop the block */
- phdr -> hb_next = hhdr -> hb_next;
- GC_install_counts(hbp, hhdr->hb_sz);
- setup_header(hhdr,
- BYTES_TO_WORDS(hhdr->hb_sz - HDR_BYTES),
- PTRFREE);
- if (GC_savhbp == hbp) GC_savhbp = prevhbp;
+ if (GC_install_counts(hbp, hhdr->hb_sz)) {
+ phdr -> hb_next = hhdr -> hb_next;
+ (void) setup_header(
+ hhdr,
+ BYTES_TO_WORDS(hhdr->hb_sz - HDR_BYTES),
+ PTRFREE); /* Can't fail */
+ if (GC_debugging_started) {
+ bzero((char *)hbp + HDR_BYTES,
+ (int)(hhdr->hb_sz - HDR_BYTES));
+ }
+ if (GC_savhbp == hbp) GC_savhbp = prevhbp;
+ }
/* Restore hbp to point at free block */
hbp = prevhbp;
hhdr = phdr;
if (hbp == GC_savhbp) first_time = TRUE;
}
+# endif
}
}
if( size_avail >= size_needed ) {
@@ -183,8 +219,8 @@ int kind;
hhdr = HDR(hbp);
} else {
hbp = (struct hblk *)
- (((unsigned)thishbp) + size_needed);
- GC_install_header(hbp);
+ (((word)thishbp) + size_needed);
+ if (!GC_install_header(hbp)) continue;
hhdr = HDR(hbp);
GC_invalidate_map(hhdr);
hhdr->hb_next = thishdr->hb_next;
@@ -201,21 +237,31 @@ int kind;
break;
}
}
-
+
+ /* Notify virtual dirty bit implementation that we are about to write. */
+ GC_write_hint(thishbp);
+
+ /* Add it to map of valid blocks */
+ if (!GC_install_counts(thishbp, (word)size_needed)) return(0);
+ /* This leaks memory under very rare conditions. */
+
+ /* Set up header */
+ if (!setup_header(thishdr, sz, kind)) {
+ GC_remove_counts(thishbp, (word)size_needed);
+ return(0); /* ditto */
+ }
+
/* Clear block if necessary */
- if (sz > MAXOBJSZ && GC_obj_kinds[kind].ok_init) {
+ if (GC_debugging_started
+ || sz > MAXOBJSZ && GC_obj_kinds[kind].ok_init) {
bzero((char *)thishbp + HDR_BYTES, (int)(size_needed - HDR_BYTES));
}
-
- /* Set up header */
- setup_header(thishdr, sz, kind);
-
- /* Add it to map of valid blocks */
- GC_install_counts(thishbp, (word)size_needed);
-
+
return( thishbp );
}
+struct hblk * GC_freehblk_ptr = 0; /* Search position hint for GC_freehblk */
+
/*
* Free a heap block.
*
@@ -237,15 +283,23 @@ register signed_word size;
phdr = HDR(p);
size = phdr->hb_sz;
- size =
- ((WORDS_TO_BYTES(size)+HDR_BYTES+HBLKSIZE-1)
- & (~HBLKMASK));
+ size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
GC_remove_counts(p, (word)size);
phdr->hb_sz = size;
GC_invalidate_map(phdr);
-
prevhbp = 0;
- hbp = GC_hblkfreelist;
+
+ /* The following optimization was suggested by David Detlefs. */
+ /* Note that the header cannot be NIL, since there cannot be an */
+ /* intervening call to GC_freehblk without resetting */
+ /* GC_freehblk_ptr. */
+ if (GC_freehblk_ptr != 0 &&
+ HDR(GC_freehblk_ptr)->hb_map == GC_invalid_map &&
+ (ptr_t)GC_freehblk_ptr < (ptr_t)p) {
+ hbp = GC_freehblk_ptr;
+ } else {
+ hbp = GC_hblkfreelist;
+ };
hhdr = HDR(hbp);
while( (hbp != 0) && (hbp < p) ) {
@@ -254,6 +308,7 @@ register signed_word size;
hbp = hhdr->hb_next;
hhdr = HDR(hbp);
}
+ GC_freehblk_ptr = prevhbp;
/* Check for duplicate deallocation in the easy case */
if (hbp != 0 && (ptr_t)p + size > (ptr_t)hbp
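
Both the old open-coded rounding and the new OBJ_SZ_TO_BLOCKS form in
GC_allochblk and GC_freehblk compute the same quantity: the number of
whole heap blocks needed for one object plus its block header. The macro
itself is not shown in this diff; reconstructed from the expression it
replaces (divHBLKSZ being the block-size division helper already used in
this file), it is presumably equivalent to:

    /* Inferred definition: heap blocks needed for an object of sz words,
     * including HDR_BYTES of header, rounded up to a whole block. */
    #define OBJ_SZ_TO_BLOCKS(sz) \
        divHBLKSZ(WORDS_TO_BYTES(sz) + HDR_BYTES + HBLKSIZE - 1)

    /* so that, as above,
     *     size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);
     * matches the old
     *     (WORDS_TO_BYTES(sz) + HDR_BYTES + HBLKSIZE - 1) & ~HBLKMASK  */
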
diff --git a/alpha_mach_dep.s b/alpha_mach_dep.s
new file mode 100644
index 00000000..265c3141
--- /dev/null
+++ b/alpha_mach_dep.s
@@ -0,0 +1,58 @@
+ # $Id: alpha_mach_dep.s,v 1.2 1993/01/18 22:54:51 dosser Exp $
+
+# define call_push(x) lda $16, 0(x); jsr $26, GC_push_one
+
+ .text
+ .align 4
+ .globl GC_push_regs
+ .ent GC_push_regs 2
+GC_push_regs:
+ ldgp $gp, 0($27)
+ lda $sp, -32($sp)
+ stq $26, 8($sp)
+ .mask 0x04000000, -8
+ .frame $sp, 16, $26, 0
+
+ # call_push($0) # expression eval and int func result
+
+ # call_push($1) # temp regs - not preserved cross calls
+ # call_push($2)
+ # call_push($3)
+ # call_push($4)
+ # call_push($5)
+ # call_push($6)
+ # call_push($7)
+ # call_push($8)
+
+ call_push($9) # Saved regs
+ call_push($10)
+ call_push($11)
+ call_push($12)
+ call_push($13)
+ call_push($14)
+
+ call_push($15) # frame ptr or saved reg
+
+ # call_push($16) # argument regs - not preserved cross calls
+ # call_push($17)
+ # call_push($18)
+ # call_push($19)
+ # call_push($20)
+ # call_push($21)
+
+ # call_push($22) # temp regs - not preserved cross calls
+ # call_push($23)
+ # call_push($24)
+ # call_push($25)
+
+ # call_push($26) # return address - expression eval
+ # call_push($27) # procedure value or temporary reg
+ # call_push($28) # assembler temp - not preserved
+ call_push($29) # Global Pointer
+ # call_push($30) # Stack Pointer
+
+ ldgp $gp, 0($26)
+ ldq $26, 8($sp)
+ lda $sp, 32($sp)
+ ret $31, ($26), 1
+ .end GC_push_regs
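
Each call_push($n) above loads one callee-saved register into the first
argument register and calls GC_push_one, offering that register's contents
to the collector as a potential root. In pseudo-C (the saved[] array is a
fiction; there is no portable way to read $9..$15 and $gp from C, which is
exactly why this file is assembly):

    extern void GC_push_one(/* word p */);

    void GC_push_regs_sketch()
    {
        word saved[8];  /* would hold $9..$15 and $29 (gp) */
        register int i;

        /* ... only assembly can actually capture the registers ... */
        for (i = 0; i < 8; i++) {
            GC_push_one(saved[i]); /* treat each as a possible pointer */
        }
    }
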
diff --git a/black_list.c b/black_list.c
index ba54f892..8dee33d8 100644
--- a/black_list.c
+++ b/black_list.c
@@ -14,6 +14,7 @@
* We maintain several hash tables of hblks that have had false hits.
* Each contains one bit per hash bucket; If any page in the bucket
* has had a false hit, we assume that all of them have.
+ * See the definition of page_hash_table in gc_private.h.
* False hits from the stack(s) are much more dangerous than false hits
* from elsewhere, since the former can pin a large object that spans the
* block, even though it does not start on the dangerous block.
@@ -30,55 +31,42 @@
* All require that the allocator lock is held.
*/
-# define LOG_HT_ENTRIES 14 /* Collisions are likely if heap grows */
- /* to more than 16K hblks = 64MB. */
- /* Each hash table occupies 2K bytes. */
-# define HT_ENTRIES ((word)1 << LOG_HT_ENTRIES)
-# define HT_SIZE (HT_ENTRIES >> LOGWL)
-typedef word black_list_t[HT_SIZE];
-
-# define HASH(addr) (((addr) >> LOG_HBLKSIZE) & (HT_ENTRIES - 1))
-
/* Pointers to individual tables. We replace one table by another by */
-/* switching these pointers. GC_black_lists is not used directly. */
-word * GC_new_normal_bl;
- /* Nonstack false references seen at last complete */
- /* collection. */
+/* switching these pointers. */
word * GC_old_normal_bl;
- /* Nonstack false references seen at preceding */
+ /* Nonstack false references seen at last full */
/* collection. */
word * GC_incomplete_normal_bl;
- /* Nonstack false references seen at current, */
- /* not yet completed collection. */
-word * GC_new_stack_bl;
+ /* Nonstack false references seen since last */
+ /* full collection. */
word * GC_old_stack_bl;
word * GC_incomplete_stack_bl;
-# define get_bl_entry_from_index(bl, index) \
- (((bl)[divWORDSZ(index)] >> modWORDSZ(index)) & 1)
-# define set_bl_entry_from_index(bl, index) \
- (bl)[divWORDSZ(index)] |= 1 << modWORDSZ(index)
-# define clear_bl_entry_from_index(bl, index) \
- (bl)[divWORDSZ(index)] &= ~(1 << modWORDSZ(index))
-
GC_bl_init()
{
# ifndef ALL_INTERIOR_POINTERS
- GC_new_normal_bl = (word *)GC_scratch_alloc((word)(sizeof(black_list_t)));
- GC_old_normal_bl = (word *)GC_scratch_alloc((word)(sizeof (black_list_t)));
+ GC_old_normal_bl = (word *)
+ GC_scratch_alloc((word)(sizeof (page_hash_table)));
GC_incomplete_normal_bl = (word *)GC_scratch_alloc
- ((word)(sizeof(black_list_t)));
+ ((word)(sizeof(page_hash_table)));
+ if (GC_old_normal_bl == 0 || GC_incomplete_normal_bl == 0) {
+ GC_err_printf0("Insufficient memory for black list\n");
+ EXIT();
+ }
# endif
- GC_new_stack_bl = (word *)GC_scratch_alloc((word)(sizeof(black_list_t)));
- GC_old_stack_bl = (word *)GC_scratch_alloc((word)(sizeof(black_list_t)));
+ GC_old_stack_bl = (word *)GC_scratch_alloc((word)(sizeof(page_hash_table)));
GC_incomplete_stack_bl = (word *)GC_scratch_alloc
- ((word)(sizeof(black_list_t)));
+ ((word)(sizeof(page_hash_table)));
+ if (GC_old_stack_bl == 0 || GC_incomplete_stack_bl == 0) {
+ GC_err_printf0("Insufficient memory for black list\n");
+ EXIT();
+ }
}
void GC_clear_bl(doomed)
word *doomed;
{
- bzero((char *)doomed, (int)HT_SIZE*sizeof(word));
+ bzero((char *)doomed, (int)sizeof(page_hash_table));
}
/* Signal the completion of a collection. Turn the incomplete black */
@@ -88,10 +76,8 @@ void GC_promote_black_lists()
word * very_old_normal_bl = GC_old_normal_bl;
word * very_old_stack_bl = GC_old_stack_bl;
- GC_old_normal_bl = GC_new_normal_bl;
- GC_new_normal_bl = GC_incomplete_normal_bl;
- GC_old_stack_bl = GC_new_stack_bl;
- GC_new_stack_bl = GC_incomplete_stack_bl;
+ GC_old_normal_bl = GC_incomplete_normal_bl;
+ GC_old_stack_bl = GC_incomplete_stack_bl;
# ifndef ALL_INTERIOR_POINTERS
GC_clear_bl(very_old_normal_bl);
# endif
@@ -109,16 +95,16 @@ word p;
{
if (!(GC_modws_valid_offsets[p & (sizeof(word)-1)])) return;
{
- register int index = HASH(p);
+ register int index = PHT_HASH(p);
- if (HDR(p) == 0 || get_bl_entry_from_index(GC_new_normal_bl, index)) {
+ if (HDR(p) == 0 || get_pht_entry_from_index(GC_old_normal_bl, index)) {
# ifdef PRINTBLACKLIST
- if (!get_bl_entry_from_index(GC_incomplete_normal_bl, index)) {
+ if (!get_pht_entry_from_index(GC_incomplete_normal_bl, index)) {
GC_printf1("Black listing (normal) 0x%lx\n",
(unsigned long) p);
}
# endif
- set_bl_entry_from_index(GC_incomplete_normal_bl, index);
+ set_pht_entry_from_index(GC_incomplete_normal_bl, index);
} /* else this is probably just an interior pointer to an allocated */
/* object, and isn't worth black listing. */
}
@@ -129,16 +115,16 @@ word p;
void GC_add_to_black_list_stack(p)
word p;
{
- register int index = HASH(p);
+ register int index = PHT_HASH(p);
- if (HDR(p) == 0 || get_bl_entry_from_index(GC_new_stack_bl, index)) {
+ if (HDR(p) == 0 || get_pht_entry_from_index(GC_old_stack_bl, index)) {
# ifdef PRINTBLACKLIST
- if (!get_bl_entry_from_index(GC_incomplete_stack_bl, index)) {
+ if (!get_pht_entry_from_index(GC_incomplete_stack_bl, index)) {
GC_printf1("Black listing (stack) 0x%lx\n",
(unsigned long)p);
}
# endif
- set_bl_entry_from_index(GC_incomplete_stack_bl, index);
+ set_pht_entry_from_index(GC_incomplete_stack_bl, index);
}
}
@@ -154,30 +140,31 @@ struct hblk * GC_is_black_listed(h, len)
struct hblk * h;
word len;
{
- register int index = HASH((word)h);
+ register int index = PHT_HASH((word)h);
register word i;
word nblocks = divHBLKSZ(len);
# ifndef ALL_INTERIOR_POINTERS
- if (get_bl_entry_from_index(GC_new_normal_bl, index)
- && get_bl_entry_from_index(GC_old_normal_bl, index)) {
+ if (get_pht_entry_from_index(GC_old_normal_bl, index)
+ || get_pht_entry_from_index(GC_incomplete_normal_bl, index)) {
return(h+1);
}
# endif
for (i = 0; ; ) {
- if (GC_new_stack_bl[divWORDSZ(index)] == 0) {
+ if (GC_old_stack_bl[divWORDSZ(index)] == 0
+ && GC_incomplete_stack_bl[divWORDSZ(index)] == 0) {
/* An easy case */
i += WORDSZ - modWORDSZ(index);
} else {
- if (get_bl_entry_from_index(GC_new_stack_bl, index)
- && get_bl_entry_from_index(GC_old_stack_bl, index)) {
+ if (get_pht_entry_from_index(GC_old_stack_bl, index)
+ || get_pht_entry_from_index(GC_incomplete_stack_bl, index)) {
return(h+i+1);
}
i++;
}
if (i >= nblocks) break;
- index = HASH((word)(h+i));
+ index = PHT_HASH((word)(h+i));
}
return(0);
}
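
The black lists now share the collector-wide page_hash_table type instead
of a private black_list_t. Its shape is not shown in this diff, but it can
be reconstructed from the macros the change deletes: one bit per bucket,
hashed by heap-block address, with the bl_ accessors renamed to pht_. In
gc_private.h it is presumably something like:

    /* Inferred from the deleted LOG_HT_ENTRIES/HASH/get_bl_entry macros;
     * the pht_ names are the ones the new code calls. */
    #define LOG_PHT_ENTRIES 14  /* 2K bytes per table; collisions likely */
                                /* once the heap exceeds 16K hblks = 64MB */
    #define PHT_ENTRIES ((word)1 << LOG_PHT_ENTRIES)
    #define PHT_SIZE (PHT_ENTRIES >> LOGWL)
    typedef word page_hash_table[PHT_SIZE];

    #define PHT_HASH(addr) ((((word)(addr)) >> LOG_HBLKSIZE) & (PHT_ENTRIES-1))

    #define get_pht_entry_from_index(bl, index) \
        (((bl)[divWORDSZ(index)] >> modWORDSZ(index)) & 1)
    #define set_pht_entry_from_index(bl, index) \
        (bl)[divWORDSZ(index)] |= (word)1 << modWORDSZ(index)
    #define clear_pht_entry_from_index(bl, index) \
        (bl)[divWORDSZ(index)] &= ~((word)1 << modWORDSZ(index))
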
diff --git a/checksums.c b/checksums.c
new file mode 100644
index 00000000..c284db7e
--- /dev/null
+++ b/checksums.c
@@ -0,0 +1,138 @@
+# ifdef CHECKSUMS
+
+# include "gc_private.h"
+
+/* This is debugging code intended to verify the results of dirty bit */
+/* computations. Works only in a single threaded environment. */
+/* We assume that stubborn objects are changed only when they are */
+/* enabled for writing. (Certain kinds of writing are actually */
+/* safe under other conditions.) */
+# define NSUMS 2000
+
+# define OFFSET 100000
+
+typedef struct {
+ bool new_valid;
+ word old_sum;
+ word new_sum;
+ struct hblk * block; /* Block to which this refers + OFFSET */
+ /* to hide it from the collector. */
+} page_entry;
+
+page_entry GC_sums [NSUMS];
+
+word GC_checksum(h)
+struct hblk *h;
+{
+ register word *p = (word *)h;
+ register word *lim = (word *)(h+1);
+ register word result = 0;
+
+ while (p < lim) {
+ result += *p++;
+ }
+ return(result);
+}
+
+# ifdef STUBBORN_ALLOC
+/* Check whether a stubborn object from the given block appears on */
+/* the appropriate free list. */
+bool GC_on_free_list(h)
+struct hblk *h;
+{
+ register hdr * hhdr = HDR(h);
+ register int sz = hhdr -> hb_sz;
+ ptr_t p;
+
+ if (sz > MAXOBJSZ) return(FALSE);
+ for (p = GC_sobjfreelist[sz]; p != 0; p = obj_link(p)) {
+ if (HBLKPTR(p) == h) return(TRUE);
+ }
+ return(FALSE);
+}
+# endif
+
+int GC_n_dirty_errors;
+int GC_n_changed_errors;
+int GC_n_clean;
+int GC_n_dirty;
+
+void GC_update_check_page(h, index)
+struct hblk *h;
+int index;
+{
+ page_entry *pe = GC_sums + index;
+ register hdr * hhdr = HDR(h);
+
+ if (pe -> block != 0 && pe -> block != h + OFFSET) ABORT("goofed");
+ pe -> old_sum = pe -> new_sum;
+ pe -> new_sum = GC_checksum(h);
+ if (GC_page_was_dirty(h)) {
+ GC_n_dirty++;
+ } else {
+ GC_n_clean++;
+ }
+ if (pe -> new_valid && pe -> old_sum != pe -> new_sum) {
+ if (!GC_page_was_dirty(h)) {
+ /* Set breakpoint here */GC_n_dirty_errors++;
+ }
+# ifdef STUBBORN_ALLOC
+ if (!IS_FORWARDING_ADDR_OR_NIL(hhdr)
+ && hhdr -> hb_map != GC_invalid_map
+ && hhdr -> hb_obj_kind == STUBBORN
+ && !GC_page_was_changed(h)
+ && !GC_on_free_list(h)) {
+ /* if GC_on_free_list(h) then reclaim may have touched it */
+ /* without any allocations taking place. */
+ /* Set breakpoint here */GC_n_changed_errors++;
+ }
+# endif
+ }
+ pe -> new_valid = TRUE;
+ pe -> block = h + OFFSET;
+}
+
+/* Should be called immediately after GC_read_dirty and GC_read_changed. */
+void GC_check_dirty()
+{
+ register int index;
+ register int i;
+ register struct hblk *h;
+ register ptr_t start;
+
+ GC_n_dirty_errors = 0;
+ GC_n_changed_errors = 0;
+ GC_n_clean = 0;
+ GC_n_dirty = 0;
+
+ index = 0;
+ for (i = 0; i < GC_n_heap_sects; i++) {
+ start = GC_heap_sects[i].hs_start;
+ for (h = (struct hblk *)start;
+ h < (struct hblk *)(start + GC_heap_sects[i].hs_bytes);
+ h++) {
+ GC_update_check_page(h, index);
+ index++;
+ if (index >= NSUMS) goto out;
+ }
+ }
+out:
+ GC_printf2("Checked %lu clean and %lu dirty pages\n",
+ (unsigned long) GC_n_clean, (unsigned long) GC_n_dirty);
+ if (GC_n_dirty_errors > 0) {
+ GC_printf1("Found %lu dirty bit errors\n",
+ (unsigned long)GC_n_dirty_errors);
+ }
+ if (GC_n_changed_errors > 0) {
+ GC_printf1("Found %lu changed bit errors\n",
+ (unsigned long)GC_n_changed_errors);
+ }
+}
+
+# else
+
+extern int GC_quiet;
+ /* ANSI C doesn't allow translation units to be empty. */
+ /* So we guarantee this one is nonempty. */
+
+# endif /* CHECKSUMS */
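
GC_check_dirty is meant to be driven from inside the collector: per the
comment above, it must directly follow the dirty/changed snapshots.  A hedged
sketch of that call sequence (the enclosing collection code is elided):

    /* Schematic only: */
    GC_read_dirty();            /* snapshot virtual dirty bits */
#   ifdef STUBBORN_ALLOC
        GC_read_changed();      /* snapshot stubborn changed bits */
#   endif
#   ifdef CHECKSUMS
        GC_check_dirty();       /* recompute checksums, count errors */
#   endif
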
diff --git a/config.h b/config.h
index 48464e43..239bf517 100644
--- a/config.h
+++ b/config.h
@@ -10,7 +10,7 @@
/* Determine the machine type: */
# if defined(sun) && defined(mc68000)
# define M68K
-# define SUNOS
+# define SUNOS4
# define mach_type_known
# endif
# if defined(hp9000s300)
@@ -32,7 +32,11 @@
# ifdef ultrix
# define ULTRIX
# else
-# define RISCOS
+# ifdef _SYSTYPE_SVR4
+# define IRIX5
+# else
+# define RISCOS /* or IRIX 4.X */
+# endif
# endif
# define mach_type_known
# endif
@@ -67,7 +71,7 @@
# define mach_type_known
# endif
# if defined(_IBMR2)
-# define IBMRS6000
+# define RS6000
# define mach_type_known
# endif
# if defined(SCO)
@@ -90,6 +94,20 @@
# define LINUX
# define mach_type_known
# endif
+# if defined(__alpha)
+# define ALPHA
+# define mach_type_known
+# endif
+# if defined(_AMIGA)
+# define AMIGA
+# define M68K
+# define mach_type_known
+# endif
+# if defined(NeXT) && defined(mc68000)
+# define M68K
+# define NEXT
+# define mach_type_known
+# endif
/* Feel free to add more clauses here */
@@ -104,9 +122,8 @@
--> unknown machine type
# endif
/* Mapping is: M68K ==> Motorola 680X0 */
- /* (SUNOS, HP, and SYSV (A/UX) variants)*/
- /* M68K_HP ==> HP9000/300 */
- /* M68K_SYSV ==> A/UX, maybe others */
+			/*	     (SUNOS4, HP, NEXT, SYSV (A/UX),	*/
+ /* and AMIGA variants) */
/* I386 ==> Intel 386 */
/* (SEQUENT, OS2, SCO, LINUX variants) */
/* SCO is incomplete. */
@@ -120,7 +137,8 @@
/* HP_PA ==> HP9000/700 & /800 */
/* HP/UX */
/* SPARC ==> SPARC under SunOS */
- /* (SUNOS4, SUNOS5 variants)
+ /* (SUNOS4, SUNOS5 variants) */
+ /* ALPHA ==> DEC Alpha OSF/1 */
/*
@@ -192,14 +210,20 @@
* GC_stackbottom = (ptr_t)(&dummy);
* return(real_main(argc, argv, envp));
* }
+ *
+ *
+ * Each architecture may also define the style of virtual dirty bit
+ * implementation to be used:
+ * MPROTECT_VDB: Write protect the heap and catch faults.
+ * PROC_VDB: Use the SVR4 /proc primitives to read dirty bits.
*/
# ifdef M68K
# define MACH_TYPE "M68K"
# define ALIGNMENT 2
-# ifdef SUNOS
-# define OS_TYPE "SUNOS"
+# ifdef SUNOS4
+# define OS_TYPE "SUNOS4"
extern char etext;
# define DATASTART ((ptr_t)((((word) (&etext)) + 0x1ffff) & ~0x1ffff))
# define HEURISTIC1 /* differs */
@@ -230,6 +254,16 @@
/* bytes down from 0x0 should be safe enough. */
/* --Parag */
# endif
+# ifdef AMIGA
+# define OS_TYPE "AMIGA"
+ /* STACKBOTTOM and DATASTART handled specially */
+ /* in os_dep.c */
+# endif
+# ifdef NEXT
+# define OS_TYPE "NEXT"
+# define DATASTART ((ptr_t) get_etext())
+# define STACKBOTTOM ((ptr_t) 0x4000000)
+# endif
# endif
# ifdef VAX
@@ -265,11 +299,13 @@
/* Experimentally determined. */
/* Inconsistent with man a.out, which appears */
/* to be wrong. */
+# define PROC_VDB
# endif
# ifdef SUNOS4
# define OS_TYPE "SUNOS4"
# define DATASTART ((ptr_t)((((word) (&etext)) + 0xfff) & ~0xfff))
/* On very old SPARCs this is too conservative. */
+# define MPROTECT_VDB
# endif
# define HEURISTIC1
# endif
@@ -289,6 +325,7 @@
# define DATASTART ((ptr_t)((((word) (&etext)) + 0x1003) & ~0x3))
extern int _start();
# define STACKBOTTOM ((ptr_t)(&_start))
+# define PROC_VDB
# endif
# ifdef SCO
# define OS_TYPE "SCO"
@@ -332,6 +369,16 @@
/* Could probably be slightly higher since */
/* startup code allocates lots of junk */
# define HEURISTIC2
+# ifdef ULTRIX
+# define OS_TYPE "ULTRIX"
+# endif
+# ifdef RISCOS
+# define OS_TYPE "RISCOS"
+# endif
+# ifdef IRIX5
+# define OS_TYPE "IRIX5"
+# define MPROTECT_VDB
+# endif
# endif
# ifdef RS6000
@@ -350,6 +397,15 @@
# define STACK_GROWS_UP
# endif
+# ifdef ALPHA
+# define MACH_TYPE "ALPHA"
+# define ALIGNMENT 8
+# define DATASTART ((ptr_t) 0x140000000)
+# define HEURISTIC2
+# define CPP_WORDSZ 64
+# define MPROTECT_VDB
+# endif
+
# ifndef STACK_GROWS_UP
# define STACK_GROWS_DOWN
# endif
@@ -370,6 +426,19 @@
# undef STACKBOTTOM
# undef HEURISTIC1
# undef HEURISTIC2
+# undef PROC_VDB
+# undef MPROTECT_VDB
+# define PCR_VDB
+# endif
+
+# ifdef SRC_M3
+/* Postponed for now. */
+# undef PROC_VDB
+# undef MPROTECT_VDB
+# endif
+
+# if !defined(PCR_VDB) && !defined(PROC_VDB) && !defined(MPROTECT_VDB)
+# define DEFAULT_VDB
# endif
# endif
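
config.h now settles on exactly one virtual dirty bit strategy per
configuration, with DEFAULT_VDB as the fallback.  The resulting os_dep.c
implementation presumably has roughly this shape (a guess, not the actual
code: with no OS support, every page must be assumed dirty):

    /* Hypothetical sketch of GC_page_was_dirty: */
    bool GC_page_was_dirty(h)
    struct hblk *h;
    {
#       ifdef DEFAULT_VDB
            return(TRUE);       /* no information; all pages "dirty" */
#       else
            /* PROC_VDB/MPROTECT_VDB consult bits from GC_read_dirty */
            return(get_pht_entry_from_index(GC_grungy_pages,
                                            PHT_HASH((word)h)));
#       endif
    }
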
diff --git a/debug_malloc.c b/debug_malloc.c
index 6bb61980..4b7b224e 100644
--- a/debug_malloc.c
+++ b/debug_malloc.c
@@ -15,10 +15,9 @@ typedef struct {
/* and to be a multiple of the word length. */
#define DEBUG_BYTES (sizeof (oh) + sizeof (word))
+#undef ROUNDED_UP_WORDS
#define ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + WORDS_TO_BYTES(1) - 1)
-bool GC_debugging_started = FALSE;
-
/* Check whether object with base pointer p has debugging info */
/* p is assumed to point to a legitimate object in our part */
/* of the heap. */
@@ -116,6 +115,15 @@ ptr_t p, clobbered_addr;
}
}
+void GC_check_heap_proc();
+
+void GC_start_debugging()
+{
+ GC_check_heap = GC_check_heap_proc;
+ GC_debugging_started = TRUE;
+ GC_register_displacement((word)sizeof(oh));
+}
+
# ifdef __STDC__
extern_ptr_t GC_debug_malloc(size_t lb, char * s, int i)
# else
@@ -135,12 +143,78 @@ ptr_t p, clobbered_addr;
return(0);
}
if (!GC_debugging_started) {
- GC_debugging_started = TRUE;
- GC_register_displacement((word)sizeof(oh));
+ GC_start_debugging();
+ }
+ return (GC_store_debug_info(result, (word)lb, s, (word)i));
+}
+
+#ifdef STUBBORN_ALLOC
+# ifdef __STDC__
+ extern_ptr_t GC_debug_malloc_stubborn(size_t lb, char * s, int i)
+# else
+ extern_ptr_t GC_debug_malloc_stubborn(lb, s, i)
+ size_t lb;
+ char * s;
+ int i;
+# endif
+{
+ extern_ptr_t result = GC_malloc_stubborn(lb + DEBUG_BYTES);
+
+ if (result == 0) {
+        GC_err_printf1("GC_debug_malloc_stubborn(%ld) returning NIL (",
+ (unsigned long) lb);
+ GC_err_puts(s);
+ GC_err_printf1(":%ld)\n", (unsigned long)i);
+ return(0);
+ }
+ if (!GC_debugging_started) {
+ GC_start_debugging();
}
return (GC_store_debug_info(result, (word)lb, s, (word)i));
}
+void GC_debug_change_stubborn(p)
+extern_ptr_t p;
+{
+ register extern_ptr_t q = GC_base(p);
+ register hdr * hhdr;
+
+ if (q == 0) {
+ GC_err_printf1("Bad argument: 0x%lx to GC_debug_change_stubborn\n",
+ (unsigned long) p);
+ ABORT("GC_debug_change_stubborn: bad arg");
+ }
+ hhdr = HDR(q);
+ if (hhdr -> hb_obj_kind != STUBBORN) {
+ GC_err_printf1("GC_debug_change_stubborn arg not stubborn: 0x%lx\n",
+ (unsigned long) p);
+ ABORT("GC_debug_change_stubborn: arg not stubborn");
+ }
+ GC_change_stubborn(q);
+}
+
+void GC_debug_end_stubborn_change(p)
+extern_ptr_t p;
+{
+ register extern_ptr_t q = GC_base(p);
+ register hdr * hhdr;
+
+ if (q == 0) {
+ GC_err_printf1("Bad argument: 0x%lx to GC_debug_end_stubborn_change\n",
+ (unsigned long) p);
+ ABORT("GC_debug_end_stubborn_change: bad arg");
+ }
+ hhdr = HDR(q);
+ if (hhdr -> hb_obj_kind != STUBBORN) {
+ GC_err_printf1("debug_end_stubborn_change arg not stubborn: 0x%lx\n",
+ (unsigned long) p);
+ ABORT("GC_debug_end_stubborn_change: arg not stubborn");
+ }
+ GC_end_stubborn_change(q);
+}
+
+#endif /* STUBBORN_ALLOC */
+
# ifdef __STDC__
extern_ptr_t GC_debug_malloc_atomic(size_t lb, char * s, int i)
# else
@@ -160,11 +234,36 @@ ptr_t p, clobbered_addr;
return(0);
}
if (!GC_debugging_started) {
- GC_debugging_started = TRUE;
- GC_register_displacement((word)sizeof(oh));
+ GC_start_debugging();
+ }
+ return (GC_store_debug_info(result, (word)lb, s, (word)i));
+}
+
+# ifdef __STDC__
+ extern_ptr_t GC_debug_malloc_uncollectable(size_t lb, char * s, int i)
+# else
+ extern_ptr_t GC_debug_malloc_uncollectable(lb, s, i)
+ size_t lb;
+ char * s;
+ int i;
+# endif
+{
+ extern_ptr_t result = GC_malloc_uncollectable(lb + DEBUG_BYTES);
+
+ if (result == 0) {
+ GC_err_printf1("GC_debug_malloc_uncollectable(%ld) returning NIL (",
+ (unsigned long) lb);
+ GC_err_puts(s);
+ GC_err_printf1(":%ld)\n", (unsigned long)i);
+ return(0);
+ }
+ if (!GC_debugging_started) {
+ GC_start_debugging();
}
return (GC_store_debug_info(result, (word)lb, s, (word)i));
}
+
+
# ifdef __STDC__
void GC_debug_free(extern_ptr_t p)
# else
@@ -209,7 +308,9 @@ ptr_t p, clobbered_addr;
register extern_ptr_t result = GC_debug_malloc(lb, s, i);
register size_t copy_sz = lb;
register size_t old_sz;
+ register hdr * hhdr;
+ if (p == 0) return(GC_debug_malloc(lb, s, i));
if (base == 0) {
GC_err_printf1(
"Attempt to free invalid pointer %lx\n", (unsigned long)p);
@@ -221,6 +322,23 @@ ptr_t p, clobbered_addr;
(unsigned long)p);
return(GC_realloc(p, lb));
}
+ hhdr = HDR(base);
+ switch (hhdr -> hb_obj_kind) {
+# ifdef STUBBORN_ALLOC
+ case STUBBORN:
+ result = GC_debug_malloc_stubborn(lb, s, i);
+ break;
+# endif
+ case NORMAL:
+ result = GC_debug_malloc(lb, s, i);
+ break;
+ case PTRFREE:
+ result = GC_debug_malloc_atomic(lb, s, i);
+ break;
+ default:
+ GC_err_printf0("GC_debug_realloc: encountered bad kind\n");
+ ABORT("bad kind");
+ }
clobbered = GC_check_annotated_obj((oh *)base);
if (clobbered != 0) {
GC_err_printf0("GC_debug_realloc: found smashed object at ");
@@ -269,7 +387,7 @@ word dummy;
/* This assumes that all accessible objects are marked, and that */
/* I hold the allocation lock. Normally called by collector. */
-void GC_check_heap()
+void GC_check_heap_proc()
{
GC_apply_to_all_blocks(GC_check_heap_block, (word)0);
}
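
Taken together, the new wrappers let debugged clients follow the stubborn
object protocol from gc.h.  A usage sketch; the client code is hypothetical,
and real clients would normally use the GC_MALLOC_STUBBORN family of macros:

    /* Hypothetical client code: */
    extern_ptr_t * p = (extern_ptr_t *)
        GC_debug_malloc_stubborn(10*sizeof(extern_ptr_t), __FILE__, __LINE__);

    p[0] = GC_debug_malloc(8, __FILE__, __LINE__);
                                    /* new objects start out changeable */
    GC_debug_end_stubborn_change((extern_ptr_t)p);
                                    /* promise: no further writes       */
    GC_debug_change_stubborn((extern_ptr_t)p);
                                    /* must precede any later update    */
    p[1] = p[0];
    GC_debug_end_stubborn_change((extern_ptr_t)p);
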
diff --git a/dynamic_load.c b/dynamic_load.c
index c5e111e1..c6c10528 100644
--- a/dynamic_load.c
+++ b/dynamic_load.c
@@ -42,6 +42,10 @@
#ifdef SUNOS5
+#ifdef LINT
+ Elf32_Dyn _DYNAMIC;
+#endif
+
static struct link_map *
GC_FirstDLOpenedLinkMap()
{
@@ -71,6 +75,10 @@ GC_FirstDLOpenedLinkMap()
# ifdef SUNOS4
+#ifdef LINT
+ struct link_dynamic _DYNAMIC;
+#endif
+
static struct link_map *
GC_FirstDLOpenedLinkMap()
{
@@ -82,12 +90,34 @@ GC_FirstDLOpenedLinkMap()
return(_DYNAMIC.ld_un.ld_1->ld_loaded);
}
+/* Return the address of the ld.so allocated common symbol */
+/* with the least address, or 0 if none. */
+static ptr_t GC_first_common()
+{
+ ptr_t result = 0;
+ extern struct link_dynamic _DYNAMIC;
+ struct rtc_symb * curr_symbol;
+
+ if( &_DYNAMIC == 0) {
+ return(0);
+ }
+ curr_symbol = _DYNAMIC.ldd -> ldd_cp;
+ for (; curr_symbol != 0; curr_symbol = curr_symbol -> rtc_next) {
+ if (result == 0
+ || (ptr_t)(curr_symbol -> rtc_sp -> n_value) < result) {
+ result = (ptr_t)(curr_symbol -> rtc_sp -> n_value);
+ }
+ }
+ return(result);
+}
# endif
/* Add dynamic library data sections to the root set. */
# if !defined(PCR) && defined(THREADS)
+# ifndef SRC_M3
--> fix mutual exclusion with dlopen
+# endif /* We assume M3 programs don't call dlopen for now */
# endif
void GC_register_dynamic_libraries()
{
@@ -115,7 +145,7 @@ void GC_register_dynamic_libraries()
e = (Elf32_Ehdr *) lm->l_addr;
p = ((Elf32_Phdr *)(((char *)(e)) + e->e_phoff));
offset = ((unsigned long)(lm->l_addr));
- for( i = 0; i < e->e_phnum; ((i++),(p++)) ) {
+ for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
switch( p->p_type ) {
case PT_LOAD:
{
@@ -133,6 +163,19 @@ void GC_register_dynamic_libraries()
}
# endif
}
+# ifdef SUNOS4
+ {
+ static ptr_t common_start = 0;
+ ptr_t common_end;
+ extern ptr_t GC_find_limit();
+
+ if (common_start == 0) common_start = GC_first_common();
+ if (common_start != 0) {
+ common_end = GC_find_limit(common_start, TRUE);
+ GC_add_roots_inner((char *)common_start, (char *)common_end);
+ }
+ }
+# endif
}
#else
diff --git a/finalize.c b/finalize.c
index 0ab5926c..9deba275 100644
--- a/finalize.c
+++ b/finalize.c
@@ -24,8 +24,8 @@
& (TSIZE - 1))
static struct disappearing_link {
- word dl_hidden_base; /* Pointer to object base */
- word dl_offset; /* byte offset within object. */
+ word dl_hidden_obj; /* Pointer to object base */
+ word dl_hidden_link; /* Field to be cleared. */
struct disappearing_link * dl_next;
} * dl_head[TSIZE] = {0};
@@ -37,30 +37,46 @@ static struct finalizable_object {
struct finalizable_object * fo_next;
} * fo_head[TSIZE] = {0};
+# ifdef SRC_M3
+void GC_push_finalizer_structures()
+{
+ GC_push_all((ptr_t)dl_head, (ptr_t)(dl_head + TSIZE));
+ GC_push_all((ptr_t)fo_head, (ptr_t)(fo_head + TSIZE));
+}
+# endif
+
# define ALLOC(x, t) t *x = (t *)GC_malloc(sizeof (t))
int GC_register_disappearing_link(link)
void_star * link;
{
ptr_t base;
- unsigned long offset;
+
+ base = (ptr_t)GC_base((void_star)link);
+ if (base == 0)
+ ABORT("Bad arg to GC_register_disappearing_link");
+ return(GC_general_register_disappearing_link(link, base));
+}
+
+int GC_general_register_disappearing_link(link, obj)
+void_star * link;
+void_star obj;
+{
struct disappearing_link *curr_dl;
int index;
/* Allocate before acquiring lock */
ALLOC(new_dl, struct disappearing_link);
DCL_LOCK_STATE;
-
+
+ index = HASH(link);
+ if ((word)link & (ALIGNMENT-1))
+ ABORT("Bad arg to GC_general_register_disappearing_link");
DISABLE_SIGNALS();
LOCK();
- base = (ptr_t)GC_base((void_star)link);
- index = HASH(base);
- offset = (ptr_t)link - base;
- if (base == 0 || ((word)link & (ALIGNMENT-1)))
- ABORT("Bad arg to GC_register_disappearing_link");
curr_dl = dl_head[index];
for (curr_dl = dl_head[index]; curr_dl != 0; curr_dl = curr_dl -> dl_next) {
- if (curr_dl -> dl_hidden_base == HIDE_POINTER(base)
- && curr_dl -> dl_offset == offset) {
+ if (curr_dl -> dl_hidden_link == HIDE_POINTER(link)) {
+ curr_dl -> dl_hidden_obj = HIDE_POINTER(obj);
UNLOCK();
ENABLE_SIGNALS();
GC_free((extern_ptr_t)new_dl);
@@ -68,8 +84,8 @@ void_star * link;
}
}
{
- new_dl -> dl_hidden_base = HIDE_POINTER(base);
- new_dl -> dl_offset = offset;
+ new_dl -> dl_hidden_obj = HIDE_POINTER(obj);
+ new_dl -> dl_hidden_link = HIDE_POINTER(link);
new_dl -> dl_next = dl_head[index];
dl_head[index] = new_dl;
UNLOCK();
@@ -78,27 +94,21 @@ void_star * link;
}
}
-
int GC_unregister_disappearing_link(link)
void_star * link;
{
- ptr_t base;
- unsigned long offset;
struct disappearing_link *curr_dl, *prev_dl;
int index;
DCL_LOCK_STATE;
+ index = HASH(link);
+ if (((unsigned long)link & (ALIGNMENT-1)))
+ return(0);
DISABLE_SIGNALS();
LOCK();
- base = (ptr_t)GC_base((void_star)link);
- index = HASH(base);
- offset = (ptr_t)link - base;
- if (base == 0 || ((unsigned long)link & (ALIGNMENT-1)))
- return(0);
prev_dl = 0; curr_dl = dl_head[index];
while (curr_dl != 0) {
- if (curr_dl -> dl_hidden_base == HIDE_POINTER(base)
- && curr_dl -> dl_offset == offset) {
+ if (curr_dl -> dl_hidden_link == HIDE_POINTER(link)) {
if (prev_dl == 0) {
dl_head[index] = curr_dl -> dl_next;
} else {
@@ -109,44 +119,14 @@ void_star * link;
GC_free((extern_ptr_t)curr_dl);
return(1);
}
- curr_dl = curr_dl -> dl_next;
prev_dl = curr_dl;
+ curr_dl = curr_dl -> dl_next;
}
UNLOCK();
ENABLE_SIGNALS();
return(0);
}
-bool GC_is_marked(p)
-ptr_t p;
-{
- register struct hblk *h = HBLKPTR(p);
- register hdr * hhdr = HDR(h);
- register int word_no = (word *)p - (word *)h;
-
- return(mark_bit_from_hdr(hhdr, word_no));
-}
-
-void GC_set_mark_bit(p)
-ptr_t p;
-{
- register struct hblk *h = HBLKPTR(p);
- register hdr * hhdr = HDR(h);
- register int word_no = (word *)p - (word *)h;
-
- set_mark_bit_from_hdr(hhdr, word_no);
-}
-
-void GC_clear_mark_bit(p)
-ptr_t p;
-{
- register struct hblk *h = HBLKPTR(p);
- register hdr * hhdr = HDR(h);
- register int word_no = (word *)p - (word *)h;
-
- clear_mark_bit_from_hdr(hhdr, word_no);
-}
-
void GC_register_finalizer(obj, fn, cd, ofn, ocd)
void_star obj;
GC_finalization_proc fn;
@@ -191,8 +171,8 @@ void_star * ocd;
GC_free((extern_ptr_t)new_fo);
return;
}
- curr_fo = curr_fo -> fo_next;
prev_fo = curr_fo;
+ curr_fo = curr_fo -> fo_next;
}
{
if (ofn) *ofn = 0;
@@ -228,9 +208,9 @@ void GC_finalize()
curr_dl = dl_head[i];
prev_dl = 0;
while (curr_dl != 0) {
- real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_base);
+ real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
if (!GC_is_marked(real_ptr)) {
- *(word *)(real_ptr + curr_dl -> dl_offset) = 0;
+ *(word *)(REVEAL_POINTER(curr_dl -> dl_hidden_link)) = 0;
next_dl = curr_dl -> dl_next;
if (prev_dl == 0) {
dl_head[i] = next_dl;
@@ -251,7 +231,8 @@ void GC_finalize()
for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = curr_fo -> fo_next) {
real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
if (!GC_is_marked(real_ptr)) {
- GC_mark_all(real_ptr, real_ptr + curr_fo -> fo_object_size);
+ GC_push_all(real_ptr, real_ptr + curr_fo -> fo_object_size);
+ while (!GC_mark_stack_empty()) GC_mark_from_mark_stack();
}
/*
if (GC_is_marked(real_ptr)) {
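
The generalized registration above is exactly what a weak pointer facility
needs.  A minimal sketch, assuming the client is compiled with
-DI_HIDE_POINTERS so that HIDE_POINTER/REVEAL_POINTER from gc.h are visible;
the names here are invented:

    /* Hypothetical client code: */
    static GC_word weak_ref;    /* disguised pointer; invisible to the GC */

    void make_weak(obj)
    void_star obj;
    {
        weak_ref = HIDE_POINTER(obj);
        (void) GC_general_register_disappearing_link(
                    (void_star *)&weak_ref, obj);
    }

    /* weak_ref is zeroed by the collection that finds obj unreachable. */
    /* Real code should reveal under GC_call_with_alloc_lock to avoid   */
    /* racing the collector.                                            */
    void_star get_weak()
    {
        return(weak_ref == 0 ? 0 : (void_star)REVEAL_POINTER(weak_ref));
    }
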
diff --git a/gc.h b/gc.h
index 7814f46c..101fd32b 100644
--- a/gc.h
+++ b/gc.h
@@ -23,30 +23,42 @@
/* better choices. But those appear to have incorrect definitions */
/* on many systems. Notably "typedef int size_t" seems to be both */
/* frequent and WRONG. */
-typedef unsigned long word;
-typedef long signed_word;
+typedef unsigned long GC_word;
+typedef long GC_signed_word;
/* Public read-only variables */
-extern word GC_heapsize; /* Heap size in bytes */
+extern GC_word GC_heapsize; /* Heap size in bytes */
-extern word GC_gc_no; /* Counter incremented per collection. */
+extern GC_word GC_gc_no; /* Counter incremented per collection. */
/* Includes empty GCs at startup. */
+
+extern int GC_incremental; /* Using incremental/generational collection. */
+
/* Public R/W variables */
+extern int GC_quiet; /* Disable statistics output. Only matters if */
+ /* collector has been compiled with statistics */
+ /* enabled. This involves a performance cost, */
+ /* and is thus not the default. */
+
 extern int GC_dont_gc; /* Don't collect unless explicitly requested, e.g. */
                        /* because it's not safe.                          */
extern int GC_dont_expand;
			/* Don't expand heap unless explicitly requested */
/* or forced to. */
+
+extern int GC_full_freq; /* Number of partial collections between */
+ /* full collections. Matters only if */
+ /* GC_incremental is set. */
-extern word GC_non_gc_bytes;
+extern GC_word GC_non_gc_bytes;
/* Bytes not considered candidates for collection. */
/* Used only to control scheduling of collections. */
-extern word GC_free_space_divisor;
+extern GC_word GC_free_space_divisor;
/* We try to make sure that we allocate at */
/* least N/GC_free_space_divisor bytes between */
/* collections, where N is the heap size plus */
@@ -63,30 +75,60 @@ extern word GC_free_space_divisor;
/*
* general purpose allocation routines, with roughly malloc calling conv.
* The atomic versions promise that no relevant pointers are contained
- * in the object. The nonatomic version guarantees that the new object
- * is cleared.
+ * in the object. The nonatomic versions guarantee that the new object
+ * is cleared. GC_malloc_stubborn promises that no changes to the object
+ * will occur after GC_end_stubborn_change has been called on the
+ * result of GC_malloc_stubborn. GC_malloc_uncollectable allocates an object
+ * that is scanned for pointers to collectable objects, but is not itself
+ * collectable. GC_malloc_uncollectable and GC_free called on the resulting
+ * object implicitly update GC_non_gc_bytes appropriately.
*/
-# ifdef __STDC__
+#if defined(__STDC__) || defined(__cplusplus)
extern void * GC_malloc(size_t size_in_bytes);
extern void * GC_malloc_atomic(size_t size_in_bytes);
+ extern void * GC_malloc_uncollectable(size_t size_in_bytes);
+ extern void * GC_malloc_stubborn(size_t size_in_bytes);
# else
extern char * GC_malloc(/* size_in_bytes */);
extern char * GC_malloc_atomic(/* size_in_bytes */);
+ extern char * GC_malloc_uncollectable(/* size_in_bytes */);
+ extern char * GC_malloc_stubborn(/* size_in_bytes */);
# endif
/* Explicitly deallocate an object. Dangerous if used incorrectly. */
/* Requires a pointer to the base of an object. */
-# ifdef __STDC__
+/* If the argument is stubborn, it should not be changeable when freed. */
+/* An object should not be enabled for finalization when it is 	*/
+/* explicitly deallocated. */
+#if defined(__STDC__) || defined(__cplusplus)
extern void GC_free(void * object_addr);
# else
extern void GC_free(/* object_addr */);
# endif
+/*
+ * Stubborn objects may be changed only if the collector is explicitly informed.
+ * The collector is implicitly informed of a coming change when such
+ * an object is first allocated. The following routines inform the
+ * collector that an object will no longer be changed, or that it will
+ * once again be changed. Only non-NIL pointer stores into the object
+ * are considered to be changes. The argument to GC_end_stubborn_change
+ * must be exactly the value returned by GC_malloc_stubborn or passed to
+ * GC_change_stubborn. (In the second case it may be an interior pointer
+ * within 512 bytes of the beginning of the object.)
+ * There is a performance penalty for allowing more than
+ * one stubborn object to be changed at once, but it is acceptable to
+ * do so. The same applies to dropping stubborn objects that are still
+ * changeable.
+ */
+void GC_change_stubborn(/* p */);
+void GC_end_stubborn_change(/* p */);
+
/* Return a pointer to the base (lowest address) of an object given */
/* a pointer to a location within the object. */
/* Return 0 if displaced_pointer doesn't point to within a valid */
/* object. */
-# ifdef __STDC__
+# if defined(__STDC__) || defined(__cplusplus)
void * GC_base(void * displaced_pointer);
# else
char * GC_base(/* char * displaced_pointer */);
@@ -95,7 +137,7 @@ extern word GC_free_space_divisor;
/* Given a pointer to the base of an object, return its size in bytes. */
/* The returned size may be slightly larger than what was originally */
/* requested. */
-# ifdef __STDC__
+# if defined(__STDC__) || defined(__cplusplus)
size_t GC_size(void * object_addr);
# else
size_t GC_size(/* char * object_addr */);
@@ -105,7 +147,10 @@ extern word GC_free_space_divisor;
/* a malloc followed by a bcopy. But if you rely on that, either here */
/* or with the standard C library, your code is broken. In my */
/* opinion, it shouldn't have been invented, but now we're stuck. -HB */
-# ifdef __STDC__
+/* The resulting object has the same kind as the original. */
+/* If the argument is stubborn, the result will have changes enabled. */
+/* It is an error to have changes enabled for the original object. */
+# if defined(__STDC__) || defined(__cplusplus)
extern void * GC_realloc(void * old_object, size_t new_size_in_bytes);
# else
extern char * GC_realloc(/* old_object, new_size_in_bytes */);
@@ -137,13 +182,24 @@ void GC_register_displacement(/* n */);
/* Explicitly trigger a collection. */
void GC_gcollect();
+/* Enable incremental/generational collection. */
+/* Not advisable unless dirty bits are */
+/* available or most heap objects are */
+				/* pointer-free (atomic) or immutable.	*/
+/* Don't use in leak finding mode. */
+void GC_enable_incremental();
+
/* Debugging (annotated) allocation. GC_gcollect will check */
/* objects allocated in this way for overwrites, etc. */
-# ifdef __STDC__
+# if defined(__STDC__) || defined(__cplusplus)
extern void * GC_debug_malloc(size_t size_in_bytes,
char * descr_string, int descr_int);
extern void * GC_debug_malloc_atomic(size_t size_in_bytes,
char * descr_string, int descr_int);
+ extern void * GC_debug_malloc_uncollectable(size_t size_in_bytes,
+ char * descr_string, int descr_int);
+ extern void * GC_debug_malloc_stubborn(size_t size_in_bytes,
+ char * descr_string, int descr_int);
extern void GC_debug_free(void * object_addr);
extern void * GC_debug_realloc(void * old_object,
size_t new_size_in_bytes,
@@ -152,26 +208,42 @@ void GC_gcollect();
extern char * GC_debug_malloc(/* size_in_bytes, descr_string, descr_int */);
extern char * GC_debug_malloc_atomic(/* size_in_bytes, descr_string,
descr_int */);
+ extern char * GC_debug_malloc_uncollectable(/* size_in_bytes, descr_string,
+ descr_int */);
+ extern char * GC_debug_malloc_stubborn(/* size_in_bytes, descr_string,
+ descr_int */);
extern void GC_debug_free(/* object_addr */);
extern char * GC_debug_realloc(/* old_object, new_size_in_bytes,
descr_string, descr_int */);
# endif
+void GC_debug_change_stubborn(/* p */);
+void GC_debug_end_stubborn_change(/* p */);
# ifdef GC_DEBUG
# define GC_MALLOC(sz) GC_debug_malloc(sz, __FILE__, __LINE__)
# define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, __FILE__, __LINE__)
+# define GC_MALLOC_UNCOLLECTABLE(sz) GC_debug_malloc_uncollectable(sz, \
+ __FILE__, __LINE__)
# define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, __FILE__, \
__LINE__)
# define GC_FREE(p) GC_debug_free(p)
# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
GC_register_finalizer(GC_base(p), GC_debug_invoke_finalizer, \
GC_make_closure(f,d), of, od)
+# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, __FILE__, \
+ __LINE__)
+# define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p)
+# define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
# else
# define GC_MALLOC(sz) GC_malloc(sz)
# define GC_MALLOC_ATOMIC(sz) GC_malloc_atomic(sz)
+# define GC_MALLOC_UNCOLLECTABLE(sz) GC_malloc_uncollectable(sz)
# define GC_REALLOC(old, sz) GC_realloc(old, sz)
# define GC_FREE(p) GC_free(p)
# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
GC_register_finalizer(p, f, d, of, od)
+# define GC_MALLOC_STUBBORN(sz) GC_malloc_stubborn(sz)
+# define GC_CHANGE_STUBBORN(p) GC_change_stubborn(p)
+# define GC_END_STUBBORN_CHANGE(p) GC_end_stubborn_change(p)
# endif
/* Finalization. Some of these primitives are grossly unsafe. */
@@ -181,7 +253,7 @@ void GC_gcollect();
/* with Alan Demers, Dan Greene, Carl Hauser, Barry Hayes, */
/* Christian Jacobi, and Russ Atkinson. It's not perfect, and */
/* probably nobody else agrees with it. Hans-J. Boehm 3/13/92 */
-# ifdef __STDC__
+# if defined(__STDC__) || defined(__cplusplus)
typedef void (*GC_finalization_proc)(void * obj, void * client_data);
# else
typedef void (*GC_finalization_proc)(/* void * obj, void * client_data */);
@@ -219,7 +291,7 @@ void GC_register_finalizer(/* void * obj,
/* The following routine may be used to break cycles between */
/* finalizable objects, thus causing cyclic finalizable */
-/* objects to be finalized in the cirrect order. Standard */
+/* objects to be finalized in the correct order. Standard */
/* use involves calling GC_register_disappearing_link(&p), */
/* where p is a pointer that is not followed by finalization */
/* code, and should not be considered in determining */
@@ -241,13 +313,27 @@ int GC_register_disappearing_link(/* void ** link */);
/* But this causes problems if that action alters, or */
/* examines connectivity. */
/* Returns 1 if link was already registered, 0 */
- /* otherise. */
+ /* otherwise. */
+ /* Only exists for backward compatibility. See below: */
+int GC_general_register_disappearing_link(/* void ** link, void * obj */);
+ /* A slight generalization of the above. *link is */
+ /* cleared when obj first becomes inaccessible. This */
+ /* can be used to implement weak pointers easily and */
+ /* safely. Typically link will point to a location */
+ /* holding a disguised pointer to obj. In this way */
+	/* soft pointers are broken before any object		*/
+	/* reachable from them is finalized.  Each link		*/
+	/* may be registered only once, i.e. with one obj	*/
+	/* value.  This was added after a long email discussion	*/
+ /* with John Ellis. */
int GC_unregister_disappearing_link(/* void ** link */);
/* Returns 0 if link was not actually registered. */
+ /* Undoes a registration by either of the above two */
+ /* routines. */
/* Auxiliary fns to make finalization work correctly with displaced */
/* pointers introduced by the debugging allocators. */
-# ifdef __STDC__
+# if defined(__STDC__) || defined(__cplusplus)
void * GC_make_closure(GC_finalization_proc fn, void * data);
void GC_debug_invoke_finalizer(void * obj, void * data);
# else
@@ -262,7 +348,7 @@ int GC_unregister_disappearing_link(/* void ** link */);
/* disappear. Otherwise objects can be accessed after they */
/* have been collected. */
# ifdef I_HIDE_POINTERS
-# ifdef __STDC__
+# if defined(__STDC__) || defined(__cplusplus)
# define HIDE_POINTER(p) (~(size_t)(p))
# define REVEAL_POINTER(p) ((void *)(HIDE_POINTER(p)))
# else
@@ -273,7 +359,7 @@ int GC_unregister_disappearing_link(/* void ** link */);
/* that the object still exists. This involves acquiring the */
/* allocator lock to avoid a race with the collector. */
typedef char * (*GC_fn_type)();
-# ifdef __STDC__
+# if defined(__STDC__) || defined(__cplusplus)
void * GC_call_with_alloc_lock(GC_fn_type fn, void * client_data);
# else
char * GC_call_with_alloc_lock(/* GC_fn_type fn, char * client_data */);
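
A short sketch of how a client turns the new pieces on; the code is
hypothetical, and GC_DEBUG, if wanted, must be defined before gc.h is
included so that the GC_MALLOC macros expand to the debugging versions:

    /* Hypothetical client code: */
#   define GC_DEBUG
#   include "gc.h"

    main()
    {
        char ** v;

        GC_enable_incremental(); /* only where a VDB implementation exists */
        v = (char **)GC_MALLOC(10 * sizeof(char *));
        v[0] = (char *)GC_MALLOC_ATOMIC(24);    /* holds no pointers */
        v = (char **)GC_REALLOC(v, 20 * sizeof(char *));
        return(0);
    }
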
diff --git a/gc_headers.h b/gc_headers.h
index 7c2b06d5..115ea447 100644
--- a/gc_headers.h
+++ b/gc_headers.h
@@ -11,19 +11,103 @@
# ifndef GC_HEADERS_H
# define GC_HEADERS_H
typedef struct hblkhdr hdr;
-
-# define LOG_TOP_SZ 11
-# define LOG_BOTTOM_SZ (WORDSZ - LOG_TOP_SZ - LOG_HBLKSIZE)
+
+# if CPP_WORDSZ != 32 && CPP_WORDSZ < 36
+ --> Get a real machine.
+# endif
+
+/*
+ * The 2 level tree data structure that is used to find block headers.
+ * If there are more than 32 bits in a pointer, the top level is a hash
+ * table.
+ */
+
+# if CPP_WORDSZ > 32
+# define HASH_TL
+# endif
+
+/* Define appropriate out-degrees for each of the two tree levels */
+# define LOG_BOTTOM_SZ 10
+# ifndef HASH_TL
+# define LOG_TOP_SZ (WORDSZ - LOG_BOTTOM_SZ - LOG_HBLKSIZE)
+# else
+# define LOG_TOP_SZ 11
+# endif
# define TOP_SZ (1 << LOG_TOP_SZ)
# define BOTTOM_SZ (1 << LOG_BOTTOM_SZ)
+typedef struct bi {
+ hdr * index[BOTTOM_SZ];
+ /*
+ * The bottom level index contains one of three kinds of values:
+ * 0 means we're not responsible for this block.
+ * 1 < (long)X <= MAX_JUMP means the block starts at least
+ * X * HBLKSIZE bytes before the current address.
+ * A valid pointer points to a hdr structure. (The above can't be
+ * valid pointers due to the GET_MEM return convention.)
+ */
+ struct bi * asc_link; /* All indices are linked in */
+ /* ascending order. */
+ word key; /* high order address bits. */
+# ifdef HASH_TL
+ struct bi * hash_link; /* Hash chain link. */
+# endif
+} bottom_index;
+
+/* extern bottom_index GC_all_nils; - really part of GC_arrays */
+
+/* extern bottom_index * GC_top_index []; - really part of GC_arrays */
+ /* Each entry points to a bottom_index. */
+ /* On a 32 bit machine, it points to */
+ /* the index for a set of high order */
+ /* bits equal to the index. For longer */
+ /* addresses, we hash the high order */
+ /* bits to compute the index in */
+ /* GC_top_index, and each entry points */
+ /* to a hash chain. */
+ /* The last entry in each chain is */
+ /* GC_all_nils. */
+
+
# define MAX_JUMP (HBLKSIZE - 1)
-
-extern hdr ** GC_top_index [];
-# define HDR(p) (GC_top_index \
- [(unsigned long)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE)] \
- [((unsigned long)(p) >> LOG_HBLKSIZE) & (BOTTOM_SZ - 1)])
+# ifndef HASH_TL
+# define BI(p) (GC_top_index \
+ [(word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE)])
+# define HDR(p) (BI(p)->index \
+ [((word)(p) >> LOG_HBLKSIZE) & (BOTTOM_SZ - 1)])
+# define GET_BI(p, bottom_indx) (bottom_indx) = BI(p)
+# define GET_HDR(p, hhdr) (hhdr) = HDR(p)
+# define SET_HDR(p, hhdr) HDR(p) = (hhdr)
+# define GET_HDR_ADDR(p, ha) (ha) = &(HDR(p))
+# else /* hash */
+/* Hash function for tree top level */
+# define TL_HASH(hi) ((hi) & (TOP_SZ - 1))
+/* Set bottom_indx to point to the bottom index for address p */
+# define GET_BI(p, bottom_indx) \
+ { \
+ register word hi = \
+ (word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE); \
+ register bottom_index * _bi = GC_top_index[TL_HASH(hi)]; \
+ \
+ while (_bi -> key != hi && _bi != &GC_all_nils) \
+ _bi = _bi -> hash_link; \
+ (bottom_indx) = _bi; \
+ }
+# define GET_HDR_ADDR(p, ha) \
+ { \
+ register bottom_index * bi; \
+ \
+ GET_BI(p, bi); \
+ (ha) = &(bi->index[((unsigned long)(p)>>LOG_HBLKSIZE) \
+ & (BOTTOM_SZ - 1)]); \
+ }
+# define GET_HDR(p, hhdr) { register hdr ** _ha; GET_HDR_ADDR(p, _ha); \
+ (hhdr) = *_ha; }
+# define SET_HDR(p, hhdr) { register hdr ** _ha; GET_HDR_ADDR(p, _ha); \
+ *_ha = (hhdr); }
+# define HDR(p) GC_find_header(p)
+# endif
/* Is the result a forwarding address to someplace closer to the */
/* beginning of the block or NIL? */
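
How the mark phase presumably consumes these macros (illustrative only; per
the comment above, hhdr comes back as a real header, a forwarding count, or
nil):

    /* Illustrative only -- not from the tarball: */
    void header_lookup_example(p)
    ptr_t p;                        /* candidate pointer being traced */
    {
        register hdr * hhdr;

        GET_HDR(p, hhdr);   /* one array lookup, or a short hash chain */
                            /* walk when HASH_TL (pointers > 32 bits)  */
        if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
            /* Not a known block start: ignore p, or step back up to  */
            /* (long)hhdr * HBLKSIZE bytes to reach the real header.  */
        } else {
            /* hhdr describes the heap block containing p. */
        }
    }
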
diff --git a/gc_private.h b/gc_private.h
index 1e09370f..360cbb88 100644
--- a/gc_private.h
+++ b/gc_private.h
@@ -1,7 +1,6 @@
-# define SILENT
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
- * Copyright (c) 1991, 1992 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1991-1993 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -18,6 +17,9 @@
# include "gc.h"
# endif
+typedef GC_word word;
+typedef GC_signed_word signed_word;
+
# ifndef CONFIG_H
# include "config.h"
# endif
@@ -40,8 +42,16 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
# include <stddef.h>
# endif
typedef void * extern_ptr_t;
+# define VOLATILE volatile
#else
typedef char * extern_ptr_t;
+# define VOLATILE
+#endif
+
+#ifdef AMIGA
+# define FAR __far
+#else
+# define FAR
#endif
# ifndef OS2
@@ -61,6 +71,11 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
/* */
/*********************************/
+#define STUBBORN_ALLOC /* Define stubborn allocation primitives */
+#ifdef SRC_M3
+# undef STUBBORN_ALLOC
+#endif
+
#define ALL_INTERIOR_POINTERS
/* Forces all pointers into the interior of an */
@@ -131,7 +146,7 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
/* apparently required by SPARC architecture. */
#endif
-#if defined(SPARC) || defined(M68K) && defined(SUNOS)
+#if defined(SPARC) || defined(M68K) && defined(SUNOS4)
# if !defined(PCR)
# define DYNAMIC_LOADING /* Search dynamic libraries for roots. */
# else
@@ -156,19 +171,9 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
# endif
-# define HINCR 16 /* Initial heap increment, in blocks of 4K */
+# define MINHINCR 16 /* Minimum heap increment, in blocks of HBLKSIZE */
# define MAXHINCR 512 /* Maximum heap increment, in blocks */
-# define HINCR_MULT 3 /* After each new allocation, GC_hincr is multiplied */
-# define HINCR_DIV 2 /* by HINCR_MULT/HINCR_DIV */
-# define GC_MULT 3 /* Don't collect if the fraction of */
- /* non-collectable memory in the heap */
- /* exceeds GC_MUL/GC_DIV */
-# define GC_DIV 4
-
-# define NON_GC_HINCR ((word)8)
- /* Heap increment if most of heap if collection */
- /* was suppressed because most of heap is not */
- /* collectable */
+
/*********************************/
/* */
@@ -177,6 +182,9 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
/*********************************/
#include <time.h>
+#if !defined(__STDC__) && defined(SPARC) && defined(SUNOS4)
+ clock_t clock(); /* Not in time.h, where it belongs */
+#endif
#if !defined(CLOCKS_PER_SEC)
# define CLOCKS_PER_SEC 1000000
/*
@@ -199,12 +207,19 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
# if defined(SPARC) && defined(SUNOS4)
# define BCOPY_EXISTS
# endif
-# if defined(M68K) && defined(SUNOS)
+# if defined(M68K) && defined(AMIGA)
+# define BCOPY_EXISTS
+# endif
+# if defined(M68K) && defined(NEXT)
# define BCOPY_EXISTS
# endif
# if defined(VAX)
# define BCOPY_EXISTS
# endif
+# if defined(AMIGA)
+# include <string.h>
+# define BCOPY_EXISTS
+# endif
# ifndef BCOPY_EXISTS
# include <string.h>
@@ -212,26 +227,28 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
# define bzero(x,n) memset(x, 0, n)
# endif
+# if defined(PCR) || defined(SRC_M3)
+# define THREADS
+# endif
+
/* HBLKSIZE aligned allocation. 0 is taken to mean failure */
/* space is assumed to be cleared. */
# ifdef PCR
char * real_malloc();
# define GET_MEM(bytes) HBLKPTR(real_malloc((size_t)bytes + HBLKSIZE) \
+ HBLKSIZE-1)
-# define THREADS
# else
# ifdef OS2
void * os2_alloc(size_t bytes);
# define GET_MEM(bytes) HBLKPTR((ptr_t)os2_alloc((size_t)bytes + HBLKSIZE) \
+ HBLKSIZE-1)
# else
- caddr_t sbrk();
-# ifdef __STDC__
-# define GET_MEM(bytes) HBLKPTR(sbrk((size_t)(bytes + HBLKSIZE)) \
- + HBLKSIZE-1)
+# if defined(AMIGA) || defined(NEXT)
+# define GET_MEM(bytes) HBLKPTR(calloc(1, (size_t)bytes + HBLKSIZE) \
+ + HBLKSIZE-1)
# else
-# define GET_MEM(bytes) HBLKPTR(sbrk((int)(bytes + HBLKSIZE)) \
- + HBLKSIZE-1)
+ extern ptr_t GC_unix_get_mem();
+# define GET_MEM(bytes) (struct hblk *)GC_unix_get_mem(bytes)
# endif
# endif
# endif
@@ -243,6 +260,7 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
* dirty way that is acceptable for a few instructions, e.g. by
* inhibiting preemption. This is assumed to have succeeded only
* if a subsequent call to FASTLOCK_SUCCEEDED() returns TRUE.
+ * FASTUNLOCK() is called whether or not FASTLOCK_SUCCEEDED().
* If signals cannot be tolerated with the FASTLOCK held, then
* FASTLOCK should disable signals. The code executed under
* FASTLOCK is otherwise immune to interruption, provided it is
@@ -251,9 +269,10 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
* and/or DISABLE_SIGNALS and ENABLE_SIGNALS and/or FASTLOCK.
* (There is currently no equivalent for FASTLOCK.)
*/
-# ifdef PCR
-# include "pcr/th/PCR_Th.h"
-# include "pcr/th/PCR_ThCrSec.h"
+# ifdef THREADS
+# ifdef PCR
+# include "th/PCR_Th.h"
+# include "th/PCR_ThCrSec.h"
extern struct PCR_Th_MLRep GC_allocate_ml;
# define DCL_LOCK_STATE PCR_sigset_t GC_old_sig_mask
# define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
@@ -263,13 +282,25 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
# define FASTLOCK_SUCCEEDED() (*(int *)(&GC_allocate_ml) == 0)
/* TRUE if nobody currently holds the lock */
# define FASTUNLOCK() PCR_ThCrSec_ExitSys()
+# endif
+# ifdef SRC_M3
+ extern word RT0u__inCritical;
+# define LOCK() RT0u__inCritical++
+# define UNLOCK() RT0u__inCritical--
+# endif
# else
-# define DCL_LOCK_STATE
# define LOCK()
# define UNLOCK()
-# define FASTLOCK() LOCK()
-# define FASTLOCK_SUCCEEDED() TRUE
-# define FASTUNLOCK() UNLOCK()
+
+# endif
+
+# ifndef DCL_LOCK_STATE
+# define DCL_LOCK_STATE
+# endif
+# ifndef FASTLOCK
+# define FASTLOCK() LOCK()
+# define FASTLOCK_SUCCEEDED() TRUE
+# define FASTUNLOCK() UNLOCK()
# endif
/* Delay any interrupts or signals that may abort this thread. Data */
@@ -284,8 +315,9 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
# define ENABLE_SIGNALS() \
PCR_Th_SetSigMask(&GC_old_sig_mask, NIL)
# else
-# if 0 /* Useful for debugging, and unusually */
- /* correct client code. */
+# if defined(SRC_M3) || defined(AMIGA)
+ /* Also useful for debugging, and unusually */
+ /* correct client code. */
# define DISABLE_SIGNALS()
# define ENABLE_SIGNALS()
# else
@@ -300,7 +332,7 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
* Stop and restart mutator threads.
*/
# ifdef PCR
-# include "pcr/th/PCR_ThCtl.h"
+# include "th/PCR_ThCtl.h"
# define STOP_WORLD() \
PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_stopNormal, \
PCR_allSigsBlocked, \
@@ -367,7 +399,11 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
/* heap block size, bytes. Should be power of 2 */
-#define CPP_LOG_HBLKSIZE 12
+#if CPP_WORDSZ == 32
+# define CPP_LOG_HBLKSIZE 12
+#else
+# define CPP_LOG_HBLKSIZE 13
+#endif
#define LOG_HBLKSIZE ((word)CPP_LOG_HBLKSIZE)
#define CPP_HBLKSIZE (1 << CPP_LOG_HBLKSIZE)
#define HBLKSIZE ((word)CPP_HBLKSIZE)
@@ -380,6 +416,14 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
#define MAXOBJSZ ((word)CPP_MAXOBJSZ)
# define divHBLKSZ(n) ((n) >> LOG_HBLKSIZE)
+
+# define HBLK_PTR_DIFF(p,q) divHBLKSZ((ptr_t)p - (ptr_t)q)
+ /* Equivalent to subtracting 2 hblk pointers. */
+ /* We do it this way because a compiler should */
+ /* find it hard to use an integer division */
+					/* instead of a shift.  The compiler	*/
+					/* bundled with SunOS 4.1 otherwise	*/
+					/* sometimes pessimizes the subtraction	*/
+					/* into a call to .div.			*/
# define modHBLKSZ(n) ((n) & (HBLKSIZE-1))
@@ -387,6 +431,37 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
# define HBLKDISPL(objptr) (((word) (objptr)) & (HBLKSIZE-1))
+/* Round up byte allocation requests to integral number of words: */
+# ifdef ALL_INTERIOR_POINTERS
+# define ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + WORDS_TO_BYTES(1))
+# else
+# define ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + WORDS_TO_BYTES(1) - 1)
+# endif
+
+
+/*
+ * Hash table representation of sets of pages. This assumes it is
+ * OK to add spurious entries to sets.
+ * Used by black-listing code, and perhaps by dirty bit maintenance code.
+ */
+
+# define LOG_PHT_ENTRIES 14 /* Collisions are likely if heap grows */
+ /* to more than 16K hblks = 64MB. */
+ /* Each hash table occupies 2K bytes. */
+# define PHT_ENTRIES ((word)1 << LOG_PHT_ENTRIES)
+# define PHT_SIZE (PHT_ENTRIES >> LOGWL)
+typedef word page_hash_table[PHT_SIZE];
+
+# define PHT_HASH(addr) ((((word)(addr)) >> LOG_HBLKSIZE) & (PHT_ENTRIES - 1))
+
+# define get_pht_entry_from_index(bl, index) \
+ (((bl)[divWORDSZ(index)] >> modWORDSZ(index)) & 1)
+# define set_pht_entry_from_index(bl, index) \
+ (bl)[divWORDSZ(index)] |= (word)1 << modWORDSZ(index)
+# define clear_pht_entry_from_index(bl, index) \
+ (bl)[divWORDSZ(index)] &= ~((word)1 << modWORDSZ(index))
+
+
/********************************************/
/* */
@@ -439,24 +514,28 @@ struct hblkhdr {
/* See GC_obj_map. */
/* Valid for all blocks with headers. */
/* Free blocks point to GC_invalid_map. */
- int hb_obj_kind; /* Kind of objects in the block. Each kind */
- /* identifies a mark procedure and a set of */
- /* list headers. sometimes called regions. */
-
+ unsigned short hb_obj_kind;
+ /* Kind of objects in the block. Each kind */
+ /* identifies a mark procedure and a set of */
+ /* list headers. sometimes called regions. */
+ unsigned short hb_last_reclaimed;
+ /* Value of GC_gc_no when block was */
+ /* last allocated or swept. May wrap. */
word hb_marks[MARK_BITS_SZ];
/* Bit i in the array refers to the */
/* object starting at the ith word (header */
/* INCLUDED) in the heap block. */
+ /* The lsb of word 0 is numbered 0. */
};
/* heap block body */
# define DISCARD_WORDS 0
/* Number of words to be dropped at the beginning of each block */
- /* Must be a multiple of 32. May reasonably be nonzero */
- /* on mcachines that don't guarantee longword alignment of */
+ /* Must be a multiple of WORDSZ. May reasonably be nonzero */
+ /* on machines that don't guarantee longword alignment of */
/* pointers, so that the number of false hits is minimized. */
- /* 0 and 32 are probably the only reasonable values. */
+ /* 0 and WORDSZ are probably the only reasonable values. */
# define BODY_SZ ((HBLKSIZE-WORDS_TO_BYTES(DISCARD_WORDS))/sizeof(word))
@@ -470,6 +549,11 @@ struct hblk {
# define HDR_WORDS ((word)DISCARD_WORDS)
# define HDR_BYTES ((word)WORDS_TO_BYTES(DISCARD_WORDS))
+# define OBJ_SZ_TO_BLOCKS(sz) \
+ divHBLKSZ(HDR_BYTES + WORDS_TO_BYTES(sz) + HBLKSIZE-1)
+ /* Size of block (in units of HBLKSIZE) needed to hold objects of */
+ /* given sz (in words). */
+
/* Object free list link */
# define obj_link(p) (*(ptr_t *)(p))
@@ -501,6 +585,9 @@ struct _GC_arrays {
# endif
word _words_allocd;
/* Number of words allocated during this collection cycle */
+ word _words_wasted;
+ /* Number of words wasted due to internal fragmentation */
+ /* in large objects allocated since last gc. Approximate.*/
word _non_gc_bytes_at_gc;
/* Number of explicitly managed bytes of storage */
/* at last collection. */
@@ -516,7 +603,15 @@ struct _GC_arrays {
/* bytes. */
# endif
ptr_t _aobjfreelist[MAXOBJSZ+1];
- /* free list for atomic objs*/
+ /* free list for atomic objs */
+
+ ptr_t _uobjfreelist[MAXOBJSZ+1];
+ /* uncollectable but traced objs */
+
+# ifdef STUBBORN_ALLOC
+ ptr_t _sobjfreelist[MAXOBJSZ+1];
+# endif
+ /* free list for immutable objects */
ptr_t _obj_map[MAXOBJSZ+1];
/* If not NIL, then a pointer to a map of valid */
/* object addresses. hbh_map[sz][i] is j if the */
@@ -566,24 +661,60 @@ struct _GC_arrays {
# endif
struct hblk * _reclaim_list[MAXOBJSZ+1];
struct hblk * _areclaim_list[MAXOBJSZ+1];
+ struct hblk * _ureclaim_list[MAXOBJSZ+1];
+# ifdef STUBBORN_ALLOC
+ struct hblk * _sreclaim_list[MAXOBJSZ+1];
+ page_hash_table _changed_pages;
+        /* Stubborn object pages that were changed since last call to	*/
+ /* GC_read_changed. */
+ page_hash_table _prev_changed_pages;
+        /* Stubborn object pages that were changed before last call to	*/
+ /* GC_read_changed. */
+# endif
+# if defined(PROC_VDB) || defined(MPROTECT_VDB)
+ page_hash_table _grungy_pages; /* Pages that were dirty at last */
+ /* GC_read_dirty. */
+# endif
+# define MAX_HEAP_SECTS 256 /* Separately added heap sections. */
+ struct HeapSect {
+ ptr_t hs_start; word hs_bytes;
+ } _heap_sects[MAX_HEAP_SECTS];
+ /* Block header index; see gc_headers.h */
+ bottom_index _all_nils;
+ bottom_index * _top_index [TOP_SZ];
};
-extern struct _GC_arrays GC_arrays;
+extern FAR struct _GC_arrays GC_arrays;
# define GC_objfreelist GC_arrays._objfreelist
# define GC_aobjfreelist GC_arrays._aobjfreelist
+# define GC_uobjfreelist GC_arrays._uobjfreelist
+# define GC_sobjfreelist GC_arrays._sobjfreelist
# define GC_valid_offsets GC_arrays._valid_offsets
# define GC_modws_valid_offsets GC_arrays._modws_valid_offsets
# define GC_reclaim_list GC_arrays._reclaim_list
# define GC_areclaim_list GC_arrays._areclaim_list
+# define GC_ureclaim_list GC_arrays._ureclaim_list
+# ifdef STUBBORN_ALLOC
+# define GC_sreclaim_list GC_arrays._sreclaim_list
+# define GC_changed_pages GC_arrays._changed_pages
+# define GC_prev_changed_pages GC_arrays._prev_changed_pages
+# endif
# define GC_obj_map GC_arrays._obj_map
# define GC_last_heap_addr GC_arrays._last_heap_addr
# define GC_prev_heap_addr GC_arrays._prev_heap_addr
# define GC_words_allocd GC_arrays._words_allocd
+# define GC_words_wasted GC_arrays._words_wasted
# define GC_non_gc_bytes_at_gc GC_arrays._non_gc_bytes_at_gc
# define GC_mem_freed GC_arrays._mem_freed
# define GC_heapsize GC_arrays._heapsize
# define GC_words_allocd_before_gc GC_arrays._words_allocd_before_gc
+# define GC_heap_sects GC_arrays._heap_sects
+# define GC_all_nils GC_arrays._all_nils
+# define GC_top_index GC_arrays._top_index
+# if defined(PROC_VDB) || defined(MPROTECT_VDB)
+# define GC_grungy_pages GC_arrays._grungy_pages
+# endif
# ifdef GATHERSTATS
# define GC_composite_in_use GC_arrays._composite_in_use
# define GC_atomic_in_use GC_arrays._atomic_in_use
@@ -613,6 +744,11 @@ extern struct obj_kind {
/* Predefined kinds: */
# define PTRFREE 0
# define NORMAL 1
+# define UNCOLLECTABLE 2
+# define STUBBORN 3
+
+extern word GC_n_heap_sects; /* Number of separately added heap */
+ /* sections. */
extern int GC_n_kinds;
@@ -628,12 +764,13 @@ extern struct hblk * GC_hblkfreelist;
extern bool GC_is_initialized; /* GC_init() has been run. */
+extern bool GC_objects_are_marked; /* There are marked objects in */
+ /* the heap. */
+
# ifndef PCR
extern ptr_t GC_stackbottom; /* Cool end of user stack */
# endif
-extern word GC_hincr; /* current heap increment, in blocks */
-
extern word GC_root_size; /* Total size of registered root sections */
extern bool GC_debugging_started; /* GC_debug_malloc has been called. */
@@ -644,18 +781,10 @@ extern ptr_t GC_greatest_plausible_heap_addr;
/* Likely to include future heap expansion. */
/* Operations */
-# define update_GC_hincr GC_hincr = (GC_hincr * HINCR_MULT)/HINCR_DIV; \
- if (GC_hincr > MAXHINCR) {GC_hincr = MAXHINCR;}
# ifndef abs
# define abs(x) ((x) < 0? (-(x)) : (x))
# endif
-/****************************/
-/* */
-/* Objects */
-/* */
-/****************************/
-
/* Marks are in a reserved area in */
/* each heap block. Each word has one mark bit associated */
@@ -663,7 +792,7 @@ extern ptr_t GC_greatest_plausible_heap_addr;
/* object are used. */
-/* Operations */
+/* Mark bit operations */
/*
* Retrieve, set, clear the mark bit corresponding
@@ -674,32 +803,64 @@ extern ptr_t GC_greatest_plausible_heap_addr;
*/
# define mark_bit_from_hdr(hhdr,n) (((hhdr)->hb_marks[divWORDSZ(n)] \
- >> (modWORDSZ(n))) & 1)
+ >> (modWORDSZ(n))) & (word)1)
# define set_mark_bit_from_hdr(hhdr,n) (hhdr)->hb_marks[divWORDSZ(n)] \
- |= 1 << modWORDSZ(n)
+ |= (word)1 << modWORDSZ(n)
# define clear_mark_bit_from_hdr(hhdr,n) (hhdr)->hb_marks[divWORDSZ(n)] \
- &= ~(1 << modWORDSZ(n))
+ &= ~((word)1 << modWORDSZ(n))
/* Important internal collector routines */
void GC_apply_to_all_blocks(/*fn, client_data*/);
/* Invoke fn(hbp, client_data) for each */
/* allocated heap block. */
+struct hblk * GC_next_block(/* struct hblk * h */);
mse * GC_no_mark_proc(/*addr,hhdr,msp,msl*/);
/* Mark procedure for PTRFREE objects. */
mse * GC_normal_mark_proc(/*addr,hhdr,msp,msl*/);
/* Mark procedure for NORMAL objects. */
void GC_mark_init();
-void GC_mark(); /* Mark from everything on the mark stack. */
-void GC_mark_reliable(); /* as above, but fix things up after */
- /* a mark stack overflow. */
-void GC_mark_all(/*b,t*/); /* Mark from everything in a range. */
-void GC_mark_all_stack(/*b,t*/); /* Mark from everything in a range, */
- /* consider interior pointers as valid */
+void GC_clear_marks(); /* Clear mark bits for all heap objects. */
+void GC_mark_from_mark_stack(); /* Mark from everything on the mark stack. */
+					/* Return after about one page's worth	*/
+					/* of work.				*/
+ /* work. */
+bool GC_mark_stack_empty();
+bool GC_mark_some();	/* Perform about one page's worth of marking	*/
+ /* work of whatever kind is needed. Returns */
+ /* quickly if no collection is in progress. */
+ /* Return TRUE if mark phase finished. */
+void GC_initiate_full(); /* initiate full collection. */
+void GC_initiate_partial(); /* initiate partial collection. */
+void GC_push_all(/*b,t*/); /* Push everything in a range */
+ /* onto mark stack. */
+void GC_push_dirty(/*b,t*/); /* Push all possibly changed */
+ /* subintervals of [b,t) onto */
+ /* mark stack. */
+void GC_push_conditional(/* ptr_t b, ptr_t t, bool all*/);
+ /* Do either of the above, depending */
+ /* on the third arg. */
+void GC_push_all_stack(/*b,t*/); /* As above, but consider */
+ /* interior pointers as valid */
+void GC_push_roots(/* bool all */); /* Push all or dirty roots. */
+void GC_push_regs(); /* Push register contents onto mark stack. */
void GC_remark(); /* Mark from all marked objects. Used */
/* only if we had to drop something. */
-void GC_tl_mark(/*p*/); /* Mark from a single root. */
+void GC_push_one(/*p*/); /* If p points to an object, mark it */
+ /* and push contents on the mark stack */
+void GC_push_one_checked(/*p*/); /* Ditto, omits plausibility test */
+void GC_push_marked(/* struct hblk h, hdr * hhdr */);
+ /* Push contents of all marked objects in h onto */
+ /* mark stack. */
+struct hblk * GC_push_next_marked_dirty(/* h */);
+ /* Invoke GC_push_marked on next dirty block above h. */
+ /* Return a pointer just past the end of this block. */
+struct hblk * GC_push_next_marked(/* h */);
+ /* Ditto, but also mark from clean pages. */
+struct hblk * GC_push_next_marked_uncollectable(/* h */);
+ /* Ditto, but mark only from uncollectable pages. */
+void GC_stopped_mark(); /* Mark from all roots and rescuers */
+ /* with the world stopped. */
void GC_clear_hdr_marks(/* hhdr */); /* Clear the mark bits in a header */
void GC_add_roots_inner();
void GC_register_dynamic_libraries();
@@ -738,9 +899,10 @@ void GC_invalidate_map(/* hdr */);
/* with the block. This identifies */
/* the block as invalid to the mark */
/* routines. */
-void GC_add_map_entry(/*sz*/);
+bool GC_add_map_entry(/*sz*/);
/* Add a heap block map for objects of */
/* size sz to obj_map. */
+ /* Return FALSE on failure. */
void GC_register_displacement_inner(/*offset*/);
/* Version of GC_register_displacement */
/* that assumes lock is already held */
@@ -771,18 +933,29 @@ void GC_continue_reclaim(/*size, kind*/);
/* kind, as long as possible, and */
/* as long as the corr. free list is */
/* empty. */
-bool GC_gcollect_inner(/* force */);
+void GC_reclaim_or_delete_all();
+ /* Arrange for all reclaim lists to be */
+ /* empty. Judiciously choose between */
+ /* sweeping and discarding each page. */
+bool GC_block_empty(/* hhdr */); /* Block completely unmarked? */
+void GC_gcollect_inner();
/* Collect; caller must have acquired */
/* lock and disabled signals. */
/* FALSE return indicates nothing was */
/* done due to insufficient allocation. */
-void GC_collect_or_expand(/* needed_blocks */);
+void GC_finish_collection(); /* Finish collection. Mark bits are */
+ /* consistent and lock is still held. */
+bool GC_collect_or_expand(/* needed_blocks */);
/* Collect or expand heap in an attempt */
/* make the indicated number of free */
/* blocks available. Should be called */
- /* until it succeeds or exits. */
+ /* until it fails by returning FALSE. */
void GC_init(); /* Initialize collector. */
-
+void GC_collect_a_little(/* n */);
+ /* Do n units worth of garbage */
+ /* collection work, if appropriate. */
+ /* A unit is an amount appropriate for */
+ /* HBLKSIZE bytes of allocation. */
ptr_t GC_generic_malloc(/* bytes, kind */);
/* Allocate an object of the given */
/* kind. By default, there are only */
@@ -792,6 +965,8 @@ ptr_t GC_generic_malloc(/* bytes, kind */);
/* internals to add more, e.g. to */
/* communicate object layout info */
/* to the collector. */
+ptr_t GC_generic_malloc_inner(/* bytes, kind */);
+ /* Ditto, but I already hold lock, etc. */
ptr_t GC_generic_malloc_words_small(/*words, kind*/);
/* As above, but size in units of words */
/* Bypasses MERGE_SIZES. Assumes */
@@ -801,11 +976,13 @@ ptr_t GC_allocobj(/* sz_inn_words, kind */);
/* free list nonempty, and return its */
/* head. */
-void GC_install_header(/*h*/);
+bool GC_install_header(/*h*/);
/* Install a header for block h. */
-void GC_install_counts(/*h, sz*/);
+ /* Return FALSE on failure. */
+bool GC_install_counts(/*h, sz*/);
/* Set up forwarding counts for block */
/* h of size sz. */
+ /* Return FALSE on failure. */
void GC_remove_header(/*h*/);
/* Remove the header for block h. */
void GC_remove_counts(/*h, sz*/);
@@ -822,11 +999,36 @@ void GC_print_obj(/* ptr_t p */);
/* P points to somewhere inside an object with */
/* debugging info. Print a human readable */
/* description of the object to stderr. */
-void GC_check_heap();
+extern void (*GC_check_heap)();
/* Check that all objects in the heap with */
/* debugging info are intact. Print */
/* descriptions of any that are not. */
-
+
+/* Virtual dirty bit implementation: */
+/* Each implementation exports the following: */
+void GC_read_dirty();			/* Retrieve dirty bits.	*/
+bool GC_page_was_dirty(/* struct hblk * h */);
+ /* Read retrieved dirty bits. */
+void GC_write_hint(/* struct hblk * h */);
+ /* h is about to be written. */
+void GC_dirty_init();
+
+/* Slow/general mark bit manipulation: */
+bool GC_is_marked();
+void GC_clear_mark_bit();
+void GC_set_mark_bit();
+
+/* Stubborn objects: */
+void GC_read_changed(); /* Analogous to GC_read_dirty */
+bool GC_page_was_changed(/* h */); /* Analogous to GC_page_was_dirty */
+void GC_clean_changing_list(); /* Collect obsolete changing list entries */
+void GC_stubborn_init();
+
+/* Debugging print routines: */
+void GC_print_block_list();
+void GC_print_hblkfreelist();
+
+/* Logging and diagnostic output: */
void GC_printf(/* format, a, b, c, d, e, f */);
/* A version of printf that doesn't allocate, */
/* is restricted to long arguments, and */
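The dirty bit interface declared above admits a trivially correct implementation
that simply reports every page as dirty; incremental collection then degenerates
to rescanning the whole heap, but the contract is satisfied.  A minimal sketch,
assuming the types and TRUE/FALSE from gc_private.h (the bodies below are
illustrative, not the collector's actual implementation in os_dep.c):

    /* Degenerate virtual dirty bit implementation: every page is	*/
    /* always considered dirty.  Hypothetical bodies for the		*/
    /* interface declared above.					*/
    void GC_dirty_init() {}		/* Nothing to set up.		*/

    void GC_read_dirty() {}		/* No bits to retrieve.		*/

    /*ARGSUSED*/
    bool GC_page_was_dirty(h)
    struct hblk * h;
    {
        return(TRUE);			/* Conservative: rescan all.	*/
    }

    /*ARGSUSED*/
    void GC_write_hint(h)
    struct hblk * h;
    {
        /* No write barrier is maintained, so hints are ignored.	*/
    }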
diff --git a/headers.c b/headers.c
index 15fda415..3cb76b94 100644
--- a/headers.c
+++ b/headers.c
@@ -1,6 +1,6 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
- * Copyright (c) 1991, 1992 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1991-1993 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -16,72 +16,57 @@
*
* Access speed is crucial. We implement an index structure based on a 2
* level tree.
- * For 64 bit machines this will have to be rewritten. We expect that the
- * winning strategy there is to use a hash table as a cache, with
- * collisions resolved through a 4 or 5 level tree.
*/
# include "gc_private.h"
-# if CPP_WORDSZ != 32
-# if CPP_WORDSZ > 32
- --> This needs to be reimplemented. See above.
-# else
- --> Get a real machine.
-# endif
-# endif
-
-hdr ** GC_top_index [TOP_SZ];
-
-typedef hdr * bottom_index[BOTTOM_SZ];
-
-/*
- * The bottom level index contains one of three kinds of values:
- * 0 means we're not responsible for this block.
- * 1 < (long)X <= MAX_JUMP means the block starts at least
- * X * HBLKSIZE bytes before the current address.
- * A valid pointer points to a hdr structure. (The above can't be
- * valid pointers due to the GET_MEM return convention.)
- */
-
-static bottom_index all_nils = { 0 };
+bottom_index * GC_all_bottom_indices = 0;
/* Non-macro version of header location routine */
hdr * GC_find_header(h)
ptr_t h;
{
- return(HDR(h));
+# ifdef HASH_TL
+ register hdr * result;
+ GET_HDR(h, result);
+ return(result);
+# else
+ return(HDR(h));
+# endif
}
/* Routines to dynamically allocate collector data structures that will */
/* never be freed. */
-static char * scratch_free_ptr = 0;
+static ptr_t scratch_free_ptr = 0;
-static char * scratch_end_ptr = 0;
+static ptr_t scratch_end_ptr = 0;
ptr_t GC_scratch_alloc(bytes)
register word bytes;
{
- register char * result = scratch_free_ptr;
+ register ptr_t result = scratch_free_ptr;
scratch_free_ptr += bytes;
if (scratch_free_ptr <= scratch_end_ptr) {
return(result);
}
{
- long bytes_to_get = ((HINCR+1) * HBLKSIZE + bytes) & ~(HBLKSIZE - 1);
+ word bytes_to_get = MINHINCR * HBLKSIZE;
- scratch_free_ptr = (char *)GET_MEM(bytes_to_get);
- if (scratch_free_ptr == 0) {
- GC_err_printf0("Out of memory - trying to allocate less\n");
- result = (char *)GET_MEM(bytes);
- if (result == 0) {
- GC_err_printf0("Out of memory - giving up\n");
- } else {
- scratch_free_ptr -= bytes;
- return(result);
- }
+ if (bytes_to_get <= bytes) {
+ /* Undo the damage, and get memory directly */
+ scratch_free_ptr -= bytes;
+ return((ptr_t)GET_MEM(bytes));
+ }
+ result = (ptr_t)GET_MEM(bytes_to_get);
+ if (result == 0) {
+# ifdef PRINTSTATS
+ GC_printf0("Out of memory - trying to allocate less\n");
+# endif
+ scratch_free_ptr -= bytes;
+ return((ptr_t)GET_MEM(bytes));
}
+ scratch_free_ptr = result;
scratch_end_ptr = scratch_free_ptr + bytes_to_get;
return(GC_scratch_alloc(bytes));
}
@@ -115,35 +100,66 @@ GC_init_headers()
register int i;
for (i = 0; i < TOP_SZ; i++) {
- GC_top_index[i] = all_nils;
+ GC_top_index[i] = &GC_all_nils;
}
}
/* Make sure that there is a bottom level index block for address addr */
-static void get_index(addr)
+/* Return FALSE on failure. */
+static bool get_index(addr)
register word addr;
{
- register word indx =
+ register word hi =
(word)(addr) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
-
- if (GC_top_index[indx] == all_nils) {
- GC_top_index[indx] = (hdr **)
- GC_scratch_alloc((word)(sizeof (bottom_index)));
- bzero((char *)(GC_top_index[indx]), (int)(sizeof (bottom_index)));
- }
+ register bottom_index * r;
+ register bottom_index * p;
+ register bottom_index ** prev;
+# ifdef HASH_TL
+ register i = TL_HASH(hi);
+ register bottom_index * old;
+
+ old = p = GC_top_index[i];
+ while(p != &GC_all_nils) {
+ if (p -> key == hi) return(TRUE);
+ p = p -> hash_link;
+ }
+ r = (bottom_index*)GC_scratch_alloc((word)(sizeof (bottom_index)));
+ if (r == 0) return(FALSE);
+ bzero((char *)r, (int)(sizeof (bottom_index)));
+ r -> hash_link = old;
+ GC_top_index[i] = r;
+# else
+ if (GC_top_index[hi] != &GC_all_nils) return(TRUE);
+ r = (bottom_index*)GC_scratch_alloc((word)(sizeof (bottom_index)));
+ if (r == 0) return(FALSE);
+ GC_top_index[hi] = r;
+ bzero((char *)r, (int)(sizeof (bottom_index)));
+# endif
+ r -> key = hi;
+ /* Add it to the list of bottom indices */
+ prev = &GC_all_bottom_indices;
+ while ((p = *prev) != 0 && p -> key < hi) prev = &(p -> asc_link);
+ r -> asc_link = p;
+ *prev = r;
+ return(TRUE);
}
/* Install a header for block h. */
/* The header is uninitialized. */
-void GC_install_header(h)
+/* Returns FALSE on failure. */
+bool GC_install_header(h)
register struct hblk * h;
{
- get_index((word) h);
- HDR(h) = alloc_hdr();
+ hdr * result;
+
+ if (!get_index((word) h)) return(FALSE);
+ result = alloc_hdr();
+ SET_HDR(h, result);
+ return(result != 0);
}
/* Set up forwarding counts for block h of size sz */
-void GC_install_counts(h, sz)
+bool GC_install_counts(h, sz)
register struct hblk * h;
register word sz; /* bytes */
{
@@ -151,21 +167,25 @@ register word sz; /* bytes */
register int i;
for (hbp = h; (char *)hbp < (char *)h + sz; hbp += BOTTOM_SZ) {
- get_index((word) hbp);
+ if (!get_index((word) hbp)) return(FALSE);
}
- get_index((word)h + sz - 1);
+ if (!get_index((word)h + sz - 1)) return(FALSE);
for (hbp = h + 1; (char *)hbp < (char *)h + sz; hbp += 1) {
- i = hbp - h;
- HDR(hbp) = (hdr *)(i > MAX_JUMP? MAX_JUMP : i);
+ i = HBLK_PTR_DIFF(hbp, h);
+ SET_HDR(hbp, (hdr *)(i > MAX_JUMP? MAX_JUMP : i));
}
+ return(TRUE);
}
/* Remove the header for block h */
void GC_remove_header(h)
register struct hblk * h;
{
- free_hdr(HDR(h));
- HDR(h) = 0;
+ hdr ** ha;
+
+ GET_HDR_ADDR(h, ha);
+ free_hdr(*ha);
+ *ha = 0;
}
/* Remove forwarding counts for h */
@@ -176,7 +196,7 @@ register word sz; /* bytes */
register struct hblk * hbp;
for (hbp = h+1; (char *)hbp < (char *)h + sz; hbp += 1) {
- HDR(hbp) = 0;
+ SET_HDR(hbp, 0);
}
}
@@ -186,26 +206,60 @@ void GC_apply_to_all_blocks(fn, client_data)
void (*fn)(/* struct hblk *h, word client_data */);
word client_data;
{
- register int i, j;
- register hdr ** index_p;
+ register int j;
+ register bottom_index * index_p;
- for (i = 0; i < TOP_SZ; i++) {
- index_p = GC_top_index[i];
- if (index_p != all_nils) {
- for (j = BOTTOM_SZ-1; j >= 0;) {
- if (!IS_FORWARDING_ADDR_OR_NIL(index_p[j])) {
- if (index_p[j]->hb_map != GC_invalid_map) {
+ for (index_p = GC_all_bottom_indices; index_p != 0;
+ index_p = index_p -> asc_link) {
+ for (j = BOTTOM_SZ-1; j >= 0;) {
+ if (!IS_FORWARDING_ADDR_OR_NIL(index_p->index[j])) {
+ if (index_p->index[j]->hb_map != GC_invalid_map) {
(*fn)(((struct hblk *)
- (((i << LOG_BOTTOM_SZ) + j) << LOG_HBLKSIZE)),
+ (((index_p->key << LOG_BOTTOM_SZ) + (word)j)
+ << LOG_HBLKSIZE)),
client_data);
- }
- j--;
- } else if (index_p[j] == 0) {
- j--;
+ }
+ j--;
+ } else if (index_p->index[j] == 0) {
+ j--;
+ } else {
+ j -= (int)(index_p->index[j]);
+ }
+ }
+ }
+}
+
+/* Get the next valid block whose address is at least h */
+/* Return 0 if there is none. */
+struct hblk * GC_next_block(h)
+struct hblk * h;
+{
+ register bottom_index * bi;
+ register word j = ((word)h >> LOG_HBLKSIZE) & (BOTTOM_SZ-1);
+
+ GET_BI(h, bi);
+ if (bi == &GC_all_nils) {
+ register int hi = (word)h >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
+ bi = GC_all_bottom_indices;
+ while (bi != 0 && bi -> key < hi) bi = bi -> asc_link;
+ j = 0;
+ }
+ while(bi != 0) {
+ while (j < BOTTOM_SZ) {
+ if (IS_FORWARDING_ADDR_OR_NIL(bi -> index[j])) {
+ j++;
+ } else {
+ if (bi->index[j]->hb_map != GC_invalid_map) {
+ return((struct hblk *)
+ (((bi -> key << LOG_BOTTOM_SZ) + j)
+ << LOG_HBLKSIZE));
} else {
- j -= (int)(index_p[j]);
+ j += divHBLKSZ(bi->index[j] -> hb_sz);
}
}
}
+ j = 0;
+ bi = bi -> asc_link;
}
+ return(0);
}
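The new bottom_index structure supports three access paths: direct top-level
indexing, the HASH_TL variant that chains bottom indices off a hashed top
level, and the asc_link list walked by GC_apply_to_all_blocks and
GC_next_block.  For the non-hashed case the lookup reduces to two array
indexing operations; a sketch with a hypothetical helper name, assuming the
declarations in gc_private.h (the real versions are the HDR/GET_HDR macros
in gc_headers.h):

    /* Illustrative two-level header lookup for the non-HASH_TL case.	*/
    hdr * example_find_hdr(p)
    register word p;
    {
        register bottom_index * bi =
            GC_top_index[p >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE)];

        /* Addresses that were never installed land on GC_all_nils,	*/
        /* whose entries are all 0.  Entries for interior blocks hold	*/
        /* small forwarding counts rather than valid hdr pointers.	*/
        return(bi -> index[(p >> LOG_HBLKSIZE) & (BOTTOM_SZ - 1)]);
    }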
diff --git a/interface.c b/interface.c
deleted file mode 100644
index 66a1771f..00000000
--- a/interface.c
+++ /dev/null
@@ -1,86 +0,0 @@
-#include "gc_private.h"
-#include <stddef.h>
-
-/* These are some additional routines to interface the collector to C */
-/* This is a rather special purpose interface that tries to keep down the */
-/* number of collections in the presence of explicit deallocations. */
-/* A call to this malloc effectively declares that the resulting object */
-/* will be explicitly deallocated with very high probability. */
-/* The reduced collection frequency may interfere with object */
-/* coalescing. */
-/* If you just want to rename GC_malloc and friends, this is NOT */
-/* the right way to do it. */
-
-/* This contributed by David Chase (chase@eng.sun.com) a long time */
-/* ago. Much of its original functionality has since been absorbed */
-/* elsewhere. */
-/* They illustrates the use of GC_non_gc_bytes */
-/* Hacked by H. Boehm (11/16/89) to accomodate GC_realloc. */
-/* Further updated (2/20/92) to reflect changes in interfaces and data */
-/* structures. */
-/* Further updated (8/25/92) to correct previously introduced bugs and */
-/* make it compile with semi-modern compilers. */
-/* Note that extern_ptr_t is either void * or char *, as appropriate. */
-
-
-/* This free routine is merely advisory -- it reduces the estimate of
- storage that won't be reclaimed in the next collection, thus
- making it more likely that the collector will run next time more
- memory is needed. */
-
-void free(p)
-extern_ptr_t p;
-{
- size_t inc = GC_size(p);
- GC_non_gc_bytes -= inc;
-}
-
-/* This free routine adjusts the collector estimates of space in use,
- but also actually releases the memory for reuse. It is thus "unsafe"
- if the programmer "frees" memory that is actually still in use. */
-
-void unsafe_free(p)
-extern_ptr_t p;
-{
- size_t inc = GC_size(p);
- GC_non_gc_bytes -= inc;
- GC_free(p);
-}
-
-
-/* malloc and malloc_atomic are obvious substitutes for the C library
- malloc. Note that the storage so allocated is regarded as not likely
- to be reclaimed by the collector (until explicitly freed), and thus
- its size is added to non_gc_bytes.
-*/
-
-extern_ptr_t malloc(bytesize)
-size_t bytesize;
-{
- extern_ptr_t result;
-
- result = (extern_ptr_t) GC_malloc (bytesize);
- GC_non_gc_bytes += (bytesize + 3) & ~3;
- return result;
-}
-
-extern_ptr_t malloc_atomic(bytesize)
-size_t bytesize;
-{
- extern_ptr_t result;
-
- result = (extern_ptr_t) GC_malloc_atomic (bytesize);
- GC_non_gc_bytes += (bytesize + 3) & ~3;
- return result;
-}
-
-extern_ptr_t realloc(old,size)
-extern_ptr_t old;
-size_t size;
-{
- int inc = GC_size(old);
-
- GC_non_gc_bytes += ((size + 3) & ~3) - inc;
- return(GC_realloc(old, size));
-}
-
diff --git a/mach_dep.c b/mach_dep.c
index 8e3ac078..8dc15d8a 100644
--- a/mach_dep.c
+++ b/mach_dep.c
@@ -11,7 +11,7 @@
/* version at the end, that is likely, but not guaranteed to work */
/* on your architecture. Run the test_setjmp program to see whether */
/* there is any chance it will work. */
-GC_mark_regs()
+void GC_push_regs()
{
# ifdef RT
register long TMP_SP; /* must be bound to r11 */
@@ -20,32 +20,32 @@ GC_mark_regs()
/* VAX - generic code below does not work under 4.2 */
/* r1 through r5 are caller save, and therefore */
/* on the stack or dead. */
- asm("pushl r11"); asm("calls $1,_GC_tl_mark");
- asm("pushl r10"); asm("calls $1,_GC_tl_mark");
- asm("pushl r9"); asm("calls $1,_GC_tl_mark");
- asm("pushl r8"); asm("calls $1,_GC_tl_mark");
- asm("pushl r7"); asm("calls $1,_GC_tl_mark");
- asm("pushl r6"); asm("calls $1,_GC_tl_mark");
+ asm("pushl r11"); asm("calls $1,_GC_push_one");
+ asm("pushl r10"); asm("calls $1,_GC_push_one");
+ asm("pushl r9"); asm("calls $1,_GC_push_one");
+ asm("pushl r8"); asm("calls $1,_GC_push_one");
+ asm("pushl r7"); asm("calls $1,_GC_push_one");
+ asm("pushl r6"); asm("calls $1,_GC_push_one");
# endif
-# if defined(M68K) && defined(SUNOS)
+# if defined(M68K) && (defined(SUNOS4) || defined(NEXT))
/* M68K SUNOS - could be replaced by generic code */
/* a0, a1 and d1 are caller save */
/* and therefore are on stack or dead. */
asm("subqw #0x4,sp"); /* allocate word on top of stack */
- asm("movl a2,sp@"); asm("jbsr _GC_tl_mark");
- asm("movl a3,sp@"); asm("jbsr _GC_tl_mark");
- asm("movl a4,sp@"); asm("jbsr _GC_tl_mark");
- asm("movl a5,sp@"); asm("jbsr _GC_tl_mark");
+ asm("movl a2,sp@"); asm("jbsr _GC_push_one");
+ asm("movl a3,sp@"); asm("jbsr _GC_push_one");
+ asm("movl a4,sp@"); asm("jbsr _GC_push_one");
+ asm("movl a5,sp@"); asm("jbsr _GC_push_one");
/* Skip frame pointer and stack pointer */
- asm("movl d1,sp@"); asm("jbsr _GC_tl_mark");
- asm("movl d2,sp@"); asm("jbsr _GC_tl_mark");
- asm("movl d3,sp@"); asm("jbsr _GC_tl_mark");
- asm("movl d4,sp@"); asm("jbsr _GC_tl_mark");
- asm("movl d5,sp@"); asm("jbsr _GC_tl_mark");
- asm("movl d6,sp@"); asm("jbsr _GC_tl_mark");
- asm("movl d7,sp@"); asm("jbsr _GC_tl_mark");
+ asm("movl d1,sp@"); asm("jbsr _GC_push_one");
+ asm("movl d2,sp@"); asm("jbsr _GC_push_one");
+ asm("movl d3,sp@"); asm("jbsr _GC_push_one");
+ asm("movl d4,sp@"); asm("jbsr _GC_push_one");
+ asm("movl d5,sp@"); asm("jbsr _GC_push_one");
+ asm("movl d6,sp@"); asm("jbsr _GC_push_one");
+ asm("movl d7,sp@"); asm("jbsr _GC_push_one");
asm("addqw #0x4,sp"); /* put stack back where it was */
# endif
@@ -56,18 +56,18 @@ GC_mark_regs()
asm("subq.w &0x4,%sp"); /* allocate word on top of stack */
- asm("mov.l %a2,(%sp)"); asm("jsr _GC_tl_mark");
- asm("mov.l %a3,(%sp)"); asm("jsr _GC_tl_mark");
- asm("mov.l %a4,(%sp)"); asm("jsr _GC_tl_mark");
- asm("mov.l %a5,(%sp)"); asm("jsr _GC_tl_mark");
+ asm("mov.l %a2,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %a3,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %a4,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %a5,(%sp)"); asm("jsr _GC_push_one");
/* Skip frame pointer and stack pointer */
- asm("mov.l %d1,(%sp)"); asm("jsr _GC_tl_mark");
- asm("mov.l %d2,(%sp)"); asm("jsr _GC_tl_mark");
- asm("mov.l %d3,(%sp)"); asm("jsr _GC_tl_mark");
- asm("mov.l %d4,(%sp)"); asm("jsr _GC_tl_mark");
- asm("mov.l %d5,(%sp)"); asm("jsr _GC_tl_mark");
- asm("mov.l %d6,(%sp)"); asm("jsr _GC_tl_mark");
- asm("mov.l %d7,(%sp)"); asm("jsr _GC_tl_mark");
+ asm("mov.l %d1,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d2,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d3,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d4,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d5,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d6,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d7,(%sp)"); asm("jsr _GC_push_one");
asm("addq.w &0x4,%sp"); /* put stack back where it was */
# endif /* M68K HP */
@@ -75,51 +75,55 @@ GC_mark_regs()
# if defined(I386) && !defined(OS2) && !defined(SUNOS5)
/* I386 code, generic code does not appear to work */
    /* It does appear to work under OS2, and asms don't	*/
- asm("pushl %eax"); asm("call _GC_tl_mark"); asm("addl $4,%esp");
- asm("pushl %ecx"); asm("call _GC_tl_mark"); asm("addl $4,%esp");
- asm("pushl %edx"); asm("call _GC_tl_mark"); asm("addl $4,%esp");
- asm("pushl %esi"); asm("call _GC_tl_mark"); asm("addl $4,%esp");
- asm("pushl %edi"); asm("call _GC_tl_mark"); asm("addl $4,%esp");
- asm("pushl %ebx"); asm("call _GC_tl_mark"); asm("addl $4,%esp");
+ asm("pushl %eax"); asm("call _GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %ecx"); asm("call _GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %edx"); asm("call _GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %esi"); asm("call _GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %edi"); asm("call _GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %ebx"); asm("call _GC_push_one"); asm("addl $4,%esp");
# endif
# if defined(I386) && defined(SUNOS5)
/* I386 code, generic code does not appear to work */
    /* It does appear to work under OS2, and asms don't	*/
- asm("pushl %eax"); asm("call GC_tl_mark"); asm("addl $4,%esp");
- asm("pushl %ecx"); asm("call GC_tl_mark"); asm("addl $4,%esp");
- asm("pushl %edx"); asm("call GC_tl_mark"); asm("addl $4,%esp");
- asm("pushl %esi"); asm("call GC_tl_mark"); asm("addl $4,%esp");
- asm("pushl %edi"); asm("call GC_tl_mark"); asm("addl $4,%esp");
- asm("pushl %ebx"); asm("call GC_tl_mark"); asm("addl $4,%esp");
+ asm("pushl %eax"); asm("call GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %ecx"); asm("call GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %edx"); asm("call GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %esi"); asm("call GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %edi"); asm("call GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %ebx"); asm("call GC_push_one"); asm("addl $4,%esp");
# endif
# ifdef NS32K
- asm ("movd r3, tos"); asm ("bsr ?_GC_tl_mark"); asm ("adjspb $-4");
- asm ("movd r4, tos"); asm ("bsr ?_GC_tl_mark"); asm ("adjspb $-4");
- asm ("movd r5, tos"); asm ("bsr ?_GC_tl_mark"); asm ("adjspb $-4");
- asm ("movd r6, tos"); asm ("bsr ?_GC_tl_mark"); asm ("adjspb $-4");
- asm ("movd r7, tos"); asm ("bsr ?_GC_tl_mark"); asm ("adjspb $-4");
+ asm ("movd r3, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
+ asm ("movd r4, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
+ asm ("movd r5, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
+ asm ("movd r6, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
+ asm ("movd r7, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
# endif
# ifdef SPARC
- /* generic code will not work */
- GC_save_regs_in_stack();
+ {
+ void GC_save_regs_in_stack();
+
+ /* generic code will not work */
+ GC_save_regs_in_stack();
+ }
# endif
# ifdef RT
- GC_tl_mark(TMP_SP); /* GC_tl_mark from r11 */
-
- asm("cas r11, r6, r0"); GC_tl_mark(TMP_SP); /* r6 */
- asm("cas r11, r7, r0"); GC_tl_mark(TMP_SP); /* through */
- asm("cas r11, r8, r0"); GC_tl_mark(TMP_SP); /* r10 */
- asm("cas r11, r9, r0"); GC_tl_mark(TMP_SP);
- asm("cas r11, r10, r0"); GC_tl_mark(TMP_SP);
-
- asm("cas r11, r12, r0"); GC_tl_mark(TMP_SP); /* r12 */
- asm("cas r11, r13, r0"); GC_tl_mark(TMP_SP); /* through */
- asm("cas r11, r14, r0"); GC_tl_mark(TMP_SP); /* r15 */
- asm("cas r11, r15, r0"); GC_tl_mark(TMP_SP);
+ GC_push_one(TMP_SP); /* GC_push_one from r11 */
+
+ asm("cas r11, r6, r0"); GC_push_one(TMP_SP); /* r6 */
+ asm("cas r11, r7, r0"); GC_push_one(TMP_SP); /* through */
+ asm("cas r11, r8, r0"); GC_push_one(TMP_SP); /* r10 */
+ asm("cas r11, r9, r0"); GC_push_one(TMP_SP);
+ asm("cas r11, r10, r0"); GC_push_one(TMP_SP);
+
+ asm("cas r11, r12, r0"); GC_push_one(TMP_SP); /* r12 */
+ asm("cas r11, r13, r0"); GC_push_one(TMP_SP); /* through */
+ asm("cas r11, r14, r0"); GC_push_one(TMP_SP); /* r15 */
+ asm("cas r11, r15, r0"); GC_push_one(TMP_SP);
# endif
# if defined(M68K) && defined(SYSV)
@@ -129,35 +133,35 @@ GC_mark_regs()
# ifdef __GNUC__
asm("subqw #0x4,%sp"); /* allocate word on top of stack */
- asm("movl %a2,%sp@"); asm("jbsr GC_tl_mark");
- asm("movl %a3,%sp@"); asm("jbsr GC_tl_mark");
- asm("movl %a4,%sp@"); asm("jbsr GC_tl_mark");
- asm("movl %a5,%sp@"); asm("jbsr GC_tl_mark");
+ asm("movl %a2,%sp@"); asm("jbsr GC_push_one");
+ asm("movl %a3,%sp@"); asm("jbsr GC_push_one");
+ asm("movl %a4,%sp@"); asm("jbsr GC_push_one");
+ asm("movl %a5,%sp@"); asm("jbsr GC_push_one");
/* Skip frame pointer and stack pointer */
- asm("movl %d1,%sp@"); asm("jbsr GC_tl_mark");
- asm("movl %d2,%sp@"); asm("jbsr GC_tl_mark");
- asm("movl %d3,%sp@"); asm("jbsr GC_tl_mark");
- asm("movl %d4,%sp@"); asm("jbsr GC_tl_mark");
- asm("movl %d5,%sp@"); asm("jbsr GC_tl_mark");
- asm("movl %d6,%sp@"); asm("jbsr GC_tl_mark");
- asm("movl %d7,%sp@"); asm("jbsr GC_tl_mark");
+ asm("movl %d1,%sp@"); asm("jbsr GC_push_one");
+ asm("movl %d2,%sp@"); asm("jbsr GC_push_one");
+ asm("movl %d3,%sp@"); asm("jbsr GC_push_one");
+ asm("movl %d4,%sp@"); asm("jbsr GC_push_one");
+ asm("movl %d5,%sp@"); asm("jbsr GC_push_one");
+ asm("movl %d6,%sp@"); asm("jbsr GC_push_one");
+ asm("movl %d7,%sp@"); asm("jbsr GC_push_one");
asm("addqw #0x4,%sp"); /* put stack back where it was */
# else /* !__GNUC__*/
asm("subq.w &0x4,%sp"); /* allocate word on top of stack */
- asm("mov.l %a2,(%sp)"); asm("jsr GC_tl_mark");
- asm("mov.l %a3,(%sp)"); asm("jsr GC_tl_mark");
- asm("mov.l %a4,(%sp)"); asm("jsr GC_tl_mark");
- asm("mov.l %a5,(%sp)"); asm("jsr GC_tl_mark");
+ asm("mov.l %a2,(%sp)"); asm("jsr GC_push_one");
+ asm("mov.l %a3,(%sp)"); asm("jsr GC_push_one");
+ asm("mov.l %a4,(%sp)"); asm("jsr GC_push_one");
+ asm("mov.l %a5,(%sp)"); asm("jsr GC_push_one");
/* Skip frame pointer and stack pointer */
- asm("mov.l %d1,(%sp)"); asm("jsr GC_tl_mark");
- asm("mov.l %d2,(%sp)"); asm("jsr GC_tl_mark");
- asm("mov.l %d3,(%sp)"); asm("jsr GC_tl_mark");
- asm("mov.l %d4,(%sp)"); asm("jsr GC_tl_mark");
- asm("mov.l %d5,(%sp)"); asm("jsr GC_tl_mark");
- asm("mov.l %d6,(%sp)"); asm("jsr GC_tl_mark");
- asm("mov.l %d7,(%sp)"); asm("jsr GC_tl_mark");
+ asm("mov.l %d1,(%sp)"); asm("jsr GC_push_one");
+ asm("mov.l %d2,(%sp)"); asm("jsr GC_push_one");
+ asm("mov.l %d3,(%sp)"); asm("jsr GC_push_one");
+ asm("mov.l %d4,(%sp)"); asm("jsr GC_push_one");
+ asm("mov.l %d5,(%sp)"); asm("jsr GC_push_one");
+ asm("mov.l %d6,(%sp)"); asm("jsr GC_push_one");
+ asm("mov.l %d7,(%sp)"); asm("jsr GC_push_one");
asm("addq.w &0x4,%sp"); /* put stack back where it was */
# endif /* !__GNUC__ */
@@ -170,7 +174,7 @@ GC_mark_regs()
/* We're not sure whether he would like */
	    	    	    	    /* to be acknowledged for it or not. */
{
- jmp_buf regs;
+ static jmp_buf regs;
register word * i = (word *) regs;
register ptr_t lim = (ptr_t)(regs) + (sizeof regs);
@@ -180,7 +184,7 @@ GC_mark_regs()
*i = 0;
}
(void) _setjmp(regs);
- GC_mark_all_stack((ptr_t)regs, lim);
+ GC_push_all_stack((ptr_t)regs, lim);
}
# endif
@@ -197,6 +201,7 @@ GC_mark_regs()
/* On register window machines, we need a way to force registers into */
/* the stack. */
# ifdef SPARC
+ asm(" .seg \"text\"");
# ifdef SUNOS5
asm(" .globl GC_save_regs_in_stack");
asm("GC_save_regs_in_stack:");
@@ -208,5 +213,9 @@ GC_mark_regs()
asm(" mov %sp,%o0");
asm(" retl");
asm(" nop");
+
+# ifdef LINT
+ void GC_save_regs_in_stack() {}
+# endif
# endif
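The generic version referred to above, distilled: _setjmp flushes the
callee-save registers into a jmp_buf, which is then scanned as if it were
part of the stack.  A sketch with a hypothetical name, assuming <setjmp.h>
and the collector's GC_push_all_stack (the jmp_buf is static, matching the
change above):

    # include <setjmp.h>

    void example_generic_push_regs()
    {
        static jmp_buf regs;
        register ptr_t lim = (ptr_t)(regs) + (sizeof regs);

        (void) _setjmp(regs);		/* dump registers into regs	*/
        GC_push_all_stack((ptr_t)regs, lim);
    }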
diff --git a/malloc.c b/malloc.c
new file mode 100644
index 00000000..0f08a30f
--- /dev/null
+++ b/malloc.c
@@ -0,0 +1,398 @@
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1993 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to copy this garbage collector for any purpose,
+ * provided the above notices are retained on all copies.
+ */
+
+#include <stdio.h>
+#include "gc_private.h"
+
+extern void GC_clear_stack(); /* in misc.c */
+
+# ifdef ALL_INTERIOR_POINTERS
+# define SMALL_OBJ(bytes) ((bytes) < WORDS_TO_BYTES(MAXOBJSZ))
+# define ADD_SLOP(bytes) ((bytes)+1)
+# else
+# define SMALL_OBJ(bytes) ((bytes) <= WORDS_TO_BYTES(MAXOBJSZ))
+# define ADD_SLOP(bytes) (bytes)
+# endif
+
+
+/* Allocate lb bytes for an object of kind k.	*/
+/* Should not be used directly to allocate	*/
+/* objects such as STUBBORN objects that */
+/* require special handling on allocation. */
+/* First a version that assumes we already */
+/* hold lock: */
+ptr_t GC_generic_malloc_inner(lb, k)
+register word lb;
+register int k;
+{
+register word lw;
+register ptr_t op;
+register ptr_t *opp;
+
+ if( SMALL_OBJ(lb) ) {
+# ifdef MERGE_SIZES
+ lw = GC_size_map[lb];
+# else
+ lw = ROUNDED_UP_WORDS(lb);
+ if (lw == 0) lw = 1;
+# endif
+ opp = &(GC_obj_kinds[k].ok_freelist[lw]);
+ if( (op = *opp) == 0 ) {
+ if (!GC_is_initialized) {
+ GC_init_inner();
+ return(GC_generic_malloc_inner(lb, k));
+ }
+ GC_clear_stack();
+ op = GC_allocobj(lw, k);
+ if (op == 0) goto out;
+ }
+ /* Here everything is in a consistent state. */
+ /* We assume the following assignment is */
+ /* atomic. If we get aborted */
+ /* after the assignment, we lose an object, */
+ /* but that's benign. */
+ /* Volatile declarations may need to be added */
+ /* to prevent the compiler from breaking things.*/
+ *opp = obj_link(op);
+ obj_link(op) = 0;
+ } else {
+ register struct hblk * h;
+ register word n_blocks = divHBLKSZ(lb + HDR_BYTES + HBLKSIZE-1);
+
+ if (!GC_is_initialized) GC_init_inner();
+ /* Do our share of marking work */
+ if(GC_incremental && !GC_dont_gc) GC_collect_a_little((int)n_blocks);
+ lw = ROUNDED_UP_WORDS(lb);
+ while ((h = GC_allochblk(lw, k)) == 0
+ && GC_collect_or_expand(n_blocks));
+ if (h == 0) {
+ op = 0;
+ } else {
+ op = (ptr_t) (h -> hb_body);
+ GC_words_wasted += BYTES_TO_WORDS(n_blocks * HBLKSIZE) - lw;
+ }
+ }
+ GC_words_allocd += lw;
+
+out:
+ return((ptr_t)op);
+}
+
+ptr_t GC_generic_malloc(lb, k)
+register word lb;
+register int k;
+{
+ ptr_t result;
+ DCL_LOCK_STATE;
+
+ DISABLE_SIGNALS();
+ LOCK();
+ result = GC_generic_malloc_inner(lb, k);
+ UNLOCK();
+ ENABLE_SIGNALS();
+ return(result);
+}
+
+
+/* Analogous to the above, but assumes a small object size, and */
+/* bypasses MERGE_SIZES mechanism. Used by gc_inline.h. */
+ptr_t GC_generic_malloc_words_small(lw, k)
+register word lw;
+register int k;
+{
+register ptr_t op;
+register ptr_t *opp;
+DCL_LOCK_STATE;
+
+ DISABLE_SIGNALS();
+ LOCK();
+ opp = &(GC_obj_kinds[k].ok_freelist[lw]);
+ if( (op = *opp) == 0 ) {
+ if (!GC_is_initialized) {
+ GC_init_inner();
+ }
+ GC_clear_stack();
+ op = GC_allocobj(lw, k);
+ if (op == 0) goto out;
+ }
+ *opp = obj_link(op);
+ obj_link(op) = 0;
+ GC_words_allocd += lw;
+
+out:
+ UNLOCK();
+ ENABLE_SIGNALS();
+ return((ptr_t)op);
+}
+
+/* Allocate lb bytes of atomic (pointerfree) data */
+# ifdef __STDC__
+ extern_ptr_t GC_malloc_atomic(size_t lb)
+# else
+ extern_ptr_t GC_malloc_atomic(lb)
+ size_t lb;
+# endif
+{
+register ptr_t op;
+register ptr_t * opp;
+register word lw;
+DCL_LOCK_STATE;
+
+ if( SMALL_OBJ(lb) ) {
+# ifdef MERGE_SIZES
+ lw = GC_size_map[lb];
+# else
+ lw = ROUNDED_UP_WORDS(lb);
+# endif
+ opp = &(GC_aobjfreelist[lw]);
+ FASTLOCK();
+ if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
+ FASTUNLOCK();
+ return(GC_generic_malloc((word)lb, PTRFREE));
+ }
+ /* See above comment on signals. */
+ *opp = obj_link(op);
+ GC_words_allocd += lw;
+ FASTUNLOCK();
+ return((extern_ptr_t) op);
+ } else {
+ return((extern_ptr_t)
+ GC_generic_malloc((word)lb, PTRFREE));
+ }
+}
+
+/* Allocate lb bytes of composite (pointerful) data */
+# ifdef __STDC__
+ extern_ptr_t GC_malloc(size_t lb)
+# else
+ extern_ptr_t GC_malloc(lb)
+ size_t lb;
+# endif
+{
+register ptr_t op;
+register ptr_t *opp;
+register word lw;
+DCL_LOCK_STATE;
+
+ if( SMALL_OBJ(lb) ) {
+# ifdef MERGE_SIZES
+ lw = GC_size_map[lb];
+# else
+ lw = ROUNDED_UP_WORDS(lb);
+# endif
+ opp = &(GC_objfreelist[lw]);
+ FASTLOCK();
+ if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
+ FASTUNLOCK();
+ return(GC_generic_malloc((word)lb, NORMAL));
+ }
+ /* See above comment on signals. */
+ *opp = obj_link(op);
+ obj_link(op) = 0;
+ GC_words_allocd += lw;
+ FASTUNLOCK();
+ return((extern_ptr_t) op);
+ } else {
+ return((extern_ptr_t)
+ GC_generic_malloc((word)lb, NORMAL));
+ }
+}
+
+/* Allocate lb bytes of pointerful, traced, but not collectable data */
+# ifdef __STDC__
+ extern_ptr_t GC_malloc_uncollectable(size_t lb)
+# else
+ extern_ptr_t GC_malloc_uncollectable(lb)
+ size_t lb;
+# endif
+{
+register ptr_t op;
+register ptr_t *opp;
+register word lw;
+DCL_LOCK_STATE;
+
+ if( SMALL_OBJ(lb) ) {
+# ifdef MERGE_SIZES
+ lw = GC_size_map[lb];
+# else
+ lw = ROUNDED_UP_WORDS(lb);
+# endif
+ opp = &(GC_uobjfreelist[lw]);
+ FASTLOCK();
+ if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
+ /* See above comment on signals. */
+ *opp = obj_link(op);
+ obj_link(op) = 0;
+ GC_words_allocd += lw;
+ GC_set_mark_bit(op);
+ GC_non_gc_bytes += WORDS_TO_BYTES(lw);
+ FASTUNLOCK();
+ return((extern_ptr_t) op);
+ }
+ FASTUNLOCK();
+ op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
+ } else {
+ op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
+ }
+ /* We don't need the lock here, since we have an undisguised */
+ /* pointer. We do need to hold the lock while we adjust */
+ /* mark bits. */
+ {
+ register struct hblk * h;
+
+ h = HBLKPTR(op);
+ lw = HDR(h) -> hb_sz;
+
+ DISABLE_SIGNALS();
+ LOCK();
+ GC_set_mark_bit(op);
+ GC_non_gc_bytes += WORDS_TO_BYTES(lw);
+ UNLOCK();
+ ENABLE_SIGNALS();
+ return((extern_ptr_t) op);
+ }
+}
+
+extern_ptr_t GC_generic_or_special_malloc(lb,knd)
+word lb;
+int knd;
+{
+ switch(knd) {
+# ifdef STUBBORN_ALLOC
+ case STUBBORN:
+ return(GC_malloc_stubborn((size_t)lb));
+# endif
+ case UNCOLLECTABLE:
+ return(GC_malloc_uncollectable((size_t)lb));
+ default:
+ return(GC_generic_malloc(lb,knd));
+ }
+}
+
+
+/* Change the size of the block pointed to by p to contain at least */
+/* lb bytes. The object may be (and quite likely will be) moved. */
+/* The kind (e.g. atomic) is the same as that of the old. */
+/* Shrinking of large blocks is not implemented well. */
+# ifdef __STDC__
+ extern_ptr_t GC_realloc(extern_ptr_t p, size_t lb)
+# else
+ extern_ptr_t GC_realloc(p,lb)
+ extern_ptr_t p;
+ size_t lb;
+# endif
+{
+register struct hblk * h;
+register hdr * hhdr;
+register signed_word sz; /* Current size in bytes */
+register word orig_sz; /* Original sz in bytes */
+int obj_kind;
+
+ if (p == 0) return(GC_malloc(lb)); /* Required by ANSI */
+ h = HBLKPTR(p);
+ hhdr = HDR(h);
+ sz = hhdr -> hb_sz;
+ obj_kind = hhdr -> hb_obj_kind;
+ sz = WORDS_TO_BYTES(sz);
+ orig_sz = sz;
+
+ if (sz > WORDS_TO_BYTES(MAXOBJSZ)) {
+ /* Round it up to the next whole heap block */
+
+ sz = (sz+HDR_BYTES+HBLKSIZE-1)
+ & (~HBLKMASK);
+ sz -= HDR_BYTES;
+ hhdr -> hb_sz = BYTES_TO_WORDS(sz);
+ if (obj_kind == UNCOLLECTABLE) GC_non_gc_bytes += (sz - orig_sz);
+ /* Extra area is already cleared by allochblk. */
+ }
+ if (ADD_SLOP(lb) <= sz) {
+ if (lb >= (sz >> 1)) {
+# ifdef STUBBORN_ALLOC
+ if (obj_kind == STUBBORN) GC_change_stubborn(p);
+# endif
+ if (orig_sz > lb) {
+ /* Clear unneeded part of object to avoid bogus pointer */
+ /* tracing. */
+ /* Safe for stubborn objects. */
+ bzero(((char *)p) + lb, (int)(orig_sz - lb));
+ }
+ return(p);
+ } else {
+ /* shrink */
+ extern_ptr_t result =
+ GC_generic_or_special_malloc((word)lb, obj_kind);
+
+ if (result == 0) return(0);
+ /* Could also return original object. But this */
+ /* gives the client warning of imminent disaster. */
+ bcopy(p, result, (int)lb);
+ GC_free(p);
+ return(result);
+ }
+ } else {
+ /* grow */
+ extern_ptr_t result =
+ GC_generic_or_special_malloc((word)lb, obj_kind);
+
+ if (result == 0) return(0);
+ bcopy(p, result, (int)sz);
+ GC_free(p);
+ return(result);
+ }
+}
+
+/* Explicitly deallocate an object p. */
+# ifdef __STDC__
+ void GC_free(extern_ptr_t p)
+# else
+ void GC_free(p)
+ extern_ptr_t p;
+# endif
+{
+ register struct hblk *h;
+ register hdr *hhdr;
+ register signed_word sz;
+ register ptr_t * flh;
+ register int knd;
+ register struct obj_kind * ok;
+ DCL_LOCK_STATE;
+
+ if (p == 0) return;
+ /* Required by ANSI. It's not my fault ... */
+ h = HBLKPTR(p);
+ hhdr = HDR(h);
+ knd = hhdr -> hb_obj_kind;
+ sz = hhdr -> hb_sz;
+ ok = &GC_obj_kinds[knd];
+ if (sz <= MAXOBJSZ) {
+ LOCK();
+ GC_mem_freed += sz;
+ /* A signal here can make GC_mem_freed and GC_non_gc_bytes */
+ /* inconsistent. We claim this is benign. */
+ if (knd == UNCOLLECTABLE) GC_non_gc_bytes -= sz;
+ if (ok -> ok_init) {
+ bzero((char *)((word *)p + 1), (int)(WORDS_TO_BYTES(sz-1)));
+ }
+ flh = &(ok -> ok_freelist[sz]);
+ obj_link(p) = *flh;
+ *flh = (ptr_t)p;
+ UNLOCK();
+ } else {
+ DISABLE_SIGNALS();
+ LOCK();
+ GC_mem_freed += sz;
+ if (knd == UNCOLLECTABLE) GC_non_gc_bytes -= sz;
+ GC_freehblk(h);
+ UNLOCK();
+ ENABLE_SIGNALS();
+ }
+}
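From the client's side, the entry points defined above compose in the obvious
way.  An illustrative fragment (the structure and function names here are
hypothetical):

    # include "gc.h"

    struct pair { struct pair * cdr; char * tag; };

    void example_client()
    {
        register struct pair * c =
            (struct pair *)GC_malloc(sizeof (struct pair));
        register char * buf = (char *)GC_malloc_atomic(128);
				/* contents never scanned for pointers	*/

        c -> tag = buf;
        buf = (char *)GC_realloc((extern_ptr_t)buf, 256);
				/* object may move; old copy reclaimed	*/
        c -> tag = buf;
        GC_free(buf);		/* optional: an unreferenced object	*/
        c -> tag = 0;		/* would be reclaimed by the collector	*/
    }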
diff --git a/mark.c b/mark.c
index 96935fb4..b96c4fcd 100644
--- a/mark.c
+++ b/mark.c
@@ -9,15 +9,6 @@
* Permission is hereby granted to copy this garbage collector for any purpose,
* provided the above notices are retained on all copies.
*
- * This file contains the functions:
- * GC_mark() - Mark from the mark stack
- * GC_mark_reliable() - as above, but fix things up after
- * a mark stack overflow.
- * GC_mark_all(b,t) - Mark from everything in a range
- * GC_mark_all_stack(b,t) - Mark from everything in a range,
- * consider interior pointers as valid
- * GC_remark() - Mark from all marked objects. Used
- * only if we had to drop something.
*/
@@ -29,20 +20,316 @@
/* multiple of HBLKSIZE. */
/*
- * Limits of stack for GC_mark routine. Set by caller to GC_mark.
- * All items between GC_mark_stack_top and GC_mark_stack_bottom-1 still need
- * to be marked.
+ * Limits of stack for GC_mark routine.
+ * All ranges between GC_mark_stack(incl.) and GC_mark_stack_top(incl.) still
+ * need to be marked from.
*/
-
+
+word GC_n_rescuing_pages; /* Number of dirty pages we marked from */
+ /* excludes ptrfree pages, etc. */
+
mse * GC_mark_stack;
word GC_mark_stack_size = 0;
mse * GC_mark_stack_top;
-static bool dropped_some = FALSE;
- /* We ran out of space and were forced */
- /* to drop some pointers during marking */
+static struct hblk * scan_ptr;
+
+
+typedef int mark_state_t; /* Current state of marking, as follows:*/
+ /* Used to remember where we are during */
+ /* concurrent marking. */
+
+ /* We say something is dirty if it was */
+ /* written since the last time we */
+ /* retrieved dirty bits. We say it's */
+ /* grungy if it was marked dirty in the */
+ /* last set of bits we retrieved. */
+
+ /* Invariant I: all roots and marked */
+ /* objects p are either dirty, or point */
+	    	    	    	    	/* to objects q that are either marked or */
+ /* a pointer to q appears in a range */
+ /* on the mark stack. */
+
+# define MS_NONE 0 /* No marking in progress. I holds. */
+ /* Mark stack is empty. */
+
+# define MS_PUSH_RESCUERS 1 /* Rescuing objects are currently */
+ /* being pushed. I holds, except */
+ /* that grungy roots may point to */
+ /* unmarked objects, as may marked */
+ /* grungy objects above scan_ptr. */
+
+# define MS_PUSH_UNCOLLECTABLE 2
+ /* I holds, except that marked */
+ /* uncollectable objects above scan_ptr */
+ /* may point to unmarked objects. */
+ /* Roots may point to unmarked objects */
+
+# define MS_ROOTS_PUSHED 3 /* I holds, mark stack may be nonempty */
+
+# define MS_PARTIALLY_INVALID 4 /* I may not hold, e.g. because of M.S. */
+ /* overflow. However marked heap */
+ /* objects below scan_ptr point to */
+ /* marked or stacked objects. */
+
+# define MS_INVALID 5 /* I may not hold. */
+
+mark_state_t GC_mark_state = MS_NONE;
+
+bool GC_objects_are_marked = FALSE; /* Are there collectable marked */
+ /* objects in the heap? */
+
+bool GC_collection_in_progress()
+{
+ return(GC_mark_state != MS_NONE);
+}
+
+/* clear all mark bits in the header */
+void GC_clear_hdr_marks(hhdr)
+register hdr * hhdr;
+{
+ bzero((char *)(hhdr -> hb_marks), (int)(MARK_BITS_SZ*sizeof(word)));
+}
+
+/*
+ * Clear all mark bits associated with block h.
+ */
+/*ARGSUSED*/
+static void clear_marks_for_block(h, dummy)
+struct hblk *h;
+word dummy;
+{
+ register hdr * hhdr = HDR(h);
+
+ if (hhdr -> hb_obj_kind == UNCOLLECTABLE) return;
+ /* Mark bit for these is cleared only once the object is */
+ /* explicitly deallocated. This either frees the block, or */
+ /* the bit is cleared once the object is on the free list. */
+ GC_clear_hdr_marks(hhdr);
+}
+
+/* Slow but general routines for setting/clearing/asking about mark bits */
+void GC_set_mark_bit(p)
+ptr_t p;
+{
+ register struct hblk *h = HBLKPTR(p);
+ register hdr * hhdr = HDR(h);
+ register int word_no = (word *)p - (word *)h;
+
+ set_mark_bit_from_hdr(hhdr, word_no);
+}
+
+void GC_clear_mark_bit(p)
+ptr_t p;
+{
+ register struct hblk *h = HBLKPTR(p);
+ register hdr * hhdr = HDR(h);
+ register int word_no = (word *)p - (word *)h;
+
+ clear_mark_bit_from_hdr(hhdr, word_no);
+}
+
+bool GC_is_marked(p)
+ptr_t p;
+{
+ register struct hblk *h = HBLKPTR(p);
+ register hdr * hhdr = HDR(h);
+ register int word_no = (word *)p - (word *)h;
+
+ return(mark_bit_from_hdr(hhdr, word_no));
+}
+
+
+/*
+ * Clear mark bits in all allocated heap blocks. This invalidates
+ * the marker invariant, and sets GC_mark_state to reflect this.
+ * (This implicitly starts marking to reestablish the invariant.)
+ */
+void GC_clear_marks()
+{
+ GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
+ GC_objects_are_marked = FALSE;
+ GC_mark_state = MS_INVALID;
+ scan_ptr = 0;
+# ifdef GATHERSTATS
+ /* Counters reflect currently marked objects: reset here */
+ GC_composite_in_use = 0;
+ GC_atomic_in_use = 0;
+# endif
+
+}
+
+
+/*
+ * Push some dummy entries onto bottom of mark stack to allow
+ * marker to operate in bigger chunks between bounds checks.
+ * This is a pretty extreme performance hack ...
+ */
+void GC_prime_marker()
+{
+ register int i;
+ static word dummy = 0;
+
+ for (i = 0; i < INITIAL_MARK_STACK_SIZE/64; i++) {
+ GC_push_all((ptr_t)(&dummy), (ptr_t)(&dummy + 1));
+ }
+}
+
+/* Initiate full marking. */
+void GC_initiate_full()
+{
+# ifdef PRINTSTATS
+ GC_printf2("Full mark for collection %lu after %ld allocd bytes\n",
+ (unsigned long) GC_gc_no+1,
+ (long)WORDS_TO_BYTES(GC_words_allocd));
+# endif
+ GC_promote_black_lists();
+ GC_reclaim_or_delete_all();
+ GC_clear_marks();
+ GC_read_dirty();
+# ifdef STUBBORN_ALLOC
+ GC_read_changed();
+# endif
+# ifdef CHECKSUMS
+ {
+ extern void GC_check_dirty();
+
+ GC_check_dirty();
+ }
+# endif
+# ifdef GATHERSTATS
+ GC_n_rescuing_pages = 0;
+# endif
+ GC_prime_marker();
+}
+
+/* Initiate partial marking. */
+/*ARGSUSED*/
+void GC_initiate_partial(gc_no)
+word gc_no;
+{
+# ifdef PRINTSTATS
+ if (gc_no > GC_gc_no) {
+ GC_printf2("Partial mark for collection %lu after %ld allocd bytes\n",
+ (unsigned long) gc_no,
+ (long)WORDS_TO_BYTES(GC_words_allocd));
+ } /* else the world is stopped, and we just printed this */
+# endif
+ if (GC_incremental) GC_read_dirty();
+# ifdef STUBBORN_ALLOC
+ GC_read_changed();
+# endif
+# ifdef CHECKSUMS
+ {
+ extern void GC_check_dirty();
+
+ if (GC_incremental) GC_check_dirty();
+ }
+# endif
+# ifdef GATHERSTATS
+ GC_n_rescuing_pages = 0;
+# endif
+ if (GC_mark_state == MS_NONE) {
+ GC_mark_state = MS_PUSH_RESCUERS;
+ } else if (GC_mark_state != MS_INVALID) {
+ ABORT("unexpected state");
+ } /* else this is really a full collection, and mark */
+ /* bits are invalid. */
+ scan_ptr = 0;
+ GC_prime_marker();
+}
+
+
+static void alloc_mark_stack();
+
+/* Perform a small amount of marking. */
+/* We try to touch roughly a page of memory. */
+/* Return TRUE if we just finished a mark phase. */
+bool GC_mark_some()
+{
+ switch(GC_mark_state) {
+ case MS_NONE:
+ return(FALSE);
+
+ case MS_PUSH_RESCUERS:
+ if (GC_mark_stack_top
+ >= GC_mark_stack + INITIAL_MARK_STACK_SIZE/4) {
+ GC_mark_from_mark_stack();
+ return(FALSE);
+ } else {
+ scan_ptr = GC_push_next_marked_dirty(scan_ptr);
+ if (scan_ptr == 0) {
+# ifdef PRINTSTATS
+ GC_printf1("Marked from %lu dirty pages\n",
+ (unsigned long)GC_n_rescuing_pages);
+# endif
+ GC_push_roots(FALSE);
+ if (GC_mark_state != MS_INVALID) {
+ GC_mark_state = MS_ROOTS_PUSHED;
+ }
+ }
+ }
+ return(FALSE);
+
+ case MS_PUSH_UNCOLLECTABLE:
+ if (GC_mark_stack_top
+ >= GC_mark_stack + INITIAL_MARK_STACK_SIZE/4) {
+ GC_mark_from_mark_stack();
+ return(FALSE);
+ } else {
+ scan_ptr = GC_push_next_marked_uncollectable(scan_ptr);
+ if (scan_ptr == 0) {
+ GC_push_roots(TRUE);
+ if (GC_mark_state != MS_INVALID) {
+ GC_mark_state = MS_ROOTS_PUSHED;
+ }
+ }
+ }
+ return(FALSE);
+
+ case MS_ROOTS_PUSHED:
+ if (GC_mark_stack_top >= GC_mark_stack) {
+ GC_mark_from_mark_stack();
+ return(FALSE);
+ } else {
+ GC_mark_state = MS_NONE;
+ return(TRUE);
+ }
+
+ case MS_INVALID:
+ case MS_PARTIALLY_INVALID:
+ if (!GC_objects_are_marked) {
+ GC_mark_state = MS_PUSH_UNCOLLECTABLE;
+ return(FALSE);
+ }
+ if (GC_mark_stack_top >= GC_mark_stack) {
+ GC_mark_from_mark_stack();
+ return(FALSE);
+ }
+ if (scan_ptr == 0 && GC_mark_state == MS_INVALID) {
+ alloc_mark_stack(2*GC_mark_stack_size);
+# ifdef PRINTSTATS
+ GC_printf1("Grew mark stack to %lu frames\n",
+ (unsigned long) GC_mark_stack_size);
+# endif
+ GC_mark_state = MS_PARTIALLY_INVALID;
+ }
+ scan_ptr = GC_push_next_marked(scan_ptr);
+ if (scan_ptr == 0 && GC_mark_state == MS_PARTIALLY_INVALID) {
+ GC_push_roots(TRUE);
+ if (GC_mark_state != MS_INVALID) {
+ GC_mark_state = MS_ROOTS_PUSHED;
+ }
+ }
+ return(FALSE);
+ default:
+ ABORT("GC_mark_some: bad state");
+ return(FALSE);
+ }
+}
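Taken together, GC_clear_marks, the initiation routines, and GC_mark_some
implement a small state machine.  A digest of the transitions as coded above
(an illustrative summary, not a comment from the source):

    /* MS_NONE -> MS_PUSH_RESCUERS	    GC_initiate_partial		*/
    /* MS_PUSH_RESCUERS -> MS_ROOTS_PUSHED				*/
    /*			all dirty pages rescanned, then roots pushed	*/
    /* MS_PUSH_UNCOLLECTABLE -> MS_ROOTS_PUSHED				*/
    /*			marked uncollectable blocks rescanned		*/
    /* MS_ROOTS_PUSHED -> MS_NONE	    mark stack drained: done	*/
    /* (any state) -> MS_INVALID	    GC_clear_marks, or mark	*/
    /*					    stack overflow		*/
    /* MS_INVALID -> MS_PUSH_UNCOLLECTABLE  no objects marked yet	*/
    /* MS_INVALID -> MS_PARTIALLY_INVALID   stack regrown; marked	*/
    /*					    blocks rescanned		*/
    /* MS_PARTIALLY_INVALID -> MS_ROOTS_PUSHED  rescan complete	*/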
/* Mark procedure for objects that may contain arbitrary pointers. */
/* Msp is the current mark stack pointer. Msl limits the stack. */
@@ -59,8 +346,12 @@ register mse * msp, * msl;
msp++;
/* Push the contents of the object on the mark stack. */
if (msp >= msl) {
- dropped_some = TRUE;
- return(msp-1);
+ GC_mark_state = MS_INVALID;
+# ifdef PRINTSTATS
+ GC_printf1("Mark stack overflow; current size = %lu entries\n",
+ GC_mark_stack_size);
+# endif
+ return(msp-INITIAL_MARK_STACK_SIZE/8);
}
msp -> mse_start = addr;
msp -> mse_end = addr + sz;
@@ -82,19 +373,31 @@ register mse * msp, * msl;
# endif
return(msp);
}
-
+
+
+bool GC_mark_stack_empty()
+{
+ return(GC_mark_stack_top < GC_mark_stack);
+}
/*
- * Mark all objects pointed to by the regions described by
+ * Mark objects pointed to by the regions described by
* mark stack entries between GC_mark_stack and GC_mark_stack_top,
* inclusive. Assumes the upper limit of a mark stack entry
- * is never 0.
+ * is never 0. A mark stack entry never has size 0.
+ * We try to traverse on the order of a hblk of memory before we return.
+ * Caller is responsible for calling this until the mark stack is empty.
*/
-void GC_mark()
+void GC_mark_from_mark_stack()
{
mse * GC_mark_stack_reg = GC_mark_stack;
mse * GC_mark_stack_top_reg = GC_mark_stack_top;
mse * mark_stack_limit = &(GC_mark_stack[GC_mark_stack_size]);
+ int credit = HBLKSIZE; /* Remaining credit for marking work */
+ register int safe_credit; /* Amount of credit we can safely use */
+ /* before checking stack bounds. */
+	    	    	    	    /* Gross hack to save a couple of	*/
+ /* instructions in the loop. */
register word * current_p; /* Pointer to current candidate ptr. */
register word current; /* Candidate pointer. */
register word * limit; /* (Incl) limit of current candidate */
@@ -103,13 +406,24 @@ void GC_mark()
register ptr_t least_ha = GC_least_plausible_heap_addr;
# define SPLIT_RANGE_WORDS 128
- while (GC_mark_stack_top_reg >= GC_mark_stack_reg) {
+ GC_objects_are_marked = TRUE;
+ while (GC_mark_stack_top_reg >= GC_mark_stack_reg && credit > 0) {
+ safe_credit = WORDS_TO_BYTES(GC_mark_stack_top_reg - GC_mark_stack_reg + 1);
+ if (safe_credit > credit) {
+ safe_credit = credit;
+ credit = 0;
+ } else {
+ credit -= safe_credit;
+ }
+ while (safe_credit > 0) {
+ /* Stack must be nonempty */
register int displ; /* Displacement in block; first bytes, then words */
register hdr * hhdr;
register map_entry_type map_entry;
current_p = GC_mark_stack_top_reg -> mse_start;
limit = GC_mark_stack_top_reg -> mse_end;
+ safe_credit -= (ptr_t)limit - (ptr_t)current_p;
if (limit - current_p > SPLIT_RANGE_WORDS) {
/* Process part of the range to avoid pushing too much on the */
/* stack. */
@@ -129,7 +443,7 @@ void GC_mark()
if ((ptr_t)current < least_ha) continue;
if ((ptr_t)current >= greatest_ha) continue;
- hhdr = HDR(current);
+ GET_HDR(current,hhdr);
if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
# ifdef ALL_INTERIOR_POINTERS
if (hhdr != 0) {
@@ -168,13 +482,12 @@ void GC_mark()
{
register word * mark_word_addr = hhdr -> hb_marks + divWORDSZ(displ);
register word mark_word = *mark_word_addr;
- register word mark_bit = 1 << modWORDSZ(displ);
+ register word mark_bit = (word)1 << modWORDSZ(displ);
if (mark_word & mark_bit) {
/* Mark bit is already set */
continue;
}
-
*mark_word_addr = mark_word | mark_bit;
}
@@ -182,6 +495,7 @@ void GC_mark()
(* (hhdr -> hb_mark_proc))((word *)(HBLKPTR(current)) + displ, hhdr,
GC_mark_stack_top_reg, mark_stack_limit);
}
+ }
}
GC_mark_stack_top = GC_mark_stack_top_reg;
}
@@ -191,7 +505,7 @@ void GC_mark()
static void alloc_mark_stack(n)
word n;
{
- mse * new_stack = (mse *)GET_MEM(n * sizeof(struct ms_entry));
+ mse * new_stack = (mse *)GC_scratch_alloc(n * sizeof(struct ms_entry));
if (GC_mark_stack_size != 0) {
if (new_stack != 0) {
@@ -217,143 +531,483 @@ void GC_mark_init()
alloc_mark_stack(INITIAL_MARK_STACK_SIZE);
}
-/* Identical to GC_mark, but guarantee that dropped_some is false */
-/* when we finish. */
-void GC_mark_reliable()
+/*
+ * Push all locations between b and t onto the mark stack.
+ * b is the first location to be checked. t is one past the last
+ * location to be checked.
+ * Should only be used if there is no possibility of mark stack
+ * overflow.
+ */
+void GC_push_all(bottom, top)
+ptr_t bottom;
+ptr_t top;
{
- dropped_some = FALSE;
- GC_mark();
- while (dropped_some) {
- dropped_some = FALSE;
-# ifdef PRINTSTATS
- GC_printf1("Mark stack overflow; current size = %lu entries\n",
- GC_mark_stack_size);
-# endif
- alloc_mark_stack(2*GC_mark_stack_size);
- GC_remark();
+ bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
+ top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
+
+ if (top == 0 || bottom == top) return;
+ GC_mark_stack_top++;
+ if (GC_mark_stack_top >= GC_mark_stack + GC_mark_stack_size) {
+ ABORT("unexpected mark stack overflow");
}
+ GC_mark_stack_top -> mse_start = (word *)bottom;
+ GC_mark_stack_top -> mse_end = (word *)top;
}
-/*********************************************************************/
-/* Mark all locations reachable via pointers located between b and t */
-/* b is the first location to be checked. t is one past the last */
-/* location to be checked. */
-/*********************************************************************/
-void GC_mark_all(bottom, top)
+/*
+ * Analogous to the above, but push only those pages that may have been
+ * dirtied.
+ * Should not overflow mark stack.
+ */
+void GC_push_dirty(bottom, top)
ptr_t bottom;
ptr_t top;
{
- word * b = (word *)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
- word * t = (word *)(((long) top) & ~(ALIGNMENT-1));
-
- if (GC_mark_stack_top != GC_mark_stack-1) {
- ABORT("GC_mark_all: bad mark stack\n");
+ register struct hblk * h;
+
+ bottom = (ptr_t)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
+ top = (ptr_t)(((long) top) & ~(ALIGNMENT-1));
+
+ if (top == 0 || bottom == top) return;
+ h = HBLKPTR(bottom + HBLKSIZE);
+ if (top <= (ptr_t) h) {
+ if (GC_page_was_dirty(h-1)) {
+ GC_push_all(bottom, top);
+ }
+ return;
+ }
+ if (GC_page_was_dirty(h-1)) {
+ GC_push_all(bottom, (ptr_t)h);
+ }
+ while ((ptr_t)(h+1) <= top) {
+ if (GC_page_was_dirty(h)) {
+ GC_mark_stack_top++;
+ GC_mark_stack_top -> mse_start = (word *)h;
+ h++;
+ if (GC_mark_stack_top - GC_mark_stack
+ > 3 * GC_mark_stack_size / 4) {
+ /* Danger of mark stack overflow */
+ GC_mark_stack_top -> mse_end = (word *)top;
+ return;
+ } else {
+ GC_mark_stack_top -> mse_end = (word *)h;
+ }
+ } else {
+ h++;
+ }
+ }
+ if ((ptr_t)h != top) {
+ if (GC_page_was_dirty(h)) {
+ GC_push_all((ptr_t)h, top);
+ }
+ }
+ if (GC_mark_stack_top >= GC_mark_stack + GC_mark_stack_size) {
+ ABORT("unexpected mark stack overflow");
}
- if (top == 0) return;
- GC_mark_stack_top++;
- GC_mark_stack_top -> mse_start = b;
- GC_mark_stack_top -> mse_end = t;
- GC_mark_reliable();
}
-word * GC_buffer; /* Buffer for stack marking */
-# define BUFSIZE 1024
+void GC_push_conditional(bottom, top, all)
+ptr_t bottom;
+ptr_t top;
+{
+ if (all) {
+ GC_push_all(bottom, top);
+ } else {
+ GC_push_dirty(bottom, top);
+ }
+}
+
+/*
+ * Push a single value onto mark stack. Mark from the object pointed to by p.
+ * GC_push_one is normally called by GC_push_regs, and thus must be defined.
+ * P is considered valid even if it is an interior pointer.
+ * Previously marked objects are not pushed. Hence we make progress even
+ * if the mark stack overflows.
+ */
+# define GC_PUSH_ONE_STACK(p) \
+ if ((ptr_t)(p) >= GC_least_plausible_heap_addr \
+ && (ptr_t)(p) < GC_greatest_plausible_heap_addr) { \
+ GC_push_one_checked(p,TRUE); \
+ }
/*
- * A version of GC_mark_all that treats all interior pointers as valid
+ * As above, but interior pointer recognition is as for
+ * normal heap pointers.
*/
-void GC_mark_all_stack(bottom, top)
+# ifdef ALL_INTERIOR_POINTERS
+# define AIP TRUE
+# else
+# define AIP FALSE
+# endif
+# define GC_PUSH_ONE_HEAP(p) \
+ if ((ptr_t)(p) >= GC_least_plausible_heap_addr \
+ && (ptr_t)(p) < GC_greatest_plausible_heap_addr) { \
+ GC_push_one_checked(p,AIP); \
+ }
+
+void GC_push_one(p)
+word p;
+{
+ GC_PUSH_ONE_STACK(p);
+}
+
+# ifdef __STDC__
+# define BASE(p) (word)GC_base((void *)(p))
+# else
+# define BASE(p) (word)GC_base((char *)(p))
+# endif
+
+/* As above, but argument passed preliminary test. */
+void GC_push_one_checked(p, interior_ptrs)
+register word p;
+register bool interior_ptrs;
+{
+ register word r;
+ register hdr * hhdr;
+ register int displ;
+
+ GET_HDR(p, hhdr);
+ if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
+ if (hhdr != 0 && interior_ptrs) {
+ r = BASE(p);
+ hhdr = HDR(r);
+ displ = BYTES_TO_WORDS(HBLKDISPL(r));
+ } else {
+ hhdr = 0;
+ }
+ } else {
+ register map_entry_type map_entry;
+
+ displ = HBLKDISPL(p);
+ map_entry = MAP_ENTRY((hhdr -> hb_map), displ);
+ if (map_entry == OBJ_INVALID) {
+ if (interior_ptrs) {
+ r = BASE(p);
+ displ = BYTES_TO_WORDS(HBLKDISPL(r));
+ if (r == 0) hhdr = 0;
+ } else {
+ hhdr = 0;
+ }
+ } else {
+ displ = BYTES_TO_WORDS(displ);
+ displ -= map_entry;
+ r = (word)((word *)(HBLKPTR(p)) + displ);
+ }
+ }
+ /* If hhdr != 0 then r == GC_base(p), only we did it faster. */
+ /* displ is the word index within the block. */
+ if (hhdr == 0) {
+ if (interior_ptrs) {
+ GC_add_to_black_list_stack(p);
+ } else {
+ GC_ADD_TO_BLACK_LIST_NORMAL(p);
+ }
+ } else {
+ if (!mark_bit_from_hdr(hhdr, displ)) {
+ set_mark_bit_from_hdr(hhdr, displ);
+ GC_mark_stack_top =
+ (* (hhdr -> hb_mark_proc))((word *)r,
+ hhdr,
+ GC_mark_stack_top,
+ &(GC_mark_stack[GC_mark_stack_size]));
+ }
+ }
+}
+
+/*
+ * A version of GC_push_all that treats all interior pointers as valid
+ */
+void GC_push_all_stack(bottom, top)
ptr_t bottom;
ptr_t top;
{
# ifdef ALL_INTERIOR_POINTERS
- GC_mark_all(bottom, top);
+ GC_push_all(bottom, top);
# else
word * b = (word *)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
word * t = (word *)(((long) top) & ~(ALIGNMENT-1));
register word *p;
register word q;
- register word r;
register word *lim;
- word * bufptr;
- word * limit;
register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
register ptr_t least_ha = GC_least_plausible_heap_addr;
+# define GC_greatest_plausible_heap_addr greatest_ha
+# define GC_least_plausible_heap_addr least_ha
if (top == 0) return;
- /* Allocate GC_buffer someplace where collector won't accidentally */
- /* see old sections. */
- if (GC_buffer == 0) {
- GC_buffer = (word *)GC_scratch_alloc((word)(BUFSIZE * sizeof(word)));
- }
- bufptr = GC_buffer;
- limit = GC_buffer+BUFSIZE;
- /* check all pointers in range and put in buffer if they appear */
- /* to be valid. */
- lim = t - 1 /* longword */;
- for (p = b; p <= lim; p = (word *)(((char *)p) + ALIGNMENT)) {
+    /* check all pointers in range and push them if they appear */
+ /* to be valid. */
+ lim = t - 1 /* longword */;
+ for (p = b; p <= lim; p = (word *)(((char *)p) + ALIGNMENT)) {
q = *p;
- if ((ptr_t)q < least_ha
- || (ptr_t)q >= greatest_ha) {
- continue;
+ GC_PUSH_ONE_STACK(q);
+ }
+# undef GC_greatest_plausible_heap_addr
+# undef GC_least_plausible_heap_addr
+# endif
+}
+
+/* Push all objects reachable from marked objects in the given block */
+/* of size 1 objects. */
+void GC_push_marked1(h, hhdr)
+struct hblk *h;
+register hdr * hhdr;
+{
+ word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
+ register word *p;
+ word *plim;
+ register int i;
+ register word q;
+ register word mark_word;
+ register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
+ register ptr_t least_ha = GC_least_plausible_heap_addr;
+# define GC_greatest_plausible_heap_addr greatest_ha
+# define GC_least_plausible_heap_addr least_ha
+
+ p = (word *)(h->hb_body);
+ plim = (word *)(((word)h) + HBLKSIZE);
+
+ /* go through all words in block */
+ while( p < plim ) {
+ mark_word = *mark_word_addr++;
+ i = 0;
+ while(mark_word != 0) {
+ if (mark_word & 1) {
+ q = p[i];
+ GC_PUSH_ONE_HEAP(q);
+ }
+ i++;
+ mark_word >>= 1;
+ }
+ p += WORDSZ;
}
-# ifdef __STDC__
- r = (word)GC_base((void *)q);
-# else
- r = (word)GC_base((char *)q);
-# endif
- if (r == 0) {
- GC_add_to_black_list_stack(*p);
- } else {
- *(bufptr++) = r;
- if (bufptr == limit) {
- GC_mark_all((ptr_t)GC_buffer, (ptr_t)limit);
- bufptr = GC_buffer;
+# undef GC_greatest_plausible_heap_addr
+# undef GC_least_plausible_heap_addr
+}
+
+
+/* Push all objects reachable from marked objects in the given block */
+/* of size 2 objects. */
+void GC_push_marked2(h, hhdr)
+struct hblk *h;
+register hdr * hhdr;
+{
+ word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
+ register word *p;
+ word *plim;
+ register int i;
+ register word q;
+ register word mark_word;
+ register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
+ register ptr_t least_ha = GC_least_plausible_heap_addr;
+# define GC_greatest_plausible_heap_addr greatest_ha
+# define GC_least_plausible_heap_addr least_ha
+
+ p = (word *)(h->hb_body);
+ plim = (word *)(((word)h) + HBLKSIZE);
+
+ /* go through all words in block */
+ while( p < plim ) {
+ mark_word = *mark_word_addr++;
+ i = 0;
+ while(mark_word != 0) {
+ if (mark_word & 1) {
+ q = p[i];
+ GC_PUSH_ONE_HEAP(q);
+ q = p[i+1];
+ GC_PUSH_ONE_HEAP(q);
+ }
+ i += 2;
+ mark_word >>= 2;
}
+ p += WORDSZ;
}
- }
- if (bufptr != GC_buffer) GC_mark_all((ptr_t)GC_buffer, (ptr_t)bufptr);
-# endif
+# undef GC_greatest_plausible_heap_addr
+# undef GC_least_plausible_heap_addr
}
-/* Mark all objects reachable from marked objects in the given block */
-/*ARGSUSED*/
-static void remark_block(h, dummy)
+/* Push all objects reachable from marked objects in the given block */
+/* of size 4 objects. */
+/* There is a risk of mark stack overflow here. But we handle that. */
+/* And only unmarked objects get pushed, so it's not very likely. */
+void GC_push_marked4(h, hhdr)
struct hblk *h;
-word dummy;
+register hdr * hhdr;
+{
+ word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
+ register word *p;
+ word *plim;
+ register int i;
+ register word q;
+ register word mark_word;
+ register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
+ register ptr_t least_ha = GC_least_plausible_heap_addr;
+# define GC_greatest_plausible_heap_addr greatest_ha
+# define GC_least_plausible_heap_addr least_ha
+
+ p = (word *)(h->hb_body);
+ plim = (word *)(((word)h) + HBLKSIZE);
+
+ /* go through all words in block */
+ while( p < plim ) {
+ mark_word = *mark_word_addr++;
+ i = 0;
+ while(mark_word != 0) {
+ if (mark_word & 1) {
+ q = p[i];
+ GC_PUSH_ONE_HEAP(q);
+ q = p[i+1];
+ GC_PUSH_ONE_HEAP(q);
+ q = p[i+2];
+ GC_PUSH_ONE_HEAP(q);
+ q = p[i+3];
+ GC_PUSH_ONE_HEAP(q);
+ }
+ i += 4;
+ mark_word >>= 4;
+ }
+ p += WORDSZ;
+ }
+# undef GC_greatest_plausible_heap_addr
+# undef GC_least_plausible_heap_addr
+}
+
+
+
+/* Push all objects reachable from marked objects in the given block */
+void GC_push_marked(h, hhdr)
+struct hblk *h;
+register hdr * hhdr;
{
- register hdr * hhdr = HDR(h);
register int sz = hhdr -> hb_sz;
register word * p;
register int word_no;
register word * lim;
- register mse * GC_mark_stack_top_reg = GC_mark_stack_top;
+ register mse * GC_mark_stack_top_reg;
+ register mse * mark_stack_limit = &(GC_mark_stack[GC_mark_stack_size]);
- if (hhdr -> hb_obj_kind == PTRFREE) return;
+ /* Some quick shortcuts: */
+ if (hhdr -> hb_obj_kind == PTRFREE) return;
+ if (GC_block_empty(hhdr)/* nothing marked */) return;
+# ifdef GATHERSTATS
+ GC_n_rescuing_pages++;
+# endif
if (sz > MAXOBJSZ) {
lim = (word *)(h + 1);
} else {
lim = (word *)(h + 1) - sz;
}
- for (p = (word *)h + HDR_WORDS, word_no = HDR_WORDS; p <= lim;
+ switch(sz) {
+ case 1:
+ GC_push_marked1(h, hhdr);
+ break;
+ case 2:
+ GC_push_marked2(h, hhdr);
+ break;
+ case 4:
+ GC_push_marked4(h, hhdr);
+ break;
+ default:
+ GC_mark_stack_top_reg = GC_mark_stack_top;
+ for (p = (word *)h + HDR_WORDS, word_no = HDR_WORDS; p <= lim;
p += sz, word_no += sz) {
+ /* This needs manual optimization: */
if (mark_bit_from_hdr(hhdr, word_no)) {
/* Mark from fields inside the object */
- GC_mark_stack_top_reg++;
- GC_mark_stack_top_reg -> mse_start = p;
- GC_mark_stack_top_reg -> mse_end = p + sz;
+ GC_mark_stack_top_reg =
+ (* (hhdr -> hb_mark_proc))((word *)p,
+ hhdr,
+ GC_mark_stack_top_reg,
+ mark_stack_limit);
+# ifdef GATHERSTATS
+ /* Subtract this object from total, since it was */
+ /* added in twice. */
+ GC_composite_in_use -= sz;
+# endif
}
+ }
+ GC_mark_stack_top = GC_mark_stack_top_reg;
}
- GC_mark_stack_top = GC_mark_stack_top_reg;
- GC_mark();
}
-/*
- * Traverse the heap. Mark all objects reachable from marked objects.
- */
-void GC_remark()
+/* Test whether any page in the given block is dirty */
+bool GC_block_was_dirty(h, hhdr)
+struct hblk *h;
+register hdr * hhdr;
+{
+ register int sz = hhdr -> hb_sz;
+
+ if (sz < MAXOBJSZ) {
+ return(GC_page_was_dirty(h));
+ } else {
+ register ptr_t p = (ptr_t)h;
+ sz += HDR_WORDS;
+ sz = WORDS_TO_BYTES(sz);
+ while (p < (ptr_t)h + sz) {
+ if (GC_page_was_dirty((struct hblk *)p)) return(TRUE);
+ p += HBLKSIZE;
+ }
+ return(FALSE);
+ }
+}
+
+/* Like GC_push_marked, but find the next valid block starting at or */
+/* after h, push its marked objects, and return the following block. */
+struct hblk * GC_push_next_marked(h)
+struct hblk *h;
+{
+ register hdr * hhdr;
+
+ h = GC_next_block(h);
+ if (h == 0) return(0);
+ hhdr = HDR(h);
+ GC_push_marked(h, hhdr);
+ return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
+}
+
+/* Identical to above, but mark only from dirty pages */
+struct hblk * GC_push_next_marked_dirty(h)
+struct hblk *h;
{
- GC_apply_to_all_blocks(remark_block, 0);
+ register hdr * hhdr = HDR(h);
+
+ if (!GC_incremental) { ABORT("dirty bits not set up"); }
+ for (;;) {
+ h = GC_next_block(h);
+ if (h == 0) return(0);
+ hhdr = HDR(h);
+# ifdef STUBBORN_ALLOC
+ if (hhdr -> hb_obj_kind == STUBBORN) {
+ if (GC_page_was_changed(h) && GC_block_was_dirty(h, hhdr)) {
+ break;
+ }
+ } else {
+ if (GC_block_was_dirty(h, hhdr)) break;
+ }
+# else
+ if (GC_block_was_dirty(h, hhdr)) break;
+# endif
+ h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
+ }
+ GC_push_marked(h, hhdr);
+ return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
+}
+
+/* Similar to above, but for uncollectable pages. Needed since we */
+/* do not clear marks for such pages, even for full collections. */
+struct hblk * GC_push_next_marked_uncollectable(h)
+struct hblk *h;
+{
+ register hdr * hhdr = HDR(h);
+
+ for (;;) {
+ h = GC_next_block(h);
+ if (h == 0) return(0);
+ hhdr = HDR(h);
+ if (hhdr -> hb_obj_kind == UNCOLLECTABLE) break;
+ h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
+ }
+ GC_push_marked(h, hhdr);
+ return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
diff --git a/mark_roots.c b/mark_roots.c
index 132d59e3..ef8938f0 100644
--- a/mark_roots.c
+++ b/mark_roots.c
@@ -3,9 +3,9 @@
# ifdef PCR
# define MAX_ROOT_SETS 1024
-# include "pcr/il/PCR_IL.h"
-# include "pcr/th/PCR_ThCtl.h"
-# include "pcr/mm/PCR_MM.h"
+# include "il/PCR_IL.h"
+# include "th/PCR_ThCtl.h"
+# include "mm/PCR_MM.h"
# else
# define MAX_ROOT_SETS 64
# endif
@@ -47,15 +47,16 @@ char * addr;
return(result);
}
-/* Is a range starting at addr already in the table? */
-static bool roots_present(b, e)
-char *b, *e;
+/* Is a range starting at b already in the table? If so return a */
+/* pointer to it, else NIL. */
+static struct roots * roots_present(b)
+char *b;
{
register int h = rt_hash(b);
register struct roots *p = root_index[h];
while (p != 0) {
- if (p -> r_start == (ptr_t)b && p -> r_end >= (ptr_t)e) return(TRUE);
+ if (p -> r_start == (ptr_t)b) return(p);
p = p -> r_next;
}
return(FALSE);
@@ -94,6 +95,8 @@ char * b; char * e;
void GC_add_roots_inner(b, e)
char * b; char * e;
{
+ struct roots * old;
+
/* We exclude GC data structures from root sets. It's usually safe */
/* to mark from those, but it is a waste of time. */
if ( (ptr_t)b < beginGC_arrays && (ptr_t)e > beginGC_arrays) {
@@ -107,7 +110,14 @@ char * b; char * e;
} else if ((ptr_t)b < endGC_arrays && (ptr_t)e > endGC_arrays) {
b = (char *)endGC_arrays;
}
- if (roots_present(b,e)) return;
+ old = roots_present(b);
+ if (old != 0) {
+ if ((ptr_t)e <= old -> r_end) /* already there */ return;
+ /* else extend */
+ GC_root_size += (ptr_t)e - old -> r_end;
+ old -> r_end = (ptr_t)e;
+ return;
+ }
if (n_root_sets == MAX_ROOT_SETS) {
ABORT("Too many root sets\n");
}
@@ -131,25 +141,40 @@ void GC_clear_roots()
ENABLE_SIGNALS();
}
-# ifdef PCR
-PCR_ERes GC_mark_thread_stack(PCR_Th_T *t, PCR_Any dummy)
+# ifdef THREADS
+# ifdef PCR
+PCR_ERes GC_push_thread_stack(PCR_Th_T *t, PCR_Any dummy)
{
struct PCR_ThCtl_TInfoRep info;
PCR_ERes result;
info.ti_stkLow = info.ti_stkHi = 0;
result = PCR_ThCtl_GetInfo(t, &info);
- GC_mark_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
+ GC_push_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
return(result);
}
-PCR_ERes GC_mark_old_obj(void *p, size_t size, PCR_Any data)
+/* Push the contents of an old object. We treat this as stack */
+/* data only because that makes it robust against mark stack */
+/* overflow. */
+PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
{
- GC_mark_all((ptr_t)p, (ptr_t)p + size);
+ GC_push_all_stack((ptr_t)p, (ptr_t)p + size);
return(PCR_ERes_okay);
}
+# endif
-# else
+# ifdef SRC_M3
+extern void ThreadF__ProcessStacks();
+
+void GC_push_thread_stack(start, stop)
+word start, stop;
+{
+ GC_push_all_stack((ptr_t)start, (ptr_t)stop + sizeof(word));
+}
+# endif
+
+# else /* ! THREADS */
ptr_t GC_approx_sp()
{
word dummy;
@@ -157,35 +182,64 @@ ptr_t GC_approx_sp()
return((ptr_t)(&dummy));
}
# endif
+
+# ifdef SRC_M3
+# ifdef ALL_INTERIOR_POINTERS
+ --> misconfigured
+# endif
+/* Push routine with M3 specific calling convention. */
+GC_m3_push_root(dummy1, p, dummy2, dummy3)
+word *p;
+ptr_t dummy1, dummy2;
+int dummy3;
+{
+ word q = *p;
+
+ if ((ptr_t)(q) >= GC_least_plausible_heap_addr
+ && (ptr_t)(q) < GC_greatest_plausible_heap_addr) {
+ GC_push_one_checked(q,FALSE);
+ }
+}
+
+/* M3 set equivalent to RTHeap.TracedRefTypes */
+typedef struct { int elts[1]; } RefTypeSet;
+RefTypeSet GC_TracedRefTypes = {{0x1}};
+
+/* From finalize.c */
+extern void GC_push_finalizer_structures();
+
+/* From stubborn.c: */
+# ifdef STUBBORN_ALLOC
+ extern extern_ptr_t * GC_changing_list_start;
+# endif
+# endif
+
/*
- * Call the mark routines (GC_tl_mark for a single pointer, GC_mark_all
+ * Call the mark routines (GC_tl_push for a single pointer, GC_push_conditional
* on groups of pointers) on every top level accessible pointer.
- * This is source language specific. The following works for C.
+ * If all is FALSE, arrange to push only possibly altered values.
*/
-GC_mark_roots()
+void GC_push_roots(all)
+bool all;
{
register int i;
/*
- * mark from registers - i.e., call GC_tl_mark(i) for each
- * register i
+ * push registers - i.e., call GC_push_one(r) for each
+ * register contents r.
*/
- GC_mark_regs(); /* usually defined in machine_dep.c */
-
-
-# ifdef PCR
- /* Traverse data allocated by previous memory managers. */
- {
- extern struct PCR_MM_ProcsRep * GC_old_allocator;
-
- if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
- GC_mark_old_obj, 0)
- != PCR_ERes_okay) {
- ABORT("Old object enumeration failed");
- }
- }
-
+ GC_push_regs(); /* usually defined in machine_dep.c */
+
+ /*
+ * Next push static data. This must happen early on, since it's
+ * not robust against mark stack overflow.
+ */
+ /* Reregister dynamic libraries, in case one got added. */
+# ifndef SRC_M3
+ GC_register_dynamic_libraries();
+# endif
+# ifdef PCR
/* Add new static data areas of dynamically loaded modules. */
{
PCR_IL_LoadedFile * p = PCR_IL_GetLastLoadedFile();
@@ -211,41 +265,65 @@ GC_mark_roots()
}
}
}
-
-
+# endif
+ /* Mark everything in static data areas */
+ for (i = 0; i < n_root_sets; i++) {
+ GC_push_conditional(static_roots[i].r_start,
+ static_roots[i].r_end, all);
+ }
+
+# ifdef SRC_M3
+ /* Use the M3 provided routine for finding static roots. */
+ /* This is a bit dubious, since it presumes no C roots. */
+ /* We handle the collector roots explicitly. */
+ {
+# ifdef STUBBORN_ALLOC
+ GC_push_one(GC_changing_list_start);
+# endif
+ GC_push_finalizer_structures();
+ RTMain__GlobalMapProc(GC_m3_push_root, 0, GC_TracedRefTypes);
+ }
+# endif
+
+# ifdef PCR
+ /* Traverse data allocated by previous memory managers. */
+ {
+ extern struct PCR_MM_ProcsRep * GC_old_allocator;
+
+ if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
+ GC_push_old_obj, all)
+ != PCR_ERes_okay) {
+ ABORT("Old object enumeration failed");
+ }
+ }
+# endif
+
+ /*
+ * Now traverse stacks.
+ */
+# ifdef PCR
/* Traverse all thread stacks. */
if (PCR_ERes_IsErr(
- PCR_ThCtl_ApplyToAllOtherThreads(GC_mark_thread_stack,0))
- || PCR_ERes_IsErr(GC_mark_thread_stack(PCR_Th_CurrThread(), 0))) {
+ PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0))
+ || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
ABORT("Thread stack marking failed\n");
}
-# else
+# endif
+# ifdef SRC_M3
+ if (GC_words_allocd > 0) {
+ ThreadF__ProcessStacks(GC_push_thread_stack);
+ }
+ /* Otherwise this isn't absolutely necessary, and we would */
+ /* run into startup ordering problems. */
+# endif
+# ifndef THREADS
/* Mark everything on the stack. */
# ifdef STACK_GROWS_DOWN
- GC_mark_all_stack( GC_approx_sp(), GC_stackbottom );
+ GC_push_all_stack( GC_approx_sp(), GC_stackbottom );
# else
- GC_mark_all_stack( GC_stackbottom, GC_approx_sp() );
+ GC_push_all_stack( GC_stackbottom, GC_approx_sp() );
# endif
-
# endif
- /* Reregister dynamic libraries, in case one got added. */
- GC_register_dynamic_libraries();
- /* Mark everything in static data areas */
- for (i = 0; i < n_root_sets; i++) {
- GC_mark_all(static_roots[i].r_start, static_roots[i].r_end);
- }
}
-/*
- * Top level GC_mark routine. Mark from the object pointed to by p.
- * GC_tl_mark is normally called by GC_mark_regs, and thus must be defined.
- */
-void GC_tl_mark(p)
-word p;
-{
- word q;
-
- q = p;
- GC_mark_all_stack((ptr_t)(&q), (ptr_t)((&q)+1));
-}
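
Given the extension logic added to GC_add_roots_inner above, a client can now
grow a registered root range in place. A hedged usage sketch through the
public GC_add_roots entry point (the static array is illustrative):

    extern void GC_add_roots();         /* (char *b, char *e), from gc.h */

    static char client_data[8192];

    static void register_roots()
    {
        /* The first call registers half of the array. */
        GC_add_roots(client_data, client_data + 4096);
        /* The second starts at the same address but is longer, so the */
        /* existing root entry is extended instead of duplicated.      */
        GC_add_roots(client_data, client_data + 8192);
    }
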
diff --git a/mips_mach_dep.s b/mips_mach_dep.s
index 319c24e0..d58568b4 100644
--- a/mips_mach_dep.s
+++ b/mips_mach_dep.s
@@ -1,26 +1,26 @@
-# define call_GC_mark(x) move $4,x; jal GC_tl_mark
+# define call_push(x) move $4,x; jal GC_push_one
.text
# Mark from machine registers that are saved by C compiler
- .globl GC_mark_regs
- .ent GC_mark_regs
-GC_mark_regs:
+ .globl GC_push_regs
+ .ent GC_push_regs
+GC_push_regs:
subu $sp,4 ## Need to save only return address
sw $31,4($sp)
.mask 0x80000000,0
.frame $sp,4,$31
- call_GC_mark($2)
- call_GC_mark($3)
- call_GC_mark($16)
- call_GC_mark($17)
- call_GC_mark($18)
- call_GC_mark($19)
- call_GC_mark($20)
- call_GC_mark($21)
- call_GC_mark($22)
- call_GC_mark($23)
- call_GC_mark($30)
+ call_push($2)
+ call_push($3)
+ call_push($16)
+ call_push($17)
+ call_push($18)
+ call_push($19)
+ call_push($20)
+ call_push($21)
+ call_push($22)
+ call_push($23)
+ call_push($30)
lw $31,4($sp)
addu $sp,4
j $31
- .end GC_mark_regs
+ .end GC_push_regs
diff --git a/misc.c b/misc.c
index 3179ef44..079de9e1 100644
--- a/misc.c
+++ b/misc.c
@@ -1,6 +1,6 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
- * Copyright (c) 1991,1992 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1991-1993 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -21,14 +21,19 @@
# ifdef THREADS
# ifdef PCR
-# include "pcr/il/PCR_IL.h"
+# include "il/PCR_IL.h"
struct PCR_Th_MLRep GC_allocate_ml;
# else
+# ifdef SRC_M3
+ /* Critical section counter is defined in the M3 runtime */
+ /* That's all we use. */
+# else
--> declare allocator lock here
+# endif
# endif
# endif
-struct _GC_arrays GC_arrays = { 0 };
+FAR struct _GC_arrays GC_arrays = { 0 };
/* Initialize GC_obj_kinds properly and standard free lists properly. */
/* This must be done statically since they may be accessed before */
@@ -38,23 +43,33 @@ struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
GC_no_mark_proc, FALSE },
/* NORMAL */ { &GC_objfreelist[0], &GC_reclaim_list[0],
GC_normal_mark_proc, TRUE },
+/* UNCOLLECTABLE */
+ { &GC_uobjfreelist[0], &GC_ureclaim_list[0],
+ GC_normal_mark_proc, TRUE },
+# ifdef STUBBORN_ALLOC
+/*STUBBORN*/ { &GC_sobjfreelist[0], &GC_sreclaim_list[0],
+ GC_normal_mark_proc, TRUE },
+# endif
};
-ptr_t GC_stackbottom = 0;
+# ifdef STUBBORN_ALLOC
+ int GC_n_kinds = 4;
+# else
+ int GC_n_kinds = 3;
+# endif
+
+bool GC_debugging_started = FALSE;
+ /* defined here so we don't have to load debug_malloc.o */
-word GC_hincr;
+void (*GC_check_heap)() = (void (*)())0;
-int GC_n_kinds = 2;
+ptr_t GC_stackbottom = 0;
bool GC_dont_gc = 0;
-extern signed_word GC_mem_found;
+bool GC_quiet = 0;
-# ifdef ALL_INTERIOR_POINTERS
-# define ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + WORDS_TO_BYTES(1))
-# else
-# define ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + WORDS_TO_BYTES(1) - 1)
-# endif
+extern signed_word GC_mem_found;
# ifdef MERGE_SIZES
/* Set things up so that GC_size_map[i] >= words(i), */
@@ -62,11 +77,6 @@ extern signed_word GC_mem_found;
/* and so that size_map contains relatively few distinct entries */
/* This is stolen from Russ Atkinson's Cedar quantization */
/* algorithm (but we precompute it). */
-
-# if (CPP_WORDSZ != 32)
- --> fix the following code
-# endif
-
void GC_init_size_map()
@@ -77,11 +87,11 @@ extern signed_word GC_mem_found;
/* Map size 0 to 1. This avoids problems at lower levels. */
GC_size_map[0] = 1;
/* One word objects don't have to be 2 word aligned. */
- GC_size_map[1] = 1;
- GC_size_map[2] = 1;
- GC_size_map[3] = 1;
- GC_size_map[4] = ROUNDED_UP_WORDS(4);
- for (i = 5; i <= 32; i++) {
+ for (i = 1; i < sizeof(word); i++) {
+ GC_size_map[i] = 1;
+ }
+ GC_size_map[sizeof(word)] = ROUNDED_UP_WORDS(sizeof(word));
+ for (i = sizeof(word) + 1; i <= 8 * sizeof(word); i++) {
# ifdef ALIGN_DOUBLE
GC_size_map[i] = (ROUNDED_UP_WORDS(i) + 1) & (~1);
# else
@@ -89,7 +99,7 @@ extern signed_word GC_mem_found;
# endif
}
- for (i = 33; i <= WORDS_TO_BYTES(MAXOBJSZ); i++) {
+ for (i = 8*sizeof(word)+1; i <= WORDS_TO_BYTES(MAXOBJSZ); i++) {
if (sz_rounded_up < ROUNDED_UP_WORDS(i)) {
register int size = ROUNDED_UP_WORDS(i);
register unsigned m = 0;
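
A worked example of the byte-to-word rounding the quantization loop above
builds on. This is a hedged standalone sketch matching the
non-ALL_INTERIOR_POINTERS definition of ROUNDED_UP_WORDS, with 4-byte words
assumed purely for illustration:

    #include <stdio.h>

    #define BYTES_PER_WORD 4    /* assumption: 32-bit target */
    #define ROUNDED_UP_WORDS(n) (((n) + BYTES_PER_WORD - 1) / BYTES_PER_WORD)

    int main(void)
    {
        /* Requests of 5..8 bytes all need 2 words; 9 bytes needs 3. */
        printf("%d %d %d\n", ROUNDED_UP_WORDS(5),
               ROUNDED_UP_WORDS(8), ROUNDED_UP_WORDS(9));   /* 2 2 3 */
        return 0;
    }
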
@@ -109,14 +119,6 @@ extern signed_word GC_mem_found;
}
# endif
-# ifdef ALL_INTERIOR_POINTERS
-# define SMALL_OBJ(bytes) ((bytes) < WORDS_TO_BYTES(MAXOBJSZ))
-# define ADD_SLOP(bytes) ((bytes)+1)
-# else
-# define SMALL_OBJ(bytes) ((bytes) <= WORDS_TO_BYTES(MAXOBJSZ))
-# define ADD_SLOP(bytes) (bytes)
-# endif
-
/*
* The following is a gross hack to deal with a problem that can occur
* on machines that are sloppy about stack frame sizes, notably SPARC.
@@ -191,237 +193,6 @@ void GC_clear_stack()
# endif
}
-/* allocate lb bytes for an object of kind k */
-ptr_t GC_generic_malloc(lb, k)
-register word lb;
-register int k;
-{
-register word lw;
-register ptr_t op;
-register ptr_t *opp;
-DCL_LOCK_STATE;
-
- DISABLE_SIGNALS();
- LOCK();
- if( SMALL_OBJ(lb) ) {
-# ifdef MERGE_SIZES
- lw = GC_size_map[lb];
-# else
- lw = ROUNDED_UP_WORDS(lb);
- if (lw == 0) lw = 1;
-# endif
- opp = &(GC_obj_kinds[k].ok_freelist[lw]);
- if( (op = *opp) == 0 ) {
- if (!GC_is_initialized) {
- GC_init_inner();
- ENABLE_SIGNALS();
- /* This may have fixed GC_size_map */
- UNLOCK();
- return(GC_generic_malloc(lb, k));
- }
- GC_clear_stack();
- op = GC_allocobj(lw, k);
- if (op == 0) goto out;
- }
- /* Here everything is in a consistent state. */
- /* We assume the following assignment is */
- /* atomic. If we get aborted */
- /* after the assignment, we lose an object, */
- /* but that's benign. */
- /* Volatile declarations may need to be added */
- /* to prevent the compiler from breaking things.*/
- *opp = obj_link(op);
- obj_link(op) = 0;
- } else {
- register struct hblk * h;
-
- if (!GC_is_initialized) GC_init_inner();
- lw = ROUNDED_UP_WORDS(lb);
- while ((h = GC_allochblk(lw, k)) == 0) {
- GC_collect_or_expand(divHBLKSZ(lb) + 1);
- }
- if (h == 0) {
- op = 0;
- } else {
- op = (ptr_t) (h -> hb_body);
- }
- }
- GC_words_allocd += lw;
-
-out:
- UNLOCK();
- ENABLE_SIGNALS();
- return((ptr_t)op);
-}
-
-/* Analogous to the above, but assumes a small object size, and */
-/* bypasses MERGE_SIZES mechanism. Used by gc_inline.h. */
-ptr_t GC_generic_malloc_words_small(lw, k)
-register word lw;
-register int k;
-{
-register ptr_t op;
-register ptr_t *opp;
-DCL_LOCK_STATE;
-
- LOCK();
- DISABLE_SIGNALS();
- opp = &(GC_obj_kinds[k].ok_freelist[lw]);
- if( (op = *opp) == 0 ) {
- if (!GC_is_initialized) {
- GC_init_inner();
- }
- GC_clear_stack();
- op = GC_allocobj(lw, k);
- if (op == 0) goto out;
- }
- *opp = obj_link(op);
- obj_link(op) = 0;
- GC_words_allocd += lw;
-
-out:
- UNLOCK();
- ENABLE_SIGNALS();
- return((ptr_t)op);
-}
-
-/* Allocate lb bytes of atomic (pointerfree) data */
-# ifdef __STDC__
- extern_ptr_t GC_malloc_atomic(size_t lb)
-# else
- extern_ptr_t GC_malloc_atomic(lb)
- size_t lb;
-# endif
-{
-register ptr_t op;
-register ptr_t * opp;
-register word lw;
-DCL_LOCK_STATE;
-
- if( SMALL_OBJ(lb) ) {
-# ifdef MERGE_SIZES
- lw = GC_size_map[lb];
-# else
- lw = ROUNDED_UP_WORDS(lb);
-# endif
- opp = &(GC_aobjfreelist[lw]);
- FASTLOCK();
- if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
- FASTUNLOCK();
- return(GC_generic_malloc((word)lb, PTRFREE));
- }
- /* See above comment on signals. */
- *opp = obj_link(op);
- GC_words_allocd += lw;
- FASTUNLOCK();
- return((extern_ptr_t) op);
- } else {
- return((extern_ptr_t)
- GC_generic_malloc((word)lb, PTRFREE));
- }
-}
-
-/* Allocate lb bytes of composite (pointerful) data */
-# ifdef __STDC__
- extern_ptr_t GC_malloc(size_t lb)
-# else
- extern_ptr_t GC_malloc(lb)
- size_t lb;
-# endif
-{
-register ptr_t op;
-register ptr_t *opp;
-register word lw;
-DCL_LOCK_STATE;
-
- if( SMALL_OBJ(lb) ) {
-# ifdef MERGE_SIZES
- lw = GC_size_map[lb];
-# else
- lw = ROUNDED_UP_WORDS(lb);
-# endif
- opp = &(GC_objfreelist[lw]);
- FASTLOCK();
- if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
- FASTUNLOCK();
- return(GC_generic_malloc((word)lb, NORMAL));
- }
- /* See above comment on signals. */
- *opp = obj_link(op);
- obj_link(op) = 0;
- GC_words_allocd += lw;
- FASTUNLOCK();
- return((extern_ptr_t) op);
- } else {
- return((extern_ptr_t)
- GC_generic_malloc((word)lb, NORMAL));
- }
-}
-
-/* Change the size of the block pointed to by p to contain at least */
-/* lb bytes. The object may be (and quite likely will be) moved. */
-/* The kind (e.g. atomic) is the same as that of the old. */
-/* Shrinking of large blocks is not implemented well. */
-# ifdef __STDC__
- extern_ptr_t GC_realloc(extern_ptr_t p, size_t lb)
-# else
- extern_ptr_t GC_realloc(p,lb)
- extern_ptr_t p;
- size_t lb;
-# endif
-{
-register struct hblk * h;
-register hdr * hhdr;
-register signed_word sz; /* Current size in bytes */
-register word orig_sz; /* Original sz in bytes */
-int obj_kind;
-
- if (p == 0) return(GC_malloc(lb)); /* Required by ANSI */
- h = HBLKPTR(p);
- hhdr = HDR(h);
- sz = hhdr -> hb_sz;
- obj_kind = hhdr -> hb_obj_kind;
- sz = WORDS_TO_BYTES(sz);
- orig_sz = sz;
-
- if (sz > WORDS_TO_BYTES(MAXOBJSZ)) {
- /* Round it up to the next whole heap block */
-
- sz = (sz+HDR_BYTES+HBLKSIZE-1)
- & (~HBLKMASK);
- sz -= HDR_BYTES;
- hhdr -> hb_sz = BYTES_TO_WORDS(sz);
- /* Extra area is already cleared by allochblk. */
- }
- if (ADD_SLOP(lb) <= sz) {
- if (lb >= (sz >> 1)) {
- if (orig_sz > lb) {
- /* Clear unneeded part of object to avoid bogus pointer */
- /* tracing. */
- bzero(((char *)p) + lb, (int)(orig_sz - lb));
- }
- return(p);
- } else {
- /* shrink */
- extern_ptr_t result = GC_generic_malloc((word)lb, obj_kind);
-
- if (result == 0) return(0);
- /* Could also return original object. But this */
- /* gives the client warning of imminent disaster. */
- bcopy(p, result, (int)lb);
- GC_free(p);
- return(result);
- }
- } else {
- /* grow */
- extern_ptr_t result = GC_generic_malloc((word)lb, obj_kind);
-
- if (result == 0) return(0);
- bcopy(p, result, (int)sz);
- GC_free(p);
- return(result);
- }
-}
/* Return a pointer to the base address of p, given a pointer to */
/* an address within an object. Return 0 otherwise. */
@@ -459,7 +230,7 @@ int obj_kind;
correction = offset % sz;
r -= (WORDS_TO_BYTES(correction));
if (((word *)r + sz) > (word *)(h + 1)
- && sz <= MAXOBJSZ) {
+ && sz <= BYTES_TO_WORDS(HBLKSIZE) - HDR_WORDS) {
return(0);
}
}
@@ -487,41 +258,6 @@ int obj_kind;
}
}
-/* Explicitly deallocate an object p. */
-# ifdef __STDC__
- void GC_free(extern_ptr_t p)
-# else
- void GC_free(p)
- extern_ptr_t p;
-# endif
-{
- register struct hblk *h;
- register hdr *hhdr;
- register signed_word sz;
- register ptr_t * flh;
- register struct obj_kind * ok;
-
- if (p == 0) return;
- /* Required by ANSI. It's not my fault ... */
- h = HBLKPTR(p);
- hhdr = HDR(h);
- sz = hhdr -> hb_sz;
- GC_mem_freed += sz;
- ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
-
- if (sz > MAXOBJSZ) {
- GC_freehblk(h);
- } else {
- ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
- if (ok -> ok_init) {
- bzero((char *)((word *)p + 1), (int)(WORDS_TO_BYTES(sz-1)));
- }
- flh = &(ok -> ok_freelist[sz]);
- obj_link(p) = *flh;
- *flh = (ptr_t)p;
- }
-}
-
bool GC_is_initialized = FALSE;
void GC_init()
@@ -604,14 +340,19 @@ void GC_init_inner()
ABORT("signed_word");
}
- GC_hincr = HINCR;
GC_init_headers();
GC_bl_init();
GC_mark_init();
- if (!GC_expand_hp_inner(GC_hincr)) {
- GC_printf0("Can't start up: no memory\n");
+ if (!GC_expand_hp_inner((word)MINHINCR)) {
+ GC_err_printf0("Can't start up: not enough memory\n");
EXIT();
}
+ /* Preallocate large object map. It's otherwise inconvenient to */
+ /* deal with failure. */
+ if (!GC_add_map_entry((word)0)) {
+ GC_err_printf0("Can't start up: not enough memory\n");
+ EXIT();
+ }
GC_register_displacement_inner(0L);
# ifdef MERGE_SIZES
GC_init_size_map();
@@ -624,14 +365,45 @@ void GC_init_inner()
GC_pcr_install();
# endif
/* Get black list set up */
- (void)GC_gcollect_inner(TRUE);
- (void)GC_gcollect_inner(TRUE);
+ GC_gcollect_inner();
+# ifdef STUBBORN_ALLOC
+ GC_stubborn_init();
+# endif
/* Convince lint that some things are used */
+# ifdef LINT
{
extern char * GC_copyright[];
- GC_noop(GC_copyright, GC_find_header,
- GC_tl_mark, GC_call_with_alloc_lock);
+ extern GC_read();
+
+ GC_noop(GC_copyright, GC_find_header, GC_print_block_list,
+ GC_push_one, GC_call_with_alloc_lock, GC_read,
+ GC_print_hblkfreelist, GC_dont_expand);
}
+# endif
+}
+
+void GC_enable_incremental()
+{
+ DCL_LOCK_STATE;
+
+# ifndef FIND_LEAK
+ DISABLE_SIGNALS();
+ LOCK();
+ if (!GC_is_initialized) {
+ GC_init_inner();
+ }
+ if (GC_words_allocd > 0) {
+ /* There may be unmarked reachable objects */
+ GC_gcollect_inner();
+ } /* else we're OK in assuming everything's */
+ /* clean since nothing can point to an */
+ /* unmarked object. */
+ GC_dirty_init();
+ GC_read_dirty();
+ GC_incremental = TRUE;
+ UNLOCK();
+ ENABLE_SIGNALS();
+# endif
}
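
A hedged usage sketch for the new entry point: GC_enable_incremental and
GC_malloc are the real public calls, while the allocation loop is purely
illustrative. After the call, collection proceeds in small increments as a
side effect of allocation:

    #include <stddef.h>

    extern void GC_enable_incremental(void);
    extern void * GC_malloc(size_t lb);

    int main(void)
    {
        int i;

        GC_enable_incremental();        /* set up dirty bit tracking */
        for (i = 0; i < 100000; i++) {
            (void) GC_malloc(64);       /* GC work happens in small   */
        }                               /* steps during these calls   */
        return 0;
    }
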
/* A version of printf that is unlikely to call malloc, and is thus safer */
@@ -646,6 +418,7 @@ long a, b, c, d, e, f;
{
char buf[1025];
+ if (GC_quiet) return;
buf[1024] = 0x15;
(void) sprintf(buf, format, a, b, c, d, e, f);
if (buf[1024] != 0x15) ABORT("GC_printf clobbered stack");
@@ -688,3 +461,14 @@ char *s;
# endif
}
+# ifdef SRC_M3
+void GC_enable()
+{
+ GC_dont_gc--;
+}
+
+void GC_disable()
+{
+ GC_dont_gc++;
+}
+# endif
diff --git a/obj_map.c b/obj_map.c
index fc3bfd99..40b2d881 100644
--- a/obj_map.c
+++ b/obj_map.c
@@ -30,6 +30,11 @@ hdr *hhdr;
if (GC_invalid_map == 0) {
GC_invalid_map = GC_scratch_alloc(MAP_SIZE);
+ if (GC_invalid_map == 0) {
+ GC_err_printf0(
+ "Cant initialize GC_invalid_map: insufficient memory\n");
+ EXIT();
+ }
for (displ = 0; displ < HBLKSIZE; displ++) {
MAP_ENTRY(GC_invalid_map, displ) = OBJ_INVALID;
}
@@ -68,14 +73,14 @@ word offset;
for (i = 0; i <= MAXOBJSZ; i++) {
if (GC_obj_map[i] != 0) {
if (i == 0) {
- GC_obj_map[i][offset + HDR_BYTES] = offset >> 2;
+ GC_obj_map[i][offset + HDR_BYTES] = BYTES_TO_WORDS(offset);
} else {
register int j;
register int lb = WORDS_TO_BYTES(i);
if (offset < lb) {
for (j = offset + HDR_BYTES; j < HBLKSIZE; j += lb) {
- GC_obj_map[i][j] = offset >> 2;
+ GC_obj_map[i][j] = BYTES_TO_WORDS(offset);
}
}
}
@@ -86,8 +91,9 @@ word offset;
}
-/* Add a heap block map for objects of size sz to obj_map. */
-void GC_add_map_entry(sz)
+/* Add a heap block map for objects of size sz to obj_map. */
+/* Return FALSE on failure. */
+bool GC_add_map_entry(sz)
word sz;
{
register int obj_start;
@@ -96,9 +102,10 @@ word sz;
if (sz > MAXOBJSZ) sz = 0;
if (GC_obj_map[sz] != 0) {
- return;
+ return(TRUE);
}
new_map = GC_scratch_alloc(MAP_SIZE);
+ if (new_map == 0) return(FALSE);
# ifdef PRINTSTATS
GC_printf1("Adding block map for size %lu\n", (unsigned long)sz);
# endif
@@ -124,4 +131,5 @@ word sz;
}
}
GC_obj_map[sz] = new_map;
+ return(TRUE);
}
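
The maps built above answer one question: given a byte displacement within a
heap block, at which word does the enclosing object start? A hedged
arithmetic sketch of the same computation, simplified by ignoring the block
header and assuming 4-byte words:

    #include <stdio.h>

    #define BYTES_PER_WORD 4            /* assumption */

    /* Word offset of the object containing byte displacement d, */
    /* for objects of lb bytes packed from displacement 0.       */
    static int object_start_word(int d, int lb)
    {
        return (d - d % lb) / BYTES_PER_WORD;
    }

    int main(void)
    {
        /* 12-byte objects: byte 5 lies in the object at word 0,   */
        /* byte 14 in the object starting at byte 12, i.e. word 3. */
        printf("%d %d\n", object_start_word(5, 12),
                          object_start_word(14, 12));
        return 0;
    }
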
diff --git a/os_dep.c b/os_dep.c
index 9a8df5f8..0e96d084 100644
--- a/os_dep.c
+++ b/os_dep.c
@@ -14,7 +14,12 @@
/* Blatantly OS dependent routines, except for those that are related */
/* dynamic loading. */
-/* Disable and enable signals during nontrivial allocations */
+#ifdef AMIGA
+# include <proto/exec.h>
+# include <proto/dos.h>
+# include <dos/dosextens.h>
+# include <workbench/startup.h>
+#endif
# ifdef OS2
@@ -36,6 +41,8 @@
# include <newexe.h>
# include <exe386.h>
+/* Disable and enable signals during nontrivial allocations */
+
void GC_disable_signals(void)
{
ULONG nest;
@@ -55,7 +62,7 @@ void GC_enable_signals(void)
# else
-# ifndef PCR
+# if !defined(PCR) && !defined(AMIGA)
# ifdef sigmask
/* Use the traditional BSD interface */
@@ -139,11 +146,48 @@ ptr_t GC_get_stack_base()
# else
+# ifdef AMIGA
+
+ptr_t GC_get_stack_base()
+{
+ extern struct WBStartup *_WBenchMsg;
+ extern long __base;
+ extern long __stack;
+ struct Task *task;
+ struct Process *proc;
+ struct CommandLineInterface *cli;
+ long size;
+
+ if ((task = FindTask(0)) == 0) {
+ GC_err_puts("Cannot find own task structure\n");
+ ABORT("task missing");
+ }
+ proc = (struct Process *)task;
+ cli = BADDR(proc->pr_CLI);
+
+ if (_WBenchMsg != 0 || cli == 0) {
+ size = (char *)task->tc_SPUpper - (char *)task->tc_SPLower;
+ } else {
+ size = cli->cli_DefaultStack * 4;
+ }
+ return (ptr_t)(__base + GC_max(size, __stack));
+}
+
+# else
+
# if !defined(THREADS) && !defined(STACKBOTTOM) && defined(HEURISTIC2)
+# define NEED_FIND_LIMIT
+# endif
+
+# if defined(SUNOS4) & defined(DYNAMIC_LOADING)
+# define NEED_FIND_LIMIT
+# endif
+
+# ifdef NEED_FIND_LIMIT
/* Some tools to implement HEURISTIC2 */
# define MIN_PAGE_SIZE 256 /* Smallest conceivable page size, bytes */
# include <setjmp.h>
- /* static */ jmp_buf GC_jmp_buf;
+ /* static */ VOLATILE jmp_buf GC_jmp_buf;
/*ARGSUSED*/
void GC_fault_handler(sig)
@@ -151,25 +195,61 @@ ptr_t GC_get_stack_base()
{
longjmp(GC_jmp_buf, 1);
}
-# endif
-ptr_t GC_get_stack_base()
-{
- word dummy;
- static ptr_t result;
- /* Needs to be static, since otherwise it may not be */
- /* preserved across the longjmp. Can safely be */
- /* static since it's only called once, with the */
- /* allocation lock held. */
# ifdef __STDC__
typedef void (*handler)(int);
# else
typedef void (*handler)();
# endif
-# ifdef HEURISTIC2
- static handler old_segv_handler, old_bus_handler;
+
+ /* Return the first nonaddressable location > p (up) or */
+ /* the smallest location q s.t. [q,p] is addressable (!up). */
+ ptr_t GC_find_limit(p, up)
+ ptr_t p;
+ bool up;
+ {
+ static VOLATILE ptr_t result;
+ /* Needs to be static, since otherwise it may not be */
+ /* preserved across the longjmp. Can safely be */
+ /* static since it's only called once, with the */
+ /* allocation lock held. */
+
+ static handler old_segv_handler, old_bus_handler;
/* See above for static declaration. */
-# endif
+
+ old_segv_handler = signal(SIGSEGV, GC_fault_handler);
+# ifdef SIGBUS
+ old_bus_handler = signal(SIGBUS, GC_fault_handler);
+# endif
+ if (setjmp(GC_jmp_buf) == 0) {
+ result = (ptr_t)(((word)(p))
+ & ~(MIN_PAGE_SIZE-1));
+ for (;;) {
+ if (up) {
+ result += MIN_PAGE_SIZE;
+ } else {
+ result -= MIN_PAGE_SIZE;
+ }
+ GC_noop(*result);
+ }
+ }
+ (void) signal(SIGSEGV, old_segv_handler);
+# ifdef SIGBUS
+ (void) signal(SIGBUS, old_bus_handler);
+# endif
+ if (!up) {
+ result += MIN_PAGE_SIZE;
+ }
+ return(result);
+ }
+# endif
+
+
+ptr_t GC_get_stack_base()
+{
+ word dummy;
+ ptr_t result;
+
# define STACKBOTTOM_ALIGNMENT_M1 0xffffff
# ifdef STACKBOTTOM
@@ -186,34 +266,17 @@ ptr_t GC_get_stack_base()
# endif
# endif /* HEURISTIC1 */
# ifdef HEURISTIC2
- old_segv_handler = signal(SIGSEGV, GC_fault_handler);
-# ifdef SIGBUS
- old_bus_handler = signal(SIGBUS, GC_fault_handler);
-# endif
- if (setjmp(GC_jmp_buf) == 0) {
- result = (ptr_t)(((word)(&dummy))
- & ~(MIN_PAGE_SIZE-1));
- for (;;) {
-# ifdef STACK_GROWS_DOWN
- result += MIN_PAGE_SIZE;
-# else
- result -= MIN_PAGE_SIZE;
-# endif
- GC_noop(*result);
- }
- }
- (void) signal(SIGSEGV, old_segv_handler);
-# ifdef SIGBUS
- (void) signal(SIGBUS, old_bus_handler);
-# endif
-# ifdef STACK_GROWS_UP
- result += MIN_PAGE_SIZE;
-# endif
+# ifdef STACK_GROWS_DOWN
+ result = GC_find_limit((ptr_t)(&dummy), TRUE);
+# else
+ result = GC_find_limit((ptr_t)(&dummy), FALSE);
+# endif
# endif /* HEURISTIC2 */
return(result);
# endif /* STACKBOTTOM */
}
+# endif /* ! AMIGA */
# endif /* ! OS2 */
/*
@@ -315,16 +378,564 @@ void GC_register_data_segments()
}
# else
+# ifdef AMIGA
+
+ void GC_register_data_segments()
+ {
+ extern struct WBStartup *_WBenchMsg;
+ struct Process *proc;
+ struct CommandLineInterface *cli;
+ BPTR myseglist;
+ ULONG *data;
+
+ if ( _WBenchMsg != 0 ) {
+ if ((myseglist = _WBenchMsg->sm_Segment) == 0) {
+ GC_err_puts("No seglist from workbench\n");
+ return;
+ }
+ } else {
+ if ((proc = (struct Process *)FindTask(0)) == 0) {
+ GC_err_puts("Cannot find process structure\n");
+ return;
+ }
+ if ((cli = BADDR(proc->pr_CLI)) == 0) {
+ GC_err_puts("No CLI\n");
+ return;
+ }
+ if ((myseglist = cli->cli_Module) == 0) {
+ GC_err_puts("No seglist from CLI\n");
+ return;
+ }
+ }
+
+ for (data = (ULONG *)BADDR(myseglist); data != 0;
+ data = (ULONG *)BADDR(data[0])) {
+ GC_add_roots_inner((char *)&data[1], ((char *)&data[1]) + data[-1]);
+ }
+ }
+
+
+# else
void GC_register_data_segments()
{
- extern int end;
+# ifndef NEXT
+ extern int end;
+# endif
-# ifndef PCR
+# if !defined(PCR) && !defined(SRC_M3) && !defined(NEXT)
GC_add_roots_inner(DATASTART, (char *)(&end));
# endif
+# if !defined(PCR) && defined(NEXT)
+ GC_add_roots_inner(DATASTART, (char *) get_end());
+# endif
/* Dynamic libraries are added at every collection, since they may */
/* change. */
}
+# endif /* ! AMIGA */
# endif /* ! OS2 */
+
+# if !defined(OS2) && !defined(PCR) && !defined(AMIGA)
+
+extern caddr_t sbrk();
+# ifdef __STDC__
+# define SBRK_ARG_T size_t
+# else
+# define SBRK_ARG_T int
+# endif
+
+ptr_t GC_unix_get_mem(bytes)
+word bytes;
+{
+ caddr_t cur_brk = sbrk(0);
+ caddr_t result;
+ SBRK_ARG_T lsbs = (word)cur_brk & (HBLKSIZE-1);
+
+ if (lsbs != 0) {
+ if(sbrk(HBLKSIZE - lsbs) == (caddr_t)(-1)) return(0);
+ }
+ result = sbrk((SBRK_ARG_T)bytes);
+ if (result == (caddr_t)(-1)) return(0);
+ return((ptr_t)result);
+}
+
+# endif
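
A hedged arithmetic sketch of the alignment step in GC_unix_get_mem above
(HBLKSIZE = 4096 is an assumption for illustration): an unaligned break is
first padded out to the next heap block boundary before the real request is
issued.

    #include <stdio.h>

    #define HBLKSIZE 4096               /* assumption */

    int main(void)
    {
        unsigned long cur_brk = 0x10234;
        unsigned long lsbs = cur_brk & (HBLKSIZE - 1);     /* 0x234 */

        /* sbrk(HBLKSIZE - lsbs) advances 0x10234 -> 0x11000. */
        printf("pad = 0x%lx, aligned break = 0x%lx\n",
               HBLKSIZE - lsbs, cur_brk + (HBLKSIZE - lsbs));
        return 0;
    }
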
+
+/*
+ * Routines for accessing dirty bits on virtual pages.
+ * We plan to eventually implement four strategies for doing so:
+ * DEFAULT_VDB: A simple dummy implementation that treats every page
+ * as possibly dirty. This makes incremental collection
+ * useless, but the implementation is still correct.
+ * PCR_VDB: Use PPCR's virtual dirty bit facility.
+ * PROC_VDB: Use the /proc facility for reading dirty bits. Only
+ * works under some SVR4 variants. Even then, it may be
+ * too slow to be entirely satisfactory. Requires reading
+ * dirty bits for the entire address space. Implementations tend
+ * to assume that the client is a (slow) debugger.
+ * MPROTECT_VDB: Protect pages and then catch the faults to keep track of
+ * dirtied pages. The implementation (and implementability)
+ * is highly system dependent. This usually fails when system
+ * calls write to a protected page. We prevent the read system
+ * call from doing so. It is the client's responsibility to
+ * make sure that other system calls are similarly protected
+ * or write only to the stack.
+ */
+
+# ifdef DEFAULT_VDB
+
+/* All of the following assume the allocation lock is held, and */
+/* signals are disabled. */
+
+/* The client asserts that unallocated pages in the heap are never */
+/* written. */
+
+/* Initialize virtual dirty bit implementation. */
+void GC_dirty_init()
+{
+}
+
+/* Retrieve system dirty bits for heap to a local buffer. */
+/* Restore the systems notion of which pages are dirty. */
+void GC_read_dirty()
+{}
+
+/* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
+/* If the actual page size is different, this returns TRUE if any */
+/* of the pages overlapping h are dirty. This routine may err on the */
+/* side of labelling pages as dirty (and this implementation does). */
+/*ARGSUSED*/
+bool GC_page_was_dirty(h)
+struct hblk *h;
+{
+ return(TRUE);
+}
+
+/* A call hints that h is about to be written */
+/*ARGSUSED*/
+void GC_write_hint(h)
+struct hblk *h;
+{
+}
+
+# endif /* DEFAULT_VDB */
+
+
+# ifdef MPROTECT_VDB
+
+/*
+ * See DEFAULT_VDB for interface descriptions.
+ */
+
+/*
+ * This implementation maintains dirty bits itself by catching write
+ * faults and keeping track of them. We assume nobody else catches
+ * SIGBUS or SIGSEGV. We assume no write faults occur in system calls
+ * except as a result of a read system call. This means clients must
+ * either ensure that system calls do not touch the heap, or must
+ * provide their own wrappers analogous to the one for read.
+ * This implementation is currently SunOS 4.X and IRIX 5.X specific, though we
+ * tried to use portable code where easily possible. It is known
+ * not to work under a number of other systems.
+ */
+
+# include <sys/mman.h>
+# include <signal.h>
+# include <sys/syscall.h>
+
+VOLATILE page_hash_table GC_dirty_pages;
+ /* Pages dirtied since last GC_read_dirty. */
+
+word GC_page_size;
+
+/*ARGSUSED*/
+# ifdef SUNOS4
+ void GC_write_fault_handler(sig, code, scp, addr)
+ int sig, code;
+ struct sigcontext *scp;
+ char * addr;
+# define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
+# define CODE_OK (FC_CODE(code) == FC_PROT \
+ || (FC_CODE(code) == FC_OBJERR \
+ && FC_ERRNO(code) == FC_PROT))
+
+# else
+# if defined(IRIX5) || defined(ALPHA) /* OSF1 */
+# include <errno.h>
+ void GC_write_fault_handler(int sig, int code, struct sigcontext *scp)
+# define SIG_OK (sig == SIGSEGV)
+# ifdef ALPHA
+# define SIG_PF void (*)(int)
+# define CODE_OK (code == 2 /* experimentally determined */)
+# else
+# define CODE_OK (code == EACCES)
+# endif
+# endif
+# endif
+{
+ register int i;
+# ifdef IRIX5
+ char * addr = (char *) (scp -> sc_badvaddr);
+# endif
+# ifdef ALPHA
+ char * addr = (char *) (scp -> sc_traparg_a0);
+# endif
+
+ if (SIG_OK && CODE_OK) {
+ register struct hblk * h =
+ (struct hblk *)((word)addr & ~(GC_page_size-1));
+
+ for (i = 0; i < GC_page_size/HBLKSIZE; i++) {
+ register int index = PHT_HASH(h+i);
+
+ if (HDR(h+i) == 0) {
+ ABORT("Unexpected bus error or segmentation fault");
+ }
+ set_pht_entry_from_index(GC_dirty_pages, index);
+ }
+ if (mprotect((caddr_t)h, (int)GC_page_size,
+ PROT_WRITE | PROT_READ | PROT_EXEC) < 0) {
+ ABORT("mprotect failed in handler");
+ }
+# if defined(IRIX5) || defined(ALPHA)
+ /* IRIX resets the signal handler each time. */
+ signal(SIGSEGV, (SIG_PF) GC_write_fault_handler);
+# endif
+ /* The write may not take place before dirty bits are read. */
+ /* But then we'll fault again ... */
+ return;
+ }
+
+ ABORT("Unexpected bus error or segmentation fault");
+}
+
+void GC_write_hint(h)
+struct hblk *h;
+{
+ register struct hblk * h_trunc =
+ (struct hblk *)((word)h & ~(GC_page_size-1));
+ register int i;
+ register bool found_clean = FALSE;
+
+ for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
+ register int index = PHT_HASH(h_trunc+i);
+
+ if (!get_pht_entry_from_index(GC_dirty_pages, index)) {
+ found_clean = TRUE;
+ set_pht_entry_from_index(GC_dirty_pages, index);
+ }
+ }
+ if (found_clean) {
+ if (mprotect((caddr_t)h_trunc, (int)GC_page_size,
+ PROT_WRITE | PROT_READ | PROT_EXEC) < 0) {
+ ABORT("mprotect failed in GC_write_hint");
+ }
+ }
+}
+
+void GC_dirty_init()
+{
+ GC_page_size = getpagesize();
+ if (GC_page_size % HBLKSIZE != 0) {
+ GC_err_printf0("Page size not multiple of HBLKSIZE\n");
+ ABORT("Page size not multiple of HBLKSIZE");
+ }
+# ifdef SUNOS4
+ if (signal(SIGBUS, GC_write_fault_handler) != SIG_DFL) {
+ GC_err_printf0("Clobbered other SIGBUS handler\n");
+ }
+ if (signal(SIGSEGV, GC_write_fault_handler) != SIG_DFL) {
+ GC_err_printf0("Clobbered other SIGSEGV handler\n");
+ }
+# endif
+# if defined(IRIX5) || defined(ALPHA)
+ if (signal(SIGSEGV, (SIG_PF)GC_write_fault_handler) != SIG_DFL) {
+ GC_err_printf0("Clobbered other SIGSEGV handler\n");
+ }
+# endif
+}
+
+
+
+void GC_protect_heap()
+{
+ word ps = GC_page_size;
+ word pmask = (ps-1);
+ ptr_t start;
+ word offset;
+ word len;
+ int i;
+
+ for (i = 0; i < GC_n_heap_sects; i++) {
+ offset = (word)(GC_heap_sects[i].hs_start) & pmask;
+ start = GC_heap_sects[i].hs_start - offset;
+ len = GC_heap_sects[i].hs_bytes + offset;
+ len += ps-1; len &= ~pmask;
+ if (mprotect((caddr_t)start, (int)len, PROT_READ | PROT_EXEC) < 0) {
+ ABORT("mprotect failed");
+ }
+ }
+}
+
+# ifdef THREADS
+--> The following is broken. We can lose dirty bits. We would need
+--> the signal handler to cooperate, as in PCR.
+# endif
+
+void GC_read_dirty()
+{
+ bcopy((char *)GC_dirty_pages, (char *)GC_grungy_pages,
+ (int)(sizeof GC_dirty_pages));
+ bzero((char *)GC_dirty_pages, (int)(sizeof GC_dirty_pages));
+ GC_protect_heap();
+}
+
+bool GC_page_was_dirty(h)
+struct hblk * h;
+{
+ register word index = PHT_HASH(h);
+
+ return(HDR(h) == 0 || get_pht_entry_from_index(GC_grungy_pages, index));
+}
+
+void GC_begin_syscall()
+{
+ DISABLE_SIGNALS();
+ LOCK();
+}
+
+void GC_end_syscall()
+{
+ UNLOCK();
+ ENABLE_SIGNALS();
+}
+
+void GC_unprotect_range(addr, len)
+ptr_t addr;
+word len;
+{
+ struct hblk * start_block;
+ struct hblk * end_block;
+ register struct hblk *h;
+ ptr_t obj_start;
+
+ if (!GC_incremental) return;
+ obj_start = GC_base(addr);
+ if (obj_start == 0) return;
+ if (GC_base(addr + len - 1) != obj_start) {
+ ABORT("GC_unprotect_range(range bigger than object)");
+ }
+ start_block = (struct hblk *)((word)addr & ~(GC_page_size - 1));
+ end_block = (struct hblk *)((word)(addr + len - 1) & ~(GC_page_size - 1));
+ end_block += GC_page_size/HBLKSIZE - 1;
+ for (h = start_block; h <= end_block; h++) {
+ register word index = PHT_HASH(h);
+
+ set_pht_entry_from_index(GC_dirty_pages, index);
+ }
+ if (mprotect((caddr_t)start_block,
+ (int)((ptr_t)end_block - (ptr_t)start_block)
+ + HBLKSIZE,
+ PROT_WRITE | PROT_READ | PROT_EXEC) < 0) {
+ ABORT("mprotect failed in GC_unprotect_range");
+ }
+}
+
+/* Replacement for UNIX system call. */
+/* Other calls that write to the heap */
+/* should be handled similarly. */
+# ifndef LINT
+ int read(fd, buf, nbyte)
+# else
+ int GC_read(fd, buf, nbyte)
+# endif
+int fd;
+char *buf;
+int nbyte;
+{
+ int result;
+
+ GC_begin_syscall();
+ GC_unprotect_range(buf, (word)nbyte);
+ result = syscall(SYS_read, fd, buf, nbyte);
+ GC_end_syscall();
+ return(result);
+}
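
The same wrapping pattern extends to any other call that writes into the
heap. A hedged sketch for recv, where my_recv_into_heap is illustrative (not
collector API) and the three GC_ routines are the ones defined above:

    #include <sys/socket.h>

    extern void GC_begin_syscall();
    extern void GC_end_syscall();
    extern void GC_unprotect_range();   /* (ptr_t addr, word len) */

    static int my_recv_into_heap(int sock, char *heap_buf, int len)
    {
        int result;

        GC_begin_syscall();             /* lock out the collector     */
        GC_unprotect_range(heap_buf, (unsigned long)len);
        result = recv(sock, heap_buf, len, 0);  /* pages now writable */
        GC_end_syscall();
        return result;
    }
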
+
+# endif /* MPROTECT_VDB */
+
+# ifdef PROC_VDB
+
+/*
+ * See DEFAULT_VDB for interface descriptions.
+ */
+
+/*
+ * This implementation assumes a Solaris 2.X-like /proc pseudo-file-system
+ * from which we can read page modified bits. This facility is far from
+ * optimal (e.g. we would like to get the info for only some of the
+ * address space), but it avoids intercepting system calls.
+ */
+
+#include <sys/types.h>
+#include <sys/signal.h>
+#include <sys/fault.h>
+#include <sys/syscall.h>
+#include <sys/procfs.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#define BUFSZ 20000
+char *GC_proc_buf;
+
+int GC_proc_fd;
+
+void GC_dirty_init()
+{
+ int fd;
+ char buf[20];
+
+ sprintf(buf, "/proc/%d", getpid());
+ fd = open(buf, O_RDONLY);
+ if (fd < 0) {
+ ABORT("/proc open failed");
+ }
+ GC_proc_fd = ioctl(fd, PIOCOPENPD, 0);
+ if (GC_proc_fd < 0) {
+ ABORT("/proc ioctl failed");
+ }
+ GC_proc_buf = GC_scratch_alloc(BUFSZ);
+}
+
+/* Ignore write hints. They don't help us here. */
+/*ARGSUSED*/
+void GC_write_hint(h)
+struct hblk *h;
+{
+}
+
+void GC_read_dirty()
+{
+ unsigned long ps, np;
+ int nmaps;
+ ptr_t vaddr;
+ struct prasmap * map;
+ char * bufp;
+ ptr_t current_addr, limit;
+ int i;
+
+ bzero((char *)GC_grungy_pages, (int)(sizeof GC_grungy_pages));
+
+ bufp = GC_proc_buf;
+ if (read(GC_proc_fd, bufp, BUFSZ) <= 0) {
+ ABORT("/proc read failed: BUFSZ too small?\n");
+ }
+ /* Copy dirty bits into GC_grungy_pages */
+ nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
+ /* printf( "nmaps = %d, PG_REFERENCED = %d, PG_MODIFIED = %d\n",
+ nmaps, PG_REFERENCED, PG_MODIFIED); */
+ bufp = bufp + sizeof(struct prpageheader);
+ for (i = 0; i < nmaps; i++) {
+ map = (struct prasmap *)bufp;
+ vaddr = (ptr_t)(map -> pr_vaddr);
+ ps = map -> pr_pagesize;
+ np = map -> pr_npage;
+ /* printf("vaddr = 0x%X, ps = 0x%X, np = 0x%X\n", vaddr, ps, np); */
+ limit = vaddr + ps * np;
+ bufp += sizeof (struct prasmap);
+ for (current_addr = vaddr;
+ current_addr < limit; current_addr += ps){
+ if ((*bufp++) & PG_MODIFIED) {
+ register struct hblk * h = (struct hblk *) current_addr;
+
+ while ((ptr_t)h < current_addr + ps) {
+ register word index = PHT_HASH(h);
+
+ set_pht_entry_from_index(GC_grungy_pages, index);
+ h++;
+ }
+ }
+ }
+ bufp += sizeof(long) - 1;
+ bufp = (char *)((unsigned long)bufp & ~(sizeof(long)-1));
+ }
+}
+
+bool GC_page_was_dirty(h)
+struct hblk *h;
+{
+ register word index = PHT_HASH(h);
+
+ return(get_pht_entry_from_index(GC_grungy_pages, index));
+}
+
+# endif /* PROC_VDB */
+
+
+# ifdef PCR_VDB
+
+# include "vd/PCR_VD.h"
+
+# define NPAGES (32*1024) /* 128 MB */
+
+PCR_VD_DB GC_grungy_bits[NPAGES];
+
+ptr_t GC_vd_base; /* Address corresponding to GC_grungy_bits[0] */
+ /* HBLKSIZE aligned. */
+
+void GC_dirty_init()
+{
+ /* For the time being, we assume the heap generally grows up */
+ GC_vd_base = GC_heap_sects[0].hs_start;
+ if (GC_vd_base == 0) {
+ ABORT("Bad initial heap segment");
+ }
+ if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
+ != PCR_ERes_okay) {
+ ABORT("dirty bit initialization failed");
+ }
+}
+
+void GC_read_dirty()
+{
+ /* lazily enable dirty bits on newly added heap sects */
+ {
+ static int onhs = 0;
+ int nhs = GC_n_heap_sects;
+ for( ; onhs < nhs; onhs++ ) {
+ PCR_VD_WriteProtectEnable(
+ GC_heap_sects[onhs].hs_start,
+ GC_heap_sects[onhs].hs_bytes );
+ }
+ }
+
+
+ if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
+ != PCR_ERes_okay) {
+ ABORT("dirty bit read failed");
+ }
+}
+
+bool GC_page_was_dirty(h)
+struct hblk *h;
+{
+ if((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
+ return(TRUE);
+ }
+ return(GC_grungy_bits[h - (struct hblk *)GC_vd_base] & PCR_VD_DB_dirtyBit);
+}
+
+/*ARGSUSED*/
+void GC_write_hint(h)
+struct hblk *h;
+{
+ PCR_VD_WriteProtectDisable(h, HBLKSIZE);
+ PCR_VD_WriteProtectEnable(h, HBLKSIZE);
+}
+
+# endif /* PCR_VDB */
+
+
+
+
diff --git a/pcr_interface.c b/pcr_interface.c
index 573d34f7..2a2684b2 100644
--- a/pcr_interface.c
+++ b/pcr_interface.c
@@ -17,7 +17,7 @@
* We wrap all of the allocator functions to avoid questions of
* compatibility between the prototyped and nonprototyped versions of the functions.
*/
-# include "pcr/mm/PCR_MM.h"
+# include "mm/PCR_MM.h"
# define MY_MAGIC 17L
diff --git a/reclaim.c b/reclaim.c
index 9bf10158..8b3ffbfe 100644
--- a/reclaim.c
+++ b/reclaim.c
@@ -1,6 +1,6 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
- * Copyright (c) 1991, 1992 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1991-1993 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -28,7 +28,8 @@ word sz;
if (GC_debugging_started && GC_has_debug_info(p)) {
GC_print_obj(p);
} else {
- GC_err_printf1("0x%lx (appr. size = %ld)\n",
+ GC_err_printf2("0x%lx (appr. size = %ld)\n",
+ (unsigned long)p,
(unsigned long)WORDS_TO_BYTES(sz));
}
}
@@ -135,9 +136,10 @@ register ptr_t list;
# ifdef GATHERSTATS
register int n_words_found = 0;
# endif
- register int mark_word;
+ register word mark_word;
+ register int i;
# define DO_OBJ(start_displ) \
- if (!(mark_word & (1 << start_displ))) { \
+ if (!(mark_word & ((word)1 << start_displ))) { \
FOUND_FREE(hbp, p - (word *)hbp + start_displ); \
p[start_displ] = (word)list; \
list = (ptr_t)(p+start_displ); \
@@ -146,28 +148,19 @@ register ptr_t list;
}
p = (word *)(hbp->hb_body);
- plim = (word *)(((unsigned)hbp) + HBLKSIZE);
+ plim = (word *)(((word)hbp) + HBLKSIZE);
/* go through all words in block */
while( p < plim ) {
mark_word = *mark_word_addr++;
- DO_OBJ(0);
- DO_OBJ(2);
- DO_OBJ(4);
- DO_OBJ(6);
- DO_OBJ(8);
- DO_OBJ(10);
- DO_OBJ(12);
- DO_OBJ(14);
- DO_OBJ(16);
- DO_OBJ(18);
- DO_OBJ(20);
- DO_OBJ(22);
- DO_OBJ(24);
- DO_OBJ(26);
- DO_OBJ(28);
- DO_OBJ(30);
- p+=32;
+ for (i = 0; i < WORDSZ; i += 8) {
+ DO_OBJ(0);
+ DO_OBJ(2);
+ DO_OBJ(4);
+ DO_OBJ(6);
+ p += 8;
+ mark_word >>= 8;
+ }
}
# ifdef GATHERSTATS
GC_mem_found += n_words_found;
@@ -191,9 +184,9 @@ register ptr_t list;
# ifdef GATHERSTATS
register int n_words_found = 0;
# endif
- register int mark_word;
+ register word mark_word;
# define DO_OBJ(start_displ) \
- if (!(mark_word & (1 << start_displ))) { \
+ if (!(mark_word & ((word)1 << start_displ))) { \
FOUND_FREE(hbp, p - (word *)hbp + start_displ); \
p[start_displ] = (word)list; \
list = (ptr_t)(p+start_displ); \
@@ -204,7 +197,7 @@ register ptr_t list;
}
p = (word *)(hbp->hb_body);
- plim = (word *)(((unsigned)hbp) + HBLKSIZE);
+ plim = (word *)(((word)hbp) + HBLKSIZE);
/* go through all words in block */
while( p < plim ) {
@@ -217,7 +210,17 @@ register ptr_t list;
DO_OBJ(20);
DO_OBJ(24);
DO_OBJ(28);
- p+=32;
+# if CPP_WORDSZ == 64
+ DO_OBJ(32);
+ DO_OBJ(36);
+ DO_OBJ(40);
+ DO_OBJ(44);
+ DO_OBJ(48);
+ DO_OBJ(52);
+ DO_OBJ(56);
+ DO_OBJ(60);
+# endif
+ p += WORDSZ;
}
# ifdef GATHERSTATS
GC_mem_found += n_words_found;
@@ -243,7 +246,7 @@ register word sz;
p = (word *)(hbp->hb_body);
word_no = HDR_WORDS;
- plim = (word *)((((unsigned)hbp) + HBLKSIZE)
+ plim = (word *)((((word)hbp) + HBLKSIZE)
- WORDS_TO_BYTES(sz));
/* go through all words in block */
@@ -279,9 +282,10 @@ register ptr_t list;
# ifdef GATHERSTATS
register int n_words_found = 0;
# endif
- register int mark_word;
+ register word mark_word;
+ register int i;
# define DO_OBJ(start_displ) \
- if (!(mark_word & (1 << start_displ))) { \
+ if (!(mark_word & ((word)1 << start_displ))) { \
FOUND_FREE(hbp, p - (word *)hbp + start_displ); \
p[start_displ] = (word)list; \
list = (ptr_t)(p+start_displ); \
@@ -289,28 +293,19 @@ register ptr_t list;
}
p = (word *)(hbp->hb_body);
- plim = (word *)(((unsigned)hbp) + HBLKSIZE);
+ plim = (word *)(((word)hbp) + HBLKSIZE);
/* go through all words in block */
while( p < plim ) {
mark_word = *mark_word_addr++;
- DO_OBJ(0);
- DO_OBJ(2);
- DO_OBJ(4);
- DO_OBJ(6);
- DO_OBJ(8);
- DO_OBJ(10);
- DO_OBJ(12);
- DO_OBJ(14);
- DO_OBJ(16);
- DO_OBJ(18);
- DO_OBJ(20);
- DO_OBJ(22);
- DO_OBJ(24);
- DO_OBJ(26);
- DO_OBJ(28);
- DO_OBJ(30);
- p+=32;
+ for (i = 0; i < WORDSZ; i += 8) {
+ DO_OBJ(0);
+ DO_OBJ(2);
+ DO_OBJ(4);
+ DO_OBJ(6);
+ p += 8;
+ mark_word >>= 8;
+ }
}
# ifdef GATHERSTATS
GC_mem_found += n_words_found;
@@ -334,9 +329,9 @@ register ptr_t list;
# ifdef GATHERSTATS
register int n_words_found = 0;
# endif
- register int mark_word;
+ register word mark_word;
# define DO_OBJ(start_displ) \
- if (!(mark_word & (1 << start_displ))) { \
+ if (!(mark_word & ((word)1 << start_displ))) { \
FOUND_FREE(hbp, p - (word *)hbp + start_displ); \
p[start_displ] = (word)list; \
list = (ptr_t)(p+start_displ); \
@@ -344,7 +339,7 @@ register ptr_t list;
}
p = (word *)(hbp->hb_body);
- plim = (word *)(((unsigned)hbp) + HBLKSIZE);
+ plim = (word *)(((word)hbp) + HBLKSIZE);
/* go through all words in block */
while( p < plim ) {
@@ -357,7 +352,62 @@ register ptr_t list;
DO_OBJ(20);
DO_OBJ(24);
DO_OBJ(28);
- p+=32;
+# if CPP_WORDSZ == 64
+ DO_OBJ(32);
+ DO_OBJ(36);
+ DO_OBJ(40);
+ DO_OBJ(44);
+ DO_OBJ(48);
+ DO_OBJ(52);
+ DO_OBJ(56);
+ DO_OBJ(60);
+# endif
+ p += WORDSZ;
+ }
+# ifdef GATHERSTATS
+ GC_mem_found += n_words_found;
+# endif
+ return(list);
+# undef DO_OBJ
+}
+
+/* Finally the one word case, which never requires any clearing: */
+/*ARGSUSED*/
+ptr_t GC_reclaim1(hbp, hhdr, list, abort_if_found)
+register struct hblk *hbp; /* ptr to current heap block */
+hdr * hhdr;
+bool abort_if_found; /* Abort if a reclaimable object is found */
+register ptr_t list;
+{
+ register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
+ register word *p, *plim;
+# ifdef GATHERSTATS
+ register int n_words_found = 0;
+# endif
+ register word mark_word;
+ register int i;
+# define DO_OBJ(start_displ) \
+ if (!(mark_word & ((word)1 << start_displ))) { \
+ FOUND_FREE(hbp, p - (word *)hbp + start_displ); \
+ p[start_displ] = (word)list; \
+ list = (ptr_t)(p+start_displ); \
+ INCR_WORDS(1); \
+ }
+
+ p = (word *)(hbp->hb_body);
+ plim = (word *)(((word)hbp) + HBLKSIZE);
+
+ /* go through all words in block */
+ while( p < plim ) {
+ mark_word = *mark_word_addr++;
+ for (i = 0; i < WORDSZ; i += 4) {
+ DO_OBJ(0);
+ DO_OBJ(1);
+ DO_OBJ(2);
+ DO_OBJ(3);
+ p += 4;
+ mark_word >>= 4;
+ }
}
# ifdef GATHERSTATS
GC_mem_found += n_words_found;
@@ -383,11 +433,16 @@ int abort_if_found; /* Abort if a reclaimable object is found */
hhdr = HDR(hbp);
sz = hhdr -> hb_sz;
+ hhdr -> hb_last_reclaimed = GC_gc_no;
ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
flh = &(ok -> ok_freelist[sz]);
+ GC_write_hint(hbp);
if (ok -> ok_init) {
switch(sz) {
+ case 1:
+ *flh = GC_reclaim1(hbp, hhdr, *flh, abort_if_found);
+ break;
case 2:
*flh = GC_reclaim_clear2(hbp, hhdr, *flh, abort_if_found);
break;
@@ -400,6 +455,9 @@ int abort_if_found; /* Abort if a reclaimable object is found */
}
} else {
switch(sz) {
+ case 1:
+ *flh = GC_reclaim1(hbp, hhdr, *flh, abort_if_found);
+ break;
case 2:
*flh = GC_reclaim_uninit2(hbp, hhdr, *flh, abort_if_found);
break;
@@ -422,43 +480,29 @@ int abort_if_found; /* Abort if a reclaimable object is found */
*/
void GC_reclaim_block(hbp, abort_if_found)
register struct hblk *hbp; /* ptr to current heap block */
-int abort_if_found; /* Abort if a reclaimable object is found */
+word abort_if_found; /* Abort if a reclaimable object is found */
{
register hdr * hhdr;
register word sz; /* size of objects in current block */
- bool empty; /* used only for PRINTBLOCKS */
register struct obj_kind * ok;
struct hblk ** rlh;
hhdr = HDR(hbp);
sz = hhdr -> hb_sz;
ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
-# ifdef PRINTBLOCKS
- GC_printf1("%ld(", (unsigned long)sz);
- if (hhdr -> hb_obj_kind == PTRFREE) {
- GC_printf0("a");
- } else if (hhdr -> hb_obj_kind == NORMAL){
- GC_printf0("c");
- } else {
- GC_printf0("o");
- }
-# endif
if( sz > MAXOBJSZ ) { /* 1 big object */
- if( mark_bit_from_hdr(hhdr, HDR_WORDS) ) {
- empty = FALSE;
- } else {
+ if( !mark_bit_from_hdr(hhdr, HDR_WORDS) ) {
FOUND_FREE(hbp, HDR_WORDS);
# ifdef GATHERSTATS
GC_mem_found += sz;
# endif
GC_freehblk(hbp);
- empty = TRUE;
}
} else {
- empty = GC_block_empty(hhdr);
+ bool empty = GC_block_empty(hhdr);
if (abort_if_found) {
- GC_reclaim_small_nonempty_block(hbp, abort_if_found);
+ GC_reclaim_small_nonempty_block(hbp, (int)abort_if_found);
} else if (empty) {
# ifdef GATHERSTATS
GC_mem_found += BYTES_TO_WORDS(HBLKSIZE);
@@ -471,9 +515,67 @@ int abort_if_found; /* Abort if a reclaimable object is found */
*rlh = hbp;
}
}
-# ifdef PRINTBLOCKS
- if (empty) {GC_printf0("e),");} else {GC_printf0("n),");}
-# endif
+}
+
+/* Routines to gather and print heap block info, intended */
+/* for debugging. Otherwise they should be called with the */
+/* allocation lock held. */
+static int number_of_blocks;
+static int total_bytes;
+
+/* Number of set bits in a word. Not performance critical. */
+static int set_bits(n)
+word n;
+{
+ register word m = n;
+ register int result = 0;
+
+ while (m > 0) {
+ if (m & 1) result++;
+ m >>= 1;
+ }
+ return(result);
+}
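
A hedged aside, not part of the change: the classic variant below loops once
per set bit instead of once per bit position, by clearing the lowest set bit
on each iteration. The result is identical; it is merely faster on sparse
mark words.

    static int set_bits_fast(unsigned long n)
    {
        int result = 0;

        while (n != 0) {
            n &= n - 1;                 /* clears the lowest set bit */
            result++;
        }
        return result;
    }
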
+
+/* Return the number of set mark bits in the given header */
+int GC_n_set_marks(hhdr)
+hdr * hhdr;
+{
+ register int result = 0;
+ register int i;
+
+ for (i = 0; i < MARK_BITS_SZ; i++) {
+ result += set_bits(hhdr -> hb_marks[i]);
+ }
+ return(result);
+}
+
+/*ARGSUSED*/
+void GC_print_block_descr(h, dummy)
+struct hblk *h;
+word dummy;
+{
+ register hdr * hhdr = HDR(h);
+ register int bytes = WORDS_TO_BYTES(hhdr -> hb_sz);
+
+ GC_printf3("(%lu:%lu,%lu)", (unsigned long)(hhdr -> hb_obj_kind),
+ (unsigned long)bytes,
+ (unsigned long)(GC_n_set_marks(hhdr)));
+ bytes += HDR_BYTES + HBLKSIZE-1;
+ bytes &= ~(HBLKSIZE-1);
+ total_bytes += bytes;
+ number_of_blocks++;
+}
+
+void GC_print_block_list()
+{
+ GC_printf0("(kind(0=ptrfree,1=normal,2=unc.,3=stubborn):size_in_bytes, #_marks_set)\n");
+ number_of_blocks = 0;
+ total_bytes = 0;
+ GC_apply_to_all_blocks(GC_print_block_descr, (word)0);
+ GC_printf2("\nblocks = %lu, bytes = %lu\n",
+ (unsigned long)number_of_blocks,
+ (unsigned long)total_bytes);
}
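
The rounding in GC_print_block_descr charges each object its whole-block
footprint. A hedged worked example, with HBLKSIZE = 4096 and HDR_BYTES = 0
assumed purely for illustration:

    #include <stdio.h>

    #define HBLKSIZE 4096               /* assumption */
    #define HDR_BYTES 0                 /* assumption */

    static unsigned long block_footprint(unsigned long bytes)
    {
        bytes += HDR_BYTES + HBLKSIZE - 1;
        bytes &= ~(unsigned long)(HBLKSIZE - 1);
        return bytes;
    }

    int main(void)
    {
        /* 100 -> 4096, 4096 -> 4096, 4097 -> 8192 */
        printf("%lu %lu %lu\n", block_footprint(100),
               block_footprint(4096), block_footprint(4097));
        return 0;
    }
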
/*
@@ -508,15 +610,13 @@ int abort_if_found; /* Abort if a GC_reclaimable object is found */
# ifdef PRINTBLOCKS
GC_printf0("GC_reclaim: current block sizes:\n");
+ GC_print_block_list();
# endif
/* Go through all heap blocks (in hblklist) and reclaim unmarked objects */
/* or enqueue the block for later processing. */
- GC_apply_to_all_blocks(GC_reclaim_block, abort_if_found);
+ GC_apply_to_all_blocks(GC_reclaim_block, (word)abort_if_found);
-# ifdef PRINTBLOCKS
- GC_printf0("\n");
-# endif
}
/*
@@ -542,3 +642,49 @@ int kind;
if (*flh != 0) break;
}
}
+
+/*
+ * Reclaim all blocks that have been recently reclaimed.
+ * Clear lists of blocks waiting to be reclaimed.
+ * Must be done before clearing mark bits with the world running,
+ * since otherwise a subsequent reclamation of the block would see
+ * the wrong mark bits.
+ * SHOULD PROBABLY BE INCREMENTAL
+ */
+void GC_reclaim_or_delete_all()
+{
+ register word sz;
+ register int kind;
+ register hdr * hhdr;
+ register struct hblk * hbp;
+ register struct obj_kind * ok;
+ struct hblk ** rlh;
+# ifdef PRINTTIMES
+ CLOCK_TYPE start_time;
+ CLOCK_TYPE done_time;
+
+ GET_TIME(start_time);
+# endif
+
+ for (kind = 0; kind < GC_n_kinds; kind++) {
+ ok = &(GC_obj_kinds[kind]);
+ for (sz = 1; sz <= MAXOBJSZ; sz++) {
+ rlh = &(ok -> ok_reclaim_list[sz]);
+ while ((hbp = *rlh) != 0) {
+ hhdr = HDR(hbp);
+ *rlh = hhdr -> hb_next;
+ if (hhdr -> hb_last_reclaimed == GC_gc_no - 1) {
+ /* It's likely we'll need it this time, too */
+ /* It's been touched recently, so this */
+ /* shouldn't trigger paging. */
+ GC_reclaim_small_nonempty_block(hbp, FALSE);
+ }
+ }
+ }
+ }
+# ifdef PRINTTIMES
+ GET_TIME(done_time);
+ GC_printf1("Disposing of reclaim lists took %lu msecs\n",
+ MS_TIME_DIFF(done_time,start_time));
+# endif
+}
diff --git a/rs6000_mach_dep.s b/rs6000_mach_dep.s
index c056f039..e0dbe808 100644
--- a/rs6000_mach_dep.s
+++ b/rs6000_mach_dep.s
@@ -33,71 +33,71 @@
.set r31,31
# Mark from machine registers that are saved by C compiler
- .globl .GC_mark_regs
-.GC_mark_regs:
- .extern .GC_tl_mark
+ .globl .GC_push_regs
+.GC_push_regs:
+ .extern .GC_push_one
stu r1,-64(r1) # reserve stack frame
mflr r0 # save link register
st r0,0x48(r1)
- oril r3,r2,0x0 # GC_mark from r2
- bl .GC_tl_mark
+ oril r3,r2,0x0 # mark from r2
+ bl .GC_push_one
cror 15,15,15
- oril r3,r13,0x0 # GC_mark from r13-r31
- bl .GC_tl_mark
+ oril r3,r13,0x0 # mark from r13-r31
+ bl .GC_push_one
cror 15,15,15
oril r3,r14,0x0
- bl .GC_tl_mark
+ bl .GC_push_one
cror 15,15,15
oril r3,r15,0x0
- bl .GC_tl_mark
+ bl .GC_push_one
cror 15,15,15
oril r3,r16,0x0
- bl .GC_tl_mark
+ bl .GC_push_one
cror 15,15,15
oril r3,r17,0x0
- bl .GC_tl_mark
+ bl .GC_push_one
cror 15,15,15
oril r3,r18,0x0
- bl .GC_tl_mark
+ bl .GC_push_one
cror 15,15,15
oril r3,r19,0x0
- bl .GC_tl_mark
+ bl .GC_push_one
cror 15,15,15
oril r3,r20,0x0
- bl .GC_tl_mark
+ bl .GC_push_one
cror 15,15,15
oril r3,r21,0x0
- bl .GC_tl_mark
+ bl .GC_push_one
cror 15,15,15
oril r3,r22,0x0
- bl .GC_tl_mark
+ bl .GC_push_one
cror 15,15,15
oril r3,r23,0x0
- bl .GC_tl_mark
+ bl .GC_push_one
cror 15,15,15
oril r3,r24,0x0
- bl .GC_tl_mark
+ bl .GC_push_one
cror 15,15,15
oril r3,r25,0x0
- bl .GC_tl_mark
+ bl .GC_push_one
cror 15,15,15
oril r3,r26,0x0
- bl .GC_tl_mark
+ bl .GC_push_one
cror 15,15,15
oril r3,r27,0x0
- bl .GC_tl_mark
+ bl .GC_push_one
cror 15,15,15
oril r3,r28,0x0
- bl .GC_tl_mark
+ bl .GC_push_one
cror 15,15,15
oril r3,r29,0x0
- bl .GC_tl_mark
+ bl .GC_push_one
cror 15,15,15
oril r3,r30,0x0
- bl .GC_tl_mark
+ bl .GC_push_one
cror 15,15,15
oril r3,r31,0x0
- bl .GC_tl_mark
+ bl .GC_push_one
cror 15,15,15
l r0,0x48(r1)
mtlr r0
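In C terms, the routine above hands each callee-saved register (r2, then r13
through r31) to GC_push_one, so that every value held in a register is treated
as a potential root.  The idea, sketched below purely for illustration, cannot
actually be expressed in portable C -- there is no way to name individual
machine registers -- which is why each architecture supplies its own assembly
file.

    /* Illustration only: the effect of GC_push_regs, pretending the */
    /* array holds the live contents of r2 and r13..r31.             */
    void example_push_regs()
    {
        word callee_saved[20];
        int i;

        for (i = 0; i < 20; i++) {
            GC_push_one(callee_saved[i]);  /* consider value as a root */
        }
    }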
diff --git a/setjmp_test.c b/setjmp_test.c
index 6c2c8737..5d7426a4 100644
--- a/setjmp_test.c
+++ b/setjmp_test.c
@@ -45,6 +45,14 @@ getpagesize()
}
#endif
+#ifdef AMIGA
+int
+getpagesize()
+{
+ return(4096);
+}
+#endif
+
#ifdef __OS2__
#define INCL_DOSFILEMGR
#define INCL_DOSMISC
@@ -105,7 +113,7 @@ main()
if (y == 1) {
if (x == 2) {
printf("Generic mark_regs code probably wont work\n");
-# if defined(SPARC) || defined(IBMRS6000) || defined(VAX) || defined(MIPS) || defined(M68K) || defined(I386) || defined(NS32K) || defined(RT)
+# if defined(SPARC) || defined(RS6000) || defined(VAX) || defined(MIPS) || defined(M68K) || defined(I386) || defined(NS32K) || defined(RT)
printf("Assembly code supplied\n");
# else
printf("Need assembly code\n");
diff --git a/stubborn.c b/stubborn.c
new file mode 100644
index 00000000..7b5e56a8
--- /dev/null
+++ b/stubborn.c
@@ -0,0 +1,314 @@
+
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1993 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to copy this garbage collector for any purpose,
+ * provided the above notices are retained on all copies.
+ */
+
+
+#include "gc_private.h"
+
+# ifdef STUBBORN_ALLOC
+/* Stubborn object (hard to change, nearly immutable) allocation. */
+
+
+# ifdef ALL_INTERIOR_POINTERS
+# define SMALL_OBJ(bytes) ((bytes) < WORDS_TO_BYTES(MAXOBJSZ))
+# else
+# define SMALL_OBJ(bytes) ((bytes) <= WORDS_TO_BYTES(MAXOBJSZ))
+# endif
+
+/* Data structure representing immutable objects that */
+/* are still being initialized. */
+/* This is a bit baroque in order to avoid acquiring */
+/* the lock twice for a typical allocation. */
+
+extern_ptr_t * GC_changing_list_start;
+
+# ifdef THREADS
+ VOLATILE extern_ptr_t * VOLATILE GC_changing_list_current;
+# else
+ extern_ptr_t * GC_changing_list_current;
+# endif
+ /* Points at last added element. Also (ab)used for */
+ /* synchronization. Updates and reads are assumed atomic. */
+
+extern_ptr_t * GC_changing_list_limit;
+ /* Points at the last word of the buffer, which is always 0 */
+ /* All entries in (GC_changing_list_current, */
+ /* GC_changing_list_limit] are 0 */
+
+
+void GC_stubborn_init()
+{
+# define INIT_SIZE 10
+
+ GC_changing_list_start = (extern_ptr_t *)
+ GC_generic_malloc_inner(
+ (word)(INIT_SIZE * sizeof(extern_ptr_t)),
+ PTRFREE);
+ if (GC_changing_list_start == 0) {
+ GC_err_printf0("Insufficient space to start up\n");
+ ABORT("GC_stubborn_init: out of space");
+ }
+ bzero((char *)GC_changing_list_start,
+ (int)(INIT_SIZE * sizeof(extern_ptr_t)));
+ GC_changing_list_current = GC_changing_list_start;
+ GC_changing_list_limit = GC_changing_list_start + INIT_SIZE - 1;
+ * GC_changing_list_limit = 0;
+}
+
+/* Compact and possibly grow the changing list. The old copy is */
+/* left alone. Lock must be held. */
+/* When called GC_changing_list_current == GC_changing_list_limit */
+/* which is one past the current element. */
+/* When we finish GC_changing_list_current again points one past last */
+/* element. */
+/* Invariant while this is running: GC_changing_list_current */
+/* points at a word containing 0. */
+/* Returns FALSE on failure. */
+bool GC_compact_changing_list()
+{
+ register extern_ptr_t *p, *q;
+ register int count = 0;
+ word old_size = GC_changing_list_limit-GC_changing_list_start+1;
+ register word new_size = old_size;
+ extern_ptr_t * new_list;
+
+ for (p = GC_changing_list_start; p < GC_changing_list_limit; p++) {
+ if (*p != 0) count++;
+ }
+ if (2 * count > old_size) new_size = 2 * count;
+ new_list = (extern_ptr_t *)
+ GC_generic_malloc_inner(
+ new_size * sizeof(extern_ptr_t), PTRFREE);
+ /* PTRFREE is a lie. But we don't want the collector to */
+ /* consider these. We do want the list itself to be */
+ /* collectable. */
+ if (new_list == 0) return(FALSE);
+ bzero((char *)new_list, (int)(new_size * sizeof(extern_ptr_t)));
+ q = new_list;
+ for (p = GC_changing_list_start; p < GC_changing_list_limit; p++) {
+ if (*p != 0) *q++ = *p;
+ }
+ GC_changing_list_start = new_list;
+ GC_changing_list_limit = new_list + new_size - 1;
+ GC_changing_list_current = q;
+ return(TRUE);
+}
+
+/* Add p to changing list. Clear p on failure. */
+# define ADD_CHANGING(p) \
+ { \
+ register struct hblk * h = HBLKPTR(p); \
+ register word index = PHT_HASH(h); \
+ \
+ set_pht_entry_from_index(GC_changed_pages, index); \
+ } \
+ if (*GC_changing_list_current != 0 \
+ && ++GC_changing_list_current == GC_changing_list_limit) { \
+ if (!GC_compact_changing_list()) (p) = 0; \
+ } \
+ *GC_changing_list_current = p;
+
+void GC_change_stubborn(p)
+extern_ptr_t p;
+{
+ DCL_LOCK_STATE;
+
+ DISABLE_SIGNALS();
+ LOCK();
+ ADD_CHANGING(p);
+ UNLOCK();
+ ENABLE_SIGNALS();
+}
+
+void GC_end_stubborn_change(p)
+extern_ptr_t p;
+{
+# ifdef THREADS
+ register VOLATILE extern_ptr_t * my_current = GC_changing_list_current;
+# else
+ register extern_ptr_t * my_current = GC_changing_list_current;
+# endif
+ register bool tried_quick;
+ DCL_LOCK_STATE;
+
+ if (*my_current == p) {
+ /* Hopefully the normal case. */
+ /* Compaction could not have been running when we started. */
+ *my_current = 0;
+# ifdef THREADS
+ if (my_current == GC_changing_list_current) {
+ /* Compaction can't have run in the interim. */
+ /* We got away with the quick and dirty approach. */
+ return;
+ }
+ tried_quick = TRUE;
+# else
+ return;
+# endif
+ } else {
+ tried_quick = FALSE;
+ }
+ DISABLE_SIGNALS();
+ LOCK();
+ my_current = GC_changing_list_current;
+ for (; my_current >= GC_changing_list_start; my_current--) {
+ if (*my_current == p) {
+ *my_current = 0;
+ UNLOCK();
+ ENABLE_SIGNALS();
+ return;
+ }
+ }
+ if (!tried_quick) {
+ GC_err_printf1("Bad arg to GC_end_stubborn_change: 0x%lx\n",
+ (unsigned long)p);
+ ABORT("Bad arg to GC_end_stubborn_change");
+ }
+ UNLOCK();
+ ENABLE_SIGNALS();
+}
+
+/* Allocate lb bytes of composite (pointerful) data */
+/* No pointer fields may be changed after a call to */
+/* GC_end_stubborn_change(p) where p is the value */
+/* returned by GC_malloc_stubborn. */
+# ifdef __STDC__
+ extern_ptr_t GC_malloc_stubborn(size_t lb)
+# else
+ extern_ptr_t GC_malloc_stubborn(lb)
+ size_t lb;
+# endif
+{
+register ptr_t op;
+register ptr_t *opp;
+register word lw;
+extern_ptr_t result;
+DCL_LOCK_STATE;
+
+ if( SMALL_OBJ(lb) ) {
+# ifdef MERGE_SIZES
+ lw = GC_size_map[lb];
+# else
+ lw = ROUNDED_UP_WORDS(lb);
+# endif
+ opp = &(GC_sobjfreelist[lw]);
+ FASTLOCK();
+ if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
+ FASTUNLOCK();
+ result = GC_generic_malloc((word)lb, STUBBORN);
+ goto record;
+ }
+ *opp = obj_link(op);
+ obj_link(op) = 0;
+ GC_words_allocd += lw;
+ result = (extern_ptr_t) op;
+ ADD_CHANGING(result);
+ FASTUNLOCK();
+ return(result);
+ } else {
+ result = (extern_ptr_t)
+ GC_generic_malloc((word)lb, STUBBORN);
+ }
+record:
+ DISABLE_SIGNALS();
+ LOCK();
+ ADD_CHANGING(result);
+ UNLOCK();
+ ENABLE_SIGNALS();
+ return(result);
+}
+
+
+/* Functions analogous to GC_read_dirty and GC_page_was_dirty. */
+/* Report pages on which stubborn objects were changed. */
+void GC_read_changed()
+{
+ register extern_ptr_t * p = GC_changing_list_start;
+ register extern_ptr_t q;
+ register struct hblk * h;
+ register word index;
+
+ if (p == 0) /* initializing */ return;
+ bcopy((char *)GC_changed_pages, (char *)GC_prev_changed_pages,
+ (int)(sizeof GC_changed_pages));
+ bzero((char *)GC_changed_pages, (int)(sizeof GC_changed_pages));
+ for (; p <= GC_changing_list_current; p++) {
+ if ((q = *p) != 0) {
+ h = HBLKPTR(q);
+ index = PHT_HASH(h);
+ set_pht_entry_from_index(GC_changed_pages, index);
+ }
+ }
+}
+
+bool GC_page_was_changed(h)
+struct hblk * h;
+{
+ register word index = PHT_HASH(h);
+
+ return(get_pht_entry_from_index(GC_prev_changed_pages, index));
+}
+
+/* Remove unreachable entries from changed list. Should only be */
+/* called with mark bits consistent and lock held. */
+void GC_clean_changing_list()
+{
+ register extern_ptr_t * p = GC_changing_list_start;
+ register extern_ptr_t q;
+ register ptr_t r;
+ register unsigned long count = 0;
+ register unsigned long dropped_count = 0;
+
+ if (p == 0) /* initializing */ return;
+ for (; p <= GC_changing_list_current; p++) {
+ if ((q = *p) != 0) {
+ count++;
+ r = (ptr_t)GC_base(q);
+ if (r == 0 || !GC_is_marked(r)) {
+ *p = 0;
+ dropped_count++;
+ }
+ }
+ }
+# ifdef PRINTSTATS
+ if (count > 0) {
+ GC_printf2("%lu entries in changing list: reclaimed %lu\n",
+ (unsigned long)count, (unsigned long)dropped_count);
+ }
+# endif
+}
+
+#else /* !STUBBORN_ALLOC */
+
+# ifdef __STDC__
+ extern_ptr_t GC_malloc_stubborn(size_t lb)
+# else
+ extern_ptr_t GC_malloc_stubborn(lb)
+ size_t lb;
+# endif
+{
+ return(GC_malloc(lb));
+}
+
+/*ARGSUSED*/
+void GC_end_stubborn_change(p)
+extern_ptr_t p;
+{
+}
+
+/*ARGSUSED*/
+void GC_change_stubborn(p)
+extern_ptr_t p;
+{
+}
+
+
+#endif
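The test program below exercises this interface through the GC_MALLOC_STUBBORN
and GC_END_STUBBORN_CHANGE macros.  The bare calling convention, sketched here
under the declarations above: allocate, initialize every pointer field, freeze
the object with GC_end_stubborn_change, and reopen it with GC_change_stubborn
before any later mutation.  (Error handling is omitted from the sketch; the
struct and function names are invented for the example.)

    struct pair { extern_ptr_t head; extern_ptr_t tail; };

    extern_ptr_t example_frozen_pair(x, y)
    extern_ptr_t x, y;
    {
        struct pair * p = (struct pair *)
                              GC_malloc_stubborn(sizeof(struct pair));

        p -> head = x;                    /* still open for writes */
        p -> tail = y;
        GC_end_stubborn_change((extern_ptr_t)p);  /* freeze            */

        GC_change_stubborn((extern_ptr_t)p);      /* reopen ...        */
        p -> tail = 0;
        GC_end_stubborn_change((extern_ptr_t)p);  /* ... and refreeze  */
        return((extern_ptr_t)p);
    }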
diff --git a/test.c b/test.c
index be4fa067..c8f39511 100644
--- a/test.c
+++ b/test.c
@@ -8,6 +8,15 @@
# include "th/PCR_Th.h"
# endif
+# ifdef AMIGA
+ long __stack = 200000;
+# define FAR __far
+# else
+# define FAR
+# endif
+
+# define FAIL abort()
+
/* AT_END may be defined to exercise the interior pointer test */
/* if the collector is configured with ALL_INTERIOR_POINTERS. */
/* As it stands, this test should succeed with either */
@@ -47,7 +56,7 @@ sexpr y;
register int *p;
register my_extra = extra_count;
- r = (sexpr) GC_MALLOC(sizeof(struct SEXPR) + my_extra);
+ r = (sexpr) GC_MALLOC_STUBBORN(sizeof(struct SEXPR) + my_extra);
if (r == 0) {
(void)printf("Out of memory\n");
exit(1);
@@ -55,8 +64,8 @@ sexpr y;
for (p = (int *)r;
((char *)p) < ((char *)r) + my_extra + sizeof(struct SEXPR); p++) {
if (*p) {
- (void)printf("Found nonzero at %X\n - allocator is broken", p);
- exit(1);
+ (void)printf("Found nonzero at %X - allocator is broken\n", p);
+ FAIL;
}
*p = 13;
}
@@ -65,7 +74,13 @@ sexpr y;
# endif
r -> sexpr_car = x;
r -> sexpr_cdr = y;
- extra_count = (my_extra + 1) % 5000;
+ my_extra++;
+ if ( my_extra >= 5000 ) {
+ extra_count = 0;
+ } else {
+ extra_count = my_extra;
+ }
+ GC_END_STUBBORN_CHANGE((char *)r);
return(r);
}
@@ -74,7 +89,6 @@ sexpr x;
sexpr y;
{
register sexpr r;
- register int *p;
r = (sexpr) GC_MALLOC(sizeof(struct SEXPR));
if (r == 0) {
@@ -86,6 +100,21 @@ sexpr y;
return(r);
}
+sexpr small_cons_uncollectable (x, y)
+sexpr x;
+sexpr y;
+{
+ register sexpr r;
+
+ r = (sexpr) GC_MALLOC_UNCOLLECTABLE(sizeof(struct SEXPR));
+ if (r == 0) {
+ (void)printf("Out of memory\n");
+ exit(1);
+ }
+ r -> sexpr_car = x;
+ r -> sexpr_cdr = (sexpr) (~(unsigned long)y);
+ return(r);
+}
/* Return reverse(x) concatenated with y */
sexpr reverse1(x, y)
@@ -110,7 +139,20 @@ int low, up;
if (low > up) {
return(nil);
} else {
- return(small_cons(small_cons((sexpr)low, 0), ints(low+1, up)));
+ return(small_cons(small_cons((sexpr)low, (sexpr)0), ints(low+1, up)));
+ }
+}
+
+/* To check uncollectable allocation we build lists with disguised cdr */
+/* pointers, and make sure they don't go away. */
+sexpr uncollectable_ints(low, up)
+int low, up;
+{
+ if (low > up) {
+ return(nil);
+ } else {
+ return(small_cons_uncollectable(small_cons((sexpr)low, (sexpr)0),
+ uncollectable_ints(low+1, up)));
}
}
@@ -133,6 +175,27 @@ int low, up;
}
}
+# define UNCOLLECTABLE_CDR(x) (sexpr)(~(unsigned long)(cdr(x)))
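+/* The cdr fields stored by small_cons_uncollectable are bitwise       */
+/* complemented, so the collector cannot recognize them as pointers;   */
+/* the tail of each list stays live only because uncollectable objects */
+/* are never reclaimed.  UNCOLLECTABLE_CDR undoes the disguise.        */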
+
+void check_uncollectable_ints(list, low, up)
+sexpr list;
+int low, up;
+{
+ if ((int)(car(car(list))) != low) {
+ (void)printf(
+ "Uncollectable list corrupted - collector is broken\n");
+ exit(1);
+ }
+ if (low == up) {
+ if (UNCOLLECTABLE_CDR(list) != nil) {
+ (void)printf("Uncollectable ist too long - collector is broken\n");
+ exit(1);
+ }
+ } else {
+ check_uncollectable_ints(UNCOLLECTABLE_CDR(list), low+1, up);
+ }
+}
+
/* Not used, but useful for debugging: */
void print_int_list(x)
sexpr x;
@@ -166,14 +229,27 @@ reverse_test()
int i;
sexpr b;
sexpr c;
+ sexpr d;
+ sexpr e;
+# define BIG 4500
- a = ints(1, 100);
+ a = ints(1, 49);
b = ints(1, 50);
- c = ints(1, 4500);
+ c = ints(1, BIG);
+ d = uncollectable_ints(1, 100);
+ e = uncollectable_ints(1, 1);
+ /* Superficially test interior pointer recognition on stack */
+ c = (sexpr)((char *)c + sizeof(char *));
+ d = (sexpr)((char *)d + sizeof(char *));
+# ifdef __STDC__
+ GC_FREE((void *)e);
+# else
+ GC_FREE((char *)e);
+# endif
for (i = 0; i < 50; i++) {
b = reverse(reverse(b));
}
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < 60; i++) {
/* This maintains the invariant that a always points to a list of */
/* 49 integers. Thus this is thread safe without locks. */
a = reverse(reverse(a));
@@ -182,13 +258,16 @@ reverse_test()
if (i & 1) {
a = (sexpr)GC_REALLOC((void_star)a, 500);
} else {
- a = (sexpr)GC_REALLOC((void_star)a, 4200);
+ a = (sexpr)GC_REALLOC((void_star)a, 8200);
}
# endif
}
- check_ints(a,1,100);
+ check_ints(a,1,49);
check_ints(b,1,50);
- check_ints(c,1,4500);
+ c = (sexpr)((char *)c - sizeof(char *));
+ d = (sexpr)((char *)d - sizeof(char *));
+ check_ints(c,1,BIG);
+ check_uncollectable_ints(d, 1, 100);
a = b = c = 0;
}
@@ -217,13 +296,17 @@ int dropped_something = 0;
tn * t = (tn *)obj;
if ((int)client_data != t -> level) {
(void)printf("Wrong finalization data - collector is broken\n");
- exit(1);
+ FAIL;
}
finalized_count++;
}
size_t counter = 0;
+# define MAX_FINALIZED 8000
+FAR GC_word live_indicators[MAX_FINALIZED] = {0};
+int live_indicators_count = 0;
+
tn * mktree(n)
int n;
{
@@ -237,9 +320,35 @@ int n;
result -> level = n;
result -> lchild = mktree(n-1);
result -> rchild = mktree(n-1);
+ if (counter++ % 17 == 0 && n >= 2) {
+ tn * tmp = result -> lchild -> rchild;
+
+ result -> lchild -> rchild = result -> rchild -> lchild;
+ result -> rchild -> lchild = tmp;
+ }
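+ /* The swap above exchanges two subtrees of equal level, so chktree */
+ /* still sees consistent levels, but the collector must trace       */
+ /* pointers that no longer match the order of construction.         */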
if (counter++ % 119 == 0) {
GC_REGISTER_FINALIZER((void_star)result, finalizer, (void_star)n,
(GC_finalization_proc *)0, (void_star *)0);
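+ /* Exercise disappearing links: register one, unregister it (both */
+ /* calls must succeed), then register it again so that the word   */
+ /* really is cleared once result becomes unreachable.             */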
+ live_indicators[live_indicators_count] = 13;
+ if (GC_general_register_disappearing_link(
+ (void_star *)(&(live_indicators[live_indicators_count])),
+ (void_star)result) != 0) {
+ printf("GC_general_register_disappearing_link failed\n");
+ FAIL;
+ }
+ if (GC_unregister_disappearing_link(
+ (void_star *)
+ (&(live_indicators[live_indicators_count]))) == 0) {
+ printf("GC_unregister_disappearing_link failed\n");
+ FAIL;
+ }
+ if (GC_general_register_disappearing_link(
+ (void_star *)(&(live_indicators[live_indicators_count])),
+ (void_star)result) != 0) {
+ printf("GC_general_register_disappearing_link failed 2\n");
+ FAIL;
+ }
+ live_indicators_count++;
# ifdef PCR
PCR_ThCrSec_EnterSys();
/* Losing a count here causes erroneous report of failure. */
@@ -258,12 +367,12 @@ int n;
{
if (n == 0 && t != 0) {
(void)printf("Clobbered a leaf - collector is broken\n");
- exit(1);
+ FAIL;
}
if (n == 0) return;
if (t -> level != n) {
(void)printf("Lost a node at level %d - collector is broken\n", n);
- exit(1);
+ FAIL;
}
if (counter++ % 373 == 0) (void) GC_MALLOC(counter%5001);
chktree(t -> lchild, n-1);
@@ -279,7 +388,7 @@ int n;
for (i = 0; i < n; i += 8) {
if (GC_MALLOC_ATOMIC(8) == 0) {
(void)printf("Out of memory\n");
- exit(1);
+ FAIL;
}
}
}
@@ -293,7 +402,7 @@ tree_test()
chktree(root, 16);
if (finalized_count && ! dropped_something) {
(void)printf("Premature finalization - collector is broken\n");
- exit(1);
+ FAIL;
}
dropped_something = 1;
root = mktree(16);
@@ -323,27 +432,55 @@ void run_one_test()
void check_heap_stats()
{
+ unsigned long max_heap_sz;
+ register int i;
+ int still_live;
+
+ if (sizeof(char *) > 4) {
+ max_heap_sz = 13000000;
+ } else {
+ max_heap_sz = 10000000;
+ }
+# ifdef GC_DEBUG
+ max_heap_sz *= 2;
+# endif
+ /* Garbage collect repeatedly so that all inaccessible objects */
+ /* can be finalized. */
+ for (i = 0; i < 16; i++) {
+ GC_gcollect();
+ }
(void)printf("Completed %d tests\n", n_tests);
(void)printf("Finalized %d/%d objects - ",
finalized_count, finalizable_count);
if (finalized_count > finalizable_count
|| finalized_count < finalizable_count/2) {
(void)printf ("finalization is probably broken\n");
- exit(1);
+ FAIL;
} else {
(void)printf ("finalization is probably ok\n");
}
+ still_live = 0;
+ for (i = 0; i < MAX_FINALIZED; i++) {
+ if (live_indicators[i] != 0) {
+ still_live++;
+ }
+ }
+ if (still_live != finalizable_count - finalized_count) {
+ (void)printf
+ ("%d disappearing links remain - disappearing links are broken\n",
+ still_live);
+ FAIL;
+ }
(void)printf("Total number of bytes allocated is %d\n",
WORDS_TO_BYTES(GC_words_allocd + GC_words_allocd_before_gc));
(void)printf("Final heap size is %d bytes\n", GC_heapsize);
if (WORDS_TO_BYTES(GC_words_allocd + GC_words_allocd_before_gc)
< 33500000*n_tests) {
(void)printf("Incorrect execution - missed some allocations\n");
- exit(1);
+ FAIL;
}
- if (GC_heapsize > 10000000*n_tests) {
+ if (GC_heapsize > max_heap_sz*n_tests) {
(void)printf("Unexpected heap growth - collector may be broken\n");
- exit(1);
+ FAIL;
}
(void)printf("Collector appears to work\n");
}
@@ -352,9 +489,28 @@ void check_heap_stats()
main()
{
n_tests = 0;
+# if defined(MPROTECT_VDB) || defined(PROC_VDB)
+ GC_enable_incremental();
+ (void) printf("Switched to incremental mode\n");
+# if defined(MPROTECT_VDB)
+ (void)printf("Emulating dirty bits with mprotect/signals\n");
+# else
+ (void)printf("Reading dirty bits from /proc\n");
+# endif
+# endif
run_one_test();
check_heap_stats();
(void)fflush(stdout);
+# ifdef LINT
+ /* Entry points we should be testing, but aren't */
+ /* Some can be tested by defining GC_DEBUG at the top of this file */
+ GC_noop(GC_expand_hp, GC_add_roots, GC_clear_roots,
+ GC_register_disappearing_link,
+ GC_print_obj, GC_debug_change_stubborn,
+ GC_debug_end_stubborn_change, GC_debug_malloc_uncollectable,
+ GC_debug_free, GC_debug_realloc, GC_generic_malloc_words_small,
+ GC_init, GC_make_closure, GC_debug_invoke_finalizer);
+# endif
return(0);
}
# else
@@ -365,6 +521,7 @@ test()
int code;
n_tests = 0;
+ GC_enable_incremental();
th1 = PCR_Th_Fork(run_one_test, 0);
th2 = PCR_Th_Fork(run_one_test, 0);
run_one_test();